// © 2016 and later: Unicode, Inc. and others.
// License & terms of use: http://www.unicode.org/copyright.html
/*
******************************************************************************
* Copyright (C) 2015, International Business Machines Corporation and
* others. All Rights Reserved.
******************************************************************************
*
* File unifiedcache.cpp
******************************************************************************
*/

#include "unifiedcache.h"

#include <algorithm>  // For std::max()

#include "mutex.h"
#include "uassert.h"
#include "uhash.h"
#include "ucln_cmn.h"
#include "umutex.h"

static icu::UnifiedCache *gCache = NULL;
static UMutex gCacheMutex = U_MUTEX_INITIALIZER;
static UConditionVar gInProgressValueAddedCond = U_CONDITION_INITIALIZER;
static icu::UInitOnce gCacheInitOnce = U_INITONCE_INITIALIZER;

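// Tuning constants for automatic eviction: an eviction slice examines at most
// MAX_EVICT_ITERATIONS entries per call, and the default policy allows up to
// DEFAULT_MAX_UNUSED unreferenced entries (or DEFAULT_PERCENTAGE_OF_IN_USE
// percent of the in-use count, whichever is larger) to remain cached.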
static const int32_t MAX_EVICT_ITERATIONS = 10;
static const int32_t DEFAULT_MAX_UNUSED = 1000;
static const int32_t DEFAULT_PERCENTAGE_OF_IN_USE = 100;


U_CDECL_BEGIN
static UBool U_CALLCONV unifiedcache_cleanup() {
    gCacheInitOnce.reset();
    if (gCache) {
        delete gCache;
        gCache = NULL;
    }
    return TRUE;
}
U_CDECL_END


U_NAMESPACE_BEGIN

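// uhash callbacks used by fHashtable: hash, compare, and delete the
// CacheKeyBase keys that the cache adopts.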
U_CAPI int32_t U_EXPORT2
ucache_hashKeys(const UHashTok key) {
    const CacheKeyBase *ckey = (const CacheKeyBase *) key.pointer;
    return ckey->hashCode();
}

U_CAPI UBool U_EXPORT2
ucache_compareKeys(const UHashTok key1, const UHashTok key2) {
    const CacheKeyBase *p1 = (const CacheKeyBase *) key1.pointer;
    const CacheKeyBase *p2 = (const CacheKeyBase *) key2.pointer;
    return *p1 == *p2;
}

U_CAPI void U_EXPORT2
ucache_deleteKey(void *obj) {
    CacheKeyBase *p = (CacheKeyBase *) obj;
    delete p;
}

CacheKeyBase::~CacheKeyBase() {
}

static void U_CALLCONV cacheInit(UErrorCode &status) {
    U_ASSERT(gCache == NULL);
    ucln_common_registerCleanup(
            UCLN_COMMON_UNIFIED_CACHE, unifiedcache_cleanup);

    gCache = new UnifiedCache(status);
    if (gCache == NULL) {
        status = U_MEMORY_ALLOCATION_ERROR;
    }
    if (U_FAILURE(status)) {
        delete gCache;
        gCache = NULL;
        return;
    }
}

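// Returns the singleton cache instance, lazily constructing it on first use.
// Illustrative sketch of typical use (only functions defined in this file are
// shown):
//
//     UErrorCode status = U_ZERO_ERROR;
//     UnifiedCache *cache = UnifiedCache::getInstance(status);
//     if (U_SUCCESS(status)) {
//         cache->setEvictionPolicy(1000, 100, status);  // count, % of in-use
//     }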
UnifiedCache *UnifiedCache::getInstance(UErrorCode &status) {
    umtx_initOnce(gCacheInitOnce, &cacheInit, status);
    if (U_FAILURE(status)) {
        return NULL;
    }
    U_ASSERT(gCache != NULL);
    return gCache;
}

UnifiedCache::UnifiedCache(UErrorCode &status) :
        fHashtable(NULL),
        fEvictPos(UHASH_FIRST),
        fNumValuesTotal(0),
        fNumValuesInUse(0),
        fMaxUnused(DEFAULT_MAX_UNUSED),
        fMaxPercentageOfInUse(DEFAULT_PERCENTAGE_OF_IN_USE),
        fAutoEvictedCount(0),
        fNoValue(nullptr) {
    if (U_FAILURE(status)) {
        return;
    }
    fNoValue = new SharedObject();
    if (fNoValue == nullptr) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return;
    }
    fNoValue->softRefCount = 1;  // Add fake references to prevent fNoValue from being deleted
    fNoValue->hardRefCount = 1;  // when other references to it are removed.
    fNoValue->cachePtr = this;

    fHashtable = uhash_open(
            &ucache_hashKeys,
            &ucache_compareKeys,
            NULL,
            &status);
    if (U_FAILURE(status)) {
        return;
    }
    uhash_setKeyDeleter(fHashtable, &ucache_deleteKey);
}

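// Sets the limits that drive automatic eviction: the cache may keep up to
// max(count, percentageOfInUseItems% of the in-use entries) unused entries;
// anything beyond that becomes a candidate for eviction (see
// _computeCountOfItemsToEvict()).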
void UnifiedCache::setEvictionPolicy(
        int32_t count, int32_t percentageOfInUseItems, UErrorCode &status) {
    if (U_FAILURE(status)) {
        return;
    }
    if (count < 0 || percentageOfInUseItems < 0) {
        status = U_ILLEGAL_ARGUMENT_ERROR;
        return;
    }
    Mutex lock(&gCacheMutex);
    fMaxUnused = count;
    fMaxPercentageOfInUse = percentageOfInUseItems;
}

int32_t UnifiedCache::unusedCount() const {
    Mutex lock(&gCacheMutex);
    return uhash_count(fHashtable) - fNumValuesInUse;
}

int64_t UnifiedCache::autoEvictedCount() const {
    Mutex lock(&gCacheMutex);
    return fAutoEvictedCount;
}

int32_t UnifiedCache::keyCount() const {
    Mutex lock(&gCacheMutex);
    return uhash_count(fHashtable);
}

void UnifiedCache::flush() const {
    Mutex lock(&gCacheMutex);

    // Loop because flushing cache items can release hard references they held
    // to other cache items, making those additional items eligible for
    // flushing as well.
    while (_flush(FALSE));
}

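// Called when a cached value loses its last hard (external) reference.
// Updates the in-use count and gives automatic eviction a chance to run.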
void UnifiedCache::handleUnreferencedObject() const {
    Mutex lock(&gCacheMutex);
    --fNumValuesInUse;
    _runEvictionSlice();
}

#ifdef UNIFIED_CACHE_DEBUG
#include <stdio.h>

void UnifiedCache::dump() {
    UErrorCode status = U_ZERO_ERROR;
    const UnifiedCache *cache = getInstance(status);
    if (U_FAILURE(status)) {
        fprintf(stderr, "Unified Cache: Error fetching cache.\n");
        return;
    }
    cache->dumpContents();
}

void UnifiedCache::dumpContents() const {
    Mutex lock(&gCacheMutex);
    _dumpContents();
}

// Dumps content of cache.
// On entry, gCacheMutex must be held.
// On exit, cache contents dumped to stderr.
void UnifiedCache::_dumpContents() const {
    int32_t pos = UHASH_FIRST;
    const UHashElement *element = uhash_nextElement(fHashtable, &pos);
    char buffer[256];
    int32_t cnt = 0;
    for (; element != NULL; element = uhash_nextElement(fHashtable, &pos)) {
        const SharedObject *sharedObject =
                (const SharedObject *) element->value.pointer;
        const CacheKeyBase *key =
                (const CacheKeyBase *) element->key.pointer;
        if (sharedObject->hasHardReferences()) {
            ++cnt;
            fprintf(
                    stderr,
                    "Unified Cache: Key '%s', error %d, value %p, total refcount %d, soft refcount %d\n",
                    key->writeDescription(buffer, 256),
                    key->fCreationStatus,
                    sharedObject == fNoValue ? NULL : sharedObject,
                    sharedObject->getRefCount(),
                    sharedObject->getSoftRefCount());
        }
    }
    fprintf(stderr, "Unified Cache: %d out of a total of %d still have hard references\n", cnt, uhash_count(fHashtable));
}
#endif

UnifiedCache::~UnifiedCache() {
    // Try our best to clean up first.
    flush();
    {
        // Now all that should be left in the cache are entries that refer to
        // each other and entries with hard references from outside the cache.
        // Nothing we can do about these so proceed to wipe out the cache.
        Mutex lock(&gCacheMutex);
        _flush(TRUE);
    }
    uhash_close(fHashtable);
    fHashtable = nullptr;
    delete fNoValue;
    fNoValue = nullptr;
}

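// Returns the next element in fHashtable for eviction scanning, wrapping
// around to the beginning of the table when the end is reached. Returns NULL
// only when the hash table is empty.
// On entry, gCacheMutex must be held.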
const UHashElement *
UnifiedCache::_nextElement() const {
    const UHashElement *element = uhash_nextElement(fHashtable, &fEvictPos);
    if (element == NULL) {
        fEvictPos = UHASH_FIRST;
        return uhash_nextElement(fHashtable, &fEvictPos);
    }
    return element;
}

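// Removes all evictable entries from the cache; if all is TRUE, removes every
// entry regardless of the eviction rules. Returns TRUE if anything was removed.
// On entry, gCacheMutex must be held.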
UBool UnifiedCache::_flush(UBool all) const {
    UBool result = FALSE;
    int32_t origSize = uhash_count(fHashtable);
    for (int32_t i = 0; i < origSize; ++i) {
        const UHashElement *element = _nextElement();
        if (element == nullptr) {
            break;
        }
        if (all || _isEvictable(element)) {
            const SharedObject *sharedObject =
                    (const SharedObject *) element->value.pointer;
            U_ASSERT(sharedObject->cachePtr == this);
            uhash_removeElement(fHashtable, element);
            removeSoftRef(sharedObject);    // Deletes the sharedObject when softRefCount goes to zero.
            result = TRUE;
        }
    }
    return result;
}

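// Computes how many unused entries are currently over the eviction limit.
// The limit is the larger of fMaxUnused and fMaxPercentageOfInUse percent of
// the in-use count. Returns 0 when nothing needs to be evicted.
// On entry, gCacheMutex must be held.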
int32_t UnifiedCache::_computeCountOfItemsToEvict() const {
    int32_t totalItems = uhash_count(fHashtable);
    int32_t evictableItems = totalItems - fNumValuesInUse;

    int32_t unusedLimitByPercentage = fNumValuesInUse * fMaxPercentageOfInUse / 100;
    int32_t unusedLimit = std::max(unusedLimitByPercentage, fMaxUnused);
    int32_t countOfItemsToEvict = std::max(0, evictableItems - unusedLimit);
    return countOfItemsToEvict;
}

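// Performs one incremental slice of eviction work: examines at most
// MAX_EVICT_ITERATIONS entries and evicts the evictable ones until the
// computed over-limit count is reached.
// On entry, gCacheMutex must be held.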
void UnifiedCache::_runEvictionSlice() const {
    int32_t maxItemsToEvict = _computeCountOfItemsToEvict();
    if (maxItemsToEvict <= 0) {
        return;
    }
    for (int32_t i = 0; i < MAX_EVICT_ITERATIONS; ++i) {
        const UHashElement *element = _nextElement();
        if (element == nullptr) {
            break;
        }
        if (_isEvictable(element)) {
            const SharedObject *sharedObject =
                    (const SharedObject *) element->value.pointer;
            uhash_removeElement(fHashtable, element);
            removeSoftRef(sharedObject);    // Deletes sharedObject when softRefCount goes to zero.
            ++fAutoEvictedCount;
            if (--maxItemsToEvict == 0) {
                break;
            }
        }
    }
}

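// Adds a new entry to the hash table, adopting a clone of the key and storing
// the creation status with it. Registers the value as a master entry if the
// cache does not yet own it, and adds a soft (cache) reference to it.
// On entry, gCacheMutex must be held.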
void UnifiedCache::_putNew(
        const CacheKeyBase &key,
        const SharedObject *value,
        const UErrorCode creationStatus,
        UErrorCode &status) const {
    if (U_FAILURE(status)) {
        return;
    }
    CacheKeyBase *keyToAdopt = key.clone();
    if (keyToAdopt == NULL) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return;
    }
    keyToAdopt->fCreationStatus = creationStatus;
    if (value->softRefCount == 0) {
        _registerMaster(keyToAdopt, value);
    }
    void *oldValue = uhash_put(fHashtable, keyToAdopt, (void *) value, &status);
    U_ASSERT(oldValue == nullptr);
    (void)oldValue;
    if (U_SUCCESS(status)) {
        value->softRefCount++;
    }
}

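// Stores the caller's newly created value (and its creation status) under the
// key, unless another thread has already stored a finished value there, in
// which case the caller's reference is switched to the cached value instead.
// Finishes by running an eviction slice.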
void UnifiedCache::_putIfAbsentAndGet(
        const CacheKeyBase &key,
        const SharedObject *&value,
        UErrorCode &status) const {
    Mutex lock(&gCacheMutex);
    const UHashElement *element = uhash_find(fHashtable, &key);
    if (element != NULL && !_inProgress(element)) {
        _fetch(element, value, status);
        return;
    }
    if (element == NULL) {
        UErrorCode putError = U_ZERO_ERROR;
        // Adding the entry is done on a best-effort basis only; any error is ignored.
        _putNew(key, value, status, putError);
    } else {
        _put(element, value, status);
    }
    // Run an eviction slice. This will run even if we added a master entry
    // which doesn't increase the unused count, but that is still OK.
    _runEvictionSlice();
}


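// Checks whether the cache already holds a finished value for the key.
// If another thread is constructing the value, waits for it to finish.
// Returns TRUE and fetches the value (or its creation error) if present;
// otherwise inserts an in-progress placeholder (fNoValue) and returns FALSE,
// signalling that the caller must create and store the value itself.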
UBool UnifiedCache::_poll(
        const CacheKeyBase &key,
        const SharedObject *&value,
        UErrorCode &status) const {
    U_ASSERT(value == NULL);
    U_ASSERT(status == U_ZERO_ERROR);
    Mutex lock(&gCacheMutex);
    const UHashElement *element = uhash_find(fHashtable, &key);

    // If the hash table contains an inProgress placeholder entry for this key,
    // this means that another thread is currently constructing the value object.
    // Loop, waiting for that construction to complete.
    while (element != NULL && _inProgress(element)) {
        umtx_condWait(&gInProgressValueAddedCond, &gCacheMutex);
        element = uhash_find(fHashtable, &key);
    }

    // If the hash table contains an entry for the key,
    // fetch out the contents and return them.
    if (element != NULL) {
        _fetch(element, value, status);
        return TRUE;
    }

    // The hash table contained nothing for this key.
    // Insert an inProgress placeholder value.
    // Our caller will create the final value and update the hash table.
    _putNew(key, fNoValue, U_ZERO_ERROR, status);
    return FALSE;
}

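// Retrieves the value for the key, creating it with key.createObject() and
// adding it to the cache on a miss. The object is created without holding
// gCacheMutex. On return, value holds a hard reference to the cached object,
// or is left NULL with status set to the creation error.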
void UnifiedCache::_get(
        const CacheKeyBase &key,
        const SharedObject *&value,
        const void *creationContext,
        UErrorCode &status) const {
    U_ASSERT(value == NULL);
    U_ASSERT(status == U_ZERO_ERROR);
    if (_poll(key, value, status)) {
        if (value == fNoValue) {
            SharedObject::clearPtr(value);
        }
        return;
    }
    if (U_FAILURE(status)) {
        return;
    }
    value = key.createObject(creationContext, status);
    U_ASSERT(value == NULL || value->hasHardReferences());
    U_ASSERT(value != NULL || status != U_ZERO_ERROR);
    if (value == NULL) {
        SharedObject::copyPtr(fNoValue, value);
    }
    _putIfAbsentAndGet(key, value, status);
    if (value == fNoValue) {
        SharedObject::clearPtr(value);
    }
}

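// Marks the key/value pair as a master entry, i.e. the entry through which
// the cache owns the value, and updates the value counts.
// On entry, gCacheMutex must be held.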
void UnifiedCache::_registerMaster(
        const CacheKeyBase *theKey, const SharedObject *value) const {
    theKey->fIsMaster = true;
    value->cachePtr = this;
    ++fNumValuesTotal;
    ++fNumValuesInUse;
}

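// Replaces the in-progress placeholder for an entry with the final value and
// its creation status, then wakes any threads waiting in _poll().
// On entry, gCacheMutex must be held.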
void UnifiedCache::_put(
        const UHashElement *element,
        const SharedObject *value,
        const UErrorCode status) const {
    U_ASSERT(_inProgress(element));
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    const SharedObject *oldValue = (const SharedObject *) element->value.pointer;
    theKey->fCreationStatus = status;
    if (value->softRefCount == 0) {
        _registerMaster(theKey, value);
    }
    value->softRefCount++;
    UHashElement *ptr = const_cast<UHashElement *>(element);
    ptr->value.pointer = (void *) value;
    U_ASSERT(oldValue == fNoValue);
    removeSoftRef(oldValue);

    // Tell waiting threads that the in-progress placeholder has been replaced
    // with the final value (or an error status).
    umtx_condBroadcast(&gInProgressValueAddedCond);
}

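// Copies the value and creation status out of a hash table entry into the
// caller's value reference, swapping the caller's hard reference for one on
// the cached value.
// On entry, gCacheMutex must be held.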
void UnifiedCache::_fetch(
        const UHashElement *element,
        const SharedObject *&value,
        UErrorCode &status) const {
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    status = theKey->fCreationStatus;

    // Since we have the cache lock, calling regular SharedObject add/removeRef
    // could cause us to deadlock on ourselves since they may need to lock
    // the cache mutex.
    removeHardRef(value);
    value = static_cast<const SharedObject *>(element->value.pointer);
    addHardRef(value);
}


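// Returns TRUE if the hash table entry is still an in-progress placeholder,
// meaning the real value has not yet been stored for its key.
// On entry, gCacheMutex must be held.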
UBool UnifiedCache::_inProgress(const UHashElement* element) const {
    UErrorCode status = U_ZERO_ERROR;
    const SharedObject * value = NULL;
    _fetch(element, value, status);
    UBool result = _inProgress(value, status);
    removeHardRef(value);
    return result;
}

UBool UnifiedCache::_inProgress(
        const SharedObject* theValue, UErrorCode creationStatus) const {
    return (theValue == fNoValue && creationStatus == U_ZERO_ERROR);
}

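// Returns TRUE if the given hash table entry may be removed by eviction.
// On entry, gCacheMutex must be held.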
UBool UnifiedCache::_isEvictable(const UHashElement *element) const
{
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    const SharedObject *theValue =
            (const SharedObject *) element->value.pointer;

    // Entries that are under construction are never evictable.
    if (_inProgress(theValue, theKey->fCreationStatus)) {
        return FALSE;
    }

    // We can evict entries that are either not a master or have just
    // one reference (the one reference being from the cache itself).
    return (!theKey->fIsMaster || (theValue->softRefCount == 1 && theValue->noHardReferences()));
}

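// Drops one cache (soft) reference to the value, deleting the value once
// neither the cache nor any outside holder still references it.
// On entry, gCacheMutex must be held.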
void UnifiedCache::removeSoftRef(const SharedObject *value) const {
    U_ASSERT(value->cachePtr == this);
    U_ASSERT(value->softRefCount > 0);
    if (--value->softRefCount == 0) {
        --fNumValuesTotal;
        if (value->noHardReferences()) {
            delete value;
        } else {
            // This path only happens from _flush(all), which only happens from the
            // UnifiedCache destructor. Nulling out value->cachePtr changes the behavior
            // of value->removeRef(), causing the deletion to be done there.
            value->cachePtr = nullptr;
        }
    }
}

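// Decrements the hard (external) reference count of the value, if any, and
// updates the in-use count when the last hard reference goes away. Returns
// the new hard reference count.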
int32_t UnifiedCache::removeHardRef(const SharedObject *value) const {
    int refCount = 0;
    if (value) {
        refCount = umtx_atomic_dec(&value->hardRefCount);
        U_ASSERT(refCount >= 0);
        if (refCount == 0) {
            --fNumValuesInUse;
        }
    }
    return refCount;
}

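// Increments the hard (external) reference count of the value, if any, and
// updates the in-use count when the first hard reference appears. Returns
// the new hard reference count.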
int32_t UnifiedCache::addHardRef(const SharedObject *value) const {
    int refCount = 0;
    if (value) {
        refCount = umtx_atomic_inc(&value->hardRefCount);
        U_ASSERT(refCount >= 1);
        if (refCount == 1) {
            fNumValuesInUse++;
        }
    }
    return refCount;
}

U_NAMESPACE_END