]> git.saurik.com Git - apple/icu.git/blob - icuSources/common/unifiedcache.cpp
58abcfdd3b4f8eeeafa04868baacfd70e5ba2dff
[apple/icu.git] / icuSources / common / unifiedcache.cpp
1 // © 2016 and later: Unicode, Inc. and others.
2 // License & terms of use: http://www.unicode.org/copyright.html
3 /*
4 ******************************************************************************
5 * Copyright (C) 2015, International Business Machines Corporation and
6 * others. All Rights Reserved.
7 ******************************************************************************
8 *
9 * File unifiedcache.cpp
10 ******************************************************************************
11 */
12
#include "unifiedcache.h"

#include <algorithm>          // For std::max()
#include <condition_variable> // For std::condition_variable
#include <mutex>

#include "uassert.h"
#include "uhash.h"
#include "ucln_cmn.h"
21
22 static icu::UnifiedCache *gCache = NULL;
23 static std::mutex &gCacheMutex() {
24 static std::mutex *m = STATIC_NEW(std::mutex);
25 return *m;
26 }
27 static std::condition_variable &gInProgressValueAddedCond() {
28 static std::condition_variable *cv = STATIC_NEW(std::condition_variable);
29 return *cv;
30 }
31 static icu::UInitOnce gCacheInitOnce = U_INITONCE_INITIALIZER;
32
33 static const int32_t MAX_EVICT_ITERATIONS = 10;
34 static const int32_t DEFAULT_MAX_UNUSED = 1000;
35 static const int32_t DEFAULT_PERCENTAGE_OF_IN_USE = 100;
36
37
38 U_CDECL_BEGIN
39 static UBool U_CALLCONV unifiedcache_cleanup() {
40 gCacheInitOnce.reset();
41 if (gCache) {
42 delete gCache;
43 gCache = NULL;
44 }
45 return TRUE;
46 }
47 U_CDECL_END
48
49
50 U_NAMESPACE_BEGIN
51
52 U_CAPI int32_t U_EXPORT2
53 ucache_hashKeys(const UHashTok key) {
54 const CacheKeyBase *ckey = (const CacheKeyBase *) key.pointer;
55 return ckey->hashCode();
56 }
57
58 U_CAPI UBool U_EXPORT2
59 ucache_compareKeys(const UHashTok key1, const UHashTok key2) {
60 const CacheKeyBase *p1 = (const CacheKeyBase *) key1.pointer;
61 const CacheKeyBase *p2 = (const CacheKeyBase *) key2.pointer;
62 return *p1 == *p2;
63 }
64
65 U_CAPI void U_EXPORT2
66 ucache_deleteKey(void *obj) {
67 CacheKeyBase *p = (CacheKeyBase *) obj;
68 delete p;
69 }
70
71 CacheKeyBase::~CacheKeyBase() {
72 }
73
74 static void U_CALLCONV cacheInit(UErrorCode &status) {
75 U_ASSERT(gCache == NULL);
76 ucln_common_registerCleanup(
77 UCLN_COMMON_UNIFIED_CACHE, unifiedcache_cleanup);
78
79 gCache = new UnifiedCache(status);
80 if (gCache == NULL) {
81 status = U_MEMORY_ALLOCATION_ERROR;
82 }
83 if (U_FAILURE(status)) {
84 delete gCache;
85 gCache = NULL;
86 return;
87 }
88 }
89
90 UnifiedCache *UnifiedCache::getInstance(UErrorCode &status) {
91 umtx_initOnce(gCacheInitOnce, &cacheInit, status);
92 if (U_FAILURE(status)) {
93 return NULL;
94 }
95 U_ASSERT(gCache != NULL);
96 return gCache;
97 }
98
99 UnifiedCache::UnifiedCache(UErrorCode &status) :
100 fHashtable(NULL),
101 fEvictPos(UHASH_FIRST),
102 fNumValuesTotal(0),
103 fNumValuesInUse(0),
104 fMaxUnused(DEFAULT_MAX_UNUSED),
105 fMaxPercentageOfInUse(DEFAULT_PERCENTAGE_OF_IN_USE),
106 fAutoEvictedCount(0),
107 fNoValue(nullptr) {
108 if (U_FAILURE(status)) {
109 return;
110 }
111 fNoValue = new SharedObject();
112 if (fNoValue == nullptr) {
113 status = U_MEMORY_ALLOCATION_ERROR;
114 return;
115 }
116 fNoValue->softRefCount = 1; // Add fake references to prevent fNoValue from being deleted
117 fNoValue->hardRefCount = 1; // when other references to it are removed.
118 fNoValue->cachePtr = this;
119
120 fHashtable = uhash_open(
121 &ucache_hashKeys,
122 &ucache_compareKeys,
123 NULL,
124 &status);
125 if (U_FAILURE(status)) {
126 return;
127 }
128 uhash_setKeyDeleter(fHashtable, &ucache_deleteKey);
129 }
130
131 void UnifiedCache::setEvictionPolicy(
132 int32_t count, int32_t percentageOfInUseItems, UErrorCode &status) {
133 if (U_FAILURE(status)) {
134 return;
135 }
136 if (count < 0 || percentageOfInUseItems < 0) {
137 status = U_ILLEGAL_ARGUMENT_ERROR;
138 return;
139 }
140 std::lock_guard<std::mutex> lock(gCacheMutex());
141 fMaxUnused = count;
142 fMaxPercentageOfInUse = percentageOfInUseItems;
143 }
144
145 int32_t UnifiedCache::unusedCount() const {
146 std::lock_guard<std::mutex> lock(gCacheMutex());
147 return uhash_count(fHashtable) - fNumValuesInUse;
148 }
149
150 int64_t UnifiedCache::autoEvictedCount() const {
151 std::lock_guard<std::mutex> lock(gCacheMutex());
152 return fAutoEvictedCount;
153 }
154
155 int32_t UnifiedCache::keyCount() const {
156 std::lock_guard<std::mutex> lock(gCacheMutex());
157 return uhash_count(fHashtable);
158 }
159
160 void UnifiedCache::flush() const {
161 std::lock_guard<std::mutex> lock(gCacheMutex());
162
163 // Use a loop in case cache items that are flushed held hard references to
164 // other cache items making those additional cache items eligible for
165 // flushing.
166 while (_flush(FALSE));
167 }
168
169 void UnifiedCache::handleUnreferencedObject() const {
170 std::lock_guard<std::mutex> lock(gCacheMutex());
171 --fNumValuesInUse;
172 _runEvictionSlice();
173 }
174
175 #ifdef UNIFIED_CACHE_DEBUG
176 #include <stdio.h>
177
178 void UnifiedCache::dump() {
179 UErrorCode status = U_ZERO_ERROR;
180 const UnifiedCache *cache = getInstance(status);
181 if (U_FAILURE(status)) {
182 fprintf(stderr, "Unified Cache: Error fetching cache.\n");
183 return;
184 }
185 cache->dumpContents();
186 }
187
188 void UnifiedCache::dumpContents() const {
189 std::lock_guard<std::mutex> lock(gCacheMutex());
190 _dumpContents();
191 }
192
193 // Dumps content of cache.
194 // On entry, gCacheMutex must be held.
195 // On exit, cache contents dumped to stderr.
196 void UnifiedCache::_dumpContents() const {
197 int32_t pos = UHASH_FIRST;
198 const UHashElement *element = uhash_nextElement(fHashtable, &pos);
199 char buffer[256];
200 int32_t cnt = 0;
201 for (; element != NULL; element = uhash_nextElement(fHashtable, &pos)) {
202 const SharedObject *sharedObject =
203 (const SharedObject *) element->value.pointer;
204 const CacheKeyBase *key =
205 (const CacheKeyBase *) element->key.pointer;
206 if (sharedObject->hasHardReferences()) {
207 ++cnt;
208 fprintf(
209 stderr,
210 "Unified Cache: Key '%s', error %d, value %p, total refcount %d, soft refcount %d\n",
211 key->writeDescription(buffer, 256),
212 key->creationStatus,
213 sharedObject == fNoValue ? NULL :sharedObject,
214 sharedObject->getRefCount(),
215 sharedObject->getSoftRefCount());
216 }
217 }
218 fprintf(stderr, "Unified Cache: %d out of a total of %d still have hard references\n", cnt, uhash_count(fHashtable));
219 }
220 #endif
221
222 UnifiedCache::~UnifiedCache() {
223 // Try our best to clean up first.
224 flush();
225 {
226 // Now all that should be left in the cache are entries that refer to
227 // each other and entries with hard references from outside the cache.
228 // Nothing we can do about these so proceed to wipe out the cache.
229 std::lock_guard<std::mutex> lock(gCacheMutex());
230 _flush(TRUE);
231 }
232 uhash_close(fHashtable);
233 fHashtable = nullptr;
234 delete fNoValue;
235 fNoValue = nullptr;
236 }
237
238 const UHashElement *
239 UnifiedCache::_nextElement() const {
240 const UHashElement *element = uhash_nextElement(fHashtable, &fEvictPos);
241 if (element == NULL) {
242 fEvictPos = UHASH_FIRST;
243 return uhash_nextElement(fHashtable, &fEvictPos);
244 }
245 return element;
246 }
247
248 UBool UnifiedCache::_flush(UBool all) const {
249 UBool result = FALSE;
250 int32_t origSize = uhash_count(fHashtable);
251 for (int32_t i = 0; i < origSize; ++i) {
252 const UHashElement *element = _nextElement();
253 if (element == nullptr) {
254 break;
255 }
256 if (all || _isEvictable(element)) {
257 const SharedObject *sharedObject =
258 (const SharedObject *) element->value.pointer;
259 U_ASSERT(sharedObject->cachePtr == this);
260 uhash_removeElement(fHashtable, element);
261 removeSoftRef(sharedObject); // Deletes the sharedObject when softRefCount goes to zero.
262 result = TRUE;
263 }
264 }
265 return result;
266 }
267
268 int32_t UnifiedCache::_computeCountOfItemsToEvict() const {
269 int32_t totalItems = uhash_count(fHashtable);
270 int32_t evictableItems = totalItems - fNumValuesInUse;
271
272 int32_t unusedLimitByPercentage = fNumValuesInUse * fMaxPercentageOfInUse / 100;
273 int32_t unusedLimit = std::max(unusedLimitByPercentage, fMaxUnused);
274 int32_t countOfItemsToEvict = std::max(0, evictableItems - unusedLimit);
275 return countOfItemsToEvict;
276 }
277
278 void UnifiedCache::_runEvictionSlice() const {
279 int32_t maxItemsToEvict = _computeCountOfItemsToEvict();
280 if (maxItemsToEvict <= 0) {
281 return;
282 }
283 for (int32_t i = 0; i < MAX_EVICT_ITERATIONS; ++i) {
284 const UHashElement *element = _nextElement();
285 if (element == nullptr) {
286 break;
287 }
288 if (_isEvictable(element)) {
289 const SharedObject *sharedObject =
290 (const SharedObject *) element->value.pointer;
291 uhash_removeElement(fHashtable, element);
292 removeSoftRef(sharedObject); // Deletes sharedObject when SoftRefCount goes to zero.
293 ++fAutoEvictedCount;
294 if (--maxItemsToEvict == 0) {
295 break;
296 }
297 }
298 }
299 }
300
301 void UnifiedCache::_putNew(
302 const CacheKeyBase &key,
303 const SharedObject *value,
304 const UErrorCode creationStatus,
305 UErrorCode &status) const {
306 if (U_FAILURE(status)) {
307 return;
308 }
309 CacheKeyBase *keyToAdopt = key.clone();
310 if (keyToAdopt == NULL) {
311 status = U_MEMORY_ALLOCATION_ERROR;
312 return;
313 }
314 keyToAdopt->fCreationStatus = creationStatus;
315 if (value->softRefCount == 0) {
316 _registerMaster(keyToAdopt, value);
317 }
318 void *oldValue = uhash_put(fHashtable, keyToAdopt, (void *) value, &status);
319 U_ASSERT(oldValue == nullptr);
320 (void)oldValue;
321 if (U_SUCCESS(status)) {
322 value->softRefCount++;
323 }
324 }
325
326 void UnifiedCache::_putIfAbsentAndGet(
327 const CacheKeyBase &key,
328 const SharedObject *&value,
329 UErrorCode &status) const {
330 std::lock_guard<std::mutex> lock(gCacheMutex());
331 const UHashElement *element = uhash_find(fHashtable, &key);
332 if (element != NULL && !_inProgress(element)) {
333 _fetch(element, value, status);
334 return;
335 }
336 if (element == NULL) {
337 UErrorCode putError = U_ZERO_ERROR;
338 // best-effort basis only.
339 _putNew(key, value, status, putError);
340 } else {
341 _put(element, value, status);
342 }
343 // Run an eviction slice. This will run even if we added a master entry
344 // which doesn't increase the unused count, but that is still o.k
345 _runEvictionSlice();
346 }
347
348
349 UBool UnifiedCache::_poll(
350 const CacheKeyBase &key,
351 const SharedObject *&value,
352 UErrorCode &status) const {
353 U_ASSERT(value == NULL);
354 U_ASSERT(status == U_ZERO_ERROR);
355 std::unique_lock<std::mutex> lock(gCacheMutex());
356 const UHashElement *element = uhash_find(fHashtable, &key);
357
358 // If the hash table contains an inProgress placeholder entry for this key,
359 // this means that another thread is currently constructing the value object.
360 // Loop, waiting for that construction to complete.
361 while (element != NULL && _inProgress(element)) {
362 gInProgressValueAddedCond().wait(lock);
363 element = uhash_find(fHashtable, &key);
364 }
365
366 // If the hash table contains an entry for the key,
367 // fetch out the contents and return them.
368 if (element != NULL) {
369 _fetch(element, value, status);
370 return TRUE;
371 }
372
373 // The hash table contained nothing for this key.
374 // Insert an inProgress place holder value.
375 // Our caller will create the final value and update the hash table.
376 _putNew(key, fNoValue, U_ZERO_ERROR, status);
377 return FALSE;
378 }
379
380 void UnifiedCache::_get(
381 const CacheKeyBase &key,
382 const SharedObject *&value,
383 const void *creationContext,
384 UErrorCode &status) const {
385 U_ASSERT(value == NULL);
386 U_ASSERT(status == U_ZERO_ERROR);
387 if (_poll(key, value, status)) {
388 if (value == fNoValue) {
389 SharedObject::clearPtr(value);
390 }
391 return;
392 }
393 if (U_FAILURE(status)) {
394 return;
395 }
396 value = key.createObject(creationContext, status);
397 U_ASSERT(value == NULL || value->hasHardReferences());
398 U_ASSERT(value != NULL || status != U_ZERO_ERROR);
399 if (value == NULL) {
400 SharedObject::copyPtr(fNoValue, value);
401 }
402 _putIfAbsentAndGet(key, value, status);
403 if (value == fNoValue) {
404 SharedObject::clearPtr(value);
405 }
406 }
407
408 void UnifiedCache::_registerMaster(
409 const CacheKeyBase *theKey, const SharedObject *value) const {
410 theKey->fIsMaster = true;
411 value->cachePtr = this;
412 ++fNumValuesTotal;
413 ++fNumValuesInUse;
414 }
415
416 void UnifiedCache::_put(
417 const UHashElement *element,
418 const SharedObject *value,
419 const UErrorCode status) const {
420 U_ASSERT(_inProgress(element));
421 const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
422 const SharedObject *oldValue = (const SharedObject *) element->value.pointer;
423 theKey->fCreationStatus = status;
424 if (value->softRefCount == 0) {
425 _registerMaster(theKey, value);
426 }
427 value->softRefCount++;
428 UHashElement *ptr = const_cast<UHashElement *>(element);
429 ptr->value.pointer = (void *) value;
430 U_ASSERT(oldValue == fNoValue);
431 removeSoftRef(oldValue);
432
433 // Tell waiting threads that we replace in-progress status with
434 // an error.
435 gInProgressValueAddedCond().notify_all();
436 }
437
438 void UnifiedCache::_fetch(
439 const UHashElement *element,
440 const SharedObject *&value,
441 UErrorCode &status) const {
442 const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
443 status = theKey->fCreationStatus;
444
445 // Since we have the cache lock, calling regular SharedObject add/removeRef
446 // could cause us to deadlock on ourselves since they may need to lock
447 // the cache mutex.
448 removeHardRef(value);
449 value = static_cast<const SharedObject *>(element->value.pointer);
450 addHardRef(value);
451 }
452
453
454 UBool UnifiedCache::_inProgress(const UHashElement* element) const {
455 UErrorCode status = U_ZERO_ERROR;
456 const SharedObject * value = NULL;
457 _fetch(element, value, status);
458 UBool result = _inProgress(value, status);
459 removeHardRef(value);
460 return result;
461 }
462
463 UBool UnifiedCache::_inProgress(
464 const SharedObject* theValue, UErrorCode creationStatus) const {
465 return (theValue == fNoValue && creationStatus == U_ZERO_ERROR);
466 }
467
468 UBool UnifiedCache::_isEvictable(const UHashElement *element) const
469 {
470 const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
471 const SharedObject *theValue =
472 (const SharedObject *) element->value.pointer;
473
474 // Entries that are under construction are never evictable
475 if (_inProgress(theValue, theKey->fCreationStatus)) {
476 return FALSE;
477 }
478
479 // We can evict entries that are either not a master or have just
480 // one reference (The one reference being from the cache itself).
481 return (!theKey->fIsMaster || (theValue->softRefCount == 1 && theValue->noHardReferences()));
482 }
483
484 void UnifiedCache::removeSoftRef(const SharedObject *value) const {
485 U_ASSERT(value->cachePtr == this);
486 U_ASSERT(value->softRefCount > 0);
487 if (--value->softRefCount == 0) {
488 --fNumValuesTotal;
489 if (value->noHardReferences()) {
490 delete value;
491 } else {
492 // This path only happens from flush(all). Which only happens from the
493 // UnifiedCache destructor. Nulling out value.cacheptr changes the behavior
494 // of value.removeRef(), causing the deletion to be done there.
495 value->cachePtr = nullptr;
496 }
497 }
498 }
499
500 int32_t UnifiedCache::removeHardRef(const SharedObject *value) const {
501 int refCount = 0;
502 if (value) {
503 refCount = umtx_atomic_dec(&value->hardRefCount);
504 U_ASSERT(refCount >= 0);
505 if (refCount == 0) {
506 --fNumValuesInUse;
507 }
508 }
509 return refCount;
510 }
511
512 int32_t UnifiedCache::addHardRef(const SharedObject *value) const {
513 int refCount = 0;
514 if (value) {
515 refCount = umtx_atomic_inc(&value->hardRefCount);
516 U_ASSERT(refCount >= 1);
517 if (refCount == 1) {
518 fNumValuesInUse++;
519 }
520 }
521 return refCount;
522 }
523
524 U_NAMESPACE_END