// © 2016 and later: Unicode, Inc. and others.
// License & terms of use: http://www.unicode.org/copyright.html
/*
******************************************************************************
* Copyright (C) 2015, International Business Machines Corporation and
* others. All Rights Reserved.
******************************************************************************
*
* File unifiedcache.cpp
******************************************************************************
*/

#include "unifiedcache.h"

#include <algorithm>      // For std::max()
#include <mutex>

#include "uassert.h"
#include "uhash.h"
#include "ucln_cmn.h"
#include "umutex.h"

static icu::UnifiedCache *gCache = NULL;
static std::mutex *gCacheMutex = nullptr;
static std::condition_variable *gInProgressValueAddedCond;
static icu::UInitOnce gCacheInitOnce = U_INITONCE_INITIALIZER;

static const int32_t MAX_EVICT_ITERATIONS = 10;
static const int32_t DEFAULT_MAX_UNUSED = 1000;
static const int32_t DEFAULT_PERCENTAGE_OF_IN_USE = 100;

U_CDECL_BEGIN
static UBool U_CALLCONV unifiedcache_cleanup() {
    gCacheInitOnce.reset();
    delete gCache;
    gCache = nullptr;
    gCacheMutex->~mutex();
    gCacheMutex = nullptr;
    gInProgressValueAddedCond->~condition_variable();
    gInProgressValueAddedCond = nullptr;
    return TRUE;
}
U_CDECL_END

U_NAMESPACE_BEGIN

U_CAPI int32_t U_EXPORT2
ucache_hashKeys(const UHashTok key) {
    const CacheKeyBase *ckey = (const CacheKeyBase *) key.pointer;
    return ckey->hashCode();
}

U_CAPI UBool U_EXPORT2
ucache_compareKeys(const UHashTok key1, const UHashTok key2) {
    const CacheKeyBase *p1 = (const CacheKeyBase *) key1.pointer;
    const CacheKeyBase *p2 = (const CacheKeyBase *) key2.pointer;
    return *p1 == *p2;
}

U_CAPI void U_EXPORT2
ucache_deleteKey(void *obj) {
    CacheKeyBase *p = (CacheKeyBase *) obj;
    delete p;
}

CacheKeyBase::~CacheKeyBase() {
}

static void U_CALLCONV cacheInit(UErrorCode &status) {
    U_ASSERT(gCache == NULL);
    ucln_common_registerCleanup(
            UCLN_COMMON_UNIFIED_CACHE, unifiedcache_cleanup);

    gCacheMutex = STATIC_NEW(std::mutex);
    gInProgressValueAddedCond = STATIC_NEW(std::condition_variable);
    gCache = new UnifiedCache(status);
    if (gCache == NULL) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return;
    }
    if (U_FAILURE(status)) {
        delete gCache;
        gCache = NULL;
        return;
    }
}

UnifiedCache *UnifiedCache::getInstance(UErrorCode &status) {
    umtx_initOnce(gCacheInitOnce, &cacheInit, status);
    if (U_FAILURE(status)) {
        return NULL;
    }
    U_ASSERT(gCache != NULL);
    return gCache;
}

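// A minimal usage sketch: callers obtain the process-wide singleton lazily
// and must check the error code before touching the cache. (Illustrative
// only; the variable names are hypothetical.)
//
//     UErrorCode status = U_ZERO_ERROR;
//     UnifiedCache *cache = UnifiedCache::getInstance(status);
//     if (U_FAILURE(status)) {
//         return;  // The cache could not be created.
//     }
//     // cache stays valid until unifiedcache_cleanup() runs at ICU cleanup.
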
UnifiedCache::UnifiedCache(UErrorCode &status) :
        fHashtable(NULL),
        fEvictPos(UHASH_FIRST),
        fNumValuesTotal(0),
        fNumValuesInUse(0),
        fMaxUnused(DEFAULT_MAX_UNUSED),
        fMaxPercentageOfInUse(DEFAULT_PERCENTAGE_OF_IN_USE),
        fAutoEvictedCount(0),
        fNoValue(nullptr) {
    if (U_FAILURE(status)) {
        return;
    }
    fNoValue = new SharedObject();
    if (fNoValue == nullptr) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return;
    }
    fNoValue->softRefCount = 1;  // Add fake references to prevent fNoValue from being deleted
    fNoValue->hardRefCount = 1;  // when other references to it are removed.
    fNoValue->cachePtr = this;

    fHashtable = uhash_open(
            &ucache_hashKeys,
            &ucache_compareKeys,
            NULL,
            &status);
    if (U_FAILURE(status)) {
        return;
    }
    uhash_setKeyDeleter(fHashtable, &ucache_deleteKey);
}

void UnifiedCache::setEvictionPolicy(
        int32_t count, int32_t percentageOfInUseItems, UErrorCode &status) {
    if (U_FAILURE(status)) {
        return;
    }
    if (count < 0 || percentageOfInUseItems < 0) {
        status = U_ILLEGAL_ARGUMENT_ERROR;
        return;
    }
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    fMaxUnused = count;
    fMaxPercentageOfInUse = percentageOfInUseItems;
}

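// Illustrative call, assuming a cache obtained via getInstance(); the numbers
// are hypothetical examples, not recommended settings.
//
//     UErrorCode status = U_ZERO_ERROR;
//     UnifiedCache *cache = UnifiedCache::getInstance(status);
//     if (U_SUCCESS(status)) {
//         // Allow at most 100 unused entries, or 25% of the in-use count,
//         // whichever limit is larger (see _computeCountOfItemsToEvict()).
//         cache->setEvictionPolicy(100, 25, status);
//     }
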
int32_t UnifiedCache::unusedCount() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    return uhash_count(fHashtable) - fNumValuesInUse;
}

int64_t UnifiedCache::autoEvictedCount() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    return fAutoEvictedCount;
}

int32_t UnifiedCache::keyCount() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    return uhash_count(fHashtable);
}

void UnifiedCache::flush() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);

    // Use a loop in case cache items that are flushed held hard references to
    // other cache items, making those additional cache items eligible for
    // flushing.
    while (_flush(FALSE));
}

void UnifiedCache::handleUnreferencedObject() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    --fNumValuesInUse;
    _runEvictionSlice();
}

#ifdef UNIFIED_CACHE_DEBUG
#include <stdio.h>

void UnifiedCache::dump() {
    UErrorCode status = U_ZERO_ERROR;
    const UnifiedCache *cache = getInstance(status);
    if (U_FAILURE(status)) {
        fprintf(stderr, "Unified Cache: Error fetching cache.\n");
        return;
    }
    cache->dumpContents();
}

void UnifiedCache::dumpContents() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    _dumpContents();
}

// Dumps content of cache.
// On entry, gCacheMutex must be held.
// On exit, cache contents dumped to stderr.
void UnifiedCache::_dumpContents() const {
    int32_t pos = UHASH_FIRST;
    const UHashElement *element = uhash_nextElement(fHashtable, &pos);
    char buffer[256];
    int32_t cnt = 0;
    for (; element != NULL; element = uhash_nextElement(fHashtable, &pos)) {
        const SharedObject *sharedObject =
                (const SharedObject *) element->value.pointer;
        const CacheKeyBase *key =
                (const CacheKeyBase *) element->key.pointer;
        if (sharedObject->hasHardReferences()) {
            ++cnt;
            fprintf(
                    stderr,
                    "Unified Cache: Key '%s', error %d, value %p, total refcount %d, soft refcount %d\n",
                    key->writeDescription(buffer, 256),
                    key->fCreationStatus,
                    sharedObject == fNoValue ? NULL : sharedObject,
                    sharedObject->getRefCount(),
                    sharedObject->getSoftRefCount());
        }
    }
    fprintf(stderr, "Unified Cache: %d out of a total of %d still have hard references\n", cnt, uhash_count(fHashtable));
}
#endif

UnifiedCache::~UnifiedCache() {
    // Try our best to clean up first.
    flush();
    {
        // Now all that should be left in the cache are entries that refer to
        // each other and entries with hard references from outside the cache.
        // Nothing we can do about these so proceed to wipe out the cache.
        std::lock_guard<std::mutex> lock(*gCacheMutex);
        _flush(TRUE);
    }
    uhash_close(fHashtable);
    fHashtable = nullptr;
    delete fNoValue;
    fNoValue = nullptr;
}

const UHashElement *
UnifiedCache::_nextElement() const {
    const UHashElement *element = uhash_nextElement(fHashtable, &fEvictPos);
    if (element == NULL) {
        fEvictPos = UHASH_FIRST;
        return uhash_nextElement(fHashtable, &fEvictPos);
    }
    return element;
}

UBool UnifiedCache::_flush(UBool all) const {
    UBool result = FALSE;
    int32_t origSize = uhash_count(fHashtable);
    for (int32_t i = 0; i < origSize; ++i) {
        const UHashElement *element = _nextElement();
        if (element == nullptr) {
            break;
        }
        if (all || _isEvictable(element)) {
            const SharedObject *sharedObject =
                    (const SharedObject *) element->value.pointer;
            U_ASSERT(sharedObject->cachePtr == this);
            uhash_removeElement(fHashtable, element);
            removeSoftRef(sharedObject);    // Deletes the sharedObject when softRefCount goes to zero.
            result = TRUE;
        }
    }
    return result;
}

int32_t UnifiedCache::_computeCountOfItemsToEvict() const {
    int32_t totalItems = uhash_count(fHashtable);
    int32_t evictableItems = totalItems - fNumValuesInUse;

    int32_t unusedLimitByPercentage = fNumValuesInUse * fMaxPercentageOfInUse / 100;
    int32_t unusedLimit = std::max(unusedLimitByPercentage, fMaxUnused);
    int32_t countOfItemsToEvict = std::max(0, evictableItems - unusedLimit);
    return countOfItemsToEvict;
}

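// Worked example of the computation above, using hypothetical counts and the
// default policy (fMaxUnused = 1000, fMaxPercentageOfInUse = 100): with 1600
// entries in the table of which 400 are in use, evictableItems = 1200,
// unusedLimitByPercentage = 400 * 100 / 100 = 400, unusedLimit =
// max(400, 1000) = 1000, so an eviction slice may remove up to
// 1200 - 1000 = 200 unused entries.
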
void UnifiedCache::_runEvictionSlice() const {
    int32_t maxItemsToEvict = _computeCountOfItemsToEvict();
    if (maxItemsToEvict <= 0) {
        return;
    }
    for (int32_t i = 0; i < MAX_EVICT_ITERATIONS; ++i) {
        const UHashElement *element = _nextElement();
        if (element == nullptr) {
            break;
        }
        if (_isEvictable(element)) {
            const SharedObject *sharedObject =
                    (const SharedObject *) element->value.pointer;
            uhash_removeElement(fHashtable, element);
            removeSoftRef(sharedObject);   // Deletes sharedObject when softRefCount goes to zero.
            ++fAutoEvictedCount;
            if (--maxItemsToEvict == 0) {
                break;
            }
        }
    }
}

void UnifiedCache::_putNew(
        const CacheKeyBase &key,
        const SharedObject *value,
        const UErrorCode creationStatus,
        UErrorCode &status) const {
    if (U_FAILURE(status)) {
        return;
    }
    CacheKeyBase *keyToAdopt = key.clone();
    if (keyToAdopt == NULL) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return;
    }
    keyToAdopt->fCreationStatus = creationStatus;
    if (value->softRefCount == 0) {
        _registerMaster(keyToAdopt, value);
    }
    void *oldValue = uhash_put(fHashtable, keyToAdopt, (void *) value, &status);
    U_ASSERT(oldValue == nullptr);
    (void)oldValue;
    if (U_SUCCESS(status)) {
        value->softRefCount++;
    }
}

void UnifiedCache::_putIfAbsentAndGet(
        const CacheKeyBase &key,
        const SharedObject *&value,
        UErrorCode &status) const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    const UHashElement *element = uhash_find(fHashtable, &key);
    if (element != NULL && !_inProgress(element)) {
        _fetch(element, value, status);
        return;
    }
    if (element == NULL) {
        UErrorCode putError = U_ZERO_ERROR;
        // best-effort basis only.
        _putNew(key, value, status, putError);
    } else {
        _put(element, value, status);
    }

    // Run an eviction slice. This will run even if we added a master entry
    // which doesn't increase the unused count, but that is still o.k.
    _runEvictionSlice();
}

UBool UnifiedCache::_poll(
        const CacheKeyBase &key,
        const SharedObject *&value,
        UErrorCode &status) const {
    U_ASSERT(value == NULL);
    U_ASSERT(status == U_ZERO_ERROR);
    std::unique_lock<std::mutex> lock(*gCacheMutex);
    const UHashElement *element = uhash_find(fHashtable, &key);

    // If the hash table contains an inProgress placeholder entry for this key,
    // this means that another thread is currently constructing the value object.
    // Loop, waiting for that construction to complete.
    while (element != NULL && _inProgress(element)) {
        gInProgressValueAddedCond->wait(lock);
        element = uhash_find(fHashtable, &key);
    }

    // If the hash table contains an entry for the key,
    // fetch out the contents and return them.
    if (element != NULL) {
        _fetch(element, value, status);
        return TRUE;
    }

    // The hash table contained nothing for this key.
    // Insert an inProgress placeholder value.
    // Our caller will create the final value and update the hash table.
    _putNew(key, fNoValue, U_ZERO_ERROR, status);
    return FALSE;
}

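// Sketch of the calling protocol that _poll() assumes; _get() below is the
// real caller, and this outline is illustrative only.
//
//     const SharedObject *value = NULL;
//     UErrorCode status = U_ZERO_ERROR;
//     if (_poll(key, value, status)) {
//         // A real value, or the stored creation error, is now available.
//     } else {
//         // No entry existed: _poll() inserted the fNoValue placeholder, so
//         // this thread must create the object and publish it (or an error)
//         // via _putIfAbsentAndGet(), which wakes any waiting threads.
//     }
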
void UnifiedCache::_get(
        const CacheKeyBase &key,
        const SharedObject *&value,
        const void *creationContext,
        UErrorCode &status) const {
    U_ASSERT(value == NULL);
    U_ASSERT(status == U_ZERO_ERROR);
    if (_poll(key, value, status)) {
        if (value == fNoValue) {
            SharedObject::clearPtr(value);
        }
        return;
    }
    if (U_FAILURE(status)) {
        return;
    }
    value = key.createObject(creationContext, status);
    U_ASSERT(value == NULL || value->hasHardReferences());
    U_ASSERT(value != NULL || status != U_ZERO_ERROR);
    if (value == NULL) {
        SharedObject::copyPtr(fNoValue, value);
    }
    _putIfAbsentAndGet(key, value, status);
    if (value == fNoValue) {
        SharedObject::clearPtr(value);
    }
}

void UnifiedCache::_registerMaster(
        const CacheKeyBase *theKey, const SharedObject *value) const {
    theKey->fIsMaster = true;
    value->cachePtr = this;
    ++fNumValuesTotal;
    ++fNumValuesInUse;
}

void UnifiedCache::_put(
        const UHashElement *element,
        const SharedObject *value,
        const UErrorCode status) const {
    U_ASSERT(_inProgress(element));
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    const SharedObject *oldValue = (const SharedObject *) element->value.pointer;
    theKey->fCreationStatus = status;
    if (value->softRefCount == 0) {
        _registerMaster(theKey, value);
    }
    value->softRefCount++;
    UHashElement *ptr = const_cast<UHashElement *>(element);
    ptr->value.pointer = (void *) value;
    U_ASSERT(oldValue == fNoValue);
    removeSoftRef(oldValue);

    // Tell waiting threads that we replaced the in-progress status with
    // a real value (or an error status).
    gInProgressValueAddedCond->notify_all();
}

void UnifiedCache::_fetch(
        const UHashElement *element,
        const SharedObject *&value,
        UErrorCode &status) const {
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    status = theKey->fCreationStatus;

    // Since we have the cache lock, calling regular SharedObject add/removeRef
    // could cause us to deadlock on ourselves since they may need to lock
    // the cache mutex.
    removeHardRef(value);
    value = static_cast<const SharedObject *>(element->value.pointer);
    addHardRef(value);
}

UBool UnifiedCache::_inProgress(const UHashElement* element) const {
    UErrorCode status = U_ZERO_ERROR;
    const SharedObject * value = NULL;
    _fetch(element, value, status);
    UBool result = _inProgress(value, status);
    removeHardRef(value);
    return result;
}

UBool UnifiedCache::_inProgress(
        const SharedObject* theValue, UErrorCode creationStatus) const {
    return (theValue == fNoValue && creationStatus == U_ZERO_ERROR);
}

UBool UnifiedCache::_isEvictable(const UHashElement *element) const
{
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    const SharedObject *theValue =
            (const SharedObject *) element->value.pointer;

    // Entries that are under construction are never evictable.
    if (_inProgress(theValue, theKey->fCreationStatus)) {
        return FALSE;
    }

    // We can evict entries that are either not a master or have just
    // one reference (the one reference being from the cache itself).
    return (!theKey->fIsMaster || (theValue->softRefCount == 1 && theValue->noHardReferences()));
}

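// Illustrative consequences of the rule above: an entry whose key is not a
// master is always evictable; a master entry whose value has
// softRefCount == 1 (the cache's own reference) and no hard references is
// evictable; a master entry whose value a client still holds a hard
// reference to is never evicted by this path.
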
void UnifiedCache::removeSoftRef(const SharedObject *value) const {
    U_ASSERT(value->cachePtr == this);
    U_ASSERT(value->softRefCount > 0);
    if (--value->softRefCount == 0) {
        --fNumValuesTotal;
        if (value->noHardReferences()) {
            delete value;
        } else {
            // This path only happens from flush(all), which only happens from the
            // UnifiedCache destructor. Nulling out value.cachePtr changes the behavior
            // of value.removeRef(), causing the deletion to be done there.
            value->cachePtr = nullptr;
        }
    }
}

int32_t UnifiedCache::removeHardRef(const SharedObject *value) const {
    int32_t refCount = 0;
    if (value) {
        refCount = umtx_atomic_dec(&value->hardRefCount);
        U_ASSERT(refCount >= 0);
        if (refCount == 0) {
            --fNumValuesInUse;
        }
    }
    return refCount;
}

int32_t UnifiedCache::addHardRef(const SharedObject *value) const {
    int32_t refCount = 0;
    if (value) {
        refCount = umtx_atomic_inc(&value->hardRefCount);
        U_ASSERT(refCount >= 1);
        if (refCount == 1) {
            fNumValuesInUse++;
        }
    }
    return refCount;
}

U_NAMESPACE_END