// © 2016 and later: Unicode, Inc. and others.
// License & terms of use: http://www.unicode.org/copyright.html
/*
******************************************************************************
* Copyright (C) 2015, International Business Machines Corporation and
* others. All Rights Reserved.
******************************************************************************
*
* File unifiedcache.cpp
******************************************************************************
*/

#include "unifiedcache.h"

#include <algorithm>      // For std::max()
#include <mutex>

#include "uassert.h"
#include "uhash.h"
#include "ucln_cmn.h"
#include "umutex.h"
static icu::UnifiedCache *gCache = NULL;

static std::mutex &gCacheMutex() {
    static std::mutex *m = STATIC_NEW(std::mutex);
    return *m;
}

static std::condition_variable &gInProgressValueAddedCond() {
    static std::condition_variable *cv = STATIC_NEW(std::condition_variable);
    return *cv;
}

static icu::UInitOnce gCacheInitOnce = U_INITONCE_INITIALIZER;

static const int32_t MAX_EVICT_ITERATIONS = 10;
static const int32_t DEFAULT_MAX_UNUSED = 1000;
static const int32_t DEFAULT_PERCENTAGE_OF_IN_USE = 100;
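
// MAX_EVICT_ITERATIONS bounds how many hash table entries a single eviction
// slice examines, so no one cache operation stalls for long while holding the
// cache mutex.  DEFAULT_MAX_UNUSED and DEFAULT_PERCENTAGE_OF_IN_USE seed the
// eviction policy; see setEvictionPolicy() and _computeCountOfItemsToEvict().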
U_CDECL_BEGIN
static UBool U_CALLCONV unifiedcache_cleanup() {
    gCacheInitOnce.reset();
    delete gCache;
    gCache = nullptr;
    return TRUE;
}
U_CDECL_END

U_NAMESPACE_BEGIN
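
// Hashing, comparison, and deletion callbacks for the UHashtable that backs
// the cache.  They are installed on the table created in the UnifiedCache
// constructor below; keys stored in the table are CacheKeyBase instances.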
U_CAPI int32_t U_EXPORT2
ucache_hashKeys(const UHashTok key) {
    const CacheKeyBase *ckey = (const CacheKeyBase *) key.pointer;
    return ckey->hashCode();
}
U_CAPI UBool U_EXPORT2
ucache_compareKeys(const UHashTok key1, const UHashTok key2) {
    const CacheKeyBase *p1 = (const CacheKeyBase *) key1.pointer;
    const CacheKeyBase *p2 = (const CacheKeyBase *) key2.pointer;
    return *p1 == *p2;
}
U_CAPI void U_EXPORT2
ucache_deleteKey(void *obj) {
    CacheKeyBase *p = (CacheKeyBase *) obj;
    delete p;
}

CacheKeyBase::~CacheKeyBase() {
}
static void U_CALLCONV cacheInit(UErrorCode &status) {
    U_ASSERT(gCache == NULL);
    ucln_common_registerCleanup(
            UCLN_COMMON_UNIFIED_CACHE, unifiedcache_cleanup);

    gCache = new UnifiedCache(status);
    if (gCache == NULL) {
        status = U_MEMORY_ALLOCATION_ERROR;
    }
    if (U_FAILURE(status)) {
        delete gCache;
        gCache = NULL;
        return;
    }
}
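
// Returns the process-wide cache singleton.  The cache is created lazily and
// thread-safely on first use via umtx_initOnce(); unifiedcache_cleanup()
// (registered above) tears it down at library cleanup time.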
UnifiedCache *UnifiedCache::getInstance(UErrorCode &status) {
    umtx_initOnce(gCacheInitOnce, &cacheInit, status);
    if (U_FAILURE(status)) {
        return NULL;
    }
    U_ASSERT(gCache != NULL);
    return gCache;
}
UnifiedCache::UnifiedCache(UErrorCode &status) :
        fHashtable(NULL),
        fEvictPos(UHASH_FIRST),
        fNumValuesTotal(0),
        fNumValuesInUse(0),
        fMaxUnused(DEFAULT_MAX_UNUSED),
        fMaxPercentageOfInUse(DEFAULT_PERCENTAGE_OF_IN_USE),
        fAutoEvictedCount(0),
        fNoValue(nullptr) {
    if (U_FAILURE(status)) {
        return;
    }
    fNoValue = new SharedObject();
    if (fNoValue == nullptr) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return;
    }
    fNoValue->softRefCount = 1;  // Add fake references to prevent fNoValue from being deleted
    fNoValue->hardRefCount = 1;  // when other references to it are removed.
    fNoValue->cachePtr = this;

    fHashtable = uhash_open(
            &ucache_hashKeys,
            &ucache_compareKeys,
            NULL,
            &status);
    if (U_FAILURE(status)) {
        return;
    }
    uhash_setKeyDeleter(fHashtable, &ucache_deleteKey);
}
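
// Sets how aggressively the cache evicts unused entries.  count is the number
// of unused (no hard references) entries the cache will retain outright;
// percentageOfInUseItems additionally allows keeping unused entries up to that
// percentage of the currently in-use entry count.  The effective limit is the
// larger of the two; see _computeCountOfItemsToEvict().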
void UnifiedCache::setEvictionPolicy(
        int32_t count, int32_t percentageOfInUseItems, UErrorCode &status) {
    if (U_FAILURE(status)) {
        return;
    }
    if (count < 0 || percentageOfInUseItems < 0) {
        status = U_ILLEGAL_ARGUMENT_ERROR;
        return;
    }
    std::lock_guard<std::mutex> lock(gCacheMutex());
    fMaxUnused = count;
    fMaxPercentageOfInUse = percentageOfInUseItems;
}
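
// Illustrative caller-side sketch (not part of this file); the values are
// arbitrary examples of how the eviction policy might be tuned.
//
//     UErrorCode status = U_ZERO_ERROR;
//     UnifiedCache *cache = UnifiedCache::getInstance(status);
//     if (U_SUCCESS(status)) {
//         // Keep up to 4000 unused entries, or up to 50% of the number of
//         // in-use entries, whichever is larger.
//         cache->setEvictionPolicy(4000, 50, status);
//     }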
int32_t UnifiedCache::unusedCount() const {
    std::lock_guard<std::mutex> lock(gCacheMutex());
    return uhash_count(fHashtable) - fNumValuesInUse;
}

int64_t UnifiedCache::autoEvictedCount() const {
    std::lock_guard<std::mutex> lock(gCacheMutex());
    return fAutoEvictedCount;
}

int32_t UnifiedCache::keyCount() const {
    std::lock_guard<std::mutex> lock(gCacheMutex());
    return uhash_count(fHashtable);
}
void UnifiedCache::flush() const {
    std::lock_guard<std::mutex> lock(gCacheMutex());

    // Use a loop in case cache items that are flushed held hard references to
    // other cache items, making those additional cache items eligible for
    // flushing.
    while (_flush(FALSE));
}
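
// Invoked from SharedObject reference counting when a cache-managed value
// loses its last hard reference from outside the cache: the in-use count
// drops and an eviction slice runs, since that value may now be evictable.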
void UnifiedCache::handleUnreferencedObject() const {
    std::lock_guard<std::mutex> lock(gCacheMutex());
    --fNumValuesInUse;
    _runEvictionSlice();
}
#ifdef UNIFIED_CACHE_DEBUG
#include <stdio.h>

void UnifiedCache::dump() {
    UErrorCode status = U_ZERO_ERROR;
    const UnifiedCache *cache = getInstance(status);
    if (U_FAILURE(status)) {
        fprintf(stderr, "Unified Cache: Error fetching cache.\n");
        return;
    }
    cache->dumpContents();
}

void UnifiedCache::dumpContents() const {
    std::lock_guard<std::mutex> lock(gCacheMutex());
    _dumpContents();
}

// Dumps content of cache.
// On entry, gCacheMutex must be held.
// On exit, cache contents dumped to stderr.
void UnifiedCache::_dumpContents() const {
    int32_t pos = UHASH_FIRST;
    const UHashElement *element = uhash_nextElement(fHashtable, &pos);
    char buffer[256];
    int32_t cnt = 0;
    for (; element != NULL; element = uhash_nextElement(fHashtable, &pos)) {
        const SharedObject *sharedObject =
                (const SharedObject *) element->value.pointer;
        const CacheKeyBase *key =
                (const CacheKeyBase *) element->key.pointer;
        if (sharedObject->hasHardReferences()) {
            ++cnt;
            fprintf(
                    stderr,
                    "Unified Cache: Key '%s', error %d, value %p, total refcount %d, soft refcount %d\n",
                    key->writeDescription(buffer, 256),
                    key->fCreationStatus,
                    sharedObject == fNoValue ? NULL : sharedObject,
                    sharedObject->getRefCount(),
                    sharedObject->getSoftRefCount());
        }
    }
    fprintf(stderr, "Unified Cache: %d out of a total of %d still have hard references\n",
            cnt, uhash_count(fHashtable));
}
#endif
UnifiedCache::~UnifiedCache() {
    // Try our best to clean up first.
    flush();
    {
        // Now all that should be left in the cache are entries that refer to
        // each other and entries with hard references from outside the cache.
        // Nothing we can do about these so proceed to wipe out the cache.
        std::lock_guard<std::mutex> lock(gCacheMutex());
        _flush(TRUE);
    }
    uhash_close(fHashtable);
    fHashtable = nullptr;
    delete fNoValue;
    fNoValue = nullptr;
}
const UHashElement *
UnifiedCache::_nextElement() const {
    const UHashElement *element = uhash_nextElement(fHashtable, &fEvictPos);
    if (element == NULL) {
        fEvictPos = UHASH_FIRST;
        return uhash_nextElement(fHashtable, &fEvictPos);
    }
    return element;
}
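
// Removes evictable entries from the cache; if all is TRUE, removes every
// visited entry regardless of evictability (used only from the destructor).
// Returns TRUE if anything was removed.  Caller must hold the cache mutex.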
UBool UnifiedCache::_flush(UBool all) const {
    UBool result = FALSE;
    int32_t origSize = uhash_count(fHashtable);
    for (int32_t i = 0; i < origSize; ++i) {
        const UHashElement *element = _nextElement();
        if (element == nullptr) {
            break;
        }
        if (all || _isEvictable(element)) {
            const SharedObject *sharedObject =
                    (const SharedObject *) element->value.pointer;
            U_ASSERT(sharedObject->cachePtr == this);
            uhash_removeElement(fHashtable, element);
            removeSoftRef(sharedObject);    // Deletes the sharedObject when softRefCount goes to zero.
            result = TRUE;
        }
    }
    return result;
}
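
// Computes how many entries upcoming eviction slices may remove.  Worked
// example with the defaults (fMaxUnused = 1000, fMaxPercentageOfInUse = 100):
// if 250 entries are in use and the table holds 1500 entries, then
// evictableItems = 1250, unusedLimit = max(250 * 100 / 100, 1000) = 1000,
// and 250 entries are eligible for eviction.  Caller must hold the cache mutex.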
int32_t UnifiedCache::_computeCountOfItemsToEvict() const {
    int32_t totalItems = uhash_count(fHashtable);
    int32_t evictableItems = totalItems - fNumValuesInUse;

    int32_t unusedLimitByPercentage = fNumValuesInUse * fMaxPercentageOfInUse / 100;
    int32_t unusedLimit = std::max(unusedLimitByPercentage, fMaxUnused);
    int32_t countOfItemsToEvict = std::max(0, evictableItems - unusedLimit);
    return countOfItemsToEvict;
}
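
// Runs one bounded slice of eviction work: visits at most MAX_EVICT_ITERATIONS
// entries (resuming from fEvictPos) and removes the evictable ones, up to the
// count allowed by the policy above.  Keeping each slice small bounds the time
// spent under the cache mutex on any one call.  Caller must hold the cache mutex.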
void UnifiedCache::_runEvictionSlice() const {
    int32_t maxItemsToEvict = _computeCountOfItemsToEvict();
    if (maxItemsToEvict <= 0) {
        return;
    }
    for (int32_t i = 0; i < MAX_EVICT_ITERATIONS; ++i) {
        const UHashElement *element = _nextElement();
        if (element == nullptr) {
            break;
        }
        if (_isEvictable(element)) {
            const SharedObject *sharedObject =
                    (const SharedObject *) element->value.pointer;
            uhash_removeElement(fHashtable, element);
            removeSoftRef(sharedObject);    // Deletes sharedObject when softRefCount goes to zero.
            ++fAutoEvictedCount;
            if (--maxItemsToEvict == 0) {
                break;
            }
        }
    }
}
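
// Adds a brand-new entry for key, adopting a clone of the key and storing the
// given value together with its creation status.  The cloned key becomes a
// master entry if the value is not yet owned by the cache.  Caller must hold
// the cache mutex.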
void UnifiedCache::_putNew(
        const CacheKeyBase &key,
        const SharedObject *value,
        const UErrorCode creationStatus,
        UErrorCode &status) const {
    if (U_FAILURE(status)) {
        return;
    }
    CacheKeyBase *keyToAdopt = key.clone();
    if (keyToAdopt == NULL) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return;
    }
    keyToAdopt->fCreationStatus = creationStatus;
    if (value->softRefCount == 0) {
        _registerMaster(keyToAdopt, value);
    }
    void *oldValue = uhash_put(fHashtable, keyToAdopt, (void *) value, &status);
    U_ASSERT(oldValue == nullptr);
    (void)oldValue;
    if (U_SUCCESS(status)) {
        value->softRefCount++;
    }
}
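
// Publishes value under key: if another thread already published a real value,
// fetch that one instead; if there is no entry, add a new one on a best-effort
// basis; if an in-progress placeholder is present, replace it via _put().
// Finishes by running an eviction slice.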
void UnifiedCache::_putIfAbsentAndGet(
        const CacheKeyBase &key,
        const SharedObject *&value,
        UErrorCode &status) const {
    std::lock_guard<std::mutex> lock(gCacheMutex());
    const UHashElement *element = uhash_find(fHashtable, &key);
    if (element != NULL && !_inProgress(element)) {
        _fetch(element, value, status);
        return;
    }
    if (element == NULL) {
        UErrorCode putError = U_ZERO_ERROR;
        // Add the new entry on a best-effort basis only.
        _putNew(key, value, status, putError);
    } else {
        _put(element, value, status);
    }
    // Run an eviction slice. This will run even if we added a master entry
    // which doesn't increase the unused count, but that is still o.k.
    _runEvictionSlice();
}
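
// First half of the two-phase lookup protocol.  Returns TRUE if the cache
// already holds a value (or a stored creation error) for key, in which case
// value and status are filled in.  Returns FALSE if nothing was found: an
// in-progress placeholder (fNoValue) is inserted and the caller is expected
// to create the object and publish it with _putIfAbsentAndGet().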
UBool UnifiedCache::_poll(
        const CacheKeyBase &key,
        const SharedObject *&value,
        UErrorCode &status) const {
    U_ASSERT(value == NULL);
    U_ASSERT(status == U_ZERO_ERROR);
    std::unique_lock<std::mutex> lock(gCacheMutex());
    const UHashElement *element = uhash_find(fHashtable, &key);

    // If the hash table contains an inProgress placeholder entry for this key,
    // this means that another thread is currently constructing the value object.
    // Loop, waiting for that construction to complete.
    while (element != NULL && _inProgress(element)) {
        gInProgressValueAddedCond().wait(lock);
        element = uhash_find(fHashtable, &key);
    }

    // If the hash table contains an entry for the key,
    // fetch out the contents and return them.
    if (element != NULL) {
        _fetch(element, value, status);
        return TRUE;
    }

    // The hash table contained nothing for this key.
    // Insert an inProgress place holder value.
    // Our caller will create the final value and update the hash table.
    _putNew(key, fNoValue, U_ZERO_ERROR, status);
    return FALSE;
}
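
// Central lookup-or-create routine behind the cache's public get() helpers:
// poll the cache, and if the value is not there, create it outside the lock
// with key.createObject() and publish it.  If creation fails, the placeholder
// stays in the table with the error recorded on the key, so later lookups
// report the same error instead of retrying the creation.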
void UnifiedCache::_get(
        const CacheKeyBase &key,
        const SharedObject *&value,
        const void *creationContext,
        UErrorCode &status) const {
    U_ASSERT(value == NULL);
    U_ASSERT(status == U_ZERO_ERROR);
    if (_poll(key, value, status)) {
        if (value == fNoValue) {
            SharedObject::clearPtr(value);
        }
        return;
    }
    if (U_FAILURE(status)) {
        return;
    }
    value = key.createObject(creationContext, status);
    U_ASSERT(value == NULL || value->hasHardReferences());
    U_ASSERT(value != NULL || status != U_ZERO_ERROR);
    if (value == NULL) {
        SharedObject::copyPtr(fNoValue, value);
    }
    _putIfAbsentAndGet(key, value, status);
    if (value == fNoValue) {
        SharedObject::clearPtr(value);
    }
}
void UnifiedCache::_registerMaster(
        const CacheKeyBase *theKey, const SharedObject *value) const {
    theKey->fIsMaster = true;
    value->cachePtr = this;
    ++fNumValuesTotal;
    ++fNumValuesInUse;
}
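
// Replaces the in-progress placeholder in an existing hash table entry with
// the real value (or records a creation error on the key), then wakes any
// threads blocked in _poll() waiting on this key.  Caller must hold the
// cache mutex.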
void UnifiedCache::_put(
        const UHashElement *element,
        const SharedObject *value,
        const UErrorCode status) const {
    U_ASSERT(_inProgress(element));
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    const SharedObject *oldValue = (const SharedObject *) element->value.pointer;
    theKey->fCreationStatus = status;
    if (value->softRefCount == 0) {
        _registerMaster(theKey, value);
    }
    value->softRefCount++;
    UHashElement *ptr = const_cast<UHashElement *>(element);
    ptr->value.pointer = (void *) value;
    U_ASSERT(oldValue == fNoValue);
    removeSoftRef(oldValue);

    // Tell waiting threads that we replaced the in-progress status with
    // the final value (or a creation error).
    gInProgressValueAddedCond().notify_all();
}
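
// Copies the value and its creation status out of a hash table entry,
// dropping the caller's previous hard reference (if any) and taking one on
// the fetched value.  Caller must hold the cache mutex.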
void UnifiedCache::_fetch(
        const UHashElement *element,
        const SharedObject *&value,
        UErrorCode &status) const {
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    status = theKey->fCreationStatus;

    // Since we have the cache lock, calling regular SharedObject add/removeRef
    // could cause us to deadlock on ourselves since they may need to lock
    // the cache mutex.
    removeHardRef(value);
    value = static_cast<const SharedObject *>(element->value.pointer);
    addHardRef(value);
}
UBool UnifiedCache::_inProgress(const UHashElement *element) const {
    UErrorCode status = U_ZERO_ERROR;
    const SharedObject *value = NULL;
    _fetch(element, value, status);
    UBool result = _inProgress(value, status);
    removeHardRef(value);
    return result;
}
UBool UnifiedCache::_inProgress(
        const SharedObject *theValue, UErrorCode creationStatus) const {
    return (theValue == fNoValue && creationStatus == U_ZERO_ERROR);
}
UBool UnifiedCache::_isEvictable(const UHashElement *element) const
{
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    const SharedObject *theValue =
            (const SharedObject *) element->value.pointer;

    // Entries that are under construction are never evictable
    if (_inProgress(theValue, theKey->fCreationStatus)) {
        return FALSE;
    }

    // We can evict entries that are either not a master or have just
    // one reference (The one reference being from the cache itself).
    return (!theKey->fIsMaster || (theValue->softRefCount == 1 && theValue->noHardReferences()));
}
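
// Reference counting conventions: softRefCount counts references held by the
// cache itself and is only read or written while the cache mutex is held;
// hardRefCount counts references held by clients and is updated atomically
// via umtx_atomic_inc/dec, so it can change without the mutex.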
void UnifiedCache::removeSoftRef(const SharedObject *value) const {
    U_ASSERT(value->cachePtr == this);
    U_ASSERT(value->softRefCount > 0);
    if (--value->softRefCount == 0) {
        --fNumValuesTotal;
        if (value->noHardReferences()) {
            delete value;
        } else {
            // This path only happens from flush(all), which only happens from the
            // UnifiedCache destructor.  Nulling out value.cachePtr changes the behavior
            // of value.removeRef(), causing the deletion to be done there.
            value->cachePtr = nullptr;
        }
    }
}
int32_t UnifiedCache::removeHardRef(const SharedObject *value) const {
    int32_t refCount = 0;
    if (value) {
        refCount = umtx_atomic_dec(&value->hardRefCount);
        U_ASSERT(refCount >= 0);
        if (refCount == 0) {
            --fNumValuesInUse;
        }
    }
    return refCount;
}
int32_t UnifiedCache::addHardRef(const SharedObject *value) const {
    int32_t refCount = 0;
    if (value) {
        refCount = umtx_atomic_inc(&value->hardRefCount);
        U_ASSERT(refCount >= 1);
        if (refCount == 1) {
            fNumValuesInUse++;
        }
    }
    return refCount;
}

U_NAMESPACE_END