X-Git-Url: https://git.saurik.com/redis.git/blobdiff_plain/aaada3f962a9f87fb239e55e3d26c1e794d411d5..52c863ddac711b08390216c5360bf703ecf29dc7:/src/dict.c

diff --git a/src/dict.c b/src/dict.c
index 2d1e752b..24001fdd 100644
--- a/src/dict.c
+++ b/src/dict.c
@@ -42,6 +42,7 @@
 #include <assert.h>
 #include <limits.h>
 #include <sys/time.h>
+#include <ctype.h>
 
 #include "dict.h"
 #include "zmalloc.h"
@@ -49,8 +50,13 @@
 /* Using dictEnableResize() / dictDisableResize() we make possible to
  * enable/disable resizing of the hash table as needed. This is very important
  * for Redis, as we use copy-on-write and don't want to move too much memory
- * around when there is a child performing saving operations. */
+ * around when there is a child performing saving operations.
+ *
+ * Note that even when dict_can_resize is set to 0, not all resizes are
+ * prevented: an hash table is still allowed to grow if the ratio between
+ * the number of elements and the buckets > dict_force_resize_ratio. */
 static int dict_can_resize = 1;
+static unsigned int dict_force_resize_ratio = 5;
 
 /* -------------------------- private prototypes ---------------------------- */
 
@@ -89,6 +95,15 @@ unsigned int dictGenHashFunction(const unsigned char *buf, int len) {
     return hash;
 }
 
+/* And a case insensitive version */
+unsigned int dictGenCaseHashFunction(const unsigned char *buf, int len) {
+    unsigned int hash = 5381;
+
+    while (len--)
+        hash = ((hash << 5) + hash) + (tolower(*buf++)); /* hash * 33 + c */
+    return hash;
+}
+
 /* ----------------------------- API implementation ------------------------- */
 
 /* Reset an hashtable already initialized with ht_init().
@@ -125,7 +140,7 @@ int _dictInit(dict *d, dictType *type,
 }
 
 /* Resize the table to the minimal size that contains all the elements,
- * but with the invariant of a USER/BUCKETS ration near to <= 1 */
+ * but with the invariant of a USER/BUCKETS ratio near to <= 1 */
 int dictResize(dict *d)
 {
     int minimal;
@@ -188,6 +203,7 @@ int dictRehash(dict *d, int n) {
 
         /* Note that rehashidx can't overflow as we are sure there are more
          * elements because ht[0].used != 0 */
+        assert(d->ht[0].size > (unsigned)d->rehashidx);
         while(d->ht[0].table[d->rehashidx] == NULL) d->rehashidx++;
         de = d->ht[0].table[d->rehashidx];
         /* Move all the keys in this bucket from the old to the new hash HT */
@@ -229,9 +245,9 @@ int dictRehashMilliseconds(dict *d, int ms) {
 }
 
 /* This function performs just a step of rehashing, and only if there are
- * not iterators bound to our hash table. When we have iterators in the middle
- * of a rehashing we can't mess with the two hash tables otherwise some element
- * can be missed or duplicated.
+ * no safe iterators bound to our hash table. When we have iterators in the
+ * middle of a rehashing we can't mess with the two hash tables otherwise
+ * some element can be missed or duplicated.
  *
  * This function is called by common lookup or update operations in the
  * dictionary so that the hash table automatically migrates from H1 to H2
@@ -408,17 +424,26 @@ dictIterator *dictGetIterator(dict *d)
     iter->d = d;
     iter->table = 0;
     iter->index = -1;
+    iter->safe = 0;
     iter->entry = NULL;
     iter->nextEntry = NULL;
     return iter;
 }
 
+dictIterator *dictGetSafeIterator(dict *d) {
+    dictIterator *i = dictGetIterator(d);
+
+    i->safe = 1;
+    return i;
+}
+
 dictEntry *dictNext(dictIterator *iter)
 {
     while (1) {
         if (iter->entry == NULL) {
             dictht *ht = &iter->d->ht[iter->table];
-            if (iter->index == -1 && iter->table == 0) iter->d->iterators++;
+            if (iter->safe && iter->index == -1 && iter->table == 0)
+                iter->d->iterators++;
             iter->index++;
             if (iter->index >= (signed) ht->size) {
                 if (dictIsRehashing(iter->d) && iter->table == 0) {
@@ -445,7 +470,8 @@ dictEntry *dictNext(dictIterator *iter)
 
 void dictReleaseIterator(dictIterator *iter)
 {
-    if (!(iter->index == -1 && iter->table == 0)) iter->d->iterators--;
+    if (iter->safe && !(iter->index == -1 && iter->table == 0))
+        iter->d->iterators--;
     zfree(iter);
 }
 
@@ -493,14 +519,23 @@ dictEntry *dictGetRandomKey(dict *d)
 /* Expand the hash table if needed */
 static int _dictExpandIfNeeded(dict *d)
 {
-    /* If the hash table is empty expand it to the intial size,
-     * if the table is "full" dobule its size. */
+    /* Incremental rehashing already in progress. Return. */
     if (dictIsRehashing(d)) return DICT_OK;
-    if (d->ht[0].size == 0)
-        return dictExpand(d, DICT_HT_INITIAL_SIZE);
-    if (d->ht[0].used >= d->ht[0].size && dict_can_resize)
+
+    /* If the hash table is empty expand it to the intial size. */
+    if (d->ht[0].size == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE);
+
+    /* If we reached the 1:1 ratio, and we are allowed to resize the hash
+     * table (global setting) or we should avoid it but the ratio between
+     * elements/buckets is over the "safe" threshold, we resize doubling
+     * the number of buckets. */
+    if (d->ht[0].used >= d->ht[0].size &&
+        (dict_can_resize ||
+         d->ht[0].used/d->ht[0].size > dict_force_resize_ratio))
+    {
         return dictExpand(d, ((d->ht[0].size > d->ht[0].used) ?
                                     d->ht[0].size : d->ht[0].used)*2);
+    }
     return DICT_OK;
 }
 
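The two behavioural changes in this diff can be exercised with a small standalone program. This is only an illustrative sketch: the file name sketch.c and the helpers case_hash and should_expand are invented here and simply mirror the djb2 loop of dictGenCaseHashFunction and the resize predicate of _dictExpandIfNeeded; they are not part of the real dict API.

/* sketch.c - illustrative only; mirrors the logic added in the diff above,
 * not the actual dict.c interface. Build with: cc -o sketch sketch.c */
#include <stdio.h>
#include <ctype.h>
#include <assert.h>

/* Same djb2 variant as dictGenCaseHashFunction: hash = hash*33 + tolower(c). */
static unsigned int case_hash(const unsigned char *buf, int len) {
    unsigned int hash = 5381;
    while (len--)
        hash = ((hash << 5) + hash) + (tolower(*buf++));
    return hash;
}

/* Resize decision from _dictExpandIfNeeded: grow at a 1:1 load factor when
 * resizing is allowed, or past the forced ratio even when it is not. */
static int should_expand(unsigned long used, unsigned long size,
                         int can_resize, unsigned int force_ratio) {
    if (size == 0) return 1; /* empty table: expand to the initial size */
    return used >= size && (can_resize || used / size > force_ratio);
}

int main(void) {
    /* "GET" and "get" hash identically, so a command table keyed with this
     * function can be looked up case insensitively. */
    assert(case_hash((unsigned char*)"GET", 3) ==
           case_hash((unsigned char*)"get", 3));

    /* With resizing disabled (e.g. while a child is saving), a 4-bucket
     * table does not grow at 8 elements... */
    assert(should_expand(8, 4, 0, 5) == 0);
    /* ...but it does once elements/buckets exceeds the forced ratio of 5. */
    assert(should_expand(24, 4, 0, 5) == 1);

    printf("ok\n");
    return 0;
}

Note that used/size is integer division in the patch, so with the default dict_force_resize_ratio of 5 the forced growth only triggers once used reaches at least six times size.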