X-Git-Url: https://git.saurik.com/redis.git/blobdiff_plain/b4b5144694a4134544221a5587b81a98aec608be..9c21b72bb9b7844b22b60f63af37aa30dd78f898:/src/db.c

diff --git a/src/db.c b/src/db.c
index 5bab42af..5f07e2b6 100644
--- a/src/db.c
+++ b/src/db.c
@@ -1,105 +1,41 @@
 #include "redis.h"
 #include
+#include
+
+void SlotToKeyAdd(robj *key);
+void SlotToKeyDel(robj *key);

 /*-----------------------------------------------------------------------------
  * C-level DB API
  *----------------------------------------------------------------------------*/

-/* Important notes on lookup and disk store.
- *
- * When disk store is enabled on lookup we can have different cases.
- *
- * a) The key is in memory:
- *    - If the key is not in IO_SAVEINPROG state we can access it.
- *      As if it's just IO_SAVE this means we have the key in the IO queue
- *      but can't be accessed by the IO thread (it requires to be
- *      translated into an IO Job by the cache cron function.)
- *    - If the key is in IO_SAVEINPROG we can't touch the key and have
- *      to blocking wait completion of operations.
- * b) The key is not in memory:
- *    - If it's marked as non existing on disk as well (negative cache)
- *      we don't need to perform the disk access.
- *    - if the key MAY EXIST, but is not in memory, and it is marked as IO_SAVE
- *      then the key can only be a deleted one. As IO_SAVE keys are never
- *      evicted (dirty state), so the only possibility is that key was deleted.
- *    - if the key MAY EXIST we need to blocking load it.
- *      We check that the key is not in IO_SAVEINPROG state before accessing
- *      the disk object. If it is in this state, we wait.
- */
-
-void lookupWaitBusyKey(redisDb *db, robj *key) {
-    /* FIXME: wait just for this key, not everything */
-    waitEmptyIOJobsQueue();
-    processAllPendingIOJobs();
-    redisAssert((cacheScheduleIOGetFlags(db,key) & REDIS_IO_SAVEINPROG) == 0);
-}
-
 robj *lookupKey(redisDb *db, robj *key) {
     dictEntry *de = dictFind(db->dict,key->ptr);
     if (de) {
-        robj *val = dictGetEntryVal(de);
+        robj *val = dictGetVal(de);

         /* Update the access time for the aging algorithm.
          * Don't do it if we have a saving child, as this will trigger
          * a copy on write madness. */
-        if (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1)
+        if (server.rdb_child_pid == -1 && server.aof_child_pid == -1)
             val->lru = server.lruclock;
-
-        if (server.ds_enabled &&
-            cacheScheduleIOGetFlags(db,key) & REDIS_IO_SAVEINPROG)
-        {
-            /* Need to wait for the key to get unbusy */
-            redisLog(REDIS_DEBUG,"Lookup found a key in SAVEINPROG state. Waiting. (Key was in the cache)");
-            lookupWaitBusyKey(db,key);
-        }
-        server.stat_keyspace_hits++;
         return val;
     } else {
-        time_t expire;
-        robj *val;
-
-        /* Key not found in the in memory hash table, but if disk store is
-         * enabled we may have this key on disk. If so load it in memory
-         * in a blocking way. */
-        if (server.ds_enabled && cacheKeyMayExist(db,key)) {
-            long flags = cacheScheduleIOGetFlags(db,key);
-
-            /* They key is not in cache, but it has a SAVE op in queue?
-             * The only possibility is that the key was deleted, since
-             * dirty keys are not evicted. */
-            if (flags & REDIS_IO_SAVE) {
-                server.stat_keyspace_misses++;
-                return NULL;
-            }
-
-            /* At this point we need to blocking load the key in memory.
-             * The first thing we do is waiting here if the key is busy. */
-            if (flags & REDIS_IO_SAVEINPROG) {
-                redisLog(REDIS_DEBUG,"Lookup found a key in SAVEINPROG state. Waiting (while force loading).");
Waiting (while force loading)."); - lookupWaitBusyKey(db,key); - } - - redisLog(REDIS_DEBUG,"Force loading key %s via lookup", key->ptr); - val = dsGet(db,key,&expire); - if (val) { - int retval = dbAdd(db,key,val); - redisAssert(retval == REDIS_OK); - if (expire != -1) setExpire(db,key,expire); - server.stat_keyspace_hits++; - return val; - } else { - cacheSetKeyDoesNotExist(db,key); - } - } - server.stat_keyspace_misses++; return NULL; } } robj *lookupKeyRead(redisDb *db, robj *key) { + robj *val; + expireIfNeeded(db,key); - return lookupKey(db,key); + val = lookupKey(db,key); + if (val == NULL) + server.stat_keyspace_misses++; + else + server.stat_keyspace_hits++; + return val; } robj *lookupKeyWrite(redisDb *db, robj *key) { @@ -119,40 +55,45 @@ robj *lookupKeyWriteOrReply(redisClient *c, robj *key, robj *reply) { return o; } -/* Add the key to the DB. If the key already exists REDIS_ERR is returned, - * otherwise REDIS_OK is returned, and the caller should increment the - * refcount of 'val'. */ -int dbAdd(redisDb *db, robj *key, robj *val) { - /* Perform a lookup before adding the key, as we need to copy the - * key value. */ - if (dictFind(db->dict, key->ptr) != NULL) { - return REDIS_ERR; - } else { - sds copy = sdsdup(key->ptr); - dictAdd(db->dict, copy, val); - if (server.ds_enabled) cacheSetKeyMayExist(db,key); - return REDIS_OK; - } +/* Add the key to the DB. It's up to the caller to increment the reference + * counte of the value if needed. + * + * The program is aborted if the key already exists. */ +void dbAdd(redisDb *db, robj *key, robj *val) { + sds copy = sdsdup(key->ptr); + int retval = dictAdd(db->dict, copy, val); + + redisAssertWithInfo(NULL,key,retval == REDIS_OK); + if (server.cluster_enabled) SlotToKeyAdd(key); + } + +/* Overwrite an existing key with a new value. Incrementing the reference + * count of the new value is up to the caller. + * This function does not modify the expire time of the existing key. + * + * The program is aborted if the key was not already present. */ +void dbOverwrite(redisDb *db, robj *key, robj *val) { + struct dictEntry *de = dictFind(db->dict,key->ptr); + + redisAssertWithInfo(NULL,key,de != NULL); + dictReplace(db->dict, key->ptr, val); } -/* If the key does not exist, this is just like dbAdd(). Otherwise - * the value associated to the key is replaced with the new one. +/* High level Set operation. This function can be used in order to set + * a key, whatever it was existing or not, to a new object. * - * On update (key already existed) 0 is returned. Otherwise 1. */ -int dbReplace(redisDb *db, robj *key, robj *val) { - robj *oldval; - int retval; - - if ((oldval = dictFetchValue(db->dict,key->ptr)) == NULL) { - sds copy = sdsdup(key->ptr); - dictAdd(db->dict, copy, val); - retval = 1; + * 1) The ref count of the value object is incremented. + * 2) clients WATCHing for the destination key notified. + * 3) The expire time of the key is reset (the key is made persistent). 
+void setKey(redisDb *db, robj *key, robj *val) {
+    if (lookupKeyWrite(db,key) == NULL) {
+        dbAdd(db,key,val);
     } else {
-        dictReplace(db->dict, key->ptr, val);
-        retval = 0;
+        dbOverwrite(db,key,val);
     }
-    if (server.ds_enabled) cacheSetKeyMayExist(db,key);
-    return retval;
+    incrRefCount(val);
+    removeExpire(db,key);
+    signalModifiedKey(db,key);
 }

 int dbExists(redisDb *db, robj *key) {
@@ -173,7 +114,7 @@ robj *dbRandomKey(redisDb *db) {
         de = dictGetRandomKey(db->dict);
         if (de == NULL) return NULL;

-        key = dictGetEntryKey(de);
+        key = dictGetKey(de);
         keyobj = createStringObject(key,sdslen(key));
         if (dictFind(db->expires,key)) {
             if (expireIfNeeded(db,keyobj)) {
@@ -187,22 +128,17 @@ robj *dbRandomKey(redisDb *db) {

 /* Delete a key, value, and associated expiration entry if any, from the DB */
 int dbDelete(redisDb *db, robj *key) {
-    /* If diskstore is enabled make sure to awake waiting clients for this key
-     * as it is not really useful to wait for a key already deleted to be
-     * loaded from disk. */
-    if (server.ds_enabled) {
-        handleClientsBlockedOnSwappedKey(db,key);
-        cacheSetKeyDoesNotExist(db,key);
-    }
-
     /* Deleting an entry from the expires dict will not free the sds of
      * the key, because it is shared with the main dictionary. */
     if (dictSize(db->expires) > 0) dictDelete(db->expires,key->ptr);
-    return dictDelete(db->dict,key->ptr) == DICT_OK;
+    if (dictDelete(db->dict,key->ptr) == DICT_OK) {
+        if (server.cluster_enabled) SlotToKeyDel(key);
+        return 1;
+    } else {
+        return 0;
+    }
 }

-/* Empty the whole database.
- * If diskstore is enabled this function will just flush the in-memory cache. */
 long long emptyDb() {
     int j;
     long long removed = 0;
@@ -211,7 +147,6 @@ long long emptyDb() {
         removed += dictSize(server.db[j].dict);
         dictEmpty(server.db[j].dict);
         dictEmpty(server.db[j].expires);
-        if (server.ds_enabled) dictEmpty(server.db[j].io_negcache);
     }
     return removed;
 }
@@ -234,8 +169,6 @@ int selectDb(redisClient *c, int id) {

 void signalModifiedKey(redisDb *db, robj *key) {
     touchWatchedKey(db,key);
-    if (server.ds_enabled)
-        cacheScheduleIO(db,key,REDIS_IO_SAVE);
 }

 void signalFlushedDb(int dbid) {
@@ -251,7 +184,6 @@ void flushdbCommand(redisClient *c) {
     signalFlushedDb(c->db->id);
     dictEmpty(c->db->dict);
     dictEmpty(c->db->expires);
-    if (server.ds_enabled) dsFlushDb(c->db->id);
     addReply(c,shared.ok);
 }

@@ -259,14 +191,17 @@ void flushallCommand(redisClient *c) {
     signalFlushedDb(-1);
     server.dirty += emptyDb();
     addReply(c,shared.ok);
-    if (server.bgsavechildpid != -1) {
-        kill(server.bgsavechildpid,SIGKILL);
-        rdbRemoveTempFile(server.bgsavechildpid);
+    if (server.rdb_child_pid != -1) {
+        kill(server.rdb_child_pid,SIGKILL);
+        rdbRemoveTempFile(server.rdb_child_pid);
+    }
+    if (server.saveparamslen > 0) {
+        /* Normally rdbSave() will reset dirty, but we don't want this here
+         * as otherwise FLUSHALL will not be replicated nor put into the AOF. */
+        int saved_dirty = server.dirty;
+        rdbSave(server.rdb_filename);
+        server.dirty = saved_dirty;
     }
-    if (server.ds_enabled)
-        dsFlushDb(-1);
-    else
-        rdbSave(server.dbfilename);
     server.dirty++;
 }

@@ -274,22 +209,10 @@ void delCommand(redisClient *c) {
     int deleted = 0, j;

     for (j = 1; j < c->argc; j++) {
-        if (server.ds_enabled) {
-            lookupKeyRead(c->db,c->argv[j]);
-            /* FIXME: this can be optimized a lot, no real need to load
-             * a possibly huge value. */
-        }
         if (dbDelete(c->db,c->argv[j])) {
             signalModifiedKey(c->db,c->argv[j]);
             server.dirty++;
             deleted++;
-        } else if (server.ds_enabled) {
-            if (cacheKeyMayExist(c->db,c->argv[j]) &&
-                dsExists(c->db,c->argv[j]))
-            {
-                cacheScheduleIO(c->db,c->argv[j],REDIS_IO_SAVE);
-                deleted = 1;
-            }
         }
     }
     addReplyLongLong(c,deleted);
@@ -305,8 +228,16 @@ void existsCommand(redisClient *c) {
 }

 void selectCommand(redisClient *c) {
-    int id = atoi(c->argv[1]->ptr);
+    long id;
+
+    if (getLongFromObjectOrReply(c, c->argv[1], &id,
+        "invalid DB index") != REDIS_OK)
+        return;

+    if (server.cluster_enabled && id != 0) {
+        addReplyError(c,"SELECT is not allowed in cluster mode");
+        return;
+    }
     if (selectDb(c,id) == REDIS_ERR) {
         addReplyError(c,"invalid DB index");
     } else {
@@ -334,10 +265,10 @@ void keysCommand(redisClient *c) {
     unsigned long numkeys = 0;
     void *replylen = addDeferredMultiBulkLength(c);

-    di = dictGetIterator(c->db->dict);
+    di = dictGetSafeIterator(c->db->dict);
     allkeys = (pattern[0] == '*' && pattern[1] == '\0');
     while((de = dictNext(di)) != NULL) {
-        sds key = dictGetEntryKey(de);
+        sds key = dictGetKey(de);
         robj *keyobj;

         if (allkeys || stringmatchlen(pattern,plen,key,sdslen(key),0)) {
@@ -382,13 +313,28 @@ void typeCommand(redisClient *c) {
 }

 void shutdownCommand(redisClient *c) {
-    if (prepareForShutdown() == REDIS_OK)
-        exit(0);
+    int flags = 0;
+
+    if (c->argc > 2) {
+        addReply(c,shared.syntaxerr);
+        return;
+    } else if (c->argc == 2) {
+        if (!strcasecmp(c->argv[1]->ptr,"nosave")) {
+            flags |= REDIS_SHUTDOWN_NOSAVE;
+        } else if (!strcasecmp(c->argv[1]->ptr,"save")) {
+            flags |= REDIS_SHUTDOWN_SAVE;
+        } else {
+            addReply(c,shared.syntaxerr);
+            return;
+        }
+    }
+    if (prepareForShutdown(flags) == REDIS_OK) exit(0);
     addReplyError(c,"Errors trying to SHUTDOWN. Check logs.");
 }

 void renameGenericCommand(redisClient *c, int nx) {
     robj *o;
+    long long expire;

     /* To use the same key as src and dst is probably an error */
     if (sdscmp(c->argv[1]->ptr,c->argv[2]->ptr) == 0) {
@@ -400,14 +346,18 @@ void renameGenericCommand(redisClient *c, int nx) {
         return;

     incrRefCount(o);
-    if (dbAdd(c->db,c->argv[2],o) == REDIS_ERR) {
+    expire = getExpire(c->db,c->argv[1]);
+    if (lookupKeyWrite(c->db,c->argv[2]) != NULL) {
         if (nx) {
             decrRefCount(o);
             addReply(c,shared.czero);
             return;
         }
-        dbReplace(c->db,c->argv[2],o);
+        /* Overwrite: delete the old key before creating the new one with the same name. */
+        dbDelete(c->db,c->argv[2]);
     }
+    dbAdd(c->db,c->argv[2],o);
+    if (expire != -1) setExpire(c->db,c->argv[2],expire);
     dbDelete(c->db,c->argv[1]);
     signalModifiedKey(c->db,c->argv[1]);
     signalModifiedKey(c->db,c->argv[2]);
@@ -428,6 +378,11 @@ void moveCommand(redisClient *c) {
     redisDb *src, *dst;
     int srcid;

+    if (server.cluster_enabled) {
+        addReplyError(c,"MOVE is not allowed in cluster mode");
+        return;
+    }
+
     /* Obtain source and target DB pointers */
     src = c->db;
     srcid = c->db->id;
@@ -452,11 +407,12 @@ void moveCommand(redisClient *c) {
         return;
     }

-    /* Try to add the element to the target DB */
-    if (dbAdd(dst,c->argv[1],o) == REDIS_ERR) {
+    /* Return zero if the key already exists in the target DB */
+    if (lookupKeyWrite(dst,c->argv[1]) != NULL) {
         addReply(c,shared.czero);
         return;
     }
+    dbAdd(dst,c->argv[1],o);
     incrRefCount(o);

     /* OK! key moved, free the entry in the source DB */
@@ -472,22 +428,23 @@ void moveCommand(redisClient *c) {
 int removeExpire(redisDb *db, robj *key) {
     /* An expire may only be removed if there is a corresponding entry in the
      * main dict. Otherwise, the key will never be freed. */
-    redisAssert(dictFind(db->dict,key->ptr) != NULL);
+    redisAssertWithInfo(NULL,key,dictFind(db->dict,key->ptr) != NULL);
     return dictDelete(db->expires,key->ptr) == DICT_OK;
 }

-void setExpire(redisDb *db, robj *key, time_t when) {
-    dictEntry *de;
+void setExpire(redisDb *db, robj *key, long long when) {
+    dictEntry *kde, *de;

     /* Reuse the sds from the main dict in the expire dict */
-    de = dictFind(db->dict,key->ptr);
-    redisAssert(de != NULL);
-    dictReplace(db->expires,dictGetEntryKey(de),(void*)when);
+    kde = dictFind(db->dict,key->ptr);
+    redisAssertWithInfo(NULL,key,kde != NULL);
+    de = dictReplaceRaw(db->expires,dictGetKey(kde));
+    dictSetSignedIntegerVal(de,when);
 }

 /* Return the expire time of the specified key, or -1 if no expire
  * is associated with this key (i.e. the key is non volatile) */
-time_t getExpire(redisDb *db, robj *key) {
+long long getExpire(redisDb *db, robj *key) {
     dictEntry *de;

     /* No expire? return ASAP */
@@ -496,8 +453,8 @@ time_t getExpire(redisDb *db, robj *key) {

     /* The entry was found in the expire dict, this means it should also
      * be present in the main dict (safety check). */
-    redisAssert(dictFind(db->dict,key->ptr) != NULL);
-    return (time_t) dictGetEntryVal(de);
+    redisAssertWithInfo(NULL,key,dictFind(db->dict,key->ptr) != NULL);
+    return dictGetSignedIntegerVal(de);
 }

 /* Propagate expires into slaves and the AOF file.
@@ -511,11 +468,12 @@ time_t getExpire(redisDb *db, robj *key) {
 void propagateExpire(redisDb *db, robj *key) {
     robj *argv[2];

-    argv[0] = createStringObject("DEL",3);
+    argv[0] = shared.del;
     argv[1] = key;
-    incrRefCount(key);
+    incrRefCount(argv[0]);
+    incrRefCount(argv[1]);

-    if (server.appendonly)
+    if (server.aof_state != REDIS_AOF_OFF)
         feedAppendOnlyFile(server.delCommand,db->id,argv,2);
     if (listLength(server.slaves))
         replicationFeedSlaves(server.slaves,db->id,argv,2);
@@ -525,10 +483,13 @@ void propagateExpire(redisDb *db, robj *key) {
 }

 int expireIfNeeded(redisDb *db, robj *key) {
-    time_t when = getExpire(db,key);
+    long long when = getExpire(db,key);

     if (when < 0) return 0; /* No expire for this key */

+    /* Don't expire anything while loading. It will be done later. */
+    if (server.loading) return 0;
+
     /* If we are running in the context of a slave, return ASAP:
      * the slave key expiration is controlled by the master that will
      * send us synthesized DEL operations for expired keys.
@@ -537,11 +498,11 @@ int expireIfNeeded(redisDb *db, robj *key) {
      * that is, 0 if we think the key should be still valid, 1 if
      * we think the key is expired at this time. */
     if (server.masterhost != NULL) {
-        return time(NULL) > when;
+        return mstime() > when;
     }

     /* Return when this key has not expired */
-    if (time(NULL) <= when) return 0;
+    if (mstime() <= when) return 0;

     /* Delete the key */
     server.stat_expiredkeys++;
@@ -553,26 +514,49 @@ int expireIfNeeded(redisDb *db, robj *key) {
  * Expires Commands
  *----------------------------------------------------------------------------*/

-void expireGenericCommand(redisClient *c, robj *key, robj *param, long offset) {
+/* This is the generic command implementation for EXPIRE, PEXPIRE, EXPIREAT
+ * and PEXPIREAT. Because the command's second argument may be relative or absolute
+ * the "basetime" argument is used to signal what the base time is (either 0
+ * for *AT variants of the command, or the current time for relative expires).
+ *
+ * unit is either UNIT_SECONDS or UNIT_MILLISECONDS, and is only used for
+ * the argv[2] parameter. The basetime is always specified in milliseconds. */
+void expireGenericCommand(redisClient *c, long long basetime, int unit) {
     dictEntry *de;
-    long seconds;
+    robj *key = c->argv[1], *param = c->argv[2];
+    long long when; /* unix time in milliseconds when the key will expire. */

-    if (getLongFromObjectOrReply(c, param, &seconds, NULL) != REDIS_OK) return;
+    if (getLongLongFromObjectOrReply(c, param, &when, NULL) != REDIS_OK)
+        return;

-    seconds -= offset;
+    if (unit == UNIT_SECONDS) when *= 1000;
+    when += basetime;

     de = dictFind(c->db->dict,key->ptr);
     if (de == NULL) {
         addReply(c,shared.czero);
         return;
     }
-    if (seconds <= 0) {
-        if (dbDelete(c->db,key)) server.dirty++;
-        addReply(c, shared.cone);
+    /* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past
+     * should never be executed as a DEL when loading the AOF or in the context
+     * of a slave instance.
+     *
+     * Instead we take the other branch of the IF statement setting an expire
+     * (possibly in the past) and wait for an explicit DEL from the master. */
+    if (when <= mstime() && !server.loading && !server.masterhost) {
+        robj *aux;
+
+        redisAssertWithInfo(c,key,dbDelete(c->db,key));
+        server.dirty++;
+
+        /* Replicate/AOF this as an explicit DEL. */
+        aux = createStringObject("DEL",3);
+        rewriteClientCommandVector(c,2,aux,key);
+        decrRefCount(aux);
         signalModifiedKey(c->db,key);
+        addReply(c, shared.cone);
         return;
     } else {
-        time_t when = time(NULL)+seconds;
         setExpire(c->db,key,when);
         addReply(c,shared.cone);
         signalModifiedKey(c->db,key);
@@ -582,23 +566,42 @@ void expireGenericCommand(redisClient *c, robj *key, robj *param, long offset) {
 }

 void expireCommand(redisClient *c) {
-    expireGenericCommand(c,c->argv[1],c->argv[2],0);
+    expireGenericCommand(c,mstime(),UNIT_SECONDS);
 }

 void expireatCommand(redisClient *c) {
-    expireGenericCommand(c,c->argv[1],c->argv[2],time(NULL));
+    expireGenericCommand(c,0,UNIT_SECONDS);
 }

-void ttlCommand(redisClient *c) {
-    time_t expire, ttl = -1;
+void pexpireCommand(redisClient *c) {
+    expireGenericCommand(c,mstime(),UNIT_MILLISECONDS);
+}
+
+void pexpireatCommand(redisClient *c) {
+    expireGenericCommand(c,0,UNIT_MILLISECONDS);
+}
+
+void ttlGenericCommand(redisClient *c, int output_ms) {
+    long long expire, ttl = -1;

-    if (server.ds_enabled) lookupKeyRead(c->db,c->argv[1]);
     expire = getExpire(c->db,c->argv[1]);
     if (expire != -1) {
-        ttl = (expire-time(NULL));
+        ttl = expire-mstime();
         if (ttl < 0) ttl = -1;
     }
-    addReplyLongLong(c,(long long)ttl);
+    if (ttl == -1) {
+        addReplyLongLong(c,-1);
+    } else {
+        addReplyLongLong(c,output_ms ? ttl : ((ttl+500)/1000));
+    }
+}
+
+void ttlCommand(redisClient *c) {
+    ttlGenericCommand(c, 0);
+}
+
+void pttlCommand(redisClient *c) {
+    ttlGenericCommand(c, 1);
 }

 void persistCommand(redisClient *c) {
@@ -666,7 +669,7 @@ int *renameGetKeys(struct redisCommand *cmd,robj **argv, int argc, int *numkeys,
         int *keys = zmalloc(sizeof(int));
         *numkeys = 1;
         keys[0] = 1;
-        return NULL;
+        return keys;
     } else {
         return getKeysUsingCommandTable(cmd,argv,argc,numkeys);
     }
@@ -684,8 +687,40 @@ int *zunionInterGetKeys(struct redisCommand *cmd,robj **argv, int argc, int *num
         *numkeys = 0;
         return NULL;
     }
-    keys = zmalloc(num);
+    keys = zmalloc(sizeof(int)*num);
     for (i = 0; i < num; i++) keys[i] = 3+i;
     *numkeys = num;
     return keys;
 }
+
+/* Slot to Key API. This is used by Redis Cluster in order to obtain in
+ * a fast way a key that belongs to a specified hash slot. This is useful
+ * while rehashing the cluster. */
+void SlotToKeyAdd(robj *key) {
+    unsigned int hashslot = keyHashSlot(key->ptr,sdslen(key->ptr));
+
+    zslInsert(server.cluster.slots_to_keys,hashslot,key);
+    incrRefCount(key);
+}
+
+void SlotToKeyDel(robj *key) {
+    unsigned int hashslot = keyHashSlot(key->ptr,sdslen(key->ptr));
+
+    zslDelete(server.cluster.slots_to_keys,hashslot,key);
+}
+
+unsigned int GetKeysInSlot(unsigned int hashslot, robj **keys, unsigned int count) {
+    zskiplistNode *n;
+    zrangespec range;
+    int j = 0;
+
+    range.min = range.max = hashslot;
+    range.minex = range.maxex = 0;
+
+    n = zslFirstInRange(server.cluster.slots_to_keys, range);
+    while(n && n->score == hashslot && count--) {
+        keys[j++] = n->obj;
+        n = n->level[0].forward;
+    }
+    return j;
+}
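
The expire-related hunks above switch key expiration from seconds (time_t) to absolute Unix times in milliseconds (long long): expireGenericCommand() normalizes its argument with a basetime/unit pair, while ttlGenericCommand() converts the remaining milliseconds back to seconds for TTL and returns them raw for PTTL. Below is a minimal standalone C sketch of that unit arithmetic, assuming locally defined UNIT_SECONDS/UNIT_MILLISECONDS constants; the helper names are illustrative only and are not Redis APIs.

/* Standalone sketch (not part of db.c): the unit arithmetic used by the
 * millisecond-based expire code above. */
#include <stdio.h>

#define UNIT_SECONDS 0
#define UNIT_MILLISECONDS 1

/* Absolute expire time in ms, mirroring expireGenericCommand():
 * basetime is 0 for the *AT variants, or the current time in ms otherwise. */
static long long abs_expire_ms(long long value, int unit, long long basetime) {
    if (unit == UNIT_SECONDS) value *= 1000;
    return value + basetime;
}

/* TTL reply value, mirroring ttlGenericCommand(): PTTL returns raw
 * milliseconds, TTL rounds to the nearest second. */
static long long ttl_reply(long long ttl_ms, int output_ms) {
    if (ttl_ms < 0) return -1;
    return output_ms ? ttl_ms : (ttl_ms + 500) / 1000;
}

int main(void) {
    long long now_ms = 1300000000000LL;   /* pretend current time, in ms */

    /* EXPIRE key 10 -> absolute expire is now + 10000 ms */
    printf("%lld\n", abs_expire_ms(10, UNIT_SECONDS, now_ms) - now_ms);
    /* PEXPIREAT key 1300000005000 -> absolute expire is the argument itself */
    printf("%lld\n", abs_expire_ms(1300000005000LL, UNIT_MILLISECONDS, 0));
    /* A remaining TTL of 1499 ms reads as 1 via TTL and 1499 via PTTL */
    printf("%lld %lld\n", ttl_reply(1499, 0), ttl_reply(1499, 1));
    return 0;
}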