X-Git-Url: https://git.saurik.com/redis.git/blobdiff_plain/f081eaf1c094535458d93473aabed005f99317ce..a3309139992de03f3e68ec648db188c1c43057a0:/src/dscache.c diff --git a/src/dscache.c b/src/dscache.c index 6f180b92..46300e63 100644 --- a/src/dscache.c +++ b/src/dscache.c @@ -19,71 +19,70 @@ /* TODO: * - * - The WATCH helper will be used to signal the cache system - * we need to flush a given key/dbid into disk, adding this key/dbid - * pair into a server.ds_cache_dirty linked list AND hash table (so that we - * don't add the same thing multiple times). - * - * - cron() checks if there are elements on this list. When there are things - * to flush, we create an IO Job for the I/O thread. - * NOTE: We disalbe object sharing when server.ds_enabled == 1 so objects - * that are referenced an IO job for flushing on disk are marked as - * o->storage == REDIS_DS_SAVING. - * - * - This is what we do on key lookup: - * 1) The key already exists in memory. object->storage == REDIS_DS_MEMORY - * or it is object->storage == REDIS_DS_DIRTY: - * We don't do nothing special, lookup, return value object pointer. - * 2) The key is in memory but object->storage == REDIS_DS_SAVING. - * When this happens we block waiting for the I/O thread to process - * this object. Then continue. - * 3) The key is not in memory. We block to load the key from disk. - * Of course the key may not be present at all on the disk store as well, - * in such case we just detect this condition and continue, returning - * NULL from lookup. - * - * - Preloading of needed keys: - * 1) As it was done with VM, also with this new system we try preloading - * keys a client is going to use. We block the client, load keys - * using the I/O thread, unblock the client. Same code as VM more or less. - * - * - Reclaiming memory. - * In cron() we detect our memory limit was reached. What we - * do is deleting keys that are REDIS_DS_MEMORY, using LRU. 
- * - * If this is not enough to return again under the memory limits we also - * start to flush keys that need to be synched on disk synchronously, - * removing it from the memory. We do this blocking as memory limit is a - * much "harder" barrirer in the new design. - * - * - IO thread operations are no longer stopped for sync loading/saving of - * things. When a key is found to be in the process of being saved - * we simply wait for the IO thread to end its work. - * - * Otherwise if there is to load a key without any IO thread operation - * just started it is blocking-loaded in the lookup function. + * WARNING: most of the following todo items and design issues are no + * longer relevant with the new design. Here as a checklist to see if + * some old ideas still apply. * * - What happens when an object is destroyed? * - * If o->storage == REDIS_DS_MEMORY then we simply destory the object. - * If o->storage == REDIS_DS_DIRTY we can still remove the object. It had - * changes not flushed on disk, but is being removed so - * who cares. - * if o->storage == REDIS_DS_SAVING then the object is being saved so - * it is impossible that its refcount == 1, must be at - * least two. When the object is saved the storage will - * be set back to DS_MEMORY. + * If the object is destroyed since semantically it was deleted or + * replaced with something new, we don't care if there was a SAVE + * job pending for it. Anyway when the IO JOb will be created we'll get + * the pointer of the current value. * - * - What happens when keys are deleted? - * - * We simply schedule a key flush operation as usually, but when the - * IO thread will be created the object pointer will be set to NULL - * so the IO thread will know that the work to do is to delete the key - * from the disk store. 
+ * If the object is already a REDIS_IO_SAVEINPROG object, then it is + * impossible that we get a decrRefCount() that will reach refcount of zero + * since the object is both in the dataset and in the io job entry. * * - What happens with MULTI/EXEC? * - * Good question. + * Good question. Without some kind of versioning with a global counter + * it is not possible to have trasactions on disk, but they are still + * useful since from the point of view of memory and client bugs it is + * a protection anyway. Also it's useful for WATCH. + * + * Btw there is to check what happens when WATCH gets combined to keys + * that gets removed from the object cache. Should be save but better + * to check. + * + * - Check if/why INCR will not update the LRU info for the object. + * + * - Fix/Check the following race condition: a key gets a DEL so there is + * a write operation scheduled against this key. Later the same key will + * be the argument of a GET, but the write operation was still not + * completed (to delete the file). If the GET will be for some reason + * a blocking loading (via lookup) we can load the old value on memory. + * + * This problems can be fixed with negative caching. We can use it + * to optimize the system, but also when a key is deleted we mark + * it as non existing on disk as well (in a way that this cache + * entry can't be evicted, setting time to 0), then we avoid looking at + * the disk at all if the key can't be there. When an IO Job complete + * a deletion, we set the time of the negative caching to a non zero + * value so it will be evicted later. + * + * Are there other patterns like this where we load stale data? + * + * Also, make sure that key preloading is ONLY done for keys that are + * not marked as cacheKeyDoesNotExist(), otherwise, again, we can load + * data from disk that should instead be deleted. + * + * - dsSet() should use rename(2) in order to avoid corruptions. 
+ * + * - Don't add a LOAD if there is already a LOADINPROGRESS, or is this + * impossible since anyway the io_keys stuff will work as lock? + * + * - Serialize special encoded things in a raw form. + * + * - When putting IO read operations on top of the queue, do this only if + * the already-on-top operation is not a save or if it is a save that + * is scheduled for later execution. If there is a save that is ready to + * fire, let's insert the load operation just before the first save that + * is scheduled for later exection for instance. + * + * - Support MULTI/EXEC transactions via a journal file, that is played on + * startup to check if there is cleanup to do. This way we can implement + * transactions with our simple file based KV store. */ /* Virtual Memory is composed mainly of two subsystems: @@ -107,69 +106,36 @@ * as a fully non-blocking VM. */ +void spawnIOThread(void); +int cacheScheduleIOPushJobs(int flags); +int processActiveIOJobs(int max); + /* =================== Virtual Memory - Blocking Side ====================== */ -void vmInit(void) { - off_t totsize; +void dsInit(void) { int pipefds[2]; size_t stacksize; - struct flock fl; - if (server.vm_max_threads != 0) - zmalloc_enable_thread_safeness(); /* we need thread safe zmalloc() */ + zmalloc_enable_thread_safeness(); /* we need thread safe zmalloc() */ - redisLog(REDIS_NOTICE,"Using '%s' as swap file",server.vm_swap_file); - /* Try to open the old swap file, otherwise create it */ - if ((server.vm_fp = fopen(server.vm_swap_file,"r+b")) == NULL) { - server.vm_fp = fopen(server.vm_swap_file,"w+b"); - } - if (server.vm_fp == NULL) { - redisLog(REDIS_WARNING, - "Can't open the swap file: %s. Exiting.", - strerror(errno)); - exit(1); - } - server.vm_fd = fileno(server.vm_fp); - /* Lock the swap file for writing, this is useful in order to avoid - * another instance to use the same swap file for a config error. 
*/ - fl.l_type = F_WRLCK; - fl.l_whence = SEEK_SET; - fl.l_start = fl.l_len = 0; - if (fcntl(server.vm_fd,F_SETLK,&fl) == -1) { - redisLog(REDIS_WARNING, - "Can't lock the swap file at '%s': %s. Make sure it is not used by another Redis instance.", server.vm_swap_file, strerror(errno)); + redisLog(REDIS_NOTICE,"Opening Disk Store: %s", server.ds_path); + /* Open Disk Store */ + if (dsOpen() != REDIS_OK) { + redisLog(REDIS_WARNING,"Fatal error opening disk store. Exiting."); exit(1); - } - /* Initialize */ - server.vm_next_page = 0; - server.vm_near_pages = 0; - server.vm_stats_used_pages = 0; - server.vm_stats_swapped_objects = 0; - server.vm_stats_swapouts = 0; - server.vm_stats_swapins = 0; - totsize = server.vm_pages*server.vm_page_size; - redisLog(REDIS_NOTICE,"Allocating %lld bytes of swap file",totsize); - if (ftruncate(server.vm_fd,totsize) == -1) { - redisLog(REDIS_WARNING,"Can't ftruncate swap file: %s. Exiting.", - strerror(errno)); - exit(1); - } else { - redisLog(REDIS_NOTICE,"Swap file allocated with success"); - } - server.vm_bitmap = zcalloc((server.vm_pages+7)/8); - redisLog(REDIS_VERBOSE,"Allocated %lld bytes page table for %lld pages", - (long long) (server.vm_pages+7)/8, server.vm_pages); + }; - /* Initialize threaded I/O (used by Virtual Memory) */ + /* Initialize threaded I/O for Object Cache */ server.io_newjobs = listCreate(); server.io_processing = listCreate(); server.io_processed = listCreate(); server.io_ready_clients = listCreate(); pthread_mutex_init(&server.io_mutex,NULL); - pthread_mutex_init(&server.io_swapfile_mutex,NULL); + pthread_cond_init(&server.io_condvar,NULL); + pthread_mutex_init(&server.bgsavethread_mutex,NULL); server.io_active_threads = 0; if (pipe(pipefds) == -1) { - redisLog(REDIS_WARNING,"Unable to intialized VM: pipe(2): %s. Exiting." + redisLog(REDIS_WARNING,"Unable to intialized DS: pipe(2): %s. Exiting." 
,strerror(errno)); exit(1); } @@ -190,139 +156,21 @@ void vmInit(void) { if (aeCreateFileEvent(server.el, server.io_ready_pipe_read, AE_READABLE, vmThreadedIOCompletedJob, NULL) == AE_ERR) oom("creating file event"); -} - -/* Write the specified object at the specified page of the swap file */ -int vmWriteObjectOnSwap(robj *o, off_t page) { - if (server.vm_enabled) pthread_mutex_lock(&server.io_swapfile_mutex); - if (fseeko(server.vm_fp,page*server.vm_page_size,SEEK_SET) == -1) { - if (server.vm_enabled) pthread_mutex_unlock(&server.io_swapfile_mutex); - redisLog(REDIS_WARNING, - "Critical VM problem in vmWriteObjectOnSwap(): can't seek: %s", - strerror(errno)); - return REDIS_ERR; - } - rdbSaveObject(server.vm_fp,o); - fflush(server.vm_fp); - if (server.vm_enabled) pthread_mutex_unlock(&server.io_swapfile_mutex); - return REDIS_OK; -} - -/* Transfers the 'val' object to disk. Store all the information - * a 'vmpointer' object containing all the information needed to load the - * object back later is returned. - * - * If we can't find enough contiguous empty pages to swap the object on disk - * NULL is returned. */ -vmpointer *vmSwapObjectBlocking(robj *val) { - off_t pages = rdbSavedObjectPages(val); - off_t page; - vmpointer *vp; - - redisAssert(val->storage == REDIS_VM_MEMORY); - redisAssert(val->refcount == 1); - if (vmFindContiguousPages(&page,pages) == REDIS_ERR) return NULL; - if (vmWriteObjectOnSwap(val,page) == REDIS_ERR) return NULL; - - vp = createVmPointer(val->type); - vp->page = page; - vp->usedpages = pages; - decrRefCount(val); /* Deallocate the object from memory. 
*/ - vmMarkPagesUsed(page,pages); - redisLog(REDIS_DEBUG,"VM: object %p swapped out at %lld (%lld pages)", - (void*) val, - (unsigned long long) page, (unsigned long long) pages); - server.vm_stats_swapped_objects++; - server.vm_stats_swapouts++; - return vp; -} - -robj *vmReadObjectFromSwap(off_t page, int type) { - robj *o; - - if (server.vm_enabled) pthread_mutex_lock(&server.io_swapfile_mutex); - if (fseeko(server.vm_fp,page*server.vm_page_size,SEEK_SET) == -1) { - redisLog(REDIS_WARNING, - "Unrecoverable VM problem in vmReadObjectFromSwap(): can't seek: %s", - strerror(errno)); - _exit(1); - } - o = rdbLoadObject(type,server.vm_fp); - if (o == NULL) { - redisLog(REDIS_WARNING, "Unrecoverable VM problem in vmReadObjectFromSwap(): can't load object from swap file: %s", strerror(errno)); - _exit(1); - } - if (server.vm_enabled) pthread_mutex_unlock(&server.io_swapfile_mutex); - return o; -} - -/* Load the specified object from swap to memory. - * The newly allocated object is returned. - * - * If preview is true the unserialized object is returned to the caller but - * the pages are not marked as freed, nor the vp object is freed. */ -robj *vmGenericLoadObject(vmpointer *vp, int preview) { - robj *val; - - redisAssert(vp->type == REDIS_VMPOINTER && - (vp->storage == REDIS_VM_SWAPPED || vp->storage == REDIS_VM_LOADING)); - val = vmReadObjectFromSwap(vp->page,vp->vtype); - if (!preview) { - redisLog(REDIS_DEBUG, "VM: object %p loaded from disk", (void*)vp); - vmMarkPagesFree(vp->page,vp->usedpages); - zfree(vp); - server.vm_stats_swapped_objects--; - } else { - redisLog(REDIS_DEBUG, "VM: object %p previewed from disk", (void*)vp); - } - server.vm_stats_swapins++; - return val; -} - -/* Plain object loading, from swap to memory. - * - * 'o' is actually a redisVmPointer structure that will be freed by the call. - * The return value is the loaded object. 
*/ -robj *vmLoadObject(robj *o) { - /* If we are loading the object in background, stop it, we - * need to load this object synchronously ASAP. */ - if (o->storage == REDIS_VM_LOADING) - vmCancelThreadedIOJob(o); - return vmGenericLoadObject((vmpointer*)o,0); -} -/* Just load the value on disk, without to modify the key. - * This is useful when we want to perform some operation on the value - * without to really bring it from swap to memory, like while saving the - * dataset or rewriting the append only log. */ -robj *vmPreviewObject(robj *o) { - return vmGenericLoadObject((vmpointer*)o,1); + /* Spawn our I/O thread */ + spawnIOThread(); } -/* How a good candidate is this object for swapping? - * The better candidate it is, the greater the returned value. - * - * Currently we try to perform a fast estimation of the object size in - * memory, and combine it with aging informations. - * - * Basically swappability = idle-time * log(estimated size) - * - * Bigger objects are preferred over smaller objects, but not - * proportionally, this is why we use the logarithm. This algorithm is - * just a first try and will probably be tuned later. */ +/* Compute how good candidate the specified object is for eviction. + * An higher number means a better candidate. */ double computeObjectSwappability(robj *o) { /* actual age can be >= minage, but not < minage. As we use wrapping * 21 bit clocks with minutes resolution for the LRU. */ return (double) estimateObjectIdleTime(o); } -/* Try to swap an object that's a good candidate for swapping. - * Returns REDIS_OK if the object was swapped, REDIS_ERR if it's not possible - * to swap any object at all. - * - * If 'usethreaded' is true, Redis will try to swap the object in background - * using I/O threads. 
*/ -int vmSwapOneObject(int usethreads) { +/* Try to free one entry from the diskstore object cache */ +int cacheFreeOneEntry(void) { int j, i; struct dictEntry *best = NULL; double best_swappability = 0; @@ -337,21 +185,21 @@ int vmSwapOneObject(int usethreads) { * are swappable objects */ int maxtries = 100; - if (dictSize(db->dict) == 0) continue; - for (i = 0; i < 5; i++) { + for (i = 0; i < 5 && dictSize(db->dict); i++) { dictEntry *de; double swappability; + robj keyobj; + sds keystr; if (maxtries) maxtries--; de = dictGetRandomKey(db->dict); + keystr = dictGetEntryKey(de); val = dictGetEntryVal(de); - /* Only swap objects that are currently in memory. - * - * Also don't swap shared objects: not a good idea in general and - * we need to ensure that the main thread does not touch the - * object while the I/O thread is using it, but we can't - * control other keys without adding additional mutex. */ - if (val->storage != REDIS_VM_MEMORY || val->refcount != 1) { + initStaticStringObject(keyobj,keystr); + + /* Don't remove objects that are currently target of a + * read or write operation. */ + if (cacheScheduleIOGetFlags(db,&keyobj) != 0) { if (maxtries) i--; /* don't count this try */ continue; } @@ -363,60 +211,106 @@ int vmSwapOneObject(int usethreads) { } } } - if (best == NULL) return REDIS_ERR; + if (best == NULL) { + /* Not able to free a single object? we should check if our + * IO queues have stuff in queue, and try to consume the queue + * otherwise we'll use an infinite amount of memory if changes to + * the dataset are faster than I/O */ + if (listLength(server.cache_io_queue) > 0) { + redisLog(REDIS_DEBUG,"--- Busy waiting IO to reclaim memory"); + cacheScheduleIOPushJobs(REDIS_IO_ASAP); + processActiveIOJobs(1); + return REDIS_OK; + } + /* Nothing to free at all... 
*/ + return REDIS_ERR; + } key = dictGetEntryKey(best); val = dictGetEntryVal(best); - redisLog(REDIS_DEBUG,"Key with best swappability: %s, %f", + redisLog(REDIS_DEBUG,"Key selected for cache eviction: %s swappability:%f", key, best_swappability); - /* Swap it */ - if (usethreads) { - robj *keyobj = createStringObject(key,sdslen(key)); - vmSwapObjectThreaded(keyobj,val,best_db); - decrRefCount(keyobj); - return REDIS_OK; - } else { - vmpointer *vp; - - if ((vp = vmSwapObjectBlocking(val)) != NULL) { - dictGetEntryVal(best) = vp; - return REDIS_OK; - } else { - return REDIS_ERR; - } + /* Delete this key from memory */ + { + robj *kobj = createStringObject(key,sdslen(key)); + dbDelete(best_db,kobj); + decrRefCount(kobj); } + return REDIS_OK; } -int vmSwapOneObjectBlocking() { - return vmSwapOneObject(0); +/* ==================== Disk store negative caching ======================== + * + * When disk store is enabled, we need negative caching, that is, to remember + * keys that are for sure *not* on the disk key-value store. + * + * This is usefuls because without negative caching cache misses will cost us + * a disk lookup, even if the same non existing key is accessed again and again. + * + * With negative caching we remember that the key is not on disk, so if it's + * not in memory and we have a negative cache entry, we don't try a disk + * access at all. + */ + +/* Returns true if the specified key may exists on disk, that is, we don't + * have an entry in our negative cache for this key */ +int cacheKeyMayExist(redisDb *db, robj *key) { + return dictFind(db->io_negcache,key) == NULL; } -int vmSwapOneObjectThreaded() { - return vmSwapOneObject(1); +/* Set the specified key as an entry that may possibily exist on disk, that is, + * remove the negative cache entry for this key if any. */ +void cacheSetKeyMayExist(redisDb *db, robj *key) { + dictDelete(db->io_negcache,key); } -/* Return true if it's safe to swap out objects in a given moment. 
- * Basically we don't want to swap objects out while there is a BGSAVE - * or a BGAEOREWRITE running in backgroud. */ -int vmCanSwapOut(void) { - return (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1); +/* Set the specified key as non existing on disk, that is, create a negative + * cache entry for this key. */ +void cacheSetKeyDoesNotExist(redisDb *db, robj *key) { + if (dictReplace(db->io_negcache,key,(void*)time(NULL))) { + incrRefCount(key); + } } -/* =================== Virtual Memory - Threaded I/O ======================= */ +/* Remove one entry from negative cache using approximated LRU. */ +int negativeCacheEvictOneEntry(void) { + struct dictEntry *de; + robj *best = NULL; + redisDb *best_db = NULL; + time_t time, best_time = 0; + int j; -void freeIOJob(iojob *j) { - if ((j->type == REDIS_IOJOB_PREPARE_SWAP || - j->type == REDIS_IOJOB_DO_SWAP || - j->type == REDIS_IOJOB_LOAD) && j->val != NULL) - { - /* we fix the storage type, otherwise decrRefCount() will try to - * kill the I/O thread Job (that does no longer exists). */ - if (j->val->storage == REDIS_VM_SWAPPING) - j->val->storage = REDIS_VM_MEMORY; - decrRefCount(j->val); + for (j = 0; j < server.dbnum; j++) { + redisDb *db = server.db+j; + int i; + + if (dictSize(db->io_negcache) == 0) continue; + for (i = 0; i < 3; i++) { + de = dictGetRandomKey(db->io_negcache); + time = (time_t) dictGetEntryVal(de); + + if (best == NULL || time < best_time) { + best = dictGetEntryKey(de); + best_db = db; + best_time = time; + } + } + } + if (best) { + dictDelete(best_db->io_negcache,best); + return REDIS_OK; + } else { + return REDIS_ERR; } +} + +/* ================== Disk store cache - Threaded I/O ====================== */ + +void freeIOJob(iojob *j) { decrRefCount(j->key); + /* j->val can be NULL if the job is about deleting the key from disk. 
*/ + if (j->val) decrRefCount(j->val); zfree(j); } @@ -424,30 +318,25 @@ void freeIOJob(iojob *j) { * of an unix pipe in order to "awake" the main thread, and this function * is called. * - * Note that this is called both by the event loop, when a I/O thread - * sends a byte in the notification pipe, and is also directly called from - * waitEmptyIOJobsQueue(). - * - * In the latter case we don't want to swap more, so we use the - * "privdata" argument setting it to a not NULL value to signal this - * condition. */ + * If privdata == NULL the function will try to put more jobs in the queue + * of IO jobs to process as more room is made. privdata is equal to NULL + * when the function is called from the event loop, so we want to push + * more IO jobs in the queue. Instead when the function is called by + * other functions that want to create a write-barrier to avoid race + * conditions we don't push new jobs in the queue. */ void vmThreadedIOCompletedJob(aeEventLoop *el, int fd, void *privdata, int mask) { char buf[1]; - int retval, processed = 0, toprocess = -1, trytoswap = 1; + int retval, processed = 0, toprocess = -1; REDIS_NOTUSED(el); REDIS_NOTUSED(mask); - REDIS_NOTUSED(privdata); - - if (privdata != NULL) trytoswap = 0; /* check the comments above... */ /* For every byte we read in the read side of the pipe, there is one * I/O job completed to process. 
*/ while((retval = read(fd,buf,1)) == 1) { iojob *j; listNode *ln; - struct dictEntry *de; redisLog(REDIS_DEBUG,"Processing I/O completed job"); @@ -462,102 +351,44 @@ void vmThreadedIOCompletedJob(aeEventLoop *el, int fd, void *privdata, j = ln->value; listDelNode(server.io_processed,ln); unlockThreadedIO(); - /* If this job is marked as canceled, just ignore it */ - if (j->canceled) { - freeIOJob(j); - continue; - } + /* Post process it in the main thread, as there are things we * can do just here to avoid race conditions and/or invasive locks */ - redisLog(REDIS_DEBUG,"COMPLETED Job type: %d, ID %p, key: %s", j->type, (void*)j->id, (unsigned char*)j->key->ptr); - de = dictFind(j->db->dict,j->key->ptr); - redisAssert(de != NULL); + redisLog(REDIS_DEBUG,"COMPLETED Job type %s, key: %s", + (j->type == REDIS_IOJOB_LOAD) ? "load" : "save", + (unsigned char*)j->key->ptr); if (j->type == REDIS_IOJOB_LOAD) { - redisDb *db; - vmpointer *vp = dictGetEntryVal(de); - - /* Key loaded, bring it at home */ - vmMarkPagesFree(vp->page,vp->usedpages); - redisLog(REDIS_DEBUG, "VM: object %s loaded from disk (threaded)", - (unsigned char*) j->key->ptr); - server.vm_stats_swapped_objects--; - server.vm_stats_swapins++; - dictGetEntryVal(de) = j->val; - incrRefCount(j->val); - db = j->db; - /* Handle clients waiting for this key to be loaded. */ - handleClientsBlockedOnSwappedKey(db,j->key); - freeIOJob(j); - zfree(vp); - } else if (j->type == REDIS_IOJOB_PREPARE_SWAP) { - /* Now we know the amount of pages required to swap this object. - * Let's find some space for it, and queue this task again - * rebranded as REDIS_IOJOB_DO_SWAP. */ - if (!vmCanSwapOut() || - vmFindContiguousPages(&j->page,j->pages) == REDIS_ERR) - { - /* Ooops... no space or we can't swap as there is - * a fork()ed Redis trying to save stuff on disk. 
*/ - j->val->storage = REDIS_VM_MEMORY; /* undo operation */ - freeIOJob(j); + /* Create the key-value pair in the in-memory database */ + if (j->val != NULL) { + /* Note: it's possible that the key is already in memory + * due to a blocking load operation. */ + if (dbAdd(j->db,j->key,j->val) == REDIS_OK) { + incrRefCount(j->val); + if (j->expire != -1) setExpire(j->db,j->key,j->expire); + } } else { - /* Note that we need to mark this pages as used now, - * if the job will be canceled, we'll mark them as freed - * again. */ - vmMarkPagesUsed(j->page,j->pages); - j->type = REDIS_IOJOB_DO_SWAP; - lockThreadedIO(); - queueIOJob(j); - unlockThreadedIO(); - } - } else if (j->type == REDIS_IOJOB_DO_SWAP) { - vmpointer *vp; - - /* Key swapped. We can finally free some memory. */ - if (j->val->storage != REDIS_VM_SWAPPING) { - vmpointer *vp = (vmpointer*) j->id; - printf("storage: %d\n",vp->storage); - printf("key->name: %s\n",(char*)j->key->ptr); - printf("val: %p\n",(void*)j->val); - printf("val->type: %d\n",j->val->type); - printf("val->ptr: %s\n",(char*)j->val->ptr); - } - redisAssert(j->val->storage == REDIS_VM_SWAPPING); - vp = createVmPointer(j->val->type); - vp->page = j->page; - vp->usedpages = j->pages; - dictGetEntryVal(de) = vp; - /* Fix the storage otherwise decrRefCount will attempt to - * remove the associated I/O job */ - j->val->storage = REDIS_VM_MEMORY; - decrRefCount(j->val); - redisLog(REDIS_DEBUG, - "VM: object %s swapped out at %lld (%lld pages) (threaded)", - (unsigned char*) j->key->ptr, - (unsigned long long) j->page, (unsigned long long) j->pages); - server.vm_stats_swapped_objects++; - server.vm_stats_swapouts++; - freeIOJob(j); - /* Put a few more swap requests in queue if we are still - * out of memory */ - if (trytoswap && vmCanSwapOut() && - zmalloc_used_memory() > server.vm_max_memory) - { - int more = 1; - while(more) { - lockThreadedIO(); - more = listLength(server.io_newjobs) < - (unsigned) server.vm_max_threads; - unlockThreadedIO(); - 
/* Don't waste CPU time if swappable objects are rare. */ - if (vmSwapOneObjectThreaded() == REDIS_ERR) { - trytoswap = 0; - break; - } + /* Key not found on disk. If it is also not in memory + * as a cached object, nor there is a job writing it + * in background, we are sure the key does not exist + * currently. + * + * So we set a negative cache entry avoiding that the + * resumed client will block load what does not exist... */ + if (dictFind(j->db->dict,j->key->ptr) == NULL && + (cacheScheduleIOGetFlags(j->db,j->key) & + (REDIS_IO_SAVE|REDIS_IO_SAVEINPROG)) == 0) + { + cacheSetKeyDoesNotExist(j->db,j->key); } } + cacheScheduleIODelFlag(j->db,j->key,REDIS_IO_LOADINPROG); + handleClientsBlockedOnSwappedKey(j->db,j->key); + } else if (j->type == REDIS_IOJOB_SAVE) { + cacheScheduleIODelFlag(j->db,j->key,REDIS_IO_SAVEINPROG); } + freeIOJob(j); processed++; + if (privdata == NULL) cacheScheduleIOPushJobs(0); if (processed == toprocess) return; } if (retval < 0 && errno != EAGAIN) { @@ -579,53 +410,66 @@ void *IOThreadEntryPoint(void *arg) { iojob *j; listNode *ln; REDIS_NOTUSED(arg); + long long start; pthread_detach(pthread_self()); + lockThreadedIO(); while(1) { /* Get a new job to process */ - lockThreadedIO(); if (listLength(server.io_newjobs) == 0) { - /* No new jobs in queue, exit. 
*/ - redisLog(REDIS_DEBUG,"Thread %ld exiting, nothing to do", - (long) pthread_self()); - server.io_active_threads--; - unlockThreadedIO(); - return NULL; + /* Wait for more work to do */ + redisLog(REDIS_DEBUG,"[T] wait for signal"); + pthread_cond_wait(&server.io_condvar,&server.io_mutex); + redisLog(REDIS_DEBUG,"[T] signal received"); + continue; } + start = ustime(); + redisLog(REDIS_DEBUG,"[T] %ld IO jobs to process", + listLength(server.io_newjobs)); ln = listFirst(server.io_newjobs); j = ln->value; listDelNode(server.io_newjobs,ln); /* Add the job in the processing queue */ - j->thread = pthread_self(); listAddNodeTail(server.io_processing,j); ln = listLast(server.io_processing); /* We use ln later to remove it */ unlockThreadedIO(); - redisLog(REDIS_DEBUG,"Thread %ld got a new job (type %d): %p about key '%s'", - (long) pthread_self(), j->type, (void*)j, (char*)j->key->ptr); + + redisLog(REDIS_DEBUG,"[T] %ld: new job type %s: %p about key '%s'", + (long) pthread_self(), + (j->type == REDIS_IOJOB_LOAD) ? 
"load" : "save", + (void*)j, (char*)j->key->ptr); /* Process the Job */ if (j->type == REDIS_IOJOB_LOAD) { - vmpointer *vp = (vmpointer*)j->id; - j->val = vmReadObjectFromSwap(j->page,vp->vtype); - } else if (j->type == REDIS_IOJOB_PREPARE_SWAP) { - j->pages = rdbSavedObjectPages(j->val); - } else if (j->type == REDIS_IOJOB_DO_SWAP) { - if (vmWriteObjectOnSwap(j->val,j->page) == REDIS_ERR) - j->canceled = 1; + time_t expire; + + j->val = dsGet(j->db,j->key,&expire); + if (j->val) j->expire = expire; + } else if (j->type == REDIS_IOJOB_SAVE) { + if (j->val) { + dsSet(j->db,j->key,j->val,j->expire); + } else { + dsDel(j->db,j->key); + } } /* Done: insert the job into the processed queue */ - redisLog(REDIS_DEBUG,"Thread %ld completed the job: %p (key %s)", + redisLog(REDIS_DEBUG,"[T] %ld completed the job: %p (key %s)", (long) pthread_self(), (void*)j, (char*)j->key->ptr); + + redisLog(REDIS_DEBUG,"[T] lock IO"); lockThreadedIO(); + redisLog(REDIS_DEBUG,"[T] IO locked"); listDelNode(server.io_processing,ln); listAddNodeTail(server.io_processed,j); - unlockThreadedIO(); /* Signal the main thread there is new stuff to process */ redisAssert(write(server.io_ready_pipe_write,"x",1) == 1); + redisLog(REDIS_DEBUG,"TIME (%c): %lld\n", j->type == REDIS_IOJOB_LOAD ? 'L' : 'S', ustime()-start); } - return NULL; /* never reached */ + /* never reached, but that's the full pattern... */ + unlockThreadedIO(); + return NULL; } void spawnIOThread(void) { @@ -647,34 +491,95 @@ void spawnIOThread(void) { server.io_active_threads++; } -/* We need to wait for the last thread to exit before we are able to - * fork() in order to BGSAVE or BGREWRITEAOF. */ -void waitEmptyIOJobsQueue(void) { - while(1) { +/* Wait that up to 'max' pending IO Jobs are processed by the I/O thread. + * From our point of view an IO job processed means that the count of + * server.io_processed must increase by one. + * + * If max is -1, all the pending IO jobs will be processed. 
+ * + * Returns the number of IO jobs processed. + * + * NOTE: while this may appear like a busy loop, we are actually blocked + * by IO since we continuously acquire/release the IO lock. */ +int processActiveIOJobs(int max) { + int processed = 0; + + while(max == -1 || max > 0) { int io_processed_len; + redisLog(REDIS_DEBUG,"[P] lock IO"); lockThreadedIO(); + redisLog(REDIS_DEBUG,"Waiting IO jobs processing: new:%d proessing:%d processed:%d",listLength(server.io_newjobs),listLength(server.io_processing),listLength(server.io_processed)); + if (listLength(server.io_newjobs) == 0 && - listLength(server.io_processing) == 0 && - server.io_active_threads == 0) + listLength(server.io_processing) == 0) { + /* There is nothing more to process */ + redisLog(REDIS_DEBUG,"[P] Nothing to process, unlock IO, return"); unlockThreadedIO(); - return; + break; + } + +#if 1 + /* If there are new jobs we need to signal the thread to + * process the next one. FIXME: drop this if useless. */ + redisLog(REDIS_DEBUG,"[P] waitEmptyIOJobsQueue: new %d, processing %d, processed %d", + listLength(server.io_newjobs), + listLength(server.io_processing), + listLength(server.io_processed)); + + if (listLength(server.io_newjobs)) { + redisLog(REDIS_DEBUG,"[P] There are new jobs, signal"); + pthread_cond_signal(&server.io_condvar); } - /* While waiting for empty jobs queue condition we post-process some - * finshed job, as I/O threads may be hanging trying to write against - * the io_ready_pipe_write FD but there are so much pending jobs that - * it's blocking. 
*/ +#endif + + /* Check if we can process some finished job */ io_processed_len = listLength(server.io_processed); + redisLog(REDIS_DEBUG,"[P] Unblock IO"); unlockThreadedIO(); + redisLog(REDIS_DEBUG,"[P] Wait"); + usleep(10000); if (io_processed_len) { vmThreadedIOCompletedJob(NULL,server.io_ready_pipe_read, (void*)0xdeadbeef,0); - usleep(1000); /* 1 millisecond */ - } else { - usleep(10000); /* 10 milliseconds */ + processed++; + if (max != -1) max--; } } + return processed; +} + +void waitEmptyIOJobsQueue(void) { + processActiveIOJobs(-1); +} + +/* Process up to 'max' IO Jobs already completed by threads but still waiting + * processing from the main thread. + * + * If max == -1 all the pending jobs are processed. + * + * The number of processed jobs is returned. */ +int processPendingIOJobs(int max) { + int processed = 0; + + while(max == -1 || max > 0) { + int io_processed_len; + + lockThreadedIO(); + io_processed_len = listLength(server.io_processed); + unlockThreadedIO(); + if (io_processed_len == 0) break; + vmThreadedIOCompletedJob(NULL,server.io_ready_pipe_read, + (void*)0xdeadbeef,0); + if (max != -1) max--; + processed++; + } + return processed; +} + +void processAllPendingIOJobs(void) { + processPendingIOJobs(-1); } /* This function must be called while with threaded IO locked */ @@ -682,56 +587,287 @@ void queueIOJob(iojob *j) { redisLog(REDIS_DEBUG,"Queued IO Job %p type %d about key '%s'\n", (void*)j, j->type, (char*)j->key->ptr); listAddNodeTail(server.io_newjobs,j); - if (server.io_active_threads < server.vm_max_threads) - spawnIOThread(); } -int vmSwapObjectThreaded(robj *key, robj *val, redisDb *db) { +/* Consume all the IO scheduled operations, and all the thread IO jobs + * so that eventually the state of diskstore is a point-in-time snapshot. + * + * This is useful when we need to BGSAVE with diskstore enabled. 
*/ +void cacheForcePointInTime(void) { + redisLog(REDIS_NOTICE,"Diskstore: synching on disk to reach point-in-time state."); + while (listLength(server.cache_io_queue) != 0) { + cacheScheduleIOPushJobs(REDIS_IO_ASAP); + processActiveIOJobs(1); + } + waitEmptyIOJobsQueue(); + processAllPendingIOJobs(); +} + +void cacheCreateIOJob(int type, redisDb *db, robj *key, robj *val, time_t expire) { iojob *j; j = zmalloc(sizeof(*j)); - j->type = REDIS_IOJOB_PREPARE_SWAP; + j->type = type; j->db = db; j->key = key; incrRefCount(key); - j->id = j->val = val; - incrRefCount(val); - j->canceled = 0; - j->thread = (pthread_t) -1; - val->storage = REDIS_VM_SWAPPING; + j->val = val; + if (val) incrRefCount(val); + j->expire = expire; lockThreadedIO(); queueIOJob(j); + pthread_cond_signal(&server.io_condvar); unlockThreadedIO(); - return REDIS_OK; } -/* ============ Virtual Memory - Blocking clients on missing keys =========== */ +/* ============= Disk store cache - Scheduling of IO operations ============= + * + * We use a queue and an hash table to hold the state of IO operations + * so that's fast to lookup if there is already an IO operation in queue + * for a given key. + * + * There are two types of IO operations for a given key: + * REDIS_IO_LOAD and REDIS_IO_SAVE. + * + * The function cacheScheduleIO() function pushes the specified IO operation + * in the queue, but avoid adding the same key for the same operation + * multiple times, thanks to the associated hash table. + * + * We take a set of flags per every key, so when the scheduled IO operation + * gets moved from the scheduled queue to the actual IO Jobs queue that + * is processed by the IO thread, we flag it as IO_LOADINPROG or + * IO_SAVEINPROG. + * + * So for every given key we always know if there is some IO operation + * scheduled, or in progress, for this key. + * + * NOTE: all this is very important in order to guarantee correctness of + * the Disk Store Cache. Jobs are always queued here. 
Load jobs are + * queued at the head for faster execution only in the case there is not + * already a write operation of some kind for this job. + * + * So we have ordering, but can do exceptions when there are no already + * operations for a given key. Also when we need to block load a given + * key, for an immediate lookup operation, we can check if the key can + * be accessed synchronously without race conditions (no IN PROGRESS + * operations for this key), otherwise we blocking wait for completion. */ + +#define REDIS_IO_LOAD 1 +#define REDIS_IO_SAVE 2 +#define REDIS_IO_LOADINPROG 4 +#define REDIS_IO_SAVEINPROG 8 + +void cacheScheduleIOAddFlag(redisDb *db, robj *key, long flag) { + struct dictEntry *de = dictFind(db->io_queued,key); + + if (!de) { + dictAdd(db->io_queued,key,(void*)flag); + incrRefCount(key); + return; + } else { + long flags = (long) dictGetEntryVal(de); + + if (flags & flag) { + redisLog(REDIS_WARNING,"Adding the same flag again: was: %ld, addede: %ld",flags,flag); + redisAssert(!(flags & flag)); + } + flags |= flag; + dictGetEntryVal(de) = (void*) flags; + } +} + +void cacheScheduleIODelFlag(redisDb *db, robj *key, long flag) { + struct dictEntry *de = dictFind(db->io_queued,key); + long flags; + + redisAssert(de != NULL); + flags = (long) dictGetEntryVal(de); + redisAssert(flags & flag); + flags &= ~flag; + if (flags == 0) { + dictDelete(db->io_queued,key); + } else { + dictGetEntryVal(de) = (void*) flags; + } +} + +int cacheScheduleIOGetFlags(redisDb *db, robj *key) { + struct dictEntry *de = dictFind(db->io_queued,key); + + return (de == NULL) ? 0 : ((long) dictGetEntryVal(de)); +} + +void cacheScheduleIO(redisDb *db, robj *key, int type) { + ioop *op; + long flags; + + if ((flags = cacheScheduleIOGetFlags(db,key)) & type) return; + + redisLog(REDIS_DEBUG,"Scheduling key %s for %s", + key->ptr, type == REDIS_IO_LOAD ? 
"loading" : "saving"); + cacheScheduleIOAddFlag(db,key,type); + op = zmalloc(sizeof(*op)); + op->type = type; + op->db = db; + op->key = key; + incrRefCount(key); + op->ctime = time(NULL); + + /* Give priority to load operations if there are no save already + * in queue for the same key. */ + if (type == REDIS_IO_LOAD && !(flags & REDIS_IO_SAVE)) { + listAddNodeHead(server.cache_io_queue, op); + cacheScheduleIOPushJobs(REDIS_IO_ONLYLOADS); + } else { + /* FIXME: probably when this happens we want to at least move + * the write job about this queue on top, and set the creation time + * to a value that will force processing ASAP. */ + listAddNodeTail(server.cache_io_queue, op); + } +} + +/* Push scheduled IO operations into IO Jobs that the IO thread can process. + * + * If flags include REDIS_IO_ONLYLOADS only load jobs are processed:this is + * useful since it's safe to push LOAD IO jobs from any place of the code, while + * SAVE io jobs should never be pushed while we are processing a command + * (not protected by lookupKey() that will block on keys in IO_SAVEINPROG + * state. + * + * The REDIS_IO_ASAP flag tells the function to don't wait for the IO job + * scheduled completion time, but just do the operation ASAP. This is useful + * when we need to reclaim memory from the IO queue. + */ +#define MAX_IO_JOBS_QUEUE 10 +int cacheScheduleIOPushJobs(int flags) { + time_t now = time(NULL); + listNode *ln; + int jobs, topush = 0, pushed = 0; + + /* Don't push new jobs if there is a threaded BGSAVE in progress. */ + if (server.bgsavethread != (pthread_t) -1) return 0; + + /* Sync stuff on disk, but only if we have less + * than MAX_IO_JOBS_QUEUE IO jobs. 
*/ + lockThreadedIO(); + jobs = listLength(server.io_newjobs); + unlockThreadedIO(); + + topush = MAX_IO_JOBS_QUEUE-jobs; + if (topush < 0) topush = 0; + if (topush > (signed)listLength(server.cache_io_queue)) + topush = listLength(server.cache_io_queue); + + while((ln = listFirst(server.cache_io_queue)) != NULL) { + ioop *op = ln->value; + struct dictEntry *de; + robj *val; + + if (!topush) break; + topush--; + + if (op->type != REDIS_IO_LOAD && flags & REDIS_IO_ONLYLOADS) break; + + /* Don't execute SAVE before the scheduled time for completion */ + if (op->type == REDIS_IO_SAVE && !(flags & REDIS_IO_ASAP) && + (now - op->ctime) < server.cache_flush_delay) break; + + /* Don't add a SAVE job in the IO thread queue if there is already + * a save in progress for the same key. */ + if (op->type == REDIS_IO_SAVE && + cacheScheduleIOGetFlags(op->db,op->key) & REDIS_IO_SAVEINPROG) + { + /* Move the operation at the end of the list if there + * are other operations, so we can try to process the next one. + * Otherwise break, nothing to do here. */ + if (listLength(server.cache_io_queue) > 1) { + listDelNode(server.cache_io_queue,ln); + listAddNodeTail(server.cache_io_queue,op); + continue; + } else { + break; + } + } + + redisLog(REDIS_DEBUG,"Creating IO %s Job for key %s", + op->type == REDIS_IO_LOAD ? "load" : "save", op->key->ptr); + + if (op->type == REDIS_IO_LOAD) { + cacheCreateIOJob(REDIS_IOJOB_LOAD,op->db,op->key,NULL,0); + } else { + time_t expire = -1; + + /* Lookup the key, in order to put the current value in the IO + * Job. Otherwise if the key does not exists we schedule a disk + * store delete operation, setting the value to NULL. */ + de = dictFind(op->db->dict,op->key->ptr); + if (de) { + val = dictGetEntryVal(de); + expire = getExpire(op->db,op->key); + } else { + /* Setting the value to NULL tells the IO thread to delete + * the key on disk. 
*/ + val = NULL; + } + cacheCreateIOJob(REDIS_IOJOB_SAVE,op->db,op->key,val,expire); + } + /* Mark the operation as in progress. */ + cacheScheduleIODelFlag(op->db,op->key,op->type); + cacheScheduleIOAddFlag(op->db,op->key, + (op->type == REDIS_IO_LOAD) ? REDIS_IO_LOADINPROG : + REDIS_IO_SAVEINPROG); + /* Finally remove the operation from the queue. + * But we'll have trace of it in the hash table. */ + listDelNode(server.cache_io_queue,ln); + decrRefCount(op->key); + zfree(op); + pushed++; + } + return pushed; +} + +void cacheCron(void) { + /* Push jobs */ + cacheScheduleIOPushJobs(0); + + /* Reclaim memory from the object cache */ + while (server.ds_enabled && zmalloc_used_memory() > + server.cache_max_memory) + { + int done = 0; + + if (cacheFreeOneEntry() == REDIS_OK) done++; + if (negativeCacheEvictOneEntry() == REDIS_OK) done++; + if (done == 0) break; /* nothing more to free */ + } +} + +/* ========== Disk store cache - Blocking clients on missing keys =========== */ /* This function makes the clinet 'c' waiting for the key 'key' to be loaded. - * If there is not already a job loading the key, it is craeted. - * The key is added to the io_keys list in the client structure, and also + * If the key is already in memory we don't need to block. + * + * FIXME: we should try if it's actually better to suspend the client + * accessing an object that is being saved, and awake it only when + * the saving was completed. + * + * Otherwise if the key is not in memory, we block the client and start + * an IO Job to load it: + * + * the key is added to the io_keys list in the client structure, and also * in the hash table mapping swapped keys to waiting clients, that is, * server.io_waited_keys. */ int waitForSwappedKey(redisClient *c, robj *key) { struct dictEntry *de; - robj *o; list *l; - /* If the key does not exist or is already in RAM we don't need to - * block the client at all. 
*/ + /* Return ASAP if the key is in memory */ de = dictFind(c->db->dict,key->ptr); - if (de == NULL) return 0; - o = dictGetEntryVal(de); - if (o->storage == REDIS_VM_MEMORY) { - return 0; - } else if (o->storage == REDIS_VM_SWAPPING) { - /* We were swapping the key, undo it! */ - vmCancelThreadedIOJob(o); - return 0; - } + if (de != NULL) return 0; - /* OK: the key is either swapped, or being loaded just now. */ + /* Don't wait for keys we are sure are not on disk either */ + if (!cacheKeyMayExist(c->db,key)) return 0; /* Add the key to the list of keys this client is waiting for. * This maps clients to keys they are waiting for. */ @@ -754,81 +890,19 @@ int waitForSwappedKey(redisClient *c, robj *key) { listAddNodeTail(l,c); /* Are we already loading the key from disk? If not create a job */ - if (o->storage == REDIS_VM_SWAPPED) { - iojob *j; - vmpointer *vp = (vmpointer*)o; - - o->storage = REDIS_VM_LOADING; - j = zmalloc(sizeof(*j)); - j->type = REDIS_IOJOB_LOAD; - j->db = c->db; - j->id = (robj*)vp; - j->key = key; - incrRefCount(key); - j->page = vp->page; - j->val = NULL; - j->canceled = 0; - j->thread = (pthread_t) -1; - lockThreadedIO(); - queueIOJob(j); - unlockThreadedIO(); + if (de == NULL) { + int flags = cacheScheduleIOGetFlags(c->db,key); + + /* It is possible that even if there are no clients waiting for + * a load operation, still we have a load operation in progress. + * For instance think of a client performing a GET and then + * closing the connection */ + if ((flags & (REDIS_IO_LOAD|REDIS_IO_LOADINPROG)) == 0) + cacheScheduleIO(c->db,key,REDIS_IO_LOAD); } return 1; } -/* Preload keys for any command with first, last and step values for - * the command keys prototype, as defined in the command table. 
*/ -void waitForMultipleSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) { - int j, last; - if (cmd->vm_firstkey == 0) return; - last = cmd->vm_lastkey; - if (last < 0) last = argc+last; - for (j = cmd->vm_firstkey; j <= last; j += cmd->vm_keystep) { - redisAssert(j < argc); - waitForSwappedKey(c,argv[j]); - } -} - -/* Preload keys needed for the ZUNIONSTORE and ZINTERSTORE commands. - * Note that the number of keys to preload is user-defined, so we need to - * apply a sanity check against argc. */ -void zunionInterBlockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) { - int i, num; - REDIS_NOTUSED(cmd); - - num = atoi(argv[2]->ptr); - if (num > (argc-3)) return; - for (i = 0; i < num; i++) { - waitForSwappedKey(c,argv[3+i]); - } -} - -/* Preload keys needed to execute the entire MULTI/EXEC block. - * - * This function is called by blockClientOnSwappedKeys when EXEC is issued, - * and will block the client when any command requires a swapped out value. */ -void execBlockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) { - int i, margc; - struct redisCommand *mcmd; - robj **margv; - REDIS_NOTUSED(cmd); - REDIS_NOTUSED(argc); - REDIS_NOTUSED(argv); - - if (!(c->flags & REDIS_MULTI)) return; - for (i = 0; i < c->mstate.count; i++) { - mcmd = c->mstate.commands[i].cmd; - margc = c->mstate.commands[i].argc; - margv = c->mstate.commands[i].argv; - - if (mcmd->vm_preload_proc != NULL) { - mcmd->vm_preload_proc(c,mcmd,margc,margv); - } else { - waitForMultipleSwappedKeys(c,mcmd,margc,margv); - } - } -} - /* Is this client attempting to run a command against swapped keys? * If so, block it ASAP, load the keys in background, then resume it. * @@ -840,17 +914,46 @@ void execBlockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd, int * Return 1 if the client is marked as blocked, 0 if the client can * continue as the keys it is going to access appear to be in memory. 
*/ int blockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd) { - if (cmd->vm_preload_proc != NULL) { - cmd->vm_preload_proc(c,cmd,c->argc,c->argv); + int *keyindex, numkeys, j, i; + + /* EXEC is a special case, we need to preload all the commands + * queued into the transaction */ + if (cmd->proc == execCommand) { + struct redisCommand *mcmd; + robj **margv; + int margc; + + if (!(c->flags & REDIS_MULTI)) return 0; + for (i = 0; i < c->mstate.count; i++) { + mcmd = c->mstate.commands[i].cmd; + margc = c->mstate.commands[i].argc; + margv = c->mstate.commands[i].argv; + + keyindex = getKeysFromCommand(mcmd,margv,margc,&numkeys, + REDIS_GETKEYS_PRELOAD); + for (j = 0; j < numkeys; j++) { + redisLog(REDIS_DEBUG,"Preloading %s", + (char*)margv[keyindex[j]]->ptr); + waitForSwappedKey(c,margv[keyindex[j]]); + } + getKeysFreeResult(keyindex); + } } else { - waitForMultipleSwappedKeys(c,cmd,c->argc,c->argv); + keyindex = getKeysFromCommand(cmd,c->argv,c->argc,&numkeys, + REDIS_GETKEYS_PRELOAD); + for (j = 0; j < numkeys; j++) { + redisLog(REDIS_DEBUG,"Preloading %s", + (char*)c->argv[keyindex[j]]->ptr); + waitForSwappedKey(c,c->argv[keyindex[j]]); + } + getKeysFreeResult(keyindex); } /* If the client was blocked for at least one key, mark it as blocked. */ if (listLength(c->io_keys)) { c->flags |= REDIS_IO_WAIT; aeDeleteFileEvent(server.el,c->fd,AE_READABLE); - server.vm_blocked_clients++; + server.cache_blocked_clients++; return 1; } else { return 0;