X-Git-Url: https://git.saurik.com/redis.git/blobdiff_plain/419e1cca74102440d5ce174fbbf36d81a9b8956a..b1a8e3e89e7824e0375b149e487f9ff1012a2100:/src/dscache.c

diff --git a/src/dscache.c b/src/dscache.c
index 15134cc7..2dbc1134 100644
--- a/src/dscache.c
+++ b/src/dscache.c
@@ -212,7 +212,7 @@ int cacheFreeOneEntry(void) {
         }
     }
     if (best == NULL) {
-        /* Was not able to fix a single object... we should check if our
+        /* Not able to free a single object? we should check if our
          * IO queues have stuff in queue, and try to consume the queue
          * otherwise we'll use an infinite amount of memory if changes to
          * the dataset are faster than I/O */
@@ -240,13 +240,6 @@ int cacheFreeOneEntry(void) {
     return REDIS_OK;
 }
 
-/* Return true if it's safe to swap out objects in a given moment.
- * Basically we don't want to swap objects out while there is a BGSAVE
- * or a BGAEOREWRITE running in backgroud. */
-int dsCanTouchDiskStore(void) {
-    return (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1);
-}
-
 /* ==================== Disk store negative caching ========================
  *
  * When disk store is enabled, we need negative caching, that is, to remember
@@ -325,8 +318,12 @@ void freeIOJob(iojob *j) {
  * of an unix pipe in order to "awake" the main thread, and this function
  * is called.
  *
- * If privdata != NULL the function will try to put more jobs in the queue
- * of IO jobs to process as more room is made. */
+ * If privdata == NULL the function will try to put more jobs in the queue
+ * of IO jobs to process as more room is made. privdata is equal to NULL
+ * when the function is called from the event loop, so we want to push
+ * more IO jobs in the queue. Instead when the function is called by
+ * other functions that want to create a write-barrier to avoid race
+ * conditions we don't push new jobs in the queue. */
 void vmThreadedIOCompletedJob(aeEventLoop *el, int fd, void *privdata,
             int mask)
 {
@@ -386,13 +383,12 @@ void vmThreadedIOCompletedJob(aeEventLoop *el, int fd, void *privdata,
             }
             cacheScheduleIODelFlag(j->db,j->key,REDIS_IO_LOADINPROG);
             handleClientsBlockedOnSwappedKey(j->db,j->key);
-            freeIOJob(j);
         } else if (j->type == REDIS_IOJOB_SAVE) {
            cacheScheduleIODelFlag(j->db,j->key,REDIS_IO_SAVEINPROG);
-            freeIOJob(j);
         }
+        freeIOJob(j);
         processed++;
-        if (privdata != NULL) cacheScheduleIOPushJobs(0);
+        if (privdata == NULL) cacheScheduleIOPushJobs(0);
         if (processed == toprocess) return;
     }
     if (retval < 0 && errno != EAGAIN) {
@@ -469,7 +465,7 @@ void *IOThreadEntryPoint(void *arg) {
 
         /* Signal the main thread there is new stuff to process */
         redisAssert(write(server.io_ready_pipe_write,"x",1) == 1);
-        redisLog(REDIS_WARNING,"TIME (%c): %lld\n", j->type == REDIS_IOJOB_LOAD ? 'L' : 'S', ustime()-start);
+        redisLog(REDIS_DEBUG,"TIME (%c): %lld\n", j->type == REDIS_IOJOB_LOAD ? 'L' : 'S', ustime()-start);
     }
     /* never reached, but that's the full pattern... */
     unlockThreadedIO();
@@ -591,8 +587,6 @@ void queueIOJob(iojob *j) {
     redisLog(REDIS_DEBUG,"Queued IO Job %p type %d about key '%s'\n",
         (void*)j, j->type, (char*)j->key->ptr);
     listAddNodeTail(server.io_newjobs,j);
-    if (server.io_active_threads < server.vm_max_threads)
-        spawnIOThread();
 }
 
 /* Consume all the IO scheduled operations, and all the thread IO jobs
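
A minimal, self-contained C sketch of the privdata convention documented in the comment hunk above: vmThreadedIOCompletedJob refills the IO job queue only when privdata == NULL (the event-loop path), while a non-NULL privdata marks a direct call used as a write barrier that only drains completed jobs. All names below (completed_job_handler, push_more_jobs, pending_jobs) are hypothetical stand-ins for vmThreadedIOCompletedJob, cacheScheduleIOPushJobs(0) and the processed-jobs list; this is not Redis code, just an illustration of the pattern.

    #include <stdio.h>
    #include <stddef.h>

    static int pending_jobs = 3;        /* stand-in for the processed-jobs list */

    /* Stand-in for cacheScheduleIOPushJobs(0): refill the IO job queue. */
    static void push_more_jobs(void) {
        printf("pushing more IO jobs\n");
    }

    /* Stand-in for vmThreadedIOCompletedJob(): reap completed jobs, and only
     * refill the queue when called from the event loop (privdata == NULL).
     * A caller using this as a write barrier passes a non-NULL privdata and
     * gets the draining behaviour without new jobs being scheduled. */
    static void completed_job_handler(void *privdata) {
        while (pending_jobs > 0) {
            printf("reaping completed job %d\n", pending_jobs--);
            if (privdata == NULL) push_more_jobs();
        }
    }

    int main(void) {
        completed_job_handler(NULL);        /* event-loop style invocation */
        pending_jobs = 2;
        completed_job_handler((void*)0x1);  /* write-barrier style invocation */
        return 0;
    }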