From: antirez Date: Fri, 31 Dec 2010 15:10:09 +0000 (+0100) Subject: don't overload the IO job queue if there are already too many entries X-Git-Url: https://git.saurik.com/redis.git/commitdiff_plain/c4b64a13954124742575d04251da8766fbd121d4 don't overload the IO job queue if there are already too many entries --- diff --git a/src/diskstore.c b/src/diskstore.c index 6e59bc67..06d4ae85 100644 --- a/src/diskstore.c +++ b/src/diskstore.c @@ -239,6 +239,8 @@ robj *dsGet(redisDb *db, robj *key, time_t *expire) { return val; readerr: + redisLog(REDIS_WARNING,"Read error reading reading %s. Corrupted key?", + buf); redisPanic("Unrecoverable error reading from disk store"); return NULL; /* unreached */ } diff --git a/src/dscache.c b/src/dscache.c index cd2ef850..7be3bf86 100644 --- a/src/dscache.c +++ b/src/dscache.c @@ -362,6 +362,8 @@ void *IOThreadEntryPoint(void *arg) { pthread_cond_wait(&server.io_condvar,&server.io_mutex); continue; } + redisLog(REDIS_DEBUG,"%ld IO jobs to process", + listLength(server.io_newjobs)); ln = listFirst(server.io_newjobs); j = ln->value; listDelNode(server.io_newjobs,ln); @@ -530,11 +532,22 @@ void cacheScheduleForFlush(redisDb *db, robj *key) { void cacheCron(void) { time_t now = time(NULL); listNode *ln; + int jobs, topush = 0; + + /* Sync stuff on disk, but only if we have less than 100 IO jobs */ + lockThreadedIO(); + jobs = listLength(server.io_newjobs); + unlockThreadedIO(); + + topush = 100-jobs; + if (topush < 0) topush = 0; - /* Sync stuff on disk */ while((ln = listFirst(server.cache_flush_queue)) != NULL) { dirtykey *dk = ln->value; + if (!topush) break; + topush--; + if ((now - dk->ctime) >= server.cache_flush_delay) { struct dictEntry *de; robj *val;