git.saurik.com Git - redis.git/commitdiff
don't overload the IO job queue if there are already too many entries
authorantirez <antirez@gmail.com>
Fri, 31 Dec 2010 15:10:09 +0000 (16:10 +0100)
committerantirez <antirez@gmail.com>
Fri, 31 Dec 2010 15:10:09 +0000 (16:10 +0100)
src/diskstore.c
src/dscache.c

index 6e59bc679cf704678945830b0d69b91ddef98fc1..06d4ae85b15a8b49207bdfb0f04a01a5d6a5e76a 100644 (file)
@@ -239,6 +239,8 @@ robj *dsGet(redisDb *db, robj *key, time_t *expire) {
     return val;
 
 readerr:
+    redisLog(REDIS_WARNING,"Read error reading reading %s. Corrupted key?",
+        buf);
     redisPanic("Unrecoverable error reading from disk store");
     return NULL; /* unreached */
 }
index cd2ef85028a4b456ad73ae6585aa5933d796ba74..7be3bf86ac27aff7154abcf0d8986919e025b020 100644 (file)
@@ -362,6 +362,8 @@ void *IOThreadEntryPoint(void *arg) {
             pthread_cond_wait(&server.io_condvar,&server.io_mutex);
             continue;
         }
+        redisLog(REDIS_DEBUG,"%ld IO jobs to process",
+            listLength(server.io_newjobs));
         ln = listFirst(server.io_newjobs);
         j = ln->value;
         listDelNode(server.io_newjobs,ln);
@@ -530,11 +532,22 @@ void cacheScheduleForFlush(redisDb *db, robj *key) {
 void cacheCron(void) {
     time_t now = time(NULL);
     listNode *ln;
+    int jobs, topush = 0;
+
+    /* Sync stuff on disk, but only if we have less than 100 IO jobs */
+    lockThreadedIO();
+    jobs = listLength(server.io_newjobs);
+    unlockThreadedIO();
+
+    topush = 100-jobs;
+    if (topush < 0) topush = 0;
 
-    /* Sync stuff on disk */
     while((ln = listFirst(server.cache_flush_queue)) != NULL) {
         dirtykey *dk = ln->value;
 
+        if (!topush) break;
+        topush--;
+
         if ((now - dk->ctime) >= server.cache_flush_delay) {
             struct dictEntry *de;
             robj *val;