]>
Commit | Line | Data |
---|---|---|
e2641e09 | 1 | #include "redis.h" |
2 | ||
3 | #include <fcntl.h> | |
4 | #include <pthread.h> | |
5 | #include <math.h> | |
6 | #include <signal.h> | |
7 | ||
33388d43 | 8 | /* dscache.c - Disk store cache for disk store backend. |
9 | * | |
10 | * When Redis is configured for using disk as backend instead of memory, the | |
11 | * memory is used as a cache, so that recently accessed keys are taken in | |
12 | * memory for fast read and write operations. | |
13 | * | |
14 | * Modified keys are marked to be flushed on disk, and will be flushed | |
 * as long as the maximum configured flush time has elapsed.
16 | * | |
17 | * This file implements the whole caching subsystem and contains further | |
18 | * documentation. */ | |
19 | ||
20 | /* TODO: | |
21 | * | |
22 | * - The WATCH helper will be used to signal the cache system | |
23 | * we need to flush a given key/dbid into disk, adding this key/dbid | |
24 | * pair into a server.ds_cache_dirty linked list AND hash table (so that we | |
25 | * don't add the same thing multiple times). | |
26 | * | |
27 | * - cron() checks if there are elements on this list. When there are things | |
28 | * to flush, we create an IO Job for the I/O thread. | |
 *   NOTE: We disable object sharing when server.ds_enabled == 1 so objects
30 | * that are referenced an IO job for flushing on disk are marked as | |
31 | * o->storage == REDIS_DS_SAVING. | |
33388d43 | 32 | * |
33 | * - This is what we do on key lookup: | |
16d77878 | 34 | * 1) The key already exists in memory. object->storage == REDIS_DS_MEMORY |
35 | * or it is object->storage == REDIS_DS_DIRTY: | |
 *      We don't do anything special, lookup, return value object pointer.
37 | * 2) The key is in memory but object->storage == REDIS_DS_SAVING. | |
16d77878 | 38 | * When this happens we block waiting for the I/O thread to process |
39 | * this object. Then continue. | |
33388d43 | 40 | * 3) The key is not in memory. We block to load the key from disk. |
41 | * Of course the key may not be present at all on the disk store as well, | |
42 | * in such case we just detect this condition and continue, returning | |
43 | * NULL from lookup. | |
44 | * | |
45 | * - Preloading of needed keys: | |
46 | * 1) As it was done with VM, also with this new system we try preloading | |
47 | * keys a client is going to use. We block the client, load keys | |
48 | * using the I/O thread, unblock the client. Same code as VM more or less. | |
49 | * | |
16d77878 | 50 | * - Reclaiming memory. |
51 | * In cron() we detect our memory limit was reached. What we | |
52 | * do is deleting keys that are REDIS_DS_MEMORY, using LRU. | |
53 | * | |
33388d43 | 54 | * If this is not enough to return again under the memory limits we also |
55 | * start to flush keys that need to be synched on disk synchronously, | |
16d77878 | 56 | * removing it from the memory. We do this blocking as memory limit is a |
 *   much "harder" barrier in the new design.
33388d43 | 58 | * |
59 | * - IO thread operations are no longer stopped for sync loading/saving of | |
16d77878 | 60 | * things. When a key is found to be in the process of being saved |
61 | * we simply wait for the IO thread to end its work. | |
33388d43 | 62 | * |
63 | * Otherwise if there is to load a key without any IO thread operation | |
64 | * just started it is blocking-loaded in the lookup function. | |
16d77878 | 65 | * |
66 | * - What happens when an object is destroyed? | |
67 | * | |
 *   If o->storage == REDIS_DS_MEMORY then we simply destroy the object.
69 | * If o->storage == REDIS_DS_DIRTY we can still remove the object. It had | |
70 | * changes not flushed on disk, but is being removed so | |
71 | * who cares. | |
72 | * if o->storage == REDIS_DS_SAVING then the object is being saved so | |
73 | * it is impossible that its refcount == 1, must be at | |
74 | * least two. When the object is saved the storage will | |
75 | * be set back to DS_MEMORY. | |
76 | * | |
77 | * - What happens when keys are deleted? | |
78 | * | |
79 | * We simply schedule a key flush operation as usually, but when the | |
80 | * IO thread will be created the object pointer will be set to NULL | |
81 | * so the IO thread will know that the work to do is to delete the key | |
82 | * from the disk store. | |
83 | * | |
84 | * - What happens with MULTI/EXEC? | |
85 | * | |
86 | * Good question. | |
4ab98823 | 87 | * |
88 | * - If dsSet() fails on the write thread log the error and reschedule the | |
89 | * key for flush. | |
98a9abb6 | 90 | * |
91 | * - Check why INCR will not update the LRU info for the object. | |
8e6bb671 | 92 | * |
93 | * - Fix/Check the following race condition: a key gets a DEL so there is | |
94 | * a write operation scheduled against this key. Later the same key will | |
95 | * be the argument of a GET, but the write operation was still not | |
96 | * completed (to delete the file). If the GET will be for some reason | |
97 | * a blocking loading (via lookup) we can load the old value on memory. | |
98 | * | |
99 | * This problems can be fixed with negative caching. We can use it | |
100 | * to optimize the system, but also when a key is deleted we mark | |
101 | * it as non existing on disk as well (in a way that this cache | |
102 | * entry can't be evicted, setting time to 0), then we avoid looking at | |
103 | * the disk at all if the key can't be there. When an IO Job complete | |
104 | * a deletion, we set the time of the negative caching to a non zero | |
105 | * value so it will be evicted later. | |
106 | * | |
107 | * Are there other patterns like this where we load stale data? | |
d934e1e8 | 108 | * |
109 | * Also, make sure that key preloading is ONLY done for keys that are | |
110 | * not marked as cacheKeyDoesNotExist(), otherwise, again, we can load | |
111 | * data from disk that should instead be deleted. | |
33388d43 | 112 | */ |
113 | ||
e2641e09 | 114 | /* Virtual Memory is composed mainly of two subsystems: |
 * - Blocking Virtual Memory
116 | * - Threaded Virtual Memory I/O | |
117 | * The two parts are not fully decoupled, but functions are split among two | |
118 | * different sections of the source code (delimited by comments) in order to | |
119 | * make more clear what functionality is about the blocking VM and what about | |
120 | * the threaded (not blocking) VM. | |
121 | * | |
122 | * Redis VM design: | |
123 | * | |
124 | * Redis VM is a blocking VM (one that blocks reading swapped values from | |
125 | * disk into memory when a value swapped out is needed in memory) that is made | |
126 | * unblocking by trying to examine the command argument vector in order to | |
127 | * load in background values that will likely be needed in order to exec | |
128 | * the command. The command is executed only once all the relevant keys | |
129 | * are loaded into memory. | |
130 | * | |
 * This basically is almost as simple as a blocking VM, but almost as parallel
132 | * as a fully non-blocking VM. | |
133 | */ | |
134 | ||
f34a6cd8 | 135 | void spawnIOThread(void); |
136 | ||
e2641e09 | 137 | /* =================== Virtual Memory - Blocking Side ====================== */ |
138 | ||
f2da3a62 | 139 | void dsInit(void) { |
e2641e09 | 140 | int pipefds[2]; |
141 | size_t stacksize; | |
e2641e09 | 142 | |
f2da3a62 | 143 | zmalloc_enable_thread_safeness(); /* we need thread safe zmalloc() */ |
e2641e09 | 144 | |
67b0b41c | 145 | redisLog(REDIS_NOTICE,"Opening Disk Store: %s", server.ds_path); |
f2da3a62 | 146 | /* Open Disk Store */ |
147 | if (dsOpen() != REDIS_OK) { | |
148 | redisLog(REDIS_WARNING,"Fatal error opening disk store. Exiting."); | |
e2641e09 | 149 | exit(1); |
f2da3a62 | 150 | }; |
e2641e09 | 151 | |
f2da3a62 | 152 | /* Initialize threaded I/O for Object Cache */ |
e2641e09 | 153 | server.io_newjobs = listCreate(); |
154 | server.io_processing = listCreate(); | |
155 | server.io_processed = listCreate(); | |
156 | server.io_ready_clients = listCreate(); | |
157 | pthread_mutex_init(&server.io_mutex,NULL); | |
98a9abb6 | 158 | pthread_cond_init(&server.io_condvar,NULL); |
e2641e09 | 159 | server.io_active_threads = 0; |
160 | if (pipe(pipefds) == -1) { | |
f2da3a62 | 161 | redisLog(REDIS_WARNING,"Unable to intialized DS: pipe(2): %s. Exiting." |
e2641e09 | 162 | ,strerror(errno)); |
163 | exit(1); | |
164 | } | |
165 | server.io_ready_pipe_read = pipefds[0]; | |
166 | server.io_ready_pipe_write = pipefds[1]; | |
167 | redisAssert(anetNonBlock(NULL,server.io_ready_pipe_read) != ANET_ERR); | |
168 | /* LZF requires a lot of stack */ | |
169 | pthread_attr_init(&server.io_threads_attr); | |
170 | pthread_attr_getstacksize(&server.io_threads_attr, &stacksize); | |
556bdfba | 171 | |
172 | /* Solaris may report a stacksize of 0, let's set it to 1 otherwise | |
173 | * multiplying it by 2 in the while loop later will not really help ;) */ | |
174 | if (!stacksize) stacksize = 1; | |
175 | ||
e2641e09 | 176 | while (stacksize < REDIS_THREAD_STACK_SIZE) stacksize *= 2; |
177 | pthread_attr_setstacksize(&server.io_threads_attr, stacksize); | |
178 | /* Listen for events in the threaded I/O pipe */ | |
179 | if (aeCreateFileEvent(server.el, server.io_ready_pipe_read, AE_READABLE, | |
180 | vmThreadedIOCompletedJob, NULL) == AE_ERR) | |
181 | oom("creating file event"); | |
e2641e09 | 182 | |
f2da3a62 | 183 | /* Spawn our I/O thread */ |
184 | spawnIOThread(); | |
e2641e09 | 185 | } |
186 | ||
/* Compute how good candidate the specified object is for eviction.
 * An higher number means a better candidate: the score is simply the
 * estimated idle time of the object, so the least-recently-used objects
 * are preferred. */
double computeObjectSwappability(robj *o) {
    /* actual age can be >= minage, but not < minage. As we use wrapping
     * 21 bit clocks with minutes resolution for the LRU. */
    return (double) estimateObjectIdleTime(o);
}
194 | ||
/* Try to free one entry from the diskstore object cache.
 *
 * Samples a few random keys from every database, selects the one with the
 * best "swappability" (longest idle time) among those that are plain
 * in-memory copies (REDIS_DS_MEMORY), and deletes it from memory.
 *
 * Returns REDIS_OK if an entry was freed, REDIS_ERR if no suitable
 * candidate was found (e.g. everything is DIRTY or SAVING). */
int cacheFreeOneEntry(void) {
    int j, i;
    struct dictEntry *best = NULL;
    double best_swappability = 0;
    redisDb *best_db = NULL;
    robj *val;
    sds key;

    for (j = 0; j < server.dbnum; j++) {
        redisDb *db = server.db+j;
        /* Why maxtries is set to 100?
         * Because this way (usually) we'll find 1 object even if just 1% - 2%
         * are swappable objects */
        int maxtries = 100;

        if (dictSize(db->dict) == 0) continue;
        for (i = 0; i < 5; i++) {
            dictEntry *de;
            double swappability;

            if (maxtries) maxtries--;
            de = dictGetRandomKey(db->dict);
            val = dictGetEntryVal(de);
            /* Only swap objects that are currently in memory.
             *
             * Also don't swap shared objects: not a good idea in general and
             * we need to ensure that the main thread does not touch the
             * object while the I/O thread is using it, but we can't
             * control other keys without adding additional mutex. */
            if (val->storage != REDIS_DS_MEMORY) {
                /* While the try budget lasts, retry without consuming one
                 * of the 5 sampling slots. */
                if (maxtries) i--; /* don't count this try */
                continue;
            }
            swappability = computeObjectSwappability(val);
            if (!best || swappability > best_swappability) {
                best = de;
                best_swappability = swappability;
                best_db = db;
            }
        }
    }
    if (best == NULL) {
        /* FIXME: If there are objects marked as DS_DIRTY or DS_SAVING
         * let's wait for this objects to be clear and retry...
         *
         * Object cache vm limit is considered an hard limit. */
        return REDIS_ERR;
    }
    key = dictGetEntryKey(best);
    val = dictGetEntryVal(best);

    redisLog(REDIS_DEBUG,"Key selected for cache eviction: %s swappability:%f",
        key, best_swappability);

    /* Delete this key from memory */
    {
        robj *kobj = createStringObject(key,sdslen(key));
        dbDelete(best_db,kobj);
        decrRefCount(kobj);
    }
    return REDIS_OK;
}
258 | ||
/* Return true if it's safe to swap out objects in a given moment.
 * Basically we don't want to swap objects out while there is a BGSAVE
 * or a BGREWRITEAOF running in background. */
int dsCanTouchDiskStore(void) {
    return (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1);
}
265 | ||
d934e1e8 | 266 | /* ==================== Disk store negative caching ======================== |
267 | * | |
268 | * When disk store is enabled, we need negative caching, that is, to remember | |
269 | * keys that are for sure *not* on the disk key-value store. | |
270 | * | |
271 | * This is useful for two reasons: | |
272 | * | |
273 | * 1) Without negative caching cache misses will cost us a disk lookup, even | |
 *    if the same non existing key is accessed again and again. With negative
 *    caching we remember that the key is not on disk, so if it's not in memory
276 | * and we have a negative cache entry, we don't try a disk access at all. | |
277 | * | |
278 | * 2) Negative caching is the way to fix a specific race condition. For instance | |
279 | * think at the following sequence of commands: | |
280 | * | |
281 | * SET foo bar | |
282 | * DEL foo | |
283 | * GET foo | |
284 | * | |
285 | * After the SET, we'll mark the value as dirty, so it will be flushed | |
286 | * on disk at some time. Later the key is deleted, so will be removed | |
287 | * from memory. Another job will be created to remove the key from the disk | |
288 | * store, but the removal is not synchronous, so may happen later in time. | |
289 | * | |
290 | * Finally we have a GET foo operation. This operation may result in | |
291 | * reading back a value from disk that is not updated data, as the deletion | |
 *    operation against the disk KV store was still not completed, so we
293 | * read old data. | |
294 | * | |
295 | * Remembering that the given key is deleted is important. We can discard this | |
296 | * information once the key was really removed from the disk. | |
297 | * | |
298 | * So actually there are two kind of negative caching entries: entries that | |
299 | * can be evicted when we need to reclaim memory, and entries that will | |
300 | * not be evicted, for all the time we need this information to be available. | |
301 | * | |
302 | * The API allows to create both kind of negative caching. */ | |
303 | ||
304 | int cacheKeyMayExist(redisDb *db, robj *key) { | |
305 | return dictFind(db->io_negcache,key) == NULL; | |
306 | } | |
307 | ||
/* Drop the negative cache entry for 'key' (if any): from now on the key
 * may exist on disk again. */
void cacheSetKeyMayExist(redisDb *db, robj *key) {
    dictDelete(db->io_negcache,key);
}
311 | ||
/* Remember in the negative cache that 'key' does not exist on disk.
 * The current time is stored (cast into the dict value pointer) so the
 * entry can be evicted later. */
void cacheSetKeyDoesNotExist(redisDb *db, robj *key) {
    struct dictEntry *de;

    /* Don't overwrite negative cached entries with val set to 0, as this
     * entries were created with cacheSetKeyDoesNotExistRemember(). */
    de = dictFind(db->io_negcache,key);
    if (de != NULL && dictGetEntryVal(de) == NULL) return;

    if (dictReplace(db->io_negcache,key,(void*)time(NULL))) {
        /* dictReplace returned non-zero: a new entry was added, so the
         * dict now holds a reference to the key. */
        incrRefCount(key);
    }
}
324 | ||
/* Like cacheSetKeyDoesNotExist() but creates a non-evictable entry
 * (value NULL, i.e. time 0): the "key is not on disk" information must
 * remain available until the pending disk deletion completes. */
void cacheSetKeyDoesNotExistRemember(redisDb *db, robj *key) {
    if (dictReplace(db->io_negcache,key,NULL)) {
        incrRefCount(key);
    }
}
330 | ||
331 | /* ================== Disk store cache - Threaded I/O ====================== */ | |
e2641e09 | 332 | |
/* Release an I/O job structure together with the references it holds on
 * the key and (if present) the value. */
void freeIOJob(iojob *j) {
    decrRefCount(j->key);
    /* j->val can be NULL if the job is about deleting the key from disk. */
    if (j->val) decrRefCount(j->val);
    zfree(j);
}
339 | ||
/* Every time a thread finished a Job, it writes a byte into the write side
 * of an unix pipe in order to "awake" the main thread, and this function
 * is called.
 *
 * It is also invoked manually by waitEmptyIOJobsQueue() and
 * processAllPendingIOJobs() with a NULL event loop and dummy privdata.
 * For every byte read from the pipe one completed job is popped from
 * server.io_processed and post-processed here, in the main thread, to
 * avoid races with the I/O thread. */
void vmThreadedIOCompletedJob(aeEventLoop *el, int fd, void *privdata,
            int mask)
{
    char buf[1];
    int retval, processed = 0, toprocess = -1;
    REDIS_NOTUSED(el);
    REDIS_NOTUSED(mask);
    REDIS_NOTUSED(privdata);

    /* For every byte we read in the read side of the pipe, there is one
     * I/O job completed to process. */
    while((retval = read(fd,buf,1)) == 1) {
        iojob *j;
        listNode *ln;

        redisLog(REDIS_DEBUG,"Processing I/O completed job");

        /* Get the processed element (the oldest one) */
        lockThreadedIO();
        redisAssert(listLength(server.io_processed) != 0);
        if (toprocess == -1) {
            /* Cap the work done per call to a percentage of the queue
             * (but at least one job) so the event loop is not starved. */
            toprocess = (listLength(server.io_processed)*REDIS_MAX_COMPLETED_JOBS_PROCESSED)/100;
            if (toprocess <= 0) toprocess = 1;
        }
        ln = listFirst(server.io_processed);
        j = ln->value;
        listDelNode(server.io_processed,ln);
        unlockThreadedIO();

        /* Post process it in the main thread, as there are things we
         * can do just here to avoid race conditions and/or invasive locks */
        redisLog(REDIS_DEBUG,"COMPLETED Job type %s, key: %s",
            (j->type == REDIS_IOJOB_LOAD) ? "load" : "save",
            (unsigned char*)j->key->ptr);
        if (j->type == REDIS_IOJOB_LOAD) {
            /* Create the key-value pair in the in-memory database */
            if (j->val != NULL) {
                /* Note: the key may already be here if between the time
                 * this key loading was scheduled and now there was the
                 * need to blocking load the key for a key lookup.
                 *
                 * Also we don't add a key that was deleted in the
                 * meantime and should not be on disk either. */
                if (cacheKeyMayExist(j->db,j->key) &&
                    dbAdd(j->db,j->key,j->val) == REDIS_OK)
                {
                    incrRefCount(j->val);
                    if (j->expire != -1) setExpire(j->db,j->key,j->expire);
                }
            } else {
                /* The key does not exist. Create a negative cache entry
                 * for this key. */
                cacheSetKeyDoesNotExist(j->db,j->key);
            }
            /* Handle clients waiting for this key to be loaded. */
            handleClientsBlockedOnSwappedKey(j->db,j->key);
            freeIOJob(j);
        } else if (j->type == REDIS_IOJOB_SAVE) {
            if (j->val) {
                /* Value flushed: the object is a plain in-memory copy
                 * again and the key is certainly on disk now. */
                redisAssert(j->val->storage == REDIS_DS_SAVING);
                j->val->storage = REDIS_DS_MEMORY;
                cacheSetKeyMayExist(j->db,j->key);
            } else {
                /* Key deleted. Probably we have this key marked as
                 * non existing, and impossible to evict, in our negative
                 * cache entry. Add it as a normal negative cache entry. */
                cacheSetKeyMayExist(j->db,j->key);
            }
            freeIOJob(j);
        }
        processed++;
        if (processed == toprocess) return;
    }
    if (retval < 0 && errno != EAGAIN) {
        redisLog(REDIS_WARNING,
            "WARNING: read(2) error in vmThreadedIOCompletedJob() %s",
            strerror(errno));
    }
}
422 | ||
/* Acquire the global lock protecting the threaded I/O queues
 * (io_newjobs, io_processing, io_processed) and related state. */
void lockThreadedIO(void) {
    pthread_mutex_lock(&server.io_mutex);
}
426 | ||
/* Release the global threaded I/O lock taken with lockThreadedIO(). */
void unlockThreadedIO(void) {
    pthread_mutex_unlock(&server.io_mutex);
}
430 | ||
/* Entry point of the I/O thread spawned by spawnIOThread().
 *
 * Loops forever: waits on server.io_condvar for jobs in server.io_newjobs,
 * performs the disk operation (dsGet / dsSet / dsDel), moves the job to
 * server.io_processed, and writes one byte into io_ready_pipe_write to
 * wake up the main thread.
 *
 * Locking protocol: the threaded I/O lock is held except while the disk
 * operation itself runs (and while blocked in pthread_cond_wait, which
 * releases the mutex atomically). */
void *IOThreadEntryPoint(void *arg) {
    iojob *j;
    listNode *ln;
    REDIS_NOTUSED(arg);

    pthread_detach(pthread_self());
    lockThreadedIO();
    while(1) {
        /* Get a new job to process */
        if (listLength(server.io_newjobs) == 0) {
            /* Wait for more work to do */
            pthread_cond_wait(&server.io_condvar,&server.io_mutex);
            continue;
        }
        /* NOTE(review): listLength() looks unsigned here while "%ld"
         * expects signed long — confirm and consider "%lu". */
        redisLog(REDIS_DEBUG,"%ld IO jobs to process",
            listLength(server.io_newjobs));
        ln = listFirst(server.io_newjobs);
        j = ln->value;
        listDelNode(server.io_newjobs,ln);
        /* Add the job in the processing queue */
        listAddNodeTail(server.io_processing,j);
        ln = listLast(server.io_processing); /* We use ln later to remove it */
        unlockThreadedIO();

        redisLog(REDIS_DEBUG,"Thread %ld: new job type %s: %p about key '%s'",
            (long) pthread_self(),
            (j->type == REDIS_IOJOB_LOAD) ? "load" : "save",
            (void*)j, (char*)j->key->ptr);

        /* Process the Job */
        if (j->type == REDIS_IOJOB_LOAD) {
            time_t expire;

            j->val = dsGet(j->db,j->key,&expire);
            if (j->val) j->expire = expire;
        } else if (j->type == REDIS_IOJOB_SAVE) {
            if (j->val) {
                redisAssert(j->val->storage == REDIS_DS_SAVING);
                dsSet(j->db,j->key,j->val);
            } else {
                /* A NULL value means: delete the key from the disk store. */
                dsDel(j->db,j->key);
            }
        }

        /* Done: insert the job into the processed queue */
        redisLog(REDIS_DEBUG,"Thread %ld completed the job: %p (key %s)",
            (long) pthread_self(), (void*)j, (char*)j->key->ptr);

        lockThreadedIO();
        listDelNode(server.io_processing,ln);
        listAddNodeTail(server.io_processed,j);

        /* Signal the main thread there is new stuff to process */
        redisAssert(write(server.io_ready_pipe_write,"x",1) == 1);
    }
    /* never reached, but that's the full pattern... */
    unlockThreadedIO();
    return NULL;
}
490 | ||
491 | void spawnIOThread(void) { | |
492 | pthread_t thread; | |
493 | sigset_t mask, omask; | |
494 | int err; | |
495 | ||
496 | sigemptyset(&mask); | |
497 | sigaddset(&mask,SIGCHLD); | |
498 | sigaddset(&mask,SIGHUP); | |
499 | sigaddset(&mask,SIGPIPE); | |
500 | pthread_sigmask(SIG_SETMASK, &mask, &omask); | |
501 | while ((err = pthread_create(&thread,&server.io_threads_attr,IOThreadEntryPoint,NULL)) != 0) { | |
502 | redisLog(REDIS_WARNING,"Unable to spawn an I/O thread: %s", | |
503 | strerror(err)); | |
504 | usleep(1000000); | |
505 | } | |
506 | pthread_sigmask(SIG_SETMASK, &omask, NULL); | |
507 | server.io_active_threads++; | |
508 | } | |
509 | ||
8d51fb6a | 510 | /* Wait that all the pending IO Jobs are processed */ |
e2641e09 | 511 | void waitEmptyIOJobsQueue(void) { |
512 | while(1) { | |
513 | int io_processed_len; | |
514 | ||
515 | lockThreadedIO(); | |
516 | if (listLength(server.io_newjobs) == 0 && | |
8d51fb6a | 517 | listLength(server.io_processing) == 0) |
e2641e09 | 518 | { |
519 | unlockThreadedIO(); | |
520 | return; | |
521 | } | |
a440ecf0 | 522 | /* If there are new jobs we need to signal the thread to |
523 | * process the next one. */ | |
524 | redisLog(REDIS_DEBUG,"waitEmptyIOJobsQueue: new %d, processing %d", | |
525 | listLength(server.io_newjobs), | |
526 | listLength(server.io_processing)); | |
527 | /* | |
528 | if (listLength(server.io_newjobs)) { | |
529 | pthread_cond_signal(&server.io_condvar); | |
530 | } | |
531 | */ | |
e2641e09 | 532 | /* While waiting for empty jobs queue condition we post-process some |
533 | * finshed job, as I/O threads may be hanging trying to write against | |
534 | * the io_ready_pipe_write FD but there are so much pending jobs that | |
535 | * it's blocking. */ | |
536 | io_processed_len = listLength(server.io_processed); | |
537 | unlockThreadedIO(); | |
538 | if (io_processed_len) { | |
c1ae36ae | 539 | vmThreadedIOCompletedJob(NULL,server.io_ready_pipe_read, |
540 | (void*)0xdeadbeef,0); | |
e2641e09 | 541 | usleep(1000); /* 1 millisecond */ |
542 | } else { | |
543 | usleep(10000); /* 10 milliseconds */ | |
544 | } | |
545 | } | |
546 | } | |
547 | ||
8d51fb6a | 548 | /* Process all the IO Jobs already completed by threads but still waiting |
549 | * processing from the main thread. */ | |
550 | void processAllPendingIOJobs(void) { | |
551 | while(1) { | |
552 | int io_processed_len; | |
553 | ||
554 | lockThreadedIO(); | |
555 | io_processed_len = listLength(server.io_processed); | |
556 | unlockThreadedIO(); | |
557 | if (io_processed_len == 0) return; | |
558 | vmThreadedIOCompletedJob(NULL,server.io_ready_pipe_read, | |
559 | (void*)0xdeadbeef,0); | |
560 | } | |
561 | } | |
562 | ||
e2641e09 | 563 | /* This function must be called while with threaded IO locked */ |
564 | void queueIOJob(iojob *j) { | |
565 | redisLog(REDIS_DEBUG,"Queued IO Job %p type %d about key '%s'\n", | |
566 | (void*)j, j->type, (char*)j->key->ptr); | |
567 | listAddNodeTail(server.io_newjobs,j); | |
568 | if (server.io_active_threads < server.vm_max_threads) | |
569 | spawnIOThread(); | |
570 | } | |
571 | ||
/* Create an I/O job of the given type (REDIS_IOJOB_LOAD or
 * REDIS_IOJOB_SAVE) for 'key' of database 'db', queue it and signal the
 * condition variable so an I/O thread picks it up.
 *
 * 'val' may be NULL: for LOAD jobs it is filled in by the I/O thread;
 * for SAVE jobs a NULL value means "delete the key from disk".
 * References taken here on key/val are released by freeIOJob(). */
void dsCreateIOJob(int type, redisDb *db, robj *key, robj *val) {
    iojob *j;

    j = zmalloc(sizeof(*j));
    j->type = type;
    j->db = db;
    j->key = key;
    incrRefCount(key);
    j->val = val;
    if (val) incrRefCount(val);

    lockThreadedIO();
    queueIOJob(j);
    pthread_cond_signal(&server.io_condvar);
    unlockThreadedIO();
}
588 | ||
/* Schedule 'key' of database 'db' to be flushed to disk by cacheCron().
 *
 * If the key exists in memory it is marked REDIS_DS_DIRTY; if it was
 * already dirty it has been scheduled before, so we return without
 * queueing a duplicate. If the key is NOT in memory we queue it anyway:
 * cacheCron() will later find no value and schedule a disk deletion.
 *
 * NOTE(review): when the key does not exist there is no dirty flag to
 * de-duplicate on, so presumably the same missing key can be queued more
 * than once — confirm this is harmless downstream. */
void cacheScheduleForFlush(redisDb *db, robj *key) {
    dirtykey *dk;
    dictEntry *de;

    de = dictFind(db->dict,key->ptr);
    if (de) {
        robj *val = dictGetEntryVal(de);
        if (val->storage == REDIS_DS_DIRTY)
            return;
        else
            val->storage = REDIS_DS_DIRTY;
    }

    redisLog(REDIS_DEBUG,"Scheduling key %s for saving (%s)",key->ptr,
        de ? "key exists" : "key does not exist");
    dk = zmalloc(sizeof(*dk));
    dk->db = db;
    dk->key = key;
    incrRefCount(key);
    dk->ctime = time(NULL);
    listAddNodeTail(server.cache_flush_queue, dk);
}
611 | ||
/* Periodic disk-store cache housekeeping:
 * 1) turn scheduled dirty keys (older than server.cache_flush_delay)
 *    into SAVE I/O jobs, keeping at most ~100 jobs queued;
 * 2) evict clean cached objects while memory usage exceeds
 *    server.cache_max_memory. */
void cacheCron(void) {
    time_t now = time(NULL);
    listNode *ln;
    int jobs, topush = 0;

    /* Sync stuff on disk, but only if we have less than 100 IO jobs */
    lockThreadedIO();
    jobs = listLength(server.io_newjobs);
    unlockThreadedIO();

    topush = 100-jobs;
    if (topush < 0) topush = 0;

    while((ln = listFirst(server.cache_flush_queue)) != NULL) {
        dirtykey *dk = ln->value;

        if (!topush) break;
        topush--;

        /* The queue is in scheduling-time order, so we can stop at the
         * first element that was scheduled too recently. */
        if ((now - dk->ctime) >= server.cache_flush_delay) {
            struct dictEntry *de;
            robj *val;

            redisLog(REDIS_DEBUG,"Creating IO Job to save key %s",dk->key->ptr);

            /* Lookup the key, in order to put the current value in the IO
             * Job and mark it as DS_SAVING.
             * Otherwise if the key does not exists we schedule a disk store
             * delete operation, setting the value to NULL. */
            de = dictFind(dk->db->dict,dk->key->ptr);
            if (de) {
                val = dictGetEntryVal(de);
                redisAssert(val->storage == REDIS_DS_DIRTY);
                val->storage = REDIS_DS_SAVING;
            } else {
                /* Setting the value to NULL tells the IO thread to delete
                 * the key on disk. */
                val = NULL;
            }
            dsCreateIOJob(REDIS_IOJOB_SAVE,dk->db,dk->key,val);
            listDelNode(server.cache_flush_queue,ln);
            decrRefCount(dk->key);
            zfree(dk);
        } else {
            break; /* too early */
        }
    }

    /* Reclaim memory from the object cache */
    while (server.ds_enabled && zmalloc_used_memory() >
            server.cache_max_memory)
    {
        if (cacheFreeOneEntry() == REDIS_ERR) break;
    }
}
667 | ||
e2641e09 | 668 | /* ============ Virtual Memory - Blocking clients on missing keys =========== */ |
669 | ||
/* This function makes the client 'c' wait for the key 'key' to be loaded.
 * If the key is already in memory we don't need to block, regardless
 * of the storage of the value object for this key:
 *
 * - If it's REDIS_DS_MEMORY we have the key in memory.
 * - If it's REDIS_DS_DIRTY the key was modified, but still in memory.
 * - if it's REDIS_DS_SAVING the key is being saved by an IO Job. When
 *   the client will lookup the key it will block if the key is still
 *   in this stage but it's more or less the best we can do.
 *
 *   FIXME: we should try if it's actually better to suspend the client
 *   accessing an object that is being saved, and awake it only when
 *   the saving was completed.
 *
 * Otherwise if the key is not in memory, we block the client and start
 * an IO Job to load it:
 *
 * the key is added to the io_keys list in the client structure, and also
 * in the hash table mapping swapped keys to waiting clients, that is,
 * server.io_waited_keys.
 *
 * Returns 1 if the client was blocked waiting for the key, 0 if no wait
 * is needed (key in memory, or surely not on disk either). */
int waitForSwappedKey(redisClient *c, robj *key) {
    struct dictEntry *de;
    list *l;

    /* Return ASAP if the key is in memory */
    de = dictFind(c->db->dict,key->ptr);
    if (de != NULL) return 0;

    /* Don't wait for keys we are sure are not on disk either */
    if (!cacheKeyMayExist(c->db,key)) return 0;

    /* Add the key to the list of keys this client is waiting for.
     * This maps clients to keys they are waiting for. */
    listAddNodeTail(c->io_keys,key);
    incrRefCount(key);

    /* Add the client to the swapped keys => clients waiting map. */
    de = dictFind(c->db->io_keys,key);
    if (de == NULL) {
        int retval;

        /* For every key we take a list of clients blocked for it */
        l = listCreate();
        retval = dictAdd(c->db->io_keys,key,l);
        incrRefCount(key);
        redisAssert(retval == DICT_OK);
    } else {
        l = dictGetEntryVal(de);
    }
    listAddNodeTail(l,c);

    /* Are we already loading the key from disk? If not create a job */
    if (de == NULL)
        dsCreateIOJob(REDIS_IOJOB_LOAD,c->db,key,NULL);
    return 1;
}
726 | ||
727 | /* Preload keys for any command with first, last and step values for | |
728 | * the command keys prototype, as defined in the command table. */ | |
729 | void waitForMultipleSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) { | |
730 | int j, last; | |
731 | if (cmd->vm_firstkey == 0) return; | |
732 | last = cmd->vm_lastkey; | |
733 | if (last < 0) last = argc+last; | |
734 | for (j = cmd->vm_firstkey; j <= last; j += cmd->vm_keystep) { | |
735 | redisAssert(j < argc); | |
736 | waitForSwappedKey(c,argv[j]); | |
737 | } | |
738 | } | |
739 | ||
740 | /* Preload keys needed for the ZUNIONSTORE and ZINTERSTORE commands. | |
741 | * Note that the number of keys to preload is user-defined, so we need to | |
742 | * apply a sanity check against argc. */ | |
743 | void zunionInterBlockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) { | |
744 | int i, num; | |
745 | REDIS_NOTUSED(cmd); | |
746 | ||
747 | num = atoi(argv[2]->ptr); | |
748 | if (num > (argc-3)) return; | |
749 | for (i = 0; i < num; i++) { | |
750 | waitForSwappedKey(c,argv[3+i]); | |
751 | } | |
752 | } | |
753 | ||
754 | /* Preload keys needed to execute the entire MULTI/EXEC block. | |
755 | * | |
756 | * This function is called by blockClientOnSwappedKeys when EXEC is issued, | |
757 | * and will block the client when any command requires a swapped out value. */ | |
758 | void execBlockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) { | |
759 | int i, margc; | |
760 | struct redisCommand *mcmd; | |
761 | robj **margv; | |
762 | REDIS_NOTUSED(cmd); | |
763 | REDIS_NOTUSED(argc); | |
764 | REDIS_NOTUSED(argv); | |
765 | ||
766 | if (!(c->flags & REDIS_MULTI)) return; | |
767 | for (i = 0; i < c->mstate.count; i++) { | |
768 | mcmd = c->mstate.commands[i].cmd; | |
769 | margc = c->mstate.commands[i].argc; | |
770 | margv = c->mstate.commands[i].argv; | |
771 | ||
772 | if (mcmd->vm_preload_proc != NULL) { | |
773 | mcmd->vm_preload_proc(c,mcmd,margc,margv); | |
774 | } else { | |
775 | waitForMultipleSwappedKeys(c,mcmd,margc,margv); | |
776 | } | |
777 | } | |
778 | } | |
779 | ||
780 | /* Is this client attempting to run a command against swapped keys? | |
781 | * If so, block it ASAP, load the keys in background, then resume it. | |
782 | * | |
783 | * The important idea about this function is that it can fail! If keys will | |
784 | * still be swapped when the client is resumed, this key lookups will | |
785 | * just block loading keys from disk. In practical terms this should only | |
786 | * happen with SORT BY command or if there is a bug in this function. | |
787 | * | |
788 | * Return 1 if the client is marked as blocked, 0 if the client can | |
789 | * continue as the keys it is going to access appear to be in memory. */ | |
790 | int blockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd) { | |
791 | if (cmd->vm_preload_proc != NULL) { | |
792 | cmd->vm_preload_proc(c,cmd,c->argc,c->argv); | |
793 | } else { | |
794 | waitForMultipleSwappedKeys(c,cmd,c->argc,c->argv); | |
795 | } | |
796 | ||
797 | /* If the client was blocked for at least one key, mark it as blocked. */ | |
798 | if (listLength(c->io_keys)) { | |
799 | c->flags |= REDIS_IO_WAIT; | |
800 | aeDeleteFileEvent(server.el,c->fd,AE_READABLE); | |
5ef64098 | 801 | server.cache_blocked_clients++; |
e2641e09 | 802 | return 1; |
803 | } else { | |
804 | return 0; | |
805 | } | |
806 | } | |
807 | ||
808 | /* Remove the 'key' from the list of blocked keys for a given client. | |
809 | * | |
810 | * The function returns 1 when there are no longer blocking keys after | |
811 | * the current one was removed (and the client can be unblocked). */ | |
int dontWaitForSwappedKey(redisClient *c, robj *key) {
    list *l;
    listNode *ln;
    listIter li;
    struct dictEntry *de;

    /* The key object might be destroyed when deleted from the c->io_keys
     * list (and the "key" argument is physically the same object as the
     * object inside the list), so we need to protect it. */
    incrRefCount(key);

    /* Remove the key from the list of keys this client is waiting for. */
    listRewind(c->io_keys,&li);
    while ((ln = listNext(&li)) != NULL) {
        if (equalStringObjects(ln->value,key)) {
            listDelNode(c->io_keys,ln);
            break;
        }
    }
    /* The key must be present: callers only invoke us for keys the
     * client is actually blocked on (ln is NULL only if the iterator
     * was exhausted without a match). */
    redisAssert(ln != NULL);

    /* Remove the client from the key => waiting clients map. */
    de = dictFind(c->db->io_keys,key);
    redisAssert(de != NULL);
    l = dictGetEntryVal(de);
    ln = listSearchKey(l,c);
    redisAssert(ln != NULL);
    listDelNode(l,ln);
    /* Drop the per-key clients list entirely once the last waiter is
     * gone (this frees the list, which is why the caller caches the
     * list length instead of re-reading it). */
    if (listLength(l) == 0)
        dictDelete(c->db->io_keys,key);

    decrRefCount(key);
    /* 1 = no more blocking keys, the client can be resumed. */
    return listLength(c->io_keys) == 0;
}
846 | ||
847 | /* Every time we now a key was loaded back in memory, we handle clients | |
848 | * waiting for this key if any. */ | |
849 | void handleClientsBlockedOnSwappedKey(redisDb *db, robj *key) { | |
850 | struct dictEntry *de; | |
851 | list *l; | |
852 | listNode *ln; | |
853 | int len; | |
854 | ||
855 | de = dictFind(db->io_keys,key); | |
856 | if (!de) return; | |
857 | ||
858 | l = dictGetEntryVal(de); | |
859 | len = listLength(l); | |
860 | /* Note: we can't use something like while(listLength(l)) as the list | |
861 | * can be freed by the calling function when we remove the last element. */ | |
862 | while (len--) { | |
863 | ln = listFirst(l); | |
864 | redisClient *c = ln->value; | |
865 | ||
866 | if (dontWaitForSwappedKey(c,key)) { | |
867 | /* Put the client in the list of clients ready to go as we | |
868 | * loaded all the keys about it. */ | |
869 | listAddNodeTail(server.io_ready_clients,c); | |
870 | } | |
871 | } | |
872 | } |