1 #include "redis.h"
2
3 #include <fcntl.h>
4 #include <pthread.h>
5 #include <math.h>
6 #include <signal.h>
7
8 /* Virtual Memory is composed mainly of two subsystems:
9 * - Blocking Virtual Memory
10 * - Threaded Virtual Memory I/O
11 * The two parts are not fully decoupled, but functions are split among two
12 * different sections of the source code (delimited by comments) in order to
13 * make it clearer which functionality belongs to the blocking VM and which
14 * to the threaded (non-blocking) VM.
15 *
16 * Redis VM design:
17 *
18 * Redis VM is a blocking VM (one that blocks reading swapped values from
19 * disk into memory when a value swapped out is needed in memory) that is made
20 * unblocking by trying to examine the command argument vector in order to
21 * load in background values that will likely be needed in order to exec
22 * the command. The command is executed only once all the relevant keys
23 * are loaded into memory.
24 *
25 * This is basically almost as simple as a blocking VM, but almost as parallel
26 * as a fully non-blocking VM.
27 */
28
29 /* =================== Virtual Memory - Blocking Side ====================== */
30
31 /* Create a VM pointer object. Objects of this kind are used in place of
32 * values in the key -> value hash table for swapped out objects. */
33 vmpointer *createVmPointer(int vtype) {
34 vmpointer *vp = zmalloc(sizeof(vmpointer));
35
36 vp->type = REDIS_VMPOINTER;
37 vp->storage = REDIS_VM_SWAPPED;
38 vp->vtype = vtype;
39 return vp;
40 }
41
42 void vmInit(void) {
43 off_t totsize;
44 int pipefds[2];
45 size_t stacksize;
46 struct flock fl;
47
48 if (server.vm_max_threads != 0)
49 zmalloc_enable_thread_safeness(); /* we need thread safe zmalloc() */
50
51 redisLog(REDIS_NOTICE,"Using '%s' as swap file",server.vm_swap_file);
52 /* Try to open the old swap file, otherwise create it */
53 if ((server.vm_fp = fopen(server.vm_swap_file,"r+b")) == NULL) {
54 server.vm_fp = fopen(server.vm_swap_file,"w+b");
55 }
56 if (server.vm_fp == NULL) {
57 redisLog(REDIS_WARNING,
58 "Can't open the swap file: %s. Exiting.",
59 strerror(errno));
60 exit(1);
61 }
62 server.vm_fd = fileno(server.vm_fp);
63 /* Lock the swap file for writing: this is useful to prevent another
64 * instance from using the same swap file because of a config error. */
65 fl.l_type = F_WRLCK;
66 fl.l_whence = SEEK_SET;
67 fl.l_start = fl.l_len = 0;
68 if (fcntl(server.vm_fd,F_SETLK,&fl) == -1) {
69 redisLog(REDIS_WARNING,
70 "Can't lock the swap file at '%s': %s. Make sure it is not used by another Redis instance.", server.vm_swap_file, strerror(errno));
71 exit(1);
72 }
73 /* Initialize */
74 server.vm_next_page = 0;
75 server.vm_near_pages = 0;
76 server.vm_stats_used_pages = 0;
77 server.vm_stats_swapped_objects = 0;
78 server.vm_stats_swapouts = 0;
79 server.vm_stats_swapins = 0;
80 totsize = server.vm_pages*server.vm_page_size;
81 redisLog(REDIS_NOTICE,"Allocating %lld bytes of swap file",totsize);
82 if (ftruncate(server.vm_fd,totsize) == -1) {
83 redisLog(REDIS_WARNING,"Can't ftruncate swap file: %s. Exiting.",
84 strerror(errno));
85 exit(1);
86 } else {
87 redisLog(REDIS_NOTICE,"Swap file allocated with success");
88 }
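/* The page table is a bitmap with one bit per swap page: page P maps to
 * byte P/8, bit P&7 (see vmMarkPageUsed() and vmFreePage() below). */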
89 server.vm_bitmap = zcalloc((server.vm_pages+7)/8);
90 redisLog(REDIS_VERBOSE,"Allocated %lld bytes page table for %lld pages",
91 (long long) (server.vm_pages+7)/8, server.vm_pages);
92
93 /* Initialize threaded I/O (used by Virtual Memory) */
94 server.io_newjobs = listCreate();
95 server.io_processing = listCreate();
96 server.io_processed = listCreate();
97 server.io_ready_clients = listCreate();
98 pthread_mutex_init(&server.io_mutex,NULL);
99 pthread_mutex_init(&server.obj_freelist_mutex,NULL);
100 pthread_mutex_init(&server.io_swapfile_mutex,NULL);
101 server.io_active_threads = 0;
102 if (pipe(pipefds) == -1) {
103 redisLog(REDIS_WARNING,"Unable to initialize VM: pipe(2): %s. Exiting."
104 ,strerror(errno));
105 exit(1);
106 }
107 server.io_ready_pipe_read = pipefds[0];
108 server.io_ready_pipe_write = pipefds[1];
109 redisAssert(anetNonBlock(NULL,server.io_ready_pipe_read) != ANET_ERR);
110 /* LZF requires a lot of stack */
111 pthread_attr_init(&server.io_threads_attr);
112 pthread_attr_getstacksize(&server.io_threads_attr, &stacksize);
if (!stacksize) stacksize = 1; /* some platforms report 0 here; avoid an endless loop below */
113 while (stacksize < REDIS_THREAD_STACK_SIZE) stacksize *= 2;
114 pthread_attr_setstacksize(&server.io_threads_attr, stacksize);
115 /* Listen for events in the threaded I/O pipe */
116 if (aeCreateFileEvent(server.el, server.io_ready_pipe_read, AE_READABLE,
117 vmThreadedIOCompletedJob, NULL) == AE_ERR)
118 oom("creating file event");
119 }
120
121 /* Mark the page as used */
122 void vmMarkPageUsed(off_t page) {
123 off_t byte = page/8;
124 int bit = page&7;
125 redisAssert(vmFreePage(page) == 1);
126 server.vm_bitmap[byte] |= 1<<bit;
127 }
128
129 /* Mark N contiguous pages as used, with 'page' being the first. */
130 void vmMarkPagesUsed(off_t page, off_t count) {
131 off_t j;
132
133 for (j = 0; j < count; j++)
134 vmMarkPageUsed(page+j);
135 server.vm_stats_used_pages += count;
136 redisLog(REDIS_DEBUG,"Mark USED pages: %lld pages at %lld\n",
137 (long long)count, (long long)page);
138 }
139
140 /* Mark the page as free */
141 void vmMarkPageFree(off_t page) {
142 off_t byte = page/8;
143 int bit = page&7;
144 redisAssert(vmFreePage(page) == 0);
145 server.vm_bitmap[byte] &= ~(1<<bit);
146 }
147
148 /* Mark N contiguous pages as free, with 'page' being the first. */
149 void vmMarkPagesFree(off_t page, off_t count) {
150 off_t j;
151
152 for (j = 0; j < count; j++)
153 vmMarkPageFree(page+j);
154 server.vm_stats_used_pages -= count;
155 redisLog(REDIS_DEBUG,"Mark FREE pages: %lld pages at %lld\n",
156 (long long)count, (long long)page);
157 }
158
159 /* Test if the page is free */
160 int vmFreePage(off_t page) {
161 off_t byte = page/8;
162 int bit = page&7;
163 return (server.vm_bitmap[byte] & (1<<bit)) == 0;
164 }
165
166 /* Find N contiguous free pages storing the first page of the cluster in *first.
167 * Returns REDIS_OK if it was able to find N contiguous pages, otherwise
168 * REDIS_ERR is returned.
169 *
170 * This function uses a simple algorithm: we try to allocate
171 * REDIS_VM_MAX_NEAR_PAGES sequentially, when we reach this limit we start
172 * again from the start of the swap file searching for free spaces.
173 *
174 * If it looks pretty clear that there are no free pages near our offset
175 * we try to find less populated places doing a forward jump of
176 * REDIS_VM_MAX_RANDOM_JUMP, then we start scanning again a few pages
177 * without hurry, and then we jump again and so forth...
178 *
179 * This function can be improved using a free list to avoid guessing
180 * too much, since we could collect data about freed pages.
181 *
182 * note: I implemented this function just after watching an episode of
183 * Battlestar Galactica, where the hybrid was continuing to say "JUMP!"
184 */
185 int vmFindContiguousPages(off_t *first, off_t n) {
186 off_t base, offset = 0, since_jump = 0, numfree = 0;
187
188 if (server.vm_near_pages == REDIS_VM_MAX_NEAR_PAGES) {
189 server.vm_near_pages = 0;
190 server.vm_next_page = 0;
191 }
192 server.vm_near_pages++; /* Yet another try for pages near to the old ones */
193 base = server.vm_next_page;
194
195 while(offset < server.vm_pages) {
196 off_t this = base+offset;
197
198 /* If we overflow, restart from page zero */
199 if (this >= server.vm_pages) {
200 this -= server.vm_pages;
201 if (this == 0) {
202 /* Just overflowed, what we found on tail is no longer
203 * interesting, as it's no longer contiguous. */
204 numfree = 0;
205 }
206 }
207 if (vmFreePage(this)) {
208 /* This is a free page */
209 numfree++;
210 /* Already got N free pages? Return to the caller, with success */
211 if (numfree == n) {
212 *first = this-(n-1);
213 server.vm_next_page = this+1;
214 redisLog(REDIS_DEBUG, "FOUND CONTIGUOUS PAGES: %lld pages at %lld\n", (long long) n, (long long) *first);
215 return REDIS_OK;
216 }
217 } else {
218 /* The current one is not a free page */
219 numfree = 0;
220 }
221
222 /* Fast-forward if the current page is not free and we already
223 * searched enough near this place. */
224 since_jump++;
225 if (!numfree && since_jump >= REDIS_VM_MAX_RANDOM_JUMP/4) {
226 offset += random() % REDIS_VM_MAX_RANDOM_JUMP;
227 since_jump = 0;
228 /* Note that even if we rewind after the jump, we don't need
229 * to reset numfree to zero, as we only jump *if* it
230 * is already zero. */
231 } else {
232 /* Otherwise just check the next page */
233 offset++;
234 }
235 }
236 return REDIS_ERR;
237 }
238
239 /* Write the specified object at the specified page of the swap file */
240 int vmWriteObjectOnSwap(robj *o, off_t page) {
241 if (server.vm_enabled) pthread_mutex_lock(&server.io_swapfile_mutex);
242 if (fseeko(server.vm_fp,page*server.vm_page_size,SEEK_SET) == -1) {
243 if (server.vm_enabled) pthread_mutex_unlock(&server.io_swapfile_mutex);
244 redisLog(REDIS_WARNING,
245 "Critical VM problem in vmWriteObjectOnSwap(): can't seek: %s",
246 strerror(errno));
247 return REDIS_ERR;
248 }
249 rdbSaveObject(server.vm_fp,o);
250 fflush(server.vm_fp);
251 if (server.vm_enabled) pthread_mutex_unlock(&server.io_swapfile_mutex);
252 return REDIS_OK;
253 }
254
255 /* Transfers the 'val' object to disk. A 'vmpointer' object containing
256 * all the information needed to load the object back later is
257 * returned.
258 *
259 * If we can't find enough contiguous empty pages to swap the object on disk
260 * NULL is returned. */
261 vmpointer *vmSwapObjectBlocking(robj *val) {
262 off_t pages = rdbSavedObjectPages(val,NULL);
263 off_t page;
264 vmpointer *vp;
265
266 redisAssert(val->storage == REDIS_VM_MEMORY);
267 redisAssert(val->refcount == 1);
268 if (vmFindContiguousPages(&page,pages) == REDIS_ERR) return NULL;
269 if (vmWriteObjectOnSwap(val,page) == REDIS_ERR) return NULL;
270
271 vp = createVmPointer(val->type);
272 vp->page = page;
273 vp->usedpages = pages;
274 decrRefCount(val); /* Deallocate the object from memory. */
275 vmMarkPagesUsed(page,pages);
276 redisLog(REDIS_DEBUG,"VM: object %p swapped out at %lld (%lld pages)",
277 (void*) val,
278 (unsigned long long) page, (unsigned long long) pages);
279 server.vm_stats_swapped_objects++;
280 server.vm_stats_swapouts++;
281 return vp;
282 }
283
284 robj *vmReadObjectFromSwap(off_t page, int type) {
285 robj *o;
286
287 if (server.vm_enabled) pthread_mutex_lock(&server.io_swapfile_mutex);
288 if (fseeko(server.vm_fp,page*server.vm_page_size,SEEK_SET) == -1) {
289 redisLog(REDIS_WARNING,
290 "Unrecoverable VM problem in vmReadObjectFromSwap(): can't seek: %s",
291 strerror(errno));
292 _exit(1);
293 }
294 o = rdbLoadObject(type,server.vm_fp);
295 if (o == NULL) {
296 redisLog(REDIS_WARNING, "Unrecoverable VM problem in vmReadObjectFromSwap(): can't load object from swap file: %s", strerror(errno));
297 _exit(1);
298 }
299 if (server.vm_enabled) pthread_mutex_unlock(&server.io_swapfile_mutex);
300 return o;
301 }
302
303 /* Load the specified object from swap to memory.
304 * The newly allocated object is returned.
305 *
306 * If preview is true the unserialized object is returned to the caller, but
307 * the pages are not marked as free, nor is the vp object freed. */
308 robj *vmGenericLoadObject(vmpointer *vp, int preview) {
309 robj *val;
310
311 redisAssert(vp->type == REDIS_VMPOINTER &&
312 (vp->storage == REDIS_VM_SWAPPED || vp->storage == REDIS_VM_LOADING));
313 val = vmReadObjectFromSwap(vp->page,vp->vtype);
314 if (!preview) {
315 redisLog(REDIS_DEBUG, "VM: object %p loaded from disk", (void*)vp);
316 vmMarkPagesFree(vp->page,vp->usedpages);
317 zfree(vp);
318 server.vm_stats_swapped_objects--;
319 } else {
320 redisLog(REDIS_DEBUG, "VM: object %p previewed from disk", (void*)vp);
321 }
322 server.vm_stats_swapins++;
323 return val;
324 }
325
326 /* Plain object loading, from swap to memory.
327 *
328 * 'o' is actually a vmpointer structure that will be freed by the call.
329 * The return value is the loaded object. */
330 robj *vmLoadObject(robj *o) {
331 /* If we are loading the object in background, stop it, we
332 * need to load this object synchronously ASAP. */
333 if (o->storage == REDIS_VM_LOADING)
334 vmCancelThreadedIOJob(o);
335 return vmGenericLoadObject((vmpointer*)o,0);
336 }
337
338 /* Just load the value from disk, without modifying the key.
339 * This is useful when we want to perform some operation on the value
340 * without really bringing it from swap to memory, like while saving the
341 * dataset or rewriting the append only log. */
342 robj *vmPreviewObject(robj *o) {
343 return vmGenericLoadObject((vmpointer*)o,1);
344 }
345
346 /* How good a candidate is this object for swapping?
347 * The better a candidate it is, the greater the returned value.
348 *
349 * Currently we try to perform a fast estimation of the object size in
350 * memory, and combine it with aging information.
351 *
352 * Basically swappability = idle-time * log(estimated size)
353 *
354 * Bigger objects are preferred over smaller objects, but not
355 * proportionally: this is why we use the logarithm. This algorithm is
356 * just a first try and will probably be tuned later. */
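/* For example, with minute-resolution idle times, an object idle for 10
 * minutes with an estimated size of ~1000 bytes scores about
 * 10 * log(1001) ~= 69, while the same object idle for one minute scores
 * only ~6.9. */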
357 double computeObjectSwappability(robj *o) {
358 /* The actual age can be >= minage, but not < minage, as we use a wrapping
359 * 21 bit clock with minute resolution for the LRU. */
360 time_t minage = abs(server.lruclock - o->lru);
361 long asize = 0, elesize;
362 robj *ele;
363 list *l;
364 listNode *ln;
365 dict *d;
366 struct dictEntry *de;
367 int z;
368
369 if (minage <= 0) return 0;
370 switch(o->type) {
371 case REDIS_STRING:
372 if (o->encoding != REDIS_ENCODING_RAW) {
373 asize = sizeof(*o);
374 } else {
375 asize = sdslen(o->ptr)+sizeof(*o)+sizeof(long)*2;
376 }
377 break;
378 case REDIS_LIST:
379 if (o->encoding == REDIS_ENCODING_ZIPLIST) {
380 asize = sizeof(*o)+ziplistSize(o->ptr);
381 } else {
382 l = o->ptr;
383 ln = listFirst(l);
384 asize = sizeof(list);
385 if (ln) {
386 ele = ln->value;
387 elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
388 (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
389 asize += (sizeof(listNode)+elesize)*listLength(l);
390 }
391 }
392 break;
393 case REDIS_SET:
394 case REDIS_ZSET:
395 z = (o->type == REDIS_ZSET);
396 d = z ? ((zset*)o->ptr)->dict : o->ptr;
397
398 asize = sizeof(dict)+(sizeof(struct dictEntry*)*dictSlots(d));
399 if (z) asize += sizeof(zset)-sizeof(dict);
400 if (dictSize(d)) {
401 de = dictGetRandomKey(d);
402 ele = dictGetEntryKey(de);
403 elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
404 (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
405 asize += (sizeof(struct dictEntry)+elesize)*dictSize(d);
406 if (z) asize += sizeof(zskiplistNode)*dictSize(d);
407 }
408 break;
409 case REDIS_HASH:
410 if (o->encoding == REDIS_ENCODING_ZIPMAP) {
411 unsigned char *p = zipmapRewind((unsigned char*)o->ptr);
412 unsigned int len = zipmapLen((unsigned char*)o->ptr);
413 unsigned int klen, vlen;
414 unsigned char *key, *val;
415
416 if ((p = zipmapNext(p,&key,&klen,&val,&vlen)) == NULL) {
417 klen = 0;
418 vlen = 0;
419 }
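/* Rough estimate: assume every entry has the same key/value length as
 * the first one, plus a few bytes of per-entry zipmap overhead. */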
420 asize = len*(klen+vlen+3);
421 } else if (o->encoding == REDIS_ENCODING_HT) {
422 d = o->ptr;
423 asize = sizeof(dict)+(sizeof(struct dictEntry*)*dictSlots(d));
424 if (dictSize(d)) {
425 de = dictGetRandomKey(d);
426 ele = dictGetEntryKey(de);
427 elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
428 (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
429 ele = dictGetEntryVal(de);
430 elesize += (ele->encoding == REDIS_ENCODING_RAW) ?
431 (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
432 asize += (sizeof(struct dictEntry)+elesize)*dictSize(d);
433 }
434 }
435 break;
436 }
437 return (double)minage*log(1+asize);
438 }
439
440 /* Try to swap an object that's a good candidate for swapping.
441 * Returns REDIS_OK if the object was swapped, REDIS_ERR if it's not possible
442 * to swap any object at all.
443 *
444 * If 'usethreads' is true, Redis will try to swap the object in the background
445 * using I/O threads. */
446 int vmSwapOneObject(int usethreads) {
447 int j, i;
448 struct dictEntry *best = NULL;
449 double best_swappability = 0;
450 redisDb *best_db = NULL;
451 robj *val;
452 sds key;
453
454 for (j = 0; j < server.dbnum; j++) {
455 redisDb *db = server.db+j;
456 /* Why is maxtries set to 100?
457 * Because this way (usually) we'll find 1 object even if just 1% - 2%
458 * of the objects are swappable. */
459 int maxtries = 100;
460
461 if (dictSize(db->dict) == 0) continue;
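/* Sample a few random keys from this DB and remember the best swapping
 * candidate found so far across all DBs. */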
462 for (i = 0; i < 5; i++) {
463 dictEntry *de;
464 double swappability;
465
466 if (maxtries) maxtries--;
467 de = dictGetRandomKey(db->dict);
468 val = dictGetEntryVal(de);
469 /* Only swap objects that are currently in memory.
470 *
471 * Also don't swap shared objects: it's not a good idea in general, and
472 * we need to ensure that the main thread does not touch the
473 * object while the I/O thread is using it, but we can't
474 * control the other references without adding an additional mutex. */
475 if (val->storage != REDIS_VM_MEMORY || val->refcount != 1) {
476 if (maxtries) i--; /* don't count this try */
477 continue;
478 }
479 swappability = computeObjectSwappability(val);
480 if (!best || swappability > best_swappability) {
481 best = de;
482 best_swappability = swappability;
483 best_db = db;
484 }
485 }
486 }
487 if (best == NULL) return REDIS_ERR;
488 key = dictGetEntryKey(best);
489 val = dictGetEntryVal(best);
490
491 redisLog(REDIS_DEBUG,"Key with best swappability: %s, %f",
492 key, best_swappability);
493
494 /* Swap it */
495 if (usethreads) {
496 robj *keyobj = createStringObject(key,sdslen(key));
497 vmSwapObjectThreaded(keyobj,val,best_db);
498 decrRefCount(keyobj);
499 return REDIS_OK;
500 } else {
501 vmpointer *vp;
502
503 if ((vp = vmSwapObjectBlocking(val)) != NULL) {
504 dictGetEntryVal(best) = vp;
505 return REDIS_OK;
506 } else {
507 return REDIS_ERR;
508 }
509 }
510 }
511
512 int vmSwapOneObjectBlocking() {
513 return vmSwapOneObject(0);
514 }
515
516 int vmSwapOneObjectThreaded() {
517 return vmSwapOneObject(1);
518 }
519
520 /* Return true if it's safe to swap out objects at a given moment.
521 * Basically we don't want to swap objects out while there is a BGSAVE
522 * or a BGREWRITEAOF running in the background. */
523 int vmCanSwapOut(void) {
524 return (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1);
525 }
526
527 /* =================== Virtual Memory - Threaded I/O ======================= */
528
529 void freeIOJob(iojob *j) {
530 if ((j->type == REDIS_IOJOB_PREPARE_SWAP ||
531 j->type == REDIS_IOJOB_DO_SWAP ||
532 j->type == REDIS_IOJOB_LOAD) && j->val != NULL)
533 {
534 /* we fix the storage type, otherwise decrRefCount() will try to
535 * kill the I/O thread job (that no longer exists). */
536 if (j->val->storage == REDIS_VM_SWAPPING)
537 j->val->storage = REDIS_VM_MEMORY;
538 decrRefCount(j->val);
539 }
540 decrRefCount(j->key);
541 zfree(j);
542 }
543
544 /* Every time a thread finishes a job, it writes a byte into the write side
545 * of a unix pipe in order to wake up the main thread, and this function
546 * is called. */
547 void vmThreadedIOCompletedJob(aeEventLoop *el, int fd, void *privdata,
548 int mask)
549 {
550 char buf[1];
551 int retval, processed = 0, toprocess = -1, trytoswap = 1;
552 REDIS_NOTUSED(el);
553 REDIS_NOTUSED(mask);
554 REDIS_NOTUSED(privdata);
555
556 /* For every byte we read from the read side of the pipe, there is one
557 * completed I/O job to process. */
558 while((retval = read(fd,buf,1)) == 1) {
559 iojob *j;
560 listNode *ln;
561 struct dictEntry *de;
562
563 redisLog(REDIS_DEBUG,"Processing I/O completed job");
564
565 /* Get the processed element (the oldest one) */
566 lockThreadedIO();
567 redisAssert(listLength(server.io_processed) != 0);
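/* Only process up to REDIS_MAX_COMPLETED_JOBS_PROCESSED% of the currently
 * completed jobs per call, so we return to the event loop regularly instead
 * of doing all the post-processing at once. */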
568 if (toprocess == -1) {
569 toprocess = (listLength(server.io_processed)*REDIS_MAX_COMPLETED_JOBS_PROCESSED)/100;
570 if (toprocess <= 0) toprocess = 1;
571 }
572 ln = listFirst(server.io_processed);
573 j = ln->value;
574 listDelNode(server.io_processed,ln);
575 unlockThreadedIO();
576 /* If this job is marked as canceled, just ignore it */
577 if (j->canceled) {
578 freeIOJob(j);
579 continue;
580 }
581 /* Post process it in the main thread, as there are things we
582 * can only do here to avoid race conditions and/or invasive locks */
583 redisLog(REDIS_DEBUG,"COMPLETED Job type: %d, ID %p, key: %s", j->type, (void*)j->id, (unsigned char*)j->key->ptr);
584 de = dictFind(j->db->dict,j->key->ptr);
585 redisAssert(de != NULL);
586 if (j->type == REDIS_IOJOB_LOAD) {
587 redisDb *db;
588 vmpointer *vp = dictGetEntryVal(de);
589
590 /* Key loaded, bring it back home */
591 vmMarkPagesFree(vp->page,vp->usedpages);
592 redisLog(REDIS_DEBUG, "VM: object %s loaded from disk (threaded)",
593 (unsigned char*) j->key->ptr);
594 server.vm_stats_swapped_objects--;
595 server.vm_stats_swapins++;
596 dictGetEntryVal(de) = j->val;
597 incrRefCount(j->val);
598 db = j->db;
599 /* Handle clients waiting for this key to be loaded. */
600 handleClientsBlockedOnSwappedKey(db,j->key);
601 freeIOJob(j);
602 zfree(vp);
603 } else if (j->type == REDIS_IOJOB_PREPARE_SWAP) {
604 /* Now we know the amount of pages required to swap this object.
605 * Let's find some space for it, and queue this task again
606 * rebranded as REDIS_IOJOB_DO_SWAP. */
607 if (!vmCanSwapOut() ||
608 vmFindContiguousPages(&j->page,j->pages) == REDIS_ERR)
609 {
610 /* Ooops... no space or we can't swap as there is
611 * a fork()ed Redis trying to save stuff on disk. */
612 j->val->storage = REDIS_VM_MEMORY; /* undo operation */
613 freeIOJob(j);
614 } else {
615 /* Note that we need to mark these pages as used now;
616 * if the job gets canceled, we'll mark them as free
617 * again. */
618 vmMarkPagesUsed(j->page,j->pages);
619 j->type = REDIS_IOJOB_DO_SWAP;
620 lockThreadedIO();
621 queueIOJob(j);
622 unlockThreadedIO();
623 }
624 } else if (j->type == REDIS_IOJOB_DO_SWAP) {
625 vmpointer *vp;
626
627 /* Key swapped. We can finally free some memory. */
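/* If the object is not in the expected SWAPPING state something went
 * wrong: dump some details before the assertion below fires. */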
628 if (j->val->storage != REDIS_VM_SWAPPING) {
629 vmpointer *vp = (vmpointer*) j->id;
630 printf("storage: %d\n",vp->storage);
631 printf("key->name: %s\n",(char*)j->key->ptr);
632 printf("val: %p\n",(void*)j->val);
633 printf("val->type: %d\n",j->val->type);
634 printf("val->ptr: %s\n",(char*)j->val->ptr);
635 }
636 redisAssert(j->val->storage == REDIS_VM_SWAPPING);
637 vp = createVmPointer(j->val->type);
638 vp->page = j->page;
639 vp->usedpages = j->pages;
640 dictGetEntryVal(de) = vp;
641 /* Fix the storage otherwise decrRefCount will attempt to
642 * remove the associated I/O job */
643 j->val->storage = REDIS_VM_MEMORY;
644 decrRefCount(j->val);
645 redisLog(REDIS_DEBUG,
646 "VM: object %s swapped out at %lld (%lld pages) (threaded)",
647 (unsigned char*) j->key->ptr,
648 (unsigned long long) j->page, (unsigned long long) j->pages);
649 server.vm_stats_swapped_objects++;
650 server.vm_stats_swapouts++;
651 freeIOJob(j);
652 /* Put a few more swap requests in queue if we are still
653 * out of memory */
654 if (trytoswap && vmCanSwapOut() &&
655 zmalloc_used_memory() > server.vm_max_memory)
656 {
657 int more = 1;
658 while(more) {
659 lockThreadedIO();
660 more = listLength(server.io_newjobs) <
661 (unsigned) server.vm_max_threads;
662 unlockThreadedIO();
663 /* Don't waste CPU time if swappable objects are rare. */
664 if (vmSwapOneObjectThreaded() == REDIS_ERR) {
665 trytoswap = 0;
666 break;
667 }
668 }
669 }
670 }
671 processed++;
672 if (processed == toprocess) return;
673 }
674 if (retval < 0 && errno != EAGAIN) {
675 redisLog(REDIS_WARNING,
676 "WARNING: read(2) error in vmThreadedIOCompletedJob() %s",
677 strerror(errno));
678 }
679 }
680
681 void lockThreadedIO(void) {
682 pthread_mutex_lock(&server.io_mutex);
683 }
684
685 void unlockThreadedIO(void) {
686 pthread_mutex_unlock(&server.io_mutex);
687 }
688
689 /* Remove the specified object from the threaded I/O queue if it was not yet
690 * processed, otherwise make sure to flag it as canceled. */
691 void vmCancelThreadedIOJob(robj *o) {
692 list *lists[3] = {
693 server.io_newjobs, /* 0 */
694 server.io_processing, /* 1 */
695 server.io_processed /* 2 */
696 };
697 int i;
698
699 redisAssert(o->storage == REDIS_VM_LOADING || o->storage == REDIS_VM_SWAPPING);
700 again:
701 lockThreadedIO();
702 /* Search for a matching object in one of the queues */
703 for (i = 0; i < 3; i++) {
704 listNode *ln;
705 listIter li;
706
707 listRewind(lists[i],&li);
708 while ((ln = listNext(&li)) != NULL) {
709 iojob *job = ln->value;
710
711 if (job->canceled) continue; /* Skip this, already canceled. */
712 if (job->id == o) {
713 redisLog(REDIS_DEBUG,"*** CANCELED %p (key %s) (type %d) (LIST ID %d)\n",
714 (void*)job, (char*)job->key->ptr, job->type, i);
715 /* Mark the pages as free since the swap didn't happen,
716 * or happened but is now discarded. */
717 if (i != 1 && job->type == REDIS_IOJOB_DO_SWAP)
718 vmMarkPagesFree(job->page,job->pages);
719 /* Cancel the job. It depends on the list the job is
720 * living in. */
721 switch(i) {
722 case 0: /* io_newjobs */
723 /* If the job was not yet processed the best thing to do
724 * is to remove it from the queue entirely */
725 freeIOJob(job);
726 listDelNode(lists[i],ln);
727 break;
728 case 1: /* io_processing */
729 /* Oh Shi- the thread is messing with the Job:
730 *
731 * It's probably accessing the object if this is a
732 * PREPARE_SWAP or DO_SWAP job.
733 * If it's a LOAD job it may be reading from disk, and
734 * if we don't wait for the job to terminate before
735 * canceling it, the data in these pages may get
736 * corrupted a few microseconds later. So the short story is:
737 *
738 * Better to wait for the job to move into the
739 * next queue (processed)... */
740
741 /* We try again and again until the job is completed. */
742 unlockThreadedIO();
743 /* But let's wait some time for the I/O thread
744 * to finish with this job. After all this condition
745 * should be very rare. */
746 usleep(1);
747 goto again;
748 case 2: /* io_processed */
749 /* The job was already processed, that's easy...
750 * just mark it as canceled so that we'll ignore it
751 * when processing completed jobs. */
752 job->canceled = 1;
753 break;
754 }
755 /* Finally we have to adjust the storage type of the object
756 * in order to "UNDO" the operation. */
757 if (o->storage == REDIS_VM_LOADING)
758 o->storage = REDIS_VM_SWAPPED;
759 else if (o->storage == REDIS_VM_SWAPPING)
760 o->storage = REDIS_VM_MEMORY;
761 unlockThreadedIO();
762 redisLog(REDIS_DEBUG,"*** DONE");
763 return;
764 }
765 }
766 }
767 unlockThreadedIO();
768 printf("Not found: %p\n", (void*)o);
769 redisAssert(1 != 1); /* We should never reach this */
770 }
771
772 void *IOThreadEntryPoint(void *arg) {
773 iojob *j;
774 listNode *ln;
775 REDIS_NOTUSED(arg);
776
777 pthread_detach(pthread_self());
778 while(1) {
779 /* Get a new job to process */
780 lockThreadedIO();
781 if (listLength(server.io_newjobs) == 0) {
782 /* No new jobs in queue, exit. */
783 redisLog(REDIS_DEBUG,"Thread %ld exiting, nothing to do",
784 (long) pthread_self());
785 server.io_active_threads--;
786 unlockThreadedIO();
787 return NULL;
788 }
789 ln = listFirst(server.io_newjobs);
790 j = ln->value;
791 listDelNode(server.io_newjobs,ln);
792 /* Add the job in the processing queue */
793 j->thread = pthread_self();
794 listAddNodeTail(server.io_processing,j);
795 ln = listLast(server.io_processing); /* We use ln later to remove it */
796 unlockThreadedIO();
797 redisLog(REDIS_DEBUG,"Thread %ld got a new job (type %d): %p about key '%s'",
798 (long) pthread_self(), j->type, (void*)j, (char*)j->key->ptr);
799
800 /* Process the Job */
801 if (j->type == REDIS_IOJOB_LOAD) {
802 vmpointer *vp = (vmpointer*)j->id;
803 j->val = vmReadObjectFromSwap(j->page,vp->vtype);
804 } else if (j->type == REDIS_IOJOB_PREPARE_SWAP) {
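/* Serialize the object to /dev/null just to compute how many swap
 * pages it will need on disk. */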
805 FILE *fp = fopen("/dev/null","w+");
806 j->pages = rdbSavedObjectPages(j->val,fp);
807 fclose(fp);
808 } else if (j->type == REDIS_IOJOB_DO_SWAP) {
809 if (vmWriteObjectOnSwap(j->val,j->page) == REDIS_ERR)
810 j->canceled = 1;
811 }
812
813 /* Done: insert the job into the processed queue */
814 redisLog(REDIS_DEBUG,"Thread %ld completed the job: %p (key %s)",
815 (long) pthread_self(), (void*)j, (char*)j->key->ptr);
816 lockThreadedIO();
817 listDelNode(server.io_processing,ln);
818 listAddNodeTail(server.io_processed,j);
819 unlockThreadedIO();
820
821 /* Signal the main thread there is new stuff to process */
822 redisAssert(write(server.io_ready_pipe_write,"x",1) == 1);
823 }
824 return NULL; /* never reached */
825 }
826
827 void spawnIOThread(void) {
828 pthread_t thread;
829 sigset_t mask, omask;
830 int err;
831
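/* Block a few signals while creating the thread: the new thread inherits
 * this signal mask, so SIGCHLD, SIGHUP and SIGPIPE keep being delivered to
 * the main thread only. The caller's mask is restored right after
 * pthread_create(). */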
832 sigemptyset(&mask);
833 sigaddset(&mask,SIGCHLD);
834 sigaddset(&mask,SIGHUP);
835 sigaddset(&mask,SIGPIPE);
836 pthread_sigmask(SIG_SETMASK, &mask, &omask);
837 while ((err = pthread_create(&thread,&server.io_threads_attr,IOThreadEntryPoint,NULL)) != 0) {
838 redisLog(REDIS_WARNING,"Unable to spawn an I/O thread: %s",
839 strerror(err));
840 usleep(1000000);
841 }
842 pthread_sigmask(SIG_SETMASK, &omask, NULL);
843 server.io_active_threads++;
844 }
845
846 /* We need to wait for the last thread to exit before we are able to
847 * fork() in order to BGSAVE or BGREWRITEAOF. */
848 void waitEmptyIOJobsQueue(void) {
849 while(1) {
850 int io_processed_len;
851
852 lockThreadedIO();
853 if (listLength(server.io_newjobs) == 0 &&
854 listLength(server.io_processing) == 0 &&
855 server.io_active_threads == 0)
856 {
857 unlockThreadedIO();
858 return;
859 }
860 /* While waiting for the empty jobs queue condition we post-process some
861 * finished jobs, as I/O threads may be hanging trying to write against
862 * the io_ready_pipe_write FD, but there are so many pending jobs that
863 * it's blocking. */
864 io_processed_len = listLength(server.io_processed);
865 unlockThreadedIO();
866 if (io_processed_len) {
867 vmThreadedIOCompletedJob(NULL,server.io_ready_pipe_read,NULL,0);
868 usleep(1000); /* 1 millisecond */
869 } else {
870 usleep(10000); /* 10 milliseconds */
871 }
872 }
873 }
874
875 void vmReopenSwapFile(void) {
876 /* Note: we don't close the old one as we are in the child process
877 * and don't want to mess at all with the original file object. */
878 server.vm_fp = fopen(server.vm_swap_file,"r+b");
879 if (server.vm_fp == NULL) {
880 redisLog(REDIS_WARNING,"Can't re-open the VM swap file: %s. Exiting.",
881 server.vm_swap_file);
882 _exit(1);
883 }
884 server.vm_fd = fileno(server.vm_fp);
885 }
886
887 /* This function must be called with threaded I/O locked */
888 void queueIOJob(iojob *j) {
889 redisLog(REDIS_DEBUG,"Queued IO Job %p type %d about key '%s'\n",
890 (void*)j, j->type, (char*)j->key->ptr);
891 listAddNodeTail(server.io_newjobs,j);
892 if (server.io_active_threads < server.vm_max_threads)
893 spawnIOThread();
894 }
895
896 int vmSwapObjectThreaded(robj *key, robj *val, redisDb *db) {
897 iojob *j;
898
899 j = zmalloc(sizeof(*j));
900 j->type = REDIS_IOJOB_PREPARE_SWAP;
901 j->db = db;
902 j->key = key;
903 incrRefCount(key);
904 j->id = j->val = val;
905 incrRefCount(val);
906 j->canceled = 0;
907 j->thread = (pthread_t) -1;
908 val->storage = REDIS_VM_SWAPPING;
909
910 lockThreadedIO();
911 queueIOJob(j);
912 unlockThreadedIO();
913 return REDIS_OK;
914 }
915
916 /* ============ Virtual Memory - Blocking clients on missing keys =========== */
917
918 /* This function makes the client 'c' wait for the key 'key' to be loaded.
919 * If there is not already a job loading the key, one is created.
920 * The key is added to the io_keys list in the client structure, and also
921 * in the hash table mapping swapped keys to waiting clients, that is,
922 * server.io_waited_keys. */
923 int waitForSwappedKey(redisClient *c, robj *key) {
924 struct dictEntry *de;
925 robj *o;
926 list *l;
927
928 /* If the key does not exist or is already in RAM we don't need to
929 * block the client at all. */
930 de = dictFind(c->db->dict,key->ptr);
931 if (de == NULL) return 0;
932 o = dictGetEntryVal(de);
933 if (o->storage == REDIS_VM_MEMORY) {
934 return 0;
935 } else if (o->storage == REDIS_VM_SWAPPING) {
936 /* We were swapping the key, undo it! */
937 vmCancelThreadedIOJob(o);
938 return 0;
939 }
940
941 /* OK: the key is either swapped, or being loaded just now. */
942
943 /* Add the key to the list of keys this client is waiting for.
944 * This maps clients to keys they are waiting for. */
945 listAddNodeTail(c->io_keys,key);
946 incrRefCount(key);
947
948 /* Add the client to the swapped keys => clients waiting map. */
949 de = dictFind(c->db->io_keys,key);
950 if (de == NULL) {
951 int retval;
952
953 /* For every key we take a list of clients blocked for it */
954 l = listCreate();
955 retval = dictAdd(c->db->io_keys,key,l);
956 incrRefCount(key);
957 redisAssert(retval == DICT_OK);
958 } else {
959 l = dictGetEntryVal(de);
960 }
961 listAddNodeTail(l,c);
962
963 /* Are we already loading the key from disk? If not create a job */
964 if (o->storage == REDIS_VM_SWAPPED) {
965 iojob *j;
966 vmpointer *vp = (vmpointer*)o;
967
968 o->storage = REDIS_VM_LOADING;
969 j = zmalloc(sizeof(*j));
970 j->type = REDIS_IOJOB_LOAD;
971 j->db = c->db;
972 j->id = (robj*)vp;
973 j->key = key;
974 incrRefCount(key);
975 j->page = vp->page;
976 j->val = NULL;
977 j->canceled = 0;
978 j->thread = (pthread_t) -1;
979 lockThreadedIO();
980 queueIOJob(j);
981 unlockThreadedIO();
982 }
983 return 1;
984 }
985
986 /* Preload keys for any command with first, last and step values for
987 * the command keys prototype, as defined in the command table. */
988 void waitForMultipleSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) {
989 int j, last;
990 if (cmd->vm_firstkey == 0) return;
991 last = cmd->vm_lastkey;
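/* A negative vm_lastkey is relative to the end of the argument vector
 * (for instance -1 means the last argument). */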
992 if (last < 0) last = argc+last;
993 for (j = cmd->vm_firstkey; j <= last; j += cmd->vm_keystep) {
994 redisAssert(j < argc);
995 waitForSwappedKey(c,argv[j]);
996 }
997 }
998
999 /* Preload keys needed for the ZUNIONSTORE and ZINTERSTORE commands.
1000 * Note that the number of keys to preload is user-defined, so we need to
1001 * apply a sanity check against argc. */
1002 void zunionInterBlockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) {
1003 int i, num;
1004 REDIS_NOTUSED(cmd);
1005
1006 num = atoi(argv[2]->ptr);
1007 if (num > (argc-3)) return;
1008 for (i = 0; i < num; i++) {
1009 waitForSwappedKey(c,argv[3+i]);
1010 }
1011 }
1012
1013 /* Preload keys needed to execute the entire MULTI/EXEC block.
1014 *
1015 * This function is called by blockClientOnSwappedKeys when EXEC is issued,
1016 * and will block the client when any command requires a swapped out value. */
1017 void execBlockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) {
1018 int i, margc;
1019 struct redisCommand *mcmd;
1020 robj **margv;
1021 REDIS_NOTUSED(cmd);
1022 REDIS_NOTUSED(argc);
1023 REDIS_NOTUSED(argv);
1024
1025 if (!(c->flags & REDIS_MULTI)) return;
1026 for (i = 0; i < c->mstate.count; i++) {
1027 mcmd = c->mstate.commands[i].cmd;
1028 margc = c->mstate.commands[i].argc;
1029 margv = c->mstate.commands[i].argv;
1030
1031 if (mcmd->vm_preload_proc != NULL) {
1032 mcmd->vm_preload_proc(c,mcmd,margc,margv);
1033 } else {
1034 waitForMultipleSwappedKeys(c,mcmd,margc,margv);
1035 }
1036 }
1037 }
1038
1039 /* Is this client attempting to run a command against swapped keys?
1040 * If so, block it ASAP, load the keys in background, then resume it.
1041 *
1042 * The important idea about this function is that it can fail! If keys are
1043 * still swapped out when the client is resumed, the key lookups will
1044 * just block loading keys from disk. In practical terms this should only
1045 * happen with the SORT BY command or if there is a bug in this function.
1046 *
1047 * Return 1 if the client is marked as blocked, 0 if the client can
1048 * continue as the keys it is going to access appear to be in memory. */
1049 int blockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd) {
1050 if (cmd->vm_preload_proc != NULL) {
1051 cmd->vm_preload_proc(c,cmd,c->argc,c->argv);
1052 } else {
1053 waitForMultipleSwappedKeys(c,cmd,c->argc,c->argv);
1054 }
1055
1056 /* If the client was blocked for at least one key, mark it as blocked. */
1057 if (listLength(c->io_keys)) {
1058 c->flags |= REDIS_IO_WAIT;
1059 aeDeleteFileEvent(server.el,c->fd,AE_READABLE);
1060 server.vm_blocked_clients++;
1061 return 1;
1062 } else {
1063 return 0;
1064 }
1065 }
1066
1067 /* Remove the 'key' from the list of blocked keys for a given client.
1068 *
1069 * The function returns 1 when there are no longer blocking keys after
1070 * the current one was removed (and the client can be unblocked). */
1071 int dontWaitForSwappedKey(redisClient *c, robj *key) {
1072 list *l;
1073 listNode *ln;
1074 listIter li;
1075 struct dictEntry *de;
1076
1077 /* The key object might be destroyed when deleted from the c->io_keys
1078 * list (and the "key" argument is physically the same object as the
1079 * object inside the list), so we need to protect it. */
1080 incrRefCount(key);
1081
1082 /* Remove the key from the list of keys this client is waiting for. */
1083 listRewind(c->io_keys,&li);
1084 while ((ln = listNext(&li)) != NULL) {
1085 if (equalStringObjects(ln->value,key)) {
1086 listDelNode(c->io_keys,ln);
1087 break;
1088 }
1089 }
1090 redisAssert(ln != NULL);
1091
1092 /* Remove the client from the key => waiting clients map. */
1093 de = dictFind(c->db->io_keys,key);
1094 redisAssert(de != NULL);
1095 l = dictGetEntryVal(de);
1096 ln = listSearchKey(l,c);
1097 redisAssert(ln != NULL);
1098 listDelNode(l,ln);
1099 if (listLength(l) == 0)
1100 dictDelete(c->db->io_keys,key);
1101
1102 decrRefCount(key);
1103 return listLength(c->io_keys) == 0;
1104 }
1105
1106 /* Every time we know a key was loaded back into memory, we handle clients
1107 * waiting for this key, if any. */
1108 void handleClientsBlockedOnSwappedKey(redisDb *db, robj *key) {
1109 struct dictEntry *de;
1110 list *l;
1111 listNode *ln;
1112 int len;
1113
1114 de = dictFind(db->io_keys,key);
1115 if (!de) return;
1116
1117 l = dictGetEntryVal(de);
1118 len = listLength(l);
1119 /* Note: we can't use something like while(listLength(l)) as the list
1120 * can be freed by the calling function when we remove the last element. */
1121 while (len--) {
1122 ln = listFirst(l);
1123 redisClient *c = ln->value;
1124
1125 if (dontWaitForSwappedKey(c,key)) {
1126 /* Put the client in the list of clients ready to go as we
1127 * loaded all the keys it was waiting for. */
1128 listAddNodeTail(server.io_ready_clients,c);
1129 }
1130 }
1131 }