1 #include "redis.h"
2
3 #include <fcntl.h>
4 #include <pthread.h>
5 #include <math.h>
6 #include <signal.h>
7
8 /* Virtual Memory is composed mainly of two subsystems:
9 * - Blocking Virtual Memory
10 * - Threaded Virtual Memory I/O
11 * The two parts are not fully decoupled, but functions are split among two
12 * different sections of the source code (delimited by comments) in order to
13 * make it clearer which functionality belongs to the blocking VM and which
14 * to the threaded (non-blocking) VM.
15 *
16 * Redis VM design:
17 *
18 * Redis VM is a blocking VM (one that blocks reading swapped values from
19 * disk into memory when a value swapped out is needed in memory) that is made
20 * unblocking by trying to examine the command argument vector in order to
21 * load in background values that will likely be needed in order to exec
22 * the command. The command is executed only once all the relevant keys
23 * are loaded into memory.
24 *
25 * This is basically almost as simple as a blocking VM, but almost as
26 * parallel as a fully non-blocking VM.
27 */
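/* In practice the flow implemented in this file is roughly the following:
 *
 *   1. blockClientOnSwappedKeys() inspects the command argument vector,
 *      using the first/last/step key positions from the command table or a
 *      command specific vm_preload_proc.
 *   2. For every argument that is a swapped out key, waitForSwappedKey()
 *      queues a REDIS_IOJOB_LOAD job (unless one is already loading it) and
 *      registers the client both in c->io_keys and in db->io_keys.
 *   3. The client is flagged REDIS_IO_WAIT and its read event is removed,
 *      so it is no longer served for the time being.
 *   4. When the load completes, vmThreadedIOCompletedJob() calls
 *      handleClientsBlockedOnSwappedKey(), which puts the client into
 *      server.io_ready_clients once all the keys it was waiting for are in
 *      memory, and the command is finally executed. */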
28
29 /* =================== Virtual Memory - Blocking Side ====================== */
30
31 /* Create a VM pointer object. This kind of object is used in place of
32 * the value in the key -> value hash table for swapped out objects. */
33 vmpointer *createVmPointer(int vtype) {
34 vmpointer *vp = zmalloc(sizeof(vmpointer));
35
36 vp->type = REDIS_VMPOINTER;
37 vp->storage = REDIS_VM_SWAPPED;
38 vp->vtype = vtype;
39 return vp;
40 }
41
42 void vmInit(void) {
43 off_t totsize;
44 int pipefds[2];
45 size_t stacksize;
46 struct flock fl;
47
48 if (server.vm_max_threads != 0)
49 zmalloc_enable_thread_safeness(); /* we need thread safe zmalloc() */
50
51 redisLog(REDIS_NOTICE,"Using '%s' as swap file",server.vm_swap_file);
52 /* Try to open the old swap file, otherwise create it */
53 if ((server.vm_fp = fopen(server.vm_swap_file,"r+b")) == NULL) {
54 server.vm_fp = fopen(server.vm_swap_file,"w+b");
55 }
56 if (server.vm_fp == NULL) {
57 redisLog(REDIS_WARNING,
58 "Can't open the swap file: %s. Exiting.",
59 strerror(errno));
60 exit(1);
61 }
62 server.vm_fd = fileno(server.vm_fp);
63 /* Lock the swap file for writing: this is useful to prevent another
64 * instance from using the same swap file because of a config error. */
65 fl.l_type = F_WRLCK;
66 fl.l_whence = SEEK_SET;
67 fl.l_start = fl.l_len = 0;
68 if (fcntl(server.vm_fd,F_SETLK,&fl) == -1) {
69 redisLog(REDIS_WARNING,
70 "Can't lock the swap file at '%s': %s. Make sure it is not used by another Redis instance.", server.vm_swap_file, strerror(errno));
71 exit(1);
72 }
73 /* Initialize */
74 server.vm_next_page = 0;
75 server.vm_near_pages = 0;
76 server.vm_stats_used_pages = 0;
77 server.vm_stats_swapped_objects = 0;
78 server.vm_stats_swapouts = 0;
79 server.vm_stats_swapins = 0;
80 totsize = server.vm_pages*server.vm_page_size;
81 redisLog(REDIS_NOTICE,"Allocating %lld bytes of swap file",totsize);
82 if (ftruncate(server.vm_fd,totsize) == -1) {
83 redisLog(REDIS_WARNING,"Can't ftruncate swap file: %s. Exiting.",
84 strerror(errno));
85 exit(1);
86 } else {
87 redisLog(REDIS_NOTICE,"Swap file allocated with success");
88 }
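/* The bitmap uses one bit per swap page, hence (vm_pages+7)/8 bytes:
 * the +7 rounds up to a whole byte, e.g. 100 pages need 13 bytes, as
 * 12 bytes would only cover 96 pages. */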
89 server.vm_bitmap = zmalloc((server.vm_pages+7)/8);
90 redisLog(REDIS_VERBOSE,"Allocated %lld bytes page table for %lld pages",
91 (long long) (server.vm_pages+7)/8, server.vm_pages);
92 memset(server.vm_bitmap,0,(server.vm_pages+7)/8);
93
94 /* Initialize threaded I/O (used by Virtual Memory) */
95 server.io_newjobs = listCreate();
96 server.io_processing = listCreate();
97 server.io_processed = listCreate();
98 server.io_ready_clients = listCreate();
99 pthread_mutex_init(&server.io_mutex,NULL);
100 pthread_mutex_init(&server.obj_freelist_mutex,NULL);
101 pthread_mutex_init(&server.io_swapfile_mutex,NULL);
102 server.io_active_threads = 0;
103 if (pipe(pipefds) == -1) {
104 redisLog(REDIS_WARNING,"Unable to intialized VM: pipe(2): %s. Exiting."
105 ,strerror(errno));
106 exit(1);
107 }
108 server.io_ready_pipe_read = pipefds[0];
109 server.io_ready_pipe_write = pipefds[1];
110 redisAssert(anetNonBlock(NULL,server.io_ready_pipe_read) != ANET_ERR);
111 /* LZF requires a lot of stack */
112 pthread_attr_init(&server.io_threads_attr);
113 pthread_attr_getstacksize(&server.io_threads_attr, &stacksize);
114 while (stacksize < REDIS_THREAD_STACK_SIZE) stacksize *= 2;
115 pthread_attr_setstacksize(&server.io_threads_attr, stacksize);
116 /* Listen for events in the threaded I/O pipe */
117 if (aeCreateFileEvent(server.el, server.io_ready_pipe_read, AE_READABLE,
118 vmThreadedIOCompletedJob, NULL) == AE_ERR)
119 oom("creating file event");
120 }
121
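/* Bitmap addressing used by the helpers below: page N lives in byte N/8
 * of server.vm_bitmap, at bit N&7 inside that byte. For example page 27
 * is bit 3 of byte 3 (27/8 == 3, 27&7 == 3). */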
122 /* Mark the page as used */
123 void vmMarkPageUsed(off_t page) {
124 off_t byte = page/8;
125 int bit = page&7;
126 redisAssert(vmFreePage(page) == 1);
127 server.vm_bitmap[byte] |= 1<<bit;
128 }
129
130 /* Mark N contiguous pages as used, with 'page' being the first. */
131 void vmMarkPagesUsed(off_t page, off_t count) {
132 off_t j;
133
134 for (j = 0; j < count; j++)
135 vmMarkPageUsed(page+j);
136 server.vm_stats_used_pages += count;
137 redisLog(REDIS_DEBUG,"Mark USED pages: %lld pages at %lld\n",
138 (long long)count, (long long)page);
139 }
140
141 /* Mark the page as free */
142 void vmMarkPageFree(off_t page) {
143 off_t byte = page/8;
144 int bit = page&7;
145 redisAssert(vmFreePage(page) == 0);
146 server.vm_bitmap[byte] &= ~(1<<bit);
147 }
148
149 /* Mark N contiguous pages as free, with 'page' being the first. */
150 void vmMarkPagesFree(off_t page, off_t count) {
151 off_t j;
152
153 for (j = 0; j < count; j++)
154 vmMarkPageFree(page+j);
155 server.vm_stats_used_pages -= count;
156 redisLog(REDIS_DEBUG,"Mark FREE pages: %lld pages at %lld\n",
157 (long long)count, (long long)page);
158 }
159
160 /* Test if the page is free */
161 int vmFreePage(off_t page) {
162 off_t byte = page/8;
163 int bit = page&7;
164 return (server.vm_bitmap[byte] & (1<<bit)) == 0;
165 }
166
167 /* Find N contiguous free pages storing the first page of the cluster in *first.
168 * Returns REDIS_OK if it was able to find N contiguous pages, otherwise
169 * REDIS_ERR is returned.
170 *
171 * This function uses a simple algorithm: we try to allocate
172 * REDIS_VM_MAX_NEAR_PAGES sequentially, when we reach this limit we start
173 * again from the start of the swap file searching for free spaces.
174 *
175 * If it looks pretty clear that there are no free pages near our offset
176 * we try to find less populated places doing a forward jump of
177 * REDIS_VM_MAX_RANDOM_JUMP, then we start scanning again a few pages
178 * without hurry, and then we jump again and so forth...
179 *
180 * This function could be improved using a free list to avoid guessing
181 * too much, since we could collect data about freed pages.
182 *
183 * note: I implemented this function just after watching an episode of
184 * Battlestar Galactica, where the hybrid was continuing to say "JUMP!"
185 */
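/* A small example of the scan, with n = 3 and the bitmap near 'base'
 * looking like F F X F F F (F = free, X = used), assuming no random jump
 * triggers in between: numfree reaches 2, is reset to zero at the used
 * page, and the three free pages that follow satisfy the request, so
 * *first ends up pointing right after X. */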
186 int vmFindContiguousPages(off_t *first, off_t n) {
187 off_t base, offset = 0, since_jump = 0, numfree = 0;
188
189 if (server.vm_near_pages == REDIS_VM_MAX_NEAR_PAGES) {
190 server.vm_near_pages = 0;
191 server.vm_next_page = 0;
192 }
193 server.vm_near_pages++; /* Yet another try for pages near to the old ones */
194 base = server.vm_next_page;
195
196 while(offset < server.vm_pages) {
197 off_t this = base+offset;
198
199 /* If we overflow, restart from page zero */
200 if (this >= server.vm_pages) {
201 this -= server.vm_pages;
202 if (this == 0) {
203 /* Just overflowed, what we found at the tail is no longer
204 * interesting, as it's no longer contiguous. */
205 numfree = 0;
206 }
207 }
208 if (vmFreePage(this)) {
209 /* This is a free page */
210 numfree++;
211 /* Already got N free pages? Return to the caller, with success */
212 if (numfree == n) {
213 *first = this-(n-1);
214 server.vm_next_page = this+1;
215 redisLog(REDIS_DEBUG, "FOUND CONTIGUOUS PAGES: %lld pages at %lld\n", (long long) n, (long long) *first);
216 return REDIS_OK;
217 }
218 } else {
219 /* The current one is not a free page */
220 numfree = 0;
221 }
222
223 /* Fast-forward if the current page is not free and we already
224 * searched enough near this place. */
225 since_jump++;
226 if (!numfree && since_jump >= REDIS_VM_MAX_RANDOM_JUMP/4) {
227 offset += random() % REDIS_VM_MAX_RANDOM_JUMP;
228 since_jump = 0;
229 /* Note that even if we rewind after the jump, we don't need
230 * to reset numfree to zero, as we only jump *if* it
231 * is already zero. */
232 } else {
233 /* Otherwise just check the next page */
234 offset++;
235 }
236 }
237 return REDIS_ERR;
238 }
239
240 /* Write the specified object at the specified page of the swap file */
241 int vmWriteObjectOnSwap(robj *o, off_t page) {
242 if (server.vm_enabled) pthread_mutex_lock(&server.io_swapfile_mutex);
243 if (fseeko(server.vm_fp,page*server.vm_page_size,SEEK_SET) == -1) {
244 if (server.vm_enabled) pthread_mutex_unlock(&server.io_swapfile_mutex);
245 redisLog(REDIS_WARNING,
246 "Critical VM problem in vmWriteObjectOnSwap(): can't seek: %s",
247 strerror(errno));
248 return REDIS_ERR;
249 }
250 rdbSaveObject(server.vm_fp,o);
251 fflush(server.vm_fp);
252 if (server.vm_enabled) pthread_mutex_unlock(&server.io_swapfile_mutex);
253 return REDIS_OK;
254 }
255
256 /* Transfers the 'val' object to disk. A 'vmpointer' object containing
257 * all the information needed to load the object back later is
258 * returned.
259 *
260 * If we can't find enough contiguous empty pages to swap the object on disk
261 * NULL is returned. */
262 vmpointer *vmSwapObjectBlocking(robj *val) {
263 off_t pages = rdbSavedObjectPages(val,NULL);
264 off_t page;
265 vmpointer *vp;
266
267 redisAssert(val->storage == REDIS_VM_MEMORY);
268 redisAssert(val->refcount == 1);
269 if (vmFindContiguousPages(&page,pages) == REDIS_ERR) return NULL;
270 if (vmWriteObjectOnSwap(val,page) == REDIS_ERR) return NULL;
271
272 vp = createVmPointer(val->type);
273 vp->page = page;
274 vp->usedpages = pages;
275 decrRefCount(val); /* Deallocate the object from memory. */
276 vmMarkPagesUsed(page,pages);
277 redisLog(REDIS_DEBUG,"VM: object %p swapped out at %lld (%lld pages)",
278 (void*) val,
279 (unsigned long long) page, (unsigned long long) pages);
280 server.vm_stats_swapped_objects++;
281 server.vm_stats_swapouts++;
282 return vp;
283 }
284
285 robj *vmReadObjectFromSwap(off_t page, int type) {
286 robj *o;
287
288 if (server.vm_enabled) pthread_mutex_lock(&server.io_swapfile_mutex);
289 if (fseeko(server.vm_fp,page*server.vm_page_size,SEEK_SET) == -1) {
290 redisLog(REDIS_WARNING,
291 "Unrecoverable VM problem in vmReadObjectFromSwap(): can't seek: %s",
292 strerror(errno));
293 _exit(1);
294 }
295 o = rdbLoadObject(type,server.vm_fp);
296 if (o == NULL) {
297 redisLog(REDIS_WARNING, "Unrecoverable VM problem in vmReadObjectFromSwap(): can't load object from swap file: %s", strerror(errno));
298 _exit(1);
299 }
300 if (server.vm_enabled) pthread_mutex_unlock(&server.io_swapfile_mutex);
301 return o;
302 }
303
304 /* Load the specified object from swap to memory.
305 * The newly allocated object is returned.
306 *
307 * If preview is true the unserialized object is returned to the caller, but
308 * the pages are not marked as free, nor is the vp object freed. */
309 robj *vmGenericLoadObject(vmpointer *vp, int preview) {
310 robj *val;
311
312 redisAssert(vp->type == REDIS_VMPOINTER &&
313 (vp->storage == REDIS_VM_SWAPPED || vp->storage == REDIS_VM_LOADING));
314 val = vmReadObjectFromSwap(vp->page,vp->vtype);
315 if (!preview) {
316 redisLog(REDIS_DEBUG, "VM: object %p loaded from disk", (void*)vp);
317 vmMarkPagesFree(vp->page,vp->usedpages);
318 zfree(vp);
319 server.vm_stats_swapped_objects--;
320 } else {
321 redisLog(REDIS_DEBUG, "VM: object %p previewed from disk", (void*)vp);
322 }
323 server.vm_stats_swapins++;
324 return val;
325 }
326
327 /* Plain object loading, from swap to memory.
328 *
329 * 'o' is actually a vmpointer structure that will be freed by the call.
330 * The return value is the loaded object. */
331 robj *vmLoadObject(robj *o) {
332 /* If we are loading the object in background, stop it, we
333 * need to load this object synchronously ASAP. */
334 if (o->storage == REDIS_VM_LOADING)
335 vmCancelThreadedIOJob(o);
336 return vmGenericLoadObject((vmpointer*)o,0);
337 }
338
339 /* Just load the value from disk, without modifying the key.
340 * This is useful when we want to perform some operation on the value
341 * without really bringing it from swap to memory, like while saving the
342 * dataset or rewriting the append only log. */
343 robj *vmPreviewObject(robj *o) {
344 return vmGenericLoadObject((vmpointer*)o,1);
345 }
346
347 /* How good a candidate is this object for swapping?
348 * The better a candidate it is, the greater the returned value.
349 *
350 * Currently we try to perform a fast estimation of the object size in
351 * memory, and combine it with aging information.
352 *
353 * Basically swappability = idle-time * log(estimated size)
354 *
355 * Bigger objects are preferred over smaller objects, but not
356 * proportionally: this is why we use the logarithm. This algorithm is
357 * just a first try and will probably be tuned later. */
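/* A rough worked example of the formula (the exact sizes depend on
 * sizeof(robj) and the sds header, so take the numbers as estimates):
 * a ~1KB raw string idle for 10 minutes scores about 10*log(1+1024) ~= 69,
 * while a 16 byte raw string idle for 100 minutes scores about
 * 100*log(1+48) ~= 390, so a small but long-idle value can still beat a
 * big recently used one. */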
358 double computeObjectSwappability(robj *o) {
359 /* The actual age can be >= minage, but not < minage, as we use
360 * wrapping 21 bit clocks with minutes resolution for the LRU. */
361 time_t minage = abs(server.lruclock - o->lru);
362 long asize = 0, elesize;
363 robj *ele;
364 list *l;
365 listNode *ln;
366 dict *d;
367 struct dictEntry *de;
368 int z;
369
370 if (minage <= 0) return 0;
371 switch(o->type) {
372 case REDIS_STRING:
373 if (o->encoding != REDIS_ENCODING_RAW) {
374 asize = sizeof(*o);
375 } else {
376 asize = sdslen(o->ptr)+sizeof(*o)+sizeof(long)*2;
377 }
378 break;
379 case REDIS_LIST:
380 if (o->encoding == REDIS_ENCODING_ZIPLIST) {
381 asize = sizeof(*o)+ziplistSize(o->ptr);
382 } else {
383 l = o->ptr;
384 ln = listFirst(l);
385 asize = sizeof(list);
386 if (ln) {
387 ele = ln->value;
388 elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
389 (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
390 asize += (sizeof(listNode)+elesize)*listLength(l);
391 }
392 }
393 break;
394 case REDIS_SET:
395 case REDIS_ZSET:
396 z = (o->type == REDIS_ZSET);
397 d = z ? ((zset*)o->ptr)->dict : o->ptr;
398
399 asize = sizeof(dict)+(sizeof(struct dictEntry*)*dictSlots(d));
400 if (z) asize += sizeof(zset)-sizeof(dict);
401 if (dictSize(d)) {
402 de = dictGetRandomKey(d);
403 ele = dictGetEntryKey(de);
404 elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
405 (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
406 asize += (sizeof(struct dictEntry)+elesize)*dictSize(d);
407 if (z) asize += sizeof(zskiplistNode)*dictSize(d);
408 }
409 break;
410 case REDIS_HASH:
411 if (o->encoding == REDIS_ENCODING_ZIPMAP) {
412 unsigned char *p = zipmapRewind((unsigned char*)o->ptr);
413 unsigned int len = zipmapLen((unsigned char*)o->ptr);
414 unsigned int klen, vlen;
415 unsigned char *key, *val;
416
417 if ((p = zipmapNext(p,&key,&klen,&val,&vlen)) == NULL) {
418 klen = 0;
419 vlen = 0;
420 }
421 asize = len*(klen+vlen+3);
422 } else if (o->encoding == REDIS_ENCODING_HT) {
423 d = o->ptr;
424 asize = sizeof(dict)+(sizeof(struct dictEntry*)*dictSlots(d));
425 if (dictSize(d)) {
426 de = dictGetRandomKey(d);
427 ele = dictGetEntryKey(de);
428 elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
429 (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
430 ele = dictGetEntryVal(de);
431 elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
432 (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
433 asize += (sizeof(struct dictEntry)+elesize)*dictSize(d);
434 }
435 }
436 break;
437 }
438 return (double)minage*log(1+asize);
439 }
440
441 /* Try to swap an object that's a good candidate for swapping.
442 * Returns REDIS_OK if the object was swapped, REDIS_ERR if it's not possible
443 * to swap any object at all.
444 *
445 * If 'usethreads' is true, Redis will try to swap the object in the
446 * background using I/O threads. */
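/* The selection below is best effort random sampling: for every DB we look
 * at a few random keys (retrying a bounded number of times when we hit
 * values that are not in memory or are shared), score each candidate with
 * computeObjectSwappability(), and swap out the best scoring one. */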
447 int vmSwapOneObject(int usethreads) {
448 int j, i;
449 struct dictEntry *best = NULL;
450 double best_swappability = 0;
451 redisDb *best_db = NULL;
452 robj *val;
453 sds key;
454
455 for (j = 0; j < server.dbnum; j++) {
456 redisDb *db = server.db+j;
457 /* Why is maxtries set to 100?
458 * Because this way (usually) we'll find 1 object even if just 1% - 2%
459 * of the objects are swappable. */
460 int maxtries = 100;
461
462 if (dictSize(db->dict) == 0) continue;
463 for (i = 0; i < 5; i++) {
464 dictEntry *de;
465 double swappability;
466
467 if (maxtries) maxtries--;
468 de = dictGetRandomKey(db->dict);
469 val = dictGetEntryVal(de);
470 /* Only swap objects that are currently in memory.
471 *
472 * Also don't swap shared objects: it's not a good idea in general, and
473 * we need to ensure that the main thread does not touch the
474 * object while the I/O thread is using it, which we can't
475 * guarantee for objects shared by other keys without an additional mutex. */
476 if (val->storage != REDIS_VM_MEMORY || val->refcount != 1) {
477 if (maxtries) i--; /* don't count this try */
478 continue;
479 }
480 swappability = computeObjectSwappability(val);
481 if (!best || swappability > best_swappability) {
482 best = de;
483 best_swappability = swappability;
484 best_db = db;
485 }
486 }
487 }
488 if (best == NULL) return REDIS_ERR;
489 key = dictGetEntryKey(best);
490 val = dictGetEntryVal(best);
491
492 redisLog(REDIS_DEBUG,"Key with best swappability: %s, %f",
493 key, best_swappability);
494
495 /* Swap it */
496 if (usethreads) {
497 robj *keyobj = createStringObject(key,sdslen(key));
498 vmSwapObjectThreaded(keyobj,val,best_db);
499 decrRefCount(keyobj);
500 return REDIS_OK;
501 } else {
502 vmpointer *vp;
503
504 if ((vp = vmSwapObjectBlocking(val)) != NULL) {
505 dictGetEntryVal(best) = vp;
506 return REDIS_OK;
507 } else {
508 return REDIS_ERR;
509 }
510 }
511 }
512
513 int vmSwapOneObjectBlocking() {
514 return vmSwapOneObject(0);
515 }
516
517 int vmSwapOneObjectThreaded() {
518 return vmSwapOneObject(1);
519 }
520
521 /* Return true if it's safe to swap out objects in a given moment.
522 * Basically we don't want to swap objects out while there is a BGSAVE
523 * or a BGREWRITEAOF running in the background. */
524 int vmCanSwapOut(void) {
525 return (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1);
526 }
527
528 /* =================== Virtual Memory - Threaded I/O ======================= */
529
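/* Overview of the threaded I/O machinery implemented below.
 *
 * Jobs move across three queues, always manipulated under server.io_mutex:
 *   io_newjobs    -> waiting to be picked up by an I/O thread
 *   io_processing -> currently handled by an I/O thread
 *   io_processed  -> done, waiting for post-processing in the main thread
 *
 * There are three job types: REDIS_IOJOB_LOAD (read a value from the swap
 * file), REDIS_IOJOB_PREPARE_SWAP (compute how many pages a value needs),
 * and REDIS_IOJOB_DO_SWAP (actually write the value on the swap file).
 * A swap starts as PREPARE_SWAP and is requeued as DO_SWAP once the main
 * thread has reserved the pages. Every time a thread finishes a job it
 * writes a byte into io_ready_pipe_write, so the event loop wakes up and
 * runs vmThreadedIOCompletedJob(). */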
530 void freeIOJob(iojob *j) {
531 if ((j->type == REDIS_IOJOB_PREPARE_SWAP ||
532 j->type == REDIS_IOJOB_DO_SWAP ||
533 j->type == REDIS_IOJOB_LOAD) && j->val != NULL)
534 {
535 /* we fix the storage type, otherwise decrRefCount() will try to
536 * kill the I/O thread job (that no longer exists). */
537 if (j->val->storage == REDIS_VM_SWAPPING)
538 j->val->storage = REDIS_VM_MEMORY;
539 decrRefCount(j->val);
540 }
541 decrRefCount(j->key);
542 zfree(j);
543 }
544
545 /* Every time a thread finishes a job, it writes a byte into the write side
546 * of a unix pipe in order to "awake" the main thread, and this function
547 * is called. */
548 void vmThreadedIOCompletedJob(aeEventLoop *el, int fd, void *privdata,
549 int mask)
550 {
551 char buf[1];
552 int retval, processed = 0, toprocess = -1, trytoswap = 1;
553 REDIS_NOTUSED(el);
554 REDIS_NOTUSED(mask);
555 REDIS_NOTUSED(privdata);
556
557 /* For every byte we read in the read side of the pipe, there is one
558 * I/O job completed to process. */
559 while((retval = read(fd,buf,1)) == 1) {
560 iojob *j;
561 listNode *ln;
562 struct dictEntry *de;
563
564 redisLog(REDIS_DEBUG,"Processing I/O completed job");
565
566 /* Get the processed element (the oldest one) */
567 lockThreadedIO();
568 redisAssert(listLength(server.io_processed) != 0);
569 if (toprocess == -1) {
570 toprocess = (listLength(server.io_processed)*REDIS_MAX_COMPLETED_JOBS_PROCESSED)/100;
571 if (toprocess <= 0) toprocess = 1;
572 }
573 ln = listFirst(server.io_processed);
574 j = ln->value;
575 listDelNode(server.io_processed,ln);
576 unlockThreadedIO();
577 /* If this job is marked as canceled, just ignore it */
578 if (j->canceled) {
579 freeIOJob(j);
580 continue;
581 }
582 /* Post process it in the main thread, as there are things we
583 * can only do here to avoid race conditions and/or invasive locks */
584 redisLog(REDIS_DEBUG,"COMPLETED Job type: %d, ID %p, key: %s", j->type, (void*)j->id, (unsigned char*)j->key->ptr);
585 de = dictFind(j->db->dict,j->key->ptr);
586 redisAssert(de != NULL);
587 if (j->type == REDIS_IOJOB_LOAD) {
588 redisDb *db;
589 vmpointer *vp = dictGetEntryVal(de);
590
591 /* Key loaded, bring it back home */
592 vmMarkPagesFree(vp->page,vp->usedpages);
593 redisLog(REDIS_DEBUG, "VM: object %s loaded from disk (threaded)",
594 (unsigned char*) j->key->ptr);
595 server.vm_stats_swapped_objects--;
596 server.vm_stats_swapins++;
597 dictGetEntryVal(de) = j->val;
598 incrRefCount(j->val);
599 db = j->db;
600 /* Handle clients waiting for this key to be loaded. */
601 handleClientsBlockedOnSwappedKey(db,j->key);
602 freeIOJob(j);
603 zfree(vp);
604 } else if (j->type == REDIS_IOJOB_PREPARE_SWAP) {
605 /* Now we know the amount of pages required to swap this object.
606 * Let's find some space for it, and queue this task again
607 * rebranded as REDIS_IOJOB_DO_SWAP. */
608 if (!vmCanSwapOut() ||
609 vmFindContiguousPages(&j->page,j->pages) == REDIS_ERR)
610 {
611 /* Ooops... no space or we can't swap as there is
612 * a fork()ed Redis trying to save stuff on disk. */
613 j->val->storage = REDIS_VM_MEMORY; /* undo operation */
614 freeIOJob(j);
615 } else {
616 /* Note that we need to mark these pages as used now;
617 * if the job is canceled later, we'll mark them as free
618 * again. */
619 vmMarkPagesUsed(j->page,j->pages);
620 j->type = REDIS_IOJOB_DO_SWAP;
621 lockThreadedIO();
622 queueIOJob(j);
623 unlockThreadedIO();
624 }
625 } else if (j->type == REDIS_IOJOB_DO_SWAP) {
626 vmpointer *vp;
627
628 /* Key swapped. We can finally free some memory. */
629 if (j->val->storage != REDIS_VM_SWAPPING) {
630 vmpointer *vp = (vmpointer*) j->id;
631 printf("storage: %d\n",vp->storage);
632 printf("key->name: %s\n",(char*)j->key->ptr);
633 printf("val: %p\n",(void*)j->val);
634 printf("val->type: %d\n",j->val->type);
635 printf("val->ptr: %s\n",(char*)j->val->ptr);
636 }
637 redisAssert(j->val->storage == REDIS_VM_SWAPPING);
638 vp = createVmPointer(j->val->type);
639 vp->page = j->page;
640 vp->usedpages = j->pages;
641 dictGetEntryVal(de) = vp;
642 /* Fix the storage otherwise decrRefCount will attempt to
643 * remove the associated I/O job */
644 j->val->storage = REDIS_VM_MEMORY;
645 decrRefCount(j->val);
646 redisLog(REDIS_DEBUG,
647 "VM: object %s swapped out at %lld (%lld pages) (threaded)",
648 (unsigned char*) j->key->ptr,
649 (unsigned long long) j->page, (unsigned long long) j->pages);
650 server.vm_stats_swapped_objects++;
651 server.vm_stats_swapouts++;
652 freeIOJob(j);
653 /* Put a few more swap requests in queue if we are still
654 * out of memory */
655 if (trytoswap && vmCanSwapOut() &&
656 zmalloc_used_memory() > server.vm_max_memory)
657 {
658 int more = 1;
659 while(more) {
660 lockThreadedIO();
661 more = listLength(server.io_newjobs) <
662 (unsigned) server.vm_max_threads;
663 unlockThreadedIO();
664 /* Don't waste CPU time if swappable objects are rare. */
665 if (vmSwapOneObjectThreaded() == REDIS_ERR) {
666 trytoswap = 0;
667 break;
668 }
669 }
670 }
671 }
672 processed++;
673 if (processed == toprocess) return;
674 }
675 if (retval < 0 && errno != EAGAIN) {
676 redisLog(REDIS_WARNING,
677 "WARNING: read(2) error in vmThreadedIOCompletedJob() %s",
678 strerror(errno));
679 }
680 }
681
682 void lockThreadedIO(void) {
683 pthread_mutex_lock(&server.io_mutex);
684 }
685
686 void unlockThreadedIO(void) {
687 pthread_mutex_unlock(&server.io_mutex);
688 }
689
690 /* Remove the specified object from the threaded I/O queue if still not
691 * processed, otherwise make sure to flag it as canceled. */
692 void vmCancelThreadedIOJob(robj *o) {
693 list *lists[3] = {
694 server.io_newjobs, /* 0 */
695 server.io_processing, /* 1 */
696 server.io_processed /* 2 */
697 };
698 int i;
699
700 redisAssert(o->storage == REDIS_VM_LOADING || o->storage == REDIS_VM_SWAPPING);
701 again:
702 lockThreadedIO();
703 /* Search for a matching object in one of the queues */
704 for (i = 0; i < 3; i++) {
705 listNode *ln;
706 listIter li;
707
708 listRewind(lists[i],&li);
709 while ((ln = listNext(&li)) != NULL) {
710 iojob *job = ln->value;
711
712 if (job->canceled) continue; /* Skip this, already canceled. */
713 if (job->id == o) {
714 redisLog(REDIS_DEBUG,"*** CANCELED %p (key %s) (type %d) (LIST ID %d)\n",
715 (void*)job, (char*)job->key->ptr, job->type, i);
716 /* Mark the pages as free since the swap didn't happen,
717 * or happened but is now discarded. */
718 if (i != 1 && job->type == REDIS_IOJOB_DO_SWAP)
719 vmMarkPagesFree(job->page,job->pages);
720 /* Cancel the job. It depends on the list the job is
721 * living in. */
722 switch(i) {
723 case 0: /* io_newjobs */
724 /* If the job was not yet processed the best thing to do
725 * is to remove it from the queue entirely. */
726 freeIOJob(job);
727 listDelNode(lists[i],ln);
728 break;
729 case 1: /* io_processing */
730 /* Oh Shi- the thread is messing with the Job:
731 *
732 * Probably it's accessing the object if this is a
733 * PREPARE_SWAP or DO_SWAP job.
734 * If it's a LOAD job it may be reading from disk, and
735 * if we don't wait for the job to terminate before
736 * canceling it, data in these pages may get corrupted
737 * a few microseconds later. So the short story is:
738 *
739 * Better to wait for the job to move into the
740 * next queue (processed)... */
741
742 /* We try again and again until the job is completed. */
743 unlockThreadedIO();
744 /* But let's wait some time for the I/O thread
745 * to finish with this job. After all this condition
746 * should be very rare. */
747 usleep(1);
748 goto again;
749 case 2: /* io_processed */
750 /* The job was already processed, that's easy...
751 * just mark it as canceled so that we'll ignore it
752 * when processing completed jobs. */
753 job->canceled = 1;
754 break;
755 }
756 /* Finally we have to adjust the storage type of the object
757 * in order to "UNDO" the operation. */
758 if (o->storage == REDIS_VM_LOADING)
759 o->storage = REDIS_VM_SWAPPED;
760 else if (o->storage == REDIS_VM_SWAPPING)
761 o->storage = REDIS_VM_MEMORY;
762 unlockThreadedIO();
763 redisLog(REDIS_DEBUG,"*** DONE");
764 return;
765 }
766 }
767 }
768 unlockThreadedIO();
769 printf("Not found: %p\n", (void*)o);
770 redisAssert(1 != 1); /* We should never reach this */
771 }
772
773 void *IOThreadEntryPoint(void *arg) {
774 iojob *j;
775 listNode *ln;
776 REDIS_NOTUSED(arg);
777
778 pthread_detach(pthread_self());
779 while(1) {
780 /* Get a new job to process */
781 lockThreadedIO();
782 if (listLength(server.io_newjobs) == 0) {
783 /* No new jobs in queue, exit. */
784 redisLog(REDIS_DEBUG,"Thread %ld exiting, nothing to do",
785 (long) pthread_self());
786 server.io_active_threads--;
787 unlockThreadedIO();
788 return NULL;
789 }
790 ln = listFirst(server.io_newjobs);
791 j = ln->value;
792 listDelNode(server.io_newjobs,ln);
793 /* Add the job in the processing queue */
794 j->thread = pthread_self();
795 listAddNodeTail(server.io_processing,j);
796 ln = listLast(server.io_processing); /* We use ln later to remove it */
797 unlockThreadedIO();
798 redisLog(REDIS_DEBUG,"Thread %ld got a new job (type %d): %p about key '%s'",
799 (long) pthread_self(), j->type, (void*)j, (char*)j->key->ptr);
800
801 /* Process the Job */
802 if (j->type == REDIS_IOJOB_LOAD) {
803 vmpointer *vp = (vmpointer*)j->id;
804 j->val = vmReadObjectFromSwap(j->page,vp->vtype);
805 } else if (j->type == REDIS_IOJOB_PREPARE_SWAP) {
806 FILE *fp = fopen("/dev/null","w+");
807 j->pages = rdbSavedObjectPages(j->val,fp);
808 fclose(fp);
809 } else if (j->type == REDIS_IOJOB_DO_SWAP) {
810 if (vmWriteObjectOnSwap(j->val,j->page) == REDIS_ERR)
811 j->canceled = 1;
812 }
813
814 /* Done: insert the job into the processed queue */
815 redisLog(REDIS_DEBUG,"Thread %ld completed the job: %p (key %s)",
816 (long) pthread_self(), (void*)j, (char*)j->key->ptr);
817 lockThreadedIO();
818 listDelNode(server.io_processing,ln);
819 listAddNodeTail(server.io_processed,j);
820 unlockThreadedIO();
821
822 /* Signal the main thread there is new stuff to process */
823 redisAssert(write(server.io_ready_pipe_write,"x",1) == 1);
824 }
825 return NULL; /* never reached */
826 }
827
828 void spawnIOThread(void) {
829 pthread_t thread;
830 sigset_t mask, omask;
831 int err;
832
833 sigemptyset(&mask);
834 sigaddset(&mask,SIGCHLD);
835 sigaddset(&mask,SIGHUP);
836 sigaddset(&mask,SIGPIPE);
837 pthread_sigmask(SIG_SETMASK, &mask, &omask);
838 while ((err = pthread_create(&thread,&server.io_threads_attr,IOThreadEntryPoint,NULL)) != 0) {
839 redisLog(REDIS_WARNING,"Unable to spawn an I/O thread: %s",
840 strerror(err));
841 usleep(1000000);
842 }
843 pthread_sigmask(SIG_SETMASK, &omask, NULL);
844 server.io_active_threads++;
845 }
846
847 /* We need to wait for the last thread to exit before we are able to
848 * fork() in order to BGSAVE or BGREWRITEAOF. */
849 void waitEmptyIOJobsQueue(void) {
850 while(1) {
851 int io_processed_len;
852
853 lockThreadedIO();
854 if (listLength(server.io_newjobs) == 0 &&
855 listLength(server.io_processing) == 0 &&
856 server.io_active_threads == 0)
857 {
858 unlockThreadedIO();
859 return;
860 }
861 /* While waiting for the empty jobs queue condition we post-process some
862 * finished jobs, as I/O threads may be hanging trying to write against
863 * the io_ready_pipe_write FD when there are so many pending jobs that
864 * the pipe is full and the write blocks. */
865 io_processed_len = listLength(server.io_processed);
866 unlockThreadedIO();
867 if (io_processed_len) {
868 vmThreadedIOCompletedJob(NULL,server.io_ready_pipe_read,NULL,0);
869 usleep(1000); /* 1 millisecond */
870 } else {
871 usleep(10000); /* 10 milliseconds */
872 }
873 }
874 }
875
876 void vmReopenSwapFile(void) {
877 /* Note: we don't close the old one as we are in the child process
878 * and don't want to mess at all with the original file object. */
879 server.vm_fp = fopen(server.vm_swap_file,"r+b");
880 if (server.vm_fp == NULL) {
881 redisLog(REDIS_WARNING,"Can't re-open the VM swap file: %s. Exiting.",
882 server.vm_swap_file);
883 _exit(1);
884 }
885 server.vm_fd = fileno(server.vm_fp);
886 }
887
888 /* This function must be called with the threaded I/O lock held. */
889 void queueIOJob(iojob *j) {
890 redisLog(REDIS_DEBUG,"Queued IO Job %p type %d about key '%s'\n",
891 (void*)j, j->type, (char*)j->key->ptr);
892 listAddNodeTail(server.io_newjobs,j);
893 if (server.io_active_threads < server.vm_max_threads)
894 spawnIOThread();
895 }
896
897 int vmSwapObjectThreaded(robj *key, robj *val, redisDb *db) {
898 iojob *j;
899
900 j = zmalloc(sizeof(*j));
901 j->type = REDIS_IOJOB_PREPARE_SWAP;
902 j->db = db;
903 j->key = key;
904 incrRefCount(key);
905 j->id = j->val = val;
906 incrRefCount(val);
907 j->canceled = 0;
908 j->thread = (pthread_t) -1;
909 val->storage = REDIS_VM_SWAPPING;
910
911 lockThreadedIO();
912 queueIOJob(j);
913 unlockThreadedIO();
914 return REDIS_OK;
915 }
916
917 /* ============ Virtual Memory - Blocking clients on missing keys =========== */
918
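/* Two data structures track blocked clients:
 *   - c->io_keys:  for every client, the list of keys it is waiting for;
 *   - db->io_keys: for every swapped key, the list of clients waiting for
 *                  it to be loaded.
 * waitForSwappedKey() fills both, dontWaitForSwappedKey() cleans them up. */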
919 /* This function makes the client 'c' wait for the key 'key' to be loaded.
920 * If there is not already a job loading the key, one is created.
921 * The key is added to the io_keys list in the client structure, and also
922 * to the per-DB hash table mapping swapped keys to waiting clients
923 * (c->db->io_keys). */
924 int waitForSwappedKey(redisClient *c, robj *key) {
925 struct dictEntry *de;
926 robj *o;
927 list *l;
928
929 /* If the key does not exist or is already in RAM we don't need to
930 * block the client at all. */
931 de = dictFind(c->db->dict,key->ptr);
932 if (de == NULL) return 0;
933 o = dictGetEntryVal(de);
934 if (o->storage == REDIS_VM_MEMORY) {
935 return 0;
936 } else if (o->storage == REDIS_VM_SWAPPING) {
937 /* We were swapping the key, undo it! */
938 vmCancelThreadedIOJob(o);
939 return 0;
940 }
941
942 /* OK: the key is either swapped, or being loaded just now. */
943
944 /* Add the key to the list of keys this client is waiting for.
945 * This maps clients to keys they are waiting for. */
946 listAddNodeTail(c->io_keys,key);
947 incrRefCount(key);
948
949 /* Add the client to the swapped keys => clients waiting map. */
950 de = dictFind(c->db->io_keys,key);
951 if (de == NULL) {
952 int retval;
953
954 /* For every key we take a list of clients blocked for it */
955 l = listCreate();
956 retval = dictAdd(c->db->io_keys,key,l);
957 incrRefCount(key);
958 redisAssert(retval == DICT_OK);
959 } else {
960 l = dictGetEntryVal(de);
961 }
962 listAddNodeTail(l,c);
963
964 /* Are we already loading the key from disk? If not create a job */
965 if (o->storage == REDIS_VM_SWAPPED) {
966 iojob *j;
967 vmpointer *vp = (vmpointer*)o;
968
969 o->storage = REDIS_VM_LOADING;
970 j = zmalloc(sizeof(*j));
971 j->type = REDIS_IOJOB_LOAD;
972 j->db = c->db;
973 j->id = (robj*)vp;
974 j->key = key;
975 incrRefCount(key);
976 j->page = vp->page;
977 j->val = NULL;
978 j->canceled = 0;
979 j->thread = (pthread_t) -1;
980 lockThreadedIO();
981 queueIOJob(j);
982 unlockThreadedIO();
983 }
984 return 1;
985 }
986
987 /* Preload keys for any command with first, last and step values for
988 * the command keys prototype, as defined in the command table. */
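/* For example, a command declared in the table with vm_firstkey=1,
 * vm_lastkey=1, vm_keystep=1 (a single key command such as GET) preloads
 * only argv[1], while one declared with vm_firstkey=1, vm_lastkey=-1,
 * vm_keystep=2 (an MSET-like key/value list) preloads argv[1], argv[3],
 * and so on. */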
989 void waitForMultipleSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) {
990 int j, last;
991 if (cmd->vm_firstkey == 0) return;
992 last = cmd->vm_lastkey;
993 if (last < 0) last = argc+last;
994 for (j = cmd->vm_firstkey; j <= last; j += cmd->vm_keystep) {
995 redisAssert(j < argc);
996 waitForSwappedKey(c,argv[j]);
997 }
998 }
999
1000 /* Preload keys needed for the ZUNIONSTORE and ZINTERSTORE commands.
1001 * Note that the number of keys to preload is user-defined, so we need to
1002 * apply a sanity check against argc. */
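/* For example, with "ZUNIONSTORE dest 2 key1 key2" argv[2] is "2", so only
 * argv[3] and argv[4] (key1 and key2) are preloaded; the destination key
 * itself is not preloaded by this function. */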
1003 void zunionInterBlockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) {
1004 int i, num;
1005 REDIS_NOTUSED(cmd);
1006
1007 num = atoi(argv[2]->ptr);
1008 if (num > (argc-3)) return;
1009 for (i = 0; i < num; i++) {
1010 waitForSwappedKey(c,argv[3+i]);
1011 }
1012 }
1013
1014 /* Preload keys needed to execute the entire MULTI/EXEC block.
1015 *
1016 * This function is called by blockClientOnSwappedKeys when EXEC is issued,
1017 * and will block the client when any command requires a swapped out value. */
1018 void execBlockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd, int argc, robj **argv) {
1019 int i, margc;
1020 struct redisCommand *mcmd;
1021 robj **margv;
1022 REDIS_NOTUSED(cmd);
1023 REDIS_NOTUSED(argc);
1024 REDIS_NOTUSED(argv);
1025
1026 if (!(c->flags & REDIS_MULTI)) return;
1027 for (i = 0; i < c->mstate.count; i++) {
1028 mcmd = c->mstate.commands[i].cmd;
1029 margc = c->mstate.commands[i].argc;
1030 margv = c->mstate.commands[i].argv;
1031
1032 if (mcmd->vm_preload_proc != NULL) {
1033 mcmd->vm_preload_proc(c,mcmd,margc,margv);
1034 } else {
1035 waitForMultipleSwappedKeys(c,mcmd,margc,margv);
1036 }
1037 }
1038 }
1039
1040 /* Is this client attempting to run a command against swapped keys?
1041 * If so, block it ASAP, load the keys in background, then resume it.
1042 *
1043 * The important idea about this function is that it can fail! If keys are
1044 * still swapped out when the client is resumed, the key lookups will
1045 * simply block while loading keys from disk. In practical terms this should
1046 * only happen with the SORT BY command or if there is a bug in this function.
1047 *
1048 * Return 1 if the client is marked as blocked, 0 if the client can
1049 * continue as the keys it is going to access appear to be in memory. */
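/* The client is resumed later: once the last key it was waiting for is
 * loaded, handleClientsBlockedOnSwappedKey() appends it to
 * server.io_ready_clients so that the queued command can finally be run. */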
1050 int blockClientOnSwappedKeys(redisClient *c, struct redisCommand *cmd) {
1051 if (cmd->vm_preload_proc != NULL) {
1052 cmd->vm_preload_proc(c,cmd,c->argc,c->argv);
1053 } else {
1054 waitForMultipleSwappedKeys(c,cmd,c->argc,c->argv);
1055 }
1056
1057 /* If the client was blocked for at least one key, mark it as blocked. */
1058 if (listLength(c->io_keys)) {
1059 c->flags |= REDIS_IO_WAIT;
1060 aeDeleteFileEvent(server.el,c->fd,AE_READABLE);
1061 server.vm_blocked_clients++;
1062 return 1;
1063 } else {
1064 return 0;
1065 }
1066 }
1067
1068 /* Remove the 'key' from the list of blocked keys for a given client.
1069 *
1070 * The function returns 1 when there are no longer blocking keys after
1071 * the current one was removed (and the client can be unblocked). */
1072 int dontWaitForSwappedKey(redisClient *c, robj *key) {
1073 list *l;
1074 listNode *ln;
1075 listIter li;
1076 struct dictEntry *de;
1077
1078 /* The key object might be destroyed when deleted from the c->io_keys
1079 * list (and the "key" argument is physically the same object as the
1080 * object inside the list), so we need to protect it. */
1081 incrRefCount(key);
1082
1083 /* Remove the key from the list of keys this client is waiting for. */
1084 listRewind(c->io_keys,&li);
1085 while ((ln = listNext(&li)) != NULL) {
1086 if (equalStringObjects(ln->value,key)) {
1087 listDelNode(c->io_keys,ln);
1088 break;
1089 }
1090 }
1091 redisAssert(ln != NULL);
1092
1093 /* Remove the client from the key => waiting clients map. */
1094 de = dictFind(c->db->io_keys,key);
1095 redisAssert(de != NULL);
1096 l = dictGetEntryVal(de);
1097 ln = listSearchKey(l,c);
1098 redisAssert(ln != NULL);
1099 listDelNode(l,ln);
1100 if (listLength(l) == 0)
1101 dictDelete(c->db->io_keys,key);
1102
1103 decrRefCount(key);
1104 return listLength(c->io_keys) == 0;
1105 }
1106
1107 /* Every time we know a key was loaded back in memory, we handle clients
1108 * waiting for this key, if any. */
1109 void handleClientsBlockedOnSwappedKey(redisDb *db, robj *key) {
1110 struct dictEntry *de;
1111 list *l;
1112 listNode *ln;
1113 int len;
1114
1115 de = dictFind(db->io_keys,key);
1116 if (!de) return;
1117
1118 l = dictGetEntryVal(de);
1119 len = listLength(l);
1120 /* Note: we can't use something like while(listLength(l)) as the list
1121 * can be freed by the calling function when we remove the last element. */
1122 while (len--) {
1123 ln = listFirst(l);
1124 redisClient *c = ln->value;
1125
1126 if (dontWaitForSwappedKey(c,key)) {
1127 /* Put the client in the list of clients ready to go, as we
1128 * loaded all the keys it was waiting for. */
1129 listAddNodeTail(server.io_ready_clients,c);
1130 }
1131 }
1132 }