[apple/xnu.git] / osfmk / vm / vm_purgeable.c (xnu-1228.12.14)

/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>			/* kmem_alloc */
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>
#include <kern/sched_prim.h>

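/*
 * Each token records the number of pages that entered the inactive queue
 * since the previous token on its queue was created.  Tokens live in a flat
 * array ('tokens', grown on demand) and are chained into per-queue lists by
 * index through 'next'; index 0 is reserved as the list terminator.  A token
 * whose count has dropped to zero is "ripe"; when a queue's head token is
 * ripe, one object on that queue may be purged.
 */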
struct token {
	token_cnt_t	count;
	token_idx_t	next;
};

struct token	*tokens;
token_idx_t	token_q_max_cnt = 0;
vm_size_t	token_q_cur_size = 0;

token_idx_t	token_free_idx = 0;		/* head of free queue */
token_idx_t	token_init_idx = 1;		/* token 0 is reserved!! */
int32_t		token_new_pagecount = 0;	/* count of pages that will
						 * be added onto token queue */

int		available_for_purge = 0;	/* increase when ripe token
						 * added, decrease when ripe
						 * token removed; protect with
						 * page_queue_lock */

static int token_q_allocating = 0;	/* flag to singlethread allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];

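/*
 * Three queue types exist (see PURGEABLE_Q_TYPE_*): OBSOLETE, whose tokens
 * are created ripe so its objects are always purgeable; FIFO, where newly
 * added objects are queued at the tail ("last to die"); and LIFO, where they
 * are queued at the head ("first to die").
 */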
#define TOKEN_ADD		0x40	/* 0x100 */
#define TOKEN_DELETE		0x41	/* 0x104 */
#define TOKEN_QUEUE_ADVANCE	0x42	/* 0x108 actually means "token ripened" */
#define TOKEN_OBJECT_PURGED	0x43	/* 0x10c */
#define OBJECT_ADDED		0x50	/* 0x140 */
#define OBJECT_REMOVED		0x51	/* 0x144 */

static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

#if MACH_ASSERT
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
	int		token_cnt = 0, page_cnt = 0;
	token_idx_t	token = queue->token_q_head;
	token_idx_t	unripe = 0;
	int		our_inactive_count;

	while (token) {
		if (tokens[token].count != 0) {
			assert(queue->token_q_unripe);
			if (unripe == 0) {
				assert(token == queue->token_q_unripe);
				unripe = token;
			}
			page_cnt += tokens[token].count;
		}
		if (tokens[token].next == 0)
			assert(queue->token_q_tail == token);

		token_cnt++;
		token = tokens[token].next;
	}

	if (unripe)
		assert(queue->token_q_unripe == unripe);
	assert(token_cnt == queue->debug_count_tokens);

	/* obsolete queue doesn't maintain token counts */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
		our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
		assert(our_inactive_count >= 0);
		assert((uint32_t) our_inactive_count == vm_page_inactive_count);
	}
}
#endif

kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
	/* new token */
	token_idx_t	token;
	enum purgeable_q_type i;

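	/*
	 * Find a token slot: reuse one from the free list if possible,
	 * otherwise hand out the next never-used slot, and only as a last
	 * resort grow the token array.  Growing requires dropping the page
	 * queue lock around the allocation, so token_q_allocating
	 * single-threads that path and the state is re-checked once the lock
	 * has been retaken.
	 */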
find_available_token:

	if (token_free_idx) {				/* unused tokens available */
		token = token_free_idx;
		token_free_idx = tokens[token_free_idx].next;
	} else if (token_init_idx < token_q_max_cnt) {	/* lazy token array init */
		token = token_init_idx;
		token_init_idx++;
	} else {					/* allocate more memory */
		/* Wait if another thread is inside the memory alloc section */
		while (token_q_allocating) {
			wait_result_t res = thread_sleep_mutex((event_t)&token_q_allocating,
							       &vm_page_queue_lock,
							       THREAD_UNINT);
			if (res != THREAD_AWAKENED)
				return KERN_ABORTED;
		}

		/* Check whether memory is still maxed out */
		if (token_init_idx < token_q_max_cnt)
			goto find_available_token;

		/* Still no memory. Allocate some. */
		token_q_allocating = 1;

		/* Drop page queue lock so we can allocate */
		vm_page_unlock_queues();

		struct token	*new_loc;
		vm_size_t	alloc_size = token_q_cur_size + PAGE_SIZE;
		kern_return_t	result;

		if (token_q_cur_size) {
			result = kmem_realloc(kernel_map, (vm_offset_t) tokens, token_q_cur_size,
					      (vm_offset_t *) &new_loc, alloc_size);
		} else {
			result = kmem_alloc(kernel_map, (vm_offset_t *) &new_loc, alloc_size);
		}

		vm_page_lock_queues();

		if (result) {
			/* Unblock waiting threads */
			token_q_allocating = 0;
			thread_wakeup((event_t)&token_q_allocating);
			return result;
		}

		/* If we get here, we allocated new memory. Update pointers and
		 * dealloc old range */
		struct token *old_tokens = tokens;
		tokens = new_loc;
		vm_size_t old_token_q_cur_size = token_q_cur_size;
		token_q_cur_size = alloc_size;
		token_q_max_cnt = token_q_cur_size / sizeof(struct token);
		assert(token_init_idx < token_q_max_cnt);	/* We must have a free token now */

		if (old_token_q_cur_size) {	/* clean up old mapping */
			vm_page_unlock_queues();
			/* kmem_realloc leaves the old region mapped. Get rid of it. */
			kmem_free(kernel_map, (vm_offset_t) old_tokens, old_token_q_cur_size);
			vm_page_lock_queues();
		}

		/* Unblock waiting threads */
		token_q_allocating = 0;
		thread_wakeup((event_t)&token_q_allocating);

		goto find_available_token;
	}

	assert(token);

	/*
	 * the new pagecount we got needs to be applied to all queues except
	 * obsolete
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
		assert(pages >= 0);
		assert(pages <= TOKEN_COUNT_MAX);
		purgeable_queues[i].new_pages = pages;
	}
	token_new_pagecount = 0;

	/* set token counter value */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
		tokens[token].count = queue->new_pages;
	else
		tokens[token].count = 0;	/* all obsolete items are
						 * ripe immediately */
	queue->new_pages = 0;

	/* put token on token counter list */
	tokens[token].next = 0;
	if (queue->token_q_tail == 0) {
		assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
		queue->token_q_head = token;
	} else {
		tokens[queue->token_q_tail].next = token;
	}
	if (queue->token_q_unripe == 0) {	/* only ripe tokens (token
						 * count == 0) in queue */
		if (tokens[token].count > 0)
			queue->token_q_unripe = token;	/* first unripe token */
		else
			available_for_purge++;	/* added a ripe token?
						 * increase available count */
	}
	queue->token_q_tail = token;

#if MACH_ASSERT
	queue->debug_count_tokens++;
	/* Check both queues, since we modified the new_pages count on each */
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
			      queue->type,
			      tokens[token].count,	/* num pages on token
							 * (last token) */
			      queue->debug_count_tokens,
			      0,
			      0);
#endif

	return KERN_SUCCESS;
}

/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 */
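/*
 * (Folding the removed token's count into its successor -- or into
 * queue->new_pages when the queue becomes empty -- keeps the total page
 * count represented by the queue unchanged, since each token's count is
 * relative to the token before it.)
 */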
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
	token_idx_t	token;
	token = queue->token_q_head;

	assert(token);

	if (token) {
		assert(queue->token_q_tail);
		if (queue->token_q_head == queue->token_q_unripe) {
			/* no ripe tokens... must move unripe pointer */
			queue->token_q_unripe = tokens[token].next;
		} else {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		}

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		queue->token_q_head = tokens[token].next;
		if (queue->token_q_head) {
			tokens[queue->token_q_head].count += tokens[token].count;
		} else {
			/* currently no other tokens in the queue */
			/*
			 * the page count must be added to the next newly
			 * created token
			 */
			queue->new_pages += tokens[token].count;
			/* if head is zero, tail is too */
			queue->token_q_tail = 0;
		}

#if MACH_ASSERT
		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
#endif
	}
	return token;
}

/* Delete first token from queue. Return token to token queue. */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
	token_idx_t token = vm_purgeable_token_remove_first(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		token_free_idx = token;
	}
}


void
vm_purgeable_q_advance_all()
{
	/* check queue counters - if they get really large, scale them back.
	 * They tend to get that large when there is no purgeable queue action */
	int i;
	if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1))	/* a system idling for years might get there */
	{
		for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
			int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
			assert(pages >= 0);
			assert(pages <= TOKEN_COUNT_MAX);
			purgeable_queues[i].new_pages = pages;
		}
		token_new_pagecount = 0;
	}

	/*
	 * Decrement token counters. A token counter can reach zero; this
	 * means the object is ripe to be purged. It is not purged immediately,
	 * because that could cause several objects to be purged even if purging
	 * one would satisfy the memory needs. Instead, the pageout thread purges
	 * one after the other by calling vm_purgeable_object_purge_one and then
	 * rechecking the memory balance.
	 *
	 * No need to advance obsolete queue - all items are ripe there,
	 * always
	 */
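	/*
	 * Example: with num_pages == 1 per call, a head token created with a
	 * count of 3 ripens only after this function has run three more
	 * times; each call takes one page off the first unripe token (or off
	 * new_pages when no unripe token exists).
	 */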
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t queue = &purgeable_queues[i];
		uint32_t num_pages = 1;

		/* Iterate over tokens as long as there are unripe tokens. */
		while (queue->token_q_unripe) {
			if (tokens[queue->token_q_unripe].count && num_pages) {
				tokens[queue->token_q_unripe].count -= 1;
				num_pages -= 1;
			}

			if (tokens[queue->token_q_unripe].count == 0) {
				queue->token_q_unripe = tokens[queue->token_q_unripe].next;
				available_for_purge++;
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_QUEUE_ADVANCE)),
						      queue->type,
						      tokens[queue->token_q_head].count,	/* num pages on new
												 * first token */
						      0,
						      available_for_purge,
						      0);
				continue;	/* One token ripened. Make sure to
						 * check the next. */
			}
			if (num_pages == 0)
				break;	/* Current token not ripe and no more pages.
					 * Work done. */
		}

		/*
		 * If there are no unripe tokens in the queue, decrement the
		 * new_pages counter instead. new_pages can be negative, but must be
		 * canceled out by token_new_pagecount -- since the inactive queue as a
		 * whole always contains a nonnegative number of pages.
		 */
		if (!queue->token_q_unripe) {
			queue->new_pages -= num_pages;
			assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
		}
#if MACH_ASSERT
		vm_purgeable_token_check_queue(queue);
#endif
	}
}

/*
 * Grab any ripe object and purge it; obsolete queue first. Then, go through
 * each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *    Yes - purge it. Remove token.
 *    No - check other queue. Is there an object?
 *       No - increment group, then go to (1)
 *       Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *             token from other queue and migrate unripe token from this
 *             queue to other queue.
 */
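/*
 * (The walk described above is implemented by vm_purgeable_object_purge_one()
 * below; the two helpers that follow, vm_purgeable_token_remove_ripe() and
 * vm_purgeable_token_choose_and_delete_ripe(), perform the token removal and
 * migration steps.)
 */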
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
	assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
	/* return token to free list. advance token list. */
	token_idx_t new_head = tokens[queue->token_q_head].next;
	tokens[queue->token_q_head].next = token_free_idx;
	token_free_idx = queue->token_q_head;
	queue->token_q_head = new_head;
	if (new_head == 0)
		queue->token_q_tail = 0;

#if MACH_ASSERT
	queue->debug_count_tokens--;
	vm_purgeable_token_check_queue(queue);
#endif

	available_for_purge--;
	assert(available_for_purge >= 0);
}

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
	assert(queue->token_q_head);

	if (tokens[queue->token_q_head].count == 0) {
		/* This queue has a ripe token. Remove. */
		vm_purgeable_token_remove_ripe(queue);
	} else {
		assert(queue2);
		/*
		 * queue2 must have a ripe token. Remove, and migrate one
		 * from queue to queue2.
		 */
		vm_purgeable_token_remove_ripe(queue2);
		/* migrate unripe token */
		token_idx_t	token;
		token_cnt_t	count;

		/* remove token from queue1 */
		assert(queue->token_q_unripe == queue->token_q_head);	/* queue1 had no unripe
									 * tokens, remember? */
		token = vm_purgeable_token_remove_first(queue);
		assert(token);

		count = tokens[token].count;

		/* migrate to queue2 */
		/* go to migration target loc */
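		/*
		 * Each token's count is relative to the token before it, so
		 * walk queue2 subtracting counts until the remaining count
		 * fits in front of *token_in_queue2; that is where the
		 * migrated token belongs.
		 */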
		token_idx_t *token_in_queue2 = &queue2->token_q_head;
		while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
			count -= tokens[*token_in_queue2].count;
			token_in_queue2 = &tokens[*token_in_queue2].next;
		}

		if ((*token_in_queue2 == queue2->token_q_unripe) ||	/* becomes the first
									 * unripe token */
		    (queue2->token_q_unripe == 0))
			queue2->token_q_unripe = token;	/* must update unripe
							 * pointer */

		/* insert token */
		tokens[token].count = count;
		tokens[token].next = *token_in_queue2;

		/*
		 * If inserting at end, reduce new_pages by that value;
		 * if inserting before a token, reduce the counter of that token.
		 */
		if (*token_in_queue2 == 0) {	/* insertion at end of queue2 */
			queue2->token_q_tail = token;	/* must update tail
							 * pointer */
			assert(queue2->new_pages >= (int32_t) count);
			queue2->new_pages -= count;
		} else {
			assert(tokens[*token_in_queue2].count >= count);
			tokens[*token_in_queue2].count -= count;
		}
		*token_in_queue2 = token;

#if MACH_ASSERT
		queue2->debug_count_tokens++;
		vm_purgeable_token_check_queue(queue2);
#endif
	}
}

/* Find an object that can be locked. Returns locked object. */
static vm_object_t
vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
{
	/*
	 * Usually we would pick the first element from a queue. However, we
	 * might not be able to get a lock on it, in which case we try the
	 * remaining elements in order.
	 */

	vm_object_t object;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (vm_object_lock_try(object)) {
			/* Locked. Great. We'll take it. Remove and return. */
			queue_remove(&queue->objq[group], object,
				     vm_object_t, objq);
			object->objq.next = 0;
			object->objq.prev = 0;
#if MACH_ASSERT
			queue->debug_count_objects--;
#endif
			return object;
		}
	}

	return 0;
}

void
vm_purgeable_object_purge_one(void)
{
	enum purgeable_q_type i;
	int		group;
	vm_object_t	object = 0;
	purgeable_q_t	queue, queue2;

	mutex_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		queue = &purgeable_queues[i];

		/*
		 * Are there any ripe tokens on this queue? If yes, we'll
		 * find an object to purge there
		 */
		if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
			continue;	/* no token? Look at next purgeable
					 * queue */

		/*
		 * Now look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			if (!queue_empty(&queue->objq[group]) &&
			    (object = vm_purgeable_object_find_and_lock(queue, group))) {
				mutex_unlock(&vm_purgeable_queue_lock);
				vm_purgeable_token_choose_and_delete_ripe(queue, 0);
				goto purge_now;
			}
			if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
				/* This is the token migration case, and it works between
				 * FIFO and LIFO only */
				queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
							   PURGEABLE_Q_TYPE_FIFO :
							   PURGEABLE_Q_TYPE_LIFO];

				if (!queue_empty(&queue2->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue2, group))) {
					mutex_unlock(&vm_purgeable_queue_lock);
					vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
					goto purge_now;
				}
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	/*
	 * because we have to do a try_lock on the objects which could fail,
	 * we could end up with no object to purge at this time, even though
	 * we have objects in a purgeable state
	 */
	mutex_unlock(&vm_purgeable_queue_lock);
	return;

purge_now:

	assert(object);
	(void) vm_object_purge(object);
	vm_object_unlock(object);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_OBJECT_PURGED)),
			      (unsigned int) object,	/* purged object */
			      0,
			      available_for_purge,
			      0,
			      0);
}

void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
	mutex_lock(&vm_purgeable_queue_lock);

	if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
		group = 0;
	if (queue->type != PURGEABLE_Q_TYPE_LIFO)	/* fifo and obsolete are
							 * fifo-queued */
		queue_enter(&queue->objq[group], object, vm_object_t, objq);	/* last to die */
	else
		queue_enter_first(&queue->objq[group], object, vm_object_t, objq);	/* first to die */

#if MACH_ASSERT
	queue->debug_count_objects++;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADDED)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);
#endif

	mutex_unlock(&vm_purgeable_queue_lock);
}

/* Look for object. If found, remove from purgeable queue. */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
	enum purgeable_q_type i;
	int	group;

	mutex_lock(&vm_purgeable_queue_lock);
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t queue = &purgeable_queues[i];
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			vm_object_t o;
			for (o = (vm_object_t) queue_first(&queue->objq[group]);
			     !queue_end(&queue->objq[group], (queue_entry_t) o);
			     o = (vm_object_t) queue_next(&o->objq)) {
				if (o == object) {
					queue_remove(&queue->objq[group], object,
						     vm_object_t, objq);
#if MACH_ASSERT
					queue->debug_count_objects--;
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVED)),
							      0,
							      tokens[queue->token_q_head].count,
							      queue->type,
							      group,
							      0);
#endif
					mutex_unlock(&vm_purgeable_queue_lock);
					object->objq.next = 0;
					object->objq.prev = 0;
					return &purgeable_queues[i];
				}
			}
		}
	}
	mutex_unlock(&vm_purgeable_queue_lock);
	return 0;
}