/* osfmk/vm/vm_purgeable.c (xnu-1699.22.81) */
/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>			/* kmem_alloc */
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>
#include <kern/sched_prim.h>

struct token {
	token_cnt_t	count;
	token_idx_t	next;
};
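
/*
 * Tokens live in a single, dynamically grown array ('tokens') and are
 * referenced by index rather than by pointer, so the array can be moved
 * by kmem_realloc() without invalidating the queues.  Index 0 is reserved
 * and doubles as the list terminator / "no token" value.  Free entries are
 * chained through their 'next' field starting at token_free_idx (see
 * vm_purgeable_token_add() below).
 */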

struct token	*tokens;
token_idx_t	token_q_max_cnt = 0;
vm_size_t	token_q_cur_size = 0;

token_idx_t	token_free_idx = 0;		/* head of free queue */
token_idx_t	token_init_idx = 1;		/* token 0 is reserved!! */
int32_t		token_new_pagecount = 0;	/* count of pages that will
						 * be added onto token queue */

int		available_for_purge = 0;	/* increase when ripe token
						 * added, decrease when ripe
						 * token removed.
						 * protected by page_queue_lock
						 */

static int token_q_allocating = 0;	/* flag for singlethreading
					 * allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];

decl_lck_mtx_data(,vm_purgeable_queue_lock)
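
/*
 * Note on locking, as used in this file: vm_purgeable_queue_lock protects
 * the per-type object queues below, while the token queues and
 * available_for_purge are protected by the page queue lock.
 * vm_purgeable_object_purge_one() is entered with the page queue lock held
 * and takes vm_purgeable_queue_lock on top of it;
 * vm_purgeable_object_add()/_remove() take it under the object lock.
 */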

#define TOKEN_ADD		0x40	/* 0x100 */
#define TOKEN_DELETE		0x41	/* 0x104 */
#define TOKEN_RIPEN		0x42	/* 0x108 */
#define OBJECT_ADD		0x48	/* 0x120 */
#define OBJECT_REMOVE		0x49	/* 0x124 */
#define OBJECT_PURGE		0x4a	/* 0x128 */
#define OBJECT_PURGE_ALL	0x4b	/* 0x12c */

static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

#if MACH_ASSERT
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
	int		token_cnt = 0, page_cnt = 0;
	token_idx_t	token = queue->token_q_head;
	token_idx_t	unripe = 0;
	int		our_inactive_count;

	while (token) {
		if (tokens[token].count != 0) {
			assert(queue->token_q_unripe);
			if (unripe == 0) {
				assert(token == queue->token_q_unripe);
				unripe = token;
			}
			page_cnt += tokens[token].count;
		}
		if (tokens[token].next == 0)
			assert(queue->token_q_tail == token);

		token_cnt++;
		token = tokens[token].next;
	}

	if (unripe)
		assert(queue->token_q_unripe == unripe);
	assert(token_cnt == queue->debug_count_tokens);

	/* obsolete queue doesn't maintain token counts */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
		our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
		assert(our_inactive_count >= 0);
		assert((uint32_t) our_inactive_count == vm_page_inactive_count);
	}
}
#endif
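
/*
 * The consistency check above encodes the bookkeeping invariant maintained
 * throughout this file: for the FIFO and LIFO queues, the sum of all token
 * counts on the queue, plus the queue's new_pages counter, plus the global
 * token_new_pagecount, must equal vm_page_inactive_count.  Every inactive
 * page is thus accounted for either by an existing token or by the counters
 * that will seed the next token.
 */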

/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
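/*
 * Token allocation, in outline: first try the free list (token_free_idx),
 * then lazily hand out a not-yet-used slot (token_init_idx), and only if
 * both fail grow the array by one page with kmem_alloc()/kmem_realloc().
 * The growth path drops the page queue lock, so it is single-threaded via
 * the token_q_allocating flag; other would-be allocators sleep on that flag
 * and retry from the top once woken.
 */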
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

	/* new token */
	token_idx_t	token;
	enum purgeable_q_type i;

find_available_token:

	if (token_free_idx) {				/* unused tokens available */
		token = token_free_idx;
		token_free_idx = tokens[token_free_idx].next;
	} else if (token_init_idx < token_q_max_cnt) {	/* lazy token array init */
		token = token_init_idx;
		token_init_idx++;
	} else {					/* allocate more memory */
		/* Wait if another thread is inside the memory alloc section */
		while (token_q_allocating) {
			wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
							  LCK_SLEEP_DEFAULT,
							  (event_t)&token_q_allocating,
							  THREAD_UNINT);
			if (res != THREAD_AWAKENED)
				return KERN_ABORTED;
		}

		/* Check whether memory is still maxed out; another thread
		 * may have grown the array while we slept */
		if (token_init_idx < token_q_max_cnt)
			goto find_available_token;

		/* Still no memory. Allocate some. */
		token_q_allocating = 1;

		/* Drop page queue lock so we can allocate */
		vm_page_unlock_queues();

		struct token	*new_loc;
		vm_size_t	alloc_size = token_q_cur_size + PAGE_SIZE;
		kern_return_t	result;

		if (alloc_size / sizeof (struct token) > TOKEN_COUNT_MAX) {
			result = KERN_RESOURCE_SHORTAGE;
		} else {
			if (token_q_cur_size) {
				result = kmem_realloc(kernel_map,
						      (vm_offset_t) tokens,
						      token_q_cur_size,
						      (vm_offset_t *) &new_loc,
						      alloc_size);
			} else {
				result = kmem_alloc(kernel_map,
						    (vm_offset_t *) &new_loc,
						    alloc_size);
			}
		}

		vm_page_lock_queues();

		if (result) {
			/* Unblock waiting threads */
			token_q_allocating = 0;
			thread_wakeup((event_t)&token_q_allocating);
			return result;
		}

		/* If we get here, we allocated new memory. Update pointers and
		 * dealloc old range */
		struct token *old_tokens = tokens;
		tokens = new_loc;
		vm_size_t old_token_q_cur_size = token_q_cur_size;
		token_q_cur_size = alloc_size;
		token_q_max_cnt = (token_idx_t) (token_q_cur_size /
						 sizeof(struct token));
		assert(token_init_idx < token_q_max_cnt);	/* We must have a free token now */

		if (old_token_q_cur_size) {	/* clean up old mapping */
			vm_page_unlock_queues();
			/* kmem_realloc leaves the old region mapped. Get rid of it. */
			kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
			vm_page_lock_queues();
		}

		/* Unblock waiting threads */
		token_q_allocating = 0;
		thread_wakeup((event_t)&token_q_allocating);

		goto find_available_token;
	}

	assert(token);

	/*
	 * the new pagecount we got needs to be applied to all queues except
	 * obsolete
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
		assert(pages >= 0);
		assert(pages <= TOKEN_COUNT_MAX);
		purgeable_queues[i].new_pages = (int32_t) pages;
		assert(purgeable_queues[i].new_pages == pages);
	}
	token_new_pagecount = 0;

	/* set token counter value */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
		tokens[token].count = queue->new_pages;
	else
		tokens[token].count = 0;	/* all obsolete items are
						 * ripe immediately */
	queue->new_pages = 0;

	/* put token on token counter list */
	tokens[token].next = 0;
	if (queue->token_q_tail == 0) {
		assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
		queue->token_q_head = token;
	} else {
		tokens[queue->token_q_tail].next = token;
	}
	if (queue->token_q_unripe == 0) {	/* only ripe tokens (token
						 * count == 0) in queue */
		if (tokens[token].count > 0)
			queue->token_q_unripe = token;	/* first unripe token */
		else
			available_for_purge++;	/* added a ripe token?
						 * increase available count */
	}
	queue->token_q_tail = token;

#if MACH_ASSERT
	queue->debug_count_tokens++;
	/* Check both queues, since we modified the new_pages count on each */
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
			      queue->type,
			      tokens[token].count,	/* num pages on token
							 * (last token) */
			      queue->debug_count_tokens,
			      0,
			      0);
#endif

	return KERN_SUCCESS;
}

/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

	token_idx_t	token;
	token = queue->token_q_head;

	assert(token);

	if (token) {
		assert(queue->token_q_tail);
		if (queue->token_q_head == queue->token_q_unripe) {
			/* no ripe tokens... must move unripe pointer */
			queue->token_q_unripe = tokens[token].next;
		} else {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		}

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		queue->token_q_head = tokens[token].next;
		if (queue->token_q_head) {
			tokens[queue->token_q_head].count += tokens[token].count;
		} else {
			/* currently no other tokens in the queue */
			/*
			 * the page count must be added to the next newly
			 * created token
			 */
			queue->new_pages += tokens[token].count;
			/* if head is zero, tail is too */
			queue->token_q_tail = 0;
		}

#if MACH_ASSERT
		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
#endif
	}
	return token;
}

/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
	token_idx_t	token = vm_purgeable_token_remove_first(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		token_free_idx = token;
	}
}

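/*
 * Token "ripening" model: each call to vm_purgeable_q_advance_all() below
 * ages every queue by one page (num_pages = 1).  The front unripe token's
 * count is decremented; when it reaches zero the token is ripe, meaning one
 * object on that queue may now be purged.  Example: a token created with
 * count 3 ripens after three advances.  If a queue has no unripe tokens, the
 * aging is charged to new_pages instead, so the page is still accounted for
 * when the next token is created.
 */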

/* Call with page queue locked. */
void
vm_purgeable_q_advance_all(void)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

	/* check queue counters - if they get really large, scale them back.
	 * They tend to get that large when there is no purgeable queue action */
	int i;
	if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {	/* a system idling for years might get there */
		for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
			int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
			assert(pages >= 0);
			assert(pages <= TOKEN_COUNT_MAX);
			purgeable_queues[i].new_pages = (int32_t) pages;
			assert(purgeable_queues[i].new_pages == pages);
		}
		token_new_pagecount = 0;
	}

	/*
	 * Decrement token counters. A token counter can be zero; this means the
	 * object is ripe to be purged. It is not purged immediately, because that
	 * could cause several objects to be purged even if purging one would satisfy
	 * the memory needs. Instead, the pageout thread purges one after the other
	 * by calling vm_purgeable_object_purge_one and then rechecking the memory
	 * balance.
	 *
	 * No need to advance the obsolete queue - all items there are always ripe.
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t queue = &purgeable_queues[i];
		uint32_t num_pages = 1;

		/* Iterate over tokens as long as there are unripe tokens. */
		while (queue->token_q_unripe) {
			if (tokens[queue->token_q_unripe].count && num_pages) {
				tokens[queue->token_q_unripe].count -= 1;
				num_pages -= 1;
			}

			if (tokens[queue->token_q_unripe].count == 0) {
				queue->token_q_unripe = tokens[queue->token_q_unripe].next;
				available_for_purge++;
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
						      queue->type,
						      tokens[queue->token_q_head].count,	/* num pages on new
												 * first token */
						      0,
						      available_for_purge,
						      0);
				continue;	/* One token ripened. Make sure to
						 * check the next. */
			}
			if (num_pages == 0)
				break;	/* Current token not ripe and no more pages.
					 * Work done. */
		}

		/*
		 * If there are no unripe tokens in the queue, decrement the
		 * new_pages counter instead. new_pages can be negative, but must be
		 * canceled out by token_new_pagecount, since the inactive queue as a
		 * whole always contains a nonnegative number of pages.
		 */
		if (!queue->token_q_unripe) {
			queue->new_pages -= num_pages;
			assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
		}
#if MACH_ASSERT
		vm_purgeable_token_check_queue(queue);
#endif
	}
}

/*
 * Grab any ripe object and purge it: check the obsolete queue first, then go
 * through each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *    Yes - purge it. Remove token.
 *    No - check other queue. Is there an object?
 *         No - increment group, then go to (1)
 *         Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *               token from other queue and migrate unripe token from this
 *               queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
	assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
	/* return token to free list. advance token list. */
	token_idx_t	new_head = tokens[queue->token_q_head].next;
	tokens[queue->token_q_head].next = token_free_idx;
	token_free_idx = queue->token_q_head;
	queue->token_q_head = new_head;
	if (new_head == 0)
		queue->token_q_tail = 0;

#if MACH_ASSERT
	queue->debug_count_tokens--;
	vm_purgeable_token_check_queue(queue);
#endif

	available_for_purge--;
	assert(available_for_purge >= 0);
}

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
	assert(queue->token_q_head);

	if (tokens[queue->token_q_head].count == 0) {
		/* This queue has a ripe token. Remove. */
		vm_purgeable_token_remove_ripe(queue);
	} else {
		assert(queue2);
		/*
		 * queue2 must have a ripe token. Remove, and migrate one
		 * from queue to queue2.
		 */
		vm_purgeable_token_remove_ripe(queue2);
		/* migrate unripe token */
		token_idx_t	token;
		token_cnt_t	count;

		/* remove token from queue1 */
		assert(queue->token_q_unripe == queue->token_q_head);	/* queue1 had no unripe
									 * tokens, remember? */
		token = vm_purgeable_token_remove_first(queue);
		assert(token);

		count = tokens[token].count;

		/* migrate to queue2 */
		/* go to migration target loc */
		token_idx_t    *token_in_queue2 = &queue2->token_q_head;
		while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
			count -= tokens[*token_in_queue2].count;
			token_in_queue2 = &tokens[*token_in_queue2].next;
		}

		if ((*token_in_queue2 == queue2->token_q_unripe) ||	/* becomes the first
									 * unripe token */
		    (queue2->token_q_unripe == 0))
			queue2->token_q_unripe = token;	/* must update unripe
							 * pointer */

		/* insert token */
		tokens[token].count = count;
		tokens[token].next = *token_in_queue2;

		/*
		 * If inserting at the end, reduce new_pages by that value;
		 * if inserting before a token, reduce that token's counter.
		 */
		if (*token_in_queue2 == 0) {	/* insertion at end of queue2 */
			queue2->token_q_tail = token;	/* must update tail
							 * pointer */
			assert(queue2->new_pages >= (int32_t) count);
			queue2->new_pages -= count;
		} else {
			assert(tokens[*token_in_queue2].count >= count);
			tokens[*token_in_queue2].count -= count;
		}
		*token_in_queue2 = token;

#if MACH_ASSERT
		queue2->debug_count_tokens++;
		vm_purgeable_token_check_queue(queue2);
#endif
	}
}
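
/*
 * Migration example (illustrative numbers): suppose the migrated token was
 * 5 pages from ripening and queue2 holds tokens with counts 2 and 4.  The
 * loop above walks queue2 subtracting counts: 5 - 2 = 3, which is not greater
 * than 4, so the token is inserted between them with count 3 and the
 * following token's count is reduced from 4 to 1.  Since each count is a
 * delta relative to the preceding token, the absolute ripening distance of
 * every token on queue2 is preserved.
 */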

/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
{
	lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
	/*
	 * Usually we would pick the first element from a queue. However, we
	 * might not be able to get a lock on it, in which case we try the
	 * remaining elements in order.
	 */

	vm_object_t	object;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (vm_object_lock_try(object)) {
			/* Locked. Great. We'll take it. Remove and return. */
			queue_remove(&queue->objq[group], object,
				     vm_object_t, objq);
			object->objq.next = 0;
			object->objq.prev = 0;
#if MACH_ASSERT
			queue->debug_count_objects--;
#endif
			return object;
		}
	}

	return 0;
}

/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
	enum purgeable_q_type i;
	int		group;
	vm_object_t	object;
	unsigned int	purged_count;
	uint32_t	collisions;

	purged_count = 0;
	collisions = 0;

restart:
	lck_mtx_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t	queue;

		queue = &purgeable_queues[i];

		/*
		 * Look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			while (!queue_empty(&queue->objq[group])) {
				object = vm_purgeable_object_find_and_lock(queue, group);
				if (object == VM_OBJECT_NULL) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					mutex_pause(collisions++);
					goto restart;
				}

				lck_mtx_unlock(&vm_purgeable_queue_lock);

				/* Lock the page queue here so we don't hold it
				 * over the whole, lengthy operation */
				vm_page_lock_queues();
				vm_purgeable_token_remove_first(queue);
				vm_page_unlock_queues();

				assert(object->purgable == VM_PURGABLE_VOLATILE);
				(void) vm_object_purge(object);
				vm_object_unlock(object);
				purged_count++;
				goto restart;
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
			      purged_count,	/* # of purged objects */
			      0,
			      available_for_purge,
			      0,
			      0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return;
}

boolean_t
vm_purgeable_object_purge_one(void)
{
	enum purgeable_q_type i;
	int		group;
	vm_object_t	object = 0;
	purgeable_q_t	queue, queue2;

	/* Need the page queue lock since we'll be changing the token queue. */
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		queue = &purgeable_queues[i];

		/*
		 * Are there any ripe tokens on this queue? If yes, we'll
		 * find an object to purge there
		 */
		if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
			continue;	/* no token? Look at next purgeable
					 * queue */

		/*
		 * Now look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			if (!queue_empty(&queue->objq[group]) &&
			    (object = vm_purgeable_object_find_and_lock(queue, group))) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				vm_purgeable_token_choose_and_delete_ripe(queue, 0);
				goto purge_now;
			}
			if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
				/* This is the token migration case, and it works between
				 * FIFO and LIFO only */
				queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
							   PURGEABLE_Q_TYPE_FIFO :
							   PURGEABLE_Q_TYPE_LIFO];

				if (!queue_empty(&queue2->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue2, group))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
					goto purge_now;
				}
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	/*
	 * because we have to do a try_lock on the objects which could fail,
	 * we could end up with no object to purge at this time, even though
	 * we have objects in a purgeable state
	 */
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return FALSE;

purge_now:

	assert(object);
	assert(object->purgable == VM_PURGABLE_VOLATILE);
	vm_page_unlock_queues();	/* Unlock for call to vm_object_purge() */
	(void) vm_object_purge(object);
	vm_object_unlock(object);
	vm_page_lock_queues();

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
			      object,	/* purged object */
			      0,
			      available_for_purge,
			      0,
			      0);

	return TRUE;
}

/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
	vm_object_lock_assert_exclusive(object);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
		group = 0;
	if (queue->type != PURGEABLE_Q_TYPE_LIFO)	/* fifo and obsolete are
							 * fifo-queued */
		queue_enter(&queue->objq[group], object, vm_object_t, objq);	/* last to die */
	else
		queue_enter_first(&queue->objq[group], object, vm_object_t, objq);	/* first to die */

#if MACH_ASSERT
	queue->debug_count_objects++;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);
#endif

	lck_mtx_unlock(&vm_purgeable_queue_lock);
}

/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
	enum purgeable_q_type i;
	int		group;

	vm_object_lock_assert_exclusive(object);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t	queue = &purgeable_queues[i];
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			vm_object_t	o;
			for (o = (vm_object_t) queue_first(&queue->objq[group]);
			     !queue_end(&queue->objq[group], (queue_entry_t) o);
			     o = (vm_object_t) queue_next(&o->objq)) {
				if (o == object) {
					queue_remove(&queue->objq[group], object,
						     vm_object_t, objq);
#if MACH_ASSERT
					queue->debug_count_objects--;
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
							      0,
							      tokens[queue->token_q_head].count,
							      queue->type,
							      group,
							      0);
#endif
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					object->objq.next = 0;
					object->objq.prev = 0;
					return &purgeable_queues[i];
				}
			}
		}
	}
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return 0;
}