osfmk/vm/vm_purgeable.c
1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include <kern/sched_prim.h>
25 #include <kern/ledger.h>
26 #include <kern/policy_internal.h>
27
28 #include <libkern/OSDebug.h>
29
30 #include <mach/mach_types.h>
31
32 #include <machine/limits.h>
33
34 #include <vm/vm_compressor_pager.h>
35 #include <vm/vm_kern.h> /* kmem_alloc */
36 #include <vm/vm_page.h>
37 #include <vm/vm_pageout.h>
38 #include <vm/vm_protos.h>
39 #include <vm/vm_purgeable_internal.h>
40
41 #include <sys/kdebug.h>
42
43 /*
44 * LOCK ORDERING for task-owned purgeable objects
45 *
46 * Whenever we need to hold multiple locks while adding to, removing from,
47 * or scanning a task's task_objq list of VM objects it owns, locks should
48 * be taken in this order:
49 *
50 * VM object ==> vm_purgeable_queue_lock ==> owner_task->task_objq_lock
51 *
52 * If one needs to acquire the VM object lock after any of the other 2 locks,
53 * one needs to use vm_object_lock_try() and, if that fails, release the
54 * other locks and retake them all in the correct order.
55 */
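/*
 * Illustrative sketch only (added commentary, not part of the original
 * source): the retry pattern described above, roughly as
 * vm_purgeable_disown() applies it.  "owner_task", "obj" and "collisions"
 * are hypothetical local names used purely for illustration.
 *
 *	retry:
 *		lck_mtx_lock(&vm_purgeable_queue_lock);
 *		task_objq_lock(owner_task);
 *		if (!vm_object_lock_try(obj)) {
 *			// out of order for the VM object lock: back off and retry
 *			task_objq_unlock(owner_task);
 *			lck_mtx_unlock(&vm_purgeable_queue_lock);
 *			mutex_pause(collisions++);
 *			goto retry;
 *		}
 *		// all three locks now held, in the documented order
 */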
56
57 extern vm_pressure_level_t memorystatus_vm_pressure_level;
58
59 struct token {
60 token_cnt_t count;
61 token_idx_t prev;
62 token_idx_t next;
63 };
64
65 struct token *tokens;
66 token_idx_t token_q_max_cnt = 0;
67 vm_size_t token_q_cur_size = 0;
68
69 token_idx_t token_free_idx = 0; /* head of free queue */
70 token_idx_t token_init_idx = 1; /* token 0 is reserved!! */
71 int32_t token_new_pagecount = 0; /* count of pages that will
72 * be added onto token queue */
73
74 int available_for_purge = 0; /* increase when ripe token
75 * added, decrease when ripe
76 * token removed.
77 * protected by page_queue_lock
78 */
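/*
 * Illustrative example only (added commentary, not part of the original
 * source): each entry in tokens[] stores how many more inactive-queue page
 * advances must happen, beyond those covered by the tokens ahead of it,
 * before it "ripens" (its count reaches 0).  Token index 0 is reserved and
 * never used as a real token.  For a queue whose tokens carry the counts
 *
 *	head -> [0] -> [0] -> [3] -> [5] -> tail
 *
 * the first two tokens are already ripe (they contribute to
 * available_for_purge), the third ripens after 3 more page advances, and
 * the fourth after 3 + 5 = 8 more; token_q_unripe points at the [3] token.
 */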
79
80 static int token_q_allocating = 0; /* flag for singlethreading
81 * allocator */
82
83 struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
84 queue_head_t purgeable_nonvolatile_queue;
85 int purgeable_nonvolatile_count;
86
87 decl_lck_mtx_data(,vm_purgeable_queue_lock)
88
89 static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);
90
91 static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);
92
93 void vm_purgeable_nonvolatile_owner_update(task_t owner,
94 int delta);
95 void vm_purgeable_volatile_owner_update(task_t owner,
96 int delta);
97
98
99 #if MACH_ASSERT
100 static void
101 vm_purgeable_token_check_queue(purgeable_q_t queue)
102 {
103 int token_cnt = 0, page_cnt = 0;
104 token_idx_t token = queue->token_q_head;
105 token_idx_t unripe = 0;
106 int our_inactive_count;
107
108 #if DEVELOPMENT
109 static unsigned lightweight_check = 0;
110
111 /*
112 * Due to performance impact, only perform this check
113 * every 100 times on DEVELOPMENT kernels.
114 */
115 if (lightweight_check++ < 100) {
116 return;
117 }
118
119 lightweight_check = 0;
120 #endif
121
122 while (token) {
123 if (tokens[token].count != 0) {
124 assert(queue->token_q_unripe);
125 if (unripe == 0) {
126 assert(token == queue->token_q_unripe);
127 unripe = token;
128 }
129 page_cnt += tokens[token].count;
130 }
131 if (tokens[token].next == 0)
132 assert(queue->token_q_tail == token);
133
134 token_cnt++;
135 token = tokens[token].next;
136 }
137
138 if (unripe)
139 assert(queue->token_q_unripe == unripe);
140 assert(token_cnt == queue->debug_count_tokens);
141
142 /* obsolete queue doesn't maintain token counts */
143 if(queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
144 {
145 our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
146 assert(our_inactive_count >= 0);
147 assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
148 }
149 }
150 #endif
151
152 /*
153 * Add a token. Allocate token queue memory if necessary.
154 * Call with page queue locked.
155 */
156 kern_return_t
157 vm_purgeable_token_add(purgeable_q_t queue)
158 {
159 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
160
161 /* new token */
162 token_idx_t token;
163 enum purgeable_q_type i;
164
165 find_available_token:
166
167 if (token_free_idx) { /* unused tokens available */
168 token = token_free_idx;
169 token_free_idx = tokens[token_free_idx].next;
170 } else if (token_init_idx < token_q_max_cnt) { /* lazy token array init */
171 token = token_init_idx;
172 token_init_idx++;
173 } else { /* allocate more memory */
174 /* Wait if another thread is inside the memory alloc section */
175 while(token_q_allocating) {
176 wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
177 LCK_SLEEP_DEFAULT,
178 (event_t)&token_q_allocating,
179 THREAD_UNINT);
180 if(res != THREAD_AWAKENED) return KERN_ABORTED;
181 };
182
183 /* Check whether memory is still maxed out */
184 if(token_init_idx < token_q_max_cnt)
185 goto find_available_token;
186
187 /* Still no memory. Allocate some. */
188 token_q_allocating = 1;
189
190 /* Drop page queue lock so we can allocate */
191 vm_page_unlock_queues();
192
193 struct token *new_loc;
194 vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
195 kern_return_t result;
196
197 if (alloc_size / sizeof (struct token) > TOKEN_COUNT_MAX) {
198 result = KERN_RESOURCE_SHORTAGE;
199 } else {
200 if (token_q_cur_size) {
201 result = kmem_realloc(kernel_map,
202 (vm_offset_t) tokens,
203 token_q_cur_size,
204 (vm_offset_t *) &new_loc,
205 alloc_size, VM_KERN_MEMORY_OSFMK);
206 } else {
207 result = kmem_alloc(kernel_map,
208 (vm_offset_t *) &new_loc,
209 alloc_size, VM_KERN_MEMORY_OSFMK);
210 }
211 }
212
213 vm_page_lock_queues();
214
215 if (result) {
216 /* Unblock waiting threads */
217 token_q_allocating = 0;
218 thread_wakeup((event_t)&token_q_allocating);
219 return result;
220 }
221
222 /* If we get here, we allocated new memory. Update pointers and
223 * dealloc old range */
224 struct token *old_tokens=tokens;
225 tokens=new_loc;
226 vm_size_t old_token_q_cur_size=token_q_cur_size;
227 token_q_cur_size=alloc_size;
228 token_q_max_cnt = (token_idx_t) (token_q_cur_size /
229 sizeof(struct token));
230 assert (token_init_idx < token_q_max_cnt); /* We must have a free token now */
231
232 if (old_token_q_cur_size) { /* clean up old mapping */
233 vm_page_unlock_queues();
234 /* kmem_realloc leaves the old region mapped. Get rid of it. */
235 kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
236 vm_page_lock_queues();
237 }
238
239 /* Unblock waiting threads */
240 token_q_allocating = 0;
241 thread_wakeup((event_t)&token_q_allocating);
242
243 goto find_available_token;
244 }
245
246 assert (token);
247
248 /*
249 	 * the new pagecount we got needs to be applied to all queues except
250 * obsolete
251 */
252 for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
253 int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
254 assert(pages >= 0);
255 assert(pages <= TOKEN_COUNT_MAX);
256 purgeable_queues[i].new_pages = (int32_t) pages;
257 assert(purgeable_queues[i].new_pages == pages);
258 }
259 token_new_pagecount = 0;
260
261 /* set token counter value */
262 if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
263 tokens[token].count = queue->new_pages;
264 else
265 tokens[token].count = 0; /* all obsolete items are
266 * ripe immediately */
267 queue->new_pages = 0;
268
269 /* put token on token counter list */
270 tokens[token].next = 0;
271 if (queue->token_q_tail == 0) {
272 assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
273 queue->token_q_head = token;
274 tokens[token].prev = 0;
275 } else {
276 tokens[queue->token_q_tail].next = token;
277 tokens[token].prev = queue->token_q_tail;
278 }
279 if (queue->token_q_unripe == 0) { /* only ripe tokens (token
280 * count == 0) in queue */
281 if (tokens[token].count > 0)
282 queue->token_q_unripe = token; /* first unripe token */
283 else
284 available_for_purge++; /* added a ripe token?
285 * increase available count */
286 }
287 queue->token_q_tail = token;
288
289 #if MACH_ASSERT
290 queue->debug_count_tokens++;
291 /* Check both queues, since we modified the new_pages count on each */
292 vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
293 vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);
294
295 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
296 queue->type,
297 tokens[token].count, /* num pages on token
298 * (last token) */
299 queue->debug_count_tokens,
300 0,
301 0);
302 #endif
303
304 return KERN_SUCCESS;
305 }
306
307 /*
308 * Remove first token from queue and return its index. Add its count to the
309 * count of the next token.
310 * Call with page queue locked.
311 */
312 static token_idx_t
313 vm_purgeable_token_remove_first(purgeable_q_t queue)
314 {
315 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
316
317 token_idx_t token;
318 token = queue->token_q_head;
319
320 assert(token);
321
322 if (token) {
323 assert(queue->token_q_tail);
324 if (queue->token_q_head == queue->token_q_unripe) {
325 /* no ripe tokens... must move unripe pointer */
326 queue->token_q_unripe = tokens[token].next;
327 } else {
328 /* we're removing a ripe token. decrease count */
329 available_for_purge--;
330 assert(available_for_purge >= 0);
331 }
332
333 if (queue->token_q_tail == queue->token_q_head)
334 assert(tokens[token].next == 0);
335
336 queue->token_q_head = tokens[token].next;
337 if (queue->token_q_head) {
338 tokens[queue->token_q_head].count += tokens[token].count;
339 tokens[queue->token_q_head].prev = 0;
340 } else {
341 /* currently no other tokens in the queue */
342 /*
343 * the page count must be added to the next newly
344 * created token
345 */
346 queue->new_pages += tokens[token].count;
347 /* if head is zero, tail is too */
348 queue->token_q_tail = 0;
349 }
350
351 #if MACH_ASSERT
352 queue->debug_count_tokens--;
353 vm_purgeable_token_check_queue(queue);
354
355 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
356 queue->type,
357 tokens[queue->token_q_head].count, /* num pages on new
358 * first token */
359 token_new_pagecount, /* num pages waiting for
360 * next token */
361 available_for_purge,
362 0);
363 #endif
364 }
365 return token;
366 }
367
368 static token_idx_t
369 vm_purgeable_token_remove_last(purgeable_q_t queue)
370 {
371 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
372
373 token_idx_t token;
374 token = queue->token_q_tail;
375
376 assert(token);
377
378 if (token) {
379 assert(queue->token_q_head);
380
381 if (queue->token_q_tail == queue->token_q_head)
382 assert(tokens[token].next == 0);
383
384 if (queue->token_q_unripe == 0) {
385 /* we're removing a ripe token. decrease count */
386 available_for_purge--;
387 assert(available_for_purge >= 0);
388 } else if (queue->token_q_unripe == token) {
389 /* we're removing the only unripe token */
390 queue->token_q_unripe = 0;
391 }
392
393 if (token == queue->token_q_head) {
394 /* token is the last one in the queue */
395 queue->token_q_head = 0;
396 queue->token_q_tail = 0;
397 } else {
398 token_idx_t new_tail;
399
400 new_tail = tokens[token].prev;
401
402 assert(new_tail);
403 assert(tokens[new_tail].next == token);
404
405 queue->token_q_tail = new_tail;
406 tokens[new_tail].next = 0;
407 }
408
409 queue->new_pages += tokens[token].count;
410
411 #if MACH_ASSERT
412 queue->debug_count_tokens--;
413 vm_purgeable_token_check_queue(queue);
414
415 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
416 queue->type,
417 tokens[queue->token_q_head].count, /* num pages on new
418 * first token */
419 token_new_pagecount, /* num pages waiting for
420 * next token */
421 available_for_purge,
422 0);
423 #endif
424 }
425 return token;
426 }
427
428 /*
429 * Delete first token from queue. Return token to token queue.
430 * Call with page queue locked.
431 */
432 void
433 vm_purgeable_token_delete_first(purgeable_q_t queue)
434 {
435 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
436 token_idx_t token = vm_purgeable_token_remove_first(queue);
437
438 if (token) {
439 /* stick removed token on free queue */
440 tokens[token].next = token_free_idx;
441 tokens[token].prev = 0;
442 token_free_idx = token;
443 }
444 }
445
446 void
447 vm_purgeable_token_delete_last(purgeable_q_t queue)
448 {
449 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
450 token_idx_t token = vm_purgeable_token_remove_last(queue);
451
452 if (token) {
453 /* stick removed token on free queue */
454 tokens[token].next = token_free_idx;
455 tokens[token].prev = 0;
456 token_free_idx = token;
457 }
458 }
459
460
461 /* Call with page queue locked. */
462 void
463 vm_purgeable_q_advance_all()
464 {
465 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
466
467 /* check queue counters - if they get really large, scale them back.
468 * They tend to get that large when there is no purgeable queue action */
469 int i;
470 	if(token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) /* a system idling for years might get there */
471 {
472 for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
473 int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
474 assert(pages >= 0);
475 assert(pages <= TOKEN_COUNT_MAX);
476 purgeable_queues[i].new_pages = (int32_t) pages;
477 assert(purgeable_queues[i].new_pages == pages);
478 }
479 token_new_pagecount = 0;
480 }
481
482 /*
483 	 * Decrement token counters. A token counter can be zero; this means the
484 	 * object is ripe to be purged. It is not purged immediately, because that
485 	 * could cause several objects to be purged even if purging one would satisfy
486 	 * the memory needs. Instead, the pageout thread purges one after the other
487 	 * by calling vm_purgeable_object_purge_one and then rechecking the memory
488 	 * balance (see the illustrative sketch after this function).
489 *
490 * No need to advance obsolete queue - all items are ripe there,
491 * always
492 */
493 for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
494 purgeable_q_t queue = &purgeable_queues[i];
495 uint32_t num_pages = 1;
496
497 /* Iterate over tokens as long as there are unripe tokens. */
498 while (queue->token_q_unripe) {
499 if (tokens[queue->token_q_unripe].count && num_pages)
500 {
501 tokens[queue->token_q_unripe].count -= 1;
502 num_pages -= 1;
503 }
504
505 if (tokens[queue->token_q_unripe].count == 0) {
506 queue->token_q_unripe = tokens[queue->token_q_unripe].next;
507 available_for_purge++;
508 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
509 queue->type,
510 tokens[queue->token_q_head].count, /* num pages on new
511 * first token */
512 0,
513 available_for_purge,
514 0);
515 continue; /* One token ripened. Make sure to
516 * check the next. */
517 }
518 if (num_pages == 0)
519 break; /* Current token not ripe and no more pages.
520 * Work done. */
521 }
522
523 /*
524 * if there are no unripe tokens in the queue, decrement the
525 		 * new_pages counter instead. new_pages can be negative, but it must be
526 		 * canceled out by token_new_pagecount -- since the inactive queue as a
527 		 * whole always contains a nonnegative number of pages
528 */
529 if (!queue->token_q_unripe) {
530 queue->new_pages -= num_pages;
531 assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
532 }
533 #if MACH_ASSERT
534 vm_purgeable_token_check_queue(queue);
535 #endif
536 }
537 }
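/*
 * Illustrative sketch only (not part of the original source): how the
 * pageout path described in the comment inside vm_purgeable_q_advance_all()
 * could drive these queues.  "need_more_memory()" is a hypothetical
 * placeholder for the caller's memory-balance check, not a real function.
 *
 *	vm_page_lock_queues();
 *	vm_purgeable_q_advance_all();	// may ripen some tokens
 *	while (available_for_purge > 0 && need_more_memory()) {
 *		// requires the page queue lock; purges at most one ripe object
 *		if (!vm_purgeable_object_purge_one(0, 0))
 *			break;
 *	}
 *	vm_page_unlock_queues();
 */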
538
539 /*
540  * Grab any ripe object and purge it: check the obsolete queue first, then go
541  * through each volatile group. Select a queue with a ripe token.
542  * Start with the first group (0):
543 * 1. Look at queue. Is there an object?
544 * Yes - purge it. Remove token.
545 * No - check other queue. Is there an object?
546 * No - increment group, then go to (1)
547 * Yes - purge it. Remove token. If there is no ripe token, remove ripe
548 * token from other queue and migrate unripe token from this
549 * queue to other queue.
550 * Call with page queue locked.
551 */
552 static void
553 vm_purgeable_token_remove_ripe(purgeable_q_t queue)
554 {
555 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
556 assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
557 /* return token to free list. advance token list. */
558 token_idx_t new_head = tokens[queue->token_q_head].next;
559 tokens[queue->token_q_head].next = token_free_idx;
560 tokens[queue->token_q_head].prev = 0;
561 token_free_idx = queue->token_q_head;
562 queue->token_q_head = new_head;
563 tokens[new_head].prev = 0;
564 if (new_head == 0)
565 queue->token_q_tail = 0;
566
567 #if MACH_ASSERT
568 queue->debug_count_tokens--;
569 vm_purgeable_token_check_queue(queue);
570 #endif
571
572 available_for_purge--;
573 assert(available_for_purge >= 0);
574 }
575
576 /*
577 * Delete a ripe token from the given queue. If there are no ripe tokens on
578 * that queue, delete a ripe token from queue2, and migrate an unripe token
579 * from queue to queue2
580 * Call with page queue locked.
581 */
582 static void
583 vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
584 {
585 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
586 assert(queue->token_q_head);
587
588 if (tokens[queue->token_q_head].count == 0) {
589 /* This queue has a ripe token. Remove. */
590 vm_purgeable_token_remove_ripe(queue);
591 } else {
592 assert(queue2);
593 /*
594 * queue2 must have a ripe token. Remove, and migrate one
595 * from queue to queue2.
596 */
597 vm_purgeable_token_remove_ripe(queue2);
598 /* migrate unripe token */
599 token_idx_t token;
600 token_cnt_t count;
601
602 /* remove token from queue1 */
603 		assert(queue->token_q_unripe == queue->token_q_head);	/* queue1 had no ripe
604 * tokens, remember? */
605 token = vm_purgeable_token_remove_first(queue);
606 assert(token);
607
608 count = tokens[token].count;
609
610 /* migrate to queue2 */
611 /* go to migration target loc */
612
613 token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;
614
615 while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
616 count -= tokens[token_to_insert_before].count;
617 token_to_insert_before = tokens[token_to_insert_before].next;
618 }
619
620 /* token_to_insert_before is now set correctly */
621
622 /* should the inserted token become the first unripe token? */
623 if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
624 queue2->token_q_unripe = token; /* if so, must update unripe pointer */
625
626 /*
627 * insert token.
628 * if inserting at end, reduce new_pages by that value;
629 * otherwise, reduce counter of next token
630 */
631
632 tokens[token].count = count;
633
634 if (token_to_insert_before != 0) {
635 token_to_insert_after = tokens[token_to_insert_before].prev;
636
637 tokens[token].next = token_to_insert_before;
638 tokens[token_to_insert_before].prev = token;
639
640 assert(tokens[token_to_insert_before].count >= count);
641 tokens[token_to_insert_before].count -= count;
642 } else {
643 /* if we ran off the end of the list, the token to insert after is the tail */
644 token_to_insert_after = queue2->token_q_tail;
645
646 tokens[token].next = 0;
647 queue2->token_q_tail = token;
648
649 assert(queue2->new_pages >= (int32_t) count);
650 queue2->new_pages -= count;
651 }
652
653 if (token_to_insert_after != 0) {
654 tokens[token].prev = token_to_insert_after;
655 tokens[token_to_insert_after].next = token;
656 } else {
657 /* is this case possible? */
658 tokens[token].prev = 0;
659 queue2->token_q_head = token;
660 }
661
662 #if MACH_ASSERT
663 queue2->debug_count_tokens++;
664 vm_purgeable_token_check_queue(queue2);
665 #endif
666 }
667 }
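/*
 * Illustrative trace only (added commentary, not part of the original
 * source) of the migration path above.  Suppose the unripe token removed
 * from "queue" has count 5, and queue2's tokens carry counts 3 and 4 (head
 * to tail).  The insertion loop walks queue2: 5 > 3, so count becomes 2 and
 * we move past the first token; 2 <= 4, so we insert before the second
 * token.  The migrated token is stored with count 2 and the second token's
 * count drops from 4 to 2, leaving queue2 as 3, 2, 2: the migrated token
 * still ripens after 3 + 2 = 5 page advances, matching its original count,
 * and the ripening time of the tokens behind it is unchanged.
 */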
668
669 /* Find an object that can be locked. Returns locked object. */
670 /* Call with purgeable queue locked. */
671 static vm_object_t
672 vm_purgeable_object_find_and_lock(
673 purgeable_q_t queue,
674 int group,
675 boolean_t pick_ripe)
676 {
677 vm_object_t object, best_object;
678 int object_task_importance;
679 int best_object_task_importance;
680 int best_object_skipped;
681 int num_objects_skipped;
682 int try_lock_failed = 0;
683 int try_lock_succeeded = 0;
684 task_t owner;
685
686 best_object = VM_OBJECT_NULL;
687 best_object_task_importance = INT_MAX;
688
689 LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
690 /*
691 * Usually we would pick the first element from a queue. However, we
692 * might not be able to get a lock on it, in which case we try the
693 * remaining elements in order.
694 */
695
696 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
697 pick_ripe,
698 group,
699 VM_KERNEL_UNSLIDE_OR_PERM(queue),
700 0,
701 0);
702
703 num_objects_skipped = 0;
704 for (object = (vm_object_t) queue_first(&queue->objq[group]);
705 !queue_end(&queue->objq[group], (queue_entry_t) object);
706 object = (vm_object_t) queue_next(&object->objq),
707 num_objects_skipped++) {
708
709 /*
710 * To prevent us looping for an excessively long time, choose
711 * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
712 * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
713 * we keep going until we find the first eligible object.
714 */
715 if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
716 break;
717 }
718
719 if (pick_ripe &&
720 ! object->purgeable_when_ripe) {
721 /* we want an object that has a ripe token */
722 continue;
723 }
724
725 object_task_importance = 0;
726
727 owner = object->vo_purgeable_owner;
728 if (owner) {
729 #if CONFIG_EMBEDDED
730 #if CONFIG_JETSAM
731 object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE);
732 #endif /* CONFIG_JETSAM */
733 #else /* CONFIG_EMBEDDED */
734 object_task_importance = task_importance_estimate(owner);
735 #endif /* CONFIG_EMBEDDED */
736 }
737
738 if (object_task_importance < best_object_task_importance) {
739 if (vm_object_lock_try(object)) {
740 try_lock_succeeded++;
741 if (best_object != VM_OBJECT_NULL) {
742 /* forget about previous best object */
743 vm_object_unlock(best_object);
744 }
745 best_object = object;
746 best_object_task_importance = object_task_importance;
747 best_object_skipped = num_objects_skipped;
748 if (best_object_task_importance == 0) {
749 /* can't get any better: stop looking */
750 break;
751 }
752 } else {
753 try_lock_failed++;
754 }
755 }
756 }
757
758 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
759 num_objects_skipped, /* considered objects */
760 try_lock_failed,
761 try_lock_succeeded,
762 VM_KERNEL_UNSLIDE_OR_PERM(best_object),
763 ((best_object == NULL) ? 0 : best_object->resident_page_count));
764
765 object = best_object;
766
767 if (object == VM_OBJECT_NULL) {
768 return VM_OBJECT_NULL;
769 }
770
771 /* Locked. Great. We'll take it. Remove and return. */
772 // printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);
773
774 vm_object_lock_assert_exclusive(object);
775
776 queue_remove(&queue->objq[group], object,
777 vm_object_t, objq);
778 object->objq.next = NULL;
779 object->objq.prev = NULL;
780 object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
781 object->purgeable_queue_group = 0;
782 /* one less volatile object for this object's owner */
783 vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);
784
785 #if DEBUG
786 object->vo_purgeable_volatilizer = NULL;
787 #endif /* DEBUG */
788
789 /* keep queue of non-volatile objects */
790 queue_enter(&purgeable_nonvolatile_queue, object,
791 vm_object_t, objq);
792 assert(purgeable_nonvolatile_count >= 0);
793 purgeable_nonvolatile_count++;
794 assert(purgeable_nonvolatile_count > 0);
795 /* one more nonvolatile object for this object's owner */
796 vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, +1);
797
798 #if MACH_ASSERT
799 queue->debug_count_objects--;
800 #endif
801 return object;
802 }
803
804 /* Can be called without holding locks */
805 void
806 vm_purgeable_object_purge_all(void)
807 {
808 enum purgeable_q_type i;
809 int group;
810 vm_object_t object;
811 unsigned int purged_count;
812 uint32_t collisions;
813
814 purged_count = 0;
815 collisions = 0;
816
817 restart:
818 lck_mtx_lock(&vm_purgeable_queue_lock);
819 /* Cycle through all queues */
820 for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
821 purgeable_q_t queue;
822
823 queue = &purgeable_queues[i];
824
825 /*
826 * Look through all groups, starting from the lowest. If
827 * we find an object in that group, try to lock it (this can
828 * fail). If locking is successful, we can drop the queue
829 * lock, remove a token and then purge the object.
830 */
831 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
832 while (!queue_empty(&queue->objq[group])) {
833 object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
834 if (object == VM_OBJECT_NULL) {
835 lck_mtx_unlock(&vm_purgeable_queue_lock);
836 mutex_pause(collisions++);
837 goto restart;
838 }
839
840 lck_mtx_unlock(&vm_purgeable_queue_lock);
841
842 /* Lock the page queue here so we don't hold it
843 				 * over the whole, lengthy operation */
844 if (object->purgeable_when_ripe) {
845 vm_page_lock_queues();
846 vm_purgeable_token_remove_first(queue);
847 vm_page_unlock_queues();
848 }
849
850 (void) vm_object_purge(object, 0);
851 assert(object->purgable == VM_PURGABLE_EMPTY);
852 /* no change in purgeable accounting */
853
854 vm_object_unlock(object);
855 purged_count++;
856 goto restart;
857 }
858 assert(queue->debug_count_objects >= 0);
859 }
860 }
861 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
862 purged_count, /* # of purged objects */
863 0,
864 available_for_purge,
865 0,
866 0);
867 lck_mtx_unlock(&vm_purgeable_queue_lock);
868 return;
869 }
870
871 boolean_t
872 vm_purgeable_object_purge_one_unlocked(
873 int force_purge_below_group)
874 {
875 boolean_t retval;
876
877 vm_page_lock_queues();
878 retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
879 vm_page_unlock_queues();
880
881 return retval;
882 }
883
884 boolean_t
885 vm_purgeable_object_purge_one(
886 int force_purge_below_group,
887 int flags)
888 {
889 enum purgeable_q_type i;
890 int group;
891 vm_object_t object = 0;
892 purgeable_q_t queue, queue2;
893 boolean_t forced_purge;
894
895 /* Need the page queue lock since we'll be changing the token queue. */
896 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
897 lck_mtx_lock(&vm_purgeable_queue_lock);
898
899 /* Cycle through all queues */
900 for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
901 queue = &purgeable_queues[i];
902
903 if (force_purge_below_group == 0) {
904 /*
905 * Are there any ripe tokens on this queue? If yes,
906 * we'll find an object to purge there
907 */
908 if (!queue->token_q_head) {
909 /* no token: look at next purgeable queue */
910 continue;
911 }
912
913 if (tokens[queue->token_q_head].count != 0) {
914 /* no ripe token: next queue */
915 continue;
916 }
917 }
918
919 /*
920 * Now look through all groups, starting from the lowest. If
921 * we find an object in that group, try to lock it (this can
922 * fail). If locking is successful, we can drop the queue
923 * lock, remove a token and then purge the object.
924 */
925 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
926 if (!queue->token_q_head ||
927 tokens[queue->token_q_head].count != 0) {
928 /* no tokens or no ripe tokens */
929
930 if (group >= force_purge_below_group) {
931 /* no more groups to force-purge */
932 break;
933 }
934
935 /*
936 * Try and purge an object in this group
937 * even though no tokens are ripe.
938 */
939 if (!queue_empty(&queue->objq[group]) &&
940 (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
941 lck_mtx_unlock(&vm_purgeable_queue_lock);
942 if (object->purgeable_when_ripe) {
943 vm_purgeable_token_delete_first(queue);
944 }
945 forced_purge = TRUE;
946 goto purge_now;
947 }
948
949 /* nothing to purge in this group: next group */
950 continue;
951 }
952 if (!queue_empty(&queue->objq[group]) &&
953 (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
954 lck_mtx_unlock(&vm_purgeable_queue_lock);
955 if (object->purgeable_when_ripe) {
956 vm_purgeable_token_choose_and_delete_ripe(queue, 0);
957 }
958 forced_purge = FALSE;
959 goto purge_now;
960 }
961 if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
962 /* This is the token migration case, and it works between
963 * FIFO and LIFO only */
964 queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
965 PURGEABLE_Q_TYPE_FIFO :
966 PURGEABLE_Q_TYPE_LIFO];
967
968 if (!queue_empty(&queue2->objq[group]) &&
969 (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
970 lck_mtx_unlock(&vm_purgeable_queue_lock);
971 if (object->purgeable_when_ripe) {
972 vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
973 }
974 forced_purge = FALSE;
975 goto purge_now;
976 }
977 }
978 assert(queue->debug_count_objects >= 0);
979 }
980 }
981 /*
982 * because we have to do a try_lock on the objects which could fail,
983 * we could end up with no object to purge at this time, even though
984 * we have objects in a purgeable state
985 */
986 lck_mtx_unlock(&vm_purgeable_queue_lock);
987 return FALSE;
988
989 purge_now:
990
991 assert(object);
992 vm_page_unlock_queues(); /* Unlock for call to vm_object_purge() */
993 // printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_purgeable_owner, task_importance_estimate(object->vo_purgeable_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
994 (void) vm_object_purge(object, flags);
995 assert(object->purgable == VM_PURGABLE_EMPTY);
996 /* no change in purgeable accounting */
997 vm_object_unlock(object);
998 vm_page_lock_queues();
999
1000 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
1001 VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
1002 0,
1003 available_for_purge,
1004 0,
1005 0);
1006
1007 return TRUE;
1008 }
1009
1010 /* Called with object lock held */
1011 void
1012 vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
1013 {
1014 vm_object_lock_assert_exclusive(object);
1015 lck_mtx_lock(&vm_purgeable_queue_lock);
1016
1017 assert(object->objq.next != NULL);
1018 assert(object->objq.prev != NULL);
1019 queue_remove(&purgeable_nonvolatile_queue, object,
1020 vm_object_t, objq);
1021 object->objq.next = NULL;
1022 object->objq.prev = NULL;
1023 assert(purgeable_nonvolatile_count > 0);
1024 purgeable_nonvolatile_count--;
1025 assert(purgeable_nonvolatile_count >= 0);
1026 /* one less nonvolatile object for this object's owner */
1027 vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, -1);
1028
1029 if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
1030 group = 0;
1031
1032 if (queue->type != PURGEABLE_Q_TYPE_LIFO) /* fifo and obsolete are
1033 * fifo-queued */
1034 queue_enter(&queue->objq[group], object, vm_object_t, objq); /* last to die */
1035 else
1036 queue_enter_first(&queue->objq[group], object, vm_object_t, objq); /* first to die */
1037 /* one more volatile object for this object's owner */
1038 vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, +1);
1039
1040 object->purgeable_queue_type = queue->type;
1041 object->purgeable_queue_group = group;
1042
1043 #if DEBUG
1044 assert(object->vo_purgeable_volatilizer == NULL);
1045 object->vo_purgeable_volatilizer = current_task();
1046 OSBacktrace(&object->purgeable_volatilizer_bt[0], 16);
1047 #endif /* DEBUG */
1048
1049 #if MACH_ASSERT
1050 queue->debug_count_objects++;
1051 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
1052 0,
1053 tokens[queue->token_q_head].count,
1054 queue->type,
1055 group,
1056 0);
1057 #endif
1058
1059 lck_mtx_unlock(&vm_purgeable_queue_lock);
1060 }
1061
1062 /* Look for object. If found, remove from purgeable queue. */
1063 /* Called with object lock held */
1064 purgeable_q_t
1065 vm_purgeable_object_remove(vm_object_t object)
1066 {
1067 int group;
1068 enum purgeable_q_type type;
1069 purgeable_q_t queue;
1070
1071 vm_object_lock_assert_exclusive(object);
1072
1073 type = object->purgeable_queue_type;
1074 group = object->purgeable_queue_group;
1075
1076 if (type == PURGEABLE_Q_TYPE_MAX) {
1077 if (object->objq.prev || object->objq.next)
1078 panic("unmarked object on purgeable q");
1079
1080 return NULL;
1081 } else if (!(object->objq.prev && object->objq.next))
1082 panic("marked object not on purgeable q");
1083
1084 lck_mtx_lock(&vm_purgeable_queue_lock);
1085
1086 queue = &purgeable_queues[type];
1087
1088 queue_remove(&queue->objq[group], object, vm_object_t, objq);
1089 object->objq.next = NULL;
1090 object->objq.prev = NULL;
1091 /* one less volatile object for this object's owner */
1092 vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);
1093 #if DEBUG
1094 object->vo_purgeable_volatilizer = NULL;
1095 #endif /* DEBUG */
1096 /* keep queue of non-volatile objects */
1097 if (object->alive && !object->terminating) {
1098 task_t owner;
1099 queue_enter(&purgeable_nonvolatile_queue, object,
1100 vm_object_t, objq);
1101 assert(purgeable_nonvolatile_count >= 0);
1102 purgeable_nonvolatile_count++;
1103 assert(purgeable_nonvolatile_count > 0);
1104 /* one more nonvolatile object for this object's owner */
1105 owner = object->vo_purgeable_owner;
1106 vm_purgeable_nonvolatile_owner_update(owner, +1);
1107 }
1108
1109 #if MACH_ASSERT
1110 queue->debug_count_objects--;
1111 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
1112 0,
1113 tokens[queue->token_q_head].count,
1114 queue->type,
1115 group,
1116 0);
1117 #endif
1118
1119 lck_mtx_unlock(&vm_purgeable_queue_lock);
1120
1121 object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
1122 object->purgeable_queue_group = 0;
1123
1124 vm_object_lock_assert_exclusive(object);
1125
1126 return &purgeable_queues[type];
1127 }
1128
1129 void
1130 vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
1131 {
1132 LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
1133
1134 stat->count = stat->size = 0;
1135 vm_object_t object;
1136 for (object = (vm_object_t) queue_first(&queue->objq[group]);
1137 !queue_end(&queue->objq[group], (queue_entry_t) object);
1138 object = (vm_object_t) queue_next(&object->objq)) {
1139 if (!target_task || object->vo_purgeable_owner == target_task) {
1140 stat->count++;
1141 stat->size += (object->resident_page_count * PAGE_SIZE);
1142 }
1143 }
1144 return;
1145 }
1146
1147 void
1148 vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
1149 {
1150 purgeable_q_t queue;
1151 int group;
1152
1153 lck_mtx_lock(&vm_purgeable_queue_lock);
1154
1155 /* Populate fifo_data */
1156 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
1157 for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
1158 vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);
1159
1160 /* Populate lifo_data */
1161 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
1162 for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
1163 vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);
1164
1165 /* Populate obsolete data */
1166 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
1167 vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);
1168
1169 lck_mtx_unlock(&vm_purgeable_queue_lock);
1170 return;
1171 }
1172
1173 #if DEVELOPMENT || DEBUG
1174 static void
1175 vm_purgeable_account_volatile_queue(
1176 purgeable_q_t queue,
1177 int group,
1178 task_t task,
1179 pvm_account_info_t acnt_info)
1180 {
1181 vm_object_t object;
1182 uint64_t compressed_count;
1183
1184 for (object = (vm_object_t) queue_first(&queue->objq[group]);
1185 !queue_end(&queue->objq[group], (queue_entry_t) object);
1186 object = (vm_object_t) queue_next(&object->objq)) {
1187 if (object->vo_purgeable_owner == task) {
1188 compressed_count = vm_compressor_pager_get_count(object->pager);
1189 acnt_info->pvm_volatile_compressed_count += compressed_count;
1190 acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
1191 acnt_info->pvm_nonvolatile_count += object->wired_page_count;
1192 }
1193 }
1194
1195 }
1196
1197 /*
1198 * Walks the purgeable object queues and calculates the usage
1199 * associated with the objects for the given task.
1200 */
1201 kern_return_t
1202 vm_purgeable_account(
1203 task_t task,
1204 pvm_account_info_t acnt_info)
1205 {
1206 queue_head_t *nonvolatile_q;
1207 vm_object_t object;
1208 int group;
1209 int state;
1210 uint64_t compressed_count;
1211 purgeable_q_t volatile_q;
1212
1213
1214 if ((task == NULL) || (acnt_info == NULL)) {
1215 return KERN_INVALID_ARGUMENT;
1216 }
1217
1218 acnt_info->pvm_volatile_count = 0;
1219 acnt_info->pvm_volatile_compressed_count = 0;
1220 acnt_info->pvm_nonvolatile_count = 0;
1221 acnt_info->pvm_nonvolatile_compressed_count = 0;
1222
1223 lck_mtx_lock(&vm_purgeable_queue_lock);
1224
1225 nonvolatile_q = &purgeable_nonvolatile_queue;
1226 for (object = (vm_object_t) queue_first(nonvolatile_q);
1227 !queue_end(nonvolatile_q, (queue_entry_t) object);
1228 object = (vm_object_t) queue_next(&object->objq)) {
1229 if (object->vo_purgeable_owner == task) {
1230 state = object->purgable;
1231 compressed_count = vm_compressor_pager_get_count(object->pager);
1232 if (state == VM_PURGABLE_EMPTY) {
1233 acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
1234 acnt_info->pvm_volatile_compressed_count += compressed_count;
1235 } else {
1236 acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
1237 acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
1238 }
1239 acnt_info->pvm_nonvolatile_count += object->wired_page_count;
1240 }
1241 }
1242
1243 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
1244 vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);
1245
1246 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
1247 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1248 vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
1249 }
1250
1251 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
1252 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1253 vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
1254 }
1255 lck_mtx_unlock(&vm_purgeable_queue_lock);
1256
1257 acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
1258 acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
1259 acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
1260 acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);
1261
1262 return KERN_SUCCESS;
1263 }
1264 #endif /* DEVELOPMENT || DEBUG */
1265
1266 void
1267 vm_purgeable_disown(
1268 task_t task)
1269 {
1270 vm_object_t next_object;
1271 vm_object_t object;
1272 int collisions;
1273
1274 if (task == NULL) {
1275 return;
1276 }
1277
1278 /*
1279 	 * Scan the purgeable object queues for objects owned by "task".
1280 	 * This has to be done "atomically" under the "vm_purgeable_queue"
1281 	 * lock, to ensure that no new purgeable object gets associated
1282 	 * with this task or is moved between queues while we're scanning.
1283 */
1284
1285 /*
1286 	 * Scan the task's queue of owned objects for purgeable objects owned by "task".
1287 */
1288
1289 collisions = 0;
1290
1291 again:
1292 if (task->task_purgeable_disowned) {
1293 /* task has already disowned its purgeable memory */
1294 assert(task->task_volatile_objects == 0);
1295 assert(task->task_nonvolatile_objects == 0);
1296 return;
1297 }
1298
1299 lck_mtx_lock(&vm_purgeable_queue_lock);
1300 task_objq_lock(task);
1301
1302 task->task_purgeable_disowning = TRUE;
1303
1304 for (object = (vm_object_t) queue_first(&task->task_objq);
1305 !queue_end(&task->task_objq, (queue_entry_t) object);
1306 object = next_object) {
1307 if (task->task_nonvolatile_objects == 0 &&
1308 task->task_volatile_objects == 0) {
1309 /* no more purgeable objects owned by "task" */
1310 break;
1311 }
1312
1313 next_object = (vm_object_t) queue_next(&object->task_objq);
1314 if (object->purgable == VM_PURGABLE_DENY) {
1315 /* not a purgeable object: skip */
1316 continue;
1317 }
1318
1319 #if DEBUG
1320 assert(object->vo_purgeable_volatilizer == NULL);
1321 #endif /* DEBUG */
1322 assert(object->vo_purgeable_owner == task);
1323 if (!vm_object_lock_try(object)) {
1324 lck_mtx_unlock(&vm_purgeable_queue_lock);
1325 task_objq_unlock(task);
1326 mutex_pause(collisions++);
1327 goto again;
1328 }
1329 vm_purgeable_accounting(object,
1330 object->purgable,
1331 TRUE, /* disown */
1332 TRUE);/* task_objq_lock is locked */
1333 assert(object->vo_purgeable_owner == NULL);
1334 vm_object_unlock(object);
1335 }
1336
1337 if (__improbable(task->task_volatile_objects != 0 ||
1338 task->task_nonvolatile_objects != 0)) {
1339 panic("%s(%p): volatile=%d nonvolatile=%d q=%p q_first=%p q_last=%p",
1340 __FUNCTION__,
1341 task,
1342 task->task_volatile_objects,
1343 task->task_nonvolatile_objects,
1344 &task->task_objq,
1345 queue_first(&task->task_objq),
1346 queue_last(&task->task_objq));
1347 }
1348
1349 /* there shouldn't be any purgeable objects owned by task now */
1350 assert(task->task_volatile_objects == 0);
1351 assert(task->task_nonvolatile_objects == 0);
1352 assert(task->task_purgeable_disowning);
1353
1354 /* and we don't need to try and disown again */
1355 task->task_purgeable_disowned = TRUE;
1356
1357 lck_mtx_unlock(&vm_purgeable_queue_lock);
1358 task_objq_unlock(task);
1359 }
1360
1361
1362 static uint64_t
1363 vm_purgeable_queue_purge_task_owned(
1364 purgeable_q_t queue,
1365 int group,
1366 task_t task)
1367 {
1368 vm_object_t object = VM_OBJECT_NULL;
1369 int collisions = 0;
1370 uint64_t num_pages_purged = 0;
1371
1372 num_pages_purged = 0;
1373 collisions = 0;
1374
1375 look_again:
1376 lck_mtx_lock(&vm_purgeable_queue_lock);
1377
1378 for (object = (vm_object_t) queue_first(&queue->objq[group]);
1379 !queue_end(&queue->objq[group], (queue_entry_t) object);
1380 object = (vm_object_t) queue_next(&object->objq)) {
1381
1382 if (object->vo_purgeable_owner != task) {
1383 continue;
1384 }
1385
1386 /* found an object: try and grab it */
1387 if (!vm_object_lock_try(object)) {
1388 lck_mtx_unlock(&vm_purgeable_queue_lock);
1389 mutex_pause(collisions++);
1390 goto look_again;
1391 }
1392 /* got it ! */
1393
1394 collisions = 0;
1395
1396 /* remove object from purgeable queue */
1397 queue_remove(&queue->objq[group], object,
1398 vm_object_t, objq);
1399 object->objq.next = NULL;
1400 object->objq.prev = NULL;
1401 object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
1402 object->purgeable_queue_group = 0;
1403 /* one less volatile object for this object's owner */
1404 assert(object->vo_purgeable_owner == task);
1405 vm_purgeable_volatile_owner_update(task, -1);
1406
1407 #if DEBUG
1408 object->vo_purgeable_volatilizer = NULL;
1409 #endif /* DEBUG */
1410 queue_enter(&purgeable_nonvolatile_queue, object,
1411 vm_object_t, objq);
1412 assert(purgeable_nonvolatile_count >= 0);
1413 purgeable_nonvolatile_count++;
1414 assert(purgeable_nonvolatile_count > 0);
1415 /* one more nonvolatile object for this object's owner */
1416 assert(object->vo_purgeable_owner == task);
1417 vm_purgeable_nonvolatile_owner_update(task, +1);
1418
1419 /* unlock purgeable queues */
1420 lck_mtx_unlock(&vm_purgeable_queue_lock);
1421
1422 if (object->purgeable_when_ripe) {
1423 /* remove a token */
1424 vm_page_lock_queues();
1425 vm_purgeable_token_remove_first(queue);
1426 vm_page_unlock_queues();
1427 }
1428
1429 /* purge the object */
1430 num_pages_purged += vm_object_purge(object, 0);
1431
1432 assert(object->purgable == VM_PURGABLE_EMPTY);
1433 /* no change for purgeable accounting */
1434 vm_object_unlock(object);
1435
1436 /* we unlocked the purgeable queues, so start over */
1437 goto look_again;
1438 }
1439
1440 lck_mtx_unlock(&vm_purgeable_queue_lock);
1441
1442 return num_pages_purged;
1443 }
1444
1445 uint64_t
1446 vm_purgeable_purge_task_owned(
1447 task_t task)
1448 {
1449 purgeable_q_t queue = NULL;
1450 int group = 0;
1451 uint64_t num_pages_purged = 0;
1452
1453 num_pages_purged = 0;
1454
1455 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
1456 num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
1457 0,
1458 task);
1459
1460 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
1461 for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
1462 num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
1463 group,
1464 task);
1465
1466 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
1467 for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
1468 num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
1469 group,
1470 task);
1471
1472 return num_pages_purged;
1473 }
1474
1475 void
1476 vm_purgeable_nonvolatile_enqueue(
1477 vm_object_t object,
1478 task_t owner)
1479 {
1480 int page_count;
1481
1482 vm_object_lock_assert_exclusive(object);
1483
1484 assert(object->purgable == VM_PURGABLE_NONVOLATILE);
1485 assert(object->vo_purgeable_owner == NULL);
1486
1487 lck_mtx_lock(&vm_purgeable_queue_lock);
1488
1489 if (owner != NULL &&
1490 owner->task_purgeable_disowning) {
1491 /* task is exiting and no longer tracking purgeable objects */
1492 owner = NULL;
1493 }
1494
1495 object->vo_purgeable_owner = owner;
1496 #if DEBUG
1497 object->vo_purgeable_volatilizer = NULL;
1498 #endif /* DEBUG */
1499 if (owner != NULL) {
1500 task_objq_lock(owner);
1501 queue_enter(&owner->task_objq, object, vm_object_t, task_objq);
1502 task_objq_unlock(owner);
1503 }
1504
1505 #if DEBUG
1506 OSBacktrace(&object->purgeable_owner_bt[0], 16);
1507 #endif /* DEBUG */
1508
1509 page_count = object->resident_page_count;
1510 if (owner != NULL && page_count != 0) {
1511 ledger_credit(owner->ledger,
1512 task_ledgers.purgeable_nonvolatile,
1513 ptoa(page_count));
1514 ledger_credit(owner->ledger,
1515 task_ledgers.phys_footprint,
1516 ptoa(page_count));
1517 }
1518
1519 assert(object->objq.next == NULL);
1520 assert(object->objq.prev == NULL);
1521
1522 queue_enter(&purgeable_nonvolatile_queue, object,
1523 vm_object_t, objq);
1524 assert(purgeable_nonvolatile_count >= 0);
1525 purgeable_nonvolatile_count++;
1526 assert(purgeable_nonvolatile_count > 0);
1527 /* one more nonvolatile object for this object's owner */
1528 assert(object->vo_purgeable_owner == owner);
1529 vm_purgeable_nonvolatile_owner_update(owner, +1);
1530 lck_mtx_unlock(&vm_purgeable_queue_lock);
1531
1532 vm_object_lock_assert_exclusive(object);
1533 }
1534
1535 void
1536 vm_purgeable_nonvolatile_dequeue(
1537 vm_object_t object)
1538 {
1539 task_t owner;
1540
1541 vm_object_lock_assert_exclusive(object);
1542
1543 owner = object->vo_purgeable_owner;
1544 #if DEBUG
1545 assert(object->vo_purgeable_volatilizer == NULL);
1546 #endif /* DEBUG */
1547 if (owner != NULL) {
1548 /*
1549 * Update the owner's ledger to stop accounting
1550 * for this object.
1551 */
1552 vm_purgeable_accounting(object,
1553 object->purgable,
1554 TRUE, /* disown */
1555 FALSE); /* is task_objq locked? */
1556 }
1557
1558 lck_mtx_lock(&vm_purgeable_queue_lock);
1559 assert(object->objq.next != NULL);
1560 assert(object->objq.prev != NULL);
1561 queue_remove(&purgeable_nonvolatile_queue, object,
1562 vm_object_t, objq);
1563 object->objq.next = NULL;
1564 object->objq.prev = NULL;
1565 assert(purgeable_nonvolatile_count > 0);
1566 purgeable_nonvolatile_count--;
1567 assert(purgeable_nonvolatile_count >= 0);
1568 lck_mtx_unlock(&vm_purgeable_queue_lock);
1569
1570 vm_object_lock_assert_exclusive(object);
1571 }
1572
1573 void
1574 vm_purgeable_accounting(
1575 vm_object_t object,
1576 vm_purgable_t old_state,
1577 boolean_t disown,
1578 boolean_t task_objq_locked)
1579 {
1580 task_t owner;
1581 int resident_page_count;
1582 int wired_page_count;
1583 int compressed_page_count;
1584 boolean_t disown_on_the_fly;
1585
1586 vm_object_lock_assert_exclusive(object);
1587
1588 owner = object->vo_purgeable_owner;
1589 if (owner == NULL)
1590 return;
1591
1592 if (!disown && owner->task_purgeable_disowning) {
1593 /* task is disowning its purgeable objects: help it */
1594 disown_on_the_fly = TRUE;
1595 } else {
1596 disown_on_the_fly = FALSE;
1597 }
1598
1599 resident_page_count = object->resident_page_count;
1600 wired_page_count = object->wired_page_count;
1601 if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
1602 object->pager != NULL) {
1603 compressed_page_count =
1604 vm_compressor_pager_get_count(object->pager);
1605 } else {
1606 compressed_page_count = 0;
1607 }
1608
1609 if (old_state == VM_PURGABLE_VOLATILE ||
1610 old_state == VM_PURGABLE_EMPTY) {
1611 /* less volatile bytes in ledger */
1612 ledger_debit(owner->ledger,
1613 task_ledgers.purgeable_volatile,
1614 ptoa(resident_page_count - wired_page_count));
1615 /* less compressed volatile bytes in ledger */
1616 ledger_debit(owner->ledger,
1617 task_ledgers.purgeable_volatile_compressed,
1618 ptoa(compressed_page_count));
1619
1620 if (disown || !object->alive || object->terminating) {
1621 /* wired pages were accounted as "non-volatile"... */
1622 ledger_debit(owner->ledger,
1623 task_ledgers.purgeable_nonvolatile,
1624 ptoa(wired_page_count));
1625 /* ... and in phys_footprint */
1626 ledger_debit(owner->ledger,
1627 task_ledgers.phys_footprint,
1628 ptoa(wired_page_count));
1629
1630 if (!disown_on_the_fly &&
1631 (object->purgeable_queue_type ==
1632 PURGEABLE_Q_TYPE_MAX)) {
1633 /*
1634 * Not on a volatile queue: must be empty
1635 * or emptying.
1636 */
1637 vm_purgeable_nonvolatile_owner_update(owner,-1);
1638 } else {
1639 /* on a volatile queue */
1640 vm_purgeable_volatile_owner_update(owner, -1);
1641 }
1642 /* no more accounting for this dead object */
1643 owner = object->vo_purgeable_owner;
1644 if (! task_objq_locked) {
1645 task_objq_lock(owner);
1646 }
1647 task_objq_lock_assert_owned(owner);
1648 queue_remove(&owner->task_objq, object, vm_object_t, task_objq);
1649 if (! task_objq_locked) {
1650 task_objq_unlock(owner);
1651 }
1652 object->vo_purgeable_owner = NULL;
1653 #if DEBUG
1654 object->vo_purgeable_volatilizer = NULL;
1655 #endif /* DEBUG */
1656 return;
1657 }
1658
1659 /* more non-volatile bytes in ledger */
1660 ledger_credit(owner->ledger,
1661 task_ledgers.purgeable_nonvolatile,
1662 ptoa(resident_page_count - wired_page_count));
1663 /* more compressed non-volatile bytes in ledger */
1664 ledger_credit(owner->ledger,
1665 task_ledgers.purgeable_nonvolatile_compressed,
1666 ptoa(compressed_page_count));
1667 /* more footprint */
1668 ledger_credit(owner->ledger,
1669 task_ledgers.phys_footprint,
1670 ptoa(resident_page_count
1671 + compressed_page_count
1672 - wired_page_count));
1673
1674 } else if (old_state == VM_PURGABLE_NONVOLATILE) {
1675
1676 /* less non-volatile bytes in ledger */
1677 ledger_debit(owner->ledger,
1678 task_ledgers.purgeable_nonvolatile,
1679 ptoa(resident_page_count - wired_page_count));
1680 /* less compressed non-volatile bytes in ledger */
1681 ledger_debit(owner->ledger,
1682 task_ledgers.purgeable_nonvolatile_compressed,
1683 ptoa(compressed_page_count));
1684 /* less footprint */
1685 ledger_debit(owner->ledger,
1686 task_ledgers.phys_footprint,
1687 ptoa(resident_page_count
1688 + compressed_page_count
1689 - wired_page_count));
1690
1691 if (disown || !object->alive || object->terminating) {
1692 /* wired pages still accounted as "non-volatile" */
1693 ledger_debit(owner->ledger,
1694 task_ledgers.purgeable_nonvolatile,
1695 ptoa(wired_page_count));
1696 ledger_debit(owner->ledger,
1697 task_ledgers.phys_footprint,
1698 ptoa(wired_page_count));
1699
1700 /* one less "non-volatile" object for the owner */
1701 if (!disown_on_the_fly) {
1702 assert(object->purgeable_queue_type ==
1703 PURGEABLE_Q_TYPE_MAX);
1704 }
1705 vm_purgeable_nonvolatile_owner_update(owner, -1);
1706 /* no more accounting for this dead object */
1707 if (! task_objq_locked) {
1708 task_objq_lock(owner);
1709 }
1710 task_objq_lock_assert_owned(owner);
1711 queue_remove(&owner->task_objq, object, vm_object_t, task_objq);
1712 if (! task_objq_locked) {
1713 task_objq_unlock(owner);
1714 }
1715 object->vo_purgeable_owner = NULL;
1716 #if DEBUG
1717 object->vo_purgeable_volatilizer = NULL;
1718 #endif /* DEBUG */
1719 return;
1720 }
1721 /* more volatile bytes in ledger */
1722 ledger_credit(owner->ledger,
1723 task_ledgers.purgeable_volatile,
1724 ptoa(resident_page_count - wired_page_count));
1725 /* more compressed volatile bytes in ledger */
1726 ledger_credit(owner->ledger,
1727 task_ledgers.purgeable_volatile_compressed,
1728 ptoa(compressed_page_count));
1729 } else {
1730 panic("vm_purgeable_accounting(%p): "
1731 "unexpected old_state=%d\n",
1732 object, old_state);
1733 }
1734
1735 vm_object_lock_assert_exclusive(object);
1736 }
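/*
 * Illustrative example only (added commentary, not part of the original
 * source): for a hypothetical owned object with 10 resident pages, 2 of
 * them wired, and 4 compressed pages, a VM_PURGABLE_VOLATILE -> nonvolatile
 * transition (the non-disown path above) debits purgeable_volatile by
 * ptoa(10 - 2) and purgeable_volatile_compressed by ptoa(4), then credits
 * purgeable_nonvolatile by ptoa(10 - 2), purgeable_nonvolatile_compressed
 * by ptoa(4), and phys_footprint by ptoa(10 + 4 - 2).
 */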
1737
1738 void
1739 vm_purgeable_nonvolatile_owner_update(
1740 task_t owner,
1741 int delta)
1742 {
1743 if (owner == NULL || delta == 0) {
1744 return;
1745 }
1746
1747 if (delta > 0) {
1748 assert(owner->task_nonvolatile_objects >= 0);
1749 OSAddAtomic(delta, &owner->task_nonvolatile_objects);
1750 assert(owner->task_nonvolatile_objects > 0);
1751 } else {
1752 assert(owner->task_nonvolatile_objects > delta);
1753 OSAddAtomic(delta, &owner->task_nonvolatile_objects);
1754 assert(owner->task_nonvolatile_objects >= 0);
1755 }
1756 }
1757
1758 void
1759 vm_purgeable_volatile_owner_update(
1760 task_t owner,
1761 int delta)
1762 {
1763 if (owner == NULL || delta == 0) {
1764 return;
1765 }
1766
1767 if (delta > 0) {
1768 assert(owner->task_volatile_objects >= 0);
1769 OSAddAtomic(delta, &owner->task_volatile_objects);
1770 assert(owner->task_volatile_objects > 0);
1771 } else {
1772 assert(owner->task_volatile_objects > delta);
1773 OSAddAtomic(delta, &owner->task_volatile_objects);
1774 assert(owner->task_volatile_objects >= 0);
1775 }
1776 }
1777
1778 void
1779 vm_purgeable_compressed_update(
1780 vm_object_t object,
1781 int delta)
1782 {
1783 task_t owner;
1784
1785 vm_object_lock_assert_exclusive(object);
1786
1787 if (delta == 0 ||
1788 !object->internal ||
1789 object->purgable == VM_PURGABLE_DENY ||
1790 object->vo_purgeable_owner == NULL) {
1791 /* not an owned purgeable VM object: nothing to update */
1792 return;
1793 }
1794
1795 owner = object->vo_purgeable_owner;
1796 switch (object->purgable) {
1797 case VM_PURGABLE_DENY:
1798 break;
1799 case VM_PURGABLE_NONVOLATILE:
1800 if (delta > 0) {
1801 ledger_credit(owner->ledger,
1802 task_ledgers.purgeable_nonvolatile_compressed,
1803 ptoa(delta));
1804 ledger_credit(owner->ledger,
1805 task_ledgers.phys_footprint,
1806 ptoa(delta));
1807 } else {
1808 ledger_debit(owner->ledger,
1809 task_ledgers.purgeable_nonvolatile_compressed,
1810 ptoa(-delta));
1811 ledger_debit(owner->ledger,
1812 task_ledgers.phys_footprint,
1813 ptoa(-delta));
1814 }
1815 break;
1816 case VM_PURGABLE_VOLATILE:
1817 case VM_PURGABLE_EMPTY:
1818 if (delta > 0) {
1819 ledger_credit(owner->ledger,
1820 task_ledgers.purgeable_volatile_compressed,
1821 ptoa(delta));
1822 } else {
1823 ledger_debit(owner->ledger,
1824 task_ledgers.purgeable_volatile_compressed,
1825 ptoa(-delta));
1826 }
1827 break;
1828 default:
1829 panic("vm_purgeable_compressed_update(): "
1830 "unexpected purgable %d for object %p\n",
1831 object->purgable, object);
1832 }
1833 }