1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include <kern/sched_prim.h>
25 #include <kern/ledger.h>
26 #include <kern/policy_internal.h>
27
28 #include <libkern/OSDebug.h>
29
30 #include <mach/mach_types.h>
31
32 #include <machine/limits.h>
33
34 #include <vm/vm_compressor_pager.h>
35 #include <vm/vm_kern.h> /* kmem_alloc */
36 #include <vm/vm_page.h>
37 #include <vm/vm_pageout.h>
38 #include <vm/vm_protos.h>
39 #include <vm/vm_purgeable_internal.h>
40
41 #include <sys/kdebug.h>
42
43 /*
44 * LOCK ORDERING for task-owned purgeable objects
45 *
46 * Whenever we need to hold multiple locks while adding to, removing from,
47 * or scanning a task's task_objq list of VM objects it owns, locks should
48 * be taken in this order:
49 *
50 * VM object ==> vm_purgeable_queue_lock ==> owner_task->task_objq_lock
51 *
52 * If one needs to acquire the VM object lock after any of the other 2 locks,
53 * one needs to use vm_object_lock_try() and, if that fails, release the
54 * other locks and retake them all in the correct order.
55 */
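/*
 * A minimal sketch of the back-off protocol described above (illustration
 * only; the helper name is hypothetical). It mirrors how
 * vm_purgeable_object_purge_all() and vm_purgeable_disown() below recover
 * from a failed vm_object_lock_try() while holding vm_purgeable_queue_lock.
 */
#if 0 /* example only */
static void
example_lock_object_after_queue_lock(vm_object_t object)
{
	uint32_t collisions = 0;

	for (;;) {
		lck_mtx_lock(&vm_purgeable_queue_lock);
		if (vm_object_lock_try(object)) {
			/* both locks held without blocking out of order */
			return;
		}
		/* can't block on the object lock here: back off and retry */
		lck_mtx_unlock(&vm_purgeable_queue_lock);
		mutex_pause(collisions++);
	}
}
#endif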
56
57 extern vm_pressure_level_t memorystatus_vm_pressure_level;
58
59 struct token {
60 token_cnt_t count;
61 token_idx_t prev;
62 token_idx_t next;
63 };
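/*
 * Tokens are kept in the dynamically grown "tokens" array below and are
 * linked by array index rather than by pointer; index 0 is reserved as the
 * "null" link, which is why token_init_idx starts at 1. A token's count is,
 * roughly, the number of inactive-queue pages that still have to be advanced
 * (see vm_purgeable_q_advance_all) before the token ripens.
 */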
64
65 struct token *tokens;
66 token_idx_t token_q_max_cnt = 0;
67 vm_size_t token_q_cur_size = 0;
68
69 token_idx_t token_free_idx = 0; /* head of free queue */
70 token_idx_t token_init_idx = 1; /* token 0 is reserved!! */
71 int32_t token_new_pagecount = 0; /* count of pages that will
72 * be added onto token queue */
73
74 int available_for_purge = 0; /* increase when ripe token
75 * added, decrease when ripe
76 * token removed.
77 * protected by page_queue_lock
78 */
79
80 static int token_q_allocating = 0; /* flag for singlethreading
81 * allocator */
82
83 struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
84 queue_head_t purgeable_nonvolatile_queue;
85 int purgeable_nonvolatile_count;
86
87 decl_lck_mtx_data(, vm_purgeable_queue_lock)
88
89 static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);
90
91 static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);
92
93
94 #if MACH_ASSERT
95 static void
96 vm_purgeable_token_check_queue(purgeable_q_t queue)
97 {
98 int token_cnt = 0, page_cnt = 0;
99 token_idx_t token = queue->token_q_head;
100 token_idx_t unripe = 0;
101 int our_inactive_count;
102
103 #if DEVELOPMENT
104 static unsigned lightweight_check = 0;
105
106 /*
107 * Due to performance impact, only perform this check
108 * every 100 times on DEVELOPMENT kernels.
109 */
110 if (lightweight_check++ < 100) {
111 return;
112 }
113
114 lightweight_check = 0;
115 #endif
116
117 while (token) {
118 if (tokens[token].count != 0) {
119 assert(queue->token_q_unripe);
120 if (unripe == 0) {
121 assert(token == queue->token_q_unripe);
122 unripe = token;
123 }
124 page_cnt += tokens[token].count;
125 }
126 if (tokens[token].next == 0) {
127 assert(queue->token_q_tail == token);
128 }
129
130 token_cnt++;
131 token = tokens[token].next;
132 }
133
134 if (unripe) {
135 assert(queue->token_q_unripe == unripe);
136 }
137 assert(token_cnt == queue->debug_count_tokens);
138
139 /* obsolete queue doesn't maintain token counts */
140 if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
141 our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
142 assert(our_inactive_count >= 0);
143 assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
144 }
145 }
146 #endif
147
148 /*
149 * Add a token. Allocate token queue memory if necessary.
150 * Call with page queue locked.
151 */
152 kern_return_t
153 vm_purgeable_token_add(purgeable_q_t queue)
154 {
155 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
156
157 /* new token */
158 token_idx_t token;
159 enum purgeable_q_type i;
160
161 find_available_token:
162
163 if (token_free_idx) { /* unused tokens available */
164 token = token_free_idx;
165 token_free_idx = tokens[token_free_idx].next;
166 } else if (token_init_idx < token_q_max_cnt) { /* lazy token array init */
167 token = token_init_idx;
168 token_init_idx++;
169 } else { /* allocate more memory */
170 /* Wait if another thread is inside the memory alloc section */
171 while (token_q_allocating) {
172 wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
173 LCK_SLEEP_DEFAULT,
174 (event_t)&token_q_allocating,
175 THREAD_UNINT);
176 if (res != THREAD_AWAKENED) {
177 return KERN_ABORTED;
178 }
179 }
180 ;
181
182 /* Check whether memory is still maxed out */
183 if (token_init_idx < token_q_max_cnt) {
184 goto find_available_token;
185 }
186
187 /* Still no memory. Allocate some. */
188 token_q_allocating = 1;
189
190 /* Drop page queue lock so we can allocate */
191 vm_page_unlock_queues();
192
193 struct token *new_loc;
194 vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
195 kern_return_t result;
196
197 if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
198 result = KERN_RESOURCE_SHORTAGE;
199 } else {
200 if (token_q_cur_size) {
201 result = kmem_realloc(kernel_map,
202 (vm_offset_t) tokens,
203 token_q_cur_size,
204 (vm_offset_t *) &new_loc,
205 alloc_size, VM_KERN_MEMORY_OSFMK);
206 } else {
207 result = kmem_alloc(kernel_map,
208 (vm_offset_t *) &new_loc,
209 alloc_size, VM_KERN_MEMORY_OSFMK);
210 }
211 }
212
213 vm_page_lock_queues();
214
215 if (result) {
216 /* Unblock waiting threads */
217 token_q_allocating = 0;
218 thread_wakeup((event_t)&token_q_allocating);
219 return result;
220 }
221
222 /* If we get here, we allocated new memory. Update pointers and
223 * dealloc old range */
224 struct token *old_tokens = tokens;
225 tokens = new_loc;
226 vm_size_t old_token_q_cur_size = token_q_cur_size;
227 token_q_cur_size = alloc_size;
228 token_q_max_cnt = (token_idx_t) (token_q_cur_size /
229 sizeof(struct token));
230 assert(token_init_idx < token_q_max_cnt); /* We must have a free token now */
231
232 if (old_token_q_cur_size) { /* clean up old mapping */
233 vm_page_unlock_queues();
234 /* kmem_realloc leaves the old region mapped. Get rid of it. */
235 kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
236 vm_page_lock_queues();
237 }
238
239 /* Unblock waiting threads */
240 token_q_allocating = 0;
241 thread_wakeup((event_t)&token_q_allocating);
242
243 goto find_available_token;
244 }
245
246 assert(token);
247
248 /*
249 * the new pagecount we got needs to be applied to all queues except
250 * obsolete
251 */
252 for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
253 int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
254 assert(pages >= 0);
255 assert(pages <= TOKEN_COUNT_MAX);
256 purgeable_queues[i].new_pages = (int32_t) pages;
257 assert(purgeable_queues[i].new_pages == pages);
258 }
259 token_new_pagecount = 0;
260
261 /* set token counter value */
262 if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
263 tokens[token].count = queue->new_pages;
264 } else {
265 tokens[token].count = 0; /* all obsolete items are
266 * ripe immediately */
267 }
268 queue->new_pages = 0;
269
270 /* put token on token counter list */
271 tokens[token].next = 0;
272 if (queue->token_q_tail == 0) {
273 assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
274 queue->token_q_head = token;
275 tokens[token].prev = 0;
276 } else {
277 tokens[queue->token_q_tail].next = token;
278 tokens[token].prev = queue->token_q_tail;
279 }
280 if (queue->token_q_unripe == 0) { /* only ripe tokens (token
281 * count == 0) in queue */
282 if (tokens[token].count > 0) {
283 queue->token_q_unripe = token; /* first unripe token */
284 } else {
285 available_for_purge++; /* added a ripe token?
286 * increase available count */
287 }
288 }
289 queue->token_q_tail = token;
290
291 #if MACH_ASSERT
292 queue->debug_count_tokens++;
293 /* Check both queues, since we modified the new_pages count on each */
294 vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
295 vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);
296
297 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
298 queue->type,
299 tokens[token].count, /* num pages on token
300 * (last token) */
301 queue->debug_count_tokens,
302 0,
303 0);
304 #endif
305
306 return KERN_SUCCESS;
307 }
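/*
 * Minimal usage sketch for the function above (illustration only; this is a
 * hypothetical call site, not one of the real callers).
 */
#if 0 /* example only */
static kern_return_t
example_add_fifo_token(void)
{
	kern_return_t kr;

	vm_page_lock_queues();
	kr = vm_purgeable_token_add(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_page_unlock_queues();

	/* KERN_SUCCESS, KERN_ABORTED, KERN_RESOURCE_SHORTAGE or an alloc error */
	return kr;
}
#endif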
308
309 /*
310 * Remove first token from queue and return its index. Add its count to the
311 * count of the next token.
312 * Call with page queue locked.
313 */
314 static token_idx_t
315 vm_purgeable_token_remove_first(purgeable_q_t queue)
316 {
317 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
318
319 token_idx_t token;
320 token = queue->token_q_head;
321
322 assert(token);
323
324 if (token) {
325 assert(queue->token_q_tail);
326 if (queue->token_q_head == queue->token_q_unripe) {
327 /* no ripe tokens... must move unripe pointer */
328 queue->token_q_unripe = tokens[token].next;
329 } else {
330 /* we're removing a ripe token. decrease count */
331 available_for_purge--;
332 assert(available_for_purge >= 0);
333 }
334
335 if (queue->token_q_tail == queue->token_q_head) {
336 assert(tokens[token].next == 0);
337 }
338
339 queue->token_q_head = tokens[token].next;
340 if (queue->token_q_head) {
341 tokens[queue->token_q_head].count += tokens[token].count;
342 tokens[queue->token_q_head].prev = 0;
343 } else {
344 /* currently no other tokens in the queue */
345 /*
346 * the page count must be added to the next newly
347 * created token
348 */
349 queue->new_pages += tokens[token].count;
350 /* if head is zero, tail is too */
351 queue->token_q_tail = 0;
352 }
353
354 #if MACH_ASSERT
355 queue->debug_count_tokens--;
356 vm_purgeable_token_check_queue(queue);
357
358 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
359 queue->type,
360 tokens[queue->token_q_head].count, /* num pages on new
361 * first token */
362 token_new_pagecount, /* num pages waiting for
363 * next token */
364 available_for_purge,
365 0);
366 #endif
367 }
368 return token;
369 }
370
371 static token_idx_t
372 vm_purgeable_token_remove_last(purgeable_q_t queue)
373 {
374 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
375
376 token_idx_t token;
377 token = queue->token_q_tail;
378
379 assert(token);
380
381 if (token) {
382 assert(queue->token_q_head);
383
384 if (queue->token_q_tail == queue->token_q_head) {
385 assert(tokens[token].next == 0);
386 }
387
388 if (queue->token_q_unripe == 0) {
389 /* we're removing a ripe token. decrease count */
390 available_for_purge--;
391 assert(available_for_purge >= 0);
392 } else if (queue->token_q_unripe == token) {
393 /* we're removing the only unripe token */
394 queue->token_q_unripe = 0;
395 }
396
397 if (token == queue->token_q_head) {
398 /* token is the last one in the queue */
399 queue->token_q_head = 0;
400 queue->token_q_tail = 0;
401 } else {
402 token_idx_t new_tail;
403
404 new_tail = tokens[token].prev;
405
406 assert(new_tail);
407 assert(tokens[new_tail].next == token);
408
409 queue->token_q_tail = new_tail;
410 tokens[new_tail].next = 0;
411 }
412
413 queue->new_pages += tokens[token].count;
414
415 #if MACH_ASSERT
416 queue->debug_count_tokens--;
417 vm_purgeable_token_check_queue(queue);
418
419 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
420 queue->type,
421 tokens[queue->token_q_head].count, /* num pages on new
422 * first token */
423 token_new_pagecount, /* num pages waiting for
424 * next token */
425 available_for_purge,
426 0);
427 #endif
428 }
429 return token;
430 }
431
432 /*
433 * Delete first token from queue. Return token to token queue.
434 * Call with page queue locked.
435 */
436 void
437 vm_purgeable_token_delete_first(purgeable_q_t queue)
438 {
439 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
440 token_idx_t token = vm_purgeable_token_remove_first(queue);
441
442 if (token) {
443 /* stick removed token on free queue */
444 tokens[token].next = token_free_idx;
445 tokens[token].prev = 0;
446 token_free_idx = token;
447 }
448 }
449
450 void
451 vm_purgeable_token_delete_last(purgeable_q_t queue)
452 {
453 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
454 token_idx_t token = vm_purgeable_token_remove_last(queue);
455
456 if (token) {
457 /* stick removed token on free queue */
458 tokens[token].next = token_free_idx;
459 tokens[token].prev = 0;
460 token_free_idx = token;
461 }
462 }
463
464
465 /* Call with page queue locked. */
466 void
467 vm_purgeable_q_advance_all()
468 {
469 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
470
471 /* check queue counters - if they get really large, scale them back.
472 * They tend to get that large when there is no purgeable queue action */
473 int i;
474 if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) { /* a system idling for years might get there */
475 for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
476 int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
477 assert(pages >= 0);
478 assert(pages <= TOKEN_COUNT_MAX);
479 purgeable_queues[i].new_pages = (int32_t) pages;
480 assert(purgeable_queues[i].new_pages == pages);
481 }
482 token_new_pagecount = 0;
483 }
484
485 /*
486 * Decrement token counters. A token counter can reach zero, which means the
487 * object is ripe to be purged. It is not purged immediately, because that
488 * could cause several objects to be purged even if purging one would satisfy
489 * the memory needs. Instead, the pageout thread purges one after the other
490 * by calling vm_purgeable_object_purge_one and then rechecking the memory
491 * balance.
492 *
493 * No need to advance obsolete queue - all items are ripe there,
494 * always
495 */
496 for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
497 purgeable_q_t queue = &purgeable_queues[i];
498 uint32_t num_pages = 1;
499
500 /* Iterate over tokens as long as there are unripe tokens. */
501 while (queue->token_q_unripe) {
502 if (tokens[queue->token_q_unripe].count && num_pages) {
503 tokens[queue->token_q_unripe].count -= 1;
504 num_pages -= 1;
505 }
506
507 if (tokens[queue->token_q_unripe].count == 0) {
508 queue->token_q_unripe = tokens[queue->token_q_unripe].next;
509 available_for_purge++;
510 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
511 queue->type,
512 tokens[queue->token_q_head].count, /* num pages on new
513 * first token */
514 0,
515 available_for_purge,
516 0);
517 continue; /* One token ripened. Make sure to
518 * check the next. */
519 }
520 if (num_pages == 0) {
521 break; /* Current token not ripe and no more pages.
522 * Work done. */
523 }
524 }
525
526 /*
527 * if there are no unripe tokens in the queue, decrement the
528 * new_pages counter instead. new_pages can be negative, but it must be
529 * canceled out by token_new_pagecount -- since the inactive queue as a
530 * whole always contains a nonnegative number of pages
531 */
532 if (!queue->token_q_unripe) {
533 queue->new_pages -= num_pages;
534 assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
535 }
536 #if MACH_ASSERT
537 vm_purgeable_token_check_queue(queue);
538 #endif
539 }
540 }
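/*
 * Worked example for the ripening arithmetic above (illustration only):
 * with unripe tokens of count 3 and 2 on a queue, the first token ripens
 * after three calls to vm_purgeable_q_advance_all() (one page each), which
 * bumps available_for_purge; the second ripens after two more calls. When
 * a queue has no unripe tokens at all, the page is subtracted from
 * new_pages instead and absorbed by the next token that gets added.
 */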
541
542 /*
543 * Grab any ripe object and purge it: check the obsolete queue first, then go
544 * through each volatile group and select a queue with a ripe token.
545 * Start with the first group (0):
546 * 1. Look at queue. Is there an object?
547 * Yes - purge it. Remove token.
548 * No - check other queue. Is there an object?
549 * No - increment group, then go to (1)
550 * Yes - purge it. Remove token. If there is no ripe token, remove ripe
551 * token from other queue and migrate unripe token from this
552 * queue to other queue.
553 * Call with page queue locked.
554 */
555 static void
556 vm_purgeable_token_remove_ripe(purgeable_q_t queue)
557 {
558 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
559 assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
560 /* return token to free list. advance token list. */
561 token_idx_t new_head = tokens[queue->token_q_head].next;
562 tokens[queue->token_q_head].next = token_free_idx;
563 tokens[queue->token_q_head].prev = 0;
564 token_free_idx = queue->token_q_head;
565 queue->token_q_head = new_head;
566 tokens[new_head].prev = 0;
567 if (new_head == 0) {
568 queue->token_q_tail = 0;
569 }
570
571 #if MACH_ASSERT
572 queue->debug_count_tokens--;
573 vm_purgeable_token_check_queue(queue);
574 #endif
575
576 available_for_purge--;
577 assert(available_for_purge >= 0);
578 }
579
580 /*
581 * Delete a ripe token from the given queue. If there are no ripe tokens on
582 * that queue, delete a ripe token from queue2, and migrate an unripe token
583 * from queue to queue2
584 * Call with page queue locked.
585 */
586 static void
587 vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
588 {
589 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
590 assert(queue->token_q_head);
591
592 if (tokens[queue->token_q_head].count == 0) {
593 /* This queue has a ripe token. Remove. */
594 vm_purgeable_token_remove_ripe(queue);
595 } else {
596 assert(queue2);
597 /*
598 * queue2 must have a ripe token. Remove, and migrate one
599 * from queue to queue2.
600 */
601 vm_purgeable_token_remove_ripe(queue2);
602 /* migrate unripe token */
603 token_idx_t token;
604 token_cnt_t count;
605
606 /* remove token from queue1 */
607 assert(queue->token_q_unripe == queue->token_q_head); /* queue1 had no ripe
608 * tokens, remember? */
609 token = vm_purgeable_token_remove_first(queue);
610 assert(token);
611
612 count = tokens[token].count;
613
614 /* migrate to queue2 */
615 /* go to migration target loc */
616
617 token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;
618
619 while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
620 count -= tokens[token_to_insert_before].count;
621 token_to_insert_before = tokens[token_to_insert_before].next;
622 }
623
624 /* token_to_insert_before is now set correctly */
625
626 /* should the inserted token become the first unripe token? */
627 if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0)) {
628 queue2->token_q_unripe = token; /* if so, must update unripe pointer */
629 }
630 /*
631 * insert token.
632 * if inserting at end, reduce new_pages by that value;
633 * otherwise, reduce counter of next token
634 */
635
636 tokens[token].count = count;
637
638 if (token_to_insert_before != 0) {
639 token_to_insert_after = tokens[token_to_insert_before].prev;
640
641 tokens[token].next = token_to_insert_before;
642 tokens[token_to_insert_before].prev = token;
643
644 assert(tokens[token_to_insert_before].count >= count);
645 tokens[token_to_insert_before].count -= count;
646 } else {
647 /* if we ran off the end of the list, the token to insert after is the tail */
648 token_to_insert_after = queue2->token_q_tail;
649
650 tokens[token].next = 0;
651 queue2->token_q_tail = token;
652
653 assert(queue2->new_pages >= (int32_t) count);
654 queue2->new_pages -= count;
655 }
656
657 if (token_to_insert_after != 0) {
658 tokens[token].prev = token_to_insert_after;
659 tokens[token_to_insert_after].next = token;
660 } else {
661 /* is this case possible? */
662 tokens[token].prev = 0;
663 queue2->token_q_head = token;
664 }
665
666 #if MACH_ASSERT
667 queue2->debug_count_tokens++;
668 vm_purgeable_token_check_queue(queue2);
669 #endif
670 }
671 }
672
673 /* Find an object that can be locked. Returns locked object. */
674 /* Call with purgeable queue locked. */
675 static vm_object_t
676 vm_purgeable_object_find_and_lock(
677 purgeable_q_t queue,
678 int group,
679 boolean_t pick_ripe)
680 {
681 vm_object_t object, best_object;
682 int object_task_importance;
683 int best_object_task_importance;
684 int best_object_skipped;
685 int num_objects_skipped;
686 int try_lock_failed = 0;
687 int try_lock_succeeded = 0;
688 task_t owner;
689
690 best_object = VM_OBJECT_NULL;
691 best_object_task_importance = INT_MAX;
692
693 LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
694 /*
695 * Usually we would pick the first element from a queue. However, we
696 * might not be able to get a lock on it, in which case we try the
697 * remaining elements in order.
698 */
699
700 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
701 pick_ripe,
702 group,
703 VM_KERNEL_UNSLIDE_OR_PERM(queue),
704 0,
705 0);
706
707 num_objects_skipped = 0;
708 for (object = (vm_object_t) queue_first(&queue->objq[group]);
709 !queue_end(&queue->objq[group], (queue_entry_t) object);
710 object = (vm_object_t) queue_next(&object->objq),
711 num_objects_skipped++) {
712 /*
713 * To prevent us looping for an excessively long time, choose
714 * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
715 * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
716 * we keep going until we find the first eligible object.
717 */
718 if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
719 break;
720 }
721
722 if (pick_ripe &&
723 !object->purgeable_when_ripe) {
724 /* we want an object that has a ripe token */
725 continue;
726 }
727
728 object_task_importance = 0;
729
730 /*
731 * We don't want to use VM_OBJECT_OWNER() here: we want to
732 * distinguish kernel-owned and disowned objects.
733 * Disowned objects have no owner and will have no importance...
734 */
735 owner = object->vo_owner;
736 if (owner != NULL && owner != VM_OBJECT_OWNER_DISOWNED) {
737 #if CONFIG_EMBEDDED
738 #if CONFIG_JETSAM
739 object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE);
740 #endif /* CONFIG_JETSAM */
741 #else /* CONFIG_EMBEDDED */
742 object_task_importance = task_importance_estimate(owner);
743 #endif /* CONFIG_EMBEDDED */
744 }
745
746 if (object_task_importance < best_object_task_importance) {
747 if (vm_object_lock_try(object)) {
748 try_lock_succeeded++;
749 if (best_object != VM_OBJECT_NULL) {
750 /* forget about previous best object */
751 vm_object_unlock(best_object);
752 }
753 best_object = object;
754 best_object_task_importance = object_task_importance;
755 best_object_skipped = num_objects_skipped;
756 if (best_object_task_importance == 0) {
757 /* can't get any better: stop looking */
758 break;
759 }
760 } else {
761 try_lock_failed++;
762 }
763 }
764 }
765
766 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
767 num_objects_skipped, /* considered objects */
768 try_lock_failed,
769 try_lock_succeeded,
770 VM_KERNEL_UNSLIDE_OR_PERM(best_object),
771 ((best_object == NULL) ? 0 : best_object->resident_page_count));
772
773 object = best_object;
774
775 if (object == VM_OBJECT_NULL) {
776 return VM_OBJECT_NULL;
777 }
778
779 /* Locked. Great. We'll take it. Remove and return. */
780 // printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);
781
782 vm_object_lock_assert_exclusive(object);
783
784 queue_remove(&queue->objq[group], object,
785 vm_object_t, objq);
786 object->objq.next = NULL;
787 object->objq.prev = NULL;
788 object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
789 object->purgeable_queue_group = 0;
790 /* one less volatile object for this object's owner */
791 vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);
792
793 #if DEBUG
794 object->vo_purgeable_volatilizer = NULL;
795 #endif /* DEBUG */
796
797 /* keep queue of non-volatile objects */
798 queue_enter(&purgeable_nonvolatile_queue, object,
799 vm_object_t, objq);
800 assert(purgeable_nonvolatile_count >= 0);
801 purgeable_nonvolatile_count++;
802 assert(purgeable_nonvolatile_count > 0);
803 /* one more nonvolatile object for this object's owner */
804 vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);
805
806 #if MACH_ASSERT
807 queue->debug_count_objects--;
808 #endif
809 return object;
810 }
811
812 /* Can be called without holding locks */
813 void
814 vm_purgeable_object_purge_all(void)
815 {
816 enum purgeable_q_type i;
817 int group;
818 vm_object_t object;
819 unsigned int purged_count;
820 uint32_t collisions;
821
822 purged_count = 0;
823 collisions = 0;
824
825 restart:
826 lck_mtx_lock(&vm_purgeable_queue_lock);
827 /* Cycle through all queues */
828 for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
829 purgeable_q_t queue;
830
831 queue = &purgeable_queues[i];
832
833 /*
834 * Look through all groups, starting from the lowest. If
835 * we find an object in that group, try to lock it (this can
836 * fail). If locking is successful, we can drop the queue
837 * lock, remove a token and then purge the object.
838 */
839 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
840 while (!queue_empty(&queue->objq[group])) {
841 object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
842 if (object == VM_OBJECT_NULL) {
843 lck_mtx_unlock(&vm_purgeable_queue_lock);
844 mutex_pause(collisions++);
845 goto restart;
846 }
847
848 lck_mtx_unlock(&vm_purgeable_queue_lock);
849
850 /* Lock the page queue here so we don't hold it
851 * over the whole, lengthy operation */
852 if (object->purgeable_when_ripe) {
853 vm_page_lock_queues();
854 vm_purgeable_token_remove_first(queue);
855 vm_page_unlock_queues();
856 }
857
858 (void) vm_object_purge(object, 0);
859 assert(object->purgable == VM_PURGABLE_EMPTY);
860 /* no change in purgeable accounting */
861
862 vm_object_unlock(object);
863 purged_count++;
864 goto restart;
865 }
866 assert(queue->debug_count_objects >= 0);
867 }
868 }
869 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
870 purged_count, /* # of purged objects */
871 0,
872 available_for_purge,
873 0,
874 0);
875 lck_mtx_unlock(&vm_purgeable_queue_lock);
876 return;
877 }
878
879 boolean_t
880 vm_purgeable_object_purge_one_unlocked(
881 int force_purge_below_group)
882 {
883 boolean_t retval;
884
885 vm_page_lock_queues();
886 retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
887 vm_page_unlock_queues();
888
889 return retval;
890 }
891
892 boolean_t
893 vm_purgeable_object_purge_one(
894 int force_purge_below_group,
895 int flags)
896 {
897 enum purgeable_q_type i;
898 int group;
899 vm_object_t object = 0;
900 purgeable_q_t queue, queue2;
901 boolean_t forced_purge;
902 unsigned int resident_page_count;
903
904
905 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_START,
906 force_purge_below_group, flags, 0, 0, 0);
907
908 /* Need the page queue lock since we'll be changing the token queue. */
909 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
910 lck_mtx_lock(&vm_purgeable_queue_lock);
911
912 /* Cycle through all queues */
913 for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
914 queue = &purgeable_queues[i];
915
916 if (force_purge_below_group == 0) {
917 /*
918 * Are there any ripe tokens on this queue? If yes,
919 * we'll find an object to purge there
920 */
921 if (!queue->token_q_head) {
922 /* no token: look at next purgeable queue */
923 continue;
924 }
925
926 if (tokens[queue->token_q_head].count != 0) {
927 /* no ripe token: next queue */
928 continue;
929 }
930 }
931
932 /*
933 * Now look through all groups, starting from the lowest. If
934 * we find an object in that group, try to lock it (this can
935 * fail). If locking is successful, we can drop the queue
936 * lock, remove a token and then purge the object.
937 */
938 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
939 if (!queue->token_q_head ||
940 tokens[queue->token_q_head].count != 0) {
941 /* no tokens or no ripe tokens */
942
943 if (group >= force_purge_below_group) {
944 /* no more groups to force-purge */
945 break;
946 }
947
948 /*
949 * Try and purge an object in this group
950 * even though no tokens are ripe.
951 */
952 if (!queue_empty(&queue->objq[group]) &&
953 (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
954 lck_mtx_unlock(&vm_purgeable_queue_lock);
955 if (object->purgeable_when_ripe) {
956 vm_purgeable_token_delete_first(queue);
957 }
958 forced_purge = TRUE;
959 goto purge_now;
960 }
961
962 /* nothing to purge in this group: next group */
963 continue;
964 }
965 if (!queue_empty(&queue->objq[group]) &&
966 (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
967 lck_mtx_unlock(&vm_purgeable_queue_lock);
968 if (object->purgeable_when_ripe) {
969 vm_purgeable_token_choose_and_delete_ripe(queue, 0);
970 }
971 forced_purge = FALSE;
972 goto purge_now;
973 }
974 if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
975 /* This is the token migration case, and it works between
976 * FIFO and LIFO only */
977 queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
978 PURGEABLE_Q_TYPE_FIFO :
979 PURGEABLE_Q_TYPE_LIFO];
980
981 if (!queue_empty(&queue2->objq[group]) &&
982 (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
983 lck_mtx_unlock(&vm_purgeable_queue_lock);
984 if (object->purgeable_when_ripe) {
985 vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
986 }
987 forced_purge = FALSE;
988 goto purge_now;
989 }
990 }
991 assert(queue->debug_count_objects >= 0);
992 }
993 }
994 /*
995 * because we have to do a try_lock on the objects which could fail,
996 * we could end up with no object to purge at this time, even though
997 * we have objects in a purgeable state
998 */
999 lck_mtx_unlock(&vm_purgeable_queue_lock);
1000
1001 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
1002 0, 0, available_for_purge, 0, 0);
1003
1004 return FALSE;
1005
1006 purge_now:
1007
1008 assert(object);
1009 vm_page_unlock_queues(); /* Unlock for call to vm_object_purge() */
1010 // printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_owner, task_importance_estimate(object->vo_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
1011 resident_page_count = object->resident_page_count;
1012 (void) vm_object_purge(object, flags);
1013 assert(object->purgable == VM_PURGABLE_EMPTY);
1014 /* no change in purgeable accounting */
1015 vm_object_unlock(object);
1016 vm_page_lock_queues();
1017
1018 vm_pageout_vminfo.vm_pageout_pages_purged += resident_page_count;
1019
1020 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
1021 VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
1022 resident_page_count,
1023 available_for_purge,
1024 0,
1025 0);
1026
1027 return TRUE;
1028 }
1029
1030 /* Called with object lock held */
1031 void
1032 vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
1033 {
1034 vm_object_lock_assert_exclusive(object);
1035 lck_mtx_lock(&vm_purgeable_queue_lock);
1036
1037 assert(object->objq.next != NULL);
1038 assert(object->objq.prev != NULL);
1039 queue_remove(&purgeable_nonvolatile_queue, object,
1040 vm_object_t, objq);
1041 object->objq.next = NULL;
1042 object->objq.prev = NULL;
1043 assert(purgeable_nonvolatile_count > 0);
1044 purgeable_nonvolatile_count--;
1045 assert(purgeable_nonvolatile_count >= 0);
1046 /* one less nonvolatile object for this object's owner */
1047 vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), -1);
1048
1049 if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE) {
1050 group = 0;
1051 }
1052
1053 if (queue->type != PURGEABLE_Q_TYPE_LIFO) { /* fifo and obsolete are
1054 * fifo-queued */
1055 queue_enter(&queue->objq[group], object, vm_object_t, objq); /* last to die */
1056 } else {
1057 queue_enter_first(&queue->objq[group], object, vm_object_t, objq); /* first to die */
1058 }
1059 /* one more volatile object for this object's owner */
1060 vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), +1);
1061
1062 object->purgeable_queue_type = queue->type;
1063 object->purgeable_queue_group = group;
1064
1065 #if DEBUG
1066 assert(object->vo_purgeable_volatilizer == NULL);
1067 object->vo_purgeable_volatilizer = current_task();
1068 OSBacktrace(&object->purgeable_volatilizer_bt[0],
1069 ARRAY_COUNT(object->purgeable_volatilizer_bt));
1070 #endif /* DEBUG */
1071
1072 #if MACH_ASSERT
1073 queue->debug_count_objects++;
1074 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
1075 0,
1076 tokens[queue->token_q_head].count,
1077 queue->type,
1078 group,
1079 0);
1080 #endif
1081
1082 lck_mtx_unlock(&vm_purgeable_queue_lock);
1083 }
1084
1085 /* Look for object. If found, remove from purgeable queue. */
1086 /* Called with object lock held */
1087 purgeable_q_t
1088 vm_purgeable_object_remove(vm_object_t object)
1089 {
1090 int group;
1091 enum purgeable_q_type type;
1092 purgeable_q_t queue;
1093
1094 vm_object_lock_assert_exclusive(object);
1095
1096 type = object->purgeable_queue_type;
1097 group = object->purgeable_queue_group;
1098
1099 if (type == PURGEABLE_Q_TYPE_MAX) {
1100 if (object->objq.prev || object->objq.next) {
1101 panic("unmarked object on purgeable q");
1102 }
1103
1104 return NULL;
1105 } else if (!(object->objq.prev && object->objq.next)) {
1106 panic("marked object not on purgeable q");
1107 }
1108
1109 lck_mtx_lock(&vm_purgeable_queue_lock);
1110
1111 queue = &purgeable_queues[type];
1112
1113 queue_remove(&queue->objq[group], object, vm_object_t, objq);
1114 object->objq.next = NULL;
1115 object->objq.prev = NULL;
1116 /* one less volatile object for this object's owner */
1117 vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);
1118 #if DEBUG
1119 object->vo_purgeable_volatilizer = NULL;
1120 #endif /* DEBUG */
1121 /* keep queue of non-volatile objects */
1122 if (object->alive && !object->terminating) {
1123 queue_enter(&purgeable_nonvolatile_queue, object,
1124 vm_object_t, objq);
1125 assert(purgeable_nonvolatile_count >= 0);
1126 purgeable_nonvolatile_count++;
1127 assert(purgeable_nonvolatile_count > 0);
1128 /* one more nonvolatile object for this object's owner */
1129 vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);
1130 }
1131
1132 #if MACH_ASSERT
1133 queue->debug_count_objects--;
1134 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
1135 0,
1136 tokens[queue->token_q_head].count,
1137 queue->type,
1138 group,
1139 0);
1140 #endif
1141
1142 lck_mtx_unlock(&vm_purgeable_queue_lock);
1143
1144 object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
1145 object->purgeable_queue_group = 0;
1146
1147 vm_object_lock_assert_exclusive(object);
1148
1149 return &purgeable_queues[type];
1150 }
1151
1152 void
1153 vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
1154 {
1155 LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
1156
1157 stat->count = stat->size = 0;
1158 vm_object_t object;
1159 for (object = (vm_object_t) queue_first(&queue->objq[group]);
1160 !queue_end(&queue->objq[group], (queue_entry_t) object);
1161 object = (vm_object_t) queue_next(&object->objq)) {
1162 if (!target_task || VM_OBJECT_OWNER(object) == target_task) {
1163 stat->count++;
1164 stat->size += (object->resident_page_count * PAGE_SIZE);
1165 }
1166 }
1167 return;
1168 }
1169
1170 void
1171 vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
1172 {
1173 purgeable_q_t queue;
1174 int group;
1175
1176 lck_mtx_lock(&vm_purgeable_queue_lock);
1177
1178 /* Populate fifo_data */
1179 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
1180 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1181 vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);
1182 }
1183
1184 /* Populate lifo_data */
1185 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
1186 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1187 vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);
1188 }
1189
1190 /* Populate obsolete data */
1191 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
1192 vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);
1193
1194 lck_mtx_unlock(&vm_purgeable_queue_lock);
1195 return;
1196 }
1197
1198 #if DEVELOPMENT || DEBUG
1199 static void
1200 vm_purgeable_account_volatile_queue(
1201 purgeable_q_t queue,
1202 int group,
1203 task_t task,
1204 pvm_account_info_t acnt_info)
1205 {
1206 vm_object_t object;
1207 uint64_t compressed_count;
1208
1209 for (object = (vm_object_t) queue_first(&queue->objq[group]);
1210 !queue_end(&queue->objq[group], (queue_entry_t) object);
1211 object = (vm_object_t) queue_next(&object->objq)) {
1212 if (VM_OBJECT_OWNER(object) == task) {
1213 compressed_count = vm_compressor_pager_get_count(object->pager);
1214 acnt_info->pvm_volatile_compressed_count += compressed_count;
1215 acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
1216 acnt_info->pvm_nonvolatile_count += object->wired_page_count;
1217 }
1218 }
1219 }
1220
1221 /*
1222 * Walks the purgeable object queues and calculates the usage
1223 * associated with the objects for the given task.
1224 */
1225 kern_return_t
1226 vm_purgeable_account(
1227 task_t task,
1228 pvm_account_info_t acnt_info)
1229 {
1230 queue_head_t *nonvolatile_q;
1231 vm_object_t object;
1232 int group;
1233 int state;
1234 uint64_t compressed_count;
1235 purgeable_q_t volatile_q;
1236
1237
1238 if ((task == NULL) || (acnt_info == NULL)) {
1239 return KERN_INVALID_ARGUMENT;
1240 }
1241
1242 acnt_info->pvm_volatile_count = 0;
1243 acnt_info->pvm_volatile_compressed_count = 0;
1244 acnt_info->pvm_nonvolatile_count = 0;
1245 acnt_info->pvm_nonvolatile_compressed_count = 0;
1246
1247 lck_mtx_lock(&vm_purgeable_queue_lock);
1248
1249 nonvolatile_q = &purgeable_nonvolatile_queue;
1250 for (object = (vm_object_t) queue_first(nonvolatile_q);
1251 !queue_end(nonvolatile_q, (queue_entry_t) object);
1252 object = (vm_object_t) queue_next(&object->objq)) {
1253 if (VM_OBJECT_OWNER(object) == task) {
1254 state = object->purgable;
1255 compressed_count = vm_compressor_pager_get_count(object->pager);
1256 if (state == VM_PURGABLE_EMPTY) {
1257 acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
1258 acnt_info->pvm_volatile_compressed_count += compressed_count;
1259 } else {
1260 acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
1261 acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
1262 }
1263 acnt_info->pvm_nonvolatile_count += object->wired_page_count;
1264 }
1265 }
1266
1267 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
1268 vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);
1269
1270 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
1271 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1272 vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
1273 }
1274
1275 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
1276 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1277 vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
1278 }
1279 lck_mtx_unlock(&vm_purgeable_queue_lock);
1280
1281 acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
1282 acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
1283 acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
1284 acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);
1285
1286 return KERN_SUCCESS;
1287 }
1288 #endif /* DEVELOPMENT || DEBUG */
1289
1290 void
1291 vm_purgeable_disown(
1292 task_t task)
1293 {
1294 vm_object_t next_object;
1295 vm_object_t object;
1296 int collisions;
1297
1298 if (task == NULL) {
1299 return;
1300 }
1301
1302 /*
1303 * Scan the purgeable object queues for objects owned by "task".
1304 * This has to be done "atomically" under the "vm_purgeable_queue"
1305 * lock, to ensure that no new purgeable object gets associated
1306 * with this task or is moved between queues while we're scanning.
1307 */
1308
1309 /*
1310 * Scan non-volatile queue for objects owned by "task".
1311 */
1312
1313 collisions = 0;
1314
1315 again:
1316 if (task->task_purgeable_disowned) {
1317 /* task has already disowned its purgeable memory */
1318 assert(task->task_volatile_objects == 0);
1319 assert(task->task_nonvolatile_objects == 0);
1320 return;
1321 }
1322
1323 lck_mtx_lock(&vm_purgeable_queue_lock);
1324 task_objq_lock(task);
1325
1326 task->task_purgeable_disowning = TRUE;
1327
1328 for (object = (vm_object_t) queue_first(&task->task_objq);
1329 !queue_end(&task->task_objq, (queue_entry_t) object);
1330 object = next_object) {
1331 if (task->task_nonvolatile_objects == 0 &&
1332 task->task_volatile_objects == 0) {
1333 /* no more purgeable objects owned by "task" */
1334 break;
1335 }
1336
1337 next_object = (vm_object_t) queue_next(&object->task_objq);
1338 if (object->purgable == VM_PURGABLE_DENY) {
1339 /* not a purgeable object: skip */
1340 continue;
1341 }
1342
1343 #if DEBUG
1344 assert(object->vo_purgeable_volatilizer == NULL);
1345 #endif /* DEBUG */
1346 assert(object->vo_owner == task);
1347 if (!vm_object_lock_try(object)) {
1348 lck_mtx_unlock(&vm_purgeable_queue_lock);
1349 task_objq_unlock(task);
1350 mutex_pause(collisions++);
1351 goto again;
1352 }
1353 /* transfer ownership to the kernel */
1354 assert(VM_OBJECT_OWNER(object) != kernel_task);
1355 vm_object_ownership_change(
1356 object,
1357 object->vo_ledger_tag, /* unchanged */
1358 VM_OBJECT_OWNER_DISOWNED, /* new owner */
1359 TRUE); /* old_owner->task_objq locked */
1360 assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
1361 vm_object_unlock(object);
1362 }
1363
1364 if (__improbable(task->task_volatile_objects != 0 ||
1365 task->task_nonvolatile_objects != 0)) {
1366 panic("%s(%p): volatile=%d nonvolatile=%d q=%p q_first=%p q_last=%p",
1367 __FUNCTION__,
1368 task,
1369 task->task_volatile_objects,
1370 task->task_nonvolatile_objects,
1371 &task->task_objq,
1372 queue_first(&task->task_objq),
1373 queue_last(&task->task_objq));
1374 }
1375
1376 /* there shouldn't be any purgeable objects owned by task now */
1377 assert(task->task_volatile_objects == 0);
1378 assert(task->task_nonvolatile_objects == 0);
1379 assert(task->task_purgeable_disowning);
1380
1381 /* and we don't need to try and disown again */
1382 task->task_purgeable_disowned = TRUE;
1383
1384 lck_mtx_unlock(&vm_purgeable_queue_lock);
1385 task_objq_unlock(task);
1386 }
1387
1388
1389 static uint64_t
1390 vm_purgeable_queue_purge_task_owned(
1391 purgeable_q_t queue,
1392 int group,
1393 task_t task)
1394 {
1395 vm_object_t object = VM_OBJECT_NULL;
1396 int collisions = 0;
1397 uint64_t num_pages_purged = 0;
1398
1399 num_pages_purged = 0;
1400 collisions = 0;
1401
1402 look_again:
1403 lck_mtx_lock(&vm_purgeable_queue_lock);
1404
1405 for (object = (vm_object_t) queue_first(&queue->objq[group]);
1406 !queue_end(&queue->objq[group], (queue_entry_t) object);
1407 object = (vm_object_t) queue_next(&object->objq)) {
1408 if (object->vo_owner != task) {
1409 continue;
1410 }
1411
1412 /* found an object: try and grab it */
1413 if (!vm_object_lock_try(object)) {
1414 lck_mtx_unlock(&vm_purgeable_queue_lock);
1415 mutex_pause(collisions++);
1416 goto look_again;
1417 }
1418 /* got it ! */
1419
1420 collisions = 0;
1421
1422 /* remove object from purgeable queue */
1423 queue_remove(&queue->objq[group], object,
1424 vm_object_t, objq);
1425 object->objq.next = NULL;
1426 object->objq.prev = NULL;
1427 object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
1428 object->purgeable_queue_group = 0;
1429 /* one less volatile object for this object's owner */
1430 assert(object->vo_owner == task);
1431 vm_purgeable_volatile_owner_update(task, -1);
1432
1433 #if DEBUG
1434 object->vo_purgeable_volatilizer = NULL;
1435 #endif /* DEBUG */
1436 queue_enter(&purgeable_nonvolatile_queue, object,
1437 vm_object_t, objq);
1438 assert(purgeable_nonvolatile_count >= 0);
1439 purgeable_nonvolatile_count++;
1440 assert(purgeable_nonvolatile_count > 0);
1441 /* one more nonvolatile object for this object's owner */
1442 assert(object->vo_owner == task);
1443 vm_purgeable_nonvolatile_owner_update(task, +1);
1444
1445 /* unlock purgeable queues */
1446 lck_mtx_unlock(&vm_purgeable_queue_lock);
1447
1448 if (object->purgeable_when_ripe) {
1449 /* remove a token */
1450 vm_page_lock_queues();
1451 vm_purgeable_token_remove_first(queue);
1452 vm_page_unlock_queues();
1453 }
1454
1455 /* purge the object */
1456 num_pages_purged += vm_object_purge(object, 0);
1457
1458 assert(object->purgable == VM_PURGABLE_EMPTY);
1459 /* no change for purgeable accounting */
1460 vm_object_unlock(object);
1461
1462 /* we unlocked the purgeable queues, so start over */
1463 goto look_again;
1464 }
1465
1466 lck_mtx_unlock(&vm_purgeable_queue_lock);
1467
1468 return num_pages_purged;
1469 }
1470
1471 uint64_t
1472 vm_purgeable_purge_task_owned(
1473 task_t task)
1474 {
1475 purgeable_q_t queue = NULL;
1476 int group = 0;
1477 uint64_t num_pages_purged = 0;
1478
1479 num_pages_purged = 0;
1480
1481 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
1482 num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
1483 0,
1484 task);
1485
1486 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
1487 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1488 num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
1489 group,
1490 task);
1491 }
1492
1493 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
1494 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1495 num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
1496 group,
1497 task);
1498 }
1499
1500 return num_pages_purged;
1501 }
1502
1503 void
1504 vm_purgeable_nonvolatile_enqueue(
1505 vm_object_t object,
1506 task_t owner)
1507 {
1508 vm_object_lock_assert_exclusive(object);
1509
1510 assert(object->purgable == VM_PURGABLE_NONVOLATILE);
1511 assert(object->vo_owner == NULL);
1512
1513 lck_mtx_lock(&vm_purgeable_queue_lock);
1514
1515 if (owner != NULL &&
1516 owner->task_purgeable_disowning) {
1517 /* task is exiting and no longer tracking purgeable objects */
1518 owner = VM_OBJECT_OWNER_DISOWNED;
1519 }
1520 if (owner == NULL) {
1521 owner = kernel_task;
1522 }
1523 #if DEBUG
1524 OSBacktrace(&object->purgeable_owner_bt[0],
1525 ARRAY_COUNT(object->purgeable_owner_bt));
1526 object->vo_purgeable_volatilizer = NULL;
1527 #endif /* DEBUG */
1528
1529 vm_object_ownership_change(object,
1530 object->vo_ledger_tag, /* tag unchanged */
1531 owner,
1532 FALSE); /* task_objq_locked */
1533
1534 assert(object->objq.next == NULL);
1535 assert(object->objq.prev == NULL);
1536
1537 queue_enter(&purgeable_nonvolatile_queue, object,
1538 vm_object_t, objq);
1539 assert(purgeable_nonvolatile_count >= 0);
1540 purgeable_nonvolatile_count++;
1541 assert(purgeable_nonvolatile_count > 0);
1542 lck_mtx_unlock(&vm_purgeable_queue_lock);
1543
1544 vm_object_lock_assert_exclusive(object);
1545 }
1546
1547 void
1548 vm_purgeable_nonvolatile_dequeue(
1549 vm_object_t object)
1550 {
1551 task_t owner;
1552
1553 vm_object_lock_assert_exclusive(object);
1554
1555 owner = VM_OBJECT_OWNER(object);
1556 #if DEBUG
1557 assert(object->vo_purgeable_volatilizer == NULL);
1558 #endif /* DEBUG */
1559 if (owner != NULL) {
1560 /*
1561 * Update the owner's ledger to stop accounting
1562 * for this object.
1563 */
1564 /* transfer ownership to the kernel */
1565 assert(VM_OBJECT_OWNER(object) != kernel_task);
1566 vm_object_ownership_change(
1567 object,
1568 object->vo_ledger_tag, /* unchanged */
1569 VM_OBJECT_OWNER_DISOWNED, /* new owner */
1570 FALSE); /* old_owner->task_objq locked */
1571 assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
1572 }
1573
1574 lck_mtx_lock(&vm_purgeable_queue_lock);
1575 assert(object->objq.next != NULL);
1576 assert(object->objq.prev != NULL);
1577 queue_remove(&purgeable_nonvolatile_queue, object,
1578 vm_object_t, objq);
1579 object->objq.next = NULL;
1580 object->objq.prev = NULL;
1581 assert(purgeable_nonvolatile_count > 0);
1582 purgeable_nonvolatile_count--;
1583 assert(purgeable_nonvolatile_count >= 0);
1584 lck_mtx_unlock(&vm_purgeable_queue_lock);
1585
1586 vm_object_lock_assert_exclusive(object);
1587 }
1588
1589 void
1590 vm_purgeable_accounting(
1591 vm_object_t object,
1592 vm_purgable_t old_state)
1593 {
1594 task_t owner;
1595 int resident_page_count;
1596 int wired_page_count;
1597 int compressed_page_count;
1598 int ledger_idx_volatile;
1599 int ledger_idx_nonvolatile;
1600 int ledger_idx_volatile_compressed;
1601 int ledger_idx_nonvolatile_compressed;
1602 boolean_t do_footprint;
1603
1604 vm_object_lock_assert_exclusive(object);
1605 assert(object->purgable != VM_PURGABLE_DENY);
1606
1607 owner = VM_OBJECT_OWNER(object);
1608 if (owner == NULL ||
1609 object->purgable == VM_PURGABLE_DENY) {
1610 return;
1611 }
1612
1613 vm_object_ledger_tag_ledgers(object,
1614 &ledger_idx_volatile,
1615 &ledger_idx_nonvolatile,
1616 &ledger_idx_volatile_compressed,
1617 &ledger_idx_nonvolatile_compressed,
1618 &do_footprint);
1619
1620 resident_page_count = object->resident_page_count;
1621 wired_page_count = object->wired_page_count;
1622 if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
1623 object->pager != NULL) {
1624 compressed_page_count =
1625 vm_compressor_pager_get_count(object->pager);
1626 } else {
1627 compressed_page_count = 0;
1628 }
1629
1630 if (old_state == VM_PURGABLE_VOLATILE ||
1631 old_state == VM_PURGABLE_EMPTY) {
1632 /* less volatile bytes in ledger */
1633 ledger_debit(owner->ledger,
1634 ledger_idx_volatile,
1635 ptoa_64(resident_page_count - wired_page_count));
1636 /* less compressed volatile bytes in ledger */
1637 ledger_debit(owner->ledger,
1638 ledger_idx_volatile_compressed,
1639 ptoa_64(compressed_page_count));
1640
1641 /* more non-volatile bytes in ledger */
1642 ledger_credit(owner->ledger,
1643 ledger_idx_nonvolatile,
1644 ptoa_64(resident_page_count - wired_page_count));
1645 /* more compressed non-volatile bytes in ledger */
1646 ledger_credit(owner->ledger,
1647 ledger_idx_nonvolatile_compressed,
1648 ptoa_64(compressed_page_count));
1649 if (do_footprint) {
1650 /* more footprint */
1651 ledger_credit(owner->ledger,
1652 task_ledgers.phys_footprint,
1653 ptoa_64(resident_page_count
1654 + compressed_page_count
1655 - wired_page_count));
1656 }
1657 } else if (old_state == VM_PURGABLE_NONVOLATILE) {
1658 /* less non-volatile bytes in ledger */
1659 ledger_debit(owner->ledger,
1660 ledger_idx_nonvolatile,
1661 ptoa_64(resident_page_count - wired_page_count));
1662 /* less compressed non-volatile bytes in ledger */
1663 ledger_debit(owner->ledger,
1664 ledger_idx_nonvolatile_compressed,
1665 ptoa_64(compressed_page_count));
1666 if (do_footprint) {
1667 /* less footprint */
1668 ledger_debit(owner->ledger,
1669 task_ledgers.phys_footprint,
1670 ptoa_64(resident_page_count
1671 + compressed_page_count
1672 - wired_page_count));
1673 }
1674
1675 /* more volatile bytes in ledger */
1676 ledger_credit(owner->ledger,
1677 ledger_idx_volatile,
1678 ptoa_64(resident_page_count - wired_page_count));
1679 /* more compressed volatile bytes in ledger */
1680 ledger_credit(owner->ledger,
1681 ledger_idx_volatile_compressed,
1682 ptoa_64(compressed_page_count));
1683 } else {
1684 panic("vm_purgeable_accounting(%p): "
1685 "unexpected old_state=%d\n",
1686 object, old_state);
1687 }
1688
1689 vm_object_lock_assert_exclusive(object);
1690 }
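/*
 * Worked example for the ledger moves above (illustration only): an object
 * going from NONVOLATILE to VOLATILE with resident_page_count = 10,
 * wired_page_count = 2 and compressed_page_count = 4 debits 8 pages of
 * "nonvolatile" and 4 pages of "nonvolatile_compressed", debits 12 pages
 * of phys_footprint if do_footprint is set, and credits 8 pages of
 * "volatile" plus 4 pages of "volatile_compressed" on the owner's ledger.
 */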
1691
1692 void
1693 vm_purgeable_nonvolatile_owner_update(
1694 task_t owner,
1695 int delta)
1696 {
1697 if (owner == NULL || delta == 0) {
1698 return;
1699 }
1700
1701 if (delta > 0) {
1702 assert(owner->task_nonvolatile_objects >= 0);
1703 OSAddAtomic(delta, &owner->task_nonvolatile_objects);
1704 assert(owner->task_nonvolatile_objects > 0);
1705 } else {
1706 assert(owner->task_nonvolatile_objects > delta);
1707 OSAddAtomic(delta, &owner->task_nonvolatile_objects);
1708 assert(owner->task_nonvolatile_objects >= 0);
1709 }
1710 }
1711
1712 void
1713 vm_purgeable_volatile_owner_update(
1714 task_t owner,
1715 int delta)
1716 {
1717 if (owner == NULL || delta == 0) {
1718 return;
1719 }
1720
1721 if (delta > 0) {
1722 assert(owner->task_volatile_objects >= 0);
1723 OSAddAtomic(delta, &owner->task_volatile_objects);
1724 assert(owner->task_volatile_objects > 0);
1725 } else {
1726 assert(owner->task_volatile_objects > delta);
1727 OSAddAtomic(delta, &owner->task_volatile_objects);
1728 assert(owner->task_volatile_objects >= 0);
1729 }
1730 }
1731
1732 void
1733 vm_object_owner_compressed_update(
1734 vm_object_t object,
1735 int delta)
1736 {
1737 task_t owner;
1738 int ledger_idx_volatile;
1739 int ledger_idx_nonvolatile;
1740 int ledger_idx_volatile_compressed;
1741 int ledger_idx_nonvolatile_compressed;
1742 boolean_t do_footprint;
1743
1744 vm_object_lock_assert_exclusive(object);
1745
1746 owner = VM_OBJECT_OWNER(object);
1747
1748 if (delta == 0 ||
1749 !object->internal ||
1750 (object->purgable == VM_PURGABLE_DENY &&
1751 !object->vo_ledger_tag) ||
1752 owner == NULL) {
1753 /* not an owned purgeable (or tagged) VM object: nothing to update */
1754 return;
1755 }
1756
1757 vm_object_ledger_tag_ledgers(object,
1758 &ledger_idx_volatile,
1759 &ledger_idx_nonvolatile,
1760 &ledger_idx_volatile_compressed,
1761 &ledger_idx_nonvolatile_compressed,
1762 &do_footprint);
1763 switch (object->purgable) {
1764 case VM_PURGABLE_DENY:
1765 /* not purgeable: must be ledger-tagged */
1766 assert(object->vo_ledger_tag != VM_OBJECT_LEDGER_TAG_NONE);
1767 /* fallthru */
1768 case VM_PURGABLE_NONVOLATILE:
1769 if (delta > 0) {
1770 ledger_credit(owner->ledger,
1771 ledger_idx_nonvolatile_compressed,
1772 ptoa_64(delta));
1773 if (do_footprint) {
1774 ledger_credit(owner->ledger,
1775 task_ledgers.phys_footprint,
1776 ptoa_64(delta));
1777 }
1778 } else {
1779 ledger_debit(owner->ledger,
1780 ledger_idx_nonvolatile_compressed,
1781 ptoa_64(-delta));
1782 if (do_footprint) {
1783 ledger_debit(owner->ledger,
1784 task_ledgers.phys_footprint,
1785 ptoa_64(-delta));
1786 }
1787 }
1788 break;
1789 case VM_PURGABLE_VOLATILE:
1790 case VM_PURGABLE_EMPTY:
1791 if (delta > 0) {
1792 ledger_credit(owner->ledger,
1793 ledger_idx_volatile_compressed,
1794 ptoa_64(delta));
1795 } else {
1796 ledger_debit(owner->ledger,
1797 ledger_idx_volatile_compressed,
1798 ptoa_64(-delta));
1799 }
1800 break;
1801 default:
1802 panic("vm_purgeable_compressed_update(): "
1803 "unexpected purgable %d for object %p\n",
1804 object->purgable, object);
1805 }
1806 }
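/*
 * Worked example for the function above (illustration only): compressing
 * 3 pages of an owned NONVOLATILE object (delta = +3) credits 3 pages of
 * "nonvolatile_compressed" and, if do_footprint is set, 3 pages of
 * phys_footprint; decompressing them again (delta = -3) debits the same
 * amounts.
 */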