1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include <kern/sched_prim.h>
25 #include <kern/ledger.h>
26 #include <kern/policy_internal.h>
27
28 #include <libkern/OSDebug.h>
29
30 #include <mach/mach_types.h>
31
32 #include <machine/limits.h>
33
34 #include <vm/vm_compressor_pager.h>
35 #include <vm/vm_kern.h> /* kmem_alloc */
36 #include <vm/vm_page.h>
37 #include <vm/vm_pageout.h>
38 #include <vm/vm_protos.h>
39 #include <vm/vm_purgeable_internal.h>
40
41 #include <sys/kdebug.h>
42
43 extern vm_pressure_level_t memorystatus_vm_pressure_level;
44
45 struct token {
46 token_cnt_t count;
47 token_idx_t prev;
48 token_idx_t next;
49 };
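/*
 * A note on the scheme below: tokens live in the dynamically grown "tokens"
 * array and form a doubly-linked list per purgeable queue; index 0 is
 * reserved as the list terminator.  Roughly speaking, a token's count is
 * the number of inactive-queue pages that still have to age past it before
 * it "ripens" (count reaches 0); a ripe token at the head of a queue means
 * an object on that queue may be purged, and available_for_purge counts
 * those ripe tokens.
 */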
50
51 struct token *tokens;
52 token_idx_t token_q_max_cnt = 0;
53 vm_size_t token_q_cur_size = 0;
54
55 token_idx_t token_free_idx = 0; /* head of free queue */
56 token_idx_t token_init_idx = 1; /* token 0 is reserved!! */
57 int32_t token_new_pagecount = 0; /* count of pages that will
58 * be added onto token queue */
59
60 int available_for_purge = 0; /* increase when ripe token
61 * added, decrease when ripe
62 * token removed.
63 * protected by page_queue_lock
64 */
65
66 static int token_q_allocating = 0; /* flag for singlethreading
67 * allocator */
68
69 struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
70 queue_head_t purgeable_nonvolatile_queue;
71 int purgeable_nonvolatile_count;
72
73 decl_lck_mtx_data(,vm_purgeable_queue_lock)
74
75 static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);
76
77 static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);
78
79 void vm_purgeable_nonvolatile_owner_update(task_t owner,
80 int delta);
81 void vm_purgeable_volatile_owner_update(task_t owner,
82 int delta);
83
84
85 #if MACH_ASSERT
86 static void
87 vm_purgeable_token_check_queue(purgeable_q_t queue)
88 {
89 int token_cnt = 0, page_cnt = 0;
90 token_idx_t token = queue->token_q_head;
91 token_idx_t unripe = 0;
92 int our_inactive_count;
93
94 #if DEVELOPMENT
95 static unsigned lightweight_check = 0;
96
97 /*
98 * Due to performance impact, only perform this check
99 * every 100 times on DEVELOPMENT kernels.
100 */
101 if (lightweight_check++ < 100) {
102 return;
103 }
104
105 lightweight_check = 0;
106 #endif
107
108 while (token) {
109 if (tokens[token].count != 0) {
110 assert(queue->token_q_unripe);
111 if (unripe == 0) {
112 assert(token == queue->token_q_unripe);
113 unripe = token;
114 }
115 page_cnt += tokens[token].count;
116 }
117 if (tokens[token].next == 0)
118 assert(queue->token_q_tail == token);
119
120 token_cnt++;
121 token = tokens[token].next;
122 }
123
124 if (unripe)
125 assert(queue->token_q_unripe == unripe);
126 assert(token_cnt == queue->debug_count_tokens);
127
128 /* obsolete queue doesn't maintain token counts */
129 if(queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
130 {
131 our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
132 assert(our_inactive_count >= 0);
133 assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
134 }
135 }
136 #endif
137
138 /*
139 * Add a token. Allocate token queue memory if necessary.
140 * Call with page queue locked.
141 */
142 kern_return_t
143 vm_purgeable_token_add(purgeable_q_t queue)
144 {
145 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
146
147 /* new token */
148 token_idx_t token;
149 enum purgeable_q_type i;
150
151 find_available_token:
152
153 if (token_free_idx) { /* unused tokens available */
154 token = token_free_idx;
155 token_free_idx = tokens[token_free_idx].next;
156 } else if (token_init_idx < token_q_max_cnt) { /* lazy token array init */
157 token = token_init_idx;
158 token_init_idx++;
159 } else { /* allocate more memory */
160 /* Wait if another thread is inside the memory alloc section */
161 while(token_q_allocating) {
162 wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
163 LCK_SLEEP_DEFAULT,
164 (event_t)&token_q_allocating,
165 THREAD_UNINT);
166 if(res != THREAD_AWAKENED) return KERN_ABORTED;
167 };
168
169 /* Check whether memory is still maxed out */
170 if(token_init_idx < token_q_max_cnt)
171 goto find_available_token;
172
173 /* Still no memory. Allocate some. */
174 token_q_allocating = 1;
175
176 /* Drop page queue lock so we can allocate */
177 vm_page_unlock_queues();
178
179 struct token *new_loc;
180 vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
181 kern_return_t result;
182
183 if (alloc_size / sizeof (struct token) > TOKEN_COUNT_MAX) {
184 result = KERN_RESOURCE_SHORTAGE;
185 } else {
186 if (token_q_cur_size) {
187 result = kmem_realloc(kernel_map,
188 (vm_offset_t) tokens,
189 token_q_cur_size,
190 (vm_offset_t *) &new_loc,
191 alloc_size, VM_KERN_MEMORY_OSFMK);
192 } else {
193 result = kmem_alloc(kernel_map,
194 (vm_offset_t *) &new_loc,
195 alloc_size, VM_KERN_MEMORY_OSFMK);
196 }
197 }
198
199 vm_page_lock_queues();
200
201 if (result) {
202 /* Unblock waiting threads */
203 token_q_allocating = 0;
204 thread_wakeup((event_t)&token_q_allocating);
205 return result;
206 }
207
208 /* If we get here, we allocated new memory. Update pointers and
209 * dealloc old range */
210 struct token *old_tokens=tokens;
211 tokens=new_loc;
212 vm_size_t old_token_q_cur_size=token_q_cur_size;
213 token_q_cur_size=alloc_size;
214 token_q_max_cnt = (token_idx_t) (token_q_cur_size /
215 sizeof(struct token));
216 assert (token_init_idx < token_q_max_cnt); /* We must have a free token now */
217
218 if (old_token_q_cur_size) { /* clean up old mapping */
219 vm_page_unlock_queues();
220 /* kmem_realloc leaves the old region mapped. Get rid of it. */
221 kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
222 vm_page_lock_queues();
223 }
224
225 /* Unblock waiting threads */
226 token_q_allocating = 0;
227 thread_wakeup((event_t)&token_q_allocating);
228
229 goto find_available_token;
230 }
231
232 assert (token);
233
234 /*
235 * the new pagecount we got needs to be applied to all queues except
236 * obsolete
237 */
238 for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
239 int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
240 assert(pages >= 0);
241 assert(pages <= TOKEN_COUNT_MAX);
242 purgeable_queues[i].new_pages = (int32_t) pages;
243 assert(purgeable_queues[i].new_pages == pages);
244 }
245 token_new_pagecount = 0;
246
247 /* set token counter value */
248 if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
249 tokens[token].count = queue->new_pages;
250 else
251 tokens[token].count = 0; /* all obsolete items are
252 * ripe immediately */
253 queue->new_pages = 0;
254
255 /* put token on token counter list */
256 tokens[token].next = 0;
257 if (queue->token_q_tail == 0) {
258 assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
259 queue->token_q_head = token;
260 tokens[token].prev = 0;
261 } else {
262 tokens[queue->token_q_tail].next = token;
263 tokens[token].prev = queue->token_q_tail;
264 }
265 if (queue->token_q_unripe == 0) { /* only ripe tokens (token
266 * count == 0) in queue */
267 if (tokens[token].count > 0)
268 queue->token_q_unripe = token; /* first unripe token */
269 else
270 available_for_purge++; /* added a ripe token?
271 * increase available count */
272 }
273 queue->token_q_tail = token;
274
275 #if MACH_ASSERT
276 queue->debug_count_tokens++;
277 /* Check both queues, since we modified the new_pages count on each */
278 vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
279 vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);
280
281 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
282 queue->type,
283 tokens[token].count, /* num pages on token
284 * (last token) */
285 queue->debug_count_tokens,
286 0,
287 0);
288 #endif
289
290 return KERN_SUCCESS;
291 }
292
293 /*
294 * Remove first token from queue and return its index. Add its count to the
295 * count of the next token.
296 * Call with page queue locked.
297 */
298 static token_idx_t
299 vm_purgeable_token_remove_first(purgeable_q_t queue)
300 {
301 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
302
303 token_idx_t token;
304 token = queue->token_q_head;
305
306 assert(token);
307
308 if (token) {
309 assert(queue->token_q_tail);
310 if (queue->token_q_head == queue->token_q_unripe) {
311 /* no ripe tokens... must move unripe pointer */
312 queue->token_q_unripe = tokens[token].next;
313 } else {
314 /* we're removing a ripe token. decrease count */
315 available_for_purge--;
316 assert(available_for_purge >= 0);
317 }
318
319 if (queue->token_q_tail == queue->token_q_head)
320 assert(tokens[token].next == 0);
321
322 queue->token_q_head = tokens[token].next;
323 if (queue->token_q_head) {
324 tokens[queue->token_q_head].count += tokens[token].count;
325 tokens[queue->token_q_head].prev = 0;
326 } else {
327 /* currently no other tokens in the queue */
328 /*
329 * the page count must be added to the next newly
330 * created token
331 */
332 queue->new_pages += tokens[token].count;
333 /* if head is zero, tail is too */
334 queue->token_q_tail = 0;
335 }
336
337 #if MACH_ASSERT
338 queue->debug_count_tokens--;
339 vm_purgeable_token_check_queue(queue);
340
341 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
342 queue->type,
343 tokens[queue->token_q_head].count, /* num pages on new
344 * first token */
345 token_new_pagecount, /* num pages waiting for
346 * next token */
347 available_for_purge,
348 0);
349 #endif
350 }
351 return token;
352 }
353
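/*
 * Remove the last token from the queue and return its index. Its count is
 * folded back into the queue's new_pages counter.
 * Call with page queue locked.
 */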
354 static token_idx_t
355 vm_purgeable_token_remove_last(purgeable_q_t queue)
356 {
357 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
358
359 token_idx_t token;
360 token = queue->token_q_tail;
361
362 assert(token);
363
364 if (token) {
365 assert(queue->token_q_head);
366
367 if (queue->token_q_tail == queue->token_q_head)
368 assert(tokens[token].next == 0);
369
370 if (queue->token_q_unripe == 0) {
371 /* we're removing a ripe token. decrease count */
372 available_for_purge--;
373 assert(available_for_purge >= 0);
374 } else if (queue->token_q_unripe == token) {
375 /* we're removing the only unripe token */
376 queue->token_q_unripe = 0;
377 }
378
379 if (token == queue->token_q_head) {
380 /* token is the last one in the queue */
381 queue->token_q_head = 0;
382 queue->token_q_tail = 0;
383 } else {
384 token_idx_t new_tail;
385
386 new_tail = tokens[token].prev;
387
388 assert(new_tail);
389 assert(tokens[new_tail].next == token);
390
391 queue->token_q_tail = new_tail;
392 tokens[new_tail].next = 0;
393 }
394
395 queue->new_pages += tokens[token].count;
396
397 #if MACH_ASSERT
398 queue->debug_count_tokens--;
399 vm_purgeable_token_check_queue(queue);
400
401 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
402 queue->type,
403 tokens[queue->token_q_head].count, /* num pages on new
404 * first token */
405 token_new_pagecount, /* num pages waiting for
406 * next token */
407 available_for_purge,
408 0);
409 #endif
410 }
411 return token;
412 }
413
414 /*
415 * Delete first token from queue. Return token to token queue.
416 * Call with page queue locked.
417 */
418 void
419 vm_purgeable_token_delete_first(purgeable_q_t queue)
420 {
421 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
422 token_idx_t token = vm_purgeable_token_remove_first(queue);
423
424 if (token) {
425 /* stick removed token on free queue */
426 tokens[token].next = token_free_idx;
427 tokens[token].prev = 0;
428 token_free_idx = token;
429 }
430 }
431
432 void
433 vm_purgeable_token_delete_last(purgeable_q_t queue)
434 {
435 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
436 token_idx_t token = vm_purgeable_token_remove_last(queue);
437
438 if (token) {
439 /* stick removed token on free queue */
440 tokens[token].next = token_free_idx;
441 tokens[token].prev = 0;
442 token_free_idx = token;
443 }
444 }
445
446
447 /* Call with page queue locked. */
448 void
449 vm_purgeable_q_advance_all()
450 {
451 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
452
453 /* check queue counters - if they get really large, scale them back.
454 * They tend to get that large when there is no purgeable queue action */
455 int i;
456 if(token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) /* a system idling for years might get there */
457 {
458 for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
459 int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
460 assert(pages >= 0);
461 assert(pages <= TOKEN_COUNT_MAX);
462 purgeable_queues[i].new_pages = (int32_t) pages;
463 assert(purgeable_queues[i].new_pages == pages);
464 }
465 token_new_pagecount = 0;
466 }
467
468 /*
469 * Decrement token counters. A token counter can be zero; this means the
470 * object is ripe to be purged. It is not purged immediately, because that
471 * could cause several objects to be purged even if purging one would satisfy
472 * the memory needs. Instead, the pageout thread purges one after the other
473 * by calling vm_purgeable_object_purge_one and then rechecking the memory
474 * balance.
475 *
476 * No need to advance obsolete queue - all items are ripe there,
477 * always
478 */
479 for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
480 purgeable_q_t queue = &purgeable_queues[i];
481 uint32_t num_pages = 1;
482
483 /* Iterate over tokens as long as there are unripe tokens. */
484 while (queue->token_q_unripe) {
485 if (tokens[queue->token_q_unripe].count && num_pages)
486 {
487 tokens[queue->token_q_unripe].count -= 1;
488 num_pages -= 1;
489 }
490
491 if (tokens[queue->token_q_unripe].count == 0) {
492 queue->token_q_unripe = tokens[queue->token_q_unripe].next;
493 available_for_purge++;
494 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
495 queue->type,
496 tokens[queue->token_q_head].count, /* num pages on new
497 * first token */
498 0,
499 available_for_purge,
500 0);
501 continue; /* One token ripened. Make sure to
502 * check the next. */
503 }
504 if (num_pages == 0)
505 break; /* Current token not ripe and no more pages.
506 * Work done. */
507 }
508
509 /*
510 * If there are no unripe tokens in the queue, decrement the
511 * new_pages counter instead. new_pages can be negative, but must be
512 * canceled out by token_new_pagecount -- since the inactive queue as a
513 * whole always contains a nonnegative number of pages
514 */
515 if (!queue->token_q_unripe) {
516 queue->new_pages -= num_pages;
517 assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
518 }
519 #if MACH_ASSERT
520 vm_purgeable_token_check_queue(queue);
521 #endif
522 }
523 }
524
525 /*
526 * Grab any ripe object and purge it: check the obsolete queue first, then go
527 * through each volatile group and select a queue with a ripe token.
528 * Start with first group (0)
529 * 1. Look at queue. Is there an object?
530 * Yes - purge it. Remove token.
531 * No - check other queue. Is there an object?
532 * No - increment group, then go to (1)
533 * Yes - purge it. Remove token. If there is no ripe token, remove ripe
534 * token from other queue and migrate unripe token from this
535 * queue to other queue.
536 * Call with page queue locked.
537 */
538 static void
539 vm_purgeable_token_remove_ripe(purgeable_q_t queue)
540 {
541 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
542 assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
543 /* return token to free list. advance token list. */
544 token_idx_t new_head = tokens[queue->token_q_head].next;
545 tokens[queue->token_q_head].next = token_free_idx;
546 tokens[queue->token_q_head].prev = 0;
547 token_free_idx = queue->token_q_head;
548 queue->token_q_head = new_head;
549 tokens[new_head].prev = 0;
550 if (new_head == 0)
551 queue->token_q_tail = 0;
552
553 #if MACH_ASSERT
554 queue->debug_count_tokens--;
555 vm_purgeable_token_check_queue(queue);
556 #endif
557
558 available_for_purge--;
559 assert(available_for_purge >= 0);
560 }
561
562 /*
563 * Delete a ripe token from the given queue. If there are no ripe tokens on
564 * that queue, delete a ripe token from queue2, and migrate an unripe token
565 * from queue to queue2
566 * Call with page queue locked.
567 */
568 static void
569 vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
570 {
571 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
572 assert(queue->token_q_head);
573
574 if (tokens[queue->token_q_head].count == 0) {
575 /* This queue has a ripe token. Remove. */
576 vm_purgeable_token_remove_ripe(queue);
577 } else {
578 assert(queue2);
579 /*
580 * queue2 must have a ripe token. Remove, and migrate one
581 * from queue to queue2.
582 */
583 vm_purgeable_token_remove_ripe(queue2);
584 /* migrate unripe token */
585 token_idx_t token;
586 token_cnt_t count;
587
588 /* remove token from queue1 */
589 assert(queue->token_q_unripe == queue->token_q_head); /* queue1 had no ripe
590 * tokens, remember? */
591 token = vm_purgeable_token_remove_first(queue);
592 assert(token);
593
594 count = tokens[token].count;
595
596 /* migrate to queue2 */
597 /* go to migration target loc */
598
599 token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;
600
601 while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
602 count -= tokens[token_to_insert_before].count;
603 token_to_insert_before = tokens[token_to_insert_before].next;
604 }
605
606 /* token_to_insert_before is now set correctly */
607
608 /* should the inserted token become the first unripe token? */
609 if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
610 queue2->token_q_unripe = token; /* if so, must update unripe pointer */
611
612 /*
613 * insert token.
614 * if inserting at end, reduce new_pages by that value;
615 * otherwise, reduce counter of next token
616 */
617
618 tokens[token].count = count;
619
620 if (token_to_insert_before != 0) {
621 token_to_insert_after = tokens[token_to_insert_before].prev;
622
623 tokens[token].next = token_to_insert_before;
624 tokens[token_to_insert_before].prev = token;
625
626 assert(tokens[token_to_insert_before].count >= count);
627 tokens[token_to_insert_before].count -= count;
628 } else {
629 /* if we ran off the end of the list, the token to insert after is the tail */
630 token_to_insert_after = queue2->token_q_tail;
631
632 tokens[token].next = 0;
633 queue2->token_q_tail = token;
634
635 assert(queue2->new_pages >= (int32_t) count);
636 queue2->new_pages -= count;
637 }
638
639 if (token_to_insert_after != 0) {
640 tokens[token].prev = token_to_insert_after;
641 tokens[token_to_insert_after].next = token;
642 } else {
643 /* is this case possible? */
644 tokens[token].prev = 0;
645 queue2->token_q_head = token;
646 }
647
648 #if MACH_ASSERT
649 queue2->debug_count_tokens++;
650 vm_purgeable_token_check_queue(queue2);
651 #endif
652 }
653 }
654
655 /* Find an object that can be locked. Returns locked object. */
656 /* Call with purgeable queue locked. */
657 static vm_object_t
658 vm_purgeable_object_find_and_lock(
659 purgeable_q_t queue,
660 int group,
661 boolean_t pick_ripe)
662 {
663 vm_object_t object, best_object;
664 int object_task_importance;
665 int best_object_task_importance;
666 int best_object_skipped;
667 int num_objects_skipped;
668 int try_lock_failed = 0;
669 int try_lock_succeeded = 0;
670 task_t owner;
671
672 best_object = VM_OBJECT_NULL;
673 best_object_task_importance = INT_MAX;
674
675 LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
676 /*
677 * Usually we would pick the first element from a queue. However, we
678 * might not be able to get a lock on it, in which case we try the
679 * remaining elements in order.
680 */
681
682 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
683 pick_ripe,
684 group,
685 VM_KERNEL_UNSLIDE_OR_PERM(queue),
686 0,
687 0);
688
689 num_objects_skipped = 0;
690 for (object = (vm_object_t) queue_first(&queue->objq[group]);
691 !queue_end(&queue->objq[group], (queue_entry_t) object);
692 object = (vm_object_t) queue_next(&object->objq),
693 num_objects_skipped++) {
694
695 /*
696 * To prevent us looping for an excessively long time, choose
697 * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
698 * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
699 * we keep going until we find the first eligible object.
700 */
701 if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
702 break;
703 }
704
705 if (pick_ripe &&
706 ! object->purgeable_when_ripe) {
707 /* we want an object that has a ripe token */
708 continue;
709 }
710
711 object_task_importance = 0;
712
713 owner = object->vo_purgeable_owner;
714 if (owner) {
715 object_task_importance = task_importance_estimate(owner);
716 }
717
718 if (object_task_importance < best_object_task_importance) {
719 if (vm_object_lock_try(object)) {
720 try_lock_succeeded++;
721 if (best_object != VM_OBJECT_NULL) {
722 /* forget about previous best object */
723 vm_object_unlock(best_object);
724 }
725 best_object = object;
726 best_object_task_importance = object_task_importance;
727 best_object_skipped = num_objects_skipped;
728 if (best_object_task_importance == 0) {
729 /* can't get any better: stop looking */
730 break;
731 }
732 } else {
733 try_lock_failed++;
734 }
735 }
736 }
737
738 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
739 num_objects_skipped, /* considered objects */
740 try_lock_failed,
741 try_lock_succeeded,
742 VM_KERNEL_UNSLIDE_OR_PERM(best_object),
743 ((best_object == NULL) ? 0 : best_object->resident_page_count));
744
745 object = best_object;
746
747 if (object == VM_OBJECT_NULL) {
748 return VM_OBJECT_NULL;
749 }
750
751 /* Locked. Great. We'll take it. Remove and return. */
752 // printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);
753
754 vm_object_lock_assert_exclusive(object);
755
756 queue_remove(&queue->objq[group], object,
757 vm_object_t, objq);
758 object->objq.next = NULL;
759 object->objq.prev = NULL;
760 object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
761 object->purgeable_queue_group = 0;
762 /* one less volatile object for this object's owner */
763 vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);
764
765 #if DEBUG
766 object->vo_purgeable_volatilizer = NULL;
767 #endif /* DEBUG */
768
769 /* keep queue of non-volatile objects */
770 queue_enter(&purgeable_nonvolatile_queue, object,
771 vm_object_t, objq);
772 assert(purgeable_nonvolatile_count >= 0);
773 purgeable_nonvolatile_count++;
774 assert(purgeable_nonvolatile_count > 0);
775 /* one more nonvolatile object for this object's owner */
776 vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, +1);
777
778 #if MACH_ASSERT
779 queue->debug_count_objects--;
780 #endif
781 return object;
782 }
783
784 /* Can be called without holding locks */
785 void
786 vm_purgeable_object_purge_all(void)
787 {
788 enum purgeable_q_type i;
789 int group;
790 vm_object_t object;
791 unsigned int purged_count;
792 uint32_t collisions;
793
794 purged_count = 0;
795 collisions = 0;
796
797 restart:
798 lck_mtx_lock(&vm_purgeable_queue_lock);
799 /* Cycle through all queues */
800 for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
801 purgeable_q_t queue;
802
803 queue = &purgeable_queues[i];
804
805 /*
806 * Look through all groups, starting from the lowest. If
807 * we find an object in that group, try to lock it (this can
808 * fail). If locking is successful, we can drop the queue
809 * lock, remove a token and then purge the object.
810 */
811 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
812 while (!queue_empty(&queue->objq[group])) {
813 object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
814 if (object == VM_OBJECT_NULL) {
815 lck_mtx_unlock(&vm_purgeable_queue_lock);
816 mutex_pause(collisions++);
817 goto restart;
818 }
819
820 lck_mtx_unlock(&vm_purgeable_queue_lock);
821
822 /* Lock the page queue here so we don't hold it
823 * over the whole, lengthy operation */
824 if (object->purgeable_when_ripe) {
825 vm_page_lock_queues();
826 vm_purgeable_token_remove_first(queue);
827 vm_page_unlock_queues();
828 }
829
830 (void) vm_object_purge(object, 0);
831 assert(object->purgable == VM_PURGABLE_EMPTY);
832 /* no change in purgeable accounting */
833
834 vm_object_unlock(object);
835 purged_count++;
836 goto restart;
837 }
838 assert(queue->debug_count_objects >= 0);
839 }
840 }
841 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
842 purged_count, /* # of purged objects */
843 0,
844 available_for_purge,
845 0,
846 0);
847 lck_mtx_unlock(&vm_purgeable_queue_lock);
848 return;
849 }
850
851 boolean_t
852 vm_purgeable_object_purge_one_unlocked(
853 int force_purge_below_group)
854 {
855 boolean_t retval;
856
857 vm_page_lock_queues();
858 retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
859 vm_page_unlock_queues();
860
861 return retval;
862 }
863
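/*
 * Purge one eligible object. Normally a queue is only considered if its head
 * token is ripe; force_purge_below_group allows purging from groups below
 * that threshold even when no token is ripe (a forced purge).
 * Call with page queue locked. Returns TRUE if an object was purged.
 */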
864 boolean_t
865 vm_purgeable_object_purge_one(
866 int force_purge_below_group,
867 int flags)
868 {
869 enum purgeable_q_type i;
870 int group;
871 vm_object_t object = 0;
872 purgeable_q_t queue, queue2;
873 boolean_t forced_purge;
874
875 /* Need the page queue lock since we'll be changing the token queue. */
876 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
877 lck_mtx_lock(&vm_purgeable_queue_lock);
878
879 /* Cycle through all queues */
880 for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
881 queue = &purgeable_queues[i];
882
883 if (force_purge_below_group == 0) {
884 /*
885 * Are there any ripe tokens on this queue? If yes,
886 * we'll find an object to purge there
887 */
888 if (!queue->token_q_head) {
889 /* no token: look at next purgeable queue */
890 continue;
891 }
892
893 if (tokens[queue->token_q_head].count != 0) {
894 /* no ripe token: next queue */
895 continue;
896 }
897 }
898
899 /*
900 * Now look through all groups, starting from the lowest. If
901 * we find an object in that group, try to lock it (this can
902 * fail). If locking is successful, we can drop the queue
903 * lock, remove a token and then purge the object.
904 */
905 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
906 if (!queue->token_q_head ||
907 tokens[queue->token_q_head].count != 0) {
908 /* no tokens or no ripe tokens */
909
910 if (group >= force_purge_below_group) {
911 /* no more groups to force-purge */
912 break;
913 }
914
915 /*
916 * Try and purge an object in this group
917 * even though no tokens are ripe.
918 */
919 if (!queue_empty(&queue->objq[group]) &&
920 (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
921 lck_mtx_unlock(&vm_purgeable_queue_lock);
922 if (object->purgeable_when_ripe) {
923 vm_purgeable_token_delete_first(queue);
924 }
925 forced_purge = TRUE;
926 goto purge_now;
927 }
928
929 /* nothing to purge in this group: next group */
930 continue;
931 }
932 if (!queue_empty(&queue->objq[group]) &&
933 (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
934 lck_mtx_unlock(&vm_purgeable_queue_lock);
935 if (object->purgeable_when_ripe) {
936 vm_purgeable_token_choose_and_delete_ripe(queue, 0);
937 }
938 forced_purge = FALSE;
939 goto purge_now;
940 }
941 if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
942 /* This is the token migration case, and it works between
943 * FIFO and LIFO only */
944 queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
945 PURGEABLE_Q_TYPE_FIFO :
946 PURGEABLE_Q_TYPE_LIFO];
947
948 if (!queue_empty(&queue2->objq[group]) &&
949 (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
950 lck_mtx_unlock(&vm_purgeable_queue_lock);
951 if (object->purgeable_when_ripe) {
952 vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
953 }
954 forced_purge = FALSE;
955 goto purge_now;
956 }
957 }
958 assert(queue->debug_count_objects >= 0);
959 }
960 }
961 /*
962 * because we have to do a try_lock on the objects which could fail,
963 * we could end up with no object to purge at this time, even though
964 * we have objects in a purgeable state
965 */
966 lck_mtx_unlock(&vm_purgeable_queue_lock);
967 return FALSE;
968
969 purge_now:
970
971 assert(object);
972 vm_page_unlock_queues(); /* Unlock for call to vm_object_purge() */
973 // printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_purgeable_owner, task_importance_estimate(object->vo_purgeable_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
974 (void) vm_object_purge(object, flags);
975 assert(object->purgable == VM_PURGABLE_EMPTY);
976 /* no change in purgeable accounting */
977 vm_object_unlock(object);
978 vm_page_lock_queues();
979
980 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
981 object, /* purged object */
982 0,
983 available_for_purge,
984 0,
985 0);
986
987 return TRUE;
988 }
989
990 /* Called with object lock held */
991 void
992 vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
993 {
994 vm_object_lock_assert_exclusive(object);
995 lck_mtx_lock(&vm_purgeable_queue_lock);
996
997 assert(object->objq.next != NULL);
998 assert(object->objq.prev != NULL);
999 queue_remove(&purgeable_nonvolatile_queue, object,
1000 vm_object_t, objq);
1001 object->objq.next = NULL;
1002 object->objq.prev = NULL;
1003 assert(purgeable_nonvolatile_count > 0);
1004 purgeable_nonvolatile_count--;
1005 assert(purgeable_nonvolatile_count >= 0);
1006 /* one less nonvolatile object for this object's owner */
1007 vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, -1);
1008
1009 if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
1010 group = 0;
1011
1012 if (queue->type != PURGEABLE_Q_TYPE_LIFO) /* fifo and obsolete are
1013 * fifo-queued */
1014 queue_enter(&queue->objq[group], object, vm_object_t, objq); /* last to die */
1015 else
1016 queue_enter_first(&queue->objq[group], object, vm_object_t, objq); /* first to die */
1017 /* one more volatile object for this object's owner */
1018 vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, +1);
1019
1020 object->purgeable_queue_type = queue->type;
1021 object->purgeable_queue_group = group;
1022
1023 #if DEBUG
1024 assert(object->vo_purgeable_volatilizer == NULL);
1025 object->vo_purgeable_volatilizer = current_task();
1026 OSBacktrace(&object->purgeable_volatilizer_bt[0], 16);
1027 #endif /* DEBUG */
1028
1029 #if MACH_ASSERT
1030 queue->debug_count_objects++;
1031 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
1032 0,
1033 tokens[queue->token_q_head].count,
1034 queue->type,
1035 group,
1036 0);
1037 #endif
1038
1039 lck_mtx_unlock(&vm_purgeable_queue_lock);
1040 }
1041
1042 /* Look for object. If found, remove from purgeable queue. */
1043 /* Called with object lock held */
1044 purgeable_q_t
1045 vm_purgeable_object_remove(vm_object_t object)
1046 {
1047 int group;
1048 enum purgeable_q_type type;
1049 purgeable_q_t queue;
1050
1051 vm_object_lock_assert_exclusive(object);
1052
1053 type = object->purgeable_queue_type;
1054 group = object->purgeable_queue_group;
1055
1056 if (type == PURGEABLE_Q_TYPE_MAX) {
1057 if (object->objq.prev || object->objq.next)
1058 panic("unmarked object on purgeable q");
1059
1060 return NULL;
1061 } else if (!(object->objq.prev && object->objq.next))
1062 panic("marked object not on purgeable q");
1063
1064 lck_mtx_lock(&vm_purgeable_queue_lock);
1065
1066 queue = &purgeable_queues[type];
1067
1068 queue_remove(&queue->objq[group], object, vm_object_t, objq);
1069 object->objq.next = NULL;
1070 object->objq.prev = NULL;
1071 /* one less volatile object for this object's owner */
1072 vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);
1073 #if DEBUG
1074 object->vo_purgeable_volatilizer = NULL;
1075 #endif /* DEBUG */
1076 /* keep queue of non-volatile objects */
1077 if (object->alive && !object->terminating) {
1078 task_t owner;
1079 queue_enter(&purgeable_nonvolatile_queue, object,
1080 vm_object_t, objq);
1081 assert(purgeable_nonvolatile_count >= 0);
1082 purgeable_nonvolatile_count++;
1083 assert(purgeable_nonvolatile_count > 0);
1084 /* one more nonvolatile object for this object's owner */
1085 owner = object->vo_purgeable_owner;
1086 vm_purgeable_nonvolatile_owner_update(owner, +1);
1087 }
1088
1089 #if MACH_ASSERT
1090 queue->debug_count_objects--;
1091 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
1092 0,
1093 tokens[queue->token_q_head].count,
1094 queue->type,
1095 group,
1096 0);
1097 #endif
1098
1099 lck_mtx_unlock(&vm_purgeable_queue_lock);
1100
1101 object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
1102 object->purgeable_queue_group = 0;
1103
1104 vm_object_lock_assert_exclusive(object);
1105
1106 return &purgeable_queues[type];
1107 }
1108
1109 void
1110 vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
1111 {
1112 LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
1113
1114 stat->count = stat->size = 0;
1115 vm_object_t object;
1116 for (object = (vm_object_t) queue_first(&queue->objq[group]);
1117 !queue_end(&queue->objq[group], (queue_entry_t) object);
1118 object = (vm_object_t) queue_next(&object->objq)) {
1119 if (!target_task || object->vo_purgeable_owner == target_task) {
1120 stat->count++;
1121 stat->size += (object->resident_page_count * PAGE_SIZE);
1122 }
1123 }
1124 return;
1125 }
1126
1127 void
1128 vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
1129 {
1130 purgeable_q_t queue;
1131 int group;
1132
1133 lck_mtx_lock(&vm_purgeable_queue_lock);
1134
1135 /* Populate fifo_data */
1136 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
1137 for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
1138 vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);
1139
1140 /* Populate lifo_data */
1141 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
1142 for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
1143 vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);
1144
1145 /* Populate obsolete data */
1146 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
1147 vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);
1148
1149 lck_mtx_unlock(&vm_purgeable_queue_lock);
1150 return;
1151 }
1152
1153 #if DEVELOPMENT || DEBUG
1154 static void
1155 vm_purgeable_account_volatile_queue(
1156 purgeable_q_t queue,
1157 int group,
1158 task_t task,
1159 pvm_account_info_t acnt_info)
1160 {
1161 vm_object_t object;
1162 uint64_t compressed_count;
1163
1164 for (object = (vm_object_t) queue_first(&queue->objq[group]);
1165 !queue_end(&queue->objq[group], (queue_entry_t) object);
1166 object = (vm_object_t) queue_next(&object->objq)) {
1167 if (object->vo_purgeable_owner == task) {
1168 compressed_count = vm_compressor_pager_get_count(object->pager);
1169 acnt_info->pvm_volatile_compressed_count += compressed_count;
1170 acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
1171 acnt_info->pvm_nonvolatile_count += object->wired_page_count;
1172 }
1173 }
1174
1175 }
1176
1177 /*
1178 * Walks the purgeable object queues and calculates the usage
1179 * associated with the objects for the given task.
1180 */
1181 kern_return_t
1182 vm_purgeable_account(
1183 task_t task,
1184 pvm_account_info_t acnt_info)
1185 {
1186 queue_head_t *nonvolatile_q;
1187 vm_object_t object;
1188 int group;
1189 int state;
1190 uint64_t compressed_count;
1191 purgeable_q_t volatile_q;
1192
1193
1194 if ((task == NULL) || (acnt_info == NULL)) {
1195 return KERN_INVALID_ARGUMENT;
1196 }
1197
1198 acnt_info->pvm_volatile_count = 0;
1199 acnt_info->pvm_volatile_compressed_count = 0;
1200 acnt_info->pvm_nonvolatile_count = 0;
1201 acnt_info->pvm_nonvolatile_compressed_count = 0;
1202
1203 lck_mtx_lock(&vm_purgeable_queue_lock);
1204
1205 nonvolatile_q = &purgeable_nonvolatile_queue;
1206 for (object = (vm_object_t) queue_first(nonvolatile_q);
1207 !queue_end(nonvolatile_q, (queue_entry_t) object);
1208 object = (vm_object_t) queue_next(&object->objq)) {
1209 if (object->vo_purgeable_owner == task) {
1210 state = object->purgable;
1211 compressed_count = vm_compressor_pager_get_count(object->pager);
1212 if (state == VM_PURGABLE_EMPTY) {
1213 acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
1214 acnt_info->pvm_volatile_compressed_count += compressed_count;
1215 } else {
1216 acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
1217 acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
1218 }
1219 acnt_info->pvm_nonvolatile_count += object->wired_page_count;
1220 }
1221 }
1222
1223 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
1224 vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);
1225
1226 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
1227 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1228 vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
1229 }
1230
1231 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
1232 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1233 vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
1234 }
1235 lck_mtx_unlock(&vm_purgeable_queue_lock);
1236
1237 acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
1238 acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
1239 acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
1240 acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);
1241
1242 return KERN_SUCCESS;
1243 }
1244 #endif /* DEVELOPMENT || DEBUG */
1245
1246 static void
1247 vm_purgeable_volatile_queue_disown(
1248 purgeable_q_t queue,
1249 int group,
1250 task_t task)
1251 {
1252 vm_object_t object;
1253 int collisions;
1254
1255 collisions = 0;
1256
1257 again:
1258 LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
1259
1260 for (object = (vm_object_t) queue_first(&queue->objq[group]);
1261 !queue_end(&queue->objq[group], (queue_entry_t) object);
1262 object = (vm_object_t) queue_next(&object->objq)) {
1263 #if MACH_ASSERT
1264 /*
1265 * Sanity check: let's scan the entire queues to
1266 * make sure we don't leave any purgeable objects
1267 * pointing back at a dead task. If the counters
1268 * are off, we would fail to assert that they go
1269 * back to 0 after disowning is done.
1270 */
1271 #else /* MACH_ASSERT */
1272 if (task->task_volatile_objects == 0) {
1273 /* no more volatile objects owned by "task" */
1274 break;
1275 }
1276 #endif /* MACH_ASSERT */
1277 if (object->vo_purgeable_owner == task) {
1278 if (! vm_object_lock_try(object)) {
1279 lck_mtx_unlock(&vm_purgeable_queue_lock);
1280 mutex_pause(collisions++);
1281 lck_mtx_lock(&vm_purgeable_queue_lock);
1282 goto again;
1283 }
1284 assert(object->purgable == VM_PURGABLE_VOLATILE);
1285 if (object->vo_purgeable_owner == task) {
1286 vm_purgeable_accounting(object,
1287 object->purgable,
1288 TRUE); /* disown */
1289 assert(object->vo_purgeable_owner == NULL);
1290 }
1291 vm_object_unlock(object);
1292 }
1293 }
1294 }
1295
1296 void
1297 vm_purgeable_disown(
1298 task_t task)
1299 {
1300 purgeable_q_t volatile_q;
1301 int group;
1302 queue_head_t *nonvolatile_q;
1303 vm_object_t object;
1304 int collisions;
1305
1306 if (task == NULL) {
1307 return;
1308 }
1309
1310 task->task_purgeable_disowning = TRUE;
1311
1312 /*
1313 * Scan the purgeable objects queues for objects owned by "task".
1314 * This has to be done "atomically" under the "vm_purgeable_queue"
1315 * lock, to ensure that no new purgeable object gets associated
1316 * with this task or moved between queues while we're scanning.
1317 */
1318
1319 /*
1320 * Scan non-volatile queue for objects owned by "task".
1321 */
1322
1323 collisions = 0;
1324
1325 again:
1326 if (task->task_purgeable_disowned) {
1327 /* task has already disowned its purgeable memory */
1328 assert(task->task_volatile_objects == 0);
1329 assert(task->task_nonvolatile_objects == 0);
1330 return;
1331 }
1332 lck_mtx_lock(&vm_purgeable_queue_lock);
1333
1334 nonvolatile_q = &purgeable_nonvolatile_queue;
1335 for (object = (vm_object_t) queue_first(nonvolatile_q);
1336 !queue_end(nonvolatile_q, (queue_entry_t) object);
1337 object = (vm_object_t) queue_next(&object->objq)) {
1338 #if MACH_ASSERT
1339 /*
1340 * Sanity check: let's scan the entire queues to
1341 * make sure we don't leave any purgeable objects
1342 * pointing back at a dead task. If the counters
1343 * are off, we would fail to assert that they go
1344 * back to 0 after disowning is done.
1345 */
1346 #else /* MACH_ASSERT */
1347 if (task->task_nonvolatile_objects == 0) {
1348 /* no more non-volatile objects owned by "task" */
1349 break;
1350 }
1351 #endif /* MACH_ASSERT */
1352 #if DEBUG
1353 assert(object->vo_purgeable_volatilizer == NULL);
1354 #endif /* DEBUG */
1355 if (object->vo_purgeable_owner == task) {
1356 if (!vm_object_lock_try(object)) {
1357 lck_mtx_unlock(&vm_purgeable_queue_lock);
1358 mutex_pause(collisions++);
1359 goto again;
1360 }
1361 if (object->vo_purgeable_owner == task) {
1362 vm_purgeable_accounting(object,
1363 object->purgable,
1364 TRUE); /* disown */
1365 assert(object->vo_purgeable_owner == NULL);
1366 }
1367 vm_object_unlock(object);
1368 }
1369 }
1370
1371 lck_mtx_yield(&vm_purgeable_queue_lock);
1372
1373 /*
1374 * Scan volatile queues for objects owned by "task".
1375 */
1376
1377 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
1378 vm_purgeable_volatile_queue_disown(volatile_q, 0, task);
1379 lck_mtx_yield(&vm_purgeable_queue_lock);
1380
1381 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
1382 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1383 vm_purgeable_volatile_queue_disown(volatile_q, group, task);
1384 lck_mtx_yield(&vm_purgeable_queue_lock);
1385 }
1386
1387 volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
1388 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1389 vm_purgeable_volatile_queue_disown(volatile_q, group, task);
1390 lck_mtx_yield(&vm_purgeable_queue_lock);
1391 }
1392
1393 if (task->task_volatile_objects != 0 ||
1394 task->task_nonvolatile_objects != 0) {
1395 /* some purgeable objects sneaked into a queue: find them */
1396 lck_mtx_unlock(&vm_purgeable_queue_lock);
1397 mutex_pause(collisions++);
1398 goto again;
1399 }
1400
1401 /* there shouldn't be any purgeable objects owned by task now */
1402 assert(task->task_volatile_objects == 0);
1403 assert(task->task_nonvolatile_objects == 0);
1404 assert(task->task_purgeable_disowning);
1405
1406 /* and we don't need to try and disown again */
1407 task->task_purgeable_disowned = TRUE;
1408
1409 lck_mtx_unlock(&vm_purgeable_queue_lock);
1410 }
1411
1412
1413 #if notyet
1414 static int
1415 vm_purgeable_queue_purge_task_owned(
1416 purgeable_q_t queue,
1417 int group,
1418 task_t task)
1419 {
1420 vm_object_t object;
1421 int num_objects;
1422 int collisions;
1423 int num_objects_purged;
1424
1425 num_objects_purged = 0;
1426 collisions = 0;
1427
1428 look_again:
1429 lck_mtx_lock(&vm_purgeable_queue_lock);
1430
1431 num_objects = 0;
1432 for (object = (vm_object_t) queue_first(&queue->objq[group]);
1433 !queue_end(&queue->objq[group], (queue_entry_t) object);
1434 object = (vm_object_t) queue_next(&object->objq)) {
1435
1436 if (object->vo_purgeable_owner != task &&
1437 object->vo_purgeable_owner != NULL) {
1438 continue;
1439 }
1440
1441 /* found an object: try and grab it */
1442 if (!vm_object_lock_try(object)) {
1443 lck_mtx_unlock(&vm_purgeable_queue_lock);
1444 mutex_pause(collisions++);
1445 goto look_again;
1446 }
1447 /* got it ! */
1448
1449 collisions = 0;
1450
1451 /* remove object from purgeable queue */
1452 queue_remove(&queue->objq[group], object,
1453 vm_object_t, objq);
1454 object->objq.next = NULL;
1455 object->objq.prev = NULL;
1456 /* one less volatile object for this object's owner */
1457 assert(object->vo_purgeable_owner == task);
1458 vm_purgeable_volatile_owner_update(task, -1);
1459
1460 #if DEBUG
1461 object->vo_purgeable_volatilizer = NULL;
1462 #endif /* DEBUG */
1463 queue_enter(&purgeable_nonvolatile_queue, object,
1464 vm_object_t, objq);
1465 assert(purgeable_nonvolatile_count >= 0);
1466 purgeable_nonvolatile_count++;
1467 assert(purgeable_nonvolatile_count > 0);
1468 /* one more nonvolatile object for this object's owner */
1469 assert(object->vo_purgeable_owner == task);
1470 vm_purgeable_nonvolatile_owner_update(task, +1);
1471
1472 /* unlock purgeable queues */
1473 lck_mtx_unlock(&vm_purgeable_queue_lock);
1474
1475 if (object->purgeable_when_ripe) {
1476 /* remove a token */
1477 vm_page_lock_queues();
1478 vm_purgeable_token_remove_first(queue);
1479 vm_page_unlock_queues();
1480 }
1481
1482 /* purge the object */
1483 (void) vm_object_purge(object, 0);
1484 assert(object->purgable == VM_PURGABLE_EMPTY);
1485 /* no change for purgeable accounting */
1486 vm_object_unlock(object);
1487 num_objects_purged++;
1488
1489 /* we unlocked the purgeable queues, so start over */
1490 goto look_again;
1491 }
1492
1493 lck_mtx_unlock(&vm_purgeable_queue_lock);
1494
1495 return num_objects_purged;
1496 }
1497
1498 int
1499 vm_purgeable_purge_task_owned(
1500 task_t task)
1501 {
1502 purgeable_q_t queue;
1503 int group;
1504 int num_objects_purged;
1505
1506 num_objects_purged = 0;
1507
1508 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
1509 num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
1510 0,
1511 task);
1512
1513 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
1514 for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
1515 num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
1516 group,
1517 task);
1518
1519 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
1520 for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
1521 num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
1522 group,
1523 task);
1524
1525 return num_objects_purged;
1526 }
1527 #endif
1528
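/*
 * Start tracking a non-volatile purgeable object: record its owner, charge
 * the owner's non-volatile and phys_footprint ledgers for its resident
 * pages, and put the object on purgeable_nonvolatile_queue.
 * Call with object locked.
 */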
1529 void
1530 vm_purgeable_nonvolatile_enqueue(
1531 vm_object_t object,
1532 task_t owner)
1533 {
1534 int page_count;
1535
1536 vm_object_lock_assert_exclusive(object);
1537
1538 assert(object->purgable == VM_PURGABLE_NONVOLATILE);
1539 assert(object->vo_purgeable_owner == NULL);
1540
1541 lck_mtx_lock(&vm_purgeable_queue_lock);
1542
1543 if (owner != NULL &&
1544 owner->task_purgeable_disowning) {
1545 /* task is exiting and no longer tracking purgeable objects */
1546 owner = NULL;
1547 }
1548
1549 object->vo_purgeable_owner = owner;
1550 #if DEBUG
1551 object->vo_purgeable_volatilizer = NULL;
1552 #endif /* DEBUG */
1553
1554 #if DEBUG
1555 OSBacktrace(&object->purgeable_owner_bt[0], 16);
1556 #endif /* DEBUG */
1557
1558 page_count = object->resident_page_count;
1559 if (owner != NULL && page_count != 0) {
1560 ledger_credit(owner->ledger,
1561 task_ledgers.purgeable_nonvolatile,
1562 ptoa(page_count));
1563 ledger_credit(owner->ledger,
1564 task_ledgers.phys_footprint,
1565 ptoa(page_count));
1566 }
1567
1568 assert(object->objq.next == NULL);
1569 assert(object->objq.prev == NULL);
1570
1571 queue_enter(&purgeable_nonvolatile_queue, object,
1572 vm_object_t, objq);
1573 assert(purgeable_nonvolatile_count >= 0);
1574 purgeable_nonvolatile_count++;
1575 assert(purgeable_nonvolatile_count > 0);
1576 /* one more nonvolatile object for this object's owner */
1577 assert(object->vo_purgeable_owner == owner);
1578 vm_purgeable_nonvolatile_owner_update(owner, +1);
1579 lck_mtx_unlock(&vm_purgeable_queue_lock);
1580
1581 vm_object_lock_assert_exclusive(object);
1582 }
1583
1584 void
1585 vm_purgeable_nonvolatile_dequeue(
1586 vm_object_t object)
1587 {
1588 task_t owner;
1589
1590 vm_object_lock_assert_exclusive(object);
1591
1592 owner = object->vo_purgeable_owner;
1593 #if DEBUG
1594 assert(object->vo_purgeable_volatilizer == NULL);
1595 #endif /* DEBUG */
1596 if (owner != NULL) {
1597 /*
1598 * Update the owner's ledger to stop accounting
1599 * for this object.
1600 */
1601 vm_purgeable_accounting(object,
1602 object->purgable,
1603 TRUE); /* disown */
1604 }
1605
1606 lck_mtx_lock(&vm_purgeable_queue_lock);
1607 assert(object->objq.next != NULL);
1608 assert(object->objq.prev != NULL);
1609 queue_remove(&purgeable_nonvolatile_queue, object,
1610 vm_object_t, objq);
1611 object->objq.next = NULL;
1612 object->objq.prev = NULL;
1613 assert(purgeable_nonvolatile_count > 0);
1614 purgeable_nonvolatile_count--;
1615 assert(purgeable_nonvolatile_count >= 0);
1616 lck_mtx_unlock(&vm_purgeable_queue_lock);
1617
1618 vm_object_lock_assert_exclusive(object);
1619 }
1620
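/*
 * Move this object's pages between the owner's "volatile" and "non-volatile"
 * ledgers (and phys_footprint) according to the state being left behind
 * (old_state). With "disown", or if the object is dead, all accounting for
 * the object is dropped and its owner is cleared.
 * Call with object locked.
 */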
1621 void
1622 vm_purgeable_accounting(
1623 vm_object_t object,
1624 vm_purgable_t old_state,
1625 boolean_t disown)
1626 {
1627 task_t owner;
1628 int resident_page_count;
1629 int wired_page_count;
1630 int compressed_page_count;
1631 boolean_t disown_on_the_fly;
1632
1633 vm_object_lock_assert_exclusive(object);
1634
1635 owner = object->vo_purgeable_owner;
1636 if (owner == NULL)
1637 return;
1638
1639 if (!disown && owner->task_purgeable_disowning) {
1640 /* task is disowning its purgeable objects: help it */
1641 disown_on_the_fly = TRUE;
1642 } else {
1643 disown_on_the_fly = FALSE;
1644 }
1645
1646 resident_page_count = object->resident_page_count;
1647 wired_page_count = object->wired_page_count;
1648 if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
1649 object->pager != NULL) {
1650 compressed_page_count =
1651 vm_compressor_pager_get_count(object->pager);
1652 } else {
1653 compressed_page_count = 0;
1654 }
1655
1656 if (old_state == VM_PURGABLE_VOLATILE ||
1657 old_state == VM_PURGABLE_EMPTY) {
1658 /* less volatile bytes in ledger */
1659 ledger_debit(owner->ledger,
1660 task_ledgers.purgeable_volatile,
1661 ptoa(resident_page_count - wired_page_count));
1662 /* less compressed volatile bytes in ledger */
1663 ledger_debit(owner->ledger,
1664 task_ledgers.purgeable_volatile_compressed,
1665 ptoa(compressed_page_count));
1666
1667 if (disown || !object->alive || object->terminating) {
1668 /* wired pages were accounted as "non-volatile"... */
1669 ledger_debit(owner->ledger,
1670 task_ledgers.purgeable_nonvolatile,
1671 ptoa(wired_page_count));
1672 /* ... and in phys_footprint */
1673 ledger_debit(owner->ledger,
1674 task_ledgers.phys_footprint,
1675 ptoa(wired_page_count));
1676
1677 if (!disown_on_the_fly &&
1678 (object->purgeable_queue_type ==
1679 PURGEABLE_Q_TYPE_MAX)) {
1680 /*
1681 * Not on a volatile queue: must be empty
1682 * or emptying.
1683 */
1684 vm_purgeable_nonvolatile_owner_update(owner,-1);
1685 } else {
1686 /* on a volatile queue */
1687 vm_purgeable_volatile_owner_update(owner, -1);
1688 }
1689 /* no more accounting for this dead object */
1690 object->vo_purgeable_owner = NULL;
1691 #if DEBUG
1692 object->vo_purgeable_volatilizer = NULL;
1693 #endif /* DEBUG */
1694 return;
1695 }
1696
1697 /* more non-volatile bytes in ledger */
1698 ledger_credit(owner->ledger,
1699 task_ledgers.purgeable_nonvolatile,
1700 ptoa(resident_page_count - wired_page_count));
1701 /* more compressed non-volatile bytes in ledger */
1702 ledger_credit(owner->ledger,
1703 task_ledgers.purgeable_nonvolatile_compressed,
1704 ptoa(compressed_page_count));
1705 /* more footprint */
1706 ledger_credit(owner->ledger,
1707 task_ledgers.phys_footprint,
1708 ptoa(resident_page_count
1709 + compressed_page_count
1710 - wired_page_count));
1711
1712 } else if (old_state == VM_PURGABLE_NONVOLATILE) {
1713
1714 /* less non-volatile bytes in ledger */
1715 ledger_debit(owner->ledger,
1716 task_ledgers.purgeable_nonvolatile,
1717 ptoa(resident_page_count - wired_page_count));
1718 /* less compressed non-volatile bytes in ledger */
1719 ledger_debit(owner->ledger,
1720 task_ledgers.purgeable_nonvolatile_compressed,
1721 ptoa(compressed_page_count));
1722 /* less footprint */
1723 ledger_debit(owner->ledger,
1724 task_ledgers.phys_footprint,
1725 ptoa(resident_page_count
1726 + compressed_page_count
1727 - wired_page_count));
1728
1729 if (disown || !object->alive || object->terminating) {
1730 /* wired pages still accounted as "non-volatile" */
1731 ledger_debit(owner->ledger,
1732 task_ledgers.purgeable_nonvolatile,
1733 ptoa(wired_page_count));
1734 ledger_debit(owner->ledger,
1735 task_ledgers.phys_footprint,
1736 ptoa(wired_page_count));
1737
1738 /* one less "non-volatile" object for the owner */
1739 if (!disown_on_the_fly) {
1740 assert(object->purgeable_queue_type ==
1741 PURGEABLE_Q_TYPE_MAX);
1742 }
1743 vm_purgeable_nonvolatile_owner_update(owner, -1);
1744 /* no more accounting for this dead object */
1745 object->vo_purgeable_owner = NULL;
1746 #if DEBUG
1747 object->vo_purgeable_volatilizer = NULL;
1748 #endif /* DEBUG */
1749 return;
1750 }
1751 /* more volatile bytes in ledger */
1752 ledger_credit(owner->ledger,
1753 task_ledgers.purgeable_volatile,
1754 ptoa(resident_page_count - wired_page_count));
1755 /* more compressed volatile bytes in ledger */
1756 ledger_credit(owner->ledger,
1757 task_ledgers.purgeable_volatile_compressed,
1758 ptoa(compressed_page_count));
1759 } else {
1760 panic("vm_purgeable_accounting(%p): "
1761 "unexpected old_state=%d\n",
1762 object, old_state);
1763 }
1764
1765 vm_object_lock_assert_exclusive(object);
1766 }
1767
1768 void
1769 vm_purgeable_nonvolatile_owner_update(
1770 task_t owner,
1771 int delta)
1772 {
1773 if (owner == NULL || delta == 0) {
1774 return;
1775 }
1776
1777 if (delta > 0) {
1778 assert(owner->task_nonvolatile_objects >= 0);
1779 OSAddAtomic(delta, &owner->task_nonvolatile_objects);
1780 assert(owner->task_nonvolatile_objects > 0);
1781 } else {
1782 assert(owner->task_nonvolatile_objects > delta);
1783 OSAddAtomic(delta, &owner->task_nonvolatile_objects);
1784 assert(owner->task_nonvolatile_objects >= 0);
1785 }
1786 }
1787
1788 void
1789 vm_purgeable_volatile_owner_update(
1790 task_t owner,
1791 int delta)
1792 {
1793 if (owner == NULL || delta == 0) {
1794 return;
1795 }
1796
1797 if (delta > 0) {
1798 assert(owner->task_volatile_objects >= 0);
1799 OSAddAtomic(delta, &owner->task_volatile_objects);
1800 assert(owner->task_volatile_objects > 0);
1801 } else {
1802 assert(owner->task_volatile_objects > delta);
1803 OSAddAtomic(delta, &owner->task_volatile_objects);
1804 assert(owner->task_volatile_objects >= 0);
1805 }
1806 }
1807
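/*
 * Adjust the owner's compressed-page ledgers when pages of an owned
 * purgeable object are compressed (delta > 0) or decompressed/freed
 * (delta < 0).
 */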
1808 void
1809 vm_purgeable_compressed_update(
1810 vm_object_t object,
1811 int delta)
1812 {
1813 task_t owner;
1814
1815 vm_object_lock_assert_exclusive(object);
1816
1817 if (delta == 0 ||
1818 !object->internal ||
1819 object->purgable == VM_PURGABLE_DENY ||
1820 object->vo_purgeable_owner == NULL) {
1821 /* not an owned purgeable VM object: nothing to update */
1822 return;
1823 }
1824
1825 owner = object->vo_purgeable_owner;
1826 switch (object->purgable) {
1827 case VM_PURGABLE_DENY:
1828 break;
1829 case VM_PURGABLE_NONVOLATILE:
1830 if (delta > 0) {
1831 ledger_credit(owner->ledger,
1832 task_ledgers.purgeable_nonvolatile_compressed,
1833 ptoa(delta));
1834 ledger_credit(owner->ledger,
1835 task_ledgers.phys_footprint,
1836 ptoa(delta));
1837 } else {
1838 ledger_debit(owner->ledger,
1839 task_ledgers.purgeable_nonvolatile_compressed,
1840 ptoa(-delta));
1841 ledger_debit(owner->ledger,
1842 task_ledgers.phys_footprint,
1843 ptoa(-delta));
1844 }
1845 break;
1846 case VM_PURGABLE_VOLATILE:
1847 case VM_PURGABLE_EMPTY:
1848 if (delta > 0) {
1849 ledger_credit(owner->ledger,
1850 task_ledgers.purgeable_volatile_compressed,
1851 ptoa(delta));
1852 } else {
1853 ledger_debit(owner->ledger,
1854 task_ledgers.purgeable_volatile_compressed,
1855 ptoa(-delta));
1856 }
1857 break;
1858 default:
1859 panic("vm_purgeable_compressed_update(): "
1860 "unexpected purgable %d for object %p\n",
1861 object->purgable, object);
1862 }
1863 }