/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/sched_prim.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>                 /* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>
/*
 * LOCK ORDERING for task-owned purgeable objects
 *
 * Whenever we need to hold multiple locks while adding to, removing from,
 * or scanning a task's task_objq list of VM objects it owns, locks should
 * be taken in this order:
 *
 * VM object ==> vm_purgeable_queue_lock ==> owner_task->task_objq_lock
 *
 * If one needs to acquire the VM object lock after any of the other 2 locks,
 * one needs to use vm_object_lock_try() and, if that fails, release the
 * other locks and retake them all in the correct order.
 */
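/*
 * Illustrative sketch only (not part of this file): the retry pattern the
 * comment above describes.  A scanner that already holds
 * vm_purgeable_queue_lock may only *try* the VM object lock; on failure it
 * drops its lock, backs off, and restarts, retaking everything in order.
 * "example_scan_retry" and "pick_next_object" are hypothetical names; the
 * lock and pause primitives are the real kernel calls used in this file.
 */
#if 0	/* example only, never compiled */
static void
example_scan_retry(void)
{
	vm_object_t	object;
	int		collisions = 0;

again:
	lck_mtx_lock(&vm_purgeable_queue_lock);
	object = pick_next_object();		/* hypothetical helper */
	if (object != VM_OBJECT_NULL &&
	    !vm_object_lock_try(object)) {
		/* can't take the object lock after the queue lock: back off */
		lck_mtx_unlock(&vm_purgeable_queue_lock);
		mutex_pause(collisions++);
		goto again;
	}
	/* ... work on the locked object ... */
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	if (object != VM_OBJECT_NULL)
		vm_object_unlock(object);
}
#endif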
extern vm_pressure_level_t memorystatus_vm_pressure_level;
token_idx_t     token_q_max_cnt = 0;
vm_size_t       token_q_cur_size = 0;

token_idx_t     token_free_idx = 0;             /* head of free queue */
token_idx_t     token_init_idx = 1;             /* token 0 is reserved!! */
int32_t         token_new_pagecount = 0;        /* count of pages that will
                                                 * be added onto token queue */

int             available_for_purge = 0;        /* increase when ripe token
                                                 * added, decrease when ripe
                                                 * token removed.
                                                 * protected by page_queue_lock */

static int      token_q_allocating = 0;         /* flag for singlethreading
                                                 * allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t purgeable_nonvolatile_queue;
int purgeable_nonvolatile_count;

decl_lck_mtx_data(, vm_purgeable_queue_lock)
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
    int token_cnt = 0, page_cnt = 0;
    token_idx_t token = queue->token_q_head;
    token_idx_t unripe = 0;
    int our_inactive_count;

    static unsigned lightweight_check = 0;

    /*
     * Due to performance impact, only perform this check
     * every 100 times on DEVELOPMENT kernels.
     */
    if (lightweight_check++ < 100) {
        return;
    }

    lightweight_check = 0;

    while (token) {
        if (tokens[token].count != 0) {
            assert(queue->token_q_unripe);
            if (unripe == 0) {
                assert(token == queue->token_q_unripe);
                unripe = token;         /* remember the first unripe token */
            }
        }
        page_cnt += tokens[token].count;

        if (tokens[token].next == 0)
            assert(queue->token_q_tail == token);

        token_cnt++;
        token = tokens[token].next;
    }

    assert(queue->token_q_unripe == unripe);
    assert(token_cnt == queue->debug_count_tokens);

    /* obsolete queue doesn't maintain token counts */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
        our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
        assert(our_inactive_count >= 0);
        assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
    }
}
/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token;
    enum purgeable_q_type i;

find_available_token:

    if (token_free_idx) {                           /* unused tokens available */
        token = token_free_idx;
        token_free_idx = tokens[token_free_idx].next;
    } else if (token_init_idx < token_q_max_cnt) {  /* lazy token array init */
        token = token_init_idx;
        token_init_idx++;
    } else {                                        /* allocate more memory */
        /* Wait if another thread is inside the memory alloc section */
        while (token_q_allocating) {
            wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
                LCK_SLEEP_DEFAULT,
                (event_t) &token_q_allocating,
                THREAD_UNINT);
            if (res != THREAD_AWAKENED)
                return KERN_ABORTED;
        }

        /* Check whether memory is still maxed out */
        if (token_init_idx < token_q_max_cnt)
            goto find_available_token;

        /* Still no memory. Allocate some. */
        token_q_allocating = 1;

        /* Drop page queue lock so we can allocate */
        vm_page_unlock_queues();

        struct token *new_loc;
        vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
        kern_return_t result;

        if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
            result = KERN_RESOURCE_SHORTAGE;
        } else {
            if (token_q_cur_size) {
                result = kmem_realloc(kernel_map,
                    (vm_offset_t) tokens,
                    token_q_cur_size,
                    (vm_offset_t *) &new_loc,
                    alloc_size, VM_KERN_MEMORY_OSFMK);
            } else {
                result = kmem_alloc(kernel_map,
                    (vm_offset_t *) &new_loc,
                    alloc_size, VM_KERN_MEMORY_OSFMK);
            }
        }

        vm_page_lock_queues();

        if (result) {
            /* Unblock waiting threads */
            token_q_allocating = 0;
            thread_wakeup((event_t) &token_q_allocating);
            return result;
        }

        /* If we get here, we allocated new memory. Update pointers and
         * dealloc old range */
        struct token *old_tokens = tokens;
        tokens = new_loc;
        vm_size_t old_token_q_cur_size = token_q_cur_size;
        token_q_cur_size = alloc_size;
        token_q_max_cnt = (token_idx_t) (token_q_cur_size /
            sizeof(struct token));
        assert(token_init_idx < token_q_max_cnt);   /* We must have a free token now */

        if (old_token_q_cur_size) {                 /* clean up old mapping */
            vm_page_unlock_queues();
            /* kmem_realloc leaves the old region mapped. Get rid of it. */
            kmem_free(kernel_map, (vm_offset_t) old_tokens, old_token_q_cur_size);
            vm_page_lock_queues();
        }

        /* Unblock waiting threads */
        token_q_allocating = 0;
        thread_wakeup((event_t) &token_q_allocating);

        goto find_available_token;
    }

    /*
     * The new pagecount we got needs to be applied to all queues except
     * the obsolete queue.
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
        assert(pages <= TOKEN_COUNT_MAX);
        purgeable_queues[i].new_pages = (int32_t) pages;
        assert(purgeable_queues[i].new_pages == pages);
    }
    token_new_pagecount = 0;

    /* set token counter value */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
        tokens[token].count = queue->new_pages;
    else
        tokens[token].count = 0;        /* all obsolete items are
                                         * ripe immediately */
    queue->new_pages = 0;

    /* put token on token counter list */
    tokens[token].next = 0;
    if (queue->token_q_tail == 0) {
        assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
        queue->token_q_head = token;
        tokens[token].prev = 0;
    } else {
        tokens[queue->token_q_tail].next = token;
        tokens[token].prev = queue->token_q_tail;
    }
    if (queue->token_q_unripe == 0) {   /* only ripe tokens (token
                                         * count == 0) in queue */
        if (tokens[token].count > 0)
            queue->token_q_unripe = token;      /* first unripe token */
        else
            available_for_purge++;              /* added a ripe token?
                                                 * increase available count */
    }
    queue->token_q_tail = token;

    queue->debug_count_tokens++;
    /* Check both queues, since we modified the new_pages count on each */
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
        queue->type,
        tokens[token].count,            /* num pages on token (last token) */
        queue->debug_count_tokens,
        0,
        0);

    return KERN_SUCCESS;
}
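/*
 * Illustrative sketch only: vm_purgeable_token_add() must be called with the
 * page queue lock held, which is typically how callers invoke it when an
 * object turns volatile.  The wrapper below is hypothetical; only
 * vm_page_lock_queues()/vm_page_unlock_queues() and the token call itself
 * are real kernel interfaces.
 */
#if 0	/* example only, never compiled */
static kern_return_t
example_add_token_for_volatile_object(purgeable_q_t queue)
{
	kern_return_t kr;

	vm_page_lock_queues();		/* required by vm_purgeable_token_add() */
	kr = vm_purgeable_token_add(queue);
	vm_page_unlock_queues();
	/* KERN_SUCCESS, KERN_ABORTED or KERN_RESOURCE_SHORTAGE */
	return kr;
}
#endif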
/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token;
    token = queue->token_q_head;

    if (token) {
        assert(queue->token_q_tail);
        if (queue->token_q_head == queue->token_q_unripe) {
            /* no ripe tokens... must move unripe pointer */
            queue->token_q_unripe = tokens[token].next;
        } else {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        }

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        queue->token_q_head = tokens[token].next;
        if (queue->token_q_head) {
            tokens[queue->token_q_head].count += tokens[token].count;
            tokens[queue->token_q_head].prev = 0;
        } else {
            /* currently no other tokens in the queue */
            /*
             * the page count must be added to the next newly
             * created token
             */
            queue->new_pages += tokens[token].count;
            /* if head is zero, tail is too */
            queue->token_q_tail = 0;
        }

        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
            queue->type,
            tokens[queue->token_q_head].count,  /* num pages on new
                                                 * first token */
            token_new_pagecount,                /* num pages waiting for
                                                 * next token */
            available_for_purge,
            0);
    }

    return token;
}
static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token;
    token = queue->token_q_tail;

    if (token) {
        assert(queue->token_q_head);

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        if (queue->token_q_unripe == 0) {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        } else if (queue->token_q_unripe == token) {
            /* we're removing the only unripe token */
            queue->token_q_unripe = 0;
        }

        if (token == queue->token_q_head) {
            /* token is the last one in the queue */
            queue->token_q_head = 0;
            queue->token_q_tail = 0;
        } else {
            token_idx_t new_tail;

            new_tail = tokens[token].prev;

            assert(tokens[new_tail].next == token);

            queue->token_q_tail = new_tail;
            tokens[new_tail].next = 0;
        }

        queue->new_pages += tokens[token].count;

        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
            queue->type,
            tokens[queue->token_q_head].count,  /* num pages on new
                                                 * first token */
            token_new_pagecount,                /* num pages waiting for
                                                 * next token */
            available_for_purge,
            0);
    }

    return token;
}
/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    token_idx_t token = vm_purgeable_token_remove_first(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}
void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    token_idx_t token = vm_purgeable_token_remove_last(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}
/* Call with page queue locked. */
void
vm_purgeable_q_advance_all(void)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    /* check queue counters - if they get really large, scale them back.
     * They tend to get that large when there is no purgeable queue action */
    int i;
    if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {  /* a system idling for years might get there */
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
            int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
            assert(pages <= TOKEN_COUNT_MAX);
            purgeable_queues[i].new_pages = (int32_t) pages;
            assert(purgeable_queues[i].new_pages == pages);
        }
        token_new_pagecount = 0;
    }

    /*
     * Decrement token counters. A token counter can be zero, this means the
     * object is ripe to be purged. It is not purged immediately, because that
     * could cause several objects to be purged even if purging one would satisfy
     * the memory needs. Instead, the pageout thread purges one after the other
     * by calling vm_purgeable_object_purge_one and then rechecking the memory
     * balance.
     *
     * No need to advance obsolete queue - all items are ripe there,
     * always.
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue = &purgeable_queues[i];
        uint32_t num_pages = 1;

        /* Iterate over tokens as long as there are unripe tokens. */
        while (queue->token_q_unripe) {
            if (tokens[queue->token_q_unripe].count && num_pages) {
                tokens[queue->token_q_unripe].count -= 1;
                num_pages -= 1;
            }

            if (tokens[queue->token_q_unripe].count == 0) {
                queue->token_q_unripe = tokens[queue->token_q_unripe].next;
                available_for_purge++;
                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
                    queue->type,
                    tokens[queue->token_q_head].count,  /* num pages on new
                                                         * first token */
                    0,
                    available_for_purge,
                    0);
                continue;       /* One token ripened. Make sure to
                                 * check the next. */
            }
            if (num_pages == 0)
                break;          /* Current token not ripe and no more pages.
                                 * Work done. */
        }

        /*
         * if there are no unripe tokens in the queue, decrement the
         * new_pages counter instead. new_pages can be negative, but must be
         * canceled out by token_new_pagecount -- since the inactive queue as a
         * whole always contains a nonnegative number of pages
         */
        if (!queue->token_q_unripe) {
            queue->new_pages -= num_pages;
            assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
        }

        vm_purgeable_token_check_queue(queue);
    }
}
/*
 * Grab any ripe object and purge it: obsolete queue first, then go through
 * each volatile group. Select a queue with a ripe token.
 * Start with the first group (0):
 * 1. Look at queue. Is there an object?
 *	Yes - purge it. Remove token.
 *	No - check other queue. Is there an object?
 *		No - increment group, then go to (1).
 *		Yes - purge it. Remove token. If there is no ripe token, remove a ripe
 *		      token from the other queue and migrate an unripe token from this
 *		      queue to the other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
    /* return token to free list. advance token list. */
    token_idx_t new_head = tokens[queue->token_q_head].next;
    tokens[queue->token_q_head].next = token_free_idx;
    tokens[queue->token_q_head].prev = 0;
    token_free_idx = queue->token_q_head;
    queue->token_q_head = new_head;
    tokens[new_head].prev = 0;
    if (new_head == 0)
        queue->token_q_tail = 0;

    queue->debug_count_tokens--;
    vm_purgeable_token_check_queue(queue);

    available_for_purge--;
    assert(available_for_purge >= 0);
}
/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(queue->token_q_head);

    if (tokens[queue->token_q_head].count == 0) {
        /* This queue has a ripe token. Remove. */
        vm_purgeable_token_remove_ripe(queue);
    } else {
        /*
         * queue2 must have a ripe token. Remove, and migrate one
         * from queue to queue2.
         */
        vm_purgeable_token_remove_ripe(queue2);

        /* migrate unripe token */
        token_idx_t token;
        token_cnt_t count;

        /* remove token from queue1 */
        assert(queue->token_q_unripe == queue->token_q_head);   /* queue1 had no unripe
                                                                 * tokens, remember? */
        token = vm_purgeable_token_remove_first(queue);

        count = tokens[token].count;

        /* migrate to queue2 */
        /* go to migration target loc */

        token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;

        while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
            count -= tokens[token_to_insert_before].count;
            token_to_insert_before = tokens[token_to_insert_before].next;
        }

        /* token_to_insert_before is now set correctly */

        /* should the inserted token become the first unripe token? */
        if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
            queue2->token_q_unripe = token;     /* if so, must update unripe pointer */

        /*
         * insert the token:
         * if inserting at end, reduce new_pages by that value;
         * otherwise, reduce counter of next token
         */

        tokens[token].count = count;

        if (token_to_insert_before != 0) {
            token_to_insert_after = tokens[token_to_insert_before].prev;

            tokens[token].next = token_to_insert_before;
            tokens[token_to_insert_before].prev = token;

            assert(tokens[token_to_insert_before].count >= count);
            tokens[token_to_insert_before].count -= count;
        } else {
            /* if we ran off the end of the list, the token to insert after is the tail */
            token_to_insert_after = queue2->token_q_tail;

            tokens[token].next = 0;
            queue2->token_q_tail = token;

            assert(queue2->new_pages >= (int32_t) count);
            queue2->new_pages -= count;
        }

        if (token_to_insert_after != 0) {
            tokens[token].prev = token_to_insert_after;
            tokens[token_to_insert_after].next = token;
        } else {
            /* is this case possible? */
            tokens[token].prev = 0;
            queue2->token_q_head = token;
        }

        queue2->debug_count_tokens++;
        vm_purgeable_token_check_queue(queue2);
    }
}
/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(
    purgeable_q_t queue,
    int group,
    boolean_t pick_ripe)
{
    vm_object_t object, best_object;
    int object_task_importance;
    int best_object_task_importance;
    int best_object_skipped;
    int num_objects_skipped;
    int try_lock_failed = 0;
    int try_lock_succeeded = 0;
    task_t owner;

    best_object = VM_OBJECT_NULL;
    best_object_task_importance = INT_MAX;

    LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
    /*
     * Usually we would pick the first element from a queue. However, we
     * might not be able to get a lock on it, in which case we try the
     * remaining elements in order.
     */

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
        pick_ripe,
        group,
        VM_KERNEL_UNSLIDE_OR_PERM(queue),
        0,
        0);

    num_objects_skipped = 0;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
        !queue_end(&queue->objq[group], (queue_entry_t) object);
        object = (vm_object_t) queue_next(&object->objq),
        num_objects_skipped++) {
        /*
         * To prevent us looping for an excessively long time, choose
         * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
         * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
         * we keep going until we find the first eligible object.
         */
        if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
            break;
        }

        if (pick_ripe &&
            !object->purgeable_when_ripe) {
            /* we want an object that has a ripe token */
            continue;
        }

        object_task_importance = 0;

        /*
         * We don't want to use VM_OBJECT_OWNER() here: we want to
         * distinguish kernel-owned and disowned objects.
         * Disowned objects have no owner and will have no importance...
         */
        owner = object->vo_owner;
        if (owner != NULL && owner != VM_OBJECT_OWNER_DISOWNED) {
#if CONFIG_EMBEDDED
#if CONFIG_JETSAM
            object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE);
#endif /* CONFIG_JETSAM */
#else /* CONFIG_EMBEDDED */
            object_task_importance = task_importance_estimate(owner);
#endif /* CONFIG_EMBEDDED */
        }

        if (object_task_importance < best_object_task_importance) {
            if (vm_object_lock_try(object)) {
                try_lock_succeeded++;
                if (best_object != VM_OBJECT_NULL) {
                    /* forget about previous best object */
                    vm_object_unlock(best_object);
                }
                best_object = object;
                best_object_task_importance = object_task_importance;
                best_object_skipped = num_objects_skipped;
                if (best_object_task_importance == 0) {
                    /* can't get any better: stop looking */
                    break;
                }
            } else {
                try_lock_failed++;
            }
        }
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
        num_objects_skipped,            /* considered objects */
        try_lock_failed,
        try_lock_succeeded,
        VM_KERNEL_UNSLIDE_OR_PERM(best_object),
        ((best_object == NULL) ? 0 : best_object->resident_page_count));

    object = best_object;

    if (object == VM_OBJECT_NULL) {
        return VM_OBJECT_NULL;
    }

    /* Locked. Great. We'll take it. Remove and return. */
//  printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

    vm_object_lock_assert_exclusive(object);

    queue_remove(&queue->objq[group], object,
        vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);

    object->vo_purgeable_volatilizer = NULL;

    /* keep queue of non-volatile objects */
    queue_enter(&purgeable_nonvolatile_queue, object,
        vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    /* one more nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);

    queue->debug_count_objects--;
    return object;
}
/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
    enum purgeable_q_type i;
    int group;
    vm_object_t object;
    unsigned int purged_count;
    int collisions;

    purged_count = 0;
    collisions = 0;

restart:
    lck_mtx_lock(&vm_purgeable_queue_lock);
    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue;

        queue = &purgeable_queues[i];

        /*
         * Look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            while (!queue_empty(&queue->objq[group])) {
                object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
                if (object == VM_OBJECT_NULL) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    mutex_pause(collisions++);
                    goto restart;
                }

                lck_mtx_unlock(&vm_purgeable_queue_lock);

                /* Lock the page queue here so we don't hold it
                 * over the whole, lengthy operation */
                if (object->purgeable_when_ripe) {
                    vm_page_lock_queues();
                    vm_purgeable_token_remove_first(queue);
                    vm_page_unlock_queues();
                }

                (void) vm_object_purge(object, 0);
                assert(object->purgable == VM_PURGABLE_EMPTY);
                /* no change in purgeable accounting */

                vm_object_unlock(object);
                purged_count++;
                goto restart;
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
        purged_count,                   /* # of purged objects */
        0,
        available_for_purge,
        0,
        0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}
boolean_t
vm_purgeable_object_purge_one_unlocked(
    int force_purge_below_group)
{
    boolean_t retval;

    vm_page_lock_queues();
    retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
    vm_page_unlock_queues();

    return retval;
}
boolean_t
vm_purgeable_object_purge_one(
    int force_purge_below_group,
    int flags)
{
    enum purgeable_q_type i;
    int group;
    vm_object_t object = 0;
    purgeable_q_t queue, queue2;
    boolean_t forced_purge;
    unsigned int resident_page_count;

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_START,
        force_purge_below_group, flags, 0, 0, 0);

    /* Need the page queue lock since we'll be changing the token queue. */
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        queue = &purgeable_queues[i];

        if (force_purge_below_group == 0) {
            /*
             * Are there any ripe tokens on this queue? If yes,
             * we'll find an object to purge there
             */
            if (!queue->token_q_head) {
                /* no token: look at next purgeable queue */
                continue;
            }
            if (tokens[queue->token_q_head].count != 0) {
                /* no ripe token: next queue */
                continue;
            }
        }

        /*
         * Now look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            if (!queue->token_q_head ||
                tokens[queue->token_q_head].count != 0) {
                /* no tokens or no ripe tokens */

                if (group >= force_purge_below_group) {
                    /* no more groups to force-purge */
                    break;
                }

                /*
                 * Try and purge an object in this group
                 * even though no tokens are ripe.
                 */
                if (!queue_empty(&queue->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_delete_first(queue);
                    }
                    forced_purge = TRUE;
                    goto purge_now;
                }

                /* nothing to purge in this group: next group */
                continue;
            }
            if (!queue_empty(&queue->objq[group]) &&
                (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                if (object->purgeable_when_ripe) {
                    vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                }
                forced_purge = FALSE;
                goto purge_now;
            }
            if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
                /* This is the token migration case, and it works between
                 * FIFO and LIFO only */
                queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
                    PURGEABLE_Q_TYPE_FIFO :
                    PURGEABLE_Q_TYPE_LIFO];

                if (!queue_empty(&queue2->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                    }
                    forced_purge = FALSE;
                    goto purge_now;
                }
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    /*
     * because we have to do a try_lock on the objects which could fail,
     * we could end up with no object to purge at this time, even though
     * we have objects in a purgeable state
     */
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
        0, 0, available_for_purge, 0, 0);

    return FALSE;

purge_now:

    assert(object);
    vm_page_unlock_queues();    /* Unlock for call to vm_object_purge() */
//  printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_owner, task_importance_estimate(object->vo_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
    resident_page_count = object->resident_page_count;
    (void) vm_object_purge(object, flags);
    assert(object->purgable == VM_PURGABLE_EMPTY);
    /* no change in purgeable accounting */
    vm_object_unlock(object);
    vm_page_lock_queues();

    vm_pageout_vminfo.vm_pageout_pages_purged += resident_page_count;

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
        VM_KERNEL_UNSLIDE_OR_PERM(object),      /* purged object */
        resident_page_count,
        available_for_purge,
        0,
        0);

    return TRUE;
}
/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
    vm_object_lock_assert_exclusive(object);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
        vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    /* one less nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), -1);

    if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
        group = 0;

    if (queue->type != PURGEABLE_Q_TYPE_LIFO)   /* fifo and obsolete are
                                                 * fifo-queued */
        queue_enter(&queue->objq[group], object, vm_object_t, objq);        /* last to die */
    else
        queue_enter_first(&queue->objq[group], object, vm_object_t, objq);  /* first to die */
    /* one more volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), +1);

    object->purgeable_queue_type = queue->type;
    object->purgeable_queue_group = group;

    assert(object->vo_purgeable_volatilizer == NULL);
    object->vo_purgeable_volatilizer = current_task();
    OSBacktrace(&object->purgeable_volatilizer_bt[0],
        ARRAY_COUNT(object->purgeable_volatilizer_bt));

    queue->debug_count_objects++;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
        0,
        tokens[queue->token_q_head].count,
        queue->type,
        group,
        0);

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}
/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
    int group;
    enum purgeable_q_type type;
    purgeable_q_t queue;

    vm_object_lock_assert_exclusive(object);

    type = object->purgeable_queue_type;
    group = object->purgeable_queue_group;

    if (type == PURGEABLE_Q_TYPE_MAX) {
        if (object->objq.prev || object->objq.next)
            panic("unmarked object on purgeable q");

        return NULL;
    } else if (!(object->objq.prev && object->objq.next))
        panic("marked object not on purgeable q");

    lck_mtx_lock(&vm_purgeable_queue_lock);

    queue = &purgeable_queues[type];

    queue_remove(&queue->objq[group], object, vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);

    object->vo_purgeable_volatilizer = NULL;

    /* keep queue of non-volatile objects */
    if (object->alive && !object->terminating) {
        queue_enter(&purgeable_nonvolatile_queue, object,
            vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);
    }

    queue->debug_count_objects--;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
        0,
        tokens[queue->token_q_head].count,
        queue->type,
        group,
        0);

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;

    vm_object_lock_assert_exclusive(object);

    return &purgeable_queues[type];
}
static void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
    LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    stat->count = stat->size = 0;
    vm_object_t object;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
        !queue_end(&queue->objq[group], (queue_entry_t) object);
        object = (vm_object_t) queue_next(&object->objq)) {
        if (!target_task || VM_OBJECT_OWNER(object) == target_task) {
            stat->count++;
            stat->size += (object->resident_page_count * PAGE_SIZE);
        }
    }
    return;
}
void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
    purgeable_q_t queue;
    int group;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Populate fifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);

    /* Populate lifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);

    /* Populate obsolete data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}
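/*
 * Illustrative sketch only: how a caller might gather per-task purgeable
 * statistics with vm_purgeable_stats().  The fifo_data/lifo_data/obsolete_data
 * members and the count/size fields follow the access pattern above; the
 * "struct vm_purgeable_info" tag behind vm_purgeable_info_t is an assumption,
 * as is the helper name.
 */
#if 0	/* example only, never compiled */
static uint64_t
example_task_volatile_bytes(task_t task)
{
	struct vm_purgeable_info	info;	/* assumed struct tag */
	uint64_t			total = 0;
	int				group;

	/* a NULL/TASK_NULL target reports all tasks, per the helper above */
	vm_purgeable_stats(&info, task);
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		total += info.fifo_data[group].size;
		total += info.lifo_data[group].size;
	}
	total += info.obsolete_data.size;
	return total;
}
#endif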
#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
    purgeable_q_t queue,
    int group,
    task_t task,
    pvm_account_info_t acnt_info)
{
    vm_object_t object;
    uint64_t compressed_count;

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
        !queue_end(&queue->objq[group], (queue_entry_t) object);
        object = (vm_object_t) queue_next(&object->objq)) {
        if (VM_OBJECT_OWNER(object) == task) {
            compressed_count = vm_compressor_pager_get_count(object->pager);
            acnt_info->pvm_volatile_compressed_count += compressed_count;
            acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }
}

/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
    task_t task,
    pvm_account_info_t acnt_info)
{
    queue_head_t *nonvolatile_q;
    vm_object_t object;
    int group;
    int state;
    uint64_t compressed_count;
    purgeable_q_t volatile_q;

    if ((task == NULL) || (acnt_info == NULL)) {
        return KERN_INVALID_ARGUMENT;
    }

    acnt_info->pvm_volatile_count = 0;
    acnt_info->pvm_volatile_compressed_count = 0;
    acnt_info->pvm_nonvolatile_count = 0;
    acnt_info->pvm_nonvolatile_compressed_count = 0;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
        !queue_end(nonvolatile_q, (queue_entry_t) object);
        object = (vm_object_t) queue_next(&object->objq)) {
        if (VM_OBJECT_OWNER(object) == task) {
            state = object->purgable;
            compressed_count = vm_compressor_pager_get_count(object->pager);
            if (state == VM_PURGABLE_EMPTY) {
                acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_volatile_compressed_count += compressed_count;
            } else {
                acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
            }
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
    acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

    return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */
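/*
 * Illustrative sketch only (DEVELOPMENT/DEBUG kernels): querying a task's
 * purgeable usage through vm_purgeable_account().  The pvm_* fields are the
 * ones filled in above and are reported in bytes after the PAGE_SIZE scaling
 * at the end of that routine; the "struct pvm_account_info" tag and the
 * helper name are assumptions.
 */
#if 0	/* example only, never compiled */
static void
example_log_purgeable_usage(task_t task)
{
	struct pvm_account_info	info;	/* assumed struct tag */
	kern_return_t		kr;

	kr = vm_purgeable_account(task, &info);
	if (kr != KERN_SUCCESS) {
		return;		/* KERN_INVALID_ARGUMENT for a NULL task/info */
	}
	printf("volatile=%llu nonvolatile=%llu (compressed %llu/%llu) bytes\n",
	    (unsigned long long)info.pvm_volatile_count,
	    (unsigned long long)info.pvm_nonvolatile_count,
	    (unsigned long long)info.pvm_volatile_compressed_count,
	    (unsigned long long)info.pvm_nonvolatile_compressed_count);
}
#endif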
void
vm_purgeable_disown(
    task_t task)
{
    vm_object_t next_object;
    vm_object_t object;
    int collisions;

    if (task == NULL) {
        return;
    }

    /*
     * Scan the purgeable objects queues for objects owned by "task".
     * This has to be done "atomically" under the "vm_purgeable_queue"
     * lock, to ensure that no new purgeable object get associated
     * with this task or moved between queues while we're scanning.
     */

    /*
     * Scan non-volatile queue for objects owned by "task".
     */

    collisions = 0;

again:
    if (task->task_purgeable_disowned) {
        /* task has already disowned its purgeable memory */
        assert(task->task_volatile_objects == 0);
        assert(task->task_nonvolatile_objects == 0);
        return;
    }

    lck_mtx_lock(&vm_purgeable_queue_lock);
    task_objq_lock(task);

    task->task_purgeable_disowning = TRUE;

    for (object = (vm_object_t) queue_first(&task->task_objq);
        !queue_end(&task->task_objq, (queue_entry_t) object);
        object = next_object) {
        if (task->task_nonvolatile_objects == 0 &&
            task->task_volatile_objects == 0) {
            /* no more purgeable objects owned by "task" */
            break;
        }

        next_object = (vm_object_t) queue_next(&object->task_objq);
        if (object->purgable == VM_PURGABLE_DENY) {
            /* not a purgeable object: skip */
            continue;
        }

        assert(object->vo_purgeable_volatilizer == NULL);

        assert(object->vo_owner == task);
        if (!vm_object_lock_try(object)) {
            lck_mtx_unlock(&vm_purgeable_queue_lock);
            task_objq_unlock(task);
            mutex_pause(collisions++);
            goto again;
        }
        /* transfer ownership to the kernel */
        assert(VM_OBJECT_OWNER(object) != kernel_task);
        vm_object_ownership_change(
            object,
            object->vo_ledger_tag,      /* unchanged */
            VM_OBJECT_OWNER_DISOWNED,   /* new owner */
            TRUE);                      /* old_owner->task_objq locked */
        assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
        vm_object_unlock(object);
    }

    if (__improbable(task->task_volatile_objects != 0 ||
        task->task_nonvolatile_objects != 0)) {
        panic("%s(%p): volatile=%d nonvolatile=%d q=%p q_first=%p q_last=%p",
            __FUNCTION__,
            task,
            task->task_volatile_objects,
            task->task_nonvolatile_objects,
            &task->task_objq,
            queue_first(&task->task_objq),
            queue_last(&task->task_objq));
    }

    /* there shouldn't be any purgeable objects owned by task now */
    assert(task->task_volatile_objects == 0);
    assert(task->task_nonvolatile_objects == 0);
    assert(task->task_purgeable_disowning);

    /* and we don't need to try and disown again */
    task->task_purgeable_disowned = TRUE;

    lck_mtx_unlock(&vm_purgeable_queue_lock);
    task_objq_unlock(task);
}
static uint64_t
vm_purgeable_queue_purge_task_owned(
    purgeable_q_t queue,
    int group,
    task_t task)
{
    vm_object_t object = VM_OBJECT_NULL;
    int collisions = 0;
    uint64_t num_pages_purged = 0;

    num_pages_purged = 0;

look_again:
    lck_mtx_lock(&vm_purgeable_queue_lock);

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
        !queue_end(&queue->objq[group], (queue_entry_t) object);
        object = (vm_object_t) queue_next(&object->objq)) {
        if (object->vo_owner != task) {
            continue;
        }

        /* found an object: try and grab it */
        if (!vm_object_lock_try(object)) {
            lck_mtx_unlock(&vm_purgeable_queue_lock);
            mutex_pause(collisions++);
            goto look_again;
        }

        /* remove object from purgeable queue */
        queue_remove(&queue->objq[group], object,
            vm_object_t, objq);
        object->objq.next = NULL;
        object->objq.prev = NULL;
        object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
        object->purgeable_queue_group = 0;
        /* one less volatile object for this object's owner */
        assert(object->vo_owner == task);
        vm_purgeable_volatile_owner_update(task, -1);

        object->vo_purgeable_volatilizer = NULL;

        queue_enter(&purgeable_nonvolatile_queue, object,
            vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        assert(object->vo_owner == task);
        vm_purgeable_nonvolatile_owner_update(task, +1);

        /* unlock purgeable queues */
        lck_mtx_unlock(&vm_purgeable_queue_lock);

        if (object->purgeable_when_ripe) {
            /* remove a token */
            vm_page_lock_queues();
            vm_purgeable_token_remove_first(queue);
            vm_page_unlock_queues();
        }

        /* purge the object */
        num_pages_purged += vm_object_purge(object, 0);

        assert(object->purgable == VM_PURGABLE_EMPTY);
        /* no change for purgeable accounting */
        vm_object_unlock(object);

        /* we unlocked the purgeable queues, so start over */
        goto look_again;
    }

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    return num_pages_purged;
}
uint64_t
vm_purgeable_purge_task_owned(
    task_t task)
{
    purgeable_q_t queue = NULL;
    int group = 0;
    uint64_t num_pages_purged = 0;

    num_pages_purged = 0;

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
        0,
        task);

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
            group,
            task);

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
            group,
            task);

    return num_pages_purged;
}
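/*
 * Illustrative sketch only: vm_purgeable_purge_task_owned() walks the
 * obsolete, FIFO and LIFO queues for one task and returns the number of
 * pages it purged.  A hypothetical caller reclaiming a task's volatile
 * memory could use it as below; the surrounding policy (when to do this)
 * is not part of this file.
 */
#if 0	/* example only, never compiled */
static void
example_reclaim_task_volatile_memory(task_t task)
{
	uint64_t pages;

	pages = vm_purgeable_purge_task_owned(task);
	printf("purged %llu pages of volatile memory owned by task %p\n",
	    (unsigned long long)pages, task);
}
#endif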
void
vm_purgeable_nonvolatile_enqueue(
    vm_object_t object,
    task_t owner)
{
    vm_object_lock_assert_exclusive(object);

    assert(object->purgable == VM_PURGABLE_NONVOLATILE);
    assert(object->vo_owner == NULL);

    lck_mtx_lock(&vm_purgeable_queue_lock);

    if (owner != NULL &&
        owner->task_purgeable_disowning) {
        /* task is exiting and no longer tracking purgeable objects */
        owner = VM_OBJECT_OWNER_DISOWNED;
    }
    if (owner == NULL) {
        owner = kernel_task;
    }

    OSBacktrace(&object->purgeable_owner_bt[0],
        ARRAY_COUNT(object->purgeable_owner_bt));
    object->vo_purgeable_volatilizer = NULL;

    vm_object_ownership_change(object,
        object->vo_ledger_tag,  /* tag unchanged */
        owner,
        FALSE);                 /* task_objq_locked */

    assert(object->objq.next == NULL);
    assert(object->objq.prev == NULL);

    queue_enter(&purgeable_nonvolatile_queue, object,
        vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_nonvolatile_dequeue(
    vm_object_t object)
{
    task_t owner;

    vm_object_lock_assert_exclusive(object);

    owner = VM_OBJECT_OWNER(object);

    assert(object->vo_purgeable_volatilizer == NULL);

    if (owner != NULL) {
        /*
         * Update the owner's ledger to stop accounting
         * for this object.
         */
        /* transfer ownership to the kernel */
        assert(VM_OBJECT_OWNER(object) != kernel_task);
        vm_object_ownership_change(
            object,
            object->vo_ledger_tag,      /* unchanged */
            VM_OBJECT_OWNER_DISOWNED,   /* new owner */
            FALSE);                     /* old_owner->task_objq locked */
        assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
    }

    lck_mtx_lock(&vm_purgeable_queue_lock);
    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
        vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_accounting(
    vm_object_t object,
    vm_purgable_t old_state)
{
    task_t owner;
    int resident_page_count;
    int wired_page_count;
    int compressed_page_count;
    int ledger_idx_volatile;
    int ledger_idx_nonvolatile;
    int ledger_idx_volatile_compressed;
    int ledger_idx_nonvolatile_compressed;
    boolean_t do_footprint;

    vm_object_lock_assert_exclusive(object);
    assert(object->purgable != VM_PURGABLE_DENY);

    owner = VM_OBJECT_OWNER(object);
    if (owner == NULL ||
        object->purgable == VM_PURGABLE_DENY)
        return;

    vm_object_ledger_tag_ledgers(object,
        &ledger_idx_volatile,
        &ledger_idx_nonvolatile,
        &ledger_idx_volatile_compressed,
        &ledger_idx_nonvolatile_compressed,
        &do_footprint);

    resident_page_count = object->resident_page_count;
    wired_page_count = object->wired_page_count;
    if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
        object->pager != NULL) {
        compressed_page_count =
            vm_compressor_pager_get_count(object->pager);
    } else {
        compressed_page_count = 0;
    }

    if (old_state == VM_PURGABLE_VOLATILE ||
        old_state == VM_PURGABLE_EMPTY) {
        /* less volatile bytes in ledger */
        ledger_debit(owner->ledger,
            ledger_idx_volatile,
            ptoa_64(resident_page_count - wired_page_count));
        /* less compressed volatile bytes in ledger */
        ledger_debit(owner->ledger,
            ledger_idx_volatile_compressed,
            ptoa_64(compressed_page_count));

        /* more non-volatile bytes in ledger */
        ledger_credit(owner->ledger,
            ledger_idx_nonvolatile,
            ptoa_64(resident_page_count - wired_page_count));
        /* more compressed non-volatile bytes in ledger */
        ledger_credit(owner->ledger,
            ledger_idx_nonvolatile_compressed,
            ptoa_64(compressed_page_count));
        if (do_footprint) {
            /* more footprint */
            ledger_credit(owner->ledger,
                task_ledgers.phys_footprint,
                ptoa_64(resident_page_count
                    + compressed_page_count
                    - wired_page_count));
        }
    } else if (old_state == VM_PURGABLE_NONVOLATILE) {
        /* less non-volatile bytes in ledger */
        ledger_debit(owner->ledger,
            ledger_idx_nonvolatile,
            ptoa_64(resident_page_count - wired_page_count));
        /* less compressed non-volatile bytes in ledger */
        ledger_debit(owner->ledger,
            ledger_idx_nonvolatile_compressed,
            ptoa_64(compressed_page_count));
        if (do_footprint) {
            /* less footprint */
            ledger_debit(owner->ledger,
                task_ledgers.phys_footprint,
                ptoa_64(resident_page_count
                    + compressed_page_count
                    - wired_page_count));
        }

        /* more volatile bytes in ledger */
        ledger_credit(owner->ledger,
            ledger_idx_volatile,
            ptoa_64(resident_page_count - wired_page_count));
        /* more compressed volatile bytes in ledger */
        ledger_credit(owner->ledger,
            ledger_idx_volatile_compressed,
            ptoa_64(compressed_page_count));
    } else {
        panic("vm_purgeable_accounting(%p): "
            "unexpected old_state=%d\n",
            object, old_state);
    }

    vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_nonvolatile_owner_update(
    task_t owner,
    int delta)
{
    if (owner == NULL || delta == 0) {
        return;
    }

    if (delta > 0) {
        assert(owner->task_nonvolatile_objects >= 0);
        OSAddAtomic(delta, &owner->task_nonvolatile_objects);
        assert(owner->task_nonvolatile_objects > 0);
    } else {
        assert(owner->task_nonvolatile_objects > delta);
        OSAddAtomic(delta, &owner->task_nonvolatile_objects);
        assert(owner->task_nonvolatile_objects >= 0);
    }
}
void
vm_purgeable_volatile_owner_update(
    task_t owner,
    int delta)
{
    if (owner == NULL || delta == 0) {
        return;
    }

    if (delta > 0) {
        assert(owner->task_volatile_objects >= 0);
        OSAddAtomic(delta, &owner->task_volatile_objects);
        assert(owner->task_volatile_objects > 0);
    } else {
        assert(owner->task_volatile_objects > delta);
        OSAddAtomic(delta, &owner->task_volatile_objects);
        assert(owner->task_volatile_objects >= 0);
    }
}
void
vm_object_owner_compressed_update(
    vm_object_t object,
    int delta)
{
    task_t owner;
    int ledger_idx_volatile;
    int ledger_idx_nonvolatile;
    int ledger_idx_volatile_compressed;
    int ledger_idx_nonvolatile_compressed;
    boolean_t do_footprint;

    vm_object_lock_assert_exclusive(object);

    owner = VM_OBJECT_OWNER(object);

    if (delta == 0 ||
        !object->internal ||
        (object->purgable == VM_PURGABLE_DENY &&
        !object->vo_ledger_tag) ||
        owner == NULL) {
        /* not an owned purgeable (or tagged) VM object: nothing to update */
        return;
    }

    vm_object_ledger_tag_ledgers(object,
        &ledger_idx_volatile,
        &ledger_idx_nonvolatile,
        &ledger_idx_volatile_compressed,
        &ledger_idx_nonvolatile_compressed,
        &do_footprint);
    switch (object->purgable) {
    case VM_PURGABLE_DENY:
        /* not purgeable: must be ledger-tagged */
        assert(object->vo_ledger_tag != VM_OBJECT_LEDGER_TAG_NONE);
        /* fallthru */
    case VM_PURGABLE_NONVOLATILE:
        if (delta > 0) {
            ledger_credit(owner->ledger,
                ledger_idx_nonvolatile_compressed,
                ptoa_64(delta));
            if (do_footprint) {
                ledger_credit(owner->ledger,
                    task_ledgers.phys_footprint,
                    ptoa_64(delta));
            }
        } else {
            ledger_debit(owner->ledger,
                ledger_idx_nonvolatile_compressed,
                ptoa_64(-delta));
            if (do_footprint) {
                ledger_debit(owner->ledger,
                    task_ledgers.phys_footprint,
                    ptoa_64(-delta));
            }
        }
        break;
    case VM_PURGABLE_VOLATILE:
    case VM_PURGABLE_EMPTY:
        if (delta > 0) {
            ledger_credit(owner->ledger,
                ledger_idx_volatile_compressed,
                ptoa_64(delta));
        } else {
            ledger_debit(owner->ledger,
                ledger_idx_volatile_compressed,
                ptoa_64(-delta));
        }
        break;
    default:
        panic("vm_purgeable_compressed_update(): "
            "unexpected purgable %d for object %p\n",
            object->purgable, object);
    }
}