/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/sched_prim.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>                 /* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>
/*
 * LOCK ORDERING for task-owned purgeable objects
 *
 * Whenever we need to hold multiple locks while adding to, removing from,
 * or scanning a task's task_objq list of VM objects it owns, locks should
 * be taken in this order:
 *
 * VM object ==> vm_purgeable_queue_lock ==> owner_task->task_objq_lock
 *
 * If one needs to acquire the VM object lock after any of the other 2 locks,
 * one needs to use vm_object_lock_try() and, if that fails, release the
 * other locks and retake them all in the correct order.
 */
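/*
 * Illustrative sketch of the retry pattern described above (not part of the
 * build; variable names are hypothetical).  A caller that already holds
 * vm_purgeable_queue_lock and then needs a VM object lock cannot take it
 * directly without risking a lock-order violation, so it try-locks and,
 * on failure, backs off and retakes everything in the documented order:
 *
 *	if (!vm_object_lock_try(object)) {
 *		lck_mtx_unlock(&vm_purgeable_queue_lock);
 *		mutex_pause(collisions++);	// brief back-off before retrying
 *		vm_object_lock(object);		// VM object lock first ...
 *		lck_mtx_lock(&vm_purgeable_queue_lock);	// ... then the queue lock
 *	}
 */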
extern vm_pressure_level_t memorystatus_vm_pressure_level;

token_idx_t     token_q_max_cnt = 0;
vm_size_t       token_q_cur_size = 0;

token_idx_t     token_free_idx = 0;     /* head of free queue */
token_idx_t     token_init_idx = 1;     /* token 0 is reserved!! */
int32_t         token_new_pagecount = 0; /* count of pages that will
                                          * be added onto token queue */

int             available_for_purge = 0; /* increase when ripe token
                                           * added, decrease when ripe
                                           * token removed;
                                           * protected by page_queue_lock */

static int      token_q_allocating = 0; /* flag for singlethreading
                                          * the allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t    purgeable_nonvolatile_queue;
int             purgeable_nonvolatile_count;

decl_lck_mtx_data(, vm_purgeable_queue_lock);

static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
    int token_cnt = 0, page_cnt = 0;
    token_idx_t token = queue->token_q_head;
    token_idx_t unripe = 0;
    int our_inactive_count;

#if DEVELOPMENT
    static int lightweight_check = 0;

    /*
     * Due to performance impact, perform this check less frequently on DEVELOPMENT kernels.
     * Checking the queue scales linearly with its length, so we compensate
     * by performing this check less frequently as the queue grows.
     */
    if (lightweight_check++ < (100 + queue->debug_count_tokens / 512)) {
        return;
    }

    lightweight_check = 0;
#endif

    while (token) {
        if (tokens[token].count != 0) {
            assert(queue->token_q_unripe);
            if (unripe == 0) {
                assert(token == queue->token_q_unripe);
                unripe = token;
            }
        }
        page_cnt += tokens[token].count;

        if (tokens[token].next == 0) {
            assert(queue->token_q_tail == token);
        }

        token_cnt++;
        token = tokens[token].next;
    }

    assert(queue->token_q_unripe == unripe);
    assert(token_cnt == queue->debug_count_tokens);

    /* obsolete queue doesn't maintain token counts */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
        our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
        assert(our_inactive_count >= 0);
        assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
    }
}
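/*
 * Worked example of the invariant asserted above (illustrative, with made-up
 * numbers): if a non-obsolete queue holds tokens with counts 3 and 5, its
 * new_pages is 2, and token_new_pagecount is 1, then
 * page_cnt + new_pages + token_new_pagecount == 3 + 5 + 2 + 1 == 11, which
 * must equal vm_page_inactive_count - vm_page_cleaned_count: every inactive
 * (not yet cleaned) page is accounted against exactly one token or pending
 * counter.
 */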
/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    /* new token */
    token_idx_t token;
    enum purgeable_q_type i;

find_available_token:

    if (token_free_idx) {                               /* unused tokens available */
        token = token_free_idx;
        token_free_idx = tokens[token_free_idx].next;
    } else if (token_init_idx < token_q_max_cnt) {      /* lazy token array init */
        token = token_init_idx;
        token_init_idx++;
    } else {                                            /* allocate more memory */
        /* Wait if another thread is inside the memory alloc section */
        while (token_q_allocating) {
            wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
                LCK_SLEEP_DEFAULT,
                (event_t)&token_q_allocating,
                THREAD_UNINT);
            if (res != THREAD_AWAKENED) {
                return KERN_ABORTED;
            }
        }

        /* Check whether memory is still maxed out */
        if (token_init_idx < token_q_max_cnt) {
            goto find_available_token;
        }

        /* Still no memory. Allocate some. */
        token_q_allocating = 1;

        /* Drop page queue lock so we can allocate */
        vm_page_unlock_queues();

        struct token *new_loc;
        vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
        kern_return_t result;

        if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
            result = KERN_RESOURCE_SHORTAGE;
        } else {
            if (token_q_cur_size) {
                result = kmem_realloc(kernel_map,
                    (vm_offset_t) tokens,
                    token_q_cur_size,
                    (vm_offset_t *) &new_loc,
                    alloc_size, VM_KERN_MEMORY_OSFMK);
            } else {
                result = kmem_alloc(kernel_map,
                    (vm_offset_t *) &new_loc,
                    alloc_size, VM_KERN_MEMORY_OSFMK);
            }
        }

        vm_page_lock_queues();

        if (result) {
            /* Unblock waiting threads */
            token_q_allocating = 0;
            thread_wakeup((event_t)&token_q_allocating);
            return result;
        }
        /* If we get here, we allocated new memory. Update pointers and
         * dealloc old range */
        struct token *old_tokens = tokens;
        tokens = new_loc;
        vm_size_t old_token_q_cur_size = token_q_cur_size;
        token_q_cur_size = alloc_size;
        token_q_max_cnt = (token_idx_t) (token_q_cur_size /
            sizeof(struct token));
        assert(token_init_idx < token_q_max_cnt);       /* We must have a free token now */

        if (old_token_q_cur_size) {                     /* clean up old mapping */
            vm_page_unlock_queues();
            /* kmem_realloc leaves the old region mapped. Get rid of it. */
            kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
            vm_page_lock_queues();
        }

        /* Unblock waiting threads */
        token_q_allocating = 0;
        thread_wakeup((event_t)&token_q_allocating);

        goto find_available_token;
    }

    /*
     * The new pagecount we got needs to be applied to all queues except
     * the obsolete queue.
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
        assert(pages <= TOKEN_COUNT_MAX);
        purgeable_queues[i].new_pages = (int32_t) pages;
        assert(purgeable_queues[i].new_pages == pages);
    }
    token_new_pagecount = 0;
    /* set token counter value */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
        tokens[token].count = queue->new_pages;
    } else {
        tokens[token].count = 0;        /* all obsolete items are
                                         * ripe immediately */
    }
    queue->new_pages = 0;

    /* put token on token counter list */
    tokens[token].next = 0;
    if (queue->token_q_tail == 0) {
        assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
        queue->token_q_head = token;
        tokens[token].prev = 0;
    } else {
        tokens[queue->token_q_tail].next = token;
        tokens[token].prev = queue->token_q_tail;
    }
    if (queue->token_q_unripe == 0) {   /* only ripe tokens (token
                                         * count == 0) in queue */
        if (tokens[token].count > 0) {
            queue->token_q_unripe = token;      /* first unripe token */
        } else {
            available_for_purge++;              /* added a ripe token?
                                                 * increase available count */
        }
    }
    queue->token_q_tail = token;

    queue->debug_count_tokens++;
    /* Check both queues, since we modified the new_pages count on each */
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
        queue->type,
        tokens[token].count,            /* num pages on token
                                         * (last token) */
        queue->debug_count_tokens,
        available_for_purge,
        0);

    return KERN_SUCCESS;
}
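/*
 * Typical call-site sketch (an assumption, not a call site in this file):
 * a token is added under the page queue lock when an object becomes
 * volatile, so its count records how many inactive pages must drain before
 * the object is considered ripe.
 *
 *	vm_page_lock_queues();
 *	kern_return_t kr = vm_purgeable_token_add(queue);
 *	vm_page_unlock_queues();
 *	if (kr != KERN_SUCCESS) {
 *		// token table could not grow (e.g. KERN_RESOURCE_SHORTAGE)
 *	}
 */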
/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token;
    token = queue->token_q_head;

    if (token) {
        assert(queue->token_q_tail);
        if (queue->token_q_head == queue->token_q_unripe) {
            /* no ripe tokens... must move unripe pointer */
            queue->token_q_unripe = tokens[token].next;
        } else {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        }

        if (queue->token_q_tail == queue->token_q_head) {
            assert(tokens[token].next == 0);
        }

        queue->token_q_head = tokens[token].next;
        if (queue->token_q_head) {
            tokens[queue->token_q_head].count += tokens[token].count;
            tokens[queue->token_q_head].prev = 0;
        } else {
            /* currently no other tokens in the queue */
            /*
             * the page count must be added to the next newly
             * created token
             */
            queue->new_pages += tokens[token].count;
            /* if head is zero, tail is too */
            queue->token_q_tail = 0;
        }

        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
            queue->type,
            tokens[queue->token_q_head].count,  /* num pages on new
                                                 * first token */
            token_new_pagecount,                /* num pages waiting for
                                                 * next token */
            available_for_purge,
            0);
    }

    return token;
}
static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token;
    token = queue->token_q_tail;

    if (token) {
        assert(queue->token_q_head);

        if (queue->token_q_tail == queue->token_q_head) {
            assert(tokens[token].next == 0);
        }

        if (queue->token_q_unripe == 0) {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        } else if (queue->token_q_unripe == token) {
            /* we're removing the only unripe token */
            queue->token_q_unripe = 0;
        }

        if (token == queue->token_q_head) {
            /* token is the last one in the queue */
            queue->token_q_head = 0;
            queue->token_q_tail = 0;
        } else {
            token_idx_t new_tail;

            new_tail = tokens[token].prev;

            assert(tokens[new_tail].next == token);

            queue->token_q_tail = new_tail;
            tokens[new_tail].next = 0;
        }

        queue->new_pages += tokens[token].count;

        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
            queue->type,
            tokens[queue->token_q_head].count,  /* num pages on new
                                                 * first token */
            token_new_pagecount,                /* num pages waiting for
                                                 * next token */
            available_for_purge,
            0);
    }

    return token;
}
/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    token_idx_t token = vm_purgeable_token_remove_first(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}
void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    token_idx_t token = vm_purgeable_token_remove_last(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}
/* Call with page queue locked. */
void
vm_purgeable_q_advance_all()
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    /* check queue counters - if they get really large, scale them back.
     * They tend to get that large when there is no purgeable queue action */
    enum purgeable_q_type i;
    if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {  /* a system idling for years might get there */
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
            int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
            assert(pages <= TOKEN_COUNT_MAX);
            purgeable_queues[i].new_pages = (int32_t) pages;
            assert(purgeable_queues[i].new_pages == pages);
        }
        token_new_pagecount = 0;
    }

    /*
     * Decrement token counters. A token counter can be zero, this means the
     * object is ripe to be purged. It is not purged immediately, because that
     * could cause several objects to be purged even if purging one would satisfy
     * the memory needs. Instead, the pageout thread purges one after the other
     * by calling vm_purgeable_object_purge_one and then rechecking the memory
     * balance.
     *
     * No need to advance obsolete queue - all items are ripe there,
     * always.
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue = &purgeable_queues[i];
        uint32_t num_pages = 1;

        /* Iterate over tokens as long as there are unripe tokens. */
        while (queue->token_q_unripe) {
            if (tokens[queue->token_q_unripe].count && num_pages) {
                tokens[queue->token_q_unripe].count -= 1;
                num_pages -= 1;
            }

            if (tokens[queue->token_q_unripe].count == 0) {
                queue->token_q_unripe = tokens[queue->token_q_unripe].next;
                available_for_purge++;
                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
                    queue->type,
                    tokens[queue->token_q_head].count,  /* num pages on new
                                                         * first token */
                    0,
                    available_for_purge,
                    0);
                continue;       /* One token ripened. Make sure to
                                 * check the next one. */
            }
            if (num_pages == 0) {
                break;          /* Current token not ripe and no more pages.
                                 * Work done. */
            }
        }

        /*
         * If there are no unripe tokens in the queue, decrement the
         * new_pages counter instead. new_pages can be negative, but must be
         * canceled out by token_new_pagecount -- since the inactive queue as a
         * whole always contains a nonnegative number of pages.
         */
        if (!queue->token_q_unripe) {
            queue->new_pages -= num_pages;
            assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
        }

        vm_purgeable_token_check_queue(queue);
    }
}
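/*
 * Example of the ripening arithmetic above (illustrative numbers): a token
 * created while the queue had accumulated 100 new pages starts with
 * count == 100.  Each call here consumes one page's worth of budget per
 * queue; after roughly 100 calls the token's count reaches 0, token_q_unripe
 * advances past it, and available_for_purge is incremented so the pageout
 * thread knows one more volatile object may be purged.
 */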
/*
 * Grab any ripe object and purge it: obsolete queue first, then go through
 * each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *      Yes - purge it. Remove token.
 *      No - check other queue. Is there an object?
 *              No - increment group, then go to (1)
 *              Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *                      token from other queue and migrate unripe token from this
 *                      queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
    /* return token to free list. advance token list. */
    token_idx_t new_head = tokens[queue->token_q_head].next;
    tokens[queue->token_q_head].next = token_free_idx;
    tokens[queue->token_q_head].prev = 0;
    token_free_idx = queue->token_q_head;
    queue->token_q_head = new_head;
    tokens[new_head].prev = 0;
    if (new_head == 0) {
        queue->token_q_tail = 0;
    }

    queue->debug_count_tokens--;
    vm_purgeable_token_check_queue(queue);

    available_for_purge--;
    assert(available_for_purge >= 0);
}
/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(queue->token_q_head);

    if (tokens[queue->token_q_head].count == 0) {
        /* This queue has a ripe token. Remove. */
        vm_purgeable_token_remove_ripe(queue);
    } else {
        /*
         * queue2 must have a ripe token. Remove, and migrate one
         * from queue to queue2.
         */
        vm_purgeable_token_remove_ripe(queue2);

        /* migrate unripe token */
        token_idx_t token;
        token_cnt_t count;

        /* remove token from queue1 */
        assert(queue->token_q_unripe == queue->token_q_head);   /* queue1 had no unripe
                                                                 * tokens, remember? */
        token = vm_purgeable_token_remove_first(queue);

        count = tokens[token].count;

        /* migrate to queue2 */
        /* go to migration target loc */

        token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;

        while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
            count -= tokens[token_to_insert_before].count;
            token_to_insert_before = tokens[token_to_insert_before].next;
        }

        /* token_to_insert_before is now set correctly */

        /* should the inserted token become the first unripe token? */
        if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0)) {
            queue2->token_q_unripe = token;     /* if so, must update unripe pointer */
        }

        /*
         * insert token:
         * if inserting at end, reduce new_pages by that value;
         * otherwise, reduce counter of next token
         */

        tokens[token].count = count;

        if (token_to_insert_before != 0) {
            token_to_insert_after = tokens[token_to_insert_before].prev;

            tokens[token].next = token_to_insert_before;
            tokens[token_to_insert_before].prev = token;

            assert(tokens[token_to_insert_before].count >= count);
            tokens[token_to_insert_before].count -= count;
        } else {
            /* if we ran off the end of the list, the token to insert after is the tail */
            token_to_insert_after = queue2->token_q_tail;

            tokens[token].next = 0;
            queue2->token_q_tail = token;

            assert(queue2->new_pages >= (int32_t) count);
            queue2->new_pages -= count;
        }

        if (token_to_insert_after != 0) {
            tokens[token].prev = token_to_insert_after;
            tokens[token_to_insert_after].next = token;
        } else {
            /* is this case possible? */
            tokens[token].prev = 0;
            queue2->token_q_head = token;
        }

        queue2->debug_count_tokens++;
        vm_purgeable_token_check_queue(queue2);
    }
}
/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(
    purgeable_q_t queue,
    int group,
    boolean_t pick_ripe)
{
    vm_object_t object, best_object;
    int object_task_importance;
    int best_object_task_importance;
    int best_object_skipped;
    int num_objects_skipped;
    int try_lock_failed = 0;
    int try_lock_succeeded = 0;
    task_t owner;

    best_object = VM_OBJECT_NULL;
    best_object_task_importance = INT_MAX;

    LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
    /*
     * Usually we would pick the first element from a queue. However, we
     * might not be able to get a lock on it, in which case we try the
     * remaining elements in order.
     */

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
        pick_ripe,
        group,
        VM_KERNEL_UNSLIDE_OR_PERM(queue),
        0,
        0);

    num_objects_skipped = 0;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
        !queue_end(&queue->objq[group], (queue_entry_t) object);
        object = (vm_object_t) queue_next(&object->objq),
        num_objects_skipped++) {
        /*
         * To prevent us looping for an excessively long time, choose
         * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
         * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
         * we keep going until we find the first eligible object.
         */
        if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
            break;
        }

        if (pick_ripe &&
            !object->purgeable_when_ripe) {
            /* we want an object that has a ripe token */
            continue;
        }

        object_task_importance = 0;

        /*
         * We don't want to use VM_OBJECT_OWNER() here: we want to
         * distinguish kernel-owned and disowned objects.
         * Disowned objects have no owner and will have no importance...
         */
        owner = object->vo_owner;
        if (owner != NULL && owner != VM_OBJECT_OWNER_DISOWNED) {
#if !XNU_TARGET_OS_OSX
#if CONFIG_JETSAM
            object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE);
#endif /* CONFIG_JETSAM */
#else /* !XNU_TARGET_OS_OSX */
            object_task_importance = task_importance_estimate(owner);
#endif /* !XNU_TARGET_OS_OSX */
        }

        if (object_task_importance < best_object_task_importance) {
            if (vm_object_lock_try(object)) {
                try_lock_succeeded++;
                if (best_object != VM_OBJECT_NULL) {
                    /* forget about previous best object */
                    vm_object_unlock(best_object);
                }
                best_object = object;
                best_object_task_importance = object_task_importance;
                best_object_skipped = num_objects_skipped;
                if (best_object_task_importance == 0) {
                    /* can't get any better: stop looking */
                    break;
                }
            } else {
                try_lock_failed++;
            }
        }
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
        num_objects_skipped,    /* considered objects */
        try_lock_failed,
        try_lock_succeeded,
        VM_KERNEL_UNSLIDE_OR_PERM(best_object),
        ((best_object == NULL) ? 0 : best_object->resident_page_count));

    object = best_object;

    if (object == VM_OBJECT_NULL) {
        return VM_OBJECT_NULL;
    }

    /* Locked. Great. We'll take it. Remove and return. */
//	printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

    vm_object_lock_assert_exclusive(object);

    queue_remove(&queue->objq[group], object,
        vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);

    object->vo_purgeable_volatilizer = NULL;

    /* keep queue of non-volatile objects */
    queue_enter(&purgeable_nonvolatile_queue, object,
        vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    /* one more nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);

    queue->debug_count_objects--;

    return object;
}
/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
    enum purgeable_q_type i;
    int group;
    vm_object_t object;
    unsigned int purged_count;
    uint32_t collisions;

    purged_count = 0;
    collisions = 0;

restart:
    lck_mtx_lock(&vm_purgeable_queue_lock);
    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue;

        queue = &purgeable_queues[i];

        /*
         * Look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            while (!queue_empty(&queue->objq[group])) {
                object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
                if (object == VM_OBJECT_NULL) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    mutex_pause(collisions++);
                    goto restart;
                }

                lck_mtx_unlock(&vm_purgeable_queue_lock);

                /* Lock the page queue here so we don't hold it
                 * over the whole, lengthy operation */
                if (object->purgeable_when_ripe) {
                    vm_page_lock_queues();
                    vm_purgeable_token_remove_first(queue);
                    vm_page_unlock_queues();
                }

                (void) vm_object_purge(object, 0);
                assert(object->purgable == VM_PURGABLE_EMPTY);
                /* no change in purgeable accounting */

                vm_object_unlock(object);
                purged_count++;
                goto restart;
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
        purged_count,   /* # of purged objects */
        0,
        available_for_purge,
        0,
        0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}
boolean_t
vm_purgeable_object_purge_one_unlocked(
    int force_purge_below_group)
{
    boolean_t retval;

    vm_page_lock_queues();
    retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
    vm_page_unlock_queues();

    return retval;
}
boolean_t
vm_purgeable_object_purge_one(
    int force_purge_below_group,
    int flags)
{
    enum purgeable_q_type i;
    int group;
    vm_object_t object = 0;
    purgeable_q_t queue, queue2;
    boolean_t forced_purge;
    unsigned int resident_page_count;

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_START,
        force_purge_below_group, flags, 0, 0, 0);

    /* Need the page queue lock since we'll be changing the token queue. */
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        queue = &purgeable_queues[i];

        if (force_purge_below_group == 0) {
            /*
             * Are there any ripe tokens on this queue? If yes,
             * we'll find an object to purge there
             */
            if (!queue->token_q_head) {
                /* no token: look at next purgeable queue */
                continue;
            }
            if (tokens[queue->token_q_head].count != 0) {
                /* no ripe token: next queue */
                continue;
            }
        }

        /*
         * Now look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            if (!queue->token_q_head ||
                tokens[queue->token_q_head].count != 0) {
                /* no tokens or no ripe tokens */

                if (group >= force_purge_below_group) {
                    /* no more groups to force-purge */
                    break;
                }

                /*
                 * Try and purge an object in this group
                 * even though no tokens are ripe.
                 */
                if (!queue_empty(&queue->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_delete_first(queue);
                    }
                    forced_purge = TRUE;
                    goto purge_now;
                }

                /* nothing to purge in this group: next group */
                continue;
            }
            if (!queue_empty(&queue->objq[group]) &&
                (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                if (object->purgeable_when_ripe) {
                    vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                }
                forced_purge = FALSE;
                goto purge_now;
            }
            if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
                /* This is the token migration case, and it works between
                 * FIFO and LIFO only */
                queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
                    PURGEABLE_Q_TYPE_FIFO :
                    PURGEABLE_Q_TYPE_LIFO];

                if (!queue_empty(&queue2->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                    }
                    forced_purge = FALSE;
                    goto purge_now;
                }
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    /*
     * because we have to do a try_lock on the objects which could fail,
     * we could end up with no object to purge at this time, even though
     * we have objects in a purgeable state
     */
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
        0, 0, available_for_purge, 0, 0);

    return FALSE;

purge_now:

    assert(object);
    vm_page_unlock_queues();    /* Unlock for call to vm_object_purge() */
//	printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_owner, task_importance_estimate(object->vo_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
    resident_page_count = object->resident_page_count;
    (void) vm_object_purge(object, flags);
    assert(object->purgable == VM_PURGABLE_EMPTY);
    /* no change in purgeable accounting */
    vm_object_unlock(object);
    vm_page_lock_queues();

    vm_pageout_vminfo.vm_pageout_pages_purged += resident_page_count;

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
        VM_KERNEL_UNSLIDE_OR_PERM(object),      /* purged object */
        resident_page_count,
        available_for_purge,
        0,
        0);

    return TRUE;
}
/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
    vm_object_lock_assert_exclusive(object);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
        vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    /* one less nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), -1);

    if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE) {
        group = 0;
    }

    if (queue->type != PURGEABLE_Q_TYPE_LIFO) {         /* fifo and obsolete are
                                                         * fifo-queued */
        queue_enter(&queue->objq[group], object, vm_object_t, objq);        /* last to die */
    } else {
        queue_enter_first(&queue->objq[group], object, vm_object_t, objq);  /* first to die */
    }
    /* one more volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), +1);

    object->purgeable_queue_type = queue->type;
    object->purgeable_queue_group = group;

    assert(object->vo_purgeable_volatilizer == NULL);
    object->vo_purgeable_volatilizer = current_task();
    OSBacktrace(&object->purgeable_volatilizer_bt[0],
        ARRAY_COUNT(object->purgeable_volatilizer_bt));

    queue->debug_count_objects++;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
        0,
        tokens[queue->token_q_head].count,
        queue->type,
        group,
        0);

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}
/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
    int group;
    enum purgeable_q_type type;
    purgeable_q_t queue;

    vm_object_lock_assert_exclusive(object);

    type = object->purgeable_queue_type;
    group = object->purgeable_queue_group;

    if (type == PURGEABLE_Q_TYPE_MAX) {
        if (object->objq.prev || object->objq.next) {
            panic("unmarked object on purgeable q");
        }
        return NULL;
    } else if (!(object->objq.prev && object->objq.next)) {
        panic("marked object not on purgeable q");
    }

    lck_mtx_lock(&vm_purgeable_queue_lock);

    queue = &purgeable_queues[type];

    queue_remove(&queue->objq[group], object, vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);
    object->vo_purgeable_volatilizer = NULL;
    /* keep queue of non-volatile objects */
    if (object->alive && !object->terminating) {
        queue_enter(&purgeable_nonvolatile_queue, object,
            vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);
    }

    queue->debug_count_objects--;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
        0,
        tokens[queue->token_q_head].count,
        queue->type,
        group,
        0);

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;

    vm_object_lock_assert_exclusive(object);

    return &purgeable_queues[type];
}
static void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
    vm_object_t object;

    LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    stat->count = stat->size = 0;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
        !queue_end(&queue->objq[group], (queue_entry_t) object);
        object = (vm_object_t) queue_next(&object->objq)) {
        if (!target_task || VM_OBJECT_OWNER(object) == target_task) {
            stat->count++;
            stat->size += (object->resident_page_count * PAGE_SIZE);
        }
    }
    return;
}
void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
    purgeable_q_t queue;
    int group;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Populate fifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);
    }

    /* Populate lifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);
    }

    /* Populate obsolete data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}
#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
    purgeable_q_t queue,
    int group,
    task_t task,
    pvm_account_info_t acnt_info)
{
    vm_object_t object;
    uint64_t compressed_count;

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
        !queue_end(&queue->objq[group], (queue_entry_t) object);
        object = (vm_object_t) queue_next(&object->objq)) {
        if (VM_OBJECT_OWNER(object) == task) {
            compressed_count = vm_compressor_pager_get_count(object->pager);
            acnt_info->pvm_volatile_compressed_count += compressed_count;
            acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }
}
/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
    task_t task,
    pvm_account_info_t acnt_info)
{
    queue_head_t *nonvolatile_q;
    vm_object_t object;
    int group;
    int state;
    uint64_t compressed_count;
    purgeable_q_t volatile_q;

    if ((task == NULL) || (acnt_info == NULL)) {
        return KERN_INVALID_ARGUMENT;
    }

    acnt_info->pvm_volatile_count = 0;
    acnt_info->pvm_volatile_compressed_count = 0;
    acnt_info->pvm_nonvolatile_count = 0;
    acnt_info->pvm_nonvolatile_compressed_count = 0;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
        !queue_end(nonvolatile_q, (queue_entry_t) object);
        object = (vm_object_t) queue_next(&object->objq)) {
        if (VM_OBJECT_OWNER(object) == task) {
            state = object->purgable;
            compressed_count = vm_compressor_pager_get_count(object->pager);
            if (state == VM_PURGABLE_EMPTY) {
                acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_volatile_compressed_count += compressed_count;
            } else {
                acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
            }
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
    acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

    return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */
static uint64_t
vm_purgeable_queue_purge_task_owned(
    purgeable_q_t queue,
    int group,
    task_t task)
{
    vm_object_t object = VM_OBJECT_NULL;
    int collisions = 0;
    uint64_t num_pages_purged = 0;

    num_pages_purged = 0;

look_again:
    lck_mtx_lock(&vm_purgeable_queue_lock);

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
        !queue_end(&queue->objq[group], (queue_entry_t) object);
        object = (vm_object_t) queue_next(&object->objq)) {
        if (object->vo_owner != task) {
            continue;
        }

        /* found an object: try and grab it */
        if (!vm_object_lock_try(object)) {
            lck_mtx_unlock(&vm_purgeable_queue_lock);
            mutex_pause(collisions++);
            goto look_again;
        }

        /* remove object from purgeable queue */
        queue_remove(&queue->objq[group], object,
            vm_object_t, objq);
        object->objq.next = NULL;
        object->objq.prev = NULL;
        object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
        object->purgeable_queue_group = 0;
        /* one less volatile object for this object's owner */
        assert(object->vo_owner == task);
        vm_purgeable_volatile_owner_update(task, -1);

        object->vo_purgeable_volatilizer = NULL;

        queue_enter(&purgeable_nonvolatile_queue, object,
            vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        assert(object->vo_owner == task);
        vm_purgeable_nonvolatile_owner_update(task, +1);

        /* unlock purgeable queues */
        lck_mtx_unlock(&vm_purgeable_queue_lock);

        if (object->purgeable_when_ripe) {
            /* remove a token */
            vm_page_lock_queues();
            vm_purgeable_token_remove_first(queue);
            vm_page_unlock_queues();
        }

        /* purge the object */
        num_pages_purged += vm_object_purge(object, 0);

        assert(object->purgable == VM_PURGABLE_EMPTY);
        /* no change for purgeable accounting */
        vm_object_unlock(object);

        /* we unlocked the purgeable queues, so start over */
        goto look_again;
    }

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    return num_pages_purged;
}
uint64_t
vm_purgeable_purge_task_owned(
    task_t task)
{
    purgeable_q_t queue = NULL;
    int group = 0;
    uint64_t num_pages_purged = 0;

    num_pages_purged = 0;

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
        0,
        task);

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
            group,
            task);
    }

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
            group,
            task);
    }

    return num_pages_purged;
}
void
vm_purgeable_nonvolatile_enqueue(
    vm_object_t object,
    task_t owner)
{
    int ledger_flags;
    kern_return_t kr;

    vm_object_lock_assert_exclusive(object);

    assert(object->purgable == VM_PURGABLE_NONVOLATILE);
    assert(object->vo_owner == NULL);

    lck_mtx_lock(&vm_purgeable_queue_lock);

    if (owner != NULL &&
        owner->task_objects_disowning) {
        /* task is exiting and no longer tracking purgeable objects */
        owner = VM_OBJECT_OWNER_DISOWNED;
    }
    if (owner == NULL) {
        owner = kernel_task;
    }

    OSBacktrace(&object->purgeable_owner_bt[0],
        ARRAY_COUNT(object->purgeable_owner_bt));
    object->vo_purgeable_volatilizer = NULL;

    ledger_flags = 0;
    if (object->vo_no_footprint) {
        ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
    }
    kr = vm_object_ownership_change(object,
        object->vo_ledger_tag,          /* tag unchanged */
        owner,
        ledger_flags,
        FALSE);                         /* task_objq_locked */
    assert(kr == KERN_SUCCESS);

    assert(object->objq.next == NULL);
    assert(object->objq.prev == NULL);

    queue_enter(&purgeable_nonvolatile_queue, object,
        vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_nonvolatile_dequeue(
    vm_object_t object)
{
    task_t owner;
    kern_return_t kr;

    vm_object_lock_assert_exclusive(object);

    owner = VM_OBJECT_OWNER(object);
    assert(object->vo_purgeable_volatilizer == NULL);
    if (owner != NULL) {
        /*
         * Update the owner's ledger to stop accounting
         * for this object.
         */
        /* transfer ownership to the kernel */
        assert(VM_OBJECT_OWNER(object) != kernel_task);
        kr = vm_object_ownership_change(
            object,
            object->vo_ledger_tag,      /* unchanged */
            VM_OBJECT_OWNER_DISOWNED,   /* new owner */
            0,                          /* ledger_flags */
            FALSE);                     /* old_owner->task_objq locked */
        assert(kr == KERN_SUCCESS);
        assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
    }

    lck_mtx_lock(&vm_purgeable_queue_lock);
    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
        vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_accounting(
    vm_object_t object,
    vm_purgable_t old_state)
{
    task_t owner;
    int resident_page_count;
    int wired_page_count;
    int compressed_page_count;
    int ledger_idx_volatile;
    int ledger_idx_nonvolatile;
    int ledger_idx_volatile_compressed;
    int ledger_idx_nonvolatile_compressed;
    boolean_t do_footprint;

    vm_object_lock_assert_exclusive(object);
    assert(object->purgable != VM_PURGABLE_DENY);

    owner = VM_OBJECT_OWNER(object);
    if (owner == NULL ||
        object->purgable == VM_PURGABLE_DENY) {
        return;
    }

    vm_object_ledger_tag_ledgers(object,
        &ledger_idx_volatile,
        &ledger_idx_nonvolatile,
        &ledger_idx_volatile_compressed,
        &ledger_idx_nonvolatile_compressed,
        &do_footprint);

    resident_page_count = object->resident_page_count;
    wired_page_count = object->wired_page_count;
    if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
        object->pager != NULL) {
        compressed_page_count =
            vm_compressor_pager_get_count(object->pager);
    } else {
        compressed_page_count = 0;
    }

    if (old_state == VM_PURGABLE_VOLATILE ||
        old_state == VM_PURGABLE_EMPTY) {
        /* less volatile bytes in ledger */
        ledger_debit(owner->ledger,
            ledger_idx_volatile,
            ptoa_64(resident_page_count - wired_page_count));
        /* less compressed volatile bytes in ledger */
        ledger_debit(owner->ledger,
            ledger_idx_volatile_compressed,
            ptoa_64(compressed_page_count));

        /* more non-volatile bytes in ledger */
        ledger_credit(owner->ledger,
            ledger_idx_nonvolatile,
            ptoa_64(resident_page_count - wired_page_count));
        /* more compressed non-volatile bytes in ledger */
        ledger_credit(owner->ledger,
            ledger_idx_nonvolatile_compressed,
            ptoa_64(compressed_page_count));
        if (do_footprint) {
            /* more footprint */
            ledger_credit(owner->ledger,
                task_ledgers.phys_footprint,
                ptoa_64(resident_page_count
                + compressed_page_count
                - wired_page_count));
        }
    } else if (old_state == VM_PURGABLE_NONVOLATILE) {
        /* less non-volatile bytes in ledger */
        ledger_debit(owner->ledger,
            ledger_idx_nonvolatile,
            ptoa_64(resident_page_count - wired_page_count));
        /* less compressed non-volatile bytes in ledger */
        ledger_debit(owner->ledger,
            ledger_idx_nonvolatile_compressed,
            ptoa_64(compressed_page_count));
        if (do_footprint) {
            /* less footprint */
            ledger_debit(owner->ledger,
                task_ledgers.phys_footprint,
                ptoa_64(resident_page_count
                + compressed_page_count
                - wired_page_count));
        }

        /* more volatile bytes in ledger */
        ledger_credit(owner->ledger,
            ledger_idx_volatile,
            ptoa_64(resident_page_count - wired_page_count));
        /* more compressed volatile bytes in ledger */
        ledger_credit(owner->ledger,
            ledger_idx_volatile_compressed,
            ptoa_64(compressed_page_count));
    } else {
        panic("vm_purgeable_accounting(%p): "
            "unexpected old_state=%d\n",
            object, old_state);
    }

    vm_object_lock_assert_exclusive(object);
}
static void
vm_purgeable_nonvolatile_owner_update(
    task_t owner,
    int delta)
{
    if (owner == NULL || delta == 0) {
        return;
    }

    if (delta > 0) {
        assert(owner->task_nonvolatile_objects >= 0);
        OSAddAtomic(delta, &owner->task_nonvolatile_objects);
        assert(owner->task_nonvolatile_objects > 0);
    } else {
        assert(owner->task_nonvolatile_objects > delta);
        OSAddAtomic(delta, &owner->task_nonvolatile_objects);
        assert(owner->task_nonvolatile_objects >= 0);
    }
}
static void
vm_purgeable_volatile_owner_update(
    task_t owner,
    int delta)
{
    if (owner == NULL || delta == 0) {
        return;
    }

    if (delta > 0) {
        assert(owner->task_volatile_objects >= 0);
        OSAddAtomic(delta, &owner->task_volatile_objects);
        assert(owner->task_volatile_objects > 0);
    } else {
        assert(owner->task_volatile_objects > delta);
        OSAddAtomic(delta, &owner->task_volatile_objects);
        assert(owner->task_volatile_objects >= 0);
    }
}
void
vm_object_owner_compressed_update(
    vm_object_t object,
    int delta)
{
    task_t owner;
    int ledger_idx_volatile;
    int ledger_idx_nonvolatile;
    int ledger_idx_volatile_compressed;
    int ledger_idx_nonvolatile_compressed;
    boolean_t do_footprint;

    vm_object_lock_assert_exclusive(object);

    owner = VM_OBJECT_OWNER(object);

    if (delta == 0 ||
        !object->internal ||
        (object->purgable == VM_PURGABLE_DENY &&
        !object->vo_ledger_tag) ||
        owner == NULL) {
        /* not an owned purgeable (or tagged) VM object: nothing to update */
        return;
    }

    vm_object_ledger_tag_ledgers(object,
        &ledger_idx_volatile,
        &ledger_idx_nonvolatile,
        &ledger_idx_volatile_compressed,
        &ledger_idx_nonvolatile_compressed,
        &do_footprint);
    switch (object->purgable) {
    case VM_PURGABLE_DENY:
        /* not purgeable: must be ledger-tagged */
        assert(object->vo_ledger_tag != VM_LEDGER_TAG_NONE);
        /* fall through to the non-volatile accounting */
    case VM_PURGABLE_NONVOLATILE:
        if (delta > 0) {
            ledger_credit(owner->ledger,
                ledger_idx_nonvolatile_compressed,
                ptoa_64(delta));
            if (do_footprint) {
                ledger_credit(owner->ledger,
                    task_ledgers.phys_footprint,
                    ptoa_64(delta));
            }
        } else {
            ledger_debit(owner->ledger,
                ledger_idx_nonvolatile_compressed,
                ptoa_64(-delta));
            if (do_footprint) {
                ledger_debit(owner->ledger,
                    task_ledgers.phys_footprint,
                    ptoa_64(-delta));
            }
        }
        break;
    case VM_PURGABLE_VOLATILE:
    case VM_PURGABLE_EMPTY:
        if (delta > 0) {
            ledger_credit(owner->ledger,
                ledger_idx_volatile_compressed,
                ptoa_64(delta));
        } else {
            ledger_debit(owner->ledger,
                ledger_idx_volatile_compressed,
                ptoa_64(-delta));
        }
        break;
    default:
        panic("vm_purgeable_compressed_update(): "
            "unexpected purgable %d for object %p\n",
            object->purgable, object);
    }
    return;
}