/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/sched_prim.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>				/* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>

extern vm_pressure_level_t memorystatus_vm_pressure_level;

token_idx_t	token_q_max_cnt = 0;
vm_size_t	token_q_cur_size = 0;

token_idx_t	token_free_idx = 0;		/* head of free queue */
token_idx_t	token_init_idx = 1;		/* token 0 is reserved!! */
int32_t		token_new_pagecount = 0;	/* count of pages that will
						 * be added onto token queue */

int		available_for_purge = 0;	/* increase when ripe token
						 * added, decrease when ripe
						 * token removed.
						 * protected by page_queue_lock */

static int	token_q_allocating = 0;		/* flag for singlethreading
						 * the allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t purgeable_nonvolatile_queue;
int purgeable_nonvolatile_count;

decl_lck_mtx_data(,vm_purgeable_queue_lock)
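
/*
 * Token bookkeeping overview: tokens live in a single "tokens" array and
 * are addressed by index (token_idx_t), with index 0 reserved as the NULL
 * token.  Each purgeable queue keeps a linked list of tokens; a token's
 * count is the number of inactive-queue pages that must drain before it
 * ripens (count == 0).  token_free_idx chains recycled tokens,
 * token_init_idx tracks lazy initialization of the array, and
 * token_new_pagecount batches page-count updates that have not yet been
 * folded into any queue's new_pages.
 */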

static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);

void vm_purgeable_nonvolatile_owner_update(task_t	owner,
					   int		delta);
void vm_purgeable_volatile_owner_update(task_t	owner,
					int	delta);

static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
	int		token_cnt = 0, page_cnt = 0;
	token_idx_t	token = queue->token_q_head;
	token_idx_t	unripe = 0;
	int		our_inactive_count;

#if DEVELOPMENT
	static unsigned lightweight_check = 0;

	/*
	 * Due to performance impact, only perform this check
	 * every 100 times on DEVELOPMENT kernels.
	 */
	if (lightweight_check++ < 100) {
		return;
	}

	lightweight_check = 0;
#endif

	while (token) {
		if (tokens[token].count != 0) {
			assert(queue->token_q_unripe);
			if (unripe == 0) {
				assert(token == queue->token_q_unripe);
				unripe = token;
			}
			page_cnt += tokens[token].count;
		}
		if (tokens[token].next == 0)
			assert(queue->token_q_tail == token);

		token_cnt++;
		token = tokens[token].next;
	}

	if (unripe)
		assert(queue->token_q_unripe == unripe);
	assert(token_cnt == queue->debug_count_tokens);

	/* obsolete queue doesn't maintain token counts */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
		our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
		assert(our_inactive_count >= 0);
		assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
	}
}
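
/*
 * The checks above encode the central invariant of the token scheme: for
 * the FIFO and LIFO queues, the pages represented by all tokens on the
 * queue (page_cnt), plus the pages not yet attached to any token
 * (queue->new_pages and the global token_new_pagecount), must add up to
 * the number of inactive, not-yet-cleaned pages in the system.
 */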

/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/* new token */
	token_idx_t	token;
	enum purgeable_q_type i;

find_available_token:

	if (token_free_idx) {				/* unused tokens available */
		token = token_free_idx;
		token_free_idx = tokens[token_free_idx].next;
	} else if (token_init_idx < token_q_max_cnt) {	/* lazy token array init */
		token = token_init_idx;
		token_init_idx++;
	} else {					/* allocate more memory */
		/* Wait if another thread is inside the memory alloc section */
		while (token_q_allocating) {
			wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
							  LCK_SLEEP_DEFAULT,
							  (event_t)&token_q_allocating,
							  THREAD_UNINT);
			if (res != THREAD_AWAKENED)
				return KERN_ABORTED;
		}

		/* Check whether memory is still maxed out */
		if (token_init_idx < token_q_max_cnt)
			goto find_available_token;

		/* Still no memory. Allocate some. */
		token_q_allocating = 1;

		/* Drop page queue lock so we can allocate */
		vm_page_unlock_queues();

		struct token *new_loc;
		vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
		kern_return_t result;

		if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
			result = KERN_RESOURCE_SHORTAGE;
		} else {
			if (token_q_cur_size) {
				result = kmem_realloc(kernel_map,
						      (vm_offset_t) tokens,
						      token_q_cur_size,
						      (vm_offset_t *) &new_loc,
						      alloc_size, VM_KERN_MEMORY_OSFMK);
			} else {
				result = kmem_alloc(kernel_map,
						    (vm_offset_t *) &new_loc,
						    alloc_size, VM_KERN_MEMORY_OSFMK);
			}
		}

		vm_page_lock_queues();

		if (result) {
			/* Unblock waiting threads */
			token_q_allocating = 0;
			thread_wakeup((event_t)&token_q_allocating);
			return result;
		}

		/* If we get here, we allocated new memory. Update pointers and
		 * dealloc old range */
		struct token *old_tokens = tokens;
		tokens = new_loc;
		vm_size_t old_token_q_cur_size = token_q_cur_size;
		token_q_cur_size = alloc_size;
		token_q_max_cnt = (token_idx_t) (token_q_cur_size /
						 sizeof(struct token));
		assert(token_init_idx < token_q_max_cnt);	/* We must have a free token now */

		if (old_token_q_cur_size) {	/* clean up old mapping */
			vm_page_unlock_queues();
			/* kmem_realloc leaves the old region mapped. Get rid of it. */
			kmem_free(kernel_map, (vm_offset_t) old_tokens, old_token_q_cur_size);
			vm_page_lock_queues();
		}

		/* Unblock waiting threads */
		token_q_allocating = 0;
		thread_wakeup((event_t)&token_q_allocating);

		goto find_available_token;
	}

	/*
	 * the new pagecount we got needs to be applied to all queues except
	 * obsolete
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
		assert(pages <= TOKEN_COUNT_MAX);
		purgeable_queues[i].new_pages = (int32_t) pages;
		assert(purgeable_queues[i].new_pages == pages);
	}
	token_new_pagecount = 0;
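
	/*
	 * At this point the deferred page count has been folded into every
	 * non-obsolete queue's new_pages, so the token we are about to
	 * install can simply absorb this queue's new_pages as its count:
	 * the pages it represents are exactly those that arrived since the
	 * previous token was added.
	 */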

	/* set token counter value */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
		tokens[token].count = queue->new_pages;
	else
		tokens[token].count = 0;	/* all obsolete items are
						 * ripe immediately */
	queue->new_pages = 0;

	/* put token on token counter list */
	tokens[token].next = 0;
	if (queue->token_q_tail == 0) {
		assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
		queue->token_q_head = token;
		tokens[token].prev = 0;
	} else {
		tokens[queue->token_q_tail].next = token;
		tokens[token].prev = queue->token_q_tail;
	}
	if (queue->token_q_unripe == 0) {	/* only ripe tokens (token
						 * count == 0) in queue */
		if (tokens[token].count > 0)
			queue->token_q_unripe = token;	/* first unripe token */
		else
			available_for_purge++;	/* added a ripe token?
						 * increase available count */
	}
	queue->token_q_tail = token;

	queue->debug_count_tokens++;
	/* Check both queues, since we modified the new_pages count on each */
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
			      queue->type,
			      tokens[token].count,	/* num pages on token
							 * (last token) */
			      queue->debug_count_tokens,
			      0,
			      0);

	return KERN_SUCCESS;
}

/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t	token;
	token = queue->token_q_head;

	if (token) {
		assert(queue->token_q_tail);
		if (queue->token_q_head == queue->token_q_unripe) {
			/* no ripe tokens... must move unripe pointer */
			queue->token_q_unripe = tokens[token].next;
		} else {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		}

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		queue->token_q_head = tokens[token].next;
		if (queue->token_q_head) {
			tokens[queue->token_q_head].count += tokens[token].count;
			tokens[queue->token_q_head].prev = 0;
		} else {
			/* currently no other tokens in the queue */
			/*
			 * the page count must be added to the next newly
			 * created token
			 */
			queue->new_pages += tokens[token].count;
			/* if head is zero, tail is too */
			queue->token_q_tail = 0;
		}

		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
	}
	return token;
}

static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t	token;
	token = queue->token_q_tail;

	if (token) {
		assert(queue->token_q_head);

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		if (queue->token_q_unripe == 0) {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		} else if (queue->token_q_unripe == token) {
			/* we're removing the only unripe token */
			queue->token_q_unripe = 0;
		}

		if (token == queue->token_q_head) {
			/* token is the last one in the queue */
			queue->token_q_head = 0;
			queue->token_q_tail = 0;
		} else {
			token_idx_t new_tail;

			new_tail = tokens[token].prev;

			assert(new_tail);
			assert(tokens[new_tail].next == token);

			queue->token_q_tail = new_tail;
			tokens[new_tail].next = 0;
		}

		queue->new_pages += tokens[token].count;

		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
	}
	return token;
}

/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	token_idx_t	token = vm_purgeable_token_remove_first(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		tokens[token].prev = 0;
		token_free_idx = token;
	}
}

void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	token_idx_t	token = vm_purgeable_token_remove_last(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		tokens[token].prev = 0;
		token_free_idx = token;
	}
}
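
/*
 * Token aging: each call to vm_purgeable_q_advance_all() accounts for one
 * page moving through the inactive queue.  It decrements the count of the
 * first unripe token on every volatile queue (or the queue's new_pages
 * counter when the queue has no unripe token).  A token whose count
 * reaches zero becomes ripe and bumps available_for_purge, signalling
 * that one object on that queue is now a purge candidate.
 */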

/* Call with page queue locked. */
void
vm_purgeable_q_advance_all()
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/* check queue counters - if they get really large, scale them back.
	 * They tend to get that large when there is no purgeable queue action */
	int		i;
	if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {	/* a system idling years might get there */
		for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
			int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
			assert(pages <= TOKEN_COUNT_MAX);
			purgeable_queues[i].new_pages = (int32_t) pages;
			assert(purgeable_queues[i].new_pages == pages);
		}
		token_new_pagecount = 0;
	}

	/*
	 * Decrement token counters. A token counter can be zero, this means the
	 * object is ripe to be purged. It is not purged immediately, because that
	 * could cause several objects to be purged even if purging one would satisfy
	 * the memory needs. Instead, the pageout thread purges one after the other
	 * by calling vm_purgeable_object_purge_one and then rechecking the memory
	 * balance.
	 *
	 * No need to advance obsolete queue - all items are ripe there,
	 * always.
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t	queue = &purgeable_queues[i];
		uint32_t	num_pages = 1;

		/* Iterate over tokens as long as there are unripe tokens. */
		while (queue->token_q_unripe) {
			if (tokens[queue->token_q_unripe].count && num_pages) {
				tokens[queue->token_q_unripe].count -= 1;
				num_pages -= 1;
			}

			if (tokens[queue->token_q_unripe].count == 0) {
				queue->token_q_unripe = tokens[queue->token_q_unripe].next;
				available_for_purge++;
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
						      queue->type,
						      tokens[queue->token_q_head].count,	/* num pages on new
												 * first token */
						      0,
						      available_for_purge,
						      0);
				continue;	/* One token ripened. Make sure to
						 * check the next. */
			}
			if (num_pages == 0)
				break;	/* Current token not ripe and no more pages.
					 * Work done. */
		}

		/*
		 * If there are no unripe tokens in the queue, decrement the
		 * new_pages counter instead.  new_pages can be negative, but must be
		 * canceled out by token_new_pagecount -- since the inactive queue as a
		 * whole always contains a nonnegative number of pages.
		 */
		if (!queue->token_q_unripe) {
			queue->new_pages -= num_pages;
			assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
		}
		vm_purgeable_token_check_queue(queue);
	}
}

/*
 * Grab any ripe object and purge it: obsolete queue first, then go through
 * each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *	Yes - purge it. Remove token.
 *	No - check other queue. Is there an object?
 *		No - increment group, then go to (1)
 *		Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *		      token from other queue and migrate unripe token from this
 *		      queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
	/* return token to free list. advance token list. */
	token_idx_t	new_head = tokens[queue->token_q_head].next;
	tokens[queue->token_q_head].next = token_free_idx;
	tokens[queue->token_q_head].prev = 0;
	token_free_idx = queue->token_q_head;
	queue->token_q_head = new_head;
	tokens[new_head].prev = 0;
	if (new_head == 0)
		queue->token_q_tail = 0;

	queue->debug_count_tokens--;
	vm_purgeable_token_check_queue(queue);

	available_for_purge--;
	assert(available_for_purge >= 0);
}
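
/*
 * A ripe token is consumed for every purge on its queue; removing it here
 * keeps available_for_purge in sync with the number of purges the pageout
 * code may still perform before new tokens ripen.
 */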

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(queue->token_q_head);

	if (tokens[queue->token_q_head].count == 0) {
		/* This queue has a ripe token. Remove. */
		vm_purgeable_token_remove_ripe(queue);
	} else {
		assert(queue2);
		/*
		 * queue2 must have a ripe token. Remove, and migrate one
		 * from queue to queue2.
		 */
		vm_purgeable_token_remove_ripe(queue2);

		/* migrate unripe token */
		token_idx_t	token;
		token_cnt_t	count;

		/* remove token from queue1 */
		assert(queue->token_q_unripe == queue->token_q_head);	/* queue1 had no unripe
									 * tokens, remember? */
		token = vm_purgeable_token_remove_first(queue);
		assert(token);

		count = tokens[token].count;

		/* migrate to queue2 */
		/* go to migration target loc */

		token_idx_t	token_to_insert_before = queue2->token_q_head, token_to_insert_after;

		while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
			count -= tokens[token_to_insert_before].count;
			token_to_insert_before = tokens[token_to_insert_before].next;
		}

		/* token_to_insert_before is now set correctly */

		/* should the inserted token become the first unripe token? */
		if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
			queue2->token_q_unripe = token;	/* if so, must update unripe pointer */

		/*
		 * insert token.
		 * if inserting at end, reduce new_pages by that value;
		 * otherwise, reduce counter of next token
		 */

		tokens[token].count = count;

		if (token_to_insert_before != 0) {
			token_to_insert_after = tokens[token_to_insert_before].prev;

			tokens[token].next = token_to_insert_before;
			tokens[token_to_insert_before].prev = token;

			assert(tokens[token_to_insert_before].count >= count);
			tokens[token_to_insert_before].count -= count;
		} else {
			/* if we ran off the end of the list, the token to insert after is the tail */
			token_to_insert_after = queue2->token_q_tail;

			tokens[token].next = 0;
			queue2->token_q_tail = token;

			assert(queue2->new_pages >= (int32_t) count);
			queue2->new_pages -= count;
		}

		if (token_to_insert_after != 0) {
			tokens[token].prev = token_to_insert_after;
			tokens[token_to_insert_after].next = token;
		} else {
			/* is this case possible? */
			tokens[token].prev = 0;
			queue2->token_q_head = token;
		}

		queue2->debug_count_tokens++;
		vm_purgeable_token_check_queue(queue2);
	}
}
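
/*
 * Note on the migration above: the walk over queue2 subtracts each
 * traversed token's count from the migrated token's count, so after
 * insertion the cumulative page counts along queue2 are unchanged.  When
 * the token is inserted before an existing token, that token's count is
 * reduced by the same amount; when it is appended at the tail, queue2's
 * new_pages absorbs the difference instead.
 */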

/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(
	purgeable_q_t	queue,
	int		group,
	boolean_t	pick_ripe)
{
	vm_object_t	object, best_object;
	int		object_task_importance;
	int		best_object_task_importance;
	int		best_object_skipped;
	int		num_objects_skipped;
	int		try_lock_failed = 0;
	int		try_lock_succeeded = 0;
	task_t		owner;

	best_object = VM_OBJECT_NULL;
	best_object_task_importance = INT_MAX;

	LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
	/*
	 * Usually we would pick the first element from a queue. However, we
	 * might not be able to get a lock on it, in which case we try the
	 * remaining elements in order.
	 */

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
				  pick_ripe,
				  group,
				  VM_KERNEL_UNSLIDE_OR_PERM(queue),
				  0,
				  0);

	num_objects_skipped = 0;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq),
	     num_objects_skipped++) {

		/*
		 * To prevent us looping for an excessively long time, choose
		 * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
		 * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
		 * we keep going until we find the first eligible object.
		 */
		if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
			break;
		}

		if (pick_ripe &&
		    ! object->purgeable_when_ripe) {
			/* we want an object that has a ripe token */
			continue;
		}

		object_task_importance = 0;

		owner = object->vo_purgeable_owner;
		if (owner) {
			object_task_importance = task_importance_estimate(owner);
		}

		if (object_task_importance < best_object_task_importance) {
			if (vm_object_lock_try(object)) {
				try_lock_succeeded++;
				if (best_object != VM_OBJECT_NULL) {
					/* forget about previous best object */
					vm_object_unlock(best_object);
				}
				best_object = object;
				best_object_task_importance = object_task_importance;
				best_object_skipped = num_objects_skipped;
				if (best_object_task_importance == 0) {
					/* can't get any better: stop looking */
					break;
				}
			} else {
				try_lock_failed++;
			}
		}
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
				  num_objects_skipped,	/* considered objects */
				  try_lock_failed,
				  try_lock_succeeded,
				  VM_KERNEL_UNSLIDE_OR_PERM(best_object),
				  ((best_object == NULL) ? 0 : best_object->resident_page_count));

	object = best_object;

	if (object == VM_OBJECT_NULL) {
		return VM_OBJECT_NULL;
	}

	/* Locked. Great. We'll take it. Remove and return. */
//	printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

	vm_object_lock_assert_exclusive(object);

	queue_remove(&queue->objq[group], object,
		     vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	object->purgeable_queue_group = 0;
	/* one less volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);

	object->vo_purgeable_volatilizer = NULL;

	/* keep queue of non-volatile objects */
	queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	/* one more nonvolatile object for this object's owner */
	vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, +1);

	queue->debug_count_objects--;
	return object;
}
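
/*
 * Selection policy notes: the loop above prefers the object whose owning
 * task has the lowest importance estimate, keeps the current best object
 * locked while scanning so it can be returned locked, and stops early once
 * it has looked at PURGEABLE_LOOP_MAX candidates with at least one eligible
 * object in hand (or immediately when an importance-0 owner is found).
 * Objects are only considered if their lock can be taken with a try-lock,
 * since the purgeable queue lock is held throughout.
 */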

/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
	enum purgeable_q_type i;
	int		group;
	vm_object_t	object;
	unsigned int	purged_count;
	uint32_t	collisions;

	purged_count = 0;
	collisions = 0;

restart:
	lck_mtx_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t	queue;

		queue = &purgeable_queues[i];

		/*
		 * Look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			while (!queue_empty(&queue->objq[group])) {
				object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
				if (object == VM_OBJECT_NULL) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					mutex_pause(collisions++);
					goto restart;
				}

				lck_mtx_unlock(&vm_purgeable_queue_lock);

				/* Lock the page queue here so we don't hold it
				 * over the whole, lengthy operation */
				if (object->purgeable_when_ripe) {
					vm_page_lock_queues();
					vm_purgeable_token_remove_first(queue);
					vm_page_unlock_queues();
				}

				(void) vm_object_purge(object, 0);
				assert(object->purgable == VM_PURGABLE_EMPTY);
				/* no change in purgeable accounting */

				vm_object_unlock(object);
				purged_count++;
				goto restart;
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
			      purged_count,	/* # of purged objects */
			      0,
			      available_for_purge,
			      0,
			      0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return;
}

boolean_t
vm_purgeable_object_purge_one_unlocked(
	int	force_purge_below_group)
{
	boolean_t	retval;

	vm_page_lock_queues();
	retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
	vm_page_unlock_queues();

	return retval;
}

boolean_t
vm_purgeable_object_purge_one(
	int	force_purge_below_group,
	int	flags)
{
	enum purgeable_q_type i;
	int		group;
	vm_object_t	object = 0;
	purgeable_q_t	queue, queue2;
	boolean_t	forced_purge;

	/* Need the page queue lock since we'll be changing the token queue. */
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		queue = &purgeable_queues[i];

		if (force_purge_below_group == 0) {
			/*
			 * Are there any ripe tokens on this queue? If yes,
			 * we'll find an object to purge there
			 */
			if (!queue->token_q_head) {
				/* no token: look at next purgeable queue */
				continue;
			}
			if (tokens[queue->token_q_head].count != 0) {
				/* no ripe token: next queue */
				continue;
			}
		}

		/*
		 * Now look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			if (!queue->token_q_head ||
			    tokens[queue->token_q_head].count != 0) {
				/* no tokens or no ripe tokens */

				if (group >= force_purge_below_group) {
					/* no more groups to force-purge */
					break;
				}

				/*
				 * Try and purge an object in this group
				 * even though no tokens are ripe.
				 */
				if (!queue_empty(&queue->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					if (object->purgeable_when_ripe) {
						vm_purgeable_token_delete_first(queue);
					}
					forced_purge = TRUE;
					goto purge_now;
				}

				/* nothing to purge in this group: next group */
				continue;
			}
			if (!queue_empty(&queue->objq[group]) &&
			    (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				if (object->purgeable_when_ripe) {
					vm_purgeable_token_choose_and_delete_ripe(queue, 0);
				}
				forced_purge = FALSE;
				goto purge_now;
			}
			if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
				/* This is the token migration case, and it works between
				 * FIFO and LIFO only */
				queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
							   PURGEABLE_Q_TYPE_FIFO :
							   PURGEABLE_Q_TYPE_LIFO];

				if (!queue_empty(&queue2->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					if (object->purgeable_when_ripe) {
						vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
					}
					forced_purge = FALSE;
					goto purge_now;
				}
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	/*
	 * because we have to do a try_lock on the objects which could fail,
	 * we could end up with no object to purge at this time, even though
	 * we have objects in a purgeable state
	 */
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return FALSE;

purge_now:

	assert(object);
	vm_page_unlock_queues();	/* Unlock for call to vm_object_purge() */
//	printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_purgeable_owner, task_importance_estimate(object->vo_purgeable_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
	(void) vm_object_purge(object, flags);
	assert(object->purgable == VM_PURGABLE_EMPTY);
	/* no change in purgeable accounting */
	vm_object_unlock(object);
	vm_page_lock_queues();

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
			      object,	/* purged object */
			      0,
			      available_for_purge,
			      0,
			      0);

	return TRUE;
}

/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
	vm_object_lock_assert_exclusive(object);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
		     vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	/* one less nonvolatile object for this object's owner */
	vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, -1);

	if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
		group = 0;

	if (queue->type != PURGEABLE_Q_TYPE_LIFO)	/* fifo and obsolete are
							 * fifo-queued */
		queue_enter(&queue->objq[group], object, vm_object_t, objq);	/* last to die */
	else
		queue_enter_first(&queue->objq[group], object, vm_object_t, objq);	/* first to die */
	/* one more volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, +1);

	object->purgeable_queue_type = queue->type;
	object->purgeable_queue_group = group;

	assert(object->vo_purgeable_volatilizer == NULL);
	object->vo_purgeable_volatilizer = current_task();
	OSBacktrace(&object->purgeable_volatilizer_bt[0], 16);

	queue->debug_count_objects++;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);

	lck_mtx_unlock(&vm_purgeable_queue_lock);
}
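
/*
 * Adding an object to a volatile queue implies it was previously on the
 * nonvolatile queue: it is unlinked from purgeable_nonvolatile_queue first,
 * the owner's nonvolatile object count is decremented, and the volatile
 * count incremented, so the per-task totals always reflect the object's
 * current queue.
 */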

/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
	int group;
	enum purgeable_q_type type;
	purgeable_q_t queue;

	vm_object_lock_assert_exclusive(object);

	type = object->purgeable_queue_type;
	group = object->purgeable_queue_group;

	if (type == PURGEABLE_Q_TYPE_MAX) {
		if (object->objq.prev || object->objq.next)
			panic("unmarked object on purgeable q");

		return NULL;
	} else if (!(object->objq.prev && object->objq.next))
		panic("marked object not on purgeable q");

	lck_mtx_lock(&vm_purgeable_queue_lock);

	queue = &purgeable_queues[type];

	queue_remove(&queue->objq[group], object, vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	/* one less volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);

	object->vo_purgeable_volatilizer = NULL;

	/* keep queue of non-volatile objects */
	if (object->alive && !object->terminating) {
		task_t	owner;
		queue_enter(&purgeable_nonvolatile_queue, object,
			    vm_object_t, objq);
		assert(purgeable_nonvolatile_count >= 0);
		purgeable_nonvolatile_count++;
		assert(purgeable_nonvolatile_count > 0);
		/* one more nonvolatile object for this object's owner */
		owner = object->vo_purgeable_owner;
		vm_purgeable_nonvolatile_owner_update(owner, +1);
	}

	queue->debug_count_objects--;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	object->purgeable_queue_group = 0;

	vm_object_lock_assert_exclusive(object);

	return &purgeable_queues[type];
}

static void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
	LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

	stat->count = stat->size = 0;
	vm_object_t	object;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (!target_task || object->vo_purgeable_owner == target_task) {
			stat->count++;
			stat->size += (object->resident_page_count * PAGE_SIZE);
		}
	}
	return;
}

void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
	purgeable_q_t	queue;
	int		group;

	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Populate fifo_data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);

	/* Populate lifo_data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);

	/* Populate obsolete data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return;
}

#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
	purgeable_q_t		queue,
	int			group,
	task_t			task,
	pvm_account_info_t	acnt_info)
{
	vm_object_t	object;
	uint64_t	compressed_count;

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (object->vo_purgeable_owner == task) {
			compressed_count = vm_compressor_pager_get_count(object->pager);
			acnt_info->pvm_volatile_compressed_count += compressed_count;
			acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
			acnt_info->pvm_nonvolatile_count += object->wired_page_count;
		}
	}
}

/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
	task_t			task,
	pvm_account_info_t	acnt_info)
{
	queue_head_t	*nonvolatile_q;
	vm_object_t	object;
	int		group;
	int		state;
	uint64_t	compressed_count;
	purgeable_q_t	volatile_q;

	if ((task == NULL) || (acnt_info == NULL)) {
		return KERN_INVALID_ARGUMENT;
	}

	acnt_info->pvm_volatile_count = 0;
	acnt_info->pvm_volatile_compressed_count = 0;
	acnt_info->pvm_nonvolatile_count = 0;
	acnt_info->pvm_nonvolatile_compressed_count = 0;

	lck_mtx_lock(&vm_purgeable_queue_lock);

	nonvolatile_q = &purgeable_nonvolatile_queue;
	for (object = (vm_object_t) queue_first(nonvolatile_q);
	     !queue_end(nonvolatile_q, (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (object->vo_purgeable_owner == task) {
			state = object->purgable;
			compressed_count = vm_compressor_pager_get_count(object->pager);
			if (state == VM_PURGABLE_EMPTY) {
				acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
				acnt_info->pvm_volatile_compressed_count += compressed_count;
			} else {
				acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
				acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
			}
			acnt_info->pvm_nonvolatile_count += object->wired_page_count;
		}
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
	}
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	/* convert from page counts to byte counts */
	acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
	acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
	acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
	acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

	return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */

static void
vm_purgeable_volatile_queue_disown(
	purgeable_q_t	queue,
	int		group,
	task_t		task)
{
	vm_object_t	object;
	int		collisions;

	collisions = 0;

again:
	LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
		/*
		 * Sanity check: let's scan the entire queues to
		 * make sure we don't leave any purgeable objects
		 * pointing back at a dead task. If the counters
		 * are off, we would fail to assert that they go
		 * back to 0 after disowning is done.
		 */
#else /* MACH_ASSERT */
		if (task->task_volatile_objects == 0) {
			/* no more volatile objects owned by "task" */
			break;
		}
#endif /* MACH_ASSERT */
		if (object->vo_purgeable_owner == task) {
			if (! vm_object_lock_try(object)) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				mutex_pause(collisions++);
				lck_mtx_lock(&vm_purgeable_queue_lock);
				goto again;
			}
			assert(object->purgable == VM_PURGABLE_VOLATILE);
			if (object->vo_purgeable_owner == task) {
				vm_purgeable_accounting(object,
							object->purgable,
							TRUE); /* disown */
				assert(object->vo_purgeable_owner == NULL);
			}
			vm_object_unlock(object);
		}
	}
}

void
vm_purgeable_disown(
	task_t	task)
{
	purgeable_q_t	volatile_q;
	int		group;
	queue_head_t	*nonvolatile_q;
	vm_object_t	object;
	int		collisions;

	if (task == NULL) {
		return;
	}

	task->task_purgeable_disowning = TRUE;

	/*
	 * Scan the purgeable objects queues for objects owned by "task".
	 * This has to be done "atomically" under the "vm_purgeable_queue"
	 * lock, to ensure that no new purgeable object get associated
	 * with this task or moved between queues while we're scanning.
	 */

	/*
	 * Scan non-volatile queue for objects owned by "task".
	 */

	collisions = 0;

again:
	if (task->task_purgeable_disowned) {
		/* task has already disowned its purgeable memory */
		assert(task->task_volatile_objects == 0);
		assert(task->task_nonvolatile_objects == 0);
		return;
	}
	lck_mtx_lock(&vm_purgeable_queue_lock);

	nonvolatile_q = &purgeable_nonvolatile_queue;
	for (object = (vm_object_t) queue_first(nonvolatile_q);
	     !queue_end(nonvolatile_q, (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
		/*
		 * Sanity check: let's scan the entire queues to
		 * make sure we don't leave any purgeable objects
		 * pointing back at a dead task. If the counters
		 * are off, we would fail to assert that they go
		 * back to 0 after disowning is done.
		 */
#else /* MACH_ASSERT */
		if (task->task_nonvolatile_objects == 0) {
			/* no more non-volatile objects owned by "task" */
			break;
		}
#endif /* MACH_ASSERT */
		assert(object->vo_purgeable_volatilizer == NULL);

		if (object->vo_purgeable_owner == task) {
			if (!vm_object_lock_try(object)) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				mutex_pause(collisions++);
				goto again;
			}
			if (object->vo_purgeable_owner == task) {
				vm_purgeable_accounting(object,
							object->purgable,
							TRUE); /* disown */
				assert(object->vo_purgeable_owner == NULL);
			}
			vm_object_unlock(object);
		}
	}

	lck_mtx_yield(&vm_purgeable_queue_lock);

	/*
	 * Scan volatile queues for objects owned by "task".
	 */

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_volatile_queue_disown(volatile_q, 0, task);
	lck_mtx_yield(&vm_purgeable_queue_lock);

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_volatile_queue_disown(volatile_q, group, task);
		lck_mtx_yield(&vm_purgeable_queue_lock);
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_volatile_queue_disown(volatile_q, group, task);
		lck_mtx_yield(&vm_purgeable_queue_lock);
	}

	if (task->task_volatile_objects != 0 ||
	    task->task_nonvolatile_objects != 0) {
		/* some purgeable objects sneaked into a queue: find them */
		lck_mtx_unlock(&vm_purgeable_queue_lock);
		mutex_pause(collisions++);
		goto again;
	}

	/* there shouldn't be any purgeable objects owned by task now */
	assert(task->task_volatile_objects == 0);
	assert(task->task_nonvolatile_objects == 0);
	assert(task->task_purgeable_disowning);

	/* and we don't need to try and disown again */
	task->task_purgeable_disowned = TRUE;

	lck_mtx_unlock(&vm_purgeable_queue_lock);
}
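
/*
 * Disowning retries from the top ("again:") whenever a try-lock fails or a
 * purgeable object shows up on a queue after the scan started, and it
 * yields the purgeable queue lock between queue scans so that it does not
 * starve other threads while walking potentially long queues.
 */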

static int
vm_purgeable_queue_purge_task_owned(
	purgeable_q_t	queue,
	int		group,
	task_t		task)
{
	vm_object_t	object;
	int		collisions;
	int		num_objects_purged;

	num_objects_purged = 0;
	collisions = 0;

look_again:
	lck_mtx_lock(&vm_purgeable_queue_lock);

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {

		if (object->vo_purgeable_owner != task &&
		    object->vo_purgeable_owner != NULL) {
			continue;
		}

		/* found an object: try and grab it */
		if (!vm_object_lock_try(object)) {
			lck_mtx_unlock(&vm_purgeable_queue_lock);
			mutex_pause(collisions++);
			goto look_again;
		}
		/* got it ! */

		collisions = 0;

		/* remove object from purgeable queue */
		queue_remove(&queue->objq[group], object,
			     vm_object_t, objq);
		object->objq.next = NULL;
		object->objq.prev = NULL;
		/* one less volatile object for this object's owner */
		assert(object->vo_purgeable_owner == task);
		vm_purgeable_volatile_owner_update(task, -1);

		object->vo_purgeable_volatilizer = NULL;

		queue_enter(&purgeable_nonvolatile_queue, object,
			    vm_object_t, objq);
		assert(purgeable_nonvolatile_count >= 0);
		purgeable_nonvolatile_count++;
		assert(purgeable_nonvolatile_count > 0);
		/* one more nonvolatile object for this object's owner */
		assert(object->vo_purgeable_owner == task);
		vm_purgeable_nonvolatile_owner_update(task, +1);

		/* unlock purgeable queues */
		lck_mtx_unlock(&vm_purgeable_queue_lock);

		if (object->purgeable_when_ripe) {
			/* remove a token */
			vm_page_lock_queues();
			vm_purgeable_token_remove_first(queue);
			vm_page_unlock_queues();
		}

		/* purge the object */
		(void) vm_object_purge(object, 0);
		assert(object->purgable == VM_PURGABLE_EMPTY);
		/* no change for purgeable accounting */
		vm_object_unlock(object);
		num_objects_purged++;

		/* we unlocked the purgeable queues, so start over */
		goto look_again;
	}

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	return num_objects_purged;
}

int
vm_purgeable_purge_task_owned(
	task_t	task)
{
	purgeable_q_t	queue;
	int		group;
	int		num_objects_purged;

	num_objects_purged = 0;

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
								   0,
								   task);

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
									   group,
									   task);

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
									   group,
									   task);

	return num_objects_purged;
}

void
vm_purgeable_nonvolatile_enqueue(
	vm_object_t	object,
	task_t		owner)
{
	int	page_count;

	vm_object_lock_assert_exclusive(object);

	assert(object->purgable == VM_PURGABLE_NONVOLATILE);
	assert(object->vo_purgeable_owner == NULL);

	lck_mtx_lock(&vm_purgeable_queue_lock);

	if (owner != NULL &&
	    owner->task_purgeable_disowning) {
		/* task is exiting and no longer tracking purgeable objects */
		owner = NULL;
	}

	object->vo_purgeable_owner = owner;

	object->vo_purgeable_volatilizer = NULL;

	OSBacktrace(&object->purgeable_owner_bt[0], 16);

	page_count = object->resident_page_count;
	if (owner != NULL && page_count != 0) {
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile,
			      ptoa(page_count));
		ledger_credit(owner->ledger,
			      task_ledgers.phys_footprint,
			      ptoa(page_count));
	}

	assert(object->objq.next == NULL);
	assert(object->objq.prev == NULL);

	queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	/* one more nonvolatile object for this object's owner */
	vm_purgeable_nonvolatile_owner_update(owner, +1);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}
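
/*
 * When a nonvolatile purgeable object acquires an owner, its resident
 * pages are charged to the owner's purgeable_nonvolatile ledger and to
 * phys_footprint; the object also joins the global
 * purgeable_nonvolatile_queue so it can later be disowned or made
 * volatile.
 */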

void
vm_purgeable_nonvolatile_dequeue(
	vm_object_t	object)
{
	task_t	owner;

	vm_object_lock_assert_exclusive(object);

	owner = object->vo_purgeable_owner;

	assert(object->vo_purgeable_volatilizer == NULL);

	if (owner != NULL) {
		/*
		 * Update the owner's ledger to stop accounting
		 * for this object.
		 */
		vm_purgeable_accounting(object,
					object->purgable,
					TRUE); /* disown */
	}

	lck_mtx_lock(&vm_purgeable_queue_lock);
	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
		     vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}

void
vm_purgeable_accounting(
	vm_object_t	object,
	vm_purgable_t	old_state,
	boolean_t	disown)
{
	task_t		owner;
	int		resident_page_count;
	int		wired_page_count;
	int		compressed_page_count;
	boolean_t	disown_on_the_fly;

	vm_object_lock_assert_exclusive(object);

	owner = object->vo_purgeable_owner;
	if (owner == NULL)
		return;

	if (!disown && owner->task_purgeable_disowning) {
		/* task is disowning its purgeable objects: help it */
		disown_on_the_fly = TRUE;
	} else {
		disown_on_the_fly = FALSE;
	}

	resident_page_count = object->resident_page_count;
	wired_page_count = object->wired_page_count;
	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
	    object->pager != NULL) {
		compressed_page_count =
			vm_compressor_pager_get_count(object->pager);
	} else {
		compressed_page_count = 0;
	}

	if (old_state == VM_PURGABLE_VOLATILE ||
	    old_state == VM_PURGABLE_EMPTY) {
		/* less volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_volatile,
			     ptoa(resident_page_count - wired_page_count));
		/* less compressed volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_volatile_compressed,
			     ptoa(compressed_page_count));

		if (disown || !object->alive || object->terminating) {
			/* wired pages were accounted as "non-volatile"... */
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile,
				     ptoa(wired_page_count));
			/* ... and in phys_footprint */
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(wired_page_count));

			if (!disown_on_the_fly &&
			    (object->purgeable_queue_type ==
			     PURGEABLE_Q_TYPE_MAX)) {
				/*
				 * Not on a volatile queue: must be empty.
				 */
				vm_purgeable_nonvolatile_owner_update(owner, -1);
			} else {
				/* on a volatile queue */
				vm_purgeable_volatile_owner_update(owner, -1);
			}
			/* no more accounting for this dead object */
			object->vo_purgeable_owner = NULL;
			object->vo_purgeable_volatilizer = NULL;
			return;
		}

		/* more non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile,
			      ptoa(resident_page_count - wired_page_count));
		/* more compressed non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile_compressed,
			      ptoa(compressed_page_count));
		/* more footprint */
		ledger_credit(owner->ledger,
			      task_ledgers.phys_footprint,
			      ptoa(resident_page_count
				   + compressed_page_count
				   - wired_page_count));

	} else if (old_state == VM_PURGABLE_NONVOLATILE) {

		/* less non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_nonvolatile,
			     ptoa(resident_page_count - wired_page_count));
		/* less compressed non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_nonvolatile_compressed,
			     ptoa(compressed_page_count));
		/* less footprint */
		ledger_debit(owner->ledger,
			     task_ledgers.phys_footprint,
			     ptoa(resident_page_count
				  + compressed_page_count
				  - wired_page_count));

		if (disown || !object->alive || object->terminating) {
			/* wired pages still accounted as "non-volatile" */
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile,
				     ptoa(wired_page_count));
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(wired_page_count));

			/* one less "non-volatile" object for the owner */
			if (!disown_on_the_fly) {
				assert(object->purgeable_queue_type ==
				       PURGEABLE_Q_TYPE_MAX);
			}
			vm_purgeable_nonvolatile_owner_update(owner, -1);
			/* no more accounting for this dead object */
			object->vo_purgeable_owner = NULL;
			object->vo_purgeable_volatilizer = NULL;
			return;
		}

		/* more volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_volatile,
			      ptoa(resident_page_count - wired_page_count));
		/* more compressed volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_volatile_compressed,
			      ptoa(compressed_page_count));
	} else {
		panic("vm_purgeable_accounting(%p): "
		      "unexpected old_state=%d\n",
		      object, old_state);
	}

	vm_object_lock_assert_exclusive(object);
}
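
/*
 * Ledger summary for vm_purgeable_accounting(): transitions out of the
 * VOLATILE/EMPTY state move the object's unwired resident and compressed
 * bytes from the purgeable_volatile ledgers to the purgeable_nonvolatile
 * ledgers (and back into phys_footprint); transitions out of NONVOLATILE
 * do the reverse.  If the object is being disowned or is dying, the bytes
 * are simply debited, the owner's object counters are dropped, and the
 * object loses its owner instead.
 */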

void
vm_purgeable_nonvolatile_owner_update(
	task_t	owner,
	int	delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_nonvolatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects > 0);
	} else {
		assert(owner->task_nonvolatile_objects > delta);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects >= 0);
	}
}

void
vm_purgeable_volatile_owner_update(
	task_t	owner,
	int	delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_volatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects > 0);
	} else {
		assert(owner->task_volatile_objects > delta);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects >= 0);
	}
}

void
vm_purgeable_compressed_update(
	vm_object_t	object,
	int		delta)
{
	task_t	owner;

	vm_object_lock_assert_exclusive(object);

	if (delta == 0 ||
	    !object->internal ||
	    object->purgable == VM_PURGABLE_DENY ||
	    object->vo_purgeable_owner == NULL) {
		/* not an owned purgeable VM object: nothing to update */
		return;
	}

	owner = object->vo_purgeable_owner;
	switch (object->purgable) {
	case VM_PURGABLE_DENY:
		break;
	case VM_PURGABLE_NONVOLATILE:
		if (delta > 0) {
			ledger_credit(owner->ledger,
				      task_ledgers.purgeable_nonvolatile_compressed,
				      ptoa(delta));
			ledger_credit(owner->ledger,
				      task_ledgers.phys_footprint,
				      ptoa(delta));
		} else {
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile_compressed,
				     ptoa(-delta));
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(-delta));
		}
		break;
	case VM_PURGABLE_VOLATILE:
	case VM_PURGABLE_EMPTY:
		if (delta > 0) {
			ledger_credit(owner->ledger,
				      task_ledgers.purgeable_volatile_compressed,
				      ptoa(delta));
		} else {
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_volatile_compressed,
				     ptoa(-delta));
		}
		break;
	default:
		panic("vm_purgeable_compressed_update(): "
		      "unexpected purgable %d for object %p\n",
		      object->purgable, object);
	}
	return;
}