/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/sched_prim.h>
#include <kern/ledger.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>		/* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>

extern vm_pressure_level_t memorystatus_vm_pressure_level;
token_idx_t	token_q_max_cnt = 0;
vm_size_t	token_q_cur_size = 0;

token_idx_t	token_free_idx = 0;		/* head of free queue */
token_idx_t	token_init_idx = 1;		/* token 0 is reserved!! */
int32_t		token_new_pagecount = 0;	/* count of pages that will
						 * be added onto token queue */

int		available_for_purge = 0;	/* increase when ripe token
						 * added, decrease when ripe
						 * token removed.
						 * protected by page_queue_lock */

static int	token_q_allocating = 0;		/* flag for singlethreading
						 * allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t purgeable_nonvolatile_queue;
int purgeable_nonvolatile_count;

decl_lck_mtx_data(,vm_purgeable_queue_lock)
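
/*
 * Illustrative sketch (not part of the original file): tokens live in the
 * global "tokens" array and are linked by array index rather than by
 * pointer, with index 0 reserved as the "null" link. The hypothetical
 * helper below shows how the free list rooted at token_free_idx could be
 * walked; like the rest of the token code it assumes the page queue lock
 * is held by the caller.
 */
#if 0	/* example only, not built */
static unsigned int
vm_purgeable_token_free_count_sketch(void)
{
	unsigned int	cnt = 0;
	token_idx_t	idx;

	for (idx = token_free_idx; idx != 0; idx = tokens[idx].next)
		cnt++;

	return cnt;
}
#endif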
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);

void vm_purgeable_nonvolatile_owner_update(task_t owner,
					   int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
					int delta);
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
	int		token_cnt = 0, page_cnt = 0;
	token_idx_t	token = queue->token_q_head;
	token_idx_t	unripe = 0;
	int		our_inactive_count;

#if DEVELOPMENT
	static unsigned lightweight_check = 0;

	/*
	 * Due to performance impact, only perform this check
	 * every 100 times on DEVELOPMENT kernels.
	 */
	if (lightweight_check++ < 100) {
		return;
	}

	lightweight_check = 0;
#endif

	while (token) {
		if (tokens[token].count != 0) {
			assert(queue->token_q_unripe);
			if (unripe == 0) {
				assert(token == queue->token_q_unripe);
				unripe = token;
			}
		}
		page_cnt += tokens[token].count;

		if (tokens[token].next == 0)
			assert(queue->token_q_tail == token);

		token_cnt++;
		token = tokens[token].next;
	}

	if (unripe)
		assert(queue->token_q_unripe == unripe);
	assert(token_cnt == queue->debug_count_tokens);

	/* obsolete queue doesn't maintain token counts */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
		our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
		assert(our_inactive_count >= 0);
		assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
	}
}
/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/* new token */
	token_idx_t	token;
	enum purgeable_q_type i;

find_available_token:

	if (token_free_idx) {				/* unused tokens available */
		token = token_free_idx;
		token_free_idx = tokens[token_free_idx].next;
	} else if (token_init_idx < token_q_max_cnt) {	/* lazy token array init */
		token = token_init_idx;
		token_init_idx++;
	} else {					/* allocate more memory */
		/* Wait if another thread is inside the memory alloc section */
		while (token_q_allocating) {
			wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
							  LCK_SLEEP_DEFAULT,
							  (event_t)&token_q_allocating,
							  THREAD_UNINT);
			if (res != THREAD_AWAKENED)
				return KERN_ABORTED;
		}

		/* Check whether memory is still maxed out */
		if (token_init_idx < token_q_max_cnt)
			goto find_available_token;

		/* Still no memory. Allocate some. */
		token_q_allocating = 1;

		/* Drop page queue lock so we can allocate */
		vm_page_unlock_queues();

		struct token *new_loc;
		vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
		kern_return_t result;

		if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
			result = KERN_RESOURCE_SHORTAGE;
		} else if (token_q_cur_size) {
			result = kmem_realloc(kernel_map,
					      (vm_offset_t) tokens,
					      token_q_cur_size,
					      (vm_offset_t *) &new_loc,
					      alloc_size, VM_KERN_MEMORY_OSFMK);
		} else {
			result = kmem_alloc(kernel_map,
					    (vm_offset_t *) &new_loc,
					    alloc_size, VM_KERN_MEMORY_OSFMK);
		}

		vm_page_lock_queues();

		if (result) {
			/* Unblock waiting threads */
			token_q_allocating = 0;
			thread_wakeup((event_t)&token_q_allocating);
			return result;
		}

		/* If we get here, we allocated new memory. Update pointers and
		 * dealloc old range */
		struct token *old_tokens = tokens;
		tokens = new_loc;
		vm_size_t old_token_q_cur_size = token_q_cur_size;
		token_q_cur_size = alloc_size;
		token_q_max_cnt = (token_idx_t) (token_q_cur_size /
						 sizeof(struct token));
		assert(token_init_idx < token_q_max_cnt);	/* We must have a free token now */

		if (old_token_q_cur_size) {	/* clean up old mapping */
			vm_page_unlock_queues();
			/* kmem_realloc leaves the old region mapped. Get rid of it. */
			kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
			vm_page_lock_queues();
		}

		/* Unblock waiting threads */
		token_q_allocating = 0;
		thread_wakeup((event_t)&token_q_allocating);

		goto find_available_token;
	}

	/*
	 * the new pagecount we got needs to be applied to all queues except
	 * the obsolete queue
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
		assert(pages <= TOKEN_COUNT_MAX);
		purgeable_queues[i].new_pages = (int32_t) pages;
		assert(purgeable_queues[i].new_pages == pages);
	}
	token_new_pagecount = 0;

	/* set token counter value */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
		tokens[token].count = queue->new_pages;
	else
		tokens[token].count = 0;	/* all obsolete items are
						 * ripe immediately */
	queue->new_pages = 0;

	/* put token on token counter list */
	tokens[token].next = 0;
	if (queue->token_q_tail == 0) {
		assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
		queue->token_q_head = token;
		tokens[token].prev = 0;
	} else {
		tokens[queue->token_q_tail].next = token;
		tokens[token].prev = queue->token_q_tail;
	}
	if (queue->token_q_unripe == 0) {	/* only ripe tokens (token
						 * count == 0) in queue */
		if (tokens[token].count > 0)
			queue->token_q_unripe = token;	/* first unripe token */
		else
			available_for_purge++;	/* added a ripe token?
						 * increase available count */
	}
	queue->token_q_tail = token;

	queue->debug_count_tokens++;
	/* Check both queues, since we modified the new_pages count on each */
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
			      queue->type,
			      tokens[token].count,	/* num pages on token (last token) */
			      queue->debug_count_tokens,
			      0,
			      0);

	return KERN_SUCCESS;
}
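
/*
 * Hypothetical usage sketch (not in the original source): callers must hold
 * the page queue lock around vm_purgeable_token_add(); the routine may drop
 * and retake that lock internally while it grows the token array.
 */
#if 0	/* example only, not built */
static kern_return_t
vm_purgeable_token_add_example(purgeable_q_t queue)
{
	kern_return_t kr;

	vm_page_lock_queues();
	kr = vm_purgeable_token_add(queue);
	vm_page_unlock_queues();

	return kr;
}
#endif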
/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t	token;
	token = queue->token_q_head;

	if (token) {
		assert(queue->token_q_tail);
		if (queue->token_q_head == queue->token_q_unripe) {
			/* no ripe tokens... must move unripe pointer */
			queue->token_q_unripe = tokens[token].next;
		} else {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		}

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		queue->token_q_head = tokens[token].next;
		if (queue->token_q_head) {
			tokens[queue->token_q_head].count += tokens[token].count;
			tokens[queue->token_q_head].prev = 0;
		} else {
			/* currently no other tokens in the queue */
			/*
			 * the page count must be added to the next newly
			 * created token
			 */
			queue->new_pages += tokens[token].count;
			/* if head is zero, tail is too */
			queue->token_q_tail = 0;
		}

		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
	}

	return token;
}
static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t	token;
	token = queue->token_q_tail;

	if (token) {
		assert(queue->token_q_head);

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		if (queue->token_q_unripe == 0) {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		} else if (queue->token_q_unripe == token) {
			/* we're removing the only unripe token */
			queue->token_q_unripe = 0;
		}

		if (token == queue->token_q_head) {
			/* token is the last one in the queue */
			queue->token_q_head = 0;
			queue->token_q_tail = 0;
		} else {
			token_idx_t new_tail;

			new_tail = tokens[token].prev;

			assert(tokens[new_tail].next == token);

			queue->token_q_tail = new_tail;
			tokens[new_tail].next = 0;
		}

		queue->new_pages += tokens[token].count;

		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
	}

	return token;
}
/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	token_idx_t token = vm_purgeable_token_remove_first(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		tokens[token].prev = 0;
		token_free_idx = token;
	}
}

void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	token_idx_t token = vm_purgeable_token_remove_last(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		tokens[token].prev = 0;
		token_free_idx = token;
	}
}
/* Call with page queue locked. */
void
vm_purgeable_q_advance_all(void)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	enum purgeable_q_type i;

	/* check queue counters - if they get really large, scale them back.
	 * They tend to get that large when there is no purgeable queue action */
	if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {	/* a system idling for years might get there */
		for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
			int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
			assert(pages <= TOKEN_COUNT_MAX);
			purgeable_queues[i].new_pages = (int32_t) pages;
			assert(purgeable_queues[i].new_pages == pages);
		}
		token_new_pagecount = 0;
	}

	/*
	 * Decrement token counters. A token counter can be zero, this means the
	 * object is ripe to be purged. It is not purged immediately, because that
	 * could cause several objects to be purged even if purging one would satisfy
	 * the memory needs. Instead, the pageout thread purges one after the other
	 * by calling vm_purgeable_object_purge_one and then rechecking the memory
	 * balance.
	 *
	 * No need to advance obsolete queue - all items are ripe there,
	 * always.
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t	queue = &purgeable_queues[i];
		uint32_t	num_pages = 1;

		/* Iterate over tokens as long as there are unripe tokens. */
		while (queue->token_q_unripe) {
			if (tokens[queue->token_q_unripe].count && num_pages) {
				tokens[queue->token_q_unripe].count -= 1;
				num_pages -= 1;
			}

			if (tokens[queue->token_q_unripe].count == 0) {
				queue->token_q_unripe = tokens[queue->token_q_unripe].next;
				available_for_purge++;
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
						      queue->type,
						      tokens[queue->token_q_head].count,	/* num pages on new
												 * first token */
						      0,
						      available_for_purge,
						      0);
				continue;	/* One token ripened. Make sure to
						 * check the next. */
			}
			if (num_pages == 0)
				break;	/* Current token not ripe and no more pages.
					 * Work done. */
		}

		/*
		 * If there are no unripe tokens in the queue, decrement the
		 * new_pages counter instead. new_pages can be negative, but must be
		 * canceled out by token_new_pagecount -- since the inactive queue as a
		 * whole always contains a nonnegative number of pages.
		 */
		if (!queue->token_q_unripe) {
			queue->new_pages -= num_pages;
			assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
		}

		vm_purgeable_token_check_queue(queue);
	}
}
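
/*
 * Worked example (illustrative, not from the original source): if a queue's
 * first unripe token has count 3, each call to vm_purgeable_q_advance_all()
 * above pays one page into it; after three calls the count reaches 0, the
 * token ripens, available_for_purge is incremented and the unripe pointer
 * advances. A hypothetical pageout-style loop could then drain the ripe
 * tokens one object at a time (assuming the boolean-style return of the
 * purge-one path):
 */
#if 0	/* example only, not built */
static void
vm_purgeable_drain_ripe_sketch(void)
{
	vm_page_lock_queues();
	while (available_for_purge > 0 &&
	       vm_purgeable_object_purge_one(0, 0)) {
		/* one ripe object purged; re-check memory pressure here */
	}
	vm_page_unlock_queues();
}
#endif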
/*
 * Grab any ripe object and purge it: obsolete queue first, then go through
 * each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *	Yes - purge it. Remove token.
 *	No - check other queue. Is there an object?
 *		No - increment group, then go to (1)
 *		Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *		      token from other queue and migrate unripe token from this
 *		      queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
	/* return token to free list. advance token list. */
	token_idx_t new_head = tokens[queue->token_q_head].next;
	tokens[queue->token_q_head].next = token_free_idx;
	tokens[queue->token_q_head].prev = 0;
	token_free_idx = queue->token_q_head;
	queue->token_q_head = new_head;
	tokens[new_head].prev = 0;
	if (new_head == 0)
		queue->token_q_tail = 0;

	queue->debug_count_tokens--;
	vm_purgeable_token_check_queue(queue);

	available_for_purge--;
	assert(available_for_purge >= 0);
}
/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(queue->token_q_head);

	if (tokens[queue->token_q_head].count == 0) {
		/* This queue has a ripe token. Remove. */
		vm_purgeable_token_remove_ripe(queue);
	} else {
		/*
		 * queue2 must have a ripe token. Remove, and migrate one
		 * from queue to queue2.
		 */
		vm_purgeable_token_remove_ripe(queue2);
		/* migrate unripe token */
		token_idx_t	token;
		token_cnt_t	count;

		/* remove token from queue1 */
		assert(queue->token_q_unripe == queue->token_q_head);	/* queue1 had no unripe
									 * tokens, remember? */
		token = vm_purgeable_token_remove_first(queue);

		count = tokens[token].count;

		/* migrate to queue2 */
		/* go to migration target loc */

		token_idx_t	token_to_insert_before = queue2->token_q_head, token_to_insert_after;

		while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
			count -= tokens[token_to_insert_before].count;
			token_to_insert_before = tokens[token_to_insert_before].next;
		}

		/* token_to_insert_before is now set correctly */

		/* should the inserted token become the first unripe token? */
		if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
			queue2->token_q_unripe = token;	/* if so, must update unripe pointer */

		/*
		 * if inserting at end, reduce new_pages by that value;
		 * otherwise, reduce counter of next token
		 */

		tokens[token].count = count;

		if (token_to_insert_before != 0) {
			token_to_insert_after = tokens[token_to_insert_before].prev;

			tokens[token].next = token_to_insert_before;
			tokens[token_to_insert_before].prev = token;

			assert(tokens[token_to_insert_before].count >= count);
			tokens[token_to_insert_before].count -= count;
		} else {
			/* if we ran off the end of the list, the token to insert after is the tail */
			token_to_insert_after = queue2->token_q_tail;

			tokens[token].next = 0;
			queue2->token_q_tail = token;

			assert(queue2->new_pages >= (int32_t) count);
			queue2->new_pages -= count;
		}

		if (token_to_insert_after != 0) {
			tokens[token].prev = token_to_insert_after;
			tokens[token_to_insert_after].next = token;
		} else {
			/* is this case possible? */
			tokens[token].prev = 0;
			queue2->token_q_head = token;
		}

		queue2->debug_count_tokens++;
		vm_purgeable_token_check_queue(queue2);
	}
}
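
/*
 * Worked example (illustrative, not from the original source): suppose the
 * migrating token carries count 7 and queue2 holds tokens with counts 3 and
 * 5. The walk above subtracts 3 (leaving 7 - 3 = 4) and stops at the second
 * token because 4 <= 5. The migrated token is inserted before it with count
 * 4 and the second token's count drops from 5 to 1, so the total page count
 * encoded along queue2 (3 + 4 + 1 = 8) is unchanged and the new token still
 * sits 3 + 4 = 7 pages from the head.
 */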
/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(
	purgeable_q_t	queue,
	int		group,
	boolean_t	pick_ripe)
{
	vm_object_t	object, best_object;
	int		object_task_importance;
	int		best_object_task_importance;
	int		best_object_skipped;
	int		num_objects_skipped;
	int		try_lock_failed = 0;
	int		try_lock_succeeded = 0;
	task_t		owner;

	best_object = VM_OBJECT_NULL;
	best_object_task_importance = INT_MAX;

	lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
	/*
	 * Usually we would pick the first element from a queue. However, we
	 * might not be able to get a lock on it, in which case we try the
	 * remaining elements in order.
	 */

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
				  pick_ripe,
				  group,
				  VM_KERNEL_UNSLIDE_OR_PERM(queue),
				  0,
				  0);

	num_objects_skipped = 0;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq),
	     num_objects_skipped++) {

		/*
		 * To prevent us looping for an excessively long time, choose
		 * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
		 * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
		 * we keep going until we find the first eligible object.
		 */
		if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
			break;
		}

		if (pick_ripe &&
		    !object->purgeable_when_ripe) {
			/* we want an object that has a ripe token */
			continue;
		}

		object_task_importance = 0;

		owner = object->vo_purgeable_owner;
		if (owner) {
			object_task_importance = task_importance_estimate(owner);
		}

		if (object_task_importance < best_object_task_importance) {
			if (vm_object_lock_try(object)) {
				try_lock_succeeded++;
				if (best_object != VM_OBJECT_NULL) {
					/* forget about previous best object */
					vm_object_unlock(best_object);
				}
				best_object = object;
				best_object_task_importance = object_task_importance;
				best_object_skipped = num_objects_skipped;
				if (best_object_task_importance == 0) {
					/* can't get any better: stop looking */
					break;
				}
			} else {
				try_lock_failed++;
			}
		}
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
				  num_objects_skipped,	/* considered objects */
				  try_lock_failed,
				  try_lock_succeeded,
				  VM_KERNEL_UNSLIDE_OR_PERM(best_object),
				  ((best_object == NULL) ? 0 : best_object->resident_page_count));

	object = best_object;

	if (object == VM_OBJECT_NULL) {
		return VM_OBJECT_NULL;
	}

	/* Locked. Great. We'll take it. Remove and return. */
//	printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

	vm_object_lock_assert_exclusive(object);

	queue_remove(&queue->objq[group], object,
		     vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	object->purgeable_queue_group = 0;
	/* one less volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);

	object->vo_purgeable_volatilizer = NULL;

	/* keep queue of non-volatile objects */
	queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	/* one more nonvolatile object for this object's owner */
	vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, +1);

	queue->debug_count_objects--;

	return object;
}
/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
	enum purgeable_q_type i;
	int		group;
	vm_object_t	object;
	unsigned int	purged_count;
	uint32_t	collisions;

	purged_count = 0;
	collisions = 0;

restart:
	lck_mtx_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t queue;

		queue = &purgeable_queues[i];

		/*
		 * Look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			while (!queue_empty(&queue->objq[group])) {
				object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
				if (object == VM_OBJECT_NULL) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					mutex_pause(collisions++);
					goto restart;
				}

				lck_mtx_unlock(&vm_purgeable_queue_lock);

				/* Lock the page queue here so we don't hold it
				 * over the whole, lengthy operation */
				if (object->purgeable_when_ripe) {
					vm_page_lock_queues();
					vm_purgeable_token_remove_first(queue);
					vm_page_unlock_queues();
				}

				(void) vm_object_purge(object, 0);
				assert(object->purgable == VM_PURGABLE_EMPTY);
				/* no change in purgeable accounting */

				vm_object_unlock(object);
				purged_count++;
				goto restart;
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
			      purged_count,	/* # of purged objects */
			      0,
			      available_for_purge,
			      0,
			      0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);
}
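
/*
 * Hypothetical usage sketch (not in the original source): because
 * vm_purgeable_object_purge_all() acquires and drops the purgeable-queue
 * and page-queue locks itself, a caller reacting to severe memory pressure
 * could invoke it with no locks held:
 */
#if 0	/* example only, not built */
static void
vm_purgeable_emergency_purge_sketch(void)
{
	/* empty every volatile purgeable object, in every queue and group */
	vm_purgeable_object_purge_all();
}
#endif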
boolean_t
vm_purgeable_object_purge_one_unlocked(
	int	force_purge_below_group)
{
	boolean_t	retval;

	vm_page_lock_queues();
	retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
	vm_page_unlock_queues();

	return retval;
}

boolean_t
vm_purgeable_object_purge_one(
	int	force_purge_below_group,
	int	flags)
{
	enum purgeable_q_type i;
	int		group;
	vm_object_t	object = 0;
	purgeable_q_t	queue, queue2;
	boolean_t	forced_purge;

	/* Need the page queue lock since we'll be changing the token queue. */
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		queue = &purgeable_queues[i];

		if (force_purge_below_group == 0) {
			/*
			 * Are there any ripe tokens on this queue? If yes,
			 * we'll find an object to purge there
			 */
			if (!queue->token_q_head) {
				/* no token: look at next purgeable queue */
				continue;
			}

			if (tokens[queue->token_q_head].count != 0) {
				/* no ripe token: next queue */
				continue;
			}
		}

		/*
		 * Now look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			if (!queue->token_q_head ||
			    tokens[queue->token_q_head].count != 0) {
				/* no tokens or no ripe tokens */

				if (group >= force_purge_below_group) {
					/* no more groups to force-purge */
					break;
				}

				/*
				 * Try and purge an object in this group
				 * even though no tokens are ripe.
				 */
				if (!queue_empty(&queue->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					if (object->purgeable_when_ripe) {
						vm_purgeable_token_delete_first(queue);
					}
					forced_purge = TRUE;
					goto purge_now;
				}

				/* nothing to purge in this group: next group */
				continue;
			}

			if (!queue_empty(&queue->objq[group]) &&
			    (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				if (object->purgeable_when_ripe) {
					vm_purgeable_token_choose_and_delete_ripe(queue, 0);
				}
				forced_purge = FALSE;
				goto purge_now;
			}

			if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
				/* This is the token migration case, and it works between
				 * FIFO and LIFO only */
				queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
							   PURGEABLE_Q_TYPE_FIFO :
							   PURGEABLE_Q_TYPE_LIFO];

				if (!queue_empty(&queue2->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					if (object->purgeable_when_ripe) {
						vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
					}
					forced_purge = FALSE;
					goto purge_now;
				}
			}
			assert(queue->debug_count_objects >= 0);
		}
	}

	/*
	 * because we have to do a try_lock on the objects which could fail,
	 * we could end up with no object to purge at this time, even though
	 * we have objects in a purgeable state
	 */
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return FALSE;

purge_now:

	vm_page_unlock_queues();	/* Unlock for call to vm_object_purge() */
//	printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_purgeable_owner, task_importance_estimate(object->vo_purgeable_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
	(void) vm_object_purge(object, flags);
	assert(object->purgable == VM_PURGABLE_EMPTY);
	/* no change in purgeable accounting */
	vm_object_unlock(object);
	vm_page_lock_queues();

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
			      object,	/* purged object */
			      0,
			      available_for_purge,
			      0,
			      0);

	return TRUE;
}
/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
	vm_object_lock_assert_exclusive(object);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
		     vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	/* one less nonvolatile object for this object's owner */
	vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, -1);

	if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
		group = 0;

	if (queue->type != PURGEABLE_Q_TYPE_LIFO)	/* fifo and obsolete are
							 * fifo-queued */
		queue_enter(&queue->objq[group], object, vm_object_t, objq);	/* last to die */
	else
		queue_enter_first(&queue->objq[group], object, vm_object_t, objq);	/* first to die */
	/* one more volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, +1);

	object->purgeable_queue_type = queue->type;
	object->purgeable_queue_group = group;

	assert(object->vo_purgeable_volatilizer == NULL);
	object->vo_purgeable_volatilizer = current_task();
	OSBacktrace(&object->purgeable_volatilizer_bt[0], 16);

	queue->debug_count_objects++;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);

	lck_mtx_unlock(&vm_purgeable_queue_lock);
}
/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
	int group;
	enum purgeable_q_type type;
	purgeable_q_t queue;

	vm_object_lock_assert_exclusive(object);

	type = object->purgeable_queue_type;
	group = object->purgeable_queue_group;

	if (type == PURGEABLE_Q_TYPE_MAX) {
		if (object->objq.prev || object->objq.next)
			panic("unmarked object on purgeable q");

		return NULL;
	} else if (!(object->objq.prev && object->objq.next))
		panic("marked object not on purgeable q");

	lck_mtx_lock(&vm_purgeable_queue_lock);

	queue = &purgeable_queues[type];

	queue_remove(&queue->objq[group], object, vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	/* one less volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);

	object->vo_purgeable_volatilizer = NULL;

	/* keep queue of non-volatile objects */
	if (object->alive && !object->terminating) {
		task_t owner;

		queue_enter(&purgeable_nonvolatile_queue, object,
			    vm_object_t, objq);
		assert(purgeable_nonvolatile_count >= 0);
		purgeable_nonvolatile_count++;
		assert(purgeable_nonvolatile_count > 0);
		/* one more nonvolatile object for this object's owner */
		owner = object->vo_purgeable_owner;
		vm_purgeable_nonvolatile_owner_update(owner, +1);
	}

	queue->debug_count_objects--;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	object->purgeable_queue_group = 0;

	vm_object_lock_assert_exclusive(object);

	return &purgeable_queues[type];
}
static void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
	vm_object_t object;

	lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

	stat->count = stat->size = 0;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (!target_task || object->vo_purgeable_owner == target_task) {
			stat->count++;
			stat->size += (object->resident_page_count * PAGE_SIZE);
		}
	}
}

void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
	purgeable_q_t	queue;
	int		group;

	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Populate fifo_data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);

	/* Populate lifo_data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);

	/* Populate obsolete data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

	lck_mtx_unlock(&vm_purgeable_queue_lock);
}
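
/*
 * Hypothetical usage sketch (not in the original source): a caller could
 * total the volatile bytes of one task across the FIFO groups. It assumes
 * vm_purgeable_info_t points to a struct vm_purgeable_info whose fifo_data
 * entries carry the count/size pair filled in by the helper above, and that
 * passing a NULL task means "all tasks".
 */
#if 0	/* example only, not built */
static uint64_t
vm_purgeable_task_fifo_bytes_sketch(task_t task)
{
	struct vm_purgeable_info	info;
	uint64_t			total = 0;
	int				group;

	vm_purgeable_stats(&info, task);

	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		total += info.fifo_data[group].size;	/* size is in bytes */

	return total;
}
#endif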
#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
	purgeable_q_t queue,
	int group,
	task_t task,
	pvm_account_info_t acnt_info)
{
	vm_object_t object;
	uint64_t compressed_count;

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (object->vo_purgeable_owner == task) {
			compressed_count = vm_compressor_pager_get_count(object->pager);
			acnt_info->pvm_volatile_compressed_count += compressed_count;
			acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
			acnt_info->pvm_nonvolatile_count += object->wired_page_count;
		}
	}
}

/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
	task_t			task,
	pvm_account_info_t	acnt_info)
{
	queue_head_t	*nonvolatile_q;
	vm_object_t	object;
	int		group;
	int		state;
	uint64_t	compressed_count;
	purgeable_q_t	volatile_q;

	if ((task == NULL) || (acnt_info == NULL)) {
		return KERN_INVALID_ARGUMENT;
	}

	acnt_info->pvm_volatile_count = 0;
	acnt_info->pvm_volatile_compressed_count = 0;
	acnt_info->pvm_nonvolatile_count = 0;
	acnt_info->pvm_nonvolatile_compressed_count = 0;

	lck_mtx_lock(&vm_purgeable_queue_lock);

	nonvolatile_q = &purgeable_nonvolatile_queue;
	for (object = (vm_object_t) queue_first(nonvolatile_q);
	     !queue_end(nonvolatile_q, (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (object->vo_purgeable_owner == task) {
			state = object->purgable;
			compressed_count = vm_compressor_pager_get_count(object->pager);
			if (state == VM_PURGABLE_EMPTY) {
				acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
				acnt_info->pvm_volatile_compressed_count += compressed_count;
			} else {
				acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
				acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
			}
			acnt_info->pvm_nonvolatile_count += object->wired_page_count;
		}
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
	}
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
	acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
	acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
	acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

	return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */
static void
vm_purgeable_volatile_queue_disown(
	purgeable_q_t	queue,
	int		group,
	task_t		task)
{
	vm_object_t	object;
	int		collisions;

	collisions = 0;

again:
	lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
		/*
		 * Sanity check: let's scan the entire queues to
		 * make sure we don't leave any purgeable objects
		 * pointing back at a dead task. If the counters
		 * are off, we would fail to assert that they go
		 * back to 0 after disowning is done.
		 */
#else /* MACH_ASSERT */
		if (task->task_volatile_objects == 0) {
			/* no more volatile objects owned by "task" */
			break;
		}
#endif /* MACH_ASSERT */
		if (object->vo_purgeable_owner == task) {
			if (! vm_object_lock_try(object)) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				mutex_pause(collisions++);
				lck_mtx_lock(&vm_purgeable_queue_lock);
				goto again;
			}
			assert(object->purgable == VM_PURGABLE_VOLATILE);
			if (object->vo_purgeable_owner == task) {
				vm_purgeable_accounting(object,
							object->purgable,
							TRUE);	/* disown */
				assert(object->vo_purgeable_owner == NULL);
			}
			vm_object_unlock(object);
		}
	}
}

void
vm_purgeable_disown(
	task_t	task)
{
	purgeable_q_t	volatile_q;
	int		group;
	queue_head_t	*nonvolatile_q;
	vm_object_t	object;
	int		collisions;

	task->task_purgeable_disowning = TRUE;

	/*
	 * Scan the purgeable objects queues for objects owned by "task".
	 * This has to be done "atomically" under the "vm_purgeable_queue"
	 * lock, to ensure that no new purgeable object get associated
	 * with this task or moved between queues while we're scanning.
	 */

	/*
	 * Scan non-volatile queue for objects owned by "task".
	 */

	collisions = 0;

again:
	if (task->task_purgeable_disowned) {
		/* task has already disowned its purgeable memory */
		assert(task->task_volatile_objects == 0);
		assert(task->task_nonvolatile_objects == 0);
		return;
	}
	lck_mtx_lock(&vm_purgeable_queue_lock);

	nonvolatile_q = &purgeable_nonvolatile_queue;
	for (object = (vm_object_t) queue_first(nonvolatile_q);
	     !queue_end(nonvolatile_q, (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
		/*
		 * Sanity check: let's scan the entire queues to
		 * make sure we don't leave any purgeable objects
		 * pointing back at a dead task. If the counters
		 * are off, we would fail to assert that they go
		 * back to 0 after disowning is done.
		 */
#else /* MACH_ASSERT */
		if (task->task_nonvolatile_objects == 0) {
			/* no more non-volatile objects owned by "task" */
			break;
		}
#endif /* MACH_ASSERT */
		assert(object->vo_purgeable_volatilizer == NULL);
		if (object->vo_purgeable_owner == task) {
			if (!vm_object_lock_try(object)) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				mutex_pause(collisions++);
				goto again;
			}
			if (object->vo_purgeable_owner == task) {
				vm_purgeable_accounting(object,
							object->purgable,
							TRUE);	/* disown */
				assert(object->vo_purgeable_owner == NULL);
			}
			vm_object_unlock(object);
		}
	}

	lck_mtx_yield(&vm_purgeable_queue_lock);

	/*
	 * Scan volatile queues for objects owned by "task".
	 */

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_volatile_queue_disown(volatile_q, 0, task);
	lck_mtx_yield(&vm_purgeable_queue_lock);

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_volatile_queue_disown(volatile_q, group, task);
		lck_mtx_yield(&vm_purgeable_queue_lock);
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_volatile_queue_disown(volatile_q, group, task);
		lck_mtx_yield(&vm_purgeable_queue_lock);
	}

	if (task->task_volatile_objects != 0 ||
	    task->task_nonvolatile_objects != 0) {
		/* some purgeable objects sneaked into a queue: find them */
		lck_mtx_unlock(&vm_purgeable_queue_lock);
		mutex_pause(collisions++);
		goto again;
	}

	/* there shouldn't be any purgeable objects owned by task now */
	assert(task->task_volatile_objects == 0);
	assert(task->task_nonvolatile_objects == 0);
	assert(task->task_purgeable_disowning);

	/* and we don't need to try and disown again */
	task->task_purgeable_disowned = TRUE;

	lck_mtx_unlock(&vm_purgeable_queue_lock);
}
static int
vm_purgeable_queue_purge_task_owned(
	purgeable_q_t	queue,
	int		group,
	task_t		task)
{
	vm_object_t	object;
	int		collisions;
	int		num_objects_purged;

	num_objects_purged = 0;
	collisions = 0;

look_again:
	lck_mtx_lock(&vm_purgeable_queue_lock);

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {

		if (object->vo_purgeable_owner != task &&
		    object->vo_purgeable_owner != NULL) {
			continue;
		}

		/* found an object: try and grab it */
		if (!vm_object_lock_try(object)) {
			lck_mtx_unlock(&vm_purgeable_queue_lock);
			mutex_pause(collisions++);
			goto look_again;
		}
		/* got it ! */

		collisions = 0;

		/* remove object from purgeable queue */
		queue_remove(&queue->objq[group], object,
			     vm_object_t, objq);
		object->objq.next = NULL;
		object->objq.prev = NULL;
		/* one less volatile object for this object's owner */
		assert(object->vo_purgeable_owner == task);
		vm_purgeable_volatile_owner_update(task, -1);

		object->vo_purgeable_volatilizer = NULL;

		queue_enter(&purgeable_nonvolatile_queue, object,
			    vm_object_t, objq);
		assert(purgeable_nonvolatile_count >= 0);
		purgeable_nonvolatile_count++;
		assert(purgeable_nonvolatile_count > 0);
		/* one more nonvolatile object for this object's owner */
		assert(object->vo_purgeable_owner == task);
		vm_purgeable_nonvolatile_owner_update(task, +1);

		/* unlock purgeable queues */
		lck_mtx_unlock(&vm_purgeable_queue_lock);

		if (object->purgeable_when_ripe) {
			/* remove a token */
			vm_page_lock_queues();
			vm_purgeable_token_remove_first(queue);
			vm_page_unlock_queues();
		}

		/* purge the object */
		(void) vm_object_purge(object, 0);
		assert(object->purgable == VM_PURGABLE_EMPTY);
		/* no change for purgeable accounting */
		vm_object_unlock(object);
		num_objects_purged++;

		/* we unlocked the purgeable queues, so start over */
		goto look_again;
	}

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	return num_objects_purged;
}

int
vm_purgeable_purge_task_owned(
	task_t	task)
{
	purgeable_q_t	queue;
	int		group;
	int		num_objects_purged;

	num_objects_purged = 0;

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
								  0,
								  task);

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
									  group,
									  task);

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
									  group,
									  task);

	return num_objects_purged;
}
void
vm_purgeable_nonvolatile_enqueue(
	vm_object_t	object,
	task_t		owner)
{
	int page_count;

	vm_object_lock_assert_exclusive(object);

	assert(object->purgable == VM_PURGABLE_NONVOLATILE);
	assert(object->vo_purgeable_owner == NULL);
	assert(owner != NULL);

	lck_mtx_lock(&vm_purgeable_queue_lock);

	if (owner->task_purgeable_disowning) {
		/* task is exiting and no longer tracking purgeable objects */
		owner = NULL;
	}

	object->vo_purgeable_owner = owner;

	object->vo_purgeable_volatilizer = NULL;

	OSBacktrace(&object->purgeable_owner_bt[0], 16);

	page_count = object->resident_page_count;
	assert(page_count == 0);	/* should be a freshly-created object */
	if (owner != NULL && page_count != 0) {
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile,
			      ptoa(page_count));
		ledger_credit(owner->ledger,
			      task_ledgers.phys_footprint,
			      ptoa(page_count));
	}

	assert(object->objq.next == NULL);
	assert(object->objq.prev == NULL);

	queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	/* one more nonvolatile object for this object's owner */
	assert(object->vo_purgeable_owner == owner);
	vm_purgeable_nonvolatile_owner_update(owner, +1);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_nonvolatile_dequeue(
	vm_object_t	object)
{
	task_t	owner;

	vm_object_lock_assert_exclusive(object);

	owner = object->vo_purgeable_owner;

	assert(object->vo_purgeable_volatilizer == NULL);

	if (owner != NULL) {
		/*
		 * Update the owner's ledger to stop accounting
		 * for this object.
		 */
		vm_purgeable_accounting(object,
					object->purgable,
					TRUE);	/* disown */
	}

	lck_mtx_lock(&vm_purgeable_queue_lock);
	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
		     vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_accounting(
	vm_object_t	object,
	vm_purgable_t	old_state,
	boolean_t	disown)
{
	task_t		owner;
	int		resident_page_count;
	int		wired_page_count;
	int		compressed_page_count;
	boolean_t	disown_on_the_fly;

	vm_object_lock_assert_exclusive(object);

	owner = object->vo_purgeable_owner;
	if (owner == NULL)
		return;

	if (!disown && owner->task_purgeable_disowning) {
		/* task is disowning its purgeable objects: help it */
		disown_on_the_fly = TRUE;
	} else {
		disown_on_the_fly = FALSE;
	}

	resident_page_count = object->resident_page_count;
	wired_page_count = object->wired_page_count;
	if ((COMPRESSED_PAGER_IS_ACTIVE ||
	     DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) &&
	    object->pager != NULL) {
		compressed_page_count =
			vm_compressor_pager_get_count(object->pager);
	} else {
		compressed_page_count = 0;
	}

	if (old_state == VM_PURGABLE_VOLATILE ||
	    old_state == VM_PURGABLE_EMPTY) {
		/* less volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_volatile,
			     ptoa(resident_page_count - wired_page_count));
		/* less compressed volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_volatile_compressed,
			     ptoa(compressed_page_count));

		if (disown || !object->alive || object->terminating) {
			/* wired pages were accounted as "non-volatile"... */
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile,
				     ptoa(wired_page_count));
			/* ... and in phys_footprint */
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(wired_page_count));

			if (!disown_on_the_fly &&
			    (object->purgeable_queue_type ==
			     PURGEABLE_Q_TYPE_MAX)) {
				/*
				 * Not on a volatile queue: must be empty
				 * or pending.
				 */
				vm_purgeable_nonvolatile_owner_update(owner, -1);
			} else {
				/* on a volatile queue */
				vm_purgeable_volatile_owner_update(owner, -1);
			}
			/* no more accounting for this dead object */
			object->vo_purgeable_owner = NULL;

			object->vo_purgeable_volatilizer = NULL;

			return;
		}

		/* more non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile,
			      ptoa(resident_page_count - wired_page_count));
		/* more compressed non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile_compressed,
			      ptoa(compressed_page_count));
		/* more footprint */
		ledger_credit(owner->ledger,
			      task_ledgers.phys_footprint,
			      ptoa(resident_page_count
				   + compressed_page_count
				   - wired_page_count));

	} else if (old_state == VM_PURGABLE_NONVOLATILE) {

		/* less non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_nonvolatile,
			     ptoa(resident_page_count - wired_page_count));
		/* less compressed non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_nonvolatile_compressed,
			     ptoa(compressed_page_count));
		/* less footprint */
		ledger_debit(owner->ledger,
			     task_ledgers.phys_footprint,
			     ptoa(resident_page_count
				  + compressed_page_count
				  - wired_page_count));

		if (disown || !object->alive || object->terminating) {
			/* wired pages still accounted as "non-volatile" */
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile,
				     ptoa(wired_page_count));
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(wired_page_count));

			/* one less "non-volatile" object for the owner */
			if (!disown_on_the_fly) {
				assert(object->purgeable_queue_type ==
				       PURGEABLE_Q_TYPE_MAX);
			}
			vm_purgeable_nonvolatile_owner_update(owner, -1);
			/* no more accounting for this dead object */
			object->vo_purgeable_owner = NULL;

			object->vo_purgeable_volatilizer = NULL;

			return;
		}

		/* more volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_volatile,
			      ptoa(resident_page_count - wired_page_count));
		/* more compressed volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_volatile_compressed,
			      ptoa(compressed_page_count));
	} else {
		panic("vm_purgeable_accounting(%p): "
		      "unexpected old_state=%d\n",
		      object, old_state);
	}

	vm_object_lock_assert_exclusive(object);
}
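
/*
 * Worked example (illustrative, not from the original source): take an owned
 * object with resident_page_count = 10, wired_page_count = 2 and
 * compressed_page_count = 3 moving from VM_PURGABLE_NONVOLATILE to volatile.
 * The code above debits the owner's ledgers by ptoa(10 - 2) non-volatile
 * bytes, ptoa(3) compressed non-volatile bytes and ptoa(10 + 3 - 2) of
 * phys_footprint, then credits ptoa(10 - 2) volatile bytes and ptoa(3)
 * compressed volatile bytes. The 2 wired pages stay accounted as
 * non-volatile and stay in phys_footprint until the object is disowned or
 * dies.
 */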
void
vm_purgeable_nonvolatile_owner_update(
	task_t	owner,
	int	delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_nonvolatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects > 0);
	} else {
		assert(owner->task_nonvolatile_objects > delta);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects >= 0);
	}
}

void
vm_purgeable_volatile_owner_update(
	task_t	owner,
	int	delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_volatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects > 0);
	} else {
		assert(owner->task_volatile_objects > delta);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects >= 0);
	}
}
void
vm_purgeable_compressed_update(
	vm_object_t	object,
	int		delta)
{
	task_t	owner;

	vm_object_lock_assert_exclusive(object);

	if (delta == 0 ||
	    !object->internal ||
	    object->purgable == VM_PURGABLE_DENY ||
	    object->vo_purgeable_owner == NULL) {
		/* not an owned purgeable VM object: nothing to update */
		return;
	}

	owner = object->vo_purgeable_owner;
	switch (object->purgable) {
	case VM_PURGABLE_DENY:
		break;
	case VM_PURGABLE_NONVOLATILE:
		if (delta > 0) {
			ledger_credit(owner->ledger,
				      task_ledgers.purgeable_nonvolatile_compressed,
				      ptoa(delta));
			ledger_credit(owner->ledger,
				      task_ledgers.phys_footprint,
				      ptoa(delta));
		} else {
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile_compressed,
				     ptoa(-delta));
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(-delta));
		}
		break;
	case VM_PURGABLE_VOLATILE:
	case VM_PURGABLE_EMPTY:
		if (delta > 0) {
			ledger_credit(owner->ledger,
				      task_ledgers.purgeable_volatile_compressed,
				      ptoa(delta));
		} else {
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_volatile_compressed,
				     ptoa(-delta));
		}
		break;
	default:
		panic("vm_purgeable_compressed_update(): "
		      "unexpected purgable %d for object %p\n",
		      object->purgable, object);
	}
}