/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/sched_prim.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>                 /* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>
/*
 * LOCK ORDERING for task-owned purgeable objects
 *
 * Whenever we need to hold multiple locks while adding to, removing from,
 * or scanning a task's task_objq list of VM objects it owns, locks should
 * be taken in this order:
 *
 * VM object ==> vm_purgeable_queue_lock ==> owner_task->task_objq_lock
 *
 * If one needs to acquire the VM object lock after any of the other 2 locks,
 * one needs to use vm_object_lock_try() and, if that fails, release the
 * other locks and retake them all in the correct order.
 */
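/*
 * Illustrative sketch (not part of the original file): the try-lock /
 * back-off pattern described above, for a caller that already holds
 * vm_purgeable_queue_lock and still needs the VM object lock. The
 * "collisions" counter and the "retry" label are illustrative only.
 */
#if 0
retry:
	lck_mtx_lock(&vm_purgeable_queue_lock);
	/* ... pick "object" off one of the purgeable queues ... */
	if (!vm_object_lock_try(object)) {
		/* wrong lock order: drop the queue lock, pause, start over */
		lck_mtx_unlock(&vm_purgeable_queue_lock);
		mutex_pause(collisions++);
		goto retry;
	}
	/* both locks now held, consistent with the ordering above */
#endif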
extern vm_pressure_level_t memorystatus_vm_pressure_level;
token_idx_t     token_q_max_cnt = 0;
vm_size_t       token_q_cur_size = 0;

token_idx_t     token_free_idx = 0;             /* head of free queue */
token_idx_t     token_init_idx = 1;             /* token 0 is reserved!! */
int32_t         token_new_pagecount = 0;        /* count of pages that will
                                                 * be added onto token queue */

int             available_for_purge = 0;        /* increase when ripe token
                                                 * added, decrease when ripe
                                                 * token removed.
                                                 * protected by page_queue_lock
                                                 */

static int      token_q_allocating = 0;         /* flag for singlethreading
                                                 * allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t purgeable_nonvolatile_queue;
int purgeable_nonvolatile_count;

decl_lck_mtx_data(, vm_purgeable_queue_lock)
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
	int             token_cnt = 0, page_cnt = 0;
	token_idx_t     token = queue->token_q_head;
	token_idx_t     unripe = 0;
	int             our_inactive_count;

#if DEVELOPMENT || DEBUG
	static unsigned lightweight_check = 0;

	/*
	 * Due to performance impact, only perform this check
	 * every 100 times on DEVELOPMENT kernels.
	 */
	if (lightweight_check++ < 100) {
		return;
	}

	lightweight_check = 0;
#endif

	while (token) {
		if (tokens[token].count != 0) {
			assert(queue->token_q_unripe);
			if (unripe == 0) {
				assert(token == queue->token_q_unripe);
				unripe = token;
			}
			page_cnt += tokens[token].count;
		}
		if (tokens[token].next == 0) {
			assert(queue->token_q_tail == token);
		}

		token_cnt++;
		token = tokens[token].next;
	}

	if (unripe) {
		assert(queue->token_q_unripe == unripe);
	}
	assert(token_cnt == queue->debug_count_tokens);

	/* obsolete queue doesn't maintain token counts */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
		our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
		assert(our_inactive_count >= 0);
		assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
	}
}
/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/* new token */
	token_idx_t     token;
	enum purgeable_q_type i;

find_available_token:
	if (token_free_idx) {                           /* unused tokens available */
		token = token_free_idx;
		token_free_idx = tokens[token_free_idx].next;
	} else if (token_init_idx < token_q_max_cnt) {  /* lazy token array init */
		token = token_init_idx;
		token_init_idx++;
	} else {                                        /* allocate more memory */
		/* Wait if another thread is inside the memory alloc section */
		while (token_q_allocating) {
			wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
			    LCK_SLEEP_DEFAULT,
			    (event_t)&token_q_allocating,
			    THREAD_UNINT);
			if (res != THREAD_AWAKENED) {
				return KERN_ABORTED;
			}
		}

		/* Check whether memory is still maxed out */
		if (token_init_idx < token_q_max_cnt) {
			goto find_available_token;
		}

		/* Still no memory. Allocate some. */
		token_q_allocating = 1;

		/* Drop page queue lock so we can allocate */
		vm_page_unlock_queues();

		struct token *new_loc;
		vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
		kern_return_t result;

		if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
			result = KERN_RESOURCE_SHORTAGE;
		} else {
			if (token_q_cur_size) {
				result = kmem_realloc(kernel_map,
				    (vm_offset_t) tokens,
				    token_q_cur_size,
				    (vm_offset_t *) &new_loc,
				    alloc_size, VM_KERN_MEMORY_OSFMK);
			} else {
				result = kmem_alloc(kernel_map,
				    (vm_offset_t *) &new_loc,
				    alloc_size, VM_KERN_MEMORY_OSFMK);
			}
		}

		vm_page_lock_queues();

		if (result) {
			/* Unblock waiting threads */
			token_q_allocating = 0;
			thread_wakeup((event_t)&token_q_allocating);
			return result;
		}

		/* If we get here, we allocated new memory. Update pointers and
		 * dealloc old range */
		struct token *old_tokens = tokens;
		tokens = new_loc;
		vm_size_t old_token_q_cur_size = token_q_cur_size;
		token_q_cur_size = alloc_size;
		token_q_max_cnt = (token_idx_t) (token_q_cur_size /
		    sizeof(struct token));
		assert(token_init_idx < token_q_max_cnt);       /* We must have a free token now */

		if (old_token_q_cur_size) {     /* clean up old mapping */
			vm_page_unlock_queues();
			/* kmem_realloc leaves the old region mapped. Get rid of it. */
			kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
			vm_page_lock_queues();
		}

		/* Unblock waiting threads */
		token_q_allocating = 0;
		thread_wakeup((event_t)&token_q_allocating);

		goto find_available_token;
	}

	assert(token);
	/*
	 * the new pagecount we got needs to be applied to all queues except
	 * the obsolete queue
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
		assert(pages >= 0);
		assert(pages <= TOKEN_COUNT_MAX);
		purgeable_queues[i].new_pages = (int32_t) pages;
		assert(purgeable_queues[i].new_pages == pages);
	}
	token_new_pagecount = 0;
	/* set token counter value */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
		tokens[token].count = queue->new_pages;
	} else {
		tokens[token].count = 0;        /* all obsolete items are
		                                 * ripe immediately */
	}
	queue->new_pages = 0;

	/* put token on token counter list */
	tokens[token].next = 0;
	if (queue->token_q_tail == 0) {
		assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
		queue->token_q_head = token;
		tokens[token].prev = 0;
	} else {
		tokens[queue->token_q_tail].next = token;
		tokens[token].prev = queue->token_q_tail;
	}
	if (queue->token_q_unripe == 0) {       /* only ripe tokens (token
		                                 * count == 0) in queue */
		if (tokens[token].count > 0) {
			queue->token_q_unripe = token;  /* first unripe token */
		} else {
			available_for_purge++;          /* added a ripe token?
			                                 * increase available count */
		}
	}
	queue->token_q_tail = token;

#if MACH_ASSERT
	queue->debug_count_tokens++;
	/* Check both queues, since we modified the new_pages count on each */
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
	    queue->type,
	    tokens[token].count,        /* num pages on token
	                                 * (last token) */
	    queue->debug_count_tokens,
	    0,
	    0);
#endif

	return KERN_SUCCESS;
}
/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
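/*
 * Illustrative example (not in the original code): if a queue holds tokens
 * with counts [3, 2, 5], removing the first token folds its count into the
 * new head, leaving [5, 5]. The cumulative number of pages each remaining
 * token still has to wait for before ripening is unchanged.
 */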
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t     token;
	token = queue->token_q_head;
	if (token) {
		assert(queue->token_q_tail);
		if (queue->token_q_head == queue->token_q_unripe) {
			/* no ripe tokens... must move unripe pointer */
			queue->token_q_unripe = tokens[token].next;
		} else {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		}

		if (queue->token_q_tail == queue->token_q_head) {
			assert(tokens[token].next == 0);
		}

		queue->token_q_head = tokens[token].next;
		if (queue->token_q_head) {
			tokens[queue->token_q_head].count += tokens[token].count;
			tokens[queue->token_q_head].prev = 0;
		} else {
			/* currently no other tokens in the queue */
			/*
			 * the page count must be added to the next newly
			 * created token
			 */
			queue->new_pages += tokens[token].count;
			/* if head is zero, tail is too */
			queue->token_q_tail = 0;
		}

#if MACH_ASSERT
		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
		    queue->type,
		    tokens[queue->token_q_head].count,  /* num pages on new
		                                         * first token */
		    token_new_pagecount,        /* num pages waiting for
		                                 * next token */
		    available_for_purge,
		    0);
#endif
	}
	return token;
}
static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t     token;
	token = queue->token_q_tail;

	if (token) {
		assert(queue->token_q_head);

		if (queue->token_q_tail == queue->token_q_head) {
			assert(tokens[token].next == 0);
		}

		if (queue->token_q_unripe == 0) {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		} else if (queue->token_q_unripe == token) {
			/* we're removing the only unripe token */
			queue->token_q_unripe = 0;
		}

		if (token == queue->token_q_head) {
			/* token is the last one in the queue */
			queue->token_q_head = 0;
			queue->token_q_tail = 0;
		} else {
			token_idx_t new_tail;

			new_tail = tokens[token].prev;

			assert(new_tail);
			assert(tokens[new_tail].next == token);

			queue->token_q_tail = new_tail;
			tokens[new_tail].next = 0;
		}

		queue->new_pages += tokens[token].count;

#if MACH_ASSERT
		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
		    queue->type,
		    tokens[queue->token_q_head].count,  /* num pages on new
		                                         * first token */
		    token_new_pagecount,        /* num pages waiting for
		                                 * next token */
		    available_for_purge,
		    0);
#endif
	}
	return token;
}
/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	token_idx_t     token = vm_purgeable_token_remove_first(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		tokens[token].prev = 0;
		token_free_idx = token;
	}
}
void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	token_idx_t     token = vm_purgeable_token_remove_last(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		tokens[token].prev = 0;
		token_free_idx = token;
	}
}
/* Call with page queue locked. */
void
vm_purgeable_q_advance_all()
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/* check queue counters - if they get really large, scale them back.
	 * They tend to get that large when there is no purgeable queue action */
	int             i;
	if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {     /* a system idling for years might get there */
		for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
			int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
			assert(pages >= 0);
			assert(pages <= TOKEN_COUNT_MAX);
			purgeable_queues[i].new_pages = (int32_t) pages;
			assert(purgeable_queues[i].new_pages == pages);
		}
		token_new_pagecount = 0;
	}
	/*
	 * Decrement token counters. A token counter can be zero, this means the
	 * object is ripe to be purged. It is not purged immediately, because that
	 * could cause several objects to be purged even if purging one would satisfy
	 * the memory needs. Instead, the pageout thread purges one after the other
	 * by calling vm_purgeable_object_purge_one and then rechecking the memory
	 * balance.
	 *
	 * No need to advance obsolete queue - all items are ripe there,
	 * always
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t   queue = &purgeable_queues[i];
		uint32_t        num_pages = 1;

		/* Iterate over tokens as long as there are unripe tokens. */
		while (queue->token_q_unripe) {
			if (tokens[queue->token_q_unripe].count && num_pages) {
				tokens[queue->token_q_unripe].count -= 1;
				num_pages -= 1;
			}

			if (tokens[queue->token_q_unripe].count == 0) {
				queue->token_q_unripe = tokens[queue->token_q_unripe].next;
				available_for_purge++;
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
				    queue->type,
				    tokens[queue->token_q_head].count,  /* num pages on new
				                                         * first token */
				    0,
				    available_for_purge,
				    0);
				continue;       /* One token ripened. Make sure to
				                 * check the next one. */
			}
			if (num_pages == 0) {
				break;  /* Current token not ripe and no more pages.
				         * Work done. */
			}
		}

		/*
		 * if there are no unripe tokens in the queue, decrement the
		 * new_pages counter instead. new_pages can be negative, but must be
		 * canceled out by token_new_pagecount -- since the inactive queue as a
		 * whole always contains a nonnegative number of pages
		 */
		if (!queue->token_q_unripe) {
			queue->new_pages -= num_pages;
			assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
		}

#if MACH_ASSERT
		vm_purgeable_token_check_queue(queue);
#endif
	}
}
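/*
 * Illustrative sketch (not part of the original file): the caller-side loop
 * the comment above alludes to. A pageout-style thread ages tokens, then
 * purges one object at a time and rechecks memory pressure between purges.
 * The memory_is_still_tight() predicate is a hypothetical placeholder.
 */
#if 0
	vm_page_lock_queues();
	vm_purgeable_q_advance_all();           /* age tokens; may ripen some */
	while (available_for_purge > 0 && memory_is_still_tight()) {
		/* purges at most one object per call, consuming a ripe token */
		if (!vm_purgeable_object_purge_one(0, 0)) {
			break;  /* nothing could be locked and purged right now */
		}
	}
	vm_page_unlock_queues();
#endif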
/*
 * Grab any ripe object and purge it: check the obsolete queue first, then go
 * through each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *      Yes - purge it. Remove token.
 *      No - check other queue. Is there an object?
 *              No - increment group, then go to (1)
 *              Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *                      token from other queue and migrate unripe token from this
 *                      queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
	/* return token to free list. advance token list. */
	token_idx_t     new_head = tokens[queue->token_q_head].next;
	tokens[queue->token_q_head].next = token_free_idx;
	tokens[queue->token_q_head].prev = 0;
	token_free_idx = queue->token_q_head;
	queue->token_q_head = new_head;
	tokens[new_head].prev = 0;
	if (new_head == 0) {
		queue->token_q_tail = 0;
	}

#if MACH_ASSERT
	queue->debug_count_tokens--;
	vm_purgeable_token_check_queue(queue);
#endif

	available_for_purge--;
	assert(available_for_purge >= 0);
}
/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(queue->token_q_head);

	if (tokens[queue->token_q_head].count == 0) {
		/* This queue has a ripe token. Remove. */
		vm_purgeable_token_remove_ripe(queue);
	} else {
		assert(queue2);
		/*
		 * queue2 must have a ripe token. Remove, and migrate one
		 * from queue to queue2.
		 */
		vm_purgeable_token_remove_ripe(queue2);
		/* migrate unripe token */
		token_idx_t     token;
		token_cnt_t     count;

		/* remove token from queue1 */
		assert(queue->token_q_unripe == queue->token_q_head);   /* queue1 had no unripe
		                                                         * tokens, remember? */
		token = vm_purgeable_token_remove_first(queue);
		assert(token);

		count = tokens[token].count;

		/* migrate to queue2 */
		/* go to migration target loc */

		token_idx_t     token_to_insert_before = queue2->token_q_head, token_to_insert_after;

		while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
			count -= tokens[token_to_insert_before].count;
			token_to_insert_before = tokens[token_to_insert_before].next;
		}

		/* token_to_insert_before is now set correctly */

		/* should the inserted token become the first unripe token? */
		if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0)) {
			queue2->token_q_unripe = token; /* if so, must update unripe pointer */
		}

		/*
		 * insert the token between token_to_insert_after and token_to_insert_before:
		 * if inserting at end, reduce new_pages by that value;
		 * otherwise, reduce counter of next token
		 */

		tokens[token].count = count;

		if (token_to_insert_before != 0) {
			token_to_insert_after = tokens[token_to_insert_before].prev;

			tokens[token].next = token_to_insert_before;
			tokens[token_to_insert_before].prev = token;

			assert(tokens[token_to_insert_before].count >= count);
			tokens[token_to_insert_before].count -= count;
		} else {
			/* if we ran off the end of the list, the token to insert after is the tail */
			token_to_insert_after = queue2->token_q_tail;

			tokens[token].next = 0;
			queue2->token_q_tail = token;

			assert(queue2->new_pages >= (int32_t) count);
			queue2->new_pages -= count;
		}

		if (token_to_insert_after != 0) {
			tokens[token].prev = token_to_insert_after;
			tokens[token_to_insert_after].next = token;
		} else {
			/* is this case possible? */
			tokens[token].prev = 0;
			queue2->token_q_head = token;
		}

#if MACH_ASSERT
		queue2->debug_count_tokens++;
		vm_purgeable_token_check_queue(queue2);
#endif
	}
}
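/*
 * Illustrative example (not in the original code): migrating a token whose
 * count is 6 into a queue holding tokens with counts [4, 3, 5]. The walk
 * skips the first token (6 > 4, remainder 2) and stops before the second
 * (2 <= 3), so the migrated token is inserted with count 2 and the next
 * token's count drops from 3 to 1, giving [4, 2, 1, 5]. Cumulative ripening
 * points are preserved: the new token ripens after 6 pages, and the token
 * that used to ripen after 7 pages still does.
 */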
/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(
	purgeable_q_t   queue,
	int             group,
	boolean_t       pick_ripe)
{
	vm_object_t     object, best_object;
	int             object_task_importance;
	int             best_object_task_importance;
	int             best_object_skipped;
	int             num_objects_skipped;
	int             try_lock_failed = 0;
	int             try_lock_succeeded = 0;
	task_t          owner;

	best_object = VM_OBJECT_NULL;
	best_object_task_importance = INT_MAX;

	LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
	/*
	 * Usually we would pick the first element from a queue. However, we
	 * might not be able to get a lock on it, in which case we try the
	 * remaining elements in order.
	 */

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
	    pick_ripe,
	    group,
	    VM_KERNEL_UNSLIDE_OR_PERM(queue),
	    0,
	    0);
	num_objects_skipped = 0;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	    !queue_end(&queue->objq[group], (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq),
	    num_objects_skipped++) {
		/*
		 * To prevent us looping for an excessively long time, choose
		 * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
		 * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
		 * we keep going until we find the first eligible object.
		 */
		if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
			break;
		}

		if (pick_ripe &&
		    !object->purgeable_when_ripe) {
			/* we want an object that has a ripe token */
			continue;
		}

		object_task_importance = 0;

		/*
		 * We don't want to use VM_OBJECT_OWNER() here: we want to
		 * distinguish kernel-owned and disowned objects.
		 * Disowned objects have no owner and will have no importance...
		 */
		owner = object->vo_owner;
		if (owner != NULL && owner != VM_OBJECT_OWNER_DISOWNED) {
#if CONFIG_EMBEDDED
#if CONFIG_JETSAM
			object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE);
#endif /* CONFIG_JETSAM */
#else /* CONFIG_EMBEDDED */
			object_task_importance = task_importance_estimate(owner);
#endif /* CONFIG_EMBEDDED */
		}

		if (object_task_importance < best_object_task_importance) {
			if (vm_object_lock_try(object)) {
				try_lock_succeeded++;
				if (best_object != VM_OBJECT_NULL) {
					/* forget about previous best object */
					vm_object_unlock(best_object);
				}
				best_object = object;
				best_object_task_importance = object_task_importance;
				best_object_skipped = num_objects_skipped;
				if (best_object_task_importance == 0) {
					/* can't get any better: stop looking */
					break;
				}
			} else {
				try_lock_failed++;
			}
		}
	}
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
	    num_objects_skipped,        /* considered objects */
	    try_lock_failed,
	    try_lock_succeeded,
	    VM_KERNEL_UNSLIDE_OR_PERM(best_object),
	    ((best_object == NULL) ? 0 : best_object->resident_page_count));

	object = best_object;

	if (object == VM_OBJECT_NULL) {
		return VM_OBJECT_NULL;
	}

	/* Locked. Great. We'll take it. Remove and return. */
//	printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

	vm_object_lock_assert_exclusive(object);

	queue_remove(&queue->objq[group], object,
	    vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	object->purgeable_queue_group = 0;
	/* one less volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);

#if DEBUG
	object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */

	/* keep queue of non-volatile objects */
	queue_enter(&purgeable_nonvolatile_queue, object,
	    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	/* one more nonvolatile object for this object's owner */
	vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);

#if MACH_ASSERT
	queue->debug_count_objects--;
#endif

	return object;
}
/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
	enum purgeable_q_type i;
	int             group;
	vm_object_t     object;
	unsigned int    purged_count;
	uint32_t        collisions;

	purged_count = 0;
	collisions = 0;

restart:
	lck_mtx_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t   queue;

		queue = &purgeable_queues[i];

		/*
		 * Look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			while (!queue_empty(&queue->objq[group])) {
				object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
				if (object == VM_OBJECT_NULL) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					mutex_pause(collisions++);
					goto restart;
				}

				lck_mtx_unlock(&vm_purgeable_queue_lock);

				/* Lock the page queue here so we don't hold it
				 * over the whole, lengthy operation */
				if (object->purgeable_when_ripe) {
					vm_page_lock_queues();
					vm_purgeable_token_remove_first(queue);
					vm_page_unlock_queues();
				}

				(void) vm_object_purge(object, 0);
				assert(object->purgable == VM_PURGABLE_EMPTY);
				/* no change in purgeable accounting */

				vm_object_unlock(object);
				purged_count++;
				goto restart;
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
	    purged_count,       /* # of purged objects */
	    0,
	    available_for_purge,
	    0,
	    0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return;
}
boolean_t
vm_purgeable_object_purge_one_unlocked(
	int force_purge_below_group)
{
	boolean_t       retval;

	vm_page_lock_queues();
	retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
	vm_page_unlock_queues();

	return retval;
}
boolean_t
vm_purgeable_object_purge_one(
	int force_purge_below_group,
	int flags)
{
	enum purgeable_q_type i;
	int             group;
	vm_object_t     object = 0;
	purgeable_q_t   queue, queue2;
	boolean_t       forced_purge;
	unsigned int    resident_page_count;

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_START,
	    force_purge_below_group, flags, 0, 0, 0);

	/* Need the page queue lock since we'll be changing the token queue. */
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	lck_mtx_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		queue = &purgeable_queues[i];

		if (force_purge_below_group == 0) {
			/*
			 * Are there any ripe tokens on this queue? If yes,
			 * we'll find an object to purge there
			 */
			if (!queue->token_q_head) {
				/* no token: look at next purgeable queue */
				continue;
			}

			if (tokens[queue->token_q_head].count != 0) {
				/* no ripe token: next queue */
				continue;
			}
		}

		/*
		 * Now look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			if (!queue->token_q_head ||
			    tokens[queue->token_q_head].count != 0) {
				/* no tokens or no ripe tokens */

				if (group >= force_purge_below_group) {
					/* no more groups to force-purge */
					break;
				}

				/*
				 * Try and purge an object in this group
				 * even though no tokens are ripe.
				 */
				if (!queue_empty(&queue->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					if (object->purgeable_when_ripe) {
						vm_purgeable_token_delete_first(queue);
					}
					forced_purge = TRUE;
					goto purge_now;
				}

				/* nothing to purge in this group: next group */
				continue;
			}
			if (!queue_empty(&queue->objq[group]) &&
			    (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				if (object->purgeable_when_ripe) {
					vm_purgeable_token_choose_and_delete_ripe(queue, 0);
				}
				forced_purge = FALSE;
				goto purge_now;
			}
			if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
				/* This is the token migration case, and it works between
				 * FIFO and LIFO only */
				queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
				    PURGEABLE_Q_TYPE_FIFO :
				    PURGEABLE_Q_TYPE_LIFO];

				if (!queue_empty(&queue2->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					if (object->purgeable_when_ripe) {
						vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
					}
					forced_purge = FALSE;
					goto purge_now;
				}
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	/*
	 * because we have to do a try_lock on the objects which could fail,
	 * we could end up with no object to purge at this time, even though
	 * we have objects in a purgeable state
	 */
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
	    0, 0, available_for_purge, 0, 0);

	return FALSE;
purge_now:

	assert(object);
	vm_page_unlock_queues();        /* Unlock for call to vm_object_purge() */
//	printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_owner, task_importance_estimate(object->vo_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
	resident_page_count = object->resident_page_count;
	(void) vm_object_purge(object, flags);
	assert(object->purgable == VM_PURGABLE_EMPTY);
	/* no change in purgeable accounting */
	vm_object_unlock(object);
	vm_page_lock_queues();

	vm_pageout_vminfo.vm_pageout_pages_purged += resident_page_count;

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
	    VM_KERNEL_UNSLIDE_OR_PERM(object),  /* purged object */
	    resident_page_count,
	    available_for_purge,
	    0,
	    0);

	return TRUE;
}
/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
	vm_object_lock_assert_exclusive(object);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
	    vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	/* one less nonvolatile object for this object's owner */
	vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), -1);

	if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE) {
		group = 0;
	}

	if (queue->type != PURGEABLE_Q_TYPE_LIFO) {     /* fifo and obsolete are
		                                         * fifo-queued */
		queue_enter(&queue->objq[group], object, vm_object_t, objq);   /* last to die */
	} else {
		queue_enter_first(&queue->objq[group], object, vm_object_t, objq);     /* first to die */
	}
	/* one more volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), +1);

	object->purgeable_queue_type = queue->type;
	object->purgeable_queue_group = group;

#if DEBUG
	assert(object->vo_purgeable_volatilizer == NULL);
	object->vo_purgeable_volatilizer = current_task();
	OSBacktrace(&object->purgeable_volatilizer_bt[0],
	    ARRAY_COUNT(object->purgeable_volatilizer_bt));
#endif /* DEBUG */

#if MACH_ASSERT
	queue->debug_count_objects++;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
	    0,
	    tokens[queue->token_q_head].count,
	    queue->type,
	    group,
	    0);
#endif

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	return;
}
/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
	int group;
	enum purgeable_q_type type;
	purgeable_q_t queue;

	vm_object_lock_assert_exclusive(object);

	type = object->purgeable_queue_type;
	group = object->purgeable_queue_group;

	if (type == PURGEABLE_Q_TYPE_MAX) {
		if (object->objq.prev || object->objq.next) {
			panic("unmarked object on purgeable q");
		}

		return NULL;
	} else if (!(object->objq.prev && object->objq.next)) {
		panic("marked object not on purgeable q");
	}

	lck_mtx_lock(&vm_purgeable_queue_lock);

	queue = &purgeable_queues[type];

	queue_remove(&queue->objq[group], object, vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	/* one less volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);
#if DEBUG
	object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
	/* keep queue of non-volatile objects */
	if (object->alive && !object->terminating) {
		queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
		assert(purgeable_nonvolatile_count >= 0);
		purgeable_nonvolatile_count++;
		assert(purgeable_nonvolatile_count > 0);
		/* one more nonvolatile object for this object's owner */
		vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);
	}

#if MACH_ASSERT
	queue->debug_count_objects--;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
	    0,
	    tokens[queue->token_q_head].count,
	    queue->type,
	    group,
	    0);
#endif

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	object->purgeable_queue_group = 0;

	vm_object_lock_assert_exclusive(object);

	return &purgeable_queues[type];
}
static void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
	LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

	stat->count = stat->size = 0;
	vm_object_t     object;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	    !queue_end(&queue->objq[group], (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq)) {
		if (!target_task || VM_OBJECT_OWNER(object) == target_task) {
			stat->count++;
			stat->size += (object->resident_page_count * PAGE_SIZE);
		}
	}
	return;
}
void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
	purgeable_q_t   queue;
	int             group;

	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Populate fifo_data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);
	}

	/* Populate lifo_data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);
	}

	/* Populate obsolete data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return;
}
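/*
 * Illustrative sketch (not part of the original file): totaling the volatile
 * footprint reported by vm_purgeable_stats(). It assumes vm_purgeable_info_t
 * points to a struct with the fifo_data[]/lifo_data[]/obsolete_data fields
 * used above; a NULL target_task yields system-wide numbers.
 */
#if 0
	struct vm_purgeable_info info;
	uint64_t volatile_bytes = 0;
	int g;

	vm_purgeable_stats(&info, TASK_NULL);
	for (g = 0; g < NUM_VOLATILE_GROUPS; g++) {
		volatile_bytes += info.fifo_data[g].size;
		volatile_bytes += info.lifo_data[g].size;
	}
	volatile_bytes += info.obsolete_data.size;
#endif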
#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
	purgeable_q_t queue,
	int group,
	task_t task,
	pvm_account_info_t acnt_info)
{
	vm_object_t     object;
	uint64_t        compressed_count;

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	    !queue_end(&queue->objq[group], (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq)) {
		if (VM_OBJECT_OWNER(object) == task) {
			compressed_count = vm_compressor_pager_get_count(object->pager);
			acnt_info->pvm_volatile_compressed_count += compressed_count;
			acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
			acnt_info->pvm_nonvolatile_count += object->wired_page_count;
		}
	}
}
/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
	task_t                  task,
	pvm_account_info_t      acnt_info)
{
	queue_head_t   *nonvolatile_q;
	vm_object_t     object;
	int             group;
	int             state;
	uint64_t        compressed_count;
	purgeable_q_t   volatile_q;

	if ((task == NULL) || (acnt_info == NULL)) {
		return KERN_INVALID_ARGUMENT;
	}

	acnt_info->pvm_volatile_count = 0;
	acnt_info->pvm_volatile_compressed_count = 0;
	acnt_info->pvm_nonvolatile_count = 0;
	acnt_info->pvm_nonvolatile_compressed_count = 0;

	lck_mtx_lock(&vm_purgeable_queue_lock);
	nonvolatile_q = &purgeable_nonvolatile_queue;
	for (object = (vm_object_t) queue_first(nonvolatile_q);
	    !queue_end(nonvolatile_q, (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq)) {
		if (VM_OBJECT_OWNER(object) == task) {
			state = object->purgable;
			compressed_count = vm_compressor_pager_get_count(object->pager);
			if (state == VM_PURGABLE_EMPTY) {
				acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
				acnt_info->pvm_volatile_compressed_count += compressed_count;
			} else {
				acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
				acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
			}
			acnt_info->pvm_nonvolatile_count += object->wired_page_count;
		}
	}
	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
	}
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
	acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
	acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
	acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

	return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */
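/*
 * Illustrative sketch (not part of the original file): calling the
 * DEVELOPMENT/DEBUG accounting routine above. It assumes pvm_account_info_t
 * points to a struct carrying the four pvm_* counters filled in by
 * vm_purgeable_account(); the results are in bytes.
 */
#if 0
	struct pvm_account_info info;

	if (vm_purgeable_account(current_task(), &info) == KERN_SUCCESS) {
		uint64_t owned_bytes = info.pvm_volatile_count +
		    info.pvm_nonvolatile_count;
		/* ... report or log owned_bytes ... */
	}
#endif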
void
vm_purgeable_disown(
	task_t task)
{
	vm_object_t     next_object;
	vm_object_t     object;
	int             collisions;

	collisions = 0;

	/*
	 * Scan the purgeable objects queues for objects owned by "task".
	 * This has to be done "atomically" under the "vm_purgeable_queue"
	 * lock, to ensure that no new purgeable object get associated
	 * with this task or moved between queues while we're scanning.
	 */

	/*
	 * Scan non-volatile queue for objects owned by "task".
	 */
again:
	if (task->task_purgeable_disowned) {
		/* task has already disowned its purgeable memory */
		assert(task->task_volatile_objects == 0);
		assert(task->task_nonvolatile_objects == 0);
		return;
	}

	lck_mtx_lock(&vm_purgeable_queue_lock);
	task_objq_lock(task);

	task->task_purgeable_disowning = TRUE;

	for (object = (vm_object_t) queue_first(&task->task_objq);
	    !queue_end(&task->task_objq, (queue_entry_t) object);
	    object = next_object) {
		if (task->task_nonvolatile_objects == 0 &&
		    task->task_volatile_objects == 0) {
			/* no more purgeable objects owned by "task" */
			break;
		}

		next_object = (vm_object_t) queue_next(&object->task_objq);
		if (object->purgable == VM_PURGABLE_DENY) {
			/* not a purgeable object: skip */
			continue;
		}

#if DEBUG
		assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
		assert(object->vo_owner == task);
		if (!vm_object_lock_try(object)) {
			lck_mtx_unlock(&vm_purgeable_queue_lock);
			task_objq_unlock(task);
			mutex_pause(collisions++);
			goto again;
		}
		/* transfer ownership to the kernel */
		assert(VM_OBJECT_OWNER(object) != kernel_task);
		vm_object_ownership_change(
			object,
			object->vo_ledger_tag,          /* unchanged */
			VM_OBJECT_OWNER_DISOWNED,       /* new owner */
			TRUE);                          /* old_owner->task_objq locked */
		assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
		vm_object_unlock(object);
	}

	if (__improbable(task->task_volatile_objects != 0 ||
	    task->task_nonvolatile_objects != 0)) {
		panic("%s(%p): volatile=%d nonvolatile=%d q=%p q_first=%p q_last=%p",
		    __FUNCTION__,
		    task,
		    task->task_volatile_objects,
		    task->task_nonvolatile_objects,
		    &task->task_objq,
		    queue_first(&task->task_objq),
		    queue_last(&task->task_objq));
	}

	/* there shouldn't be any purgeable objects owned by task now */
	assert(task->task_volatile_objects == 0);
	assert(task->task_nonvolatile_objects == 0);
	assert(task->task_purgeable_disowning);

	/* and we don't need to try and disown again */
	task->task_purgeable_disowned = TRUE;

	lck_mtx_unlock(&vm_purgeable_queue_lock);
	task_objq_unlock(task);
}
static uint64_t
vm_purgeable_queue_purge_task_owned(
	purgeable_q_t   queue,
	int             group,
	task_t          task)
{
	vm_object_t     object = VM_OBJECT_NULL;
	int             collisions = 0;
	uint64_t        num_pages_purged = 0;

	num_pages_purged = 0;
	collisions = 0;

look_again:
	lck_mtx_lock(&vm_purgeable_queue_lock);

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	    !queue_end(&queue->objq[group], (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq)) {
		if (object->vo_owner != task) {
			continue;
		}

		/* found an object: try and grab it */
		if (!vm_object_lock_try(object)) {
			lck_mtx_unlock(&vm_purgeable_queue_lock);
			mutex_pause(collisions++);
			goto look_again;
		}
		/* got it ! */

		collisions = 0;

		/* remove object from purgeable queue */
		queue_remove(&queue->objq[group], object,
		    vm_object_t, objq);
		object->objq.next = NULL;
		object->objq.prev = NULL;
		object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
		object->purgeable_queue_group = 0;
		/* one less volatile object for this object's owner */
		assert(object->vo_owner == task);
		vm_purgeable_volatile_owner_update(task, -1);

#if DEBUG
		object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
		queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
		assert(purgeable_nonvolatile_count >= 0);
		purgeable_nonvolatile_count++;
		assert(purgeable_nonvolatile_count > 0);
		/* one more nonvolatile object for this object's owner */
		assert(object->vo_owner == task);
		vm_purgeable_nonvolatile_owner_update(task, +1);

		/* unlock purgeable queues */
		lck_mtx_unlock(&vm_purgeable_queue_lock);

		if (object->purgeable_when_ripe) {
			/* remove a token */
			vm_page_lock_queues();
			vm_purgeable_token_remove_first(queue);
			vm_page_unlock_queues();
		}

		/* purge the object */
		num_pages_purged += vm_object_purge(object, 0);

		assert(object->purgable == VM_PURGABLE_EMPTY);
		/* no change for purgeable accounting */
		vm_object_unlock(object);

		/* we unlocked the purgeable queues, so start over */
		goto look_again;
	}

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	return num_pages_purged;
}
uint64_t
vm_purgeable_purge_task_owned(
	task_t task)
{
	purgeable_q_t   queue = NULL;
	int             group = 0;
	uint64_t        num_pages_purged = 0;

	num_pages_purged = 0;

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
	    0,
	    task);

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
		    group,
		    task);
	}

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
		    group,
		    task);
	}

	return num_pages_purged;
}
void
vm_purgeable_nonvolatile_enqueue(
	vm_object_t     object,
	task_t          owner)
{
	vm_object_lock_assert_exclusive(object);

	assert(object->purgable == VM_PURGABLE_NONVOLATILE);
	assert(object->vo_owner == NULL);

	lck_mtx_lock(&vm_purgeable_queue_lock);

	if (owner != NULL &&
	    owner->task_purgeable_disowning) {
		/* task is exiting and no longer tracking purgeable objects */
		owner = VM_OBJECT_OWNER_DISOWNED;
	}
	if (owner == NULL) {
		owner = kernel_task;
	}
#if DEBUG
	OSBacktrace(&object->purgeable_owner_bt[0],
	    ARRAY_COUNT(object->purgeable_owner_bt));
	object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */

	vm_object_ownership_change(object,
	    object->vo_ledger_tag,      /* tag unchanged */
	    owner,
	    FALSE);                     /* task_objq_locked */

	assert(object->objq.next == NULL);
	assert(object->objq.prev == NULL);

	queue_enter(&purgeable_nonvolatile_queue, object,
	    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_nonvolatile_dequeue(
	vm_object_t     object)
{
	task_t  owner;

	vm_object_lock_assert_exclusive(object);

	owner = VM_OBJECT_OWNER(object);
#if DEBUG
	assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
	if (owner != NULL) {
		/*
		 * Update the owner's ledger to stop accounting
		 * for this object.
		 */
		/* transfer ownership to the kernel */
		assert(VM_OBJECT_OWNER(object) != kernel_task);
		vm_object_ownership_change(
			object,
			object->vo_ledger_tag,          /* unchanged */
			VM_OBJECT_OWNER_DISOWNED,       /* new owner */
			FALSE);                         /* old_owner->task_objq locked */
		assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
	}

	lck_mtx_lock(&vm_purgeable_queue_lock);
	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
	    vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_accounting(
	vm_object_t     object,
	vm_purgable_t   old_state)
{
	task_t          owner;
	int             resident_page_count;
	int             wired_page_count;
	int             compressed_page_count;
	int             ledger_idx_volatile;
	int             ledger_idx_nonvolatile;
	int             ledger_idx_volatile_compressed;
	int             ledger_idx_nonvolatile_compressed;
	boolean_t       do_footprint;

	vm_object_lock_assert_exclusive(object);
	assert(object->purgable != VM_PURGABLE_DENY);

	owner = VM_OBJECT_OWNER(object);
	if (owner == NULL ||
	    object->purgable == VM_PURGABLE_DENY) {
		return;
	}

	vm_object_ledger_tag_ledgers(object,
	    &ledger_idx_volatile,
	    &ledger_idx_nonvolatile,
	    &ledger_idx_volatile_compressed,
	    &ledger_idx_nonvolatile_compressed,
	    &do_footprint);

	resident_page_count = object->resident_page_count;
	wired_page_count = object->wired_page_count;
	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
	    object->pager != NULL) {
		compressed_page_count =
		    vm_compressor_pager_get_count(object->pager);
	} else {
		compressed_page_count = 0;
	}
	if (old_state == VM_PURGABLE_VOLATILE ||
	    old_state == VM_PURGABLE_EMPTY) {
		/* less volatile bytes in ledger */
		ledger_debit(owner->ledger,
		    ledger_idx_volatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* less compressed volatile bytes in ledger */
		ledger_debit(owner->ledger,
		    ledger_idx_volatile_compressed,
		    ptoa_64(compressed_page_count));

		/* more non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_nonvolatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* more compressed non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_nonvolatile_compressed,
		    ptoa_64(compressed_page_count));
		if (do_footprint) {
			/* more footprint */
			ledger_credit(owner->ledger,
			    task_ledgers.phys_footprint,
			    ptoa_64(resident_page_count
			    + compressed_page_count
			    - wired_page_count));
		}
	} else if (old_state == VM_PURGABLE_NONVOLATILE) {
		/* less non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
		    ledger_idx_nonvolatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* less compressed non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
		    ledger_idx_nonvolatile_compressed,
		    ptoa_64(compressed_page_count));
		if (do_footprint) {
			/* less footprint */
			ledger_debit(owner->ledger,
			    task_ledgers.phys_footprint,
			    ptoa_64(resident_page_count
			    + compressed_page_count
			    - wired_page_count));
		}

		/* more volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_volatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* more compressed volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_volatile_compressed,
		    ptoa_64(compressed_page_count));
	} else {
		panic("vm_purgeable_accounting(%p): "
		    "unexpected old_state=%d\n",
		    object, old_state);
	}

	vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_nonvolatile_owner_update(
	task_t  owner,
	int     delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_nonvolatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects > 0);
	} else {
		assert(owner->task_nonvolatile_objects > delta);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects >= 0);
	}
}
void
vm_purgeable_volatile_owner_update(
	task_t  owner,
	int     delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_volatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects > 0);
	} else {
		assert(owner->task_volatile_objects > delta);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects >= 0);
	}
}
void
vm_object_owner_compressed_update(
	vm_object_t     object,
	int             delta)
{
	task_t          owner;
	int             ledger_idx_volatile;
	int             ledger_idx_nonvolatile;
	int             ledger_idx_volatile_compressed;
	int             ledger_idx_nonvolatile_compressed;
	boolean_t       do_footprint;

	vm_object_lock_assert_exclusive(object);

	owner = VM_OBJECT_OWNER(object);

	if (delta == 0 ||
	    !object->internal ||
	    (object->purgable == VM_PURGABLE_DENY &&
	    !object->vo_ledger_tag) ||
	    owner == NULL) {
		/* not an owned purgeable (or tagged) VM object: nothing to update */
		return;
	}

	vm_object_ledger_tag_ledgers(object,
	    &ledger_idx_volatile,
	    &ledger_idx_nonvolatile,
	    &ledger_idx_volatile_compressed,
	    &ledger_idx_nonvolatile_compressed,
	    &do_footprint);
	switch (object->purgable) {
	case VM_PURGABLE_DENY:
		/* not purgeable: must be ledger-tagged */
		assert(object->vo_ledger_tag != VM_OBJECT_LEDGER_TAG_NONE);
	/* fallthru */
	case VM_PURGABLE_NONVOLATILE:
		if (delta > 0) {
			ledger_credit(owner->ledger,
			    ledger_idx_nonvolatile_compressed,
			    ptoa_64(delta));
			if (do_footprint) {
				ledger_credit(owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(delta));
			}
		} else {
			ledger_debit(owner->ledger,
			    ledger_idx_nonvolatile_compressed,
			    ptoa_64(-delta));
			if (do_footprint) {
				ledger_debit(owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(-delta));
			}
		}
		break;
	case VM_PURGABLE_VOLATILE:
	case VM_PURGABLE_EMPTY:
		if (delta > 0) {
			ledger_credit(owner->ledger,
			    ledger_idx_volatile_compressed,
			    ptoa_64(delta));
		} else {
			ledger_debit(owner->ledger,
			    ledger_idx_volatile_compressed,
			    ptoa_64(-delta));
		}
		break;
	default:
		panic("vm_purgeable_compressed_update(): "
		    "unexpected purgable %d for object %p\n",
		    object->purgable, object);
	}
	return;
}