/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/sched_prim.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>		/* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>
/*
 * LOCK ORDERING for task-owned purgeable objects
 *
 * Whenever we need to hold multiple locks while adding to, removing from,
 * or scanning a task's task_objq list of VM objects it owns, locks should
 * be taken in this order:
 *
 * VM object ==> vm_purgeable_queue_lock ==> owner_task->task_objq_lock
 *
 * If one needs to acquire the VM object lock after any of the other 2 locks,
 * one needs to use vm_object_lock_try() and, if that fails, release the
 * other locks and retake them all in the correct order.
 */
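/*
 * Illustrative sketch (not part of the original source): the try-lock /
 * back-off pattern described above, assuming a caller that already holds
 * vm_purgeable_queue_lock and now needs the VM object lock as well.
 *
 *	while (!vm_object_lock_try(object)) {
 *		lck_mtx_unlock(&vm_purgeable_queue_lock);
 *		mutex_pause(collisions++);		// brief back-off
 *		lck_mtx_lock(&vm_purgeable_queue_lock);
 *		// re-validate the object after reacquiring the locks
 *	}
 *
 * Functions below such as vm_purgeable_object_find_and_lock() and
 * vm_purgeable_disown() follow this pattern: when vm_object_lock_try()
 * fails they drop the other locks, pause, and retry from the top.
 */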
extern vm_pressure_level_t memorystatus_vm_pressure_level;
token_idx_t	token_q_max_cnt = 0;
vm_size_t	token_q_cur_size = 0;

token_idx_t	token_free_idx = 0;		/* head of free queue */
token_idx_t	token_init_idx = 1;		/* token 0 is reserved!! */
int32_t		token_new_pagecount = 0;	/* count of pages that will
						 * be added onto token queue */

int		available_for_purge = 0;	/* increase when ripe token
						 * added, decrease when ripe
						 * token removed.
						 * protected by page_queue_lock
						 */

static int	token_q_allocating = 0;		/* flag for singlethreading
						 * allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t	purgeable_nonvolatile_queue;
int		purgeable_nonvolatile_count;

decl_lck_mtx_data(,vm_purgeable_queue_lock)
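/*
 * Summary (added for clarity): purgeable tokens live in the global "tokens"
 * array and are addressed by index; index 0 is reserved as a NULL-like
 * sentinel.  Each token carries a page count; a token whose count has been
 * decremented to 0 is "ripe", meaning one volatile object on that queue may
 * now be purged.  available_for_purge tracks the number of ripe tokens
 * across the queues and is protected by the page queue lock.
 */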
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);

void vm_purgeable_nonvolatile_owner_update(task_t owner,
					   int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
					int delta);
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
	int		token_cnt = 0, page_cnt = 0;
	token_idx_t	token = queue->token_q_head;
	token_idx_t	unripe = 0;
	int		our_inactive_count;

#if DEVELOPMENT
	static unsigned lightweight_check = 0;

	/*
	 * Due to performance impact, only perform this check
	 * every 100 times on DEVELOPMENT kernels.
	 */
	if (lightweight_check++ < 100) {
		return;
	}

	lightweight_check = 0;
#endif

	while (token) {
		if (tokens[token].count != 0) {
			assert(queue->token_q_unripe);
			if (unripe == 0) {
				assert(token == queue->token_q_unripe);
				unripe = token;
			}
		}
		page_cnt += tokens[token].count;

		if (tokens[token].next == 0)
			assert(queue->token_q_tail == token);

		token_cnt++;
		token = tokens[token].next;
	}

	assert(queue->token_q_unripe == unripe);
	assert(token_cnt == queue->debug_count_tokens);

	/* obsolete queue doesn't maintain token counts */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
		our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
		assert(our_inactive_count >= 0);
		assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
	}
}
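/*
 * Note (added for clarity): the check above encodes the central accounting
 * invariant of the token queues.  For every queue other than the obsolete
 * one, the pages represented by its tokens (page_cnt), plus the pages not
 * yet attached to a token (queue->new_pages and the global
 * token_new_pagecount), must add up to vm_page_inactive_count minus
 * vm_page_cleaned_count.
 */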
/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/* new token */
	token_idx_t	token;
	enum purgeable_q_type i;

find_available_token:

	if (token_free_idx) {				/* unused tokens available */
		token = token_free_idx;
		token_free_idx = tokens[token_free_idx].next;
	} else if (token_init_idx < token_q_max_cnt) {	/* lazy token array init */
		token = token_init_idx;
		token_init_idx++;
	} else {					/* allocate more memory */
		/* Wait if another thread is inside the memory alloc section */
		while (token_q_allocating) {
			wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
							  LCK_SLEEP_DEFAULT,
							  (event_t)&token_q_allocating,
							  THREAD_UNINT);
			if (res != THREAD_AWAKENED)
				return KERN_ABORTED;
		}

		/* Check whether memory is still maxed out */
		if (token_init_idx < token_q_max_cnt)
			goto find_available_token;

		/* Still no memory. Allocate some. */
		token_q_allocating = 1;

		/* Drop page queue lock so we can allocate */
		vm_page_unlock_queues();

		struct token	*new_loc;
		vm_size_t	alloc_size = token_q_cur_size + PAGE_SIZE;
		kern_return_t	result;

		if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
			result = KERN_RESOURCE_SHORTAGE;
		} else {
			if (token_q_cur_size) {
				result = kmem_realloc(kernel_map,
						      (vm_offset_t) tokens,
						      token_q_cur_size,
						      (vm_offset_t *) &new_loc,
						      alloc_size, VM_KERN_MEMORY_OSFMK);
			} else {
				result = kmem_alloc(kernel_map,
						    (vm_offset_t *) &new_loc,
						    alloc_size, VM_KERN_MEMORY_OSFMK);
			}
		}

		vm_page_lock_queues();

		if (result) {
			/* Unblock waiting threads */
			token_q_allocating = 0;
			thread_wakeup((event_t)&token_q_allocating);
			return result;
		}

		/* If we get here, we allocated new memory. Update pointers and
		 * dealloc old range */
		struct token *old_tokens = tokens;
		tokens = new_loc;
		vm_size_t old_token_q_cur_size = token_q_cur_size;
		token_q_cur_size = alloc_size;
		token_q_max_cnt = (token_idx_t) (token_q_cur_size /
						 sizeof(struct token));
		assert(token_init_idx < token_q_max_cnt);	/* We must have a free token now */

		if (old_token_q_cur_size) {	/* clean up old mapping */
			vm_page_unlock_queues();
			/* kmem_realloc leaves the old region mapped. Get rid of it. */
			kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
			vm_page_lock_queues();
		}

		/* Unblock waiting threads */
		token_q_allocating = 0;
		thread_wakeup((event_t)&token_q_allocating);

		goto find_available_token;
	}

	/*
	 * the new pagecount we got needs to be applied to all queues except
	 * the obsolete queue
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
		assert(pages >= 0);
		assert(pages <= TOKEN_COUNT_MAX);
		purgeable_queues[i].new_pages = (int32_t) pages;
		assert(purgeable_queues[i].new_pages == pages);
	}
	token_new_pagecount = 0;

	/* set token counter value */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
		tokens[token].count = queue->new_pages;
	else
		tokens[token].count = 0;	/* all obsolete items are
						 * ripe immediately */
	queue->new_pages = 0;

	/* put token on token counter list */
	tokens[token].next = 0;
	if (queue->token_q_tail == 0) {
		assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
		queue->token_q_head = token;
		tokens[token].prev = 0;
	} else {
		tokens[queue->token_q_tail].next = token;
		tokens[token].prev = queue->token_q_tail;
	}
	if (queue->token_q_unripe == 0) {	/* only ripe tokens (token
						 * count == 0) in queue */
		if (tokens[token].count > 0)
			queue->token_q_unripe = token;	/* first unripe token */
		else
			available_for_purge++;	/* added a ripe token?
						 * increase available count */
	}
	queue->token_q_tail = token;

	queue->debug_count_tokens++;
	/* Check both queues, since we modified the new_pages count on each */
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
			      queue->type,
			      tokens[token].count,	/* num pages on token */
			      queue->debug_count_tokens,
			      0,
			      0);

	return KERN_SUCCESS;
}
/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t	token;
	token = queue->token_q_head;

	assert(token);

	if (token) {
		assert(queue->token_q_tail);
		if (queue->token_q_head == queue->token_q_unripe) {
			/* no ripe tokens... must move unripe pointer */
			queue->token_q_unripe = tokens[token].next;
		} else {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		}

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		queue->token_q_head = tokens[token].next;
		if (queue->token_q_head) {
			tokens[queue->token_q_head].count += tokens[token].count;
			tokens[queue->token_q_head].prev = 0;
		} else {
			/* currently no other tokens in the queue */
			/*
			 * the page count must be added to the next newly
			 * created token
			 */
			queue->new_pages += tokens[token].count;
			/* if head is zero, tail is too */
			queue->token_q_tail = 0;
		}

		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
	}

	return token;
}
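/*
 * Worked example (added for clarity): if a queue holds tokens with counts
 * [3, 2, 0] and the first token is removed, its count of 3 is folded into
 * the next token, leaving [5, 0]; the pages it represented are still
 * accounted for.  If it was the only token, its count is folded into
 * queue->new_pages instead, so the next token created on this queue
 * inherits it.
 */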
static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t	token;
	token = queue->token_q_tail;

	assert(token);

	if (token) {
		assert(queue->token_q_head);

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		if (queue->token_q_unripe == 0) {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		} else if (queue->token_q_unripe == token) {
			/* we're removing the only unripe token */
			queue->token_q_unripe = 0;
		}

		if (token == queue->token_q_head) {
			/* token is the last one in the queue */
			queue->token_q_head = 0;
			queue->token_q_tail = 0;
		} else {
			token_idx_t new_tail;

			new_tail = tokens[token].prev;

			assert(tokens[new_tail].next == token);

			queue->token_q_tail = new_tail;
			tokens[new_tail].next = 0;
		}

		queue->new_pages += tokens[token].count;

		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
	}

	return token;
}
/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	token_idx_t token = vm_purgeable_token_remove_first(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		tokens[token].prev = 0;
		token_free_idx = token;
	}
}

void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	token_idx_t token = vm_purgeable_token_remove_last(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		tokens[token].prev = 0;
		token_free_idx = token;
	}
}
/* Call with page queue locked. */
void
vm_purgeable_q_advance_all()
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	enum purgeable_q_type i;

	/* check queue counters - if they get really large, scale them back.
	 * They tend to get that large when there is no purgeable queue action */
	if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {	/* a system idling for years might get there */
		for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
			int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
			assert(pages >= 0);
			assert(pages <= TOKEN_COUNT_MAX);
			purgeable_queues[i].new_pages = (int32_t) pages;
			assert(purgeable_queues[i].new_pages == pages);
		}
		token_new_pagecount = 0;
	}

	/*
	 * Decrement token counters. A token counter can be zero, this means the
	 * object is ripe to be purged. It is not purged immediately, because that
	 * could cause several objects to be purged even if purging one would satisfy
	 * the memory needs. Instead, the pageout thread purges one after the other
	 * by calling vm_purgeable_object_purge_one and then rechecking the memory
	 * balance.
	 *
	 * No need to advance obsolete queue - all items are ripe there,
	 * always.
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t	queue = &purgeable_queues[i];
		uint32_t	num_pages = 1;

		/* Iterate over tokens as long as there are unripe tokens. */
		while (queue->token_q_unripe) {
			if (tokens[queue->token_q_unripe].count && num_pages) {
				tokens[queue->token_q_unripe].count -= 1;
				num_pages -= 1;
			}

			if (tokens[queue->token_q_unripe].count == 0) {
				queue->token_q_unripe = tokens[queue->token_q_unripe].next;
				available_for_purge++;
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
						      queue->type,
						      tokens[queue->token_q_head].count,	/* num pages on new
												 * first token */
						      0,
						      available_for_purge,
						      0);
				continue;	/* One token ripened. Make sure to
						 * check the next. */
			}
			if (num_pages == 0)
				break;	/* Current token not ripe and no more pages.
					 * Work done. */
		}

		/*
		 * if there are no unripe tokens in the queue, decrement the
		 * new_pages counter instead. new_pages can be negative, but must be
		 * canceled out by token_new_pagecount -- since the inactive queue as a
		 * whole always contains a nonnegative number of pages
		 */
		if (!queue->token_q_unripe) {
			queue->new_pages -= num_pages;
			assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
		}

		vm_purgeable_token_check_queue(queue);
	}
}
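/*
 * Example (added for clarity): with num_pages = 1, each pass of
 * vm_purgeable_q_advance_all() charges one page against the first unripe
 * token of each FIFO/LIFO queue.  A token that was created when
 * queue->new_pages was 50 therefore ripens after roughly 50 such passes, at
 * which point available_for_purge is incremented and the pageout thread may
 * purge one volatile object from that queue.
 */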
/*
 * Grab any ripe object and purge it: obsolete queue first. Then, go through
 * each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *	Yes - purge it. Remove token.
 *	No - check other queue. Is there an object?
 *		No - increment group, then go to (1)
 *		Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *		      token from other queue and migrate unripe token from this
 *		      queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
	/* return token to free list. advance token list. */
	token_idx_t new_head = tokens[queue->token_q_head].next;
	tokens[queue->token_q_head].next = token_free_idx;
	tokens[queue->token_q_head].prev = 0;
	token_free_idx = queue->token_q_head;
	queue->token_q_head = new_head;
	tokens[new_head].prev = 0;
	if (new_head == 0)
		queue->token_q_tail = 0;

	queue->debug_count_tokens--;
	vm_purgeable_token_check_queue(queue);

	available_for_purge--;
	assert(available_for_purge >= 0);
}
/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(queue->token_q_head);

	if (tokens[queue->token_q_head].count == 0) {
		/* This queue has a ripe token. Remove. */
		vm_purgeable_token_remove_ripe(queue);
	} else {
		/*
		 * queue2 must have a ripe token. Remove, and migrate one
		 * from queue to queue2.
		 */
		vm_purgeable_token_remove_ripe(queue2);
		/* migrate unripe token */
		token_idx_t	token;
		token_cnt_t	count;

		/* remove token from queue1 */
		assert(queue->token_q_unripe == queue->token_q_head);	/* queue1 had no unripe
									 * tokens, remember? */
		token = vm_purgeable_token_remove_first(queue);

		count = tokens[token].count;

		/* migrate to queue2 */
		/* go to migration target loc */

		token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;

		while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
			count -= tokens[token_to_insert_before].count;
			token_to_insert_before = tokens[token_to_insert_before].next;
		}

		/* token_to_insert_before is now set correctly */

		/* should the inserted token become the first unripe token? */
		if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
			queue2->token_q_unripe = token;	/* if so, must update unripe pointer */

		/*
		 * insert token:
		 * if inserting at end, reduce new_pages by that value;
		 * otherwise, reduce counter of next token
		 */

		tokens[token].count = count;

		if (token_to_insert_before != 0) {
			token_to_insert_after = tokens[token_to_insert_before].prev;

			tokens[token].next = token_to_insert_before;
			tokens[token_to_insert_before].prev = token;

			assert(tokens[token_to_insert_before].count >= count);
			tokens[token_to_insert_before].count -= count;
		} else {
			/* if we ran off the end of the list, the token to insert after is the tail */
			token_to_insert_after = queue2->token_q_tail;

			tokens[token].next = 0;
			queue2->token_q_tail = token;

			assert(queue2->new_pages >= (int32_t) count);
			queue2->new_pages -= count;
		}

		if (token_to_insert_after != 0) {
			tokens[token].prev = token_to_insert_after;
			tokens[token_to_insert_after].next = token;
		} else {
			/* is this case possible? */
			tokens[token].prev = 0;
			queue2->token_q_head = token;
		}

		queue2->debug_count_tokens++;
		vm_purgeable_token_check_queue(queue2);
	}
}
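/*
 * Migration example (added for clarity): suppose the FIFO queue has no ripe
 * token but its head token carries 10 pages, and the LIFO queue has a ripe
 * token.  The ripe LIFO token is deleted, the 10-page FIFO token is removed,
 * and it is re-inserted into the LIFO queue at the position matching its
 * count: the walk above subtracts the counts of tokens it passes, the
 * migrated token keeps the remainder, and the next token (or new_pages, when
 * inserting at the tail) is reduced by that remainder so cumulative counts
 * are preserved.
 */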
/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(
	purgeable_q_t	queue,
	int		group,
	boolean_t	pick_ripe)
{
	vm_object_t	object, best_object;
	int		object_task_importance;
	int		best_object_task_importance;
	int		best_object_skipped;
	int		num_objects_skipped;
	int		try_lock_failed = 0;
	int		try_lock_succeeded = 0;
	task_t		owner;

	best_object = VM_OBJECT_NULL;
	best_object_task_importance = INT_MAX;

	LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
	/*
	 * Usually we would pick the first element from a queue. However, we
	 * might not be able to get a lock on it, in which case we try the
	 * remaining elements in order.
	 */

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
				  pick_ripe,
				  group,
				  VM_KERNEL_UNSLIDE_OR_PERM(queue),
				  0,
				  0);

	num_objects_skipped = 0;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq),
	     num_objects_skipped++) {

		/*
		 * To prevent us looping for an excessively long time, choose
		 * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
		 * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
		 * we keep going until we find the first eligible object.
		 */
		if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
			break;
		}

		if (pick_ripe &&
		    !object->purgeable_when_ripe) {
			/* we want an object that has a ripe token */
			continue;
		}

		object_task_importance = 0;

		owner = object->vo_purgeable_owner;
		if (owner) {
#if CONFIG_EMBEDDED
#if CONFIG_JETSAM
			object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE);
#endif /* CONFIG_JETSAM */
#else /* CONFIG_EMBEDDED */
			object_task_importance = task_importance_estimate(owner);
#endif /* CONFIG_EMBEDDED */
		}

		if (object_task_importance < best_object_task_importance) {
			if (vm_object_lock_try(object)) {
				try_lock_succeeded++;
				if (best_object != VM_OBJECT_NULL) {
					/* forget about previous best object */
					vm_object_unlock(best_object);
				}
				best_object = object;
				best_object_task_importance = object_task_importance;
				best_object_skipped = num_objects_skipped;
				if (best_object_task_importance == 0) {
					/* can't get any better: stop looking */
					break;
				}
			} else {
				try_lock_failed++;
			}
		}
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
				  num_objects_skipped, /* considered objects */
				  try_lock_failed,
				  try_lock_succeeded,
				  VM_KERNEL_UNSLIDE_OR_PERM(best_object),
				  ((best_object == NULL) ? 0 : best_object->resident_page_count));

	object = best_object;

	if (object == VM_OBJECT_NULL) {
		return VM_OBJECT_NULL;
	}

	/* Locked. Great. We'll take it. Remove and return. */
//	printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

	vm_object_lock_assert_exclusive(object);

	queue_remove(&queue->objq[group], object,
		     vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	object->purgeable_queue_group = 0;
	/* one less volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);

	object->vo_purgeable_volatilizer = NULL;

	/* keep queue of non-volatile objects */
	queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	/* one more nonvolatile object for this object's owner */
	vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, +1);

	queue->debug_count_objects--;
	return object;
}
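/*
 * Note (added for clarity): the loop above prefers the volatile object whose
 * owning task has the lowest importance (memstat priority on jetsam-enabled
 * embedded configurations, task_importance_estimate() otherwise).  It stops
 * early once it has examined PURGEABLE_LOOP_MAX candidates and holds at
 * least one lockable object, or immediately when it locks an object whose
 * owner's importance is 0.
 */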
/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
	enum purgeable_q_type i;
	int		group;
	vm_object_t	object;
	unsigned int	purged_count;
	uint32_t	collisions;

	purged_count = 0;
	collisions = 0;

restart:
	lck_mtx_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t	queue;

		queue = &purgeable_queues[i];

		/*
		 * Look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			while (!queue_empty(&queue->objq[group])) {
				object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
				if (object == VM_OBJECT_NULL) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					mutex_pause(collisions++);
					goto restart;
				}

				lck_mtx_unlock(&vm_purgeable_queue_lock);

				/* Lock the page queue here so we don't hold it
				 * over the whole, lengthy operation */
				if (object->purgeable_when_ripe) {
					vm_page_lock_queues();
					vm_purgeable_token_remove_first(queue);
					vm_page_unlock_queues();
				}

				(void) vm_object_purge(object, 0);
				assert(object->purgable == VM_PURGABLE_EMPTY);
				/* no change in purgeable accounting */

				vm_object_unlock(object);
				purged_count++;
				goto restart;
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
			      purged_count, /* # of purged objects */
			      0,
			      available_for_purge,
			      0,
			      0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return;
}
boolean_t
vm_purgeable_object_purge_one_unlocked(
	int	force_purge_below_group)
{
	boolean_t	retval;

	vm_page_lock_queues();
	retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
	vm_page_unlock_queues();

	return retval;
}
boolean_t
vm_purgeable_object_purge_one(
	int	force_purge_below_group,
	int	flags)
{
	enum purgeable_q_type i;
	int		group;
	vm_object_t	object = 0;
	purgeable_q_t	queue, queue2;
	boolean_t	forced_purge;

	/* Need the page queue lock since we'll be changing the token queue. */
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		queue = &purgeable_queues[i];

		if (force_purge_below_group == 0) {
			/*
			 * Are there any ripe tokens on this queue? If yes,
			 * we'll find an object to purge there
			 */
			if (!queue->token_q_head) {
				/* no token: look at next purgeable queue */
				continue;
			}
			if (tokens[queue->token_q_head].count != 0) {
				/* no ripe token: next queue */
				continue;
			}
		}

		/*
		 * Now look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			if (!queue->token_q_head ||
			    tokens[queue->token_q_head].count != 0) {
				/* no tokens or no ripe tokens */

				if (group >= force_purge_below_group) {
					/* no more groups to force-purge */
					break;
				}

				/*
				 * Try and purge an object in this group
				 * even though no tokens are ripe.
				 */
				if (!queue_empty(&queue->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					if (object->purgeable_when_ripe) {
						vm_purgeable_token_delete_first(queue);
					}
					forced_purge = TRUE;
					goto purge_now;
				}

				/* nothing to purge in this group: next group */
				continue;
			}
			if (!queue_empty(&queue->objq[group]) &&
			    (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				if (object->purgeable_when_ripe) {
					vm_purgeable_token_choose_and_delete_ripe(queue, 0);
				}
				forced_purge = FALSE;
				goto purge_now;
			}
			if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
				/* This is the token migration case, and it works between
				 * FIFO and LIFO only */
				queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
							   PURGEABLE_Q_TYPE_FIFO :
							   PURGEABLE_Q_TYPE_LIFO];

				if (!queue_empty(&queue2->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					if (object->purgeable_when_ripe) {
						vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
					}
					forced_purge = FALSE;
					goto purge_now;
				}
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	/*
	 * because we have to do a try_lock on the objects which could fail,
	 * we could end up with no object to purge at this time, even though
	 * we have objects in a purgeable state
	 */
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return FALSE;

purge_now:

	vm_page_unlock_queues();	/* Unlock for call to vm_object_purge() */
//	printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_purgeable_owner, task_importance_estimate(object->vo_purgeable_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
	(void) vm_object_purge(object, flags);
	assert(object->purgable == VM_PURGABLE_EMPTY);
	/* no change in purgeable accounting */
	vm_object_unlock(object);
	vm_page_lock_queues();

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
			      VM_KERNEL_UNSLIDE_OR_PERM(object),	/* purged object */
			      0,
			      available_for_purge,
			      0,
			      0);

	return TRUE;
}
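/*
 * Note (added for clarity): force_purge_below_group == 0 is the normal
 * token-driven mode, where an object is purged only when its queue has a
 * ripe token.  A non-zero value lets the caller purge objects in volatile
 * groups below that threshold even when no token is ripe; such purges are
 * flagged as "forced" above.
 */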
/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
	vm_object_lock_assert_exclusive(object);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
		     vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	/* one less nonvolatile object for this object's owner */
	vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, -1);

	if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
		group = 0;

	if (queue->type != PURGEABLE_Q_TYPE_LIFO)	/* fifo and obsolete are
							 * fifo-queued */
		queue_enter(&queue->objq[group], object, vm_object_t, objq);	/* last to die */
	else
		queue_enter_first(&queue->objq[group], object, vm_object_t, objq);	/* first to die */
	/* one more volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, +1);

	object->purgeable_queue_type = queue->type;
	object->purgeable_queue_group = group;

	assert(object->vo_purgeable_volatilizer == NULL);
	object->vo_purgeable_volatilizer = current_task();
	OSBacktrace(&object->purgeable_volatilizer_bt[0], 16);

	queue->debug_count_objects++;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);

	lck_mtx_unlock(&vm_purgeable_queue_lock);
}
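/*
 * Note (added for clarity): FIFO and obsolete queues append new volatile
 * objects at the tail (queue_enter(), "last to die"), while the LIFO queue
 * inserts them at the head (queue_enter_first(), "first to die"), so on a
 * LIFO queue the most recently volatilized object is the first candidate
 * for purging.
 */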
/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
	int group;
	enum purgeable_q_type type;
	purgeable_q_t queue;

	vm_object_lock_assert_exclusive(object);

	type = object->purgeable_queue_type;
	group = object->purgeable_queue_group;

	if (type == PURGEABLE_Q_TYPE_MAX) {
		if (object->objq.prev || object->objq.next)
			panic("unmarked object on purgeable q");

		return NULL;
	} else if (!(object->objq.prev && object->objq.next))
		panic("marked object not on purgeable q");

	lck_mtx_lock(&vm_purgeable_queue_lock);

	queue = &purgeable_queues[type];

	queue_remove(&queue->objq[group], object, vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	/* one less volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);

	object->vo_purgeable_volatilizer = NULL;

	/* keep queue of non-volatile objects */
	if (object->alive && !object->terminating) {
		task_t	owner;
		queue_enter(&purgeable_nonvolatile_queue, object,
			    vm_object_t, objq);
		assert(purgeable_nonvolatile_count >= 0);
		purgeable_nonvolatile_count++;
		assert(purgeable_nonvolatile_count > 0);
		/* one more nonvolatile object for this object's owner */
		owner = object->vo_purgeable_owner;
		vm_purgeable_nonvolatile_owner_update(owner, +1);
	}

	queue->debug_count_objects--;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	object->purgeable_queue_group = 0;

	vm_object_lock_assert_exclusive(object);

	return &purgeable_queues[type];
}
static void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
	LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

	stat->count = stat->size = 0;
	vm_object_t	object;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (!target_task || object->vo_purgeable_owner == target_task) {
			stat->count++;
			stat->size += (object->resident_page_count * PAGE_SIZE);
		}
	}
	return;
}
void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
	purgeable_q_t	queue;
	int		group;

	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Populate fifo_data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);

	/* Populate lifo_data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);

	/* Populate obsolete data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return;
}
#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
	purgeable_q_t queue,
	int group,
	task_t task,
	pvm_account_info_t acnt_info)
{
	vm_object_t object;
	uint64_t compressed_count;

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (object->vo_purgeable_owner == task) {
			compressed_count = vm_compressor_pager_get_count(object->pager);
			acnt_info->pvm_volatile_compressed_count += compressed_count;
			acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
			acnt_info->pvm_nonvolatile_count += object->wired_page_count;
		}
	}
}
/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
	task_t			task,
	pvm_account_info_t	acnt_info)
{
	queue_head_t	*nonvolatile_q;
	vm_object_t	object;
	int		group;
	int		state;
	uint64_t	compressed_count;
	purgeable_q_t	volatile_q;

	if ((task == NULL) || (acnt_info == NULL)) {
		return KERN_INVALID_ARGUMENT;
	}

	acnt_info->pvm_volatile_count = 0;
	acnt_info->pvm_volatile_compressed_count = 0;
	acnt_info->pvm_nonvolatile_count = 0;
	acnt_info->pvm_nonvolatile_compressed_count = 0;

	lck_mtx_lock(&vm_purgeable_queue_lock);

	nonvolatile_q = &purgeable_nonvolatile_queue;
	for (object = (vm_object_t) queue_first(nonvolatile_q);
	     !queue_end(nonvolatile_q, (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (object->vo_purgeable_owner == task) {
			state = object->purgable;
			compressed_count = vm_compressor_pager_get_count(object->pager);
			if (state == VM_PURGABLE_EMPTY) {
				acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
				acnt_info->pvm_volatile_compressed_count += compressed_count;
			} else {
				acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
				acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
			}
			acnt_info->pvm_nonvolatile_count += object->wired_page_count;
		}
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
	}
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
	acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
	acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
	acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

	return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */
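/*
 * Usage sketch (added for clarity; DEVELOPMENT/DEBUG kernels only, and
 * assuming pvm_account_info_t points at a caller-provided structure with
 * the four pvm_* fields used above):
 *
 *	if (vm_purgeable_account(task, acnt_info) == KERN_SUCCESS) {
 *		// all four pvm_* counters are now expressed in bytes,
 *		// since the page counts were multiplied by PAGE_SIZE above
 *	}
 */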
void
vm_purgeable_disown(
	task_t	task)
{
	vm_object_t	next_object;
	vm_object_t	object;
	int		collisions;

	collisions = 0;

	/*
	 * Scan the purgeable objects queues for objects owned by "task".
	 * This has to be done "atomically" under the "vm_purgeable_queue"
	 * lock, to ensure that no new purgeable object gets associated
	 * with this task or moved between queues while we're scanning.
	 */

	/*
	 * Scan non-volatile queue for objects owned by "task".
	 */

again:
	if (task->task_purgeable_disowned) {
		/* task has already disowned its purgeable memory */
		assert(task->task_volatile_objects == 0);
		assert(task->task_nonvolatile_objects == 0);
		return;
	}

	lck_mtx_lock(&vm_purgeable_queue_lock);
	task_objq_lock(task);

	task->task_purgeable_disowning = TRUE;

	for (object = (vm_object_t) queue_first(&task->task_objq);
	     !queue_end(&task->task_objq, (queue_entry_t) object);
	     object = next_object) {
		if (task->task_nonvolatile_objects == 0 &&
		    task->task_volatile_objects == 0) {
			/* no more purgeable objects owned by "task" */
			break;
		}

		next_object = (vm_object_t) queue_next(&object->task_objq);
		if (object->purgable == VM_PURGABLE_DENY) {
			/* not a purgeable object: skip */
			continue;
		}

		assert(object->vo_purgeable_volatilizer == NULL);
		assert(object->vo_purgeable_owner == task);
		if (!vm_object_lock_try(object)) {
			lck_mtx_unlock(&vm_purgeable_queue_lock);
			task_objq_unlock(task);
			mutex_pause(collisions++);
			goto again;
		}
		vm_purgeable_accounting(object,
					object->purgable,
					TRUE,	/* disown */
					TRUE);	/* task_objq_lock is locked */
		assert(object->vo_purgeable_owner == NULL);
		vm_object_unlock(object);
	}

	if (__improbable(task->task_volatile_objects != 0 ||
			 task->task_nonvolatile_objects != 0)) {
		panic("%s(%p): volatile=%d nonvolatile=%d q=%p q_first=%p q_last=%p",
		      __FUNCTION__,
		      task,
		      task->task_volatile_objects,
		      task->task_nonvolatile_objects,
		      &task->task_objq,
		      queue_first(&task->task_objq),
		      queue_last(&task->task_objq));
	}

	/* there shouldn't be any purgeable objects owned by task now */
	assert(task->task_volatile_objects == 0);
	assert(task->task_nonvolatile_objects == 0);
	assert(task->task_purgeable_disowning);

	/* and we don't need to try and disown again */
	task->task_purgeable_disowned = TRUE;

	lck_mtx_unlock(&vm_purgeable_queue_lock);
	task_objq_unlock(task);
}
static uint64_t
vm_purgeable_queue_purge_task_owned(
	purgeable_q_t	queue,
	int		group,
	task_t		task)
{
	vm_object_t	object = VM_OBJECT_NULL;
	int		collisions = 0;
	uint64_t	num_pages_purged = 0;

	num_pages_purged = 0;
	collisions = 0;

look_again:
	lck_mtx_lock(&vm_purgeable_queue_lock);

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {

		if (object->vo_purgeable_owner != task) {
			continue;
		}

		/* found an object: try and grab it */
		if (!vm_object_lock_try(object)) {
			lck_mtx_unlock(&vm_purgeable_queue_lock);
			mutex_pause(collisions++);
			goto look_again;
		}

		/* remove object from purgeable queue */
		queue_remove(&queue->objq[group], object,
			     vm_object_t, objq);
		object->objq.next = NULL;
		object->objq.prev = NULL;
		object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
		object->purgeable_queue_group = 0;
		/* one less volatile object for this object's owner */
		assert(object->vo_purgeable_owner == task);
		vm_purgeable_volatile_owner_update(task, -1);

		object->vo_purgeable_volatilizer = NULL;

		queue_enter(&purgeable_nonvolatile_queue, object,
			    vm_object_t, objq);
		assert(purgeable_nonvolatile_count >= 0);
		purgeable_nonvolatile_count++;
		assert(purgeable_nonvolatile_count > 0);
		/* one more nonvolatile object for this object's owner */
		assert(object->vo_purgeable_owner == task);
		vm_purgeable_nonvolatile_owner_update(task, +1);

		/* unlock purgeable queues */
		lck_mtx_unlock(&vm_purgeable_queue_lock);

		if (object->purgeable_when_ripe) {
			/* remove a token */
			vm_page_lock_queues();
			vm_purgeable_token_remove_first(queue);
			vm_page_unlock_queues();
		}

		/* purge the object */
		num_pages_purged += vm_object_purge(object, 0);

		assert(object->purgable == VM_PURGABLE_EMPTY);
		/* no change for purgeable accounting */
		vm_object_unlock(object);

		/* we unlocked the purgeable queues, so start over */
		goto look_again;
	}

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	return num_pages_purged;
}
uint64_t
vm_purgeable_purge_task_owned(
	task_t	task)
{
	purgeable_q_t	queue = NULL;
	int		group = 0;
	uint64_t	num_pages_purged = 0;

	num_pages_purged = 0;

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
								0,
								task);

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
									group,
									task);

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
									group,
									task);

	return num_pages_purged;
}
void
vm_purgeable_nonvolatile_enqueue(
	vm_object_t	object,
	task_t		owner)
{
	int	page_count;

	vm_object_lock_assert_exclusive(object);

	assert(object->purgable == VM_PURGABLE_NONVOLATILE);
	assert(object->vo_purgeable_owner == NULL);

	lck_mtx_lock(&vm_purgeable_queue_lock);

	if (owner != NULL &&
	    owner->task_purgeable_disowning) {
		/* task is exiting and no longer tracking purgeable objects */
		owner = NULL;
	}

	object->vo_purgeable_owner = owner;

	object->vo_purgeable_volatilizer = NULL;

	if (owner != NULL) {
		task_objq_lock(owner);
		queue_enter(&owner->task_objq, object, vm_object_t, task_objq);
		task_objq_unlock(owner);
	}

	OSBacktrace(&object->purgeable_owner_bt[0], 16);

	page_count = object->resident_page_count;
	if (owner != NULL && page_count != 0) {
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile,
			      ptoa(page_count));
		ledger_credit(owner->ledger,
			      task_ledgers.phys_footprint,
			      ptoa(page_count));
	}

	assert(object->objq.next == NULL);
	assert(object->objq.prev == NULL);

	queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	/* one more nonvolatile object for this object's owner */
	assert(object->vo_purgeable_owner == owner);
	vm_purgeable_nonvolatile_owner_update(owner, +1);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}
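/*
 * Note (added for clarity): a non-volatile purgeable object is charged to
 * its owner's purgeable_nonvolatile and phys_footprint ledgers as soon as
 * it is enqueued here; vm_purgeable_accounting() later moves those bytes
 * between the non-volatile and volatile ledger entries as the object
 * changes state.
 */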
void
vm_purgeable_nonvolatile_dequeue(
	vm_object_t	object)
{
	task_t	owner;

	vm_object_lock_assert_exclusive(object);

	owner = object->vo_purgeable_owner;

	assert(object->vo_purgeable_volatilizer == NULL);

	if (owner != NULL) {
		/*
		 * Update the owner's ledger to stop accounting
		 * for this object.
		 */
		vm_purgeable_accounting(object,
					object->purgable,
					TRUE,	/* disown */
					FALSE);	/* is task_objq locked? */
	}

	lck_mtx_lock(&vm_purgeable_queue_lock);
	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
		     vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_accounting(
	vm_object_t	object,
	vm_purgable_t	old_state,
	boolean_t	disown,
	boolean_t	task_objq_locked)
{
	task_t		owner;
	int		resident_page_count;
	int		wired_page_count;
	int		compressed_page_count;
	boolean_t	disown_on_the_fly;

	vm_object_lock_assert_exclusive(object);

	owner = object->vo_purgeable_owner;
	if (owner == NULL)
		return;

	if (!disown && owner->task_purgeable_disowning) {
		/* task is disowning its purgeable objects: help it */
		disown_on_the_fly = TRUE;
	} else {
		disown_on_the_fly = FALSE;
	}

	resident_page_count = object->resident_page_count;
	wired_page_count = object->wired_page_count;
	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
	    object->pager != NULL) {
		compressed_page_count =
			vm_compressor_pager_get_count(object->pager);
	} else {
		compressed_page_count = 0;
	}

	if (old_state == VM_PURGABLE_VOLATILE ||
	    old_state == VM_PURGABLE_EMPTY) {
		/* less volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_volatile,
			     ptoa(resident_page_count - wired_page_count));
		/* less compressed volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_volatile_compressed,
			     ptoa(compressed_page_count));

		if (disown || !object->alive || object->terminating) {
			/* wired pages were accounted as "non-volatile"... */
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile,
				     ptoa(wired_page_count));
			/* ... and in phys_footprint */
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(wired_page_count));

			if (!disown_on_the_fly &&
			    (object->purgeable_queue_type ==
			     PURGEABLE_Q_TYPE_MAX)) {
				/*
				 * Not on a volatile queue: must be empty
				 */
				vm_purgeable_nonvolatile_owner_update(owner, -1);
			} else {
				/* on a volatile queue */
				vm_purgeable_volatile_owner_update(owner, -1);
			}
			/* no more accounting for this dead object */
			owner = object->vo_purgeable_owner;
			if (! task_objq_locked) {
				task_objq_lock(owner);
			}
			task_objq_lock_assert_owned(owner);
			queue_remove(&owner->task_objq, object, vm_object_t, task_objq);
			if (! task_objq_locked) {
				task_objq_unlock(owner);
			}
			object->vo_purgeable_owner = NULL;

			object->vo_purgeable_volatilizer = NULL;

			return;
		}

		/* more non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile,
			      ptoa(resident_page_count - wired_page_count));
		/* more compressed non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile_compressed,
			      ptoa(compressed_page_count));
		/* more footprint */
		ledger_credit(owner->ledger,
			      task_ledgers.phys_footprint,
			      ptoa(resident_page_count
				   + compressed_page_count
				   - wired_page_count));

	} else if (old_state == VM_PURGABLE_NONVOLATILE) {

		/* less non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_nonvolatile,
			     ptoa(resident_page_count - wired_page_count));
		/* less compressed non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_nonvolatile_compressed,
			     ptoa(compressed_page_count));
		/* less footprint */
		ledger_debit(owner->ledger,
			     task_ledgers.phys_footprint,
			     ptoa(resident_page_count
				  + compressed_page_count
				  - wired_page_count));

		if (disown || !object->alive || object->terminating) {
			/* wired pages still accounted as "non-volatile" */
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile,
				     ptoa(wired_page_count));
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(wired_page_count));

			/* one less "non-volatile" object for the owner */
			if (!disown_on_the_fly) {
				assert(object->purgeable_queue_type ==
				       PURGEABLE_Q_TYPE_MAX);
			}
			vm_purgeable_nonvolatile_owner_update(owner, -1);
			/* no more accounting for this dead object */
			if (! task_objq_locked) {
				task_objq_lock(owner);
			}
			task_objq_lock_assert_owned(owner);
			queue_remove(&owner->task_objq, object, vm_object_t, task_objq);
			if (! task_objq_locked) {
				task_objq_unlock(owner);
			}
			object->vo_purgeable_owner = NULL;

			object->vo_purgeable_volatilizer = NULL;

			return;
		}

		/* more volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_volatile,
			      ptoa(resident_page_count - wired_page_count));
		/* more compressed volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_volatile_compressed,
			      ptoa(compressed_page_count));
	} else {
		panic("vm_purgeable_accounting(%p): "
		      "unexpected old_state=%d\n",
		      object, old_state);
	}

	vm_object_lock_assert_exclusive(object);
}
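/*
 * Summary (added for clarity): vm_purgeable_accounting() moves an object's
 * pages between the owner's ledgers when its purgeable state changes.
 * Leaving VM_PURGABLE_VOLATILE/EMPTY debits the volatile entries and, unless
 * the object is being disowned or is dying, credits the non-volatile entries
 * and phys_footprint.  Leaving VM_PURGABLE_NONVOLATILE does the reverse.
 * Wired pages are always carried in the non-volatile ledger.
 */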
void
vm_purgeable_nonvolatile_owner_update(
	task_t	owner,
	int	delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_nonvolatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects > 0);
	} else {
		assert(owner->task_nonvolatile_objects > delta);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects >= 0);
	}
}

void
vm_purgeable_volatile_owner_update(
	task_t	owner,
	int	delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_volatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects > 0);
	} else {
		assert(owner->task_volatile_objects > delta);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects >= 0);
	}
}
void
vm_purgeable_compressed_update(
	vm_object_t	object,
	int		delta)
{
	task_t	owner;

	vm_object_lock_assert_exclusive(object);

	if (delta == 0 ||
	    !object->internal ||
	    object->purgable == VM_PURGABLE_DENY ||
	    object->vo_purgeable_owner == NULL) {
		/* not an owned purgeable VM object: nothing to update */
		return;
	}

	owner = object->vo_purgeable_owner;
	switch (object->purgable) {
	case VM_PURGABLE_DENY:
		break;
	case VM_PURGABLE_NONVOLATILE:
		if (delta > 0) {
			ledger_credit(owner->ledger,
				      task_ledgers.purgeable_nonvolatile_compressed,
				      ptoa(delta));
			ledger_credit(owner->ledger,
				      task_ledgers.phys_footprint,
				      ptoa(delta));
		} else {
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile_compressed,
				     ptoa(-delta));
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(-delta));
		}
		break;
	case VM_PURGABLE_VOLATILE:
	case VM_PURGABLE_EMPTY:
		if (delta > 0) {
			ledger_credit(owner->ledger,
				      task_ledgers.purgeable_volatile_compressed,
				      ptoa(delta));
		} else {
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_volatile_compressed,
				     ptoa(-delta));
		}
		break;
	default:
		panic("vm_purgeable_compressed_update(): "
		      "unexpected purgable %d for object %p\n",
		      object->purgable, object);
	}
	return;
}