/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/sched_prim.h>
#include <kern/ledger.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>         /* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>
extern vm_pressure_level_t memorystatus_vm_pressure_level;

token_idx_t token_q_max_cnt = 0;
vm_size_t   token_q_cur_size = 0;

token_idx_t token_free_idx = 0;         /* head of free queue */
token_idx_t token_init_idx = 1;         /* token 0 is reserved!! */
int32_t     token_new_pagecount = 0;    /* count of pages that will
                                         * be added onto token queue */

int         available_for_purge = 0;    /* increase when ripe token
                                         * added, decrease when ripe
                                         * token removed.
                                         * protected by page_queue_lock */

static int  token_q_allocating = 0;     /* flag for singlethreading
                                         * allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t purgeable_nonvolatile_queue;
int purgeable_nonvolatile_count;

decl_lck_mtx_data(,vm_purgeable_queue_lock)
#define TOKEN_ADD           0x40    /* 0x100 */
#define TOKEN_DELETE        0x41    /* 0x104 */
#define TOKEN_RIPEN         0x42    /* 0x108 */
#define OBJECT_ADD          0x48    /* 0x120 */
#define OBJECT_REMOVE       0x49    /* 0x124 */
#define OBJECT_PURGE        0x4a    /* 0x128 */
#define OBJECT_PURGE_ALL    0x4b    /* 0x12c */
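/*
 * Added note: the hex values in the comments above appear to be the codes as
 * they show up in a kdebug trace; the KDBG_CODE()/MACHDBG_CODE() macros used
 * with KERNEL_DEBUG_CONSTANT below shift the code left by two bits, so e.g.
 * TOKEN_ADD 0x40 traces as 0x100 and OBJECT_PURGE_ALL 0x4b as 0x12c.
 */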
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);

void vm_purgeable_nonvolatile_owner_update(task_t owner,
                                           int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
                                        int delta);
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
    int         token_cnt = 0, page_cnt = 0;
    token_idx_t token = queue->token_q_head;
    token_idx_t unripe = 0;
    int         our_inactive_count;

#if DEVELOPMENT
    static unsigned lightweight_check = 0;

    /*
     * Due to performance impact, only perform this check
     * every 100 times on DEVELOPMENT kernels.
     */
    if (lightweight_check++ < 100) {
        return;
    }

    lightweight_check = 0;
#endif

    while (token) {
        if (tokens[token].count != 0) {
            assert(queue->token_q_unripe);
            if (unripe == 0) {
                assert(token == queue->token_q_unripe);
                unripe = token;
            }
        }
        page_cnt += tokens[token].count;

        if (tokens[token].next == 0)
            assert(queue->token_q_tail == token);

        token_cnt++;
        token = tokens[token].next;
    }

    if (unripe)
        assert(queue->token_q_unripe == unripe);
    assert(token_cnt == queue->debug_count_tokens);

    /* obsolete queue doesn't maintain token counts */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
        our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
        assert(our_inactive_count >= 0);
        assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
    }
}
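/*
 * Added note on the token scheme, as implied by the code above and below:
 * each volatile queue carries a FIFO of tokens.  A token's count is the
 * number of inactive-queue pages that still have to age past it before it
 * ripens (count == 0); queue->new_pages and token_new_pagecount account for
 * pages not yet charged to any token, which is why the check above can
 * assert that the sum matches vm_page_inactive_count - vm_page_cleaned_count.
 * available_for_purge counts ripe tokens across all queues, i.e. how many
 * volatile objects may be purged right now.
 */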
/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    /* new token */
    token_idx_t token;
    enum purgeable_q_type i;

find_available_token:

    if (token_free_idx) {                           /* unused tokens available */
        token = token_free_idx;
        token_free_idx = tokens[token_free_idx].next;
    } else if (token_init_idx < token_q_max_cnt) {  /* lazy token array init */
        token = token_init_idx;
        token_init_idx++;
    } else {                                        /* allocate more memory */
        /* Wait if another thread is inside the memory alloc section */
        while (token_q_allocating) {
            wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
                                              LCK_SLEEP_DEFAULT,
                                              (event_t)&token_q_allocating,
                                              THREAD_UNINT);
            if (res != THREAD_AWAKENED) return KERN_ABORTED;
        }

        /* Check whether memory is still maxed out */
        if (token_init_idx < token_q_max_cnt)
            goto find_available_token;

        /* Still no memory. Allocate some. */
        token_q_allocating = 1;

        /* Drop page queue lock so we can allocate */
        vm_page_unlock_queues();

        struct token *new_loc;
        vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
        kern_return_t result;

        if (alloc_size / sizeof (struct token) > TOKEN_COUNT_MAX) {
            result = KERN_RESOURCE_SHORTAGE;
        } else {
            if (token_q_cur_size) {
                result = kmem_realloc(kernel_map,
                                      (vm_offset_t) tokens,
                                      token_q_cur_size,
                                      (vm_offset_t *) &new_loc,
                                      alloc_size, VM_KERN_MEMORY_OSFMK);
            } else {
                result = kmem_alloc(kernel_map,
                                    (vm_offset_t *) &new_loc,
                                    alloc_size, VM_KERN_MEMORY_OSFMK);
            }
        }

        vm_page_lock_queues();

        if (result) {
            /* Unblock waiting threads */
            token_q_allocating = 0;
            thread_wakeup((event_t)&token_q_allocating);
            return result;
        }

        /* If we get here, we allocated new memory. Update pointers and
         * dealloc old range */
        struct token *old_tokens = tokens;
        tokens = new_loc;
        vm_size_t old_token_q_cur_size = token_q_cur_size;
        token_q_cur_size = alloc_size;
        token_q_max_cnt = (token_idx_t) (token_q_cur_size /
                                         sizeof(struct token));
        assert(token_init_idx < token_q_max_cnt);   /* We must have a free token now */

        if (old_token_q_cur_size) {     /* clean up old mapping */
            vm_page_unlock_queues();
            /* kmem_realloc leaves the old region mapped. Get rid of it. */
            kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
            vm_page_lock_queues();
        }

        /* Unblock waiting threads */
        token_q_allocating = 0;
        thread_wakeup((event_t)&token_q_allocating);

        goto find_available_token;
    }

    /*
     * the new pagecount we got need to be applied to all queues except
     * the obsolete queue
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
        assert(pages >= 0);
        assert(pages <= TOKEN_COUNT_MAX);
        purgeable_queues[i].new_pages = (int32_t) pages;
        assert(purgeable_queues[i].new_pages == pages);
    }
    token_new_pagecount = 0;

    /* set token counter value */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
        tokens[token].count = queue->new_pages;
    else
        tokens[token].count = 0;    /* all obsolete items are
                                     * ripe immediately */
    queue->new_pages = 0;

    /* put token on token counter list */
    tokens[token].next = 0;
    if (queue->token_q_tail == 0) {
        assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
        queue->token_q_head = token;
        tokens[token].prev = 0;
    } else {
        tokens[queue->token_q_tail].next = token;
        tokens[token].prev = queue->token_q_tail;
    }
    if (queue->token_q_unripe == 0) {   /* only ripe tokens (token
                                         * count == 0) in queue */
        if (tokens[token].count > 0)
            queue->token_q_unripe = token;  /* first unripe token */
        else
            available_for_purge++;  /* added a ripe token?
                                     * increase available count */
    }
    queue->token_q_tail = token;

    queue->debug_count_tokens++;
    /* Check both queues, since we modified the new_pages count on each */
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
                          queue->type,
                          tokens[token].count,  /* num pages on token
                                                 * (last token) */
                          queue->debug_count_tokens,
                          0,
                          0);

    return KERN_SUCCESS;
}
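/*
 * Added note on the bookkeeping above: the pending token_new_pagecount is
 * folded into every queue's new_pages before the new token is created, the
 * new token is then charged with this queue's accumulated new_pages, and
 * new_pages is reset, so each page ends up counted against exactly one token
 * per queue.
 */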
/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token;
    token = queue->token_q_head;

    if (token) {
        assert(queue->token_q_tail);
        if (queue->token_q_head == queue->token_q_unripe) {
            /* no ripe tokens... must move unripe pointer */
            queue->token_q_unripe = tokens[token].next;
        } else {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        }

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        queue->token_q_head = tokens[token].next;
        if (queue->token_q_head) {
            tokens[queue->token_q_head].count += tokens[token].count;
            tokens[queue->token_q_head].prev = 0;
        } else {
            /* currently no other tokens in the queue */
            /*
             * the page count must be added to the next newly
             * created token
             */
            queue->new_pages += tokens[token].count;
            /* if head is zero, tail is too */
            queue->token_q_tail = 0;
        }

        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count,    /* num pages on new
                                                                     * first token */
                              token_new_pagecount,  /* num pages waiting for
                                                     * next token */
                              available_for_purge,
                              0);
    }

    return token;
}
static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token;
    token = queue->token_q_tail;

    if (token) {
        assert(queue->token_q_head);

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        if (queue->token_q_unripe == 0) {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        } else if (queue->token_q_unripe == token) {
            /* we're removing the only unripe token */
            queue->token_q_unripe = 0;
        }

        if (token == queue->token_q_head) {
            /* token is the last one in the queue */
            queue->token_q_head = 0;
            queue->token_q_tail = 0;
        } else {
            token_idx_t new_tail;
            new_tail = tokens[token].prev;

            assert(new_tail);
            assert(tokens[new_tail].next == token);

            queue->token_q_tail = new_tail;
            tokens[new_tail].next = 0;
        }

        queue->new_pages += tokens[token].count;

        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count,    /* num pages on new
                                                                     * first token */
                              token_new_pagecount,  /* num pages waiting for
                                                     * next token */
                              available_for_purge,
                              0);
    }

    return token;
}
/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token = vm_purgeable_token_remove_first(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}
void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token = vm_purgeable_token_remove_last(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}
/* Call with page queue locked. */
void
vm_purgeable_q_advance_all()
{
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    /* check queue counters - if they get really large, scale them back.
     * They tend to get that large when there is no purgeable queue action */
    enum purgeable_q_type i;
    if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) { /* a system idling years might get there */
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
            int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
            assert(pages >= 0);
            assert(pages <= TOKEN_COUNT_MAX);
            purgeable_queues[i].new_pages = (int32_t) pages;
            assert(purgeable_queues[i].new_pages == pages);
        }
        token_new_pagecount = 0;
    }

    /*
     * Decrement token counters. A token counter can be zero, this means the
     * object is ripe to be purged. It is not purged immediately, because that
     * could cause several objects to be purged even if purging one would satisfy
     * the memory needs. Instead, the pageout thread purges one after the other
     * by calling vm_purgeable_object_purge_one and then rechecking the memory
     * balance.
     *
     * No need to advance obsolete queue - all items are ripe there,
     * always.
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue = &purgeable_queues[i];
        uint32_t num_pages = 1;

        /* Iterate over tokens as long as there are unripe tokens. */
        while (queue->token_q_unripe) {
            if (tokens[queue->token_q_unripe].count && num_pages) {
                tokens[queue->token_q_unripe].count -= 1;
                num_pages -= 1;
            }

            if (tokens[queue->token_q_unripe].count == 0) {
                queue->token_q_unripe = tokens[queue->token_q_unripe].next;
                available_for_purge++;
                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
                                      queue->type,
                                      tokens[queue->token_q_head].count, /* num pages on new
                                                                          * first token */
                                      0,
                                      available_for_purge,
                                      0);
                continue;   /* One token ripened. Make sure to
                             * check the next. */
            }
            if (num_pages == 0)
                break;      /* Current token not ripe and no more pages.
                             * Work done. */
        }

        /*
         * if there are no unripe tokens in the queue, decrement the
         * new_pages counter instead new_pages can be negative, but must be
         * canceled out by token_new_pagecount -- since inactive queue as a
         * whole always contains a nonnegative number of pages
         */
        if (!queue->token_q_unripe) {
            queue->new_pages -= num_pages;
            assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
        }
        vm_purgeable_token_check_queue(queue);
    }
}
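/*
 * Added note: when a queue has no unripe tokens, the advance is charged to
 * new_pages instead, which keeps the invariant checked by
 * vm_purgeable_token_check_queue() (sum of token counts + new_pages +
 * token_new_pagecount == inactive page count) intact.
 */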
/*
 * grab any ripe object and purge it obsolete queue first. then, go through
 * each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 *   1. Look at queue. Is there an object?
 *      Yes - purge it. Remove token.
 *      No - check other queue. Is there an object?
 *           No - increment group, then go to (1)
 *           Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *                 token from other queue and migrate unripe token from this
 *                 queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
    /* return token to free list. advance token list. */
    token_idx_t new_head = tokens[queue->token_q_head].next;
    tokens[queue->token_q_head].next = token_free_idx;
    tokens[queue->token_q_head].prev = 0;
    token_free_idx = queue->token_q_head;
    queue->token_q_head = new_head;
    tokens[new_head].prev = 0;
    if (new_head == 0)
        queue->token_q_tail = 0;

    queue->debug_count_tokens--;
    vm_purgeable_token_check_queue(queue);

    available_for_purge--;
    assert(available_for_purge >= 0);
}
/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(queue->token_q_head);

    if (tokens[queue->token_q_head].count == 0) {
        /* This queue has a ripe token. Remove. */
        vm_purgeable_token_remove_ripe(queue);
    } else {
        assert(queue2);
        /*
         * queue2 must have a ripe token. Remove, and migrate one
         * from queue to queue2.
         */
        vm_purgeable_token_remove_ripe(queue2);
        /* migrate unripe token */
        token_idx_t token;
        token_cnt_t count;

        /* remove token from queue1 */
        assert(queue->token_q_unripe == queue->token_q_head);   /* queue1 had no unripe
                                                                 * tokens, remember? */
        token = vm_purgeable_token_remove_first(queue);
        assert(token);

        count = tokens[token].count;

        /* migrate to queue2 */
        /* go to migration target loc */

        token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;

        while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
            count -= tokens[token_to_insert_before].count;
            token_to_insert_before = tokens[token_to_insert_before].next;
        }

        /* token_to_insert_before is now set correctly */

        /* should the inserted token become the first unripe token? */
        if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
            queue2->token_q_unripe = token;     /* if so, must update unripe pointer */

        /*
         * insert token.
         * if inserting at end, reduce new_pages by that value;
         * otherwise, reduce counter of next token
         */

        tokens[token].count = count;

        if (token_to_insert_before != 0) {
            token_to_insert_after = tokens[token_to_insert_before].prev;

            tokens[token].next = token_to_insert_before;
            tokens[token_to_insert_before].prev = token;

            assert(tokens[token_to_insert_before].count >= count);
            tokens[token_to_insert_before].count -= count;
        } else {
            /* if we ran off the end of the list, the token to insert after is the tail */
            token_to_insert_after = queue2->token_q_tail;

            tokens[token].next = 0;
            queue2->token_q_tail = token;

            assert(queue2->new_pages >= (int32_t) count);
            queue2->new_pages -= count;
        }

        if (token_to_insert_after != 0) {
            tokens[token].prev = token_to_insert_after;
            tokens[token_to_insert_after].next = token;
        } else {
            /* is this case possible? */
            tokens[token].prev = 0;
            queue2->token_q_head = token;
        }

        queue2->debug_count_tokens++;
        vm_purgeable_token_check_queue(queue2);
    }
}
/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(
    purgeable_q_t   queue,
    int             group,
    boolean_t       pick_ripe)
{
    vm_object_t object, best_object;
    int         object_task_importance;
    int         best_object_task_importance;
    int         best_object_skipped;
    int         num_objects_skipped;
    task_t      owner;

    best_object = VM_OBJECT_NULL;
    best_object_task_importance = INT_MAX;

    lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
    /*
     * Usually we would pick the first element from a queue. However, we
     * might not be able to get a lock on it, in which case we try the
     * remaining elements in order.
     */

    num_objects_skipped = -1;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq),
         num_objects_skipped++) {

        if (pick_ripe &&
            !object->purgeable_when_ripe) {
            /* we want an object that has a ripe token */
            continue;
        }

        object_task_importance = 0;

        owner = object->vo_purgeable_owner;
        if (owner) {
            object_task_importance = task_importance_estimate(owner);
        }

        if (object_task_importance < best_object_task_importance) {
            if (vm_object_lock_try(object)) {
                if (best_object != VM_OBJECT_NULL) {
                    /* forget about previous best object */
                    vm_object_unlock(best_object);
                }
                best_object = object;
                best_object_task_importance = object_task_importance;
                best_object_skipped = num_objects_skipped;
                if (best_object_task_importance == 0) {
                    /* can't get any better: stop looking */
                    break;
                }
            }
        }
    }

    object = best_object;

    if (object == VM_OBJECT_NULL) {
        return VM_OBJECT_NULL;
    }

    /* Locked. Great. We'll take it. Remove and return. */
//  printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

    vm_object_lock_assert_exclusive(object);

    queue_remove(&queue->objq[group], object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);

    object->vo_purgeable_volatilizer = NULL;

    /* keep queue of non-volatile objects */
    queue_enter(&purgeable_nonvolatile_queue, object,
                vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    /* one more nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, +1);

    queue->debug_count_objects--;

    return object;
}
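/*
 * Added note for callers: on success the object comes back locked, already
 * pulled off its volatile queue and re-queued (and re-accounted) as
 * nonvolatile, so the caller only has to deal with the ripe token and call
 * vm_object_purge(), as vm_purgeable_object_purge_all/_one do below.
 */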
/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
    enum purgeable_q_type i;
    int             group;
    vm_object_t     object;
    unsigned int    purged_count;
    uint32_t        collisions;
    purgeable_q_t   queue;

    purged_count = 0;
    collisions = 0;

restart:
    lck_mtx_lock(&vm_purgeable_queue_lock);
    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        queue = &purgeable_queues[i];

        /*
         * Look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            while (!queue_empty(&queue->objq[group])) {
                object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
                if (object == VM_OBJECT_NULL) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    mutex_pause(collisions++);
                    goto restart;
                }

                lck_mtx_unlock(&vm_purgeable_queue_lock);

                /* Lock the page queue here so we don't hold it
                 * over the whole, lengthy operation */
                if (object->purgeable_when_ripe) {
                    vm_page_lock_queues();
                    vm_purgeable_token_remove_first(queue);
                    vm_page_unlock_queues();
                }

                (void) vm_object_purge(object, 0);
                assert(object->purgable == VM_PURGABLE_EMPTY);
                /* no change in purgeable accounting */

                vm_object_unlock(object);
                purged_count++;
                goto restart;
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
                          purged_count, /* # of purged objects */
                          0,
                          available_for_purge,
                          0,
                          0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}
boolean_t
vm_purgeable_object_purge_one_unlocked(
    int force_purge_below_group)
{
    boolean_t retval;

    vm_page_lock_queues();
    retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
    vm_page_unlock_queues();

    return retval;
}
boolean_t
vm_purgeable_object_purge_one(
    int force_purge_below_group,
    int flags)
{
    enum purgeable_q_type i;
    int             group;
    vm_object_t     object = 0;
    purgeable_q_t   queue, queue2;
    boolean_t       forced_purge;

    /* Need the page queue lock since we'll be changing the token queue. */
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        queue = &purgeable_queues[i];

        if (force_purge_below_group == 0) {
            /*
             * Are there any ripe tokens on this queue? If yes,
             * we'll find an object to purge there
             */
            if (!queue->token_q_head) {
                /* no token: look at next purgeable queue */
                continue;
            }
            if (tokens[queue->token_q_head].count != 0) {
                /* no ripe token: next queue */
                continue;
            }
        }

        /*
         * Now look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            if (!queue->token_q_head ||
                tokens[queue->token_q_head].count != 0) {
                /* no tokens or no ripe tokens */

                if (group >= force_purge_below_group) {
                    /* no more groups to force-purge */
                    break;
                }

                /*
                 * Try and purge an object in this group
                 * even though no tokens are ripe.
                 */
                if (!queue_empty(&queue->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_delete_first(queue);
                    }
                    forced_purge = TRUE;
                    goto purge_now;
                }

                /* nothing to purge in this group: next group */
                continue;
            }
            if (!queue_empty(&queue->objq[group]) &&
                (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                if (object->purgeable_when_ripe) {
                    vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                }
                forced_purge = FALSE;
                goto purge_now;
            }
            if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
                /* This is the token migration case, and it works between
                 * FIFO and LIFO only */
                queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
                                           PURGEABLE_Q_TYPE_FIFO :
                                           PURGEABLE_Q_TYPE_LIFO];

                if (!queue_empty(&queue2->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                    }
                    forced_purge = FALSE;
                    goto purge_now;
                }
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    /*
     * because we have to do a try_lock on the objects which could fail,
     * we could end up with no object to purge at this time, even though
     * we have objects in a purgeable state
     */
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return FALSE;

purge_now:

    assert(object);
    vm_page_unlock_queues();    /* Unlock for call to vm_object_purge() */
//  printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_purgeable_owner, task_importance_estimate(object->vo_purgeable_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
    (void) vm_object_purge(object, flags);
    assert(object->purgable == VM_PURGABLE_EMPTY);
    /* no change in purgeable accounting */
    vm_object_unlock(object);
    vm_page_lock_queues();

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
                          object,   /* purged object */
                          0,
                          available_for_purge,
                          0,
                          0);

    return TRUE;
}
/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
    vm_object_lock_assert_exclusive(object);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    /* one less nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, -1);

    if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
        group = 0;

    if (queue->type != PURGEABLE_Q_TYPE_LIFO)   /* fifo and obsolete are
                                                 * fifo-queued */
        queue_enter(&queue->objq[group], object, vm_object_t, objq);        /* last to die */
    else
        queue_enter_first(&queue->objq[group], object, vm_object_t, objq);  /* first to die */
    /* one more volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, +1);

    object->purgeable_queue_type = queue->type;
    object->purgeable_queue_group = group;

    assert(object->vo_purgeable_volatilizer == NULL);
    object->vo_purgeable_volatilizer = current_task();
    OSBacktrace(&object->purgeable_volatilizer_bt[0], 16);

    queue->debug_count_objects++;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}
/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
    int group;
    enum purgeable_q_type type;
    purgeable_q_t queue;

    vm_object_lock_assert_exclusive(object);

    type = object->purgeable_queue_type;
    group = object->purgeable_queue_group;

    if (type == PURGEABLE_Q_TYPE_MAX) {
        if (object->objq.prev || object->objq.next)
            panic("unmarked object on purgeable q");

        return NULL;
    } else if (!(object->objq.prev && object->objq.next))
        panic("marked object not on purgeable q");

    lck_mtx_lock(&vm_purgeable_queue_lock);

    queue = &purgeable_queues[type];

    queue_remove(&queue->objq[group], object, vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);

    object->vo_purgeable_volatilizer = NULL;

    /* keep queue of non-volatile objects */
    if (object->alive && !object->terminating) {
        task_t owner;
        queue_enter(&purgeable_nonvolatile_queue, object,
                    vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        owner = object->vo_purgeable_owner;
        vm_purgeable_nonvolatile_owner_update(owner, +1);
    }

    queue->debug_count_objects--;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;

    vm_object_lock_assert_exclusive(object);

    return &purgeable_queues[type];
}
static void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
    lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    stat->count = stat->size = 0;
    vm_object_t object;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (!target_task || object->vo_purgeable_owner == target_task) {
            stat->count++;
            stat->size += (object->resident_page_count * PAGE_SIZE);
        }
    }
    return;
}
void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
    purgeable_q_t queue;
    int group;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Populate fifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);

    /* Populate lifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);

    /* Populate obsolete data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}
#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
    purgeable_q_t       queue,
    int                 group,
    task_t              task,
    pvm_account_info_t  acnt_info)
{
    vm_object_t object;
    uint64_t compressed_count;

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (object->vo_purgeable_owner == task) {
            compressed_count = vm_compressor_pager_get_count(object->pager);
            acnt_info->pvm_volatile_compressed_count += compressed_count;
            acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }
}
/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
    task_t              task,
    pvm_account_info_t  acnt_info)
{
    queue_head_t    *nonvolatile_q;
    vm_object_t     object;
    int             group;
    int             state;
    uint64_t        compressed_count;
    purgeable_q_t   volatile_q;

    if ((task == NULL) || (acnt_info == NULL)) {
        return KERN_INVALID_ARGUMENT;
    }

    acnt_info->pvm_volatile_count = 0;
    acnt_info->pvm_volatile_compressed_count = 0;
    acnt_info->pvm_nonvolatile_count = 0;
    acnt_info->pvm_nonvolatile_compressed_count = 0;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
         !queue_end(nonvolatile_q, (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (object->vo_purgeable_owner == task) {
            state = object->purgable;
            compressed_count = vm_compressor_pager_get_count(object->pager);
            if (state == VM_PURGABLE_EMPTY) {
                acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_volatile_compressed_count += compressed_count;
            } else {
                acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
            }
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
    acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

    return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */
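/*
 * Added note: vm_purgeable_account() above is only built on
 * DEVELOPMENT/DEBUG kernels, and the pvm_* fields it fills are converted
 * from page counts to bytes (multiplied by PAGE_SIZE) just before returning.
 */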
static void
vm_purgeable_volatile_queue_disown(
    purgeable_q_t   queue,
    int             group,
    task_t          task)
{
    vm_object_t object;
    int         collisions;

    collisions = 0;

again:
    lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
        /*
         * Sanity check: let's scan the entire queues to
         * make sure we don't leave any purgeable objects
         * pointing back at a dead task.  If the counters
         * are off, we would fail to assert that they go
         * back to 0 after disowning is done.
         */
#else /* MACH_ASSERT */
        if (task->task_volatile_objects == 0) {
            /* no more volatile objects owned by "task" */
            break;
        }
#endif /* MACH_ASSERT */
        if (object->vo_purgeable_owner == task) {
            if (!vm_object_lock_try(object)) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                mutex_pause(collisions++);
                lck_mtx_lock(&vm_purgeable_queue_lock);
                goto again;
            }
            assert(object->purgable == VM_PURGABLE_VOLATILE);
            if (object->vo_purgeable_owner == task) {
                vm_purgeable_accounting(object,
                                        object->purgable,
                                        TRUE); /* disown */
                assert(object->vo_purgeable_owner == NULL);
            }
            vm_object_unlock(object);
        }
    }
}
void
vm_purgeable_disown(
    task_t  task)
{
    purgeable_q_t   volatile_q;
    int             group;
    queue_head_t    *nonvolatile_q;
    vm_object_t     object;
    int             collisions;

    task->task_purgeable_disowning = TRUE;

    /*
     * Scan the purgeable objects queues for objects owned by "task".
     * This has to be done "atomically" under the "vm_purgeable_queue"
     * lock, to ensure that no new purgeable object get associated
     * with this task or moved between queues while we're scanning.
     */

    /*
     * Scan non-volatile queue for objects owned by "task".
     */

    collisions = 0;

again:
    if (task->task_purgeable_disowned) {
        /* task has already disowned its purgeable memory */
        assert(task->task_volatile_objects == 0);
        assert(task->task_nonvolatile_objects == 0);
        return;
    }
    lck_mtx_lock(&vm_purgeable_queue_lock);

    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
         !queue_end(nonvolatile_q, (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
        /*
         * Sanity check: let's scan the entire queues to
         * make sure we don't leave any purgeable objects
         * pointing back at a dead task.  If the counters
         * are off, we would fail to assert that they go
         * back to 0 after disowning is done.
         */
#else /* MACH_ASSERT */
        if (task->task_nonvolatile_objects == 0) {
            /* no more non-volatile objects owned by "task" */
            break;
        }
#endif /* MACH_ASSERT */
        assert(object->vo_purgeable_volatilizer == NULL);

        if (object->vo_purgeable_owner == task) {
            if (!vm_object_lock_try(object)) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                mutex_pause(collisions++);
                goto again;
            }
            if (object->vo_purgeable_owner == task) {
                vm_purgeable_accounting(object,
                                        object->purgable,
                                        TRUE); /* disown */
                assert(object->vo_purgeable_owner == NULL);
            }
            vm_object_unlock(object);
        }
    }

    lck_mtx_yield(&vm_purgeable_queue_lock);

    /*
     * Scan volatile queues for objects owned by "task".
     */

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_volatile_queue_disown(volatile_q, 0, task);
    lck_mtx_yield(&vm_purgeable_queue_lock);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_volatile_queue_disown(volatile_q, group, task);
        lck_mtx_yield(&vm_purgeable_queue_lock);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_volatile_queue_disown(volatile_q, group, task);
        lck_mtx_yield(&vm_purgeable_queue_lock);
    }

    if (task->task_volatile_objects != 0 ||
        task->task_nonvolatile_objects != 0) {
        /* some purgeable objects sneaked into a queue: find them */
        lck_mtx_unlock(&vm_purgeable_queue_lock);
        mutex_pause(collisions++);
        goto again;
    }

    /* there shouldn't be any purgeable objects owned by task now */
    assert(task->task_volatile_objects == 0);
    assert(task->task_nonvolatile_objects == 0);
    assert(task->task_purgeable_disowning);

    /* and we don't need to try and disown again */
    task->task_purgeable_disowned = TRUE;

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}
static int
vm_purgeable_queue_purge_task_owned(
    purgeable_q_t   queue,
    int             group,
    task_t          task)
{
    vm_object_t object;
    int         collisions;
    int         num_objects_purged;

    num_objects_purged = 0;
    collisions = 0;

look_again:
    lck_mtx_lock(&vm_purgeable_queue_lock);

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {

        if (object->vo_purgeable_owner != task &&
            object->vo_purgeable_owner != NULL) {
            continue;
        }

        /* found an object: try and grab it */
        if (!vm_object_lock_try(object)) {
            lck_mtx_unlock(&vm_purgeable_queue_lock);
            mutex_pause(collisions++);
            goto look_again;
        }
        /* got it ! */

        collisions = 0;

        /* remove object from purgeable queue */
        queue_remove(&queue->objq[group], object,
                     vm_object_t, objq);
        object->objq.next = NULL;
        object->objq.prev = NULL;
        /* one less volatile object for this object's owner */
        assert(object->vo_purgeable_owner == task);
        vm_purgeable_volatile_owner_update(task, -1);

        object->vo_purgeable_volatilizer = NULL;

        queue_enter(&purgeable_nonvolatile_queue, object,
                    vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        assert(object->vo_purgeable_owner == task);
        vm_purgeable_nonvolatile_owner_update(task, +1);

        /* unlock purgeable queues */
        lck_mtx_unlock(&vm_purgeable_queue_lock);

        if (object->purgeable_when_ripe) {
            /* remove a token */
            vm_page_lock_queues();
            vm_purgeable_token_remove_first(queue);
            vm_page_unlock_queues();
        }

        /* purge the object */
        (void) vm_object_purge(object, 0);
        assert(object->purgable == VM_PURGABLE_EMPTY);
        /* no change for purgeable accounting */
        vm_object_unlock(object);
        num_objects_purged++;

        /* we unlocked the purgeable queues, so start over */
        goto look_again;
    }

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    return num_objects_purged;
}
int
vm_purgeable_purge_task_owned(
    task_t  task)
{
    purgeable_q_t   queue;
    int             group;
    int             num_objects_purged;

    num_objects_purged = 0;

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
                                                              0,
                                                              task);

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
                                                                  group,
                                                                  task);

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
                                                                  group,
                                                                  task);

    return num_objects_purged;
}
void
vm_purgeable_nonvolatile_enqueue(
    vm_object_t object,
    task_t      owner)
{
    int page_count;

    vm_object_lock_assert_exclusive(object);

    assert(object->purgable == VM_PURGABLE_NONVOLATILE);
    assert(object->vo_purgeable_owner == NULL);
    assert(owner != NULL);

    lck_mtx_lock(&vm_purgeable_queue_lock);

    if (owner->task_purgeable_disowning) {
        /* task is exiting and no longer tracking purgeable objects */
        owner = NULL;
    }

    object->vo_purgeable_owner = owner;

    object->vo_purgeable_volatilizer = NULL;

    OSBacktrace(&object->purgeable_owner_bt[0], 16);

    page_count = object->resident_page_count;
    assert(page_count == 0); /* should be a freshly-created object */
    if (owner != NULL && page_count != 0) {
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_nonvolatile,
                      ptoa(page_count));
        ledger_credit(owner->ledger,
                      task_ledgers.phys_footprint,
                      ptoa(page_count));
    }

    assert(object->objq.next == NULL);
    assert(object->objq.prev == NULL);

    queue_enter(&purgeable_nonvolatile_queue, object,
                vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    /* one more nonvolatile object for this object's owner */
    assert(object->vo_purgeable_owner == owner);
    vm_purgeable_nonvolatile_owner_update(owner, +1);
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_nonvolatile_dequeue(
    vm_object_t object)
{
    task_t owner;

    vm_object_lock_assert_exclusive(object);

    owner = object->vo_purgeable_owner;

    assert(object->vo_purgeable_volatilizer == NULL);

    if (owner != NULL) {
        /*
         * Update the owner's ledger to stop accounting
         * for this object.
         */
        vm_purgeable_accounting(object,
                                object->purgable,
                                TRUE); /* disown */
    }

    lck_mtx_lock(&vm_purgeable_queue_lock);
    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    vm_object_lock_assert_exclusive(object);
}
void
vm_purgeable_accounting(
    vm_object_t     object,
    vm_purgable_t   old_state,
    boolean_t       disown)
{
    task_t      owner;
    int         resident_page_count;
    int         wired_page_count;
    int         compressed_page_count;
    boolean_t   disown_on_the_fly;

    vm_object_lock_assert_exclusive(object);

    owner = object->vo_purgeable_owner;
    if (owner == NULL)
        return;

    if (!disown && owner->task_purgeable_disowning) {
        /* task is disowning its purgeable objects: help it */
        disown_on_the_fly = TRUE;
    } else {
        disown_on_the_fly = FALSE;
    }

    resident_page_count = object->resident_page_count;
    wired_page_count = object->wired_page_count;
    if ((COMPRESSED_PAGER_IS_ACTIVE ||
         DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) &&
        object->pager != NULL) {
        compressed_page_count =
            vm_compressor_pager_get_count(object->pager);
    } else {
        compressed_page_count = 0;
    }

    if (old_state == VM_PURGABLE_VOLATILE ||
        old_state == VM_PURGABLE_EMPTY) {
        /* less volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_volatile,
                     ptoa(resident_page_count - wired_page_count));
        /* less compressed volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_volatile_compressed,
                     ptoa(compressed_page_count));

        if (disown || !object->alive || object->terminating) {
            /* wired pages were accounted as "non-volatile"... */
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_nonvolatile,
                         ptoa(wired_page_count));
            /* ... and in phys_footprint */
            ledger_debit(owner->ledger,
                         task_ledgers.phys_footprint,
                         ptoa(wired_page_count));

            if (!disown_on_the_fly &&
                (object->purgeable_queue_type ==
                 PURGEABLE_Q_TYPE_MAX)) {
                /*
                 * Not on a volatile queue: must be empty
                 * or just created.
                 */
                vm_purgeable_nonvolatile_owner_update(owner, -1);
            } else {
                /* on a volatile queue */
                vm_purgeable_volatile_owner_update(owner, -1);
            }
            /* no more accounting for this dead object */
            object->vo_purgeable_owner = NULL;

            object->vo_purgeable_volatilizer = NULL;

            return;
        }

        /* more non-volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_nonvolatile,
                      ptoa(resident_page_count - wired_page_count));
        /* more compressed non-volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_nonvolatile_compressed,
                      ptoa(compressed_page_count));
        /* more footprint */
        ledger_credit(owner->ledger,
                      task_ledgers.phys_footprint,
                      ptoa(resident_page_count
                           + compressed_page_count
                           - wired_page_count));

    } else if (old_state == VM_PURGABLE_NONVOLATILE) {

        /* less non-volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_nonvolatile,
                     ptoa(resident_page_count - wired_page_count));
        /* less compressed non-volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_nonvolatile_compressed,
                     ptoa(compressed_page_count));
        /* less footprint */
        ledger_debit(owner->ledger,
                     task_ledgers.phys_footprint,
                     ptoa(resident_page_count
                          + compressed_page_count
                          - wired_page_count));

        if (disown || !object->alive || object->terminating) {
            /* wired pages still accounted as "non-volatile" */
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_nonvolatile,
                         ptoa(wired_page_count));
            ledger_debit(owner->ledger,
                         task_ledgers.phys_footprint,
                         ptoa(wired_page_count));

            /* one less "non-volatile" object for the owner */
            if (!disown_on_the_fly) {
                assert(object->purgeable_queue_type ==
                       PURGEABLE_Q_TYPE_MAX);
            }
            vm_purgeable_nonvolatile_owner_update(owner, -1);
            /* no more accounting for this dead object */
            object->vo_purgeable_owner = NULL;

            object->vo_purgeable_volatilizer = NULL;

            return;
        }

        /* more volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_volatile,
                      ptoa(resident_page_count - wired_page_count));
        /* more compressed volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_volatile_compressed,
                      ptoa(compressed_page_count));
    } else {
        panic("vm_purgeable_accounting(%p): "
              "unexpected old_state=%d\n",
              object, old_state);
    }

    vm_object_lock_assert_exclusive(object);
}
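/*
 * Added note: all ledger amounts above are in bytes (ptoa() converts a page
 * count to bytes), and wired pages are always charged to the "nonvolatile"
 * ledger regardless of the object's volatility state.
 */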
void
vm_purgeable_nonvolatile_owner_update(
    task_t  owner,
    int     delta)
{
    if (owner == NULL || delta == 0) {
        return;
    }

    if (delta > 0) {
        assert(owner->task_nonvolatile_objects >= 0);
        OSAddAtomic(delta, &owner->task_nonvolatile_objects);
        assert(owner->task_nonvolatile_objects > 0);
    } else {
        assert(owner->task_nonvolatile_objects > delta);
        OSAddAtomic(delta, &owner->task_nonvolatile_objects);
        assert(owner->task_nonvolatile_objects >= 0);
    }
}
void
vm_purgeable_volatile_owner_update(
    task_t  owner,
    int     delta)
{
    if (owner == NULL || delta == 0) {
        return;
    }

    if (delta > 0) {
        assert(owner->task_volatile_objects >= 0);
        OSAddAtomic(delta, &owner->task_volatile_objects);
        assert(owner->task_volatile_objects > 0);
    } else {
        assert(owner->task_volatile_objects > delta);
        OSAddAtomic(delta, &owner->task_volatile_objects);
        assert(owner->task_volatile_objects >= 0);
    }
}
void
vm_purgeable_compressed_update(
    vm_object_t object,
    int         delta)
{
    task_t owner;

    vm_object_lock_assert_exclusive(object);

    if (delta == 0 ||
        !object->internal ||
        object->purgable == VM_PURGABLE_DENY ||
        object->vo_purgeable_owner == NULL) {
        /* not an owned purgeable VM object: nothing to update */
        return;
    }

    owner = object->vo_purgeable_owner;
    switch (object->purgable) {
    case VM_PURGABLE_DENY:
        break;
    case VM_PURGABLE_NONVOLATILE:
        if (delta > 0) {
            ledger_credit(owner->ledger,
                          task_ledgers.purgeable_nonvolatile_compressed,
                          ptoa(delta));
            ledger_credit(owner->ledger,
                          task_ledgers.phys_footprint,
                          ptoa(delta));
        } else {
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_nonvolatile_compressed,
                         ptoa(-delta));
            ledger_debit(owner->ledger,
                         task_ledgers.phys_footprint,
                         ptoa(-delta));
        }
        break;
    case VM_PURGABLE_VOLATILE:
    case VM_PURGABLE_EMPTY:
        if (delta > 0) {
            ledger_credit(owner->ledger,
                          task_ledgers.purgeable_volatile_compressed,
                          ptoa(delta));
        } else {
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_volatile_compressed,
                         ptoa(-delta));
        }
        break;
    default:
        panic("vm_purgeable_compressed_update(): "
              "unexpected purgable %d for object %p\n",
              object->purgable, object);
    }
    return;
}