/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>			/* kmem_alloc */
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>
#include <kern/sched_prim.h>
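
/*
 * Overview of the token mechanism, as implemented below:
 *
 * Volatile objects sit on the purgeable queues (OBSOLETE, FIFO, LIFO), each
 * split into volatile groups.  In parallel, every queue carries a list of
 * tokens.  A token's count is, in effect, the number of inactive-queue pages
 * that still have to be aged past before the next object on that queue is
 * considered ripe for purging; a token with count == 0 is ripe.  Tokens live
 * in the dynamically grown tokens[] array and are referenced by index
 * (token_idx_t), with index 0 meaning "no token".  Pages that arrive on the
 * inactive queue between token additions are accumulated in
 * token_new_pagecount and each queue's new_pages, so for the FIFO and LIFO
 * queues the pages represented by their tokens plus those counters add up to
 * vm_page_inactive_count (see the consistency check below).
 */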
token_idx_t     token_q_max_cnt = 0;
vm_size_t       token_q_cur_size = 0;

token_idx_t     token_free_idx = 0;		/* head of free queue */
token_idx_t     token_init_idx = 1;		/* token 0 is reserved!! */
int32_t         token_new_pagecount = 0;	/* count of pages that will
						 * be added onto token queue */

int             available_for_purge = 0;	/* increase when ripe token
						 * added, decrease when ripe
						 * token removed.
						 * protected by page_queue_lock */

static int      token_q_allocating = 0;		/* flag for singlethreading
						 * the token queue allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];

decl_lck_mtx_data(,vm_purgeable_queue_lock)
#define TOKEN_ADD		0x40	/* 0x100 */
#define TOKEN_DELETE		0x41	/* 0x104 */
#define TOKEN_RIPEN		0x42	/* 0x108 */
#define OBJECT_ADD		0x48	/* 0x120 */
#define OBJECT_REMOVE		0x49	/* 0x124 */
#define OBJECT_PURGE		0x4a	/* 0x128 */
#define OBJECT_PURGE_ALL	0x4b	/* 0x12c */
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);
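
/*
 * Debug consistency check for a purgeable queue: verifies that the unripe
 * pointer references the first token with a nonzero count, that the tail and
 * the token count match the list, and (for the FIFO and LIFO queues) that the
 * pages represented by the tokens plus queue->new_pages plus
 * token_new_pagecount add up to vm_page_inactive_count.
 */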
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
	int             token_cnt = 0, page_cnt = 0;
	token_idx_t     token = queue->token_q_head;
	token_idx_t     unripe = 0;
	int             our_inactive_count;

	while (token) {
		if (tokens[token].count != 0) {
			assert(queue->token_q_unripe);
			if (unripe == 0) {
				assert(token == queue->token_q_unripe);
				unripe = token;
			}
			page_cnt += tokens[token].count;
		}
		if (tokens[token].next == 0)
			assert(queue->token_q_tail == token);

		token_cnt++;
		token = tokens[token].next;
	}

	if (unripe)
		assert(queue->token_q_unripe == unripe);
	assert(token_cnt == queue->debug_count_tokens);

	/* obsolete queue doesn't maintain token counts */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
		our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
		assert(our_inactive_count >= 0);
		assert((uint32_t) our_inactive_count == vm_page_inactive_count);
	}
}
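
/*
 * Note on vm_purgeable_token_add(): the token array can only be grown with
 * the page queue lock dropped, so token_q_allocating single-threads the
 * grow-and-copy step while other threads sleep on it.  After any sleep or
 * re-lock, control jumps back to find_available_token and re-checks, since
 * another thread may have grown the array or freed a token in the meantime.
 */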
/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/* new token */
	token_idx_t     token;
	enum purgeable_q_type i;

find_available_token:

	if (token_free_idx) {				/* unused tokens available */
		token = token_free_idx;
		token_free_idx = tokens[token_free_idx].next;
	} else if (token_init_idx < token_q_max_cnt) {	/* lazy token array init */
		token = token_init_idx;
		token_init_idx++;
	} else {					/* allocate more memory */
		/* Wait if another thread is inside the memory alloc section */
		while (token_q_allocating) {
			wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
							  LCK_SLEEP_DEFAULT,
							  (event_t) &token_q_allocating,
							  THREAD_UNINT);
			if (res != THREAD_AWAKENED)
				return KERN_ABORTED;
		}

		/* Check whether memory is still maxed out */
		if (token_init_idx < token_q_max_cnt)
			goto find_available_token;

		/* Still no memory. Allocate some. */
		token_q_allocating = 1;

		/* Drop page queue lock so we can allocate */
		vm_page_unlock_queues();

		struct token   *new_loc;
		vm_size_t       alloc_size = token_q_cur_size + PAGE_SIZE;
		kern_return_t   result;

		if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
			result = KERN_RESOURCE_SHORTAGE;
		} else {
			if (token_q_cur_size) {
				result = kmem_realloc(kernel_map,
						      (vm_offset_t) tokens,
						      token_q_cur_size,
						      (vm_offset_t *) &new_loc,
						      alloc_size);
			} else {
				result = kmem_alloc(kernel_map,
						    (vm_offset_t *) &new_loc,
						    alloc_size);
			}
		}

		vm_page_lock_queues();

		if (result) {
			/* Unblock waiting threads */
			token_q_allocating = 0;
			thread_wakeup((event_t) &token_q_allocating);
			return result;
		}
		/* If we get here, we allocated new memory. Update pointers and
		 * dealloc old range */
		struct token   *old_tokens = tokens;
		tokens = new_loc;
		vm_size_t       old_token_q_cur_size = token_q_cur_size;
		token_q_cur_size = alloc_size;
		token_q_max_cnt = (token_idx_t) (token_q_cur_size /
						 sizeof(struct token));
		assert(token_init_idx < token_q_max_cnt);	/* We must have a free token now */

		if (old_token_q_cur_size) {	/* clean up old mapping */
			vm_page_unlock_queues();
			/* kmem_realloc leaves the old region mapped. Get rid of it. */
			kmem_free(kernel_map, (vm_offset_t) old_tokens, old_token_q_cur_size);
			vm_page_lock_queues();
		}

		/* Unblock waiting threads */
		token_q_allocating = 0;
		thread_wakeup((event_t) &token_q_allocating);

		goto find_available_token;
	}
	/*
	 * the new pagecount we got needs to be applied to all queues except
	 * the obsolete queue
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
		assert(pages <= TOKEN_COUNT_MAX);
		purgeable_queues[i].new_pages = (int32_t) pages;
		assert(purgeable_queues[i].new_pages == pages);
	}
	token_new_pagecount = 0;
	/* set token counter value */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
		tokens[token].count = queue->new_pages;
	else
		tokens[token].count = 0;	/* all obsolete items are
						 * ripe immediately */
	queue->new_pages = 0;
	/* put token on token counter list */
	tokens[token].next = 0;
	if (queue->token_q_tail == 0) {
		assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
		queue->token_q_head = token;
	} else {
		tokens[queue->token_q_tail].next = token;
	}
	if (queue->token_q_unripe == 0) {	/* only ripe tokens (token
						 * count == 0) in queue */
		if (tokens[token].count > 0)
			queue->token_q_unripe = token;	/* first unripe token */
		else
			available_for_purge++;	/* added a ripe token?
						 * increase available count */
	}
	queue->token_q_tail = token;
	queue->debug_count_tokens++;
	/* Check both queues, since we modified the new_pages count on each */
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
			      queue->type,
			      tokens[token].count,	/* num pages on token
							 * (last token) */
			      queue->debug_count_tokens,
			      0,
			      0);

	return KERN_SUCCESS;
}
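
/*
 * Removing a token must not lose the pages it represents: its count is
 * folded into the next token, or into queue->new_pages when the queue ends
 * up empty, so the per-queue page accounting checked by
 * vm_purgeable_token_check_queue() is preserved.
 */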
/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t     token;
	token = queue->token_q_head;

	if (token) {
		assert(queue->token_q_tail);
		if (queue->token_q_head == queue->token_q_unripe) {
			/* no ripe tokens... must move unripe pointer */
			queue->token_q_unripe = tokens[token].next;
		} else {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		}

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		queue->token_q_head = tokens[token].next;
		if (queue->token_q_head) {
			tokens[queue->token_q_head].count += tokens[token].count;
		} else {
			/* currently no other tokens in the queue */
			/*
			 * the page count must be added to the next newly
			 * created token
			 */
			queue->new_pages += tokens[token].count;
			/* if head is zero, tail is too */
			queue->token_q_tail = 0;
		}

		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
	}
	return token;
}
/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t     token = vm_purgeable_token_remove_first(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		token_free_idx = token;
	}
}
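
/*
 * Aging example (illustrative numbers only): suppose a queue holds tokens
 * with counts 0, 2, 3 from head to tail.  The head token is ripe and
 * token_q_unripe points at the count-2 token.  Each call below decrements
 * that count by one page; after two calls the token ripens,
 * available_for_purge is incremented and token_q_unripe advances to the
 * count-3 token.  Ripe tokens are never decremented further.
 */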
/* Call with page queue locked. */
void
vm_purgeable_q_advance_all(void)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/* check queue counters - if they get really large, scale them back.
	 * They tend to get that large when there is no purgeable queue action */
	int i;
	if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {	/* a system idling for years might get there */
		for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
			int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
			assert(pages <= TOKEN_COUNT_MAX);
			purgeable_queues[i].new_pages = (int32_t) pages;
			assert(purgeable_queues[i].new_pages == pages);
		}
		token_new_pagecount = 0;
	}
	/*
	 * Decrement token counters. A token counter can be zero, this means the
	 * object is ripe to be purged. It is not purged immediately, because that
	 * could cause several objects to be purged even if purging one would satisfy
	 * the memory needs. Instead, the pageout thread purges one after the other
	 * by calling vm_purgeable_object_purge_one and then rechecking the memory
	 * balance.
	 *
	 * No need to advance obsolete queue - all items are ripe there,
	 * always
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t queue = &purgeable_queues[i];
		uint32_t num_pages = 1;

		/* Iterate over tokens as long as there are unripe tokens. */
		while (queue->token_q_unripe) {
			if (tokens[queue->token_q_unripe].count && num_pages) {
				tokens[queue->token_q_unripe].count -= 1;
				num_pages -= 1;
			}

			if (tokens[queue->token_q_unripe].count == 0) {
				queue->token_q_unripe = tokens[queue->token_q_unripe].next;
				available_for_purge++;
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
						      queue->type,
						      tokens[queue->token_q_head].count,	/* num pages on new
												 * first token */
						      0,
						      available_for_purge,
						      0);
				continue;	/* One token ripened. Make sure to
						 * check the next one. */
			}
			if (num_pages == 0)
				break;	/* Current token not ripe and no more pages.
					 * Work done. */
		}

		/*
		 * if there are no unripe tokens in the queue, decrement the
		 * new_pages counter instead. new_pages can be negative, but must be
		 * canceled out by token_new_pagecount -- since the inactive queue as a
		 * whole always contains a nonnegative number of pages
		 */
		if (!queue->token_q_unripe) {
			queue->new_pages -= num_pages;
			assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
		}

		vm_purgeable_token_check_queue(queue);
	}
}
/*
 * Grab any ripe object and purge it: obsolete queue first, then go through
 * each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *	Yes - purge it. Remove token.
 *	No - check other queue. Is there an object?
 *		No - increment group, then go to (1)
 *		Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *		      token from other queue and migrate unripe token from this
 *		      queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);

	/* return token to free list. advance token list. */
	token_idx_t     new_head = tokens[queue->token_q_head].next;
	tokens[queue->token_q_head].next = token_free_idx;
	token_free_idx = queue->token_q_head;
	queue->token_q_head = new_head;
	if (new_head == 0)
		queue->token_q_tail = 0;

	queue->debug_count_tokens--;
	vm_purgeable_token_check_queue(queue);

	available_for_purge--;
	assert(available_for_purge >= 0);
}
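
/*
 * Token migration, used by the routine below: when the ripe token is taken
 * from queue2 on behalf of `queue`, the unripe head token of `queue` is
 * re-inserted into queue2 at the position matching its remaining count, and
 * either the following token's count or queue2->new_pages is reduced by the
 * same amount, so the number of pages each queue's tokens represent is
 * unchanged.
 */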
/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	assert(queue->token_q_head);

	if (tokens[queue->token_q_head].count == 0) {
		/* This queue has a ripe token. Remove. */
		vm_purgeable_token_remove_ripe(queue);
	} else {
		/*
		 * queue2 must have a ripe token. Remove, and migrate one
		 * from queue to queue2.
		 */
		vm_purgeable_token_remove_ripe(queue2);

		/* migrate unripe token */
		token_idx_t     token;
		token_cnt_t     count;

		/* remove token from queue1 */
		assert(queue->token_q_unripe == queue->token_q_head);	/* queue1 had no unripe
									 * tokens, remember? */
		token = vm_purgeable_token_remove_first(queue);

		count = tokens[token].count;

		/* migrate to queue2 */
		/* go to migration target loc */
		token_idx_t    *token_in_queue2 = &queue2->token_q_head;
		while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
			count -= tokens[*token_in_queue2].count;
			token_in_queue2 = &tokens[*token_in_queue2].next;
		}

		if ((*token_in_queue2 == queue2->token_q_unripe) ||	/* becomes the first
									 * unripe token */
		    (queue2->token_q_unripe == 0))
			queue2->token_q_unripe = token;	/* must update unripe
							 * pointer */

		/* insert token */
		tokens[token].count = count;
		tokens[token].next = *token_in_queue2;

		/*
		 * if inserting at the end, reduce new_pages by that value;
		 * if inserting before a token, reduce the counter of that token
		 */
		if (*token_in_queue2 == 0) {	/* insertion at end of queue2 */
			queue2->token_q_tail = token;	/* must update tail
							 * pointer */
			assert(queue2->new_pages >= (int32_t) count);
			queue2->new_pages -= count;
		} else {
			assert(tokens[*token_in_queue2].count >= count);
			tokens[*token_in_queue2].count -= count;
		}
		*token_in_queue2 = token;

		queue2->debug_count_tokens++;
		vm_purgeable_token_check_queue(queue2);
	}
}
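
/*
 * Note: only a try-lock is attempted on each object.  The caller already
 * holds vm_purgeable_queue_lock, and vm_purgeable_object_add/remove take
 * that lock while holding the object lock, so waiting for an object lock
 * here could deadlock.  Callers must handle a VM_OBJECT_NULL return even
 * when the group is not empty.
 */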
/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
{
	lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
	/*
	 * Usually we would pick the first element from a queue. However, we
	 * might not be able to get a lock on it, in which case we try the
	 * remaining elements in order.
	 */

	vm_object_t     object;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (vm_object_lock_try(object)) {
			/* Locked. Great. We'll take it. Remove and return. */
			queue_remove(&queue->objq[group], object,
				     vm_object_t, objq);
			object->objq.next = 0;
			object->objq.prev = 0;
			queue->debug_count_objects--;
			return object;
		}
	}

	return 0;
}
/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
	enum purgeable_q_type i;
	int             group;
	purgeable_q_t   queue;
	vm_object_t     object;
	unsigned int    purged_count;
	uint32_t        collisions;

	purged_count = 0;
	collisions = 0;

restart:
	lck_mtx_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		queue = &purgeable_queues[i];

		/*
		 * Look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			while (!queue_empty(&queue->objq[group])) {
				object = vm_purgeable_object_find_and_lock(queue, group);
				if (object == VM_OBJECT_NULL) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					mutex_pause(collisions++);
					goto restart;
				}

				lck_mtx_unlock(&vm_purgeable_queue_lock);

				/* Lock the page queue here so we don't hold it
				 * over the whole, lengthy operation */
				vm_page_lock_queues();
				vm_purgeable_token_remove_first(queue);
				vm_page_unlock_queues();

				assert(object->purgable == VM_PURGABLE_VOLATILE);
				(void) vm_object_purge(object);
				vm_object_unlock(object);
				purged_count++;
				goto restart;
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
			      purged_count,	/* # of purged objects */
			      0,
			      available_for_purge,
			      0,
			      0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);
}
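
/*
 * Purge a single ripe object, if one can be found and locked.  The first
 * queue with a ripe token is searched group by group, falling back to the
 * other FIFO/LIFO queue (with token migration) when this queue has no
 * lockable object in the current group.
 * Call with page queue locked.
 */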
boolean_t
vm_purgeable_object_purge_one(void)
{
	enum purgeable_q_type i;
	int             group;
	vm_object_t     object = 0;
	purgeable_q_t   queue, queue2;

	/* Need the page queue lock since we'll be changing the token queue. */
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		queue = &purgeable_queues[i];

		/*
		 * Are there any ripe tokens on this queue? If yes, we'll
		 * find an object to purge there
		 */
		if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
			continue;	/* no token? Look at next purgeable
					 * queue */

		/*
		 * Now look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			if (!queue_empty(&queue->objq[group]) &&
			    (object = vm_purgeable_object_find_and_lock(queue, group))) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				vm_purgeable_token_choose_and_delete_ripe(queue, 0);
				goto purge_now;
			}
			if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
				/* This is the token migration case, and it works between
				 * FIFO and LIFO only */
				queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
							   PURGEABLE_Q_TYPE_FIFO :
							   PURGEABLE_Q_TYPE_LIFO];

				if (!queue_empty(&queue2->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue2, group))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
					goto purge_now;
				}
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	/*
	 * because we have to do a try_lock on the objects which could fail,
	 * we could end up with no object to purge at this time, even though
	 * we have objects in a purgeable state
	 */
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return FALSE;

purge_now:

	assert(object);
	assert(object->purgable == VM_PURGABLE_VOLATILE);
	vm_page_unlock_queues();	/* Unlock for call to vm_object_purge() */
	(void) vm_object_purge(object);
	vm_object_unlock(object);
	vm_page_lock_queues();

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
			      object,	/* purged object */
			      0,
			      available_for_purge,
			      0,
			      0);

	return TRUE;
}
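
/*
 * Rough usage sketch (illustrative only; the actual policy lives in the
 * pageout code, not in this file): a caller aging and purging volatile
 * memory would be expected to do something like
 *
 *	vm_page_lock_queues();
 *	vm_purgeable_q_advance_all();			// age tokens by one page
 *	if (memory_is_tight && available_for_purge > 0)
 *		(void) vm_purgeable_object_purge_one();	// purge one ripe object
 *	vm_page_unlock_queues();
 *
 * where memory_is_tight stands for whatever memory-pressure test the caller
 * applies; it is not a symbol defined here.
 */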
/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
	vm_object_lock_assert_exclusive(object);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
		group = 0;
	if (queue->type != PURGEABLE_Q_TYPE_LIFO)	/* fifo and obsolete are
							 * fifo-queued */
		queue_enter(&queue->objq[group], object, vm_object_t, objq);	/* last to die */
	else
		queue_enter_first(&queue->objq[group], object, vm_object_t, objq);	/* first to die */

	queue->debug_count_objects++;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);

	lck_mtx_unlock(&vm_purgeable_queue_lock);
}
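
/*
 * Note: vm_purgeable_object_remove() below does a linear scan over every
 * queue and group, since an object does not record which purgeable queue or
 * group it was entered on; the queue it was found on is returned to the
 * caller.
 */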
/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
	enum purgeable_q_type i;
	int             group;

	vm_object_lock_assert_exclusive(object);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t queue = &purgeable_queues[i];
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			vm_object_t o;
			for (o = (vm_object_t) queue_first(&queue->objq[group]);
			     !queue_end(&queue->objq[group], (queue_entry_t) o);
			     o = (vm_object_t) queue_next(&o->objq)) {
				if (o == object) {
					queue_remove(&queue->objq[group], object,
						     vm_object_t, objq);
					queue->debug_count_objects--;
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
							      0,
							      tokens[queue->token_q_head].count,
							      queue->type,
							      group,
							      0);
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					object->objq.next = 0;
					object->objq.prev = 0;
					return &purgeable_queues[i];
				}
			}
		}
	}
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return 0;
}