/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>                 /* kmem_alloc */
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>
#include <kern/sched_prim.h>
/*
 * Token table, grown on demand in vm_purgeable_token_add().  struct token
 * is assumed to come from vm_purgeable_internal.h and to provide the
 * count and next fields used throughout this file.
 */
struct token    *tokens;

token_idx_t     token_q_max_cnt = 0;
vm_size_t       token_q_cur_size = 0;

token_idx_t     token_free_idx = 0;     /* head of free queue */
token_idx_t     token_init_idx = 1;     /* token 0 is reserved!! */
int32_t         token_new_pagecount = 0;        /* count of pages that will
                                                 * be added onto token queue */

int             available_for_purge = 0;        /* increase when ripe token
                                                 * added, decrease when ripe
                                                 * token removed; protect with
                                                 * the page queue lock */

static int      token_q_allocating = 0;         /* flag to singlethread allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
#define TOKEN_ADD               0x40    /* 0x100 */
#define TOKEN_DELETE            0x41    /* 0x104 */
#define TOKEN_QUEUE_ADVANCE     0x42    /* 0x108 actually means "token ripened" */
#define TOKEN_OBJECT_PURGED     0x43    /* 0x10c */
#define OBJECT_ADDED            0x50    /* 0x140 */
#define OBJECT_REMOVED          0x51    /* 0x144 */
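
/*
 * These are the kdebug event codes used with KERNEL_DEBUG_CONSTANT below.
 * The hex values in the comments are the codes as they appear in a trace,
 * i.e. shifted left by two bits (0x40 << 2 == 0x100, 0x41 << 2 == 0x104,
 * and so on).
 */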
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
    int             token_cnt = 0, page_cnt = 0;
    token_idx_t     token = queue->token_q_head;
    token_idx_t     unripe = 0;
    int             our_inactive_count;

    /* walk the token list, counting tokens and the pages they represent */
    while (token) {
        if (tokens[token].count != 0) {
            assert(queue->token_q_unripe);
            if (unripe == 0) {
                /* first token with a nonzero count must be the unripe head */
                assert(token == queue->token_q_unripe);
                unripe = token;
            }
            page_cnt += tokens[token].count;
        }
        if (tokens[token].next == 0)
            assert(queue->token_q_tail == token);

        token_cnt++;
        token = tokens[token].next;
    }

    if (unripe)
        assert(queue->token_q_unripe == unripe);
    assert(token_cnt == queue->debug_count_tokens);

    /* obsolete queue doesn't maintain token counts */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
        our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
        assert(our_inactive_count >= 0);
        assert((uint32_t) our_inactive_count == vm_page_inactive_count);
    }
}
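
/*
 * Worked example (illustrative, not from the original source): with two
 * tokens of count 5 and 3, page_cnt is 8; if 4 pages entered this queue
 * since the last token was added (queue->new_pages == 4) and 2 more are
 * still waiting to be distributed (token_new_pagecount == 2), then
 * 8 + 4 + 2 must equal vm_page_inactive_count -- every inactive page is
 * accounted for by exactly one of these counters.
 */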
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
    /* new token */
    token_idx_t     token;
    enum purgeable_q_type i;

find_available_token:

    if (token_free_idx) {                           /* unused tokens available */
        token = token_free_idx;
        token_free_idx = tokens[token_free_idx].next;
    } else if (token_init_idx < token_q_max_cnt) {  /* lazy token array init */
        token = token_init_idx;
        token_init_idx++;
    } else {                                        /* allocate more memory */
        /* Wait if another thread is inside the memory alloc section */
        while (token_q_allocating) {
            wait_result_t res = thread_sleep_mutex((event_t)&token_q_allocating,
                                                   &vm_page_queue_lock,
                                                   THREAD_UNINT);
            if (res != THREAD_AWAKENED)
                return KERN_ABORTED;
        }

        /* Check whether memory is still maxed out */
        if (token_init_idx < token_q_max_cnt)
            goto find_available_token;

        /* Still no memory. Allocate some. */
        token_q_allocating = 1;

        /* Drop page queue lock so we can allocate */
        vm_page_unlock_queues();

        struct token   *new_loc;
        vm_size_t       alloc_size = token_q_cur_size + PAGE_SIZE;
        kern_return_t   result;

        if (token_q_cur_size) {
            result = kmem_realloc(kernel_map, (vm_offset_t) tokens, token_q_cur_size,
                                  (vm_offset_t *) &new_loc, alloc_size);
        } else {
            result = kmem_alloc(kernel_map, (vm_offset_t *) &new_loc, alloc_size);
        }

        vm_page_lock_queues();

        if (result) {
            /* Unblock waiting threads */
            token_q_allocating = 0;
            thread_wakeup((event_t)&token_q_allocating);
            return result;
        }

        /* If we get here, we allocated new memory. Update pointers and
         * dealloc old range */
        struct token   *old_tokens = tokens;
        tokens = new_loc;
        vm_size_t       old_token_q_cur_size = token_q_cur_size;
        token_q_cur_size = alloc_size;
        token_q_max_cnt = token_q_cur_size / sizeof(struct token);
        assert(token_init_idx < token_q_max_cnt);   /* We must have a free token now */
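
        /*
         * Example (illustrative; the actual sizes are not spelled out in
         * this file): with a 4K PAGE_SIZE and an 8-byte struct token, each
         * page added to the token array makes room for 512 more tokens, so
         * token_q_max_cnt grows by 512 per allocation round.
         */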
        if (old_token_q_cur_size) {     /* clean up old mapping */
            vm_page_unlock_queues();
            /* kmem_realloc leaves the old region mapped. Get rid of it. */
            kmem_free(kernel_map, (vm_offset_t) old_tokens, old_token_q_cur_size);
            vm_page_lock_queues();
        }

        /* Unblock waiting threads */
        token_q_allocating = 0;
        thread_wakeup((event_t)&token_q_allocating);

        goto find_available_token;
    }
    assert(token);

    /*
     * the new pagecount we got needs to be applied to all queues except
     * the obsolete queue
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
        assert(pages >= 0);
        assert(pages <= TOKEN_COUNT_MAX);
        purgeable_queues[i].new_pages = pages;
    }
    token_new_pagecount = 0;

    /* set token counter value */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
        tokens[token].count = queue->new_pages;
    else
        tokens[token].count = 0;        /* all obsolete items are
                                         * ripe immediately */
    queue->new_pages = 0;
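
    /*
     * Worked example (illustrative): if 37 pages entered the inactive queue
     * since the last token was added, queue->new_pages is 37, so the new
     * token starts with count 37 and new_pages restarts at 0.  The token
     * ripens only after 37 further pages have moved through the queue (see
     * vm_purgeable_q_advance_all below).
     */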
    /* put token on token counter list */
    tokens[token].next = 0;
    if (queue->token_q_tail == 0) {
        assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
        queue->token_q_head = token;
    } else {
        tokens[queue->token_q_tail].next = token;
    }
    if (queue->token_q_unripe == 0) {   /* only ripe tokens (token
                                         * count == 0) in queue */
        if (tokens[token].count > 0)
            queue->token_q_unripe = token;      /* first unripe token */
        else
            available_for_purge++;      /* added a ripe token?
                                         * increase available count */
    }
    queue->token_q_tail = token;

    queue->debug_count_tokens++;
    /* Check both queues, since we modified the new_pages count on each */
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
                          queue->type,
                          tokens[token].count,          /* num pages on token
                                                         * (last token) */
                          queue->debug_count_tokens,
                          0,
                          0);

    return KERN_SUCCESS;
}
/*
 * Remove the first token from the queue and return its index. Add its count
 * to the count of the next token.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
    token_idx_t     token;

    token = queue->token_q_head;

    if (token) {
        assert(queue->token_q_tail);
        if (queue->token_q_head == queue->token_q_unripe) {
            /* no ripe tokens... must move unripe pointer */
            queue->token_q_unripe = tokens[token].next;
        } else {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        }

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        queue->token_q_head = tokens[token].next;
        if (queue->token_q_head) {
            tokens[queue->token_q_head].count += tokens[token].count;
        } else {
            /* currently no other tokens in the queue */
            /*
             * the page count must be added to the next newly
             * created token
             */
            queue->new_pages += tokens[token].count;
            /* if head is zero, tail is too */
            queue->token_q_tail = 0;
        }

        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count,        /* num pages on new
                                                                         * first token */
                              token_new_pagecount,      /* num pages waiting for
                                                         * next token */
                              available_for_purge,
                              0);
    }
    return token;
}
/* Delete the first token from the queue. Return the token to the free list. */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
    token_idx_t     token = vm_purgeable_token_remove_first(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        token_free_idx = token;
    }
}
void
vm_purgeable_q_advance_all(void)
{
    enum purgeable_q_type i;

    /* check queue counters - if they get really large, scale them back.
     * They tend to get that large when there is no purgeable queue action */
    if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {     /* a system idling for years might get here */
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
            int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
            assert(pages >= 0);
            assert(pages <= TOKEN_COUNT_MAX);
            purgeable_queues[i].new_pages = pages;
        }
        token_new_pagecount = 0;
    }

    /*
     * Decrement token counters. A token counter can be zero, this means the
     * object is ripe to be purged. It is not purged immediately, because that
     * could cause several objects to be purged even if purging one would
     * satisfy the memory needs. Instead, the pageout thread purges one after
     * the other by calling vm_purgeable_object_purge_one and then rechecking
     * the memory balance.
     *
     * No need to advance the obsolete queue - all items there are always ripe.
     */
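
    /*
     * Worked example (illustrative): with unripe tokens of count 2 and 5 at
     * the head of a queue, one call decrements the first token to 1; the
     * next call decrements it to 0, marks it ripe (available_for_purge goes
     * up by one), advances token_q_unripe to the count-5 token and stops,
     * since the single-page budget of the call is spent.
     */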
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t   queue = &purgeable_queues[i];
        uint32_t        num_pages = 1;

        /* Iterate over tokens as long as there are unripe tokens. */
        while (queue->token_q_unripe) {
            if (tokens[queue->token_q_unripe].count && num_pages) {
                tokens[queue->token_q_unripe].count -= 1;
                num_pages -= 1;
            }

            if (tokens[queue->token_q_unripe].count == 0) {
                queue->token_q_unripe = tokens[queue->token_q_unripe].next;
                available_for_purge++;
                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_QUEUE_ADVANCE)),
                                      queue->type,
                                      tokens[queue->token_q_head].count,        /* num pages on new
                                                                                 * first token */
                                      0,
                                      available_for_purge,
                                      0);
                continue;       /* One token ripened. Make sure to
                                 * check the next. */
            }
            if (num_pages == 0)
                break;          /* Current token not ripe and no more pages.
                                 * Work done. */
        }

        /*
         * if there are no unripe tokens in the queue, decrement the
         * new_pages counter instead. new_pages can be negative, but must be
         * canceled out by token_new_pagecount -- since the inactive queue as
         * a whole always contains a nonnegative number of pages
         */
        if (!queue->token_q_unripe) {
            queue->new_pages -= num_pages;
            assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
        }

        vm_purgeable_token_check_queue(queue);
    }
}
/*
 * Grab any ripe object and purge it: check the obsolete queue first, then go
 * through each volatile group. Select a queue with a ripe token.
 * Start with the first group (0)
 * 1. Look at the queue. Is there an object?
 *    Yes - purge it. Remove the token.
 *    No  - check the other queue. Is there an object?
 *          No  - increment the group, then go to (1)
 *          Yes - purge it. Remove the token. If there is no ripe token,
 *                remove a ripe token from the other queue and migrate an
 *                unripe token from this queue to the other queue.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
    assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);

    /* return token to free list. advance token list. */
    token_idx_t     new_head = tokens[queue->token_q_head].next;
    tokens[queue->token_q_head].next = token_free_idx;
    token_free_idx = queue->token_q_head;
    queue->token_q_head = new_head;
    if (new_head == 0)
        queue->token_q_tail = 0;

    queue->debug_count_tokens--;
    vm_purgeable_token_check_queue(queue);

    available_for_purge--;
    assert(available_for_purge >= 0);
}
/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2 and migrate an unripe token
 * from queue to queue2.
 */
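/*
 * Worked example (illustrative): suppose the migrated token carries count 6
 * and queue2 holds tokens with counts 3, 2 and 4.  The walk below subtracts
 * 3 and then 2, leaving count 1, and stops in front of the count-4 token;
 * the migrated token is inserted there with count 1, and the count-4 token
 * is reduced to 3, so the total page count represented by queue2's tokens
 * is unchanged.
 */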
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
    assert(queue->token_q_head);

    if (tokens[queue->token_q_head].count == 0) {
        /* This queue has a ripe token. Remove. */
        vm_purgeable_token_remove_ripe(queue);
    } else {
        assert(queue2);
        /*
         * queue2 must have a ripe token. Remove, and migrate one
         * from queue to queue2.
         */
        vm_purgeable_token_remove_ripe(queue2);

        /* migrate unripe token */
        token_idx_t     token;
        token_cnt_t     count;

        /* remove token from queue1 */
        assert(queue->token_q_unripe == queue->token_q_head);   /* queue1 had no unripe
                                                                 * tokens, remember? */
        token = vm_purgeable_token_remove_first(queue);
        assert(token);

        count = tokens[token].count;

        /* migrate to queue2 */
        /* go to migration target loc */
        token_idx_t    *token_in_queue2 = &queue2->token_q_head;
        while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
            count -= tokens[*token_in_queue2].count;
            token_in_queue2 = &tokens[*token_in_queue2].next;
        }

        if ((*token_in_queue2 == queue2->token_q_unripe) ||     /* becomes the first
                                                                 * unripe token */
            (queue2->token_q_unripe == 0))
            queue2->token_q_unripe = token;     /* must update unripe
                                                 * pointer */

        /* insert token */
        tokens[token].count = count;
        tokens[token].next = *token_in_queue2;

        /*
         * if inserting at the end, reduce new_pages by that value;
         * if inserting before a token, reduce the counter of that token
         */
        if (*token_in_queue2 == 0) {    /* insertion at end of queue2 */
            queue2->token_q_tail = token;       /* must update tail
                                                 * pointer */
            assert(queue2->new_pages >= (int32_t) count);
            queue2->new_pages -= count;
        } else {
            assert(tokens[*token_in_queue2].count >= count);
            tokens[*token_in_queue2].count -= count;
        }
        *token_in_queue2 = token;

        queue2->debug_count_tokens++;
        vm_purgeable_token_check_queue(queue2);
    }
}
/* Find an object that can be locked. Returns the locked object, or 0 if none. */
static vm_object_t
vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
{
    /*
     * Usually we would pick the first element from a queue. However, we
     * might not be able to get a lock on it, in which case we try the
     * remaining elements in order.
     */
    vm_object_t     object;

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (vm_object_lock_try(object)) {
            /* Locked. Great. We'll take it. Remove and return. */
            queue_remove(&queue->objq[group], object,
                         vm_object_t, objq);
            object->objq.next = 0;
            object->objq.prev = 0;
            queue->debug_count_objects--;
            return object;
        }
    }

    return 0;
}
void
vm_purgeable_object_purge_one(void)
{
    enum purgeable_q_type i;
    int             group;
    vm_object_t     object = 0;
    purgeable_q_t   queue, queue2;

    mutex_lock(&vm_purgeable_queue_lock);
    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        queue = &purgeable_queues[i];

        /*
         * Are there any ripe tokens on this queue? If yes, we'll
         * find an object to purge there
         */
        if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
            continue;   /* no ripe token? Look at the next purgeable
                         * queue */

        /*
         * Now look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            if (!queue_empty(&queue->objq[group]) &&
                (object = vm_purgeable_object_find_and_lock(queue, group))) {
                mutex_unlock(&vm_purgeable_queue_lock);
                vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                goto purge_now;
            }
            if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
                /* This is the token migration case, and it works between
                 * FIFO and LIFO only */
                queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
                                           PURGEABLE_Q_TYPE_FIFO :
                                           PURGEABLE_Q_TYPE_LIFO];

                if (!queue_empty(&queue2->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue2, group))) {
                    mutex_unlock(&vm_purgeable_queue_lock);
                    vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                    goto purge_now;
                }
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    /*
     * because we have to do a try_lock on the objects, which could fail,
     * we could end up with no object to purge at this time, even though
     * we have objects in a purgeable state
     */
    mutex_unlock(&vm_purgeable_queue_lock);
    return;

purge_now:

    assert(object);
    (void) vm_object_purge(object);
    vm_object_unlock(object);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_OBJECT_PURGED)),
                          (unsigned int) object,        /* purged object */
                          0,
                          available_for_purge,
                          0,
                          0);
}
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
    mutex_lock(&vm_purgeable_queue_lock);

    if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
        group = 0;      /* obsolete queue uses a single group */
    if (queue->type != PURGEABLE_Q_TYPE_LIFO)       /* fifo and obsolete are
                                                     * fifo-queued */
        queue_enter(&queue->objq[group], object, vm_object_t, objq);            /* last to die */
    else
        queue_enter_first(&queue->objq[group], object, vm_object_t, objq);      /* first to die */

    queue->debug_count_objects++;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADDED)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);

    mutex_unlock(&vm_purgeable_queue_lock);
}
/* Look for the object. If found, remove it from the purgeable queue. */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
    enum purgeable_q_type i;
    int             group;

    mutex_lock(&vm_purgeable_queue_lock);
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t   queue = &purgeable_queues[i];
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            vm_object_t     o;
            for (o = (vm_object_t) queue_first(&queue->objq[group]);
                 !queue_end(&queue->objq[group], (queue_entry_t) o);
                 o = (vm_object_t) queue_next(&o->objq)) {
                if (o == object) {
                    queue_remove(&queue->objq[group], object,
                                 vm_object_t, objq);

                    queue->debug_count_objects--;
                    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVED)),
                                          0,
                                          tokens[queue->token_q_head].count,
                                          queue->type,
                                          group,
                                          0);

                    mutex_unlock(&vm_purgeable_queue_lock);
                    object->objq.next = 0;
                    object->objq.prev = 0;
                    return &purgeable_queues[i];
                }
            }
        }
    }
    mutex_unlock(&vm_purgeable_queue_lock);
    return 0;
}