/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <vm/vm_page.h>
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>
struct token	tokens[MAX_VOLATILE];

token_idx_t	token_free_idx = 0;		/* head of free queue */
token_cnt_t	token_init_count = 1;		/* token 0 is reserved!! */
token_cnt_t	token_new_pagecount = 0;	/* count of pages that will
						 * be added onto token queue */
int		available_for_purge = 0;	/* increase when ripe token
						 * added, decrease when ripe
						 * token removed; protect with
						 * the page queue lock */
struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];

#define TOKEN_ADD		0x40	/* 0x100 */
#define TOKEN_DELETE		0x41	/* 0x104 */
#define TOKEN_QUEUE_ADVANCE	0x42	/* 0x108 actually means "token ripened" */
#define TOKEN_OBJECT_PURGED	0x43	/* 0x10c */
#define OBJECT_ADDED		0x50	/* 0x140 */
#define OBJECT_REMOVED		0x51	/* 0x144 */
static void vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue);
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);
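
/*
 * Consistency check: walk a queue's token list and re-derive the token and
 * page counts, verifying them against the queue's debug counters and the
 * global inactive page count. In the original sources this is presumably
 * compiled only under a debug/assert configuration; that guard is not
 * shown here.
 */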
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
	int		token_cnt = 0, page_cnt = 0;
	token_idx_t	token = queue->token_q_head;
	token_idx_t	unripe = 0;
	int		our_inactive_count;

	while (token) {
		if (tokens[token].count != 0) {
			assert(queue->token_q_unripe);
			if (unripe == 0) {
				assert(token == queue->token_q_unripe);
				unripe = token;
			}
			page_cnt += tokens[token].count;
		}
		if (tokens[token].next == 0)
			assert(queue->token_q_tail == token);

		token_cnt++;
		token = tokens[token].next;
	}

	if (unripe)
		assert(queue->token_q_unripe == unripe);
	assert(token_cnt == queue->debug_count_tokens);
	our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
	assert(our_inactive_count >= 0);
	assert((uint32_t) our_inactive_count == vm_page_inactive_count);
}
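
/*
 * Add a token to the given queue. The token's counter is seeded with the
 * number of pages accumulated in queue->new_pages since the previous token;
 * tokens on the obsolete queue always start at zero, so they are ripe
 * immediately. The caller is presumably expected to hold the page queue
 * lock, since this touches token_new_pagecount and available_for_purge.
 */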
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
	/* new token */
	token_idx_t	token;
	enum purgeable_q_type i;

	if (token_init_count < MAX_VOLATILE) {	/* lazy token array init */
		token = token_init_count;
		token_init_count++;
	} else if (token_free_idx) {
		token = token_free_idx;
		token_free_idx = tokens[token_free_idx].next;
	} else {
		return KERN_FAILURE;
	}

	/*
	 * the new pagecount we got needs to be applied to all queues except
	 * the obsolete queue
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_queues[i].new_pages += token_new_pagecount;
		assert(purgeable_queues[i].new_pages >= 0);
		assert((uint64_t) (purgeable_queues[i].new_pages) <= TOKEN_COUNT_MAX);
	}
	token_new_pagecount = 0;

	/* set token counter value */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
		tokens[token].count = queue->new_pages;
	else
		tokens[token].count = 0;	/* all obsolete items are
						 * ripe immediately */
	queue->new_pages = 0;

	/* put token on token counter list */
	tokens[token].next = 0;
	if (queue->token_q_tail == 0) {
		assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
		queue->token_q_head = token;
	} else {
		tokens[queue->token_q_tail].next = token;
	}
	if (queue->token_q_unripe == 0) {	/* only ripe tokens (token
						 * count == 0) in queue */
		if (tokens[token].count > 0)
			queue->token_q_unripe = token;	/* first unripe token */
		else
			available_for_purge++;	/* added a ripe token?
						 * increase available count */
	}
	queue->token_q_tail = token;

	queue->debug_count_tokens++;
	/* Check both queues, since we modified the new_pages count on each */
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
			      queue->type,
			      tokens[token].count,	/* num pages on token
							 * (last token) */
			      queue->debug_count_tokens,
			      0,
			      0);

	return KERN_SUCCESS;
}
/*
 * Remove the first token from the queue and return its index. Add its count
 * to the count of the next token.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
	token_idx_t	token;
	token = queue->token_q_head;

	assert(token);

	if (token) {
		assert(queue->token_q_tail);
		if (queue->token_q_head == queue->token_q_unripe) {
			/* no ripe tokens... must move unripe pointer */
			queue->token_q_unripe = tokens[token].next;
		} else {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		}

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		queue->token_q_head = tokens[token].next;
		if (queue->token_q_head) {
			tokens[queue->token_q_head].count += tokens[token].count;
		} else {
			/* currently no other tokens in the queue */
			/*
			 * the page count must be added to the next newly
			 * created token
			 */
			queue->new_pages += tokens[token].count;
			/* if head is zero, tail is too */
			queue->token_q_tail = 0;
		}

		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
	}
	return token;
}
/* Delete the first token from the queue. Return the token to the free list. */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
	token_idx_t	token = vm_purgeable_token_remove_first(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		token_free_idx = token;
	}
}
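
/*
 * Advance every queue's token counters by num_pages. Presumably driven from
 * the pageout path as pages move through the inactive queue.
 */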
void
vm_purgeable_q_advance_all(uint32_t num_pages)
{
	/*
	 * don't need to advance obsolete queue - all items are ripe there,
	 * always
	 */
	vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);
}
/*
 * Decrements token counters. A token counter can be zero; this means the
 * object is ripe to be purged. It is not purged immediately, because that
 * could cause several objects to be purged even if purging one would satisfy
 * the memory needs. Instead, the pageout thread purges one after the other
 * by calling vm_purgeable_object_purge_one and then rechecking the memory
 * balance.
 */
static void
vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue)
{
	/* Iterate over tokens as long as there are unripe tokens. */
	while (queue->token_q_unripe) {
		int min = (tokens[queue->token_q_unripe].count < num_pages) ?
		    tokens[queue->token_q_unripe].count : num_pages;
		tokens[queue->token_q_unripe].count -= min;
		num_pages -= min;

		if (tokens[queue->token_q_unripe].count == 0) {
			/* token ripened; advance the unripe pointer */
			queue->token_q_unripe = tokens[queue->token_q_unripe].next;
			available_for_purge++;
			KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_QUEUE_ADVANCE)),
					      queue->type,
					      tokens[queue->token_q_head].count,	/* num pages on new
											 * first token */
					      0,
					      available_for_purge,
					      0);
			continue;	/* One token ripened. Make sure to
					 * check the next. */
		}
		if (num_pages == 0)
			break;	/* Current token not ripe and no more pages.
				 * Work done. */
	}

	/*
	 * if there are no unripe tokens in the queue, decrement the
	 * new_pages counter instead. new_pages can be negative, but must be
	 * canceled out by token_new_pagecount -- since the inactive queue as
	 * a whole always contains a nonnegative number of pages
	 */
	if (!queue->token_q_unripe) {
		queue->new_pages -= num_pages;
		assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
	}

	vm_purgeable_token_check_queue(queue);
}
/*
 * Grab any ripe object and purge it: obsolete queue first, then go through
 * each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *    Yes - purge it. Remove token.
 *    No - check other queue. Is there an object?
 *         No - increment group, then go to (1)
 *         Yes - purge it. Remove token. If there is no ripe token, remove a
 *               ripe token from the other queue and migrate an unripe token
 *               from this queue to the other queue.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
	assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);

	/* return token to free list. advance token list. */
	token_idx_t	new_head = tokens[queue->token_q_head].next;
	tokens[queue->token_q_head].next = token_free_idx;
	token_free_idx = queue->token_q_head;
	queue->token_q_head = new_head;
	if (new_head == 0)
		queue->token_q_tail = 0;

	queue->debug_count_tokens--;
	vm_purgeable_token_check_queue(queue);

	available_for_purge--;
	assert(available_for_purge >= 0);
}
/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
	assert(queue->token_q_head);

	if (tokens[queue->token_q_head].count == 0) {
		/* This queue has a ripe token. Remove. */
		vm_purgeable_token_remove_ripe(queue);
	} else {
		assert(queue2);
		/*
		 * queue2 must have a ripe token. Remove, and migrate one
		 * from queue to queue2.
		 */
		vm_purgeable_token_remove_ripe(queue2);

		/* migrate unripe token */
		token_idx_t	token;
		token_cnt_t	count;

		/* remove token from queue1 */
		assert(queue->token_q_unripe == queue->token_q_head);	/* queue1 had no unripe
									 * tokens, remember? */
		token = vm_purgeable_token_remove_first(queue);
		assert(token);

		count = tokens[token].count;

		/* migrate to queue2 */
		/* go to migration target loc */
		token_idx_t    *token_in_queue2 = &queue2->token_q_head;
		while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
			count -= tokens[*token_in_queue2].count;
			token_in_queue2 = &tokens[*token_in_queue2].next;
		}

		if ((*token_in_queue2 == queue2->token_q_unripe) ||	/* becomes the first
									 * unripe token */
		    (queue2->token_q_unripe == 0))
			queue2->token_q_unripe = token;	/* must update unripe
							 * pointer */

		/* insert token */
		tokens[token].count = count;
		tokens[token].next = *token_in_queue2;

		/*
		 * if inserting at the end, reduce new_pages by that value;
		 * if inserting before a token, reduce the counter of that token
		 */
		if (*token_in_queue2 == 0) {	/* insertion at end of queue2 */
			queue2->token_q_tail = token;	/* must update tail
							 * pointer */
			assert(queue2->new_pages >= (int32_t) count);
			queue2->new_pages -= count;
		} else {
			assert(tokens[*token_in_queue2].count >= count);
			tokens[*token_in_queue2].count -= count;
		}
		*token_in_queue2 = token;

		queue2->debug_count_tokens++;
		vm_purgeable_token_check_queue(queue2);
	}
}
/* Find an object that can be locked. Returns the locked object. */
static vm_object_t
vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
{
	/*
	 * Usually we would pick the first element from a queue. However, we
	 * might not be able to get a lock on it, in which case we try the
	 * remaining elements in order.
	 */
	vm_object_t	object;

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (vm_object_lock_try(object)) {
			/* Locked. Great. We'll take it. Remove and return. */
			queue_remove(&queue->objq[group], object,
				     vm_object_t, objq);
			object->objq.next = 0;
			object->objq.prev = 0;
			queue->debug_count_objects--;
			return object;
		}
	}

	return 0;
}
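
/*
 * Purge a single ripe object. The obsolete queue is considered first, then
 * every volatile group of the FIFO and LIFO queues; the first queue holding
 * a ripe token and a lockable object wins. Because only try-locks are taken
 * on the objects, this can return without purging anything even when
 * purgeable objects exist (see the comment near the bottom). Per the comment
 * above vm_purgeable_q_advance, the pageout thread calls this and then
 * rechecks memory before purging more.
 */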
void
vm_purgeable_object_purge_one(void)
{
	enum purgeable_q_type i;
	int		group;
	vm_object_t	object = 0;

	mutex_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t	queue = &purgeable_queues[i];

		/*
		 * Are there any ripe tokens on this queue? If yes, we'll
		 * find an object to purge there
		 */
		if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
			continue;	/* no token? Look at next purgeable
					 * queue */

		/*
		 * Now look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			if (!queue_empty(&queue->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue, group))) {
				mutex_unlock(&vm_purgeable_queue_lock);
				vm_purgeable_token_choose_and_delete_ripe(queue, 0);
				goto purge_now;
			} else {
				assert(i != PURGEABLE_Q_TYPE_OBSOLETE);	/* obsolete queue must
									 * have all objects in
									 * this group */
				purgeable_q_t	queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ? PURGEABLE_Q_TYPE_FIFO : PURGEABLE_Q_TYPE_LIFO];

				if (!queue_empty(&queue2->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue2, group))) {
					mutex_unlock(&vm_purgeable_queue_lock);
					vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
					goto purge_now;
				}
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	/*
	 * because we have to do a try_lock on the objects which could fail,
	 * we could end up with no object to purge at this time, even though
	 * we have objects in a purgeable state
	 */
	mutex_unlock(&vm_purgeable_queue_lock);
	return;

purge_now:

	assert(object);
	(void) vm_object_purge(object);
	vm_object_unlock(object);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_OBJECT_PURGED)),
			      (unsigned int) object,	/* purged object */
			      0,
			      available_for_purge,
			      0,
			      0);
}
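
/*
 * Put an object on a purgeable queue. FIFO and obsolete queues are entered
 * at the tail (last to die); the LIFO queue is entered at the head (first to
 * die). For the obsolete queue the group argument is forced to 0, presumably
 * because that queue does not use volatile groups.
 */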
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
	mutex_lock(&vm_purgeable_queue_lock);

	if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
		group = 0;
	if (queue->type != PURGEABLE_Q_TYPE_LIFO)	/* fifo and obsolete are
							 * fifo-queued */
		queue_enter(&queue->objq[group], object, vm_object_t, objq);	/* last to die */
	else
		queue_enter_first(&queue->objq[group], object, vm_object_t, objq);	/* first to die */

	queue->debug_count_objects++;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADDED)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);

	mutex_unlock(&vm_purgeable_queue_lock);
}
/* Look for the object. If found, remove it from the purgeable queue. */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
	enum purgeable_q_type i;
	int		group;

	mutex_lock(&vm_purgeable_queue_lock);
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t	queue = &purgeable_queues[i];
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			vm_object_t	o;
			for (o = (vm_object_t) queue_first(&queue->objq[group]);
			     !queue_end(&queue->objq[group], (queue_entry_t) o);
			     o = (vm_object_t) queue_next(&o->objq)) {
				if (o == object) {
					queue_remove(&queue->objq[group], object,
						     vm_object_t, objq);
					queue->debug_count_objects--;
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVED)),
							      0,
							      tokens[queue->token_q_head].count,
							      queue->type,
							      group,
							      0);
					mutex_unlock(&vm_purgeable_queue_lock);
					object->objq.next = 0;
					object->objq.prev = 0;
					return &purgeable_queues[i];
				}
			}
		}
	}
	mutex_unlock(&vm_purgeable_queue_lock);
	return 0;
}