/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <vm/vm_page.h>
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>
struct token    tokens[MAX_VOLATILE];

token_idx_t     token_free_idx = 0;             /* head of free queue */
token_idx_t     token_init_idx = 1;             /* token 0 is reserved!! */
int32_t         token_new_pagecount = 0;        /* count of pages that will
                                                 * be added onto token queue */

int             available_for_purge = 0;        /* increase when ripe token
                                                 * added, decrease when ripe
                                                 * token removed; protect with
                                                 * page_queue_lock */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
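
/*
 * Note on the token scheme: tokens[] entries form singly linked lists
 * threaded through the purgeable queues.  Each token's count is the number
 * of pages that still have to drain from the inactive queue before the
 * objects queued behind it become ripe.  The intended invariant (checked in
 * vm_purgeable_token_check_queue below) is that, for each queue, the sum of
 * its token counts plus its new_pages plus the global token_new_pagecount
 * equals vm_page_inactive_count.
 */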
#define TOKEN_ADD               0x40    /* 0x100 */
#define TOKEN_DELETE            0x41    /* 0x104 */
#define TOKEN_QUEUE_ADVANCE     0x42    /* 0x108 actually means "token ripened" */
#define TOKEN_OBJECT_PURGED     0x43    /* 0x10c */
#define OBJECT_ADDED            0x50    /* 0x140 */
#define OBJECT_REMOVED          0x51    /* 0x144 */
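
/*
 * The values above are kdebug event subcodes under DBG_MACH_VM; the hex
 * values in the trailing comments appear to be the same codes shifted left
 * by two, i.e. as they show up in the low bits of a raw trace debugid.
 */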
static void vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue);
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
        int             token_cnt = 0, page_cnt = 0;
        token_idx_t     token = queue->token_q_head;
        token_idx_t     unripe = 0;
        int             our_inactive_count;

        while (token) {
                if (tokens[token].count != 0) {
                        assert(queue->token_q_unripe);
                        if (unripe == 0) {
                                assert(token == queue->token_q_unripe);
                                unripe = token;
                        }
                }
                page_cnt += tokens[token].count;

                if (tokens[token].next == 0)
                        assert(queue->token_q_tail == token);

                token_cnt++;
                token = tokens[token].next;
        }

        if (unripe)
                assert(queue->token_q_unripe == unripe);
        assert(token_cnt == queue->debug_count_tokens);
        our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
        assert(our_inactive_count >= 0);
        assert((uint32_t) our_inactive_count == vm_page_inactive_count);
}
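
/*
 * Add a token to the given queue, charging it with the pages that have
 * accumulated in new_pages since the previous token was added.  Tokens on
 * the obsolete queue are created already ripe (count == 0).  Fails only when
 * the token array is exhausted and the free list is empty.
 */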
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
        /* new token */
        token_idx_t     token;
        enum purgeable_q_type i;

        if (token_init_idx < MAX_VOLATILE) {    /* lazy token array init */
                token = token_init_idx;
                token_init_idx++;
        } else if (token_free_idx) {
                token = token_free_idx;
                token_free_idx = tokens[token_free_idx].next;
        } else {
                return KERN_FAILURE;
        }

        /*
         * the new pagecount we got needs to be applied to all queues except
         * the obsolete queue
         */
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
                int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
                assert(pages >= 0);
                assert(pages <= TOKEN_COUNT_MAX);
                purgeable_queues[i].new_pages = pages;
        }
        token_new_pagecount = 0;

        /* set token counter value */
        if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
                tokens[token].count = queue->new_pages;
        else
                tokens[token].count = 0;        /* all obsolete items are
                                                 * ripe immediately */
        queue->new_pages = 0;

        /* put token on token counter list */
        tokens[token].next = 0;
        if (queue->token_q_tail == 0) {
                assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
                queue->token_q_head = token;
        } else {
                tokens[queue->token_q_tail].next = token;
        }
        if (queue->token_q_unripe == 0) {       /* only ripe tokens (token
                                                 * count == 0) in queue */
                if (tokens[token].count > 0)
                        queue->token_q_unripe = token;  /* first unripe token */
                else
                        available_for_purge++;  /* added a ripe token?
                                                 * increase available count */
        }
        queue->token_q_tail = token;

        queue->debug_count_tokens++;
        /* Check both queues, since we modified the new_pages count on each */
        vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
        vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
                              queue->type,
                              tokens[token].count,      /* num pages on token
                                                         * (last token) */
                              queue->debug_count_tokens,
                              0,
                              0);

        return KERN_SUCCESS;
}
/*
 * Remove the first token from the queue and return its index. Add its count
 * to the count of the next token.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
        token_idx_t     token;
        token = queue->token_q_head;

        assert(token);

        if (token) {
                assert(queue->token_q_tail);
                if (queue->token_q_head == queue->token_q_unripe) {
                        /* no ripe tokens... must move unripe pointer */
                        queue->token_q_unripe = tokens[token].next;
                } else {
                        /* we're removing a ripe token. decrease count */
                        available_for_purge--;
                        assert(available_for_purge >= 0);
                }

                if (queue->token_q_tail == queue->token_q_head)
                        assert(tokens[token].next == 0);

                queue->token_q_head = tokens[token].next;
                if (queue->token_q_head) {
                        tokens[queue->token_q_head].count += tokens[token].count;
                } else {
                        /* currently no other tokens in the queue */
                        /*
                         * the page count must be added to the next newly
                         * created token
                         */
                        queue->new_pages += tokens[token].count;
                        /* if head is zero, tail is too */
                        queue->token_q_tail = 0;
                }

                queue->debug_count_tokens--;
                vm_purgeable_token_check_queue(queue);

                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                                      queue->type,
                                      tokens[queue->token_q_head].count,       /* num pages on new
                                                                                 * first token */
                                      token_new_pagecount,      /* num pages waiting for
                                                                 * next token */
                                      available_for_purge,
                                      0);
        }
        return token;
}
/* Delete first token from queue. Return the token to the free list. */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
        token_idx_t     token = vm_purgeable_token_remove_first(queue);

        if (token) {
                /* stick removed token on free queue */
                tokens[token].next = token_free_idx;
                token_free_idx = token;
        }
}
void
vm_purgeable_q_advance_all(uint32_t num_pages)
{
        /* check queue counters - if they get really large, scale them back.
         * They tend to get that large when there is no purgeable queue action */
        enum purgeable_q_type i;

        if (token_new_pagecount > (INT32_MAX >> 1)) {   /* a system idling for years might get there */
                for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
                        int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
                        assert(pages >= 0);
                        assert(pages <= TOKEN_COUNT_MAX);
                        purgeable_queues[i].new_pages = pages;
                }
                token_new_pagecount = 0;
        }

        /*
         * don't need to advance obsolete queue - all items are ripe there,
         * always
         */
        vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
        vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);
}
/*
 * Decrements token counters. A token counter can reach zero, which means the
 * object is ripe to be purged. It is not purged immediately, because that
 * could cause several objects to be purged even if purging one would satisfy
 * the memory needs. Instead, the pageout thread purges one after the other
 * by calling vm_purgeable_object_purge_one and then rechecking the memory
 * balance.
 */
static void
vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue)
{
        /* Iterate over tokens as long as there are unripe tokens. */
        while (queue->token_q_unripe) {
                int min = (tokens[queue->token_q_unripe].count < num_pages) ?
                    tokens[queue->token_q_unripe].count : num_pages;
                tokens[queue->token_q_unripe].count -= min;
                num_pages -= min;

                if (tokens[queue->token_q_unripe].count == 0) {
                        queue->token_q_unripe = tokens[queue->token_q_unripe].next;
                        available_for_purge++;
                        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_QUEUE_ADVANCE)),
                                              queue->type,
                                              tokens[queue->token_q_head].count,        /* num pages on new
                                                                                         * first token */
                                              0,
                                              available_for_purge,
                                              0);
                        continue;       /* One token ripened. Make sure to
                                         * check the next. */
                }
                if (num_pages == 0)
                        break;  /* Current token not ripe and no more pages.
                                 * Work done. */
        }

        /*
         * if there are no unripe tokens in the queue, decrement the
         * new_pages counter instead. new_pages can be negative, but must be
         * canceled out by token_new_pagecount -- since the inactive queue as
         * a whole always contains a nonnegative number of pages
         */
        if (!queue->token_q_unripe) {
                queue->new_pages -= num_pages;
                assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
        }

        vm_purgeable_token_check_queue(queue);
}
/*
 * Grab any ripe object and purge it: check the obsolete queue first, then go
 * through each volatile group. Select a queue with a ripe token.
 * Start with the first group (0)
 * 1. Look at queue. Is there an object?
 *      Yes - purge it. Remove token.
 *      No - check other queue. Is there an object?
 *              No - increment group, then go to (1)
 *              Yes - purge it. Remove token. If there is no ripe token, remove a
 *                    ripe token from the other queue and migrate an unripe token
 *                    from this queue to the other queue.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
        assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
        /* return token to free list. advance token list. */
        token_idx_t     new_head = tokens[queue->token_q_head].next;
        tokens[queue->token_q_head].next = token_free_idx;
        token_free_idx = queue->token_q_head;
        queue->token_q_head = new_head;
        if (new_head == 0)
                queue->token_q_tail = 0;

        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        available_for_purge--;
        assert(available_for_purge >= 0);
}
/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
        assert(queue->token_q_head);

        if (tokens[queue->token_q_head].count == 0) {
                /* This queue has a ripe token. Remove. */
                vm_purgeable_token_remove_ripe(queue);
        } else {
                assert(queue2);
                /*
                 * queue2 must have a ripe token. Remove, and migrate one
                 * from queue to queue2.
                 */
                vm_purgeable_token_remove_ripe(queue2);
                /* migrate unripe token */
                token_idx_t     token;
                token_cnt_t     count;

                /* remove token from queue1 */
                assert(queue->token_q_unripe == queue->token_q_head);   /* queue1 had no unripe
                                                                         * tokens, remember? */
                token = vm_purgeable_token_remove_first(queue);
                assert(token);

                count = tokens[token].count;

                /* migrate to queue2 */
                /* go to migration target loc */
                token_idx_t    *token_in_queue2 = &queue2->token_q_head;
                while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
                        count -= tokens[*token_in_queue2].count;
                        token_in_queue2 = &tokens[*token_in_queue2].next;
                }

                if ((*token_in_queue2 == queue2->token_q_unripe) ||     /* becomes the first
                                                                         * unripe token */
                    (queue2->token_q_unripe == 0))
                        queue2->token_q_unripe = token; /* must update unripe
                                                         * pointer */

                /* insert token */
                tokens[token].count = count;
                tokens[token].next = *token_in_queue2;

                /*
                 * if inserting at the end, reduce new_pages by that value;
                 * if inserting before a token, reduce the counter of that token
                 */
                if (*token_in_queue2 == 0) {    /* insertion at end of queue2 */
                        queue2->token_q_tail = token;   /* must update tail
                                                         * pointer */
                        assert(queue2->new_pages >= (int32_t) count);
                        queue2->new_pages -= count;
                } else {
                        assert(tokens[*token_in_queue2].count >= count);
                        tokens[*token_in_queue2].count -= count;
                }
                *token_in_queue2 = token;

                queue2->debug_count_tokens++;
                vm_purgeable_token_check_queue(queue2);
        }
}
/* Find an object that can be locked. Returns the locked object, or 0 if none. */
static vm_object_t
vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
{
        /*
         * Usually we would pick the first element from a queue. However, we
         * might not be able to get a lock on it, in which case we try the
         * remaining elements in order.
         */
        vm_object_t     object;

        for (object = (vm_object_t) queue_first(&queue->objq[group]);
             !queue_end(&queue->objq[group], (queue_entry_t) object);
             object = (vm_object_t) queue_next(&object->objq)) {
                if (vm_object_lock_try(object)) {
                        /* Locked. Great. We'll take it. Remove and return. */
                        queue_remove(&queue->objq[group], object,
                                     vm_object_t, objq);
                        object->objq.next = 0;
                        object->objq.prev = 0;
                        queue->debug_count_objects--;
                        return object;
                }
        }

        return 0;
}
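
/*
 * Purge a single ripe object, following the algorithm described above: walk
 * the queues in order (obsolete first), and within a queue that owns a ripe
 * token walk the volatile groups from lowest to highest until an object can
 * be locked.  Because only try-locks are used, this can return without
 * purging anything even when purgeable objects exist.
 */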
void
vm_purgeable_object_purge_one(void)
{
        enum purgeable_q_type i;
        int             group;
        vm_object_t     object = 0;

        mutex_lock(&vm_purgeable_queue_lock);
        /* Cycle through all queues */
        for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
                purgeable_q_t   queue = &purgeable_queues[i];

                /*
                 * Are there any ripe tokens on this queue? If yes, we'll
                 * find an object to purge there
                 */
                if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
                        continue;       /* no token? Look at next purgeable
                                         * queue */

                /*
                 * Now look through all groups, starting from the lowest. If
                 * we find an object in that group, try to lock it (this can
                 * fail). If locking is successful, we can drop the queue
                 * lock, remove a token and then purge the object.
                 */
                for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
                        if (!queue_empty(&queue->objq[group]) &&
                            (object = vm_purgeable_object_find_and_lock(queue, group))) {
                                mutex_unlock(&vm_purgeable_queue_lock);
                                vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                                goto purge_now;
                        } else {
                                assert(i != PURGEABLE_Q_TYPE_OBSOLETE); /* obsolete queue must
                                                                         * have all objects in
                                                                         * group 0 */
                                purgeable_q_t   queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
                                    PURGEABLE_Q_TYPE_FIFO : PURGEABLE_Q_TYPE_LIFO];

                                if (!queue_empty(&queue2->objq[group]) &&
                                    (object = vm_purgeable_object_find_and_lock(queue2, group))) {
                                        mutex_unlock(&vm_purgeable_queue_lock);
                                        vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                                        goto purge_now;
                                }
                        }
                        assert(queue->debug_count_objects >= 0);
                }
        }
        /*
         * because we have to do a try_lock on the objects, which could fail,
         * we could end up with no object to purge at this time, even though
         * we have objects in a purgeable state
         */
        mutex_unlock(&vm_purgeable_queue_lock);
        return;

purge_now:

        assert(object);
        (void) vm_object_purge(object);
        vm_object_unlock(object);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_OBJECT_PURGED)),
                              (unsigned int) object,    /* purged object */
                              0,
                              available_for_purge,
                              0,
                              0);
}
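
/*
 * Place an object on the purgeable queue of the given type, in the given
 * volatile group.  FIFO and obsolete queues enqueue at the tail (last to
 * die); the LIFO queue enqueues at the head (first to die).
 */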
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
        mutex_lock(&vm_purgeable_queue_lock);

        if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
                group = 0;
        if (queue->type != PURGEABLE_Q_TYPE_LIFO)       /* fifo and obsolete are
                                                         * fifo-queued */
                queue_enter(&queue->objq[group], object, vm_object_t, objq);   /* last to die */
        else
                queue_enter_first(&queue->objq[group], object, vm_object_t, objq);     /* first to die */

        queue->debug_count_objects++;
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADDED)),
                              0,
                              tokens[queue->token_q_head].count,
                              queue->type,
                              group,
                              0);

        mutex_unlock(&vm_purgeable_queue_lock);
}
/* Look for object. If found, remove it from the purgeable queue it is on. */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
        enum purgeable_q_type i;
        int             group;

        mutex_lock(&vm_purgeable_queue_lock);
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
                purgeable_q_t   queue = &purgeable_queues[i];
                for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
                        vm_object_t     o;
                        for (o = (vm_object_t) queue_first(&queue->objq[group]);
                             !queue_end(&queue->objq[group], (queue_entry_t) o);
                             o = (vm_object_t) queue_next(&o->objq)) {
                                if (o == object) {
                                        queue_remove(&queue->objq[group], object,
                                                     vm_object_t, objq);

                                        queue->debug_count_objects--;
                                        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVED)),
                                                              0,
                                                              tokens[queue->token_q_head].count,
                                                              queue->type,
                                                              group,
                                                              0);

                                        mutex_unlock(&vm_purgeable_queue_lock);
                                        object->objq.next = 0;
                                        object->objq.prev = 0;
                                        return &purgeable_queues[i];
                                }
                        }
                }
        }
        mutex_unlock(&vm_purgeable_queue_lock);
        return 0;
}