/* apple/xnu: osfmk/vm/vm_purgeable.c (blob dfd80266f956420d3e8592214e335f8297a4e4f6) */
/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <vm/vm_page.h>
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>

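/*
 * A token marks a point in the aging of the inactive page queue: its count
 * is the number of inactive-queue pages that still have to be processed
 * before the objects queued behind it become ripe for purging.  Tokens live
 * in a fixed-size array and are linked by index; index 0 is reserved as the
 * end-of-list marker.
 */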
struct token {
	token_cnt_t	count;
	token_idx_t	next;
};

struct token	tokens[MAX_VOLATILE];

token_idx_t	token_free_idx = 0;	/* head of free queue */
token_idx_t	token_init_idx = 1;	/* token 0 is reserved!! */
int32_t		token_new_pagecount = 0;	/* count of pages that will
						 * be added onto token queue */

int		available_for_purge = 0;	/* increased when a ripe token is
						 * added, decreased when a ripe
						 * token is removed; protect with
						 * page_queue_lock */

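/* one queue per purgeable queue type: obsolete, FIFO and LIFO */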
struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];

#define TOKEN_ADD		0x40	/* 0x100 */
#define TOKEN_DELETE		0x41	/* 0x104 */
#define TOKEN_QUEUE_ADVANCE	0x42	/* 0x108 actually means "token ripened" */
#define TOKEN_OBJECT_PURGED	0x43	/* 0x10c */
#define OBJECT_ADDED		0x50	/* 0x140 */
#define OBJECT_REMOVED		0x51	/* 0x144 */

static void vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue);
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

#if MACH_ASSERT
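/*
 * Consistency check for a purgeable queue: walk the token list, verify the
 * unripe and tail pointers, and check that the pages counted on the tokens
 * plus the pending new-page counts add up to the inactive page count.
 */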
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
	int		token_cnt = 0, page_cnt = 0;
	token_idx_t	token = queue->token_q_head;
	token_idx_t	unripe = 0;
	int		our_inactive_count;

	while (token) {
		if (tokens[token].count != 0) {
			assert(queue->token_q_unripe);
			if (unripe == 0) {
				assert(token == queue->token_q_unripe);
				unripe = token;
			}
			page_cnt += tokens[token].count;
		}
		if (tokens[token].next == 0)
			assert(queue->token_q_tail == token);

		token_cnt++;
		token = tokens[token].next;
	}

	if (unripe)
		assert(queue->token_q_unripe == unripe);
	assert(token_cnt == queue->debug_count_tokens);
	our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
	assert(our_inactive_count >= 0);
	assert((uint32_t) our_inactive_count == vm_page_inactive_count);
}
#endif

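/*
 * Add a new token to the tail of the given queue.  Its count is the number
 * of pages that still have to age past it before it ripens; tokens added to
 * the obsolete queue are ripe immediately.  Returns KERN_FAILURE if the
 * token pool is exhausted.
 */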
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
	/* new token */
	token_idx_t	token;
	enum purgeable_q_type i;

	if (token_init_idx < MAX_VOLATILE) {	/* lazy token array init */
		token = token_init_idx;
		token_init_idx++;
	} else if (token_free_idx) {
		token = token_free_idx;
		token_free_idx = tokens[token_free_idx].next;
	} else {
		return KERN_FAILURE;
	}

	/*
	 * the new page count we accumulated needs to be applied to all
	 * queues except the obsolete queue
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
		assert(pages >= 0);
		assert(pages <= TOKEN_COUNT_MAX);
		purgeable_queues[i].new_pages = pages;
	}
	token_new_pagecount = 0;

	/* set token counter value */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
		tokens[token].count = queue->new_pages;
	else
		tokens[token].count = 0;	/* all obsolete items are
						 * ripe immediately */
	queue->new_pages = 0;

	/* put token on token counter list */
	tokens[token].next = 0;
	if (queue->token_q_tail == 0) {
		assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
		queue->token_q_head = token;
	} else {
		tokens[queue->token_q_tail].next = token;
	}
	if (queue->token_q_unripe == 0) {	/* only ripe tokens (token
						 * count == 0) in queue */
		if (tokens[token].count > 0)
			queue->token_q_unripe = token;	/* first unripe token */
		else
			available_for_purge++;	/* added a ripe token?
						 * increase available count */
	}
	queue->token_q_tail = token;

#if MACH_ASSERT
	queue->debug_count_tokens++;
	/* Check both queues, since we modified the new_pages count on each */
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
			      queue->type,
			      tokens[token].count,	/* num pages on token
							 * (last token) */
			      queue->debug_count_tokens,
			      0,
			      0);
#endif

	return KERN_SUCCESS;
}

/*
 * Remove the first token from the queue and return its index. Add its count
 * to the count of the next token.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
	token_idx_t	token;
	token = queue->token_q_head;

	assert(token);

	if (token) {
		assert(queue->token_q_tail);
		if (queue->token_q_head == queue->token_q_unripe) {
			/* no ripe tokens... must move unripe pointer */
			queue->token_q_unripe = tokens[token].next;
		} else {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		}

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		queue->token_q_head = tokens[token].next;
		if (queue->token_q_head) {
			tokens[queue->token_q_head].count += tokens[token].count;
		} else {
			/* currently no other tokens in the queue */
			/*
			 * the page count must be added to the next newly
			 * created token
			 */
			queue->new_pages += tokens[token].count;
			/* if head is zero, tail is too */
			queue->token_q_tail = 0;
		}

#if MACH_ASSERT
		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
#endif
	}
	return token;
}

/* Delete the first token from the queue and return it to the free list. */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
	token_idx_t	token = vm_purgeable_token_remove_first(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		token_free_idx = token;
	}
}

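/*
 * Age all purgeable queues by num_pages inactive-queue pages.  The obsolete
 * queue never needs aging because all of its tokens are ripe from the start.
 */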
void
vm_purgeable_q_advance_all(uint32_t num_pages)
{
	/*
	 * Check queue counters - if they get really large, scale them back.
	 * They tend to get that large when there is no purgeable queue action.
	 */
	int i;
	if (token_new_pagecount > (INT32_MAX >> 1)) {	/* a system idling for years might get here */
		for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
			int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
			assert(pages >= 0);
			assert(pages <= TOKEN_COUNT_MAX);
			purgeable_queues[i].new_pages = pages;
		}
		token_new_pagecount = 0;
	}

	/*
	 * don't need to advance the obsolete queue - all items there are
	 * always ripe
	 */
	vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);
}

/*
 * Decrement token counters. A token counter reaching zero means the object
 * is ripe to be purged. It is not purged immediately, because that could
 * cause several objects to be purged even if purging one would satisfy the
 * memory needs. Instead, the pageout thread purges them one after the other
 * by calling vm_purgeable_object_purge_one and then rechecking the memory
 * balance.
 */
static void
vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue)
{
	/* Iterate over tokens as long as there are unripe tokens. */
	while (queue->token_q_unripe) {
		int min = (tokens[queue->token_q_unripe].count < num_pages) ?
			tokens[queue->token_q_unripe].count : num_pages;
		tokens[queue->token_q_unripe].count -= min;
		num_pages -= min;

		if (tokens[queue->token_q_unripe].count == 0) {
			queue->token_q_unripe = tokens[queue->token_q_unripe].next;
			available_for_purge++;
			KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_QUEUE_ADVANCE)),
					      queue->type,
					      tokens[queue->token_q_head].count,	/* num pages on new
											 * first token */
					      0,
					      available_for_purge,
					      0);
			continue;	/* One token ripened. Make sure to
					 * check the next. */
		}
		if (num_pages == 0)
			break;	/* Current token not ripe and no more pages.
				 * Work done. */
	}

	/*
	 * If there are no unripe tokens in the queue, decrement the new_pages
	 * counter instead. new_pages can be negative, but must be canceled
	 * out by token_new_pagecount -- since the inactive queue as a whole
	 * always contains a nonnegative number of pages.
	 */
	if (!queue->token_q_unripe) {
		queue->new_pages -= num_pages;
		assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
	}
#if MACH_ASSERT
	vm_purgeable_token_check_queue(queue);
#endif
}

/*
 * Grab any ripe object and purge it: obsolete queue first. Then, go through
 * each volatile group and select a queue with a ripe token.
 * Start with the first group (0):
 * 1. Look at the queue. Is there an object?
 *    Yes - purge it. Remove the token.
 *    No  - check the other queue. Is there an object?
 *          No  - increment the group, then go to (1).
 *          Yes - purge it. Remove a token. If there is no ripe token, remove
 *                a ripe token from the other queue and migrate the unripe
 *                token from this queue to the other queue.
 * (This is the strategy implemented by vm_purgeable_object_purge_one below.)
 */
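/*
 * Remove the ripe token at the head of the queue and return it to the free
 * list. The caller must guarantee that the head token is ripe (count == 0).
 */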
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
	assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
	/* return token to free list. advance token list. */
	token_idx_t	new_head = tokens[queue->token_q_head].next;
	tokens[queue->token_q_head].next = token_free_idx;
	token_free_idx = queue->token_q_head;
	queue->token_q_head = new_head;
	if (new_head == 0)
		queue->token_q_tail = 0;

#if MACH_ASSERT
	queue->debug_count_tokens--;
	vm_purgeable_token_check_queue(queue);
#endif

	available_for_purge--;
	assert(available_for_purge >= 0);
}

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2 instead, and migrate an unripe
 * token from queue to queue2.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
	assert(queue->token_q_head);

	if (tokens[queue->token_q_head].count == 0) {
		/* This queue has a ripe token. Remove. */
		vm_purgeable_token_remove_ripe(queue);
	} else {
		assert(queue2);
		/*
		 * queue2 must have a ripe token. Remove, and migrate one
		 * from queue to queue2.
		 */
		vm_purgeable_token_remove_ripe(queue2);
		/* migrate unripe token */
		token_idx_t	token;
		token_cnt_t	count;

		/* remove token from queue1 */
		assert(queue->token_q_unripe == queue->token_q_head);	/* queue1 had no ripe
									 * tokens, remember? */
		token = vm_purgeable_token_remove_first(queue);
		assert(token);

		count = tokens[token].count;

		/* migrate to queue2 */
		/* go to migration target loc */
		token_idx_t	*token_in_queue2 = &queue2->token_q_head;
		while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
			count -= tokens[*token_in_queue2].count;
			token_in_queue2 = &tokens[*token_in_queue2].next;
		}

		if ((*token_in_queue2 == queue2->token_q_unripe) ||	/* becomes the first
									 * unripe token */
		    (queue2->token_q_unripe == 0))
			queue2->token_q_unripe = token;	/* must update unripe
							 * pointer */

		/* insert token */
		tokens[token].count = count;
		tokens[token].next = *token_in_queue2;

		/*
		 * if inserting at the end, reduce new_pages by that value;
		 * if inserting before a token, reduce the counter of that token
		 */
		if (*token_in_queue2 == 0) {	/* insertion at end of queue2 */
			queue2->token_q_tail = token;	/* must update tail
							 * pointer */
			assert(queue2->new_pages >= (int32_t) count);
			queue2->new_pages -= count;
		} else {
			assert(tokens[*token_in_queue2].count >= count);
			tokens[*token_in_queue2].count -= count;
		}
		*token_in_queue2 = token;

#if MACH_ASSERT
		queue2->debug_count_tokens++;
		vm_purgeable_token_check_queue(queue2);
#endif
	}
}

/* Find an object that can be locked. Returns the locked object, or 0 if none. */
static vm_object_t
vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
{
	/*
	 * Usually we would pick the first element from a queue. However, we
	 * might not be able to get a lock on it, in which case we try the
	 * remaining elements in order.
	 */

	vm_object_t	object;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (vm_object_lock_try(object)) {
			/* Locked. Great. We'll take it. Remove and return. */
			queue_remove(&queue->objq[group], object,
				     vm_object_t, objq);
			object->objq.next = 0;
			object->objq.prev = 0;
#if MACH_ASSERT
			queue->debug_count_objects--;
#endif
			return object;
		}
	}

	return 0;
}

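/*
 * Purge a single ripe object, if one can be found and locked. Called by the
 * pageout thread once a token has ripened; the memory balance is rechecked
 * between calls.
 */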
void
vm_purgeable_object_purge_one(void)
{
	enum purgeable_q_type i;
	int		group;
	vm_object_t	object = 0;

	mutex_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t	queue = &purgeable_queues[i];

		/*
		 * Are there any ripe tokens on this queue? If yes, we'll
		 * find an object to purge there
		 */
		if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
			continue;	/* no token? Look at next purgeable
					 * queue */

		/*
		 * Now look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			if (!queue_empty(&queue->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue, group))) {
				mutex_unlock(&vm_purgeable_queue_lock);
				vm_purgeable_token_choose_and_delete_ripe(queue, 0);
				goto purge_now;
			} else {
				assert(i != PURGEABLE_Q_TYPE_OBSOLETE);	/* obsolete queue must
									 * have all objects in
									 * group 0 */
				purgeable_q_t	queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ? PURGEABLE_Q_TYPE_FIFO : PURGEABLE_Q_TYPE_LIFO];

				if (!queue_empty(&queue2->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue2, group))) {
					mutex_unlock(&vm_purgeable_queue_lock);
					vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
					goto purge_now;
				}
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	/*
	 * because we have to do a try_lock on the objects which could fail,
	 * we could end up with no object to purge at this time, even though
	 * we have objects in a purgeable state
	 */
	mutex_unlock(&vm_purgeable_queue_lock);
	return;

purge_now:

	assert(object);
	(void) vm_object_purge(object);
	vm_object_unlock(object);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_OBJECT_PURGED)),
			      (unsigned int) object,	/* purged object */
			      0,
			      available_for_purge,
			      0,
			      0);
}

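/*
 * Add an object to the given purgeable queue and volatile group. FIFO and
 * obsolete queues enqueue at the tail (last to die); LIFO queues enqueue at
 * the head (first to die).
 */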
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
	mutex_lock(&vm_purgeable_queue_lock);

	if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
		group = 0;
	if (queue->type != PURGEABLE_Q_TYPE_LIFO)	/* fifo and obsolete are
							 * fifo-queued */
		queue_enter(&queue->objq[group], object, vm_object_t, objq);	/* last to die */
	else
		queue_enter_first(&queue->objq[group], object, vm_object_t, objq);	/* first to die */

#if MACH_ASSERT
	queue->debug_count_objects++;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADDED)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);
#endif

	mutex_unlock(&vm_purgeable_queue_lock);
}

/*
 * Look for the object. If found, remove it from the purgeable queue it is on
 * and return that queue; return 0 if the object was not found.
 */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
	enum purgeable_q_type i;
	int		group;

	mutex_lock(&vm_purgeable_queue_lock);
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t	queue = &purgeable_queues[i];
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			vm_object_t	o;
			for (o = (vm_object_t) queue_first(&queue->objq[group]);
			     !queue_end(&queue->objq[group], (queue_entry_t) o);
			     o = (vm_object_t) queue_next(&o->objq)) {
				if (o == object) {
					queue_remove(&queue->objq[group], object,
						     vm_object_t, objq);
#if MACH_ASSERT
					queue->debug_count_objects--;
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVED)),
							      0,
							      tokens[queue->token_q_head].count,
							      queue->type,
							      group,
							      0);
#endif
					mutex_unlock(&vm_purgeable_queue_lock);
					object->objq.next = 0;
					object->objq.prev = 0;
					return &purgeable_queues[i];
				}
			}
		}
	}
	mutex_unlock(&vm_purgeable_queue_lock);
	return 0;
}