/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>                 /* kmem_alloc */
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>
#include <kern/sched_prim.h>

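/*
 * Tokens live in one global array and are addressed by index (token_idx_t);
 * index 0 is reserved and serves as the NULL index.  Each purgeable queue
 * chains its tokens through the 'next' field, and unused entries are kept on
 * a free list headed by token_free_idx.  A token's 'count' is the number of
 * inactive-queue pages that still have to move past it before the token
 * ripens and one more object becomes eligible for purging.
 */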
struct token {
        token_cnt_t     count;
        token_idx_t     next;
};

struct token    *tokens;
token_idx_t     token_q_max_cnt = 0;
vm_size_t       token_q_cur_size = 0;

token_idx_t     token_free_idx = 0;             /* head of free queue */
token_idx_t     token_init_idx = 1;             /* token 0 is reserved!! */
int32_t         token_new_pagecount = 0;        /* count of pages that will
                                                 * be added onto token queue */

int             available_for_purge = 0;        /* increase when ripe token
                                                 * added, decrease when ripe
                                                 * token removed; protect with
                                                 * page_queue_lock */

static int      token_q_allocating = 0;         /* flag to single-thread the allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];

#define TOKEN_ADD               0x40    /* 0x100 */
#define TOKEN_DELETE            0x41    /* 0x104 */
#define TOKEN_QUEUE_ADVANCE     0x42    /* 0x108 actually means "token ripened" */
#define TOKEN_OBJECT_PURGED     0x43    /* 0x10c */
#define OBJECT_ADDED            0x50    /* 0x140 */
#define OBJECT_REMOVED          0x51    /* 0x144 */

static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

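/*
 * Debug-only consistency check: walks the queue and verifies that the first
 * token with a nonzero count is the one token_q_unripe points at, that the
 * last token is the one token_q_tail points at, that the number of tokens
 * matches debug_count_tokens, and that the sum of all token counts plus
 * new_pages plus token_new_pagecount equals vm_page_inactive_count.
 */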
#if MACH_ASSERT
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
        int             token_cnt = 0, page_cnt = 0;
        token_idx_t     token = queue->token_q_head;
        token_idx_t     unripe = 0;
        int             our_inactive_count;

        while (token) {
                if (tokens[token].count != 0) {
                        assert(queue->token_q_unripe);
                        if (unripe == 0) {
                                assert(token == queue->token_q_unripe);
                                unripe = token;
                        }
                        page_cnt += tokens[token].count;
                }
                if (tokens[token].next == 0)
                        assert(queue->token_q_tail == token);

                token_cnt++;
                token = tokens[token].next;
        }

        if (unripe)
                assert(queue->token_q_unripe == unripe);
        assert(token_cnt == queue->debug_count_tokens);
        our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
        assert(our_inactive_count >= 0);
        assert((uint32_t) our_inactive_count == vm_page_inactive_count);
}
#endif

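/*
 * Append a new token to the tail of the given queue.  Called with the page
 * queue lock held; the lock is dropped and re-taken if the token array has to
 * be grown, and allocation is single-threaded through the token_q_allocating
 * flag.  Any pending token_new_pagecount is folded into every queue's
 * new_pages before the new token absorbs this queue's accumulated count.
 */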
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
        /* new token */
        token_idx_t     token;
        enum purgeable_q_type i;

find_available_token:

        if (token_free_idx) {                           /* unused tokens available */
                token = token_free_idx;
                token_free_idx = tokens[token_free_idx].next;
        } else if (token_init_idx < token_q_max_cnt) {  /* lazy token array init */
                token = token_init_idx;
                token_init_idx++;
        } else {                                        /* allocate more memory */
                /* Wait if another thread is inside the memory alloc section */
                while (token_q_allocating) {
                        wait_result_t res = thread_sleep_mutex((event_t)&token_q_allocating,
                                                               &vm_page_queue_lock,
                                                               THREAD_UNINT);
                        if (res != THREAD_AWAKENED)
                                return KERN_ABORTED;
                }

                /* Check whether memory is still maxed out */
                if (token_init_idx < token_q_max_cnt)
                        goto find_available_token;

                /* Still no memory. Allocate some. */
                token_q_allocating = 1;

                /* Drop page queue lock so we can allocate */
                vm_page_unlock_queues();

                struct token *new_loc;
                vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
                kern_return_t result;

                if (token_q_cur_size) {
                        result = kmem_realloc(kernel_map, (vm_offset_t) tokens, token_q_cur_size,
                                              (vm_offset_t *) &new_loc, alloc_size);
                } else {
                        result = kmem_alloc(kernel_map, (vm_offset_t *) &new_loc, alloc_size);
                }

                vm_page_lock_queues();

                if (result) {
                        /* Unblock waiting threads */
                        token_q_allocating = 0;
                        thread_wakeup((event_t)&token_q_allocating);
                        return result;
                }

                /* If we get here, we allocated new memory. Update pointers and
                 * dealloc old range */
                struct token *old_tokens = tokens;
                tokens = new_loc;
                vm_size_t old_token_q_cur_size = token_q_cur_size;
                token_q_cur_size = alloc_size;
                token_q_max_cnt = token_q_cur_size / sizeof(struct token);
                assert(token_init_idx < token_q_max_cnt);       /* We must have a free token now */

                if (old_token_q_cur_size) {     /* clean up old mapping */
                        vm_page_unlock_queues();
                        /* kmem_realloc leaves the old region mapped. Get rid of it. */
                        kmem_free(kernel_map, (vm_offset_t) old_tokens, old_token_q_cur_size);
                        vm_page_lock_queues();
                }

                /* Unblock waiting threads */
                token_q_allocating = 0;
                thread_wakeup((event_t)&token_q_allocating);

                goto find_available_token;
        }

        assert(token);

        /*
         * The new page count we accumulated needs to be applied to all queues
         * except the obsolete queue.
         */
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
                int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
                assert(pages >= 0);
                assert(pages <= TOKEN_COUNT_MAX);
                purgeable_queues[i].new_pages = pages;
        }
        token_new_pagecount = 0;

        /* set token counter value */
        if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
                tokens[token].count = queue->new_pages;
        else
                tokens[token].count = 0;        /* all obsolete items are ripe immediately */
        queue->new_pages = 0;

        /* put token on token counter list */
        tokens[token].next = 0;
        if (queue->token_q_tail == 0) {
                assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
                queue->token_q_head = token;
        } else {
                tokens[queue->token_q_tail].next = token;
        }
        if (queue->token_q_unripe == 0) {       /* only ripe tokens (token count == 0) in queue */
                if (tokens[token].count > 0)
                        queue->token_q_unripe = token;  /* first unripe token */
                else
                        available_for_purge++;  /* added a ripe token? increase available count */
        }
        queue->token_q_tail = token;

#if MACH_ASSERT
        queue->debug_count_tokens++;
        /* Check both queues, since we modified the new_pages count on each */
        vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
        vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
                              queue->type,
                              tokens[token].count,      /* num pages on token (last token) */
                              queue->debug_count_tokens,
                              0,
                              0);
#endif

        return KERN_SUCCESS;
}
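/*
 * Bookkeeping example for the add path above (illustrative numbers only):
 * with token_new_pagecount at 0, a FIFO queue holding tokens with counts 3
 * and 2 and new_pages at 4 gets a new tail token with count 4 and its
 * new_pages reset to 0.  The total seen by vm_purgeable_token_check_queue
 * (3 + 2 + 4) is unchanged; the pages simply move from new_pages onto the
 * new token.
 */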

/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
        token_idx_t     token;
        token = queue->token_q_head;

        assert(token);

        if (token) {
                assert(queue->token_q_tail);
                if (queue->token_q_head == queue->token_q_unripe) {
                        /* no ripe tokens... must move unripe pointer */
                        queue->token_q_unripe = tokens[token].next;
                } else {
                        /* we're removing a ripe token. decrease count */
                        available_for_purge--;
                        assert(available_for_purge >= 0);
                }

                if (queue->token_q_tail == queue->token_q_head)
                        assert(tokens[token].next == 0);

                queue->token_q_head = tokens[token].next;
                if (queue->token_q_head) {
                        tokens[queue->token_q_head].count += tokens[token].count;
                } else {
                        /* currently no other tokens in the queue */
                        /*
                         * the page count must be added to the next newly
                         * created token
                         */
                        queue->new_pages += tokens[token].count;
                        /* if head is zero, tail is too */
                        queue->token_q_tail = 0;
                }

#if MACH_ASSERT
                queue->debug_count_tokens--;
                vm_purgeable_token_check_queue(queue);

                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                                      queue->type,
                                      tokens[queue->token_q_head].count,        /* num pages on new first token */
                                      token_new_pagecount,      /* num pages waiting for next token */
                                      available_for_purge,
                                      0);
#endif
        }
        return token;
}
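/*
 * Removal example (illustrative numbers only): removing the head of a queue
 * whose tokens carry counts 0, 3, 1 leaves counts 3, 1.  Removing the head of
 * a queue holding a single token with count 3 empties the queue and adds 3 to
 * new_pages, so those pages are credited to the next token created.
 */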

/* Delete the first token from the queue, returning it to the free list. */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
        token_idx_t     token = vm_purgeable_token_remove_first(queue);

        if (token) {
                /* stick removed token on free queue */
                tokens[token].next = token_free_idx;
                token_free_idx = token;
        }
}


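/*
 * Age the token queues by one page: decrement the count of the first unripe
 * token on the FIFO and LIFO queues, advance the unripe pointer and bump
 * available_for_purge whenever a token's count reaches zero, and charge the
 * page to new_pages if a queue has no unripe tokens at all.
 * available_for_purge is protected by the page queue lock (see its
 * declaration above), so callers are expected to hold that lock.
 */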
void
vm_purgeable_q_advance_all()
{
        /* check queue counters - if they get really large, scale them back.
         * They tend to get that large when there is no purgeable queue action */
        int i;
        if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {     /* a system idling for years might get there */
                for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
                        int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
                        assert(pages >= 0);
                        assert(pages <= TOKEN_COUNT_MAX);
                        purgeable_queues[i].new_pages = pages;
                }
                token_new_pagecount = 0;
        }

        /*
         * Decrement token counters. A token counter can be zero, which means
         * the object is ripe to be purged. It is not purged immediately,
         * because that could cause several objects to be purged even if
         * purging one would satisfy the memory needs. Instead, the pageout
         * thread purges one after the other by calling
         * vm_purgeable_object_purge_one and then rechecking the memory
         * balance.
         *
         * No need to advance the obsolete queue - all items there are always
         * ripe.
         */
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
                purgeable_q_t   queue = &purgeable_queues[i];
                uint32_t        num_pages = 1;

                /* Iterate over tokens as long as there are unripe tokens. */
                while (queue->token_q_unripe) {
                        if (tokens[queue->token_q_unripe].count && num_pages) {
                                tokens[queue->token_q_unripe].count -= 1;
                                num_pages -= 1;
                        }

                        if (tokens[queue->token_q_unripe].count == 0) {
                                queue->token_q_unripe = tokens[queue->token_q_unripe].next;
                                available_for_purge++;
                                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_QUEUE_ADVANCE)),
                                                      queue->type,
                                                      tokens[queue->token_q_head].count,        /* num pages on new first token */
                                                      0,
                                                      available_for_purge,
                                                      0);
                                continue;       /* One token ripened. Make sure to check the next. */
                        }
                        if (num_pages == 0)
                                break;          /* Current token not ripe and no more pages. Work done. */
                }

                /*
                 * If there are no unripe tokens in the queue, decrement the
                 * new_pages counter instead. new_pages can be negative, but it
                 * must be canceled out by token_new_pagecount -- the inactive
                 * queue as a whole always contains a nonnegative number of
                 * pages.
                 */
                if (!queue->token_q_unripe) {
                        queue->new_pages -= num_pages;
                        assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
                }
#if MACH_ASSERT
                vm_purgeable_token_check_queue(queue);
#endif
        }
}
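/*
 * Consumer-side sketch, per the comment inside vm_purgeable_q_advance_all:
 * the pageout code is expected to advance the queues and then purge ripe
 * objects one at a time while memory stays tight.  Illustrative only; the
 * real caller and its exact locking live outside this file, and
 * memory_is_still_tight() is just a placeholder for that check.
 *
 *      vm_purgeable_q_advance_all();
 *      while (available_for_purge > 0 && memory_is_still_tight())
 *              vm_purgeable_object_purge_one();
 */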

/*
 * Grab any ripe object and purge it: the obsolete queue first, then go
 * through each volatile group. Select a queue with a ripe token.
 * Start with the first group (0):
 * 1. Look at the queue. Is there an object?
 *    Yes - purge it. Remove token.
 *    No - check the other queue. Is there an object?
 *      No - increment group, then go to (1)
 *      Yes - purge it. Remove token. If there is no ripe token, remove a ripe
 *            token from the other queue and migrate an unripe token from this
 *            queue to the other queue.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
        assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
        /* return token to free list. advance token list. */
        token_idx_t     new_head = tokens[queue->token_q_head].next;
        tokens[queue->token_q_head].next = token_free_idx;
        token_free_idx = queue->token_q_head;
        queue->token_q_head = new_head;
        if (new_head == 0)
                queue->token_q_tail = 0;

#if MACH_ASSERT
        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);
#endif

        available_for_purge--;
        assert(available_for_purge >= 0);
}

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
        assert(queue->token_q_head);

        if (tokens[queue->token_q_head].count == 0) {
                /* This queue has a ripe token. Remove. */
                vm_purgeable_token_remove_ripe(queue);
        } else {
                assert(queue2);
                /*
                 * queue2 must have a ripe token. Remove, and migrate one
                 * from queue to queue2.
                 */
                vm_purgeable_token_remove_ripe(queue2);
                /* migrate unripe token */
                token_idx_t     token;
                token_cnt_t     count;

                /* remove token from queue1 */
                assert(queue->token_q_unripe == queue->token_q_head);   /* queue1 has no ripe tokens,
                                                                         * so its head is the first
                                                                         * unripe token */
                token = vm_purgeable_token_remove_first(queue);
                assert(token);

                count = tokens[token].count;

                /* migrate to queue2 */
                /* go to migration target loc */
                token_idx_t    *token_in_queue2 = &queue2->token_q_head;
                while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
                        count -= tokens[*token_in_queue2].count;
                        token_in_queue2 = &tokens[*token_in_queue2].next;
                }

                if ((*token_in_queue2 == queue2->token_q_unripe) ||     /* becomes the first unripe token */
                    (queue2->token_q_unripe == 0))
                        queue2->token_q_unripe = token; /* must update unripe pointer */

                /* insert token */
                tokens[token].count = count;
                tokens[token].next = *token_in_queue2;

                /*
                 * If inserting at the end, reduce new_pages by that value;
                 * if inserting before a token, reduce that token's counter.
                 */
                if (*token_in_queue2 == 0) {    /* insertion at end of queue2 */
                        queue2->token_q_tail = token;   /* must update tail pointer */
                        assert(queue2->new_pages >= (int32_t) count);
                        queue2->new_pages -= count;
                } else {
                        assert(tokens[*token_in_queue2].count >= count);
                        tokens[*token_in_queue2].count -= count;
                }
                *token_in_queue2 = token;

#if MACH_ASSERT
                queue2->debug_count_tokens++;
                vm_purgeable_token_check_queue(queue2);
#endif
        }
}
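/*
 * Migration example (illustrative numbers only): suppose the token removed
 * from 'queue' carries count 5 and queue2 holds tokens with counts 3 and 4.
 * The walk skips the first token (5 > 3, remaining count drops to 2) and
 * stops at the second (2 <= 4), so the migrated token is inserted there with
 * count 2 and the following token's count is reduced from 4 to 2.  If the
 * walk instead runs off the end of queue2, the token becomes the new tail and
 * its remaining count is subtracted from queue2->new_pages.
 */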

/* Find an object that can be locked. Returns locked object. */
static vm_object_t
vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
{
        /*
         * Usually we would pick the first element from a queue. However, we
         * might not be able to get a lock on it, in which case we try the
         * remaining elements in order.
         */

        vm_object_t     object;
        for (object = (vm_object_t) queue_first(&queue->objq[group]);
             !queue_end(&queue->objq[group], (queue_entry_t) object);
             object = (vm_object_t) queue_next(&object->objq)) {
                if (vm_object_lock_try(object)) {
                        /* Locked. Great. We'll take it. Remove and return. */
                        queue_remove(&queue->objq[group], object,
                                     vm_object_t, objq);
                        object->objq.next = 0;
                        object->objq.prev = 0;
#if MACH_ASSERT
                        queue->debug_count_objects--;
#endif
                        return object;
                }
        }

        return 0;
}

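/*
 * Purge a single ripe object.  Walk the queues starting with the obsolete
 * queue, skip any queue without a ripe token, then scan the volatile groups
 * for an object whose lock can be taken with a try-lock.  On success the
 * purgeable queue lock is dropped, a ripe token is consumed via
 * vm_purgeable_token_choose_and_delete_ripe(), and the object is purged with
 * vm_object_purge().  Because only try-locks are used, this can return
 * without purging anything even while ripe objects exist.
 */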
void
vm_purgeable_object_purge_one(void)
{
        enum purgeable_q_type i;
        int             group;
        vm_object_t     object = 0;

        mutex_lock(&vm_purgeable_queue_lock);
        /* Cycle through all queues */
        for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
                purgeable_q_t   queue = &purgeable_queues[i];

                /*
                 * Are there any ripe tokens on this queue? If yes, we'll
                 * find an object to purge there
                 */
                if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
                        continue;       /* no token? Look at next purgeable queue */

                /*
                 * Now look through all groups, starting from the lowest. If
                 * we find an object in that group, try to lock it (this can
                 * fail). If locking is successful, we can drop the queue
                 * lock, remove a token and then purge the object.
                 */
                for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
                        if (!queue_empty(&queue->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue, group))) {
                                mutex_unlock(&vm_purgeable_queue_lock);
                                vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                                goto purge_now;
                        } else {
                                assert(i != PURGEABLE_Q_TYPE_OBSOLETE);         /* obsolete queue must
                                                                                 * have all objects in
                                                                                 * group 0 */
                                purgeable_q_t   queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ? PURGEABLE_Q_TYPE_FIFO : PURGEABLE_Q_TYPE_LIFO];

                                if (!queue_empty(&queue2->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue2, group))) {
                                        mutex_unlock(&vm_purgeable_queue_lock);
                                        vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                                        goto purge_now;
                                }
                        }
                        assert(queue->debug_count_objects >= 0);
                }
        }
        /*
         * because we have to do a try_lock on the objects which could fail,
         * we could end up with no object to purge at this time, even though
         * we have objects in a purgeable state
         */
        mutex_unlock(&vm_purgeable_queue_lock);
        return;

purge_now:

        assert(object);
        (void) vm_object_purge(object);
        vm_object_unlock(object);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_OBJECT_PURGED)),
                              (unsigned int) object,    /* purged object */
                              0,
                              available_for_purge,
                              0,
                              0);
}

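/*
 * Put an object on a purgeable queue.  Obsolete objects always go on group 0;
 * FIFO and obsolete queues enqueue at the tail (last to die), the LIFO queue
 * at the head (first to die).  Protected by vm_purgeable_queue_lock.
 */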
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
        mutex_lock(&vm_purgeable_queue_lock);

        if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
                group = 0;
        if (queue->type != PURGEABLE_Q_TYPE_LIFO)       /* fifo and obsolete are fifo-queued */
                queue_enter(&queue->objq[group], object, vm_object_t, objq);    /* last to die */
        else
                queue_enter_first(&queue->objq[group], object, vm_object_t, objq);      /* first to die */

#if MACH_ASSERT
        queue->debug_count_objects++;
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADDED)),
                              0,
                              tokens[queue->token_q_head].count,
                              queue->type,
                              group,
                              0);
#endif

        mutex_unlock(&vm_purgeable_queue_lock);
}

/* Look for object. If found, remove from purgeable queue. */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
        enum purgeable_q_type i;
        int             group;

        mutex_lock(&vm_purgeable_queue_lock);
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
                purgeable_q_t   queue = &purgeable_queues[i];
                for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
                        vm_object_t     o;
                        for (o = (vm_object_t) queue_first(&queue->objq[group]);
                             !queue_end(&queue->objq[group], (queue_entry_t) o);
                             o = (vm_object_t) queue_next(&o->objq)) {
                                if (o == object) {
                                        queue_remove(&queue->objq[group], object,
                                                     vm_object_t, objq);
#if MACH_ASSERT
                                        queue->debug_count_objects--;
                                        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVED)),
                                                              0,
                                                              tokens[queue->token_q_head].count,
                                                              queue->type,
                                                              group,
                                                              0);
#endif
                                        mutex_unlock(&vm_purgeable_queue_lock);
                                        object->objq.next = 0;
                                        object->objq.prev = 0;
                                        return &purgeable_queues[i];
                                }
                        }
                }
        }
        mutex_unlock(&vm_purgeable_queue_lock);
        return 0;
}
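
/*
 * Expected pairing (an assumption; the callers live in the vm_object layer,
 * outside this file): when an object turns volatile it is placed on a queue
 * with vm_purgeable_object_add() and a token is added with
 * vm_purgeable_token_add(); when it turns nonvolatile again,
 * vm_purgeable_object_remove() takes it back off and the queue it returns can
 * be used to delete the matching token, e.g. with
 * vm_purgeable_token_delete_first().
 */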