[apple/xnu] osfmk/vm/vm_purgeable.c
/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <vm/vm_page.h>
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>

struct token {
    token_cnt_t count;
    token_idx_t next;
};

struct token tokens[MAX_VOLATILE];

token_idx_t token_free_idx = 0;         /* head of free queue */
token_cnt_t token_init_count = 1;       /* token 0 is reserved!! */
token_cnt_t token_new_pagecount = 0;    /* count of pages that will
                                         * be added onto token queue */

int available_for_purge = 0;            /* increase when a ripe token is added,
                                         * decrease when a ripe token is removed;
                                         * protect with page_queue_lock */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];

#define TOKEN_ADD               0x40    /* 0x100 */
#define TOKEN_DELETE            0x41    /* 0x104 */
#define TOKEN_QUEUE_ADVANCE     0x42    /* 0x108 actually means "token ripened" */
#define TOKEN_OBJECT_PURGED     0x43    /* 0x10c */
#define OBJECT_ADDED            0x50    /* 0x140 */
#define OBJECT_REMOVED          0x51    /* 0x144 */
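
/*
 * Overview of the bookkeeping (a sketch derived from the code below): each
 * purgeable queue keeps a FIFO list of tokens, linked through the statically
 * allocated tokens[] array.  A token's count is the number of inactive-queue
 * pages that still have to be processed before the token "ripens" (its count
 * reaches 0); vm_purgeable_q_advance_all() performs that aging.  One ripe
 * token entitles the pageout code to purge one object from the corresponding
 * queue.  The debug invariant checked by vm_purgeable_token_check_queue() is
 * that, per queue, the sum of all token counts plus new_pages plus the global
 * token_new_pagecount equals vm_page_inactive_count.
 */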

static void vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue);
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

#if MACH_ASSERT
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
    int token_cnt = 0, page_cnt = 0;
    token_idx_t token = queue->token_q_head;
    token_idx_t unripe = 0;
    int our_inactive_count;

    while (token) {
        if (tokens[token].count != 0) {
            assert(queue->token_q_unripe);
            if (unripe == 0) {
                assert(token == queue->token_q_unripe);
                unripe = token;
            }
            page_cnt += tokens[token].count;
        }
        if (tokens[token].next == 0)
            assert(queue->token_q_tail == token);

        token_cnt++;
        token = tokens[token].next;
    }

    if (unripe)
        assert(queue->token_q_unripe == unripe);
    assert(token_cnt == queue->debug_count_tokens);
    our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
    assert(our_inactive_count >= 0);
    assert((uint32_t) our_inactive_count == vm_page_inactive_count);
}
#endif

kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
    /* new token */
    token_idx_t token;
    enum purgeable_q_type i;

    if (token_init_count < MAX_VOLATILE) {      /* lazy token array init */
        token = token_init_count;
        token_init_count++;
    } else if (token_free_idx) {
        token = token_free_idx;
        token_free_idx = tokens[token_free_idx].next;
    } else {
        return KERN_FAILURE;
    }

    /*
     * The new page count we accumulated needs to be applied to all queues,
     * except the obsolete queue.
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_queues[i].new_pages += token_new_pagecount;
        assert(purgeable_queues[i].new_pages >= 0);
        assert((uint64_t) (purgeable_queues[i].new_pages) <= TOKEN_COUNT_MAX);
    }
    token_new_pagecount = 0;

    /* set token counter value */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
        tokens[token].count = queue->new_pages;
    else
        tokens[token].count = 0;        /* all obsolete items are
                                         * ripe immediately */
    queue->new_pages = 0;

    /* put token on token counter list */
    tokens[token].next = 0;
    if (queue->token_q_tail == 0) {
        assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
        queue->token_q_head = token;
    } else {
        tokens[queue->token_q_tail].next = token;
    }
    if (queue->token_q_unripe == 0) {   /* only ripe tokens (token
                                         * count == 0) in queue */
        if (tokens[token].count > 0)
            queue->token_q_unripe = token;      /* first unripe token */
        else
            available_for_purge++;      /* added a ripe token?
                                         * increase available count */
    }
    queue->token_q_tail = token;

#if MACH_ASSERT
    queue->debug_count_tokens++;
    /* Check both queues, since we modified the new_pages count on each */
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
        queue->type,
        tokens[token].count,            /* num pages on token (last token) */
        queue->debug_count_tokens,
        0,
        0);
#endif

    return KERN_SUCCESS;
}
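
/*
 * Note: tokens live in the fixed-size tokens[] array and entry 0 is reserved
 * as the list terminator (which is why token_init_count starts at 1), so
 * vm_purgeable_token_add() returns KERN_FAILURE only when all
 * MAX_VOLATILE - 1 usable slots are already in use.
 */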

/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
    token_idx_t token;
    token = queue->token_q_head;

    assert(token);

    if (token) {
        assert(queue->token_q_tail);
        if (queue->token_q_head == queue->token_q_unripe) {
            /* no ripe tokens... must move unripe pointer */
            queue->token_q_unripe = tokens[token].next;
        } else {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        }

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        queue->token_q_head = tokens[token].next;
        if (queue->token_q_head) {
            tokens[queue->token_q_head].count += tokens[token].count;
        } else {
            /* currently no other tokens in the queue */
            /*
             * the page count must be added to the next newly
             * created token
             */
            queue->new_pages += tokens[token].count;
            /* if head is zero, tail is too */
            queue->token_q_tail = 0;
        }

#if MACH_ASSERT
        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
            queue->type,
            tokens[queue->token_q_head].count,  /* num pages on new first token */
            token_new_pagecount,                /* num pages waiting for next token */
            available_for_purge,
            0);
#endif
    }
    return token;
}

/* Delete first token from queue. Return token to token queue. */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
    token_idx_t token = vm_purgeable_token_remove_first(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        token_free_idx = token;
    }
}


void
vm_purgeable_q_advance_all(uint32_t num_pages)
{
    /*
     * don't need to advance obsolete queue - all items are ripe there,
     * always
     */
    vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
    vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);
}

/*
 * Decrements token counters. A token counter can reach zero, which means the
 * object is ripe to be purged. It is not purged immediately, because that
 * could cause several objects to be purged even if purging one would satisfy
 * the memory needs. Instead, the pageout thread purges one after the other
 * by calling vm_purgeable_object_purge_one and then rechecking the memory
 * balance.
 */
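/*
 * Worked example (numbers are illustrative only): if the first unripe token
 * has count 30 and num_pages is 50, that token ripens (its count drops to 0
 * and available_for_purge is incremented) and the remaining 20 pages are
 * applied to the next unripe token; if no unripe token is left, the
 * remainder is subtracted from queue->new_pages instead.
 */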
static void
vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue)
{
    /* Iterate over tokens as long as there are unripe tokens. */
    while (queue->token_q_unripe) {
        int min = (tokens[queue->token_q_unripe].count < num_pages) ?
            tokens[queue->token_q_unripe].count : num_pages;
        tokens[queue->token_q_unripe].count -= min;
        num_pages -= min;

        if (tokens[queue->token_q_unripe].count == 0) {
            queue->token_q_unripe = tokens[queue->token_q_unripe].next;
            available_for_purge++;
            KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_QUEUE_ADVANCE)),
                queue->type,
                tokens[queue->token_q_head].count,      /* num pages on new first token */
                0,
                available_for_purge,
                0);
            continue;   /* One token ripened. Make sure to check the next. */
        }
        if (num_pages == 0)
            break;      /* Current token not ripe and no more pages. Work done. */
    }

    /*
     * If there are no unripe tokens in the queue, decrement the new_pages
     * counter instead. new_pages can be negative, but must be canceled out
     * by token_new_pagecount, since the inactive queue as a whole always
     * contains a nonnegative number of pages.
     */
    if (!queue->token_q_unripe) {
        queue->new_pages -= num_pages;
        assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
    }
#if MACH_ASSERT
    vm_purgeable_token_check_queue(queue);
#endif
}

/*
 * Grab any ripe object and purge it: check the obsolete queue first, then go
 * through each volatile group and select a queue with a ripe token.
 * Start with the first group (0):
 * 1. Look at the queue. Is there an object?
 *    Yes - purge it. Remove token.
 *    No - check the other queue. Is there an object?
 *         No - increment group, then go to (1).
 *         Yes - purge it. Remove token. If there is no ripe token, remove a
 *               ripe token from the other queue and migrate an unripe token
 *               from this queue to the other queue.
 */
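/*
 * In the code below, step (1) corresponds to vm_purgeable_object_find_and_lock()
 * on the queue whose head token is ripe, the "other queue" check uses the
 * opposite FIFO/LIFO queue, and the token removal/migration is handled by
 * vm_purgeable_token_choose_and_delete_ripe().  (See
 * vm_purgeable_object_purge_one() further down.)
 */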
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
    assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
    /* return token to free list. advance token list. */
    token_idx_t new_head = tokens[queue->token_q_head].next;
    tokens[queue->token_q_head].next = token_free_idx;
    token_free_idx = queue->token_q_head;
    queue->token_q_head = new_head;
    if (new_head == 0)
        queue->token_q_tail = 0;

#if MACH_ASSERT
    queue->debug_count_tokens--;
    vm_purgeable_token_check_queue(queue);
#endif

    available_for_purge--;
    assert(available_for_purge >= 0);
}

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 */
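/*
 * Migration sketch (illustrative numbers): suppose the unripe token taken
 * from queue carries a count of 25 and queue2 holds tokens with counts 10
 * and 20. The insertion loop walks queue2, subtracting counts as it goes
 * (25 - 10 = 15), stops before the 20-count token, inserts the migrated
 * token with count 15, and reduces the following token's count to 5, so the
 * cumulative page totals seen by queue2's existing tokens are unchanged.
 */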
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
    assert(queue->token_q_head);

    if (tokens[queue->token_q_head].count == 0) {
        /* This queue has a ripe token. Remove. */
        vm_purgeable_token_remove_ripe(queue);
    } else {
        assert(queue2);
        /*
         * queue2 must have a ripe token. Remove, and migrate one
         * from queue to queue2.
         */
        vm_purgeable_token_remove_ripe(queue2);
        /* migrate unripe token */
        token_idx_t token;
        token_cnt_t count;

        /* remove token from queue1 */
        assert(queue->token_q_unripe == queue->token_q_head);  /* queue1 has no ripe
                                                                 * tokens, so its head
                                                                 * is unripe */
        token = vm_purgeable_token_remove_first(queue);
        assert(token);

        count = tokens[token].count;

        /* migrate to queue2 */
        /* go to migration target loc */
        token_idx_t *token_in_queue2 = &queue2->token_q_head;
        while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
            count -= tokens[*token_in_queue2].count;
            token_in_queue2 = &tokens[*token_in_queue2].next;
        }

        if ((*token_in_queue2 == queue2->token_q_unripe) ||     /* becomes the first
                                                                 * unripe token */
            (queue2->token_q_unripe == 0))
            queue2->token_q_unripe = token;     /* must update unripe pointer */

        /* insert token */
        tokens[token].count = count;
        tokens[token].next = *token_in_queue2;

        /*
         * If inserting at the end, reduce new_pages by that value; if
         * inserting before a token, reduce the counter of that token.
         */
        if (*token_in_queue2 == 0) {    /* insertion at end of queue2 */
            queue2->token_q_tail = token;       /* must update tail pointer */
            assert(queue2->new_pages >= (int32_t) count);
            queue2->new_pages -= count;
        } else {
            assert(tokens[*token_in_queue2].count >= count);
            tokens[*token_in_queue2].count -= count;
        }
        *token_in_queue2 = token;

#if MACH_ASSERT
        queue2->debug_count_tokens++;
        vm_purgeable_token_check_queue(queue2);
#endif
    }
}

/* Find an object that can be locked. Returns locked object. */
static vm_object_t
vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
{
    /*
     * Usually we would pick the first element from a queue. However, we
     * might not be able to get a lock on it, in which case we try the
     * remaining elements in order.
     */

    vm_object_t object;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (vm_object_lock_try(object)) {
            /* Locked. Great. We'll take it. Remove and return. */
            queue_remove(&queue->objq[group], object,
                         vm_object_t, objq);
            object->objq.next = 0;
            object->objq.prev = 0;
#if MACH_ASSERT
            queue->debug_count_objects--;
#endif
            return object;
        }
    }

    return 0;
}

void
vm_purgeable_object_purge_one(void)
{
    enum purgeable_q_type i;
    int group;
    vm_object_t object = 0;

    mutex_lock(&vm_purgeable_queue_lock);
    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue = &purgeable_queues[i];

        /*
         * Are there any ripe tokens on this queue? If yes, we'll
         * find an object to purge there
         */
        if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
            continue;   /* no token? Look at next purgeable queue */

        /*
         * Now look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            if (!queue_empty(&queue->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue, group))) {
                mutex_unlock(&vm_purgeable_queue_lock);
                vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                goto purge_now;
            } else {
                assert(i != PURGEABLE_Q_TYPE_OBSOLETE);         /* obsolete queue must
                                                                 * have all objects in
                                                                 * group 0 */
                purgeable_q_t queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ? PURGEABLE_Q_TYPE_FIFO : PURGEABLE_Q_TYPE_LIFO];

                if (!queue_empty(&queue2->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue2, group))) {
                    mutex_unlock(&vm_purgeable_queue_lock);
                    vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                    goto purge_now;
                }
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    /*
     * because we have to do a try_lock on the objects which could fail,
     * we could end up with no object to purge at this time, even though
     * we have objects in a purgeable state
     */
    mutex_unlock(&vm_purgeable_queue_lock);
    return;

purge_now:

    assert(object);
    (void) vm_object_purge(object);
    vm_object_unlock(object);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_OBJECT_PURGED)),
        (unsigned int) object,          /* purged object */
        0,
        available_for_purge,
        0,
        0);
}
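
/*
 * Expected usage (sketch, per the comment above vm_purgeable_q_advance): the
 * pageout thread checks available_for_purge and, while memory is still
 * needed, calls vm_purgeable_object_purge_one() repeatedly, rechecking the
 * memory balance after each purge.
 */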

void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
    mutex_lock(&vm_purgeable_queue_lock);

    if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
        group = 0;
    if (queue->type != PURGEABLE_Q_TYPE_LIFO)   /* fifo and obsolete are
                                                 * fifo-queued */
        queue_enter(&queue->objq[group], object, vm_object_t, objq);    /* last to die */
    else
        queue_enter_first(&queue->objq[group], object, vm_object_t, objq);     /* first to die */

#if MACH_ASSERT
    queue->debug_count_objects++;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADDED)),
        0,
        tokens[queue->token_q_head].count,
        queue->type,
        group,
        0);
#endif

    mutex_unlock(&vm_purgeable_queue_lock);
}

/* Look for object. If found, remove from purgeable queue. */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
    enum purgeable_q_type i;
    int group;

    mutex_lock(&vm_purgeable_queue_lock);
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue = &purgeable_queues[i];
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            vm_object_t o;
            for (o = (vm_object_t) queue_first(&queue->objq[group]);
                 !queue_end(&queue->objq[group], (queue_entry_t) o);
                 o = (vm_object_t) queue_next(&o->objq)) {
                if (o == object) {
                    queue_remove(&queue->objq[group], object,
                                 vm_object_t, objq);
#if MACH_ASSERT
                    queue->debug_count_objects--;
                    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVED)),
                        0,
                        tokens[queue->token_q_head].count,
                        queue->type,
                        group,
                        0);
#endif
                    mutex_unlock(&vm_purgeable_queue_lock);
                    object->objq.next = 0;
                    object->objq.prev = 0;
                    return &purgeable_queues[i];
                }
            }
        }
    }
    mutex_unlock(&vm_purgeable_queue_lock);
    return 0;
}