/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/sched_prim.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>    /* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>

extern vm_pressure_level_t memorystatus_vm_pressure_level;

struct token {
    token_cnt_t count;
    token_idx_t prev;
    token_idx_t next;
};

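/*
 * A sketch of the data structure, as implied by the code below: tokens live
 * in one contiguous, kmem-allocated array ("tokens") and link to each other
 * by array index rather than by pointer; index 0 is reserved as the list
 * terminator, which is why token_init_idx starts at 1. Each purgeable queue
 * threads a doubly linked list of tokens through this shared array, e.g.:
 *
 *    tokens[]: [0: reserved][3: count=0][7: count=5][2: count=12]
 *    queue:     head = 3 -> 7 -> 2 = tail
 *                          ^-- token_q_unripe (first token with count != 0)
 *
 * A token "ripens" when its count reaches 0, i.e. once enough pages have
 * moved through the inactive queue since the token was added.
 */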
struct token *tokens;
token_idx_t token_q_max_cnt = 0;
vm_size_t token_q_cur_size = 0;

token_idx_t token_free_idx = 0;      /* head of free queue */
token_idx_t token_init_idx = 1;      /* token 0 is reserved!! */
int32_t token_new_pagecount = 0;     /* count of pages that will
                                      * be added onto token queue */

int available_for_purge = 0;         /* increase when ripe token
                                      * added, decrease when ripe
                                      * token removed.
                                      * protected by page_queue_lock */

static int token_q_allocating = 0;   /* flag for singlethreading
                                      * allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t purgeable_nonvolatile_queue;
int purgeable_nonvolatile_count;

decl_lck_mtx_data(, vm_purgeable_queue_lock)

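/*
 * Locking, as asserted throughout this file: the token queues are protected
 * by the page queue lock, while the purgeable object queues are protected by
 * vm_purgeable_queue_lock. When both are needed (e.g. in
 * vm_purgeable_object_purge_one()), the page queue lock is taken first and
 * vm_purgeable_queue_lock is acquired inside it.
 */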
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);

void vm_purgeable_nonvolatile_owner_update(task_t owner, int delta);
void vm_purgeable_volatile_owner_update(task_t owner, int delta);


#if MACH_ASSERT
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
    int token_cnt = 0, page_cnt = 0;
    token_idx_t token = queue->token_q_head;
    token_idx_t unripe = 0;
    int our_inactive_count;

#if DEVELOPMENT
    static unsigned lightweight_check = 0;

    /*
     * Due to performance impact, only perform this check
     * every 100 times on DEVELOPMENT kernels.
     */
    if (lightweight_check++ < 100) {
        return;
    }

    lightweight_check = 0;
#endif

    while (token) {
        if (tokens[token].count != 0) {
            assert(queue->token_q_unripe);
            if (unripe == 0) {
                assert(token == queue->token_q_unripe);
                unripe = token;
            }
            page_cnt += tokens[token].count;
        }
        if (tokens[token].next == 0)
            assert(queue->token_q_tail == token);

        token_cnt++;
        token = tokens[token].next;
    }

    if (unripe)
        assert(queue->token_q_unripe == unripe);
    assert(token_cnt == queue->debug_count_tokens);

    /* obsolete queue doesn't maintain token counts */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
        our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
        assert(our_inactive_count >= 0);
        assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
    }
}
#endif

/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    /* new token */
    token_idx_t token;
    enum purgeable_q_type i;

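    /*
     * Token allocation below is three-tiered: reuse a token from the free
     * list if one is available, else hand out the next never-used slot of
     * the token array, and only as a last resort grow the array by one page
     * with kmem_alloc()/kmem_realloc(), dropping the page queue lock around
     * the allocation and single-threading allocators via token_q_allocating.
     */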
find_available_token:

    if (token_free_idx) {                           /* unused tokens available */
        token = token_free_idx;
        token_free_idx = tokens[token_free_idx].next;
    } else if (token_init_idx < token_q_max_cnt) {  /* lazy token array init */
        token = token_init_idx;
        token_init_idx++;
    } else {                                        /* allocate more memory */
        /* Wait if another thread is inside the memory alloc section */
        while (token_q_allocating) {
            wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
                                              LCK_SLEEP_DEFAULT,
                                              (event_t)&token_q_allocating,
                                              THREAD_UNINT);
            if (res != THREAD_AWAKENED)
                return KERN_ABORTED;
        }

        /* Check whether memory is still maxed out */
        if (token_init_idx < token_q_max_cnt)
            goto find_available_token;

        /* Still no memory. Allocate some. */
        token_q_allocating = 1;

        /* Drop page queue lock so we can allocate */
        vm_page_unlock_queues();

        struct token *new_loc;
        vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
        kern_return_t result;

        if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
            result = KERN_RESOURCE_SHORTAGE;
        } else {
            if (token_q_cur_size) {
                result = kmem_realloc(kernel_map,
                                      (vm_offset_t) tokens,
                                      token_q_cur_size,
                                      (vm_offset_t *) &new_loc,
                                      alloc_size, VM_KERN_MEMORY_OSFMK);
            } else {
                result = kmem_alloc(kernel_map,
                                    (vm_offset_t *) &new_loc,
                                    alloc_size, VM_KERN_MEMORY_OSFMK);
            }
        }

        vm_page_lock_queues();

        if (result) {
            /* Unblock waiting threads */
            token_q_allocating = 0;
            thread_wakeup((event_t)&token_q_allocating);
            return result;
        }

        /* If we get here, we allocated new memory. Update pointers and
         * dealloc old range */
        struct token *old_tokens = tokens;
        tokens = new_loc;
        vm_size_t old_token_q_cur_size = token_q_cur_size;
        token_q_cur_size = alloc_size;
        token_q_max_cnt = (token_idx_t) (token_q_cur_size /
                                         sizeof(struct token));
        assert(token_init_idx < token_q_max_cnt);  /* We must have a free token now */

        if (old_token_q_cur_size) {  /* clean up old mapping */
            vm_page_unlock_queues();
            /* kmem_realloc leaves the old region mapped. Get rid of it. */
            kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
            vm_page_lock_queues();
        }

        /* Unblock waiting threads */
        token_q_allocating = 0;
        thread_wakeup((event_t)&token_q_allocating);

        goto find_available_token;
    }

    assert(token);

    /*
     * The new pagecount we got needs to be applied to all queues except
     * the obsolete queue.
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
        assert(pages >= 0);
        assert(pages <= TOKEN_COUNT_MAX);
        purgeable_queues[i].new_pages = (int32_t) pages;
        assert(purgeable_queues[i].new_pages == pages);
    }
    token_new_pagecount = 0;

    /* set token counter value */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
        tokens[token].count = queue->new_pages;
    else
        tokens[token].count = 0;  /* all obsolete items are
                                   * ripe immediately */
    queue->new_pages = 0;

    /* put token on token counter list */
    tokens[token].next = 0;
    if (queue->token_q_tail == 0) {
        assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
        queue->token_q_head = token;
        tokens[token].prev = 0;
    } else {
        tokens[queue->token_q_tail].next = token;
        tokens[token].prev = queue->token_q_tail;
    }
    if (queue->token_q_unripe == 0) {  /* only ripe tokens (token
                                        * count == 0) in queue */
        if (tokens[token].count > 0)
            queue->token_q_unripe = token;  /* first unripe token */
        else
            available_for_purge++;  /* added a ripe token?
                                     * increase available count */
    }
    queue->token_q_tail = token;

#if MACH_ASSERT
    queue->debug_count_tokens++;
    /* Check both queues, since we modified the new_pages count on each */
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
                          queue->type,
                          tokens[token].count,  /* num pages on token
                                                 * (last token) */
                          queue->debug_count_tokens,
                          0,
                          0);
#endif

    return KERN_SUCCESS;
}

/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token;
    token = queue->token_q_head;

    assert(token);

    if (token) {
        assert(queue->token_q_tail);
        if (queue->token_q_head == queue->token_q_unripe) {
            /* no ripe tokens... must move unripe pointer */
            queue->token_q_unripe = tokens[token].next;
        } else {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        }

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        queue->token_q_head = tokens[token].next;
        if (queue->token_q_head) {
            tokens[queue->token_q_head].count += tokens[token].count;
            tokens[queue->token_q_head].prev = 0;
        } else {
            /* currently no other tokens in the queue */
            /*
             * the page count must be added to the next newly
             * created token
             */
            queue->new_pages += tokens[token].count;
            /* if head is zero, tail is too */
            queue->token_q_tail = 0;
        }

#if MACH_ASSERT
        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count,  /* num pages on new
                                                                   * first token */
                              token_new_pagecount,  /* num pages waiting for
                                                     * next token */
                              available_for_purge,
                              0);
#endif
    }
    return token;
}

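/*
 * Remove the last token from the queue and return its index. Credit its
 * count back to the queue's new_pages counter, so no pages are lost.
 * Call with page queue locked.
 */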
static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token;
    token = queue->token_q_tail;

    assert(token);

    if (token) {
        assert(queue->token_q_head);

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        if (queue->token_q_unripe == 0) {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        } else if (queue->token_q_unripe == token) {
            /* we're removing the only unripe token */
            queue->token_q_unripe = 0;
        }

        if (token == queue->token_q_head) {
            /* token is the last one in the queue */
            queue->token_q_head = 0;
            queue->token_q_tail = 0;
        } else {
            token_idx_t new_tail;

            new_tail = tokens[token].prev;

            assert(new_tail);
            assert(tokens[new_tail].next == token);

            queue->token_q_tail = new_tail;
            tokens[new_tail].next = 0;
        }

        queue->new_pages += tokens[token].count;

#if MACH_ASSERT
        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count,  /* num pages on new
                                                                   * first token */
                              token_new_pagecount,  /* num pages waiting for
                                                     * next token */
                              available_for_purge,
                              0);
#endif
    }
    return token;
}

/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    token_idx_t token = vm_purgeable_token_remove_first(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}

void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    token_idx_t token = vm_purgeable_token_remove_last(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}

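/*
 * Age all volatile queues by (up to) one inactive-queue page: the first
 * unripe token of each queue has its count decremented, and a token whose
 * count reaches zero ripens, making its queue eligible for purging.
 * (Presumably driven from the pageout path as pages move through the
 * inactive queue; within this file each call ages by num_pages = 1.)
 */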
/* Call with page queue locked. */
void
vm_purgeable_q_advance_all(void)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    /* Check queue counters - if they get really large, scale them back.
     * They tend to get that large when there is no purgeable queue action. */
    int i;
    if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {  /* a system idling for years might get there */
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
            int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
            assert(pages >= 0);
            assert(pages <= TOKEN_COUNT_MAX);
            purgeable_queues[i].new_pages = (int32_t) pages;
            assert(purgeable_queues[i].new_pages == pages);
        }
        token_new_pagecount = 0;
    }

    /*
     * Decrement token counters. A token counter can be zero, this means the
     * object is ripe to be purged. It is not purged immediately, because that
     * could cause several objects to be purged even if purging one would satisfy
     * the memory needs. Instead, the pageout thread purges one after the other
     * by calling vm_purgeable_object_purge_one and then rechecking the memory
     * balance.
     *
     * No need to advance the obsolete queue - all items there are always ripe.
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue = &purgeable_queues[i];
        uint32_t num_pages = 1;

        /* Iterate over tokens as long as there are unripe tokens. */
        while (queue->token_q_unripe) {
            if (tokens[queue->token_q_unripe].count && num_pages) {
                tokens[queue->token_q_unripe].count -= 1;
                num_pages -= 1;
            }

            if (tokens[queue->token_q_unripe].count == 0) {
                queue->token_q_unripe = tokens[queue->token_q_unripe].next;
                available_for_purge++;
                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
                                      queue->type,
                                      tokens[queue->token_q_head].count,  /* num pages on new
                                                                           * first token */
                                      0,
                                      available_for_purge,
                                      0);
                continue;  /* One token ripened. Make sure to
                            * check the next. */
            }
            if (num_pages == 0)
                break;  /* Current token not ripe and no more pages.
                         * Work done. */
        }

        /*
         * If there are no unripe tokens in the queue, decrement the
         * new_pages counter instead. new_pages can be negative, but must be
         * canceled out by token_new_pagecount -- since the inactive queue as a
         * whole always contains a nonnegative number of pages.
         */
        if (!queue->token_q_unripe) {
            queue->new_pages -= num_pages;
            assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
        }
#if MACH_ASSERT
        vm_purgeable_token_check_queue(queue);
#endif
    }
}

/*
 * Grab any ripe object and purge it, obsolete queue first. Then, go through
 * each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *    Yes - purge it. Remove token.
 *    No - check other queue. Is there an object?
 *         No - increment group, then go to (1)
 *         Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *               token from other queue and migrate unripe token from this
 *               queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
    /* return token to free list. advance token list. */
    token_idx_t new_head = tokens[queue->token_q_head].next;
    tokens[queue->token_q_head].next = token_free_idx;
    tokens[queue->token_q_head].prev = 0;
    token_free_idx = queue->token_q_head;
    queue->token_q_head = new_head;
    tokens[new_head].prev = 0;
    if (new_head == 0)
        queue->token_q_tail = 0;

#if MACH_ASSERT
    queue->debug_count_tokens--;
    vm_purgeable_token_check_queue(queue);
#endif

    available_for_purge--;
    assert(available_for_purge >= 0);
}

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(queue->token_q_head);

    if (tokens[queue->token_q_head].count == 0) {
        /* This queue has a ripe token. Remove. */
        vm_purgeable_token_remove_ripe(queue);
    } else {
        assert(queue2);
        /*
         * queue2 must have a ripe token. Remove, and migrate one
         * from queue to queue2.
         */
        vm_purgeable_token_remove_ripe(queue2);
        /* migrate unripe token */
        token_idx_t token;
        token_cnt_t count;

        /* remove token from queue1 */
        assert(queue->token_q_unripe == queue->token_q_head);  /* queue1 had no unripe
                                                                * tokens, remember? */
        token = vm_purgeable_token_remove_first(queue);
        assert(token);

        count = tokens[token].count;

        /* migrate to queue2 */
        /* go to migration target loc */

        token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;

        while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
            count -= tokens[token_to_insert_before].count;
            token_to_insert_before = tokens[token_to_insert_before].next;
        }

        /* token_to_insert_before is now set correctly */

        /* should the inserted token become the first unripe token? */
        if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
            queue2->token_q_unripe = token;  /* if so, must update unripe pointer */

        /*
         * insert token.
         * if inserting at end, reduce new_pages by that value;
         * otherwise, reduce counter of next token
         */

        tokens[token].count = count;

        if (token_to_insert_before != 0) {
            token_to_insert_after = tokens[token_to_insert_before].prev;

            tokens[token].next = token_to_insert_before;
            tokens[token_to_insert_before].prev = token;

            assert(tokens[token_to_insert_before].count >= count);
            tokens[token_to_insert_before].count -= count;
        } else {
            /* if we ran off the end of the list, the token to insert after is the tail */
            token_to_insert_after = queue2->token_q_tail;

            tokens[token].next = 0;
            queue2->token_q_tail = token;

            assert(queue2->new_pages >= (int32_t) count);
            queue2->new_pages -= count;
        }

        if (token_to_insert_after != 0) {
            tokens[token].prev = token_to_insert_after;
            tokens[token_to_insert_after].next = token;
        } else {
            /* is this case possible? */
            tokens[token].prev = 0;
            queue2->token_q_head = token;
        }

#if MACH_ASSERT
        queue2->debug_count_tokens++;
        vm_purgeable_token_check_queue(queue2);
#endif
    }
}

/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
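/*
 * Selection policy, as implemented below: among the objects we can
 * try-lock, prefer the one whose owning task has the lowest importance
 * (on CONFIG_EMBEDDED, the lowest memorystatus priority), so memory owned
 * by less important tasks is purged first; an importance of 0 ends the
 * scan early.
 */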
static vm_object_t
vm_purgeable_object_find_and_lock(
    purgeable_q_t queue,
    int group,
    boolean_t pick_ripe)
{
    vm_object_t object, best_object;
    int object_task_importance;
    int best_object_task_importance;
    int best_object_skipped;
    int num_objects_skipped;
    int try_lock_failed = 0;
    int try_lock_succeeded = 0;
    task_t owner;

    best_object = VM_OBJECT_NULL;
    best_object_task_importance = INT_MAX;

    LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
    /*
     * Usually we would pick the first element from a queue. However, we
     * might not be able to get a lock on it, in which case we try the
     * remaining elements in order.
     */

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
                              pick_ripe,
                              group,
                              VM_KERNEL_UNSLIDE_OR_PERM(queue),
                              0,
                              0);

    num_objects_skipped = 0;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq),
         num_objects_skipped++) {

        /*
         * To prevent us looping for an excessively long time, choose
         * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
         * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
         * we keep going until we find the first eligible object.
         */
        if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
            break;
        }

        if (pick_ripe &&
            !object->purgeable_when_ripe) {
            /* we want an object that has a ripe token */
            continue;
        }

        object_task_importance = 0;

        owner = object->vo_purgeable_owner;
        if (owner) {
#if CONFIG_EMBEDDED
#if CONFIG_JETSAM
            object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE);
#endif /* CONFIG_JETSAM */
#else /* CONFIG_EMBEDDED */
            object_task_importance = task_importance_estimate(owner);
#endif /* CONFIG_EMBEDDED */
        }

        if (object_task_importance < best_object_task_importance) {
            if (vm_object_lock_try(object)) {
                try_lock_succeeded++;
                if (best_object != VM_OBJECT_NULL) {
                    /* forget about previous best object */
                    vm_object_unlock(best_object);
                }
                best_object = object;
                best_object_task_importance = object_task_importance;
                best_object_skipped = num_objects_skipped;
                if (best_object_task_importance == 0) {
                    /* can't get any better: stop looking */
                    break;
                }
            } else {
                try_lock_failed++;
            }
        }
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
                              num_objects_skipped,  /* considered objects */
                              try_lock_failed,
                              try_lock_succeeded,
                              VM_KERNEL_UNSLIDE_OR_PERM(best_object),
                              ((best_object == NULL) ? 0 : best_object->resident_page_count));

    object = best_object;

    if (object == VM_OBJECT_NULL) {
        return VM_OBJECT_NULL;
    }

    /* Locked. Great. We'll take it. Remove and return. */
//  printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

    vm_object_lock_assert_exclusive(object);

    queue_remove(&queue->objq[group], object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);

#if DEBUG
    object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */

    /* keep queue of non-volatile objects */
    queue_enter(&purgeable_nonvolatile_queue, object,
                vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    /* one more nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, +1);

#if MACH_ASSERT
    queue->debug_count_objects--;
#endif
    return object;
}

/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
    enum purgeable_q_type i;
    int group;
    vm_object_t object;
    unsigned int purged_count;
    uint32_t collisions;

    purged_count = 0;
    collisions = 0;

restart:
    lck_mtx_lock(&vm_purgeable_queue_lock);
    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue;

        queue = &purgeable_queues[i];

        /*
         * Look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            while (!queue_empty(&queue->objq[group])) {
                object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
                if (object == VM_OBJECT_NULL) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    mutex_pause(collisions++);
                    goto restart;
                }

                lck_mtx_unlock(&vm_purgeable_queue_lock);

                /* Lock the page queue here so we don't hold it
                 * over the whole, lengthy operation */
                if (object->purgeable_when_ripe) {
                    vm_page_lock_queues();
                    vm_purgeable_token_remove_first(queue);
                    vm_page_unlock_queues();
                }

                (void) vm_object_purge(object, 0);
                assert(object->purgable == VM_PURGABLE_EMPTY);
                /* no change in purgeable accounting */

                vm_object_unlock(object);
                purged_count++;
                goto restart;
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
                          purged_count,  /* # of purged objects */
                          0,
                          available_for_purge,
                          0,
                          0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}

boolean_t
vm_purgeable_object_purge_one_unlocked(
    int force_purge_below_group)
{
    boolean_t retval;

    vm_page_lock_queues();
    retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
    vm_page_unlock_queues();

    return retval;
}

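/*
 * Purge one ripe object. Groups numerically below "force_purge_below_group"
 * may be purged even when no token is ripe; pass 0 to purge only on ripe
 * tokens. "flags" is handed through to vm_object_purge(). Returns TRUE if
 * an object was purged.
 *
 * A sketch of a caller (hypothetical, for illustration only -- the real
 * call sites live in the pageout/memorystatus code):
 *
 *    vm_page_lock_queues();
 *    if (available_for_purge > 0)
 *        (void) vm_purgeable_object_purge_one(0, 0);
 *    vm_page_unlock_queues();
 */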
boolean_t
vm_purgeable_object_purge_one(
    int force_purge_below_group,
    int flags)
{
    enum purgeable_q_type i;
    int group;
    vm_object_t object = 0;
    purgeable_q_t queue, queue2;
    boolean_t forced_purge;

    /* Need the page queue lock since we'll be changing the token queue. */
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        queue = &purgeable_queues[i];

        if (force_purge_below_group == 0) {
            /*
             * Are there any ripe tokens on this queue? If yes,
             * we'll find an object to purge there
             */
            if (!queue->token_q_head) {
                /* no token: look at next purgeable queue */
                continue;
            }

            if (tokens[queue->token_q_head].count != 0) {
                /* no ripe token: next queue */
                continue;
            }
        }

        /*
         * Now look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            if (!queue->token_q_head ||
                tokens[queue->token_q_head].count != 0) {
                /* no tokens or no ripe tokens */

                if (group >= force_purge_below_group) {
                    /* no more groups to force-purge */
                    break;
                }

                /*
                 * Try and purge an object in this group
                 * even though no tokens are ripe.
                 */
                if (!queue_empty(&queue->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_delete_first(queue);
                    }
                    forced_purge = TRUE;
                    goto purge_now;
                }

                /* nothing to purge in this group: next group */
                continue;
            }
            if (!queue_empty(&queue->objq[group]) &&
                (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                if (object->purgeable_when_ripe) {
                    vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                }
                forced_purge = FALSE;
                goto purge_now;
            }
            if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
                /* This is the token migration case, and it works between
                 * FIFO and LIFO only */
                queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
                                           PURGEABLE_Q_TYPE_FIFO :
                                           PURGEABLE_Q_TYPE_LIFO];

                if (!queue_empty(&queue2->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                    }
                    forced_purge = FALSE;
                    goto purge_now;
                }
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    /*
     * Because we have to try_lock the objects, which can fail, we could
     * end up with no object to purge at this time, even though we have
     * objects in a purgeable state.
     */
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return FALSE;

purge_now:

    assert(object);
    vm_page_unlock_queues();  /* Unlock for call to vm_object_purge() */
//  printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_purgeable_owner, task_importance_estimate(object->vo_purgeable_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
    (void) vm_object_purge(object, flags);
    assert(object->purgable == VM_PURGABLE_EMPTY);
    /* no change in purgeable accounting */
    vm_object_unlock(object);
    vm_page_lock_queues();

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
                          VM_KERNEL_UNSLIDE_OR_PERM(object),  /* purged object */
                          0,
                          available_for_purge,
                          0,
                          0);

    return TRUE;
}

/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
    vm_object_lock_assert_exclusive(object);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    /* one less nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, -1);

    if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
        group = 0;

    if (queue->type != PURGEABLE_Q_TYPE_LIFO)  /* fifo and obsolete are
                                                * fifo-queued */
        queue_enter(&queue->objq[group], object, vm_object_t, objq);  /* last to die */
    else
        queue_enter_first(&queue->objq[group], object, vm_object_t, objq);  /* first to die */
    /* one more volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, +1);

    object->purgeable_queue_type = queue->type;
    object->purgeable_queue_group = group;

#if DEBUG
    assert(object->vo_purgeable_volatilizer == NULL);
    object->vo_purgeable_volatilizer = current_task();
    OSBacktrace(&object->purgeable_volatilizer_bt[0], 16);
#endif /* DEBUG */

#if MACH_ASSERT
    queue->debug_count_objects++;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);
#endif

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}

/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
    int group;
    enum purgeable_q_type type;
    purgeable_q_t queue;

    vm_object_lock_assert_exclusive(object);

    type = object->purgeable_queue_type;
    group = object->purgeable_queue_group;

    if (type == PURGEABLE_Q_TYPE_MAX) {
        if (object->objq.prev || object->objq.next)
            panic("unmarked object on purgeable q");

        return NULL;
    } else if (!(object->objq.prev && object->objq.next))
        panic("marked object not on purgeable q");

    lck_mtx_lock(&vm_purgeable_queue_lock);

    queue = &purgeable_queues[type];

    queue_remove(&queue->objq[group], object, vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);
#if DEBUG
    object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
    /* keep queue of non-volatile objects */
    if (object->alive && !object->terminating) {
        task_t owner;
        queue_enter(&purgeable_nonvolatile_queue, object,
                    vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        owner = object->vo_purgeable_owner;
        vm_purgeable_nonvolatile_owner_update(owner, +1);
    }

#if MACH_ASSERT
    queue->debug_count_objects--;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);
#endif

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;

    vm_object_lock_assert_exclusive(object);

    return &purgeable_queues[type];
}

void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
    LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    stat->count = stat->size = 0;
    vm_object_t object;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (!target_task || object->vo_purgeable_owner == target_task) {
            stat->count++;
            stat->size += (object->resident_page_count * PAGE_SIZE);
        }
    }
    return;
}

void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
    purgeable_q_t queue;
    int group;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Populate fifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);

    /* Populate lifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);

    /* Populate obsolete data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}

#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
    purgeable_q_t queue,
    int group,
    task_t task,
    pvm_account_info_t acnt_info)
{
    vm_object_t object;
    uint64_t compressed_count;

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (object->vo_purgeable_owner == task) {
            compressed_count = vm_compressor_pager_get_count(object->pager);
            acnt_info->pvm_volatile_compressed_count += compressed_count;
            acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }
}

/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
    task_t task,
    pvm_account_info_t acnt_info)
{
    queue_head_t *nonvolatile_q;
    vm_object_t object;
    int group;
    int state;
    uint64_t compressed_count;
    purgeable_q_t volatile_q;

    if ((task == NULL) || (acnt_info == NULL)) {
        return KERN_INVALID_ARGUMENT;
    }

    acnt_info->pvm_volatile_count = 0;
    acnt_info->pvm_volatile_compressed_count = 0;
    acnt_info->pvm_nonvolatile_count = 0;
    acnt_info->pvm_nonvolatile_compressed_count = 0;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
         !queue_end(nonvolatile_q, (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (object->vo_purgeable_owner == task) {
            state = object->purgable;
            compressed_count = vm_compressor_pager_get_count(object->pager);
            if (state == VM_PURGABLE_EMPTY) {
                acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_volatile_compressed_count += compressed_count;
            } else {
                acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
            }
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
    acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

    return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */

static void
vm_purgeable_volatile_queue_disown(
    purgeable_q_t queue,
    int group,
    task_t task)
{
    vm_object_t object;
    int collisions;

    collisions = 0;

again:
    LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
        /*
         * Sanity check: let's scan the entire queues to
         * make sure we don't leave any purgeable objects
         * pointing back at a dead task. If the counters
         * are off, we would fail to assert that they go
         * back to 0 after disowning is done.
         */
#else /* MACH_ASSERT */
        if (task->task_volatile_objects == 0) {
            /* no more volatile objects owned by "task" */
            break;
        }
#endif /* MACH_ASSERT */
        if (object->vo_purgeable_owner == task) {
            if (!vm_object_lock_try(object)) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                mutex_pause(collisions++);
                lck_mtx_lock(&vm_purgeable_queue_lock);
                goto again;
            }
            assert(object->purgable == VM_PURGABLE_VOLATILE);
            if (object->vo_purgeable_owner == task) {
                vm_purgeable_accounting(object,
                                        object->purgable,
                                        TRUE);  /* disown */
                assert(object->vo_purgeable_owner == NULL);
            }
            vm_object_unlock(object);
        }
    }
}

void
vm_purgeable_disown(
    task_t task)
{
    purgeable_q_t volatile_q;
    int group;
    queue_head_t *nonvolatile_q;
    vm_object_t object;
    int collisions;

    if (task == NULL) {
        return;
    }

    task->task_purgeable_disowning = TRUE;

    /*
     * Scan the purgeable objects queues for objects owned by "task".
     * This has to be done "atomically" under the "vm_purgeable_queue"
     * lock, to ensure that no new purgeable object gets associated
     * with this task or moved between queues while we're scanning.
     */

    /*
     * Scan non-volatile queue for objects owned by "task".
     */

    collisions = 0;

again:
    if (task->task_purgeable_disowned) {
        /* task has already disowned its purgeable memory */
        assert(task->task_volatile_objects == 0);
        assert(task->task_nonvolatile_objects == 0);
        return;
    }
    lck_mtx_lock(&vm_purgeable_queue_lock);

    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
         !queue_end(nonvolatile_q, (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
        /*
         * Sanity check: let's scan the entire queues to
         * make sure we don't leave any purgeable objects
         * pointing back at a dead task. If the counters
         * are off, we would fail to assert that they go
         * back to 0 after disowning is done.
         */
#else /* MACH_ASSERT */
        if (task->task_nonvolatile_objects == 0) {
            /* no more non-volatile objects owned by "task" */
            break;
        }
#endif /* MACH_ASSERT */
#if DEBUG
        assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
        if (object->vo_purgeable_owner == task) {
            if (!vm_object_lock_try(object)) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                mutex_pause(collisions++);
                goto again;
            }
            if (object->vo_purgeable_owner == task) {
                vm_purgeable_accounting(object,
                                        object->purgable,
                                        TRUE);  /* disown */
                assert(object->vo_purgeable_owner == NULL);
            }
            vm_object_unlock(object);
        }
    }

    lck_mtx_yield(&vm_purgeable_queue_lock);

    /*
     * Scan volatile queues for objects owned by "task".
     */

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_volatile_queue_disown(volatile_q, 0, task);
    lck_mtx_yield(&vm_purgeable_queue_lock);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_volatile_queue_disown(volatile_q, group, task);
        lck_mtx_yield(&vm_purgeable_queue_lock);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_volatile_queue_disown(volatile_q, group, task);
        lck_mtx_yield(&vm_purgeable_queue_lock);
    }

    if (task->task_volatile_objects != 0 ||
        task->task_nonvolatile_objects != 0) {
        /* some purgeable objects sneaked into a queue: find them */
        lck_mtx_unlock(&vm_purgeable_queue_lock);
        mutex_pause(collisions++);
        goto again;
    }

    /* there shouldn't be any purgeable objects owned by task now */
    assert(task->task_volatile_objects == 0);
    assert(task->task_nonvolatile_objects == 0);
    assert(task->task_purgeable_disowning);

    /* and we don't need to try and disown again */
    task->task_purgeable_disowned = TRUE;

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}


#if notyet
static int
vm_purgeable_queue_purge_task_owned(
    purgeable_q_t queue,
    int group,
    task_t task)
{
    vm_object_t object;
    int num_objects;
    int collisions;
    int num_objects_purged;

    num_objects_purged = 0;
    collisions = 0;

look_again:
    lck_mtx_lock(&vm_purgeable_queue_lock);

    num_objects = 0;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {

        if (object->vo_purgeable_owner != task &&
            object->vo_purgeable_owner != NULL) {
            continue;
        }

        /* found an object: try and grab it */
        if (!vm_object_lock_try(object)) {
            lck_mtx_unlock(&vm_purgeable_queue_lock);
            mutex_pause(collisions++);
            goto look_again;
        }
        /* got it ! */

        collisions = 0;

        /* remove object from purgeable queue */
        queue_remove(&queue->objq[group], object,
                     vm_object_t, objq);
        object->objq.next = NULL;
        object->objq.prev = NULL;
        /* one less volatile object for this object's owner */
        assert(object->vo_purgeable_owner == task);
        vm_purgeable_volatile_owner_update(task, -1);

#if DEBUG
        object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
        queue_enter(&purgeable_nonvolatile_queue, object,
                    vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        assert(object->vo_purgeable_owner == task);
        vm_purgeable_nonvolatile_owner_update(task, +1);

        /* unlock purgeable queues */
        lck_mtx_unlock(&vm_purgeable_queue_lock);

        if (object->purgeable_when_ripe) {
            /* remove a token */
            vm_page_lock_queues();
            vm_purgeable_token_remove_first(queue);
            vm_page_unlock_queues();
        }

        /* purge the object */
        (void) vm_object_purge(object, 0);
        assert(object->purgable == VM_PURGABLE_EMPTY);
        /* no change for purgeable accounting */
        vm_object_unlock(object);
        num_objects_purged++;

        /* we unlocked the purgeable queues, so start over */
        goto look_again;
    }

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    return num_objects_purged;
}

int
vm_purgeable_purge_task_owned(
    task_t task)
{
    purgeable_q_t queue;
    int group;
    int num_objects_purged;

    num_objects_purged = 0;

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    num_objects_purged += vm_purgeable_queue_purge_task_owned(queue, 0, task);

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        num_objects_purged += vm_purgeable_queue_purge_task_owned(queue, group, task);

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        num_objects_purged += vm_purgeable_queue_purge_task_owned(queue, group, task);

    return num_objects_purged;
}
#endif

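/*
 * Make a non-volatile purgeable object visible to the accounting machinery:
 * put it on purgeable_nonvolatile_queue and charge its resident pages to
 * "owner"'s purgeable_nonvolatile and phys_footprint ledgers. Called with
 * the object locked.
 */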
void
vm_purgeable_nonvolatile_enqueue(
    vm_object_t object,
    task_t owner)
{
    int page_count;

    vm_object_lock_assert_exclusive(object);

    assert(object->purgable == VM_PURGABLE_NONVOLATILE);
    assert(object->vo_purgeable_owner == NULL);

    lck_mtx_lock(&vm_purgeable_queue_lock);

    if (owner != NULL &&
        owner->task_purgeable_disowning) {
        /* task is exiting and no longer tracking purgeable objects */
        owner = NULL;
    }

    object->vo_purgeable_owner = owner;
#if DEBUG
    object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */

#if DEBUG
    OSBacktrace(&object->purgeable_owner_bt[0], 16);
#endif /* DEBUG */

    page_count = object->resident_page_count;
    if (owner != NULL && page_count != 0) {
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_nonvolatile,
                      ptoa(page_count));
        ledger_credit(owner->ledger,
                      task_ledgers.phys_footprint,
                      ptoa(page_count));
    }

    assert(object->objq.next == NULL);
    assert(object->objq.prev == NULL);

    queue_enter(&purgeable_nonvolatile_queue, object,
                vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    /* one more nonvolatile object for this object's owner */
    assert(object->vo_purgeable_owner == owner);
    vm_purgeable_nonvolatile_owner_update(owner, +1);
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    vm_object_lock_assert_exclusive(object);
}

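/*
 * Undo vm_purgeable_nonvolatile_enqueue(): stop the owner's ledger
 * accounting for this object and take it off purgeable_nonvolatile_queue.
 * Called with the object locked.
 */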
void
vm_purgeable_nonvolatile_dequeue(
    vm_object_t object)
{
    task_t owner;

    vm_object_lock_assert_exclusive(object);

    owner = object->vo_purgeable_owner;
#if DEBUG
    assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
    if (owner != NULL) {
        /*
         * Update the owner's ledger to stop accounting
         * for this object.
         */
        vm_purgeable_accounting(object,
                                object->purgable,
                                TRUE);  /* disown */
    }

    lck_mtx_lock(&vm_purgeable_queue_lock);
    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    vm_object_lock_assert_exclusive(object);
}

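/*
 * Move this object's pages between its owner's "volatile" and
 * "non-volatile" ledgers when its purgeable state changes away from
 * "old_state". Wired pages always stay accounted as non-volatile, and only
 * non-volatile pages (resident or compressed) count toward phys_footprint.
 * With "disown" (or for a dead/terminating object) the object is detached
 * from its owner entirely and all of its ledger entries are reversed.
 */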
void
vm_purgeable_accounting(
    vm_object_t object,
    vm_purgable_t old_state,
    boolean_t disown)
{
    task_t owner;
    int resident_page_count;
    int wired_page_count;
    int compressed_page_count;
    boolean_t disown_on_the_fly;

    vm_object_lock_assert_exclusive(object);

    owner = object->vo_purgeable_owner;
    if (owner == NULL)
        return;

    if (!disown && owner->task_purgeable_disowning) {
        /* task is disowning its purgeable objects: help it */
        disown_on_the_fly = TRUE;
    } else {
        disown_on_the_fly = FALSE;
    }

    resident_page_count = object->resident_page_count;
    wired_page_count = object->wired_page_count;
    if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
        object->pager != NULL) {
        compressed_page_count =
            vm_compressor_pager_get_count(object->pager);
    } else {
        compressed_page_count = 0;
    }

    if (old_state == VM_PURGABLE_VOLATILE ||
        old_state == VM_PURGABLE_EMPTY) {
        /* less volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_volatile,
                     ptoa(resident_page_count - wired_page_count));
        /* less compressed volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_volatile_compressed,
                     ptoa(compressed_page_count));

        if (disown || !object->alive || object->terminating) {
            /* wired pages were accounted as "non-volatile"... */
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_nonvolatile,
                         ptoa(wired_page_count));
            /* ... and in phys_footprint */
            ledger_debit(owner->ledger,
                         task_ledgers.phys_footprint,
                         ptoa(wired_page_count));

            if (!disown_on_the_fly &&
                (object->purgeable_queue_type ==
                 PURGEABLE_Q_TYPE_MAX)) {
                /*
                 * Not on a volatile queue: must be empty
                 * or emptying.
                 */
                vm_purgeable_nonvolatile_owner_update(owner, -1);
            } else {
                /* on a volatile queue */
                vm_purgeable_volatile_owner_update(owner, -1);
            }
            /* no more accounting for this dead object */
            object->vo_purgeable_owner = NULL;
#if DEBUG
            object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
            return;
        }

        /* more non-volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_nonvolatile,
                      ptoa(resident_page_count - wired_page_count));
        /* more compressed non-volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_nonvolatile_compressed,
                      ptoa(compressed_page_count));
        /* more footprint */
        ledger_credit(owner->ledger,
                      task_ledgers.phys_footprint,
                      ptoa(resident_page_count
                           + compressed_page_count
                           - wired_page_count));

    } else if (old_state == VM_PURGABLE_NONVOLATILE) {

        /* less non-volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_nonvolatile,
                     ptoa(resident_page_count - wired_page_count));
        /* less compressed non-volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_nonvolatile_compressed,
                     ptoa(compressed_page_count));
        /* less footprint */
        ledger_debit(owner->ledger,
                     task_ledgers.phys_footprint,
                     ptoa(resident_page_count
                          + compressed_page_count
                          - wired_page_count));

        if (disown || !object->alive || object->terminating) {
            /* wired pages still accounted as "non-volatile" */
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_nonvolatile,
                         ptoa(wired_page_count));
            ledger_debit(owner->ledger,
                         task_ledgers.phys_footprint,
                         ptoa(wired_page_count));

            /* one less "non-volatile" object for the owner */
            if (!disown_on_the_fly) {
                assert(object->purgeable_queue_type ==
                       PURGEABLE_Q_TYPE_MAX);
            }
            vm_purgeable_nonvolatile_owner_update(owner, -1);
            /* no more accounting for this dead object */
            object->vo_purgeable_owner = NULL;
#if DEBUG
            object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
            return;
        }
        /* more volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_volatile,
                      ptoa(resident_page_count - wired_page_count));
        /* more compressed volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_volatile_compressed,
                      ptoa(compressed_page_count));
    } else {
        panic("vm_purgeable_accounting(%p): "
              "unexpected old_state=%d\n",
              object, old_state);
    }

    vm_object_lock_assert_exclusive(object);
}

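/*
 * The two helpers below atomically adjust the per-task counts of owned
 * non-volatile / volatile purgeable objects (task_nonvolatile_objects and
 * task_volatile_objects), asserting that the counters never go negative.
 */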
void
vm_purgeable_nonvolatile_owner_update(
    task_t owner,
    int delta)
{
    if (owner == NULL || delta == 0) {
        return;
    }

    if (delta > 0) {
        assert(owner->task_nonvolatile_objects >= 0);
        OSAddAtomic(delta, &owner->task_nonvolatile_objects);
        assert(owner->task_nonvolatile_objects > 0);
    } else {
        assert(owner->task_nonvolatile_objects > delta);
        OSAddAtomic(delta, &owner->task_nonvolatile_objects);
        assert(owner->task_nonvolatile_objects >= 0);
    }
}

void
vm_purgeable_volatile_owner_update(
    task_t owner,
    int delta)
{
    if (owner == NULL || delta == 0) {
        return;
    }

    if (delta > 0) {
        assert(owner->task_volatile_objects >= 0);
        OSAddAtomic(delta, &owner->task_volatile_objects);
        assert(owner->task_volatile_objects > 0);
    } else {
        assert(owner->task_volatile_objects > delta);
        OSAddAtomic(delta, &owner->task_volatile_objects);
        assert(owner->task_volatile_objects >= 0);
    }
}

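/*
 * Account for pages of a purgeable object moving into (delta > 0) or out
 * of (delta < 0) the compressor: volatile and non-volatile objects charge
 * different ledgers, and only non-volatile compressed pages count toward
 * phys_footprint.
 */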
void
vm_purgeable_compressed_update(
    vm_object_t object,
    int delta)
{
    task_t owner;

    vm_object_lock_assert_exclusive(object);

    if (delta == 0 ||
        !object->internal ||
        object->purgable == VM_PURGABLE_DENY ||
        object->vo_purgeable_owner == NULL) {
        /* not an owned purgeable VM object: nothing to update */
        return;
    }

    owner = object->vo_purgeable_owner;
    switch (object->purgable) {
    case VM_PURGABLE_DENY:
        break;
    case VM_PURGABLE_NONVOLATILE:
        if (delta > 0) {
            ledger_credit(owner->ledger,
                          task_ledgers.purgeable_nonvolatile_compressed,
                          ptoa(delta));
            ledger_credit(owner->ledger,
                          task_ledgers.phys_footprint,
                          ptoa(delta));
        } else {
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_nonvolatile_compressed,
                         ptoa(-delta));
            ledger_debit(owner->ledger,
                         task_ledgers.phys_footprint,
                         ptoa(-delta));
        }
        break;
    case VM_PURGABLE_VOLATILE:
    case VM_PURGABLE_EMPTY:
        if (delta > 0) {
            ledger_credit(owner->ledger,
                          task_ledgers.purgeable_volatile_compressed,
                          ptoa(delta));
        } else {
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_volatile_compressed,
                         ptoa(-delta));
        }
        break;
    default:
        panic("vm_purgeable_compressed_update(): "
              "unexpected purgable %d for object %p\n",
              object->purgable, object);
    }
}