/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/sched_prim.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>                 /* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>

/*
 * LOCK ORDERING for task-owned purgeable objects
 *
 * Whenever we need to hold multiple locks while adding to, removing from,
 * or scanning a task's task_objq list of VM objects it owns, locks should
 * be taken in this order:
 *
 * VM object ==> vm_purgeable_queue_lock ==> owner_task->task_objq_lock
 *
 * If one needs to acquire the VM object lock after any of the other 2 locks,
 * one needs to use vm_object_lock_try() and, if that fails, release the
 * other locks and retake them all in the correct order.
 */

extern vm_pressure_level_t memorystatus_vm_pressure_level;

struct token {
	token_cnt_t count;
	token_idx_t prev;
	token_idx_t next;
};
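
/*
 * Layout note (summary of the declarations below): tokens live in one
 * resizable array, "tokens", rather than as individually allocated nodes.
 * A token_idx_t is an index into that array, index 0 is reserved as the
 * "no token" sentinel, and the free list is threaded through the .next
 * field starting at token_free_idx. A token's count is the number of
 * inactive-queue pages that must drain before the token becomes ripe.
 */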

struct token *tokens;
token_idx_t token_q_max_cnt = 0;
vm_size_t token_q_cur_size = 0;

token_idx_t token_free_idx = 0;         /* head of free queue */
token_idx_t token_init_idx = 1;         /* token 0 is reserved!! */
int32_t token_new_pagecount = 0;        /* count of pages that will
                                         * be added onto token queue */

int available_for_purge = 0;            /* increase when ripe token
                                         * added, decrease when ripe
                                         * token removed.
                                         * protected by page_queue_lock
                                         */

static int token_q_allocating = 0;      /* flag for singlethreading
                                         * allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t purgeable_nonvolatile_queue;
int purgeable_nonvolatile_count;

decl_lck_mtx_data(, vm_purgeable_queue_lock);

static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);


#if MACH_ASSERT
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
	int token_cnt = 0, page_cnt = 0;
	token_idx_t token = queue->token_q_head;
	token_idx_t unripe = 0;
	int our_inactive_count;


#if DEVELOPMENT
	static int lightweight_check = 0;

	/*
	 * Due to performance impact, perform this check less frequently on
	 * DEVELOPMENT kernels. Checking the queue scales linearly with its
	 * length, so we compensate by performing this check less frequently
	 * as the queue grows.
	 */
	if (lightweight_check++ < (100 + queue->debug_count_tokens / 512)) {
		return;
	}

	lightweight_check = 0;
#endif

	while (token) {
		if (tokens[token].count != 0) {
			assert(queue->token_q_unripe);
			if (unripe == 0) {
				assert(token == queue->token_q_unripe);
				unripe = token;
			}
			page_cnt += tokens[token].count;
		}
		if (tokens[token].next == 0) {
			assert(queue->token_q_tail == token);
		}

		token_cnt++;
		token = tokens[token].next;
	}

	if (unripe) {
		assert(queue->token_q_unripe == unripe);
	}
	assert(token_cnt == queue->debug_count_tokens);

	/* obsolete queue doesn't maintain token counts */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
		our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
		assert(our_inactive_count >= 0);
		assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
	}
}
#endif

/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/* new token */
	token_idx_t token;
	enum purgeable_q_type i;

find_available_token:

	if (token_free_idx) {                           /* unused tokens available */
		token = token_free_idx;
		token_free_idx = tokens[token_free_idx].next;
	} else if (token_init_idx < token_q_max_cnt) {  /* lazy token array init */
		token = token_init_idx;
		token_init_idx++;
	} else {                                        /* allocate more memory */
		/* Wait if another thread is inside the memory alloc section */
		while (token_q_allocating) {
			wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
			    LCK_SLEEP_DEFAULT,
			    (event_t)&token_q_allocating,
			    THREAD_UNINT);
			if (res != THREAD_AWAKENED) {
				return KERN_ABORTED;
			}
		}

		/* Check whether memory is still maxed out */
		if (token_init_idx < token_q_max_cnt) {
			goto find_available_token;
		}

		/* Still no memory. Allocate some. */
		token_q_allocating = 1;

		/* Drop page queue lock so we can allocate */
		vm_page_unlock_queues();

		struct token *new_loc;
		vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
		kern_return_t result;

		if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
			result = KERN_RESOURCE_SHORTAGE;
		} else {
			if (token_q_cur_size) {
				result = kmem_realloc(kernel_map,
				    (vm_offset_t) tokens,
				    token_q_cur_size,
				    (vm_offset_t *) &new_loc,
				    alloc_size, VM_KERN_MEMORY_OSFMK);
			} else {
				result = kmem_alloc(kernel_map,
				    (vm_offset_t *) &new_loc,
				    alloc_size, VM_KERN_MEMORY_OSFMK);
			}
		}

		vm_page_lock_queues();

		if (result) {
			/* Unblock waiting threads */
			token_q_allocating = 0;
			thread_wakeup((event_t)&token_q_allocating);
			return result;
		}

		/* If we get here, we allocated new memory. Update pointers and
		 * dealloc old range */
		struct token *old_tokens = tokens;
		tokens = new_loc;
		vm_size_t old_token_q_cur_size = token_q_cur_size;
		token_q_cur_size = alloc_size;
		token_q_max_cnt = (token_idx_t) (token_q_cur_size /
		    sizeof(struct token));
		assert(token_init_idx < token_q_max_cnt);       /* We must have a free token now */

		if (old_token_q_cur_size) {                     /* clean up old mapping */
			vm_page_unlock_queues();
			/* kmem_realloc leaves the old region mapped. Get rid of it. */
			kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
			vm_page_lock_queues();
		}

		/* Unblock waiting threads */
		token_q_allocating = 0;
		thread_wakeup((event_t)&token_q_allocating);

		goto find_available_token;
	}

	assert(token);

	/*
	 * The new pagecount we got needs to be applied to all queues except
	 * the obsolete queue.
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
		assert(pages >= 0);
		assert(pages <= TOKEN_COUNT_MAX);
		purgeable_queues[i].new_pages = (int32_t) pages;
		assert(purgeable_queues[i].new_pages == pages);
	}
	token_new_pagecount = 0;

	/* set token counter value */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
		tokens[token].count = queue->new_pages;
	} else {
		tokens[token].count = 0;        /* all obsolete items are
		                                 * ripe immediately */
	}
	queue->new_pages = 0;

	/* put token on token counter list */
	tokens[token].next = 0;
	if (queue->token_q_tail == 0) {
		assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
		queue->token_q_head = token;
		tokens[token].prev = 0;
	} else {
		tokens[queue->token_q_tail].next = token;
		tokens[token].prev = queue->token_q_tail;
	}
	if (queue->token_q_unripe == 0) {       /* only ripe tokens (token
		                                 * count == 0) in queue */
		if (tokens[token].count > 0) {
			queue->token_q_unripe = token;  /* first unripe token */
		} else {
			available_for_purge++;  /* added a ripe token?
			                         * increase available count */
		}
	}
	queue->token_q_tail = token;

#if MACH_ASSERT
	queue->debug_count_tokens++;
	/* Check both queues, since we modified the new_pages count on each */
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
	    queue->type,
	    tokens[token].count,        /* num pages on token
	                                 * (last token) */
	    queue->debug_count_tokens,
	    0,
	    0);
#endif

	return KERN_SUCCESS;
}
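
/*
 * Example (sketch only, not an actual call site in this file): a caller
 * honoring the locking contract above would look like
 *
 *	vm_page_lock_queues();
 *	kr = vm_purgeable_token_add(queue);
 *	vm_page_unlock_queues();
 *
 * where kr is KERN_SUCCESS, KERN_ABORTED (interrupted while waiting for
 * another allocating thread) or an allocation failure such as
 * KERN_RESOURCE_SHORTAGE.
 */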

/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t token;
	token = queue->token_q_head;

	assert(token);

	if (token) {
		assert(queue->token_q_tail);
		if (queue->token_q_head == queue->token_q_unripe) {
			/* no ripe tokens... must move unripe pointer */
			queue->token_q_unripe = tokens[token].next;
		} else {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		}

		if (queue->token_q_tail == queue->token_q_head) {
			assert(tokens[token].next == 0);
		}

		queue->token_q_head = tokens[token].next;
		if (queue->token_q_head) {
			tokens[queue->token_q_head].count += tokens[token].count;
			tokens[queue->token_q_head].prev = 0;
		} else {
			/* currently no other tokens in the queue */
			/*
			 * the page count must be added to the next newly
			 * created token
			 */
			queue->new_pages += tokens[token].count;
			/* if head is zero, tail is too */
			queue->token_q_tail = 0;
		}

#if MACH_ASSERT
		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
		    queue->type,
		    tokens[queue->token_q_head].count,  /* num pages on new
		                                         * first token */
		    token_new_pagecount,        /* num pages waiting for
		                                 * next token */
		    available_for_purge,
		    0);
#endif
	}
	return token;
}

static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	token_idx_t token;
	token = queue->token_q_tail;

	assert(token);

	if (token) {
		assert(queue->token_q_head);

		if (queue->token_q_tail == queue->token_q_head) {
			assert(tokens[token].next == 0);
		}

		if (queue->token_q_unripe == 0) {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		} else if (queue->token_q_unripe == token) {
			/* we're removing the only unripe token */
			queue->token_q_unripe = 0;
		}

		if (token == queue->token_q_head) {
			/* token is the last one in the queue */
			queue->token_q_head = 0;
			queue->token_q_tail = 0;
		} else {
			token_idx_t new_tail;

			new_tail = tokens[token].prev;

			assert(new_tail);
			assert(tokens[new_tail].next == token);

			queue->token_q_tail = new_tail;
			tokens[new_tail].next = 0;
		}

		queue->new_pages += tokens[token].count;

#if MACH_ASSERT
		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
		    queue->type,
		    tokens[queue->token_q_head].count,  /* num pages on new
		                                         * first token */
		    token_new_pagecount,        /* num pages waiting for
		                                 * next token */
		    available_for_purge,
		    0);
#endif
	}
	return token;
}

/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	token_idx_t token = vm_purgeable_token_remove_first(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		tokens[token].prev = 0;
		token_free_idx = token;
	}
}

void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	token_idx_t token = vm_purgeable_token_remove_last(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		tokens[token].prev = 0;
		token_free_idx = token;
	}
}


/* Call with page queue locked. */
void
vm_purgeable_q_advance_all()
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/* check queue counters - if they get really large, scale them back.
	 * They tend to get that large when there is no purgeable queue action */
	int i;
	if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {     /* a system idling for years might get there */
		for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
			int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
			assert(pages >= 0);
			assert(pages <= TOKEN_COUNT_MAX);
			purgeable_queues[i].new_pages = (int32_t) pages;
			assert(purgeable_queues[i].new_pages == pages);
		}
		token_new_pagecount = 0;
	}

	/*
	 * Decrement token counters. A token counter can be zero; this means the
	 * object is ripe to be purged. It is not purged immediately, because that
	 * could cause several objects to be purged even if purging one would satisfy
	 * the memory needs. Instead, the pageout thread purges one after the other
	 * by calling vm_purgeable_object_purge_one and then rechecking the memory
	 * balance.
	 *
	 * No need to advance the obsolete queue - all items there are always
	 * ripe.
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t queue = &purgeable_queues[i];
		uint32_t num_pages = 1;

		/* Iterate over tokens as long as there are unripe tokens. */
		while (queue->token_q_unripe) {
			if (tokens[queue->token_q_unripe].count && num_pages) {
				tokens[queue->token_q_unripe].count -= 1;
				num_pages -= 1;
			}

			if (tokens[queue->token_q_unripe].count == 0) {
				queue->token_q_unripe = tokens[queue->token_q_unripe].next;
				available_for_purge++;
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
				    queue->type,
				    tokens[queue->token_q_head].count,  /* num pages on new
				                                         * first token */
				    0,
				    available_for_purge,
				    0);
				continue;       /* One token ripened. Make sure to
				                 * check the next. */
			}
			if (num_pages == 0) {
				break;  /* Current token not ripe and no more pages.
				         * Work done. */
			}
		}

		/*
		 * If there are no unripe tokens in the queue, decrement the
		 * new_pages counter instead. new_pages can be negative, but must be
		 * canceled out by token_new_pagecount -- since the inactive queue
		 * as a whole always contains a nonnegative number of pages.
		 */
		if (!queue->token_q_unripe) {
			queue->new_pages -= num_pages;
			assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
		}
#if MACH_ASSERT
		vm_purgeable_token_check_queue(queue);
#endif
	}
}
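
/*
 * Rough model of the aging scheme above: each call to
 * vm_purgeable_q_advance_all() drains one inactive-queue page from the
 * head unripe token of each queue (or from new_pages when no unripe token
 * exists); a token whose count reaches zero ripens, advances
 * token_q_unripe and increments available_for_purge.
 */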

/*
 * Grab any ripe object and purge it, obsolete queue first. Then go through
 * each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *	Yes - purge it. Remove token.
 *	No - check other queue. Is there an object?
 *		No - increment group, then go to (1)
 *		Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *		token from other queue and migrate unripe token from this
 *		queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
	/* return token to free list. advance token list. */
	token_idx_t new_head = tokens[queue->token_q_head].next;
	tokens[queue->token_q_head].next = token_free_idx;
	tokens[queue->token_q_head].prev = 0;
	token_free_idx = queue->token_q_head;
	queue->token_q_head = new_head;
	tokens[new_head].prev = 0;
	if (new_head == 0) {
		queue->token_q_tail = 0;
	}

#if MACH_ASSERT
	queue->debug_count_tokens--;
	vm_purgeable_token_check_queue(queue);
#endif

	available_for_purge--;
	assert(available_for_purge >= 0);
}

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(queue->token_q_head);

	if (tokens[queue->token_q_head].count == 0) {
		/* This queue has a ripe token. Remove. */
		vm_purgeable_token_remove_ripe(queue);
	} else {
		assert(queue2);
		/*
		 * queue2 must have a ripe token. Remove, and migrate one
		 * from queue to queue2.
		 */
		vm_purgeable_token_remove_ripe(queue2);
		/* migrate unripe token */
		token_idx_t token;
		token_cnt_t count;

		/* remove token from queue1 */
		assert(queue->token_q_unripe == queue->token_q_head);   /* queue1 had no unripe
		                                                         * tokens, remember? */
		token = vm_purgeable_token_remove_first(queue);
		assert(token);

		count = tokens[token].count;

		/* migrate to queue2 */
		/* go to migration target loc */

		token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;

		while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
			count -= tokens[token_to_insert_before].count;
			token_to_insert_before = tokens[token_to_insert_before].next;
		}

		/* token_to_insert_before is now set correctly */

		/* should the inserted token become the first unripe token? */
		if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0)) {
			queue2->token_q_unripe = token; /* if so, must update unripe pointer */
		}
		/*
		 * insert token.
		 * if inserting at end, reduce new_pages by that value;
		 * otherwise, reduce counter of next token
		 */

		tokens[token].count = count;

		if (token_to_insert_before != 0) {
			token_to_insert_after = tokens[token_to_insert_before].prev;

			tokens[token].next = token_to_insert_before;
			tokens[token_to_insert_before].prev = token;

			assert(tokens[token_to_insert_before].count >= count);
			tokens[token_to_insert_before].count -= count;
		} else {
			/* if we ran off the end of the list, the token to insert after is the tail */
			token_to_insert_after = queue2->token_q_tail;

			tokens[token].next = 0;
			queue2->token_q_tail = token;

			assert(queue2->new_pages >= (int32_t) count);
			queue2->new_pages -= count;
		}

		if (token_to_insert_after != 0) {
			tokens[token].prev = token_to_insert_after;
			tokens[token_to_insert_after].next = token;
		} else {
			/* is this case possible? */
			tokens[token].prev = 0;
			queue2->token_q_head = token;
		}

#if MACH_ASSERT
		queue2->debug_count_tokens++;
		vm_purgeable_token_check_queue(queue2);
#endif
	}
}

/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(
	purgeable_q_t queue,
	int group,
	boolean_t pick_ripe)
{
	vm_object_t object, best_object;
	int object_task_importance;
	int best_object_task_importance;
	int best_object_skipped;
	int num_objects_skipped;
	int try_lock_failed = 0;
	int try_lock_succeeded = 0;
	task_t owner;

	best_object = VM_OBJECT_NULL;
	best_object_task_importance = INT_MAX;

	LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
	/*
	 * Usually we would pick the first element from a queue. However, we
	 * might not be able to get a lock on it, in which case we try the
	 * remaining elements in order.
	 */

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
	    pick_ripe,
	    group,
	    VM_KERNEL_UNSLIDE_OR_PERM(queue),
	    0,
	    0);

	num_objects_skipped = 0;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	    !queue_end(&queue->objq[group], (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq),
	    num_objects_skipped++) {
		/*
		 * To prevent us looping for an excessively long time, choose
		 * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
		 * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
		 * we keep going until we find the first eligible object.
		 */
		if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
			break;
		}

		if (pick_ripe &&
		    !object->purgeable_when_ripe) {
			/* we want an object that has a ripe token */
			continue;
		}

		object_task_importance = 0;

		/*
		 * We don't want to use VM_OBJECT_OWNER() here: we want to
		 * distinguish kernel-owned and disowned objects.
		 * Disowned objects have no owner and will have no importance...
		 */
		owner = object->vo_owner;
		if (owner != NULL && owner != VM_OBJECT_OWNER_DISOWNED) {
#if CONFIG_EMBEDDED
#if CONFIG_JETSAM
			object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE);
#endif /* CONFIG_JETSAM */
#else /* CONFIG_EMBEDDED */
			object_task_importance = task_importance_estimate(owner);
#endif /* CONFIG_EMBEDDED */
		}

		if (object_task_importance < best_object_task_importance) {
			if (vm_object_lock_try(object)) {
				try_lock_succeeded++;
				if (best_object != VM_OBJECT_NULL) {
					/* forget about previous best object */
					vm_object_unlock(best_object);
				}
				best_object = object;
				best_object_task_importance = object_task_importance;
				best_object_skipped = num_objects_skipped;
				if (best_object_task_importance == 0) {
					/* can't get any better: stop looking */
					break;
				}
			} else {
				try_lock_failed++;
			}
		}
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
	    num_objects_skipped,        /* considered objects */
	    try_lock_failed,
	    try_lock_succeeded,
	    VM_KERNEL_UNSLIDE_OR_PERM(best_object),
	    ((best_object == NULL) ? 0 : best_object->resident_page_count));

	object = best_object;

	if (object == VM_OBJECT_NULL) {
		return VM_OBJECT_NULL;
	}

	/* Locked. Great. We'll take it. Remove and return. */
//	printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

	vm_object_lock_assert_exclusive(object);

	queue_remove(&queue->objq[group], object,
	    vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	object->purgeable_queue_group = 0;
	/* one less volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);

#if DEBUG
	object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */

	/* keep queue of non-volatile objects */
	queue_enter(&purgeable_nonvolatile_queue, object,
	    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	/* one more nonvolatile object for this object's owner */
	vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);

#if MACH_ASSERT
	queue->debug_count_objects--;
#endif
	return object;
}
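
/*
 * Note on the selection policy above: within a group, objects owned by
 * less important tasks are preferred victims. Importance comes from the
 * memorystatus/jetsam priority on CONFIG_EMBEDDED kernels and from
 * task_importance_estimate() elsewhere; an importance of 0 cannot be
 * beaten, so the scan stops early when it finds one.
 */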

/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
	enum purgeable_q_type i;
	int group;
	vm_object_t object;
	unsigned int purged_count;
	uint32_t collisions;

	purged_count = 0;
	collisions = 0;

restart:
	lck_mtx_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t queue;

		queue = &purgeable_queues[i];

		/*
		 * Look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			while (!queue_empty(&queue->objq[group])) {
				object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
				if (object == VM_OBJECT_NULL) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					mutex_pause(collisions++);
					goto restart;
				}

				lck_mtx_unlock(&vm_purgeable_queue_lock);

				/* Lock the page queue here so we don't hold it
				 * over the whole, lengthy operation */
				if (object->purgeable_when_ripe) {
					vm_page_lock_queues();
					vm_purgeable_token_remove_first(queue);
					vm_page_unlock_queues();
				}

				(void) vm_object_purge(object, 0);
				assert(object->purgable == VM_PURGABLE_EMPTY);
				/* no change in purgeable accounting */

				vm_object_unlock(object);
				purged_count++;
				goto restart;
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
	    purged_count,       /* # of purged objects */
	    0,
	    available_for_purge,
	    0,
	    0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return;
}

boolean_t
vm_purgeable_object_purge_one_unlocked(
	int force_purge_below_group)
{
	boolean_t retval;

	vm_page_lock_queues();
	retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
	vm_page_unlock_queues();

	return retval;
}

boolean_t
vm_purgeable_object_purge_one(
	int force_purge_below_group,
	int flags)
{
	enum purgeable_q_type i;
	int group;
	vm_object_t object = 0;
	purgeable_q_t queue, queue2;
	boolean_t forced_purge;
	unsigned int resident_page_count;


	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_START,
	    force_purge_below_group, flags, 0, 0, 0);

	/* Need the page queue lock since we'll be changing the token queue. */
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		queue = &purgeable_queues[i];

		if (force_purge_below_group == 0) {
			/*
			 * Are there any ripe tokens on this queue? If yes,
			 * we'll find an object to purge there
			 */
			if (!queue->token_q_head) {
				/* no token: look at next purgeable queue */
				continue;
			}

			if (tokens[queue->token_q_head].count != 0) {
				/* no ripe token: next queue */
				continue;
			}
		}

		/*
		 * Now look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			if (!queue->token_q_head ||
			    tokens[queue->token_q_head].count != 0) {
				/* no tokens or no ripe tokens */

				if (group >= force_purge_below_group) {
					/* no more groups to force-purge */
					break;
				}

				/*
				 * Try and purge an object in this group
				 * even though no tokens are ripe.
				 */
				if (!queue_empty(&queue->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					if (object->purgeable_when_ripe) {
						vm_purgeable_token_delete_first(queue);
					}
					forced_purge = TRUE;
					goto purge_now;
				}

				/* nothing to purge in this group: next group */
				continue;
			}
			if (!queue_empty(&queue->objq[group]) &&
			    (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				if (object->purgeable_when_ripe) {
					vm_purgeable_token_choose_and_delete_ripe(queue, 0);
				}
				forced_purge = FALSE;
				goto purge_now;
			}
			if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
				/* This is the token migration case, and it works between
				 * FIFO and LIFO only */
				queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
				    PURGEABLE_Q_TYPE_FIFO :
				    PURGEABLE_Q_TYPE_LIFO];

				if (!queue_empty(&queue2->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					if (object->purgeable_when_ripe) {
						vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
					}
					forced_purge = FALSE;
					goto purge_now;
				}
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	/*
	 * because we have to do a try_lock on the objects which could fail,
	 * we could end up with no object to purge at this time, even though
	 * we have objects in a purgeable state
	 */
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
	    0, 0, available_for_purge, 0, 0);

	return FALSE;

purge_now:

	assert(object);
	vm_page_unlock_queues();        /* Unlock for call to vm_object_purge() */
//	printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_owner, task_importance_estimate(object->vo_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
	resident_page_count = object->resident_page_count;
	(void) vm_object_purge(object, flags);
	assert(object->purgable == VM_PURGABLE_EMPTY);
	/* no change in purgeable accounting */
	vm_object_unlock(object);
	vm_page_lock_queues();

	vm_pageout_vminfo.vm_pageout_pages_purged += resident_page_count;

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
	    VM_KERNEL_UNSLIDE_OR_PERM(object),  /* purged object */
	    resident_page_count,
	    available_for_purge,
	    0,
	    0);

	return TRUE;
}
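
/*
 * Summary of the contract above: vm_purgeable_object_purge_one() returns
 * TRUE when an object was purged and FALSE when every candidate failed its
 * try-lock; callers such as the pageout thread are expected to recheck the
 * memory balance and retry rather than conclude that nothing is purgeable.
 */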

/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
	vm_object_lock_assert_exclusive(object);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
	    vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	/* one less nonvolatile object for this object's owner */
	vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), -1);

	if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE) {
		group = 0;
	}

	if (queue->type != PURGEABLE_Q_TYPE_LIFO) {     /* fifo and obsolete are
		                                         * fifo-queued */
		queue_enter(&queue->objq[group], object, vm_object_t, objq);    /* last to die */
	} else {
		queue_enter_first(&queue->objq[group], object, vm_object_t, objq);     /* first to die */
	}
	/* one more volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), +1);

	object->purgeable_queue_type = queue->type;
	object->purgeable_queue_group = group;

#if DEBUG
	assert(object->vo_purgeable_volatilizer == NULL);
	object->vo_purgeable_volatilizer = current_task();
	OSBacktrace(&object->purgeable_volatilizer_bt[0],
	    ARRAY_COUNT(object->purgeable_volatilizer_bt));
#endif /* DEBUG */

#if MACH_ASSERT
	queue->debug_count_objects++;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
	    0,
	    tokens[queue->token_q_head].count,
	    queue->type,
	    group,
	    0);
#endif

	lck_mtx_unlock(&vm_purgeable_queue_lock);
}

/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
	int group;
	enum purgeable_q_type type;
	purgeable_q_t queue;

	vm_object_lock_assert_exclusive(object);

	type = object->purgeable_queue_type;
	group = object->purgeable_queue_group;

	if (type == PURGEABLE_Q_TYPE_MAX) {
		if (object->objq.prev || object->objq.next) {
			panic("unmarked object on purgeable q");
		}

		return NULL;
	} else if (!(object->objq.prev && object->objq.next)) {
		panic("marked object not on purgeable q");
	}

	lck_mtx_lock(&vm_purgeable_queue_lock);

	queue = &purgeable_queues[type];

	queue_remove(&queue->objq[group], object, vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	/* one less volatile object for this object's owner */
	vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);
#if DEBUG
	object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
	/* keep queue of non-volatile objects */
	if (object->alive && !object->terminating) {
		queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
		assert(purgeable_nonvolatile_count >= 0);
		purgeable_nonvolatile_count++;
		assert(purgeable_nonvolatile_count > 0);
		/* one more nonvolatile object for this object's owner */
		vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);
	}

#if MACH_ASSERT
	queue->debug_count_objects--;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
	    0,
	    tokens[queue->token_q_head].count,
	    queue->type,
	    group,
	    0);
#endif

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	object->purgeable_queue_group = 0;

	vm_object_lock_assert_exclusive(object);

	return &purgeable_queues[type];
}

void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
	LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

	stat->count = stat->size = 0;
	vm_object_t object;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	    !queue_end(&queue->objq[group], (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq)) {
		if (!target_task || VM_OBJECT_OWNER(object) == target_task) {
			stat->count++;
			stat->size += (object->resident_page_count * PAGE_SIZE);
		}
	}
	return;
}

void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
	purgeable_q_t queue;
	int group;

	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Populate fifo_data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);
	}

	/* Populate lifo_data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);
	}

	/* Populate obsolete data */
	queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return;
}

#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
	purgeable_q_t queue,
	int group,
	task_t task,
	pvm_account_info_t acnt_info)
{
	vm_object_t object;
	uint64_t compressed_count;

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	    !queue_end(&queue->objq[group], (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq)) {
		if (VM_OBJECT_OWNER(object) == task) {
			compressed_count = vm_compressor_pager_get_count(object->pager);
			acnt_info->pvm_volatile_compressed_count += compressed_count;
			acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
			acnt_info->pvm_nonvolatile_count += object->wired_page_count;
		}
	}
}

/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
	task_t task,
	pvm_account_info_t acnt_info)
{
	queue_head_t *nonvolatile_q;
	vm_object_t object;
	int group;
	int state;
	uint64_t compressed_count;
	purgeable_q_t volatile_q;


	if ((task == NULL) || (acnt_info == NULL)) {
		return KERN_INVALID_ARGUMENT;
	}

	acnt_info->pvm_volatile_count = 0;
	acnt_info->pvm_volatile_compressed_count = 0;
	acnt_info->pvm_nonvolatile_count = 0;
	acnt_info->pvm_nonvolatile_compressed_count = 0;

	lck_mtx_lock(&vm_purgeable_queue_lock);

	nonvolatile_q = &purgeable_nonvolatile_queue;
	for (object = (vm_object_t) queue_first(nonvolatile_q);
	    !queue_end(nonvolatile_q, (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq)) {
		if (VM_OBJECT_OWNER(object) == task) {
			state = object->purgable;
			compressed_count = vm_compressor_pager_get_count(object->pager);
			if (state == VM_PURGABLE_EMPTY) {
				acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
				acnt_info->pvm_volatile_compressed_count += compressed_count;
			} else {
				acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
				acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
			}
			acnt_info->pvm_nonvolatile_count += object->wired_page_count;
		}
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
	}
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
	acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
	acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
	acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

	return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */

static uint64_t
vm_purgeable_queue_purge_task_owned(
	purgeable_q_t queue,
	int group,
	task_t task)
{
	vm_object_t object = VM_OBJECT_NULL;
	int collisions = 0;
	uint64_t num_pages_purged = 0;

	num_pages_purged = 0;
	collisions = 0;

look_again:
	lck_mtx_lock(&vm_purgeable_queue_lock);

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	    !queue_end(&queue->objq[group], (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq)) {
		if (object->vo_owner != task) {
			continue;
		}

		/* found an object: try and grab it */
		if (!vm_object_lock_try(object)) {
			lck_mtx_unlock(&vm_purgeable_queue_lock);
			mutex_pause(collisions++);
			goto look_again;
		}
		/* got it ! */

		collisions = 0;

		/* remove object from purgeable queue */
		queue_remove(&queue->objq[group], object,
		    vm_object_t, objq);
		object->objq.next = NULL;
		object->objq.prev = NULL;
		object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
		object->purgeable_queue_group = 0;
		/* one less volatile object for this object's owner */
		assert(object->vo_owner == task);
		vm_purgeable_volatile_owner_update(task, -1);

#if DEBUG
		object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
		queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
		assert(purgeable_nonvolatile_count >= 0);
		purgeable_nonvolatile_count++;
		assert(purgeable_nonvolatile_count > 0);
		/* one more nonvolatile object for this object's owner */
		assert(object->vo_owner == task);
		vm_purgeable_nonvolatile_owner_update(task, +1);

		/* unlock purgeable queues */
		lck_mtx_unlock(&vm_purgeable_queue_lock);

		if (object->purgeable_when_ripe) {
			/* remove a token */
			vm_page_lock_queues();
			vm_purgeable_token_remove_first(queue);
			vm_page_unlock_queues();
		}

		/* purge the object */
		num_pages_purged += vm_object_purge(object, 0);

		assert(object->purgable == VM_PURGABLE_EMPTY);
		/* no change for purgeable accounting */
		vm_object_unlock(object);

		/* we unlocked the purgeable queues, so start over */
		goto look_again;
	}

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	return num_pages_purged;
}

uint64_t
vm_purgeable_purge_task_owned(
	task_t task)
{
	purgeable_q_t queue = NULL;
	int group = 0;
	uint64_t num_pages_purged = 0;

	num_pages_purged = 0;

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
	    0,
	    task);

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
		    group,
		    task);
	}

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
		num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
		    group,
		    task);
	}

	return num_pages_purged;
}

void
vm_purgeable_nonvolatile_enqueue(
	vm_object_t object,
	task_t owner)
{
	int ledger_flags;
	kern_return_t kr;

	vm_object_lock_assert_exclusive(object);

	assert(object->purgable == VM_PURGABLE_NONVOLATILE);
	assert(object->vo_owner == NULL);

	lck_mtx_lock(&vm_purgeable_queue_lock);

	if (owner != NULL &&
	    owner->task_objects_disowning) {
		/* task is exiting and no longer tracking purgeable objects */
		owner = VM_OBJECT_OWNER_DISOWNED;
	}
	if (owner == NULL) {
		owner = kernel_task;
	}
#if DEBUG
	OSBacktrace(&object->purgeable_owner_bt[0],
	    ARRAY_COUNT(object->purgeable_owner_bt));
	object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */

	ledger_flags = 0;
	if (object->vo_no_footprint) {
		ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
	}
	kr = vm_object_ownership_change(object,
	    object->vo_ledger_tag,      /* tag unchanged */
	    owner,
	    ledger_flags,
	    FALSE);     /* task_objq_locked */
	assert(kr == KERN_SUCCESS);

	assert(object->objq.next == NULL);
	assert(object->objq.prev == NULL);

	queue_enter(&purgeable_nonvolatile_queue, object,
	    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}

void
vm_purgeable_nonvolatile_dequeue(
	vm_object_t object)
{
	task_t owner;
	kern_return_t kr;

	vm_object_lock_assert_exclusive(object);

	owner = VM_OBJECT_OWNER(object);
#if DEBUG
	assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
	if (owner != NULL) {
		/*
		 * Update the owner's ledger to stop accounting
		 * for this object.
		 */
		/* transfer ownership to the kernel */
		assert(VM_OBJECT_OWNER(object) != kernel_task);
		kr = vm_object_ownership_change(
			object,
			object->vo_ledger_tag,          /* unchanged */
			VM_OBJECT_OWNER_DISOWNED,       /* new owner */
			0,                              /* ledger_flags */
			FALSE);                         /* old_owner->task_objq locked */
		assert(kr == KERN_SUCCESS);
		assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
	}

	lck_mtx_lock(&vm_purgeable_queue_lock);
	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
	    vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}

void
vm_purgeable_accounting(
	vm_object_t object,
	vm_purgable_t old_state)
{
	task_t owner;
	int resident_page_count;
	int wired_page_count;
	int compressed_page_count;
	int ledger_idx_volatile;
	int ledger_idx_nonvolatile;
	int ledger_idx_volatile_compressed;
	int ledger_idx_nonvolatile_compressed;
	boolean_t do_footprint;

	vm_object_lock_assert_exclusive(object);
	assert(object->purgable != VM_PURGABLE_DENY);

	owner = VM_OBJECT_OWNER(object);
	if (owner == NULL ||
	    object->purgable == VM_PURGABLE_DENY) {
		return;
	}

	vm_object_ledger_tag_ledgers(object,
	    &ledger_idx_volatile,
	    &ledger_idx_nonvolatile,
	    &ledger_idx_volatile_compressed,
	    &ledger_idx_nonvolatile_compressed,
	    &do_footprint);

	resident_page_count = object->resident_page_count;
	wired_page_count = object->wired_page_count;
	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
	    object->pager != NULL) {
		compressed_page_count =
		    vm_compressor_pager_get_count(object->pager);
	} else {
		compressed_page_count = 0;
	}

	if (old_state == VM_PURGABLE_VOLATILE ||
	    old_state == VM_PURGABLE_EMPTY) {
		/* less volatile bytes in ledger */
		ledger_debit(owner->ledger,
		    ledger_idx_volatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* less compressed volatile bytes in ledger */
		ledger_debit(owner->ledger,
		    ledger_idx_volatile_compressed,
		    ptoa_64(compressed_page_count));

		/* more non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_nonvolatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* more compressed non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_nonvolatile_compressed,
		    ptoa_64(compressed_page_count));
		if (do_footprint) {
			/* more footprint */
			ledger_credit(owner->ledger,
			    task_ledgers.phys_footprint,
			    ptoa_64(resident_page_count
			    + compressed_page_count
			    - wired_page_count));
		}
	} else if (old_state == VM_PURGABLE_NONVOLATILE) {
		/* less non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
		    ledger_idx_nonvolatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* less compressed non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
		    ledger_idx_nonvolatile_compressed,
		    ptoa_64(compressed_page_count));
		if (do_footprint) {
			/* less footprint */
			ledger_debit(owner->ledger,
			    task_ledgers.phys_footprint,
			    ptoa_64(resident_page_count
			    + compressed_page_count
			    - wired_page_count));
		}

		/* more volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_volatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* more compressed volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_volatile_compressed,
		    ptoa_64(compressed_page_count));
	} else {
		panic("vm_purgeable_accounting(%p): "
		    "unexpected old_state=%d\n",
		    object, old_state);
	}

	vm_object_lock_assert_exclusive(object);
}
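
/*
 * For illustration (derived from the ledger operations above): a
 * nonvolatile -> volatile transition moves (resident - wired) bytes from
 * the owner's nonvolatile ledger to its volatile ledger, moves compressed
 * bytes likewise and, when do_footprint is set, debits
 * (resident + compressed - wired) bytes from phys_footprint; the reverse
 * transition credits the footprint back.
 */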

void
vm_purgeable_nonvolatile_owner_update(
	task_t owner,
	int delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_nonvolatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects > 0);
	} else {
		assert(owner->task_nonvolatile_objects > delta);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects >= 0);
	}
}

void
vm_purgeable_volatile_owner_update(
	task_t owner,
	int delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_volatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects > 0);
	} else {
		assert(owner->task_volatile_objects > delta);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects >= 0);
	}
}

void
vm_object_owner_compressed_update(
	vm_object_t object,
	int delta)
{
	task_t owner;
	int ledger_idx_volatile;
	int ledger_idx_nonvolatile;
	int ledger_idx_volatile_compressed;
	int ledger_idx_nonvolatile_compressed;
	boolean_t do_footprint;

	vm_object_lock_assert_exclusive(object);

	owner = VM_OBJECT_OWNER(object);

	if (delta == 0 ||
	    !object->internal ||
	    (object->purgable == VM_PURGABLE_DENY &&
	    !object->vo_ledger_tag) ||
	    owner == NULL) {
		/* not an owned purgeable (or tagged) VM object: nothing to update */
		return;
	}

	vm_object_ledger_tag_ledgers(object,
	    &ledger_idx_volatile,
	    &ledger_idx_nonvolatile,
	    &ledger_idx_volatile_compressed,
	    &ledger_idx_nonvolatile_compressed,
	    &do_footprint);
	switch (object->purgable) {
	case VM_PURGABLE_DENY:
		/* not purgeable: must be ledger-tagged */
		assert(object->vo_ledger_tag != VM_LEDGER_TAG_NONE);
		/* fallthru */
	case VM_PURGABLE_NONVOLATILE:
		if (delta > 0) {
			ledger_credit(owner->ledger,
			    ledger_idx_nonvolatile_compressed,
			    ptoa_64(delta));
			if (do_footprint) {
				ledger_credit(owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(delta));
			}
		} else {
			ledger_debit(owner->ledger,
			    ledger_idx_nonvolatile_compressed,
			    ptoa_64(-delta));
			if (do_footprint) {
				ledger_debit(owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(-delta));
			}
		}
		break;
	case VM_PURGABLE_VOLATILE:
	case VM_PURGABLE_EMPTY:
		if (delta > 0) {
			ledger_credit(owner->ledger,
			    ledger_idx_volatile_compressed,
			    ptoa_64(delta));
		} else {
			ledger_debit(owner->ledger,
			    ledger_idx_volatile_compressed,
			    ptoa_64(-delta));
		}
		break;
	default:
		panic("vm_purgeable_compressed_update(): "
		    "unexpected purgable %d for object %p\n",
		    object->purgable, object);
	}
}