/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/sched_prim.h>
#include <kern/ledger.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>                 /* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>

extern vm_pressure_level_t memorystatus_vm_pressure_level;

struct token {
    token_cnt_t count;
    token_idx_t prev;
    token_idx_t next;
};

struct token *tokens;
token_idx_t token_q_max_cnt = 0;
vm_size_t   token_q_cur_size = 0;

token_idx_t token_free_idx = 0;         /* head of free queue */
token_idx_t token_init_idx = 1;         /* token 0 is reserved!! */
int32_t     token_new_pagecount = 0;    /* count of pages that will
                                         * be added onto token queue */

int available_for_purge = 0;            /* increase when ripe token
                                         * added, decrease when ripe
                                         * token removed.
                                         * protected by page_queue_lock */

static int token_q_allocating = 0;      /* flag for singlethreading
                                         * allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t purgeable_nonvolatile_queue;
int purgeable_nonvolatile_count;

decl_lck_mtx_data(,vm_purgeable_queue_lock)

static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);

void vm_purgeable_nonvolatile_owner_update(task_t owner,
                                           int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
                                        int delta);

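/*
 * Illustrative sketch of the token mechanism (numbers are made up, not
 * from this file): tokens partition the inactive page queue by page
 * counts. With a queue holding tokens of count 3, 0 and 2 (head to
 * tail), the first token ripens after roughly 3 more inactive pages
 * have been processed ahead of it, the second is already ripe, and the
 * third ripens 2 pages after the first. "new_pages" accumulates pages
 * seen since the last token was added and becomes the count of the next
 * token created by vm_purgeable_token_add(). The check routine below
 * asserts the resulting invariant:
 *
 *     sum(token counts) + new_pages + token_new_pagecount
 *         == vm_page_inactive_count - vm_page_cleaned_count
 *
 * i.e. the tokens always account for the whole inactive queue.
 */
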
#if MACH_ASSERT
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
    int token_cnt = 0, page_cnt = 0;
    token_idx_t token = queue->token_q_head;
    token_idx_t unripe = 0;
    int our_inactive_count;

#if DEVELOPMENT
    static unsigned lightweight_check = 0;

    /*
     * Due to performance impact, only perform this check
     * every 100 times on DEVELOPMENT kernels.
     */
    if (lightweight_check++ < 100) {
        return;
    }

    lightweight_check = 0;
#endif

    while (token) {
        if (tokens[token].count != 0) {
            assert(queue->token_q_unripe);
            if (unripe == 0) {
                assert(token == queue->token_q_unripe);
                unripe = token;
            }
            page_cnt += tokens[token].count;
        }
        if (tokens[token].next == 0)
            assert(queue->token_q_tail == token);

        token_cnt++;
        token = tokens[token].next;
    }

    if (unripe)
        assert(queue->token_q_unripe == unripe);
    assert(token_cnt == queue->debug_count_tokens);

    /* obsolete queue doesn't maintain token counts */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
        our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
        assert(our_inactive_count >= 0);
        assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
    }
}
#endif

/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

    /* new token */
    token_idx_t token;
    enum purgeable_q_type i;

find_available_token:

    if (token_free_idx) {                          /* unused tokens available */
        token = token_free_idx;
        token_free_idx = tokens[token_free_idx].next;
    } else if (token_init_idx < token_q_max_cnt) { /* lazy token array init */
        token = token_init_idx;
        token_init_idx++;
    } else {                                       /* allocate more memory */
        /* Wait if another thread is inside the memory alloc section */
        while (token_q_allocating) {
            wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
                                              LCK_SLEEP_DEFAULT,
                                              (event_t)&token_q_allocating,
                                              THREAD_UNINT);
            if (res != THREAD_AWAKENED)
                return KERN_ABORTED;
        }

        /* Check whether memory is still maxed out */
        if (token_init_idx < token_q_max_cnt)
            goto find_available_token;

        /* Still no memory. Allocate some. */
        token_q_allocating = 1;

        /* Drop page queue lock so we can allocate */
        vm_page_unlock_queues();

        struct token *new_loc;
        vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
        kern_return_t result;

        if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
            result = KERN_RESOURCE_SHORTAGE;
        } else {
            if (token_q_cur_size) {
                result = kmem_realloc(kernel_map,
                                      (vm_offset_t) tokens,
                                      token_q_cur_size,
                                      (vm_offset_t *) &new_loc,
                                      alloc_size, VM_KERN_MEMORY_OSFMK);
            } else {
                result = kmem_alloc(kernel_map,
                                    (vm_offset_t *) &new_loc,
                                    alloc_size, VM_KERN_MEMORY_OSFMK);
            }
        }

        vm_page_lock_queues();

        if (result) {
            /* Unblock waiting threads */
            token_q_allocating = 0;
            thread_wakeup((event_t)&token_q_allocating);
            return result;
        }

        /* If we get here, we allocated new memory. Update pointers and
         * dealloc old range */
        struct token *old_tokens = tokens;
        tokens = new_loc;
        vm_size_t old_token_q_cur_size = token_q_cur_size;
        token_q_cur_size = alloc_size;
        token_q_max_cnt = (token_idx_t) (token_q_cur_size /
                                         sizeof(struct token));
        assert(token_init_idx < token_q_max_cnt);   /* We must have a free token now */

        if (old_token_q_cur_size) {     /* clean up old mapping */
            vm_page_unlock_queues();
            /* kmem_realloc leaves the old region mapped. Get rid of it. */
            kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
            vm_page_lock_queues();
        }

        /* Unblock waiting threads */
        token_q_allocating = 0;
        thread_wakeup((event_t)&token_q_allocating);

        goto find_available_token;
    }

    assert(token);

    /*
     * the new pagecount we got needs to be applied to all queues except
     * obsolete
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
        assert(pages >= 0);
        assert(pages <= TOKEN_COUNT_MAX);
        purgeable_queues[i].new_pages = (int32_t) pages;
        assert(purgeable_queues[i].new_pages == pages);
    }
    token_new_pagecount = 0;

    /* set token counter value */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
        tokens[token].count = queue->new_pages;
    else
        tokens[token].count = 0;        /* all obsolete items are
                                         * ripe immediately */
    queue->new_pages = 0;

    /* put token on token counter list */
    tokens[token].next = 0;
    if (queue->token_q_tail == 0) {
        assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
        queue->token_q_head = token;
        tokens[token].prev = 0;
    } else {
        tokens[queue->token_q_tail].next = token;
        tokens[token].prev = queue->token_q_tail;
    }
    if (queue->token_q_unripe == 0) {   /* only ripe tokens (token
                                         * count == 0) in queue */
        if (tokens[token].count > 0)
            queue->token_q_unripe = token;  /* first unripe token */
        else
            available_for_purge++;      /* added a ripe token?
                                         * increase available count */
    }
    queue->token_q_tail = token;

#if MACH_ASSERT
    queue->debug_count_tokens++;
    /* Check both queues, since we modified the new_pages count on each */
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
                          queue->type,
                          tokens[token].count,  /* num pages on token
                                                 * (last token) */
                          queue->debug_count_tokens,
                          0,
                          0);
#endif

    return KERN_SUCCESS;
}

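/*
 * Hypothetical caller sketch (not part of this file): a token is added
 * when an object is made volatile, e.g. from vm_object_purgable_control(),
 * with the page queue locked. "queue" and "kr" are assumed locals:
 */
#if 0
    vm_page_lock_queues();
    kr = vm_purgeable_token_add(queue);
    vm_page_unlock_queues();
    if (kr != KERN_SUCCESS)
        return kr;  /* token table could not grow: object stays non-volatile */
#endif
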
/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

    token_idx_t token;
    token = queue->token_q_head;

    assert(token);

    if (token) {
        assert(queue->token_q_tail);
        if (queue->token_q_head == queue->token_q_unripe) {
            /* no ripe tokens... must move unripe pointer */
            queue->token_q_unripe = tokens[token].next;
        } else {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        }

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        queue->token_q_head = tokens[token].next;
        if (queue->token_q_head) {
            tokens[queue->token_q_head].count += tokens[token].count;
            tokens[queue->token_q_head].prev = 0;
        } else {
            /* currently no other tokens in the queue */
            /*
             * the page count must be added to the next newly
             * created token
             */
            queue->new_pages += tokens[token].count;
            /* if head is zero, tail is too */
            queue->token_q_tail = 0;
        }

#if MACH_ASSERT
        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count,  /* num pages on new
                                                                   * first token */
                              token_new_pagecount,  /* num pages waiting for
                                                     * next token */
                              available_for_purge,
                              0);
#endif
    }
    return token;
}

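/*
 * Worked example (illustrative values): removing the head token of a
 * queue holding counts [3, 2, 4] yields [5, 4]. The removed count is
 * folded into the new head, so the remaining tokens still cover the
 * same stretch of the inactive queue and later tokens do not ripen
 * early.
 */
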
static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

    token_idx_t token;
    token = queue->token_q_tail;

    assert(token);

    if (token) {
        assert(queue->token_q_head);

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        if (queue->token_q_unripe == 0) {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        } else if (queue->token_q_unripe == token) {
            /* we're removing the only unripe token */
            queue->token_q_unripe = 0;
        }

        if (token == queue->token_q_head) {
            /* token is the last one in the queue */
            queue->token_q_head = 0;
            queue->token_q_tail = 0;
        } else {
            token_idx_t new_tail;

            new_tail = tokens[token].prev;

            assert(new_tail);
            assert(tokens[new_tail].next == token);

            queue->token_q_tail = new_tail;
            tokens[new_tail].next = 0;
        }

        queue->new_pages += tokens[token].count;

#if MACH_ASSERT
        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count,  /* num pages on new
                                                                   * first token */
                              token_new_pagecount,  /* num pages waiting for
                                                     * next token */
                              available_for_purge,
                              0);
#endif
    }
    return token;
}

/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    token_idx_t token = vm_purgeable_token_remove_first(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}

void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    token_idx_t token = vm_purgeable_token_remove_last(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}


/* Call with page queue locked. */
void
vm_purgeable_q_advance_all()
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

    /* check queue counters - if they get really large, scale them back.
     * They tend to get that large when there is no purgeable queue action */
    int i;
    if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) { /* a system idling years might get there */
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
            int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
            assert(pages >= 0);
            assert(pages <= TOKEN_COUNT_MAX);
            purgeable_queues[i].new_pages = (int32_t) pages;
            assert(purgeable_queues[i].new_pages == pages);
        }
        token_new_pagecount = 0;
    }

    /*
     * Decrement token counters. A token counter can be zero, this means the
     * object is ripe to be purged. It is not purged immediately, because that
     * could cause several objects to be purged even if purging one would satisfy
     * the memory needs. Instead, the pageout thread purges one after the other
     * by calling vm_purgeable_object_purge_one and then rechecking the memory
     * balance.
     *
     * No need to advance obsolete queue - all items are ripe there,
     * always
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue = &purgeable_queues[i];
        uint32_t num_pages = 1;

        /* Iterate over tokens as long as there are unripe tokens. */
        while (queue->token_q_unripe) {
            if (tokens[queue->token_q_unripe].count && num_pages) {
                tokens[queue->token_q_unripe].count -= 1;
                num_pages -= 1;
            }

            if (tokens[queue->token_q_unripe].count == 0) {
                queue->token_q_unripe = tokens[queue->token_q_unripe].next;
                available_for_purge++;
                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
                                      queue->type,
                                      tokens[queue->token_q_head].count,  /* num pages on new
                                                                           * first token */
                                      0,
                                      available_for_purge,
                                      0);
                continue;   /* One token ripened. Make sure to
                             * check the next. */
            }
            if (num_pages == 0)
                break;  /* Current token not ripe and no more pages.
                         * Work done. */
        }

        /*
         * if there are no unripe tokens in the queue, decrement the
         * new_pages counter instead. new_pages can be negative, but must be
         * canceled out by token_new_pagecount -- since the inactive queue as a
         * whole always contains a nonnegative number of pages
         */
        if (!queue->token_q_unripe) {
            queue->new_pages -= num_pages;
            assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
        }
#if MACH_ASSERT
        vm_purgeable_token_check_queue(queue);
#endif
    }
}

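/*
 * Illustrative walkthrough (made-up counts): with unripe tokens [2, 1]
 * on a queue, one call with its budget of a single page turns [2, 1]
 * into [1, 1]; the next call yields [0, 1], ripening the first token
 * (available_for_purge++) and advancing token_q_unripe to the second.
 * A ripe token is deliberately left in place until the pageout thread
 * actually purges an object.
 */
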
/*
 * grab any ripe object and purge it: obsolete queue first. Then, go through
 * each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *    Yes - purge it. Remove token.
 *    No - check other queue. Is there an object?
 *         No - increment group, then go to (1)
 *         Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *               token from other queue and migrate unripe token from this
 *               queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
    /* return token to free list. advance token list. */
    token_idx_t new_head = tokens[queue->token_q_head].next;
    tokens[queue->token_q_head].next = token_free_idx;
    tokens[queue->token_q_head].prev = 0;
    token_free_idx = queue->token_q_head;
    queue->token_q_head = new_head;
    tokens[new_head].prev = 0;
    if (new_head == 0)
        queue->token_q_tail = 0;

#if MACH_ASSERT
    queue->debug_count_tokens--;
    vm_purgeable_token_check_queue(queue);
#endif

    available_for_purge--;
    assert(available_for_purge >= 0);
}

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    assert(queue->token_q_head);

    if (tokens[queue->token_q_head].count == 0) {
        /* This queue has a ripe token. Remove. */
        vm_purgeable_token_remove_ripe(queue);
    } else {
        assert(queue2);
        /*
         * queue2 must have a ripe token. Remove, and migrate one
         * from queue to queue2.
         */
        vm_purgeable_token_remove_ripe(queue2);
        /* migrate unripe token */
        token_idx_t token;
        token_cnt_t count;

        /* remove token from queue1 */
        assert(queue->token_q_unripe == queue->token_q_head);  /* queue1 had no unripe
                                                                * tokens, remember? */
        token = vm_purgeable_token_remove_first(queue);
        assert(token);

        count = tokens[token].count;

        /* migrate to queue2 */
        /* go to migration target loc */

        token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;

        while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
            count -= tokens[token_to_insert_before].count;
            token_to_insert_before = tokens[token_to_insert_before].next;
        }

        /* token_to_insert_before is now set correctly */

        /* should the inserted token become the first unripe token? */
        if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
            queue2->token_q_unripe = token;  /* if so, must update unripe pointer */

        /*
         * insert token.
         * if inserting at end, reduce new_pages by that value;
         * otherwise, reduce counter of next token
         */

        tokens[token].count = count;

        if (token_to_insert_before != 0) {
            token_to_insert_after = tokens[token_to_insert_before].prev;

            tokens[token].next = token_to_insert_before;
            tokens[token_to_insert_before].prev = token;

            assert(tokens[token_to_insert_before].count >= count);
            tokens[token_to_insert_before].count -= count;
        } else {
            /* if we ran off the end of the list, the token to insert after is the tail */
            token_to_insert_after = queue2->token_q_tail;

            tokens[token].next = 0;
            queue2->token_q_tail = token;

            assert(queue2->new_pages >= (int32_t) count);
            queue2->new_pages -= count;
        }

        if (token_to_insert_after != 0) {
            tokens[token].prev = token_to_insert_after;
            tokens[token_to_insert_after].next = token;
        } else {
            /* is this case possible? */
            tokens[token].prev = 0;
            queue2->token_q_head = token;
        }

#if MACH_ASSERT
        queue2->debug_count_tokens++;
        vm_purgeable_token_check_queue(queue2);
#endif
    }
}

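/*
 * Migration example (illustrative): suppose the token removed from
 * queue1 carries count 5 and queue2 holds tokens [3, 4]. The loop above
 * walks queue2 consuming counts (5 - 3 = 2), stops before the count-4
 * token, and inserts the migrated token there with count 2, shrinking
 * the count-4 token to 2. Relative ripening positions on queue2's
 * timeline are preserved: [3, 2, 2] ripens at the same cumulative
 * offsets (3 and 7) as the original [3, 4], with the migrated token
 * landing at offset 5.
 */
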
/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(
    purgeable_q_t queue,
    int           group,
    boolean_t     pick_ripe)
{
    vm_object_t object, best_object;
    int         object_task_importance;
    int         best_object_task_importance;
    int         best_object_skipped;
    int         num_objects_skipped;
    int         try_lock_failed = 0;
    int         try_lock_succeeded = 0;
    task_t      owner;

    best_object = VM_OBJECT_NULL;
    best_object_task_importance = INT_MAX;

    lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
    /*
     * Usually we would pick the first element from a queue. However, we
     * might not be able to get a lock on it, in which case we try the
     * remaining elements in order.
     */

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
                              pick_ripe,
                              group,
                              VM_KERNEL_UNSLIDE_OR_PERM(queue),
                              0,
                              0);

    num_objects_skipped = 0;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq),
         num_objects_skipped++) {

        /*
         * To prevent us looping for an excessively long time, choose
         * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
         * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
         * we keep going until we find the first eligible object.
         */
        if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
            break;
        }

        if (pick_ripe &&
            !object->purgeable_when_ripe) {
            /* we want an object that has a ripe token */
            continue;
        }

        object_task_importance = 0;

        owner = object->vo_purgeable_owner;
        if (owner) {
            object_task_importance = task_importance_estimate(owner);
        }

        if (object_task_importance < best_object_task_importance) {
            if (vm_object_lock_try(object)) {
                try_lock_succeeded++;
                if (best_object != VM_OBJECT_NULL) {
                    /* forget about previous best object */
                    vm_object_unlock(best_object);
                }
                best_object = object;
                best_object_task_importance = object_task_importance;
                best_object_skipped = num_objects_skipped;
                if (best_object_task_importance == 0) {
                    /* can't get any better: stop looking */
                    break;
                }
            } else {
                try_lock_failed++;
            }
        }
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
                              num_objects_skipped,  /* considered objects */
                              try_lock_failed,
                              try_lock_succeeded,
                              VM_KERNEL_UNSLIDE_OR_PERM(best_object),
                              ((best_object == NULL) ? 0 : best_object->resident_page_count));

    object = best_object;

    if (object == VM_OBJECT_NULL) {
        return VM_OBJECT_NULL;
    }

    /* Locked. Great. We'll take it. Remove and return. */
//  printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

    vm_object_lock_assert_exclusive(object);

    queue_remove(&queue->objq[group], object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);

#if DEBUG
    object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */

    /* keep queue of non-volatile objects */
    queue_enter(&purgeable_nonvolatile_queue, object,
                vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    /* one more nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, +1);

#if MACH_ASSERT
    queue->debug_count_objects--;
#endif
    return object;
}

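/*
 * Selection policy note: task_importance_estimate() biases the victim
 * search above toward objects owned by less important (e.g. backgrounded)
 * tasks, and an owner of importance 0 ends the search immediately. Only
 * objects whose lock can be try-acquired are eligible, so a busy object
 * is skipped rather than waited on.
 */
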
/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
    enum purgeable_q_type i;
    int          group;
    vm_object_t  object;
    unsigned int purged_count;
    uint32_t     collisions;

    purged_count = 0;
    collisions = 0;

restart:
    lck_mtx_lock(&vm_purgeable_queue_lock);
    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue;

        queue = &purgeable_queues[i];

        /*
         * Look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            while (!queue_empty(&queue->objq[group])) {
                object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
                if (object == VM_OBJECT_NULL) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    mutex_pause(collisions++);
                    goto restart;
                }

                lck_mtx_unlock(&vm_purgeable_queue_lock);

                /* Lock the page queue here so we don't hold it
                 * over the whole, lengthy operation */
                if (object->purgeable_when_ripe) {
                    vm_page_lock_queues();
                    vm_purgeable_token_remove_first(queue);
                    vm_page_unlock_queues();
                }

                (void) vm_object_purge(object, 0);
                assert(object->purgable == VM_PURGABLE_EMPTY);
                /* no change in purgeable accounting */

                vm_object_unlock(object);
                purged_count++;
                goto restart;
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
                          purged_count,  /* # of purged objects */
                          0,
                          available_for_purge,
                          0,
                          0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}

boolean_t
vm_purgeable_object_purge_one_unlocked(
    int force_purge_below_group)
{
    boolean_t retval;

    vm_page_lock_queues();
    retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
    vm_page_unlock_queues();

    return retval;
}

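/*
 * Hypothetical use (sketch, not from this file): the pageout code can
 * reclaim one ripe object at a time and recheck memory pressure between
 * purges. "memory_is_tight()" is a placeholder for the caller's own
 * pressure check:
 */
#if 0
    while (memory_is_tight() && available_for_purge > 0) {
        if (!vm_purgeable_object_purge_one_unlocked(0))
            break;  /* nothing lockable/ripe right now */
    }
#endif
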
boolean_t
vm_purgeable_object_purge_one(
    int force_purge_below_group,
    int flags)
{
    enum purgeable_q_type i;
    int           group;
    vm_object_t   object = 0;
    purgeable_q_t queue, queue2;
    boolean_t     forced_purge;

    /* Need the page queue lock since we'll be changing the token queue. */
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        queue = &purgeable_queues[i];

        if (force_purge_below_group == 0) {
            /*
             * Are there any ripe tokens on this queue? If yes,
             * we'll find an object to purge there
             */
            if (!queue->token_q_head) {
                /* no token: look at next purgeable queue */
                continue;
            }

            if (tokens[queue->token_q_head].count != 0) {
                /* no ripe token: next queue */
                continue;
            }
        }

        /*
         * Now look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            if (!queue->token_q_head ||
                tokens[queue->token_q_head].count != 0) {
                /* no tokens or no ripe tokens */

                if (group >= force_purge_below_group) {
                    /* no more groups to force-purge */
                    break;
                }

                /*
                 * Try and purge an object in this group
                 * even though no tokens are ripe.
                 */
                if (!queue_empty(&queue->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_delete_first(queue);
                    }
                    forced_purge = TRUE;
                    goto purge_now;
                }

                /* nothing to purge in this group: next group */
                continue;
            }
            if (!queue_empty(&queue->objq[group]) &&
                (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                if (object->purgeable_when_ripe) {
                    vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                }
                forced_purge = FALSE;
                goto purge_now;
            }
            if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
                /* This is the token migration case, and it works between
                 * FIFO and LIFO only */
                queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
                                           PURGEABLE_Q_TYPE_FIFO :
                                           PURGEABLE_Q_TYPE_LIFO];

                if (!queue_empty(&queue2->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                    }
                    forced_purge = FALSE;
                    goto purge_now;
                }
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    /*
     * because we have to do a try_lock on the objects which could fail,
     * we could end up with no object to purge at this time, even though
     * we have objects in a purgeable state
     */
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return FALSE;

purge_now:

    assert(object);
    vm_page_unlock_queues();    /* Unlock for call to vm_object_purge() */
//  printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_purgeable_owner, task_importance_estimate(object->vo_purgeable_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
    (void) vm_object_purge(object, flags);
    assert(object->purgable == VM_PURGABLE_EMPTY);
    /* no change in purgeable accounting */
    vm_object_unlock(object);
    vm_page_lock_queues();

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
                          object,  /* purged object */
                          0,
                          available_for_purge,
                          0,
                          0);

    return TRUE;
}

/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
    vm_object_lock_assert_exclusive(object);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    /* one less nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, -1);

    if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
        group = 0;

    if (queue->type != PURGEABLE_Q_TYPE_LIFO)  /* fifo and obsolete are
                                                * fifo-queued */
        queue_enter(&queue->objq[group], object, vm_object_t, objq);  /* last to die */
    else
        queue_enter_first(&queue->objq[group], object, vm_object_t, objq);  /* first to die */
    /* one more volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, +1);

    object->purgeable_queue_type = queue->type;
    object->purgeable_queue_group = group;

#if DEBUG
    assert(object->vo_purgeable_volatilizer == NULL);
    object->vo_purgeable_volatilizer = current_task();
    OSBacktrace(&object->purgeable_volatilizer_bt[0], 16);
#endif /* DEBUG */

#if MACH_ASSERT
    queue->debug_count_objects++;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);
#endif

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}

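/*
 * Lifecycle note: vm_purgeable_object_add() moves an object from the
 * non-volatile queue onto a volatile queue when its purgable state
 * becomes VM_PURGABLE_VOLATILE (typically driven by
 * vm_object_purgable_control()); vm_purgeable_object_remove() or a
 * purge via vm_purgeable_object_find_and_lock() moves it back.
 */
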
/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
    int group;
    enum purgeable_q_type type;
    purgeable_q_t queue;

    vm_object_lock_assert_exclusive(object);

    type = object->purgeable_queue_type;
    group = object->purgeable_queue_group;

    if (type == PURGEABLE_Q_TYPE_MAX) {
        if (object->objq.prev || object->objq.next)
            panic("unmarked object on purgeable q");

        return NULL;
    } else if (!(object->objq.prev && object->objq.next))
        panic("marked object not on purgeable q");

    lck_mtx_lock(&vm_purgeable_queue_lock);

    queue = &purgeable_queues[type];

    queue_remove(&queue->objq[group], object, vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);
#if DEBUG
    object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
    /* keep queue of non-volatile objects */
    if (object->alive && !object->terminating) {
        task_t owner;
        queue_enter(&purgeable_nonvolatile_queue, object,
                    vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        owner = object->vo_purgeable_owner;
        vm_purgeable_nonvolatile_owner_update(owner, +1);
    }

#if MACH_ASSERT
    queue->debug_count_objects--;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);
#endif

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;

    vm_object_lock_assert_exclusive(object);

    return &purgeable_queues[type];
}

void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
    lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    stat->count = stat->size = 0;
    vm_object_t object;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (!target_task || object->vo_purgeable_owner == target_task) {
            stat->count++;
            stat->size += (object->resident_page_count * PAGE_SIZE);
        }
    }
    return;
}

void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
    purgeable_q_t queue;
    int           group;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Populate fifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);

    /* Populate lifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);

    /* Populate obsolete data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}

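/*
 * Hypothetical caller sketch: "info" is assumed to point at a
 * vm_purgeable_info structure with the fifo_data/lifo_data/obsolete_data
 * fields dereferenced above. Passing TASK_NULL yields a system-wide
 * view, since the helper matches all owners when target_task is NULL:
 */
#if 0
    vm_purgeable_stats(info, task);       /* stats for one task */
    vm_purgeable_stats(info, TASK_NULL);  /* system-wide stats */
#endif
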
#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
    purgeable_q_t queue,
    int group,
    task_t task,
    pvm_account_info_t acnt_info)
{
    vm_object_t object;
    uint64_t compressed_count;

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (object->vo_purgeable_owner == task) {
            compressed_count = vm_compressor_pager_get_count(object->pager);
            acnt_info->pvm_volatile_compressed_count += compressed_count;
            acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }
}

/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
    task_t             task,
    pvm_account_info_t acnt_info)
{
    queue_head_t  *nonvolatile_q;
    vm_object_t   object;
    int           group;
    int           state;
    uint64_t      compressed_count;
    purgeable_q_t volatile_q;


    if ((task == NULL) || (acnt_info == NULL)) {
        return KERN_INVALID_ARGUMENT;
    }

    acnt_info->pvm_volatile_count = 0;
    acnt_info->pvm_volatile_compressed_count = 0;
    acnt_info->pvm_nonvolatile_count = 0;
    acnt_info->pvm_nonvolatile_compressed_count = 0;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
         !queue_end(nonvolatile_q, (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (object->vo_purgeable_owner == task) {
            state = object->purgable;
            compressed_count = vm_compressor_pager_get_count(object->pager);
            if (state == VM_PURGABLE_EMPTY) {
                acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_volatile_compressed_count += compressed_count;
            } else {
                acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
            }
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
    acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

    return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */

static void
vm_purgeable_volatile_queue_disown(
    purgeable_q_t queue,
    int           group,
    task_t        task)
{
    vm_object_t object;
    int         collisions;

    collisions = 0;

again:
    lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
        /*
         * Sanity check: let's scan the entire queues to
         * make sure we don't leave any purgeable objects
         * pointing back at a dead task. If the counters
         * are off, we would fail to assert that they go
         * back to 0 after disowning is done.
         */
#else /* MACH_ASSERT */
        if (task->task_volatile_objects == 0) {
            /* no more volatile objects owned by "task" */
            break;
        }
#endif /* MACH_ASSERT */
        if (object->vo_purgeable_owner == task) {
            if (!vm_object_lock_try(object)) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                mutex_pause(collisions++);
                lck_mtx_lock(&vm_purgeable_queue_lock);
                goto again;
            }
            assert(object->purgable == VM_PURGABLE_VOLATILE);
            if (object->vo_purgeable_owner == task) {
                vm_purgeable_accounting(object,
                                        object->purgable,
                                        TRUE); /* disown */
                assert(object->vo_purgeable_owner == NULL);
            }
            vm_object_unlock(object);
        }
    }
}

void
vm_purgeable_disown(
    task_t task)
{
    purgeable_q_t volatile_q;
    int           group;
    queue_head_t  *nonvolatile_q;
    vm_object_t   object;
    int           collisions;

    if (task == NULL) {
        return;
    }

    task->task_purgeable_disowning = TRUE;

    /*
     * Scan the purgeable objects queues for objects owned by "task".
     * This has to be done "atomically" under the "vm_purgeable_queue"
     * lock, to ensure that no new purgeable object get associated
     * with this task or moved between queues while we're scanning.
     */

    /*
     * Scan non-volatile queue for objects owned by "task".
     */

    collisions = 0;

again:
    if (task->task_purgeable_disowned) {
        /* task has already disowned its purgeable memory */
        assert(task->task_volatile_objects == 0);
        assert(task->task_nonvolatile_objects == 0);
        return;
    }
    lck_mtx_lock(&vm_purgeable_queue_lock);

    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
         !queue_end(nonvolatile_q, (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
        /*
         * Sanity check: let's scan the entire queues to
         * make sure we don't leave any purgeable objects
         * pointing back at a dead task. If the counters
         * are off, we would fail to assert that they go
         * back to 0 after disowning is done.
         */
#else /* MACH_ASSERT */
        if (task->task_nonvolatile_objects == 0) {
            /* no more non-volatile objects owned by "task" */
            break;
        }
#endif /* MACH_ASSERT */
#if DEBUG
        assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
        if (object->vo_purgeable_owner == task) {
            if (!vm_object_lock_try(object)) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                mutex_pause(collisions++);
                goto again;
            }
            if (object->vo_purgeable_owner == task) {
                vm_purgeable_accounting(object,
                                        object->purgable,
                                        TRUE); /* disown */
                assert(object->vo_purgeable_owner == NULL);
            }
            vm_object_unlock(object);
        }
    }

    lck_mtx_yield(&vm_purgeable_queue_lock);

    /*
     * Scan volatile queues for objects owned by "task".
     */

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_volatile_queue_disown(volatile_q, 0, task);
    lck_mtx_yield(&vm_purgeable_queue_lock);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_volatile_queue_disown(volatile_q, group, task);
        lck_mtx_yield(&vm_purgeable_queue_lock);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_volatile_queue_disown(volatile_q, group, task);
        lck_mtx_yield(&vm_purgeable_queue_lock);
    }

    if (task->task_volatile_objects != 0 ||
        task->task_nonvolatile_objects != 0) {
        /* some purgeable objects sneaked into a queue: find them */
        lck_mtx_unlock(&vm_purgeable_queue_lock);
        mutex_pause(collisions++);
        goto again;
    }

    /* there shouldn't be any purgeable objects owned by task now */
    assert(task->task_volatile_objects == 0);
    assert(task->task_nonvolatile_objects == 0);
    assert(task->task_purgeable_disowning);

    /* and we don't need to try and disown again */
    task->task_purgeable_disowned = TRUE;

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}


#if notyet
static int
vm_purgeable_queue_purge_task_owned(
    purgeable_q_t queue,
    int           group,
    task_t        task)
{
    vm_object_t object;
    int         num_objects;
    int         collisions;
    int         num_objects_purged;

    num_objects_purged = 0;
    collisions = 0;

look_again:
    lck_mtx_lock(&vm_purgeable_queue_lock);

    num_objects = 0;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {

        if (object->vo_purgeable_owner != task &&
            object->vo_purgeable_owner != NULL) {
            continue;
        }

        /* found an object: try and grab it */
        if (!vm_object_lock_try(object)) {
            lck_mtx_unlock(&vm_purgeable_queue_lock);
            mutex_pause(collisions++);
            goto look_again;
        }
        /* got it ! */

        collisions = 0;

        /* remove object from purgeable queue */
        queue_remove(&queue->objq[group], object,
                     vm_object_t, objq);
        object->objq.next = NULL;
        object->objq.prev = NULL;
        /* one less volatile object for this object's owner */
        assert(object->vo_purgeable_owner == task);
        vm_purgeable_volatile_owner_update(task, -1);

#if DEBUG
        object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
        queue_enter(&purgeable_nonvolatile_queue, object,
                    vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        assert(object->vo_purgeable_owner == task);
        vm_purgeable_nonvolatile_owner_update(task, +1);

        /* unlock purgeable queues */
        lck_mtx_unlock(&vm_purgeable_queue_lock);

        if (object->purgeable_when_ripe) {
            /* remove a token */
            vm_page_lock_queues();
            vm_purgeable_token_remove_first(queue);
            vm_page_unlock_queues();
        }

        /* purge the object */
        (void) vm_object_purge(object, 0);
        assert(object->purgable == VM_PURGABLE_EMPTY);
        /* no change for purgeable accounting */
        vm_object_unlock(object);
        num_objects_purged++;

        /* we unlocked the purgeable queues, so start over */
        goto look_again;
    }

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    return num_objects_purged;
}

int
vm_purgeable_purge_task_owned(
    task_t task)
{
    purgeable_q_t queue;
    int           group;
    int           num_objects_purged;

    num_objects_purged = 0;

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
                                                              0,
                                                              task);

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
                                                                  group,
                                                                  task);

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
                                                                  group,
                                                                  task);

    return num_objects_purged;
}
#endif

void
vm_purgeable_nonvolatile_enqueue(
    vm_object_t object,
    task_t      owner)
{
    int page_count;

    vm_object_lock_assert_exclusive(object);

    assert(object->purgable == VM_PURGABLE_NONVOLATILE);
    assert(object->vo_purgeable_owner == NULL);
    assert(owner != NULL);

    lck_mtx_lock(&vm_purgeable_queue_lock);

    if (owner->task_purgeable_disowning) {
        /* task is exiting and no longer tracking purgeable objects */
        owner = NULL;
    }

    object->vo_purgeable_owner = owner;
#if DEBUG
    object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */

#if DEBUG
    OSBacktrace(&object->purgeable_owner_bt[0], 16);
#endif /* DEBUG */

    page_count = object->resident_page_count;
    assert(page_count == 0);    /* should be a freshly-created object */
    if (owner != NULL && page_count != 0) {
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_nonvolatile,
                      ptoa(page_count));
        ledger_credit(owner->ledger,
                      task_ledgers.phys_footprint,
                      ptoa(page_count));
    }

    assert(object->objq.next == NULL);
    assert(object->objq.prev == NULL);

    queue_enter(&purgeable_nonvolatile_queue, object,
                vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    /* one more nonvolatile object for this object's owner */
    assert(object->vo_purgeable_owner == owner);
    vm_purgeable_nonvolatile_owner_update(owner, +1);
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    vm_object_lock_assert_exclusive(object);
}

void
vm_purgeable_nonvolatile_dequeue(
    vm_object_t object)
{
    task_t owner;

    vm_object_lock_assert_exclusive(object);

    owner = object->vo_purgeable_owner;
#if DEBUG
    assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
    if (owner != NULL) {
        /*
         * Update the owner's ledger to stop accounting
         * for this object.
         */
        vm_purgeable_accounting(object,
                                object->purgable,
                                TRUE); /* disown */
    }

    lck_mtx_lock(&vm_purgeable_queue_lock);
    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    vm_object_lock_assert_exclusive(object);
}

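/*
 * Pairing note: vm_purgeable_nonvolatile_enqueue() runs when an owned
 * purgeable object is created (VM_PURGABLE_NONVOLATILE) and starts the
 * owner's accounting; vm_purgeable_nonvolatile_dequeue() runs when the
 * object is reaped and ends it. In between, volatile/non-volatile
 * transitions move the object between this queue and the volatile
 * queues without losing the owner's ledger balance.
 */
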
void
vm_purgeable_accounting(
    vm_object_t   object,
    vm_purgable_t old_state,
    boolean_t     disown)
{
    task_t    owner;
    int       resident_page_count;
    int       wired_page_count;
    int       compressed_page_count;
    boolean_t disown_on_the_fly;

    vm_object_lock_assert_exclusive(object);

    owner = object->vo_purgeable_owner;
    if (owner == NULL)
        return;

    if (!disown && owner->task_purgeable_disowning) {
        /* task is disowning its purgeable objects: help it */
        disown_on_the_fly = TRUE;
    } else {
        disown_on_the_fly = FALSE;
    }

    resident_page_count = object->resident_page_count;
    wired_page_count = object->wired_page_count;
    if ((COMPRESSED_PAGER_IS_ACTIVE ||
         DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) &&
        object->pager != NULL) {
        compressed_page_count =
            vm_compressor_pager_get_count(object->pager);
    } else {
        compressed_page_count = 0;
    }

    if (old_state == VM_PURGABLE_VOLATILE ||
        old_state == VM_PURGABLE_EMPTY) {
        /* less volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_volatile,
                     ptoa(resident_page_count - wired_page_count));
        /* less compressed volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_volatile_compressed,
                     ptoa(compressed_page_count));

        if (disown || !object->alive || object->terminating) {
            /* wired pages were accounted as "non-volatile"... */
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_nonvolatile,
                         ptoa(wired_page_count));
            /* ... and in phys_footprint */
            ledger_debit(owner->ledger,
                         task_ledgers.phys_footprint,
                         ptoa(wired_page_count));

            if (!disown_on_the_fly &&
                (object->purgeable_queue_type ==
                 PURGEABLE_Q_TYPE_MAX)) {
                /*
                 * Not on a volatile queue: must be empty
                 * or emptying.
                 */
                vm_purgeable_nonvolatile_owner_update(owner, -1);
            } else {
                /* on a volatile queue */
                vm_purgeable_volatile_owner_update(owner, -1);
            }
            /* no more accounting for this dead object */
            object->vo_purgeable_owner = NULL;
#if DEBUG
            object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
            return;
        }

        /* more non-volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_nonvolatile,
                      ptoa(resident_page_count - wired_page_count));
        /* more compressed non-volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_nonvolatile_compressed,
                      ptoa(compressed_page_count));
        /* more footprint */
        ledger_credit(owner->ledger,
                      task_ledgers.phys_footprint,
                      ptoa(resident_page_count
                           + compressed_page_count
                           - wired_page_count));

    } else if (old_state == VM_PURGABLE_NONVOLATILE) {

        /* less non-volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_nonvolatile,
                     ptoa(resident_page_count - wired_page_count));
        /* less compressed non-volatile bytes in ledger */
        ledger_debit(owner->ledger,
                     task_ledgers.purgeable_nonvolatile_compressed,
                     ptoa(compressed_page_count));
        /* less footprint */
        ledger_debit(owner->ledger,
                     task_ledgers.phys_footprint,
                     ptoa(resident_page_count
                          + compressed_page_count
                          - wired_page_count));

        if (disown || !object->alive || object->terminating) {
            /* wired pages still accounted as "non-volatile" */
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_nonvolatile,
                         ptoa(wired_page_count));
            ledger_debit(owner->ledger,
                         task_ledgers.phys_footprint,
                         ptoa(wired_page_count));

            /* one less "non-volatile" object for the owner */
            if (!disown_on_the_fly) {
                assert(object->purgeable_queue_type ==
                       PURGEABLE_Q_TYPE_MAX);
            }
            vm_purgeable_nonvolatile_owner_update(owner, -1);
            /* no more accounting for this dead object */
            object->vo_purgeable_owner = NULL;
#if DEBUG
            object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
            return;
        }
        /* more volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_volatile,
                      ptoa(resident_page_count - wired_page_count));
        /* more compressed volatile bytes in ledger */
        ledger_credit(owner->ledger,
                      task_ledgers.purgeable_volatile_compressed,
                      ptoa(compressed_page_count));
    } else {
        panic("vm_purgeable_accounting(%p): "
              "unexpected old_state=%d\n",
              object, old_state);
    }

    vm_object_lock_assert_exclusive(object);
}

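/*
 * Worked ledger example (illustrative numbers): an owned object with
 * 10 resident pages (2 of them wired) and 4 compressed pages moves from
 * VM_PURGABLE_NONVOLATILE to VM_PURGABLE_VOLATILE. The NONVOLATILE
 * branch above debits purgeable_nonvolatile by 8 pages,
 * purgeable_nonvolatile_compressed by 4, and phys_footprint by
 * 10 + 4 - 2 = 12, then credits purgeable_volatile with 8 and
 * purgeable_volatile_compressed with 4. The 2 wired pages stay billed
 * as non-volatile and remain in the footprint, since volatile-but-wired
 * memory cannot actually be purged.
 */
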
void
vm_purgeable_nonvolatile_owner_update(
    task_t owner,
    int    delta)
{
    if (owner == NULL || delta == 0) {
        return;
    }

    if (delta > 0) {
        assert(owner->task_nonvolatile_objects >= 0);
        OSAddAtomic(delta, &owner->task_nonvolatile_objects);
        assert(owner->task_nonvolatile_objects > 0);
    } else {
        assert(owner->task_nonvolatile_objects > delta);
        OSAddAtomic(delta, &owner->task_nonvolatile_objects);
        assert(owner->task_nonvolatile_objects >= 0);
    }
}

void
vm_purgeable_volatile_owner_update(
    task_t owner,
    int    delta)
{
    if (owner == NULL || delta == 0) {
        return;
    }

    if (delta > 0) {
        assert(owner->task_volatile_objects >= 0);
        OSAddAtomic(delta, &owner->task_volatile_objects);
        assert(owner->task_volatile_objects > 0);
    } else {
        assert(owner->task_volatile_objects > delta);
        OSAddAtomic(delta, &owner->task_volatile_objects);
        assert(owner->task_volatile_objects >= 0);
    }
}

void
vm_purgeable_compressed_update(
    vm_object_t object,
    int         delta)
{
    task_t owner;

    vm_object_lock_assert_exclusive(object);

    if (delta == 0 ||
        !object->internal ||
        object->purgable == VM_PURGABLE_DENY ||
        object->vo_purgeable_owner == NULL) {
        /* not an owned purgeable VM object: nothing to update */
        return;
    }

    owner = object->vo_purgeable_owner;
    switch (object->purgable) {
    case VM_PURGABLE_DENY:
        break;
    case VM_PURGABLE_NONVOLATILE:
        if (delta > 0) {
            ledger_credit(owner->ledger,
                          task_ledgers.purgeable_nonvolatile_compressed,
                          ptoa(delta));
            ledger_credit(owner->ledger,
                          task_ledgers.phys_footprint,
                          ptoa(delta));
        } else {
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_nonvolatile_compressed,
                         ptoa(-delta));
            ledger_debit(owner->ledger,
                         task_ledgers.phys_footprint,
                         ptoa(-delta));
        }
        break;
    case VM_PURGABLE_VOLATILE:
    case VM_PURGABLE_EMPTY:
        if (delta > 0) {
            ledger_credit(owner->ledger,
                          task_ledgers.purgeable_volatile_compressed,
                          ptoa(delta));
        } else {
            ledger_debit(owner->ledger,
                         task_ledgers.purgeable_volatile_compressed,
                         ptoa(-delta));
        }
        break;
    default:
        panic("vm_purgeable_compressed_update(): "
              "unexpected purgable %d for object %p\n",
              object->purgable, object);
    }
}