/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/sched_prim.h>
#include <kern/ledger.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>                 /* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>

extern vm_pressure_level_t memorystatus_vm_pressure_level;

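/*
 * Overview of the token scheme, as implemented below: tokens live in a
 * dynamically grown array ("tokens"); index 0 is reserved as a NULL-like
 * sentinel, so token indices double as links. Each volatile queue keeps a
 * list of tokens; a token's "count" is the number of inactive-queue pages
 * that must drain before it ripens (count reaches 0). A ripe token means
 * a volatile object on that queue is a candidate for purging.
 */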
struct token {
    token_cnt_t count;
    token_idx_t prev;
    token_idx_t next;
};

struct token    *tokens;
token_idx_t     token_q_max_cnt = 0;
vm_size_t       token_q_cur_size = 0;

token_idx_t     token_free_idx = 0;         /* head of free queue */
token_idx_t     token_init_idx = 1;         /* token 0 is reserved!! */
int32_t         token_new_pagecount = 0;    /* count of pages that will
                                             * be added onto token queue */

int             available_for_purge = 0;    /* increase when ripe token
                                             * added, decrease when ripe
                                             * token removed.
                                             * protected by page_queue_lock
                                             */

static int      token_q_allocating = 0;     /* flag for singlethreading
                                             * allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t    purgeable_nonvolatile_queue;
int             purgeable_nonvolatile_count;

decl_lck_mtx_data(,vm_purgeable_queue_lock)

#define TOKEN_ADD           0x40    /* 0x100 */
#define TOKEN_DELETE        0x41    /* 0x104 */
#define TOKEN_RIPEN         0x42    /* 0x108 */
#define OBJECT_ADD          0x48    /* 0x120 */
#define OBJECT_REMOVE       0x49    /* 0x124 */
#define OBJECT_PURGE        0x4a    /* 0x128 */
#define OBJECT_PURGE_ALL    0x4b    /* 0x12c */

static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);

void vm_purgeable_nonvolatile_owner_update(task_t owner,
                                           int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
                                        int delta);


#if MACH_ASSERT
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
    int             token_cnt = 0, page_cnt = 0;
    token_idx_t     token = queue->token_q_head;
    token_idx_t     unripe = 0;
    int             our_inactive_count;

#if DEVELOPMENT
    static unsigned lightweight_check = 0;

    /*
     * Due to performance impact, only perform this check
     * every 100 times on DEVELOPMENT kernels.
     */
    if (lightweight_check++ < 100) {
        return;
    }

    lightweight_check = 0;
#endif

    while (token) {
        if (tokens[token].count != 0) {
            assert(queue->token_q_unripe);
            if (unripe == 0) {
                assert(token == queue->token_q_unripe);
                unripe = token;
            }
            page_cnt += tokens[token].count;
        }
        if (tokens[token].next == 0)
            assert(queue->token_q_tail == token);

        token_cnt++;
        token = tokens[token].next;
    }

    if (unripe)
        assert(queue->token_q_unripe == unripe);
    assert(token_cnt == queue->debug_count_tokens);

    /* obsolete queue doesn't maintain token counts */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
    {
        our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
        assert(our_inactive_count >= 0);
        assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
    }
}
#endif

/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
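/*
 * Note: may temporarily drop the page queue lock while growing the token
 * array, so callers must not rely on page queue state being preserved
 * across this call. Returns KERN_SUCCESS, KERN_ABORTED if the wait for a
 * concurrent allocation was interrupted, or the kmem_alloc/kmem_realloc
 * failure code if the token array could not be grown.
 */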
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

    /* new token */
    token_idx_t     token;
    enum purgeable_q_type i;

find_available_token:

    if (token_free_idx) {                           /* unused tokens available */
        token = token_free_idx;
        token_free_idx = tokens[token_free_idx].next;
    } else if (token_init_idx < token_q_max_cnt) {  /* lazy token array init */
        token = token_init_idx;
        token_init_idx++;
    } else {                                        /* allocate more memory */
        /* Wait if another thread is inside the memory alloc section */
        while (token_q_allocating) {
            wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
                                              LCK_SLEEP_DEFAULT,
                                              (event_t)&token_q_allocating,
                                              THREAD_UNINT);
            if (res != THREAD_AWAKENED)
                return KERN_ABORTED;
        }

        /* Check whether another thread has already grown the array */
        if (token_init_idx < token_q_max_cnt)
            goto find_available_token;

        /* Still no memory. Allocate some. */
        token_q_allocating = 1;

        /* Drop page queue lock so we can allocate */
        vm_page_unlock_queues();

        struct token *new_loc;
        vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
        kern_return_t result;

        if (alloc_size / sizeof (struct token) > TOKEN_COUNT_MAX) {
            result = KERN_RESOURCE_SHORTAGE;
        } else {
            if (token_q_cur_size) {
                result = kmem_realloc(kernel_map,
                                      (vm_offset_t) tokens,
                                      token_q_cur_size,
                                      (vm_offset_t *) &new_loc,
                                      alloc_size, VM_KERN_MEMORY_OSFMK);
            } else {
                result = kmem_alloc(kernel_map,
                                    (vm_offset_t *) &new_loc,
                                    alloc_size, VM_KERN_MEMORY_OSFMK);
            }
        }

        vm_page_lock_queues();

        if (result) {
            /* Unblock waiting threads */
            token_q_allocating = 0;
            thread_wakeup((event_t)&token_q_allocating);
            return result;
        }

        /* If we get here, we allocated new memory. Update pointers and
         * dealloc old range */
        struct token *old_tokens = tokens;
        tokens = new_loc;
        vm_size_t old_token_q_cur_size = token_q_cur_size;
        token_q_cur_size = alloc_size;
        token_q_max_cnt = (token_idx_t) (token_q_cur_size /
                                         sizeof(struct token));
        assert(token_init_idx < token_q_max_cnt);   /* We must have a free token now */

        if (old_token_q_cur_size) {     /* clean up old mapping */
            vm_page_unlock_queues();
            /* kmem_realloc leaves the old region mapped. Get rid of it. */
            kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
            vm_page_lock_queues();
        }

        /* Unblock waiting threads */
        token_q_allocating = 0;
        thread_wakeup((event_t)&token_q_allocating);

        goto find_available_token;
    }

    assert(token);

    /*
     * the new pagecount we got needs to be applied to all queues except
     * the obsolete queue
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
        assert(pages >= 0);
        assert(pages <= TOKEN_COUNT_MAX);
        purgeable_queues[i].new_pages = (int32_t) pages;
        assert(purgeable_queues[i].new_pages == pages);
    }
    token_new_pagecount = 0;

    /* set token counter value */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
        tokens[token].count = queue->new_pages;
    else
        tokens[token].count = 0;    /* all obsolete items are
                                     * ripe immediately */
    queue->new_pages = 0;

    /* put token on token counter list */
    tokens[token].next = 0;
    if (queue->token_q_tail == 0) {
        assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
        queue->token_q_head = token;
        tokens[token].prev = 0;
    } else {
        tokens[queue->token_q_tail].next = token;
        tokens[token].prev = queue->token_q_tail;
    }
    if (queue->token_q_unripe == 0) {   /* only ripe tokens (token
                                         * count == 0) in queue */
        if (tokens[token].count > 0)
            queue->token_q_unripe = token;  /* first unripe token */
        else
            available_for_purge++;  /* added a ripe token?
                                     * increase available count */
    }
    queue->token_q_tail = token;

#if MACH_ASSERT
    queue->debug_count_tokens++;
    /* Check both queues, since we modified the new_pages count on each */
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
                          queue->type,
                          tokens[token].count,  /* num pages on token
                                                 * (last token) */
                          queue->debug_count_tokens,
                          0,
                          0);
#endif

    return KERN_SUCCESS;
}

/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

    token_idx_t     token;
    token = queue->token_q_head;

    assert(token);

    if (token) {
        assert(queue->token_q_tail);
        if (queue->token_q_head == queue->token_q_unripe) {
            /* no ripe tokens... must move unripe pointer */
            queue->token_q_unripe = tokens[token].next;
        } else {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        }

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        queue->token_q_head = tokens[token].next;
        if (queue->token_q_head) {
            tokens[queue->token_q_head].count += tokens[token].count;
            tokens[queue->token_q_head].prev = 0;
        } else {
            /* currently no other tokens in the queue */
            /*
             * the page count must be added to the next newly
             * created token
             */
            queue->new_pages += tokens[token].count;
            /* if head is zero, tail is too */
            queue->token_q_tail = 0;
        }

#if MACH_ASSERT
        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count, /* num pages on new
                                                                  * first token */
                              token_new_pagecount,  /* num pages waiting for
                                                     * next token */
                              available_for_purge,
                              0);
#endif
    }
    return token;
}

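/*
 * Remove the last token from the queue and return its index. Its count is
 * folded back into the queue's new_pages counter.
 * Call with page queue locked.
 */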
static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

    token_idx_t     token;
    token = queue->token_q_tail;

    assert(token);

    if (token) {
        assert(queue->token_q_head);

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        if (queue->token_q_unripe == 0) {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        } else if (queue->token_q_unripe == token) {
            /* we're removing the only unripe token */
            queue->token_q_unripe = 0;
        }

        if (token == queue->token_q_head) {
            /* token is the last one in the queue */
            queue->token_q_head = 0;
            queue->token_q_tail = 0;
        } else {
            token_idx_t new_tail;

            new_tail = tokens[token].prev;

            assert(new_tail);
            assert(tokens[new_tail].next == token);

            queue->token_q_tail = new_tail;
            tokens[new_tail].next = 0;
        }

        queue->new_pages += tokens[token].count;

#if MACH_ASSERT
        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count, /* num pages on new
                                                                  * first token */
                              token_new_pagecount,  /* num pages waiting for
                                                     * next token */
                              available_for_purge,
                              0);
#endif
    }
    return token;
}

/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    token_idx_t     token = vm_purgeable_token_remove_first(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}

void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    token_idx_t     token = vm_purgeable_token_remove_last(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}


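/*
 * Advance the token queues by one page: each call accounts for one page
 * leaving the inactive queue, decrementing the first unripe token's count
 * on every (non-obsolete) queue and ripening tokens whose count hits zero.
 */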
/* Call with page queue locked. */
void
vm_purgeable_q_advance_all(void)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

    /* check queue counters - if they get really large, scale them back.
     * They tend to get that large when there is no purgeable queue action */
    int i;
    if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1))   /* a system idling for years might get there */
    {
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
            int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
            assert(pages >= 0);
            assert(pages <= TOKEN_COUNT_MAX);
            purgeable_queues[i].new_pages = (int32_t) pages;
            assert(purgeable_queues[i].new_pages == pages);
        }
        token_new_pagecount = 0;
    }

    /*
     * Decrement token counters. A token counter can be zero, this means the
     * object is ripe to be purged. It is not purged immediately, because that
     * could cause several objects to be purged even if purging one would satisfy
     * the memory needs. Instead, the pageout thread purges one after the other
     * by calling vm_purgeable_object_purge_one and then rechecking the memory
     * balance.
     *
     * No need to advance obsolete queue - all items are ripe there,
     * always
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue = &purgeable_queues[i];
        uint32_t num_pages = 1;

        /* Iterate over tokens as long as there are unripe tokens. */
        while (queue->token_q_unripe) {
            if (tokens[queue->token_q_unripe].count && num_pages)
            {
                tokens[queue->token_q_unripe].count -= 1;
                num_pages -= 1;
            }

            if (tokens[queue->token_q_unripe].count == 0) {
                queue->token_q_unripe = tokens[queue->token_q_unripe].next;
                available_for_purge++;
                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
                                      queue->type,
                                      tokens[queue->token_q_head].count, /* num pages on new
                                                                          * first token */
                                      0,
                                      available_for_purge,
                                      0);
                continue;   /* One token ripened. Make sure to
                             * check the next. */
            }
            if (num_pages == 0)
                break;      /* Current token not ripe and no more pages.
                             * Work done. */
        }

        /*
         * If there are no unripe tokens in the queue, decrement the
         * new_pages counter instead. new_pages can be negative, but must be
         * canceled out by token_new_pagecount -- since the inactive queue as a
         * whole always contains a nonnegative number of pages.
         */
        if (!queue->token_q_unripe) {
            queue->new_pages -= num_pages;
            assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
        }
#if MACH_ASSERT
        vm_purgeable_token_check_queue(queue);
#endif
    }
}

/*
 * Grab any ripe object and purge it: check the obsolete queue first, then go
 * through each volatile group and select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *    Yes - purge it. Remove token.
 *    No - check other queue. Is there an object?
 *         No - increment group, then go to (1)
 *         Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *               token from other queue and migrate unripe token from this
 *               queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
    /* return token to free list. advance token list. */
    token_idx_t     new_head = tokens[queue->token_q_head].next;
    tokens[queue->token_q_head].next = token_free_idx;
    tokens[queue->token_q_head].prev = 0;
    token_free_idx = queue->token_q_head;
    queue->token_q_head = new_head;
    tokens[new_head].prev = 0;
    if (new_head == 0)
        queue->token_q_tail = 0;

#if MACH_ASSERT
    queue->debug_count_tokens--;
    vm_purgeable_token_check_queue(queue);
#endif

    available_for_purge--;
    assert(available_for_purge >= 0);
}

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    assert(queue->token_q_head);

    if (tokens[queue->token_q_head].count == 0) {
        /* This queue has a ripe token. Remove. */
        vm_purgeable_token_remove_ripe(queue);
    } else {
        assert(queue2);
        /*
         * queue2 must have a ripe token. Remove, and migrate one
         * from queue to queue2.
         */
        vm_purgeable_token_remove_ripe(queue2);
        /* migrate unripe token */
        token_idx_t     token;
        token_cnt_t     count;

        /* remove token from queue1 */
        assert(queue->token_q_unripe == queue->token_q_head);   /* queue1 had no unripe
                                                                 * tokens, remember? */
        token = vm_purgeable_token_remove_first(queue);
        assert(token);

        count = tokens[token].count;

        /* migrate to queue2 */
        /* go to migration target loc */

        token_idx_t     token_to_insert_before = queue2->token_q_head, token_to_insert_after;

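        /*
         * Token counts are deltas relative to the preceding token, so walk
         * queue2 subtracting counts until the remaining count fits in front
         * of token_to_insert_before (or we run off the end of the list).
         */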
        while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
            count -= tokens[token_to_insert_before].count;
            token_to_insert_before = tokens[token_to_insert_before].next;
        }

        /* token_to_insert_before is now set correctly */

        /* should the inserted token become the first unripe token? */
        if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
            queue2->token_q_unripe = token; /* if so, must update unripe pointer */

        /*
         * insert token.
         * if inserting at end, reduce new_pages by that value;
         * otherwise, reduce counter of next token
         */

        tokens[token].count = count;

        if (token_to_insert_before != 0) {
            token_to_insert_after = tokens[token_to_insert_before].prev;

            tokens[token].next = token_to_insert_before;
            tokens[token_to_insert_before].prev = token;

            assert(tokens[token_to_insert_before].count >= count);
            tokens[token_to_insert_before].count -= count;
        } else {
            /* if we ran off the end of the list, the token to insert after is the tail */
            token_to_insert_after = queue2->token_q_tail;

            tokens[token].next = 0;
            queue2->token_q_tail = token;

            assert(queue2->new_pages >= (int32_t) count);
            queue2->new_pages -= count;
        }

        if (token_to_insert_after != 0) {
            tokens[token].prev = token_to_insert_after;
            tokens[token_to_insert_after].next = token;
        } else {
            /* is this case possible? */
            tokens[token].prev = 0;
            queue2->token_q_head = token;
        }

#if MACH_ASSERT
        queue2->debug_count_tokens++;
        vm_purgeable_token_check_queue(queue2);
#endif
    }
}

/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(
    purgeable_q_t   queue,
    int             group,
    boolean_t       pick_ripe)
{
    vm_object_t     object, best_object;
    int             object_task_importance;
    int             best_object_task_importance;
    int             best_object_skipped;
    int             num_objects_skipped;
    task_t          owner;

    best_object = VM_OBJECT_NULL;
    best_object_task_importance = INT_MAX;

    lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
    /*
     * Usually we would pick the first element from a queue. However, we
     * might not be able to get a lock on it, in which case we try the
     * remaining elements in order.
     */

    num_objects_skipped = -1;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq),
         num_objects_skipped++) {

        if (pick_ripe &&
            !object->purgeable_when_ripe) {
            /* we want an object that has a ripe token */
            continue;
        }

        object_task_importance = 0;

        owner = object->vo_purgeable_owner;
        if (owner) {
            object_task_importance = task_importance_estimate(owner);
        }

        if (object_task_importance < best_object_task_importance) {
            if (vm_object_lock_try(object)) {
                if (best_object != VM_OBJECT_NULL) {
                    /* forget about previous best object */
                    vm_object_unlock(best_object);
                }
                best_object = object;
                best_object_task_importance = object_task_importance;
                best_object_skipped = num_objects_skipped;
                if (best_object_task_importance == 0) {
                    /* can't get any better: stop looking */
                    break;
                }
            }
        }
    }
    object = best_object;

    if (object == VM_OBJECT_NULL) {
        return VM_OBJECT_NULL;
    }

    /* Locked. Great. We'll take it. Remove and return. */
    // printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

    vm_object_lock_assert_exclusive(object);

    queue_remove(&queue->objq[group], object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);

#if DEBUG
    object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */

    /* keep queue of non-volatile objects */
    queue_enter(&purgeable_nonvolatile_queue, object,
                vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    /* one more nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, +1);

#if MACH_ASSERT
    queue->debug_count_objects--;
#endif
    return object;
}

/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
    enum purgeable_q_type i;
    int             group;
    vm_object_t     object;
    unsigned int    purged_count;
    uint32_t        collisions;

    purged_count = 0;
    collisions = 0;

restart:
    lck_mtx_lock(&vm_purgeable_queue_lock);
    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t   queue;

        queue = &purgeable_queues[i];

        /*
         * Look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            while (!queue_empty(&queue->objq[group])) {
                object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
                if (object == VM_OBJECT_NULL) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    mutex_pause(collisions++);
                    goto restart;
                }

                lck_mtx_unlock(&vm_purgeable_queue_lock);

                /* Lock the page queue here so we don't hold it
                 * over the whole, lengthy operation */
                if (object->purgeable_when_ripe) {
                    vm_page_lock_queues();
                    vm_purgeable_token_remove_first(queue);
                    vm_page_unlock_queues();
                }

                (void) vm_object_purge(object, 0);
                assert(object->purgable == VM_PURGABLE_EMPTY);
                /* no change in purgeable accounting */

                vm_object_unlock(object);
                purged_count++;
                goto restart;
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
                          purged_count, /* # of purged objects */
                          0,
                          available_for_purge,
                          0,
                          0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}

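/*
 * Convenience wrapper: takes and drops the page queue lock around
 * vm_purgeable_object_purge_one().
 */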
boolean_t
vm_purgeable_object_purge_one_unlocked(
    int force_purge_below_group)
{
    boolean_t       retval;

    vm_page_lock_queues();
    retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
    vm_page_unlock_queues();

    return retval;
}

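/*
 * Purge a single volatile object, preferring queues with ripe tokens and
 * owners with the lowest task importance. Groups numbered below
 * "force_purge_below_group" may be purged even when no token is ripe;
 * pass 0 to purge only objects with ripe tokens. Returns TRUE if an
 * object was purged.
 */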
boolean_t
vm_purgeable_object_purge_one(
    int             force_purge_below_group,
    int             flags)
{
    enum purgeable_q_type i;
    int             group;
    vm_object_t     object = 0;
    purgeable_q_t   queue, queue2;
    boolean_t       forced_purge;

    /* Need the page queue lock since we'll be changing the token queue. */
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        queue = &purgeable_queues[i];

        if (force_purge_below_group == 0) {
            /*
             * Are there any ripe tokens on this queue? If yes,
             * we'll find an object to purge there
             */
            if (!queue->token_q_head) {
                /* no token: look at next purgeable queue */
                continue;
            }

            if (tokens[queue->token_q_head].count != 0) {
                /* no ripe token: next queue */
                continue;
            }
        }

        /*
         * Now look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            if (!queue->token_q_head ||
                tokens[queue->token_q_head].count != 0) {
                /* no tokens or no ripe tokens */

                if (group >= force_purge_below_group) {
                    /* no more groups to force-purge */
                    break;
                }

                /*
                 * Try and purge an object in this group
                 * even though no tokens are ripe.
                 */
                if (!queue_empty(&queue->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_delete_first(queue);
                    }
                    forced_purge = TRUE;
                    goto purge_now;
                }

                /* nothing to purge in this group: next group */
                continue;
            }
            if (!queue_empty(&queue->objq[group]) &&
                (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                if (object->purgeable_when_ripe) {
                    vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                }
                forced_purge = FALSE;
                goto purge_now;
            }
            if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
                /* This is the token migration case, and it works between
                 * FIFO and LIFO only */
                queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
                                           PURGEABLE_Q_TYPE_FIFO :
                                           PURGEABLE_Q_TYPE_LIFO];

                if (!queue_empty(&queue2->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                    }
                    forced_purge = FALSE;
                    goto purge_now;
                }
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    /*
     * because we have to do a try_lock on the objects which could fail,
     * we could end up with no object to purge at this time, even though
     * we have objects in a purgeable state
     */
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return FALSE;

purge_now:

    assert(object);
    vm_page_unlock_queues();    /* Unlock for call to vm_object_purge() */
    // printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_purgeable_owner, task_importance_estimate(object->vo_purgeable_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
    (void) vm_object_purge(object, flags);
    assert(object->purgable == VM_PURGABLE_EMPTY);
    /* no change in purgeable accounting */
    vm_object_unlock(object);
    vm_page_lock_queues();

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
                          object,   /* purged object */
                          0,
                          available_for_purge,
                          0,
                          0);

    return TRUE;
}

/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
    vm_object_lock_assert_exclusive(object);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    /* one less nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(object->vo_purgeable_owner, -1);

    if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
        group = 0;

    if (queue->type != PURGEABLE_Q_TYPE_LIFO)   /* fifo and obsolete are
                                                 * fifo-queued */
        queue_enter(&queue->objq[group], object, vm_object_t, objq);    /* last to die */
    else
        queue_enter_first(&queue->objq[group], object, vm_object_t, objq); /* first to die */
    /* one more volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, +1);

    object->purgeable_queue_type = queue->type;
    object->purgeable_queue_group = group;

#if DEBUG
    assert(object->vo_purgeable_volatilizer == NULL);
    object->vo_purgeable_volatilizer = current_task();
    OSBacktrace(&object->purgeable_volatilizer_bt[0], 16);
#endif /* DEBUG */

#if MACH_ASSERT
    queue->debug_count_objects++;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);
#endif

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}

/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
    int             group;
    enum purgeable_q_type type;
    purgeable_q_t   queue;

    vm_object_lock_assert_exclusive(object);

    type = object->purgeable_queue_type;
    group = object->purgeable_queue_group;

    if (type == PURGEABLE_Q_TYPE_MAX) {
        if (object->objq.prev || object->objq.next)
            panic("unmarked object on purgeable q");

        return NULL;
    } else if (!(object->objq.prev && object->objq.next))
        panic("marked object not on purgeable q");

    lck_mtx_lock(&vm_purgeable_queue_lock);

    queue = &purgeable_queues[type];

    queue_remove(&queue->objq[group], object, vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(object->vo_purgeable_owner, -1);
#if DEBUG
    object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
    /* keep queue of non-volatile objects */
    if (object->alive && !object->terminating) {
        task_t owner;
        queue_enter(&purgeable_nonvolatile_queue, object,
                    vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        owner = object->vo_purgeable_owner;
        vm_purgeable_nonvolatile_owner_update(owner, +1);
    }

#if MACH_ASSERT
    queue->debug_count_objects--;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);
#endif

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;

    vm_object_lock_assert_exclusive(object);

    return &purgeable_queues[type];
}

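/*
 * Accumulate the object count and resident byte size of one queue/group into
 * "stat", optionally restricted to objects owned by "target_task".
 * Call with purgeable queue locked.
 */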
void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
    lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    stat->count = stat->size = 0;
    vm_object_t     object;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (!target_task || object->vo_purgeable_owner == target_task) {
            stat->count++;
            stat->size += (object->resident_page_count * PAGE_SIZE);
        }
    }
    return;
}

void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
    purgeable_q_t   queue;
    int             group;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Populate fifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);

    /* Populate lifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);

    /* Populate obsolete data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}

#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
    purgeable_q_t queue,
    int group,
    task_t task,
    pvm_account_info_t acnt_info)
{
    vm_object_t     object;
    uint64_t        compressed_count;

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (object->vo_purgeable_owner == task) {
            compressed_count = vm_compressor_pager_get_count(object->pager);
            acnt_info->pvm_volatile_compressed_count += compressed_count;
            acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }

}

/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
    task_t task,
    pvm_account_info_t acnt_info)
{
    queue_head_t    *nonvolatile_q;
    vm_object_t     object;
    int             group;
    int             state;
    uint64_t        compressed_count;
    purgeable_q_t   volatile_q;


    if ((task == NULL) || (acnt_info == NULL)) {
        return KERN_INVALID_ARGUMENT;
    }

    acnt_info->pvm_volatile_count = 0;
    acnt_info->pvm_volatile_compressed_count = 0;
    acnt_info->pvm_nonvolatile_count = 0;
    acnt_info->pvm_nonvolatile_compressed_count = 0;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
         !queue_end(nonvolatile_q, (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (object->vo_purgeable_owner == task) {
            state = object->purgable;
            compressed_count = vm_compressor_pager_get_count(object->pager);
            if (state == VM_PURGABLE_EMPTY) {
                acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_volatile_compressed_count += compressed_count;
            } else {
                acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
            }
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
    acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

    return KERN_SUCCESS;
}
#endif /* DEVELOPMENT || DEBUG */

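/*
 * Disown (clear the owner of) every volatile object in one queue/group that
 * is owned by "task". Call with purgeable queue locked; the lock may be
 * dropped and retaken on object lock contention, restarting the scan.
 */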
static void
vm_purgeable_volatile_queue_disown(
    purgeable_q_t   queue,
    int             group,
    task_t          task)
{
    vm_object_t     object;
    int             collisions;

    collisions = 0;

again:
    lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
        /*
         * Sanity check: let's scan the entire queues to
         * make sure we don't leave any purgeable objects
         * pointing back at a dead task. If the counters
         * are off, we would fail to assert that they go
         * back to 0 after disowning is done.
         */
#else /* MACH_ASSERT */
        if (task->task_volatile_objects == 0) {
            /* no more volatile objects owned by "task" */
            break;
        }
#endif /* MACH_ASSERT */
        if (object->vo_purgeable_owner == task) {
            if (!vm_object_lock_try(object)) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                mutex_pause(collisions++);
                lck_mtx_lock(&vm_purgeable_queue_lock);
                goto again;
            }
            assert(object->purgable == VM_PURGABLE_VOLATILE);
            if (object->vo_purgeable_owner == task) {
                vm_purgeable_accounting(object,
                                        object->purgable,
                                        TRUE);  /* disown */
                assert(object->vo_purgeable_owner == NULL);
            }
            vm_object_unlock(object);
        }
    }
}

void
vm_purgeable_disown(
    task_t  task)
{
    purgeable_q_t   volatile_q;
    int             group;
    queue_head_t    *nonvolatile_q;
    vm_object_t     object;
    int             collisions;

    if (task == NULL) {
        return;
    }

    task->task_purgeable_disowning = TRUE;

    /*
     * Scan the purgeable objects queues for objects owned by "task".
     * This has to be done "atomically" under the "vm_purgeable_queue"
     * lock, to ensure that no new purgeable object get associated
     * with this task or moved between queues while we're scanning.
     */

    /*
     * Scan non-volatile queue for objects owned by "task".
     */

    collisions = 0;

again:
    if (task->task_purgeable_disowned) {
        /* task has already disowned its purgeable memory */
        assert(task->task_volatile_objects == 0);
        assert(task->task_nonvolatile_objects == 0);
        return;
    }
    lck_mtx_lock(&vm_purgeable_queue_lock);

    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
         !queue_end(nonvolatile_q, (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
#if MACH_ASSERT
        /*
         * Sanity check: let's scan the entire queues to
         * make sure we don't leave any purgeable objects
         * pointing back at a dead task. If the counters
         * are off, we would fail to assert that they go
         * back to 0 after disowning is done.
         */
#else /* MACH_ASSERT */
        if (task->task_nonvolatile_objects == 0) {
            /* no more non-volatile objects owned by "task" */
            break;
        }
#endif /* MACH_ASSERT */
#if DEBUG
        assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
        if (object->vo_purgeable_owner == task) {
            if (!vm_object_lock_try(object)) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                mutex_pause(collisions++);
                goto again;
            }
            if (object->vo_purgeable_owner == task) {
                vm_purgeable_accounting(object,
                                        object->purgable,
                                        TRUE);  /* disown */
                assert(object->vo_purgeable_owner == NULL);
            }
            vm_object_unlock(object);
        }
    }

    lck_mtx_yield(&vm_purgeable_queue_lock);

    /*
     * Scan volatile queues for objects owned by "task".
     */

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_volatile_queue_disown(volatile_q, 0, task);
    lck_mtx_yield(&vm_purgeable_queue_lock);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_volatile_queue_disown(volatile_q, group, task);
        lck_mtx_yield(&vm_purgeable_queue_lock);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_volatile_queue_disown(volatile_q, group, task);
        lck_mtx_yield(&vm_purgeable_queue_lock);
    }

    if (task->task_volatile_objects != 0 ||
        task->task_nonvolatile_objects != 0) {
        /* some purgeable objects sneaked into a queue: find them */
        lck_mtx_unlock(&vm_purgeable_queue_lock);
        mutex_pause(collisions++);
        goto again;
    }

    /* there shouldn't be any purgeable objects owned by task now */
    assert(task->task_volatile_objects == 0);
    assert(task->task_nonvolatile_objects == 0);
    assert(task->task_purgeable_disowning);

    /* and we don't need to try and disown again */
    task->task_purgeable_disowned = TRUE;

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}


#if notyet
static int
vm_purgeable_queue_purge_task_owned(
    purgeable_q_t   queue,
    int             group,
    task_t          task)
{
    vm_object_t     object;
    int             num_objects;
    int             collisions;
    int             num_objects_purged;

    num_objects_purged = 0;
    collisions = 0;

look_again:
    lck_mtx_lock(&vm_purgeable_queue_lock);

    num_objects = 0;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {

        if (object->vo_purgeable_owner != task &&
            object->vo_purgeable_owner != NULL) {
            continue;
        }

        /* found an object: try and grab it */
        if (!vm_object_lock_try(object)) {
            lck_mtx_unlock(&vm_purgeable_queue_lock);
            mutex_pause(collisions++);
            goto look_again;
        }
        /* got it ! */

        collisions = 0;

        /* remove object from purgeable queue */
        queue_remove(&queue->objq[group], object,
                     vm_object_t, objq);
        object->objq.next = NULL;
        object->objq.prev = NULL;
        /* one less volatile object for this object's owner */
        assert(object->vo_purgeable_owner == task);
        vm_purgeable_volatile_owner_update(task, -1);

#if DEBUG
        object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
        queue_enter(&purgeable_nonvolatile_queue, object,
                    vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        assert(object->vo_purgeable_owner == task);
        vm_purgeable_nonvolatile_owner_update(task, +1);

        /* unlock purgeable queues */
        lck_mtx_unlock(&vm_purgeable_queue_lock);

        if (object->purgeable_when_ripe) {
            /* remove a token */
            vm_page_lock_queues();
            vm_purgeable_token_remove_first(queue);
            vm_page_unlock_queues();
        }

        /* purge the object */
        (void) vm_object_purge(object, 0);
        assert(object->purgable == VM_PURGABLE_EMPTY);
        /* no change for purgeable accounting */
        vm_object_unlock(object);
        num_objects_purged++;

        /* we unlocked the purgeable queues, so start over */
        goto look_again;
    }

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    return num_objects_purged;
}

int
vm_purgeable_purge_task_owned(
    task_t  task)
{
    purgeable_q_t   queue;
    int             group;
    int             num_objects_purged;

    num_objects_purged = 0;

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
                                                              0,
                                                              task);

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
                                                                  group,
                                                                  task);

    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        num_objects_purged += vm_purgeable_queue_purge_task_owned(queue,
                                                                  group,
                                                                  task);

    return num_objects_purged;
}
#endif

void
vm_purgeable_nonvolatile_enqueue(
	vm_object_t	object,
	task_t		owner)
{
	int	page_count;

	vm_object_lock_assert_exclusive(object);

	assert(object->purgable == VM_PURGABLE_NONVOLATILE);
	assert(object->vo_purgeable_owner == NULL);
	assert(owner != NULL);

	lck_mtx_lock(&vm_purgeable_queue_lock);

	if (owner->task_purgeable_disowning) {
		/* task is exiting and no longer tracking purgeable objects */
		owner = NULL;
	}

	object->vo_purgeable_owner = owner;
#if DEBUG
	object->vo_purgeable_volatilizer = NULL;
	OSBacktrace(&object->purgeable_owner_bt[0], 16);
#endif /* DEBUG */

	page_count = object->resident_page_count;
	assert(page_count == 0);	/* should be a freshly-created object */
	if (owner != NULL && page_count != 0) {
		/*
		 * Defensive path: only reachable when the assert above
		 * is compiled out and the object already has pages.
		 */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile,
			      ptoa(page_count));
		ledger_credit(owner->ledger,
			      task_ledgers.phys_footprint,
			      ptoa(page_count));
	}

	assert(object->objq.next == NULL);
	assert(object->objq.prev == NULL);

	queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	/* one more nonvolatile object for this object's owner */
	assert(object->vo_purgeable_owner == owner);
	vm_purgeable_nonvolatile_owner_update(owner, +1);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}

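/*
 * vm_purgeable_nonvolatile_dequeue:
 *	Undo vm_purgeable_nonvolatile_enqueue(): settle the owner's
 *	ledgers for this object ("disown" it), then unlink it from
 *	the global non-volatile queue.  The caller must hold the
 *	object lock exclusively.
 */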
void
vm_purgeable_nonvolatile_dequeue(
	vm_object_t	object)
{
	task_t	owner;

	vm_object_lock_assert_exclusive(object);

	owner = object->vo_purgeable_owner;
#if DEBUG
	assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
	if (owner != NULL) {
		/*
		 * Update the owner's ledger to stop accounting
		 * for this object.
		 */
		vm_purgeable_accounting(object,
					object->purgable,
					TRUE);	/* disown */
	}

	lck_mtx_lock(&vm_purgeable_queue_lock);
	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
		     vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}

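/*
 * vm_purgeable_accounting:
 *	Move this object's pages between the owner's "volatile" and
 *	"non-volatile" ledgers as its purgeable state changes away
 *	from "old_state".  Wired pages always stay in the non-volatile
 *	ledger (and in phys_footprint); only the remaining resident
 *	pages and any compressed pages move between ledgers.
 *
 *	Hypothetical example (numbers for illustration only): an
 *	object with 10 resident pages, 2 of them wired, and 3
 *	compressed pages going from VOLATILE to NONVOLATILE debits
 *	purgeable_volatile by ptoa(8) and purgeable_volatile_compressed
 *	by ptoa(3), then credits purgeable_nonvolatile by ptoa(8),
 *	purgeable_nonvolatile_compressed by ptoa(3), and
 *	phys_footprint by ptoa(10 + 3 - 2) = ptoa(11).
 */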
void
vm_purgeable_accounting(
	vm_object_t	object,
	vm_purgable_t	old_state,
	boolean_t	disown)
{
	task_t		owner;
	int		resident_page_count;
	int		wired_page_count;
	int		compressed_page_count;
	boolean_t	disown_on_the_fly;

	vm_object_lock_assert_exclusive(object);

	owner = object->vo_purgeable_owner;
	if (owner == NULL)
		return;

	if (!disown && owner->task_purgeable_disowning) {
		/* task is disowning its purgeable objects: help it */
		disown_on_the_fly = TRUE;
	} else {
		disown_on_the_fly = FALSE;
	}

	resident_page_count = object->resident_page_count;
	wired_page_count = object->wired_page_count;
	if ((COMPRESSED_PAGER_IS_ACTIVE ||
	     DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) &&
	    object->pager != NULL) {
		compressed_page_count =
			vm_compressor_pager_get_count(object->pager);
	} else {
		compressed_page_count = 0;
	}

	if (old_state == VM_PURGABLE_VOLATILE ||
	    old_state == VM_PURGABLE_EMPTY) {
		/* fewer volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_volatile,
			     ptoa(resident_page_count - wired_page_count));
		/* fewer compressed volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_volatile_compressed,
			     ptoa(compressed_page_count));

		if (disown || !object->alive || object->terminating) {
			/* wired pages were accounted as "non-volatile"... */
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile,
				     ptoa(wired_page_count));
			/* ... and in phys_footprint */
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(wired_page_count));

			if (!disown_on_the_fly &&
			    (object->purgeable_queue_type ==
			     PURGEABLE_Q_TYPE_MAX)) {
				/*
				 * Not on a volatile queue: must be empty
				 * or emptying.
				 */
				vm_purgeable_nonvolatile_owner_update(owner, -1);
			} else {
				/* on a volatile queue */
				vm_purgeable_volatile_owner_update(owner, -1);
			}
			/* no more accounting for this dead object */
			object->vo_purgeable_owner = NULL;
#if DEBUG
			object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
			return;
		}

		/* more non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile,
			      ptoa(resident_page_count - wired_page_count));
		/* more compressed non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_nonvolatile_compressed,
			      ptoa(compressed_page_count));
		/* more footprint */
		ledger_credit(owner->ledger,
			      task_ledgers.phys_footprint,
			      ptoa(resident_page_count
				   + compressed_page_count
				   - wired_page_count));

	} else if (old_state == VM_PURGABLE_NONVOLATILE) {
		/* fewer non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_nonvolatile,
			     ptoa(resident_page_count - wired_page_count));
		/* fewer compressed non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_nonvolatile_compressed,
			     ptoa(compressed_page_count));
		/* less footprint */
		ledger_debit(owner->ledger,
			     task_ledgers.phys_footprint,
			     ptoa(resident_page_count
				  + compressed_page_count
				  - wired_page_count));

		if (disown || !object->alive || object->terminating) {
			/* wired pages still accounted as "non-volatile" */
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile,
				     ptoa(wired_page_count));
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(wired_page_count));

			/* one less "non-volatile" object for the owner */
			if (!disown_on_the_fly) {
				assert(object->purgeable_queue_type ==
				       PURGEABLE_Q_TYPE_MAX);
			}
			vm_purgeable_nonvolatile_owner_update(owner, -1);
			/* no more accounting for this dead object */
			object->vo_purgeable_owner = NULL;
#if DEBUG
			object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
			return;
		}
		/* more volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_volatile,
			      ptoa(resident_page_count - wired_page_count));
		/* more compressed volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      task_ledgers.purgeable_volatile_compressed,
			      ptoa(compressed_page_count));
	} else {
		panic("vm_purgeable_accounting(%p): "
		      "unexpected old_state=%d\n",
		      object, old_state);
	}

	vm_object_lock_assert_exclusive(object);
}

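/*
 * vm_purgeable_nonvolatile_owner_update:
 *	Atomically adjust the owning task's count of non-volatile
 *	purgeable objects by "delta", asserting that the count can
 *	never go negative.
 */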
void
vm_purgeable_nonvolatile_owner_update(
	task_t	owner,
	int	delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_nonvolatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects > 0);
	} else {
		/* delta < 0: the count must be large enough to absorb it */
		assert(owner->task_nonvolatile_objects >= -delta);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects >= 0);
	}
}

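/*
 * vm_purgeable_volatile_owner_update:
 *	Counterpart of vm_purgeable_nonvolatile_owner_update() for the
 *	task's count of volatile purgeable objects.
 */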
void
vm_purgeable_volatile_owner_update(
	task_t	owner,
	int	delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_volatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects > 0);
	} else {
		/* delta < 0: the count must be large enough to absorb it */
		assert(owner->task_volatile_objects >= -delta);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects >= 0);
	}
}

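/*
 * vm_purgeable_compressed_update:
 *	Adjust the owner's compressed-page ledgers when "delta" pages
 *	of this purgeable object are compressed (delta > 0) or
 *	decompressed/freed (delta < 0).  Non-volatile compressed pages
 *	also count toward phys_footprint; volatile ones do not.
 *	Presumably invoked from the VM compressor pager when it adds
 *	or removes compressed pages for an internal object.
 */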
void
vm_purgeable_compressed_update(
	vm_object_t	object,
	int		delta)
{
	task_t	owner;

	vm_object_lock_assert_exclusive(object);

	if (delta == 0 ||
	    !object->internal ||
	    object->purgable == VM_PURGABLE_DENY ||
	    object->vo_purgeable_owner == NULL) {
		/* not an owned purgeable VM object: nothing to update */
		return;
	}

	owner = object->vo_purgeable_owner;
	switch (object->purgable) {
	case VM_PURGABLE_DENY:
		/* not reached: filtered out above */
		break;
	case VM_PURGABLE_NONVOLATILE:
		if (delta > 0) {
			ledger_credit(owner->ledger,
				      task_ledgers.purgeable_nonvolatile_compressed,
				      ptoa(delta));
			ledger_credit(owner->ledger,
				      task_ledgers.phys_footprint,
				      ptoa(delta));
		} else {
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile_compressed,
				     ptoa(-delta));
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa(-delta));
		}
		break;
	case VM_PURGABLE_VOLATILE:
	case VM_PURGABLE_EMPTY:
		if (delta > 0) {
			ledger_credit(owner->ledger,
				      task_ledgers.purgeable_volatile_compressed,
				      ptoa(delta));
		} else {
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_volatile_compressed,
				     ptoa(-delta));
		}
		break;
	default:
		panic("vm_purgeable_compressed_update(): "
		      "unexpected purgable %d for object %p\n",
		      object->purgable, object);
	}
}