/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/sched_prim.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <libkern/OSDebug.h>

#include <mach/mach_types.h>

#include <machine/limits.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>                 /* kmem_alloc */
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug.h>

/*
 * LOCK ORDERING for task-owned purgeable objects
 *
 * Whenever we need to hold multiple locks while adding to, removing from,
 * or scanning a task's task_objq list of VM objects it owns, locks should
 * be taken in this order:
 *
 * VM object ==> vm_purgeable_queue_lock ==> owner_task->task_objq_lock
 *
 * If one needs to acquire the VM object lock after any of the other 2 locks,
 * one needs to use vm_object_lock_try() and, if that fails, release the
 * other locks and retake them all in the correct order.
 */

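/*
 * Illustrative sketch of that retry pattern (hypothetical, not an actual
 * call site in this file; vm_purgeable_disown() below uses a simpler
 * variant that restarts its whole scan).  Assumes both queue locks are
 * already held when the VM object lock is needed:
 *
 *    if (!vm_object_lock_try(object)) {
 *        // release the other locks...
 *        task_objq_unlock(owner_task);
 *        lck_mtx_unlock(&vm_purgeable_queue_lock);
 *        mutex_pause(collisions++);
 *        // ...and retake everything in the correct order
 *        vm_object_lock(object);
 *        lck_mtx_lock(&vm_purgeable_queue_lock);
 *        task_objq_lock(owner_task);
 *    }
 */
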
extern vm_pressure_level_t memorystatus_vm_pressure_level;

struct token {
    token_cnt_t count;
    token_idx_t prev;
    token_idx_t next;
};

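/*
 * Tokens live in a single kernel-allocated array ("tokens", below) and are
 * referred to by array index rather than by pointer, so a token_idx_t of 0
 * doubles as a NULL value and index 0 is never handed out.  Free tokens are
 * chained into a singly-linked free list through their "next" field, headed
 * by token_free_idx.  A token's "count" is a delta: the number of pages by
 * which it ripens later than its predecessor on the same queue.
 */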
struct token *tokens;
token_idx_t token_q_max_cnt = 0;
vm_size_t token_q_cur_size = 0;

token_idx_t token_free_idx = 0;         /* head of free queue */
token_idx_t token_init_idx = 1;         /* token 0 is reserved!! */
int32_t token_new_pagecount = 0;        /* count of pages that will
                                         * be added onto token queue */

int available_for_purge = 0;            /* increase when ripe token
                                         * added, decrease when ripe
                                         * token removed.
                                         * protected by page_queue_lock
                                         */

static int token_q_allocating = 0;      /* flag for singlethreading
                                         * allocator */

struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
queue_head_t purgeable_nonvolatile_queue;
int purgeable_nonvolatile_count;

decl_lck_mtx_data(,vm_purgeable_queue_lock)

static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);

#if MACH_ASSERT
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
    int token_cnt = 0, page_cnt = 0;
    token_idx_t token = queue->token_q_head;
    token_idx_t unripe = 0;
    int our_inactive_count;

#if DEVELOPMENT
    static unsigned lightweight_check = 0;

    /*
     * Due to performance impact, only perform this check
     * every 100 times on DEVELOPMENT kernels.
     */
    if (lightweight_check++ < 100) {
        return;
    }

    lightweight_check = 0;
#endif

    while (token) {
        if (tokens[token].count != 0) {
            assert(queue->token_q_unripe);
            if (unripe == 0) {
                assert(token == queue->token_q_unripe);
                unripe = token;
            }
            page_cnt += tokens[token].count;
        }
        if (tokens[token].next == 0)
            assert(queue->token_q_tail == token);

        token_cnt++;
        token = tokens[token].next;
    }

    if (unripe)
        assert(queue->token_q_unripe == unripe);
    assert(token_cnt == queue->debug_count_tokens);

    /* obsolete queue doesn't maintain token counts */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
        our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
        assert(our_inactive_count >= 0);
        assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
    }
}
#endif

/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
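/*
 * A new token's count is seeded from queue->new_pages: the number of pages
 * that joined the inactive queue since the last token was added to this
 * queue.  The token ripens (its count reaches 0) once that many more pages
 * have been aged by vm_purgeable_q_advance_all().
 */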
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    /* new token */
    token_idx_t token;
    enum purgeable_q_type i;

find_available_token:

    if (token_free_idx) {                           /* unused tokens available */
        token = token_free_idx;
        token_free_idx = tokens[token_free_idx].next;
    } else if (token_init_idx < token_q_max_cnt) {  /* lazy token array init */
        token = token_init_idx;
        token_init_idx++;
    } else {                                        /* allocate more memory */
        /* Wait if another thread is inside the memory alloc section */
        while (token_q_allocating) {
            wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
                                              LCK_SLEEP_DEFAULT,
                                              (event_t)&token_q_allocating,
                                              THREAD_UNINT);
            if (res != THREAD_AWAKENED)
                return KERN_ABORTED;
        }

        /* Check whether memory is still maxed out */
        if (token_init_idx < token_q_max_cnt)
            goto find_available_token;

        /* Still no memory. Allocate some. */
        token_q_allocating = 1;

        /* Drop page queue lock so we can allocate */
        vm_page_unlock_queues();

        struct token *new_loc;
        vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
        kern_return_t result;

        if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
            result = KERN_RESOURCE_SHORTAGE;
        } else {
            if (token_q_cur_size) {
                result = kmem_realloc(kernel_map,
                                      (vm_offset_t) tokens,
                                      token_q_cur_size,
                                      (vm_offset_t *) &new_loc,
                                      alloc_size, VM_KERN_MEMORY_OSFMK);
            } else {
                result = kmem_alloc(kernel_map,
                                    (vm_offset_t *) &new_loc,
                                    alloc_size, VM_KERN_MEMORY_OSFMK);
            }
        }

        vm_page_lock_queues();

        if (result) {
            /* Unblock waiting threads */
            token_q_allocating = 0;
            thread_wakeup((event_t)&token_q_allocating);
            return result;
        }

        /* If we get here, we allocated new memory. Update pointers and
         * dealloc old range */
        struct token *old_tokens = tokens;
        tokens = new_loc;
        vm_size_t old_token_q_cur_size = token_q_cur_size;
        token_q_cur_size = alloc_size;
        token_q_max_cnt = (token_idx_t) (token_q_cur_size /
                                         sizeof(struct token));
        assert(token_init_idx < token_q_max_cnt);   /* We must have a free token now */

        if (old_token_q_cur_size) {                 /* clean up old mapping */
            vm_page_unlock_queues();
            /* kmem_realloc leaves the old region mapped. Get rid of it. */
            kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
            vm_page_lock_queues();
        }

        /* Unblock waiting threads */
        token_q_allocating = 0;
        thread_wakeup((event_t)&token_q_allocating);

        goto find_available_token;
    }

    assert(token);

    /*
     * the new pagecount we got needs to be applied to all queues except
     * obsolete
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
        assert(pages >= 0);
        assert(pages <= TOKEN_COUNT_MAX);
        purgeable_queues[i].new_pages = (int32_t) pages;
        assert(purgeable_queues[i].new_pages == pages);
    }
    token_new_pagecount = 0;

    /* set token counter value */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
        tokens[token].count = queue->new_pages;
    else
        tokens[token].count = 0;    /* all obsolete items are
                                     * ripe immediately */
    queue->new_pages = 0;

    /* put token on token counter list */
    tokens[token].next = 0;
    if (queue->token_q_tail == 0) {
        assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
        queue->token_q_head = token;
        tokens[token].prev = 0;
    } else {
        tokens[queue->token_q_tail].next = token;
        tokens[token].prev = queue->token_q_tail;
    }
    if (queue->token_q_unripe == 0) {   /* only ripe tokens (token
                                         * count == 0) in queue */
        if (tokens[token].count > 0)
            queue->token_q_unripe = token;  /* first unripe token */
        else
            available_for_purge++;  /* added a ripe token?
                                     * increase available count */
    }
    queue->token_q_tail = token;

#if MACH_ASSERT
    queue->debug_count_tokens++;
    /* Check both queues, since we modified the new_pages count on each */
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
                          queue->type,
                          tokens[token].count,  /* num pages on token
                                                 * (last token) */
                          queue->debug_count_tokens,
                          0,
                          0);
#endif

    return KERN_SUCCESS;
}

/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token;
    token = queue->token_q_head;

    assert(token);

    if (token) {
        assert(queue->token_q_tail);
        if (queue->token_q_head == queue->token_q_unripe) {
            /* no ripe tokens... must move unripe pointer */
            queue->token_q_unripe = tokens[token].next;
        } else {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        }

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        queue->token_q_head = tokens[token].next;
        if (queue->token_q_head) {
            tokens[queue->token_q_head].count += tokens[token].count;
            tokens[queue->token_q_head].prev = 0;
        } else {
            /* currently no other tokens in the queue */
            /*
             * the page count must be added to the next newly
             * created token
             */
            queue->new_pages += tokens[token].count;
            /* if head is zero, tail is too */
            queue->token_q_tail = 0;
        }

#if MACH_ASSERT
        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count,    /* num pages on new
                                                                     * first token */
                              token_new_pagecount,  /* num pages waiting for
                                                     * next token */
                              available_for_purge,
                              0);
#endif
    }
    return token;
}

static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    token_idx_t token;
    token = queue->token_q_tail;

    assert(token);

    if (token) {
        assert(queue->token_q_head);

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        if (queue->token_q_unripe == 0) {
            /* we're removing a ripe token. decrease count */
            available_for_purge--;
            assert(available_for_purge >= 0);
        } else if (queue->token_q_unripe == token) {
            /* we're removing the only unripe token */
            queue->token_q_unripe = 0;
        }

        if (token == queue->token_q_head) {
            /* token is the last one in the queue */
            queue->token_q_head = 0;
            queue->token_q_tail = 0;
        } else {
            token_idx_t new_tail;

            new_tail = tokens[token].prev;

            assert(new_tail);
            assert(tokens[new_tail].next == token);

            queue->token_q_tail = new_tail;
            tokens[new_tail].next = 0;
        }

        queue->new_pages += tokens[token].count;

#if MACH_ASSERT
        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count,    /* num pages on new
                                                                     * first token */
                              token_new_pagecount,  /* num pages waiting for
                                                     * next token */
                              available_for_purge,
                              0);
#endif
    }
    return token;
}

/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    token_idx_t token = vm_purgeable_token_remove_first(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}

void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    token_idx_t token = vm_purgeable_token_remove_last(queue);

    if (token) {
        /* stick removed token on free queue */
        tokens[token].next = token_free_idx;
        tokens[token].prev = 0;
        token_free_idx = token;
    }
}

/* Call with page queue locked. */
void
vm_purgeable_q_advance_all(void)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    /* check queue counters - if they get really large, scale them back.
     * They tend to get that large when there is no purgeable queue action */
    int i;
    if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) { /* a system idling for years might get there */
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
            int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
            assert(pages >= 0);
            assert(pages <= TOKEN_COUNT_MAX);
            purgeable_queues[i].new_pages = (int32_t) pages;
            assert(purgeable_queues[i].new_pages == pages);
        }
        token_new_pagecount = 0;
    }

    /*
     * Decrement token counters. A token counter can be zero; this means the
     * object is ripe to be purged. It is not purged immediately, because that
     * could cause several objects to be purged even if purging one would satisfy
     * the memory needs. Instead, the pageout thread purges one after the other
     * by calling vm_purgeable_object_purge_one and then rechecking the memory
     * balance.
     *
     * No need to advance the obsolete queue - all items are ripe there,
     * always.
     */
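    /*
     * Worked example (for illustration only): with token counts [0, 2, 3]
     * on a queue, the head token is ripe (already counted in
     * available_for_purge) and token_q_unripe points at the second token.
     * Each pass below with num_pages = 1 decrements that count; after two
     * passes it reaches 0, the token ripens, available_for_purge goes up
     * by one, and token_q_unripe advances to the count-3 token.
     */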
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue = &purgeable_queues[i];
        uint32_t num_pages = 1;

        /* Iterate over tokens as long as there are unripe tokens. */
        while (queue->token_q_unripe) {
            if (tokens[queue->token_q_unripe].count && num_pages) {
                tokens[queue->token_q_unripe].count -= 1;
                num_pages -= 1;
            }

            if (tokens[queue->token_q_unripe].count == 0) {
                queue->token_q_unripe = tokens[queue->token_q_unripe].next;
                available_for_purge++;
                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
                                      queue->type,
                                      tokens[queue->token_q_head].count,    /* num pages on new
                                                                             * first token */
                                      0,
                                      available_for_purge,
                                      0);
                continue;   /* One token ripened. Make sure to
                             * check the next. */
            }
            if (num_pages == 0)
                break;      /* Current token not ripe and no more pages.
                             * Work done. */
        }

        /*
         * If there are no unripe tokens in the queue, decrement the
         * new_pages counter instead. new_pages can be negative, but must be
         * canceled out by token_new_pagecount -- since the inactive queue
         * as a whole always contains a nonnegative number of pages.
         */
        if (!queue->token_q_unripe) {
            queue->new_pages -= num_pages;
            assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
        }
#if MACH_ASSERT
        vm_purgeable_token_check_queue(queue);
#endif
    }
}

/*
 * Grab any ripe object and purge it: check the obsolete queue first, then go
 * through each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *    Yes - purge it. Remove token.
 *    No - check other queue. Is there an object?
 *         No - increment group, then go to (1)
 *         Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *               token from other queue and migrate unripe token from this
 *               queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
    /* return token to free list. advance token list. */
    token_idx_t new_head = tokens[queue->token_q_head].next;
    tokens[queue->token_q_head].next = token_free_idx;
    tokens[queue->token_q_head].prev = 0;
    token_free_idx = queue->token_q_head;
    queue->token_q_head = new_head;
    tokens[new_head].prev = 0;
    if (new_head == 0)
        queue->token_q_tail = 0;

#if MACH_ASSERT
    queue->debug_count_tokens--;
    vm_purgeable_token_check_queue(queue);
#endif

    available_for_purge--;
    assert(available_for_purge >= 0);
}

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
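/*
 * Worked example (for illustration only): suppose the migrated token is 10
 * pages from ripening and queue2's tokens carry the delta counts [4, 3, 8].
 * The insertion loop below consumes 4, then 3 (leaving count = 3), stops at
 * the count-8 token, inserts the new token before it with count 3, and
 * shrinks the count-8 token to 5 -- so every token keeps its original
 * absolute ripening position.
 */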
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(queue->token_q_head);

    if (tokens[queue->token_q_head].count == 0) {
        /* This queue has a ripe token. Remove. */
        vm_purgeable_token_remove_ripe(queue);
    } else {
        assert(queue2);
        /*
         * queue2 must have a ripe token. Remove, and migrate one
         * from queue to queue2.
         */
        vm_purgeable_token_remove_ripe(queue2);
        /* migrate unripe token */
        token_idx_t token;
        token_cnt_t count;

        /* remove token from queue1 */
        assert(queue->token_q_unripe == queue->token_q_head);   /* queue1 had no ripe
                                                                 * tokens, remember? */
        token = vm_purgeable_token_remove_first(queue);
        assert(token);

        count = tokens[token].count;

        /* migrate to queue2 */
        /* go to migration target loc */

        token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;

        while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
            count -= tokens[token_to_insert_before].count;
            token_to_insert_before = tokens[token_to_insert_before].next;
        }

        /* token_to_insert_before is now set correctly */

        /* should the inserted token become the first unripe token? */
        if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
            queue2->token_q_unripe = token; /* if so, must update unripe pointer */

        /*
         * insert token.
         * if inserting at end, reduce new_pages by that value;
         * otherwise, reduce counter of next token
         */

        tokens[token].count = count;

        if (token_to_insert_before != 0) {
            token_to_insert_after = tokens[token_to_insert_before].prev;

            tokens[token].next = token_to_insert_before;
            tokens[token_to_insert_before].prev = token;

            assert(tokens[token_to_insert_before].count >= count);
            tokens[token_to_insert_before].count -= count;
        } else {
            /* if we ran off the end of the list, the token to insert after is the tail */
            token_to_insert_after = queue2->token_q_tail;

            tokens[token].next = 0;
            queue2->token_q_tail = token;

            assert(queue2->new_pages >= (int32_t) count);
            queue2->new_pages -= count;
        }

        if (token_to_insert_after != 0) {
            tokens[token].prev = token_to_insert_after;
            tokens[token_to_insert_after].next = token;
        } else {
            /* is this case possible? */
            tokens[token].prev = 0;
            queue2->token_q_head = token;
        }

#if MACH_ASSERT
        queue2->debug_count_tokens++;
        vm_purgeable_token_check_queue(queue2);
#endif
    }
}

/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static vm_object_t
vm_purgeable_object_find_and_lock(
    purgeable_q_t queue,
    int group,
    boolean_t pick_ripe)
{
    vm_object_t object, best_object;
    int object_task_importance;
    int best_object_task_importance;
    int best_object_skipped;
    int num_objects_skipped;
    int try_lock_failed = 0;
    int try_lock_succeeded = 0;
    task_t owner;

    best_object = VM_OBJECT_NULL;
    best_object_task_importance = INT_MAX;

    LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
    /*
     * Usually we would pick the first element from a queue. However, we
     * might not be able to get a lock on it, in which case we try the
     * remaining elements in order.
     */

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
                              pick_ripe,
                              group,
                              VM_KERNEL_UNSLIDE_OR_PERM(queue),
                              0,
                              0);

    num_objects_skipped = 0;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq),
         num_objects_skipped++) {

        /*
         * To prevent us looping for an excessively long time, choose
         * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements.
         * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements,
         * we keep going until we find the first eligible object.
         */
        if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
            break;
        }

        if (pick_ripe &&
            !object->purgeable_when_ripe) {
            /* we want an object that has a ripe token */
            continue;
        }

        object_task_importance = 0;

        /*
         * We don't want to use VM_OBJECT_OWNER() here: we want to
         * distinguish kernel-owned and disowned objects.
         * Disowned objects have no owner and will have no importance...
         */
        owner = object->vo_owner;
        if (owner != NULL && owner != VM_OBJECT_OWNER_DISOWNED) {
#if CONFIG_EMBEDDED
#if CONFIG_JETSAM
            object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE);
#endif /* CONFIG_JETSAM */
#else /* CONFIG_EMBEDDED */
            object_task_importance = task_importance_estimate(owner);
#endif /* CONFIG_EMBEDDED */
        }

        if (object_task_importance < best_object_task_importance) {
            if (vm_object_lock_try(object)) {
                try_lock_succeeded++;
                if (best_object != VM_OBJECT_NULL) {
                    /* forget about previous best object */
                    vm_object_unlock(best_object);
                }
                best_object = object;
                best_object_task_importance = object_task_importance;
                best_object_skipped = num_objects_skipped;
                if (best_object_task_importance == 0) {
                    /* can't get any better: stop looking */
                    break;
                }
            } else {
                try_lock_failed++;
            }
        }
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
                              num_objects_skipped,  /* considered objects */
                              try_lock_failed,
                              try_lock_succeeded,
                              VM_KERNEL_UNSLIDE_OR_PERM(best_object),
                              ((best_object == NULL) ? 0 : best_object->resident_page_count));

    object = best_object;

    if (object == VM_OBJECT_NULL) {
        return VM_OBJECT_NULL;
    }

    /* Locked. Great. We'll take it. Remove and return. */
    // printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);

    vm_object_lock_assert_exclusive(object);

    queue_remove(&queue->objq[group], object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);

#if DEBUG
    object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */

    /* keep queue of non-volatile objects */
    queue_enter(&purgeable_nonvolatile_queue, object,
                vm_object_t, objq);
    assert(purgeable_nonvolatile_count >= 0);
    purgeable_nonvolatile_count++;
    assert(purgeable_nonvolatile_count > 0);
    /* one more nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);

#if MACH_ASSERT
    queue->debug_count_objects--;
#endif
    return object;
}

/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
    enum purgeable_q_type i;
    int group;
    vm_object_t object;
    unsigned int purged_count;
    uint32_t collisions;

    purged_count = 0;
    collisions = 0;

restart:
    lck_mtx_lock(&vm_purgeable_queue_lock);
    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue;

        queue = &purgeable_queues[i];

        /*
         * Look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            while (!queue_empty(&queue->objq[group])) {
                object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
                if (object == VM_OBJECT_NULL) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    mutex_pause(collisions++);
                    goto restart;
                }

                lck_mtx_unlock(&vm_purgeable_queue_lock);

                /* Lock the page queue here so we don't hold it
                 * over the whole, lengthy operation */
                if (object->purgeable_when_ripe) {
                    vm_page_lock_queues();
                    vm_purgeable_token_remove_first(queue);
                    vm_page_unlock_queues();
                }

                (void) vm_object_purge(object, 0);
                assert(object->purgable == VM_PURGABLE_EMPTY);
                /* no change in purgeable accounting */

                vm_object_unlock(object);
                purged_count++;
                goto restart;
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
                          purged_count, /* # of purged objects */
                          0,
                          available_for_purge,
                          0,
                          0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}

boolean_t
vm_purgeable_object_purge_one_unlocked(
    int force_purge_below_group)
{
    boolean_t retval;

    vm_page_lock_queues();
    retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
    vm_page_unlock_queues();

    return retval;
}

boolean_t
vm_purgeable_object_purge_one(
    int force_purge_below_group,
    int flags)
{
    enum purgeable_q_type i;
    int group;
    vm_object_t object = 0;
    purgeable_q_t queue, queue2;
    boolean_t forced_purge;
    unsigned int resident_page_count;

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_START,
                          force_purge_below_group, flags, 0, 0, 0);

    /* Need the page queue lock since we'll be changing the token queue. */
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Cycle through all queues */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        queue = &purgeable_queues[i];

        if (force_purge_below_group == 0) {
            /*
             * Are there any ripe tokens on this queue? If yes,
             * we'll find an object to purge there
             */
            if (!queue->token_q_head) {
                /* no token: look at next purgeable queue */
                continue;
            }

            if (tokens[queue->token_q_head].count != 0) {
                /* no ripe token: next queue */
                continue;
            }
        }

        /*
         * Now look through all groups, starting from the lowest. If
         * we find an object in that group, try to lock it (this can
         * fail). If locking is successful, we can drop the queue
         * lock, remove a token and then purge the object.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            if (!queue->token_q_head ||
                tokens[queue->token_q_head].count != 0) {
                /* no tokens or no ripe tokens */

                if (group >= force_purge_below_group) {
                    /* no more groups to force-purge */
                    break;
                }

                /*
                 * Try and purge an object in this group
                 * even though no tokens are ripe.
                 */
                if (!queue_empty(&queue->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_delete_first(queue);
                    }
                    forced_purge = TRUE;
                    goto purge_now;
                }

                /* nothing to purge in this group: next group */
                continue;
            }
            if (!queue_empty(&queue->objq[group]) &&
                (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                if (object->purgeable_when_ripe) {
                    vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                }
                forced_purge = FALSE;
                goto purge_now;
            }
            if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
                /* This is the token migration case, and it works between
                 * FIFO and LIFO only */
                queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
                                           PURGEABLE_Q_TYPE_FIFO :
                                           PURGEABLE_Q_TYPE_LIFO];

                if (!queue_empty(&queue2->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    if (object->purgeable_when_ripe) {
                        vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                    }
                    forced_purge = FALSE;
                    goto purge_now;
                }
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    /*
     * because we have to do a try_lock on the objects which could fail,
     * we could end up with no object to purge at this time, even though
     * we have objects in a purgeable state
     */
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
                          0, 0, available_for_purge, 0, 0);

    return FALSE;

purge_now:

    assert(object);
    vm_page_unlock_queues();    /* Unlock for call to vm_object_purge() */
    // printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_owner, task_importance_estimate(object->vo_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
    resident_page_count = object->resident_page_count;
    (void) vm_object_purge(object, flags);
    assert(object->purgable == VM_PURGABLE_EMPTY);
    /* no change in purgeable accounting */
    vm_object_unlock(object);
    vm_page_lock_queues();

    vm_pageout_vminfo.vm_pageout_pages_purged += resident_page_count;

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END,
                          VM_KERNEL_UNSLIDE_OR_PERM(object),    /* purged object */
                          resident_page_count,
                          available_for_purge,
                          0,
                          0);

    return TRUE;
}

/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
    vm_object_lock_assert_exclusive(object);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    assert(object->objq.next != NULL);
    assert(object->objq.prev != NULL);
    queue_remove(&purgeable_nonvolatile_queue, object,
                 vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    assert(purgeable_nonvolatile_count > 0);
    purgeable_nonvolatile_count--;
    assert(purgeable_nonvolatile_count >= 0);
    /* one less nonvolatile object for this object's owner */
    vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), -1);

    if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
        group = 0;

    if (queue->type != PURGEABLE_Q_TYPE_LIFO)   /* fifo and obsolete are
                                                 * fifo-queued */
        queue_enter(&queue->objq[group], object, vm_object_t, objq);    /* last to die */
    else
        queue_enter_first(&queue->objq[group], object, vm_object_t, objq); /* first to die */
    /* one more volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), +1);

    object->purgeable_queue_type = queue->type;
    object->purgeable_queue_group = group;

#if DEBUG
    assert(object->vo_purgeable_volatilizer == NULL);
    object->vo_purgeable_volatilizer = current_task();
    OSBacktrace(&object->purgeable_volatilizer_bt[0],
                ARRAY_COUNT(object->purgeable_volatilizer_bt));
#endif /* DEBUG */

#if MACH_ASSERT
    queue->debug_count_objects++;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);
#endif

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}

/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
    int group;
    enum purgeable_q_type type;
    purgeable_q_t queue;

    vm_object_lock_assert_exclusive(object);

    type = object->purgeable_queue_type;
    group = object->purgeable_queue_group;

    if (type == PURGEABLE_Q_TYPE_MAX) {
        if (object->objq.prev || object->objq.next)
            panic("unmarked object on purgeable q");

        return NULL;
    } else if (!(object->objq.prev && object->objq.next))
        panic("marked object not on purgeable q");

    lck_mtx_lock(&vm_purgeable_queue_lock);

    queue = &purgeable_queues[type];

    queue_remove(&queue->objq[group], object, vm_object_t, objq);
    object->objq.next = NULL;
    object->objq.prev = NULL;
    /* one less volatile object for this object's owner */
    vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);
#if DEBUG
    object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
    /* keep queue of non-volatile objects */
    if (object->alive && !object->terminating) {
        queue_enter(&purgeable_nonvolatile_queue, object,
                    vm_object_t, objq);
        assert(purgeable_nonvolatile_count >= 0);
        purgeable_nonvolatile_count++;
        assert(purgeable_nonvolatile_count > 0);
        /* one more nonvolatile object for this object's owner */
        vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);
    }

#if MACH_ASSERT
    queue->debug_count_objects--;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);
#endif

    lck_mtx_unlock(&vm_purgeable_queue_lock);

    object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
    object->purgeable_queue_group = 0;

    vm_object_lock_assert_exclusive(object);

    return &purgeable_queues[type];
}

void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
    LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    stat->count = stat->size = 0;
    vm_object_t object;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (!target_task || VM_OBJECT_OWNER(object) == target_task) {
            stat->count++;
            stat->size += (object->resident_page_count * PAGE_SIZE);
        }
    }
    return;
}

void
vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
{
    purgeable_q_t queue;
    int group;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Populate fifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);

    /* Populate lifo_data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
        vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);

    /* Populate obsolete data */
    queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);

    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}
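
/*
 * Hypothetical usage sketch (not an actual call site in this file),
 * summing the volatile footprint reported for the current task:
 *
 *    struct vm_purgeable_info info;
 *    uint64_t volatile_size = 0;
 *    int g;
 *
 *    vm_purgeable_stats(&info, current_task());
 *    for (g = 0; g < NUM_VOLATILE_GROUPS; g++)
 *        volatile_size += info.fifo_data[g].size +
 *            info.lifo_data[g].size;
 *    volatile_size += info.obsolete_data.size;
 */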

#if DEVELOPMENT || DEBUG
static void
vm_purgeable_account_volatile_queue(
    purgeable_q_t queue,
    int group,
    task_t task,
    pvm_account_info_t acnt_info)
{
    vm_object_t object;
    uint64_t compressed_count;

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (VM_OBJECT_OWNER(object) == task) {
            compressed_count = vm_compressor_pager_get_count(object->pager);
            acnt_info->pvm_volatile_compressed_count += compressed_count;
            acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }
}

/*
 * Walks the purgeable object queues and calculates the usage
 * associated with the objects for the given task.
 */
kern_return_t
vm_purgeable_account(
    task_t task,
    pvm_account_info_t acnt_info)
{
    queue_head_t *nonvolatile_q;
    vm_object_t object;
    int group;
    int state;
    uint64_t compressed_count;
    purgeable_q_t volatile_q;

    if ((task == NULL) || (acnt_info == NULL)) {
        return KERN_INVALID_ARGUMENT;
    }

    acnt_info->pvm_volatile_count = 0;
    acnt_info->pvm_volatile_compressed_count = 0;
    acnt_info->pvm_nonvolatile_count = 0;
    acnt_info->pvm_nonvolatile_compressed_count = 0;

    lck_mtx_lock(&vm_purgeable_queue_lock);

    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
         !queue_end(nonvolatile_q, (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (VM_OBJECT_OWNER(object) == task) {
            state = object->purgable;
            compressed_count = vm_compressor_pager_get_count(object->pager);
            if (state == VM_PURGABLE_EMPTY) {
                acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_volatile_compressed_count += compressed_count;
            } else {
                acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
                acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
            }
            acnt_info->pvm_nonvolatile_count += object->wired_page_count;
        }
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
        vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
    }
    lck_mtx_unlock(&vm_purgeable_queue_lock);

    acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
    acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
    acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);

    return KERN_SUCCESS;
}
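
/*
 * Hypothetical usage sketch (not an actual call site in this file); note
 * that all four pvm_* fields come back already scaled to bytes:
 *
 *    struct pvm_account_info acnt;
 *
 *    if (vm_purgeable_account(task, &acnt) == KERN_SUCCESS) {
 *        printf("volatile: %llu bytes, nonvolatile: %llu bytes\n",
 *            acnt.pvm_volatile_count, acnt.pvm_nonvolatile_count);
 *    }
 */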
#endif /* DEVELOPMENT || DEBUG */

void
vm_purgeable_disown(
    task_t task)
{
    vm_object_t next_object;
    vm_object_t object;
    int collisions;

    if (task == NULL) {
        return;
    }

    /*
     * Scan the purgeable objects queues for objects owned by "task".
     * This has to be done "atomically" under the "vm_purgeable_queue"
     * lock, to ensure that no new purgeable object gets associated
     * with this task or moved between queues while we're scanning.
     */

    /*
     * Scan non-volatile queue for objects owned by "task".
     */

    collisions = 0;

again:
    if (task->task_purgeable_disowned) {
        /* task has already disowned its purgeable memory */
        assert(task->task_volatile_objects == 0);
        assert(task->task_nonvolatile_objects == 0);
        return;
    }

    lck_mtx_lock(&vm_purgeable_queue_lock);
    task_objq_lock(task);

    task->task_purgeable_disowning = TRUE;

    for (object = (vm_object_t) queue_first(&task->task_objq);
         !queue_end(&task->task_objq, (queue_entry_t) object);
         object = next_object) {
        if (task->task_nonvolatile_objects == 0 &&
            task->task_volatile_objects == 0) {
            /* no more purgeable objects owned by "task" */
            break;
        }

        next_object = (vm_object_t) queue_next(&object->task_objq);
        if (object->purgable == VM_PURGABLE_DENY) {
            /* not a purgeable object: skip */
            continue;
        }

#if DEBUG
        assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
        assert(object->vo_owner == task);
        if (!vm_object_lock_try(object)) {
            lck_mtx_unlock(&vm_purgeable_queue_lock);
            task_objq_unlock(task);
            mutex_pause(collisions++);
            goto again;
        }
        /* transfer ownership to the kernel */
        assert(VM_OBJECT_OWNER(object) != kernel_task);
        vm_object_ownership_change(
            object,
            object->vo_ledger_tag,      /* unchanged */
            VM_OBJECT_OWNER_DISOWNED,   /* new owner */
            TRUE);                      /* old_owner->task_objq locked */
        assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
        vm_object_unlock(object);
    }

    if (__improbable(task->task_volatile_objects != 0 ||
                     task->task_nonvolatile_objects != 0)) {
        panic("%s(%p): volatile=%d nonvolatile=%d q=%p q_first=%p q_last=%p",
              __FUNCTION__,
              task,
              task->task_volatile_objects,
              task->task_nonvolatile_objects,
              &task->task_objq,
              queue_first(&task->task_objq),
              queue_last(&task->task_objq));
    }

    /* there shouldn't be any purgeable objects owned by task now */
    assert(task->task_volatile_objects == 0);
    assert(task->task_nonvolatile_objects == 0);
    assert(task->task_purgeable_disowning);

    /* and we don't need to try and disown again */
    task->task_purgeable_disowned = TRUE;

    lck_mtx_unlock(&vm_purgeable_queue_lock);
    task_objq_unlock(task);
}

static uint64_t
vm_purgeable_queue_purge_task_owned(
	purgeable_q_t	queue,
	int		group,
	task_t		task)
{
	vm_object_t	object = VM_OBJECT_NULL;
	int		collisions = 0;
	uint64_t	num_pages_purged = 0;

	num_pages_purged = 0;
	collisions = 0;

look_again:
	lck_mtx_lock(&vm_purgeable_queue_lock);

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {

		if (object->vo_owner != task) {
			continue;
		}

		/* found an object: try and grab it */
		if (!vm_object_lock_try(object)) {
			lck_mtx_unlock(&vm_purgeable_queue_lock);
			mutex_pause(collisions++);
			goto look_again;
		}
		/* got it ! */

		collisions = 0;

		/* remove object from purgeable queue */
		queue_remove(&queue->objq[group], object,
			     vm_object_t, objq);
		object->objq.next = NULL;
		object->objq.prev = NULL;
		object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
		object->purgeable_queue_group = 0;
		/* one less volatile object for this object's owner */
		assert(object->vo_owner == task);
		vm_purgeable_volatile_owner_update(task, -1);

#if DEBUG
		object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */
		queue_enter(&purgeable_nonvolatile_queue, object,
			    vm_object_t, objq);
		assert(purgeable_nonvolatile_count >= 0);
		purgeable_nonvolatile_count++;
		assert(purgeable_nonvolatile_count > 0);
		/* one more nonvolatile object for this object's owner */
		assert(object->vo_owner == task);
		vm_purgeable_nonvolatile_owner_update(task, +1);

		/* unlock purgeable queues */
		lck_mtx_unlock(&vm_purgeable_queue_lock);

		if (object->purgeable_when_ripe) {
			/* remove a token */
			vm_page_lock_queues();
			vm_purgeable_token_remove_first(queue);
			vm_page_unlock_queues();
		}

		/* purge the object */
		num_pages_purged += vm_object_purge(object, 0);

		assert(object->purgable == VM_PURGABLE_EMPTY);
		/* no change for purgeable accounting */
		vm_object_unlock(object);

		/* we unlocked the purgeable queues, so start over */
		goto look_again;
	}

	lck_mtx_unlock(&vm_purgeable_queue_lock);

	return num_pages_purged;
}

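/*
 * The function above cannot take a VM object lock while holding
 * vm_purgeable_queue_lock without risking deadlock, so it uses
 * vm_object_lock_try() and, on contention, drops the queue lock, pauses,
 * and rescans from the head.  The idiom, distilled:
 */
#if 0	/* distilled sketch of the try-lock/backoff idiom, not compiled */
	int collisions = 0;
retry:
	lck_mtx_lock(&vm_purgeable_queue_lock);
	/* ... walk the queue to some vm_object_t "object" ... */
	if (!vm_object_lock_try(object)) {
		/* can't honor the lock order from here: back out */
		lck_mtx_unlock(&vm_purgeable_queue_lock);
		mutex_pause(collisions++);	/* sleep briefly, then rescan */
		goto retry;
	}
	/* both locks held here: safe to unlink and purge "object" */
#endif
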
uint64_t
vm_purgeable_purge_task_owned(
	task_t	task)
{
	purgeable_q_t	queue = NULL;
	int		group = 0;
	uint64_t	num_pages_purged = 0;

	num_pages_purged = 0;

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
								0,
								task);

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
									group,
									task);

	queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
		num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
									group,
									task);

	return num_pages_purged;
}

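/*
 * vm_purgeable_purge_task_owned() issues one sub-scan for the OBSOLETE
 * queue plus one per volatile group for each of the FIFO and LIFO queues:
 * 1 + 2 * NUM_VOLATILE_GROUPS scans in total.  A caller (names below are
 * hypothetical) would typically convert the returned page count to bytes:
 */
#if 0	/* sketch only, not compiled */
	uint64_t pages, bytes;

	pages = vm_purgeable_purge_task_owned(task);
	bytes = ptoa_64(pages);		/* pages -> bytes */
#endif
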
void
vm_purgeable_nonvolatile_enqueue(
	vm_object_t	object,
	task_t		owner)
{
	vm_object_lock_assert_exclusive(object);

	assert(object->purgable == VM_PURGABLE_NONVOLATILE);
	assert(object->vo_owner == NULL);

	lck_mtx_lock(&vm_purgeable_queue_lock);

	if (owner != NULL &&
	    owner->task_purgeable_disowning) {
		/* task is exiting and no longer tracking purgeable objects */
		owner = VM_OBJECT_OWNER_DISOWNED;
	}
	if (owner == NULL) {
		owner = kernel_task;
	}
#if DEBUG
	OSBacktrace(&object->purgeable_owner_bt[0],
		    ARRAY_COUNT(object->purgeable_owner_bt));
	object->vo_purgeable_volatilizer = NULL;
#endif /* DEBUG */

	vm_object_ownership_change(object,
				   object->vo_ledger_tag, /* tag unchanged */
				   owner,
				   FALSE); /* task_objq_locked */

	assert(object->objq.next == NULL);
	assert(object->objq.prev == NULL);

	queue_enter(&purgeable_nonvolatile_queue, object,
		    vm_object_t, objq);
	assert(purgeable_nonvolatile_count >= 0);
	purgeable_nonvolatile_count++;
	assert(purgeable_nonvolatile_count > 0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}

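/*
 * Sketch of the expected calling convention (an assumption based on the
 * asserts above, not a verbatim call site): the caller holds the object
 * lock exclusively, the object is already VM_PURGABLE_NONVOLATILE, and it
 * has no owner yet.  A NULL owner is re-routed to kernel_task inside, and
 * an exiting owner to VM_OBJECT_OWNER_DISOWNED.
 */
#if 0	/* sketch only, not compiled */
	vm_object_lock(object);
	object->purgable = VM_PURGABLE_NONVOLATILE;
	vm_purgeable_nonvolatile_enqueue(object, current_task());
	vm_object_unlock(object);
#endif
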
void
vm_purgeable_nonvolatile_dequeue(
	vm_object_t	object)
{
	task_t	owner;

	vm_object_lock_assert_exclusive(object);

	owner = VM_OBJECT_OWNER(object);
#if DEBUG
	assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
	if (owner != NULL) {
		/*
		 * Update the owner's ledger to stop accounting
		 * for this object.
		 */
		/* transfer ownership to the kernel */
		assert(VM_OBJECT_OWNER(object) != kernel_task);
		vm_object_ownership_change(
			object,
			object->vo_ledger_tag,	  /* unchanged */
			VM_OBJECT_OWNER_DISOWNED, /* new owner */
			FALSE);	/* old_owner->task_objq locked */
		assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
	}

	lck_mtx_lock(&vm_purgeable_queue_lock);
	assert(object->objq.next != NULL);
	assert(object->objq.prev != NULL);
	queue_remove(&purgeable_nonvolatile_queue, object,
		     vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;
	assert(purgeable_nonvolatile_count > 0);
	purgeable_nonvolatile_count--;
	assert(purgeable_nonvolatile_count >= 0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);

	vm_object_lock_assert_exclusive(object);
}

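/*
 * Sketch of the reverse operation (assumption: the actual call site is in
 * the VM object termination path, outside this file).  Ownership is first
 * pushed to VM_OBJECT_OWNER_DISOWNED so the ledgers stop charging the
 * owner, then the object leaves the nonvolatile queue; the objq check
 * mirrors the asserts above.
 */
#if 0	/* sketch only, not compiled */
	vm_object_lock(object);
	if (object->objq.next != NULL || object->objq.prev != NULL) {
		vm_purgeable_nonvolatile_dequeue(object);
	}
	vm_object_unlock(object);
#endif
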
void
vm_purgeable_accounting(
	vm_object_t	object,
	vm_purgable_t	old_state)
{
	task_t		owner;
	int		resident_page_count;
	int		wired_page_count;
	int		compressed_page_count;
	int		ledger_idx_volatile;
	int		ledger_idx_nonvolatile;
	int		ledger_idx_volatile_compressed;
	int		ledger_idx_nonvolatile_compressed;
	boolean_t	do_footprint;

	vm_object_lock_assert_exclusive(object);
	assert(object->purgable != VM_PURGABLE_DENY);

	owner = VM_OBJECT_OWNER(object);
	if (owner == NULL ||
	    object->purgable == VM_PURGABLE_DENY)
		return;

	vm_object_ledger_tag_ledgers(object,
				     &ledger_idx_volatile,
				     &ledger_idx_nonvolatile,
				     &ledger_idx_volatile_compressed,
				     &ledger_idx_nonvolatile_compressed,
				     &do_footprint);

	resident_page_count = object->resident_page_count;
	wired_page_count = object->wired_page_count;
	if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
	    object->pager != NULL) {
		compressed_page_count =
			vm_compressor_pager_get_count(object->pager);
	} else {
		compressed_page_count = 0;
	}

	if (old_state == VM_PURGABLE_VOLATILE ||
	    old_state == VM_PURGABLE_EMPTY) {
		/* less volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     ledger_idx_volatile,
			     ptoa_64(resident_page_count - wired_page_count));
		/* less compressed volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     ledger_idx_volatile_compressed,
			     ptoa_64(compressed_page_count));

		/* more non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      ledger_idx_nonvolatile,
			      ptoa_64(resident_page_count - wired_page_count));
		/* more compressed non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      ledger_idx_nonvolatile_compressed,
			      ptoa_64(compressed_page_count));
		if (do_footprint) {
			/* more footprint */
			ledger_credit(owner->ledger,
				      task_ledgers.phys_footprint,
				      ptoa_64(resident_page_count
					      + compressed_page_count
					      - wired_page_count));
		}

	} else if (old_state == VM_PURGABLE_NONVOLATILE) {

		/* less non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     ledger_idx_nonvolatile,
			     ptoa_64(resident_page_count - wired_page_count));
		/* less compressed non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
			     ledger_idx_nonvolatile_compressed,
			     ptoa_64(compressed_page_count));
		if (do_footprint) {
			/* less footprint */
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     ptoa_64(resident_page_count
					     + compressed_page_count
					     - wired_page_count));
		}

		/* more volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      ledger_idx_volatile,
			      ptoa_64(resident_page_count - wired_page_count));
		/* more compressed volatile bytes in ledger */
		ledger_credit(owner->ledger,
			      ledger_idx_volatile_compressed,
			      ptoa_64(compressed_page_count));
	} else {
		panic("vm_purgeable_accounting(%p): "
		      "unexpected old_state=%d\n",
		      object, old_state);
	}

	vm_object_lock_assert_exclusive(object);
}

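/*
 * Worked example for the VM_PURGABLE_NONVOLATILE branch above (the object
 * is becoming volatile), with made-up numbers: resident = 100 pages,
 * wired = 10, compressed = 25.  The owner's ledgers move by:
 *
 *	nonvolatile:             -ptoa_64(100 - 10)      = -90 pages
 *	nonvolatile_compressed:  -ptoa_64(25)            = -25 pages
 *	phys_footprint (if on):  -ptoa_64(100 + 25 - 10) = -115 pages
 *	volatile:                +ptoa_64(100 - 10)      = +90 pages
 *	volatile_compressed:     +ptoa_64(25)            = +25 pages
 *
 * Wired pages are accounted separately, which is why they are subtracted
 * from both the volatile/nonvolatile deltas and the footprint delta
 * (resident + compressed - wired).
 */
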
void
vm_purgeable_nonvolatile_owner_update(
	task_t	owner,
	int	delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_nonvolatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects > 0);
	} else {
		assert(owner->task_nonvolatile_objects > delta);
		OSAddAtomic(delta, &owner->task_nonvolatile_objects);
		assert(owner->task_nonvolatile_objects >= 0);
	}
}

void
vm_purgeable_volatile_owner_update(
	task_t	owner,
	int	delta)
{
	if (owner == NULL || delta == 0) {
		return;
	}

	if (delta > 0) {
		assert(owner->task_volatile_objects >= 0);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects > 0);
	} else {
		assert(owner->task_volatile_objects > delta);
		OSAddAtomic(delta, &owner->task_volatile_objects);
		assert(owner->task_volatile_objects >= 0);
	}
}

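/*
 * Both owner-count updaters above use the same pattern: an assert on the
 * precondition, a lock-free OSAddAtomic on the per-task counter, and an
 * assert on the postcondition.  They are used in matched pairs, as in
 * vm_purgeable_queue_purge_task_owned() earlier in this file:
 */
#if 0	/* sketch only, not compiled */
	/* object moves from a volatile queue to the nonvolatile queue: */
	vm_purgeable_volatile_owner_update(task, -1);
	vm_purgeable_nonvolatile_owner_update(task, +1);
#endif
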
void
vm_object_owner_compressed_update(
	vm_object_t	object,
	int		delta)
{
	task_t		owner;
	int		ledger_idx_volatile;
	int		ledger_idx_nonvolatile;
	int		ledger_idx_volatile_compressed;
	int		ledger_idx_nonvolatile_compressed;
	boolean_t	do_footprint;

	vm_object_lock_assert_exclusive(object);

	owner = VM_OBJECT_OWNER(object);

	if (delta == 0 ||
	    !object->internal ||
	    (object->purgable == VM_PURGABLE_DENY &&
	     ! object->vo_ledger_tag) ||
	    owner == NULL) {
		/* not an owned purgeable (or tagged) VM object: nothing to update */
		return;
	}

	vm_object_ledger_tag_ledgers(object,
				     &ledger_idx_volatile,
				     &ledger_idx_nonvolatile,
				     &ledger_idx_volatile_compressed,
				     &ledger_idx_nonvolatile_compressed,
				     &do_footprint);
	switch (object->purgable) {
	case VM_PURGABLE_DENY:
		/* not purgeable: must be ledger-tagged */
		assert(object->vo_ledger_tag != VM_OBJECT_LEDGER_TAG_NONE);
		/* fallthru */
	case VM_PURGABLE_NONVOLATILE:
		if (delta > 0) {
			ledger_credit(owner->ledger,
				      ledger_idx_nonvolatile_compressed,
				      ptoa_64(delta));
			if (do_footprint) {
				ledger_credit(owner->ledger,
					      task_ledgers.phys_footprint,
					      ptoa_64(delta));
			}
		} else {
			ledger_debit(owner->ledger,
				     ledger_idx_nonvolatile_compressed,
				     ptoa_64(-delta));
			if (do_footprint) {
				ledger_debit(owner->ledger,
					     task_ledgers.phys_footprint,
					     ptoa_64(-delta));
			}
		}
		break;
	case VM_PURGABLE_VOLATILE:
	case VM_PURGABLE_EMPTY:
		if (delta > 0) {
			ledger_credit(owner->ledger,
				      ledger_idx_volatile_compressed,
				      ptoa_64(delta));
		} else {
			ledger_debit(owner->ledger,
				     ledger_idx_volatile_compressed,
				     ptoa_64(-delta));
		}
		break;
	default:
		panic("vm_purgeable_compressed_update(): "
		      "unexpected purgable %d for object %p\n",
		      object->purgable, object);
	}
}
2d21ac55 | 1793 | } |