/*
 * Copyright (c) 2009-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <libkern/libkern.h>
#include <mach/mach_types.h>
#include <mach/task.h>
#include <sys/proc_internal.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <kern/locks.h>
#include <sys/queue.h>
#include <kern/vm_pressure.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <kern/assert.h>
#include <kern/task.h>
#include <vm/vm_pageout.h>

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

/*
 * This value is the threshold that a process must meet to be considered for scavenging.
 */
#define VM_PRESSURE_MINIMUM_RSIZE        10    /* MB */

#define VM_PRESSURE_NOTIFY_WAIT_PERIOD   10000 /* milliseconds */

void vm_pressure_klist_lock(void);
void vm_pressure_klist_unlock(void);

static void vm_dispatch_memory_pressure(void);
void vm_reset_active_list(void);

#if CONFIG_MEMORYSTATUS
static kern_return_t vm_try_pressure_candidates(boolean_t target_foreground_process);
#endif

static lck_mtx_t vm_pressure_klist_mutex;

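/*
 * Registered knotes live on one of two lists: the active list holds
 * clients that are still eligible for a notification; once a client has
 * been notified, its knote is parked on the dormant list until the active
 * list is re-charged (see vm_reset_active_list()).
 */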
struct klist vm_pressure_klist;
struct klist vm_pressure_klist_dormant;

#if DEBUG
#define VM_PRESSURE_DEBUG(cond, format, ...)      \
do {                                              \
    if (cond) { printf(format, ##__VA_ARGS__); }  \
} while(0)
#else
#define VM_PRESSURE_DEBUG(cond, format, ...)
#endif

void vm_pressure_init(lck_grp_t *grp, lck_attr_t *attr) {
    lck_mtx_init(&vm_pressure_klist_mutex, grp, attr);
}

void vm_pressure_klist_lock(void) {
    lck_mtx_lock(&vm_pressure_klist_mutex);
}

void vm_pressure_klist_unlock(void) {
    lck_mtx_unlock(&vm_pressure_klist_mutex);
}

int vm_knote_register(struct knote *kn) {
    int rv = 0;

    vm_pressure_klist_lock();

    if ((kn->kn_sfflags) & (NOTE_VM_PRESSURE)) {
        KNOTE_ATTACH(&vm_pressure_klist, kn);
    } else {
        rv = ENOTSUP;
    }

    vm_pressure_klist_unlock();

    return rv;
}

void vm_knote_unregister(struct knote *kn) {
    struct knote *kn_temp;

    vm_pressure_klist_lock();

    VM_PRESSURE_DEBUG(0, "[vm_pressure] process %d cancelling pressure notification\n", kn->kn_kq->kq_p->p_pid);

    SLIST_FOREACH(kn_temp, &vm_pressure_klist, kn_selnext) {
        if (kn_temp == kn) {
            KNOTE_DETACH(&vm_pressure_klist, kn);
            vm_pressure_klist_unlock();
            return;
        }
    }

    SLIST_FOREACH(kn_temp, &vm_pressure_klist_dormant, kn_selnext) {
        if (kn_temp == kn) {
            KNOTE_DETACH(&vm_pressure_klist_dormant, kn);
            vm_pressure_klist_unlock();
            return;
        }
    }

    vm_pressure_klist_unlock();
}

void vm_pressure_proc_cleanup(proc_t p)
{
    struct knote *kn = NULL;

    vm_pressure_klist_lock();

    VM_PRESSURE_DEBUG(0, "[vm_pressure] process %d exiting pressure notification\n", p->p_pid);

    SLIST_FOREACH(kn, &vm_pressure_klist, kn_selnext) {
        if (kn->kn_kq->kq_p == p) {
            KNOTE_DETACH(&vm_pressure_klist, kn);
            vm_pressure_klist_unlock();
            return;
        }
    }

    SLIST_FOREACH(kn, &vm_pressure_klist_dormant, kn_selnext) {
        if (kn->kn_kq->kq_p == p) {
            KNOTE_DETACH(&vm_pressure_klist_dormant, kn);
            vm_pressure_klist_unlock();
            return;
        }
    }

    vm_pressure_klist_unlock();
}

/*
 * Used by the vm_pressure_thread which is
 * signalled from within vm_pageout_scan().
 */
void consider_vm_pressure_events(void)
{
    vm_dispatch_memory_pressure();
}

#if CONFIG_MEMORYSTATUS

/* Jetsam aware version. Called with lock held */

struct knote *vm_find_knote_from_pid(pid_t, struct klist *);

struct knote *vm_find_knote_from_pid(pid_t pid, struct klist *list) {
    struct knote *kn = NULL;

    SLIST_FOREACH(kn, list, kn_selnext) {
        struct proc *p;
        pid_t current_pid;

        p = kn->kn_kq->kq_p;
        current_pid = p->p_pid;

        if (current_pid == pid) {
            break;
        }
    }

    return kn;
}

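/*
 * Deliver a pressure note to one specific pid.  Both the active and the
 * dormant lists are consulted; 'locked' indicates whether the caller
 * already holds the klist lock.  Returns 0 on delivery, EINVAL otherwise.
 */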
int vm_dispatch_pressure_note_to_pid(pid_t pid, boolean_t locked) {
    int ret = EINVAL;
    struct knote *kn;

    VM_PRESSURE_DEBUG(1, "vm_dispatch_pressure_note_to_pid(): pid %d\n", pid);

    if (!locked) {
        vm_pressure_klist_lock();
    }

    /*
     * Because we're specifically targeting a process here, we don't care
     * if a warning has already been sent and it's moved to the dormant
     * list; check that too.
     */
    kn = vm_find_knote_from_pid(pid, &vm_pressure_klist);
    if (kn) {
        KNOTE(&vm_pressure_klist, pid);
        ret = 0;
    } else {
        kn = vm_find_knote_from_pid(pid, &vm_pressure_klist_dormant);
        if (kn) {
            KNOTE(&vm_pressure_klist_dormant, pid);
            ret = 0;
        }
    }

    if (!locked) {
        vm_pressure_klist_unlock();
    }

    return ret;
}

void vm_find_pressure_foreground_candidates(void)
{
    struct knote *kn, *kn_tmp;
    struct klist dispatch_klist = { NULL };

    vm_pressure_klist_lock();
    proc_list_lock();

    /* Find the foreground processes. */
    SLIST_FOREACH_SAFE(kn, &vm_pressure_klist, kn_selnext, kn_tmp) {
        proc_t p = kn->kn_kq->kq_p;

        if (memorystatus_is_foreground_locked(p)) {
            KNOTE_DETACH(&vm_pressure_klist, kn);
            KNOTE_ATTACH(&dispatch_klist, kn);
        }
    }

    SLIST_FOREACH_SAFE(kn, &vm_pressure_klist_dormant, kn_selnext, kn_tmp) {
        proc_t p = kn->kn_kq->kq_p;

        if (memorystatus_is_foreground_locked(p)) {
            KNOTE_DETACH(&vm_pressure_klist_dormant, kn);
            KNOTE_ATTACH(&dispatch_klist, kn);
        }
    }

    proc_list_unlock();

    /* Dispatch pressure notifications accordingly */
    SLIST_FOREACH_SAFE(kn, &dispatch_klist, kn_selnext, kn_tmp) {
        proc_t p = kn->kn_kq->kq_p;

        proc_list_lock();
        if (p != proc_ref_locked(p)) {
            proc_list_unlock();
            KNOTE_DETACH(&dispatch_klist, kn);
            KNOTE_ATTACH(&vm_pressure_klist_dormant, kn);
            continue;
        }
        proc_list_unlock();

        VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d\n", kn->kn_kq->kq_p->p_pid);
        KNOTE(&dispatch_klist, p->p_pid);
        KNOTE_DETACH(&dispatch_klist, kn);
        KNOTE_ATTACH(&vm_pressure_klist_dormant, kn);
        microuptime(&p->vm_pressure_last_notify_tstamp);
        memorystatus_send_pressure_note(p->p_pid);
        proc_rele(p);
    }

    vm_pressure_klist_unlock();
}

void vm_find_pressure_candidate(void)
{
    struct knote *kn = NULL, *kn_max = NULL;
    unsigned int resident_max = 0;
    pid_t target_pid = -1;
    struct klist dispatch_klist = { NULL };
    struct timeval curr_tstamp = {0, 0};
    int elapsed_msecs = 0;
    proc_t target_proc = PROC_NULL;
    kern_return_t kr = KERN_SUCCESS;

    microuptime(&curr_tstamp);

    vm_pressure_klist_lock();

    SLIST_FOREACH(kn, &vm_pressure_klist, kn_selnext) {
        struct mach_task_basic_info basic_info;
        mach_msg_type_number_t size = MACH_TASK_BASIC_INFO_COUNT;
        unsigned int resident_size = 0;
        proc_t p = PROC_NULL;
        struct task* t = TASK_NULL;

        p = kn->kn_kq->kq_p;
        proc_list_lock();
        if (p != proc_ref_locked(p)) {
            p = PROC_NULL;
            proc_list_unlock();
            continue;
        }
        proc_list_unlock();

        t = (struct task *)(p->task);

        timevalsub(&curr_tstamp, &p->vm_pressure_last_notify_tstamp);
        elapsed_msecs = curr_tstamp.tv_sec * 1000 + curr_tstamp.tv_usec / 1000;

        if (elapsed_msecs < VM_PRESSURE_NOTIFY_WAIT_PERIOD) {
            proc_rele(p);
            continue;
        }

        if (!memorystatus_bg_pressure_eligible(p)) {
            VM_PRESSURE_DEBUG(1, "[vm_pressure] skipping process %d\n", p->p_pid);
            proc_rele(p);
            continue;
        }

        if ((kr = task_info(t, MACH_TASK_BASIC_INFO, (task_info_t)(&basic_info), &size)) != KERN_SUCCESS) {
            VM_PRESSURE_DEBUG(1, "[vm_pressure] task_info for pid %d failed\n", p->p_pid);
            proc_rele(p);
            continue;
        }

        /*
         * We don't want a small process to block large processes from
         * being notified again. <rdar://problem/7955532>
         */
        resident_size = (basic_info.resident_size)/(1024 * 1024);
        if (resident_size >= VM_PRESSURE_MINIMUM_RSIZE) {
            if (resident_size > resident_max) {
                resident_max = resident_size;
                kn_max = kn;
                target_pid = p->p_pid;
                target_proc = p;
            }
        } else {
            /* There was no candidate with enough resident memory to scavenge */
            VM_PRESSURE_DEBUG(1, "[vm_pressure] threshold failed for pid %d with %u resident...\n", p->p_pid, resident_size);
        }
        proc_rele(p);
    }

    if (kn_max == NULL || target_pid == -1) {
        VM_PRESSURE_DEBUG(1, "[vm_pressure] - no target found!\n");
        goto exit;
    }

    VM_DEBUG_CONSTANT_EVENT(vm_pressure_event, VM_PRESSURE_EVENT, DBG_FUNC_NONE, target_pid, resident_max, 0, 0);
    VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d with %u resident\n", kn_max->kn_kq->kq_p->p_pid, resident_max);

    KNOTE_DETACH(&vm_pressure_klist, kn_max);

    target_proc = proc_find(target_pid);
    if (target_proc != PROC_NULL) {
        KNOTE_ATTACH(&dispatch_klist, kn_max);
        KNOTE(&dispatch_klist, target_pid);
        KNOTE_ATTACH(&vm_pressure_klist_dormant, kn_max);
        memorystatus_send_pressure_note(target_pid);
        microuptime(&target_proc->vm_pressure_last_notify_tstamp);
        proc_rele(target_proc);
    }

exit:
    vm_pressure_klist_unlock();
}
#endif /* CONFIG_MEMORYSTATUS */


struct knote *
vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int level, boolean_t target_foreground_process);

kern_return_t vm_pressure_notification_without_levels(boolean_t target_foreground_process);
kern_return_t vm_pressure_notify_dispatch_vm_clients(boolean_t target_foreground_process);

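/*
 * Level-free dispatch path: re-charge the active list from the dormant
 * list if it has drained, then try to notify a candidate.  Returns
 * KERN_SUCCESS if a client was notified, KERN_FAILURE otherwise.
 */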
kern_return_t
vm_pressure_notify_dispatch_vm_clients(boolean_t target_foreground_process)
{
    vm_pressure_klist_lock();

    if (SLIST_EMPTY(&vm_pressure_klist)) {
        vm_reset_active_list();
    }

    if (!SLIST_EMPTY(&vm_pressure_klist)) {

        VM_PRESSURE_DEBUG(1, "[vm_pressure] vm_dispatch_memory_pressure\n");

        if (KERN_SUCCESS == vm_try_pressure_candidates(target_foreground_process)) {
            vm_pressure_klist_unlock();
            return KERN_SUCCESS;
        }
    }

    VM_PRESSURE_DEBUG(1, "[vm_pressure] could not find suitable event candidate\n");

    vm_pressure_klist_unlock();

    return KERN_FAILURE;
}

static void vm_dispatch_memory_pressure(void)
{
    memorystatus_update_vm_pressure(FALSE);
}

extern vm_pressure_level_t
convert_internal_pressure_level_to_dispatch_level(vm_pressure_level_t);

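/*
 * Pick the best knote on 'candidate_list' to notify for 'level':
 *
 *   level == -1: level-free scheme; choose the largest eligible resident
 *      size, subject to the notify-wait period.
 *   level == 0 (pressure back to normal): only tasks previously warned are
 *      considered; prefer higher importance, breaking ties by resident size.
 *   level > 0 (warning/critical): a privileged listener not yet notified at
 *      this level is chosen immediately; otherwise prefer lower-importance
 *      tasks while pressure is rising and higher-importance tasks while it
 *      is falling, breaking ties by resident size.
 *
 * Tasks below VM_PRESSURE_MINIMUM_RSIZE are skipped, and when
 * target_foreground_process is TRUE only foreground processes qualify.
 */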
struct knote *
vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int level, boolean_t target_foreground_process)
{
    struct knote *kn = NULL, *kn_max = NULL;
    unsigned int resident_max = 0;
    struct timeval curr_tstamp = {0, 0};
    int elapsed_msecs = 0;
    int selected_task_importance = 0;
    static int pressure_snapshot = -1;
    boolean_t pressure_increase = FALSE;

    if (level != -1) {

        if (pressure_snapshot == -1) {
            /*
             * Initial snapshot.
             */
            pressure_snapshot = level;
            pressure_increase = TRUE;
        } else {

            if (level >= pressure_snapshot) {
                pressure_increase = TRUE;
            } else {
                pressure_increase = FALSE;
            }

            pressure_snapshot = level;
        }
    }

    if ((level > 0) && (pressure_increase == TRUE)) {
        /*
         * We'll start by considering the largest
         * unimportant task in our list.
         */
        selected_task_importance = INT_MAX;
    } else {
        /*
         * We'll start by considering the largest
         * important task in our list.
         */
        selected_task_importance = 0;
    }

    microuptime(&curr_tstamp);

    SLIST_FOREACH(kn, candidate_list, kn_selnext) {

        unsigned int resident_size = 0;
        proc_t p = PROC_NULL;
        struct task* t = TASK_NULL;
        int curr_task_importance = 0;
        boolean_t consider_knote = FALSE;
        boolean_t privileged_listener = FALSE;

        p = kn->kn_kq->kq_p;
        proc_list_lock();
        if (p != proc_ref_locked(p)) {
            p = PROC_NULL;
            proc_list_unlock();
            continue;
        }
        proc_list_unlock();

#if CONFIG_MEMORYSTATUS
        if (target_foreground_process == TRUE && !memorystatus_is_foreground_locked(p)) {
            /*
             * Skip process not marked foreground.
             */
            proc_rele(p);
            continue;
        }
#endif /* CONFIG_MEMORYSTATUS */

        t = (struct task *)(p->task);

        timevalsub(&curr_tstamp, &p->vm_pressure_last_notify_tstamp);
        elapsed_msecs = curr_tstamp.tv_sec * 1000 + curr_tstamp.tv_usec / 1000;

        if ((level == -1) && (elapsed_msecs < VM_PRESSURE_NOTIFY_WAIT_PERIOD)) {
            proc_rele(p);
            continue;
        }

        if (level != -1) {
            /*
             * For the level based notifications, check and see if this knote is
             * registered for the current level.
             */
            vm_pressure_level_t dispatch_level = convert_internal_pressure_level_to_dispatch_level(level);

            if ((kn->kn_sfflags & dispatch_level) == 0) {
                proc_rele(p);
                continue;
            }
        }

#if CONFIG_MEMORYSTATUS
        if (target_foreground_process == FALSE && !memorystatus_bg_pressure_eligible(p)) {
            VM_PRESSURE_DEBUG(1, "[vm_pressure] skipping process %d\n", p->p_pid);
            proc_rele(p);
            continue;
        }
#endif /* CONFIG_MEMORYSTATUS */

        curr_task_importance = task_importance_estimate(t);

        /*
         * Privileged listeners are only considered in the multi-level pressure scheme
         * AND only if the pressure is increasing.
         */
        if (level > 0) {

            if (task_has_been_notified(t, level) == FALSE) {

                /*
                 * Is this a privileged listener?
                 */
                if (task_low_mem_privileged_listener(t, FALSE, &privileged_listener) == 0) {

                    if (privileged_listener) {
                        kn_max = kn;
                        proc_rele(p);
                        goto done_scanning;
                    }
                }
            } else {
                proc_rele(p);
                continue;
            }
        } else if (level == 0) {

            /*
             * Task wasn't notified when the pressure was increasing and so
             * no need to notify it that the pressure is decreasing.
             */
            if ((task_has_been_notified(t, kVMPressureWarning) == FALSE) && (task_has_been_notified(t, kVMPressureCritical) == FALSE)) {
                proc_rele(p);
                continue;
            }
        }

        /*
         * We don't want a small process to block large processes from
         * being notified again. <rdar://problem/7955532>
         */
        resident_size = (get_task_phys_footprint(t))/(1024*1024ULL); /* MB */

        if (resident_size >= VM_PRESSURE_MINIMUM_RSIZE) {

            if (level > 0) {
                /*
                 * Warning or Critical Pressure.
                 */
                if (pressure_increase) {
                    if ((curr_task_importance < selected_task_importance) ||
                        ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) {

                        /*
                         * We have found a candidate process which is:
                         * a) at a lower importance than the current selected process
                         * OR
                         * b) has importance equal to that of the current selected process but is larger
                         */

                        consider_knote = TRUE;
                    }
                } else {
                    if ((curr_task_importance > selected_task_importance) ||
                        ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) {

                        /*
                         * We have found a candidate process which is:
                         * a) at a higher importance than the current selected process
                         * OR
                         * b) has importance equal to that of the current selected process but is larger
                         */

                        consider_knote = TRUE;
                    }
                }
            } else if (level == 0) {
                /*
                 * Pressure back to normal.
                 */
                if ((curr_task_importance > selected_task_importance) ||
                    ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) {

                    consider_knote = TRUE;
                }
            } else if (level == -1) {

                /*
                 * Simple (importance and level)-free behavior based solely on RSIZE.
                 */
                if (resident_size > resident_max) {
                    consider_knote = TRUE;
                }
            }

            if (consider_knote) {
                resident_max = resident_size;
                kn_max = kn;
                selected_task_importance = curr_task_importance;
                consider_knote = FALSE; /* reset for the next candidate */
            }
        } else {
            /* There was no candidate with enough resident memory to scavenge */
            VM_PRESSURE_DEBUG(0, "[vm_pressure] threshold failed for pid %d with %u resident...\n", p->p_pid, resident_size);
        }
        proc_rele(p);
    }

done_scanning:
    if (kn_max) {
        VM_DEBUG_CONSTANT_EVENT(vm_pressure_event, VM_PRESSURE_EVENT, DBG_FUNC_NONE, kn_max->kn_kq->kq_p->p_pid, resident_max, 0, 0);
        VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d with %u resident\n", kn_max->kn_kq->kq_p->p_pid, resident_max);
    }

    return kn_max;
}

/*
 * vm_pressure_klist_lock is held for this routine.
 */
kern_return_t vm_pressure_notification_without_levels(boolean_t target_foreground_process)
{
    struct knote *kn_max = NULL;
    pid_t target_pid = -1;
    struct klist dispatch_klist = { NULL };
    proc_t target_proc = PROC_NULL;
    struct klist *candidate_list = NULL;

    candidate_list = &vm_pressure_klist;

    kn_max = vm_pressure_select_optimal_candidate_to_notify(candidate_list, -1, target_foreground_process);

    if (kn_max == NULL) {
        if (target_foreground_process) {
            /*
             * Doesn't matter if the process had been notified earlier on.
             * This is a very specific request. Deliver it.
             */
            candidate_list = &vm_pressure_klist_dormant;
            kn_max = vm_pressure_select_optimal_candidate_to_notify(candidate_list, -1, target_foreground_process);
        }

        if (kn_max == NULL) {
            return KERN_FAILURE;
        }
    }

    target_proc = kn_max->kn_kq->kq_p;

    KNOTE_DETACH(candidate_list, kn_max);

    if (target_proc != PROC_NULL) {

        target_pid = target_proc->p_pid;

        memoryshot(VM_PRESSURE_EVENT, DBG_FUNC_NONE);

        KNOTE_ATTACH(&dispatch_klist, kn_max);
        KNOTE(&dispatch_klist, target_pid);
        KNOTE_ATTACH(&vm_pressure_klist_dormant, kn_max);

#if CONFIG_MEMORYSTATUS
        memorystatus_send_pressure_note(target_pid);
#endif /* CONFIG_MEMORYSTATUS */

        microuptime(&target_proc->vm_pressure_last_notify_tstamp);
    }

    return KERN_SUCCESS;
}

static kern_return_t vm_try_pressure_candidates(boolean_t target_foreground_process)
{
    /*
     * This takes care of candidates that use NOTE_VM_PRESSURE.
     * It's a notification without indication of the level
     * of memory pressure.
     */
    return (vm_pressure_notification_without_levels(target_foreground_process));
}

/*
 * Remove all elements from the dormant list and place them on the active list.
 * Called with klist lock held.
 */
void vm_reset_active_list(void) {
    /* Re-charge the main list from the dormant list if possible */
    if (!SLIST_EMPTY(&vm_pressure_klist_dormant)) {
        struct knote *kn;

        VM_PRESSURE_DEBUG(1, "[vm_pressure] recharging main list from dormant list\n");

        while (!SLIST_EMPTY(&vm_pressure_klist_dormant)) {
            kn = SLIST_FIRST(&vm_pressure_klist_dormant);
            SLIST_REMOVE_HEAD(&vm_pressure_klist_dormant, kn_selnext);
            SLIST_INSERT_HEAD(&vm_pressure_klist, kn, kn_selnext);
        }
    }
}
729 | } |