]>
Commit | Line | Data |
---|---|---|
6d2010ae A |
1 | /* |
2 | * Copyright (c) 2009-2010 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #include <libkern/libkern.h> | |
30 | #include <mach/mach_types.h> | |
31 | #include <mach/task.h> | |
32 | #include <sys/proc_internal.h> | |
33 | #include <sys/event.h> | |
34 | #include <sys/eventvar.h> | |
35 | #include <kern/locks.h> | |
36 | #include <sys/queue.h> | |
37 | #include <kern/vm_pressure.h> | |
38 | #include <sys/malloc.h> | |
39 | #include <sys/errno.h> | |
40 | #include <sys/systm.h> | |
41 | #include <sys/types.h> | |
42 | #include <sys/sysctl.h> | |
316670eb | 43 | #include <kern/assert.h> |
39236c6e | 44 | #include <kern/task.h> |
316670eb | 45 | #include <vm/vm_pageout.h> |
6d2010ae | 46 | |
39236c6e A |
47 | #include <kern/task.h> |
48 | ||
316670eb A |
49 | #if CONFIG_MEMORYSTATUS |
50 | #include <sys/kern_memorystatus.h> | |
51 | #endif | |
52 | ||
53 | /* | |
54 | * This value is the threshold that a process must meet to be considered for scavenging. | |
55 | */ | |
56 | #define VM_PRESSURE_MINIMUM_RSIZE 10 /* MB */ | |
fe8ab488 | 57 | |
316670eb A |
58 | #define VM_PRESSURE_NOTIFY_WAIT_PERIOD 10000 /* milliseconds */ |
59 | ||
39236c6e A |
60 | void vm_pressure_klist_lock(void); |
61 | void vm_pressure_klist_unlock(void); | |
6d2010ae | 62 | |
316670eb | 63 | static void vm_dispatch_memory_pressure(void); |
39236c6e A |
64 | void vm_reset_active_list(void); |
65 | ||
fe8ab488 A |
66 | #if CONFIG_MEMORYSTATUS |
67 | static kern_return_t vm_try_pressure_candidates(boolean_t target_foreground_process); | |
39236c6e | 68 | #endif |
316670eb A |
69 | |
70 | static lck_mtx_t vm_pressure_klist_mutex; | |
6d2010ae A |
71 | |
72 | struct klist vm_pressure_klist; | |
73 | struct klist vm_pressure_klist_dormant; | |
74 | ||
316670eb A |
75 | #if DEBUG |
76 | #define VM_PRESSURE_DEBUG(cond, format, ...) \ | |
77 | do { \ | |
78 | if (cond) { printf(format, ##__VA_ARGS__); } \ | |
79 | } while(0) | |
80 | #else | |
81 | #define VM_PRESSURE_DEBUG(cond, format, ...) | |
82 | #endif | |
83 | ||
/*
 * One-time initialization of the mutex guarding both pressure klists
 * (vm_pressure_klist and vm_pressure_klist_dormant), using the caller's
 * lock group and attributes.
 */
void vm_pressure_init(lck_grp_t *grp, lck_attr_t *attr) {
	lck_mtx_init(&vm_pressure_klist_mutex, grp, attr);
}
87 | ||
/* Acquire the mutex protecting the active and dormant pressure klists. */
void vm_pressure_klist_lock(void) {
	lck_mtx_lock(&vm_pressure_klist_mutex);
}
91 | ||
/* Release the mutex protecting the active and dormant pressure klists. */
void vm_pressure_klist_unlock(void) {
	lck_mtx_unlock(&vm_pressure_klist_mutex);
}
95 | ||
96 | int vm_knote_register(struct knote *kn) { | |
97 | int rv = 0; | |
98 | ||
99 | vm_pressure_klist_lock(); | |
100 | ||
316670eb | 101 | if ((kn->kn_sfflags) & (NOTE_VM_PRESSURE)) { |
6d2010ae | 102 | KNOTE_ATTACH(&vm_pressure_klist, kn); |
316670eb | 103 | } else { |
6d2010ae | 104 | rv = ENOTSUP; |
316670eb | 105 | } |
6d2010ae A |
106 | |
107 | vm_pressure_klist_unlock(); | |
108 | ||
109 | return rv; | |
110 | } | |
111 | ||
112 | void vm_knote_unregister(struct knote *kn) { | |
113 | struct knote *kn_temp; | |
114 | ||
115 | vm_pressure_klist_lock(); | |
116 | ||
316670eb | 117 | VM_PRESSURE_DEBUG(0, "[vm_pressure] process %d cancelling pressure notification\n", kn->kn_kq->kq_p->p_pid); |
6d2010ae A |
118 | |
119 | SLIST_FOREACH(kn_temp, &vm_pressure_klist, kn_selnext) { | |
120 | if (kn_temp == kn) { | |
121 | KNOTE_DETACH(&vm_pressure_klist, kn); | |
122 | vm_pressure_klist_unlock(); | |
123 | return; | |
124 | } | |
125 | } | |
316670eb A |
126 | |
127 | SLIST_FOREACH(kn_temp, &vm_pressure_klist_dormant, kn_selnext) { | |
128 | if (kn_temp == kn) { | |
129 | KNOTE_DETACH(&vm_pressure_klist_dormant, kn); | |
130 | vm_pressure_klist_unlock(); | |
131 | return; | |
132 | } | |
133 | } | |
134 | ||
135 | vm_pressure_klist_unlock(); | |
136 | } | |
137 | ||
138 | void vm_pressure_proc_cleanup(proc_t p) | |
139 | { | |
140 | struct knote *kn = NULL; | |
141 | ||
142 | vm_pressure_klist_lock(); | |
143 | ||
144 | VM_PRESSURE_DEBUG(0, "[vm_pressure] process %d exiting pressure notification\n", p->p_pid); | |
145 | ||
146 | SLIST_FOREACH(kn, &vm_pressure_klist, kn_selnext) { | |
147 | if (kn->kn_kq->kq_p == p) { | |
148 | KNOTE_DETACH(&vm_pressure_klist, kn); | |
149 | vm_pressure_klist_unlock(); | |
150 | return; | |
151 | } | |
152 | } | |
153 | ||
154 | SLIST_FOREACH(kn, &vm_pressure_klist_dormant, kn_selnext) { | |
155 | if (kn->kn_kq->kq_p == p) { | |
156 | KNOTE_DETACH(&vm_pressure_klist_dormant, kn); | |
157 | vm_pressure_klist_unlock(); | |
158 | return; | |
159 | } | |
160 | } | |
6d2010ae A |
161 | |
162 | vm_pressure_klist_unlock(); | |
163 | } | |
164 | ||
39236c6e A |
/*
 * Used by the vm_pressure_thread which is
 * signalled from within vm_pageout_scan().
 * Forwards directly to vm_dispatch_memory_pressure().
 */
void consider_vm_pressure_events(void)
{
	vm_dispatch_memory_pressure();
}
173 | ||
fe8ab488 | 174 | #if CONFIG_MEMORYSTATUS |
6d2010ae | 175 | |
316670eb A |
176 | /* Jetsam aware version. Called with lock held */ |
177 | ||
fe8ab488 A |
178 | struct knote *vm_find_knote_from_pid(pid_t, struct klist *); |
179 | ||
180 | struct knote *vm_find_knote_from_pid(pid_t pid, struct klist *list) { | |
316670eb A |
181 | struct knote *kn = NULL; |
182 | ||
39236c6e | 183 | SLIST_FOREACH(kn, list, kn_selnext) { |
316670eb A |
184 | struct proc *p; |
185 | pid_t current_pid; | |
186 | ||
187 | p = kn->kn_kq->kq_p; | |
188 | current_pid = p->p_pid; | |
189 | ||
190 | if (current_pid == pid) { | |
191 | break; | |
6d2010ae A |
192 | } |
193 | } | |
316670eb A |
194 | |
195 | return kn; | |
196 | } | |
6d2010ae | 197 | |
39236c6e A |
198 | int vm_dispatch_pressure_note_to_pid(pid_t pid, boolean_t locked) { |
199 | int ret = EINVAL; | |
200 | struct knote *kn; | |
201 | ||
202 | VM_PRESSURE_DEBUG(1, "vm_dispatch_pressure_note_to_pid(): pid %d\n", pid); | |
203 | ||
204 | if (!locked) { | |
205 | vm_pressure_klist_lock(); | |
206 | } | |
207 | ||
208 | /* | |
209 | * Because we're specifically targeting a process here, we don't care | |
210 | * if a warning has already been sent and it's moved to the dormant | |
211 | * list; check that too. | |
212 | */ | |
213 | kn = vm_find_knote_from_pid(pid, &vm_pressure_klist); | |
214 | if (kn) { | |
215 | KNOTE(&vm_pressure_klist, pid); | |
216 | ret = 0; | |
217 | } else { | |
218 | kn = vm_find_knote_from_pid(pid, &vm_pressure_klist_dormant); | |
fe8ab488 | 219 | if (kn) { |
39236c6e | 220 | KNOTE(&vm_pressure_klist_dormant, pid); |
fe8ab488 | 221 | ret = 0; |
39236c6e A |
222 | } |
223 | } | |
224 | ||
225 | if (!locked) { | |
226 | vm_pressure_klist_unlock(); | |
227 | } | |
228 | ||
229 | return ret; | |
230 | } | |
231 | ||
/*
 * Notify every foreground process that has registered for VM pressure.
 *
 * Foreground knotes are first moved (under both the klist lock and the
 * proc list lock) from the active/dormant lists onto a local dispatch
 * list; notifications are then fired from that list with the proc list
 * lock dropped, and each knote is parked on the dormant list afterwards.
 */
void vm_find_pressure_foreground_candidates(void)
{
	struct knote *kn, *kn_tmp;
	struct klist dispatch_klist = { NULL };

	vm_pressure_klist_lock();
	proc_list_lock();

	/* Find the foreground processes. */
	SLIST_FOREACH_SAFE(kn, &vm_pressure_klist, kn_selnext, kn_tmp) {
		proc_t p = kn->kn_kq->kq_p;

		if (memorystatus_is_foreground_locked(p)) {
			KNOTE_DETACH(&vm_pressure_klist, kn);
			KNOTE_ATTACH(&dispatch_klist, kn);
		}
	}

	/* Dormant entries are eligible too: this is a targeted request. */
	SLIST_FOREACH_SAFE(kn, &vm_pressure_klist_dormant, kn_selnext, kn_tmp) {
		proc_t p = kn->kn_kq->kq_p;

		if (memorystatus_is_foreground_locked(p)) {
			KNOTE_DETACH(&vm_pressure_klist_dormant, kn);
			KNOTE_ATTACH(&dispatch_klist, kn);
		}
	}

	proc_list_unlock();

	/* Dispatch pressure notifications accordingly */
	SLIST_FOREACH_SAFE(kn, &dispatch_klist, kn_selnext, kn_tmp) {
		proc_t p = kn->kn_kq->kq_p;

		proc_list_lock();
		if (p != proc_ref_locked(p)) {
			/* Process is exiting; park its knote on the dormant list. */
			proc_list_unlock();
			KNOTE_DETACH(&dispatch_klist, kn);
			KNOTE_ATTACH(&vm_pressure_klist_dormant, kn);
			continue;
		}
		proc_list_unlock();

		VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d\n", kn->kn_kq->kq_p->p_pid);
		KNOTE(&dispatch_klist, p->p_pid);
		KNOTE_DETACH(&dispatch_klist, kn);
		KNOTE_ATTACH(&vm_pressure_klist_dormant, kn);
		/* Record delivery time so the wait-period throttle applies later. */
		microuptime(&p->vm_pressure_last_notify_tstamp);
		memorystatus_send_pressure_note(p->p_pid);
		proc_rele(p);
	}

	vm_pressure_klist_unlock();
}
285 | ||
39236c6e A |
286 | void vm_find_pressure_candidate(void) |
287 | { | |
288 | struct knote *kn = NULL, *kn_max = NULL; | |
289 | unsigned int resident_max = 0; | |
290 | pid_t target_pid = -1; | |
291 | struct klist dispatch_klist = { NULL }; | |
292 | struct timeval curr_tstamp = {0, 0}; | |
293 | int elapsed_msecs = 0; | |
294 | proc_t target_proc = PROC_NULL; | |
295 | kern_return_t kr = KERN_SUCCESS; | |
296 | ||
297 | microuptime(&curr_tstamp); | |
298 | ||
299 | vm_pressure_klist_lock(); | |
300 | ||
301 | SLIST_FOREACH(kn, &vm_pressure_klist, kn_selnext) {\ | |
302 | struct mach_task_basic_info basic_info; | |
303 | mach_msg_type_number_t size = MACH_TASK_BASIC_INFO_COUNT; | |
304 | unsigned int resident_size = 0; | |
305 | proc_t p = PROC_NULL; | |
306 | struct task* t = TASK_NULL; | |
307 | ||
308 | p = kn->kn_kq->kq_p; | |
309 | proc_list_lock(); | |
310 | if (p != proc_ref_locked(p)) { | |
311 | p = PROC_NULL; | |
312 | proc_list_unlock(); | |
313 | continue; | |
314 | } | |
315 | proc_list_unlock(); | |
316 | ||
317 | t = (struct task *)(p->task); | |
318 | ||
319 | timevalsub(&curr_tstamp, &p->vm_pressure_last_notify_tstamp); | |
320 | elapsed_msecs = curr_tstamp.tv_sec * 1000 + curr_tstamp.tv_usec / 1000; | |
321 | ||
322 | if (elapsed_msecs < VM_PRESSURE_NOTIFY_WAIT_PERIOD) { | |
323 | proc_rele(p); | |
324 | continue; | |
325 | } | |
326 | ||
327 | if (!memorystatus_bg_pressure_eligible(p)) { | |
328 | VM_PRESSURE_DEBUG(1, "[vm_pressure] skipping process %d\n", p->p_pid); | |
329 | proc_rele(p); | |
330 | continue; | |
331 | } | |
332 | ||
333 | if( ( kr = task_info(t, MACH_TASK_BASIC_INFO, (task_info_t)(&basic_info), &size)) != KERN_SUCCESS ) { | |
334 | VM_PRESSURE_DEBUG(1, "[vm_pressure] task_info for pid %d failed\n", p->p_pid); | |
335 | proc_rele(p); | |
336 | continue; | |
337 | } | |
338 | ||
339 | /* | |
340 | * We don't want a small process to block large processes from | |
341 | * being notified again. <rdar://problem/7955532> | |
342 | */ | |
343 | resident_size = (basic_info.resident_size)/(1024 * 1024); | |
344 | if (resident_size >= VM_PRESSURE_MINIMUM_RSIZE) { | |
345 | if (resident_size > resident_max) { | |
346 | resident_max = resident_size; | |
347 | kn_max = kn; | |
348 | target_pid = p->p_pid; | |
349 | target_proc = p; | |
350 | } | |
351 | } else { | |
352 | /* There was no candidate with enough resident memory to scavenge */ | |
353 | VM_PRESSURE_DEBUG(1, "[vm_pressure] threshold failed for pid %d with %u resident...\n", p->p_pid, resident_size); | |
354 | } | |
355 | proc_rele(p); | |
356 | } | |
357 | ||
358 | if (kn_max == NULL || target_pid == -1) { | |
359 | VM_PRESSURE_DEBUG(1, "[vm_pressure] - no target found!\n"); | |
360 | goto exit; | |
361 | } | |
362 | ||
363 | VM_DEBUG_EVENT(vm_pageout_scan, VM_PRESSURE_EVENT, DBG_FUNC_NONE, target_pid, resident_max, 0, 0); | |
364 | VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d with %u resident\n", kn_max->kn_kq->kq_p->p_pid, resident_max); | |
365 | ||
366 | KNOTE_DETACH(&vm_pressure_klist, kn_max); | |
367 | ||
368 | target_proc = proc_find(target_pid); | |
369 | if (target_proc != PROC_NULL) { | |
370 | KNOTE_ATTACH(&dispatch_klist, kn_max); | |
371 | KNOTE(&dispatch_klist, target_pid); | |
372 | KNOTE_ATTACH(&vm_pressure_klist_dormant, kn_max); | |
373 | memorystatus_send_pressure_note(target_pid); | |
374 | microuptime(&target_proc->vm_pressure_last_notify_tstamp); | |
375 | proc_rele(target_proc); | |
376 | } | |
377 | ||
378 | exit: | |
379 | vm_pressure_klist_unlock(); | |
316670eb | 380 | } |
fe8ab488 | 381 | #endif /* CONFIG_MEMORYSTATUS */ |
316670eb | 382 | |
316670eb | 383 | |
39236c6e | 384 | struct knote * |
fe8ab488 | 385 | vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int level, boolean_t target_foreground_process); |
39236c6e | 386 | |
fe8ab488 A |
387 | kern_return_t vm_pressure_notification_without_levels(boolean_t target_foreground_process); |
388 | kern_return_t vm_pressure_notify_dispatch_vm_clients(boolean_t target_foreground_process); | |
39236c6e A |
389 | |
390 | kern_return_t | |
fe8ab488 | 391 | vm_pressure_notify_dispatch_vm_clients(boolean_t target_foreground_process) |
39236c6e | 392 | { |
316670eb | 393 | vm_pressure_klist_lock(); |
39236c6e A |
394 | |
395 | if (SLIST_EMPTY(&vm_pressure_klist)) { | |
396 | vm_reset_active_list(); | |
316670eb | 397 | } |
39236c6e A |
398 | |
399 | if (!SLIST_EMPTY(&vm_pressure_klist)) { | |
400 | ||
401 | VM_PRESSURE_DEBUG(1, "[vm_pressure] vm_dispatch_memory_pressure\n"); | |
402 | ||
fe8ab488 | 403 | if (KERN_SUCCESS == vm_try_pressure_candidates(target_foreground_process)) { |
39236c6e A |
404 | vm_pressure_klist_unlock(); |
405 | return KERN_SUCCESS; | |
406 | } | |
407 | } | |
408 | ||
409 | VM_PRESSURE_DEBUG(1, "[vm_pressure] could not find suitable event candidate\n"); | |
410 | ||
316670eb | 411 | vm_pressure_klist_unlock(); |
39236c6e A |
412 | |
413 | return KERN_FAILURE; | |
316670eb A |
414 | } |
415 | ||
39236c6e A |
/*
 * Hand pressure handling off to the memorystatus subsystem.
 * FALSE => not targeting a specific foreground process.
 */
static void vm_dispatch_memory_pressure(void)
{
	memorystatus_update_vm_pressure(FALSE);
}
316670eb | 420 | |
39236c6e A |
421 | extern vm_pressure_level_t |
422 | convert_internal_pressure_level_to_dispatch_level(vm_pressure_level_t); | |
316670eb | 423 | |
39236c6e | 424 | struct knote * |
fe8ab488 | 425 | vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int level, boolean_t target_foreground_process) |
316670eb | 426 | { |
39236c6e A |
427 | struct knote *kn = NULL, *kn_max = NULL; |
428 | unsigned int resident_max = 0; | |
39236c6e A |
429 | struct timeval curr_tstamp = {0, 0}; |
430 | int elapsed_msecs = 0; | |
431 | int selected_task_importance = 0; | |
432 | static int pressure_snapshot = -1; | |
433 | boolean_t pressure_increase = FALSE; | |
434 | ||
435 | if (level != -1) { | |
436 | ||
437 | if (pressure_snapshot == -1) { | |
438 | /* | |
439 | * Initial snapshot. | |
440 | */ | |
441 | pressure_snapshot = level; | |
442 | pressure_increase = TRUE; | |
443 | } else { | |
444 | ||
445 | if (level >= pressure_snapshot) { | |
446 | pressure_increase = TRUE; | |
447 | } else { | |
448 | pressure_increase = FALSE; | |
449 | } | |
450 | ||
451 | pressure_snapshot = level; | |
452 | } | |
453 | } | |
454 | ||
455 | if ((level > 0) && (pressure_increase) == TRUE) { | |
456 | /* | |
457 | * We'll start by considering the largest | |
458 | * unimportant task in our list. | |
459 | */ | |
460 | selected_task_importance = INT_MAX; | |
461 | } else { | |
462 | /* | |
463 | * We'll start by considering the largest | |
464 | * important task in our list. | |
465 | */ | |
466 | selected_task_importance = 0; | |
467 | } | |
316670eb A |
468 | |
469 | microuptime(&curr_tstamp); | |
39236c6e A |
470 | |
471 | SLIST_FOREACH(kn, candidate_list, kn_selnext) { | |
472 | ||
316670eb A |
473 | unsigned int resident_size = 0; |
474 | proc_t p = PROC_NULL; | |
475 | struct task* t = TASK_NULL; | |
39236c6e A |
476 | int curr_task_importance = 0; |
477 | boolean_t consider_knote = FALSE; | |
316670eb A |
478 | |
479 | p = kn->kn_kq->kq_p; | |
480 | proc_list_lock(); | |
481 | if (p != proc_ref_locked(p)) { | |
482 | p = PROC_NULL; | |
483 | proc_list_unlock(); | |
484 | continue; | |
485 | } | |
486 | proc_list_unlock(); | |
487 | ||
fe8ab488 A |
488 | #if CONFIG_MEMORYSTATUS |
489 | if (target_foreground_process == TRUE && !memorystatus_is_foreground_locked(p)) { | |
490 | /* | |
491 | * Skip process not marked foreground. | |
492 | */ | |
493 | proc_rele(p); | |
494 | continue; | |
495 | } | |
496 | #endif /* CONFIG_MEMORYSTATUS */ | |
497 | ||
316670eb A |
498 | t = (struct task *)(p->task); |
499 | ||
500 | timevalsub(&curr_tstamp, &p->vm_pressure_last_notify_tstamp); | |
501 | elapsed_msecs = curr_tstamp.tv_sec * 1000 + curr_tstamp.tv_usec / 1000; | |
fe8ab488 | 502 | |
39236c6e | 503 | if ((level == -1) && (elapsed_msecs < VM_PRESSURE_NOTIFY_WAIT_PERIOD)) { |
316670eb A |
504 | proc_rele(p); |
505 | continue; | |
506 | } | |
507 | ||
39236c6e A |
508 | if (level != -1) { |
509 | /* | |
510 | * For the level based notifications, check and see if this knote is | |
511 | * registered for the current level. | |
512 | */ | |
513 | vm_pressure_level_t dispatch_level = convert_internal_pressure_level_to_dispatch_level(level); | |
fe8ab488 | 514 | |
39236c6e A |
515 | if ((kn->kn_sfflags & dispatch_level) == 0) { |
516 | proc_rele(p); | |
517 | continue; | |
518 | } | |
519 | } | |
fe8ab488 A |
520 | |
521 | #if CONFIG_MEMORYSTATUS | |
522 | if (target_foreground_process == FALSE && !memorystatus_bg_pressure_eligible(p)) { | |
523 | VM_PRESSURE_DEBUG(1, "[vm_pressure] skipping process %d\n", p->p_pid); | |
316670eb | 524 | proc_rele(p); |
fe8ab488 A |
525 | continue; |
526 | } | |
527 | #endif /* CONFIG_MEMORYSTATUS */ | |
316670eb | 528 | |
39236c6e A |
529 | curr_task_importance = task_importance_estimate(t); |
530 | ||
316670eb | 531 | /* |
fe8ab488 A |
532 | * We don't want a small process to block large processes from |
533 | * being notified again. <rdar://problem/7955532> | |
534 | */ | |
535 | resident_size = (get_task_phys_footprint(t))/(1024*1024ULL); //(MB); | |
39236c6e | 536 | |
316670eb | 537 | if (resident_size >= VM_PRESSURE_MINIMUM_RSIZE) { |
39236c6e A |
538 | |
539 | if (level > 0) { | |
540 | /* | |
541 | * Warning or Critical Pressure. | |
542 | */ | |
543 | if (pressure_increase) { | |
fe8ab488 A |
544 | if ((curr_task_importance < selected_task_importance) || |
545 | ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) { | |
546 | ||
547 | /* | |
548 | * We have found a candidate process which is: | |
549 | * a) at a lower importance than the current selected process | |
550 | * OR | |
551 | * b) has importance equal to that of the current selected process but is larger | |
552 | */ | |
553 | ||
39236c6e A |
554 | if (task_has_been_notified(t, level) == FALSE) { |
555 | consider_knote = TRUE; | |
556 | } | |
557 | } | |
558 | } else { | |
fe8ab488 A |
559 | if ((curr_task_importance > selected_task_importance) || |
560 | ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) { | |
561 | ||
562 | /* | |
563 | * We have found a candidate process which is: | |
564 | * a) at a higher importance than the current selected process | |
565 | * OR | |
566 | * b) has importance equal to that of the current selected process but is larger | |
567 | */ | |
568 | ||
39236c6e A |
569 | if (task_has_been_notified(t, level) == FALSE) { |
570 | consider_knote = TRUE; | |
571 | } | |
572 | } | |
573 | } | |
574 | } else if (level == 0) { | |
575 | /* | |
576 | * Pressure back to normal. | |
577 | */ | |
fe8ab488 A |
578 | if ((curr_task_importance > selected_task_importance) || |
579 | ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) { | |
39236c6e A |
580 | |
581 | if ((task_has_been_notified(t, kVMPressureWarning) == TRUE) || (task_has_been_notified(t, kVMPressureCritical) == TRUE)) { | |
582 | consider_knote = TRUE; | |
583 | } | |
584 | } | |
585 | } else if (level == -1) { | |
586 | ||
587 | /* | |
588 | * Simple (importance and level)-free behavior based solely on RSIZE. | |
589 | */ | |
590 | if (resident_size > resident_max) { | |
591 | consider_knote = TRUE; | |
592 | } | |
593 | } | |
594 | ||
595 | ||
596 | if (consider_knote) { | |
597 | resident_max = resident_size; | |
598 | kn_max = kn; | |
599 | selected_task_importance = curr_task_importance; | |
600 | consider_knote = FALSE; /* reset for the next candidate */ | |
601 | } | |
316670eb A |
602 | } else { |
603 | /* There was no candidate with enough resident memory to scavenge */ | |
604 | VM_PRESSURE_DEBUG(0, "[vm_pressure] threshold failed for pid %d with %u resident...\n", p->p_pid, resident_size); | |
605 | } | |
606 | proc_rele(p); | |
607 | } | |
608 | ||
39236c6e A |
609 | if (kn_max) { |
610 | VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d with %u resident\n", kn_max->kn_kq->kq_p->p_pid, resident_max); | |
316670eb A |
611 | } |
612 | ||
39236c6e A |
613 | return kn_max; |
614 | } | |
615 | ||
/*
 * vm_pressure_klist_lock is held for this routine.
 *
 * Deliver one level-free (NOTE_VM_PRESSURE) notification: pick the best
 * candidate from the active list (falling back to the dormant list only
 * for a targeted foreground request), fire its knote, and park it on the
 * dormant list.  Returns KERN_SUCCESS if a client was notified.
 */
kern_return_t vm_pressure_notification_without_levels(boolean_t target_foreground_process)
{
	struct knote *kn_max = NULL;
	pid_t target_pid = -1;
	struct klist dispatch_klist = { NULL };
	proc_t target_proc = PROC_NULL;
	struct klist *candidate_list = NULL;

	candidate_list = &vm_pressure_klist;

	/* -1 == level-free selection: largest eligible resident size wins. */
	kn_max = vm_pressure_select_optimal_candidate_to_notify(candidate_list, -1, target_foreground_process);

	if (kn_max == NULL) {
		if (target_foreground_process) {
			/*
			 * Doesn't matter if the process had been notified earlier on.
			 * This is a very specific request. Deliver it.
			 */
			candidate_list = &vm_pressure_klist_dormant;
			kn_max = vm_pressure_select_optimal_candidate_to_notify(candidate_list, -1, target_foreground_process);
		}

		if (kn_max == NULL) {
			return KERN_FAILURE;
		}
	}

	target_proc = kn_max->kn_kq->kq_p;

	/* Detach from whichever list the winner was selected on. */
	KNOTE_DETACH(candidate_list, kn_max);

	if (target_proc != PROC_NULL) {

		target_pid = target_proc->p_pid;

		memoryshot(VM_PRESSURE_EVENT, DBG_FUNC_NONE);

		KNOTE_ATTACH(&dispatch_klist, kn_max);
		KNOTE(&dispatch_klist, target_pid);
		KNOTE_ATTACH(&vm_pressure_klist_dormant, kn_max);

#if CONFIG_MEMORYSTATUS
		memorystatus_send_pressure_note(target_pid);
#endif /* CONFIG_MEMORYSTATUS */

		/* Record delivery time for the wait-period throttle. */
		microuptime(&target_proc->vm_pressure_last_notify_tstamp);
	}

	return KERN_SUCCESS;
}
6d2010ae | 669 | |
/*
 * Attempt to notify one pressure client.
 * Returns KERN_SUCCESS if a candidate was notified, KERN_FAILURE otherwise.
 * Caller holds the pressure klist lock.
 */
static kern_return_t vm_try_pressure_candidates(boolean_t target_foreground_process)
{
	/*
	 * This takes care of candidates that use NOTE_VM_PRESSURE.
	 * It's a notification without indication of the level
	 * of memory pressure.
	 */
	return (vm_pressure_notification_without_levels(target_foreground_process));
}
679 | ||
6d2010ae A |
680 | /* |
681 | * Remove all elements from the dormant list and place them on the active list. | |
682 | * Called with klist lock held. | |
683 | */ | |
39236c6e | 684 | void vm_reset_active_list(void) { |
6d2010ae A |
685 | /* Re-charge the main list from the dormant list if possible */ |
686 | if (!SLIST_EMPTY(&vm_pressure_klist_dormant)) { | |
6d2010ae | 687 | struct knote *kn; |
316670eb A |
688 | |
689 | VM_PRESSURE_DEBUG(1, "[vm_pressure] recharging main list from dormant list\n"); | |
690 | ||
6d2010ae A |
691 | while (!SLIST_EMPTY(&vm_pressure_klist_dormant)) { |
692 | kn = SLIST_FIRST(&vm_pressure_klist_dormant); | |
693 | SLIST_REMOVE_HEAD(&vm_pressure_klist_dormant, kn_selnext); | |
694 | SLIST_INSERT_HEAD(&vm_pressure_klist, kn, kn_selnext); | |
695 | } | |
696 | } | |
697 | } |