/*
 * Copyright (c) 2009-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <libkern/libkern.h>
#include <mach/mach_types.h>
#include <mach/task.h>
#include <sys/proc_internal.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <kern/locks.h>
#include <sys/queue.h>
#include <kern/vm_pressure.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <kern/assert.h>
#include <vm/vm_pageout.h>

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

/*
 * This value is the threshold that a process must meet to be considered for scavenging.
 */
#define VM_PRESSURE_MINIMUM_RSIZE		10	/* MB */
#define VM_PRESSURE_NOTIFY_WAIT_PERIOD		10000	/* milliseconds */

static void vm_pressure_klist_lock(void);
static void vm_pressure_klist_unlock(void);

static void vm_dispatch_memory_pressure(void);
static kern_return_t vm_try_pressure_candidates(void);
static void vm_reset_active_list(void);

static lck_mtx_t vm_pressure_klist_mutex;

struct klist vm_pressure_klist;
struct klist vm_pressure_klist_dormant;

#if DEBUG
#define VM_PRESSURE_DEBUG(cond, format, ...)		\
do {							\
	if (cond) { printf(format, ##__VA_ARGS__); }	\
} while(0)
#else
#define VM_PRESSURE_DEBUG(cond, format, ...)
#endif

void vm_pressure_init(lck_grp_t *grp, lck_attr_t *attr) {
	lck_mtx_init(&vm_pressure_klist_mutex, grp, attr);
}

static void vm_pressure_klist_lock(void) {
	lck_mtx_lock(&vm_pressure_klist_mutex);
}

static void vm_pressure_klist_unlock(void) {
	lck_mtx_unlock(&vm_pressure_klist_mutex);
}

int vm_knote_register(struct knote *kn) {
	int rv = 0;

	vm_pressure_klist_lock();

	if ((kn->kn_sfflags) & (NOTE_VM_PRESSURE)) {
		KNOTE_ATTACH(&vm_pressure_klist, kn);
	} else {
		rv = ENOTSUP;
	}

	vm_pressure_klist_unlock();

	return rv;
}

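/*
 * Usage sketch (illustration only, not part of the original file): userspace
 * reaches vm_knote_register() by attaching an EVFILT_VM knote through
 * kqueue(2). A minimal sketch, assuming EVFILT_VM and NOTE_VM_PRESSURE are
 * exposed to userspace in this configuration:
 *
 *	int kq = kqueue();
 *	struct kevent ev;
 *	EV_SET(&ev, 0, EVFILT_VM, EV_ADD, NOTE_VM_PRESSURE, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	// register for pressure notes
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	// block until a note is delivered
 */
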
void vm_knote_unregister(struct knote *kn) {
	struct knote *kn_temp;

	vm_pressure_klist_lock();

	VM_PRESSURE_DEBUG(0, "[vm_pressure] process %d cancelling pressure notification\n", kn->kn_kq->kq_p->p_pid);

	SLIST_FOREACH(kn_temp, &vm_pressure_klist, kn_selnext) {
		if (kn_temp == kn) {
			KNOTE_DETACH(&vm_pressure_klist, kn);
			vm_pressure_klist_unlock();
			return;
		}
	}

	SLIST_FOREACH(kn_temp, &vm_pressure_klist_dormant, kn_selnext) {
		if (kn_temp == kn) {
			KNOTE_DETACH(&vm_pressure_klist_dormant, kn);
			vm_pressure_klist_unlock();
			return;
		}
	}

	vm_pressure_klist_unlock();
}

void vm_pressure_proc_cleanup(proc_t p)
{
	struct knote *kn = NULL;

	vm_pressure_klist_lock();

	VM_PRESSURE_DEBUG(0, "[vm_pressure] process %d exiting pressure notification\n", p->p_pid);

	SLIST_FOREACH(kn, &vm_pressure_klist, kn_selnext) {
		if (kn->kn_kq->kq_p == p) {
			KNOTE_DETACH(&vm_pressure_klist, kn);
			vm_pressure_klist_unlock();
			return;
		}
	}

	SLIST_FOREACH(kn, &vm_pressure_klist_dormant, kn_selnext) {
		if (kn->kn_kq->kq_p == p) {
			KNOTE_DETACH(&vm_pressure_klist_dormant, kn);
			vm_pressure_klist_unlock();
			return;
		}
	}

	vm_pressure_klist_unlock();
}

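/*
 * External entry point; expected to be driven from the VM pageout path when
 * the system decides memory pressure should be advertised to interested
 * processes.
 */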
void consider_vm_pressure_events(void)
{
	vm_dispatch_memory_pressure();
}

static void vm_dispatch_memory_pressure(void)
{
	vm_pressure_klist_lock();

	if (!SLIST_EMPTY(&vm_pressure_klist)) {

		VM_PRESSURE_DEBUG(1, "[vm_pressure] vm_dispatch_memory_pressure\n");

		if (vm_try_pressure_candidates() == KERN_SUCCESS) {
			vm_pressure_klist_unlock();
			return;
		}
	}

	VM_PRESSURE_DEBUG(1, "[vm_pressure] could not find suitable event candidate\n");

	vm_reset_active_list();

	vm_pressure_klist_unlock();
}

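/*
 * Design note: if no candidate could be notified, the active list is given a
 * chance to recharge via vm_reset_active_list(); in the non-memorystatus
 * configuration below, that migrates previously notified (dormant) knotes
 * back onto the active list so they become eligible again on the next pass.
 */
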
#if CONFIG_MEMORYSTATUS

/* Jetsam aware version. Called with lock held */
static struct knote *vm_find_knote_from_pid(pid_t pid) {
	struct knote *kn = NULL;

	SLIST_FOREACH(kn, &vm_pressure_klist, kn_selnext) {
		struct proc *p = kn->kn_kq->kq_p;
		pid_t current_pid = p->p_pid;

		if (current_pid == pid) {
			break;
		}
	}

	return kn;
}

static kern_return_t vm_try_pressure_candidates(void)
{
	struct knote *kn = NULL;
	pid_t target_pid = (pid_t)-1;

	/* If memory is low, and there's a pid to target... */
	target_pid = memorystatus_request_vm_pressure_candidate();
	while (target_pid != -1) {
		/* ...look it up in the list, and break if found... */
		if ((kn = vm_find_knote_from_pid(target_pid))) {
			break;
		}

		/* ...otherwise, go round again. */
		target_pid = memorystatus_request_vm_pressure_candidate();
	}

	if (NULL == kn) {
		VM_PRESSURE_DEBUG(0, "[vm_pressure] can't find candidate pid\n");
		return KERN_FAILURE;
	}

	/* ...and dispatch the note */
	VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d, free pages %d\n", kn->kn_kq->kq_p->p_pid, memorystatus_available_pages);

	KNOTE(&vm_pressure_klist, target_pid);

	memorystatus_send_pressure_note(target_pid);

	return KERN_SUCCESS;
}

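/*
 * Note on the handshake above: memorystatus proposes candidate pids in its
 * own priority order; a candidate with no registered knote is simply skipped
 * and the next pid is requested, so only processes that actually listen for
 * NOTE_VM_PRESSURE receive a note.
 */
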
static void vm_reset_active_list(void) {
	/* No-op in this configuration; candidate selection is driven by memorystatus. */
}

#if DEVELOPMENT || DEBUG

/* Test purposes only */
boolean_t vm_dispatch_pressure_note_to_pid(pid_t pid) {
	struct knote *kn;

	vm_pressure_klist_lock();

	kn = vm_find_knote_from_pid(pid);
	if (kn) {
		KNOTE(&vm_pressure_klist, pid);
	}

	vm_pressure_klist_unlock();

	return kn ? TRUE : FALSE;
}

#endif /* DEVELOPMENT || DEBUG */

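/*
 * Illustration only: a DEVELOPMENT/DEBUG build could drive the test hook
 * above from a sysctl handler. The handler below is a hypothetical sketch,
 * not an existing interface:
 *
 *	static int
 *	sysctl_vm_pressure_send SYSCTL_HANDLER_ARGS
 *	{
 *		int pid = 0;
 *		int error = sysctl_handle_int(oidp, &pid, 0, req);
 *		if (error || !req->newptr)
 *			return error;
 *		return vm_dispatch_pressure_note_to_pid((pid_t)pid) ? 0 : ESRCH;
 *	}
 */
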
#else /* CONFIG_MEMORYSTATUS */

static kern_return_t vm_try_pressure_candidates(void)
{
	struct knote *kn = NULL, *kn_max = NULL;
	unsigned int resident_max = 0;
	pid_t target_pid = -1;
	struct klist dispatch_klist = { NULL };
	kern_return_t kr = KERN_SUCCESS;
	struct timeval curr_tstamp = {0, 0};
	int elapsed_msecs = 0;
	proc_t target_proc = PROC_NULL;

	microuptime(&curr_tstamp);

	SLIST_FOREACH(kn, &vm_pressure_klist, kn_selnext) {
		struct mach_task_basic_info basic_info;
		mach_msg_type_number_t size = MACH_TASK_BASIC_INFO_COUNT;
		unsigned int resident_size = 0;
		proc_t p = PROC_NULL;
		struct task *t = TASK_NULL;

		p = kn->kn_kq->kq_p;
		proc_list_lock();
		if (p != proc_ref_locked(p)) {
			p = PROC_NULL;
			proc_list_unlock();
			continue;
		}
		proc_list_unlock();

		t = (struct task *)(p->task);

		/*
		 * Work on a copy of the timestamp: timevalsub() stores its result in
		 * the first argument, and clobbering curr_tstamp here would corrupt
		 * the comparison for every later iteration.
		 */
		struct timeval elapsed_tstamp = curr_tstamp;
		timevalsub(&elapsed_tstamp, &p->vm_pressure_last_notify_tstamp);
		elapsed_msecs = elapsed_tstamp.tv_sec * 1000 + elapsed_tstamp.tv_usec / 1000;

		if (elapsed_msecs < VM_PRESSURE_NOTIFY_WAIT_PERIOD) {
			proc_rele(p);
			continue;
		}

		if ((kr = task_info(t, MACH_TASK_BASIC_INFO, (task_info_t)(&basic_info), &size)) != KERN_SUCCESS) {
			VM_PRESSURE_DEBUG(1, "[vm_pressure] task_info for pid %d failed with %d\n", p->p_pid, kr);
			proc_rele(p);
			continue;
		}

		/*
		 * We don't want a small process to block large processes from
		 * being notified again. <rdar://problem/7955532>
		 */
		resident_size = (basic_info.resident_size)/(MB);
		if (resident_size >= VM_PRESSURE_MINIMUM_RSIZE) {
			if (resident_size > resident_max) {
				resident_max = resident_size;
				kn_max = kn;
				target_pid = p->p_pid;
			}
		} else {
			/* There was no candidate with enough resident memory to scavenge */
			VM_PRESSURE_DEBUG(0, "[vm_pressure] threshold failed for pid %d with %u resident...\n", p->p_pid, resident_size);
		}

		proc_rele(p);
	}

	if (kn_max == NULL || target_pid == -1) {
		return KERN_FAILURE;
	}

	VM_DEBUG_EVENT(vm_pageout_scan, VM_PRESSURE_EVENT, DBG_FUNC_NONE, target_pid, resident_max, 0, 0);
	VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d with %u resident\n", kn_max->kn_kq->kq_p->p_pid, resident_max);

	KNOTE_DETACH(&vm_pressure_klist, kn_max);

	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		KNOTE_ATTACH(&dispatch_klist, kn_max);
		KNOTE(&dispatch_klist, target_pid);
		KNOTE_ATTACH(&vm_pressure_klist_dormant, kn_max);

		microuptime(&target_proc->vm_pressure_last_notify_tstamp);
		proc_rele(target_proc);
	}

	return KERN_SUCCESS;
}

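/*
 * Design note: a notified knote is detached from the active list and parked
 * on the dormant list, so each registered process is notified at most once
 * per cycle; vm_reset_active_list() below starts a new cycle by moving the
 * dormant knotes back.
 */
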
/*
 * Remove all elements from the dormant list and place them on the active list.
 * Called with klist lock held.
 */
static void vm_reset_active_list(void) {
	/* Re-charge the main list from the dormant list if possible */
	if (!SLIST_EMPTY(&vm_pressure_klist_dormant)) {
		struct knote *kn;

		VM_PRESSURE_DEBUG(1, "[vm_pressure] recharging main list from dormant list\n");

		while (!SLIST_EMPTY(&vm_pressure_klist_dormant)) {
			kn = SLIST_FIRST(&vm_pressure_klist_dormant);
			SLIST_REMOVE_HEAD(&vm_pressure_klist_dormant, kn_selnext);
			SLIST_INSERT_HEAD(&vm_pressure_klist, kn, kn_selnext);
		}
	}
}

#endif /* CONFIG_MEMORYSTATUS */