/* apple/xnu: bsd/kern/vm_pressure.c */
/*
 * Copyright (c) 2009-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <libkern/libkern.h>
#include <mach/mach_types.h>
#include <mach/task.h>
#include <sys/proc_internal.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <kern/locks.h>
#include <sys/queue.h>
#include <kern/vm_pressure.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sysctl.h>

void vm_pressure_klist_lock(void);
void vm_pressure_klist_unlock(void);

void vm_dispatch_memory_pressure(void);
int vm_try_terminate_candidates(void);
int vm_try_pressure_candidates(void);
void vm_recharge_active_list(void);

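/*
 * Two knote lists drive the notification scheme: the active list holds
 * knotes that are still eligible for a NOTE_VM_PRESSURE event, and the
 * dormant list holds knotes that have already been notified once and are
 * waiting to be re-armed by vm_recharge_active_list(). Both lists are
 * protected by vm_pressure_klist_mutex.
 */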
struct klist vm_pressure_klist;
struct klist vm_pressure_klist_dormant;

/*
 * Note: the mutex guarding these lists is not defined in this file; it is
 * assumed to be defined and initialized elsewhere in the kernel, so only an
 * extern declaration is added here to let the lock helpers below compile.
 */
extern lck_mtx_t vm_pressure_klist_mutex;

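/* Lock helpers serializing all access to the two klists. */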
void vm_pressure_klist_lock(void) {
    lck_mtx_lock(&vm_pressure_klist_mutex);
}

void vm_pressure_klist_unlock(void) {
    lck_mtx_unlock(&vm_pressure_klist_mutex);
}

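/*
 * Attach a knote to the active pressure list. This is reached when a
 * process registers a kevent carrying NOTE_VM_PRESSURE; any other flag
 * combination is rejected with ENOTSUP.
 *
 * For context, a minimal userland sketch of the registration path follows.
 * It is illustrative only and assumes EVFILT_VM and NOTE_VM_PRESSURE are
 * visible to the userland build (they are not a stable public interface);
 * the ident value is assumed to be ignored by this filter.
 *
 *      #include <sys/types.h>
 *      #include <sys/event.h>
 *      #include <sys/time.h>
 *      #include <stdio.h>
 *
 *      int main(void) {
 *          struct kevent ev, out;
 *          int kq = kqueue();
 *
 *          EV_SET(&ev, 0, EVFILT_VM, EV_ADD, NOTE_VM_PRESSURE, 0, NULL);
 *          if (kq == -1 || kevent(kq, &ev, 1, NULL, 0, NULL) == -1) {
 *              perror("kevent");
 *              return 1;
 *          }
 *          if (kevent(kq, NULL, 0, &out, 1, NULL) == 1 &&
 *              (out.fflags & NOTE_VM_PRESSURE)) {
 *              printf("memory pressure: release caches now\n");
 *          }
 *          return 0;
 *      }
 */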
int vm_knote_register(struct knote *kn) {
    int rv = 0;

    vm_pressure_klist_lock();

    if ((kn->kn_sfflags & (NOTE_VM_PRESSURE))) {
#if DEBUG
        printf("[vm_pressure] process %d registering pressure notification\n", kn->kn_kq->kq_p->p_pid);
#endif
        KNOTE_ATTACH(&vm_pressure_klist, kn);
    } else
        rv = ENOTSUP;

    vm_pressure_klist_unlock();

    return rv;
}

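/*
 * Detach a knote whose registration is going away. The knote may be on
 * either list: search the active list first, and if it is not found there,
 * assume it has already been notified and detach it from the dormant list.
 */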
void vm_knote_unregister(struct knote *kn) {
    struct knote *kn_temp;

    vm_pressure_klist_lock();

#if DEBUG
    printf("[vm_pressure] process %d cancelling pressure notification\n", kn->kn_kq->kq_p->p_pid);
#endif

    SLIST_FOREACH(kn_temp, &vm_pressure_klist, kn_selnext) {
        if (kn_temp == kn) {
            KNOTE_DETACH(&vm_pressure_klist, kn);
            vm_pressure_klist_unlock();
            return;
        }
    }
    KNOTE_DETACH(&vm_pressure_klist_dormant, kn);

    vm_pressure_klist_unlock();
}

/* Interface for event dispatch from vm_pageout_garbage_collect thread */
void consider_pressure_events(void) {
    vm_dispatch_memory_pressure();
}

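/*
 * Deliver a pressure event if possible: pick the best candidate from the
 * active list and notify it. If the active list is empty, or no candidate
 * qualifies, recycle the dormant knotes back onto the active list so a
 * future pressure episode has someone to notify.
 */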
void vm_dispatch_memory_pressure(void) {
    vm_pressure_klist_lock();

    if (!SLIST_EMPTY(&vm_pressure_klist)) {

#if DEBUG
        printf("[vm_pressure] vm_dispatch_memory_pressure\n");
#endif

        if (vm_try_pressure_candidates()) {
            vm_pressure_klist_unlock();
            return;
        }

    }

    /* Else... */

#if DEBUG
    printf("[vm_pressure] could not find suitable event candidate\n");
#endif

    vm_recharge_active_list();

    vm_pressure_klist_unlock();
}

/*
 * Try standard pressure event candidates. Called with klist lock held.
 */
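/*
 * Selection policy, as implemented below: walk the active list, query each
 * candidate's resident size via task_info(TASK_BASIC_INFO), skip disabled
 * knotes and processes below VM_PRESSURE_MINIMUM_RSIZE, and remember the
 * process with the largest resident set. That single knote is then fired
 * and parked on the dormant list, so each registrant is notified at most
 * once per recharge cycle.
 */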
int vm_try_pressure_candidates(void) {
    /*
     * This value is the threshold that a process must meet to be considered for scavenging.
     * If a process has sufficiently little resident memory, there is probably no use scavenging it.
     * At best, we'll scavenge very little memory. At worst, we'll page in code pages or malloc metadata.
     */

#define VM_PRESSURE_MINIMUM_RSIZE (10 * 1024 * 1024)

    struct proc *p_max = NULL;
    unsigned int resident_max = 0;
    struct knote *kn_max = NULL;
    struct knote *kn;

    SLIST_FOREACH(kn, &vm_pressure_klist, kn_selnext) {
        if ((kn != NULL) && (kn->kn_kq != NULL) && (kn->kn_kq->kq_p != NULL)) {
            if (kn->kn_sfflags & NOTE_VM_PRESSURE) {
                struct proc *p = kn->kn_kq->kq_p;
                if (!(kn->kn_status & KN_DISABLED)) {
                    kern_return_t kr = KERN_SUCCESS;
                    struct task *t = (struct task *)(p->task);
                    struct task_basic_info basic_info;
                    mach_msg_type_number_t size = TASK_BASIC_INFO_COUNT;
                    if ((kr = task_info(t, TASK_BASIC_INFO, (task_info_t)(&basic_info), &size)) == KERN_SUCCESS) {
                        unsigned int resident_size = basic_info.resident_size;
                        /*
                         * We don't want a small process to block large processes from
                         * being notified again. <rdar://problem/7955532>
                         */
                        if (resident_size >= VM_PRESSURE_MINIMUM_RSIZE) {
                            if (resident_size > resident_max) {
                                p_max = p;
                                resident_max = resident_size;
                                kn_max = kn;
                            }
                        } else {
#if DEBUG
                            /* There was no candidate with enough resident memory to scavenge */
                            /* This debug print makes too much noise now */
                            //printf("[vm_pressure] threshold failed for pid %d with %u resident, skipping...\n", p->p_pid, resident_size);
#endif
                        }
                    } else {
#if DEBUG
                        printf("[vm_pressure] task_info for pid %d failed with %d\n", p->p_pid, kr);
#endif
                    }
                } else {
#if DEBUG
                    printf("[vm_pressure] pid %d currently disabled, skipping...\n", p->p_pid);
#endif
                }
            }
        } else {
#if DEBUG
            if (kn == NULL) {
                printf("[vm_pressure] kn is NULL\n");
            } else if (kn->kn_kq == NULL) {
                printf("[vm_pressure] kn->kn_kq is NULL\n");
            } else if (kn->kn_kq->kq_p == NULL) {
                printf("[vm_pressure] kn->kn_kq->kq_p is NULL\n");
            }
#endif
        }
    }

    if (kn_max == NULL) return 0;

#if DEBUG
    printf("[vm_pressure] sending event to pid %d with %u resident\n", kn_max->kn_kq->kq_p->p_pid, resident_max);
#endif

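    /*
     * Notify only the chosen knote: detach it from the active list, attach
     * it to a temporary single-entry klist so KNOTE() fires just this one
     * event, and then park it on the dormant list until the next recharge.
     */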
    KNOTE_DETACH(&vm_pressure_klist, kn_max);
    struct klist dispatch_klist = { NULL };
    KNOTE_ATTACH(&dispatch_klist, kn_max);
    KNOTE(&dispatch_klist, NOTE_VM_PRESSURE);
    KNOTE_ATTACH(&vm_pressure_klist_dormant, kn_max);

    return 1;
}


/*
 * Remove all elements from the dormant list and place them on the active list.
 * Called with klist lock held.
 */
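/*
 * Recharging re-arms every knote that has already received its one-shot
 * NOTE_VM_PRESSURE event, so the next pressure episode can notify those
 * processes again once no eligible active candidates remain.
 */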
void vm_recharge_active_list(void) {
    /* Re-charge the main list from the dormant list if possible */
    if (!SLIST_EMPTY(&vm_pressure_klist_dormant)) {
#if DEBUG
        printf("[vm_pressure] recharging main list from dormant list\n");
#endif
        struct knote *kn;
        while (!SLIST_EMPTY(&vm_pressure_klist_dormant)) {
            kn = SLIST_FIRST(&vm_pressure_klist_dormant);
            SLIST_REMOVE_HEAD(&vm_pressure_klist_dormant, kn_selnext);
            SLIST_INSERT_HEAD(&vm_pressure_klist, kn, kn_selnext);
        }
    }
}