/*
 * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/unistd.h>
#include <sys/priv.h>

#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <kern/task.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>

#include <sys/kern_overrides.h>
#include <sys/bsdtask_info.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/msgbuf.h>
#include <sys/kern_memorystatus.h>

/* Mutex for global system override state */
static lck_mtx_t sys_override_lock;
static lck_grp_t *sys_override_mtx_grp;
static lck_attr_t *sys_override_mtx_attr;
static lck_grp_attr_t *sys_override_mtx_grp_attr;

/*
 * Assertion counts for system properties (add new ones for each new mechanism)
 *
 * The assertion count management for system overrides is as follows:
 *
 * - All assertion counts are protected by the sys_override_lock.
 *
 * - Each caller of system_override() increments the assertion count for the
 * mechanism it specified in the flags. The caller then blocks for the
 * timeout specified in the system call.
 *
 * - At the end of the timeout, the caller thread wakes up and decrements the
 * assertion count for the mechanism it originally took an assertion on.
 *
 * - If another caller calls system_override() to disable the override for a
 * mechanism, it simply disables the mechanism without changing any
 * assertion counts. That way, the assertion counts remain properly balanced.
 *
 * One thing to note is that a SYS_OVERRIDE_DISABLE disables the overrides
 * for a mechanism irrespective of how many clients requested that override.
 * That makes the implementation simpler and avoids keeping a lot of
 * process-specific state in the kernel.
 *
 */
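/*
 * Illustrative walkthrough (not part of the mechanism itself): if two
 * entitled callers both request SYS_OVERRIDE_IO_THROTTLE, the sequence is
 * io_throttle_assert_cnt 0 -> 1 (callout enables the override) -> 2. As each
 * caller's timeout expires, the count drops 2 -> 1 -> 0, and the callout
 * disables the override only on the final 1 -> 0 transition. A
 * SYS_OVERRIDE_DISABLE request restores default behavior immediately,
 * without modifying the counts, so the remaining sleepers still balance
 * their assertions when they wake up.
 */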
static int64_t io_throttle_assert_cnt;
static int64_t cpu_throttle_assert_cnt;
static int64_t fast_jetsam_assert_cnt;

/* Wait Channel for system override */
static uint64_t sys_override_wait;

/* Global variable to indicate if system_override is enabled */
int sys_override_enabled;

/* Helper routines */
static void system_override_begin(uint64_t flags);
static void system_override_end(uint64_t flags);
static void system_override_abort(uint64_t flags);
static void system_override_callouts(uint64_t flags, boolean_t enable_override);
static __attribute__((noinline)) void PROCESS_OVERRIDING_SYSTEM_DEFAULTS(uint64_t timeout);

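/*
 * Routine to initialize the global state for the system override mechanism:
 * the mutex protecting the assertion counts, the counts themselves, and the
 * flag that marks the syscall as usable.
 */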
void
init_system_override()
{
	sys_override_mtx_grp_attr = lck_grp_attr_alloc_init();
	sys_override_mtx_grp = lck_grp_alloc_init("system_override", sys_override_mtx_grp_attr);
	sys_override_mtx_attr = lck_attr_alloc_init();
	lck_mtx_init(&sys_override_lock, sys_override_mtx_grp, sys_override_mtx_attr);
	io_throttle_assert_cnt = cpu_throttle_assert_cnt = fast_jetsam_assert_cnt = 0;
	sys_override_enabled = 1;
}

/* system call implementation */
int
system_override(__unused struct proc *p, struct system_override_args * uap, __unused int32_t *retval)
{
	uint64_t timeout = uap->timeout;
	uint64_t flags = uap->flags;
	int error = 0;

	/* Check credentials for caller. Only entitled processes are allowed to make this call. */
	if ((error = priv_check_cred(kauth_cred_get(), PRIV_SYSTEM_OVERRIDE, 0))) {
		goto out;
	}

	/* Check to see if sane flags are specified. */
	if ((flags & ~SYS_OVERRIDE_FLAGS_MASK) != 0) {
		error = EINVAL;
		goto out;
	}

	/* Make sure that the system override syscall has been initialized */
	if (!sys_override_enabled) {
		error = EINVAL;
		goto out;
	}

	lck_mtx_lock(&sys_override_lock);

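	/*
	 * A disable request turns the specified overrides off immediately without
	 * touching the assertion counts; an enable request takes an assertion on
	 * each specified mechanism, blocks for the requested timeout, and then
	 * drops those assertions.
	 */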
	if (flags & SYS_OVERRIDE_DISABLE) {
		flags &= ~SYS_OVERRIDE_DISABLE;
		system_override_abort(flags);
	} else {
		system_override_begin(flags);
		PROCESS_OVERRIDING_SYSTEM_DEFAULTS(timeout);
		system_override_end(flags);
	}

	lck_mtx_unlock(&sys_override_lock);

out:
	return error;
}

/*
 * Helper routines for enabling/disabling system overrides for various mechanisms.
 * These routines should be called with the sys_override_lock held. Each subsystem
 * which is hooked into the override service provides two routines:
 *
 * - void sys_override_foo_init(void);
 * Routine to initialize the subsystem or the data needed for the override to work.
 * This routine is optional; if a subsystem needs it, it should be invoked from
 * init_system_override().
 *
 * - void sys_override_foo(boolean_t enable_override);
 * Routine to enable/disable the override mechanism for that subsystem. A value of
 * true indicates that the mechanism should be overridden and the special behavior
 * should begin. A false value indicates that the subsystem should return to default
 * behavior. This routine is mandatory and should be invoked by the helper routines
 * below when the flags passed to the syscall match the subsystem. It should
 * preferably be idempotent.
 */

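/*
 * Hypothetical sketch of what a new subsystem hook could look like (the
 * SYS_OVERRIDE_NETWORK_THROTTLE flag and sys_override_network_throttle()
 * routine below are illustrative assumptions and do not exist in this file):
 *
 *	static void
 *	sys_override_network_throttle(boolean_t enable_override)
 *	{
 *		if (enable_override) {
 *			// Begin the special (override) behavior for the subsystem.
 *		} else {
 *			// Restore the subsystem's default behavior.
 *		}
 *	}
 *
 * Wiring it up would mean adding the new flag to SYS_OVERRIDE_FLAGS_MASK, a
 * case for it in system_override_callouts(), and an assertion count handled
 * in system_override_begin(), system_override_end() and system_override_abort().
 */
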
static void
system_override_callouts(uint64_t flags, boolean_t enable_override)
{
	switch (flags) {
	case SYS_OVERRIDE_IO_THROTTLE:
		if (enable_override) {
			KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_THROTTLE, IO_THROTTLE_DISABLE) | DBG_FUNC_START,
			    current_proc()->p_pid, 0, 0, 0, 0);
		} else {
			KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_THROTTLE, IO_THROTTLE_DISABLE) | DBG_FUNC_END,
			    current_proc()->p_pid, 0, 0, 0, 0);
		}
		sys_override_io_throttle(enable_override);
		break;

	case SYS_OVERRIDE_CPU_THROTTLE:
		if (enable_override) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CPU_THROTTLE_DISABLE) | DBG_FUNC_START,
			    current_proc()->p_pid, 0, 0, 0, 0);
		} else {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CPU_THROTTLE_DISABLE) | DBG_FUNC_END,
			    current_proc()->p_pid, 0, 0, 0, 0);
		}
		sys_override_cpu_throttle(enable_override);
		break;

	case SYS_OVERRIDE_FAST_JETSAM:
		if (enable_override) {
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FAST_JETSAM) | DBG_FUNC_START,
			    current_proc()->p_pid, 0, 0, 0, 0);
		} else {
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FAST_JETSAM) | DBG_FUNC_END,
			    current_proc()->p_pid, 0, 0, 0, 0);
		}
#if CONFIG_JETSAM
		memorystatus_fast_jetsam_override(enable_override);
#endif /* CONFIG_JETSAM */
		break;

	default:
		panic("Unknown option to system_override_callouts(): %llu\n", flags);
	}
}

/*
 * system_override_begin(uint64_t flags)
 *
 * Routine to start a system override if the assertion count
 * transitions from 0->1 for a specified mechanism.
 */
static void
system_override_begin(uint64_t flags)
{
	lck_mtx_assert(&sys_override_lock, LCK_MTX_ASSERT_OWNED);

	if (flags & SYS_OVERRIDE_IO_THROTTLE) {
		if (io_throttle_assert_cnt == 0) {
			system_override_callouts(SYS_OVERRIDE_IO_THROTTLE, true);
		}
		io_throttle_assert_cnt++;
	}

	if (flags & SYS_OVERRIDE_CPU_THROTTLE) {
		if (cpu_throttle_assert_cnt == 0) {
			system_override_callouts(SYS_OVERRIDE_CPU_THROTTLE, true);
		}
		cpu_throttle_assert_cnt++;
	}

	if (flags & SYS_OVERRIDE_FAST_JETSAM) {
		if (fast_jetsam_assert_cnt == 0) {
			system_override_callouts(SYS_OVERRIDE_FAST_JETSAM, true);
		}
		fast_jetsam_assert_cnt++;
	}
}

/*
 * system_override_end(uint64_t flags)
 *
 * Routine to end a system override if the assertion count
 * transitions from 1->0 for a specified mechanism.
 */
static void
system_override_end(uint64_t flags)
{
	lck_mtx_assert(&sys_override_lock, LCK_MTX_ASSERT_OWNED);

	if (flags & SYS_OVERRIDE_IO_THROTTLE) {
		assert(io_throttle_assert_cnt > 0);
		io_throttle_assert_cnt--;
		if (io_throttle_assert_cnt == 0) {
			system_override_callouts(SYS_OVERRIDE_IO_THROTTLE, false);
		}
	}

	if (flags & SYS_OVERRIDE_CPU_THROTTLE) {
		assert(cpu_throttle_assert_cnt > 0);
		cpu_throttle_assert_cnt--;
		if (cpu_throttle_assert_cnt == 0) {
			system_override_callouts(SYS_OVERRIDE_CPU_THROTTLE, false);
		}
	}

	if (flags & SYS_OVERRIDE_FAST_JETSAM) {
		assert(fast_jetsam_assert_cnt > 0);
		fast_jetsam_assert_cnt--;
		if (fast_jetsam_assert_cnt == 0) {
			system_override_callouts(SYS_OVERRIDE_FAST_JETSAM, false);
		}
	}
}

/*
 * system_override_abort(uint64_t flags)
 *
 * Routine to abort a system override (if one was active)
 * irrespective of the assertion counts and number of blocked
 * requestors.
 */
static void
system_override_abort(uint64_t flags)
{
	lck_mtx_assert(&sys_override_lock, LCK_MTX_ASSERT_OWNED);

	if ((flags & SYS_OVERRIDE_IO_THROTTLE) && (io_throttle_assert_cnt > 0)) {
		system_override_callouts(SYS_OVERRIDE_IO_THROTTLE, false);
	}

	if ((flags & SYS_OVERRIDE_CPU_THROTTLE) && (cpu_throttle_assert_cnt > 0)) {
		system_override_callouts(SYS_OVERRIDE_CPU_THROTTLE, false);
	}

	if ((flags & SYS_OVERRIDE_FAST_JETSAM) && (fast_jetsam_assert_cnt > 0)) {
		system_override_callouts(SYS_OVERRIDE_FAST_JETSAM, false);
	}
}

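/*
 * PROCESS_OVERRIDING_SYSTEM_DEFAULTS(uint64_t timeout)
 *
 * Routine to block the calling thread on the system override wait channel
 * for the requested timeout (specified in nanoseconds). msleep() drops the
 * sys_override_lock while the thread sleeps and reacquires it on wakeup;
 * PCATCH allows a signal to cut the sleep short.
 */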
static __attribute__((noinline)) void
PROCESS_OVERRIDING_SYSTEM_DEFAULTS(uint64_t timeout)
{
	struct timespec ts;
	ts.tv_sec = timeout / NSEC_PER_SEC;
	ts.tv_nsec = timeout - ((long)ts.tv_sec * NSEC_PER_SEC);
	msleep((caddr_t)&sys_override_wait, &sys_override_lock, PRIBIO | PCATCH, "system_override", &ts);
}