]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_sysctl.c
xnu-2050.7.9.tar.gz
[apple/xnu.git] / bsd / kern / kern_sysctl.c
1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66 /*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73 /*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
95 #include <sys/buf.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
98 #include <sys/tty.h>
99 #include <sys/disklabel.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105
106 #include <security/audit/audit.h>
107 #include <kern/kalloc.h>
108
109 #include <mach/machine.h>
110 #include <mach/mach_host.h>
111 #include <mach/mach_types.h>
112 #include <mach/vm_param.h>
113 #include <kern/mach_param.h>
114 #include <kern/task.h>
115 #include <kern/thread.h>
116 #include <kern/lock.h>
117 #include <kern/processor.h>
118 #include <kern/debug.h>
119 #include <vm/vm_kern.h>
120 #include <vm/vm_map.h>
121 #include <mach/host_info.h>
122
123 #include <sys/mount_internal.h>
124 #include <sys/kdebug.h>
125 #include <sys/sysproto.h>
126
127 #include <IOKit/IOPlatformExpert.h>
128 #include <pexpert/pexpert.h>
129
130 #include <machine/machine_routines.h>
131 #include <machine/exec.h>
132
133 #include <vm/vm_protos.h>
134 #include <sys/imgsrc.h>
135
136 #if defined(__i386__) || defined(__x86_64__)
137 #include <i386/cpuid.h>
138 #endif
139
140 #if CONFIG_FREEZE
141 #include <sys/kern_memorystatus.h>
142 #endif
143
144 /*
145 * deliberately setting max requests to really high number
146 * so that runaway settings do not cause MALLOC overflows
147 */
148 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
149
150 extern sysctlfn net_sysctl;
151 extern sysctlfn cpu_sysctl;
152 extern int aio_max_requests;
153 extern int aio_max_requests_per_process;
154 extern int aio_worker_threads;
155 extern int lowpri_IO_window_msecs;
156 extern int lowpri_IO_delay_msecs;
157 extern int nx_enabled;
158 extern int speculative_reads_disabled;
159 extern int ignore_is_ssd;
160 extern unsigned int speculative_prefetch_max;
161 extern unsigned int speculative_prefetch_max_iosize;
162 extern unsigned int preheat_pages_max;
163 extern unsigned int preheat_pages_min;
164 extern long numvnodes;
165
166 extern unsigned int vm_max_delayed_work_limit;
167 extern unsigned int vm_max_batch;
168
169 extern unsigned int vm_page_free_min;
170 extern unsigned int vm_page_free_target;
171 extern unsigned int vm_page_free_reserved;
172 extern unsigned int vm_page_speculative_percentage;
173 extern unsigned int vm_page_speculative_q_age_ms;
174
175 /*
176 * Conditionally allow dtrace to see these functions for debugging purposes.
177 */
178 #ifdef STATIC
179 #undef STATIC
180 #endif
181 #if 0
182 #define STATIC
183 #else
184 #define STATIC static
185 #endif
186
187 extern boolean_t mach_timer_coalescing_enabled;
188
189 STATIC void
190 fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
191 STATIC void
192 fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
193 STATIC void
194 fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
195 STATIC void
196 fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
197 STATIC void
198 fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
199 STATIC void
200 fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
201
202 extern int
203 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
204 #if NFSCLIENT
205 extern int
206 netboot_root(void);
207 #endif
208 int
209 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
210 proc_t p);
211 __private_extern__ kern_return_t
212 reset_vmobjectcache(unsigned int val1, unsigned int val2);
213 int
214 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
215 size_t *sizep, proc_t cur_proc);
216 STATIC int
217 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
218 proc_t cur_proc, int argc_yes);
219 int
220 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
221 size_t newlen, void *sp, int len);
222
223 STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
224 STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
225 STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
226 STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
227 STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
228 #if CONFIG_LCTX
229 STATIC int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
230 #endif
231 int sysdoproc_callback(proc_t p, void *arg);
232
233
234 /* forward declarations for non-static STATIC */
235 STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
236 STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
237 STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
238 STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
239 STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
240 STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
241 STATIC int sysctl_dotranslate SYSCTL_HANDLER_ARGS;
242 STATIC int sysctl_doaffinity SYSCTL_HANDLER_ARGS;
243 #if COUNT_SYSCALLS
244 STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
245 #endif /* COUNT_SYSCALLS */
246 #if !CONFIG_EMBEDDED
247 STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
248 #endif /* !CONFIG_EMBEDDED */
249 STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
250 STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
251 #if DEBUG
252 STATIC int sysctl_dodebug SYSCTL_HANDLER_ARGS;
253 #endif
254 STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
255 STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
256 STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
257 STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
258 STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
259 STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
260 STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
261 STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
262 STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
263 STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
264 STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
265 STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
266 STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
267 #if NFSCLIENT
268 STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
269 #endif
270 #ifdef CONFIG_IMGSRC_ACCESS
271 STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
272 #endif
273 STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
274 STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
275 STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
276 STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
277 STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
278 STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
279 STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
280 STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
281 STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
282 STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
283 STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
284 #if defined(__i386__) || defined(__x86_64__)
285 STATIC int sysctl_sysctl_exec_affinity(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
286 #endif
287 STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
288 STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
289 STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
290 STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
291 STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
292 STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
293
294
295 extern void IORegistrySetOSBuildVersion(char * build_version);
296
297 STATIC void
298 fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
299 {
300 la64->ldavg[0] = la->ldavg[0];
301 la64->ldavg[1] = la->ldavg[1];
302 la64->ldavg[2] = la->ldavg[2];
303 la64->fscale = (user64_long_t)la->fscale;
304 }
305
306 STATIC void
307 fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
308 {
309 la32->ldavg[0] = la->ldavg[0];
310 la32->ldavg[1] = la->ldavg[1];
311 la32->ldavg[2] = la->ldavg[2];
312 la32->fscale = (user32_long_t)la->fscale;
313 }
314
315 /*
316 * sysctl_mem_hold
317 *
318 * Description: Wire down the callers address map on behalf of sysctl's
319 * that perform their own copy operations while holding
320 * locks e.g. in the paging path, which could lead to a
321 * deadlock, or while holding a spinlock.
322 *
323 * Parameters: addr User buffer address
324 * len User buffer length
325 *
326 * Returns: 0 Success
327 * vslock:ENOMEM Insufficient physical pages to wire
328 * vslock:EACCES Bad protection mode
329 * vslock:EINVAL Invalid parameters
330 *
331 * Notes: This code is invoked for the first OID element where the
332 * CTLFLAG_LOCKED is not specified for a given OID node
333  *		element during OID traversal, and is held for all
334 * subsequent node traversals, and only released after the
335 * leaf node handler invocation is complete.
336 *
337  * Legacy:	For legacy sysctl's provided by third party code which
338 * expect funnel protection for calls into their code, this
339 * routine will also take the funnel, which will also only
340 * be released after the leaf node handler is complete.
341 *
342 * This is to support legacy 32 bit BSD KEXTs and legacy 32
343 * bit single threaded filesystem KEXTs and similar code
344 * which relies on funnel protection, e.g. for things like
345 * FSID based sysctl's.
346 *
347 * NEW CODE SHOULD NOT RELY ON THIS BEHAVIOUR! IT WILL BE
348  *		REMOVED IN A FUTURE RELEASE OF Mac OS X!
349 *
350  * Bugs:	This routine does nothing with the new_addr and new_len
351  *		at present, but it should, since reads from the user space
352  *		process address space which could potentially trigger
353  *		paging may also be occurring deep down.  This is due to
354  *		a current limitation of the vslock() routine, which will
355  *		always request a wired mapping be read/write, due to not
356  *		taking an access mode parameter.  Note that this could
357  *		also cause problems for output on architectures where
358  *		write access does not require read access if the current
359  *		mapping lacks read access.
360 *
361 * XXX: To be moved to kern_newsysctl.c to avoid __private_extern__
362 */
363 int sysctl_mem_lock(user_addr_t old_addr, user_size_t old_len, user_addr_t new_addr, user_size_t new_len);
364 int
365 sysctl_mem_lock(__unused user_addr_t old_addr, __unused user_size_t old_len, __unused user_addr_t new_addr, __unused user_size_t new_len)
366 {
367 return 0;
368 }
369
370 /*
371 * Locking and stats
372 */
373
374 /* sysctl() syscall */
/*
 * __sysctl
 *
 * Description:	Implementation of the deprecated sysctl() system call.
 *		Copies in the OID name vector, audits it, applies a
 *		superuser check for writes under CTL_HW/CTL_VM, runs the
 *		MACF policy check, then (for CTL_VFS and CTL_VM only)
 *		takes the kernel funnel and wires the caller's output
 *		buffer before dispatching.  CTL_VFS requests go to
 *		vfs_sysctl(); anything that fails there with ENOTSUP
 *		falls back to userland_sysctl().  The consumed/required
 *		length is written back through uap->oldlenp on the way
 *		out.
 *
 * Parameters:	p		calling process
 *		uap		user argument block (name vector, old/new
 *				buffers and their lengths)
 *		retval		unused
 *
 * Returns:	0		Success
 *		EINVAL		namelen out of range (< 2 or > CTL_MAXNAME)
 *		EFAULT		output buffer not writable (useracc)
 *	copyin:EFAULT		bad name vector address
 *	suser:EPERM		unprivileged write to CTL_HW/CTL_VM
 *		...		plus any error from the MACF check, the
 *				dispatched handler, vslock(), or suulong()
 */
int
__sysctl(proc_t p, struct __sysctl_args *uap, __unused int32_t *retval)
{
	boolean_t funnel_state = FALSE;		/* not held if unknown */
	int error;
	size_t savelen = 0, oldlen = 0, newlen;
	int name[CTL_MAXNAME];
	int error1;
	boolean_t vslock_taken = FALSE;
	boolean_t funnel_taken = FALSE;
#if CONFIG_MACF
	kauth_cred_t my_cred;
#endif

	/*
	 * all top-level sysctl names are non-terminal
	 */
	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);
	error = copyin(uap->name, &name[0], uap->namelen * sizeof(int));
	if (error)
		return (error);

	AUDIT_ARG(ctlname, name, uap->namelen);

	if (proc_is64bit(p)) {
		/* uap->newlen is a size_t value which grows to 64 bits
		 * when coming from a 64-bit process.  since it's doubtful we'll
		 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
		 */
		newlen = CAST_DOWN(size_t, uap->newlen);
	}
	else {
		newlen = uap->newlen;
	}

	/*
	 * XXX TODO:	push down rights check for CTL_HW OIDs; most duplicate
	 * XXX		it anyway, which is a performance sink, and requires use
	 * XXX		of SUID root programs (see <rdar://3915692>).
	 *
	 * Note:	Opt out of non-leaf node enforcement by removing this
	 *		check for the top level OID value, and then adding
	 *		CTLFLAG_ANYBODY to the leaf nodes in question.  Enforce as
	 *		suser for writes in leaf nodes by omitting this flag.
	 *		Enforce with a higher granularity by making the leaf node
	 *		of type SYSCTL_PROC() in order to provide a procedural
	 *		enforcement call site.
	 *
	 * NOTE:	This function is called prior to any subfunctions being
	 *		called with a fallback to userland_sysctl(); as such, this
	 *		permissions check here will veto the fallback operation.
	 */
	/* CTL_UNSPEC is used to get oid to AUTO_OID */
	if (uap->new != USER_ADDR_NULL
	    && ((name[0] == CTL_HW)
	    || (name[0] == CTL_VM))
	    && (error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);

	// XXX need to relocate into each terminal instead of leaving this here...
	// XXX macf preemptory check.
#if CONFIG_MACF
	my_cred = kauth_cred_proc_ref(p);
	error = mac_system_check_sysctl(
	    my_cred,
	    (int *) name,
	    uap->namelen,
	    uap->old,
	    uap->oldlenp,
	    0,		/* XXX 1 for CTL_KERN checks */
	    uap->new,
	    newlen
	    );
	kauth_cred_unref(&my_cred);
	if (error)
		return (error);
#endif

	if (uap->oldlenp != USER_ADDR_NULL) {
		/*
		 * NOTE(review): fuulong() reports a fault by returning -1,
		 * which is not distinguished from a legitimate length here;
		 * the all-ones value is clamped below and any real fault is
		 * presumably caught later by useracc()/copyout — confirm.
		 */
		uint64_t	oldlen64 = fuulong(uap->oldlenp);

		oldlen = CAST_DOWN(size_t, oldlen64);
		/*
		 * If more than 4G, clamp to 4G - useracc() below will catch
		 * with an EFAULT, if it's actually necessary.
		 */
		if (oldlen64 > 0x00000000ffffffffULL)
			oldlen = 0xffffffffUL;
	}

	if ((name[0] == CTL_VFS || name[0] == CTL_VM)) {
		/*
		 * Always take the funnel for CTL_VFS and CTL_VM
		 *
		 * XXX We should also take it for any OID without the
		 * XXX CTLFLAG_LOCKED set on it; fix this later!
		 */
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		funnel_taken = TRUE;

		/*
		 * XXX Take the vslock() only when we are copying out; this
		 * XXX erroneously assumes that the copy in will not cause
		 * XXX a fault if called from the paging path due to the
		 * XXX having been recently touched in order to establish
		 * XXX the input data.  This is a bad assumption.
		 *
		 * Note:	This is overkill, but third parties might
		 *		already call sysctl internally in KEXTs that
		 *		implement mass storage drivers.  If you are
		 *		writing a new KEXT, don't do that.
		 */
		if(uap->old != USER_ADDR_NULL) {
			/* verify writability before wiring the range */
			if (!useracc(uap->old, (user_size_t)oldlen, B_WRITE)) {
				thread_funnel_set(kernel_flock, funnel_state);
				return (EFAULT);
			}

			if (oldlen) {
				if ((error = vslock(uap->old, (user_size_t)oldlen))) {
					thread_funnel_set(kernel_flock, funnel_state);
					return(error);
				}
				/* remember the wired length for vsunlock() below */
				savelen = oldlen;
				vslock_taken = TRUE;
			}
		}
	}

	/*
	 * XXX convert vfs_sysctl subelements to newsysctl; this is hard
	 * XXX because of VFS_NUMMNTOPS being top level.
	 */
	error = ENOTSUP;
	if (name[0] == CTL_VFS) {
		error = vfs_sysctl(name + 1, uap->namelen - 1, uap->old,
			&oldlen, uap->new, newlen, p);
	}

	/* unwire before the fallback dispatch; preserve the first error */
	if (vslock_taken == TRUE) {
		error1 = vsunlock(uap->old, (user_size_t)savelen, B_WRITE);
		if (!error)
			error = error1;
	}

	if ( (name[0] != CTL_VFS) && (error == ENOTSUP) ) {
		size_t	tmp = oldlen;
		error = userland_sysctl(p, name, uap->namelen, uap->old, &tmp,
		                        uap->new, newlen, &oldlen);
	}

	/*
	 * If we took the funnel, which we only do for CTL_VFS and CTL_VM on
	 * 32 bit architectures, then drop it.
	 *
	 * XXX the grabbing and dropping need to move into the leaf nodes,
	 * XXX for sysctl's that are not marked CTLFLAG_LOCKED, but this is
	 * XXX true for the vslock, as well.  We have a start at a routine
	 * to wrapper this (above), but it's not turned on.  The current code
	 * removed the funnel and the vslock() from all but these two top
	 * level OIDs.  Note that VFS only needs to take the funnel if the FS
	 * against which it's operating is not thread safe (but since an FS
	 * can be in the paging path, it still needs to take the vslock()).
	 */
	if (funnel_taken)
		thread_funnel_set(kernel_flock, funnel_state);

	/* ENOMEM still reports the required length back to the caller */
	if ((error) && (error != ENOMEM))
		return (error);

	if (uap->oldlenp != USER_ADDR_NULL)
		error = suulong(uap->oldlenp, oldlen);

	return (error);
}
551
552 /*
553 * Attributes stored in the kernel.
554 */
555 __private_extern__ char corefilename[MAXPATHLEN+1];
556 __private_extern__ int do_coredump;
557 __private_extern__ int sugid_coredump;
558
559 #if COUNT_SYSCALLS
560 __private_extern__ int do_count_syscalls;
561 #endif
562
563 #ifdef INSECURE
564 int securelevel = -1;
565 #else
566 int securelevel;
567 #endif
568
569 STATIC int
570 sysctl_doaffinity SYSCTL_HANDLER_ARGS
571 {
572 __unused int cmd = oidp->oid_arg2; /* subcommand*/
573 int *name = arg1; /* oid element argument vector */
574 int namelen = arg2; /* number of oid element arguments */
575 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
576 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
577 user_addr_t newp = req->newptr; /* user buffer copy in address */
578 // size_t newlen = req->newlen; /* user buffer copy in size */
579
580 int error = ENOTSUP; /* Default to failure */
581
582 proc_t cur_proc = current_proc();
583
584 if (namelen < 1)
585 return (ENOTSUP);
586
587 if (name[0] == 0 && 1 == namelen) {
588 error = sysctl_rdint(oldp, oldlenp, newp,
589 (cur_proc->p_flag & P_AFFINITY) ? 1 : 0);
590 } else if (name[0] == 1 && 2 == namelen) {
591 if (name[1] == 0) {
592 OSBitAndAtomic(~((uint32_t)P_AFFINITY), &cur_proc->p_flag);
593 } else {
594 OSBitOrAtomic(P_AFFINITY, &cur_proc->p_flag);
595 }
596 error = 0;
597 }
598
599 /* adjust index so we return the right required/consumed amount */
600 if (!error)
601 req->oldidx += req->oldlen;
602
603 return (error);
604 }
605 SYSCTL_PROC(_kern, KERN_AFFINITY, affinity, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
606 0, /* Pointer argument (arg1) */
607 0, /* Integer argument (arg2) */
608 sysctl_doaffinity, /* Handler function */
609 NULL, /* Data pointer */
610 "");
611
/*
 * kern.translate
 *
 * Description:	Report whether the process named by name[0] (a pid) has
 *		the P_TRANSLATED flag set, copying out 1 or 0 as an int.
 *		The caller must either own the target process (matching
 *		effective uid) or be the superuser.
 *
 * Returns:	0	Success
 *		ENOTSUP	namelen != 1
 *		EINVAL	no such process
 *		EPERM	uid mismatch and caller is not superuser
 */
STATIC int
sysctl_dotranslate SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
	user_addr_t newp = req->newptr;	/* user buffer copy in address */
	// size_t newlen = req->newlen;	/* user buffer copy in size */
	int error;

	proc_t cur_proc = current_proc();
	proc_t p;
	int istranslated = 0;
	kauth_cred_t my_cred;
	uid_t uid;

	if (namelen != 1)
		return (ENOTSUP);

	/* takes a ref on p; every exit path below must proc_rele() it */
	p = proc_find(name[0]);
	if (p == NULL)
		return (EINVAL);

	/* hold a cred ref just long enough to read the target's uid */
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);
	if ((uid != kauth_cred_getuid(kauth_cred_get()))
		&& suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return (EPERM);
	}

	istranslated = (p->p_flag & P_TRANSLATED);
	proc_rele(p);
	error = sysctl_rdint(oldp, oldlenp, newp,
		(istranslated != 0) ? 1 : 0);

	/* adjust index so we return the right required/consumed amount */
	if (!error)
		req->oldidx += req->oldlen;

	return (error);
}
/*
 * XXX make CTLFLAG_RW so sysctl_rdint() will EPERM on attempts to write;
 * XXX this may not be necessary.
 */
SYSCTL_PROC(_kern, KERN_TRANSLATE, translate, CTLTYPE_NODE|CTLFLAG_RW | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_dotranslate,	/* Handler function */
	NULL,			/* Data pointer */
	"");
667
668 STATIC int
669 sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void *arg1,
670 __unused int arg2, struct sysctl_req *req)
671 {
672 int error;
673 struct uthread *ut = get_bsdthread_info(current_thread());
674 user_addr_t oldp=0, newp=0;
675 size_t *oldlenp=NULL;
676 size_t newlen=0;
677
678 oldp = req->oldptr;
679 oldlenp = &(req->oldlen);
680 newp = req->newptr;
681 newlen = req->newlen;
682
683 /* We want the current length, and maybe the string itself */
684 if(oldlenp) {
685 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
686 size_t currlen = MAXTHREADNAMESIZE - 1;
687
688 if(ut->pth_name)
689 /* use length of current thread name */
690 currlen = strlen(ut->pth_name);
691 if(oldp) {
692 if(*oldlenp < currlen)
693 return ENOMEM;
694 /* NOTE - we do not copy the NULL terminator */
695 if(ut->pth_name) {
696 error = copyout(ut->pth_name,oldp,currlen);
697 if(error)
698 return error;
699 }
700 }
701 /* return length of thread name minus NULL terminator (just like strlen) */
702 req->oldidx = currlen;
703 }
704
705 /* We want to set the name to something */
706 if(newp)
707 {
708 if(newlen > (MAXTHREADNAMESIZE - 1))
709 return ENAMETOOLONG;
710 if(!ut->pth_name)
711 {
712 ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
713 if(!ut->pth_name)
714 return ENOMEM;
715 }
716 bzero(ut->pth_name, MAXTHREADNAMESIZE);
717 error = copyin(newp, ut->pth_name, newlen);
718 if(error)
719 return error;
720 }
721
722 return 0;
723 }
724
725 SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname,"A","");
726
727 #define BSD_HOST 1
728 STATIC int
729 sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
730 {
731 host_basic_info_data_t hinfo;
732 kern_return_t kret;
733 uint32_t size;
734 int changed;
735 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
736 struct _processor_statistics_np *buf;
737 int error;
738
739 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
740 if (kret != KERN_SUCCESS) {
741 return EINVAL;
742 }
743
744 size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
745
746 if (req->oldlen < size) {
747 return EINVAL;
748 }
749
750 MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);
751
752 kret = get_sched_statistics(buf, &size);
753 if (kret != KERN_SUCCESS) {
754 error = EINVAL;
755 goto out;
756 }
757
758 error = sysctl_io_opaque(req, buf, size, &changed);
759 if (error) {
760 goto out;
761 }
762
763 if (changed) {
764 panic("Sched info changed?!");
765 }
766 out:
767 FREE(buf, M_TEMP);
768 return error;
769 }
770
771 SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
772
773 STATIC int
774 sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
775 {
776 boolean_t active;
777 int res;
778
779 if (req->newlen != sizeof(active)) {
780 return EINVAL;
781 }
782
783 res = copyin(req->newptr, &active, sizeof(active));
784 if (res != 0) {
785 return res;
786 }
787
788 return set_sched_stats_active(active);
789 }
790
791 SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
792
793 extern int get_kernel_symfile(proc_t, char **);
794
795 #if COUNT_SYSCALLS
796 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
797
798 extern int nsysent;
799 extern int syscalls_log[];
800 extern const char *syscallnames[];
801
802 STATIC int
803 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
804 {
805 __unused int cmd = oidp->oid_arg2; /* subcommand*/
806 __unused int *name = arg1; /* oid element argument vector */
807 __unused int namelen = arg2; /* number of oid element arguments */
808 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
809 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
810 user_addr_t newp = req->newptr; /* user buffer copy in address */
811 size_t newlen = req->newlen; /* user buffer copy in size */
812 int error;
813
814 int tmp;
815
816 /* valid values passed in:
817 * = 0 means don't keep called counts for each bsd syscall
818 * > 0 means keep called counts for each bsd syscall
819 * = 2 means dump current counts to the system log
820 * = 3 means reset all counts
821 * for example, to dump current counts:
822 * sysctl -w kern.count_calls=2
823 */
824 error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
825 if ( error != 0 ) {
826 return (error);
827 }
828
829 if ( tmp == 1 ) {
830 do_count_syscalls = 1;
831 }
832 else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
833 int i;
834 for ( i = 0; i < nsysent; i++ ) {
835 if ( syscalls_log[i] != 0 ) {
836 if ( tmp == 2 ) {
837 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
838 }
839 else {
840 syscalls_log[i] = 0;
841 }
842 }
843 }
844 if ( tmp != 0 ) {
845 do_count_syscalls = 1;
846 }
847 }
848
849 /* adjust index so we return the right required/consumed amount */
850 if (!error)
851 req->oldidx += req->oldlen;
852
853 return (error);
854 }
855 SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
856 0, /* Pointer argument (arg1) */
857 0, /* Integer argument (arg2) */
858 sysctl_docountsyscalls, /* Handler function */
859 NULL, /* Data pointer */
860 "");
861 #endif /* COUNT_SYSCALLS */
862
#if DEBUG
/*
 * Debugging related system variables.
 */
#if DIAGNOSTIC
extern
#endif /* DIAGNOSTIC */
struct ctldebug debug0, debug1;
struct ctldebug debug2, debug3, debug4;
struct ctldebug debug5, debug6, debug7, debug8, debug9;
struct ctldebug debug10, debug11, debug12, debug13, debug14;
struct ctldebug debug15, debug16, debug17, debug18, debug19;
/*
 * Table of debug variable slots consulted by sysctl_dodebug(); a slot
 * with a NULL debugname is treated as empty (ENOTSUP).
 */
STATIC struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
	&debug0, &debug1, &debug2, &debug3, &debug4,
	&debug5, &debug6, &debug7, &debug8, &debug9,
	&debug10, &debug11, &debug12, &debug13, &debug14,
	&debug15, &debug16, &debug17, &debug18, &debug19,
};
/*
 * sysctl_dodebug
 *
 * Common handler for the debug.name and debug.value nodes.  The slot in
 * debugvars[] is taken from oidp->oid_arg2 and name[0] selects whether
 * the variable's name (read-only string) or its value (read/write int)
 * is accessed.  Returns ENOTSUP for empty or out-of-range slots or an
 * unexpected oid name vector.
 *
 * NOTE(review): oid_arg2 is registered below as CTL_DEBUG_NAME /
 * CTL_DEBUG_VALUE, yet is used here as an index into debugvars[] —
 * confirm the intended slot mapping.
 */
STATIC int
sysctl_dodebug SYSCTL_HANDLER_ARGS
{
	int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
	user_addr_t newp = req->newptr;	/* user buffer copy in address */
	size_t newlen = req->newlen;	/* user buffer copy in size */
	int error;

	struct ctldebug *cdp;

	/* all sysctl names at this level are name and field */
	if (namelen != 1)
		return (ENOTSUP);		/* overloaded */
	if (cmd < 0 || cmd >= CTL_DEBUG_MAXID)
		return (ENOTSUP);
	cdp = debugvars[cmd];
	if (cdp->debugname == 0)	/* empty slot */
		return (ENOTSUP);
	switch (name[0]) {
	case CTL_DEBUG_NAME:
		error = sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname);
		break;
	case CTL_DEBUG_VALUE:
		error = sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar);
		break;
	default:
		error = ENOTSUP;
		break;
	}

	/* adjust index so we return the right required/consumed amount */
	if (!error)
		req->oldidx += req->oldlen;

	return (error);
}
/*
 * XXX We mark this RW instead of RD to let sysctl_rdstring() return the
 * XXX historical error.
 *
 * Both nodes share sysctl_dodebug(); arg2 distinguishes them.
 */
SYSCTL_PROC(_debug, CTL_DEBUG_NAME, name, CTLTYPE_NODE|CTLFLAG_RW | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	CTL_DEBUG_NAME,		/* Integer argument (arg2) */
	sysctl_dodebug,		/* Handler function */
	NULL,			/* Data pointer */
	"Debugging");
SYSCTL_PROC(_debug, CTL_DEBUG_VALUE, value, CTLTYPE_NODE|CTLFLAG_RW | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	CTL_DEBUG_VALUE,	/* Integer argument (arg2) */
	sysctl_dodebug,		/* Handler function */
	NULL,			/* Data pointer */
	"Debugging");
#endif /* DEBUG */
938
939 /*
940 * The following sysctl_* functions should not be used
941 * any more, as they can only cope with callers in
942 * user mode: Use new-style
943 * sysctl_io_number()
944 * sysctl_io_string()
945 * sysctl_io_opaque()
946 * instead.
947 */
948
949 /*
950 * Validate parameters and get old / set new parameters
951 * for an integer-valued sysctl function.
952 */
953 int
954 sysctl_int(user_addr_t oldp, size_t *oldlenp,
955 user_addr_t newp, size_t newlen, int *valp)
956 {
957 int error = 0;
958
959 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
960 return (EFAULT);
961 if (oldp && *oldlenp < sizeof(int))
962 return (ENOMEM);
963 if (newp && newlen != sizeof(int))
964 return (EINVAL);
965 *oldlenp = sizeof(int);
966 if (oldp)
967 error = copyout(valp, oldp, sizeof(int));
968 if (error == 0 && newp) {
969 error = copyin(newp, valp, sizeof(int));
970 AUDIT_ARG(value32, *valp);
971 }
972 return (error);
973 }
974
975 /*
976 * As above, but read-only.
977 */
978 int
979 sysctl_rdint(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, int val)
980 {
981 int error = 0;
982
983 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
984 return (EFAULT);
985 if (oldp && *oldlenp < sizeof(int))
986 return (ENOMEM);
987 if (newp)
988 return (EPERM);
989 *oldlenp = sizeof(int);
990 if (oldp)
991 error = copyout((caddr_t)&val, oldp, sizeof(int));
992 return (error);
993 }
994
995 /*
996 * Validate parameters and get old / set new parameters
997 * for an quad(64bit)-valued sysctl function.
998 */
999 int
1000 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
1001 user_addr_t newp, size_t newlen, quad_t *valp)
1002 {
1003 int error = 0;
1004
1005 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1006 return (EFAULT);
1007 if (oldp && *oldlenp < sizeof(quad_t))
1008 return (ENOMEM);
1009 if (newp && newlen != sizeof(quad_t))
1010 return (EINVAL);
1011 *oldlenp = sizeof(quad_t);
1012 if (oldp)
1013 error = copyout(valp, oldp, sizeof(quad_t));
1014 if (error == 0 && newp)
1015 error = copyin(newp, valp, sizeof(quad_t));
1016 return (error);
1017 }
1018
1019 /*
1020 * As above, but read-only.
1021 */
1022 int
1023 sysctl_rdquad(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, quad_t val)
1024 {
1025 int error = 0;
1026
1027 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1028 return (EFAULT);
1029 if (oldp && *oldlenp < sizeof(quad_t))
1030 return (ENOMEM);
1031 if (newp)
1032 return (EPERM);
1033 *oldlenp = sizeof(quad_t);
1034 if (oldp)
1035 error = copyout((caddr_t)&val, oldp, sizeof(quad_t));
1036 return (error);
1037 }
1038
/*
 * Validate parameters and get old / set new parameters
 * for a string-valued sysctl function.  Unlike sysctl_string, if you
 * give it a too small (but larger than 0 bytes) buffer, instead of
 * returning ENOMEM, it truncates the returned string to the buffer
 * size.  This preserves the semantics of some library routines
 * implemented via sysctl, which truncate their returned data, rather
 * than simply returning an error.  The returned string is always NUL
 * terminated.
 *
 * NOTE(review): when truncating, copylen becomes *oldlenp + 1 and the
 * copyout plus the trailing NUL together write *oldlenp + 1 bytes —
 * one byte past the size the caller advertised.  Also, *oldlenp is
 * written unconditionally even though the first check permits
 * (oldp == NULL, oldlenp == NULL).  Both quirks predate this review;
 * confirm before relying on them.
 */
int
sysctl_trstring(user_addr_t oldp, size_t *oldlenp,
                user_addr_t newp, size_t newlen, char *str, int maxlen)
{
	int len, copylen, error = 0;

	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
		return (EFAULT);
	copylen = len = strlen(str) + 1;
	if (oldp && (len < 0 || *oldlenp < 1))
		return (ENOMEM);
	if (oldp && (*oldlenp < (size_t)len))
		copylen = *oldlenp + 1;
	if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
		return (EINVAL);
	*oldlenp = copylen - 1; /* deal with NULL strings correctly */
	if (oldp) {
		error = copyout(str, oldp, copylen);
		if (!error) {
			unsigned char c = 0;
			/* NUL terminate */
			oldp += *oldlenp;
			error = copyout((void *)&c, oldp, sizeof(char));
		}
	}
	if (error == 0 && newp) {
		error = copyin(newp, str, newlen);
		str[newlen] = 0;	/* newlen < maxlen, so this stays in bounds */
		AUDIT_ARG(text, (char *)str);
	}
	return (error);
}
1081
1082 /*
1083 * Validate parameters and get old / set new parameters
1084 * for a string-valued sysctl function.
1085 */
1086 int
1087 sysctl_string(user_addr_t oldp, size_t *oldlenp,
1088 user_addr_t newp, size_t newlen, char *str, int maxlen)
1089 {
1090 int len, error = 0;
1091
1092 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1093 return (EFAULT);
1094 len = strlen(str) + 1;
1095 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1096 return (ENOMEM);
1097 if (newp && (maxlen < 0 || newlen >= (size_t)maxlen))
1098 return (EINVAL);
1099 *oldlenp = len -1; /* deal with NULL strings correctly */
1100 if (oldp) {
1101 error = copyout(str, oldp, len);
1102 }
1103 if (error == 0 && newp) {
1104 error = copyin(newp, str, newlen);
1105 str[newlen] = 0;
1106 AUDIT_ARG(text, (char *)str);
1107 }
1108 return (error);
1109 }
1110
1111 /*
1112 * As above, but read-only.
1113 */
1114 int
1115 sysctl_rdstring(user_addr_t oldp, size_t *oldlenp,
1116 user_addr_t newp, char *str)
1117 {
1118 int len, error = 0;
1119
1120 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1121 return (EFAULT);
1122 len = strlen(str) + 1;
1123 if (oldp && *oldlenp < (size_t)len)
1124 return (ENOMEM);
1125 if (newp)
1126 return (EPERM);
1127 *oldlenp = len;
1128 if (oldp)
1129 error = copyout(str, oldp, len);
1130 return (error);
1131 }
1132
1133 /*
1134 * Validate parameters and get old / set new parameters
1135 * for a structure oriented sysctl function.
1136 */
1137 int
1138 sysctl_struct(user_addr_t oldp, size_t *oldlenp,
1139 user_addr_t newp, size_t newlen, void *sp, int len)
1140 {
1141 int error = 0;
1142
1143 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1144 return (EFAULT);
1145 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1146 return (ENOMEM);
1147 if (newp && (len < 0 || newlen > (size_t)len))
1148 return (EINVAL);
1149 if (oldp) {
1150 *oldlenp = len;
1151 error = copyout(sp, oldp, len);
1152 }
1153 if (error == 0 && newp)
1154 error = copyin(newp, sp, len);
1155 return (error);
1156 }
1157
1158 /*
1159 * Validate parameters and get old parameters
1160 * for a structure oriented sysctl function.
1161 */
1162 int
1163 sysctl_rdstruct(user_addr_t oldp, size_t *oldlenp,
1164 user_addr_t newp, void *sp, int len)
1165 {
1166 int error = 0;
1167
1168 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
1169 return (EFAULT);
1170 if (oldp && (len < 0 || *oldlenp < (size_t)len))
1171 return (ENOMEM);
1172 if (newp)
1173 return (EPERM);
1174 *oldlenp = len;
1175 if (oldp)
1176 error = copyout(sp, oldp, len);
1177 return (error);
1178 }
1179
1180 STATIC int
1181 sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
1182 {
1183 if (p->p_pid != (pid_t)*(int*)arg)
1184 return(0);
1185 else
1186 return(1);
1187 }
1188
1189 STATIC int
1190 sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
1191 {
1192 if (p->p_pgrpid != (pid_t)*(int*)arg)
1193 return(0);
1194 else
1195 return(1);
1196 }
1197
/*
 * Match processes whose controlling terminal's device number equals the
 * dev_t passed via *arg.  The pgrp/session walk is done under the kernel
 * funnel; returns 1 on match, 0 otherwise.
 */
STATIC int
sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
{
	boolean_t funnel_state;
	int retval;
	struct tty *tp;


	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	/* This is very racy but list lock is held.. Hmmm. */
	if ((p->p_flag & P_CONTROLT) == 0 ||
		(p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
			(tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
			tp->t_dev != (dev_t)*(int*)arg)
				retval = 0;
	else
		retval = 1;

	/* restore the caller's funnel state before returning */
	thread_funnel_set(kernel_flock, funnel_state);

	return(retval);
}
1220
1221 STATIC int
1222 sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
1223 {
1224 kauth_cred_t my_cred;
1225 uid_t uid;
1226
1227 if (p->p_ucred == NULL)
1228 return(0);
1229 my_cred = kauth_cred_proc_ref(p);
1230 uid = kauth_cred_getuid(my_cred);
1231 kauth_cred_unref(&my_cred);
1232
1233 if (uid != (uid_t)*(int*)arg)
1234 return(0);
1235 else
1236 return(1);
1237 }
1238
1239
1240 STATIC int
1241 sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
1242 {
1243 kauth_cred_t my_cred;
1244 uid_t ruid;
1245
1246 if (p->p_ucred == NULL)
1247 return(0);
1248 my_cred = kauth_cred_proc_ref(p);
1249 ruid = kauth_cred_getruid(my_cred);
1250 kauth_cred_unref(&my_cred);
1251
1252 if (ruid != (uid_t)*(int*)arg)
1253 return(0);
1254 else
1255 return(1);
1256 }
1257
#if CONFIG_LCTX
/* Match processes in the login context whose id equals *arg (1 = match). */
STATIC int
sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
{
	if (p->p_lctx == NULL)
		return (0);
	return (p->p_lctx->lc_id == (pid_t)*(int *)arg);
}
#endif
1269
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))
/*
 * State shared between sysctl_prochandle() and sysdoproc_callback();
 * carries the user-buffer cursor and the filter configuration across
 * the proc_iterate() callback.
 */
struct sysdoproc_args {
	int	buflen;		/* remaining bytes in the user buffer */
	void	*kprocp;	/* scratch kinfo_proc (32- or 64-bit layout) */
	boolean_t is_64_bit;	/* caller is a 64-bit process */
	user_addr_t	dp;	/* current copyout position */
	size_t needed;		/* total bytes required for all matches */
	int sizeof_kproc;	/* size of the structure at *kprocp */
	int *errorp;		/* out: first copyout error, if any */
	int uidcheck;		/* nonzero: filter by effective uid */
	int ruidcheck;		/* nonzero: filter by real uid */
	int ttycheck;		/* nonzero: filter by controlling tty dev */
	int uidval;		/* comparison value for the checks above */
};
1287
/*
 * sysdoproc_callback
 *
 * proc_iterate() callback: if the process passes the requested
 * uid/ruid/tty filters, marshal it into the scratch kinfo_proc and copy
 * it out to user space, advancing the cursor.  'needed' accumulates the
 * space required; note that once the buffer is exhausted the filters
 * are skipped, so 'needed' then counts every remaining process.
 */
int
sysdoproc_callback(proc_t p, void *arg)
{
	struct sysdoproc_args *args = arg;

	if (args->buflen >= args->sizeof_kproc) {
		if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
			return (PROC_RETURNED);

		/* clear the scratch area; fill_* only set non-zero fields */
		bzero(args->kprocp, args->sizeof_kproc);
		if (args->is_64_bit)
			fill_user64_proc(p, args->kprocp);
		else
			fill_user32_proc(p, args->kprocp);
		int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
		if (error) {
			*args->errorp = error;
			return (PROC_RETURNED_DONE);	/* stop the iteration */
		}
		args->dp += args->sizeof_kproc;
		args->buflen -= args->sizeof_kproc;
	}
	args->needed += args->sizeof_kproc;
	return (PROC_RETURNED);
}
1317
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
/*
 * sysctl_prochandle
 *
 * Common handler for the kern.proc.* subcommands (all/pid/pgrp/tty/uid/
 * ruid/lcid).  oidp->oid_arg2 carries the KERN_PROC_* subcommand and
 * name[0] (when present) is the filter value.  Copies out an array of
 * kinfo_proc structures laid out for the caller's ABI, or, with a NULL
 * old pointer, reports the size needed (padded by KERN_PROCSLOP).
 */
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
	int cmd = oidp->oid_arg2;	/* subcommand for multiple nodes */
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t where = req->oldptr;/* user buffer copy out address */

	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
	int error = 0;
	boolean_t is_64_bit = proc_is64bit(current_proc());
	struct user32_kinfo_proc  user32_kproc;
	struct user64_kinfo_proc  user_kproc;
	int sizeof_kproc;
	void *kprocp;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;
	int uidcheck = 0;
	int ruidcheck = 0;
	int ttycheck = 0;

	/* only KERN_PROC_ALL takes an empty name vector */
	if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
		return (EINVAL);

	/* pick the kinfo_proc layout matching the caller's ABI */
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = &user_kproc;
	} else {
		sizeof_kproc = sizeof(user32_kproc);
		kprocp = &user32_kproc;
	}

	/*
	 * pid/pgrp/lcid are filtered inside proc_iterate(); uid/ruid/tty
	 * are filtered in the callback (see sysdoproc_callback).
	 */
	switch (cmd) {

	case KERN_PROC_PID:
		filterfn = sysdoproc_filt_KERN_PROC_PID;
		break;

	case KERN_PROC_PGRP:
		filterfn = sysdoproc_filt_KERN_PROC_PGRP;
		break;

	case KERN_PROC_TTY:
		ttycheck = 1;
		break;

	case KERN_PROC_UID:
		uidcheck = 1;
		break;

	case KERN_PROC_RUID:
		ruidcheck = 1;
		break;

#if CONFIG_LCTX
	case KERN_PROC_LCID:
		filterfn = sysdoproc_filt_KERN_PROC_LCID;
		break;
#endif
	case KERN_PROC_ALL:
		break;

	default:
		/* must be kern.proc.<unknown> */
		return (ENOTSUP);
	}

	error = 0;
	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.dp = dp;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	/* uidval is left unset for KERN_PROC_ALL; no check consults it then */
	if (namelen)
		args.uidval = name[0];

	proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
	    sysdoproc_callback, &args, filterfn, name);

	if (error)
		return (error);

	dp = args.dp;
	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		req->oldlen = dp - where;
		if (needed > req->oldlen)
			return (ENOMEM);
	} else {
		/* size-probe: pad the estimate for processes created meanwhile */
		needed += KERN_PROCSLOP;
		req->oldlen = needed;
	}
	/* adjust index so we return the right required/consumed amount */
	req->oldidx += req->oldlen;
	return (0);
}
1423
/*
 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
 * in the sysctl declaration itself, which comes into the handler function
 * as 'oidp->oid_arg2'.
 *
 * For these particular sysctls, since they have well known OIDs, we could
 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
 * of a well known value with a common handler function.  This is desirable,
 * because we want well known values to "go away" at some future date.
 *
 * It should be noted that the value of '((int *)arg1)[1]' is used for many
 * an integer parameter to the subcommand for many of these sysctls; we'd
 * rather have used '((int *)arg1)[0]' for that, or even better, an element
 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
 * and then use leaf-node permissions enforcement, but that would have
 * necessitated modifying user space code to correspond to the interface
 * change, and we are striving for binary backward compatibility here; even
 * though these are SPI, and not intended for use by user space applications
 * which are not themselves system tools or libraries, some applications
 * have erroneously used them.
 */
SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_ALL,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_PID,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_TTY,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_PGRP,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_UID,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_RUID,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
/*
 * NOTE(review): this node is registered even when CONFIG_LCTX is not
 * defined; in that configuration sysctl_prochandle() has no
 * KERN_PROC_LCID case and returns ENOTSUP — confirm this is intended.
 */
SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_LCID,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
1488
1489
/*
 * Fill in non-zero fields of an eproc structure for the specified process.
 * The pgrp/session references taken at entry are both dropped before
 * returning.
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
#if CONFIG_LCTX
	if (p->p_lctx)
		ep->e_lcid = p->p_lctx->lc_id;
#endif
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
		    ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* controlling-terminal info, NODEV if there is none */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		session_rele(sessp);
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
1549
/*
 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
 * Mirrors fill_user32_eproc(); the pgrp/session references taken at entry
 * are both dropped before returning.
 */
STATIC void
fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
#if CONFIG_LCTX
	if (p->p_lctx)
		ep->e_lcid = p->p_lctx->lc_id;
#endif
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
		    ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* controlling-terminal info, NODEV if there is none */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		session_rele(sessp);
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
1609
/*
 * Fill in an eproc structure for the specified process.
 * bzeroed by our caller, so only set non-zero fields.
 * Time values are truncated to the 32-bit user layout.
 */
STATIC void
fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	/* fold selected p_lflag bits into the historical p_flag view */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related  */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	exp->p_realtimer.it_interval.tv_sec =
	    (user32_time_t)p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec =
	    (__int32_t)p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec =
	    (user32_time_t)p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec =
	    (__int32_t)p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}
1660
/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 * bzeroed by our caller, so only set non-zero fields; unlike the 32-bit
 * variant, time values are copied without truncation.
 */
STATIC void
fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	/* fold selected p_lflag bits into the historical p_flag view */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related  */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}
1706
/*
 * Assemble a 32-bit-layout kinfo_proc (proc + eproc) for process p.
 * The caller has bzeroed *kp; only non-zero fields are written.
 */
STATIC void
fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
{
	/* on a 64 bit kernel, 32 bit users get some truncated information */
	fill_user32_externproc(p, &kp->kp_proc);
	fill_user32_eproc(p, &kp->kp_eproc);
}
1714
/*
 * Assemble a 64-bit-layout kinfo_proc (proc + eproc) for process p.
 * The caller has bzeroed *kp; only non-zero fields are written.
 */
STATIC void
fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
{
	fill_user64_externproc(p, &kp->kp_proc);
	fill_user64_eproc(p, &kp->kp_eproc);
}
1721
/*
 * sysctl_kdebug_ops
 *
 * kern.kdebug handler: after a superuser check, forwards every
 * recognized KERN_KD* subcommand to kdbg_control().
 */
STATIC int
sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */

	proc_t p = current_proc();
	int ret=0;

	if (namelen == 0)
		return(ENOTSUP);

	/* trace control is restricted to the superuser */
	ret = suser(kauth_cred_get(), &p->p_acflag);
	if (ret)
		return(ret);

	switch(name[0]) {
	case KERN_KDEFLAGS:
	case KERN_KDDFLAGS:
	case KERN_KDENABLE:
	case KERN_KDGETBUF:
	case KERN_KDSETUP:
	case KERN_KDREMOVE:
	case KERN_KDSETREG:
	case KERN_KDGETREG:
	case KERN_KDREADTR:
	case KERN_KDWRITETR:
	case KERN_KDWRITEMAP:
	case KERN_KDPIDTR:
	case KERN_KDTHRMAP:
	case KERN_KDPIDEX:
	case KERN_KDSETRTCDEC:
	case KERN_KDSETBUF:
	case KERN_KDGETENTROPY:
	case KERN_KDENABLE_BG_TRACE:
	case KERN_KDDISABLE_BG_TRACE:
	case KERN_KDSET_TYPEFILTER:

		ret = kdbg_control(name, namelen, oldp, oldlenp);
		break;
	default:
		ret= ENOTSUP;
		break;
	}

	/* adjust index so we return the right required/consumed amount */
	if (!ret)
		req->oldidx += req->oldlen;

	return (ret);
}
SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_kdebug_ops,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1784
1785
#if !CONFIG_EMBEDDED
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
STATIC int
sysctl_doprocargs SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */
	int error;

	/* final 0: legacy KERN_PROCARGS format (no leading argc word) */
	error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);

	/* adjust index so we return the right required/consumed amount */
	if (!error)
		req->oldidx += req->oldlen;

	return (error);
}
SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs,	/* Handler function */
	NULL,			/* Data pointer */
	"");
#endif	/* !CONFIG_EMBEDDED */
1818
/*
 * kern.procargs2 handler: same as kern.procargs but the copied-out data
 * is prefixed with the target process's argc (argc_yes == 1 to
 * sysctl_procargsx()).
 */
STATIC int
sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */
	int error;

	/* final 1: reserve the first word of the buffer for argc */
	error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);

	/* adjust index so we return the right required/consumed amount */
	if (!error)
		req->oldidx += req->oldlen;

	return (error);
}
SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs2,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1845
1846 STATIC int
1847 sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
1848 size_t *sizep, proc_t cur_proc, int argc_yes)
1849 {
1850 proc_t p;
1851 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1852 int error = 0;
1853 struct _vm_map *proc_map;
1854 struct task * task;
1855 vm_map_copy_t tmp;
1856 user_addr_t arg_addr;
1857 size_t arg_size;
1858 caddr_t data;
1859 size_t argslen=0;
1860 int size;
1861 vm_offset_t copy_start, copy_end;
1862 kern_return_t ret;
1863 int pid;
1864 kauth_cred_t my_cred;
1865 uid_t uid;
1866
1867 if ( namelen < 1 )
1868 return(EINVAL);
1869
1870 if (argc_yes)
1871 buflen -= sizeof(int); /* reserve first word to return argc */
1872
1873 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1874 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1875 /* is not NULL then the caller wants us to return the length needed to */
1876 /* hold the data we would return */
1877 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1878 return(EINVAL);
1879 }
1880 arg_size = buflen;
1881
1882 /*
1883 * Lookup process by pid
1884 */
1885 pid = name[0];
1886 p = proc_find(pid);
1887 if (p == NULL) {
1888 return(EINVAL);
1889 }
1890
1891 /*
1892 * Copy the top N bytes of the stack.
1893 * On all machines we have so far, the stack grows
1894 * downwards.
1895 *
1896 * If the user expects no more than N bytes of
1897 * argument list, use that as a guess for the
1898 * size.
1899 */
1900
1901 if (!p->user_stack) {
1902 proc_rele(p);
1903 return(EINVAL);
1904 }
1905
1906 if (where == USER_ADDR_NULL) {
1907 /* caller only wants to know length of proc args data */
1908 if (sizep == NULL) {
1909 proc_rele(p);
1910 return(EFAULT);
1911 }
1912
1913 size = p->p_argslen;
1914 proc_rele(p);
1915 if (argc_yes) {
1916 size += sizeof(int);
1917 }
1918 else {
1919 /*
1920 * old PROCARGS will return the executable's path and plus some
1921 * extra space for work alignment and data tags
1922 */
1923 size += PATH_MAX + (6 * sizeof(int));
1924 }
1925 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1926 *sizep = size;
1927 return (0);
1928 }
1929
1930 my_cred = kauth_cred_proc_ref(p);
1931 uid = kauth_cred_getuid(my_cred);
1932 kauth_cred_unref(&my_cred);
1933
1934 if ((uid != kauth_cred_getuid(kauth_cred_get()))
1935 && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1936 proc_rele(p);
1937 return (EINVAL);
1938 }
1939
1940 if ((u_int)arg_size > p->p_argslen)
1941 arg_size = round_page(p->p_argslen);
1942
1943 arg_addr = p->user_stack - arg_size;
1944
1945
1946 /*
1947 * Before we can block (any VM code), make another
1948 * reference to the map to keep it alive. We do
1949 * that by getting a reference on the task itself.
1950 */
1951 task = p->task;
1952 if (task == NULL) {
1953 proc_rele(p);
1954 return(EINVAL);
1955 }
1956
1957 argslen = p->p_argslen;
1958 /*
1959 * Once we have a task reference we can convert that into a
1960 * map reference, which we will use in the calls below. The
1961 * task/process may change its map after we take this reference
1962 * (see execve), but the worst that will happen then is a return
1963 * of stale info (which is always a possibility).
1964 */
1965 task_reference(task);
1966 proc_rele(p);
1967 proc_map = get_task_map_reference(task);
1968 task_deallocate(task);
1969
1970 if (proc_map == NULL)
1971 return(EINVAL);
1972
1973
1974 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
1975 if (ret != KERN_SUCCESS) {
1976 vm_map_deallocate(proc_map);
1977 return(ENOMEM);
1978 }
1979
1980 copy_end = round_page(copy_start + arg_size);
1981
1982 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1983 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1984 vm_map_deallocate(proc_map);
1985 kmem_free(kernel_map, copy_start,
1986 round_page(arg_size));
1987 return (EIO);
1988 }
1989
1990 /*
1991 * Now that we've done the copyin from the process'
1992 * map, we can release the reference to it.
1993 */
1994 vm_map_deallocate(proc_map);
1995
1996 if( vm_map_copy_overwrite(kernel_map,
1997 (vm_map_address_t)copy_start,
1998 tmp, FALSE) != KERN_SUCCESS) {
1999 kmem_free(kernel_map, copy_start,
2000 round_page(arg_size));
2001 return (EIO);
2002 }
2003
2004 if (arg_size > argslen) {
2005 data = (caddr_t) (copy_end - argslen);
2006 size = argslen;
2007 } else {
2008 data = (caddr_t) (copy_end - arg_size);
2009 size = arg_size;
2010 }
2011
2012 if (argc_yes) {
2013 /* Put processes argc as the first word in the copyout buffer */
2014 suword(where, p->p_argc);
2015 error = copyout(data, (where + sizeof(int)), size);
2016 size += sizeof(int);
2017 } else {
2018 error = copyout(data, where, size);
2019
2020 /*
2021 * Make the old PROCARGS work to return the executable's path
2022 * But, only if there is enough space in the provided buffer
2023 *
2024 * on entry: data [possibily] points to the beginning of the path
2025 *
2026 * Note: we keep all pointers&sizes aligned to word boundries
2027 */
2028 if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
2029 {
2030 int binPath_sz, alignedBinPath_sz = 0;
2031 int extraSpaceNeeded, addThis;
2032 user_addr_t placeHere;
2033 char * str = (char *) data;
2034 int max_len = size;
2035
2036 /* Some apps are really bad about messing up their stacks
2037 So, we have to be extra careful about getting the length
2038 of the executing binary. If we encounter an error, we bail.
2039 */
2040
2041 /* Limit ourselves to PATH_MAX paths */
2042 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
2043
2044 binPath_sz = 0;
2045
2046 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
2047 binPath_sz++;
2048
2049 /* If we have a NUL terminator, copy it, too */
2050 if (binPath_sz < max_len-1) binPath_sz += 1;
2051
2052 /* Pre-Flight the space requiremnts */
2053
2054 /* Account for the padding that fills out binPath to the next word */
2055 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
2056
2057 placeHere = where + size;
2058
2059 /* Account for the bytes needed to keep placeHere word aligned */
2060 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
2061
2062 /* Add up all the space that is needed */
2063 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
2064
2065 /* is there is room to tack on argv[0]? */
2066 if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
2067 {
2068 placeHere += addThis;
2069 suword(placeHere, 0);
2070 placeHere += sizeof(int);
2071 suword(placeHere, 0xBFFF0000);
2072 placeHere += sizeof(int);
2073 suword(placeHere, 0);
2074 placeHere += sizeof(int);
2075 error = copyout(data, placeHere, binPath_sz);
2076 if ( ! error )
2077 {
2078 placeHere += binPath_sz;
2079 suword(placeHere, 0);
2080 size += extraSpaceNeeded;
2081 }
2082 }
2083 }
2084 }
2085
2086 if (copy_start != (vm_offset_t) 0) {
2087 kmem_free(kernel_map, copy_start, copy_end - copy_start);
2088 }
2089 if (error) {
2090 return(error);
2091 }
2092
2093 if (where != USER_ADDR_NULL)
2094 *sizep = size;
2095 return (0);
2096 }
2097
2098
2099 /*
2100 * Max number of concurrent aio requests
2101 */
2102 STATIC int
2103 sysctl_aiomax
2104 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2105 {
2106 int new_value, changed;
2107 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
2108 if (changed) {
2109 /* make sure the system-wide limit is greater than the per process limit */
2110 if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
2111 aio_max_requests = new_value;
2112 else
2113 error = EINVAL;
2114 }
2115 return(error);
2116 }
2117
2118
2119 /*
2120 * Max number of concurrent aio requests per process
2121 */
2122 STATIC int
2123 sysctl_aioprocmax
2124 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2125 {
2126 int new_value, changed;
2127 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
2128 if (changed) {
2129 /* make sure per process limit is less than the system-wide limit */
2130 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
2131 aio_max_requests_per_process = new_value;
2132 else
2133 error = EINVAL;
2134 }
2135 return(error);
2136 }
2137
2138
2139 /*
2140 * Max number of async IO worker threads
2141 */
2142 STATIC int
2143 sysctl_aiothreads
2144 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2145 {
2146 int new_value, changed;
2147 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
2148 if (changed) {
2149 /* we only allow an increase in the number of worker threads */
2150 if (new_value > aio_worker_threads ) {
2151 _aio_create_worker_threads((new_value - aio_worker_threads));
2152 aio_worker_threads = new_value;
2153 }
2154 else
2155 error = EINVAL;
2156 }
2157 return(error);
2158 }
2159
2160
2161 /*
2162 * System-wide limit on the max number of processes
2163 */
2164 STATIC int
2165 sysctl_maxproc
2166 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2167 {
2168 int new_value, changed;
2169 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
2170 if (changed) {
2171 AUDIT_ARG(value32, new_value);
2172 /* make sure the system-wide limit is less than the configured hard
2173 limit set at kernel compilation */
2174 if (new_value <= hard_maxproc && new_value > 0)
2175 maxproc = new_value;
2176 else
2177 error = EINVAL;
2178 }
2179 return(error);
2180 }
2181
/* Read-only identity values for the running kernel (kern.ostype etc.). */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&kernel_uuid[0], 0, "");
2197
2198 #if DEBUG
2199 int debug_kprint_syscall = 0;
2200 char debug_kprint_syscall_process[MAXCOMLEN+1];
2201
2202 /* Thread safe: bits and string value are not used to reclaim state */
2203 SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
2204 CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
2205 SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
2206 CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
2207 "name of process for kprintf syscall tracing");
2208
2209 int debug_kprint_current_process(const char **namep)
2210 {
2211 struct proc *p = current_proc();
2212
2213 if (p == NULL) {
2214 return 0;
2215 }
2216
2217 if (debug_kprint_syscall_process[0]) {
2218 /* user asked to scope tracing to a particular process name */
2219 if(0 == strncmp(debug_kprint_syscall_process,
2220 p->p_comm, sizeof(debug_kprint_syscall_process))) {
2221 /* no value in telling the user that we traced what they asked */
2222 if(namep) *namep = NULL;
2223
2224 return 1;
2225 } else {
2226 return 0;
2227 }
2228 }
2229
2230 /* trace all processes. Tell user what we traced */
2231 if (namep) {
2232 *namep = p->p_comm;
2233 }
2234
2235 return 1;
2236 }
2237 #endif
2238
2239 /* PR-5293665: need to use a callback function for kern.osversion to set
2240 * osversion in IORegistry */
2241
2242 STATIC int
2243 sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2244 {
2245 int rval = 0;
2246
2247 rval = sysctl_handle_string(oidp, arg1, arg2, req);
2248
2249 if (req->newptr) {
2250 IORegistrySetOSBuildVersion((char *)arg1);
2251 }
2252
2253 return rval;
2254 }
2255
2256 SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
2257 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2258 osversion, 256 /* OSVERSIZE*/,
2259 sysctl_osversion, "A", "");
2260
2261 STATIC int
2262 sysctl_sysctl_bootargs
2263 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2264 {
2265 int error;
2266 char buf[256];
2267
2268 strlcpy(buf, PE_boot_args(), 256);
2269 error = sysctl_io_string(req, buf, 256, 0, NULL);
2270 return(error);
2271 }
2272
2273 SYSCTL_PROC(_kern, OID_AUTO, bootargs,
2274 CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
2275 NULL, 0,
2276 sysctl_sysctl_bootargs, "A", "bootargs");
2277
2278 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
2279 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2280 &maxfiles, 0, "");
2281 SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
2282 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2283 (int *)NULL, ARG_MAX, "");
2284 SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
2285 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2286 (int *)NULL, _POSIX_VERSION, "");
2287 SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
2288 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2289 (int *)NULL, NGROUPS_MAX, "");
2290 SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
2291 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2292 (int *)NULL, 1, "");
2293 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
2294 SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
2295 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2296 (int *)NULL, 1, "");
2297 #else
2298 SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
2299 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2300 NULL, 0, "");
2301 #endif
2302 SYSCTL_INT(_kern, OID_AUTO, num_files,
2303 CTLFLAG_RD | CTLFLAG_LOCKED,
2304 &nfiles, 0, "");
2305 SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
2306 CTLFLAG_RD | CTLFLAG_LOCKED,
2307 &numvnodes, 0, "");
2308 SYSCTL_INT(_kern, OID_AUTO, num_tasks,
2309 CTLFLAG_RD | CTLFLAG_LOCKED,
2310 &task_max, 0, "");
2311 SYSCTL_INT(_kern, OID_AUTO, num_threads,
2312 CTLFLAG_RD | CTLFLAG_LOCKED,
2313 &thread_max, 0, "");
2314 SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
2315 CTLFLAG_RD | CTLFLAG_LOCKED,
2316 &task_threadmax, 0, "");
2317
/*
 * kern.maxvnodes: read/write the desired vnode count.
 *
 * Note: sysctl_io_number() writes any new value straight into the global
 * desiredvnodes, so a change is detected by comparing against the snapshot
 * taken beforehand rather than via a "changed" out-parameter.
 */
STATIC int
sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int oldval = desiredvnodes;
	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);

	/* resize the VM object cache and the namecache to the new target */
	if (oldval != desiredvnodes) {
		reset_vmobjectcache(oldval, desiredvnodes);
		resize_namecache(desiredvnodes);
	}

	return(error);
}
2331
/* Registrations for the vnode-, proc- and aio-limit handlers. */
SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&nc_disabled, 0, "");

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiothreads, "I", "");
2355
2356 STATIC int
2357 sysctl_securelvl
2358 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2359 {
2360 int new_value, changed;
2361 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
2362 if (changed) {
2363 if (!(new_value < securelevel && req->p->p_pid != 1)) {
2364 proc_list_lock();
2365 securelevel = new_value;
2366 proc_list_unlock();
2367 } else {
2368 error = EPERM;
2369 }
2370 }
2371 return(error);
2372 }
2373
2374 SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
2375 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2376 0, 0, sysctl_securelvl, "I", "");
2377
2378
2379 STATIC int
2380 sysctl_domainname
2381 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2382 {
2383 int error, changed;
2384 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
2385 if (changed) {
2386 domainnamelen = strlen(domainname);
2387 }
2388 return(error);
2389 }
2390
2391 SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
2392 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2393 0, 0, sysctl_domainname, "A", "");
2394
2395 SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
2396 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2397 &hostid, 0, "");
2398
2399 STATIC int
2400 sysctl_hostname
2401 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2402 {
2403 int error, changed;
2404 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
2405 if (changed) {
2406 hostnamelen = req->newlen;
2407 }
2408 return(error);
2409 }
2410
2411
2412 SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
2413 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2414 0, 0, sysctl_hostname, "A", "");
2415
2416 STATIC int
2417 sysctl_procname
2418 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2419 {
2420 /* Original code allowed writing, I'm copying this, although this all makes
2421 no sense to me. Besides, this sysctl is never used. */
2422 return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
2423 }
2424
2425 SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
2426 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2427 0, 0, sysctl_procname, "A", "");
2428
2429 SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
2430 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2431 &speculative_reads_disabled, 0, "");
2432
2433 SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
2434 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2435 &ignore_is_ssd, 0, "");
2436
2437 SYSCTL_UINT(_kern, OID_AUTO, preheat_pages_max,
2438 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2439 &preheat_pages_max, 0, "");
2440
2441 SYSCTL_UINT(_kern, OID_AUTO, preheat_pages_min,
2442 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2443 &preheat_pages_min, 0, "");
2444
2445 SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
2446 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2447 &speculative_prefetch_max, 0, "");
2448
2449 SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
2450 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2451 &speculative_prefetch_max_iosize, 0, "");
2452
2453 SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
2454 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2455 &vm_page_free_target, 0, "");
2456
2457 SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
2458 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2459 &vm_page_free_min, 0, "");
2460
2461 SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
2462 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2463 &vm_page_free_reserved, 0, "");
2464
2465 SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
2466 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2467 &vm_page_speculative_percentage, 0, "");
2468
2469 SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
2470 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2471 &vm_page_speculative_q_age_ms, 0, "");
2472
2473 SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
2474 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2475 &vm_max_delayed_work_limit, 0, "");
2476
2477 SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
2478 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2479 &vm_max_batch, 0, "");
2480
2481
2482 STATIC int
2483 sysctl_boottime
2484 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2485 {
2486 time_t tv_sec = boottime_sec();
2487 struct proc *p = req->p;
2488
2489 if (proc_is64bit(p)) {
2490 struct user64_timeval t;
2491 t.tv_sec = tv_sec;
2492 t.tv_usec = 0;
2493 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2494 } else {
2495 struct user32_timeval t;
2496 t.tv_sec = tv_sec;
2497 t.tv_usec = 0;
2498 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2499 }
2500 }
2501
2502 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
2503 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2504 0, 0, sysctl_boottime, "S,timeval", "");
2505
2506 STATIC int
2507 sysctl_symfile
2508 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2509 {
2510 char *str;
2511 int error = get_kernel_symfile(req->p, &str);
2512 if (error)
2513 return (error);
2514 return sysctl_io_string(req, str, 0, 0, NULL);
2515 }
2516
2517
2518 SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
2519 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
2520 0, 0, sysctl_symfile, "A", "");
2521
2522 #if NFSCLIENT
2523 STATIC int
2524 sysctl_netboot
2525 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2526 {
2527 return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
2528 }
2529
2530 SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
2531 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2532 0, 0, sysctl_netboot, "I", "");
2533 #endif
2534
2535 #ifdef CONFIG_IMGSRC_ACCESS
2536 /*
2537 * Legacy--act as if only one layer of nesting is possible.
2538 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vfs_context_t ctx = vfs_context_current();
	vnode_t devvp;
	int result;

	/* root-only interface */
	if (!vfs_context_issuser(ctx)) {
		return EPERM;
	}

	if (imgsrc_rootvnodes[0] == NULL) {
		return ENOENT;
	}

	/* take an iocount on the imgsrc root vnode; dropped at "out" */
	result = vnode_getwithref(imgsrc_rootvnodes[0]);
	if (result != 0) {
		return result;
	}

	/* take an iocount on the backing device vnode as well */
	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
	result = vnode_getwithref(devvp);
	if (result != 0) {
		goto out;
	}

	/* report the device's dev_t (read-only) */
	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

	vnode_put(devvp);
out:
	vnode_put(imgsrc_rootvnodes[0]);
	return result;
}
2573
2574 SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
2575 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2576 0, 0, sysctl_imgsrcdev, "I", "");
2577
/*
 * kern.imgsrcinfo: export one imgsrc_info record per nested image-boot
 * level, walking imgsrc_rootvnodes[] until an empty slot is found.
 * Each iteration takes and releases iocounts on both the root vnode and
 * its backing device vnode.
 */
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING];	/* 2 for now, no problem */
	uint32_t i;
	vnode_t rvp, devvp;

	if (imgsrc_rootvnodes[0] == NULLVP) {
		return ENXIO;
	}

	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
		/*
		 * Go get the root vnode.
		 */
		rvp = imgsrc_rootvnodes[i];
		if (rvp == NULLVP) {
			break;
		}

		error = vnode_get(rvp);
		if (error != 0) {
			return error;
		}

		/*
		 * For now, no getting at a non-local volume.
		 */
		devvp = vnode_mount(rvp)->mnt_devvp;
		if (devvp == NULL) {
			vnode_put(rvp);
			return EINVAL;
		}

		error = vnode_getwithref(devvp);
		if (error != 0) {
			vnode_put(rvp);
			return error;
		}

		/*
		 * Fill in info.
		 */
		info[i].ii_dev = vnode_specrdev(devvp);
		info[i].ii_flags = 0;
		info[i].ii_height = i;
		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

		vnode_put(devvp);
		vnode_put(rvp);
	}

	/* i is the count of populated entries; copy out only those */
	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}
2634
2635 SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
2636 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
2637 0, 0, sysctl_imgsrcinfo, "I", "");
2638
2639 #endif /* CONFIG_IMGSRC_ACCESS */
2640
2641 SYSCTL_INT(_kern, OID_AUTO, timer_coalescing_enabled,
2642 CTLFLAG_RW | CTLFLAG_LOCKED,
2643 &mach_timer_coalescing_enabled, 0, "");
2644
2645 STATIC int
2646 sysctl_usrstack
2647 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2648 {
2649 return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
2650 }
2651
2652 SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
2653 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2654 0, 0, sysctl_usrstack, "I", "");
2655
2656 STATIC int
2657 sysctl_usrstack64
2658 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2659 {
2660 return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
2661 }
2662
2663 SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
2664 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2665 0, 0, sysctl_usrstack64, "Q", "");
2666
2667 SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
2668 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2669 corefilename, sizeof(corefilename), "");
2670
/*
 * kern.coredump: boolean toggle (0/1) for core-dump generation.
 */
STATIC int
sysctl_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	/* core dumps are never permitted on secure kernels; the code below
	 * is compiled but unreachable in that configuration */
	return (ENOTSUP);
#endif
	int new_value, changed;
	int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
	if (changed) {
		/* only the exact values 0 and 1 are accepted */
		if ((new_value == 0) || (new_value == 1))
			do_coredump = new_value;
		else
			error = EINVAL;
	}
	return(error);
}
2688
2689 SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
2690 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2691 0, 0, sysctl_coredump, "I", "");
2692
/*
 * kern.sugid_coredump: boolean toggle (0/1) for core dumps of set-id
 * processes.
 */
STATIC int
sysctl_suid_coredump
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	/* disabled entirely on secure kernels; remainder is unreachable there */
	return (ENOTSUP);
#endif
	int new_value, changed;
	int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
	if (changed) {
		/* only the exact values 0 and 1 are accepted */
		if ((new_value == 0) || (new_value == 1))
			sugid_coredump = new_value;
		else
			error = EINVAL;
	}
	return(error);
}
2710
2711 SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
2712 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2713 0, 0, sysctl_suid_coredump, "I", "");
2714
2715 STATIC int
2716 sysctl_delayterm
2717 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2718 {
2719 struct proc *p = req->p;
2720 int new_value, changed;
2721 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2722 if (changed) {
2723 proc_lock(p);
2724 if (new_value)
2725 req->p->p_lflag |= P_LDELAYTERM;
2726 else
2727 req->p->p_lflag &= ~P_LDELAYTERM;
2728 proc_unlock(p);
2729 }
2730 return(error);
2731 }
2732
2733 SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
2734 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2735 0, 0, sysctl_delayterm, "I", "");
2736
2737
2738 STATIC int
2739 sysctl_rage_vnode
2740 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2741 {
2742 struct proc *p = req->p;
2743 struct uthread *ut;
2744 int new_value, old_value, changed;
2745 int error;
2746
2747 ut = get_bsdthread_info(current_thread());
2748
2749 if (ut->uu_flag & UT_RAGE_VNODES)
2750 old_value = KERN_RAGE_THREAD;
2751 else if (p->p_lflag & P_LRAGE_VNODES)
2752 old_value = KERN_RAGE_PROC;
2753 else
2754 old_value = 0;
2755
2756 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2757
2758 if (error == 0) {
2759 switch (new_value) {
2760 case KERN_RAGE_PROC:
2761 proc_lock(p);
2762 p->p_lflag |= P_LRAGE_VNODES;
2763 proc_unlock(p);
2764 break;
2765 case KERN_UNRAGE_PROC:
2766 proc_lock(p);
2767 p->p_lflag &= ~P_LRAGE_VNODES;
2768 proc_unlock(p);
2769 break;
2770
2771 case KERN_RAGE_THREAD:
2772 ut->uu_flag |= UT_RAGE_VNODES;
2773 break;
2774 case KERN_UNRAGE_THREAD:
2775 ut = get_bsdthread_info(current_thread());
2776 ut->uu_flag &= ~UT_RAGE_VNODES;
2777 break;
2778 }
2779 }
2780 return(error);
2781 }
2782
2783 SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
2784 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2785 0, 0, sysctl_rage_vnode, "I", "");
2786
2787 /* XXX move this interface into libproc and remove this sysctl */
/*
 * kern.setthread_cpupercent: impose a CPU-usage limit on the calling
 * thread.  The written int packs the parameters: low 8 bits are the
 * percentage (0 removes any existing limit), upper 24 bits are the
 * refill period in milliseconds.
 */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, old_value;
	int error = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint8_t percent = 0;
	int ms_refill = 0;

	/* nothing meaningful to report on a read */
	old_value = 0;

	if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
		return (error);

	percent = new_value & 0xff;			/* low 8 bits encode the percentage */
	ms_refill = (new_value >> 8) & 0xffffff;	/* upper 24 bits encode the refill period, in ms */
	if (percent > 100)
		return (EINVAL);

	/*
	 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
	 */
	if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * NSEC_PER_MSEC)) != 0)
		return (EIO);

	return (0);
}
2816
2817 SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
2818 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
2819 0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
2820
2821
2822 STATIC int
2823 sysctl_kern_check_openevt
2824 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2825 {
2826 struct proc *p = req->p;
2827 int new_value, old_value, changed;
2828 int error;
2829
2830 if (p->p_flag & P_CHECKOPENEVT) {
2831 old_value = KERN_OPENEVT_PROC;
2832 } else {
2833 old_value = 0;
2834 }
2835
2836 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2837
2838 if (error == 0) {
2839 switch (new_value) {
2840 case KERN_OPENEVT_PROC:
2841 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
2842 break;
2843
2844 case KERN_UNOPENEVT_PROC:
2845 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
2846 break;
2847
2848 default:
2849 error = EINVAL;
2850 }
2851 }
2852 return(error);
2853 }
2854
2855 SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2856 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2857
2858
2859
/*
 * kern.nx: get/set the NX (no-execute) enforcement flag.
 */
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	/* NX policy is not tunable on secure kernels; the code below is
	 * compiled but unreachable in that configuration */
	return ENOTSUP;
#endif
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
#if defined(__i386__) || defined(__x86_64__)
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
#endif
		nx_enabled = new_value;
	}
	return(error);
}
2886
2887
2888
2889 SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
2890 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2891 0, 0, sysctl_nx, "I", "");
2892
2893 STATIC int
2894 sysctl_loadavg
2895 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2896 {
2897 if (proc_is64bit(req->p)) {
2898 struct user64_loadavg loadinfo64;
2899 fill_loadavg64(&averunnable, &loadinfo64);
2900 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2901 } else {
2902 struct user32_loadavg loadinfo32;
2903 fill_loadavg32(&averunnable, &loadinfo32);
2904 return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
2905 }
2906 }
2907
2908 SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
2909 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2910 0, 0, sysctl_loadavg, "S,loadavg", "");
2911
2912 /*
2913 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2914 */
/*
 * debug.toggle_address_reuse: read/toggle the VM map-entry reuse setting.
 */
STATIC int
sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
	__unused int arg2, struct sysctl_req *req)
{
	int old_value=0, new_value=0, error=0;

	/* NOTE(review): on a GETVALUE failure this returns `error`, which is
	 * still 0 here, so the failure is reported to the caller as success —
	 * confirm whether a non-zero errno was intended. */
	if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value ))
		return(error);
	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
	if (!error) {
		return (vm_toggle_entry_reuse(new_value, NULL));
	}
	return(error);
}
2929
2930 SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
2931
2932 STATIC int
2933 sysctl_swapusage
2934 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2935 {
2936 int error;
2937 uint64_t swap_total;
2938 uint64_t swap_avail;
2939 vm_size_t swap_pagesize;
2940 boolean_t swap_encrypted;
2941 struct xsw_usage xsu;
2942
2943 error = macx_swapinfo(&swap_total,
2944 &swap_avail,
2945 &swap_pagesize,
2946 &swap_encrypted);
2947 if (error)
2948 return error;
2949
2950 xsu.xsu_total = swap_total;
2951 xsu.xsu_avail = swap_avail;
2952 xsu.xsu_used = swap_total - swap_avail;
2953 xsu.xsu_pagesize = swap_pagesize;
2954 xsu.xsu_encrypted = swap_encrypted;
2955 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2956 }
2957
2958
2959
2960 SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2961 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2962 0, 0, sysctl_swapusage, "S,xsw_usage", "");
2963
2964 #if CONFIG_FREEZE
2965 extern void vm_page_reactivate_all_throttled(void);
2966
2967 static int
2968 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
2969 {
2970 #pragma unused(arg1, arg2)
2971 int error, val = memorystatus_freeze_enabled ? 1 : 0;
2972 boolean_t disabled;
2973
2974 error = sysctl_handle_int(oidp, &val, 0, req);
2975 if (error || !req->newptr)
2976 return (error);
2977
2978 /*
2979 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
2980 */
2981 disabled = (!val && memorystatus_freeze_enabled);
2982
2983 memorystatus_freeze_enabled = val ? TRUE : FALSE;
2984
2985 if (disabled) {
2986 vm_page_reactivate_all_throttled();
2987 }
2988
2989 return (0);
2990 }
2991
2992 SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
2993 #endif /* CONFIG_FREEZE */
2994
/* this kernel does NOT implement shared_region_make_private_np() */
/* NULL value pointer means the node reports the constant 0, i.e. "not privatizable". */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		(int *)NULL, 0, "");
2999
3000 #if defined(__i386__) || defined(__x86_64__)
3001 STATIC int
3002 sysctl_sysctl_exec_affinity(__unused struct sysctl_oid *oidp,
3003 __unused void *arg1, __unused int arg2,
3004 struct sysctl_req *req)
3005 {
3006 proc_t cur_proc = req->p;
3007 int error;
3008
3009 if (req->oldptr != USER_ADDR_NULL) {
3010 cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
3011 if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
3012 return error;
3013 }
3014
3015 if (req->newptr != USER_ADDR_NULL) {
3016 cpu_type_t newcputype;
3017 if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
3018 return error;
3019 if (newcputype == CPU_TYPE_I386)
3020 OSBitAndAtomic(~((uint32_t)P_AFFINITY), &cur_proc->p_flag);
3021 else if (newcputype == CPU_TYPE_POWERPC)
3022 OSBitOrAtomic(P_AFFINITY, &cur_proc->p_flag);
3023 else
3024 return (EINVAL);
3025 }
3026
3027 return 0;
3028 }
/* sysctl.proc_exec_affinity: per-process exec CPU-type affinity; writable by any user for its own process (CTLFLAG_ANYBODY). */
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");
3030 #endif
3031
/*
 * fetch_process_cputype
 *
 * Resolve the CPU type of a target process for the proc_native and
 * proc_cputype sysctls.
 *
 * cur_proc: the calling process; used when no PID is supplied.
 * name/namelen: remaining sysctl name components.  namelen == 0 means
 *     "the calling process"; namelen == 1 means name[0] is a PID to
 *     look up via proc_find().
 * cputype: out parameter; receives the CPU type, with CPU_ARCH_ABI64
 *     OR'ed in for 64-bit processes.
 *
 * Returns 0 on success, EINVAL for a bad PID or name length.
 */
STATIC int
fetch_process_cputype(
	proc_t cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	proc_t p = PROC_NULL;
	int refheld = 0;	/* nonzero iff proc_find() took a reference we must drop */
	cpu_type_t ret = 0;
	int error = 0;

	if (namelen == 0)
		p = cur_proc;
	else if (namelen == 1) {
		p = proc_find(name[0]);
		if (p == NULL)
			return (EINVAL);	/* no such PID; no ref taken, so a plain return is safe */
		refheld = 1;
	} else {
		error = EINVAL;
		goto out;
	}

#if defined(__i386__) || defined(__x86_64__)
	/* Translated (P_TRANSLATED) processes report as PowerPC. */
	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;
	}
	else
#endif
	{
		ret = cpu_type();
		if (IS_64BIT_PROCESS(p))
			ret |= CPU_ARCH_ABI64;
	}
	*cputype = ret;

	if (refheld != 0)
		proc_rele(p);
out:
	return (error);
}
3074
3075 STATIC int
3076 sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
3077 struct sysctl_req *req)
3078 {
3079 int error;
3080 cpu_type_t proc_cputype = 0;
3081 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
3082 return error;
3083 int res = 1;
3084 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
3085 res = 0;
3086 return SYSCTL_OUT(req, &res, sizeof(res));
3087 }
/* sysctl.proc_native: declared CTLTYPE_NODE so a trailing PID may be appended to the name, but the handler emits an int ("I"). */
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native");
3089
3090 STATIC int
3091 sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
3092 struct sysctl_req *req)
3093 {
3094 int error;
3095 cpu_type_t proc_cputype = 0;
3096 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
3097 return error;
3098 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
3099 }
/* sysctl.proc_cputype: declared CTLTYPE_NODE so a trailing PID may be appended to the name, but the handler emits an int ("I"). */
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
3101
3102 STATIC int
3103 sysctl_safeboot
3104 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3105 {
3106 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
3107 }
3108
/* kern.safeboot: read-only boolean derived from boothowto. */
SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_safeboot, "I", "");
3112
3113 STATIC int
3114 sysctl_singleuser
3115 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3116 {
3117 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
3118 }
3119
/* kern.singleuser: read-only boolean derived from boothowto. */
SYSCTL_PROC(_kern, OID_AUTO, singleuser,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_singleuser, "I", "");
3123
3124 /*
3125 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
3126 */
3127 extern boolean_t affinity_sets_enabled;
3128 extern int affinity_sets_mapping;
3129
3130 SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
3131 CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
3132 SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
3133 CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
3134
3135 /*
3136 * Boolean indicating if KASLR is active.
3137 */
3138 STATIC int
3139 sysctl_slide
3140 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3141 {
3142 uint32_t slide;
3143
3144 slide = vm_kernel_slide ? 1 : 0;
3145
3146 return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
3147 }
3148
/* kern.slide: read-only boolean; does not reveal the slide value itself. */
SYSCTL_PROC(_kern, OID_AUTO, slide,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_slide, "I", "");
3152
3153 /*
3154 * Limit on total memory users can wire.
3155 *
3156 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
3157 *
3158 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
3159 *
3160 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
3161 * kmem_init().
3162 *
3163 * All values are in bytes.
3164 */
3165
3166 vm_map_size_t vm_global_no_user_wire_amount;
3167 vm_map_size_t vm_global_user_wire_limit;
3168 vm_map_size_t vm_user_wire_limit;
3169
3170 /*
3171 * There needs to be a more automatic/elegant way to do this
3172 */
3173 SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
3174 SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
3175 SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
3176
3177 extern int vm_map_copy_overwrite_aligned_src_not_internal;
3178 extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
3179 extern int vm_map_copy_overwrite_aligned_src_large;
3180 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
3181 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
3182 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");
3183
3184
3185 /*
3186 * enable back trace events for thread blocks
3187 */
3188
3189 extern uint32_t kdebug_thread_block;
3190
3191 SYSCTL_INT (_kern, OID_AUTO, kdebug_thread_block,
3192 CTLFLAG_RW | CTLFLAG_LOCKED, &kdebug_thread_block, 0, "kdebug thread_block");
3193
3194 /*
3195 * Kernel stack size and depth
3196 */
3197 SYSCTL_INT (_kern, OID_AUTO, stack_size,
3198 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
3199 SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
3200 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
3201
3202 /*
3203 * enable back trace for port allocations
3204 */
3205 extern int ipc_portbt;
3206
3207 SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
3208 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
3209 &ipc_portbt, 0, "");
3210
3211 /*
3212 * Scheduler sysctls
3213 */
3214
3215 /*
3216 * See osfmk/kern/sched_prim.c for the corresponding definition
3217 * in osfmk/. If either version changes, update the other.
3218 */
3219 #define SCHED_STRING_MAX_LENGTH (48)
3220
3221 extern char sched_string[SCHED_STRING_MAX_LENGTH];
3222 SYSCTL_STRING(_kern, OID_AUTO, sched,
3223 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
3224 sched_string, sizeof(sched_string),
3225 "Timeshare scheduler implementation");
3226
3227 /*
3228 * Only support runtime modification on embedded platforms
3229 * with development config enabled
3230 */
3231 #if CONFIG_EMBEDDED
3232 #if !SECURE_KERNEL
3233 extern int precise_user_kernel_time;
3234 SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
3235 CTLFLAG_RW | CTLFLAG_LOCKED,
3236 &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
3237 #endif
3238 #endif