bsd/kern/kern_mib.c (xnu-7195.101.1)
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*-
29 * Copyright (c) 1982, 1986, 1989, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Mike Karels at Berkeley Software Design, Inc.
34 *
35 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
36 * project, to make these variables more userfriendly.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 */
68
69 #include <sys/param.h>
70 #include <sys/kernel.h>
71 #include <sys/systm.h>
72 #include <sys/sysctl.h>
73 #include <sys/proc_internal.h>
74 #include <sys/unistd.h>
75
76 #if defined(SMP)
77 #include <machine/smp.h>
78 #endif
79
80 #include <sys/param.h> /* XXX prune includes */
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/malloc.h>
84 #include <sys/proc.h>
85 #include <sys/file_internal.h>
86 #include <sys/vnode.h>
87 #include <sys/unistd.h>
88 #include <sys/ioctl.h>
89 #include <sys/namei.h>
90 #include <sys/tty.h>
91 #include <sys/disklabel.h>
92 #include <sys/vm.h>
93 #include <sys/sysctl.h>
94 #include <sys/user.h>
95 #include <mach/machine.h>
96 #include <mach/mach_types.h>
97 #include <mach/vm_param.h>
98 #include <kern/task.h>
99 #include <vm/vm_kern.h>
100 #include <vm/vm_map.h>
101 #include <vm/vm_protos.h>
102 #include <mach/host_info.h>
103 #include <kern/pms.h>
104 #include <pexpert/device_tree.h>
105 #include <pexpert/pexpert.h>
106 #include <kern/sched_prim.h>
107 #include <console/serial_protos.h>
108
109 extern vm_map_t bsd_pageable_map;
110
111 #include <sys/mount_internal.h>
112 #include <sys/kdebug.h>
113
114 #include <IOKit/IOPlatformExpert.h>
115 #include <pexpert/pexpert.h>
116
117 #include <machine/config.h>
118 #include <machine/machine_routines.h>
119 #include <machine/cpu_capabilities.h>
120
121 #include <mach/mach_host.h> /* for host_info() */
122
123 #if defined(__i386__) || defined(__x86_64__)
124 #include <i386/cpuid.h> /* for cpuid_info() */
125 #endif
126
127 #if defined(__arm__) || defined(__arm64__)
128 #include <arm/cpuid.h> /* for cpuid_info() & cache_info() */
129 #endif
130
131
132 #ifndef MAX
133 #define MAX(a, b) ((a) >= (b) ? (a) : (b))
134 #endif
135
136 /* XXX This should be in a BSD accessible Mach header, but isn't. */
137 extern unsigned int vm_page_wire_count;
138
139 static int cputhreadtype, cpu64bit;
140 static uint64_t cacheconfig[10], cachesize[10];
141 static int packages;
142
143 static char * osenvironment = NULL;
144 static uint32_t osenvironment_size = 0;
145 static int osenvironment_initialized = 0;
146
147 static uint32_t ephemeral_storage = 0;
148 static uint32_t use_recovery_securityd = 0;
149
150 static struct {
151 uint32_t ephemeral_storage:1;
152 uint32_t use_recovery_securityd:1;
153 } property_existence = {0, 0};
154
155 SYSCTL_EXTENSIBLE_NODE(, 0, sysctl, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
156 "Sysctl internal magic");
157 SYSCTL_EXTENSIBLE_NODE(, CTL_KERN, kern, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
158 "High kernel, proc, limits &c");
159 SYSCTL_EXTENSIBLE_NODE(, CTL_VM, vm, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
160 "Virtual memory");
161 SYSCTL_EXTENSIBLE_NODE(, CTL_VFS, vfs, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
162 "File system");
163 SYSCTL_EXTENSIBLE_NODE(, CTL_NET, net, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
164 "Network, (see socket.h)");
165 SYSCTL_EXTENSIBLE_NODE(, CTL_DEBUG, debug, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
166 "Debugging");
167 SYSCTL_NODE(, CTL_HW, hw, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
168 "hardware");
169 SYSCTL_EXTENSIBLE_NODE(, CTL_MACHDEP, machdep, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
170 "machine dependent");
171 SYSCTL_NODE(, CTL_USER, user, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
172 "user-level");
173
174 SYSCTL_NODE(_kern, OID_AUTO, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
175 "bridge");
176
177 #define SYSCTL_RETURN(r, x) SYSCTL_OUT(r, &x, sizeof(x))
178
179 /******************************************************************************
180 * hw.* MIB
181 */
182
183 #define CTLHW_RETQUAD (1U << 31)
184 #define CTLHW_LOCAL (1U << 30)
185
186 #define HW_LOCAL_CPUTHREADTYPE (1 | CTLHW_LOCAL)
187 #define HW_LOCAL_PHYSICALCPU (2 | CTLHW_LOCAL)
188 #define HW_LOCAL_PHYSICALCPUMAX (3 | CTLHW_LOCAL)
189 #define HW_LOCAL_LOGICALCPU (4 | CTLHW_LOCAL)
190 #define HW_LOCAL_LOGICALCPUMAX (5 | CTLHW_LOCAL)
191 #define HW_LOCAL_CPUTYPE (6 | CTLHW_LOCAL)
192 #define HW_LOCAL_CPUSUBTYPE (7 | CTLHW_LOCAL)
193 #define HW_LOCAL_CPUFAMILY (8 | CTLHW_LOCAL)
194 #define HW_LOCAL_CPUSUBFAMILY (9 | CTLHW_LOCAL)
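
/*
 * CTLHW_RETQUAD and CTLHW_LOCAL are OR'd into arg2 when an OID is
 * registered: CTLHW_RETQUAD asks sysctl_hw_generic() for a 64-bit result,
 * and CTLHW_LOCAL keeps the private HW_LOCAL_* selectors from colliding
 * with the public HW_* constants in <sys/sysctl.h>.  For example, the
 * hw.cachelinesize registration further below passes
 * HW_CACHELINE | CTLHW_RETQUAD, while the deprecated
 * hw.cachelinesize_compat registration passes plain HW_CACHELINE and gets
 * the int-sized value.
 */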
195
196
197 /*
198 * Supporting some variables requires us to do "real" work. We
199 * gather some of that here.
200 */
201 static int
202 sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1,
203 int arg2, struct sysctl_req *req)
204 {
205 char dummy[65];
206 int epochTemp;
207 ml_cpu_info_t cpu_info;
208 int val, doquad;
209 long long qval;
210 host_basic_info_data_t hinfo;
211 kern_return_t kret;
212 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
213
214 /*
215 * Test and mask off the 'return quad' flag.
216 * Note that only some things here support it.
217 */
218 doquad = arg2 & CTLHW_RETQUAD;
219 arg2 &= ~CTLHW_RETQUAD;
220
221 ml_cpu_get_info(&cpu_info);
222
223 #define BSD_HOST 1
224 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
225
226 /*
227 * Handle various OIDs.
228 *
229 * OIDs that can return int or quad set val and qval and then break.
230 * Errors and int-only values return inline.
231 */
232 switch (arg2) {
233 case HW_NCPU:
234 if (kret == KERN_SUCCESS) {
235 return SYSCTL_RETURN(req, hinfo.max_cpus);
236 } else {
237 return EINVAL;
238 }
239 case HW_AVAILCPU:
240 if (kret == KERN_SUCCESS) {
241 return SYSCTL_RETURN(req, hinfo.avail_cpus);
242 } else {
243 return EINVAL;
244 }
245 case HW_LOCAL_PHYSICALCPU:
246 if (kret == KERN_SUCCESS) {
247 return SYSCTL_RETURN(req, hinfo.physical_cpu);
248 } else {
249 return EINVAL;
250 }
251 case HW_LOCAL_PHYSICALCPUMAX:
252 if (kret == KERN_SUCCESS) {
253 return SYSCTL_RETURN(req, hinfo.physical_cpu_max);
254 } else {
255 return EINVAL;
256 }
257 case HW_LOCAL_LOGICALCPU:
258 if (kret == KERN_SUCCESS) {
259 return SYSCTL_RETURN(req, hinfo.logical_cpu);
260 } else {
261 return EINVAL;
262 }
263 case HW_LOCAL_LOGICALCPUMAX:
264 if (kret == KERN_SUCCESS) {
265 return SYSCTL_RETURN(req, hinfo.logical_cpu_max);
266 } else {
267 return EINVAL;
268 }
269 case HW_LOCAL_CPUTYPE:
270 if (kret == KERN_SUCCESS) {
271 return SYSCTL_RETURN(req, hinfo.cpu_type);
272 } else {
273 return EINVAL;
274 }
275 case HW_LOCAL_CPUSUBTYPE:
276 if (kret == KERN_SUCCESS) {
277 return SYSCTL_RETURN(req, hinfo.cpu_subtype);
278 } else {
279 return EINVAL;
280 }
281 case HW_LOCAL_CPUFAMILY:
282 {
283 int cpufamily = 0;
284 #if defined (__i386__) || defined (__x86_64__)
285 cpufamily = cpuid_cpufamily();
286 #elif defined(__arm__) || defined(__arm64__)
287 {
288 cpufamily = cpuid_get_cpufamily();
289 }
290 #else
291 #error unknown architecture
292 #endif
293 return SYSCTL_RETURN(req, cpufamily);
294 }
295 case HW_LOCAL_CPUSUBFAMILY:
296 {
297 int cpusubfamily = 0;
298 #if defined (__i386__) || defined (__x86_64__)
299 cpusubfamily = CPUSUBFAMILY_UNKNOWN;
300 #elif defined(__arm__) || defined(__arm64__)
301 {
302 cpusubfamily = cpuid_get_cpusubfamily();
303 }
304 #else
305 #error unknown architecture
306 #endif
307 return SYSCTL_RETURN(req, cpusubfamily);
308 }
309 case HW_PAGESIZE:
310 {
311 vm_map_t map = get_task_map(current_task());
312 val = vm_map_page_size(map);
313 qval = (long long)val;
314 break;
315 }
316 case HW_CACHELINE:
317 val = (int)cpu_info.cache_line_size;
318 qval = (long long)val;
319 break;
320 case HW_L1ICACHESIZE:
321 val = (int)cpu_info.l1_icache_size;
322 qval = (long long)cpu_info.l1_icache_size;
323 break;
324 case HW_L1DCACHESIZE:
325 val = (int)cpu_info.l1_dcache_size;
326 qval = (long long)cpu_info.l1_dcache_size;
327 break;
328 case HW_L2CACHESIZE:
329 if (cpu_info.l2_cache_size == UINT32_MAX) {
330 return EINVAL;
331 }
332 val = (int)cpu_info.l2_cache_size;
333 qval = (long long)cpu_info.l2_cache_size;
334 break;
335 case HW_L3CACHESIZE:
336 if (cpu_info.l3_cache_size == UINT32_MAX) {
337 return EINVAL;
338 }
339 val = (int)cpu_info.l3_cache_size;
340 qval = (long long)cpu_info.l3_cache_size;
341 break;
342 case HW_TARGET:
343 bzero(dummy, sizeof(dummy));
344 if (!PEGetTargetName(dummy, 64)) {
345 return EINVAL;
346 }
347 dummy[64] = 0;
348 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
349 case HW_PRODUCT:
350 bzero(dummy, sizeof(dummy));
351 if (!PEGetProductName(dummy, 64)) {
352 return EINVAL;
353 }
354 dummy[64] = 0;
355 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
356
357 /*
358 * Deprecated variables. We still support these for
359 * backwards compatibility purposes only.
360 */
361 #if XNU_TARGET_OS_OSX && defined(__arm64__)
362 /* The following two are kludged for backward
363 * compatibility. Use hw.product/hw.target for something
364 * consistent instead. */
365
366 case HW_MACHINE:
367 bzero(dummy, sizeof(dummy));
368 if (proc_platform(req->p) == PLATFORM_IOS) {
369 /* iOS-on-Mac processes don't expect the macOS kind of
370 * hw.machine, e.g. "arm64", but are used to seeing
371 * a product string on iOS, which we here hardcode
372 * to return as "iPad8,6" for compatibility.
373 *
374 * Another reason why hw.machine and hw.model are
375 * trouble and hw.target+hw.product should be used
376 * instead.
377 */
378
379 strlcpy(dummy, "iPad8,6", sizeof(dummy));
380 } else {
382 strlcpy(dummy, "arm64", sizeof(dummy));
383 }
384 dummy[64] = 0;
385 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
386 case HW_MODEL:
387 bzero(dummy, sizeof(dummy));
388 if (!PEGetProductName(dummy, 64)) {
389 return EINVAL;
390 }
391 dummy[64] = 0;
392 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
393 #else
394 case HW_MACHINE:
395 bzero(dummy, sizeof(dummy));
396 if (!PEGetMachineName(dummy, 64)) {
397 return EINVAL;
398 }
399 dummy[64] = 0;
400 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
401 case HW_MODEL:
402 bzero(dummy, sizeof(dummy));
403 if (!PEGetModelName(dummy, 64)) {
404 return EINVAL;
405 }
406 dummy[64] = 0;
407 return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
408 #endif
409 case HW_USERMEM:
410 {
411 int usermem = (int)(mem_size - vm_page_wire_count * page_size);
412
413 return SYSCTL_RETURN(req, usermem);
414 }
415 case HW_EPOCH:
416 epochTemp = PEGetPlatformEpoch();
417 if (epochTemp == -1) {
418 return EINVAL;
419 }
420 return SYSCTL_RETURN(req, epochTemp);
421 case HW_VECTORUNIT: {
422 int vector = cpu_info.vector_unit == 0 ? 0 : 1;
423 return SYSCTL_RETURN(req, vector);
424 }
425 case HW_L2SETTINGS:
426 if (cpu_info.l2_cache_size == UINT32_MAX) {
427 return EINVAL;
428 }
429 return SYSCTL_RETURN(req, cpu_info.l2_settings);
430 case HW_L3SETTINGS:
431 if (cpu_info.l3_cache_size == UINT32_MAX) {
432 return EINVAL;
433 }
434 return SYSCTL_RETURN(req, cpu_info.l3_settings);
435 default:
436 return ENOTSUP;
437 }
438 /*
439 * Callers may come to us with either int or quad buffers.
440 */
441 if (doquad) {
442 return SYSCTL_RETURN(req, qval);
443 }
444 return SYSCTL_RETURN(req, val);
445 }
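
/*
 * Illustrative userspace sketch (not part of this file): the OIDs handled
 * above are typically read with sysctlbyname(3).  Fixed-size integers take
 * a single call; variable-length strings such as hw.model use the usual
 * two-call pattern to size the buffer first.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int ncpu;
 *	size_t len = sizeof(ncpu);
 *	if (sysctlbyname("hw.ncpu", &ncpu, &len, NULL, 0) == 0)
 *		printf("hw.ncpu = %d\n", ncpu);
 *
 *	len = 0;
 *	sysctlbyname("hw.model", NULL, &len, NULL, 0);	query the length first
 *	char *model = malloc(len);
 *	if (model != NULL &&
 *	    sysctlbyname("hw.model", model, &len, NULL, 0) == 0)
 *		printf("hw.model = %s\n", model);
 *	free(model);
 */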
446
447 /* hw.pagesize and hw.tbfrequency are expected as 64 bit values */
448 static int
449 sysctl_pagesize
450 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
451 {
452 vm_map_t map = get_task_map(current_task());
453 long long l = vm_map_page_size(map);
454 return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
455 }
456
457 static int
458 sysctl_pagesize32
459 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
460 {
461 long long l;
462 #if __arm64__
463 l = (long long) (1 << page_shift_user32);
464 #else /* __arm64__ */
465 l = (long long) PAGE_SIZE;
466 #endif /* __arm64__ */
467 return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
468 }
469
470 static int
471 sysctl_tbfrequency
472 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
473 {
474 long long l = gPEClockFrequencyInfo.timebase_frequency_hz;
475 return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
476 }
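
/*
 * Illustrative userspace read (not part of this file): as noted above,
 * hw.pagesize and hw.tbfrequency are expected as 64-bit values, so they
 * should be fetched into int64_t-sized buffers.
 *
 *	int64_t pagesize, tbfreq;
 *	size_t len = sizeof(pagesize);
 *	sysctlbyname("hw.pagesize", &pagesize, &len, NULL, 0);
 *	len = sizeof(tbfreq);
 *	sysctlbyname("hw.tbfrequency", &tbfreq, &len, NULL, 0);
 */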
477
478 /*
479 * Called by IOKit on Intel, or by sysctl_load_devicetree_entries()
480 */
481 void
482 sysctl_set_osenvironment(unsigned int size, const void* value)
483 {
484 if (osenvironment_size == 0 && size > 0) {
485 osenvironment = zalloc_permanent(size, ZALIGN_NONE);
486 if (osenvironment) {
487 memcpy(osenvironment, value, size);
488 osenvironment_size = size;
489 }
490 }
491 }
492
493 void
494 sysctl_unblock_osenvironment(void)
495 {
496 os_atomic_inc(&osenvironment_initialized, relaxed);
497 thread_wakeup((event_t) &osenvironment_initialized);
498 }
499
500 /*
501 * Create sysctl entries coming from device tree.
502 *
503 * Entries from device tree are loaded here because SecureDTLookupEntry() only works before
504 * PE_init_iokit(). Doing this also avoids the extern-C hackery to access these entries
505 * from IORegistry (which requires C++).
506 */
507 __startup_func
508 static void
509 sysctl_load_devicetree_entries(void)
510 {
511 DTEntry chosen;
512 void const *value;
513 unsigned int size;
514
515 if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
516 return;
517 }
518
519 /* load osenvironment */
520 if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &value, &size)) {
521 sysctl_set_osenvironment(size, value);
522 }
523
524 /* load ephemeral_storage */
525 if (kSuccess == SecureDTGetProperty(chosen, "ephemeral-storage", (void const **) &value, &size)) {
526 if (size == sizeof(uint32_t)) {
527 ephemeral_storage = *(uint32_t const *)value;
528 property_existence.ephemeral_storage = 1;
529 }
530 }
531
532 /* load use_recovery_securityd */
533 if (kSuccess == SecureDTGetProperty(chosen, "use-recovery-securityd", (void const **) &value, &size)) {
534 if (size == sizeof(uint32_t)) {
535 use_recovery_securityd = *(uint32_t const *)value;
536 property_existence.use_recovery_securityd = 1;
537 }
538 }
539 }
540 STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_devicetree_entries);
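
/*
 * A minimal sketch of how an additional /chosen property could be surfaced
 * the same way.  The property name and variable here are hypothetical and
 * only illustrate the SecureDTGetProperty() pattern used above:
 *
 *	static uint32_t example_value;	hypothetical backing variable
 *
 *	if (kSuccess == SecureDTGetProperty(chosen, "example-property",
 *	    (void const **) &value, &size)) {
 *		if (size == sizeof(uint32_t)) {
 *			example_value = *(uint32_t const *)value;
 *		}
 *	}
 */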
541
542 static int
543 sysctl_osenvironment
544 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
545 {
546 #if defined(__x86_64__)
547 #if (DEVELOPMENT || DEBUG)
548 if (os_atomic_load(&osenvironment_initialized, relaxed) == 0) {
549 assert_wait((event_t) &osenvironment_initialized, THREAD_UNINT);
550 if (os_atomic_load(&osenvironment_initialized, relaxed) != 0) {
551 clear_wait(current_thread(), THREAD_AWAKENED);
552 } else {
553 (void) thread_block(THREAD_CONTINUE_NULL);
554 }
555 }
556 #endif
557 #endif
558 if (osenvironment_size > 0) {
559 return SYSCTL_OUT(req, osenvironment, osenvironment_size);
560 } else {
561 return EINVAL;
562 }
563 }
564
565 static int
566 sysctl_ephemeral_storage
567 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
568 {
569 if (property_existence.ephemeral_storage) {
570 return SYSCTL_OUT(req, &ephemeral_storage, sizeof(ephemeral_storage));
571 } else {
572 return EINVAL;
573 }
574 }
575
576 static int
577 sysctl_use_recovery_securityd
578 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
579 {
580 if (property_existence.use_recovery_securityd) {
581 return SYSCTL_OUT(req, &use_recovery_securityd, sizeof(use_recovery_securityd));
582 } else {
583 return EINVAL;
584 }
585 }
586
587 static int
588 sysctl_use_kernelmanagerd
589 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
590 {
591 #if CONFIG_ARROW
592 static int use_kernelmanagerd = 1;
593 #else
594 static int use_kernelmanagerd = 0;
595 #endif
596 static bool once = false;
597
598 if (!once) {
599 kc_format_t kc_format;
600 PE_get_primary_kc_format(&kc_format);
601 if (kc_format == KCFormatFileset) {
602 use_kernelmanagerd = 1;
603 } else {
604 PE_parse_boot_argn("kernelmanagerd", &use_kernelmanagerd, sizeof(use_kernelmanagerd));
605 }
606 once = true;
607 }
608 return SYSCTL_OUT(req, &use_kernelmanagerd, sizeof(use_kernelmanagerd));
609 }
610
611 #define HW_LOCAL_FREQUENCY 1
612 #define HW_LOCAL_FREQUENCY_MIN 2
613 #define HW_LOCAL_FREQUENCY_MAX 3
614 #define HW_LOCAL_FREQUENCY_CLOCK_RATE 4
615
616 static int
617 sysctl_bus_frequency
618 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
619 {
620
621 #if DEBUG || DEVELOPMENT || (!defined(__arm__) && !defined(__arm64__))
622 switch (arg2) {
623 case HW_LOCAL_FREQUENCY:
624 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_hz);
625 case HW_LOCAL_FREQUENCY_MIN:
626 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_min_hz);
627 case HW_LOCAL_FREQUENCY_MAX:
628 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_max_hz);
629 case HW_LOCAL_FREQUENCY_CLOCK_RATE:
630 return SYSCTL_OUT(req, &gPEClockFrequencyInfo.bus_clock_rate_hz, sizeof(int));
631 default:
632 return EINVAL;
633 }
634 #else
635 return ENOENT;
636 #endif
637 }
638
639 static int
640 sysctl_cpu_frequency
641 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
642 {
643
644 #if DEBUG || DEVELOPMENT || (!defined(__arm__) && !defined(__arm64__))
645 switch (arg2) {
646 case HW_LOCAL_FREQUENCY:
647 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_hz);
648 case HW_LOCAL_FREQUENCY_MIN:
649 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_min_hz);
650 case HW_LOCAL_FREQUENCY_MAX:
651 return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_max_hz);
652 case HW_LOCAL_FREQUENCY_CLOCK_RATE:
653 return SYSCTL_OUT(req, &gPEClockFrequencyInfo.cpu_clock_rate_hz, sizeof(int));
654 default:
655 return EINVAL;
656 }
657 #else
658 return ENOENT;
659 #endif
660 }
661
662 /*
663 * This sysctl will signal to userspace that a serial console is desired:
664 *
665 * hw.serialdebugmode = 1 will load the serial console job in the multi-user session;
666 * hw.serialdebugmode = 2 will load the serial console job in the base system as well
667 */
668 static int
669 sysctl_serialdebugmode
670 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
671 {
672 uint32_t serial_boot_arg;
673 int serialdebugmode = 0;
674
675 if (PE_parse_boot_argn("serial", &serial_boot_arg, sizeof(serial_boot_arg)) &&
676 (serial_boot_arg & SERIALMODE_OUTPUT) && (serial_boot_arg & SERIALMODE_INPUT)) {
677 serialdebugmode = (serial_boot_arg & SERIALMODE_BASE_TTY) ? 2 : 1;
678 }
679
680 return sysctl_io_number(req, serialdebugmode, sizeof(serialdebugmode), NULL, NULL);
681 }
682
683 /*
684 * hw.* MIB variables.
685 */
686 SYSCTL_PROC(_hw, HW_NCPU, ncpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_NCPU, sysctl_hw_generic, "I", "");
687 SYSCTL_PROC(_hw, HW_AVAILCPU, activecpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_AVAILCPU, sysctl_hw_generic, "I", "");
688 SYSCTL_PROC(_hw, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPU, sysctl_hw_generic, "I", "");
689 SYSCTL_PROC(_hw, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
690 SYSCTL_PROC(_hw, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPU, sysctl_hw_generic, "I", "");
691 SYSCTL_PROC(_hw, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
692 SYSCTL_INT(_hw, HW_BYTEORDER, byteorder, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, BYTE_ORDER, "");
693 SYSCTL_PROC(_hw, OID_AUTO, cputype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUTYPE, sysctl_hw_generic, "I", "");
694 SYSCTL_PROC(_hw, OID_AUTO, cpusubtype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBTYPE, sysctl_hw_generic, "I", "");
695 SYSCTL_INT(_hw, OID_AUTO, cpu64bit_capable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpu64bit, 0, "");
696 SYSCTL_PROC(_hw, OID_AUTO, cpufamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUFAMILY, sysctl_hw_generic, "I", "");
697 SYSCTL_PROC(_hw, OID_AUTO, cpusubfamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBFAMILY, sysctl_hw_generic, "I", "");
698 SYSCTL_OPAQUE(_hw, OID_AUTO, cacheconfig, CTLFLAG_RD | CTLFLAG_LOCKED, &cacheconfig, sizeof(cacheconfig), "Q", "");
699 SYSCTL_OPAQUE(_hw, OID_AUTO, cachesize, CTLFLAG_RD | CTLFLAG_LOCKED, &cachesize, sizeof(cachesize), "Q", "");
700 SYSCTL_PROC(_hw, OID_AUTO, pagesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize, "Q", "");
701 SYSCTL_PROC(_hw, OID_AUTO, pagesize32, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize32, "Q", "");
702 SYSCTL_PROC(_hw, OID_AUTO, busfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_bus_frequency, "Q", "");
703 SYSCTL_PROC(_hw, OID_AUTO, busfrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_bus_frequency, "Q", "");
704 SYSCTL_PROC(_hw, OID_AUTO, busfrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_bus_frequency, "Q", "");
705 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_cpu_frequency, "Q", "");
706 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_cpu_frequency, "Q", "");
707 SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_cpu_frequency, "Q", "");
708 SYSCTL_PROC(_hw, OID_AUTO, cachelinesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_CACHELINE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
709 SYSCTL_PROC(_hw, OID_AUTO, l1icachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
710 SYSCTL_PROC(_hw, OID_AUTO, l1dcachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
711 SYSCTL_PROC(_hw, OID_AUTO, l2cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
712 SYSCTL_PROC(_hw, OID_AUTO, l3cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
713 #if (defined(__arm__) || defined(__arm64__)) && (DEBUG || DEVELOPMENT)
714 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_hz, "");
715 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_min_hz, "");
716 SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_max_hz, "");
717 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_hz, "");
718 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_min_hz, "");
719 SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_max_hz, "");
720 SYSCTL_QUAD(_hw, OID_AUTO, fixfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.fix_frequency_hz, "");
721 #endif /* (__arm__ || __arm64__) && (DEBUG || DEVELOPMENT) */
722 SYSCTL_PROC(_hw, OID_AUTO, tbfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_tbfrequency, "Q", "");
723 #if XNU_TARGET_OS_OSX
724 SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem_actual, "");
725 #else
726 SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, "");
727 #endif /* XNU_TARGET_OS_OSX */
728 SYSCTL_INT(_hw, OID_AUTO, packages, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &packages, 0, "");
729 SYSCTL_PROC(_hw, OID_AUTO, osenvironment, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_osenvironment, "A", "");
730 SYSCTL_PROC(_hw, OID_AUTO, ephemeral_storage, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_ephemeral_storage, "I", "");
731 SYSCTL_PROC(_hw, OID_AUTO, use_recovery_securityd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_recovery_securityd, "I", "");
732 SYSCTL_PROC(_hw, OID_AUTO, use_kernelmanagerd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_kernelmanagerd, "I", "");
733 SYSCTL_PROC(_hw, OID_AUTO, serialdebugmode, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_serialdebugmode, "I", "");
734
735 /*
736 * Optional CPU features can register nodes below hw.optional.
737 *
738 * If the feature is not present, the node should either not be registered,
739 * or it should return 0. If the feature is present, the node should return
740 * 1.
741 */
742 SYSCTL_NODE(_hw, OID_AUTO, optional, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features");
743
744 SYSCTL_INT(_hw_optional, OID_AUTO, floatingpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, 1, ""); /* always set */
745
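/*
 * Illustrative userspace check (not part of this file): per the convention
 * above, an absent hw.optional.* OID should be treated the same as a value
 * of 0.
 *
 *	int feature = 0;
 *	size_t len = sizeof(feature);
 *	if (sysctlbyname("hw.optional.armv8_2_sha512", &feature, &len,
 *	    NULL, 0) != 0)
 *		feature = 0;	OID missing: feature not present
 */
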
746 /*
747 * Optional device hardware features can be registered by drivers below hw.features
748 */
749 SYSCTL_EXTENSIBLE_NODE(_hw, OID_AUTO, features, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "hardware features");
750
751 /*
752 * Deprecated variables. These are supported for backwards compatibility
753 * purposes only. The MASKED flag requests that the variables not be
754 * printed by sysctl(8) and similar utilities.
755 *
756 * The variables named *_compat here are int-sized versions of variables
757 * that are now exported as quads. The int-sized versions are normally
758 * looked up only by number, whereas the quad-sized versions should be
759 * looked up by name.
760 *
761 * The *_compat nodes are *NOT* visible within the kernel.
762 */
763
764 SYSCTL_PROC(_hw, HW_PAGESIZE, pagesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PAGESIZE, sysctl_hw_generic, "I", "");
765 SYSCTL_PROC(_hw, HW_BUS_FREQ, busfrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_bus_frequency, "I", "");
766 SYSCTL_PROC(_hw, HW_CPU_FREQ, cpufrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_cpu_frequency, "I", "");
767 SYSCTL_PROC(_hw, HW_CACHELINE, cachelinesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_CACHELINE, sysctl_hw_generic, "I", "");
768 SYSCTL_PROC(_hw, HW_L1ICACHESIZE, l1icachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE, sysctl_hw_generic, "I", "");
769 SYSCTL_PROC(_hw, HW_L1DCACHESIZE, l1dcachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE, sysctl_hw_generic, "I", "");
770 SYSCTL_PROC(_hw, HW_L2CACHESIZE, l2cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE, sysctl_hw_generic, "I", "");
771 SYSCTL_PROC(_hw, HW_L3CACHESIZE, l3cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE, sysctl_hw_generic, "I", "");
772 SYSCTL_COMPAT_INT(_hw, HW_TB_FREQ, tbfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.timebase_frequency_hz, 0, "");
773 SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MACHINE, sysctl_hw_generic, "A", "");
774 SYSCTL_PROC(_hw, HW_MODEL, model, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MODEL, sysctl_hw_generic, "A", "");
775 SYSCTL_PROC(_hw, HW_TARGET, target, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_TARGET, sysctl_hw_generic, "A", "");
776 SYSCTL_PROC(_hw, HW_PRODUCT, product, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PRODUCT, sysctl_hw_generic, "A", "");
777 SYSCTL_COMPAT_UINT(_hw, HW_PHYSMEM, physmem, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &mem_size, 0, "");
778 SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_USERMEM, sysctl_hw_generic, "I", "");
779 SYSCTL_PROC(_hw, HW_EPOCH, epoch, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_EPOCH, sysctl_hw_generic, "I", "");
780 SYSCTL_PROC(_hw, HW_VECTORUNIT, vectorunit, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_VECTORUNIT, sysctl_hw_generic, "I", "");
781 SYSCTL_PROC(_hw, HW_L2SETTINGS, l2settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2SETTINGS, sysctl_hw_generic, "I", "");
782 SYSCTL_PROC(_hw, HW_L3SETTINGS, l3settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3SETTINGS, sysctl_hw_generic, "I", "");
783 SYSCTL_INT(_hw, OID_AUTO, cputhreadtype, CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputhreadtype, 0, "");
784
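/*
 * Illustrative contrast (userspace, not part of this file): the *_compat
 * OIDs above answer legacy numeric-MIB lookups with int-sized values,
 * while the corresponding named OIDs return 64-bit quantities.
 *
 *	int mib[2] = { CTL_HW, HW_CACHELINE };
 *	int line32;
 *	size_t len = sizeof(line32);
 *	sysctl(mib, 2, &line32, &len, NULL, 0);		legacy int lookup
 *
 *	long long line64;
 *	len = sizeof(line64);
 *	sysctlbyname("hw.cachelinesize", &line64, &len, NULL, 0);	named quad
 */
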
785 #if defined(__i386__) || defined(__x86_64__) || CONFIG_X86_64_COMPAT
786 static int
787 sysctl_cpu_capability
788 (__unused struct sysctl_oid *oidp, void *arg1, __unused int arg2, struct sysctl_req *req)
789 {
790 uint64_t caps;
791 caps = _get_cpu_capabilities();
792
793 uint64_t mask = (uint64_t) (uintptr_t) arg1;
794 boolean_t is_capable = (caps & mask) != 0;
795
796 return SYSCTL_OUT(req, &is_capable, sizeof(is_capable));
797 }
798 #define capability(name) name
799
800
801 SYSCTL_PROC(_hw_optional, OID_AUTO, mmx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMMX), 0, sysctl_cpu_capability, "I", "");
802 SYSCTL_PROC(_hw_optional, OID_AUTO, sse, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE), 0, sysctl_cpu_capability, "I", "");
803 SYSCTL_PROC(_hw_optional, OID_AUTO, sse2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE2), 0, sysctl_cpu_capability, "I", "");
804 SYSCTL_PROC(_hw_optional, OID_AUTO, sse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE3), 0, sysctl_cpu_capability, "I", "");
805 SYSCTL_PROC(_hw_optional, OID_AUTO, supplementalsse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSupplementalSSE3), 0, sysctl_cpu_capability, "I", "");
806 SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_1), 0, sysctl_cpu_capability, "I", "");
807 SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_2), 0, sysctl_cpu_capability, "I", "");
808 /* "x86_64" is actually a preprocessor symbol on the x86_64 kernel, so we have to hack this */
809 #undef x86_64
810 SYSCTL_PROC(_hw_optional, OID_AUTO, x86_64, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(k64Bit), 0, sysctl_cpu_capability, "I", "");
811 SYSCTL_PROC(_hw_optional, OID_AUTO, aes, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAES), 0, sysctl_cpu_capability, "I", "");
812 SYSCTL_PROC(_hw_optional, OID_AUTO, avx1_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX1_0), 0, sysctl_cpu_capability, "I", "");
813 SYSCTL_PROC(_hw_optional, OID_AUTO, rdrand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRDRAND), 0, sysctl_cpu_capability, "I", "");
814 SYSCTL_PROC(_hw_optional, OID_AUTO, f16c, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasF16C), 0, sysctl_cpu_capability, "I", "");
815 SYSCTL_PROC(_hw_optional, OID_AUTO, enfstrg, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasENFSTRG), 0, sysctl_cpu_capability, "I", "");
816 SYSCTL_PROC(_hw_optional, OID_AUTO, fma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasFMA), 0, sysctl_cpu_capability, "I", "");
817 SYSCTL_PROC(_hw_optional, OID_AUTO, avx2_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX2_0), 0, sysctl_cpu_capability, "I", "");
818 SYSCTL_PROC(_hw_optional, OID_AUTO, bmi1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI1), 0, sysctl_cpu_capability, "I", "");
819 SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI2), 0, sysctl_cpu_capability, "I", "");
820 SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRTM), 0, sysctl_cpu_capability, "I", "");
821 SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasHLE), 0, sysctl_cpu_capability, "I", "");
822 SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasADX), 0, sysctl_cpu_capability, "I", "");
823 SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMPX), 0, sysctl_cpu_capability, "I", "");
824 SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSGX), 0, sysctl_cpu_capability, "I", "");
825 SYSCTL_PROC(_hw_optional, OID_AUTO, avx512f, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512F), 0, sysctl_cpu_capability, "I", "");
826 SYSCTL_PROC(_hw_optional, OID_AUTO, avx512cd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512CD), 0, sysctl_cpu_capability, "I", "");
827 SYSCTL_PROC(_hw_optional, OID_AUTO, avx512dq, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512DQ), 0, sysctl_cpu_capability, "I", "");
828 SYSCTL_PROC(_hw_optional, OID_AUTO, avx512bw, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512BW), 0, sysctl_cpu_capability, "I", "");
829 SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vl, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VL), 0, sysctl_cpu_capability, "I", "");
830 SYSCTL_PROC(_hw_optional, OID_AUTO, avx512ifma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512IFMA), 0, sysctl_cpu_capability, "I", "");
831 SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vbmi, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VBMI), 0, sysctl_cpu_capability, "I", "");
832 #undef capability
833 #endif /* __i386__ || __x86_64__ || CONFIG_X86_64_COMPAT */
834
835 #if defined (__arm__) || defined (__arm64__)
836 int watchpoint_flag = 0;
837 int breakpoint_flag = 0;
838 int gNeon = 0;
839 int gNeonHpfp = 0;
840 int gNeonFp16 = 0;
841 int gARMv81Atomics = 0;
842 int gARMv8Crc32 = 0;
843 int gARMv82FHM = 0;
844 int gARMv82SHA512 = 0;
845 int gARMv82SHA3 = 0;
846
847 #if defined (__arm__)
848 int arm64_flag = 0;
849 #elif defined (__arm64__) /* end __arm__*/
850 int arm64_flag = 1;
851 #else /* end __arm64__*/
852 int arm64_flag = 0;
853 #endif
854
855 SYSCTL_INT(_hw_optional, OID_AUTO, watchpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &watchpoint_flag, 0, "");
856 SYSCTL_INT(_hw_optional, OID_AUTO, breakpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &breakpoint_flag, 0, "");
857 SYSCTL_INT(_hw_optional, OID_AUTO, neon, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gNeon, 0, "");
858 SYSCTL_INT(_hw_optional, OID_AUTO, neon_hpfp, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gNeonHpfp, 0, "");
859 SYSCTL_INT(_hw_optional, OID_AUTO, neon_fp16, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gNeonFp16, 0, "");
860 SYSCTL_INT(_hw_optional, OID_AUTO, armv8_1_atomics, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv81Atomics, 0, "");
861 SYSCTL_INT(_hw_optional, OID_AUTO, armv8_crc32, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv8Crc32, 0, "");
862 SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_fhm, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv82FHM, 0, "");
863 SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_sha512, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv82SHA512, 0, "");
864 SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_sha3, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv82SHA3, 0, "");
865
866 #if DEBUG || DEVELOPMENT
867 #if __ARM_KERNEL_PROTECT__
868 static int arm_kernel_protect = 1;
869 #else
870 static int arm_kernel_protect = 0;
871 #endif
872 SYSCTL_INT(_hw_optional, OID_AUTO, arm_kernel_protect, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm_kernel_protect, 0, "");
873 #endif
874
875 #if DEBUG || DEVELOPMENT
876 #if __ARM_WKDM_POPCNT__
877 static int wkdm_popcount = 1;
878 #else
879 static int wkdm_popcount = 0;
880 #endif
881 SYSCTL_INT(_hw_optional, OID_AUTO, wkdm_popcount, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &wkdm_popcount, 0, "");
882 #endif
883
884 #if DEBUG || DEVELOPMENT
885 #if __has_feature(ptrauth_calls)
886 static int ptrauth = 1;
887 #else
888 static int ptrauth = 0;
889 #endif
890 SYSCTL_INT(_hw_optional, OID_AUTO, ptrauth, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ptrauth, 0, "");
891 #endif
892
893 /*
894 * Without this little ifdef dance, the preprocessor replaces "arm64" with "1",
895 * leaving us with a less-than-helpful sysctl.hwoptional.1.
896 */
897 #ifdef arm64
898 #undef arm64
899 SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
900 #define arm64 1
901 #else
902 SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
903 #endif
904 #endif /* __arm__ || __arm64__ */
905
906
907 /******************************************************************************
908 * Generic MIB initialisation.
909 *
910 * This is a hack, and should be replaced with SYSINITs
911 * at some point.
912 */
913 void
914 sysctl_mib_init(void)
915 {
916 #if defined(__i386__) || defined (__x86_64__)
917 cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
918 #elif defined(__arm__) || defined (__arm64__)
919 cpu64bit = (cpu_type() & CPU_ARCH_ABI64) == CPU_ARCH_ABI64;
920 #else
921 #error Unsupported arch
922 #endif
923
924 #if defined (__i386__) || defined (__x86_64__)
925 /* hw.cacheconfig */
926 cacheconfig[0] = ml_cpu_cache_sharing(0);
927 cacheconfig[1] = ml_cpu_cache_sharing(1);
928 cacheconfig[2] = ml_cpu_cache_sharing(2);
929 cacheconfig[3] = ml_cpu_cache_sharing(3);
930 cacheconfig[4] = 0;
931
932 /* hw.cachesize */
933 cachesize[0] = ml_cpu_cache_size(0);
934 cachesize[1] = ml_cpu_cache_size(1);
935 cachesize[2] = ml_cpu_cache_size(2);
936 cachesize[3] = ml_cpu_cache_size(3);
937 cachesize[4] = 0;
938
939 /* hw.packages */
940 packages = (int)(roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count)
941 / cpuid_info()->thread_count);
942
943 #elif defined(__arm__) || defined(__arm64__) /* end __i386 */
944 watchpoint_flag = arm_debug_info()->num_watchpoint_pairs;
945 breakpoint_flag = arm_debug_info()->num_breakpoint_pairs;
946
947 arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
948 gNeon = mvfp_info->neon;
949 gNeonHpfp = mvfp_info->neon_hpfp;
950 gNeonFp16 = mvfp_info->neon_fp16;
951
952 cacheconfig[0] = ml_wait_max_cpus();
953 cacheconfig[1] = 1;
954 cacheconfig[2] = cache_info()->c_l2size ? 1:0;
955 cacheconfig[3] = 0;
956 cacheconfig[4] = 0;
957 cacheconfig[5] = 0;
958 cacheconfig[6] = 0;
959
960 cachesize[0] = ml_get_machine_mem();
961 cachesize[1] = cache_info()->c_dsize; /* Using the DCache */
962 cachesize[2] = cache_info()->c_l2size;
963 cachesize[3] = 0;
964 cachesize[4] = 0;
965
966 packages = 1;
967 #else
968 #error unknown architecture
969 #endif /* __i386__ || __x86_64__ || __arm__ || __arm64__ */
970 }
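
/*
 * Illustrative userspace read of the opaque hw.cacheconfig array populated
 * above (not part of this file):
 *
 *	uint64_t cfg[10];
 *	size_t len = sizeof(cfg);
 *	if (sysctlbyname("hw.cacheconfig", cfg, &len, NULL, 0) == 0) {
 *		for (size_t i = 0; i < len / sizeof(cfg[0]); i++)
 *			printf("cacheconfig[%zu] = %llu\n", i,
 *			    (unsigned long long)cfg[i]);
 *	}
 */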
971
972 __startup_func
973 static void
974 sysctl_mib_startup(void)
975 {
976 cputhreadtype = cpu_threadtype();
977
978 /*
979 * Populate the optional portion of the hw.* MIB.
980 *
981 * XXX This could be broken out into parts of the code
982 * that actually directly relate to the functions in
983 * question.
984 */
985
986 if (cputhreadtype != CPU_THREADTYPE_NONE) {
987 sysctl_register_oid_early(&sysctl__hw_cputhreadtype);
988 }
989
990 }
991 STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_mib_startup);