+#endif /* DEVELOPMENT || DEBUG */
+
+/*
+ * Enable tracing of voucher contents
+ */
+extern uint32_t ipc_voucher_trace_contents;
+
+SYSCTL_INT(_kern, OID_AUTO, ipc_voucher_trace_contents,
+ CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
+
+/*
+ * Kernel stack size and depth
+ */
+/* NOTE(review): the (int *) casts assume kernel_stack_size and
+ * kernel_stack_depth_max are safe to export as int-sized reads — confirm
+ * against their declarations. */
+SYSCTL_INT(_kern, OID_AUTO, stack_size,
+ CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
+SYSCTL_INT(_kern, OID_AUTO, stack_depth_max,
+ CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
+
+/* Read-only mask of kernel features overridden at boot. */
+extern unsigned int kern_feature_overrides;
+SYSCTL_INT(_kern, OID_AUTO, kern_feature_overrides,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask");
+
+/*
+ * enable back trace for port allocations
+ */
+extern int ipc_portbt;
+
+SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
+ CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ &ipc_portbt, 0, "");
+
+/*
+ * Scheduler sysctls
+ */
+
+/* Read-only name of the active timeshare scheduler implementation. */
+SYSCTL_STRING(_kern, OID_AUTO, sched,
+ CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ sched_string, sizeof(sched_string),
+ "Timeshare scheduler implementation");
+
+#if CONFIG_QUIESCE_COUNTER
+/*
+ * Handler for kern.cpu_checkin_interval: reports the quiescent-counter
+ * minimum CPU checkin interval in microseconds and, on write, installs the
+ * new value via cpu_quiescent_counter_set_min_interval_us().
+ */
+static int
+sysctl_cpu_quiescent_counter_interval SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+
+ uint32_t local_min_interval_us = cpu_quiescent_counter_get_min_interval_us();
+
+ /* sysctl_handle_int() copies the old value out and any new value in;
+ * bail if it failed or this request is read-only (no newptr). */
+ int error = sysctl_handle_int(oidp, &local_min_interval_us, 0, req);
+ if (error || !req->newptr) {
+ return error;
+ }
+
+ cpu_quiescent_counter_set_min_interval_us(local_min_interval_us);
+
+ return 0;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, cpu_checkin_interval,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ sysctl_cpu_quiescent_counter_interval, "I",
+ "Quiescent CPU checkin interval (microseconds)");
+#endif /* CONFIG_QUIESCE_COUNTER */
+
+
+/*
+ * Only support runtime modification on embedded platforms
+ * with development config enabled
+ */
+#if CONFIG_EMBEDDED
+#if !SECURE_KERNEL
+/* When non-zero, user vs. kernel CPU time is accounted precisely at each
+ * boundary crossing; writable only on non-secure embedded kernels. */
+extern int precise_user_kernel_time;
+SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
+ CTLFLAG_RW | CTLFLAG_LOCKED,
+ &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
+#endif
+#endif
+
+
+/* Parameters related to timer coalescing tuning, to be replaced
+ * with a dedicated system call in the future.
+ */
+/* Enable processing pending timers in the context of any other interrupt.
+ * Coalescing tuning parameters for various thread/task attributes follow. */
+/*
+ * Generic handler for timer-coalescing tunables: exposes an abstime-backed
+ * kernel field (arg1; arg2 gives its width in bytes) to userspace in
+ * nanoseconds, converting in both directions.
+ */
+STATIC int
+sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp)
+ int size = arg2; /* width in bytes of the backing field at arg1 */
+ int error;
+ int changed = 0;
+ uint64_t old_value_ns;
+ uint64_t new_value_ns;
+ uint64_t value_abstime;
+ if (size == sizeof(uint32_t)) {
+ value_abstime = *((uint32_t *)arg1);
+ } else if (size == sizeof(uint64_t)) {
+ value_abstime = *((uint64_t *)arg1);
+ } else {
+ return ENOTSUP;
+ }
+
+ /* Userspace sees nanoseconds; the kernel stores absolute-time units. */
+ absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
+ error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
+ if ((error) || (!changed)) {
+ return error;
+ }
+
+ /* Write path: convert the new nanosecond value back to abstime and store
+ * it at the same width it was read. */
+ nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
+ if (size == sizeof(uint32_t)) {
+ *((uint32_t *)arg1) = (uint32_t)value_abstime;
+ } else {
+ *((uint64_t *)arg1) = value_abstime;
+ }
+ return error;
+}
+
+/*
+ * Timer coalescing tunables: for each thread/task class (bg, kt, fp, ts) and
+ * each latency QoS tier (0-5), a "scale" shift and an "ns_max" cap.  The
+ * *_ns_max entries are stored in abstime and converted to/from nanoseconds by
+ * sysctl_timer_user_us_kernel_abstime.
+ */
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
+SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
+ CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.timer_resort_threshold_abstime,
+ sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
+ sysctl_timer_user_us_kernel_abstime,
+ "Q", "");
+SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
+ CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.timer_coalesce_bg_abstime_max,
+ sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
+ sysctl_timer_user_us_kernel_abstime,
+ "Q", "");
+
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");
+
+SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
+ CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.timer_coalesce_kt_abstime_max,
+ sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
+ sysctl_timer_user_us_kernel_abstime,
+ "Q", "");
+
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");
+
+SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
+ CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.timer_coalesce_fp_abstime_max,
+ sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
+ sysctl_timer_user_us_kernel_abstime,
+ "Q", "");
+
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");
+
+SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
+ CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.timer_coalesce_ts_abstime_max,
+ sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
+ sysctl_timer_user_us_kernel_abstime,
+ "Q", "");
+
+/* Per latency-QoS-tier coalescing parameters (tiers 0-5). */
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_scale[0], 0, "");
+
+SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
+ CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_abstime_max[0],
+ sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
+ sysctl_timer_user_us_kernel_abstime,
+ "Q", "");
+
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_scale[1], 0, "");
+
+SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
+ CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_abstime_max[1],
+ sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
+ sysctl_timer_user_us_kernel_abstime,
+ "Q", "");
+
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_scale[2], 0, "");
+
+SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
+ CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_abstime_max[2],
+ sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
+ sysctl_timer_user_us_kernel_abstime,
+ "Q", "");
+
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_scale[3], 0, "");
+
+SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
+ CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_abstime_max[3],
+ sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
+ sysctl_timer_user_us_kernel_abstime,
+ "Q", "");
+
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_scale[4], 0, "");
+
+SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
+ CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_abstime_max[4],
+ sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
+ sysctl_timer_user_us_kernel_abstime,
+ "Q", "");
+
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_scale[5], 0, "");
+
+SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
+ CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcoal_prio_params.latency_qos_abstime_max[5],
+ sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
+ sysctl_timer_user_us_kernel_abstime,
+ "Q", "");
+/* Communicate the "user idle level" heuristic to the timer layer, and
+ * potentially other layers in the future.
+ */
+
+/*
+ * Handler for machdep.user_idle_level: reads the current user-idle heuristic
+ * from the timer layer; on write, pushes the new level down and maps a
+ * rejection (non-KERN_SUCCESS) to ERANGE.
+ */
+static int
+timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int new_value = 0, old_value = 0, changed = 0, error;
+
+ old_value = timer_get_user_idle_level();
+
+ error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
+
+ if (error == 0 && changed) {
+ if (timer_set_user_idle_level(new_value) != KERN_SUCCESS) {
+ error = ERANGE;
+ }
+ }
+
+ return error;
+}
+
+SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ timer_user_idle_level, "I", "User idle level heuristic, 0-128");
+
+#if HYPERVISOR
+/* Read-only flag: whether hypervisor support is available on this system. */
+SYSCTL_INT(_kern, OID_AUTO, hv_support,
+ CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
+ &hv_support_available, 0, "");
+#endif
+
+#if CONFIG_EMBEDDED
+/*
+ * Handler for kern.darkboot: reads return the current darkboot flag; writes
+ * require the PRIV_DARKBOOT entitlement and accept UNSET / SET /
+ * SET_PERSISTENT requests, the last persisting the flag via NVRAM.
+ */
+STATIC int
+sysctl_darkboot SYSCTL_HANDLER_ARGS
+{
+ int err = 0, value = 0;
+#pragma unused(oidp, arg1, arg2, err, value, req)
+
+ /*
+ * Handle the sysctl request.
+ *
+ * If this is a read, the function will set the value to the current darkboot value. Otherwise,
+ * we'll get the request identifier into "value" and then we can honor it.
+ */
+ if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
+ goto exit;
+ }
+
+ /* writing requested, let's process the request */
+ if (req->newptr) {
+ /* writing is protected by an entitlement */
+ if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
+ err = EPERM;
+ goto exit;
+ }
+
+ switch (value) {
+ case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
+ /*
+ * If the darkboot sysctl is unset, the NVRAM variable
+ * must be unset too. If that's not the case, it means
+ * someone is doing something crazy and not supported.
+ */
+ if (darkboot != 0) {
+ /* Non-zero return from PERemoveNVRAMProperty means the
+ * NVRAM variable was removed; only then clear the flag. */
+ int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
+ if (ret) {
+ darkboot = 0;
+ } else {
+ err = EINVAL;
+ }
+ }
+ break;
+ case MEMORY_MAINTENANCE_DARK_BOOT_SET:
+ darkboot = 1;
+ break;
+ case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
+ /*
+ * Set the NVRAM and update 'darkboot' in case
+ * of success. Otherwise, do not update
+ * 'darkboot' and report the failure.
+ */
+ if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
+ darkboot = 1;
+ } else {
+ err = EINVAL;
+ }
+
+ break;
+ }
+ default:
+ err = EINVAL;
+ }
+ }
+
+exit:
+ return err;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, darkboot,
+ CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
+ 0, 0, sysctl_darkboot, "I", "");
+#endif
+
+#if DEVELOPMENT || DEBUG
+#include <sys/sysent.h>
+/* This should result in a fatal exception, verifying that "sysent" is
+ * write-protected.
+ */
+/*
+ * Write-triggered test: any write to kern.sysent_const_check attempts to
+ * store through a pointer into sysent[0].  On a correctly protected kernel
+ * the store faults; if the printf below ever runs, the protection is broken.
+ */
+static int
+kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ uint64_t new_value = 0, old_value = 0;
+ int changed = 0, error;
+
+ error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
+ if ((error == 0) && changed) {
+ volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
+ *wraddr = 0;
+ printf("sysent[0] write succeeded\n");
+ }
+ return error;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
+ CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ kern_sysent_write, "I", "Attempt sysent[0] write");
+
+#endif
+
+/* kern.development: 1 on DEVELOPMENT/DEBUG kernels, 0 on release kernels;
+ * CTLFLAG_MASKED hides it from sysctl listings. */
+#if DEVELOPMENT || DEBUG
+SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
+#else
+SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
+#endif
+
+
+#if DEVELOPMENT || DEBUG
+
+/*
+ * Write-triggered recursive-panic test: the written string selects the phase
+ * at which the recursive panic is injected ("entry", "prelog", "postlog",
+ * "postcore").  On a match this deliberately panics and does not return.
+ */
+static int
+sysctl_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int rval = 0;
+ char str[32] = "entry prelog postlog postcore";
+
+ rval = sysctl_handle_string(oidp, str, sizeof(str), req);
+
+ if (rval == 0 && req->newptr) {
+ /* strncmp by prefix: "entry" matches before the later options. */
+ if (strncmp("entry", str, strlen("entry")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
+ } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
+ } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
+ } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
+ }
+ }
+
+ return rval;
+}
+
+/*
+ * Write-triggered debugger test: same phase selection as sysctl_panic_test,
+ * but enters the debugger via DebuggerWithContext() instead of panicking
+ * directly.
+ */
+static int
+sysctl_debugger_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int rval = 0;
+ char str[32] = "entry prelog postlog postcore";
+
+ rval = sysctl_handle_string(oidp, str, sizeof(str), req);
+
+ if (rval == 0 && req->newptr) {
+ if (strncmp("entry", str, strlen("entry")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
+ } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
+ } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
+ } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
+ }
+ }
+
+ return rval;
+}
+
+decl_lck_spin_data(, spinlock_panic_test_lock);
+
+/* Worker thread for the spinlock panic test: takes the test spinlock and
+ * spins forever while holding it, so the initiating thread's own acquisition
+ * attempt can never succeed. */
+__attribute__((noreturn))
+static void
+spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
+{
+ lck_spin_lock(&spinlock_panic_test_lock);
+ while (1) {
+ ;
+ }
+}
+
+/*
+ * Write-triggered spinlock panic test: spawns a thread that holds the test
+ * spinlock forever, then tries to take it on this thread.  The expected
+ * outcome is a spinlock-timeout panic; this function only returns (EBUSY) if
+ * the worker thread could not be created.  The lock group/attr allocations
+ * are never freed — acceptable for a one-shot panic test.
+ */
+static int
+sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ if (req->newlen == 0) {
+ return EINVAL;
+ }
+
+ thread_t panic_spinlock_thread;
+ /* Initialize panic spinlock */
+ lck_grp_t * panic_spinlock_grp;
+ lck_grp_attr_t * panic_spinlock_grp_attr;
+ lck_attr_t * panic_spinlock_attr;
+
+ panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
+ panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr);
+ panic_spinlock_attr = lck_attr_alloc_init();
+
+ lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);
+
+
+ /* Create thread to acquire spinlock */
+ if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
+ return EBUSY;
+ }
+
+ /* Try to acquire spinlock -- should panic eventually */
+ lck_spin_lock(&spinlock_panic_test_lock);
+ while (1) {
+ ;
+ }
+}
+
+/* Worker for the simultaneous panic test: busy-waits on the shared start
+ * flag, then panics from whatever CPU it is running on, so many CPUs panic
+ * at (nearly) the same time. */
+__attribute__((noreturn))
+static void
+simultaneous_panic_worker
+(void * arg, wait_result_t wres __unused)
+{
+ atomic_int *start_panic = (atomic_int *)arg;
+
+ while (!atomic_load(start_panic)) {
+ ;
+ }
+ panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
+ __builtin_unreachable();
+}
+
+/*
+ * Write-triggered simultaneous panic test: spawns 2x processor_count worker
+ * threads, releases them all at once via the atomic start flag, and spins
+ * until one of them panics the machine.  Never returns successfully.
+ */
+static int
+sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ if (req->newlen == 0) {
+ return EINVAL;
+ }
+
+ int i = 0, threads_to_create = 2 * processor_count;
+ atomic_int start_panic = 0;
+ unsigned int threads_created = 0;
+ thread_t new_panic_thread;
+
+ for (i = threads_to_create; i > 0; i--) {
+ if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
+ threads_created++;
+ }
+ }
+
+ /* FAIL if we couldn't create at least processor_count threads */
+ /* NOTE(review): threads_created is unsigned but printed with %d — benign
+ * for realistic thread counts, but %u would be the matching specifier. */
+ if (threads_created < processor_count) {
+ panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
+ threads_created, threads_to_create);
+ }
+
+ atomic_exchange(&start_panic, 1);
+ while (1) {
+ ;
+ }
+}
+
+/* Registration of the write-triggered panic/debugger test OIDs; all are
+ * masked from sysctl listings. */
+SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
+SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
+SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
+SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");
+
+extern int exc_resource_threads_enabled;
+
+SYSCTL_INT(_kern, OID_AUTO, exc_resource_threads_enabled, CTLFLAG_RD | CTLFLAG_LOCKED, &exc_resource_threads_enabled, 0, "exc_resource thread limit enabled");
+
+
+#endif /* DEVELOPMENT || DEBUG */
+
+/* Thread groups are not supported in this configuration; the handler reports
+ * this compile-time constant (0) to userspace. */
+const uint32_t thread_groups_supported = 0;
+
+STATIC int
+sysctl_thread_groups_supported(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int value = thread_groups_supported;
+ return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
+ 0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
+
+/*
+ * Handler for kern.grade_cputype: reads a {cpu_type, cpu_subtype} pair from
+ * the request and writes back the grade the exec machinery assigns to that
+ * binary slice via grade_binary().
+ */
+static int
+sysctl_grade_cputype SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+ int type_tuple[2] = {};
+
+ int error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));
+ if (error) {
+ return error;
+ }
+
+ int grade = grade_binary(type_tuple[0], type_tuple[1], FALSE);
+
+ return SYSCTL_OUT(req, &grade, sizeof(grade));
+}
+
+/* Opaque (struct-typed) OID: input is two ints, output is one int grade. */
+SYSCTL_PROC(_kern, OID_AUTO, grade_cputype,
+ CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLTYPE_OPAQUE,
+ 0, 0, &sysctl_grade_cputype, "S",
+ "grade value of cpu_type_t+cpu_sub_type_t");
+
+#if DEVELOPMENT || DEBUG
+
+/* Flag polled by kern.wedge_thread; kern.unwedge_thread sets it to release
+ * the wedged thread. */
+static atomic_int wedge_thread_should_wake = 0;
+
+/*
+ * Handler for kern.unwedge_thread: any non-zero write sets the wake flag,
+ * releasing the thread parked in kern.wedge_thread.  Zero writes and reads
+ * are no-ops.
+ */
+static int
+unwedge_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int value = 0;
+ int error = sysctl_handle_int(oidp, &value, 0, req);
+ if (error != 0 || value == 0) {
+ return error;
+ }
+
+ atomic_store(&wedge_thread_should_wake, 1);
+ return 0;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, unwedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, unwedge_thread, "I", "unwedge the thread wedged by kern.wedge_thread");
+
+/* Read-only exports describing the phys_carveout_mb boot-arg region. */
+SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &phys_carveout_pa,
+ "base physical address of the phys_carveout_mb boot-arg region");
+SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &phys_carveout_size,
+ "size in bytes of the phys_carveout_mb boot-arg region");
+
+/*
+ * Handler for kern.wedge_thread: a non-zero write parks the calling thread
+ * in an uninterruptible tsleep1() loop (~50ms per wakeup) until
+ * kern.unwedge_thread sets the wake flag.  Used by tests that need a thread
+ * that cannot be cleaned up.
+ */
+static int
+wedge_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+
+ int error, val = 0;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || val == 0) {
+ return error;
+ }
+
+ /* 50ms poll interval, converted from nanoseconds to abstime. */
+ uint64_t interval = 1;
+ nanoseconds_to_absolutetime(1000 * 1000 * 50, &interval);
+
+ atomic_store(&wedge_thread_should_wake, 0);
+ while (!atomic_load(&wedge_thread_should_wake)) {
+ tsleep1(NULL, 0, "wedge_thread", mach_absolute_time() + interval, NULL);
+ }
+
+ return 0;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, wedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I", "wedge this thread so it cannot be cleaned up");
+
+/* Returns the current number of corpse tasks on the system. */
+extern unsigned long
+total_corpses_count(void);
+
+static int
+sysctl_total_corpses_count SYSCTL_HANDLER_ARGS;
+
+/*
+ * Handler for kern.total_corpses_count: reports the number of corpse tasks.
+ * total_corpses_count() returns unsigned long, but this OID is registered
+ * with format "I" (int), so the value is narrowed explicitly to keep the
+ * userspace ABI unchanged; counts are far below INT_MAX in practice.
+ */
+static int
+sysctl_total_corpses_count SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int corpse_count = (int)total_corpses_count();
+ return sysctl_io_opaque(req, &corpse_count, sizeof(corpse_count), NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, total_corpses_count, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, sysctl_total_corpses_count, "I", "total corpses on the system");
+
+static int
+sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS;
+static int
+sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS;
+int
+tstile_test_prim_lock(boolean_t use_hashtable);
+int
+tstile_test_prim_unlock(boolean_t use_hashtable);
+
+/*
+ * Handler for kern.turnstiles_test_lock: the written value selects one of
+ * the SYSCTL_TURNSTILE_TEST_* scenarios and drives the lock side of the
+ * turnstile test primitive.
+ *
+ * NOTE(review): val (a test-scenario selector) is passed where the prototype
+ * declares boolean_t use_hashtable — confirm tstile_test_prim_lock decodes
+ * the selector rather than treating it as a plain boolean.
+ */
+static int
+sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int error, val = 0;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || val == 0) {
+ return error;
+ }
+ switch (val) {
+ case SYSCTL_TURNSTILE_TEST_USER_DEFAULT:
+ case SYSCTL_TURNSTILE_TEST_USER_HASHTABLE:
+ case SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT:
+ case SYSCTL_TURNSTILE_TEST_KERNEL_HASHTABLE:
+ return tstile_test_prim_lock(val);
+ default:
+ return error;
+ }
+}
+
+/* Unlock counterpart of sysctl_turnstile_test_prim_lock; same selector
+ * values, same hedged note about the boolean_t parameter. */
+static int
+sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int error, val = 0;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || val == 0) {
+ return error;
+ }
+ switch (val) {
+ case SYSCTL_TURNSTILE_TEST_USER_DEFAULT:
+ case SYSCTL_TURNSTILE_TEST_USER_HASHTABLE:
+ case SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT:
+ case SYSCTL_TURNSTILE_TEST_KERNEL_HASHTABLE:
+ return tstile_test_prim_unlock(val);
+ default:
+ return error;
+ }
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_lock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ 0, 0, sysctl_turnstile_test_prim_lock, "I", "turnstiles test lock");
+
+SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_unlock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ 0, 0, sysctl_turnstile_test_prim_unlock, "I", "turnstiles test unlock");
+
+int
+turnstile_get_boost_stats_sysctl(void *req);
+int
+turnstile_get_unboost_stats_sysctl(void *req);
+static int
+sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS;
+static int
+sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS;
+extern uint64_t thread_block_on_turnstile_count;
+extern uint64_t thread_block_on_regular_waitq_count;
+
+/* Thin wrapper: delegates turnstile boost stats collection to the turnstile
+ * subsystem, which writes directly into the sysctl request. */
+static int
+sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+ return turnstile_get_boost_stats_sysctl(req);
+}
+
+/* Thin wrapper: delegates turnstile unboost stats collection likewise. */
+static int
+sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+ return turnstile_get_unboost_stats_sysctl(req);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, turnstile_boost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
+ 0, 0, sysctl_turnstile_boost_stats, "S", "turnstiles boost stats");
+SYSCTL_PROC(_kern, OID_AUTO, turnstile_unboost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
+ 0, 0, sysctl_turnstile_unboost_stats, "S", "turnstiles unboost stats");
+SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_turnstile,
+ CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ &thread_block_on_turnstile_count, "thread blocked on turnstile count");
+SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_reg_waitq,
+ CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ &thread_block_on_regular_waitq_count, "thread blocked on regular waitq count");
+
+/*
+ * Handler for the mutex-test stats reset sysctl: writing exactly 1
+ * (re)initializes the test mutexes and erases accumulated statistics.
+ * Other non-zero writes are accepted but do nothing.
+ */
+static int
+sysctl_erase_all_test_mtx_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int error, val = 0;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || val == 0) {
+ return error;
+ }
+
+ if (val == 1) {
+ lck_mtx_test_init();
+ erase_all_test_mtx_stats();
+ }
+
+ return 0;
+}
+
+/*
+ * Handler for the mutex-test stats read sysctl: formats the accumulated
+ * statistics into a temporary buffer and copies them out as a string.
+ * NOTE(review): assumes the stats string fits in the fixed 1000-byte
+ * buffer — confirm against get_test_mtx_stats_string().
+ */
+static int
+sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ char* buffer;
+ int size, buffer_size, error;
+
+ buffer_size = 1000;
+ buffer = kalloc(buffer_size);
+ if (!buffer) {
+ panic("Impossible to allocate memory for %s\n", __func__);
+ }
+
+ lck_mtx_test_init();
+
+ size = get_test_mtx_stats_string(buffer, buffer_size);
+
+ error = sysctl_io_string(req, buffer, size, 0, NULL);
+
+ kfree(buffer, buffer_size);
+
+ return error;
+}
+
+static int
+sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ char* buffer;
+ int buffer_size, offset, error, iter;
+ char input_val[40];
+
+ if (!req->newptr) {
+ return 0;
+ }
+
+ if (!req->oldptr) {
+ return EINVAL;
+ }
+
+ if (req->newlen >= sizeof(input_val)) {
+ return EINVAL;
+ }
+
+ error = SYSCTL_IN(req, input_val, req->newlen);
+ if (error) {
+ return error;
+ }
+ input_val[req->newlen] = '\0';
+
+ iter = 0;
+ error = sscanf(input_val, "%d", &iter);
+ if (error != 1) {
+ printf("%s invalid input\n", __func__);
+ return EINVAL;
+ }
+
+ if (iter <= 0) {
+ printf("%s requested %d iterations, not starting the test\n", __func__, iter);
+ return EINVAL;
+ }
+
+ lck_mtx_test_init();