+#if CONFIG_EMBEDDED
+STATIC int
+sysctl_darkboot SYSCTL_HANDLER_ARGS
+{
+ int err = 0, value = 0;
+#pragma unused(oidp, arg1, arg2, err, value, req)
+
+ /*
+ * Handle the sysctl request.
+ *
+ * For a read, sysctl_io_number() reports the current darkboot value back to
+ * the caller. For a write, it copies the requested value into "value", which
+ * is then acted on below.
+ */
+ if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
+ goto exit;
+ }
+
+ /* writing requested, let's process the request */
+ if (req->newptr) {
+ /* writing is protected by an entitlement */
+ if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
+ err = EPERM;
+ goto exit;
+ }
+
+ switch (value) {
+ case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
+ /*
+ * Unsetting the darkboot sysctl also requires unsetting the
+ * backing NVRAM variable. If the variable cannot be removed,
+ * something outside this interface has modified it, which is
+ * not supported, so report an error.
+ */
+ if (darkboot != 0) {
+ int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
+ if (ret) {
+ darkboot = 0;
+ } else {
+ err = EINVAL;
+ }
+ }
+ break;
+ case MEMORY_MAINTENANCE_DARK_BOOT_SET:
+ darkboot = 1;
+ break;
+ case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
+ /*
+ * Set the NVRAM and update 'darkboot' in case
+ * of success. Otherwise, do not update
+ * 'darkboot' and report the failure.
+ */
+ if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
+ darkboot = 1;
+ } else {
+ err = EINVAL;
+ }
+
+ break;
+ }
+ default:
+ err = EINVAL;
+ }
+ }
+
+exit:
+ return err;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, darkboot,
+ CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
+ 0, 0, sysctl_darkboot, "I", "");
+#endif
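+
+/*
+ * Example (userspace sketch, not part of this change): driving kern.darkboot
+ * with sysctlbyname(3). Writes require the PRIV_DARKBOOT entitlement checked
+ * above; the request values are the MEMORY_MAINTENANCE_DARK_BOOT_* constants
+ * tested in the handler (assumed to come from a memorystatus header).
+ *
+ *   #include <stdio.h>
+ *   #include <sys/sysctl.h>
+ *
+ *   int current = 0, request = MEMORY_MAINTENANCE_DARK_BOOT_SET;
+ *   size_t len = sizeof(current);
+ *
+ *   // Read back the current darkboot value.
+ *   if (sysctlbyname("kern.darkboot", &current, &len, NULL, 0) != 0)
+ *       perror("read kern.darkboot");
+ *
+ *   // Request the non-persistent dark boot mode.
+ *   if (sysctlbyname("kern.darkboot", NULL, NULL, &request, sizeof(request)) != 0)
+ *       perror("write kern.darkboot");
+ */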
+
+/*
+ * This is set by Core Audio to tell tailspin (i.e. background tracing) how
+ * long its smallest buffer is. Background tracing can then make reasonable
+ * decisions to avoid introducing so much latency that the buffers will
+ * underflow.
+ */
+
+int min_audio_buffer_usec;
+
+STATIC int
+sysctl_audio_buffer SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int err = 0, value = 0, changed = 0;
+ err = sysctl_io_number(req, min_audio_buffer_usec, sizeof(int), &value, &changed);
+ if (err) goto exit;
+
+ if (changed) {
+ /* writing is protected by an entitlement */
+ if (priv_check_cred(kauth_cred_get(), PRIV_AUDIO_LATENCY, 0) != 0) {
+ err = EPERM;
+ goto exit;
+ }
+ min_audio_buffer_usec = value;
+ }
+exit:
+ return err;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, min_audio_buffer_usec, CTLFLAG_RW | CTLFLAG_ANYBODY, 0, 0, sysctl_audio_buffer, "I", "Minimum audio buffer size, in microseconds");
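+
+/*
+ * Example (userspace sketch, not part of this change): publishing the smallest
+ * Core Audio buffer size through kern.min_audio_buffer_usec. Reads are
+ * unrestricted; writes require the PRIV_AUDIO_LATENCY entitlement checked in
+ * the handler above. The 2500 microsecond value is purely illustrative.
+ *
+ *   #include <stdio.h>
+ *   #include <sys/sysctl.h>
+ *
+ *   int usec = 2500;  // hypothetical 2.5 ms smallest buffer
+ *   if (sysctlbyname("kern.min_audio_buffer_usec", NULL, NULL, &usec, sizeof(usec)) != 0)
+ *       perror("write kern.min_audio_buffer_usec");
+ */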
+
+#if DEVELOPMENT || DEBUG
+#include <sys/sysent.h>
+/*
+ * This should result in a fatal exception, verifying that "sysent" is
+ * write-protected.
+ */
+static int
+kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ uint64_t new_value = 0, old_value = 0;
+ int changed = 0, error;
+
+ error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
+ if ((error == 0) && changed) {
+ volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
+ *wraddr = 0;
+ printf("sysent[0] write succeeded\n");
+ }
+ return error;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
+ CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ kern_sysent_write, "I", "Attempt sysent[0] write");
+
+#endif
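+
+/*
+ * Example (development/debug sketch): writing any value should take a fatal
+ * exception because sysent is mapped read-only; returning with the
+ * "sysent[0] write succeeded" message printed indicates a regression.
+ * The node is CTLTYPE_QUAD, so a 64-bit value is supplied.
+ *
+ *   #include <sys/sysctl.h>
+ *
+ *   uint64_t one = 1;
+ *   sysctlbyname("kern.sysent_const_check", NULL, NULL, &one, sizeof(one));
+ */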
+
+#if DEVELOPMENT || DEBUG
+SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
+#else
+SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
+#endif
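+
+/*
+ * Example (userspace sketch): kern.development reads back 1 on DEVELOPMENT or
+ * DEBUG kernels and 0 on RELEASE kernels.
+ *
+ *   #include <sys/sysctl.h>
+ *
+ *   int development = 0;
+ *   size_t len = sizeof(development);
+ *   sysctlbyname("kern.development", &development, &len, NULL, 0);
+ */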
+
+
+#if DEVELOPMENT || DEBUG
+
+static int
+sysctl_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int rval = 0;
+ char str[32] = "entry prelog postlog postcore";
+
+ rval = sysctl_handle_string(oidp, str, sizeof(str), req);
+
+ if (rval == 0 && req->newptr) {
+ if (strncmp("entry", str, strlen("entry")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
+ } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
+ } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
+ } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
+ }
+ }
+
+ return rval;
+}
+
+static int
+sysctl_debugger_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int rval = 0;
+ char str[32] = "entry prelog postlog postcore";
+
+ rval = sysctl_handle_string(oidp, str, sizeof(str), req);
+
+ if (rval == 0 && req->newptr) {
+ if (strncmp("entry", str, strlen("entry")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
+ } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
+ } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
+ } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
+ }
+ }
+
+ return rval;
+}
+
+decl_lck_spin_data(, spinlock_panic_test_lock)
+
+__attribute__((noreturn))
+static void
+spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
+{
+ lck_spin_lock(&spinlock_panic_test_lock);
+ while (1) { ; }
+}
+
+static int
+sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ if (req->newlen == 0)
+ return EINVAL;
+
+ thread_t panic_spinlock_thread;
+ /* Initialize panic spinlock */
+ lck_grp_t * panic_spinlock_grp;
+ lck_grp_attr_t * panic_spinlock_grp_attr;
+ lck_attr_t * panic_spinlock_attr;
+
+ panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
+ panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr);
+ panic_spinlock_attr = lck_attr_alloc_init();
+
+ lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);
+
+
+ /* Create thread to acquire spinlock */
+ if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
+ return EBUSY;
+ }
+
+ /* Try to acquire spinlock -- should panic eventually */
+ lck_spin_lock(&spinlock_panic_test_lock);
+ while (1) { ; }
+}
+
+__attribute__((noreturn))
+static void
+simultaneous_panic_worker(void * arg, wait_result_t wres __unused)
+{
+ atomic_int *start_panic = (atomic_int *)arg;
+
+ while (!atomic_load(start_panic)) { ; }
+ panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
+ __builtin_unreachable();
+}
+
+static int
+sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ if (req->newlen == 0)
+ return EINVAL;
+
+ int i = 0, threads_to_create = 2 * processor_count;
+ atomic_int start_panic = 0;
+ unsigned int threads_created = 0;
+ thread_t new_panic_thread;
+
+ for (i = threads_to_create; i > 0; i--) {
+ if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
+ threads_created++;
+ }
+ }
+
+ /* FAIL if we couldn't create at least processor_count threads */
+ if (threads_created < processor_count) {
+ panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %u (of %d)",
+ threads_created, threads_to_create);
+ }
+
+ atomic_exchange(&start_panic, 1);
+ while (1) { ; }
+}
+
+SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
+SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
+SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
+SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");
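+
+/*
+ * Example (development/debug sketch): each of the nodes above takes a selector
+ * string naming the point in the panic path to re-enter, one of "entry",
+ * "prelog", "postlog" or "postcore". The machine panics immediately, so these
+ * are only useful on kernels set up to collect the resulting paniclog/corefile.
+ *
+ *   #include <string.h>
+ *   #include <sys/sysctl.h>
+ *
+ *   char where[] = "postcore";
+ *   sysctlbyname("debug.panic_test", NULL, NULL, where, strlen(where) + 1);
+ */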
+
+
+#endif /* DEVELOPMENT || DEBUG */
+
+const uint32_t thread_groups_supported = 0;
+
+STATIC int
+sysctl_thread_groups_supported (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int value = thread_groups_supported;
+ return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
+ 0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
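+
+/*
+ * Example (userspace sketch): a read-only query; with the constant above the
+ * handler always reports 0, i.e. thread groups are not supported.
+ *
+ *   #include <sys/sysctl.h>
+ *
+ *   int supported = 0;
+ *   size_t len = sizeof(supported);
+ *   sysctlbyname("kern.thread_groups_supported", &supported, &len, NULL, 0);
+ */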
+
+static int
+sysctl_grade_cputype SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+ int error = 0;
+ int type_tuple[2] = {};
+ int return_value = 0;
+
+ error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));
+
+ if (error) {
+ return error;
+ }
+
+ return_value = grade_binary(type_tuple[0], type_tuple[1]);
+
+ error = SYSCTL_OUT(req, &return_value, sizeof(return_value));
+
+ if (error) {
+ return error;
+ }
+
+ return error;
+}
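+
+/*
+ * Example (userspace sketch): the handler above expects a two-int tuple of
+ * {cpu_type_t, cpu_subtype_t} as the new value and returns the grade computed
+ * by grade_binary() as the old value. The "kern.grade_cputype" name assumes
+ * the registration that accompanies this handler (not shown in this hunk).
+ *
+ *   #include <stdio.h>
+ *   #include <mach/machine.h>
+ *   #include <sys/sysctl.h>
+ *
+ *   int tuple[2] = { CPU_TYPE_X86_64, CPU_SUBTYPE_X86_64_ALL };
+ *   int grade = 0;
+ *   size_t len = sizeof(grade);
+ *   if (sysctlbyname("kern.grade_cputype", &grade, &len, tuple, sizeof(tuple)) != 0)
+ *       perror("kern.grade_cputype");
+ */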