+ CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLTYPE_OPAQUE,
+ 0, 0, &sysctl_grade_cputype, "S",
+ "grade value of cpu_type_t+cpu_sub_type_t");
+
+
+#if DEVELOPMENT || DEBUG
+
+extern void do_cseg_wedge_thread(void);
+extern void do_cseg_unwedge_thread(void);
+
+/*
+ * kern.cseg_wedge_thread: writing any non-zero int wedges the compressor
+ * c_seg thread (DEVELOPMENT/DEBUG test knob).  Reads are a no-op.
+ */
+static int
+cseg_wedge_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+
+	int value = 0;
+	int error = sysctl_handle_int(oidp, &value, 0, req);
+	if (error != 0 || value == 0) {
+		return error;
+	}
+
+	do_cseg_wedge_thread();
+	return 0;
+}
+SYSCTL_PROC(_kern, OID_AUTO, cseg_wedge_thread, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, cseg_wedge_thread, "I", "wedge c_seg thread");
+
+/*
+ * kern.cseg_unwedge_thread: writing any non-zero int releases a c_seg
+ * thread previously wedged via kern.cseg_wedge_thread.
+ */
+static int
+cseg_unwedge_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+
+	int value = 0;
+	int error = sysctl_handle_int(oidp, &value, 0, req);
+	if (error != 0 || value == 0) {
+		return error;
+	}
+
+	do_cseg_unwedge_thread();
+	return 0;
+}
+SYSCTL_PROC(_kern, OID_AUTO, cseg_unwedge_thread, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, cseg_unwedge_thread, "I", "unstuck c_seg thread");
+
+static atomic_int wedge_thread_should_wake = 0;
+
+/*
+ * kern.unwedge_thread: writing any non-zero int raises the wake flag so a
+ * thread parked in the kern.wedge_thread handler exits its poll loop.
+ */
+static int
+unwedge_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+	int value = 0;
+	int error = sysctl_handle_int(oidp, &value, 0, req);
+	if (error != 0 || value == 0) {
+		return error;
+	}
+
+	atomic_store(&wedge_thread_should_wake, 1);
+	return 0;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, unwedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, unwedge_thread, "I", "unwedge the thread wedged by kern.wedge_thread");
+
+/*
+ * Read-only views of the region reserved by the phys_carveout_mb boot-arg.
+ * NOTE(review): SYSCTL_LONG exposes a long-sized value — confirm
+ * phys_carveout_pa / phys_carveout_size are long-sized on all targets.
+ */
+SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &phys_carveout_pa,
+ "base physical address of the phys_carveout_mb boot-arg region");
+SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &phys_carveout_size,
+ "size in bytes of the phys_carveout_mb boot-arg region");
+
+/*
+ * kern.wedge_thread: writing any non-zero int parks the calling thread in
+ * a poll loop until kern.unwedge_thread is written, so tests can hold a
+ * thread that "cannot be cleaned up".  Polls every 50ms via tsleep1.
+ */
+static int
+wedge_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+
+	int value = 0;
+	int error = sysctl_handle_int(oidp, &value, 0, req);
+	if (error != 0 || value == 0) {
+		return error;
+	}
+
+	/* 50ms poll period expressed in mach absolute time units. */
+	uint64_t poll_interval = 1;
+	nanoseconds_to_absolutetime(1000 * 1000 * 50, &poll_interval);
+
+	atomic_store(&wedge_thread_should_wake, 0);
+	while (!atomic_load(&wedge_thread_should_wake)) {
+		tsleep1(NULL, 0, "wedge_thread", mach_absolute_time() + poll_interval, NULL);
+	}
+
+	return 0;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, wedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I", "wedge this thread so it cannot be cleaned up");
+
+extern unsigned long
+total_corpses_count(void);
+
+static int
+sysctl_total_corpses_count SYSCTL_HANDLER_ARGS;
+
+/*
+ * kern.total_corpses_count: report the current number of corpses as an
+ * int.  total_corpses_count() returns unsigned long; the narrowing to the
+ * "I"-format int is made explicit rather than left as an implicit
+ * (warning-prone) conversion.
+ */
+static int
+sysctl_total_corpses_count SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+	int corpse_count = (int)total_corpses_count();
+	return sysctl_io_opaque(req, &corpse_count, sizeof(int), NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, total_corpses_count, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, sysctl_total_corpses_count, "I", "total corpses on the system");
+
+static int
+sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS;
+static int
+sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS;
+int
+tstile_test_prim_lock(boolean_t use_hashtable);
+int
+tstile_test_prim_unlock(boolean_t use_hashtable);
+
+/*
+ * kern.turnstiles_test_lock: exercise the turnstile test lock primitive.
+ * The written int selects the test variant (user/kernel, default/hashtable).
+ * Fix: an unrecognized non-zero selector previously fell into the default
+ * case and returned `error`, which is 0 there — i.e. invalid input
+ * reported success.  Reject it with EINVAL instead.
+ */
+static int
+sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+	int error, val = 0;
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error || val == 0) {
+		return error;
+	}
+	switch (val) {
+	case SYSCTL_TURNSTILE_TEST_USER_DEFAULT:
+	case SYSCTL_TURNSTILE_TEST_USER_HASHTABLE:
+	case SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT:
+	case SYSCTL_TURNSTILE_TEST_KERNEL_HASHTABLE:
+		return tstile_test_prim_lock(val);
+	default:
+		/* Unknown selector: fail explicitly instead of returning 0. */
+		return EINVAL;
+	}
+}
+
+/*
+ * kern.turnstiles_test_unlock: release the turnstile test lock primitive
+ * selected by the written int.  Fix: as in the lock handler, an unknown
+ * non-zero selector used to return `error` (0 at that point) and so
+ * silently succeeded; reject it with EINVAL.
+ */
+static int
+sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+	int error, val = 0;
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error || val == 0) {
+		return error;
+	}
+	switch (val) {
+	case SYSCTL_TURNSTILE_TEST_USER_DEFAULT:
+	case SYSCTL_TURNSTILE_TEST_USER_HASHTABLE:
+	case SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT:
+	case SYSCTL_TURNSTILE_TEST_KERNEL_HASHTABLE:
+		return tstile_test_prim_unlock(val);
+	default:
+		/* Unknown selector: fail explicitly instead of returning 0. */
+		return EINVAL;
+	}
+}
+
+/* Write-only test knobs; the written int selects the turnstile variant. */
+SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_lock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ 0, 0, sysctl_turnstile_test_prim_lock, "I", "turnstiles test lock");
+
+SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_unlock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ 0, 0, sysctl_turnstile_test_prim_unlock, "I", "turnstiles test unlock");
+
+int
+turnstile_get_boost_stats_sysctl(void *req);
+int
+turnstile_get_unboost_stats_sysctl(void *req);
+static int
+sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS;
+static int
+sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS;
+extern uint64_t thread_block_on_turnstile_count;
+extern uint64_t thread_block_on_regular_waitq_count;
+
+/* kern.turnstile_boost_stats: hand the request to the turnstile layer. */
+static int
+sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+	int error = turnstile_get_boost_stats_sysctl(req);
+	return error;
+}
+
+/* kern.turnstile_unboost_stats: hand the request to the turnstile layer. */
+static int
+sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+	int error = turnstile_get_unboost_stats_sysctl(req);
+	return error;
+}
+
+/*
+ * Read-only turnstile statistics: struct-typed boost/unboost stats plus
+ * two quad counters of thread blocks by wait mechanism.
+ */
+SYSCTL_PROC(_kern, OID_AUTO, turnstile_boost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
+ 0, 0, sysctl_turnstile_boost_stats, "S", "turnstiles boost stats");
+SYSCTL_PROC(_kern, OID_AUTO, turnstile_unboost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
+ 0, 0, sysctl_turnstile_unboost_stats, "S", "turnstiles unboost stats");
+SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_turnstile,
+ CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ &thread_block_on_turnstile_count, "thread blocked on turnstile count");
+SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_reg_waitq,
+ CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ &thread_block_on_regular_waitq_count, "thread blocked on regular waitq count");
+
+/*
+ * kern.erase_all_test_mtx_stats: writing exactly 1 (re)initializes the
+ * test mutex and clears its accumulated statistics; other non-zero
+ * values are accepted but do nothing (matching existing behavior).
+ */
+static int
+sysctl_erase_all_test_mtx_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+	int value = 0;
+	int error = sysctl_handle_int(oidp, &value, 0, req);
+	if (error != 0 || value == 0) {
+		return error;
+	}
+
+	if (value == 1) {
+		lck_mtx_test_init();
+		erase_all_test_mtx_stats();
+	}
+
+	return 0;
+}
+
+/*
+ * kern.get_test_mtx_stats: render the test-mutex statistics into a
+ * temporary kernel buffer and copy the resulting string to the caller.
+ */
+static int
+sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+	int error;
+	int used;
+	int stats_buf_size = 1000;
+	char *stats_buf = kalloc(stats_buf_size);
+
+	if (!stats_buf) {
+		panic("Impossible to allocate memory for %s\n", __func__);
+	}
+
+	lck_mtx_test_init();
+
+	used = get_test_mtx_stats_string(stats_buf, stats_buf_size);
+
+	error = sysctl_io_string(req, stats_buf, used, 0, NULL);
+
+	kfree(stats_buf, stats_buf_size);
+
+	return error;
+}
+
+/*
+ * kern.test_mtx_uncontended: run the uncontended mutex micro-benchmark.
+ * The caller writes an ASCII iteration count and reads back a text
+ * report; both newptr (the count) and oldptr (the report buffer) must be
+ * supplied.
+ */
+static int
+sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ char* buffer;
+ int buffer_size, offset, error, iter;
+ char input_val[40];
+
+ /* A plain read with nothing written is a no-op. */
+ if (!req->newptr) {
+ return 0;
+ }
+
+ /* The report has to go somewhere: reject write-only requests. */
+ if (!req->oldptr) {
+ return EINVAL;
+ }
+
+ /* Leave room for the NUL terminator appended below. */
+ if (req->newlen >= sizeof(input_val)) {
+ return EINVAL;
+ }
+
+ error = SYSCTL_IN(req, input_val, req->newlen);
+ if (error) {
+ return error;
+ }
+ input_val[req->newlen] = '\0';
+
+ iter = 0;
+ error = sscanf(input_val, "%d", &iter);
+ if (error != 1) {
+ printf("%s invalid input\n", __func__);
+ return EINVAL;
+ }
+
+ if (iter <= 0) {
+ printf("%s requested %d iterations, not starting the test\n", __func__, iter);
+ return EINVAL;
+ }
+
+ lck_mtx_test_init();
+
+ buffer_size = 2000;
+ offset = 0;
+ buffer = kalloc(buffer_size);
+ if (!buffer) {
+ panic("Impossible to allocate memory for %s\n", __func__);
+ }
+ memset(buffer, 0, buffer_size);
+
+ printf("%s starting uncontended mutex test with %d iterations\n", __func__, iter);
+
+ /* Per-acquisition stats first, then the whole-loop timing variant. */
+ offset = scnprintf(buffer, buffer_size, "STATS INNER LOOP");
+ offset += lck_mtx_test_mtx_uncontended(iter, &buffer[offset], buffer_size - offset);
+
+ offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
+ offset += lck_mtx_test_mtx_uncontended_loop_time(iter, &buffer[offset], buffer_size - offset);
+
+ error = SYSCTL_OUT(req, buffer, offset);
+
+ kfree(buffer, buffer_size);
+ return error;
+}
+
+/*
+ * kern.test_mtx_contended: run the contended mutex micro-benchmark in
+ * both FULL_CONTENDED and HALF_CONTENDED modes.  The caller writes an
+ * ASCII iteration count and reads back a combined text report; both
+ * newptr and oldptr must be supplied.
+ */
+static int
+sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ char* buffer;
+ int buffer_size, offset, error, iter;
+ char input_val[40];
+
+ /* A plain read with nothing written is a no-op. */
+ if (!req->newptr) {
+ return 0;
+ }
+
+ /* The report has to go somewhere: reject write-only requests. */
+ if (!req->oldptr) {
+ return EINVAL;
+ }
+
+ /* Leave room for the NUL terminator appended below. */
+ if (req->newlen >= sizeof(input_val)) {
+ return EINVAL;
+ }
+
+ error = SYSCTL_IN(req, input_val, req->newlen);
+ if (error) {
+ return error;
+ }
+ input_val[req->newlen] = '\0';
+
+ iter = 0;
+ error = sscanf(input_val, "%d", &iter);
+ if (error != 1) {
+ printf("%s invalid input\n", __func__);
+ return EINVAL;
+ }
+
+ if (iter <= 0) {
+ printf("%s requested %d iterations, not starting the test\n", __func__, iter);
+ return EINVAL;
+ }
+
+ lck_mtx_test_init();
+
+ /* Start from clean counters so the report covers only this run. */
+ erase_all_test_mtx_stats();
+
+ buffer_size = 2000;
+ offset = 0;
+ buffer = kalloc(buffer_size);
+ if (!buffer) {
+ panic("Impossible to allocate memory for %s\n", __func__);
+ }
+ memset(buffer, 0, buffer_size);
+
+ printf("%s starting contended mutex test with %d iterations FULL_CONTENDED\n", __func__, iter);
+
+ offset = scnprintf(buffer, buffer_size, "STATS INNER LOOP");
+ offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset, FULL_CONTENDED);
+
+ printf("%s starting contended mutex loop test with %d iterations FULL_CONTENDED\n", __func__, iter);
+
+ offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
+ offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset, FULL_CONTENDED);
+
+ printf("%s starting contended mutex test with %d iterations HALF_CONTENDED\n", __func__, iter);
+
+ offset += scnprintf(&buffer[offset], buffer_size - offset, "STATS INNER LOOP");
+ offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset, HALF_CONTENDED);
+
+ printf("%s starting contended mutex loop test with %d iterations HALF_CONTENDED\n", __func__, iter);
+
+ offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
+ offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset, HALF_CONTENDED);
+
+ error = SYSCTL_OUT(req, buffer, offset);
+
+ /* Also echo the report to the kernel log for offline inspection. */
+ printf("\n%s\n", buffer);
+ kfree(buffer, buffer_size);
+
+ return error;
+}
+
+/* Registration for the test-mutex knobs defined above. */
+SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ 0, 0, sysctl_erase_all_test_mtx_stats, "I", "erase test_mtx statistics");
+
+SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ 0, 0, sysctl_get_test_mtx_stats, "A", "get test_mtx statistics");
+
+SYSCTL_PROC(_kern, OID_AUTO, test_mtx_contended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ 0, 0, sysctl_test_mtx_contended, "A", "get statistics for contended mtx test");
+
+SYSCTL_PROC(_kern, OID_AUTO, test_mtx_uncontended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ 0, 0, sysctl_test_mtx_uncontended, "A", "get statistics for uncontended mtx test");
+
+extern uint64_t MutexSpin;
+
+/*
+ * NOTE(review): unlike nearly every other sysctl in this file, these two
+ * omit CTLFLAG_LOCKED — confirm whether that is intentional.
+ */
+SYSCTL_QUAD(_kern, OID_AUTO, mutex_spin_abs, CTLFLAG_RW, &MutexSpin,
+ "Spin time in abs for acquiring a kernel mutex");
+
+extern uint64_t low_MutexSpin;
+extern int64_t high_MutexSpin;
+extern unsigned int real_ncpus;
+
+SYSCTL_QUAD(_kern, OID_AUTO, low_mutex_spin_abs, CTLFLAG_RW, &low_MutexSpin,
+ "Low spin threshold in abs for acquiring a kernel mutex");
+
+/*
+ * kern.high_mutex_spin_abs: upper bound (mach absolute time units) on
+ * kernel mutex spinning.  Writing -1 selects the adaptive default,
+ * low_MutexSpin * real_ncpus; any other negative value is invalid.
+ * Reads always report the effective bound.
+ *
+ * Fixes: (1) a write of an invalid negative (< -1) used to return
+ * `error` (0), i.e. report success while silently ignoring the value —
+ * it now returns EINVAL; (2) the node is CTLTYPE_QUAD but was registered
+ * with the "I" (int) format string — corrected to "Q".
+ */
+static int
+sysctl_high_mutex_spin_ns SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+	int error;
+	int64_t val = 0;
+	int64_t res;
+
+	/* Check if the user is writing to high_MutexSpin, or just reading it */
+	if (req->newptr) {
+		error = SYSCTL_IN(req, &val, sizeof(val));
+		if (error) {
+			return error;
+		}
+		if (val < 0 && val != -1) {
+			/* Reject invalid negatives instead of silently ignoring them. */
+			return EINVAL;
+		}
+		high_MutexSpin = val;
+	}
+
+	if (high_MutexSpin >= 0) {
+		res = high_MutexSpin;
+	} else {
+		/* -1: adaptive bound scaled by the number of CPUs. */
+		res = low_MutexSpin * real_ncpus;
+	}
+	return SYSCTL_OUT(req, &res, sizeof(res));
+}
+SYSCTL_PROC(_kern, OID_AUTO, high_mutex_spin_abs, CTLFLAG_RW | CTLTYPE_QUAD, 0, 0, sysctl_high_mutex_spin_ns, "Q",
+ "High spin threshold in abs for acquiring a kernel mutex");
+
+#if defined (__x86_64__)
+
+semaphore_t sysctl_test_panic_with_thread_sem;
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winfinite-recursion" /* rdar://38801963 */
+/*
+ * Child thread body for the "spin" mode of kern.test_panic_with_thread.
+ * Recurses 5 levels (presumably to build up identifiable stack frames
+ * for the backtrace test — confirm), signals readiness on the shared
+ * semaphore, then spins forever awaiting the panic.
+ */
+__attribute__((noreturn))
+static void
+panic_thread_test_child_spin(void * arg, wait_result_t wres)
+{
+ static int panic_thread_recurse_count = 5;
+
+ if (panic_thread_recurse_count > 0) {
+ panic_thread_recurse_count--;
+ panic_thread_test_child_spin(arg, wres);
+ }
+
+ /* Tell the parent we are in place; it will panic with our context. */
+ semaphore_signal(sysctl_test_panic_with_thread_sem);
+ while (1) {
+ ;
+ }
+}
+#pragma clang diagnostic pop
+
+/*
+ * Child thread body for the "continuation" mode of
+ * kern.test_panic_with_thread: asserts a wait on a stack-local event,
+ * signals readiness, then blocks with itself as the continuation so the
+ * thread is parked (no kernel stack) when the parent panics.
+ */
+static void
+panic_thread_test_child_park(void * arg __unused, wait_result_t wres __unused)
+{
+ int event;
+
+ assert_wait(&event, THREAD_UNINT);
+ semaphore_signal(sysctl_test_panic_with_thread_sem);
+ thread_block(panic_thread_test_child_park);
+}
+
+/*
+ * kern.test_panic_with_thread: write "spin" or "continuation" to create a
+ * child thread in the corresponding state, then panic with that thread's
+ * context to exercise cross-thread backtracing.  Never returns on the
+ * success path.
+ *
+ * Fix: a non-zero error from sysctl_handle_string was previously
+ * collapsed into EINVAL; the real error code is now propagated.
+ */
+static int
+sysctl_test_panic_with_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+	int rval = 0;
+	char str[16] = { '\0' };
+	thread_t child_thread = THREAD_NULL;
+
+	rval = sysctl_handle_string(oidp, str, sizeof(str), req);
+	if (rval != 0) {
+		/* Propagate the handler's actual error instead of EINVAL. */
+		return rval;
+	}
+	if (!req->newptr) {
+		/* Reading this sysctl is meaningless; a mode string is required. */
+		return EINVAL;
+	}
+
+	semaphore_create(kernel_task, &sysctl_test_panic_with_thread_sem, SYNC_POLICY_FIFO, 0);
+
+	/* Create thread to spin or park in continuation */
+	if (strncmp("spin", str, strlen("spin")) == 0) {
+		if (kernel_thread_start(panic_thread_test_child_spin, NULL, &child_thread) != KERN_SUCCESS) {
+			semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
+			return EBUSY;
+		}
+	} else if (strncmp("continuation", str, strlen("continuation")) == 0) {
+		if (kernel_thread_start(panic_thread_test_child_park, NULL, &child_thread) != KERN_SUCCESS) {
+			semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
+			return EBUSY;
+		}
+	} else {
+		semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
+		return EINVAL;
+	}
+
+	/* Wait until the child has reached its spinning/parked state. */
+	semaphore_wait(sysctl_test_panic_with_thread_sem);
+
+	panic_with_thread_context(0, NULL, 0, child_thread, "testing panic_with_thread_context for thread %p", child_thread);
+
+	/* Not reached */
+	return EINVAL;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, test_panic_with_thread, CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_WR | CTLTYPE_STRING,
+ 0, 0, sysctl_test_panic_with_thread, "A", "test panic flow for backtracing a different thread");
+#endif /* defined (__x86_64__) */
+
+#endif /* DEVELOPMENT || DEBUG */
+
+/*
+ * vm.get_owned_vmobjects: given a task port name written by the caller,
+ * copy out the list of VM objects owned by that task.
+ *
+ * Protocol: the caller always writes a mach_port_name_t.  A read with no
+ * output buffer (oldptr == NULL) reports only the required size; a read
+ * with a buffer returns an int64 entry count followed by that many
+ * vm_object_query_data_t records.
+ */
+static int
+sysctl_get_owned_vmobjects SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+
+ /* validate */
+ if (req->newlen != sizeof(mach_port_name_t) || req->newptr == USER_ADDR_NULL ||
+ req->oldidx != 0 || req->newidx != 0 || req->p == NULL) {
+ return EINVAL;
+ }
+
+ int error;
+ mach_port_name_t task_port_name;
+ task_t task;
+ size_t buffer_size = (req->oldptr != USER_ADDR_NULL) ? req->oldlen : 0;
+ vmobject_list_output_t buffer;
+ size_t output_size;
+
+ if (buffer_size) {
+ /* Minimum: the int64 entry count plus room for one record. */
+ const size_t min_size = sizeof(vm_object_query_data_t) + sizeof(int64_t);
+
+ if (buffer_size < min_size || buffer_size > INT_MAX) {
+ return EINVAL;
+ }
+
+ buffer = kalloc(buffer_size);
+
+ if (!buffer) {
+ error = ENOMEM;
+ goto sysctl_get_vmobject_list_exit;
+ }
+ } else {
+ buffer = NULL;
+ }
+
+ /* we have a "newptr" (for write) we get a task port name from the caller. */
+ error = SYSCTL_IN(req, &task_port_name, sizeof(mach_port_name_t));
+
+ if (error != 0) {
+ goto sysctl_get_vmobject_list_exit;
+ }
+
+ task = port_name_to_task(task_port_name);
+ if (task == TASK_NULL) {
+ error = ESRCH;
+ goto sysctl_get_vmobject_list_exit;
+ }
+
+ /* copy the vmobjects and vmobject data out of the task */
+ if (buffer_size == 0) {
+ /* Size-probe pass: count entries without copying any data. */
+ int64_t __size;
+ task_copy_vmobjects(task, NULL, 0, &__size);
+ output_size = (__size > 0) ? __size * sizeof(vm_object_query_data_t) + sizeof(int64_t) : 0;
+ } else {
+ /* Data pass: fill records after the leading int64 entry count. */
+ task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(int64_t), &buffer->entries);
+ output_size = buffer->entries * sizeof(vm_object_query_data_t) + sizeof(int64_t);
+ }
+
+ /* Drop the reference taken by port_name_to_task(). */
+ task_deallocate(task);
+
+ error = SYSCTL_OUT(req, (char*) buffer, output_size);
+
+sysctl_get_vmobject_list_exit:
+ if (buffer) {
+ kfree(buffer, buffer_size);
+ }
+
+ return error;
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, get_owned_vmobjects, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
+ 0, 0, sysctl_get_owned_vmobjects, "A", "get owned vmobjects in task");