+SYSCTL_PROC(_machdep_misc, OID_AUTO, kernel_timeout_spin,
+ CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, sizeof(kernel_timeout_spin),
+ misc_kernel_timeout_spin, "Q", "Kernel timeout panic test");
+
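+/* Physical-read latency instrumentation: reportphyreaddelayabs is the
+ * reporting threshold (absolute time units), reportphyreadosbt requests a
+ * backtraced report, and phyreadpanic escalates a slow read into a panic.
+ * (Descriptions inferred from the variable names; the enforcement lives in
+ * the I/O read path, not here.)
+ */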
+SYSCTL_QUAD(_machdep, OID_AUTO, reportphyreadabs,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &reportphyreaddelayabs, "");
+SYSCTL_INT(_machdep, OID_AUTO, reportphyreadosbt,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &reportphyreadosbt, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, phyreaddelaypanic,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &phyreadpanic, 0, "");
+
+extern int pmap_pagezero_mitigation;
+extern int pmap_asserts_enabled, pmap_asserts_traced;
+/* On DEV/DEBUG kernels, clear this to disable the SMAP emulation
+ * (address space disconnect) for pagezero-less processes.
+ */
+SYSCTL_INT(_machdep, OID_AUTO, pmap_pagezero_mitigation,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &pmap_pagezero_mitigation, 0, "");
+/* Toggle pmap assertions */
+SYSCTL_INT(_machdep, OID_AUTO, pmap_asserts,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &pmap_asserts_enabled, 0, "");
+/* Transform pmap assertions into kernel trace terminations */
+SYSCTL_INT(_machdep, OID_AUTO, pmap_asserts_traced,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &pmap_asserts_traced, 0, "");
+
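+/* Writing an address to this node causes the kernel to dereference it from
+ * supervisor mode and log the 32-bit value found there; reads return 0.
+ */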
+static int
+misc_svisor_read(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ uint64_t new_value = 0, old_value = 0;
+ int changed = 0, error;
+
+ error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
+ if ((error == 0) && changed) {
+ volatile uint32_t *raddr = (uint32_t *) new_value;
+ printf("Supervisor: value at 0x%llx is 0x%x\n", new_value, *raddr);
+ }
+ return error;
+}
+
+SYSCTL_PROC(_machdep_misc, OID_AUTO, misc_svisor_read,
+ CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ misc_svisor_read, "Q", "Supervisor mode read");
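+/* Usage sketch, assuming the _machdep_misc node publishes as machdep.misc:
+ *   sysctl -w machdep.misc.misc_svisor_read=<kernel address>
+ */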
+
+#endif /* DEVELOPMENT || DEBUG */
+
+extern void timer_queue_trace_cpu(int);
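+/* Writing any string to this node cuts a timer-queue tracepoint for CPU 0
+ * via timer_queue_trace_cpu(); reads return an empty string.
+ */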
+static int
+misc_timer_queue_trace(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int changed = 0, error;
+ char buf[128];
+ buf[0] = '\0';
+
+ error = sysctl_io_string(req, buf, sizeof(buf), 0, &changed);
+
+ if (error == 0 && changed) {
+ timer_queue_trace_cpu(0);
+ }
+ return error;
+}
+
+SYSCTL_PROC(_machdep_misc, OID_AUTO, timer_queue_trace,
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ misc_timer_queue_trace, "A", "Cut timer queue tracepoint");
+
+extern long NMI_count;
+extern void NMI_cpus(void);
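+/* Reads report the current NMI count; any write invokes NMI_cpus(), which
+ * exercises NMI delivery on each CPU (and bumps the count as a side effect).
+ */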
+static int
+misc_nmis(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int new = 0, old = 0, changed = 0, error;
+
+ old = (int)NMI_count; /* NMI_count is a long; narrow explicitly for this int-sized sysctl */
+
+ error = sysctl_io_number(req, old, sizeof(int), &new, &changed);
+ if (error == 0 && changed) {
+ NMI_cpus();
+ }
+
+ return error;
+}
+
+SYSCTL_PROC(_machdep_misc, OID_AUTO, nmis,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ misc_nmis, "I", "Report/increment NMI count");
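+/* e.g. "sysctl machdep.misc.nmis" to read the count and
+ * "sysctl -w machdep.misc.nmis=1" to fire test NMIs
+ * (assuming _machdep_misc publishes as machdep.misc).
+ */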
+
+/* Parameters related to timer coalescing tuning, to be replaced
+ * with a dedicated system call in the future.
+ */
+/* Enable processing pending timers in the context of any other interrupt */
+SYSCTL_INT(_kern, OID_AUTO, interrupt_timer_coalescing_enabled,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &interrupt_timer_coalescing_enabled, 0, "");
+/* Upon entering idle, process pending timers with HW deadlines
+ * this far in the future.
+ */
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_idle_entry_hard_deadline_max,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &idle_entry_timer_processing_hdeadline_threshold, 0, "");
+
+/* Track potentially expensive eager timer evaluations on QoS tier
+ * switches.
+ */
+extern uint32_t ml_timer_eager_evaluations;
+
+SYSCTL_UINT(_machdep, OID_AUTO, eager_timer_evaluations,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &ml_timer_eager_evaluations, 0, "");
+
+extern uint64_t ml_timer_eager_evaluation_max;
+
+SYSCTL_QUAD(_machdep, OID_AUTO, eager_timer_evaluation_max,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &ml_timer_eager_evaluation_max, "");
+
+extern uint64_t x86_isr_fp_simd_use;
+SYSCTL_QUAD(_machdep, OID_AUTO, x86_fp_simd_isr_uses,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &x86_isr_fp_simd_use, "");
+#if DEVELOPMENT || DEBUG
+
+extern int plctrace_enabled;
+
+SYSCTL_INT(_machdep, OID_AUTO, pltrace,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &plctrace_enabled, 0, "");
+
+extern int fpsimd_fault_popc;
+SYSCTL_INT(_machdep, OID_AUTO, fpsimd_fault_popc,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &fpsimd_fault_popc, 0, "");
+
+#endif /* DEVELOPMENT || DEBUG */