+/* machdep.memmap.*: read-only, per-type byte totals for firmware memory-map
+ * regions (names match EFI memory types; presumably tallied from the boot
+ * memory map -- the firmware_*_bytes counters are defined elsewhere). */
+SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Conventional, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_Conventional_bytes, "");
+SYSCTL_QUAD(_machdep_memmap, OID_AUTO, RuntimeServices, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_RuntimeServices_bytes, "");
+SYSCTL_QUAD(_machdep_memmap, OID_AUTO, ACPIReclaim, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_ACPIReclaim_bytes, "");
+SYSCTL_QUAD(_machdep_memmap, OID_AUTO, ACPINVS, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_ACPINVS_bytes, "");
+SYSCTL_QUAD(_machdep_memmap, OID_AUTO, PalCode, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_PalCode_bytes, "");
+SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Reserved, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_Reserved_bytes, "");
+SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Unusable, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_Unusable_bytes, "");
+SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Other, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_other_bytes, "");
+
+/* machdep.tsc: parent node for the timestamp-counter parameters below. */
+SYSCTL_NODE(_machdep, OID_AUTO, tsc, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "Timestamp counter parameters");
+
+/* machdep.tsc.frequency: TSC tick rate (Hz), measured/derived at boot. */
+SYSCTL_QUAD(_machdep_tsc, OID_AUTO, frequency,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &tscFreq, "");
+
+/* Non-zero when the TSC had to be rebased after deep-idle (C-state) exit. */
+extern uint32_t deep_idle_rebase;
+SYSCTL_UINT(_machdep_tsc, OID_AUTO, deep_idle_rebase,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &deep_idle_rebase, 0, "");
+SYSCTL_QUAD(_machdep_tsc, OID_AUTO, at_boot,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &tsc_at_boot, "");
+SYSCTL_QUAD(_machdep_tsc, OID_AUTO, rebase_abs_time,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &tsc_rebase_abs_time, "");
+
+/* machdep.tsc.nanotime.*: expose the fields of the shared TSC->nanoseconds
+ * conversion record (pal_rtc_nanotime_info).  The struct fields are const-
+ * qualified from this file's perspective, hence the __DECONST casts; the
+ * OIDs are read-only so the casts never enable writes through sysctl. */
+SYSCTL_NODE(_machdep_tsc, OID_AUTO, nanotime,
+ CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "TSC to ns conversion");
+SYSCTL_QUAD(_machdep_tsc_nanotime, OID_AUTO, tsc_base,
+ CTLFLAG_RD | CTLFLAG_LOCKED,
+ __DECONST(uint64_t *, &pal_rtc_nanotime_info.tsc_base), "");
+SYSCTL_QUAD(_machdep_tsc_nanotime, OID_AUTO, ns_base,
+ CTLFLAG_RD | CTLFLAG_LOCKED,
+ __DECONST(uint64_t *, &pal_rtc_nanotime_info.ns_base), "");
+SYSCTL_UINT(_machdep_tsc_nanotime, OID_AUTO, scale,
+ CTLFLAG_RD | CTLFLAG_LOCKED,
+ __DECONST(uint32_t *, &pal_rtc_nanotime_info.scale), 0, "");
+SYSCTL_UINT(_machdep_tsc_nanotime, OID_AUTO, shift,
+ CTLFLAG_RD | CTLFLAG_LOCKED,
+ __DECONST(uint32_t *, &pal_rtc_nanotime_info.shift), 0, "");
+SYSCTL_UINT(_machdep_tsc_nanotime, OID_AUTO, generation,
+ CTLFLAG_RD | CTLFLAG_LOCKED,
+ __DECONST(uint32_t *, &pal_rtc_nanotime_info.generation), 0, "");
+
+/* machdep.misc: parent node for miscellaneous x86 tunables and debug hooks.
+ * Pass NULL (not the bare integer 0) for the unused handler-argument slot,
+ * matching the other SYSCTL_NODE declarations in this file. */
+SYSCTL_NODE(_machdep, OID_AUTO, misc, CTLFLAG_RW | CTLFLAG_LOCKED, NULL,
+ "Miscellaneous x86 kernel parameters");
+
+#if (DEVELOPMENT || DEBUG)
+/* Count of interrupt-watchdog firings; RW so tests can reset it to zero. */
+extern uint32_t mp_interrupt_watchdog_events;
+SYSCTL_UINT(_machdep_misc, OID_AUTO, interrupt_watchdog_events,
+ CTLFLAG_RW | CTLFLAG_LOCKED, &mp_interrupt_watchdog_events, 0, "");
+#endif
+
+
+/* machdep.misc.panic_restart_timeout: seconds before auto-restart on panic
+ * (handled by panic_set_restart_timeout; 32-bit int I/O, format "I"). */
+SYSCTL_PROC(_machdep_misc, OID_AUTO, panic_restart_timeout,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ panic_set_restart_timeout, "I", "Panic restart timeout in seconds");
+
+/* machdep.misc.interrupt_latency_max: string-valued proc reporting the
+ * worst observed interrupt latency (handler defined elsewhere in file). */
+SYSCTL_PROC(_machdep_misc, OID_AUTO, interrupt_latency_max,
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ misc_interrupt_latency_max, "A", "Maximum Interrupt latency");
+
+#if DEVELOPMENT || DEBUG
+/* Debug-only fault-injection hooks: deliberately trigger a machine-check
+ * panic or a kernel-timeout-spin panic from userspace for testing. */
+SYSCTL_PROC(_machdep_misc, OID_AUTO, machine_check_panic,
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ misc_machine_check_panic, "A", "Machine-check exception test");
+
+SYSCTL_PROC(_machdep_misc, OID_AUTO, kernel_timeout_spin,
+ CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, sizeof(kernel_timeout_spin),
+ misc_kernel_timeout_spin, "Q", "Kernel timeout panic test");
+
+/* Thresholds (absolute time) above which stalled physical reads/writes are
+ * reported, traced, or escalated to panic via the flags below. */
+SYSCTL_QUAD(_machdep, OID_AUTO, reportphyreadabs,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &reportphyreaddelayabs, "");
+SYSCTL_QUAD(_machdep, OID_AUTO, reportphywriteabs,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &reportphywritedelayabs, "");
+SYSCTL_QUAD(_machdep, OID_AUTO, tracephyreadabs,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tracephyreaddelayabs, "");
+SYSCTL_QUAD(_machdep, OID_AUTO, tracephywriteabs,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tracephywritedelayabs, "");
+SYSCTL_INT(_machdep, OID_AUTO, reportphyreadosbt,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &reportphyreadosbt, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, reportphywriteosbt,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &reportphywriteosbt, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, phyreaddelaypanic,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &phyreadpanic, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, phywritedelaypanic,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &phywritepanic, 0, "");
+/* NOTE(review): this nested conditional is redundant -- we are already
+ * inside the identical #if DEVELOPMENT || DEBUG opened above. Harmless. */
+#if DEVELOPMENT || DEBUG
+/* Artificially stretch physical I/O by this many ns to test the thresholds
+ * above. */
+extern uint64_t simulate_stretched_io;
+SYSCTL_QUAD(_machdep, OID_AUTO, sim_stretched_io_ns,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &simulate_stretched_io, "");
+#endif
+
+extern int pmap_pagezero_mitigation;
+extern int pmap_asserts_enabled, pmap_asserts_traced;
+/* On DEV/DEBUG kernels, clear this to disable the SMAP emulation
+ * (address space disconnect) for pagezero-less processes.
+ */
+SYSCTL_INT(_machdep, OID_AUTO, pmap_pagezero_mitigation,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &pmap_pagezero_mitigation, 0, "");
+/* Toggle pmap assertions */
+SYSCTL_INT(_machdep, OID_AUTO, pmap_asserts,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &pmap_asserts_enabled, 0, "");
+/* Transform pmap assertions into kernel trace terminations */
+SYSCTL_INT(_machdep, OID_AUTO, pmap_asserts_traced,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &pmap_asserts_traced, 0, "");
+
+/*
+ * Debug-only sysctl handler: writing a 64-bit value to the OID treats that
+ * value as a kernel virtual address, dereferences it as a uint32_t in
+ * supervisor mode, and prints the result to the console.
+ *
+ * NOTE(review): the caller-supplied address is not validated before the
+ * dereference -- a bogus address will fault in the kernel.  That appears
+ * intentional for this DEV/DEBUG probe, but worth confirming.
+ * Reads always report 0: old_value is initialized to 0 and never filled in.
+ */
+static int
+misc_svisor_read(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ uint64_t new_value = 0, old_value = 0;
+ int changed = 0, error;
+
+ /* 64-bit sysctl I/O: new_value receives the written quad on a set. */
+ error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
+ if ((error == 0) && changed) {
+ volatile uint32_t *raddr = (uint32_t *) new_value;
+ printf("Supervisor: value at 0x%llx is 0x%x\n", new_value, *raddr);
+ }
+ return error;
+}
+
+/* machdep.misc.misc_svisor_read: debug hook that reads a 32-bit value at a
+ * caller-supplied address in supervisor mode (handler defined above).  The
+ * OID is CTLTYPE_QUAD and the handler performs 64-bit sysctl I/O, so the
+ * advertised format must be "Q"; the previous "I" told sysctl(8) to treat
+ * the node as a 32-bit int, mismatching the actual I/O size. */
+SYSCTL_PROC(_machdep_misc, OID_AUTO, misc_svisor_read,
+ CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ misc_svisor_read, "Q", "supervisor mode read");
+
+#endif /* DEVELOPMENT || DEBUG */
+
+extern void timer_queue_trace_cpu(int);
+/*
+ * Sysctl handler: writing any string to the OID emits a timer-queue
+ * tracepoint for CPU 0.  The written string's content is ignored; reads
+ * return an empty string.
+ */
+static int
+misc_timer_queue_trace(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int changed = 0, error;
+ char buf[128];
+ buf[0] = '\0';
+
+ error = sysctl_io_string(req, buf, sizeof(buf), 0, &changed);
+
+ if (error == 0 && changed) {
+ /* Any successful write triggers the trace; buf itself is unused. */
+ timer_queue_trace_cpu(0);
+ }
+ return error;
+}
+
+SYSCTL_PROC(_machdep_misc, OID_AUTO, timer_queue_trace,
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ misc_timer_queue_trace, "A", "Cut timer queue tracepoint");
+
+extern long NMI_count;
+extern void NMI_cpus(void);
+/*
+ * Sysctl handler: reads report the current NMI count; writing any value
+ * sends an NMI to all CPUs (NMI_cpus) as a DEV/DEBUG exercise.
+ * NOTE(review): NMI_count is a long but is reported through an int --
+ * the value narrows on LP64 if the count ever exceeds INT_MAX.
+ */
+static int
+misc_nmis(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int new = 0, old = 0, changed = 0, error;
+
+ old = NMI_count;
+
+ error = sysctl_io_number(req, old, sizeof(int), &new, &changed);
+ if (error == 0 && changed) {
+ /* The written value is ignored; any write fires NMIs on all CPUs. */
+ NMI_cpus();
+ }
+
+ return error;
+}
+
+SYSCTL_PROC(_machdep_misc, OID_AUTO, nmis,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ misc_nmis, "I", "Report/increment NMI count");
+
+/* Parameters related to timer coalescing tuning, to be replaced
+ * with a dedicated systemcall in the future.
+ */
+/* Enable processing pending timers in the context of any other interrupt */
+SYSCTL_INT(_kern, OID_AUTO, interrupt_timer_coalescing_enabled,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &interrupt_timer_coalescing_enabled, 0, "");
+/* Upon entering idle, process pending timers with HW deadlines
+ * this far in the future.
+ */
+SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_idle_entry_hard_deadline_max,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &idle_entry_timer_processing_hdeadline_threshold, 0, "");
+
+/* Track potentially expensive eager timer evaluations on QoS tier
+ * switches.
+ */
+extern uint32_t ml_timer_eager_evaluations;
+
+SYSCTL_INT(_machdep, OID_AUTO, eager_timer_evaluations,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &ml_timer_eager_evaluations, 0, "");
+
+/* Longest single eager timer evaluation observed (absolute-time units). */
+extern uint64_t ml_timer_eager_evaluation_max;
+
+SYSCTL_QUAD(_machdep, OID_AUTO, eager_timer_evaluation_max,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &ml_timer_eager_evaluation_max, "");
+/* Count of FP/SIMD register use from interrupt context (normally avoided). */
+extern uint64_t x86_isr_fp_simd_use;
+SYSCTL_QUAD(_machdep, OID_AUTO, x86_fp_simd_isr_uses,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &x86_isr_fp_simd_use, "");
+#if DEVELOPMENT || DEBUG
+
+/* Toggle for preemption-level change tracing. */
+extern int plctrace_enabled;
+
+SYSCTL_INT(_machdep, OID_AUTO, pltrace,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &plctrace_enabled, 0, "");
+
+/* Intentionally not declared as volatile here: */
+extern int mmiotrace_enabled;
+
+SYSCTL_INT(_machdep, OID_AUTO, MMIOtrace,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &mmiotrace_enabled, 0, "");
+
+/* Toggle for FP/SIMD fault program-counter capture. */
+extern int fpsimd_fault_popc;
+SYSCTL_INT(_machdep, OID_AUTO, fpsimd_fault_popc,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &fpsimd_fault_popc, 0, "");
+
+/* Shared stop flag for the spin_forever test OID below; volatile so the
+ * spin loop re-reads it each iteration when another thread sets it. */
+volatile int stop_spinning;
+/*
+ * Sysctl handler backing machdep.misc.spin_forever: a write parks the
+ * calling thread in a pause-loop until some other caller performs a read
+ * (the read path sets stop_spinning), releasing it.  DEV/DEBUG watchdog /
+ * scheduler test aid.
+ */
+static int
+spin_in_the_kernel(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int new = 0, old = 0, changed = 0, error;
+
+ error = sysctl_io_number(req, old, sizeof(int), &new, &changed);
+ if (error == 0 && changed) {
+ /* Write path: spin here until a subsequent read flips the flag. */
+ stop_spinning = FALSE;
+ while (stop_spinning == FALSE) {
+ __builtin_ia32_pause();
+ }
+ } else if (error == 0) {
+ /* Read path: release any thread currently spinning above. */
+ stop_spinning = TRUE;
+ }
+
+ return error;
+}
+
+SYSCTL_PROC(_machdep_misc, OID_AUTO, spin_forever,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED,
+ 0, 0,
+ spin_in_the_kernel, "I", "Spin forever");
+
+
+/* Runtime toggle for the trap-trace facility. */
+extern int traptrace_enabled;
+SYSCTL_INT(_machdep_misc, OID_AUTO, traptrace_enabled,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &traptrace_enabled, 0, "Enabled/disable trap trace");
+
+#endif /* DEVELOPMENT || DEBUG */