+
+static inline cpu_data_t *
+current_cpu_datap(void) {
+ CPU_DATA_GET(cpu_this, cpu_data_t *);
+}
+
+/*
+ * Facility to diagnose preemption-level imbalances, which are otherwise
+ * challenging to debug. On each operation that enables or disables preemption,
+ * we record a backtrace into a per-CPU ring buffer, along with the current
+ * preemption level and operation type. Thus, if an imbalance is observed,
+ * one can examine these per-CPU records to determine which codepath failed
+ * to re-enable preemption, enabled preemption without a corresponding
+ * disablement, etc. The backtracer determines which stack is currently active
+ * and uses its bounds to terminate the walk, since frame chains on these
+ * stacks are not always NULL-terminated.
+ * To enable, sysctl -w machdep.pltrace=1 on DEVELOPMENT or DEBUG kernels (DRK '15)
+ * The bounds check currently doesn't account for non-default thread stack sizes.
+ */
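+/*
+ * For reference, a minimal sketch of the per-CPU state the recording code
+ * below assumes. The authoritative definitions live alongside cpu_data_t;
+ * the constant values here are illustrative, and only the names actually
+ * referenced below (plrecords, cpu_plri, pltype, plevel, plbt,
+ * MAX_PREEMPTION_RECORDS, MAXPLFRAMES) are taken from the code itself:
+ *
+ *	#define MAXPLFRAMES            16
+ *	#define MAX_PREEMPTION_RECORDS 128
+ *
+ *	typedef struct {
+ *		boolean_t pltype;             // TRUE == enable, FALSE == disable
+ *		int       plevel;             // preemption level when recorded
+ *		uint64_t  plbt[MAXPLFRAMES];  // PC followed by return addresses
+ *	} plrecord_t;
+ *
+ *	// Within cpu_data_t:
+ *	//	plrecord_t plrecords[MAX_PREEMPTION_RECORDS];  // per-CPU ring buffer
+ *	//	int        cpu_plri;                           // next ring index
+ */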
+#if DEVELOPMENT || DEBUG
+static inline void pltrace_bt(uint64_t *rets, int maxframes, uint64_t stacklo, uint64_t stackhi) {
+ uint64_t *cfp = (uint64_t *) __builtin_frame_address(0);
+ int plbtf;
+
+ assert(stacklo != 0 && stackhi != 0);
+
+ for (plbtf = 0; plbtf < maxframes; plbtf++) {
+ /* Once the frame pointer is NULL or walks outside the supplied
+ * stack bounds, zero-fill the remaining slots rather than
+ * dereferencing it.
+ */
+ if (((uint64_t)cfp == 0) || (((uint64_t)cfp < stacklo) || ((uint64_t)cfp > stackhi))) {
+ rets[plbtf] = 0;
+ continue;
+ }
+ /* The saved return address sits one word above the saved frame
+ * pointer; follow the saved frame pointer to the caller's frame.
+ */
+ rets[plbtf] = *(cfp + 1);
+ cfp = (uint64_t *) (*cfp);
+ }
+}
+
+
+extern uint32_t low_intstack[]; /* bottom */
+extern uint32_t low_eintstack[]; /* top */
+extern char mp_slave_stack[PAGE_SIZE];
+
+static inline void pltrace_internal(boolean_t enable) {
+ cpu_data_t *cdata = current_cpu_datap();
+ int cpli = cdata->cpu_preemption_level;
+ int cplrecord = cdata->cpu_plri;
+ uint64_t kstackb, kstackt, *plbts;
+
+ assert(cpli >= 0);
+
+ cdata->plrecords[cplrecord].pltype = enable;
+ cdata->plrecords[cplrecord].plevel = cpli;
+
+ plbts = &cdata->plrecords[cplrecord].plbt[0];
+
+ cplrecord++;
+
+ if (cplrecord >= MAX_PREEMPTION_RECORDS) {
+ cplrecord = 0;
+ }
+
+ cdata->cpu_plri = cplrecord;
+ /* Obtain the 'current' program counter as the initial backtrace
+ * element. A lone entry here also indicates that we were unable to
+ * trace further up the stack for some reason.
+ */
+ __asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:"
+ : "=m" (plbts[0])
+ :
+ : "rax");
+
+
+ thread_t cplthread = cdata->cpu_active_thread;
+ if (cplthread) {
+ uintptr_t csp;
+ __asm__ __volatile__ ("movq %%rsp, %0": "=r" (csp):);
+ /* Determine which stack we're on to populate stack bounds.
+ * We don't need to trace across stack boundaries for this
+ * routine.
+ */
+ kstackb = cdata->cpu_active_stack;
+ kstackt = kstackb + KERNEL_STACK_SIZE;
+ if (csp < kstackb || csp > kstackt) {
+ kstackt = cdata->cpu_kernel_stack;
+ kstackb = kstackt - KERNEL_STACK_SIZE;
+ if (csp < kstackb || csp > kstackt) {
+ kstackt = cdata->cpu_int_stack_top;
+ kstackb = kstackt - INTSTACK_SIZE;
+ if (csp < kstackb || csp > kstackt) {
+ kstackt = (uintptr_t)low_eintstack;
+ kstackb = (uintptr_t)low_eintstack - INTSTACK_SIZE;
+ if (csp < kstackb || csp > kstackt) {
+ kstackb = (uintptr_t) mp_slave_stack;
+ kstackt = (uintptr_t) mp_slave_stack + PAGE_SIZE;
+ }
+ }
+ }
+ }
+
+ if (kstackb) {
+ pltrace_bt(&plbts[1], MAXPLFRAMES - 1, kstackb, kstackt);
+ }
+ }
+}
+
+extern int plctrace_enabled;
+#endif /* DEVELOPMENT || DEBUG */
+
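+/*
+ * pltrace() below is the hook meant to be invoked from the preemption
+ * enable/disable paths. A hypothetical call site, for illustration only
+ * (the actual preemption-count manipulation lives elsewhere, and the real
+ * enable path also checks for pending preemption when the level reaches 0):
+ *
+ *	static inline void _disable_preemption(void) {
+ *		current_cpu_datap()->cpu_preemption_level++;
+ *		pltrace(FALSE);	// record the disable operation
+ *	}
+ *
+ *	static inline void _enable_preemption(void) {
+ *		pltrace(TRUE);	// record the enable operation
+ *		current_cpu_datap()->cpu_preemption_level--;
+ *	}
+ */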
+static inline void pltrace(boolean_t plenable) {
+#if DEVELOPMENT || DEBUG
+ if (__improbable(plctrace_enabled != 0)) {
+ pltrace_internal(plenable);
+ }
+#else
+ (void)plenable;
+#endif