+ plbts = &cdata->plrecords[cplrecord].plbt[0];
+
+ cplrecord++;
+
+ if (cplrecord >= MAX_PREEMPTION_RECORDS) {
+ cplrecord = 0;
+ }
+
+ cdata->cpu_plri = cplrecord;
+
+ rbtrace_bt(plbts, MAX_TRACE_BTFRAMES - 1, cdata, (uint64_t)__builtin_frame_address(0), false);
+}
+
+extern int plctrace_enabled;
+
+/*
+ * iotrace: log one MMIO access into this CPU's iotrace ring buffer.
+ * No-op unless MMIO tracing is enabled and at least one generator is active.
+ * Records type, virtual/physical address, access size, value, start time,
+ * duration, and a backtrace of the caller.
+ */
+static inline void
+iotrace(iotrace_type_e type, uint64_t vaddr, uint64_t paddr, int size, uint64_t val,
+    uint64_t sabs, uint64_t duration)
+{
+	if (__improbable(mmiotrace_enabled == 0 || iotrace_generators == 0)) {
+		return;
+	}
+
+	cpu_data_t *cdatap = current_cpu_datap();
+	int mycpu = cdatap->cpu_number;
+	int idx = iotrace_next[mycpu];
+	iotrace_entry_t *entp = &iotrace_ring[mycpu][idx];
+
+	entp->iotype = type;
+	entp->vaddr = vaddr;
+	entp->paddr = paddr;
+	entp->size = size;
+	entp->val = val;
+	entp->start_time_abs = sabs;
+	entp->duration = duration;
+
+	/* Advance the per-CPU index, wrapping at the end of the ring. */
+	iotrace_next[mycpu] = ((idx + 1) >= iotrace_entries_per_cpu) ? 0 : (idx + 1);
+
+	/* Capture the caller's backtrace into the record just filled. */
+	rbtrace_bt(&entp->backtrace[0], MAX_TRACE_BTFRAMES - 1, cdatap,
+	    (uint64_t)__builtin_frame_address(0), true);
+}
+
+/*
+ * traptrace_start: claim and fill the next slot in this CPU's traptrace
+ * ring for trap/interrupt vector 'vecnum'. Returns an opaque token
+ * ((cpu << 16) | index) for traptrace_end(), or TRAPTRACE_INVALID_INDEX
+ * when tracing is disabled. Must be called with interrupts disabled.
+ */
+static inline uint32_t
+traptrace_start(int vecnum, uint64_t ipc, uint64_t sabs, uint64_t frameptr)
+{
+	if (__improbable(traptrace_enabled == 0 || traptrace_generators == 0)) {
+		return TRAPTRACE_INVALID_INDEX;
+	}
+
+	assert(ml_get_interrupts_enabled() == FALSE);
+	cpu_data_t *cdatap = current_cpu_datap();
+	unsigned int mycpu = (unsigned int)cdatap->cpu_number;
+	unsigned int idx = (unsigned int)traptrace_next[mycpu];
+	/*
+	 * Advance the ring index before filling the slot so a nested
+	 * interrupt that also traces cannot claim (and clobber) this record.
+	 */
+	traptrace_next[mycpu] = (int)(((idx + 1) >= (unsigned int)traptrace_entries_per_cpu) ? 0 : (idx + 1));
+
+	traptrace_entry_t *entp = &traptrace_ring[mycpu][idx];
+	entp->vector = vecnum;
+	entp->curthread = current_thread();
+	entp->interrupted_pc = ipc;
+	entp->curpl = cdatap->cpu_preemption_level;
+	entp->curil = cdatap->cpu_interrupt_level;
+	entp->start_time_abs = sabs;
+	entp->duration = ~0ULL;	/* sentinel; patched by traptrace_end() */
+
+	rbtrace_bt(&entp->backtrace[0], MAX_TRACE_BTFRAMES - 1, cdatap,
+	    frameptr, false);
+
+	/* The index must fit in the low 16 bits of the returned token. */
+	assert(idx <= 0xFFFF);
+
+	return (uint32_t)((mycpu << 16) | idx);
+}
+
+/*
+ * traptrace_end: close out a record opened by traptrace_start(),
+ * recording its elapsed duration. 'index' is the token returned by
+ * traptrace_start(); a TRAPTRACE_INVALID_INDEX token is ignored.
+ */
+static inline void
+traptrace_end(uint32_t index, uint64_t eabs)
+{
+	if (index == TRAPTRACE_INVALID_INDEX) {
+		return;
+	}
+
+	/* Token encodes (cpu << 16) | slot; see traptrace_start(). */
+	traptrace_entry_t *entp = &traptrace_ring[index >> 16][index & 0xFFFF];
+	entp->duration = eabs - entp->start_time_abs;
+}
+
+#endif /* DEVELOPMENT || DEBUG */
+
+/*
+ * pltrace: preemption-level tracing hook. Called with FALSE when
+ * preemption is being disabled and TRUE when it is being re-enabled
+ * (see disable/enable_preemption_internal below). Forwards to
+ * pltrace_internal() only on DEVELOPMENT/DEBUG kernels and only when
+ * the plctrace_enabled switch is set; compiles to nothing otherwise.
+ */
+__header_always_inline void
+pltrace(boolean_t plenable)
+{
+#if DEVELOPMENT || DEBUG
+	/* __improbable: tracing is off in the common case. */
+	if (__improbable(plctrace_enabled != 0)) {
+		pltrace_internal(plenable);
+	}
+#else
+	(void)plenable;	/* parameter unused on RELEASE kernels */
+#endif
+}
+
+/*
+ * disable_preemption_internal: raise this CPU's preemption-disable count.
+ * The compiler barriers bracket the increment so the compiler cannot
+ * move memory accesses across the level change; the pltrace() call
+ * records the transition on DEVELOPMENT/DEBUG kernels.
+ */
+static inline void
+disable_preemption_internal(void)
+{
+	assert(get_preemption_level() >= 0);
+
+	os_compiler_barrier();
+	CPU_DATA()->cpu_preemption_level++;
+	os_compiler_barrier();
+	pltrace(FALSE);
+}
+
+static inline void
+enable_preemption_internal(void)
+{
+ assert(get_preemption_level() > 0);
+ pltrace(TRUE);
+ os_compiler_barrier();
+ if (0 == --CPU_DATA()->cpu_preemption_level) {
+ kernel_preempt_check();
+ }
+ os_compiler_barrier();