+ uint32_t bindx;
+ kd_buf *kd;
+ struct kd_bufinfo *kdbp;
+ struct kd_storage *kdsp_actual;
+ union kds_ptr kds_raw;
+
+ if (kd_ctrl_page.kdebug_slowcheck) {
+
+ if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
+ goto out1;
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ /*
+ * Recheck if TYPEFILTER is being used, and if so,
+ * dereference bitmap. If the trace facility is being
+ * disabled, we have ~100ms of preemption-free CPU
+ * usage to access the bitmap.
+ */
+ disable_preemption();
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ if (isset(type_filter_bitmap, KDBG_EXTRACT_CSC(debugid)))
+ goto record_event_preempt_disabled;
+ }
+ enable_preemption();
+ goto out1;
+ }
+ else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
+ if (debugid >= kdlog_beg && debugid <= kdlog_end)
+ goto record_event;
+ goto out1;
+ }
+ else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
+ if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
+ goto out1;
+ }
+ }
+
+record_event:
+
+ disable_preemption();
+
+record_event_preempt_disabled:
+ if (kd_ctrl_page.enabled == 0)
+ goto out;
+
+ kdbp = &kdbip[coreid];
+ timestamp &= KDBG_TIMESTAMP_MASK;
+
+#if KDEBUG_MOJO_TRACE
+ if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
+ kdebug_serial_print(coreid, debugid, timestamp,
+ arg1, arg2, arg3, arg4, threadid);
+#endif
+
+retry_q:
+ kds_raw = kdbp->kd_list_tail;
+
+ if (kds_raw.raw != KDS_PTR_NULL) {
+ kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
+ bindx = kdsp_actual->kds_bufindx;
+ } else
+ kdsp_actual = NULL;
+
+ if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
+ if (allocate_storage_unit(coreid) == FALSE) {
+ /*
+ * this can only happen if wrapping
+ * has been disabled
+ */
+ goto out;
+ }
+ goto retry_q;
+ }
+ if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
+ goto retry_q;
+
+ // IOP entries can be allocated before xnu allocates and inits the buffer
+ if (timestamp < kdsp_actual->kds_timestamp)
+ kdsp_actual->kds_timestamp = timestamp;
+
+ kd = &kdsp_actual->kds_records[bindx];
+
+ kd->debugid = debugid;
+ kd->arg1 = arg1;
+ kd->arg2 = arg2;
+ kd->arg3 = arg3;
+ kd->arg4 = arg4;
+ kd->arg5 = threadid;
+
+ kdbg_set_timestamp_and_cpu(kd, timestamp, coreid);
+
+ OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
+out:
+ enable_preemption();
+out1:
+ if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) {
+ boolean_t need_kds_wakeup = FALSE;
+ int s;
+
+ /*
+ * try to take the lock here to synchronize with the
+ * waiter entering the blocked state... use the try
+ * mode to prevent deadlocks caused by re-entering this
+ * routine due to various trace points triggered in the
+ * lck_spin_sleep_xxxx routines used to actually enter
+ * our wait condition... no problem if we fail,
+ * there will be lots of additional events coming in that
+ * will eventually succeed in grabbing this lock
+ */
+ s = ml_set_interrupts_enabled(FALSE);
+
+ if (lck_spin_try_lock(kdw_spin_lock)) {
+
+ if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
+ kds_waiter = 0;
+ need_kds_wakeup = TRUE;
+ }
+ lck_spin_unlock(kdw_spin_lock);
+
+ ml_set_interrupts_enabled(s);
+
+ if (need_kds_wakeup == TRUE)
+ wakeup(&kds_waiter);
+ }
+ }
+}
+
+
+
+/*
+ * kernel_debug_internal
+ *
+ * Common recording path for the kernel_debug*() entry points: runs the
+ * configured event filters against debugid, then reserves a slot in the
+ * current cpu's storage unit and fills in the kd_buf record.
+ *
+ *	debugid    event id to log
+ *	arg1-arg4  event payload
+ *	arg5       by convention the logging thread's id (see callers)
+ */
+static void
+kernel_debug_internal(
+	uint32_t	debugid,
+	uintptr_t	arg1,
+	uintptr_t	arg2,
+	uintptr_t	arg3,
+	uintptr_t	arg4,
+	uintptr_t	arg5)
+{
+	struct proc 	*curproc;
+	uint64_t 	now;
+	uint32_t	bindx;
+	boolean_t	s;
+	kd_buf		*kd;
+	int		cpu;
+	struct kd_bufinfo *kdbp;
+	struct kd_storage *kdsp_actual;
+	union  kds_ptr kds_raw;
+
+
+
+	/* Slow path: a hook or at least one event filter is active. */
+	if (kd_ctrl_page.kdebug_slowcheck) {
+
+		if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
+			kd_chudhook_fn chudhook;
+			/*
+			 * Mask interrupts to minimize the interval across
+			 * which the driver providing the hook could be
+			 * unloaded.
+			 */
+			s = ml_set_interrupts_enabled(FALSE);
+			chudhook = kdebug_chudhook;
+			if (chudhook)
+				chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
+			ml_set_interrupts_enabled(s);
+		}
+		/* Logging disabled: skip recording but still run the out1 waiter check. */
+		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
+			goto out1;
+
+		/* Per-process filters need current_proc(); skip them in interrupt context. */
+		if ( !ml_at_interrupt_context()) {
+			if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
+				/*
+				 * If kdebug flag is not set for current proc, return
+				 */
+				curproc = current_proc();
+
+				/* scheduler and DBG_TRACE events bypass the pid filter */
+				if ((curproc && !(curproc->p_kdebug)) &&
+				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
+				    (debugid >> 24 != DBG_TRACE))
+					goto out1;
+			}
+			else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
+				/*
+				 * If kdebug flag is set for current proc, return
+				 */
+				curproc = current_proc();
+
+				/* scheduler and DBG_TRACE events bypass the pid exclusion */
+				if ((curproc && curproc->p_kdebug) &&
+				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
+				    (debugid >> 24 != DBG_TRACE))
+					goto out1;
+			}
+		}
+
+		if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+			/* Always record trace system info */
+			if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
+				goto record_event;
+
+			/*
+			 * Recheck if TYPEFILTER is being used, and if so,
+			 * dereference bitmap. If the trace facility is being
+			 * disabled, we have ~100ms of preemption-free CPU
+			 * usage to access the bitmap.
+			 */
+			disable_preemption();
+			if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+				if (isset(type_filter_bitmap, KDBG_EXTRACT_CSC(debugid)))
+					goto record_event_preempt_disabled;
+			}
+			enable_preemption();
+			goto out1;
+		}
+		else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
+			/* Always record trace system info */
+			if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
+				goto record_event;
+
+			if (debugid < kdlog_beg || debugid > kdlog_end)
+				goto out1;
+		}
+		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
+			/* Always record trace system info */
+			if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
+				goto record_event;
+
+			/* only the four explicitly configured event ids pass */
+			if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
+			    (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
+			    (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
+			    (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
+				goto out1;
+		}
+	}
+record_event:
+	/*
+	 * Preemption stays disabled from here until 'out' so the cpu number
+	 * and this cpu's storage state remain stable while we fill the record.
+	 */
+	disable_preemption();
+
+record_event_preempt_disabled:
+	if (kd_ctrl_page.enabled == 0)
+		goto out;
+
+	cpu = cpu_number();
+	kdbp = &kdbip[cpu];
+
+#if KDEBUG_MOJO_TRACE
+	if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
+		kdebug_serial_print(cpu, debugid,
+				    mach_absolute_time() & KDBG_TIMESTAMP_MASK,
+				    arg1, arg2, arg3, arg4, arg5);
+#endif
+
+retry_q:
+	/*
+	 * Lock-free slot reservation: read the tail storage unit and its next
+	 * buffer index, then claim that slot with a compare-and-swap below.
+	 */
+	kds_raw = kdbp->kd_list_tail;
+
+	if (kds_raw.raw != KDS_PTR_NULL) {
+		kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
+		bindx = kdsp_actual->kds_bufindx;
+	} else
+		kdsp_actual = NULL;
+
+	/* no tail unit yet, or the current one is full: allocate a fresh unit */
+	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
+		if (allocate_storage_unit(cpu) == FALSE) {
+			/*
+			 * this can only happen if wrapping
+			 * has been disabled
+			 */
+			goto out;
+		}
+		goto retry_q;
+	}
+	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
+
+	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
+		goto retry_q;	/* lost the race for this slot; start over */
+
+	kd = &kdsp_actual->kds_records[bindx];
+
+	kd->debugid = debugid;
+	kd->arg1 = arg1;
+	kd->arg2 = arg2;
+	kd->arg3 = arg3;
+	kd->arg4 = arg4;
+	kd->arg5 = arg5;
+
+	kdbg_set_timestamp_and_cpu(kd, now, cpu);
+
+	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
+out:
+	enable_preemption();
+out1:
+	/*
+	 * Possibly wake a thread blocked waiting for storage, but only once
+	 * enough storage units are in use and only for this subset of event
+	 * classes, to bound how often the wakeup path runs.
+	 */
+	if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
+		uint32_t	etype;
+		uint32_t	stype;
+
+		etype = debugid & KDBG_EVENTID_MASK;
+		stype = debugid & KDBG_CSC_MASK;
+
+		if (etype == INTERRUPT || etype == MACH_vmfault ||
+		    stype == BSC_SysCall || stype == MACH_SysCall) {
+
+			boolean_t need_kds_wakeup = FALSE;
+
+			/*
+			 * try to take the lock here to synchronize with the
+			 * waiter entering the blocked state... use the try
+			 * mode to prevent deadlocks caused by re-entering this
+			 * routine due to various trace points triggered in the
+			 * lck_spin_sleep_xxxx routines used to actually enter
+			 * one of our 2 wait conditions... no problem if we fail,
+			 * there will be lots of additional events coming in that
+			 * will eventually succeed in grabbing this lock
+			 */
+			s = ml_set_interrupts_enabled(FALSE);
+
+			if (lck_spin_try_lock(kdw_spin_lock)) {
+
+				if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
+					kds_waiter = 0;
+					need_kds_wakeup = TRUE;
+				}
+				lck_spin_unlock(kdw_spin_lock);
+			}
+			ml_set_interrupts_enabled(s);
+
+			if (need_kds_wakeup == TRUE)
+				wakeup(&kds_waiter);
+		}
+	}
+}
+
+/*
+ * Public tracepoint entry point.  Records the event tagged with the
+ * current thread's id; the caller-supplied arg5 is ignored.
+ */
+void
+kernel_debug(uint32_t debugid, uintptr_t arg1, uintptr_t arg2,
+	     uintptr_t arg3, uintptr_t arg4, __unused uintptr_t arg5)
+{
+	uintptr_t tid = (uintptr_t)thread_tid(current_thread());
+
+	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, tid);
+}
+
+/*
+ * Public tracepoint entry point that lets the caller supply all five
+ * arguments verbatim (no thread-id substitution).
+ */
+void
+kernel_debug1(uint32_t debugid, uintptr_t arg1, uintptr_t arg2,
+	      uintptr_t arg3, uintptr_t arg4, uintptr_t arg5)
+{
+	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5);
+}
+
+/*
+ * Log a short string by packing its bytes into the four tracepoint
+ * arguments.  Anything beyond sizeof(arg) bytes is truncated; unused
+ * bytes stay zero from the initializer.
+ */
+void
+kernel_debug_string_simple(const char *message)
+{
+	uintptr_t arg[4] = {0, 0, 0, 0};
+	size_t len = strlen(message);
+
+	if (len > sizeof(arg))
+		len = sizeof(arg);
+	memcpy(arg, message, len);
+
+	KERNEL_DEBUG_EARLY(TRACE_INFO_STRING, arg[0], arg[1], arg[2], arg[3]);
+}
+
+extern int master_cpu; /* MACH_KERNEL_PRIVATE */
+/*
+ * Used prior to start_kern_tracing() being called.
+ * Log temporarily into a static buffer.
+ */
+/*
+ * Used prior to start_kern_tracing() being called.
+ * Log temporarily into a static buffer.
+ */
+void
+kernel_debug_early(
+	uint32_t	debugid,
+	uintptr_t	arg1,
+	uintptr_t	arg2,
+	uintptr_t	arg3,
+	uintptr_t	arg4)
+{
+	kd_buf *slot;
+
+	/* Once the real trace buffers exist, log there instead. */
+	if (nkdbufs) {
+		KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0);
+		return;
+	}
+
+	/* Drop the event if the static buffer is full or we're off the boot cpu. */
+	kd_early_overflow = kd_early_index >= KD_EARLY_BUFFER_MAX;
+	if (kd_early_overflow || cpu_number() != master_cpu)
+		return;
+
+	slot = &kd_early_buffer[kd_early_index];
+	slot->debugid = debugid;
+	slot->timestamp = mach_absolute_time();
+	slot->arg1 = arg1;
+	slot->arg2 = arg2;
+	slot->arg3 = arg3;
+	slot->arg4 = arg4;
+	slot->arg5 = 0;
+	kd_early_index++;
+}
+
+/*
+ * Transfer the contents of the temporary buffer into the trace buffers.
+ * Precede that by logging the rebase time (offset) - the TSC-based time (in ns)
+ * when mach_absolute_time is set to 0.
+ */
+/*
+ * Drain the static early-trace buffer into the real trace buffers,
+ * preceded by a sentinel record carrying the TSC rebase time and
+ * followed by a lost-events marker if the static buffer overflowed.
+ * Must run on the boot processor.
+ */
+static void
+kernel_debug_early_end(void)
+{
+	int i;
+
+	if (cpu_number() != master_cpu)
+		panic("kernel_debug_early_end() not call on boot processor");
+
+	/* Fake sentinel: TSC rebase time split into high/low 32-bit halves. */
+	kernel_debug_enter(0, TRACE_TIMESTAMPS, 0,
+	    (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
+	    0, 0, 0);
+
+	/* Replay every buffered early event. */
+	for (i = 0; i < kd_early_index; i++) {
+		kd_buf *e = &kd_early_buffer[i];
+
+		kernel_debug_enter(0, e->debugid, e->timestamp,
+		    e->arg1, e->arg2, e->arg3, e->arg4, 0);
+	}
+
+	/* Cut events-lost event on overflow */
+	if (kd_early_overflow)
+		KERNEL_DEBUG_CONSTANT(TRACE_LOST_EVENTS, 0, 0, 0, 0, 0);
+
+	/* This trace marks the start of kernel tracing */
+	kernel_debug_string_simple("early trace done");
+}
+
+/*
+ * Returns non-zero if debugid is in a reserved class.
+ */
+/*
+ * Reject debugids that user code may not log: the DBG_TRACE class is
+ * reserved for the trace facility itself.  Returns EPERM for reserved
+ * ids, 0 otherwise.
+ */
+static int
+kdebug_validate_debugid(uint32_t debugid)
+{
+	if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
+		return EPERM;
+
+	return 0;
+}
+
+/*
+ * Support syscall SYS_kdebug_trace. U64->K32 args may get truncated in kdebug_trace64
+ */
+int
+kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval)
+{
+	/* Widen the 32-bit syscall arguments and reuse the 64-bit path. */
+	struct kdebug_trace64_args uap64 = {
+		.code = uap->code,
+		.arg1 = uap->arg1,
+		.arg2 = uap->arg2,
+		.arg3 = uap->arg3,
+		.arg4 = uap->arg4,
+	};
+
+	return kdebug_trace64(p, &uap64, retval);
+}
+
+/*
+ * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated to fit in 32-bit record format.
+ */
+/*
+ * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated to fit in 32-bit record format.
+ */
+int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
+{
+	/* Reserved-class check comes first: EPERM even when tracing is off. */
+	int err = kdebug_validate_debugid(uap->code);
+
+	if (err != 0)
+		return err;
+
+	if ( __probable(kdebug_enable == 0) )
+		return 0;
+
+	kernel_debug_internal(uap->code,
+	    (uintptr_t)uap->arg1, (uintptr_t)uap->arg2,
+	    (uintptr_t)uap->arg3, (uintptr_t)uap->arg4,
+	    (uintptr_t)thread_tid(current_thread()));
+
+	return 0;
+}
+
+/*
+ * Adding enough padding to contain a full tracepoint for the last
+ * portion of the string greatly simplifies the logic of splitting the
+ * string between tracepoints. Full tracepoints can be generated using
+ * the buffer itself, without having to manually add zeros to pad the
+ * arguments.
+ */
+
+/* 2 string args in first tracepoint and 9 string data tracepoints */
+#define STR_BUF_ARGS (2 + (9 * 4))
+/* times the size of each arg on K64 */
+#define MAX_STR_LEN (STR_BUF_ARGS * sizeof(uint64_t))
+/* on K32, ending straddles a tracepoint, so reserve blanks */
+#define STR_BUF_SIZE (MAX_STR_LEN + (2 * sizeof(uint32_t)))
+
+/*
+ * This function does no error checking and assumes that it is called with
+ * the correct arguments, including that the buffer pointed to by str is at
+ * least STR_BUF_SIZE bytes. However, str must be aligned to word-size and
+ * be NUL-terminated. In cases where a string can fit evenly into a final
+ * tracepoint without its NUL-terminator, this function will not end those
+ * strings with a NUL in trace. It's up to clients to look at the function
+ * qualifier for DBG_FUNC_END in this case, to end the string.
+ */
+static uint64_t
+kernel_debug_string_internal(uint32_t debugid, uint64_t str_id, void *vstr,
+ size_t str_len)
+{
+ /* str must be word-aligned */
+ uintptr_t *str = vstr;
+ size_t written = 0;
+ uintptr_t thread_id;