+ /* generate an ID, if necessary */
+ if (str_id == 0) {
+ str_id = OSIncrementAtomic64((SInt64 *)&g_curr_str_id);
+ str_id = (str_id & STR_ID_MASK) | g_str_id_signature;
+ }
+
+ trace_debugid |= DBG_FUNC_START;
+ /* string can fit in a single tracepoint */
+ if (str_len <= (2 * sizeof(uintptr_t))) {
+ trace_debugid |= DBG_FUNC_END;
+ }
+
+ kernel_debug_internal(FALSE, trace_debugid, (uintptr_t)debugid,
+ (uintptr_t)str_id, str[0],
+ str[1], thread_id);
+
+ /* clear the function qualifiers before emitting continuation events */
+ trace_debugid &= KDBG_EVENTID_MASK;
+
+ /* the first event already carried the first two words of the string */
+ i = 2;
+ written += 2 * sizeof(uintptr_t);
+
+ for (; written < str_len; i += 4, written += 4 * sizeof(uintptr_t)) {
+ if ((written + (4 * sizeof(uintptr_t))) >= str_len) {
+ trace_debugid |= DBG_FUNC_END;
+ }
+ kernel_debug_internal(FALSE, trace_debugid, str[i],
+ str[i + 1],
+ str[i + 2],
+ str[i + 3], thread_id);
+ }
+
+ return str_id;
+}
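+
+/*
+ * Illustrative layout (not part of the interface): on a 64-bit kernel, a
+ * 20-byte string is emitted as two events.  The first carries the original
+ * debugid, the string ID, and the first two words of the string, marked
+ * DBG_FUNC_START; the second carries the remaining (zero-padded) words and
+ * is marked DBG_FUNC_END.  A string of 16 bytes or fewer fits in a single
+ * event carrying both qualifiers.
+ */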
+
+/*
+ * Returns true if the current process can emit events, and false otherwise.
+ * Trace system and scheduling events circumvent this check, as do events
+ * emitted in interrupt context.
+ */
+static boolean_t
+kdebug_current_proc_enabled(uint32_t debugid)
+{
+ /* can't determine current process in interrupt context */
+ if (ml_at_interrupt_context()) {
+ return TRUE;
+ }
+
+ /* always emit trace system and scheduling events */
+ if ((KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE ||
+ (debugid & KDBG_CSC_MASK) == MACHDBG_CODE(DBG_MACH_SCHED, 0)))
+ {
+ return TRUE;
+ }
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
+ proc_t cur_proc = current_proc();
+
+ /* only the process with the kdebug bit set is allowed */
+ if (cur_proc && !(cur_proc->p_kdebug)) {
+ return FALSE;
+ }
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
+ proc_t cur_proc = current_proc();
+
+ /* every process except the one with the kdebug bit set is allowed */
+ if (cur_proc && cur_proc->p_kdebug) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
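+
+/*
+ * Illustrative example (values are hypothetical): with KDBG_PIDCHECK set and
+ * p_kdebug set on pid 100, only events emitted by pid 100 pass this check;
+ * with KDBG_PIDEXCLUDE set instead, pid 100 is the only process whose events
+ * are dropped.  Interrupt context, DBG_TRACE, and DBG_MACH_SCHED events are
+ * always allowed.
+ */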
+
+/*
+ * Returns false if the debugid is disabled by filters, and true if the
+ * debugid is allowed to be traced.  A debugid may not be traced if the
+ * typefilter disables its class and subclass, it falls outside the range
+ * check, or it is not one of the allowed debugids in a value check.  Trace
+ * system events bypass this check.
+ */
+boolean_t
+kdebug_debugid_enabled(uint32_t debugid)
+{
+ /* if no filtering is enabled */
+ if (!kd_ctrl_page.kdebug_slowcheck) {
+ return TRUE;
+ }
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ return typefilter_is_debugid_allowed(kdbg_typefilter, debugid);
+ } else if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) {
+ return TRUE;
+ }
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
+ if (debugid < kdlog_beg || debugid > kdlog_end) {
+ return FALSE;
+ }
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
+ if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
+ {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
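+
+/*
+ * Illustrative use (hypothetical caller and debugid, not from this file): a
+ * subsystem can skip gathering expensive tracepoint arguments when the
+ * debugid would be filtered out anyway:
+ *
+ *	if (kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_SCHED, 0x42))) {
+ *		// gather arguments and emit the event
+ *	}
+ */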
+
+/*
+ * Returns 0 if a string can be traced with these arguments, or an errno
+ * value if an error occurred.
+ */
+static errno_t
+kdebug_check_trace_string(uint32_t debugid, uint64_t str_id)
+{
+ /* if there are function qualifiers on the debugid */
+ if (debugid & ~KDBG_EVENTID_MASK) {
+ return EINVAL;
+ }
+
+ if (kdebug_validate_debugid(debugid)) {
+ return EPERM;
+ }
+
+ if (str_id != 0 && (str_id & STR_ID_SIG_MASK) != g_str_id_signature) {
+ return EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Implementation of the kernel_debug_string KPI.
+ */
+int
+kernel_debug_string(uint32_t debugid, uint64_t *str_id, const char *str)
+{
+ /* arguments to tracepoints must be word-aligned */
+ __attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
+ static_assert(sizeof(str_buf) > MAX_STR_LEN);
+ vm_size_t len_copied;
+ int err;
+
+ assert(str_id);
+
+ if (__probable(kdebug_enable == 0)) {
+ return 0;
+ }
+
+ if (!kdebug_current_proc_enabled(debugid)) {
+ return 0;
+ }
+
+ if (!kdebug_debugid_enabled(debugid)) {
+ return 0;
+ }
+
+ if ((err = kdebug_check_trace_string(debugid, *str_id)) != 0) {
+ return err;
+ }
+
+ if (str == NULL) {
+ if (*str_id == 0) {
+ return EINVAL;
+ }
+
+ *str_id = kernel_debug_string_internal(debugid, *str_id, NULL, 0);
+ return 0;
+ }
+
+ memset(str_buf, 0, sizeof(str_buf));
+ len_copied = strlcpy(str_buf, str, MAX_STR_LEN + 1);
+ *str_id = kernel_debug_string_internal(debugid, *str_id, str_buf,
+ len_copied);
+ return 0;
+}
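+
+/*
+ * Illustrative KPI usage (a sketch; MY_DEBUGID is a hypothetical debugid with
+ * no function qualifier bits set):
+ *
+ *	uint64_t str_id = 0;
+ *	(void)kernel_debug_string(MY_DEBUGID, &str_id, "example string");
+ *
+ * str_id now names the string for later events; calling again with str set
+ * to NULL and the same str_id drops the string.
+ */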
+
+/*
+ * Support for the kdebug_trace_string syscall.
+ */
+int
+kdebug_trace_string(__unused struct proc *p,
+ struct kdebug_trace_string_args *uap,
+ uint64_t *retval)
+{
+ __attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
+ static_assert(sizeof(str_buf) > MAX_STR_LEN);
+ size_t len_copied;
+ int err;
+
+ if (__probable(kdebug_enable == 0)) {
+ return 0;
+ }
+
+ if (!kdebug_current_proc_enabled(uap->debugid)) {
+ return 0;
+ }
+
+ if (!kdebug_debugid_enabled(uap->debugid)) {
+ return 0;
+ }
+
+ if ((err = kdebug_check_trace_string(uap->debugid, uap->str_id)) != 0) {
+ return err;
+ }
+
+ if (uap->str == USER_ADDR_NULL) {
+ if (uap->str_id == 0) {
+ return EINVAL;
+ }
+
+ *retval = kernel_debug_string_internal(uap->debugid, uap->str_id,
+ NULL, 0);
+ return 0;
+ }
+
+ memset(str_buf, 0, sizeof(str_buf));
+ err = copyinstr(uap->str, str_buf, MAX_STR_LEN + 1, &len_copied);
+
+ /* it's alright to truncate the string, so allow ENAMETOOLONG */
+ if (err == ENAMETOOLONG) {
+ str_buf[MAX_STR_LEN] = '\0';
+ } else if (err) {
+ return err;
+ }
+
+ /* reject empty strings; len_copied includes the terminating NUL */
+ if (len_copied <= 1) {
+ return EINVAL;
+ }
+
+ /* drop the NUL to convert back to a string length */
+ len_copied--;
+
+ *retval = kernel_debug_string_internal(uap->debugid, uap->str_id, str_buf,
+ len_copied);
+ return 0;
+}
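+
+/*
+ * Illustrative userspace flow (a sketch; assumes the libsyscall wrapper of
+ * the same name and a hypothetical MY_DEBUGID with no qualifier bits set):
+ *
+ *	uint64_t id = kdebug_trace_string(MY_DEBUGID, 0, "hello");
+ *	// ... reference id from other tracepoints ...
+ *	kdebug_trace_string(MY_DEBUGID, id, NULL);	// drop the string
+ */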
+
+static void
+kdbg_lock_init(void)
+{
+ static lck_grp_attr_t *kdebug_lck_grp_attr = NULL;
+ static lck_grp_t *kdebug_lck_grp = NULL;
+ static lck_attr_t *kdebug_lck_attr = NULL;
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT) {
+ return;
+ }
+
+ assert(kdebug_lck_grp_attr == NULL);
+ kdebug_lck_grp_attr = lck_grp_attr_alloc_init();
+ kdebug_lck_grp = lck_grp_alloc_init("kdebug", kdebug_lck_grp_attr);
+ kdebug_lck_attr = lck_attr_alloc_init();
+
+ kds_spin_lock = lck_spin_alloc_init(kdebug_lck_grp, kdebug_lck_attr);
+ kdw_spin_lock = lck_spin_alloc_init(kdebug_lck_grp, kdebug_lck_attr);
+
+ kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
+}
+
+int
+kdbg_bootstrap(boolean_t early_trace)
+{
+ kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
+
+ return create_buffers(early_trace);
+}
+
+int
+kdbg_reinit(boolean_t early_trace)
+{
+ int ret = 0;
+
+ /*
+ * Disable trace collection and make sure we're not in the middle of
+ * cutting a trace.
+ */
+ kernel_debug_disable();
+
+ /*
+ * Make sure the SLOW_NOLOG bit is seen by everyone that might be
+ * trying to cut a trace.
+ */
+ IOSleep(100);
+
+ delete_buffers();
+
+ kdbg_clear_thread_map();
+ ret = kdbg_bootstrap(early_trace);
+
+ RAW_file_offset = 0;
+ RAW_file_written = 0;
+
+ return ret;
+}
+
+void
+kdbg_trace_data(struct proc *proc, long *arg_pid)
+{
+ if (!proc) {
+ *arg_pid = 0;
+ } else {
+ *arg_pid = proc->p_pid;
+ }
+}
+
+void
+kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
+{
+ char *dbg_nameptr;
+ int dbg_namelen;
+ long dbg_parms[4];
+
+ if (!proc) {
+ *arg1 = 0;
+ *arg2 = 0;