+ return str_id;
+}
+
+/*
+ * Returns true if the current process can emit events, and false otherwise.
+ * Trace system and scheduling events circumvent this check, as do events
+ * emitted in interrupt context.
+ */
+static bool
+kdebug_current_proc_enabled(uint32_t debugid)
+{
+ /* can't determine current process in interrupt context */
+ if (ml_at_interrupt_context()) {
+ return true;
+ }
+
+ /* always emit trace system and scheduling events */
+ if ((KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE ||
+ (debugid & KDBG_CSC_MASK) == MACHDBG_CODE(DBG_MACH_SCHED, 0))) {
+ return true;
+ }
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
+ proc_t cur_proc = current_proc();
+
+ /* only the process with the kdebug bit set is allowed */
+ if (cur_proc && !(cur_proc->p_kdebug)) {
+ return false;
+ }
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
+ proc_t cur_proc = current_proc();
+
+ /* every process except the one with the kdebug bit set is allowed */
+ if (cur_proc && cur_proc->p_kdebug) {
+ return false;
+ }
+ }
+
+ return true;
+}
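+
+/*
+ * A minimal sketch of the per-process filter semantics above, assuming a
+ * single process "A" has had its p_kdebug bit set (names are illustrative):
+ *
+ *   KDBG_PIDCHECK set   -> only events emitted by A are recorded
+ *   KDBG_PIDEXCLUDE set -> events from every process except A are recorded
+ *
+ * Interrupt context, DBG_TRACE events, and scheduler events bypass both
+ * checks, as handled at the top of kdebug_current_proc_enabled().
+ */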
+
+bool
+kdebug_debugid_enabled(uint32_t debugid)
+{
+ /* if no filtering is enabled */
+ if (!kd_ctrl_page.kdebug_slowcheck) {
+ return true;
+ }
+
+ return kdebug_debugid_explicitly_enabled(debugid);
+}
+
+bool
+kdebug_debugid_explicitly_enabled(uint32_t debugid)
+{
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ return typefilter_is_debugid_allowed(kdbg_typefilter, debugid);
+ } else if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) {
+ return true;
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
+ if (debugid < kdlog_beg || debugid > kdlog_end) {
+ return false;
+ }
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
+ if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value4) {
+ return false;
+ }
+ }
+
+ return true;
+}
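+
+/*
+ * A minimal sketch of how the filter modes above apply, assuming a debugid
+ * built as KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, code), where code is any
+ * scheduler event code:
+ *
+ *   KDBG_TYPEFILTER_CHECK -> allowed only if the typefilter has the
+ *                            (class, subclass) pair set
+ *   KDBG_RANGECHECK       -> allowed only if kdlog_beg <= debugid <= kdlog_end
+ *   KDBG_VALCHECK         -> allowed only if the event ID (function-qualifier
+ *                            bits masked off) matches one of kdlog_value1..4
+ *
+ * DBG_TRACE events are always allowed when no typefilter is installed.
+ */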
+
+bool
+kdebug_using_continuous_time(void)
+{
+ return kdebug_enable & KDEBUG_ENABLE_CONT_TIME;
+}
+
+/*
+ * Returns 0 if a string can be traced with these arguments, or an errno
+ * value if an error occurred.
+ */
+static errno_t
+kdebug_check_trace_string(uint32_t debugid, uint64_t str_id)
+{
+ /* if there are function qualifiers on the debugid */
+ if (debugid & ~KDBG_EVENTID_MASK) {
+ return EINVAL;
+ }
+
+ if (kdebug_validate_debugid(debugid)) {
+ return EPERM;
+ }
+
+ if (str_id != 0 && (str_id & STR_ID_SIG_MASK) != g_str_id_signature) {
+ return EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Implementation of the kernel_debug_string KPI.
+ */
+int
+kernel_debug_string(uint32_t debugid, uint64_t *str_id, const char *str)
+{
+ /* arguments to tracepoints must be word-aligned */
+ __attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
+ static_assert(sizeof(str_buf) > MAX_STR_LEN);
+ vm_size_t len_copied;
+ int err;
+
+ assert(str_id);
+
+ if (__probable(kdebug_enable == 0)) {
+ return 0;
+ }
+
+ if (!kdebug_current_proc_enabled(debugid)) {
+ return 0;
+ }
+
+ if (!kdebug_debugid_enabled(debugid)) {
+ return 0;
+ }
+
+ if ((err = kdebug_check_trace_string(debugid, *str_id)) != 0) {
+ return err;
+ }
+
+ if (str == NULL) {
+ if (*str_id == 0) {
+ return EINVAL;
+ }
+
+ *str_id = kernel_debug_string_internal(debugid, *str_id, NULL, 0);
+ return 0;
+ }
+
+ memset(str_buf, 0, sizeof(str_buf));
+ len_copied = strlcpy(str_buf, str, MAX_STR_LEN + 1);
+ *str_id = kernel_debug_string_internal(debugid, *str_id, str_buf,
+ len_copied);
+ return 0;
+}
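+
+/*
+ * A minimal usage sketch of the KPI above (MY_DEBUGID is illustrative, not a
+ * real event ID):
+ *
+ *   uint64_t str_id = 0;
+ *   // emit "worker started" and receive a string ID for it
+ *   kernel_debug_string(MY_DEBUGID, &str_id, "worker started");
+ *   ...
+ *   // passing NULL with the previously returned ID removes the string
+ *   kernel_debug_string(MY_DEBUGID, &str_id, NULL);
+ *
+ * The exact create/update/remove behavior is implemented by
+ * kernel_debug_string_internal().
+ */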
+
+/*
+ * Support for the kdebug_trace_string syscall.
+ */
+int
+kdebug_trace_string(__unused struct proc *p,
+ struct kdebug_trace_string_args *uap,
+ uint64_t *retval)
+{
+ __attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
+ static_assert(sizeof(str_buf) > MAX_STR_LEN);
+ size_t len_copied;
+ int err;
+
+ if (__probable(kdebug_enable == 0)) {
+ return 0;
+ }
+
+ if (!kdebug_current_proc_enabled(uap->debugid)) {
+ return 0;
+ }
+
+ if (!kdebug_debugid_enabled(uap->debugid)) {
+ return 0;
+ }
+
+ if ((err = kdebug_check_trace_string(uap->debugid, uap->str_id)) != 0) {
+ return err;
+ }
+
+ if (uap->str == USER_ADDR_NULL) {
+ if (uap->str_id == 0) {
+ return EINVAL;
+ }
+
+ *retval = kernel_debug_string_internal(uap->debugid, uap->str_id,
+ NULL, 0);
+ return 0;
+ }
+
+ memset(str_buf, 0, sizeof(str_buf));
+ err = copyinstr(uap->str, str_buf, MAX_STR_LEN + 1, &len_copied);
+
+ /* it's alright to truncate the string, so allow ENAMETOOLONG */
+ if (err == ENAMETOOLONG) {
+ str_buf[MAX_STR_LEN] = '\0';
+ } else if (err) {
+ return err;
+ }
+
+ if (len_copied <= 1) {
+ return EINVAL;
+ }
+
+ /* convert the copyinstr byte count (which includes the NUL) back to a string length */
+ len_copied--;
+
+ *retval = kernel_debug_string_internal(uap->debugid, uap->str_id, str_buf,
+ len_copied);
+ return 0;
+}
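+
+/*
+ * A minimal sketch of the matching call from userspace, assuming the
+ * kdebug_trace_string() wrapper declared in <sys/kdebug.h> (MY_DEBUGID is
+ * illustrative):
+ *
+ *   uint64_t id = kdebug_trace_string(MY_DEBUGID, 0, "frame begin");
+ *   ...
+ *   kdebug_trace_string(MY_DEBUGID, id, NULL);
+ *
+ * A zero str_id with a non-NULL string requests a new ID; a non-zero str_id
+ * with a NULL string removes it, mirroring the checks above.
+ */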
+
+static void
+kdbg_lock_init(void)
+{
+ static lck_grp_attr_t *kdebug_lck_grp_attr = NULL;
+ static lck_attr_t *kdebug_lck_attr = NULL;
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT) {
+ return;
+ }
+
+ assert(kdebug_lck_grp_attr == NULL);
+ kdebug_lck_grp_attr = lck_grp_attr_alloc_init();
+ kdebug_lck_grp = lck_grp_alloc_init("kdebug", kdebug_lck_grp_attr);
+ kdebug_lck_attr = lck_attr_alloc_init();
+
+ kds_spin_lock = lck_spin_alloc_init(kdebug_lck_grp, kdebug_lck_attr);
+ kdw_spin_lock = lck_spin_alloc_init(kdebug_lck_grp, kdebug_lck_attr);
+
+ kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
+}
+
+int
+kdbg_bootstrap(bool early_trace)
+{
+ kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
+
+ return create_buffers(early_trace);
+}
+
+int
+kdbg_reinit(bool early_trace)
+{
+ int ret = 0;
+
+ /*
+ * Disable trace collection; first make sure we're not in the middle of
+ * cutting a trace.
+ */
+ kernel_debug_disable();
+
+ /*
+ * Make sure the SLOW_NOLOG bit is seen by everyone that might be trying to
+ * cut a trace.
+ */
+ IOSleep(100);
+
+ delete_buffers();
+
+ kdbg_clear_thread_map();
+ ret = kdbg_bootstrap(early_trace);
+
+ RAW_file_offset = 0;
+ RAW_file_written = 0;
+
+ return ret;
+}
+
+void
+kdbg_trace_data(struct proc *proc, long *arg_pid, long *arg_uniqueid)
+{
+ if (!proc) {
+ *arg_pid = 0;
+ *arg_uniqueid = 0;
+ } else {
+ *arg_pid = proc->p_pid;
+ /* Fit the 64-bit unique ID into a trace point argument; zero it if it doesn't fit. */
+ *arg_uniqueid = (long)proc->p_uniqueid;
+ if ((uint64_t) *arg_uniqueid != proc->p_uniqueid) {
+ *arg_uniqueid = 0;
+ }
+ }
+}
+
+void
+kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3,
+ long *arg4)
+{
+ if (!proc) {
+ *arg1 = 0;
+ *arg2 = 0;
+ *arg3 = 0;
+ *arg4 = 0;
+ return;
+ }
+
+ const char *procname = proc_best_name(proc);
+ size_t namelen = strlen(procname);
+
+ long args[4] = { 0 };
+
+ if (namelen > sizeof(args)) {
+ namelen = sizeof(args);
+ }
+
+ strncpy((char *)args, procname, namelen);
+
+ *arg1 = args[0];
+ *arg2 = args[1];
+ *arg3 = args[2];
+ *arg4 = args[3];
+}
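+
+/*
+ * A minimal sketch of the packing above, assuming an LP64 kernel
+ * (sizeof(long) == 8): the first 32 bytes of the process name are copied
+ * byte-for-byte into arg1..arg4, trailing bytes are zeroed, and no
+ * terminating NUL is guaranteed when the name fills all four arguments.
+ */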
+
+/*
+ * Writes a cpumap for the given iops_list/cpu_count to the provided buffer.
+ *
+ * You may provide a buffer and size, or if you set the buffer to NULL, a
+ * buffer of sufficient size will be allocated.
+ *
+ * If you provide a buffer and it is too small, sets cpumap_size to the number
+ * of bytes required and returns EINVAL.
+ *
+ * On success, if you provided a buffer, cpumap_size is set to the number of
+ * bytes written.  If you did not provide a buffer, cpumap is set to the newly
+ * allocated buffer and cpumap_size is set to the number of bytes allocated.
+ *
+ * NOTE: It may seem redundant to pass both iops and a cpu_count, but the
+ * caller may be reporting state from "now" or from the "past" (as in
+ * kdbg_readcpumap()).  If iops were NULL and this function read the current
+ * CPU count itself, it could report the wrong count for "past" state.
+ */
+int
+kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size)
+{
+ assert(cpumap);
+ assert(cpumap_size);
+ assert(cpu_count);
+ assert(!iops || iops->cpu_id + 1 == cpu_count);
+
+ uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap);
+ uint32_t bytes_available = *cpumap_size;
+ *cpumap_size = bytes_needed;
+
+ if (*cpumap == NULL) {
+ if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
+ return ENOMEM;
+ }
+ bzero(*cpumap, *cpumap_size);
+ } else if (bytes_available < bytes_needed) {
+ return EINVAL;
+ }
+
+ kd_cpumap_header* header = (kd_cpumap_header*)(uintptr_t)*cpumap;
+
+ header->version_no = RAW_VERSION1;
+ header->cpu_count = cpu_count;
+
+ kd_cpumap* cpus = (kd_cpumap*)&header[1];
+
+ int32_t index = cpu_count - 1;
+ while (iops) {
+ cpus[index].cpu_id = iops->cpu_id;
+ cpus[index].flags = KDBG_CPUMAP_IS_IOP;
+ strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name));
+
+ iops = iops->next;
+ index--;
+ }
+
+ while (index >= 0) {
+ cpus[index].cpu_id = index;
+ cpus[index].flags = 0;
+ strlcpy(cpus[index].name, "AP", sizeof(cpus->name));
+
+ index--;
+ }
+
+ return 0;
+}
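+
+/*
+ * A minimal sketch of the two call modes described above (error handling
+ * elided; the iops list and CPU count are whatever the caller snapshotted):
+ *
+ *   uint8_t *cpumap = NULL;
+ *   uint32_t size = 0;
+ *   // NULL buffer: a buffer of exactly the required size is allocated
+ *   kdbg_cpumap_init_internal(iops, cpu_count, &cpumap, &size);
+ *
+ *   // caller-provided buffer: EINVAL if too small, with size updated to
+ *   // the number of bytes required
+ *   kdbg_cpumap_init_internal(iops, cpu_count, &prealloc, &prealloc_size);
+ */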
+
+void
+kdbg_thrmap_init(void)
+{
+ ktrace_assert_lock_held();
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) {
+ return;
+ }
+
+ kd_mapptr = kdbg_thrmap_init_internal(0, &kd_mapsize, &kd_mapcount);
+
+ if (kd_mapptr) {
+ kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
+ }
+}
+
+static void
+kd_resolve_map(thread_t thread, void *opaque)
+{
+ struct kd_resolver *resolve = opaque;
+
+ if (resolve->krs_count < resolve->krs_maxcount) {
+ kd_threadmap *map = &resolve->krs_map[resolve->krs_count];
+ struct kd_task_name *task_name = resolve->krs_task;
+ map->thread = (uintptr_t)thread_tid(thread);
+
+ (void)strlcpy(map->command, task_name->ktn_name, sizeof(map->command));
+ /*
+ * The valid field carries the owning pid; kernel threads belong to pid 0,
+ * so store 1 there to keep the entry marked valid.
+ */
+ pid_t pid = resolve->krs_task->ktn_pid;
+ map->valid = pid == 0 ? 1 : pid;
+ resolve->krs_count++;
+ }
+}
+
+static vm_size_t
+kd_resolve_tasks(struct kd_task_name *task_names, vm_size_t ntasks)
+{
+ vm_size_t i = 0;
+ proc_t p = PROC_NULL;
+
+ proc_list_lock();
+ ALLPROC_FOREACH(p) {
+ if (i >= ntasks) {
+ break;
+ }
+ /*
+ * Only record processes that can be referenced and are not exiting.
+ */
+ if (p->task && (p->p_lflag & P_LEXIT) == 0) {
+ task_reference(p->task);
+ task_names[i].ktn_task = p->task;
+ task_names[i].ktn_pid = p->p_pid;
+ (void)strlcpy(task_names[i].ktn_name, proc_best_name(p),
+ sizeof(task_names[i].ktn_name));
+ i++;
+ }
+ }