+static void
+delete_buffers(void)
+{
+ int i;
+
+ if (kd_bufs) {
+ for (i = 0; i < n_storage_buffers; i++) {
+ if (kd_bufs[i].kdsb_addr) {
+ kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
+ }
+ }
+ kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));
+
+ kd_bufs = NULL;
+ n_storage_buffers = 0;
+ }
+ if (kdcopybuf) {
+ kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
+
+ kdcopybuf = NULL;
+ }
+ kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;
+
+ if (kdbip) {
+ kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);
+
+ kdbip = NULL;
+ }
+ kd_ctrl_page.kdebug_iops = NULL;
+ kd_ctrl_page.kdebug_cpus = 0;
+ kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
+}
+
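+/*
+ * Return the storage unit identified by kdsp_raw to the global free list,
+ * but only if it is still at the head of this CPU's list -- it may already
+ * have been stolen by allocate_storage_unit().
+ */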
+void
+release_storage_unit(int cpu, uint32_t kdsp_raw)
+{
+ int s = 0;
+ struct kd_storage *kdsp_actual;
+ struct kd_bufinfo *kdbp;
+ union kds_ptr kdsp;
+
+ kdsp.raw = kdsp_raw;
+
+ s = ml_set_interrupts_enabled(FALSE);
+ lck_spin_lock(kds_spin_lock);
+
+ kdbp = &kdbip[cpu];
+
+ if (kdsp.raw == kdbp->kd_list_head.raw) {
+ /*
+ * it's possible for the storage unit pointed to
+ * by kdsp to have already been stolen... so
+ * check to see if it's still the head of the list
+ * now that we're behind the lock that protects
+ * adding and removing from the queue...
+ * since we only ever release and steal units from
+ * that position, if it's no longer the head
+ * we have nothing to do in this context
+ */
+ kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
+ kdbp->kd_list_head = kdsp_actual->kds_next;
+
+ kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
+ kd_ctrl_page.kds_free_list = kdsp;
+
+ kd_ctrl_page.kds_inuse_count--;
+ }
+ lck_spin_unlock(kds_spin_lock);
+ ml_set_interrupts_enabled(s);
+}
+
+
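+/*
+ * Make a storage unit available at the tail of a CPU's list: reuse the
+ * current tail if it still has room, pull a unit from the free list, or,
+ * when wrapping is allowed, steal the unit whose last event has the oldest
+ * timestamp. Returns FALSE if no unit could be obtained.
+ */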
+boolean_t
+allocate_storage_unit(int cpu)
+{
+ union kds_ptr kdsp;
+ struct kd_storage *kdsp_actual, *kdsp_next_actual;
+ struct kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
+ uint64_t oldest_ts, ts;
+ boolean_t retval = TRUE;
+ int s = 0;
+
+ s = ml_set_interrupts_enabled(FALSE);
+ lck_spin_lock(kds_spin_lock);
+
+ kdbp = &kdbip[cpu];
+
+ /* If someone beat us to the allocation, return success */
+ if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
+ kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);
+
+ if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
+ goto out;
+ }
+
+ if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
+ kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
+ kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;
+
+ kd_ctrl_page.kds_inuse_count++;
+ } else {
+ if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
+ kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
+ kdbp->kd_lostevents = TRUE;
+ retval = FALSE;
+ goto out;
+ }
+ kdbp_vict = NULL;
+ oldest_ts = UINT64_MAX;
+
+ for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page.kdebug_cpus]; kdbp_try++) {
+
+ if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
+ /*
+ * no storage unit to steal
+ */
+ continue;
+ }
+
+ kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);
+
+ if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
+ /*
+ * make sure we don't steal the storage unit
+ * being actively recorded to... need to
+ * move on because we don't want an out-of-order
+ * set of events showing up later
+ */
+ continue;
+ }
+
+ /*
+ * When wrapping, steal the storage unit with the
+ * earliest timestamp on its last event, instead of the
+ * earliest timestamp on the first event. This allows a
+ * storage unit with more recent events to be preserved,
+ * even if the storage unit contains events that are
+ * older than those found in other CPUs.
+ */
+ ts = kdbg_get_timestamp(&kdsp_actual->kds_records[EVENTS_PER_STORAGE_UNIT - 1]);
+ if (ts < oldest_ts) {
+ oldest_ts = ts;
+ kdbp_vict = kdbp_try;
+ }
+ }
+ if (kdbp_vict == NULL) {
+ kdebug_enable = 0;
+ kd_ctrl_page.enabled = 0;
+ commpage_update_kdebug_state();
+ retval = FALSE;
+ goto out;
+ }
+ kdsp = kdbp_vict->kd_list_head;
+ kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
+ kdbp_vict->kd_list_head = kdsp_actual->kds_next;
+
+ if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
+ kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head);
+ kdsp_next_actual->kds_lostevents = TRUE;
+ } else
+ kdbp_vict->kd_lostevents = TRUE;
+
+ kd_ctrl_page.oldest_time = oldest_ts;
+ kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
+ }
+ kdsp_actual->kds_timestamp = mach_absolute_time();
+ kdsp_actual->kds_next.raw = KDS_PTR_NULL;
+ kdsp_actual->kds_bufcnt = 0;
+ kdsp_actual->kds_readlast = 0;
+
+ kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
+ kdbp->kd_lostevents = FALSE;
+ kdsp_actual->kds_bufindx = 0;
+
+ if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
+ kdbp->kd_list_head = kdsp;
+ else
+ POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
+ kdbp->kd_list_tail = kdsp;
+out:
+ lck_spin_unlock(kds_spin_lock);
+ ml_set_interrupts_enabled(s);
+
+ return (retval);
+}
+
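+/*
+ * Register an IOP (coprocessor) callback and assign it a cpu_id. The new
+ * entry is pushed onto the lock-free kd_iops list with a compare-and-swap,
+ * so registration never takes the kdebug spin lock.
+ */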
+int
+kernel_debug_register_callback(kd_callback_t callback)
+{
+ kd_iop_t* iop;
+ if (kmem_alloc(kernel_map, (vm_offset_t *)&iop, sizeof(kd_iop_t), VM_KERN_MEMORY_DIAG) == KERN_SUCCESS) {
+ memcpy(&iop->callback, &callback, sizeof(kd_callback_t));
+
+ /*
+ * <rdar://problem/13351477> Some IOP clients are not providing a name.
+ *
+ * Remove when fixed.
+ */
+ {
+ boolean_t is_valid_name = FALSE;
+ for (uint32_t length=0; length<sizeof(callback.iop_name); ++length) {
+ /* This is roughly isprintable(c) */
+ if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F)
+ continue;
+ if (callback.iop_name[length] == 0) {
+ if (length)
+ is_valid_name = TRUE;
+ break;
+ }
+ }
+
+ if (!is_valid_name) {
+ strlcpy(iop->callback.iop_name, "IOP-???", sizeof(iop->callback.iop_name));
+ }
+ }
+
+ iop->last_timestamp = 0;
+
+ do {
+ /*
+ * We use two pieces of state, the old list head
+ * pointer, and the value of old_list_head->cpu_id.
+ * If we read kd_iops more than once, it can change
+ * between reads.
+ *
+ * TLDR; Must not read kd_iops more than once per loop.
+ */
+ iop->next = kd_iops;
+ iop->cpu_id = iop->next ? (iop->next->cpu_id+1) : kdbg_cpu_count(FALSE);
+
+ /*
+ * Header says OSCompareAndSwapPtr has a memory barrier
+ */
+ } while (!OSCompareAndSwapPtr(iop->next, iop, (void* volatile*)&kd_iops));
+
+ return iop->cpu_id;
+ }
+
+ return 0;
+}
+
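+/*
+ * Entry point for events generated by registered IOPs. The caller supplies
+ * its own coreid and timestamp; filtering and storage-unit allocation mirror
+ * kernel_debug_internal() below.
+ */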
+void
+kernel_debug_enter(
+ uint32_t coreid,
+ uint32_t debugid,
+ uint64_t timestamp,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4,
+ uintptr_t threadid
+ )
+{
+ uint32_t bindx;
+ kd_buf *kd;
+ struct kd_bufinfo *kdbp;
+ struct kd_storage *kdsp_actual;
+ union kds_ptr kds_raw;
+
+ if (kd_ctrl_page.kdebug_slowcheck) {
+
+ if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
+ goto out1;
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ if (typefilter_is_debugid_allowed(kdbg_typefilter, debugid))
+ goto record_event;
+ goto out1;
+ }
+ else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
+ if (debugid >= kdlog_beg && debugid <= kdlog_end)
+ goto record_event;
+ goto out1;
+ }
+ else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
+ if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
+ goto out1;
+ }
+ }
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_WRAPPED) {
+ if (timestamp < kd_ctrl_page.oldest_time) {
+ goto out1;
+ }
+ }
+
+record_event:
+
+ disable_preemption();
+
+ if (kd_ctrl_page.enabled == 0)
+ goto out;
+
+ kdbp = &kdbip[coreid];
+ timestamp &= KDBG_TIMESTAMP_MASK;
+
+#if KDEBUG_MOJO_TRACE
+ if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
+ kdebug_serial_print(coreid, debugid, timestamp,
+ arg1, arg2, arg3, arg4, threadid);
+#endif
+
+retry_q:
+ kds_raw = kdbp->kd_list_tail;
+
+ if (kds_raw.raw != KDS_PTR_NULL) {
+ kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
+ bindx = kdsp_actual->kds_bufindx;
+ } else
+ kdsp_actual = NULL;
+
+ if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
+ if (allocate_storage_unit(coreid) == FALSE) {
+ /*
+ * this can only happen if wrapping
+ * has been disabled
+ */
+ goto out;
+ }
+ goto retry_q;
+ }
+ if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
+ goto retry_q;
+
+ // IOP entries can be allocated before xnu allocates and inits the buffer
+ if (timestamp < kdsp_actual->kds_timestamp)
+ kdsp_actual->kds_timestamp = timestamp;
+
+ kd = &kdsp_actual->kds_records[bindx];
+
+ kd->debugid = debugid;
+ kd->arg1 = arg1;
+ kd->arg2 = arg2;
+ kd->arg3 = arg3;
+ kd->arg4 = arg4;
+ kd->arg5 = threadid;
+
+ kdbg_set_timestamp_and_cpu(kd, timestamp, coreid);
+
+ OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
+out:
+ enable_preemption();
+out1:
+ if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
+ kdbg_wakeup();
+ }
+}
+
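+/*
+ * Common path for events cut on the local CPU: apply the slow-path filters
+ * (pid checks, typefilter, range/value checks), then claim a slot in the
+ * tail storage unit with a compare-and-swap on kds_bufindx and fill in the
+ * record.
+ */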
+static void
+kernel_debug_internal(
+ boolean_t only_filter,
+ uint32_t debugid,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4,
+ uintptr_t arg5)
+{
+ struct proc *curproc;
+ uint64_t now;
+ uint32_t bindx;
+ kd_buf *kd;
+ int cpu;
+ struct kd_bufinfo *kdbp;
+ struct kd_storage *kdsp_actual;
+ union kds_ptr kds_raw;
+
+ if (kd_ctrl_page.kdebug_slowcheck) {
+ if ((kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) ||
+ !(kdebug_enable & (KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_PPT)))
+ {
+ goto out1;
+ }
+
+ if ( !ml_at_interrupt_context()) {
+ if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
+ /*
+ * If kdebug flag is not set for current proc, return
+ */
+ curproc = current_proc();
+
+ if ((curproc && !(curproc->p_kdebug)) &&
+ ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
+ (debugid >> 24 != DBG_TRACE))
+ goto out1;
+ }
+ else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
+ /*
+ * If kdebug flag is set for current proc, return
+ */
+ curproc = current_proc();
+
+ if ((curproc && curproc->p_kdebug) &&
+ ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
+ (debugid >> 24 != DBG_TRACE))
+ goto out1;
+ }
+ }
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ if (typefilter_is_debugid_allowed(kdbg_typefilter, debugid))
+ goto record_event;
+
+ goto out1;
+ } else if (only_filter == TRUE) {
+ goto out1;
+ }
+ else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
+ /* Always record trace system info */
+ if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
+ goto record_event;
+
+ if (debugid < kdlog_beg || debugid > kdlog_end)
+ goto out1;
+ }
+ else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
+ /* Always record trace system info */
+ if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
+ goto record_event;
+
+ if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
+ goto out1;
+ }
+ } else if (only_filter == TRUE) {
+ goto out1;
+ }
+
+record_event:
+ disable_preemption();
+
+ if (kd_ctrl_page.enabled == 0)
+ goto out;
+
+ cpu = cpu_number();
+ kdbp = &kdbip[cpu];
+
+#if KDEBUG_MOJO_TRACE
+ if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
+ kdebug_serial_print(cpu, debugid,
+ mach_absolute_time() & KDBG_TIMESTAMP_MASK,
+ arg1, arg2, arg3, arg4, arg5);
+#endif
+
+retry_q:
+ kds_raw = kdbp->kd_list_tail;
+
+ if (kds_raw.raw != KDS_PTR_NULL) {
+ kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
+ bindx = kdsp_actual->kds_bufindx;
+ } else
+ kdsp_actual = NULL;
+
+ if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
+ if (allocate_storage_unit(cpu) == FALSE) {
+ /*
+ * this can only happen if wrapping
+ * has been disabled
+ */
+ goto out;
+ }
+ goto retry_q;
+ }
+ now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
+
+ if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
+ goto retry_q;
+
+ kd = &kdsp_actual->kds_records[bindx];
+
+ kd->debugid = debugid;
+ kd->arg1 = arg1;
+ kd->arg2 = arg2;
+ kd->arg3 = arg3;
+ kd->arg4 = arg4;
+ kd->arg5 = arg5;
+
+ kdbg_set_timestamp_and_cpu(kd, now, cpu);
+
+ OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
+
+#if KPERF
+ kperf_kdebug_callback(debugid, __builtin_frame_address(0));
+#endif
+out:
+ enable_preemption();
+out1:
+ if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
+ uint32_t etype;
+ uint32_t stype;
+
+ etype = debugid & KDBG_EVENTID_MASK;
+ stype = debugid & KDBG_CSC_MASK;
+
+ if (etype == INTERRUPT || etype == MACH_vmfault ||
+ stype == BSC_SysCall || stype == MACH_SysCall) {
+ kdbg_wakeup();
+ }
+ }
+}
+
+void
+kernel_debug(
+ uint32_t debugid,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4,
+ __unused uintptr_t arg5)
+{
+ kernel_debug_internal(FALSE, debugid, arg1, arg2, arg3, arg4,
+ (uintptr_t)thread_tid(current_thread()));
+}
+
+void
+kernel_debug1(
+ uint32_t debugid,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4,
+ uintptr_t arg5)
+{
+ kernel_debug_internal(FALSE, debugid, arg1, arg2, arg3, arg4, arg5);
+}
+
+void
+kernel_debug_filtered(
+ uint32_t debugid,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4)
+{
+ kernel_debug_internal(TRUE, debugid, arg1, arg2, arg3, arg4,
+ (uintptr_t)thread_tid(current_thread()));
+}
+
+void
+kernel_debug_string_early(const char *message)
+{
+ uintptr_t arg[4] = {0, 0, 0, 0};
+
+ /* Stuff the message string in the args and log it. */
+ strncpy((char *)arg, message, MIN(sizeof(arg), strlen(message)));
+ KERNEL_DEBUG_EARLY(
+ TRACE_INFO_STRING,
+ arg[0], arg[1], arg[2], arg[3]);
+}
+
+#define SIMPLE_STR_LEN (64)
+static_assert(SIMPLE_STR_LEN % sizeof(uintptr_t) == 0);
+
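+/*
+ * Emit a short string as a series of tracepoints: the first carries
+ * DBG_FUNC_START, each tracepoint packs four words of the string into its
+ * arguments, and the last carries DBG_FUNC_END.
+ */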
+void
+kernel_debug_string_simple(uint32_t eventid, const char *str)
+{
+ /* array of uintptr_ts simplifies emitting the string as arguments */
+ uintptr_t str_buf[(SIMPLE_STR_LEN / sizeof(uintptr_t)) + 1] = { 0 };
+ size_t len = strlcpy((char *)str_buf, str, SIMPLE_STR_LEN + 1);
+
+ uintptr_t thread_id = (uintptr_t)thread_tid(current_thread());
+ uint32_t debugid = eventid | DBG_FUNC_START;
+
+ /* string can fit in a single tracepoint */
+ if (len <= (4 * sizeof(uintptr_t))) {
+ debugid |= DBG_FUNC_END;
+ }
+
+ kernel_debug_internal(FALSE, debugid, str_buf[0],
+ str_buf[1],
+ str_buf[2],
+ str_buf[3], thread_id);
+
+ debugid &= KDBG_EVENTID_MASK;
+ int i = 4;
+ size_t written = 4 * sizeof(uintptr_t);
+
+ for (; written < len; i += 4, written += 4 * sizeof(uintptr_t)) {
+ /* if this is the last tracepoint to be emitted */
+ if ((written + (4 * sizeof(uintptr_t))) >= len) {
+ debugid |= DBG_FUNC_END;
+ }
+ kernel_debug_internal(FALSE, debugid, str_buf[i],
+ str_buf[i + 1],
+ str_buf[i + 2],
+ str_buf[i + 3], thread_id);
+ }
+}
+
+extern int master_cpu; /* MACH_KERNEL_PRIVATE */
+/*
+ * Used prior to start_kern_tracing() being called.
+ * Log temporarily into a static buffer.
+ */
+void
+kernel_debug_early(
+ uint32_t debugid,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4)
+{
+ /* If tracing is already initialized, use it */
+ if (nkdbufs) {
+ KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0);
+ return;
+ }
+
+ /* Do nothing if the buffer is full or we're not on the boot cpu */
+ kd_early_overflow = kd_early_index >= KD_EARLY_BUFFER_MAX;
+ if (kd_early_overflow ||
+ cpu_number() != master_cpu)
+ return;
+
+ kd_early_buffer[kd_early_index].debugid = debugid;
+ kd_early_buffer[kd_early_index].timestamp = mach_absolute_time();
+ kd_early_buffer[kd_early_index].arg1 = arg1;
+ kd_early_buffer[kd_early_index].arg2 = arg2;
+ kd_early_buffer[kd_early_index].arg3 = arg3;
+ kd_early_buffer[kd_early_index].arg4 = arg4;
+ kd_early_buffer[kd_early_index].arg5 = 0;
+ kd_early_index++;
+}
+
+/*
+ * Transfer the contents of the temporary buffer into the trace buffers.
+ * Precede that by logging the rebase time (offset) - the TSC-based time (in ns)
+ * when mach_absolute_time is set to 0.
+ */
+static void
+kernel_debug_early_end(void)
+{
+ int i;
+
+ if (cpu_number() != master_cpu)
+ panic("kernel_debug_early_end() not call on boot processor");
+
+ /* Fake sentinel marking the start of kernel time relative to TSC */
+ kernel_debug_enter(
+ 0,
+ TRACE_TIMESTAMPS,
+ 0,
+ (uint32_t)(tsc_rebase_abs_time >> 32),
+ (uint32_t)tsc_rebase_abs_time,
+ 0,
+ 0,
+ 0);
+ for (i = 0; i < kd_early_index; i++) {
+ kernel_debug_enter(
+ 0,
+ kd_early_buffer[i].debugid,
+ kd_early_buffer[i].timestamp,
+ kd_early_buffer[i].arg1,
+ kd_early_buffer[i].arg2,
+ kd_early_buffer[i].arg3,
+ kd_early_buffer[i].arg4,
+ 0);
+ }
+
+ /* Cut events-lost event on overflow */
+ if (kd_early_overflow)
+ KERNEL_DEBUG_CONSTANT(
+ TRACE_LOST_EVENTS, 0, 0, 0, 0, 0);
+
+ /* This trace marks the start of kernel tracing */
+ kernel_debug_string_early("early trace done");
+}
+
+void
+kernel_debug_disable(void)
+{
+ if (kdebug_enable) {
+ kdbg_set_tracing_enabled(FALSE, 0);
+ }
+}
+
+/*
+ * Returns non-zero if debugid is in a reserved class.
+ */
+static int
+kdebug_validate_debugid(uint32_t debugid)
+{
+ uint8_t debugid_class;
+
+ debugid_class = KDBG_EXTRACT_CLASS(debugid);
+ switch (debugid_class) {
+ case DBG_TRACE:
+ return EPERM;
+ }
+
+ return 0;
+}
+
+/*
+ * Support syscall SYS_kdebug_typefilter.
+ */
+int
+kdebug_typefilter(__unused struct proc* p,
+ struct kdebug_typefilter_args* uap,
+ __unused int *retval)
+{
+ int ret = KERN_SUCCESS;
+
+ if (uap->addr == USER_ADDR_NULL ||
+ uap->size == USER_ADDR_NULL) {
+ return EINVAL;
+ }
+
+ /*
+ * The atomic load is to close a race window with setting the typefilter
+ * and memory entry values. A description follows:
+ *
+ * Thread 1 (writer)
+ *
+ * Allocate Typefilter
+ * Allocate MemoryEntry
+ * Write Global MemoryEntry Ptr
+ * Atomic Store (Release) Global Typefilter Ptr
+ *
+ * Thread 2 (reader, AKA us)
+ *
+ * if ((Atomic Load (Acquire) Global Typefilter Ptr) == NULL)
+ * return;
+ *
+ * Without the atomic store, it isn't guaranteed that the write of
+ * Global MemoryEntry Ptr is visible before we can see the write of
+ * Global Typefilter Ptr.
+ *
+ * Without the atomic load, it isn't guaranteed that the loads of
+ * Global MemoryEntry Ptr aren't speculated.
+ *
+ * The global pointers transition from NULL -> valid once and only once,
+ * and never change after becoming valid. This means that having passed
+ * the first atomic load test of Global Typefilter Ptr, this function
+ * can then safely use the remaining global state without atomic checks.
+ */
+ if (!__c11_atomic_load((_Atomic typefilter_t *)&kdbg_typefilter, memory_order_acquire)) {
+ return EINVAL;
+ }
+
+ assert(kdbg_typefilter_memory_entry);
+
+ mach_vm_offset_t user_addr = 0;
+ vm_map_t user_map = current_map();
+
+ ret = mach_to_bsd_errno(
+ mach_vm_map(user_map, // target map
+ &user_addr, // [in, out] target address
+ TYPEFILTER_ALLOC_SIZE, // initial size
+ 0, // mask (alignment?)
+ VM_FLAGS_ANYWHERE, // flags
+ kdbg_typefilter_memory_entry, // port (memory entry!)
+ 0, // offset (in memory entry)
+ FALSE, // should copy
+ VM_PROT_READ, // cur_prot
+ VM_PROT_READ, // max_prot
+ VM_INHERIT_SHARE)); // inherit behavior on fork
+
+ if (ret == KERN_SUCCESS) {
+ vm_size_t user_ptr_size = vm_map_is_64bit(user_map) ? 8 : 4;
+ ret = copyout(CAST_DOWN(void *, &user_addr), uap->addr, user_ptr_size);
+
+ if (ret != KERN_SUCCESS) {
+ mach_vm_deallocate(user_map, user_addr, TYPEFILTER_ALLOC_SIZE);
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Support syscall SYS_kdebug_trace. U64->K32 args may get truncated in kdebug_trace64
+ */
+int
+kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval)
+{
+ struct kdebug_trace64_args uap64;
+
+ uap64.code = uap->code;
+ uap64.arg1 = uap->arg1;
+ uap64.arg2 = uap->arg2;
+ uap64.arg3 = uap->arg3;
+ uap64.arg4 = uap->arg4;
+
+ return kdebug_trace64(p, &uap64, retval);
+}
+
+/*
+ * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated
+ * to fit in 32-bit record format.
+ *
+ * It is intentional that error conditions are not checked until kdebug is
+ * enabled. This matches the userspace wrapper behavior, which optimizes
+ * for performance in the non-error case.
+ */
+int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
+{
+ int err;
+
+ if ( __probable(kdebug_enable == 0) )
+ return(0);
+
+ if ((err = kdebug_validate_debugid(uap->code)) != 0) {
+ return err;
+ }
+
+ kernel_debug_internal(FALSE, uap->code,
+ (uintptr_t)uap->arg1,
+ (uintptr_t)uap->arg2,
+ (uintptr_t)uap->arg3,
+ (uintptr_t)uap->arg4,
+ (uintptr_t)thread_tid(current_thread()));
+
+ return(0);
+}
+
+/*
+ * Adding enough padding to contain a full tracepoint for the last
+ * portion of the string greatly simplifies the logic of splitting the
+ * string between tracepoints. Full tracepoints can be generated using
+ * the buffer itself, without having to manually add zeros to pad the
+ * arguments.
+ */
+
+/* 2 string args in first tracepoint and 9 string data tracepoints */
+#define STR_BUF_ARGS (2 + (9 * 4))
+/* times the size of each arg on K64 */
+#define MAX_STR_LEN (STR_BUF_ARGS * sizeof(uint64_t))
+/* on K32, ending straddles a tracepoint, so reserve blanks */
+#define STR_BUF_SIZE (MAX_STR_LEN + (2 * sizeof(uint32_t)))
+
+/*
+ * This function does no error checking and assumes that it is called with
+ * the correct arguments, including that the buffer pointed to by str is at
+ * least STR_BUF_SIZE bytes. In addition, str must be word-aligned and
+ * NUL-terminated. In cases where a string can fit evenly into a final
+ * tracepoint without its NUL-terminator, this function will not end those
+ * strings with a NUL in trace. It's up to clients to look at the function
+ * qualifier for DBG_FUNC_END in this case, to end the string.
+ */
+static uint64_t
+kernel_debug_string_internal(uint32_t debugid, uint64_t str_id, void *vstr,
+ size_t str_len)
+{
+ /* str must be word-aligned */
+ uintptr_t *str = vstr;
+ size_t written = 0;
+ uintptr_t thread_id;
+ int i;
+ uint32_t trace_debugid = TRACEDBG_CODE(DBG_TRACE_STRING,
+ TRACE_STRING_GLOBAL);
+
+ thread_id = (uintptr_t)thread_tid(current_thread());
+
+ /* if the ID is being invalidated, just emit that */
+ if (str_id != 0 && str_len == 0) {
+ kernel_debug_internal(FALSE, trace_debugid | DBG_FUNC_START | DBG_FUNC_END,
+ (uintptr_t)debugid, (uintptr_t)str_id, 0, 0,
+ thread_id);
+ return str_id;
+ }
+
+ /* generate an ID, if necessary */
+ if (str_id == 0) {
+ str_id = OSIncrementAtomic64((SInt64 *)&g_curr_str_id);
+ str_id = (str_id & STR_ID_MASK) | g_str_id_signature;
+ }
+
+ trace_debugid |= DBG_FUNC_START;
+ /* string can fit in a single tracepoint */
+ if (str_len <= (2 * sizeof(uintptr_t))) {
+ trace_debugid |= DBG_FUNC_END;
+ }
+
+ kernel_debug_internal(FALSE, trace_debugid, (uintptr_t)debugid,
+ (uintptr_t)str_id, str[0],
+ str[1], thread_id);
+
+ trace_debugid &= KDBG_EVENTID_MASK;
+ i = 2;
+ written += 2 * sizeof(uintptr_t);
+
+ for (; written < str_len; i += 4, written += 4 * sizeof(uintptr_t)) {
+ if ((written + (4 * sizeof(uintptr_t))) >= str_len) {
+ trace_debugid |= DBG_FUNC_END;
+ }
+ kernel_debug_internal(FALSE, trace_debugid, str[i],
+ str[i + 1],
+ str[i + 2],
+ str[i + 3], thread_id);
+ }
+
+ return str_id;
+}
+
+/*
+ * Returns true if the current process can emit events, and false otherwise.
+ * Trace system and scheduling events circumvent this check, as do events
+ * emitted in interrupt context.
+ */
+static boolean_t
+kdebug_current_proc_enabled(uint32_t debugid)
+{
+ /* can't determine current process in interrupt context */
+ if (ml_at_interrupt_context()) {
+ return TRUE;
+ }
+
+ /* always emit trace system and scheduling events */
+ if ((KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE ||
+ (debugid & KDBG_CSC_MASK) == MACHDBG_CODE(DBG_MACH_SCHED, 0)))
+ {
+ return TRUE;
+ }
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
+ proc_t cur_proc = current_proc();
+
+ /* only the process with the kdebug bit set is allowed */
+ if (cur_proc && !(cur_proc->p_kdebug)) {
+ return FALSE;
+ }
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
+ proc_t cur_proc = current_proc();
+
+ /* every process except the one with the kdebug bit set is allowed */
+ if (cur_proc && cur_proc->p_kdebug) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+/*
+ * Returns false if the debugid is disabled by filters, and true if the
+ * debugid is allowed to be traced. A debugid may not be traced if the
+ * typefilter disables its class and subclass, it's outside a range
+ * check, or if it's not an allowed debugid in a value check. Trace
+ * system events bypass this check.
+ */
+boolean_t
+kdebug_debugid_enabled(uint32_t debugid)
+{
+ /* if no filtering is enabled */
+ if (!kd_ctrl_page.kdebug_slowcheck) {
+ return TRUE;
+ }
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ return typefilter_is_debugid_allowed(kdbg_typefilter, debugid);
+ } else if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) {
+ return TRUE;
+ }
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
+ if (debugid < kdlog_beg || debugid > kdlog_end) {
+ return FALSE;
+ }
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
+ if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
+ {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+/*
+ * Returns 0 if a string can be traced with these arguments. Returns errno
+ * value if error occurred.
+ */
+static errno_t
+kdebug_check_trace_string(uint32_t debugid, uint64_t str_id)
+{
+ /* if there are function qualifiers on the debugid */
+ if (debugid & ~KDBG_EVENTID_MASK) {
+ return EINVAL;
+ }
+
+ if (kdebug_validate_debugid(debugid)) {
+ return EPERM;
+ }
+
+ if (str_id != 0 && (str_id & STR_ID_SIG_MASK) != g_str_id_signature) {
+ return EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Implementation of KPI kernel_debug_string.
+ */
+int
+kernel_debug_string(uint32_t debugid, uint64_t *str_id, const char *str)
+{
+ /* arguments to tracepoints must be word-aligned */
+ __attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
+ static_assert(sizeof(str_buf) > MAX_STR_LEN);
+ vm_size_t len_copied;
+ int err;
+
+ assert(str_id);
+
+ if (__probable(kdebug_enable == 0)) {
+ return 0;
+ }
+
+ if (!kdebug_current_proc_enabled(debugid)) {
+ return 0;
+ }
+
+ if (!kdebug_debugid_enabled(debugid)) {
+ return 0;
+ }
+
+ if ((err = kdebug_check_trace_string(debugid, *str_id)) != 0) {
+ return err;
+ }
+
+ if (str == NULL) {
+ if (*str_id == 0) {
+ return EINVAL;
+ }
+
+ *str_id = kernel_debug_string_internal(debugid, *str_id, NULL, 0);
+ return 0;
+ }
+
+ memset(str_buf, 0, sizeof(str_buf));
+ len_copied = strlcpy(str_buf, str, MAX_STR_LEN + 1);
+ *str_id = kernel_debug_string_internal(debugid, *str_id, str_buf,
+ len_copied);
+ return 0;
+}
+
+/*
+ * Support syscall kdebug_trace_string.
+ */
+int
+kdebug_trace_string(__unused struct proc *p,
+ struct kdebug_trace_string_args *uap,
+ uint64_t *retval)
+{
+ __attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
+ static_assert(sizeof(str_buf) > MAX_STR_LEN);
+ size_t len_copied;
+ int err;
+
+ if (__probable(kdebug_enable == 0)) {
+ return 0;
+ }
+
+ if (!kdebug_current_proc_enabled(uap->debugid)) {
+ return 0;
+ }
+
+ if (!kdebug_debugid_enabled(uap->debugid)) {
+ return 0;
+ }
+
+ if ((err = kdebug_check_trace_string(uap->debugid, uap->str_id)) != 0) {
+ return err;
+ }
+
+ if (uap->str == USER_ADDR_NULL) {
+ if (uap->str_id == 0) {
+ return EINVAL;
+ }
+
+ *retval = kernel_debug_string_internal(uap->debugid, uap->str_id,
+ NULL, 0);
+ return 0;
+ }
+
+ memset(str_buf, 0, sizeof(str_buf));
+ err = copyinstr(uap->str, str_buf, MAX_STR_LEN + 1, &len_copied);
+
+ /* it's alright to truncate the string, so allow ENAMETOOLONG */
+ if (err == ENAMETOOLONG) {
+ str_buf[MAX_STR_LEN] = '\0';
+ } else if (err) {
+ return err;
+ }
+
+ if (len_copied <= 1) {
+ return EINVAL;
+ }
+
+ /* convert back to a length */
+ len_copied--;
+
+ *retval = kernel_debug_string_internal(uap->debugid, uap->str_id, str_buf,
+ len_copied);
+ return 0;
+}
+
+static void
+kdbg_lock_init(void)
+{
+ static lck_grp_attr_t *kdebug_lck_grp_attr = NULL;
+ static lck_grp_t *kdebug_lck_grp = NULL;
+ static lck_attr_t *kdebug_lck_attr = NULL;
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT) {
+ return;
+ }
+
+ assert(kdebug_lck_grp_attr == NULL);
+ kdebug_lck_grp_attr = lck_grp_attr_alloc_init();
+ kdebug_lck_grp = lck_grp_alloc_init("kdebug", kdebug_lck_grp_attr);
+ kdebug_lck_attr = lck_attr_alloc_init();
+
+ kds_spin_lock = lck_spin_alloc_init(kdebug_lck_grp, kdebug_lck_attr);
+ kdw_spin_lock = lck_spin_alloc_init(kdebug_lck_grp, kdebug_lck_attr);
+
+ kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
+}
+
+int
+kdbg_bootstrap(boolean_t early_trace)
+{
+ kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
+
+ return (create_buffers(early_trace));
+}
+
+int
+kdbg_reinit(boolean_t early_trace)
+{
+ int ret = 0;
+
+ /*
+ * Disable trace collecting
+ * First make sure we're not in
+ * the middle of cutting a trace
+ */
+ kernel_debug_disable();
+
+ /*
+ * make sure the SLOW_NOLOG is seen
+ * by everyone that might be trying
+ * to cut a trace..
+ */
+ IOSleep(100);
+
+ delete_buffers();
+
+ kdbg_clear_thread_map();
+ ret = kdbg_bootstrap(early_trace);
+
+ RAW_file_offset = 0;
+ RAW_file_written = 0;
+
+ return(ret);
+}
+
+void
+kdbg_trace_data(struct proc *proc, long *arg_pid)
+{
+ if (!proc)
+ *arg_pid = 0;
+ else
+ *arg_pid = proc->p_pid;
+}
+
+
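+/*
+ * Pack the first bytes of the process name (p_comm) into the four long
+ * arguments so it can be emitted in a single tracepoint.
+ */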
+void
+kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
+{
+ char *dbg_nameptr;
+ int dbg_namelen;
+ long dbg_parms[4];
+
+ if (!proc) {
+ *arg1 = 0;
+ *arg2 = 0;
+ *arg3 = 0;
+ *arg4 = 0;
+ return;
+ }
+ /*
+ * Collect the process name (p_comm) for tracing
+ */
+ dbg_nameptr = proc->p_comm;
+ dbg_namelen = (int)strlen(proc->p_comm);
+ dbg_parms[0] = 0L;
+ dbg_parms[1] = 0L;
+ dbg_parms[2] = 0L;
+ dbg_parms[3] = 0L;
+
+ if (dbg_namelen > (int)sizeof(dbg_parms))
+ dbg_namelen = (int)sizeof(dbg_parms);
+
+ strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
+
+ *arg1 = dbg_parms[0];
+ *arg2 = dbg_parms[1];
+ *arg3 = dbg_parms[2];
+ *arg4 = dbg_parms[3];
+}
+
+static void
+kdbg_resolve_map(thread_t th_act, void *opaque)
+{
+ kd_threadmap *mapptr;
+ krt_t *t = (krt_t *)opaque;
+
+ if (t->count < t->maxcount) {
+ mapptr = &t->map[t->count];
+ mapptr->thread = (uintptr_t)thread_tid(th_act);
+
+ (void) strlcpy (mapptr->command, t->atts->task_comm,
+ sizeof(t->atts->task_comm));
+ /*
+ * Some kernel threads have no associated pid.
+ * We still need to mark the entry as valid.
+ */
+ if (t->atts->pid)
+ mapptr->valid = t->atts->pid;
+ else
+ mapptr->valid = 1;
+
+ t->count++;
+ }
+}
+
+/*
+ *
+ * Writes a cpumap for the given iops_list/cpu_count to the provided buffer.
+ *
+ * You may provide a buffer and size, or if you set the buffer to NULL, a
+ * buffer of sufficient size will be allocated.
+ *
+ * If you provide a buffer and it is too small, sets cpumap_size to the number
+ * of bytes required and returns EINVAL.
+ *
+ * On success, if you provided a buffer, cpumap_size is set to the number of
+ * bytes written. If you did not provide a buffer, cpumap is set to the newly
+ * allocated buffer and cpumap_size is set to the number of bytes allocated.
+ *
+ * NOTE: It may seem redundant to pass both iops and a cpu_count.
+ *
+ * We may be reporting data from "now", or from the "past".
+ *
+ * The "past" data would be for kdbg_readcpumap().
+ *
+ * If we do not pass both iops and cpu_count, and iops is NULL, this function
+ * will need to read "now" state to get the number of cpus, which would be in
+ * error if we were reporting "past" state.
+ */
+
+int
+kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size)
+{
+ assert(cpumap);
+ assert(cpumap_size);
+ assert(cpu_count);
+ assert(!iops || iops->cpu_id + 1 == cpu_count);
+
+ uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap);
+ uint32_t bytes_available = *cpumap_size;
+ *cpumap_size = bytes_needed;
+
+ if (*cpumap == NULL) {
+ if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
+ return ENOMEM;
+ }
+ bzero(*cpumap, *cpumap_size);
+ } else if (bytes_available < bytes_needed) {
+ return EINVAL;
+ }
+
+ kd_cpumap_header* header = (kd_cpumap_header*)(uintptr_t)*cpumap;
+
+ header->version_no = RAW_VERSION1;
+ header->cpu_count = cpu_count;
+
+ kd_cpumap* cpus = (kd_cpumap*)&header[1];
+
+ int32_t index = cpu_count - 1;
+ while (iops) {
+ cpus[index].cpu_id = iops->cpu_id;
+ cpus[index].flags = KDBG_CPUMAP_IS_IOP;
+ strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name));
+
+ iops = iops->next;
+ index--;
+ }
+
+ while (index >= 0) {
+ cpus[index].cpu_id = index;
+ cpus[index].flags = 0;
+ strlcpy(cpus[index].name, "AP", sizeof(cpus->name));
+
+ index--;
+ }
+
+ return KERN_SUCCESS;
+}
+
+void
+kdbg_thrmap_init(void)
+{
+ lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) {
+ return;
+ }
+
+ kd_mapptr = kdbg_thrmap_init_internal(0, &kd_mapsize, &kd_mapcount);
+
+ if (kd_mapptr) {
+ kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
+ }
+}
+
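+/*
+ * Build the thread map in two passes: first snapshot each live task's pid
+ * and name (taking a task reference under the proc list lock), then walk
+ * each task's threads to fill in one kd_threadmap entry per thread.
+ */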
+static kd_threadmap *
+kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount)
+{
+ kd_threadmap *mapptr;
+ proc_t p;
+ struct krt akrt;
+ int tts_count = 0; /* number of task-to-string structures */
+ struct tts *tts_mapptr;
+ unsigned int tts_mapsize = 0;
+ vm_offset_t kaddr;
+
+ assert(mapsize != NULL);
+ assert(mapcount != NULL);
+
+ *mapcount = threads_count;
+ tts_count = tasks_count;
+
+ /*
+ * The proc count could change during buffer allocation,
+ * so introduce a small fudge factor to bump up the
+ * buffer sizes. This gives new tasks some chance of
+ * making it into the tables. Bump up by 25%.
+ */
+ *mapcount += *mapcount / 4;
+ tts_count += tts_count / 4;
+
+ *mapsize = *mapcount * sizeof(kd_threadmap);
+
+ if (count && count < *mapcount) {
+ return 0;
+ }
+
+ if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)*mapsize, VM_KERN_MEMORY_DIAG) == KERN_SUCCESS)) {
+ bzero((void *)kaddr, *mapsize);
+ mapptr = (kd_threadmap *)kaddr;
+ } else {
+ return 0;
+ }
+
+ tts_mapsize = tts_count * sizeof(struct tts);
+
+ if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)tts_mapsize, VM_KERN_MEMORY_DIAG) == KERN_SUCCESS)) {
+ bzero((void *)kaddr, tts_mapsize);
+ tts_mapptr = (struct tts *)kaddr;
+ } else {
+ kmem_free(kernel_map, (vm_offset_t)mapptr, *mapsize);
+
+ return 0;
+ }
+
+ /*
+ * Save the proc's name and take a reference for each task associated
+ * with a valid process.
+ */
+ proc_list_lock();
+
+ int i = 0;
+ ALLPROC_FOREACH(p) {
+ if (i >= tts_count) {
+ break;
+ }
+ if (p->p_lflag & P_LEXIT) {
+ continue;
+ }
+ if (p->task) {
+ task_reference(p->task);
+ tts_mapptr[i].task = p->task;
+ tts_mapptr[i].pid = p->p_pid;
+ (void)strlcpy(tts_mapptr[i].task_comm, proc_best_name(p), sizeof(tts_mapptr[i].task_comm));
+ i++;
+ }
+ }
+ tts_count = i;
+
+ proc_list_unlock();
+
+ /*
+ * Initialize thread map data
+ */
+ akrt.map = mapptr;
+ akrt.count = 0;
+ akrt.maxcount = *mapcount;
+
+ for (i = 0; i < tts_count; i++) {
+ akrt.atts = &tts_mapptr[i];
+ task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
+ task_deallocate((task_t)tts_mapptr[i].task);
+ }
+ kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
+
+ *mapcount = akrt.count;
+
+ return mapptr;
+}
+
+static void
+kdbg_clear(void)
+{
+ /*
+ * Clean up the trace buffer
+ * First make sure we're not in
+ * the middle of cutting a trace
+ */
+ kernel_debug_disable();
+ kdbg_disable_typefilter();
+
+ /*
+ * make sure the SLOW_NOLOG is seen
+ * by everyone that might be trying
+ * to cut a trace..
+ */
+ IOSleep(100);
+
+ /* reset kdebug state for each process */
+ if (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) {
+ proc_list_lock();
+ proc_t p;
+ ALLPROC_FOREACH(p) {
+ p->p_kdebug = 0;
+ }
+ proc_list_unlock();
+ }
+
+ kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+ kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
+ kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
+
+ kd_ctrl_page.oldest_time = 0;
+
+ delete_buffers();
+ nkdbufs = 0;
+
+ /* Clean up the thread map buffer */
+ kdbg_clear_thread_map();
+
+ RAW_file_offset = 0;
+ RAW_file_written = 0;
+}
+
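+/*
+ * Reset kdebug to its default state: disable tracing, tear down the buffers
+ * and thread map, and restore the typefilter (if allocated) to allowing only
+ * DBG_TRACE.
+ */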
+void
+kdebug_reset(void)
+{
+ lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);
+
+ kdbg_lock_init();
+
+ kdbg_clear();
+ if (kdbg_typefilter) {
+ typefilter_reject_all(kdbg_typefilter);
+ typefilter_allow_class(kdbg_typefilter, DBG_TRACE);
+ }
+}
+
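+/*
+ * Mark the process given by value1 as traced (value2 == 1) or clear its
+ * mark. Marking a process also enables KDBG_PIDCHECK mode, which restricts
+ * tracing to marked processes.
+ */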
+int
+kdbg_setpid(kd_regtype *kdr)
+{
+ pid_t pid;
+ int flag, ret=0;
+ struct proc *p;
+
+ pid = (pid_t)kdr->value1;
+ flag = (int)kdr->value2;
+
+ if (pid >= 0) {
+ if ((p = proc_find(pid)) == NULL)
+ ret = ESRCH;
+ else {
+ if (flag == 1) {
+ /*
+ * turn on pid check for this and all pids
+ */
+ kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
+ kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
+ kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+
+ p->p_kdebug = 1;
+ } else {
+ /*
+ * turn off pid check for this pid value
+ * Don't turn off all pid checking though
+ *
+ * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
+ */
+ p->p_kdebug = 0;
+ }
+ proc_rele(p);
+ }
+ }
+ else
+ ret = EINVAL;
+
+ return(ret);
+}
+
+/* This is for pid exclusion in the trace buffer */
+int
+kdbg_setpidex(kd_regtype *kdr)
+{
+ pid_t pid;
+ int flag, ret=0;
+ struct proc *p;
+
+ pid = (pid_t)kdr->value1;
+ flag = (int)kdr->value2;
+
+ if (pid >= 0) {
+ if ((p = proc_find(pid)) == NULL)
+ ret = ESRCH;
+ else {
+ if (flag == 1) {
+ /*
+ * turn on pid exclusion
+ */
+ kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
+ kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
+ kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+
+ p->p_kdebug = 1;
+ }
+ else {
+ /*
+ * turn off pid exclusion for this pid value
+ * Don't turn off all pid exclusion though
+ *
+ * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
+ */
+ p->p_kdebug = 0;
+ }
+ proc_rele(p);
+ }
+ } else
+ ret = EINVAL;
+
+ return(ret);
+}
+
+/*
+ * The following functions all operate on the "global" typefilter singleton.
+ */
+
+/*
+ * The tf param is optional, you may pass either a valid typefilter or NULL.
+ * If you pass a valid typefilter, you release ownership of that typefilter.
+ */
+static int
+kdbg_initialize_typefilter(typefilter_t tf)
+{
+ lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);
+ assert(!kdbg_typefilter);
+ assert(!kdbg_typefilter_memory_entry);
+ typefilter_t deallocate_tf = NULL;
+
+ if (!tf && ((tf = deallocate_tf = typefilter_create()) == NULL)) {
+ return ENOMEM;
+ }
+
+ if ((kdbg_typefilter_memory_entry = typefilter_create_memory_entry(tf)) == MACH_PORT_NULL) {
+ if (deallocate_tf) {
+ typefilter_deallocate(deallocate_tf);
+ }
+ return ENOMEM;
+ }
+
+ /*
+ * The atomic store closes a race window with
+ * the kdebug_typefilter syscall, which assumes
+ * that any non-null kdbg_typefilter means a
+ * valid memory_entry is available.
+ */
+ __c11_atomic_store(((_Atomic typefilter_t*)&kdbg_typefilter), tf, memory_order_release);
+
+ return KERN_SUCCESS;
+}
+
+static int
+kdbg_copyin_typefilter(user_addr_t addr, size_t size)
+{
+ int ret = ENOMEM;
+ typefilter_t tf;
+
+ lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);
+
+ if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
+ return EINVAL;
+ }
+
+ if ((tf = typefilter_create())) {
+ if ((ret = copyin(addr, tf, KDBG_TYPEFILTER_BITMAP_SIZE)) == 0) {
+ /* The kernel typefilter must always allow DBG_TRACE */
+ typefilter_allow_class(tf, DBG_TRACE);
+
+ /*
+ * If this is the first typefilter, claim it.
+ * Otherwise copy and deallocate.
+ *
+ * Allocating a typefilter for the copyin allows
+ * the kernel to hold the invariant that DBG_TRACE
+ * must always be allowed.
+ */
+ if (!kdbg_typefilter) {
+ if ((ret = kdbg_initialize_typefilter(tf))) {
+ return ret;
+ }
+ tf = NULL;
+ } else {
+ typefilter_copy(kdbg_typefilter, tf);
+ }
+
+ kdbg_enable_typefilter();
+ kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, kdbg_typefilter);
+ }
+
+ if (tf)
+ typefilter_deallocate(tf);
+ }
+
+ return ret;
+}
+
+/*
+ * Enable the flags in the control page for the typefilter. Assumes that
+ * kdbg_typefilter has already been allocated, so events being written
+ * don't see a bad typefilter.
+ */
+static void
+kdbg_enable_typefilter(void)
+{
+ assert(kdbg_typefilter);
+ kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);
+ kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
+ kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+ commpage_update_kdebug_state();
+}
+
+/*
+ * Disable the flags in the control page for the typefilter. The typefilter
+ * may be safely deallocated shortly after this function returns.
+ */
+static void
+kdbg_disable_typefilter(void)
+{
+ kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;
+
+ if ((kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE))) {
+ kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+ } else {
+ kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
+ }
+ commpage_update_kdebug_state();
+}
+
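+/*
+ * Return the kdebug enable state to be published in the commpage.
+ */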
+uint32_t
+kdebug_commpage_state(void)
+{
+ if (kdebug_enable) {
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ return KDEBUG_COMMPAGE_ENABLE_TYPEFILTER | KDEBUG_COMMPAGE_ENABLE_TRACE;
+ }
+
+ return KDEBUG_COMMPAGE_ENABLE_TRACE;
+ }
+
+ return 0;
+}
+
+int
+kdbg_setreg(kd_regtype * kdr)