+
/* Serialize access to the stackshot subsystem (one snapshot in flight). */
#define STACKSHOT_SUBSYS_LOCK()		lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK()	lck_mtx_unlock(&stackshot_subsys_mutex)

/*
 * Breakpoint-trap into the kernel debugger; used to freeze the system while
 * a coherent stack snapshot is captured.  No trailing semicolon in the
 * expansion — call sites write "TRAP_DEBUGGER;" themselves.
 */
#if defined(__i386__) || defined(__x86_64__)
#define TRAP_DEBUGGER	__asm__ volatile("int3")
#elif defined(__ppc__)
#define TRAP_DEBUGGER	__asm__ volatile("tw 4,r3,r3")
#else
#error TRAP_DEBUGGER is not defined for this architecture
#endif

/*
 * Upper bound on a caller-supplied trace buffer (2 MiB).  Parenthesized so
 * the macro expands safely inside larger expressions (e.g. division).
 */
#define SANE_TRACEBUF_SIZE	(2 * 1024 * 1024)
+
+/* Initialize the mutex governing access to the stack snapshot subsystem */
+__private_extern__ void
+stackshot_lock_init( void )
+{
+ stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
+
+ stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);
+
+ stackshot_subsys_lck_attr = lck_attr_alloc_init();
+
+ lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
+}
+
+/*
+ * stack_snapshot: Obtains a coherent set of stack traces for all threads
+ * on the system, tracing both kernel and user stacks
+ * where available. Uses machine specific trace routines
+ * for ppc, ppc64 and x86.
+ * Inputs: uap->pid - process id of process to be traced, or -1
+ * for the entire system
+ * uap->tracebuf - address of the user space destination
+ * buffer
+ * uap->tracebuf_size - size of the user space trace buffer
+ * uap->options - various options, including the maximum
+ * number of frames to trace.
+ * Outputs: EPERM if the caller is not privileged
+ * EINVAL if the supplied trace buffer isn't sanely sized
+ * ENOMEM if we don't have enough memory to satisfy the
+ * request
+ * ENOENT if the target pid isn't found
+ * ENOSPC if the supplied buffer is insufficient
+ * *retval contains the number of bytes traced, if successful
+ * and -1 otherwise. If the request failed due to
+ * tracebuffer exhaustion, we copyout as much as possible.
+ */
+int
+stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
+ int error = 0;
+
+
+ if ((error = suser(kauth_cred_get(), &p->p_acflag)))
+ return(error);
+
+ return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
+ uap->flags, uap->dispatch_offset, retval);
+}
+
+int
+stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
+{
+ int error = 0;
+ unsigned bytesTraced = 0;
+
+ *retval = -1;
+/* Serialize tracing */
+ STACKSHOT_SUBSYS_LOCK();
+
+ if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
+ error = EINVAL;
+ goto error_exit;
+ }
+
+ MALLOC(stackshot_snapbuf, void *, tracebuf_size, M_TEMP, M_WAITOK);
+
+ if (stackshot_snapbuf == NULL) {
+ error = ENOMEM;
+ goto error_exit;
+ }
+/* Preload trace parameters*/
+ kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);
+
+/* Trap to the debugger to obtain a coherent stack snapshot; this populates
+ * the trace buffer
+ */
+ if (panic_active()) {
+ error = ENOMEM;
+ goto error_exit;
+ }
+
+ TRAP_DEBUGGER;
+
+ bytesTraced = kdp_stack_snapshot_bytes_traced();
+
+ if (bytesTraced > 0) {
+ if ((error = copyout(stackshot_snapbuf, tracebuf,
+ ((bytesTraced < tracebuf_size) ?
+ bytesTraced : tracebuf_size))))
+ goto error_exit;
+ *retval = bytesTraced;
+ }
+ else {
+ error = ENOENT;
+ goto error_exit;
+ }
+
+ error = kdp_stack_snapshot_geterror();
+ if (error == -1) {
+ error = ENOSPC;
+ *retval = -1;
+ goto error_exit;
+ }
+
+error_exit:
+ if (stackshot_snapbuf != NULL)
+ FREE(stackshot_snapbuf, M_TEMP);
+ stackshot_snapbuf = NULL;
+ STACKSHOT_SUBSYS_UNLOCK();
+ return error;
+}
+
+void
+start_kern_tracing(unsigned int new_nkdbufs) {
+ if (!new_nkdbufs)
+ return;
+ kdbg_set_nkdbufs(new_nkdbufs);
+ kdbg_lock_init();
+ kdbg_reinit();
+ kdebug_enable |= KDEBUG_ENABLE_TRACE;
+ kdebug_slowcheck &= ~SLOW_NOLOG;
+ kdbg_mapinit();
+
+#if defined(__i386__) || defined(__x86_64__)
+ uint64_t now = mach_absolute_time();
+
+ KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
+ (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
+ (uint32_t)(now >> 32), (uint32_t)now,
+ 0);
+#endif
+ printf("kernel tracing started\n");
+}
+
+void
+kdbg_dump_trace_to_file(const char *filename)
+{
+ vfs_context_t ctx;
+ vnode_t vp;
+ int error;
+ size_t number;
+
+
+ if (kdebug_enable & (KDEBUG_ENABLE_CHUD | KDEBUG_ENABLE_ENTROPY))
+ return;
+
+ if (global_state_pid != -1) {
+ if ((proc_find(global_state_pid)) != NULL) {
+ /*
+ * The global pid exists, we're running
+ * due to fs_usage, latency, etc...
+ * don't cut the panic/shutdown trace file
+ */
+ return;
+ }
+ }
+ KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);
+
+ kdebug_enable = 0;
+
+ ctx = vfs_context_kernel();
+
+ if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
+ return;
+
+ number = kd_mapsize;
+ kdbg_readmap(0, &number, vp, ctx);
+
+ number = nkdbufs*sizeof(kd_buf);
+ kdbg_read(0, &number, vp, ctx);
+
+ vnode_close(vp, FWRITE, ctx);
+
+ sync(current_proc(), (void *)NULL, (int *)NULL);
+}