+
+#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
+#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
+#if defined(__i386__) || defined(__x86_64__)
+#define TRAP_DEBUGGER __asm__ volatile("int3")
+#endif
+
+#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)	/* upper bound on a caller-supplied trace buffer */
+
+/* Initialize the mutex governing access to the stack snapshot subsystem */
+__private_extern__ void
+stackshot_lock_init( void )
+{
+ stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
+
+ stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);
+
+ stackshot_subsys_lck_attr = lck_attr_alloc_init();
+
+ lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
+}
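+
+/*
+ * Hedged usage sketch (illustrative, not part of this change): the lock
+ * initializer is expected to run exactly once during early BSD bootstrap,
+ * before the first stackshot request can arrive. A hypothetical init hook
+ * might call it like so:
+ *
+ *	void
+ *	bsd_trace_init(void)	// hypothetical early-boot hook
+ *	{
+ *		stackshot_lock_init();	// must precede any STACKSHOT_SUBSYS_LOCK()
+ *	}
+ */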
+
+/*
+ * stack_snapshot: Obtains a coherent set of stack traces for all threads
+ * on the system, tracing both kernel and user stacks
+ * where available. Uses machine-specific trace routines
+ * (on x86, an int3 trap into the debugger) to capture
+ * the snapshot coherently.
+ * Inputs: uap->pid - process id of the process to be traced, or -1
+ * for the entire system
+ * uap->tracebuf - address of the user space destination
+ * buffer
+ * uap->tracebuf_size - size of the user space trace buffer
+ * uap->flags - various options, including the maximum
+ * number of frames to trace
+ * uap->dispatch_offset - offset used to locate per-thread
+ * dispatch queue information, when requested via flags
+ * Outputs: EPERM if the caller is not privileged
+ * EINVAL if the supplied trace buffer isn't sanely sized
+ * ENOMEM if we don't have enough memory to satisfy the
+ * request
+ * ENOENT if the target pid isn't found
+ * ENOSPC if the supplied buffer is insufficient; if the request
+ * failed due to trace buffer exhaustion, we copyout as much
+ * as possible
+ * *retval contains the number of bytes traced if successful,
+ * and -1 otherwise
+ */
+int
+stack_snapshot(struct proc *p, struct stack_snapshot_args *uap, int32_t *retval)
+{
+ int error = 0;
+
+ if ((error = suser(kauth_cred_get(), &p->p_acflag)))
+ return(error);
+
+ return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
+ uap->flags, uap->dispatch_offset, retval);
+}
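+
+/*
+ * Hedged usage sketch (illustrative, not part of this change): a user-space
+ * client could drive this syscall through a wrapper with the same argument
+ * order (the wrapper name here is an assumption). ENOSPC means the buffer
+ * was exhausted; since the kernel copies out as much as fits, a caller can
+ * grow the buffer (up to SANE_TRACEBUF_SIZE) and retry:
+ *
+ *	uint32_t bufsize = 512 * 1024;
+ *	char *buf = malloc(bufsize);
+ *	int traced = stack_snapshot(-1, buf, bufsize, 0, 0);	// hypothetical wrapper
+ *	if (traced == -1 && errno == ENOSPC) {
+ *		bufsize *= 2;	// enlarge and retry
+ *	}
+ */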
+
+int
+stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
+{
+ int error = 0;
+ unsigned bytesTraced = 0;
+ boolean_t istate;
+
+ *retval = -1;
+ /* Serialize tracing */
+ STACKSHOT_SUBSYS_LOCK();
+
+ if ((tracebuf_size == 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
+ error = EINVAL;
+ goto error_exit;
+ }
+
+ assert(stackshot_snapbuf == NULL);
+
+ /* Don't allocate or trace while a panic is in progress */
+ if (panic_active()) {
+ error = ENOMEM;
+ goto error_exit;
+ }
+
+ if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
+ error = ENOMEM;
+ goto error_exit;
+ }
+
+ istate = ml_set_interrupts_enabled(FALSE);
+ /* Preload trace parameters */
+ kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);
+
+ /*
+ * Trap to the debugger to obtain a coherent stack snapshot; this
+ * populates the trace buffer.
+ */
+ TRAP_DEBUGGER;
+
+ ml_set_interrupts_enabled(istate);
+
+ bytesTraced = kdp_stack_snapshot_bytes_traced();
+
+ if (bytesTraced > 0) {
+ if ((error = copyout(stackshot_snapbuf, tracebuf,
+ ((bytesTraced < tracebuf_size) ?
+ bytesTraced : tracebuf_size))))
+ goto error_exit;
+ *retval = bytesTraced;
+ } else {
+ error = ENOENT;
+ goto error_exit;
+ }
+
+ /*
+ * -1 from the trace engine means the trace buffer was exhausted; the
+ * partial copyout above stands, but report ENOSPC to the caller.
+ */
+ error = kdp_stack_snapshot_geterror();
+ if (error == -1) {
+ error = ENOSPC;
+ *retval = -1;
+ goto error_exit;
+ }
+
+error_exit:
+ if (stackshot_snapbuf != NULL)
+ kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
+ stackshot_snapbuf = NULL;
+ STACKSHOT_SUBSYS_UNLOCK();
+ return error;
+}
+
+void
+start_kern_tracing(unsigned int new_nkdbufs)
+{
+ if (!new_nkdbufs)
+ return;
+ kdbg_set_nkdbufs(new_nkdbufs);
+ kdbg_lock_init();
+ kdbg_reinit(TRUE);
+ kdbg_set_tracing_enabled(TRUE);
+
+#if defined(__i386__) || defined(__x86_64__)
+ /*
+ * Emit the TSC rebase time alongside the current absolute time so
+ * post-processing tools can correlate raw TSC timestamps with
+ * absolute time.
+ */
+ uint64_t now = mach_absolute_time();
+
+ KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
+ (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
+ (uint32_t)(now >> 32), (uint32_t)now,
+ 0);
+#endif
+ printf("kernel tracing started\n");
+}
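+
+/*
+ * Hedged usage sketch (illustrative): this routine is meant to be driven
+ * by a boot argument during early startup. A caller parsing a "trace"
+ * boot-arg (argument name assumed for illustration) might do:
+ *
+ *	unsigned int new_nkdbufs = 0;
+ *	PE_parse_boot_argn("trace", &new_nkdbufs, sizeof(new_nkdbufs));
+ *	start_kern_tracing(new_nkdbufs);	// no-op if the arg is absent or zero
+ */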
+
+void
+kdbg_dump_trace_to_file(const char *filename)
+{
+ vfs_context_t ctx;
+ vnode_t vp;
+ int error;
+ size_t number;
+
+ if (!(kdebug_enable & KDEBUG_ENABLE_TRACE))
+ return;
+
+ if (global_state_pid != -1) {
+ proc_t gsp;
+
+ if ((gsp = proc_find(global_state_pid)) != NULL) {
+ /*
+ * The global tracing pid exists; we're running on behalf
+ * of fs_usage, latency, etc., so don't cut the
+ * panic/shutdown trace file. Drop the reference taken
+ * by proc_find().
+ */
+ proc_rele(gsp);
+ return;
+ }
+ }
+ KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);
+
+ kdebug_enable = 0;
+ kd_ctrl_page.enabled = 0;
+
+ ctx = vfs_context_kernel();
+
+ if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
+ return;
+
+ number = kd_mapsize;
+ kdbg_readmap(0, &number, vp, ctx);
+
+ number = nkdbufs*sizeof(kd_buf);
+ kdbg_read(0, &number, vp, ctx);
+
+ vnode_close(vp, FWRITE, ctx);
+
+ sync(current_proc(), (void *)NULL, (int *)NULL);
+}
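+
+/*
+ * Hedged usage sketch (illustrative): this is intended for the
+ * panic/shutdown path, cutting a final trace file at a well-known
+ * location (the path shown is an example):
+ *
+ *	kdbg_dump_trace_to_file("/var/log/shutdown/shutdown.trace");
+ */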
+
+/* Helper function for filling in the BSD name for an address space.
+ * Defined here because the machine bindings know only Mach threads
+ * and nothing about BSD processes.
+ *
+ * FIXME: need to grab a lock during this?
+ */
+void
+kdbg_get_task_name(char *name_buf, int len, task_t task)
+{
+ proc_t proc;
+
+ /* Note: we can't use thread->task (and functions that rely on it) here
+ * because it hasn't been initialized yet when this function is called.
+ * We use the explicitly-passed task parameter instead.
+ */
+ proc = get_bsdtask_info(task);
+ if (proc != PROC_NULL)
+ snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
+ else
+ snprintf(name_buf, len, "%p [!bsd]", task);
+}
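+
+/*
+ * Hedged usage sketch (illustrative): callers emitting a thread map entry
+ * can pass a fixed-size buffer; the snprintf() above truncates safely if
+ * the "p_comm/pid" string does not fit:
+ *
+ *	char name_buf[32];
+ *	kdbg_get_task_name(name_buf, sizeof(name_buf), task);
+ */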
+
+#if defined(NATIVE_TRACE_FACILITY)
+/*
+ * Intentionally empty stubs: when the native trace facility is built in,
+ * no external trace handler needs the control page or storage mapped.
+ */
+void trace_handler_map_ctrl_page(__unused uintptr_t addr, __unused size_t ctrl_page_size, __unused size_t storage_size, __unused size_t kds_ptr_size)
+{
+}
+void trace_handler_map_bufinfo(__unused uintptr_t addr, __unused size_t size)
+{
+}
+void trace_handler_unmap_bufinfo(void)
+{
+}
+void trace_handler_map_buffer(__unused int index, __unused uintptr_t addr, __unused size_t size)
+{
+}
+void trace_handler_unmap_buffer(__unused int index)
+{
+}
+#endif