- count = avail / sizeof(kd_buf);
- if (count) {
-     if ((my_kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) {
-         if (count > nkdbufs)
-             count = nkdbufs;
-         if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr > kd_readlast))
-         {
-             copycount = my_kd_bufptr - kd_readlast;
-             if (copycount > count)
-                 copycount = count;
-
-             if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
-             {
-                 *number = 0;
-                 return (EINVAL);
-             }
-             kd_readlast += copycount;
-             *number = copycount;
-             return (0);
-         }
-         else if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr == kd_readlast))
-         {
-             *number = 0;
-             return (0);
-         }
-         else
-         {
-             if (my_kdebug_flags & KDBG_WRAPPED)
-             {
-                 kd_readlast = my_kd_bufptr;
-                 kdebug_flags &= ~KDBG_WRAPPED;
-             }
-
-             /*
-              * Note that by setting kd_readlast equal to my_kd_bufptr,
-              * we now treat the kd_buffer read the same as if we weren't
-              * wrapped and my_kd_bufptr was less than kd_readlast.
-              */
-
-             /* first copyout from readlast to end of kd_buffer */
-             copycount = kd_buflast - kd_readlast;
-             if (copycount > count)
-                 copycount = count;
-             if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
-             {
-                 *number = 0;
-                 return (EINVAL);
-             }
-             buffer += copycount;
-             count -= copycount;
-             totalcount = copycount;
-             kd_readlast += copycount;
-             if (kd_readlast == kd_buflast)
-                 kd_readlast = kd_buffer;
-             if (count == 0)
-             {
-                 *number = totalcount;
-                 return (0);
-             }
-
-             /* second copyout from top of kd_buffer to bufptr */
-             copycount = my_kd_bufptr - kd_readlast;
-             if (copycount > count)
-                 copycount = count;
-             if (copycount == 0)
-             {
-                 *number = totalcount;
-                 return (0);
-             }
-             if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
-             {
-                 return (EINVAL);
-             }
-             kd_readlast += copycount;
-             totalcount += copycount;
-             *number = totalcount;
-             return (0);
-         }
-     } /* end if KDBG_BUFINIT */
- } /* end if count */
- return (EINVAL);
+ if (last_wrap_cpu == -1)
+     first_event = FALSE;
+
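+ /*
+  * merge loop: repeatedly scan the per-cpu trace buffers, pull out the
+  * event with the oldest timestamp, and append it to kdcopybuf; filled
+  * chunks of kdcopybuf (up to KDCOPYBUF_COUNT events) are then copied
+  * out to the caller's buffer below
+  */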
+ while (count) {
+     tempbuf = kdcopybuf;
+     tempbuf_number = 0;
+
+     while (tempbuf_count) {
+         mintime = 0xffffffffffffffffULL;  /* all actual timestamps are below */
+         mincpu = -1;
+
+         for (cpu = 0; cpu < kd_cpus; cpu++) {
+             if (kdbip[cpu].kd_stop == 0)  /* empty buffer */
+                 continue;
+             t = kdbip[cpu].kd_readlast[0].timestamp & KDBG_TIMESTAMP_MASK;
+
+             if (t < mintime) {
+                 mintime = t;
+                 mincpu = cpu;
+             }
+         }
+         if (mincpu < 0)
+             /*
+              * all buffers ran empty early
+              */
+             break;
+
+         if (first_event == TRUE) {
+             /*
+              * make sure we leave room for the LAST_WRAPPER event
+              * we inject, by throwing away the first event; it's
+              * better to lose that one than the last one
+              */
+             first_event = FALSE;
+
+             kdbip[mincpu].kd_readlast++;
+
+             if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
+                 kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
+             if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
+                 kdbip[mincpu].kd_stop = 0;
+
+             continue;
+         }
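+         /*
+          * if this cpu's buffer is the one that last wrapped, emit a
+          * synthesized buffer-wrap event in its place so user space
+          * can tell that older events were dropped; the wrapping cpu
+          * is carried in the high bits of the timestamp via
+          * KDBG_CPU_SHIFT
+          */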
+         if (last_wrap_cpu == mincpu) {
+             tempbuf->debugid = MISCDBG_CODE(DBG_BUFFER, 0) | DBG_FUNC_NONE;
+             tempbuf->arg1 = kd_bufsize / sizeof(kd_buf);
+             tempbuf->arg2 = kd_cpus;
+             tempbuf->arg3 = 0;
+             tempbuf->arg4 = 0;
+             tempbuf->arg5 = (int)current_thread();
+
+             tempbuf->timestamp = last_wrap_time | (((uint64_t)last_wrap_cpu) << KDBG_CPU_SHIFT);
+
+             tempbuf++;
+
+             last_wrap_cpu = -1;
+
+         } else {
+             *(tempbuf++) = kdbip[mincpu].kd_readlast[0];
+
+             kdbip[mincpu].kd_readlast++;
+
+             if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
+                 kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
+             if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
+                 kdbip[mincpu].kd_stop = 0;
+         }
+         tempbuf_count--;
+         tempbuf_number++;
+     }
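+     /*
+      * flush whatever has accumulated in kdcopybuf to the caller's
+      * buffer before the next refill pass
+      */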
+     if (tempbuf_number) {
+         if ((error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf)))) {
+             *number = 0;
+             error = EINVAL;
+             break;
+         }
+         count -= tempbuf_number;
+         *number += tempbuf_number;
+         buffer += (tempbuf_number * sizeof(kd_buf));
+     }
+     if (tempbuf_count)
+         /*
+          * all trace buffers are empty
+          */
+         break;
+
+     if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
+         tempbuf_count = KDCOPYBUF_COUNT;
+ }
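+ /*
+  * this read may have temporarily set KDBG_NOWRAP / SLOW_NOLOG;
+  * restore the pre-read state with lock-free compare-and-swap loops
+  */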
+ if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
+     do {
+         old_kdebug_flags = kdebug_flags;
+         new_kdebug_flags = old_kdebug_flags & ~KDBG_NOWRAP;
+     } while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));
+
+     if ( !(old_kdebug_slowcheck & SLOW_NOLOG)) {
+         do {
+             old_kdebug_slowcheck = kdebug_slowcheck;
+             new_kdebug_slowcheck = old_kdebug_slowcheck & ~SLOW_NOLOG;
+         } while ( !OSCompareAndSwap((UInt32)old_kdebug_slowcheck, (UInt32)new_kdebug_slowcheck, (UInt32 *)&kdebug_slowcheck));
+     }
+ }
+ return (error);
+}
+
+
+unsigned char *getProcName(struct proc *proc);
+unsigned char *getProcName(struct proc *proc) {
+    return (unsigned char *)&proc->p_comm;  /* return a pointer to the process name */
+}
+
+#define STACKSHOT_SUBSYS_LOCK()   lck_mtx_lock(&stackshot_subsys_mutex)
+#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
+#ifdef __i386__
+#define TRAP_DEBUGGER __asm__ volatile("int3")
+#endif
+#ifdef __ppc__
+#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3")
+#endif
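+/*
+ * TRAP_DEBUGGER drops into the kernel debugger via a breakpoint-style
+ * trap ("int3" on i386, an always-taken trap word on ppc); the snapshot
+ * itself is gathered from the debugger context
+ */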
+
+#define SANE_TRACEBUF_SIZE (2 * 1024 * 1024)
+
+/* Initialize the mutex governing access to the stack snapshot subsystem */
+__private_extern__ void
+stackshot_lock_init(void)
+{
+    stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
+
+    stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);
+
+    stackshot_subsys_lck_attr = lck_attr_alloc_init();
+
+    lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
+}
+
+/*
+ * stack_snapshot:  Obtains a coherent set of stack traces for all threads
+ *                  on the system, tracing both kernel and user stacks
+ *                  where available.  Uses machine-specific trace routines
+ *                  for ppc, ppc64 and x86.
+ * Inputs:          uap->pid - process id of the process to be traced, or
+ *                  -1 for the entire system
+ *                  uap->tracebuf - address of the user space destination
+ *                  buffer
+ *                  uap->tracebuf_size - size of the user space trace buffer
+ *                  uap->options - various options, including the maximum
+ *                  number of frames to trace
+ * Outputs:         EPERM if the caller is not privileged
+ *                  EINVAL if the supplied trace buffer isn't sanely sized
+ *                  ENOMEM if we don't have enough memory to satisfy the
+ *                  request
+ *                  ENOENT if the target pid isn't found
+ *                  ENOSPC if the supplied buffer is insufficient
+ *                  *retval contains the number of bytes traced if
+ *                  successful, and -1 otherwise. If the request failed
+ *                  due to trace buffer exhaustion, we copyout as much
+ *                  as possible.
+ */
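+/*
+ * A hypothetical user-level invocation, for illustration only (the
+ * syscall number and wrapper shown here are assumptions, not part of
+ * this change):
+ *
+ *     char buf[64 * 1024];
+ *     int bytes = syscall(SYS_stack_snapshot, -1, buf, sizeof(buf), 10);
+ *
+ * A pid of -1 snapshots every process; options carries the maximum
+ * frame count.
+ */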
+int
+stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, register_t *retval) {
+    int error = 0;
+
+    if ((error = suser(kauth_cred_get(), &p->p_acflag)))
+        return (error);
+
+    return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
+                           uap->options, retval);
+}
+
+int
+stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval)
+{
+    int error = 0;
+    unsigned bytesTraced = 0;
+
+    *retval = -1;
+
+    /* Serialize tracing */
+    STACKSHOT_SUBSYS_LOCK();
+
+    if ((tracebuf_size == 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
+        error = EINVAL;
+        goto error_exit;
+    }
+
+    MALLOC(stackshot_snapbuf, void *, tracebuf_size, M_TEMP, M_WAITOK);
+
+    if (stackshot_snapbuf == NULL) {
+        error = ENOMEM;
+        goto error_exit;
+    }
+
+    /* Preload trace parameters */
+    kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, options);
+
+    /*
+     * Trap to the debugger to obtain a coherent stack snapshot; this
+     * populates the trace buffer
+     */
+    if (panic_active()) {
+        error = ENOMEM;
+        goto error_exit;
+    }
+
+    TRAP_DEBUGGER;
+
+    bytesTraced = kdp_stack_snapshot_bytes_traced();
+
+    if (bytesTraced > 0) {
+        if ((error = copyout(stackshot_snapbuf, tracebuf,
+            ((bytesTraced < tracebuf_size) ? bytesTraced : tracebuf_size))))
+            goto error_exit;
+        *retval = bytesTraced;
+    }
+    else {
+        error = ENOENT;
+        goto error_exit;
+    }
+
+    error = kdp_stack_snapshot_geterror();
+    if (error == -1) {
+        error = ENOSPC;
+        *retval = -1;
+        goto error_exit;
+    }
+
+error_exit:
+    if (stackshot_snapbuf != NULL)
+        FREE(stackshot_snapbuf, M_TEMP);
+    stackshot_snapbuf = NULL;
+    STACKSHOT_SUBSYS_UNLOCK();
+    return error;
+}
+
+void
+start_kern_tracing(unsigned int new_nkdbufs) {
+    if (!new_nkdbufs)
+        return;
+    kdbg_set_nkdbufs(new_nkdbufs);
+    kdbg_lock_init();
+    kdbg_reinit();
+    kdebug_enable |= KDEBUG_ENABLE_TRACE;
+    kdebug_slowcheck &= ~SLOW_NOLOG;
+    kdbg_mapinit();
+    printf("kernel tracing started\n");