+ /*
+ * allocate the lock group attribute and the lock group
+ */
+ kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
+ kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);
+
+ /*
+ * allocate the lock attribute
+ */
+ kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();
+
+
+ /*
+ * allocate and initialize the mutex and spin locks
+ */
+ kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
+ kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
+ kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
+
+ kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
+}
+
+
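+ /*
+ * Clear the wrap indicator and allocate the trace buffers
+ * via create_buffers().
+ */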
+int
+kdbg_bootstrap(boolean_t early_trace)
+{
+ kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
+
+ return (create_buffers(early_trace));
+}
+
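+ /*
+ * Tear down and rebuild the trace state: disable tracing, release the
+ * existing trace and thread-map buffers, then bootstrap fresh buffers.
+ */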
+int
+kdbg_reinit(boolean_t early_trace)
+{
+ int ret = 0;
+
+ /*
+ * Disable trace collection.
+ * First make sure we're not in
+ * the middle of cutting a trace.
+ */
+ kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
+
+ /*
+ * make sure the SLOW_NOLOG is seen
+ * by everyone that might be trying
+ * to cut a trace.
+ */
+ IOSleep(100);
+
+ delete_buffers();
+
+ if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
+ kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
+ kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
+ kd_mapsize = 0;
+ kd_mapptr = (kd_threadmap *) 0;
+ kd_mapcount = 0;
+ }
+ ret = kdbg_bootstrap(early_trace);
+
+ RAW_file_offset = 0;
+ RAW_file_written = 0;
+
+ return(ret);
+}
+
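+ /*
+ * Report the pid to record for a trace entry: the pid of 'proc',
+ * or 0 if no process was supplied.
+ */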
+void
+kdbg_trace_data(struct proc *proc, long *arg_pid)
+{
+ if (!proc)
+ *arg_pid = 0;
+ else
+ *arg_pid = proc->p_pid;
+}
+
+
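+ /*
+ * Pack the command name of 'proc' into the four trace arguments,
+ * up to sizeof(dbg_parms) bytes; all four are zeroed if no process
+ * was supplied.
+ */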
+void
+kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
+{
+ char *dbg_nameptr;
+ int dbg_namelen;
+ long dbg_parms[4];
+
+ if (!proc) {
+ *arg1 = 0;
+ *arg2 = 0;
+ *arg3 = 0;
+ *arg4 = 0;
+ return;
+ }
+ /*
+ * Collect the process command name (p_comm) for tracing
+ */
+ dbg_nameptr = proc->p_comm;
+ dbg_namelen = (int)strlen(proc->p_comm);
+ dbg_parms[0] = 0L;
+ dbg_parms[1] = 0L;
+ dbg_parms[2] = 0L;
+ dbg_parms[3] = 0L;
+
+ if (dbg_namelen > (int)sizeof(dbg_parms))
+ dbg_namelen = (int)sizeof(dbg_parms);
+
+ strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
+
+ *arg1 = dbg_parms[0];
+ *arg2 = dbg_parms[1];
+ *arg3 = dbg_parms[2];
+ *arg4 = dbg_parms[3];
+}
+
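+ /*
+ * Per-thread callback used by kdbg_mapinit(): record the thread id
+ * and its task's command name in the next kd_threadmap entry.
+ */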
+static void
+kdbg_resolve_map(thread_t th_act, void *opaque)
+{
+ kd_threadmap *mapptr;
+ krt_t *t = (krt_t *)opaque;
+
+ if (t->count < t->maxcount) {
+ mapptr = &t->map[t->count];
+ mapptr->thread = (uintptr_t)thread_tid(th_act);
+
+ (void) strlcpy(mapptr->command, t->atts->task_comm,
+ sizeof(mapptr->command));
+ /*
+ * Some kernel threads have no associated pid.
+ * We still need to mark the entry as valid.
+ */
+ if (t->atts->pid)
+ mapptr->valid = t->atts->pid;
+ else
+ mapptr->valid = 1;
+
+ t->count++;
+ }
+}
+
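+ /*
+ * Build the thread map: size the buffers from the proc list, snapshot
+ * each live task's pid and command name, then emit one map entry per
+ * thread via kdbg_resolve_map().
+ */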
+void
+kdbg_mapinit(void)
+{
+ struct proc *p;
+ struct krt akrt;
+ int tts_count; /* number of task-to-string structures */
+ struct tts *tts_mapptr;
+ unsigned int tts_mapsize = 0;
+ vm_offset_t tts_maptomem = 0;
+ int i;
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
+ return;
+
+ /*
+ * need to use PROC_SCANPROCLIST with proc_iterate
+ */
+ proc_list_lock();
+
+ /*
+ * Calculate the sizes of map buffers
+ */
+ for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p; p = p->p_list.le_next) {
+ kd_mapcount += get_task_numacts((task_t)p->task);
+ tts_count++;
+ }
+ proc_list_unlock();
+
+ /*
+ * The proc count could change during buffer allocation,
+ * so introduce a small fudge factor to bump up the
+ * buffer sizes. This gives new tasks some chance of
+ * making it into the tables. Bump up by 10%.
+ */
+ kd_mapcount += kd_mapcount/10;
+ tts_count += tts_count/10;
+
+ kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
+
+ if (kmem_alloc(kernel_map, &kd_maptomem, (vm_size_t)kd_mapsize) == KERN_SUCCESS) {
+ kd_mapptr = (kd_threadmap *) kd_maptomem;
+ bzero(kd_mapptr, kd_mapsize);
+ } else
+ kd_mapptr = (kd_threadmap *) 0;
+
+ tts_mapsize = tts_count * sizeof(struct tts);
+
+ if (kmem_alloc(kernel_map, &tts_maptomem, (vm_size_t)tts_mapsize) == KERN_SUCCESS) {
+ tts_mapptr = (struct tts *) tts_maptomem;
+ bzero(tts_mapptr, tts_mapsize);
+ } else
+ tts_mapptr = (struct tts *) 0;
+
+ /*
+ * We need to save each proc's command string
+ * and take a reference on each task associated
+ * with a valid process.
+ */
+ if (tts_mapptr) {
+ /*
+ * should use proc_iterate
+ */
+ proc_list_lock();
+
+ for (p = allproc.lh_first, i = 0; p && i < tts_count; p = p->p_list.le_next) {
+ if (p->p_lflag & P_LEXIT)
+ continue;
+
+ if (p->task) {
+ task_reference(p->task);
+ tts_mapptr[i].task = p->task;
+ tts_mapptr[i].pid = p->p_pid;
+ (void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
+ i++;
+ }
+ }
+ tts_count = i;
+
+ proc_list_unlock();
+ }
+
+ if (kd_mapptr && tts_mapptr) {
+ kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
+
+ /*
+ * Initialize thread map data
+ */
+ akrt.map = kd_mapptr;
+ akrt.count = 0;
+ akrt.maxcount = kd_mapcount;
+
+ for (i = 0; i < tts_count; i++) {
+ akrt.atts = &tts_mapptr[i];
+ task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
+ task_deallocate((task_t) tts_mapptr[i].task);
+ }
+ kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
+ }
+}
+
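+ /*
+ * Disable tracing and release all trace state: filter flags, the
+ * typefilter, the trace buffers and the thread map.
+ */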
+static void
+kdbg_clear(void)
+{
+ /*
+ * Clean up the trace buffer.
+ * First make sure we're not in
+ * the middle of cutting a trace.
+ */
+ kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
+
+ /*
+ * make sure the SLOW_NOLOG is seen
+ * by everyone that might be trying
+ * to cut a trace.
+ */
+ IOSleep(100);
+
+ kdlog_sched_events = 0;
+ global_state_pid = -1;
+ kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+ kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
+ kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
+
+ kdbg_disable_typefilter();
+
+ delete_buffers();
+ nkdbufs = 0;
+
+ /* Clean up the thread map buffer */
+ kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
+ if (kd_mapptr) {
+ kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
+ kd_mapptr = (kd_threadmap *) 0;
+ }
+ kd_mapsize = 0;
+ kd_mapcount = 0;
+
+ RAW_file_offset = 0;
+ RAW_file_written = 0;
+}
+
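+ /*
+ * flag == 1: enable pid checking and mark the pid in kdr->value1 for
+ * tracing; otherwise just unmark that pid, leaving pid checking alone.
+ */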
+int
+kdbg_setpid(kd_regtype *kdr)
+{
+ pid_t pid;
+ int flag, ret=0;
+ struct proc *p;
+
+ pid = (pid_t)kdr->value1;
+ flag = (int)kdr->value2;
+
+ if (pid > 0) {
+ if ((p = proc_find(pid)) == NULL)
+ ret = ESRCH;
+ else {
+ if (flag == 1) {
+ /*
+ * turn on pid check for this and all pids
+ */
+ kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
+ kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
+ kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+
+ p->p_kdebug = 1;
+ } else {
+ /*
+ * turn off pid check for this pid value
+ * Don't turn off all pid checking though
+ *
+ * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
+ */
+ p->p_kdebug = 0;
+ }
+ proc_rele(p);
+ }
+ }
+ else
+ ret = EINVAL;
+
+ return(ret);
+}
+
+/* This is for pid exclusion in the trace buffer */
+int
+kdbg_setpidex(kd_regtype *kdr)
+{
+ pid_t pid;
+ int flag, ret=0;
+ struct proc *p;
+
+ pid = (pid_t)kdr->value1;
+ flag = (int)kdr->value2;
+
+ if (pid > 0) {
+ if ((p = proc_find(pid)) == NULL)
+ ret = ESRCH;
+ else {
+ if (flag == 1) {
+ /*
+ * turn on pid exclusion
+ */
+ kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
+ kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
+ kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+
+ p->p_kdebug = 1;
+ }
+ else {
+ /*
+ * turn off pid exclusion for this pid value
+ * Don't turn off all pid exclusion though
+ *
+ * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
+ */
+ p->p_kdebug = 0;
+ }
+ proc_rele(p);
+ }
+ } else
+ ret = EINVAL;
+
+ return(ret);
+}
+
+
+/*
+ * This is for setting a maximum decrementer value.
+ * The operation is no longer supported: in-range values return
+ * ENOTSUP and out-of-range values return EINVAL.
+ */
+int
+kdbg_setrtcdec(kd_regtype *kdr)
+{
+ int ret = 0;
+ natural_t decval;
+
+ decval = (natural_t)kdr->value1;
+
+ if (decval && decval < KDBG_MINRTCDEC)
+ ret = EINVAL;
+ else
+ ret = ENOTSUP;
+
+ return(ret);
+}
+
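+ /*
+ * Allocate and zero the typefilter bitmap and switch the trace
+ * facility over to typefilter-based checking.
+ */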
+int
+kdbg_enable_typefilter(void)
+{
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ /* free the old filter */
+ kdbg_disable_typefilter();
+ }
+
+ if (kmem_alloc(kernel_map, (vm_offset_t *)&type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE) != KERN_SUCCESS) {
+ return ENOSPC;
+ }
+
+ bzero(type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
+
+ /* Turn off range and value checks */
+ kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);
+
+ /* Enable filter checking */
+ kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
+ kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+ return 0;
+}
+
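+ /*
+ * Turn off typefilter checking and free the typefilter bitmap; slow
+ * checks stay enabled only if pid filtering still requires them.
+ */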
+int
+kdbg_disable_typefilter(void)
+{
+ /* Disable filter checking */
+ kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;
+
+ /* Turn off slow checks unless pid checks are using them */
+ if ( (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
+ kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+ else
+ kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
+
+ if (type_filter_bitmap == NULL)
+ return 0;
+
+ vm_offset_t old_bitmap = (vm_offset_t)type_filter_bitmap;
+ type_filter_bitmap = NULL;
+
+ kmem_free(kernel_map, old_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
+ return 0;
+}
+
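+ /*
+ * Configure the class/subclass/value range checks applied to
+ * incoming trace points, based on kdr->type.
+ */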
+int
+kdbg_setreg(kd_regtype * kdr)
+{
+ int ret=0;
+ unsigned int val_1, val_2, val;
+
+ kdlog_sched_events = 0;
+
+ switch (kdr->type) {
+
+ case KDBG_CLASSTYPE :
+ val_1 = (kdr->value1 & 0xff);