+
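+/*
+ * kdbg_control: control interface for the kernel trace facility.
+ * name[0] selects the operation (KERN_KDGETBUF, KERN_KDGETENTROPY,
+ * ...) and name[1] carries an operation-specific value; results are
+ * copied out to 'where', bounded by '*sizep'.
+ */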
+int
+kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
+{
+	int ret = 0;
+	int size = *sizep;
+	int max_entries;
+	unsigned int value = name[1];
+	kd_regtype kd_Reg;
+	kbufinfo_t kd_bufinfo;
+	pid_t curpid;
+	struct proc *p, *curproc;
+
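+	/*
+	 * Set up the trace mutex if needed, then serialize
+	 * control requests against concurrent callers.
+	 */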
+	kdbg_lock_init();
+	lck_mtx_lock(kd_trace_mtx);
+
+	if (name[0] == KERN_KDGETBUF) {
+		/*
+		 * Does not alter the global_state_pid.
+		 * This is a passive request.
+		 */
+		if (size < sizeof(kd_bufinfo.nkdbufs)) {
+			/*
+			 * There is not enough room to return even
+			 * the first element of the info structure.
+			 */
+			lck_mtx_unlock(kd_trace_mtx);
+
+			return (EINVAL);
+		}
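+		/*
+		 * Snapshot the current trace state: buffer count,
+		 * thread-map entries, logging status, flags, and the
+		 * controlling pid (global_state_pid).
+		 */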
+		kd_bufinfo.nkdbufs = nkdbufs;
+		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);
+
+		if (kdebug_slowcheck & SLOW_NOLOG)
+			kd_bufinfo.nolog = 1;
+		else
+			kd_bufinfo.nolog = 0;
+		kd_bufinfo.flags = kdebug_flags;
+		kd_bufinfo.bufid = global_state_pid;
+
+		if (size >= sizeof(kd_bufinfo)) {
+			/*
+			 * Provide all the info we have.
+			 */
+			if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo))) {
+				lck_mtx_unlock(kd_trace_mtx);
+
+				return (EINVAL);
+			}
+		} else {
+			/*
+			 * For backwards compatibility, only provide
+			 * as much info as there is room for.
+			 */
+			if (copyout(&kd_bufinfo, where, size)) {
+				lck_mtx_unlock(kd_trace_mtx);
+
+				return (EINVAL);
+			}
+		}
+		lck_mtx_unlock(kd_trace_mtx);
+
+		return (0);
+	} else if (name[0] == KERN_KDGETENTROPY) {
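+		/*
+		 * Only one entropy collection may be in flight at a
+		 * time; fail with EBUSY if a buffer is already active.
+		 */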
+		if (kd_entropy_buffer)
+			ret = EBUSY;
+		else
+			ret = kdbg_getentropy(where, sizep, value);
+		lck_mtx_unlock(kd_trace_mtx);
+
+		return (ret);
+	}
+
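+	/*
+	 * All remaining requests are attributed to the calling
+	 * process; bail out if there is no current process.
+	 */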
+	if ((curproc = current_proc()))
+		curpid = curproc->p_pid;
+	else {
+		lck_mtx_unlock(kd_trace_mtx);
+
+		return (ESRCH);
+	}