/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <kern/thread.h>
#include <kern/task.h>
+#include <kern/debug.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>
int kdbg_reinit(void);
int kdbg_bootstrap(void);
static int create_buffers(void);
static void delete_buffers(void);
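+/* IOSleep() is provided by IOKit; forward-declared here rather than including IOKit headers */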
+extern void IOSleep(int);
+
#ifdef ppc
extern uint32_t maxDec;
#endif
kd_chudhook_fn kdebug_chudhook = 0; /* pointer to CHUD toolkit function */
-__private_extern__ void stackshot_lock_init( void );
+__private_extern__ void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));
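+/* the __TEXT,initcode section groups one-time bootstrap code (presumably so it can be reclaimed after boot) */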
/* Support syscall SYS_kdebug_trace */
int
return(0);
}
-
-
static int
create_buffers(void)
{
int nentries;
nentries = nkdbufs / kd_cpus;
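+ /* round nkdbufs down to an exact multiple of kd_cpus so each cpu's buffer holds the same number of entries */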
+ nkdbufs = nentries * kd_cpus;
+
kd_bufsize = nentries * sizeof(kd_buf);
bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);
if (kdcopybuf == 0) {
if (kmem_alloc(kernel_map, (unsigned int *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS)
- return ENOMEM;
+ return(ENOMEM);
}
for (cpu = 0; cpu < kd_cpus; cpu++) {
if (kmem_alloc(kernel_map, (unsigned int *)&kdbip[cpu].kd_buffer, kd_bufsize) != KERN_SUCCESS)
{
/* If kdebug flag is not set for current proc, return */
curproc = current_proc();
- if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
+ if ((curproc && !(curproc->p_kdebug)) &&
((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
goto out;
}
{
/* If kdebug flag is set for current proc, return */
curproc = current_proc();
- if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
+ if ((curproc && curproc->p_kdebug) &&
((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
goto out;
}
host_basic_info_data_t hinfo;
mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
-
if (kdebug_flags & KDBG_LOCKINIT)
return;
/* get the number of cpus and cache it */
#define BSD_HOST 1
host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
- kd_cpus = hinfo.physical_cpu_max;
+ kd_cpus = hinfo.logical_cpu_max;
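+ /* use the logical cpu count: with SMT, logical cpus can outnumber physical ones and each needs its own trace buffer */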
- if (kmem_alloc(kernel_map, (unsigned int *)&kdbip, sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
+ if (kmem_alloc(kernel_map, (unsigned int *)&kdbip,
+ sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
return;
/*
void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
- int i;
char *dbg_nameptr;
int dbg_namelen;
long dbg_parms[4];
-
if (!proc)
{
*arg1 = 0;
if(dbg_namelen > (int)sizeof(dbg_parms))
dbg_namelen = sizeof(dbg_parms);
- for(i=0;dbg_namelen > 0; i++)
- {
- dbg_parms[i]=*(long*)dbg_nameptr;
- dbg_nameptr += sizeof(long);
- dbg_namelen -= sizeof(long);
- }
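+ /* dbg_namelen was clamped to sizeof(dbg_parms) above, so a single strncpy covers all four longs */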
+ strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
*arg1=dbg_parms[0];
*arg2=dbg_parms[1];
{
mapptr=&t->map[t->count];
mapptr->thread = (unsigned int)th_act;
- (void) strncpy (mapptr->command, t->atts->task_comm,
- sizeof(t->atts->task_comm)-1);
- mapptr->command[sizeof(t->atts->task_comm)-1] = '\0';
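+ /* strlcpy() guarantees NUL termination, replacing the manual strncpy()-then-terminate idiom */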
+ (void) strlcpy (mapptr->command, t->atts->task_comm,
+ sizeof(t->atts->task_comm));
/*
Some kernel threads have no associated pid.
if (kdebug_flags & KDBG_MAPINIT)
return;
+ /* TODO: use proc_iterate() with PROC_SCANPROCLIST instead of walking allproc directly */
+ proc_list_lock();
+
/* Calculate the sizes of map buffers*/
for (p = allproc.lh_first, kd_mapcount=0, tts_count=0; p;
p = p->p_list.le_next)
tts_count++;
}
+ proc_list_unlock();
+
/*
* The proc count could change during buffer allocation,
* so introduce a small fudge factor to bump up the
*/
if (tts_mapptr) {
+ /* should use proc_iterate */
+ proc_list_lock();
+
for (p = allproc.lh_first, i=0; p && i < tts_count;
p = p->p_list.le_next) {
- if (p->p_flag & P_WEXIT)
+ if (p->p_lflag & P_LEXIT)
continue;
if (p->task) {
task_reference(p->task);
tts_mapptr[i].task = p->task;
tts_mapptr[i].pid = p->p_pid;
- (void)strncpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm) - 1);
+ (void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
i++;
}
}
tts_count = i;
+
+ proc_list_unlock();
+
}
if (pid > 0)
{
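+ /* proc_find(), unlike the old pfind(), returns a referenced proc; the reference is dropped via proc_rele() below */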
- if ((p = pfind(pid)) == NULL)
+ if ((p = proc_find(pid)) == NULL)
ret = ESRCH;
else
{
kdebug_flags &= ~KDBG_PIDEXCLUDE;
kdebug_slowcheck |= SLOW_CHECKS;
- p->p_flag |= P_KDEBUG;
+ p->p_kdebug = 1;
}
else /* turn off pid check for this pid value */
{
/* Don't turn off all pid checking though */
/* kdebug_flags &= ~KDBG_PIDCHECK;*/
- p->p_flag &= ~P_KDEBUG;
+ p->p_kdebug = 0;
}
+ proc_rele(p);
}
}
else
if (pid > 0)
{
- if ((p = pfind(pid)) == NULL)
+ if ((p = proc_find(pid)) == NULL)
ret = ESRCH;
else
{
kdebug_flags &= ~KDBG_PIDCHECK;
kdebug_slowcheck |= SLOW_CHECKS;
- p->p_flag |= P_KDEBUG;
+ p->p_kdebug = 1;
}
else /* turn off pid exclusion for this pid value */
{
/* Don't turn off all pid exclusion though */
/* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
- p->p_flag &= ~P_KDEBUG;
+ p->p_kdebug = 0;
}
+ proc_rele(p);
}
}
else
}
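+/*
+ * Clamp and set the number of trace entries; shared by the
+ * KERN_KDSETBUF sysctl below and by start_kern_tracing().
+ */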
+static void
+kdbg_set_nkdbufs(unsigned int value)
+{
+ /*
+ * We allow a maximum buffer size of 25% of RAM or of the max mapped address, whichever is smaller.
+ * 'value' is the desired number of trace entries
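+ * (e.g. with sane_size = 512MB the cap works out to 128MB worth of kd_buf entries)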
+ */
+ unsigned int max_entries = (sane_size/4) / sizeof(kd_buf);
+
+ if (value <= max_entries)
+ nkdbufs = value;
+ else
+ nkdbufs = max_entries;
+}
+
+
/*
* This function is provided for the CHUD toolkit only.
* int val:
int
-kdbg_control(int *name, __unused u_int namelen, user_addr_t where, size_t *sizep)
+kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
int ret=0;
size_t size=*sizep;
- unsigned int max_entries;
- unsigned int value = name[1];
+ unsigned int value = 0;
kd_regtype kd_Reg;
kbufinfo_t kd_bufinfo;
pid_t curpid;
struct proc *p, *curproc;
-
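+ /* only these requests carry a value in name[1]; validate namelen before reading it */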
+ if (name[0] == KERN_KDGETENTROPY ||
+ name[0] == KERN_KDEFLAGS ||
+ name[0] == KERN_KDDFLAGS ||
+ name[0] == KERN_KDENABLE ||
+ name[0] == KERN_KDSETBUF) {
+
+ if ( namelen < 2 )
+ return(EINVAL);
+ value = name[1];
+ }
+
kdbg_lock_init();
if ( !(kdebug_flags & KDBG_LOCKINIT))
if (global_state_pid == -1)
global_state_pid = curpid;
else if (global_state_pid != curpid) {
- if ((p = pfind(global_state_pid)) == NULL) {
+ if ((p = proc_find(global_state_pid)) == NULL) {
/*
* The global pid no longer exists
*/
/*
* The global pid exists, deny this request
*/
+ proc_rele(p);
lck_mtx_unlock(kd_trace_mtx_sysctl);
return(EBUSY);
}
break;
case KERN_KDSETBUF:
- /* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */
- /* 'value' is the desired number of trace entries */
- max_entries = (sane_size/4) / sizeof(kd_buf);
- if (value <= max_entries)
- nkdbufs = value;
- else
- nkdbufs = max_entries;
+ kdbg_set_nkdbufs(value);
break;
case KERN_KDSETUP:
ret=kdbg_reinit();
uint32_t tempbuf_number;
unsigned int old_kdebug_flags, new_kdebug_flags;
unsigned int old_kdebug_slowcheck, new_kdebug_slowcheck;
+ boolean_t first_event = TRUE;
+
count = *number/sizeof(kd_buf);
*number = 0;
if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
tempbuf_count = KDCOPYBUF_COUNT;
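+ /* if the buffers never wrapped, no LAST_WRAPPER event will be injected, so there's no need to sacrifice the first event */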
+ if (last_wrap_cpu == -1)
+ first_event = FALSE;
+
while (count) {
tempbuf = kdcopybuf;
tempbuf_number = 0;
*/
break;
+ if (first_event == TRUE) {
+ /*
+ * Make sure we leave room for the
+ * LAST_WRAPPER event we inject by
+ * throwing away the first event;
+ * it's better to lose that one
+ * than the last one.
+ */
+ first_event = FALSE;
+
+ kdbip[mincpu].kd_readlast++;
+
+ if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
+ kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
+ if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
+ kdbip[mincpu].kd_stop = 0;
+
+ continue;
+ }
if (last_wrap_cpu == mincpu) {
tempbuf->debugid = MISCDBG_CODE(DBG_BUFFER, 0) | DBG_FUNC_NONE;
- tempbuf->arg1 = 0;
- tempbuf->arg2 = 0;
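+ /* report per-cpu buffer capacity (arg1) and cpu count (arg2) in the wrap event instead of zeros */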
+ tempbuf->arg1 = kd_bufsize / sizeof(kd_buf);
+ tempbuf->arg2 = kd_cpus;
tempbuf->arg3 = 0;
tempbuf->arg4 = 0;
tempbuf->arg5 = (int)current_thread();
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif
#ifdef __ppc__
#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
#endif
#define SANE_TRACEBUF_SIZE 2*1024*1024
/* Trap to the debugger to obtain a coherent stack snapshot; this populates
* the trace buffer
*/
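+ /* don't trap for a snapshot if a panic is already in progress */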
+ if (panic_active()) {
+ error = ENOMEM;
+ goto error_exit;
+ }
+
TRAP_DEBUGGER;
bytesTraced = kdp_stack_snapshot_bytes_traced();
STACKSHOT_SUBSYS_UNLOCK();
return error;
}
+
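+/* early-boot entry point for kernel tracing, before the sysctl interface is up (typically wired to a boot-arg; assumption based on the parameter) */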
+void
+start_kern_tracing(unsigned int new_nkdbufs)
+{
+ if (!new_nkdbufs)
+ return;
+ kdbg_set_nkdbufs(new_nkdbufs);
+ kdbg_lock_init();
+ kdbg_reinit();
+ kdebug_enable |= KDEBUG_ENABLE_TRACE;
+ kdebug_slowcheck &= ~SLOW_NOLOG;
+ kdbg_mapinit();
+ printf("kernel tracing started\n");
+}