apple/xnu.git (xnu-1699.22.73): bsd/kern/kdebug.c
diff --git a/bsd/kern/kdebug.c b/bsd/kern/kdebug.c
index 3eb9043dd5fb1f565dd265378802dca4ecb8cce6..f7c7fa73a92a407abe2bc87b9d0e2d26a1b0ff51 100644
@@ -20,6 +20,7 @@
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
+
 #include <machine/spl.h>
 
 #include <sys/errno.h>
@@ -30,6 +31,7 @@
 #include <sys/sysctl.h>
 #include <sys/kdebug.h>
 #include <sys/sysproto.h>
+#include <sys/bsdtask_info.h>
 
 #define HZ      100
 #include <mach/clock_types.h>
 #include <machine/machine_routines.h>
 
 #if defined(__i386__) || defined(__x86_64__)
-#include <i386/rtclock.h>
+#include <i386/rtclock_protos.h>
+#include <i386/mp.h>
+#include <i386/machine_routines.h>
 #endif
+
+#include <kern/clock.h>
+
 #include <kern/thread.h>
 #include <kern/task.h>
 #include <kern/debug.h>
+#include <kern/kalloc.h>
+#include <kern/cpu_data.h>
 #include <kern/assert.h>
 #include <vm/vm_kern.h>
 #include <sys/lock.h>
 #include <sys/vnode.h>
 #include <sys/vnode_internal.h>
 #include <sys/fcntl.h>
+#include <sys/file_internal.h>
+#include <sys/ubc.h>
 
 #include <mach/mach_host.h>            /* for host_info() */
 #include <libkern/OSAtomic.h>
 
+#include <machine/pal_routines.h>
+
 /* XXX should have prototypes, but Mach does not provide one */
 void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
 int cpu_number(void);  /* XXX <machine/...> include path broken */
@@ -74,18 +87,14 @@ int kdbg_setrtcdec(kd_regtype *);
 int kdbg_setpidex(kd_regtype *);
 int kdbg_setpid(kd_regtype *);
 void kdbg_mapinit(void);
-int kdbg_reinit(void);
-int kdbg_bootstrap(void);
+int kdbg_reinit(boolean_t);
+int kdbg_bootstrap(boolean_t);
 
-static int create_buffers(void);
+static int create_buffers(boolean_t);
 static void delete_buffers(void);
 
 extern void IOSleep(int);
 
-#ifdef ppc
-extern uint32_t maxDec;
-#endif
-
 /* trace enable status */
 unsigned int kdebug_enable = 0;
 
@@ -96,23 +105,38 @@ unsigned int      kd_entropy_count  = 0;
 unsigned int      kd_entropy_indx   = 0;
 vm_offset_t       kd_entropy_buftomem = 0;
 
+#define MAX_ENTROPY_COUNT      (128 * 1024)
+
 
 #define SLOW_NOLOG     0x01
 #define SLOW_CHECKS    0x02
 #define SLOW_ENTROPY   0x04
-
-unsigned int kdebug_slowcheck = SLOW_NOLOG;
+#define SLOW_CHUD      0x08
 
 unsigned int kd_cpus;
 
 #define EVENTS_PER_STORAGE_UNIT                2048
 #define MIN_STORAGE_UNITS_PER_CPU      4
 
+#define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])
+
+#define NATIVE_TRACE_FACILITY
+
+union kds_ptr {
+       struct {
+               uint32_t buffer_index:21;
+               uint16_t offset:11;
+       };
+       uint32_t raw;
+};
+
 struct kd_storage {
-       struct  kd_storage *kds_next;
-       kd_buf  *kds_bufptr;
-       kd_buf  *kds_buflast;
-       kd_buf  *kds_readlast;
+       union   kds_ptr kds_next;
+       uint32_t kds_bufindx;
+       uint32_t kds_bufcnt;
+       uint32_t kds_readlast;
+       boolean_t kds_lostevents;
+       uint64_t  kds_timestamp;
 
        kd_buf  kds_records[EVENTS_PER_STORAGE_UNIT];
 };
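
The union above packs a storage-unit reference into 32 bits: buffer_index selects an entry in kd_bufs[] and offset selects a kd_storage element within it, so the free list and the per-CPU queues can be kept as compact indices rather than kernel pointers, with KDS_PTR_NULL (all ones in .raw) as an unambiguous sentinel. POINTER_FROM_KDS_PTR() converts a packed reference back into an address. A minimal user-space sketch of the packing, not part of the diff (both fields are declared uint32_t here for portability, and the index and offset values are made up):

    #include <stdint.h>
    #include <stdio.h>

    union kds_ptr {
            struct {
                    uint32_t buffer_index:21;
                    uint32_t offset:11;
            };
            uint32_t raw;
    };

    int main(void)
    {
            union kds_ptr p = { .raw = 0xffffffff };        /* KDS_PTR_NULL */
            printf("null: index=0x%x offset=0x%x\n", p.buffer_index, p.offset);

            p.buffer_index = 3;     /* hypothetical 4th kd_bufs[] entry      */
            p.offset       = 42;    /* hypothetical 43rd unit in that buffer */
            printf("packed: raw=0x%08x\n", p.raw);  /* 0x05400003, little-endian layout */
            return 0;
    }
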
@@ -120,34 +144,52 @@ struct kd_storage {
 #define MAX_BUFFER_SIZE                        (1024 * 1024 * 128)
 #define N_STORAGE_UNITS_PER_BUFFER     (MAX_BUFFER_SIZE / sizeof(struct kd_storage))
 
-
 struct kd_storage_buffers {
        struct  kd_storage      *kdsb_addr;
        uint32_t                kdsb_size;
 };
 
-
-struct kd_storage *kds_free_list = NULL;
+#define KDS_PTR_NULL 0xffffffff
 struct kd_storage_buffers *kd_bufs = NULL;
 int    n_storage_units = 0;
 int    n_storage_buffers = 0;
+int    n_storage_threshold = 0;
+int    kds_waiter = 0;
+int    kde_waiter = 0;
 
+#pragma pack(0)
 struct kd_bufinfo {
-       struct  kd_storage *kd_list_head;
-       struct  kd_storage *kd_list_tail;
-       struct  kd_storage *kd_active;
-        uint64_t kd_prev_timebase;
+       union  kds_ptr kd_list_head;
+       union  kds_ptr kd_list_tail;
+       boolean_t kd_lostevents;
+       uint32_t _pad;
+       uint64_t kd_prev_timebase;
+       uint32_t num_bufs;
 } __attribute__(( aligned(CPU_CACHE_SIZE) ));
 
+struct kd_ctrl_page_t {
+       union kds_ptr kds_free_list;
+       uint32_t enabled        :1;
+       uint32_t _pad0          :31;
+       int                     kds_inuse_count;
+       uint32_t kdebug_flags;
+       uint32_t kdebug_slowcheck;
+       uint32_t _pad1;
+       struct {
+               uint64_t tsc_base;
+               uint64_t ns_base;
+       } cpu_timebase[32]; // should be max number of actual logical cpus
+} kd_ctrl_page = {.kds_free_list = {.raw = KDS_PTR_NULL}, .enabled = 0, .kds_inuse_count = 0, .kdebug_flags = 0, .kdebug_slowcheck = SLOW_NOLOG};
+#pragma pack()
+
 struct kd_bufinfo *kdbip = NULL;
 
-#define KDCOPYBUF_COUNT        2048
+#define KDCOPYBUF_COUNT        8192
 #define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf))
 kd_buf *kdcopybuf = NULL;
 
 
 unsigned int nkdbufs = 8192;
-unsigned int kdebug_flags = 0;
 unsigned int kdlog_beg=0;
 unsigned int kdlog_end=0;
 unsigned int kdlog_value1=0;
@@ -155,6 +197,7 @@ unsigned int kdlog_value2=0;
 unsigned int kdlog_value3=0;
 unsigned int kdlog_value4=0;
 
+static lck_spin_t * kdw_spin_lock;
 static lck_spin_t * kds_spin_lock;
 static lck_mtx_t  * kd_trace_mtx_sysctl;
 static lck_grp_t  * kd_trace_mtx_sysctl_grp;
@@ -185,10 +228,21 @@ unsigned int kd_mapcount = 0;
 vm_offset_t kd_maptomem = 0;
 
 off_t  RAW_file_offset = 0;
+int    RAW_file_written = 0;
+
+#define        RAW_FLUSH_SIZE  (2 * 1024 * 1024)
+
 
 pid_t global_state_pid = -1;       /* Used to control exclusive use of kd_buffer */
 
-#define DBG_FUNC_MASK 0xfffffffc
+#define DBG_FUNC_MASK  0xfffffffc
+
+#define INTERRUPT      0x01050000
+#define MACH_vmfault   0x01300008
+#define BSC_SysCall    0x040c0000
+#define MACH_SysCall   0x010c0000
+#define DBG_SCALL_MASK 0xffff0000
+
 
 /* task to string structure */
 struct tts
@@ -202,10 +256,10 @@ typedef struct tts tts_t;
 
 struct krt
 {
-  kd_threadmap *map;    /* pointer to the map buffer */
-  int count;
-  int maxcount;
-  struct tts *atts;
+       kd_threadmap *map;    /* pointer to the map buffer */
+       int count;
+       int maxcount;
+       struct tts *atts;
 };
 
 typedef struct krt krt_t;
@@ -215,24 +269,102 @@ typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
                                uintptr_t arg2, uintptr_t arg3,
                                uintptr_t arg4, uintptr_t arg5);
 
-kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */
+volatile kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */
 
 __private_extern__ void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));
 
-/* Support syscall SYS_kdebug_trace */
-int
-kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
+static void
+kdbg_set_tracing_enabled(boolean_t enabled)
 {
-    if ( (kdebug_enable == 0) )
-        return(EINVAL);
-  
-    kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
-    return(0);
+       int s = ml_set_interrupts_enabled(FALSE);
+       lck_spin_lock(kds_spin_lock);
+
+       if (enabled) {
+               kdebug_enable |= KDEBUG_ENABLE_TRACE;
+               kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
+               kd_ctrl_page.enabled = 1;
+       } else {
+               kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
+               kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
+               kd_ctrl_page.enabled = 0;
+       }
+       lck_spin_unlock(kds_spin_lock);
+       ml_set_interrupts_enabled(s);
 }
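
kdbg_set_tracing_enabled() and every helper below that touches kdebug_enable or kd_ctrl_page follow one locking discipline: interrupts are masked before the spin lock is taken. The lock is also acquired from code reachable at interrupt time, so if an interrupt arrived while the lock was held on the same CPU and the handler tried to take it again, the CPU would spin forever. The pattern, distilled as a sketch:

    int s = ml_set_interrupts_enabled(FALSE);   /* returns the prior state */
    lck_spin_lock(kds_spin_lock);
    /* ... update kdebug_enable and kd_ctrl_page as one atomic step ... */
    lck_spin_unlock(kds_spin_lock);
    ml_set_interrupts_enabled(s);               /* restore the prior state */
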
 
+static void
+kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
+{
+       int s = ml_set_interrupts_enabled(FALSE);
+       lck_spin_lock(kds_spin_lock);
+
+       if (enabled) {
+               kd_ctrl_page.kdebug_slowcheck |= slowflag;
+               kdebug_enable |= enableflag;
+       } else {
+               kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
+               kdebug_enable &= ~enableflag;
+       }
+       lck_spin_unlock(kds_spin_lock);
+       ml_set_interrupts_enabled(s);
+}
+
+
+#ifdef NATIVE_TRACE_FACILITY
+void
+disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
+{
+       int s = ml_set_interrupts_enabled(FALSE);
+       lck_spin_lock(kds_spin_lock);
+
+       *old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
+       *old_flags = kd_ctrl_page.kdebug_flags;
+
+       kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
+       kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;
+
+       lck_spin_unlock(kds_spin_lock);
+       ml_set_interrupts_enabled(s);
+}
+
+void
+enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
+{
+       int s = ml_set_interrupts_enabled(FALSE);
+       lck_spin_lock(kds_spin_lock);
+
+       kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;
+
+       if ( !(old_slowcheck & SLOW_NOLOG))
+               kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
+
+       if (lostevents == TRUE)
+               kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
+
+       lck_spin_unlock(kds_spin_lock);
+       ml_set_interrupts_enabled(s);
+}
+
+void trace_set_timebases(__unused uint64_t tsc, __unused uint64_t ns)
+{
+}
+#else
+/* Begin functions that are defined twice */
+void trace_set_timebases(uint64_t tsc, uint64_t ns)
+{
+       int cpu = cpu_number();
+       kd_ctrl_page.cpu_timebase[cpu].tsc_base = tsc;
+       kd_ctrl_page.cpu_timebase[cpu].ns_base = ns;
+}
+
+#endif
 
 static int
-create_buffers(void)
+#if defined(__i386__) || defined(__x86_64__)
+create_buffers(boolean_t early_trace)
+#else
+create_buffers(__unused boolean_t early_trace)
+#endif
 {
         int    i;
        int     p_buffer_size;
@@ -240,6 +372,42 @@ create_buffers(void)
        int     f_buffers;
        int     error = 0;
 
+         /*
+         * get the number of cpus and cache it
+         */
+#if defined(__i386__) || defined(__x86_64__)
+       if (early_trace == TRUE) {
+               /*
+                * we've started tracing before the
+                * IOKit has even started running... just
+                * use the static max value
+                */
+               kd_cpus = max_ncpus;
+       } else
+#endif
+       {
+               host_basic_info_data_t hinfo;
+               mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+
+#define BSD_HOST 1
+               host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
+               kd_cpus = hinfo.logical_cpu_max;
+       }
+       if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS) {
+               error = ENOSPC;
+               goto out;
+       }
+
+       trace_handler_map_bufinfo((uintptr_t)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);
+
+#if !defined(NATIVE_TRACE_FACILITY)
+       for(i=0;i<(int)kd_cpus;i++) {
+               get_nanotime_timebases(i, 
+                               &kd_ctrl_page.cpu_timebase[i].tsc_base, 
+                               &kd_ctrl_page.cpu_timebase[i].ns_base);
+       }
+#endif
+
        if (nkdbufs < (kd_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
                n_storage_units = kd_cpus * MIN_STORAGE_UNITS_PER_CPU;
        else
@@ -275,6 +443,8 @@ create_buffers(void)
                        error = ENOSPC;
                        goto out;
                }
+               bzero(kd_bufs[i].kdsb_addr, f_buffer_size);
+
                kd_bufs[i].kdsb_size = f_buffer_size;
        }
        if (p_buffer_size) {
@@ -282,8 +452,11 @@ create_buffers(void)
                        error = ENOSPC;
                        goto out;
                }
+               bzero(kd_bufs[i].kdsb_addr, p_buffer_size);
+
                kd_bufs[i].kdsb_size = p_buffer_size;
        }
+       n_storage_units = 0;
 
        for (i = 0; i < n_storage_buffers; i++) {
                struct kd_storage *kds;
@@ -293,16 +466,31 @@ create_buffers(void)
                n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
                kds = kd_bufs[i].kdsb_addr;
 
+               trace_handler_map_buffer(i, (uintptr_t)kd_bufs[i].kdsb_addr, kd_bufs[i].kdsb_size);
+
                for (n = 0; n < n_elements; n++) {
-                       kds[n].kds_next = kds_free_list;
-                       kds_free_list = &kds[n];
+                       kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
+                       kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;
 
-                       kds[n].kds_buflast = &kds[n].kds_records[EVENTS_PER_STORAGE_UNIT];
+                       kd_ctrl_page.kds_free_list.buffer_index = i;
+                       kd_ctrl_page.kds_free_list.offset = n;
                }
+               n_storage_units += n_elements;
        }
+
        bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);
 
-       kdebug_flags |= KDBG_BUFINIT;
+       for (i = 0; i < (int)kd_cpus; i++) {
+               kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
+               kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
+               kdbip[i].kd_lostevents = FALSE;
+               kdbip[i].num_bufs = 0;
+       }
+
+       kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;
+
+       kd_ctrl_page.kds_inuse_count = 0;
+       n_storage_threshold = n_storage_units / 2;
 out:
        if (error)
                delete_buffers();
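
The loop above threads every kd_storage element onto kd_ctrl_page.kds_free_list, a LIFO stack expressed in packed indices: each unit's kds_next is pointed at the old head, then the head is repointed at unit (i, n). allocate_storage_unit() performs the matching pop. Side by side, as a sketch using the structures above:

    /* push unit (i, n): done once per unit at create_buffers() time */
    kds[n].kds_next = kd_ctrl_page.kds_free_list;       /* old head */
    kd_ctrl_page.kds_free_list.buffer_index = i;        /* new head */
    kd_ctrl_page.kds_free_list.offset       = n;

    /* pop: allocate_storage_unit(), under kds_spin_lock */
    union kds_ptr kdsp = kd_ctrl_page.kds_free_list;
    if (kdsp.raw != KDS_PTR_NULL)
            kd_ctrl_page.kds_free_list = POINTER_FROM_KDS_PTR(kdsp)->kds_next;
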
@@ -318,8 +506,10 @@ delete_buffers(void)
        
        if (kd_bufs) {
                for (i = 0; i < n_storage_buffers; i++) {
-                       if (kd_bufs[i].kdsb_addr)
+                       if (kd_bufs[i].kdsb_addr) {
                                kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
+                               trace_handler_unmap_buffer(i);
+                       }
                }
                kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));
 
@@ -331,58 +521,92 @@ delete_buffers(void)
 
                kdcopybuf = NULL;
        }
-       kds_free_list = NULL;
+       kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;
 
-       kdebug_flags &= ~KDBG_BUFINIT;
+       if (kdbip) {
+               trace_handler_unmap_bufinfo();
+
+               kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);
+               
+               kdbip = NULL;
+       }
+       kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
 }
 
 
-static void
-release_storage_unit(struct kd_bufinfo *kdbp, struct kd_storage *kdsp)
+#ifdef NATIVE_TRACE_FACILITY
+void
+release_storage_unit(int cpu, uint32_t kdsp_raw)
 {
-
        int s = 0;
+       struct  kd_storage *kdsp_actual;
+       struct kd_bufinfo *kdbp;
+       union kds_ptr kdsp;
+
+       kdsp.raw = kdsp_raw;
+
        s = ml_set_interrupts_enabled(FALSE);
        lck_spin_lock(kds_spin_lock);
 
-       if (kdsp == kdbp->kd_list_head) {
+       kdbp = &kdbip[cpu];
+
+       if (kdsp.raw == kdbp->kd_list_head.raw) {
                /*
-                * its possible for the storage unit pointed to
+                * it's possible for the storage unit pointed to
                 * by kdsp to have already been stolen... so
-                * check to see if its still the head of the list
+                * check to see if it's still the head of the list
                 * now that we're behind the lock that protects 
                 * adding and removing from the queue...
                 * since we only ever release and steal units from
-                * that position, if its no longer the head
+                * that position, if it's no longer the head
                 * we have nothing to do in this context
                 */
-               kdbp->kd_list_head = kdsp->kds_next;
+               kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
+               kdbp->kd_list_head = kdsp_actual->kds_next;
        
-               kdsp->kds_next = kds_free_list;
-               kds_free_list = kdsp;
+               kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
+               kd_ctrl_page.kds_free_list = kdsp;
+
+               kd_ctrl_page.kds_inuse_count--;
        }
        lck_spin_unlock(kds_spin_lock);
        ml_set_interrupts_enabled(s);
 }
 
 
-/*
- * Interrupts are disabled when we enter this routine.
- */
-static struct kd_storage *
-allocate_storage_unit(struct kd_bufinfo *kdbp)
+boolean_t
+allocate_storage_unit(int cpu)
 {
-       struct  kd_storage *kdsp;
-       struct  kd_bufinfo *kdbp_vict, *kdbp_try;
+       union   kds_ptr kdsp;
+       struct  kd_storage *kdsp_actual;
+       struct  kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
        uint64_t        oldest_ts, ts;
+       boolean_t       retval = TRUE;
+       int                     s = 0;
                
+       s = ml_set_interrupts_enabled(FALSE);
        lck_spin_lock(kds_spin_lock);
 
-       if ((kdsp = kds_free_list))
-               kds_free_list = kdsp->kds_next;
-       else {
-               if (kdebug_flags & KDBG_NOWRAP) {
-                        kdebug_slowcheck |= SLOW_NOLOG;
+       kdbp = &kdbip[cpu];
+
+       /* If someone beat us to the allocation, return success */
+       if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
+               kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);
+
+               if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
+                       goto out;
+       }
+       
+       if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
+               kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
+               kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;
+
+               kd_ctrl_page.kds_inuse_count++;
+       } else {
+               if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
+                       kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
+                       kdbp->kd_lostevents = TRUE;
+                       retval = FALSE;
                        goto out;
                }
                kdbp_vict = NULL;
@@ -390,22 +614,25 @@ allocate_storage_unit(struct kd_bufinfo *kdbp)
 
                for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_cpus]; kdbp_try++) {
 
-                       if ((kdsp = kdbp_try->kd_list_head) == NULL) {
+                       if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
                                /*
                                 * no storage unit to steal
                                 */
                                continue;
                        }
-                       if (kdsp == kdbp_try->kd_active) {
+
+                       kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);
+
+                       if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
                                /*
                                 * make sure we don't steal the storage unit
-                                * being actively recorded to...  this state
-                                * also implies that this is the only unit assigned
-                                * to this CPU, so we can immediately move on 
+                                * being actively recorded to...  need to
+                                * move on because we don't want an out-of-order
+                                * set of events showing up later
                                 */
                                continue;
                        }
-                       ts = kdbg_get_timestamp(&(kdbp_try->kd_list_head->kds_records[0]));
+                       ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);
 
                        if (ts < oldest_ts) {
                                /*
@@ -417,37 +644,52 @@ allocate_storage_unit(struct kd_bufinfo *kdbp)
                                kdbp_vict = kdbp_try;
                        }
                }
-#if 1
                if (kdbp_vict == NULL) {
                        kdebug_enable = 0;
-
-                       panic("allocate_storage_unit: no storage units available\n");
+                       kd_ctrl_page.enabled = 0;
+                       retval = FALSE;
+                       goto out;
                }
-#endif
                kdsp = kdbp_vict->kd_list_head;
+               kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
 
-               kdbp_vict->kd_list_head = kdsp->kds_next;
+               kdbp_vict->kd_list_head = kdsp_actual->kds_next;
 
-               kdebug_flags |= KDBG_WRAPPED;
+               kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
        }
-       kdsp->kds_next     = NULL;
-       kdsp->kds_bufptr   = &kdsp->kds_records[0];
-       kdsp->kds_readlast = kdsp->kds_bufptr;
+       kdsp_actual->kds_timestamp = mach_absolute_time();
+       kdsp_actual->kds_next.raw = KDS_PTR_NULL;
+       kdsp_actual->kds_bufcnt   = 0;
+       kdsp_actual->kds_readlast = 0;
+
+       kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
+       kdbp->kd_lostevents = FALSE;
+       kdsp_actual->kds_bufindx  = 0;
 
-       if (kdbp->kd_list_head == NULL)
+       if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
                kdbp->kd_list_head = kdsp;
        else
-               kdbp->kd_list_tail->kds_next = kdsp;
+               POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
        kdbp->kd_list_tail = kdsp;
 out:
        lck_spin_unlock(kds_spin_lock);
+       ml_set_interrupts_enabled(s);
 
-       return (kdsp);
+       return (retval);
 }
+#endif
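
Two races shape the pair of functions above. kernel_debug_internal() records with only preemption disabled, so an interrupt on the same CPU can log, and allocate, between a writer noticing its unit is full and reaching the lock; allocate_storage_unit() therefore re-tests the tail under kds_spin_lock and reports success if a unit with room has appeared ("someone beat us to the allocation"). Symmetrically, release_storage_unit() only pops a unit if it is still the head of that CPU's list, since the wrap path may have stolen it first. And when the free list is empty with wrapping allowed, the allocator recycles the unit whose first record carries the oldest timestamp, skipping any head still being filled so events never appear out of order, and sets KDBG_WRAPPED to tell readers that data was lost. The double-check, distilled as a sketch:

    lck_spin_lock(kds_spin_lock);
    if (kdbp->kd_list_tail.raw != KDS_PTR_NULL &&
        POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
            goto out;       /* raced: a unit with free slots already exists */
    /* ... otherwise pop the free list, or steal the oldest unit ... */
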
 
+void
+kernel_debug_internal(
+       uint32_t        debugid,
+       uintptr_t       arg1,
+       uintptr_t       arg2,
+       uintptr_t       arg3,
+       uintptr_t       arg4,
+       uintptr_t       arg5,
+       int             entropy_flag);
 
-
-static void
+__attribute__((always_inline)) void
 kernel_debug_internal(
        uint32_t        debugid,
        uintptr_t       arg1,
@@ -459,92 +701,118 @@ kernel_debug_internal(
 {
        struct proc     *curproc;
        uint64_t        now;
-       int             s;
+       uint32_t        bindx;
+       boolean_t       s;
        kd_buf          *kd;
        int             cpu;
        struct kd_bufinfo *kdbp;
-       struct kd_storage *kdsp;
+       struct kd_storage *kdsp_actual;
 
-       s = ml_set_interrupts_enabled(FALSE);
 
-       now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
-       cpu = cpu_number();
-
-       if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
-               if (kdebug_chudhook)
-                       kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
-
-               if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
-                       goto out;
-       }
-       if (kdebug_slowcheck == 0)
-               goto record_trace;
+       if (kd_ctrl_page.kdebug_slowcheck) {
 
-       if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY)) {
-               if (kd_entropy_indx < kd_entropy_count) {
-                       kd_entropy_buffer [ kd_entropy_indx] = mach_absolute_time();
-                       kd_entropy_indx++;
-               }
-           
-               if (kd_entropy_indx == kd_entropy_count) {
+               if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
+                       kd_chudhook_fn chudhook;
                        /*
-                        * Disable entropy collection
+                        * Mask interrupts to minimize the interval across
+                        * which the driver providing the hook could be
+                        * unloaded.
                         */
-                       kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
-                       kdebug_slowcheck &= ~SLOW_ENTROPY;
+                       s = ml_set_interrupts_enabled(FALSE);
+                       chudhook = kdebug_chudhook;
+                       if (chudhook)
+                               chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
+                       ml_set_interrupts_enabled(s);
                }
-       }
-       if ( (kdebug_slowcheck & SLOW_NOLOG) )
-               goto out;
+               if ((kdebug_enable & KDEBUG_ENABLE_ENTROPY) && entropy_flag) {
+
+                       now = mach_absolute_time();
+
+                       s = ml_set_interrupts_enabled(FALSE);
+                       lck_spin_lock(kds_spin_lock);
+
+                       if (kdebug_enable & KDEBUG_ENABLE_ENTROPY) {
+
+                               if (kd_entropy_indx < kd_entropy_count) {
+                                       kd_entropy_buffer[kd_entropy_indx] = now;
+                                       kd_entropy_indx++;
+                               }
+                               if (kd_entropy_indx == kd_entropy_count) {
+                                       /*
+                                        * Disable entropy collection
+                                        */
+                                       kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
+                                       kd_ctrl_page.kdebug_slowcheck &= ~SLOW_ENTROPY;
+                               }
+                       }
+                       lck_spin_unlock(kds_spin_lock);
+                       ml_set_interrupts_enabled(s);
+               }
+               if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & KDEBUG_ENABLE_TRACE))
+                       goto out1;
        
-       if (kdebug_flags & KDBG_PIDCHECK) {
-               /*
-                * If kdebug flag is not set for current proc, return
-                */
-               curproc = current_proc();
+               if ( !ml_at_interrupt_context()) {
+                       if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
+                               /*
+                                * If kdebug flag is not set for current proc, return
+                                */
+                               curproc = current_proc();
 
-               if ((curproc && !(curproc->p_kdebug)) &&
-                   ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
-                       goto out;
-       }
-       else if (kdebug_flags & KDBG_PIDEXCLUDE) {
-               /*
-                * If kdebug flag is set for current proc, return
-                */
-               curproc = current_proc();
+                               if ((curproc && !(curproc->p_kdebug)) &&
+                                   ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
+                                       goto out1;
+                       }
+                       else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
+                               /*
+                                * If kdebug flag is set for current proc, return
+                                */
+                               curproc = current_proc();
 
-               if ((curproc && curproc->p_kdebug) &&
-                   ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
-                       goto out;
-       }
-       if (kdebug_flags & KDBG_RANGECHECK) {
-               if ((debugid < kdlog_beg)
-                   || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
-                       goto out;
-       }
-       else if (kdebug_flags & KDBG_VALCHECK) {
-               if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
-                   (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
-                   (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
-                   (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
-                   (debugid >> 24 != DBG_TRACE))
-                       goto out;
+                               if ((curproc && curproc->p_kdebug) &&
+                                   ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
+                                       goto out1;
+                       }
+               }
+               if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
+                       if ((debugid < kdlog_beg)
+                                       || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
+                               goto out1;
+               }
+               else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
+                       if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
+                                       (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
+                                       (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
+                                       (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
+                                       (debugid >> 24 != DBG_TRACE))
+                               goto out1;
+               }
        }
-
-record_trace:
+       disable_preemption();
+       cpu = cpu_number();
        kdbp = &kdbip[cpu];
-
-       if ((kdsp = kdbp->kd_active) == NULL) {
-               if ((kdsp = allocate_storage_unit(kdbp)) == NULL) {
+retry_q:
+       if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
+               kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);
+               bindx = kdsp_actual->kds_bufindx;
+       } else
+               kdsp_actual = NULL;
+       
+       if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
+               if (allocate_storage_unit(cpu) == FALSE) {
                        /*
                         * this can only happen if wrapping
                         * has been disabled
                         */
                        goto out;
                }
-               kdbp->kd_active = kdsp;
+               goto retry_q;
        }
-       kd = kdsp->kds_bufptr;
+       now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
+
+       if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
+               goto retry_q;
+
+       kd = &kdsp_actual->kds_records[bindx];
 
        kd->debugid = debugid;
        kd->arg1 = arg1;
@@ -555,12 +823,56 @@ record_trace:
                  
        kdbg_set_timestamp_and_cpu(kd, now, cpu);
 
-       kdsp->kds_bufptr++;
-
-       if (kdsp->kds_bufptr >= kdsp->kds_buflast)
-               kdbp->kd_active = NULL;
+       OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
 out:
-       ml_set_interrupts_enabled(s);
+       enable_preemption();
+out1:
+       if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) ||
+           (kde_waiter && kd_entropy_indx >= kd_entropy_count)) {
+               uint32_t        etype;
+               uint32_t        stype;
+               
+               etype = debugid & DBG_FUNC_MASK;
+               stype = debugid & DBG_SCALL_MASK;
+
+               if (etype == INTERRUPT || etype == MACH_vmfault ||
+                   stype == BSC_SysCall || stype == MACH_SysCall) {
+
+                       boolean_t need_kds_wakeup = FALSE;
+                       boolean_t need_kde_wakeup = FALSE;
+
+                       /*
+                        * try to take the lock here to synchronize with the
+                        * waiter entering the blocked state... use the try
+                        * mode to prevent deadlocks caused by re-entering this
+                        * routine due to various trace points triggered in the
+                        * lck_spin_sleep_xxxx routines used to actually enter
+                        * one of our 2 wait conditions... no problem if we fail,
+                        * there will be lots of additional events coming in that
+                        * will eventually succeed in grabbing this lock
+                        */
+                       s = ml_set_interrupts_enabled(FALSE);
+
+                       if (lck_spin_try_lock(kdw_spin_lock)) {
+
+                               if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
+                                       kds_waiter = 0;
+                                       need_kds_wakeup = TRUE;
+                               }
+                               if (kde_waiter && kd_entropy_indx >= kd_entropy_count) {
+                                       kde_waiter = 0;
+                                       need_kde_wakeup = TRUE;
+                               }
+                               lck_spin_unlock(kdw_spin_lock);
+                       }
+                       ml_set_interrupts_enabled(s);
+                       
+                       if (need_kds_wakeup == TRUE)
+                               wakeup(&kds_waiter);
+                       if (need_kde_wakeup == TRUE)
+                               wakeup(&kde_waiter);
+               }
+       }
 }
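
The recording path itself is now lock-free. With preemption disabled, the writer locates its CPU's tail unit, and the only step that claims a slot is the compare-and-swap on kds_bufindx: whoever advances bindx to bindx + 1 owns kds_records[bindx]. Losing the race (to an interrupt handler logging on the same CPU, for instance) simply retries, and a full unit routes the writer through allocate_storage_unit() first. A portable C11 rendering of the reservation step, not the kernel's code (OSCompareAndSwap is replaced by a standard atomic):

    #include <stdatomic.h>
    #include <stdint.h>

    #define EVENTS_PER_STORAGE_UNIT 2048

    struct unit { _Atomic uint32_t bufindx; /* ... kd_buf records[] ... */ };

    /* returns the claimed slot index, or -1 if the unit is full */
    static int claim_slot(struct unit *u)
    {
            uint32_t bindx = atomic_load(&u->bufindx);

            for (;;) {
                    if (bindx >= EVENTS_PER_STORAGE_UNIT)
                            return -1;              /* caller must allocate a fresh unit */
                    /* on failure, bindx is reloaded with the current value */
                    if (atomic_compare_exchange_weak(&u->bufindx, &bindx, bindx + 1))
                            return (int)bindx;      /* kds_records[bindx] is exclusively ours */
            }
    }
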
 
 void
@@ -584,27 +896,32 @@ kernel_debug1(
        uintptr_t       arg4,
        uintptr_t       arg5)
 {
-       kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
+       kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 1);
 }
 
-static void
-kdbg_lock_init(void)
+/*
+ * Support syscall SYS_kdebug_trace
+ */
+int
+kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
 {
-       host_basic_info_data_t hinfo;
-       mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+       if ( __probable(kdebug_enable == 0) )
+               return(EINVAL);
+  
+       kernel_debug_internal(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, (uintptr_t)thread_tid(current_thread()), 0);
 
-       if (kdebug_flags & KDBG_LOCKINIT)
-               return;
+       return(0);
+}
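
With the early-out marked __probable, a syscall while tracing is disabled costs one well-predicted branch and returns EINVAL; when enabled, the caller's thread ID now rides along as arg5. A user-space sketch of exercising the syscall, assuming the SYS_kdebug_trace number and the KDBG_CODE()/DBG_APPS macros from the SDK headers (the subclass and code values are made up):

    #include <stdint.h>
    #include <sys/kdebug.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            /* arbitrary app-level debugid: DBG_APPS class, subclass 7, code 1 */
            uint32_t code = KDBG_CODE(DBG_APPS, 7, 1);

            syscall(SYS_kdebug_trace, code | DBG_FUNC_START, 1, 2, 3, 4);
            /* ... the work being measured ... */
            syscall(SYS_kdebug_trace, code | DBG_FUNC_END, 0, 0, 0, 0);
            return 0;
    }
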
 
-       /* get the number of cpus and cache it */
-#define BSD_HOST 1
-       host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
-       kd_cpus = hinfo.logical_cpu_max;
 
-       if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip,
-                      sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
-               return;
+static void
+kdbg_lock_init(void)
+{
+       if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
+               return;
 
+       trace_handler_map_ctrl_page((uintptr_t)&kd_ctrl_page, sizeof(kd_ctrl_page), sizeof(struct kd_storage), sizeof(union kds_ptr));
+       
        /*
         * allocate lock group attribute and group
         */
@@ -618,25 +935,26 @@ kdbg_lock_init(void)
 
 
        /*
-        * allocate and initialize spin lock and mutex
+        * allocate and initialize the mutex and spin locks
         */
        kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
        kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
+       kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
 
-       kdebug_flags |= KDBG_LOCKINIT;
+       kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
 }
 
 
 int
-kdbg_bootstrap(void)
+kdbg_bootstrap(boolean_t early_trace)
 {
-        kdebug_flags &= ~KDBG_WRAPPED;
+        kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
 
-       return (create_buffers());
+       return (create_buffers(early_trace));
 }
 
 int
-kdbg_reinit(void)
+kdbg_reinit(boolean_t early_trace)
 {
        int ret = 0;
 
@@ -645,8 +963,7 @@ kdbg_reinit(void)
         * First make sure we're not in
         * the middle of cutting a trace
         */
-       kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
-       kdebug_slowcheck |= SLOW_NOLOG;
+       kdbg_set_tracing_enabled(FALSE);
 
        /*
         * make sure the SLOW_NOLOG is seen
@@ -657,14 +974,17 @@ kdbg_reinit(void)
 
        delete_buffers();
 
-       if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
+       if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
                kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
-               kdebug_flags &= ~KDBG_MAPINIT;
+               kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
                kd_mapsize = 0;
                kd_mapptr = (kd_threadmap *) 0;
                kd_mapcount = 0;
        }  
-       ret = kdbg_bootstrap();
+       ret = kdbg_bootstrap(early_trace);
+
+       RAW_file_offset = 0;
+       RAW_file_written = 0;
 
        return(ret);
 }
@@ -750,7 +1070,7 @@ kdbg_mapinit(void)
        vm_offset_t     tts_maptomem=0;
        int             i;
 
-        if (kdebug_flags & KDBG_MAPINIT)
+        if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
                return;
 
        /*
@@ -821,7 +1141,7 @@ kdbg_mapinit(void)
        }
 
        if (kd_mapptr && tts_mapptr) {
-               kdebug_flags |= KDBG_MAPINIT;
+               kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
 
                /*
                 * Initialize thread map data
@@ -847,9 +1167,7 @@ kdbg_clear(void)
         * First make sure we're not in
         * the middle of cutting a trace
         */
-
-       kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
-       kdebug_slowcheck = SLOW_NOLOG;
+       kdbg_set_tracing_enabled(FALSE);
 
        /*
         * make sure the SLOW_NOLOG is seen
@@ -858,24 +1176,24 @@ kdbg_clear(void)
         */
        IOSleep(100);
 
-       if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
-               kdebug_slowcheck |= SLOW_ENTROPY;
-
         global_state_pid = -1;
-       kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
-       kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
-       kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
+       kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+       kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
+       kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
 
        delete_buffers();
 
        /* Clean up the thread map buffer */
-       kdebug_flags &= ~KDBG_MAPINIT;
+       kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
        if (kd_mapptr) {
                kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
                kd_mapptr = (kd_threadmap *) 0;
        }
        kd_mapsize = 0;
        kd_mapcount = 0;
+
+       RAW_file_offset = 0;
+       RAW_file_written = 0;
 }
 
 int
@@ -896,17 +1214,17 @@ kdbg_setpid(kd_regtype *kdr)
                                /*
                                 * turn on pid check for this and all pids
                                 */
-                               kdebug_flags |= KDBG_PIDCHECK;
-                               kdebug_flags &= ~KDBG_PIDEXCLUDE;
-                               kdebug_slowcheck |= SLOW_CHECKS;
-                               
+                               kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
+                               kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
+                               kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+
                                p->p_kdebug = 1;
                        } else {
                                /*
                                 * turn off pid check for this pid value
                                 * Don't turn off all pid checking though
                                 *
-                                * kdebug_flags &= ~KDBG_PIDCHECK;
+                                * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
                                 */   
                                p->p_kdebug = 0;
                        }
@@ -938,9 +1256,9 @@ kdbg_setpidex(kd_regtype *kdr)
                                /*
                                 * turn on pid exclusion
                                 */
-                               kdebug_flags |= KDBG_PIDEXCLUDE;
-                               kdebug_flags &= ~KDBG_PIDCHECK;
-                               kdebug_slowcheck |= SLOW_CHECKS;
+                               kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
+                               kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
+                               kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
 
                                p->p_kdebug = 1;
                        }
@@ -949,7 +1267,7 @@ kdbg_setpidex(kd_regtype *kdr)
                                 * turn off pid exclusion for this pid value
                                 * Don't turn off all pid exclusion though
                                 *
-                                * kdebug_flags &= ~KDBG_PIDEXCLUDE;
+                                * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
                                 */   
                                p->p_kdebug = 0;
                        }
@@ -975,14 +1293,8 @@ kdbg_setrtcdec(kd_regtype *kdr)
 
        if (decval && decval < KDBG_MINRTCDEC)
                ret = EINVAL;
-#ifdef ppc
-       else {
-               maxDec = decval ? decval : 0x7FFFFFFF;  /* Set or reset the max decrementer */
-       }
-#else
        else
                ret = ENOTSUP;
-#endif /* ppc */
 
        return(ret);
 }
@@ -999,10 +1311,10 @@ kdbg_setreg(kd_regtype * kdr)
                val_2 = (kdr->value2 & 0xff);
                kdlog_beg = (val_1<<24);
                kdlog_end = (val_2<<24);
-               kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
-               kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
-               kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
-               kdebug_slowcheck |= SLOW_CHECKS;
+               kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+               kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
+               kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
+               kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
                break;
        case KDBG_SUBCLSTYPE :
                val_1 = (kdr->value1 & 0xff);
@@ -1010,36 +1322,36 @@ kdbg_setreg(kd_regtype * kdr)
                val = val_2 + 1;
                kdlog_beg = ((val_1<<24) | (val_2 << 16));
                kdlog_end = ((val_1<<24) | (val << 16));
-               kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
-               kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
-               kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
-               kdebug_slowcheck |= SLOW_CHECKS;
+               kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+               kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
+               kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
+               kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
                break;
        case KDBG_RANGETYPE :
                kdlog_beg = (kdr->value1);
                kdlog_end = (kdr->value2);
-               kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
-               kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
-               kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
-               kdebug_slowcheck |= SLOW_CHECKS;
+               kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+               kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
+               kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
+               kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
                break;
        case KDBG_VALCHECK:
                kdlog_value1 = (kdr->value1);
                kdlog_value2 = (kdr->value2);
                kdlog_value3 = (kdr->value3);
                kdlog_value4 = (kdr->value4);
-               kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
-               kdebug_flags &= ~KDBG_RANGECHECK;    /* Turn off range check */
-               kdebug_flags |= KDBG_VALCHECK;       /* Turn on specific value check  */
-               kdebug_slowcheck |= SLOW_CHECKS;
+               kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+               kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK;    /* Turn off range check */
+               kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK;       /* Turn on specific value check  */
+               kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
                break;
        case KDBG_TYPENONE :
-               kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+               kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
 
-               if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
-                       kdebug_slowcheck |= SLOW_CHECKS;
+               if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
+                       kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
                else
-                       kdebug_slowcheck &= ~SLOW_CHECKS;
+                       kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
 
                kdlog_beg = 0;
                kdlog_end = 0;
@@ -1064,8 +1376,8 @@ kdbg_getreg(__unused kd_regtype * kdr)
                val_2 = val_1 + 1;
                kdlog_beg = (val_1<<24);
                kdlog_end = (val_2<<24);
-               kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
-               kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
+               kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+               kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
                break;
        case KDBG_SUBCLSTYPE :
                val_1 = (kdr->value1 & 0xff);
@@ -1073,17 +1385,17 @@ kdbg_getreg(__unused kd_regtype * kdr)
                val = val_2 + 1;
                kdlog_beg = ((val_1<<24) | (val_2 << 16));
                kdlog_end = ((val_1<<24) | (val << 16));
-               kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
-               kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
+               kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+               kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
                break;
        case KDBG_RANGETYPE :
                kdlog_beg = (kdr->value1);
                kdlog_end = (kdr->value2);
-               kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
-               kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
+               kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+               kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
                break;
        case KDBG_TYPENONE :
-               kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
+               kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
                kdlog_beg = 0;
                kdlog_end = 0;
                break;
@@ -1107,21 +1419,56 @@ kdbg_readmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
 
        if (count && (count <= kd_mapcount))
        {
-               if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
+               if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
                {
                        if (*number < kd_mapsize)
                                ret = EINVAL;
                        else
                        {
-                               if (vp) {
-                                       vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
-                                               UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
-                                       RAW_file_offset += sizeof(uint32_t);
-
-                                       vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, kd_mapsize, RAW_file_offset,
-                                               UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+                               if (vp)
+                               {
+                                       RAW_header      header;
+                                       clock_sec_t     secs;
+                                       clock_usec_t    usecs;
+                                       char    *pad_buf;
+                                       int     pad_size;
+
+                                       header.version_no = RAW_VERSION1;
+                                       header.thread_count = count;
+
+                                       clock_get_calendar_microtime(&secs, &usecs);
+                                       header.TOD_secs = secs;
+                                       header.TOD_usecs = usecs;
+                                       
+                                       ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
+                                                     UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+                                       if (ret)
+                                               goto write_error;
+                                       RAW_file_offset += sizeof(RAW_header);
+
+                                       ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, kd_mapsize, RAW_file_offset,
+                                                     UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+                                       if (ret)
+                                               goto write_error;
                                        RAW_file_offset += kd_mapsize;
 
+                                       pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
+
+                                       if (pad_size)
+                                       {
+                                               pad_buf = (char *)kalloc(pad_size);
+                                               memset(pad_buf, 0, pad_size);
+
+                                               ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
+                                                       UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+                                               kfree(pad_buf, pad_size);
+                                               
+                                               if (ret)
+                                                       goto write_error;
+                                               RAW_file_offset += pad_size;
+                                       }
+                                       RAW_file_written += sizeof(RAW_header) + kd_mapsize + pad_size;
+
                                } else {
                                        if (copyout(kd_mapptr, buffer, kd_mapsize))
                                                ret = EINVAL;
@@ -1134,22 +1481,24 @@ kdbg_readmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
        else
                ret = EINVAL;
 
-       if (ret && vp) {
+       if (ret && vp)
+       {
                count = 0;
 
                vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
                        UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
                RAW_file_offset += sizeof(uint32_t);
+               RAW_file_written += sizeof(uint32_t);
        }
-       if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
+write_error:
+       if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
        {
                kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
-               kdebug_flags &= ~KDBG_MAPINIT;
+               kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
                kd_mapsize = 0;
                kd_mapptr = (kd_threadmap *) 0;
                kd_mapcount = 0;
        }  
-
        return(ret);
 }
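
The rewritten map writer emits a RAW_header (version, thread count, time of day), the thread map itself, and then zero-pads the file to the next page boundary so the event stream that follows starts page-aligned. Note that an already-aligned offset still receives a full page of zeros, because pad_size comes out as PAGE_SIZE rather than 0. The alignment arithmetic in isolation, assuming a 4 KiB page for the example:

    #include <stdint.h>

    #define EX_PAGE_SIZE 4096ULL                /* stands in for PAGE_SIZE    */
    #define EX_PAGE_MASK (EX_PAGE_SIZE - 1)     /* stands in for PAGE_MASK_64 */

    static uint64_t pad_for(uint64_t off)
    {
            return EX_PAGE_SIZE - (off & EX_PAGE_MASK);
    }
    /* pad_for(0x1234) == 0x0dcc: the next write lands at 0x2000      */
    /* pad_for(0x2000) == 0x1000: a whole page of zeros, not zero pad */
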
 
@@ -1158,44 +1507,85 @@ kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
 {
        int avail = *number;
        int ret = 0;
+       int s;
+       u_int64_t abstime;
+       u_int64_t ns;
+       int wait_result = THREAD_AWAKENED;
+
 
        if (kd_entropy_buffer)
                return(EBUSY);
 
-       kd_entropy_count = avail/sizeof(mach_timespec_t);
-       kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
-       kd_entropy_indx = 0;
+       if (ms_timeout < 0)
+               return(EINVAL);
+
+       kd_entropy_count = avail/sizeof(uint64_t);
+
+       if (kd_entropy_count > MAX_ENTROPY_COUNT || kd_entropy_count == 0) {
+               /*
+                * Enforce maximum entropy entries
+                */
+               return(EINVAL);
+       }
+       kd_entropy_bufsize = kd_entropy_count * sizeof(uint64_t);
 
        /*
-        * Enforce maximum entropy entries here if needed
         * allocate entropy buffer
         */
-       if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
-                      (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
+       if (kmem_alloc(kernel_map, &kd_entropy_buftomem, (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
                kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
        } else {
                kd_entropy_buffer = (uint64_t *) 0;
                kd_entropy_count = 0;
-               kd_entropy_indx = 0;
-               return (EINVAL);
+
+               return (ENOMEM);
        }
+       kd_entropy_indx = 0;
 
-       if (ms_timeout < 10)
-               ms_timeout = 10;
+       KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_START, ms_timeout, kd_entropy_count, 0, 0, 0);
 
        /*
         * Enable entropy sampling
         */
-       kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
-       kdebug_slowcheck |= SLOW_ENTROPY;
+       kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, TRUE);
 
-       ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));
+       if (ms_timeout) {
+               ns = (u_int64_t)ms_timeout * (u_int64_t)(1000 * 1000);
+               nanoseconds_to_absolutetime(ns,  &abstime );
+               clock_absolutetime_interval_to_deadline( abstime, &abstime );
+       } else
+               abstime = 0;
+
+       s = ml_set_interrupts_enabled(FALSE);
+       lck_spin_lock(kdw_spin_lock);
+
+       while (wait_result == THREAD_AWAKENED && kd_entropy_indx < kd_entropy_count) {
+
+               kde_waiter = 1;
+
+               if (abstime) {
+                       /*
+                        * wait for the specified timeout or
+                        * until we've hit our sample limit
+                        */
+                       wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE, abstime);
+               } else {
+                       /*
+                        * wait until we've hit our sample limit
+                        */
+                       wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE);
+               }
+               kde_waiter = 0;
+       }
+       lck_spin_unlock(kdw_spin_lock);
+       ml_set_interrupts_enabled(s);
 
        /*
         * Disable entropy sampling
         */
-       kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
-       kdebug_slowcheck &= ~SLOW_ENTROPY;
+       kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, FALSE);
+
+       KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_END, ms_timeout, kd_entropy_indx, 0, 0, 0);
 
        *number = 0;
        ret = 0;
@@ -1204,10 +1594,10 @@ kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
                /*
                 * copyout the buffer
                 */
-               if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
+               if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(uint64_t)))
                        ret = EINVAL;
                else
-                       *number = kd_entropy_indx;
+                       *number = kd_entropy_indx * sizeof(uint64_t);
        }
        /*
         * Always cleanup
@@ -1250,14 +1640,16 @@ kdbg_set_nkdbufs(unsigned int value)
 void
 kdbg_control_chud(int val, void *fn)
 {
-        if (val) {
-                /* enable chudhook */
+       kdbg_lock_init();
+    
+       if (val) {
+               /* enable chudhook */
                kdebug_chudhook = fn;
-               kdebug_enable |= KDEBUG_ENABLE_CHUD;
+               kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
        }
        else {
-               /* disable chudhook */
-                kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
+               /* disable chudhook */
+               kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
                kdebug_chudhook = 0;
        }
 }
@@ -1272,22 +1664,24 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
        kd_regtype kd_Reg;
        kbufinfo_t kd_bufinfo;
        pid_t curpid;
-       struct proc *p, *curproc;
+       proc_t p, curproc;
 
        if (name[0] == KERN_KDGETENTROPY ||
+               name[0] == KERN_KDWRITETR ||
+               name[0] == KERN_KDWRITEMAP ||
                name[0] == KERN_KDEFLAGS ||
                name[0] == KERN_KDDFLAGS ||
                name[0] == KERN_KDENABLE ||
                name[0] == KERN_KDSETBUF) {
                
                if ( namelen < 2 )
-               return(EINVAL);
+                       return(EINVAL);
                value = name[1];
        }
        
        kdbg_lock_init();
 
-       if ( !(kdebug_flags & KDBG_LOCKINIT))
+       if ( !(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
                return(ENOSPC);
 
        lck_mtx_lock(kd_trace_mtx_sysctl);
@@ -1308,12 +1702,12 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
                kd_bufinfo.nkdbufs = nkdbufs;
                kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);
 
-               if ( (kdebug_slowcheck & SLOW_NOLOG) )
+               if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) )
                        kd_bufinfo.nolog = 1;
                else
                        kd_bufinfo.nolog = 0;
 
-               kd_bufinfo.flags = kdebug_flags;
+               kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
 #if defined(__LP64__)
                kd_bufinfo.flags |= KDBG_LP64;
 #endif
@@ -1371,11 +1765,11 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
        switch(name[0]) {
                case KERN_KDEFLAGS:
                        value &= KDBG_USERFLAGS;
-                       kdebug_flags |= value;
+                       kd_ctrl_page.kdebug_flags |= value;
                        break;
                case KERN_KDDFLAGS:
                        value &= KDBG_USERFLAGS;
-                       kdebug_flags &= ~value;
+                       kd_ctrl_page.kdebug_flags &= ~value;
                        break;
                case KERN_KDENABLE:
                        /*
@@ -1385,25 +1779,22 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
                                /*
                                 * enable only if buffer is initialized
                                 */
-                               if (!(kdebug_flags & KDBG_BUFINIT)) {
+                               if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT)) {
                                        ret = EINVAL;
                                        break;
                                }
                                kdbg_mapinit();
 
-                               kdebug_enable |= KDEBUG_ENABLE_TRACE;
-                               kdebug_slowcheck &= ~SLOW_NOLOG;
-                       }
-                       else {
-                               kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
-                               kdebug_slowcheck |= SLOW_NOLOG;
+                               kdbg_set_tracing_enabled(TRUE);
                        }
+                       else
+                               kdbg_set_tracing_enabled(FALSE);
                        break;
                case KERN_KDSETBUF:
                        kdbg_set_nkdbufs(value);
                        break;
                case KERN_KDSETUP:
-                       ret = kdbg_reinit();
+                       ret = kdbg_reinit(FALSE);
                        break;
                case KERN_KDREMOVE:
                        kdbg_clear();
@@ -1432,6 +1823,86 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
                case KERN_KDREADTR:
                        ret = kdbg_read(where, sizep, NULL, NULL);
                        break;
+               case KERN_KDWRITETR:
+               case KERN_KDWRITEMAP:
+               {
+                       struct  vfs_context context;
+                       struct  fileproc *fp;
+                       size_t  number;
+                       vnode_t vp;
+                       int     fd;
+
+                       if (name[0] == KERN_KDWRITETR) {
+                               int s;
+                               int wait_result = THREAD_AWAKENED;
+                               u_int64_t abstime;
+                               u_int64_t ns;
+
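+                               /*
+                                * *sizep optionally carries a timeout in
+                                * milliseconds; sleep until enough storage
+                                * units are in use (n_storage_threshold)
+                                * or the deadline expires
+                                */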
+                               if (*sizep) {
+                                       ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
+                                       nanoseconds_to_absolutetime(ns,  &abstime );
+                                       clock_absolutetime_interval_to_deadline( abstime, &abstime );
+                               } else
+                                       abstime = 0;
+
+                               s = ml_set_interrupts_enabled(FALSE);
+                               lck_spin_lock(kdw_spin_lock);
+
+                               while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
+
+                                       kds_waiter = 1;
+
+                                       if (abstime)
+                                               wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
+                                       else
+                                               wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
+
+                                       kds_waiter = 0;
+                               }
+                               lck_spin_unlock(kdw_spin_lock);
+                               ml_set_interrupts_enabled(s);
+                       }
+                       p = current_proc();
+                       fd = value;
+
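+                       /*
+                        * translate the caller's fd into a vnode under the
+                        * fd lock; only vnode-backed files can receive
+                        * the trace output
+                        */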
+                       proc_fdlock(p);
+                       if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
+                               proc_fdunlock(p);
+                               break;
+                       }
+                       context.vc_thread = current_thread();
+                       context.vc_ucred = fp->f_fglob->fg_cred;
+
+                       if (fp->f_fglob->fg_type != DTYPE_VNODE) {
+                               fp_drop(p, fd, fp, 1);
+                               proc_fdunlock(p);
+
+                               ret = EBADF;
+                               break;
+                       }
+                       vp = (struct vnode *)fp->f_fglob->fg_data;
+                       proc_fdunlock(p);
+
+                       if ((ret = vnode_getwithref(vp)) == 0) {
+
+                               if (name[0] == KERN_KDWRITETR) {
+                                       number = nkdbufs * sizeof(kd_buf);
+
+                                       KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+                                       ret = kdbg_read(0, &number, vp, &context);
+                                       KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_END, number, 0, 0, 0, 0);
+
+                                       *sizep = number;
+                               } else {
+                                       number = kd_mapsize;
+                                       kdbg_readmap(0, &number, vp, &context);
+                               }
+                               vnode_put(vp);
+                       }
+                       fp_drop(p, fd, fp, 0);
+
+                       break;
+               }
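+               /*
+                * hypothetical user-space sketch (per the handling above,
+                * name[1] carries the target fd, and *sizep carries a
+                * millisecond timeout on entry, bytes written on return):
+                *
+                *     int mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDWRITETR, fd };
+                *     size_t size = 1000;   // wait up to 1 second for the
+                *                           // storage threshold to be hit
+                *     sysctl(mib, 4, NULL, &size, NULL, 0);
+                */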
                case KERN_KDPIDTR:
                        if (size < sizeof(kd_regtype)) {
                                ret = EINVAL;
@@ -1489,25 +1960,32 @@ int
 kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
 {
        unsigned int count;
-       unsigned int cpu, mincpu;
+       unsigned int cpu, min_cpu;
        uint64_t  mintime, t;
-       int error = 0,s = 0;
+       int error = 0;
        kd_buf *tempbuf;
-       kd_buf *rcursor;
-       kd_buf *min_rcursor;
-       struct kd_storage *kdsp;
+       uint32_t rcursor;
+       kd_buf lostevent;
+       union kds_ptr kdsp;
+       struct kd_storage *kdsp_actual;
        struct kd_bufinfo *kdbp;
+       struct kd_bufinfo *min_kdbp;
        uint32_t tempbuf_count;
        uint32_t tempbuf_number;
        uint32_t old_kdebug_flags;
        uint32_t old_kdebug_slowcheck;
+       boolean_t lostevents = FALSE;
+       boolean_t out_of_events = FALSE;
 
        count = *number/sizeof(kd_buf);
        *number = 0;
 
-       if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
+       if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
                return EINVAL;
 
+       memset(&lostevent, 0, sizeof(lostevent));
+       lostevent.debugid = TRACEDBG_CODE(DBG_TRACE_INFO, 2);
+
        /*
         * because we hold kd_trace_mtx_sysctl, no other control threads can 
         * be playing with kdebug_flags... the code that cuts new events could
@@ -1515,17 +1993,8 @@ kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
         * storage chunk which is where it examines kdebug_flags... if it's adding
         * to the same chunk we're reading from, no problem... 
         */
-       s = ml_set_interrupts_enabled(FALSE);
-       lck_spin_lock(kds_spin_lock);
 
-       old_kdebug_slowcheck = kdebug_slowcheck;
-       old_kdebug_flags = kdebug_flags;
-
-       kdebug_flags &= ~KDBG_WRAPPED;
-       kdebug_flags |= KDBG_NOWRAP;
-
-       lck_spin_unlock(kds_spin_lock);
-       ml_set_interrupts_enabled(s);
+       disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);
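+       /*
+        * disable_wrap() clears KDBG_WRAPPED and sets KDBG_NOWRAP under
+        * kds_spin_lock with interrupts disabled, handing back the prior
+        * flags so enable_wrap() can restore them when the read completes
+        */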
 
        if (count > nkdbufs)
                count = nkdbufs;
@@ -1538,66 +2007,86 @@ kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
                tempbuf_number = 0;
 
                while (tempbuf_count) {
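+                       /*
+                        * each pass scans every CPU's storage list for its
+                        * oldest unread event and copies out the one with
+                        * the smallest timestamp, yielding a single
+                        * time-ordered stream; kd_list_head is a packed
+                        * kds_ptr, with KDS_PTR_NULL marking an empty list
+                        */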
-                       mintime = 0xffffffffffffffffULL; /* all actual timestamps are below */
-                       mincpu = -1;
-                       min_rcursor = NULL;
+                       mintime = 0xffffffffffffffffULL;
+                       min_kdbp = NULL;
+                       min_cpu = 0;
 
                        for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_cpus; cpu++, kdbp++) {
 
-                               if ((kdsp = kdbp->kd_list_head) == NULL)
+                               if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
                                        continue;
-                               rcursor = kdsp->kds_readlast;
+                               kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
+
+                               rcursor = kdsp_actual->kds_readlast;
 
-                               if (rcursor == kdsp->kds_bufptr)
+                               if (rcursor == kdsp_actual->kds_bufindx)
                                        continue;
-                               t = kdbg_get_timestamp(rcursor);
 
+                               t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);
+
+                               if (t < kdsp_actual->kds_timestamp) {
+                                       /*
+                                        * a timestamp below the storage unit's
+                                        * creation time indicates we've not yet
+                                        * completed filling in this event; this
+                                        * should only occur in the buffer that
+                                        * the record head is utilizing.  we'll
+                                        * pick these events up on the next call
+                                        * to kdbg_read.  bail out here so that
+                                        * we don't produce an out-of-order
+                                        * timestream by continuing to read the
+                                        * other CPUs' timestream(s)
+                                        */
+                                       out_of_events = TRUE;
+                                       break;
+                               }
                                if (t < mintime) {
-                                       mincpu = cpu;
                                        mintime = t;
-                                       min_rcursor = rcursor;
+                                       min_kdbp = kdbp;
+                                       min_cpu = cpu;
                                }
                        }
-                       if (mincpu == (unsigned int)-1)
-                               /*
+                       if (min_kdbp == NULL || out_of_events == TRUE) {
+                               /*
                                 * all buffers ran empty
                                 */
-                               break;
-                       
-                       kdbp = &kdbip[mincpu];
-                       kdsp = kdbp->kd_list_head;
+                               out_of_events = TRUE;
+                               break;
+                       }
+                       kdsp = min_kdbp->kd_list_head;
+                       kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
 
-                       *tempbuf = *min_rcursor;
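+                       /*
+                        * a storage unit that dropped events yields a
+                        * synthetic lost-events record, stamped with the
+                        * next readable event's timestamp, so consumers
+                        * can detect the gap
+                        */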
+                       if (kdsp_actual->kds_lostevents == TRUE) {
+                               lostevent.timestamp = kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp;
+                               *tempbuf = lostevent;
+                               
+                               kdsp_actual->kds_lostevents = FALSE;
+                               lostevents = TRUE;
 
-                       if (mintime != kdbg_get_timestamp(tempbuf)) {
-                               /*
-                                * we stole this storage unit and used it
-                                * before we could slurp the selected event out
-                                * so we need to re-evaluate
-                                */
-                               continue;
+                               goto nextevent;
                        }
+                       *tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];
+
+                       if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
+                               release_storage_unit(min_cpu, kdsp.raw);
+
                        /*
                         * Watch for out of order timestamps
                         */     
-                       if (mintime < kdbp->kd_prev_timebase) {
+                       if (mintime < min_kdbp->kd_prev_timebase) {
                                /*
                                 * if so, use the previous timestamp + 1 cycle
                                 */
-                               kdbp->kd_prev_timebase++;
-                               kdbg_set_timestamp_and_cpu(tempbuf, kdbp->kd_prev_timebase, mincpu);
+                               min_kdbp->kd_prev_timebase++;
+                               kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
                        } else
-                               kdbp->kd_prev_timebase = mintime;
-
-                       if (min_rcursor == kdsp->kds_readlast)
-                               kdsp->kds_readlast++;
-
-                       if (kdsp->kds_readlast == kdsp->kds_buflast)
-                               release_storage_unit(kdbp, kdsp);
-
+                               min_kdbp->kd_prev_timebase = mintime;
+nextevent:
                        tempbuf_count--;
                        tempbuf_number++;
                        tempbuf++;
+
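+                       /*
+                        * when writing to a file, break out of the fill
+                        * loop every RAW_FLUSH_SIZE bytes so the buffered
+                        * output can be pushed to disk
+                        */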
+                       if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
+                               break;
                }
                if (tempbuf_number) {
 
@@ -1606,6 +2095,12 @@ kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
                                                UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
 
                                RAW_file_offset += (tempbuf_number * sizeof(kd_buf));
+       
+                               if (RAW_file_written >= RAW_FLUSH_SIZE) {
+                                       cluster_push(vp, 0);
+
+                                       RAW_file_written = 0;
+                               }
                        } else {
                                error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
                                buffer += (tempbuf_number * sizeof(kd_buf));
@@ -1618,7 +2113,7 @@ kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
                        count   -= tempbuf_number;
                        *number += tempbuf_number;
                }
-               if (tempbuf_count)
+               if (out_of_events == TRUE)
                       /*
                        * all trace buffers are empty
                        */
@@ -1628,17 +2123,7 @@ kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
                        tempbuf_count = KDCOPYBUF_COUNT;
        }
        if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
-
-               s = ml_set_interrupts_enabled(FALSE);
-               lck_spin_lock(kds_spin_lock);
-
-               kdebug_flags &= ~KDBG_NOWRAP;
-
-               if ( !(old_kdebug_slowcheck & SLOW_NOLOG))
-                       kdebug_slowcheck &= ~SLOW_NOLOG;
-
-               lck_spin_unlock(kds_spin_lock);
-               ml_set_interrupts_enabled(s);
+               enable_wrap(old_kdebug_slowcheck, lostevents);
        }
        return (error);
 }
@@ -1656,9 +2141,6 @@ unsigned char *getProcName(struct proc *proc) {
 #if defined(__i386__) || defined (__x86_64__)
 #define TRAP_DEBUGGER __asm__ volatile("int3");
 #endif
-#ifdef __ppc__
-#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
-#endif
 
 #define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
 
@@ -1701,7 +2183,6 @@ int
 stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
        int error = 0;
 
-
        if ((error = suser(kauth_cred_get(), &p->p_acflag)))
                 return(error);
 
@@ -1779,14 +2260,13 @@ error_exit:
 
 void
 start_kern_tracing(unsigned int new_nkdbufs) {
+
        if (!new_nkdbufs)
                return;
        kdbg_set_nkdbufs(new_nkdbufs);
        kdbg_lock_init();
-       kdbg_reinit();
-       kdebug_enable |= KDEBUG_ENABLE_TRACE;
-       kdebug_slowcheck &= ~SLOW_NOLOG;
-       kdbg_mapinit();
+       kdbg_reinit(TRUE);
+       kdbg_set_tracing_enabled(TRUE);
 
 #if defined(__i386__) || defined(__x86_64__)
        uint64_t now = mach_absolute_time();
@@ -1808,7 +2288,7 @@ kdbg_dump_trace_to_file(const char *filename)
        size_t          number;
 
 
-       if (kdebug_enable & (KDEBUG_ENABLE_CHUD | KDEBUG_ENABLE_ENTROPY))
+       if ( !(kdebug_enable & KDEBUG_ENABLE_TRACE))
                return;
 
         if (global_state_pid != -1) {
@@ -1824,6 +2304,7 @@ kdbg_dump_trace_to_file(const char *filename)
        KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);
 
        kdebug_enable = 0;
+       kd_ctrl_page.enabled = 0;
 
        ctx = vfs_context_kernel();
 
@@ -1840,3 +2321,44 @@ kdbg_dump_trace_to_file(const char *filename)
 
        sync(current_proc(), (void *)NULL, (int *)NULL);
 }
+
+/* Helper function for filling in the BSD name for an address space
+ * Defined here because the machine bindings know only Mach threads
+ * and nothing about BSD processes.
+ *
+ * FIXME: need to grab a lock during this?
+ */
+void kdbg_get_task_name(char* name_buf, int len, task_t task)
+{
+       proc_t proc;
+       
+       /* Note: we can't use thread->task (and functions that rely on it) here 
+        * because it hasn't been initialized yet when this function is called.
+        * We use the explicitly-passed task parameter instead.
+        */
+       proc = get_bsdtask_info(task);
+       if (proc != PROC_NULL)
+               snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
+       else
+               snprintf(name_buf, len, "%p [!bsd]", task);
+}
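+
+/*
+ * a minimal (hypothetical) usage sketch -- callers pass a task and a
+ * fixed-size buffer, e.g.:
+ *
+ *     char name_buf[20];
+ *
+ *     kdbg_get_task_name(name_buf, sizeof(name_buf), task);
+ *     // name_buf now holds "p_comm/pid", or "0x... [!bsd]" for
+ *     // tasks with no BSD process
+ */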
+
+
+
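+/*
+ * these hooks are intentionally empty when NATIVE_TRACE_FACILITY is
+ * defined: the in-kernel facility owns its control page and buffers
+ * directly, so there is nothing to map for an external trace handler
+ */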
+#if defined(NATIVE_TRACE_FACILITY)
+void trace_handler_map_ctrl_page(__unused uintptr_t addr, __unused size_t ctrl_page_size, __unused size_t storage_size, __unused size_t kds_ptr_size)
+{
+}
+void trace_handler_map_bufinfo(__unused uintptr_t addr, __unused size_t size)
+{
+}
+void trace_handler_unmap_bufinfo(void)
+{
+}
+void trace_handler_map_buffer(__unused int index, __unused uintptr_t addr, __unused size_t size)
+{
+}
+void trace_handler_unmap_buffer(__unused int index)
+{
+}
+#endif