diff --git a/bsd/kern/kdebug.c b/bsd/kern/kdebug.c
index 71f1ae7c2effeab0c30cf3595f9b623cf53fbb70..65c98080d0554a3c64b094ddd464793dee2ccc07 100644
@@ -32,6 +32,7 @@
 #include <sys/kdebug.h>
 #include <sys/sysproto.h>
 #include <sys/bsdtask_info.h>
+#include <sys/random.h>
 
 #define HZ      100
 #include <mach/clock_types.h>
 
 #include <machine/pal_routines.h>
 
+extern boolean_t kdebug_serial;
+#if KDEBUG_MOJO_TRACE
+#include <sys/kdebugevents.h>
+static void kdebug_serial_print(       /* forward */
+               uint32_t, uint32_t, uint64_t,
+               uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+#endif
+
 /*
  * IOP(s)
  *
@@ -112,12 +121,12 @@ static kd_iop_t* kd_iops = NULL;
 /* XXX should have prototypes, but Mach does not provide one */
 void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
 int cpu_number(void);  /* XXX <machine/...> include path broken */
+void commpage_update_kdebug_enable(void); /* XXX sign */
 
 /* XXX should probably be static, but it's debugging code... */
 int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
 void kdbg_control_chud(int, void *);
 int kdbg_control(int *, u_int, user_addr_t, size_t *);
-int kdbg_getentropy (user_addr_t, size_t *, int);
 int kdbg_readcpumap(user_addr_t, size_t *);
 int kdbg_readcurcpumap(user_addr_t, size_t *);
 int kdbg_readthrmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
@@ -145,18 +154,15 @@ extern void IOSleep(int);
 /* trace enable status */
 unsigned int kdebug_enable = 0;
 
-/* track timestamps for security server's entropy needs */
-uint64_t *       kd_entropy_buffer = 0;
-unsigned int      kd_entropy_bufsize = 0;
-unsigned int      kd_entropy_count  = 0;
-unsigned int      kd_entropy_indx   = 0;
-vm_offset_t       kd_entropy_buftomem = 0;
-
-#define MAX_ENTROPY_COUNT      (128 * 1024)
+/* A static buffer to record events prior to the start of regular logging */
+#define        KD_EARLY_BUFFER_MAX      64
+static kd_buf          kd_early_buffer[KD_EARLY_BUFFER_MAX];
+static int             kd_early_index = 0;
+static boolean_t       kd_early_overflow = FALSE;
 
 #define SLOW_NOLOG     0x01
 #define SLOW_CHECKS    0x02
-#define SLOW_ENTROPY   0x04
+#define SLOW_ENTROPY   0x04                    /* Obsolescent */
 #define SLOW_CHUD      0x08
 
 #define EVENTS_PER_STORAGE_UNIT                2048
@@ -197,7 +203,6 @@ int n_storage_units = 0;
 int    n_storage_buffers = 0;
 int    n_storage_threshold = 0;
 int    kds_waiter = 0;
-int    kde_waiter = 0;
 
 #pragma pack(0)
 struct kd_bufinfo {
@@ -305,7 +310,6 @@ pid_t global_state_pid = -1;       /* Used to control exclusive use of kd_buffer
 #define MACH_SysCall   0x010c0000
 #define DBG_SCALL_MASK 0xffff0000
 
-
 /* task to string structure */
 struct tts
 {
@@ -337,6 +341,29 @@ __private_extern__ void stackshot_lock_init( void );
 
 static uint8_t *type_filter_bitmap;
 
+/*
+ * This allows kperf to swap out the global state pid when kperf ownership is
+ * passed from one process to another. It checks the old global state pid so
+ * that kperf can't accidentally steal control of trace when a non-kperf trace user has
+ * control of trace.
+ */
+void
+kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid);
+
+void
+kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid)
+{
+       if (!(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
+               return;
+
+       lck_mtx_lock(kd_trace_mtx_sysctl);
+
+       if (old_pid == global_state_pid)
+               global_state_pid = new_pid;
+
+       lck_mtx_unlock(kd_trace_mtx_sysctl);
+}
+
 static uint32_t
 kdbg_cpu_count(boolean_t early_trace)
 {
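
The hunk above adds kdbg_swap_global_state_pid() so kperf can hand ownership of the trace facility from one process to another without being able to take it from an unrelated trace client: the swap is done under kd_trace_mtx_sysctl and only if the old pid still matches global_state_pid. A minimal caller sketch (hypothetical; the helper name and arguments are illustrative, not part of this diff):

    /* Hand trace ownership to a successor process; this is a no-op
     * unless old_owner is still the recorded global_state_pid. */
    static void
    kperf_handoff_owner(pid_t old_owner, pid_t new_owner)
    {
            kdbg_swap_global_state_pid(old_owner, new_owner);
    }
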
@@ -356,62 +383,6 @@ kdbg_cpu_count(boolean_t early_trace)
 }
 
 #if MACH_ASSERT
-static boolean_t
-kdbg_iop_list_is_valid(kd_iop_t* iop)
-{
-        if (iop) {
-                /* Is list sorted by cpu_id? */
-                kd_iop_t* temp = iop;
-                do {
-                        assert(!temp->next || temp->next->cpu_id == temp->cpu_id - 1);
-                        assert(temp->next || (temp->cpu_id == kdbg_cpu_count(FALSE) || temp->cpu_id == kdbg_cpu_count(TRUE)));
-                } while ((temp = temp->next));
-
-                /* Does each entry have a function and a name? */
-                temp = iop;
-                do {
-                        assert(temp->callback.func);
-                        assert(strlen(temp->callback.iop_name) < sizeof(temp->callback.iop_name));
-                } while ((temp = temp->next));
-        }
-
-        return TRUE;
-}
-
-static boolean_t
-kdbg_iop_list_contains_cpu_id(kd_iop_t* list, uint32_t cpu_id)
-{
-       while (list) {
-               if (list->cpu_id == cpu_id)
-                       return TRUE;
-               list = list->next;
-       }
-
-       return FALSE;
-}
-
-/*
- * This is a temporary workaround for <rdar://problem/13512084>
- *
- * DO NOT CALL IN RELEASE BUILD, LEAKS ADDRESS INFORMATION!
- */
-static boolean_t
-kdbg_iop_list_check_for_timestamp_rollback(kd_iop_t* list, uint32_t cpu_id, uint64_t timestamp)
-{
-       while (list) {
-               if (list->cpu_id == cpu_id) {
-                       if (list->last_timestamp > timestamp) {
-                               kprintf("%s is sending trace events that have gone backwards in time. Run the following command: \"symbols -2 -lookup 0x%p\" and file a radar against the matching kext.\n", list->callback.iop_name, (void*)list->callback.func);
-                       }
-                       /* Unconditional set mitigates syslog spam */
-                       list->last_timestamp = timestamp;
-                       return TRUE;
-               }
-               list = list->next;
-       }
-
-       return FALSE;
-}
 #endif /* MACH_ASSERT */
 
 static void
@@ -428,15 +399,16 @@ kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
 {
        int s = ml_set_interrupts_enabled(FALSE);
        lck_spin_lock(kds_spin_lock);
-
        if (enabled) {
                kdebug_enable |= trace_type;
                kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
                kd_ctrl_page.enabled = 1;
+               commpage_update_kdebug_enable();
        } else {
                kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
                kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
                kd_ctrl_page.enabled = 0;
+               commpage_update_kdebug_enable();
        }
        lck_spin_unlock(kds_spin_lock);
        ml_set_interrupts_enabled(s);
@@ -524,7 +496,6 @@ create_buffers(boolean_t early_trace)
         */
        kd_ctrl_page.kdebug_iops = kd_iops;
 
-       assert(kdbg_iop_list_is_valid(kd_ctrl_page.kdebug_iops));
 
        /*
         * If the list is valid, it is sorted, newest -> oldest. Each iop entry
@@ -772,6 +743,7 @@ allocate_storage_unit(int cpu)
                if (kdbp_vict == NULL) {
                        kdebug_enable = 0;
                        kd_ctrl_page.enabled = 0;
+                       commpage_update_kdebug_enable();
                        retval = FALSE;
                        goto out;
                }
@@ -906,9 +878,6 @@ kernel_debug_enter(
        }
        
 record_event:
-       assert(kdbg_iop_list_contains_cpu_id(kd_ctrl_page.kdebug_iops, coreid));
-       /* Remove when <rdar://problem/13512084> is closed. */
-       assert(kdbg_iop_list_check_for_timestamp_rollback(kd_ctrl_page.kdebug_iops, coreid, timestamp));
 
        disable_preemption();
 
@@ -918,6 +887,12 @@ record_event:
        kdbp = &kdbip[coreid];
        timestamp &= KDBG_TIMESTAMP_MASK;
 
+#if KDEBUG_MOJO_TRACE
+       if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
+               kdebug_serial_print(coreid, debugid, timestamp,
+                                   arg1, arg2, arg3, arg4, threadid);
+#endif
+
 retry_q:
        kds_raw = kdbp->kd_list_tail;
 
@@ -993,25 +968,14 @@ out1:
 
 
 
-void
-kernel_debug_internal(
-       uint32_t        debugid,
-       uintptr_t       arg1,
-       uintptr_t       arg2,
-       uintptr_t       arg3,
-       uintptr_t       arg4,
-       uintptr_t       arg5,
-       int             entropy_flag);
-
-__attribute__((always_inline)) void
+static void
 kernel_debug_internal(
        uint32_t        debugid,
        uintptr_t       arg1,
        uintptr_t       arg2,
        uintptr_t       arg3,
        uintptr_t       arg4,
-       uintptr_t       arg5,
-       int             entropy_flag)
+       uintptr_t       arg5)
 {
        struct proc     *curproc;
        uint64_t        now;
@@ -1040,30 +1004,6 @@ kernel_debug_internal(
                                chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
                        ml_set_interrupts_enabled(s);
                }
-               if ((kdebug_enable & KDEBUG_ENABLE_ENTROPY) && entropy_flag) {
-
-                       now = mach_absolute_time();
-
-                       s = ml_set_interrupts_enabled(FALSE);
-                       lck_spin_lock(kds_spin_lock);
-
-                       if (kdebug_enable & KDEBUG_ENABLE_ENTROPY) {
-
-                               if (kd_entropy_indx < kd_entropy_count) {
-                                       kd_entropy_buffer[kd_entropy_indx] = now;
-                                       kd_entropy_indx++;
-                               }
-                               if (kd_entropy_indx == kd_entropy_count) {
-                                       /*
-                                        * Disable entropy collection
-                                        */
-                                       kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
-                                       kd_ctrl_page.kdebug_slowcheck &= ~SLOW_ENTROPY;
-                               }
-                       }
-                       lck_spin_unlock(kds_spin_lock);
-                       ml_set_interrupts_enabled(s);
-               }
                if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
                        goto out1;
        
@@ -1129,6 +1069,14 @@ record_event:
 
        cpu = cpu_number();
        kdbp = &kdbip[cpu];
+
+#if KDEBUG_MOJO_TRACE
+       if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
+               kdebug_serial_print(cpu, debugid,
+                                   mach_absolute_time() & KDBG_TIMESTAMP_MASK,
+                                   arg1, arg2, arg3, arg4, arg5);
+#endif
+
 retry_q:
        kds_raw = kdbp->kd_list_tail;
 
@@ -1168,8 +1116,7 @@ retry_q:
 out:
        enable_preemption();
 out1:
-       if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) ||
-           (kde_waiter && kd_entropy_indx >= kd_entropy_count)) {
+       if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
                uint32_t        etype;
                uint32_t        stype;
                
@@ -1180,7 +1127,6 @@ out1:
                    stype == BSC_SysCall || stype == MACH_SysCall) {
 
                        boolean_t need_kds_wakeup = FALSE;
-                       boolean_t need_kde_wakeup = FALSE;
 
                        /*
                         * try to take the lock here to synchronize with the
@@ -1200,18 +1146,12 @@ out1:
                                        kds_waiter = 0;
                                        need_kds_wakeup = TRUE;
                                }
-                               if (kde_waiter && kd_entropy_indx >= kd_entropy_count) {
-                                       kde_waiter = 0;
-                                       need_kde_wakeup = TRUE;
-                               }
                                lck_spin_unlock(kdw_spin_lock);
                        }
                        ml_set_interrupts_enabled(s);
                        
                        if (need_kds_wakeup == TRUE)
                                wakeup(&kds_waiter);
-                       if (need_kde_wakeup == TRUE)
-                               wakeup(&kde_waiter);
                }
        }
 }
@@ -1225,7 +1165,7 @@ kernel_debug(
        uintptr_t       arg4,
        __unused uintptr_t arg5)
 {
-       kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()), 1);
+       kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()));
 }
 
 void
@@ -1237,24 +1177,143 @@ kernel_debug1(
        uintptr_t       arg4,
        uintptr_t       arg5)
 {
-       kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 1);
+       kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5);
+}
+
+void
+kernel_debug_string(const char *message)
+{
+       uintptr_t arg[4] = {0, 0, 0, 0};
+
+       /* Stuff the message string in the args and log it. */
+        strncpy((char *)arg, message, MIN(sizeof(arg), strlen(message)));
+       KERNEL_DEBUG_EARLY(
+               TRACE_INFO_STRING,
+               arg[0], arg[1], arg[2], arg[3]);
 }
 
+extern int     master_cpu;             /* MACH_KERNEL_PRIVATE */
 /*
- * Support syscall SYS_kdebug_trace
+ * Used prior to start_kern_tracing() being called.
+ * Log temporarily into a static buffer.
+ */
+void
+kernel_debug_early(
+       uint32_t        debugid,
+       uintptr_t       arg1,
+       uintptr_t       arg2,
+       uintptr_t       arg3,
+       uintptr_t       arg4)
+{
+       /* If tracing is already initialized, use it */
+       if (nkdbufs) {
+               KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0);
+               return;
+       }
+
+       /* Do nothing if the buffer is full or we're not on the boot cpu */ 
+       kd_early_overflow = kd_early_index >= KD_EARLY_BUFFER_MAX;
+       if (kd_early_overflow ||
+           cpu_number() != master_cpu)
+               return;
+
+       kd_early_buffer[kd_early_index].debugid = debugid;
+       kd_early_buffer[kd_early_index].timestamp = mach_absolute_time();
+       kd_early_buffer[kd_early_index].arg1 = arg1;
+       kd_early_buffer[kd_early_index].arg2 = arg2;
+       kd_early_buffer[kd_early_index].arg3 = arg3;
+       kd_early_buffer[kd_early_index].arg4 = arg4;
+       kd_early_buffer[kd_early_index].arg5 = 0;
+       kd_early_index++;
+}
+
+/*
+ * Transfer the contents of the temporary buffer into the trace buffers.
+ * Precede that by logging the rebase time (offset) - the TSC-based time (in ns)
+ * when mach_absolute_time is set to 0.
+ */
+static void
+kernel_debug_early_end(void)
+{
+       int     i;
+
+       if (cpu_number() != master_cpu)
+               panic("kernel_debug_early_end() not call on boot processor");
+
+       /* Fake sentinel marking the start of kernel time relative to TSC */
+       kernel_debug_enter(
+               0,
+               TRACE_TIMESTAMPS,
+               0,
+               (uint32_t)(tsc_rebase_abs_time >> 32),
+               (uint32_t)tsc_rebase_abs_time,
+               0,
+               0,
+               0);
+       for (i = 0; i < kd_early_index; i++) {
+               kernel_debug_enter(
+                       0,
+                       kd_early_buffer[i].debugid,
+                       kd_early_buffer[i].timestamp,
+                       kd_early_buffer[i].arg1,
+                       kd_early_buffer[i].arg2,
+                       kd_early_buffer[i].arg3,
+                       kd_early_buffer[i].arg4,
+                       0);
+       }
+
+       /* Cut events-lost event on overflow */
+       if (kd_early_overflow)
+               KERNEL_DEBUG_CONSTANT(
+                       TRACE_LOST_EVENTS, 0, 0, 0, 0, 0);
+
+       /* This trace marks the start of kernel tracing */
+       kernel_debug_string("early trace done");
+}
+
+/*
+ * Support syscall SYS_kdebug_trace. U64->K32 args may get truncated in kdebug_trace64
  */
 int
-kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
+kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval)
 {
+       struct kdebug_trace64_args uap64;
+
+       uap64.code = uap->code;
+       uap64.arg1 = uap->arg1;
+       uap64.arg2 = uap->arg2;
+       uap64.arg3 = uap->arg3;
+       uap64.arg4 = uap->arg4;
+
+       return kdebug_trace64(p, &uap64, retval);
+}
+
+/*
+ * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated to fit in 32-bit record format.
+ */
+int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
+{
+       uint8_t code_class;
+
+       /*
+        * Not all classes are supported for injection from userspace, especially ones used by the core
+        * kernel tracing infrastructure.
+        */
+       code_class = EXTRACT_CLASS(uap->code);
+
+       switch (code_class) {
+               case DBG_TRACE:
+                       return EPERM;
+       }
+
        if ( __probable(kdebug_enable == 0) )
-               return(EINVAL);
-  
-       kernel_debug_internal(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, (uintptr_t)thread_tid(current_thread()), 0);
+               return(0); 
+
+       kernel_debug_internal(uap->code, (uintptr_t)uap->arg1, (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, (uintptr_t)thread_tid(current_thread()));
 
        return(0);
 }
 
-
 static void
 kdbg_lock_init(void)
 {
@@ -2076,115 +2135,6 @@ write_error:
        return(ret);
 }
 
-int
-kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
-{
-       int avail = *number;
-       int ret = 0;
-       int s;
-       u_int64_t abstime;
-       u_int64_t ns;
-       int wait_result = THREAD_AWAKENED;
-
-
-       if (kd_entropy_buffer)
-               return(EBUSY);
-
-       if (ms_timeout < 0)
-               return(EINVAL);
-
-       kd_entropy_count = avail/sizeof(uint64_t);
-
-       if (kd_entropy_count > MAX_ENTROPY_COUNT || kd_entropy_count == 0) {
-               /*
-                * Enforce maximum entropy entries
-                */
-               return(EINVAL);
-       }
-       kd_entropy_bufsize = kd_entropy_count * sizeof(uint64_t);
-
-       /*
-        * allocate entropy buffer
-        */
-       if (kmem_alloc(kernel_map, &kd_entropy_buftomem, (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
-               kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
-       } else {
-               kd_entropy_buffer = (uint64_t *) 0;
-               kd_entropy_count = 0;
-
-               return (ENOMEM);
-       }
-       kd_entropy_indx = 0;
-
-       KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_START, ms_timeout, kd_entropy_count, 0, 0, 0);
-
-       /*
-        * Enable entropy sampling
-        */
-       kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, TRUE);
-
-       if (ms_timeout) {
-               ns = (u_int64_t)ms_timeout * (u_int64_t)(1000 * 1000);
-               nanoseconds_to_absolutetime(ns,  &abstime );
-               clock_absolutetime_interval_to_deadline( abstime, &abstime );
-       } else
-               abstime = 0;
-
-       s = ml_set_interrupts_enabled(FALSE);
-       lck_spin_lock(kdw_spin_lock);
-
-       while (wait_result == THREAD_AWAKENED && kd_entropy_indx < kd_entropy_count) {
-
-               kde_waiter = 1;
-
-               if (abstime) {
-                       /*
-                        * wait for the specified timeout or
-                        * until we've hit our sample limit
-                        */
-                       wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE, abstime);
-               } else {
-                       /*
-                        * wait until we've hit our sample limit
-                        */
-                       wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE);
-               }
-               kde_waiter = 0;
-       }
-       lck_spin_unlock(kdw_spin_lock);
-       ml_set_interrupts_enabled(s);
-
-       /*
-        * Disable entropy sampling
-        */
-       kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, FALSE);
-
-       KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_END, ms_timeout, kd_entropy_indx, 0, 0, 0);
-
-       *number = 0;
-       ret = 0;
-
-       if (kd_entropy_indx > 0) {
-               /*
-                * copyout the buffer
-                */
-               if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(uint64_t)))
-                       ret = EINVAL;
-               else
-                       *number = kd_entropy_indx * sizeof(uint64_t);
-       }
-       /*
-        * Always cleanup
-        */
-       kd_entropy_count = 0;
-       kd_entropy_indx = 0;
-       kd_entropy_buftomem = 0;
-       kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
-       kd_entropy_buffer = (uint64_t *) 0;
-       
-       return(ret);
-}
-
 
 static int
 kdbg_set_nkdbufs(unsigned int value)
@@ -2331,28 +2281,26 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
                                        ret = EINVAL;
                        }
                        goto out;
-                       break;
                        
-               case KERN_KDGETENTROPY:
-                       if (kd_entropy_buffer)
-                               ret = EBUSY;
-                       else
-                               ret = kdbg_getentropy(where, sizep, value);
+               case KERN_KDGETENTROPY: {
+                       /* Obsolescent - just fake with a random buffer */
+                       char    *buffer = (char *) kalloc(size);
+                       read_frandom((void *) buffer, size);
+                       ret = copyout(buffer, where, size);
+                       kfree(buffer, size);
                        goto out;
-                       break;
+               }
                        
                case KERN_KDENABLE_BG_TRACE:
                        bg_nkdbufs = kdbg_set_nkdbufs(value);
                        kdlog_bg_trace = TRUE;
                        ret = kdbg_enable_bg_trace();
                        goto out;
-                       break;
                        
                case KERN_KDDISABLE_BG_TRACE:
                        kdlog_bg_trace = FALSE;
                        kdbg_disable_bg_trace();
                        goto out;
-                       break;
        }
        
        if ((curproc = current_proc()) != NULL)
@@ -2523,19 +2471,20 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
                        proc_fdunlock(p);
 
                        if ((ret = vnode_getwithref(vp)) == 0) {
-
+                               RAW_file_offset = fp->f_fglob->fg_offset;
                                if (name[0] == KERN_KDWRITETR) {
                                        number = nkdbufs * sizeof(kd_buf);
 
-                                       KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+                                       KERNEL_DEBUG_CONSTANT(TRACE_WRITING_EVENTS | DBG_FUNC_START, 0, 0, 0, 0, 0);
                                        ret = kdbg_read(0, &number, vp, &context);
                                        ret = kdbg_read(0, &number, vp, &context);
-                                       KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_END, number, 0, 0, 0, 0);
+                                       KERNEL_DEBUG_CONSTANT(TRACE_WRITING_EVENTS | DBG_FUNC_END, number, 0, 0, 0, 0);
 
                                        *sizep = number;
                                } else {
                                        number = kd_mapcount * sizeof(kd_threadmap);
                                        kdbg_readthrmap(0, &number, vp, &context);
                                }
+                               fp->f_fglob->fg_offset = RAW_file_offset;
                                vnode_put(vp);
                        }
                        fp_drop(p, fd, fp, 0);
@@ -2708,7 +2657,7 @@ kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
                return EINVAL;
 
        memset(&lostevent, 0, sizeof(lostevent));
-       lostevent.debugid = TRACEDBG_CODE(DBG_TRACE_INFO, 2);
+       lostevent.debugid = TRACE_LOST_EVENTS;
 
        /* Capture timestamp. Only sort events that have occured before the timestamp.
         * Since the iop is being flushed here, its possible that events occur on the AP
@@ -3017,6 +2966,23 @@ stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_
                *retval = 0;
                return (0);
        }
+
+       if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_ENABLE) {
+               error = telemetry_enable_window();
+
+               if (error != KERN_SUCCESS) {
+                       /* We are probably out of memory */
+                       *retval = -1;
+                       return ENOMEM;
+               }
+
+               *retval = 0;
+               return (0);
+       } else if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_DISABLE) {
+               telemetry_disable_window();
+               *retval = 0;
+               return (0);
+       }
 #endif
 
        *retval = -1;
@@ -3047,6 +3013,23 @@ stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_
                goto error_exit;
        }
 
+       if (flags & STACKSHOT_GET_WINDOWED_MICROSTACKSHOTS) {
+
+               if (tracebuf_size > SANE_TRACEBUF_SIZE) {
+                       error = EINVAL;
+                       goto error_exit;
+               }
+
+               bytesTraced = tracebuf_size;
+               error = telemetry_gather_windowed(tracebuf, &bytesTraced);
+               if (error == KERN_NO_SPACE) {
+                       error = ENOSPC;
+               }
+
+               *retval = (int)bytesTraced;
+               goto error_exit;
+       }
+
        if (flags & STACKSHOT_GET_BOOT_PROFILE) {
 
                if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {
@@ -3123,13 +3106,16 @@ error_exit:
 }
 
 void
-start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map) {
+start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map)
+{
 
        if (!new_nkdbufs)
                return;
        nkdbufs = kdbg_set_nkdbufs(new_nkdbufs);
        kdbg_lock_init();
 
+       kernel_debug_string("start_kern_tracing");
+
        if (0 == kdbg_reinit(TRUE)) {
 
                if (need_map == TRUE) {
@@ -3139,22 +3125,53 @@ start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map) {
 
                        disable_wrap(&old1, &old2);
                }
-               kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
 
-#if defined(__i386__) || defined(__x86_64__)
-               uint64_t now = mach_absolute_time();
+               /* Hold off interrupts until the early traces are cut */
+               boolean_t       s = ml_set_interrupts_enabled(FALSE);
+
+               kdbg_set_tracing_enabled(
+                       TRUE,
+                       kdebug_serial ?
+                               (KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_SERIAL) :
+                                KDEBUG_ENABLE_TRACE);
+
+               /*
+                * Transfer all very early events from the static buffer
+                * into the real buffers.
+                */
+               kernel_debug_early_end();
+       
+               ml_set_interrupts_enabled(s);
 
-        KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
-                              (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
-                              (uint32_t)(now >> 32), (uint32_t)now,
-                              0);
-#endif
                printf("kernel tracing started\n");
                printf("kernel tracing started\n");
+#if KDEBUG_MOJO_TRACE
+               if (kdebug_serial) {
+                       printf("serial output enabled with %lu named events\n",
+                       sizeof(kd_events)/sizeof(kd_event_t));
+               }
+#endif
        } else {
-               printf("error from kdbg_reinit,kernel tracing not started\n");
+               printf("error from kdbg_reinit, kernel tracing not started\n");
        }
 }
 
+void
+start_kern_tracing_with_typefilter(unsigned int new_nkdbufs,
+                                  boolean_t need_map,
+                                  unsigned int typefilter)
+{
+       /* startup tracing */
+       start_kern_tracing(new_nkdbufs, need_map);
+
+       /* check that tracing was actually enabled */
+       if (!(kdebug_enable & KDEBUG_ENABLE_TRACE))
+               return;
+
+       /* setup the typefiltering */
+       if (0 == kdbg_enable_typefilter())
+               setbit(type_filter_bitmap, typefilter & (CSC_MASK >> CSC_OFFSET));
+}
+
 void
 kdbg_dump_trace_to_file(const char *filename)
 {
@@ -3178,13 +3195,15 @@ kdbg_dump_trace_to_file(const char *filename)
                         */
                        kdebug_enable = 0;
                        kd_ctrl_page.enabled = 0;
+                       commpage_update_kdebug_enable();
                        return;
                }
        }
-       KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);
+       KERNEL_DEBUG_CONSTANT(TRACE_PANIC | DBG_FUNC_NONE, 0, 0, 0, 0, 0);
 
        kdebug_enable = 0;
        kd_ctrl_page.enabled = 0;
+       commpage_update_kdebug_enable();
 
        ctx = vfs_context_kernel();
 
@@ -3222,3 +3241,146 @@ void kdbg_get_task_name(char* name_buf, int len, task_t task)
        else
                snprintf(name_buf, len, "%p [!bsd]", task);
 }
+
+#if KDEBUG_MOJO_TRACE
+static kd_event_t *
+binary_search(uint32_t id)
+{
+       int low, high, mid;
+
+       low = 0;
+       high = sizeof(kd_events)/sizeof(kd_event_t) - 1;
+
+       while (TRUE)
+       {
+               mid = (low + high) / 2;
+
+               if (low > high)
+                       return NULL; /* failed */
+               else if ( low + 1 >= high) {
+                       /* We have a match */
+                       if (kd_events[high].id == id)
+                               return &kd_events[high];
+                       else if (kd_events[low].id == id)
+                               return &kd_events[low];
+                       else
+                               return NULL;  /* search failed */
+               }
+               else if (id < kd_events[mid].id)
+                       high = mid;
+               else
+                       low = mid;
+       } 
+}
+
+/*
+ * Look up event id to get name string.
+ * Using a per-cpu cache of a single entry
+ * before resorting to a binary search of the full table.
+ */
+#define        NCACHE  1
+static kd_event_t      *last_hit[MAX_CPUS];
+static kd_event_t *
+event_lookup_cache(uint32_t cpu, uint32_t id)
+{
+       if (last_hit[cpu] == NULL || last_hit[cpu]->id != id)
+               last_hit[cpu] = binary_search(id);
+       return last_hit[cpu];
+}
+
+static uint64_t        kd_last_timstamp;
+
+static void
+kdebug_serial_print(
+       uint32_t        cpunum,
+       uint32_t        debugid,
+       uint64_t        timestamp,
+       uintptr_t       arg1,
+       uintptr_t       arg2,
+       uintptr_t       arg3,
+       uintptr_t       arg4,
+       uintptr_t       threadid
+       )
+{
+       char            kprintf_line[192];
+       char            event[40];
+       uint64_t        us = timestamp / NSEC_PER_USEC;
+       uint64_t        us_tenth = (timestamp % NSEC_PER_USEC) / 100;
+       uint64_t        delta = timestamp - kd_last_timstamp;
+       uint64_t        delta_us = delta / NSEC_PER_USEC;
+       uint64_t        delta_us_tenth = (delta % NSEC_PER_USEC) / 100;
+       uint32_t        event_id = debugid & DBG_FUNC_MASK;
+       const char      *command;
+       const char      *bra;
+       const char      *ket;
+       kd_event_t      *ep;
+
+       /* event time and delta from last */
+       snprintf(kprintf_line, sizeof(kprintf_line),
+               "%11llu.%1llu %8llu.%1llu ",
+               us, us_tenth, delta_us, delta_us_tenth);
+
+
+       /* event (id or name) - start prefixed by "[", end postfixed by "]" */
+       bra = (debugid & DBG_FUNC_START) ? "[" : " ";
+       ket = (debugid & DBG_FUNC_END)   ? "]" : " ";
+       ep = event_lookup_cache(cpunum, event_id);
+       if (ep) {
+               if (strlen(ep->name) < sizeof(event) - 3)
+                       snprintf(event, sizeof(event), "%s%s%s",
+                                bra, ep->name, ket);
+               else
+                       snprintf(event, sizeof(event), "%s%x(name too long)%s",
+                                bra, event_id, ket);
+       } else {
+               snprintf(event, sizeof(event), "%s%x%s",
+                        bra, event_id, ket);
+       }
+       snprintf(kprintf_line + strlen(kprintf_line),
+                sizeof(kprintf_line) - strlen(kprintf_line),
+                "%-40s  ", event);
+
+       /* arg1 .. arg4 with special cases for strings */
+       switch (event_id) {
+           case VFS_LOOKUP:
+           case VFS_LOOKUP_DONE:
+               if (debugid & DBG_FUNC_START) {
+                       /* arg1 hex then arg2..arg4 chars */
+                       snprintf(kprintf_line + strlen(kprintf_line),
+                               sizeof(kprintf_line) - strlen(kprintf_line),
+                               "%-16lx %-8s%-8s%-8s                          ",
+                               arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
+                       break;
+               }
+               /* else fall through for arg1..arg4 chars */
+           case TRACE_STRING_EXEC:
+           case TRACE_STRING_NEWTHREAD:
+           case TRACE_INFO_STRING:
+               snprintf(kprintf_line + strlen(kprintf_line),
+                       sizeof(kprintf_line) - strlen(kprintf_line),
+                       "%-8s%-8s%-8s%-8s                                   ",
+                       (char*)&arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
+               break;
+           default:
+               snprintf(kprintf_line + strlen(kprintf_line),
+                       sizeof(kprintf_line) - strlen(kprintf_line),
+                       "%-16lx %-16lx %-16lx %-16lx",
+                       arg1, arg2, arg3, arg4);
+       }
+
+       /* threadid, cpu and command name */
+       if (threadid == (uintptr_t)thread_tid(current_thread()) &&
+           current_proc() &&
+           current_proc()->p_comm)
+               command = current_proc()->p_comm;
+       else
+               command = "-";
+       snprintf(kprintf_line + strlen(kprintf_line),
+               sizeof(kprintf_line) - strlen(kprintf_line),
+               "  %-16lx  %-2d %s\n",
+               threadid, cpunum, command);
+       
+       kprintf("%s", kprintf_line);
+       kd_last_timstamp = timestamp;
+}
+#endif
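
For reference, each line that kdebug_serial_print() above hands to kprintf() is assembled as: the event's absolute time and its delta from the previous event, both in microseconds with a tenths digit; the event name looked up in kd_events (or the raw hex id when no name is found), wrapped in "[" and "]" when DBG_FUNC_START and DBG_FUNC_END are set; the four arguments, printed as hex or as packed characters for the VFS_LOOKUP and TRACE_STRING_* cases; and finally the thread id, cpu number and current command name. An illustrative line (all values made up):

    12345678.9    456.7 [some_named_event                      ]  0000000000000001 0000000000000002 0000000000000003 0000000000000004    00000000000016cb   0 launchd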