apple/xnu (xnu-2050.48.11): osfmk/kdp/kdp.c
diff --git a/osfmk/kdp/kdp.c b/osfmk/kdp/kdp.c
index 21a29d47af24d025b83e315282bb961d142dd901..ec8e73b4e1701d6a35e94fdd38c7e5a3e83e6732 100644
--- a/osfmk/kdp/kdp.c
+++ b/osfmk/kdp/kdp.c
  */
 
 #include <mach/mach_types.h>
+#include <mach/vm_param.h>
 #include <sys/appleapiopts.h>
 #include <kern/debug.h>
+#include <uuid/uuid.h>
 
 #include <kdp/kdp_internal.h>
 #include <kdp/kdp_private.h>
+#include <kdp/kdp_core.h>
+#include <kdp/kdp_dyld.h>
 
 #include <libsa/types.h>
+#include <libkern/version.h>
 
 #include <string.h> /* bcopy */
 
 #include <kern/processor.h>
 #include <kern/thread.h>
+#include <kern/clock.h>
 #include <vm/vm_map.h>
 #include <vm/vm_kern.h>
+#include <vm/vm_pageout.h>
+
+extern int count_busy_buffers(void);   /* must track with declaration in bsd/sys/buf_internal.h */
 
 #define DO_ALIGN       1       /* align all packet data accesses */
 
@@ -80,11 +89,12 @@ static kdp_dispatch_t
 /*17 */ kdp_breakpoint64_remove,
 /*18 */ kdp_kernelversion,
 /*19 */ kdp_readphysmem64,
-/*20 */ kdp_writephysmem64,
-/*21 */ kdp_readioport,
-/*22 */ kdp_writeioport,
-/*23 */ kdp_readmsr64,
-/*24 */ kdp_writemsr64,
+/*1A */ kdp_writephysmem64,
+/*1B */ kdp_readioport,
+/*1C */ kdp_writeioport,
+/*1D */ kdp_readmsr64,
+/*1E */ kdp_writemsr64,
+/*1F */ kdp_dumpinfo,
     };
     
 kdp_glob_t     kdp;
@@ -95,7 +105,7 @@ kdp_glob_t   kdp;
  * Version 11 of the KDP Protocol adds support for 64-bit wide memory
  * addresses (read/write and breakpoints) as well as a dedicated
  * kernelversion request. Version 12 adds read/writing of physical
- * memory with 64-bit wide memory addresses.
+ * memory with 64-bit wide memory addresses. 
  */
 #define KDP_VERSION 12
 
@@ -113,6 +123,7 @@ int noresume_on_disconnect = 0;
 extern unsigned int return_on_panic;
 
 typedef struct thread_snapshot *thread_snapshot_t;
+typedef struct task_snapshot *task_snapshot_t;
 
 extern int
 machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
@@ -141,13 +152,11 @@ kdp_remove_breakpoint_internal(
 
 
 int
-kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, unsigned trace_options, uint32_t *pbytesTraced);
+kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced);
 
 boolean_t kdp_copyin(pmap_t, uint64_t, void *, size_t);
 extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);
 
-extern char version[];
-
 boolean_t
 kdp_packet(
     unsigned char      *pkt,
@@ -219,31 +228,42 @@ kdp_connect(
     kdp_connect_req_t  *rq = &pkt->connect_req;
     size_t             plen = *len;
     kdp_connect_reply_t        *rp = &pkt->connect_reply;
+    uint16_t            rport, eport;
+    uint32_t            key;
+    uint8_t             seq;
 
     if (plen < sizeof (*rq))
        return (FALSE);
 
     dprintf(("kdp_connect seq %x greeting %s\n", rq->hdr.seq, rq->greeting));
 
+    rport = rq->req_reply_port;
+    eport = rq->exc_note_port;
+    key   = rq->hdr.key;
+    seq   = rq->hdr.seq;
     if (kdp.is_conn) {
-       if (rq->hdr.seq == kdp.conn_seq)        /* duplicate request */
+       if ((seq == kdp.conn_seq) &&    /* duplicate request */
+            (rport == kdp.reply_port) &&
+            (eport == kdp.exception_port) &&
+            (key == kdp.session_key))
            rp->error = KDPERR_NO_ERROR;
-       else
+       else 
            rp->error = KDPERR_ALREADY_CONNECTED;
     }
     else { 
-       kdp.reply_port = rq->req_reply_port;
-       kdp.exception_port = rq->exc_note_port;
-       kdp.is_conn = TRUE;
-       kdp.conn_seq = rq->hdr.seq;
-    
+           kdp.reply_port     = rport;
+           kdp.exception_port = eport;
+           kdp.is_conn        = TRUE;
+           kdp.conn_seq       = seq;
+        kdp.session_key    = key;
+
        rp->error = KDPERR_NO_ERROR;
     }
 
     rp->hdr.is_reply = 1;
     rp->hdr.len = sizeof (*rp);
     
-    *reply_port = kdp.reply_port;
+    *reply_port = rport;
     *len = rp->hdr.len;
     
     if (current_debugger == KDP_CUR_DB)    
@@ -276,6 +296,7 @@ kdp_disconnect(
     kdp.reply_port = kdp.exception_port = 0;
     kdp.is_halted = kdp.is_conn = FALSE;
     kdp.exception_seq = kdp.conn_seq = 0;
+    kdp.session_key = 0;
 
     if ((panicstr != NULL) && (return_on_panic == 0))
            reattach_wait = 1;
@@ -358,7 +379,7 @@ kdp_kernelversion(
     rp->hdr.len = sizeof (*rp);
        
     dprintf(("kdp_kernelversion\n"));
-       slen = strlcpy(rp->version, version, MAX_KDP_DATA_SIZE);
+       slen = strlcpy(rp->version, kdp_kernelversion_string, MAX_KDP_DATA_SIZE);
        
        rp->hdr.len += slen + 1; /* strlcpy returns the amount copied with NUL */
        
@@ -530,8 +551,8 @@ kdp_readmem(
     size_t             plen = *len;
     kdp_readmem_reply_t *rp = &pkt->readmem_reply;
     mach_vm_size_t                     cnt;
-#if __i386__ || __arm__
-    void               *pversion = &version;
+#if __i386__
+    void               *pversion = &kdp_kernelversion_string;
 #endif
 
     if (plen < sizeof (*rq))
@@ -546,9 +567,9 @@ kdp_readmem(
        unsigned int    n = rq->nbytes;
 
        dprintf(("kdp_readmem addr %x size %d\n", rq->address, n));
-#if __i386__ || __arm__
+#if __i386__
        /* XXX This is a hack to facilitate the "showversion" macro
-        * on i386/ARM, which is used to obtain the kernel version without
+        * on i386, which is used to obtain the kernel version without
         * symbols - a pointer to the version string should eventually
         * be pinned at a fixed address when an equivalent of the
         * VECTORS segment (loaded at a fixed load address, and contains
@@ -1030,7 +1051,7 @@ kdp_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size) {
 
        while (rem) {
                ppnum_t upn = pmap_find_phys(p, uaddr);
-               uint64_t phys_src = (upn << PAGE_SHIFT) | (uaddr & PAGE_MASK);
+               uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
                uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
                uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
                uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
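The ptoa_64() change above is not cosmetic: ppnum_t is a 32-bit type, so `upn << PAGE_SHIFT` is evaluated in 32-bit arithmetic and physical pages above the 4 GB line wrap before the result is widened to 64 bits. A small sketch of the difference, assuming the kernel's definitions of ppnum_t, PAGE_SHIFT (4 KB pages here) and ptoa_64():

```c
/* Sketch only; assumes the kernel's ppnum_t, PAGE_SHIFT and ptoa_64() definitions. */
static uint64_t why_ptoa_64(void)
{
	ppnum_t  upn  = 0x150000;               /* a physical page just above the 4 GB line */
	uint64_t bad  = upn << PAGE_SHIFT;      /* shift done in 32 bits: 0x50000000, wrapped */
	uint64_t good = ptoa_64(upn);           /* widened before shifting: 0x150000000 */
	return good - bad;                      /* nonzero -- the two differ for high pages */
}
```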
@@ -1049,8 +1070,60 @@ kdp_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size) {
        return (rem == 0);
 }
 
+
+static void
+kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap)
+{
+  unsigned int pages_reclaimed;
+  unsigned int pages_wanted;
+  kern_return_t kErr;
+
+  memio_snap->snapshot_magic = STACKSHOT_MEM_AND_IO_SNAPSHOT_MAGIC;
+  memio_snap->free_pages = vm_page_free_count;
+  memio_snap->active_pages = vm_page_active_count;
+  memio_snap->inactive_pages = vm_page_inactive_count;
+  memio_snap->purgeable_pages = vm_page_purgeable_count;
+  memio_snap->wired_pages = vm_page_wire_count;
+  memio_snap->speculative_pages = vm_page_speculative_count;
+  memio_snap->throttled_pages = vm_page_throttled_count;
+  memio_snap->busy_buffer_count = count_busy_buffers();
+  kErr = mach_vm_pressure_monitor(FALSE, VM_PRESSURE_TIME_WINDOW, &pages_reclaimed, &pages_wanted);
+  if ( ! kErr ) {
+       memio_snap->pages_wanted = (uint32_t)pages_wanted;
+       memio_snap->pages_reclaimed = (uint32_t)pages_reclaimed;
+       memio_snap->pages_wanted_reclaimed_valid = 1;
+  } else {
+       memio_snap->pages_wanted = 0;
+       memio_snap->pages_reclaimed = 0;
+       memio_snap->pages_wanted_reclaimed_valid = 0;
+  }
+}
+
+
+
+/* 
+ * Method for grabbing timer values safely, in the sense that no infinite loop will occur.
+ * Certain flavors of the timer_grab function, which would seem to be the thing to use,   
+ * can loop infinitely if called while the timer is in the process of being updated.      
+ * Unfortunately, it is (rarely) possible to get inconsistent top and bottom halves of    
+ * the timer using this method. This seems insoluble, since stackshot runs in a context   
+ * where the timer might be half-updated, and has no way of yielding control just long    
+ * enough to finish the update.                                                           
+ */
+
+static uint64_t safe_grab_timer_value(struct timer *t)
+{
+#if   defined(__LP64__)
+  return t->all_bits;
+#else
+  uint64_t time = t->high_bits;    /* endian independent grab */
+  time = (time << 32) | t->low_bits;
+  return time;
+#endif
+}
+
 int
-kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, unsigned trace_options, uint32_t *pbytesTraced)
+kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced)
 {
        char *tracepos = (char *) tracebuf;
        char *tracebound = tracepos + tracebuf_size;
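The comment on safe_grab_timer_value() above describes a torn read that can occur on 32-bit kernels when the two halves of a timer are sampled mid-update. A standalone illustration of that hazard (hypothetical types, not xnu's timer code):

```c
#include <stdint.h>

/* A 64-bit count stored as two 32-bit halves, as on the non-LP64 path above. */
struct split_timer_sketch {
	volatile uint32_t high_bits;
	volatile uint32_t low_bits;
};

/* Writer rolling 0x00000000FFFFFFFF over to 0x0000000100000000. */
static void tick_over(struct split_timer_sketch *t)
{
	t->low_bits  = 0;   /* a reader sampling between these two stores ...            */
	t->high_bits = 1;   /* ... sees high = 0, low = 0: a value that never existed    */
}

/* Reader, same shape as the 32-bit branch of safe_grab_timer_value(). */
static uint64_t grab(const struct split_timer_sketch *t)
{
	uint64_t time = t->high_bits;
	return (time << 32) | t->low_bits;
}
```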
@@ -1059,49 +1132,163 @@ kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, unsigned trace_op
 
        task_t task = TASK_NULL;
        thread_t thread = THREAD_NULL;
-       int nframes = trace_options;
        thread_snapshot_t tsnap = NULL;
        unsigned framesize = 2 * sizeof(vm_offset_t);
-       boolean_t dispatch_p = ((trace_options & STACKSHOT_GET_DQ) != 0);
-       uint16_t  dispatch_offset = (trace_options & STACKSHOT_DISPATCH_OFFSET_MASK) >> STACKSHOT_DISPATCH_OFFSET_SHIFT;
        struct task ctask;
        struct thread cthread;
+       struct _vm_map cmap;
+       struct pmap cpmap;
+
+       queue_head_t *task_list = &tasks;
+       boolean_t is_active_list = TRUE;
+       
+       boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0);
+       boolean_t save_loadinfo_p = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0);
+
+       if(trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) {
+         if(tracepos + sizeof(struct mem_and_io_snapshot) > tracebound) {
+           error = -1;
+           goto error_exit;
+         }
+         kdp_mem_and_io_snapshot((struct mem_and_io_snapshot *)tracepos);
+         tracepos += sizeof(struct mem_and_io_snapshot);
+       }
 
-       if ((nframes <= 0) || nframes > MAX_FRAMES)
-               nframes = MAX_FRAMES;
-
-       queue_iterate(&tasks, task, task_t, tasks) {
+walk_list:
+       queue_iterate(task_list, task, task_t, tasks) {
                if ((task == NULL) || (ml_nofault_copy((vm_offset_t) task, (vm_offset_t) &ctask, sizeof(struct task)) != sizeof(struct task)))
                        goto error_exit;
+
+               int task_pid = pid_from_task(task);
+               boolean_t task64 = task_has_64BitAddr(task);
+
+               if (!task->active) {
+                       /* 
+                        * Not interested in terminated tasks without threads, and
+                        * at the moment, stackshot can't handle a task  without a name.
+                        * at the moment, stackshot can't handle a task without a name.
+                       if (queue_empty(&task->threads) || task_pid == -1) {
+                               continue;
+                       }
+               }
+
                /* Trace everything, unless a process was specified */
-               if ((pid == -1) || (pid == pid_from_task(task)))
+               if ((pid == -1) || (pid == task_pid)) {
+                       task_snapshot_t task_snap;
+                       uint32_t uuid_info_count = 0;
+                       mach_vm_address_t uuid_info_addr = 0;
+                       boolean_t have_map = (task->map != NULL) && 
+                         (ml_nofault_copy((vm_offset_t)(task->map), (vm_offset_t)&cmap, sizeof(struct _vm_map)) == sizeof(struct _vm_map));
+                       boolean_t have_pmap = have_map && (cmap.pmap != NULL) &&
+                         (ml_nofault_copy((vm_offset_t)(cmap.pmap), (vm_offset_t)&cpmap, sizeof(struct pmap)) == sizeof(struct pmap));
+
+                       if (have_pmap && task->active && save_loadinfo_p && task_pid > 0) {
+                               // Read the dyld_all_image_infos struct from the task memory to get UUID array count and location
+                               if (task64) {
+                                       struct user64_dyld_all_image_infos task_image_infos;
+                                       if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
+                                               uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
+                                               uuid_info_addr = task_image_infos.uuidArray;
+                                       }
+                               } else {
+                                       struct user32_dyld_all_image_infos task_image_infos;
+                                       if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user32_dyld_all_image_infos))) {
+                                               uuid_info_count = task_image_infos.uuidArrayCount;
+                                               uuid_info_addr = task_image_infos.uuidArray;
+                                       }
+                               }
+
+                               // If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
+                               // this data structure), we zero the uuid_info_count so that we won't even try to save load info
+                               // for this task.
+                               if (!uuid_info_addr) {
+                                       uuid_info_count = 0;
+                               }
+                       }
+
+                       if (tracepos + sizeof(struct task_snapshot) > tracebound) {
+                               error = -1;
+                               goto error_exit;
+                       }
+
+                       task_snap = (task_snapshot_t) tracepos;
+                       task_snap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
+                       task_snap->pid = task_pid;
+                       task_snap->nloadinfos = uuid_info_count;
+                       /* Add the BSD process identifiers */
+                       if (task_pid != -1)
+                               proc_name_kdp(task, task_snap->p_comm, sizeof(task_snap->p_comm));
+                       else
+                               task_snap->p_comm[0] = '\0';
+                       task_snap->ss_flags = 0;
+                       if (task64)
+                               task_snap->ss_flags |= kUser64_p;
+                       if (!task->active) 
+                               task_snap->ss_flags |= kTerminatedSnapshot;
+                       if(task->pidsuspended) task_snap->ss_flags |= kPidSuspended;
+                       if(task->frozen) task_snap->ss_flags |= kFrozen;
+
+                       task_snap->suspend_count = task->suspend_count;
+                       task_snap->task_size = have_pmap ? pmap_resident_count(task->map->pmap) : 0;
+                       task_snap->faults = task->faults;
+                       task_snap->pageins = task->pageins;
+                       task_snap->cow_faults = task->cow_faults;
+                       
+                       task_snap->user_time_in_terminated_threads = task->total_user_time;
+                       task_snap->system_time_in_terminated_threads = task->total_system_time;
+                       tracepos += sizeof(struct task_snapshot);
+
+                       if (task_pid > 0 && uuid_info_count > 0) {
+                               uint32_t uuid_info_size = (uint32_t)(task64 ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
+                               uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
+
+                               if (tracepos + uuid_info_array_size > tracebound) {
+                                       error = -1;
+                                       goto error_exit;
+                               }
+
+                               // Copy in the UUID info array
+                               // It may be nonresident, in which case just fix up nloadinfos to 0 in the task_snap
+                               if (have_pmap && !kdp_copyin(task->map->pmap, uuid_info_addr, tracepos, uuid_info_array_size))
+                                       task_snap->nloadinfos = 0;
+                               else
+                                       tracepos += uuid_info_array_size;
+                       }
+
                        queue_iterate(&task->threads, thread, thread_t, task_threads){
+                               uint64_t tval;
+
                                if ((thread == NULL) || (ml_nofault_copy((vm_offset_t) thread, (vm_offset_t) &cthread, sizeof(struct thread)) != sizeof(struct thread)))
                                        goto error_exit;
+
                                if (((tracepos + 4 * sizeof(struct thread_snapshot)) > tracebound)) {
                                        error = -1;
                                        goto error_exit;
                                }
-/* Populate the thread snapshot header */
+                               /* Populate the thread snapshot header */
                                tsnap = (thread_snapshot_t) tracepos;
-                               tsnap->thread_id = (uint64_t) (uintptr_t)thread;
+                               tsnap->thread_id = thread_tid(thread);
                                tsnap->state = thread->state;
-                               tsnap->wait_event = thread->wait_event;
-                               tsnap->continuation = (uint64_t) (uintptr_t) thread->continuation;
-/* Add the BSD process identifiers */
-                               if ((tsnap->pid = pid_from_task(task)) != -1)
-                                       proc_name_kdp(task, tsnap->p_comm, sizeof(tsnap->p_comm));
-                               else
-                                       tsnap->p_comm[0] = '\0';
-
-                               tsnap->snapshot_magic = 0xfeedface;
+                               tsnap->sched_pri = thread->sched_pri;
+                               tsnap->sched_flags = thread->sched_flags;
+                               tsnap->wait_event = VM_KERNEL_UNSLIDE(thread->wait_event);
+                               tsnap->continuation = VM_KERNEL_UNSLIDE(thread->continuation);
+                               tval = safe_grab_timer_value(&thread->user_timer);
+                               tsnap->user_time = tval;
+                               tval = safe_grab_timer_value(&thread->system_timer);
+                               if (thread->precise_user_kernel_time) {
+                                       tsnap->system_time = tval;
+                               } else {
+                                       tsnap->user_time += tval;
+                                       tsnap->system_time = 0;
+                               }
+                               tsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
                                tracepos += sizeof(struct thread_snapshot);
                                tsnap->ss_flags = 0;
 
-                               if (dispatch_p && (task != kernel_task) && (task->active) && (task->map)) {
+                               if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) {
                                        uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
                                        if (dqkeyaddr != 0) {
-                                               boolean_t task64 = task_has_64BitAddr(task);
                                                uint64_t dqaddr = 0;
                                                if (kdp_copyin(task->map->pmap, dqkeyaddr, &dqaddr, (task64 ? 8 : 4)) && (dqaddr != 0)) {
                                                        uint64_t dqserialnumaddr = dqaddr + dispatch_offset;
@@ -1117,29 +1304,30 @@ kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, unsigned trace_op
 /* Call through to the machine specific trace routines
  * Frames are added past the snapshot header.
  */
+                               tracebytes = 0;
                                if (thread->kernel_stack != 0) {
 #if defined(__LP64__)                                  
-                                       tracebytes = machine_trace_thread64(thread, tracepos, tracebound, nframes, FALSE);
+                                       tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
                                        tsnap->ss_flags |= kKernel64_p;
                                        framesize = 16;
 #else
-                                       tracebytes = machine_trace_thread(thread, tracepos, tracebound, nframes, FALSE);
+                                       tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
                                        framesize = 8;
 #endif
                                }
                                tsnap->nkern_frames = tracebytes/framesize;
                                tracepos += tracebytes;
                                tracebytes = 0;
-/* Trace user stack, if any */
-                               if (thread->task->map != kernel_map) {
+                               /* Trace user stack, if any */
+                               if (task->active && thread->task->map != kernel_map) {
                                        /* 64-bit task? */
                                        if (task_has_64BitAddr(thread->task)) {
-                                               tracebytes = machine_trace_thread64(thread, tracepos, tracebound, nframes, TRUE);
+                                               tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
                                                tsnap->ss_flags |= kUser64_p;
                                                framesize = 16;
                                        }
                                        else {
-                                               tracebytes = machine_trace_thread(thread, tracepos, tracebound, nframes, TRUE);
+                                               tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
                                                framesize = 8;
                                        }
                                }
@@ -1147,6 +1335,13 @@ kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, unsigned trace_op
                                tracepos += tracebytes;
                                tracebytes = 0;
                        }
+               }
+       }
+
+       if (is_active_list) { 
+               is_active_list = FALSE;
+               task_list = &terminated_tasks;
+               goto walk_list;
        }
 
 error_exit:
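Taken together, the hunks above pack records back to back into the caller's buffer. The outline below is a consumer-side sketch of that layout, using the magic values, flag bits and counts this routine writes; the helper is illustrative, not the actual tooling parser, and nuser_frames (the user-stack counterpart of nkern_frames, filled in context not shown in this diff) is an assumption.

```c
/*
 * Buffer layout produced by kdp_stackshot(), per the code above:
 *
 *   [ struct mem_and_io_snapshot ]             only with STACKSHOT_GET_GLOBAL_MEM_STATS
 *   for each task (active list, then terminated_tasks):
 *     [ struct task_snapshot ]                 snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC
 *     [ nloadinfos dyld uuid_info records ]    user32 or user64 layout, per kUser64_p
 *     for each thread of the task:
 *       [ struct thread_snapshot ]             snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC
 *       [ nkern_frames kernel frames ]         8 or 16 bytes each, per kKernel64_p
 *       [ nuser_frames user frames ]           8 or 16 bytes each, per kUser64_p
 */

/* Illustrative helper: bytes of stack frames trailing one thread_snapshot. */
static uint32_t thread_frame_bytes(const struct thread_snapshot *ts)
{
	uint32_t kframe = (ts->ss_flags & kKernel64_p) ? 16 : 8;
	uint32_t uframe = (ts->ss_flags & kUser64_p)   ? 16 : 8;
	return ts->nkern_frames * kframe + ts->nuser_frames * uframe;
}
```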
@@ -1278,3 +1473,36 @@ kdp_writemsr64(
     
        return (TRUE);
 }
+
+static boolean_t
+kdp_dumpinfo(
+       kdp_pkt_t       *pkt,
+       int             *len,
+       unsigned short  *reply_port
+              )
+{
+       kdp_dumpinfo_req_t   *rq = &pkt->dumpinfo_req;
+       kdp_dumpinfo_reply_t *rp = &pkt->dumpinfo_reply;
+       size_t  plen = *len;
+       
+       if (plen < sizeof (*rq))
+               return (FALSE);
+       
+       dprintf(("kdp_dumpinfo file=%s destip=%s routerip=%s\n", rq->name, rq->destip, rq->routerip));
+       rp->hdr.is_reply = 1;
+       rp->hdr.len = sizeof (*rp);
+       
+        if ((rq->type & KDP_DUMPINFO_MASK) != KDP_DUMPINFO_GETINFO) {
+            kdp_set_dump_info(rq->type, rq->name, rq->destip, rq->routerip, 
+                                rq->port);
+        }
+
+        /* gather some stats for reply */
+        kdp_get_dump_info(&rp->type, rp->name, rp->destip, rp->routerip, 
+                          &rp->port);
+
+       *reply_port = kdp.reply_port;
+       *len = rp->hdr.len;
+    
+       return (TRUE);
+}
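A minimal host-side sketch of driving the new KDP_DUMPINFO request follows. kdp_dumpinfo_req_t's real layout lives in kdp_protocol.h; the mirror struct below is hypothetical, with placeholder sizes, and carries only the fields the handler above actually reads.

```c
#include <stdint.h>
#include <string.h>

/* Hypothetical mirror of the request; not the wire format. */
struct dumpinfo_req_sketch {
	uint32_t type;          /* KDP_DUMPINFO_GETINFO queries; other values reconfigure */
	char     name[50];      /* core file name on the dump server */
	char     destip[16];    /* dotted-quad address of the dump server */
	char     routerip[16];  /* optional router, empty if unused */
	uint32_t port;          /* dump server port */
};

/* A pure query: the handler skips kdp_set_dump_info() for GETINFO and only
 * echoes the current settings back through kdp_get_dump_info(). */
static void fill_getinfo_request(struct dumpinfo_req_sketch *rq, uint32_t kdp_dumpinfo_getinfo)
{
	memset(rq, 0, sizeof(*rq));
	rq->type = kdp_dumpinfo_getinfo;   /* caller supplies the real KDP_DUMPINFO_GETINFO value */
}
```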