/*
 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <sys/appleapiopts.h>
#include <kern/debug.h>
#include <uuid/uuid.h>

#include <kdp/kdp_internal.h>
#include <kdp/kdp_private.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_dyld.h>

#include <libsa/types.h>
#include <libkern/version.h>

#include <string.h> /* bcopy */

#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#define DO_ALIGN    1   /* align all packet data accesses */

#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x)
#endif
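/*
 * Each KDP request code maps to one handler below. The table is indexed by
 * (request - KDP_CONNECT), so its order must match the request codes defined
 * in the KDP protocol headers.
 */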
static kdp_dispatch_t
    dispatch_table[KDP_INVALID_REQUEST-KDP_CONNECT] =
    {
/* 0 */ kdp_connect,
/* 1 */ kdp_disconnect,
/* 2 */ kdp_hostinfo,
/* 3 */ kdp_version,
/* 4 */ kdp_maxbytes,
/* 5 */ kdp_readmem,
/* 6 */ kdp_writemem,
/* 7 */ kdp_readregs,
/* 8 */ kdp_writeregs,
/* 9 */ kdp_unknown,
/* A */ kdp_unknown,
/* B */ kdp_suspend,
/* C */ kdp_resumecpus,
/* D */ kdp_unknown,
/* E */ kdp_unknown,
/* F */ kdp_breakpoint_set,
/*10 */ kdp_breakpoint_remove,
/*11 */ kdp_regions,
/*12 */ kdp_reattach,
/*13 */ kdp_reboot,
/*14 */ kdp_readmem64,
/*15 */ kdp_writemem64,
/*16 */ kdp_breakpoint64_set,
/*17 */ kdp_breakpoint64_remove,
/*18 */ kdp_kernelversion,
/*19 */ kdp_readphysmem64,
/*1A */ kdp_writephysmem64,
/*1B */ kdp_readioport,
/*1C */ kdp_writeioport,
/*1D */ kdp_readmsr64,
/*1E */ kdp_writemsr64,
/*1F */ kdp_dumpinfo,
    };
#define MAX_BREAKPOINTS 100

/*
 * Version 11 of the KDP Protocol adds support for 64-bit wide memory
 * addresses (read/write and breakpoints) as well as a dedicated
 * kernelversion request. Version 12 adds read/writing of physical
 * memory with 64-bit wide memory addresses.
 */
#define KDP_VERSION 12
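/*
 * Software breakpoints are tracked in a fixed-size table: each slot records
 * the target address and the original instruction bytes that were replaced,
 * so the instruction can be restored when the breakpoint is removed.
 */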
typedef struct {
    mach_vm_address_t   address;
    uint32_t            bytesused;
    uint8_t             oldbytes[MAX_BREAKINSN_BYTES];
} kdp_breakpoint_record_t;

static kdp_breakpoint_record_t breakpoint_list[MAX_BREAKPOINTS];
static unsigned int breakpoints_initialized = 0;

int reattach_wait = 0;
int noresume_on_disconnect = 0;
extern unsigned int return_on_panic;
typedef struct thread_snapshot *thread_snapshot_t;
typedef struct task_snapshot *task_snapshot_t;

extern int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
extern int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
extern int
proc_pid(void *p);
extern void
proc_name_kdp(task_t task, char *buf, int size);

extern void
kdp_snapshot_postflight(void);

static int
pid_from_task(task_t task);

kdp_error_t
kdp_set_breakpoint_internal(
    mach_vm_address_t   address
);

kdp_error_t
kdp_remove_breakpoint_internal(
    mach_vm_address_t   address
);

int
kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced);

boolean_t kdp_copyin(pmap_t, uint64_t, void *, size_t);
extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);
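/*
 * kdp_packet() is the single entry point for inbound KDP requests: it
 * validates the header, then dispatches to the handler registered for the
 * request code. Each handler formats its reply in place and reports the
 * reply port to use.
 */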
boolean_t
kdp_packet(
    unsigned char   *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    static unsigned aligned_pkt[1538/sizeof(unsigned)+1]; // max ether pkt
    kdp_pkt_t   *rd = (kdp_pkt_t *)&aligned_pkt;
    size_t      plen = *len;
    kdp_req_t   req;
    boolean_t   ret;

#if DO_ALIGN
    bcopy((char *)pkt, (char *)rd, sizeof(aligned_pkt));
#else
    rd = (kdp_pkt_t *)pkt;
#endif
    if (plen < sizeof (rd->hdr) || rd->hdr.len != plen) {
        printf("kdp_packet bad len pkt %lu hdr %d\n", plen, rd->hdr.len);

        return (FALSE);
    }

    if (rd->hdr.is_reply) {
        printf("kdp_packet reply recvd req %x seq %x\n",
                rd->hdr.request, rd->hdr.seq);

        return (FALSE);
    }

    req = rd->hdr.request;
    if (req >= KDP_INVALID_REQUEST) {
        printf("kdp_packet bad request %x len %d seq %x key %x\n",
                rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);

        return (FALSE);
    }

    ret = ((*dispatch_table[req - KDP_CONNECT])(rd, len, reply_port));
#if DO_ALIGN
    bcopy((char *)rd, (char *) pkt, *len);
#endif
    return ret;
}
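/*
 * Fallback handler for request codes that have a slot in the dispatch table
 * but no implementation; it just logs the offending header and refuses.
 */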
static boolean_t
kdp_unknown(
    kdp_pkt_t       *pkt,
    __unused int    *len,
    __unused unsigned short *reply_port
)
{
    kdp_pkt_t   *rd = (kdp_pkt_t *)pkt;

    printf("kdp_unknown request %x len %d seq %x key %x\n",
            rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);

    return (FALSE);
}
static boolean_t
kdp_connect(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_connect_req_t   *rq = &pkt->connect_req;
    size_t              plen = *len;
    kdp_connect_reply_t *rp = &pkt->connect_reply;
    uint16_t            rport, eport;
    uint32_t            key;
    uint8_t             seq;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_connect seq %x greeting %s\n", rq->hdr.seq, rq->greeting));

    rport = rq->req_reply_port;
    eport = rq->exc_note_port;
    key   = rq->hdr.key;
    seq   = rq->hdr.seq;
    if (kdp.is_conn) {
        if ((seq == kdp.conn_seq) &&    /* duplicate request */
            (rport == kdp.reply_port) &&
            (eport == kdp.exception_port) &&
            (key == kdp.session_key))
            rp->error = KDPERR_NO_ERROR;
        else
            rp->error = KDPERR_ALREADY_CONNECTED;
    }
    else {
        kdp.reply_port     = rport;
        kdp.exception_port = eport;
        kdp.is_conn        = TRUE;
        kdp.conn_seq       = seq;
        kdp.session_key    = key;

        rp->error = KDPERR_NO_ERROR;
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = rport;
    *len = rp->hdr.len;

    if (current_debugger == KDP_CUR_DB)
        active_debugger = 1;

    return (TRUE);
}
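/*
 * Disconnect tears down the session state set up by kdp_connect. On panic,
 * or when resume is suppressed, it sets reattach_wait so the debugger is
 * expected to reattach rather than letting the machine run on.
 */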
static boolean_t
kdp_disconnect(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_disconnect_req_t    *rq = &pkt->disconnect_req;
    size_t                  plen = *len;
    kdp_disconnect_reply_t  *rp = &pkt->disconnect_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    if (!kdp.is_conn)
        return (FALSE);

    dprintf(("kdp_disconnect\n"));

    *reply_port = kdp.reply_port;

    kdp.reply_port = kdp.exception_port = 0;
    kdp.is_halted = kdp.is_conn = FALSE;
    kdp.exception_seq = kdp.conn_seq = 0;
    kdp.session_key = 0;

    if ((panicstr != NULL) && (return_on_panic == 0))
        reattach_wait = 1;

    if (noresume_on_disconnect == 1) {
        reattach_wait = 1;
        noresume_on_disconnect = 0;
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *len = rp->hdr.len;

    if (current_debugger == KDP_CUR_DB)
        active_debugger = 0;

    return (TRUE);
}
static boolean_t
kdp_reattach(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_reattach_req_t  *rq = &pkt->reattach_req;

    kdp.is_conn = TRUE;
    kdp_disconnect(pkt, len, reply_port);
    *reply_port = rq->req_reply_port;
    reattach_wait = 1;
    return (TRUE);
}
static boolean_t
kdp_hostinfo(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_hostinfo_req_t   *rq = &pkt->hostinfo_req;
    size_t               plen = *len;
    kdp_hostinfo_reply_t *rp = &pkt->hostinfo_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_hostinfo\n"));

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    kdp_machine_hostinfo(&rp->hostinfo);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_kernelversion(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_kernelversion_req_t   *rq = &pkt->kernelversion_req;
    size_t                    plen = *len;
    kdp_kernelversion_reply_t *rp = &pkt->kernelversion_reply;
    size_t                    slen;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_kernelversion\n"));
    slen = strlcpy(rp->version, kdp_kernelversion_string, MAX_KDP_DATA_SIZE);

    rp->hdr.len += slen + 1; /* strlcpy returns the amount copied with NUL */

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_suspend(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_suspend_req_t   *rq = &pkt->suspend_req;
    size_t              plen = *len;
    kdp_suspend_reply_t *rp = &pkt->suspend_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_suspend\n"));

    kdp.is_halted = TRUE;

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_resumecpus(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_resumecpus_req_t   *rq = &pkt->resumecpus_req;
    size_t                 plen = *len;
    kdp_resumecpus_reply_t *rp = &pkt->resumecpus_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_resumecpus %x\n", rq->cpu_mask));

    kdp.is_halted = FALSE;

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_writemem(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_writemem_req_t   *rq = &pkt->writemem_req;
    size_t               plen = *len;
    kdp_writemem_reply_t *rp = &pkt->writemem_reply;
    mach_vm_size_t       cnt;

    if (plen < sizeof (*rq))
        return (FALSE);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_writemem addr %x size %d\n", rq->address, rq->nbytes));

        cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, rq->nbytes);
        rp->error = KDPERR_NO_ERROR;
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_writemem64(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_writemem64_req_t   *rq = &pkt->writemem64_req;
    size_t                 plen = *len;
    kdp_writemem64_reply_t *rp = &pkt->writemem64_reply;
    mach_vm_size_t         cnt;

    if (plen < sizeof (*rq))
        return (FALSE);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_writemem64 addr %llx size %d\n", rq->address, rq->nbytes));

        cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, (mach_vm_size_t)rq->nbytes);
        rp->error = KDPERR_NO_ERROR;
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_writephysmem64(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_writephysmem64_req_t   *rq = &pkt->writephysmem64_req;
    size_t                     plen = *len;
    kdp_writephysmem64_reply_t *rp = &pkt->writephysmem64_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_writephysmem64 addr %llx size %d\n", rq->address, rq->nbytes));
        kdp_machine_phys_write(rq, rq->data, rq->lcpu);
        rp->error = KDPERR_NO_ERROR;
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_readmem(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_readmem_req_t   *rq = &pkt->readmem_req;
    size_t              plen = *len;
    kdp_readmem_reply_t *rp = &pkt->readmem_reply;
    mach_vm_size_t      cnt;
    void                *pversion = &kdp_kernelversion_string;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        unsigned int n = rq->nbytes;

        dprintf(("kdp_readmem addr %x size %d\n", rq->address, n));

        /* XXX This is a hack to facilitate the "showversion" macro
         * on i386, which is used to obtain the kernel version without
         * symbols - a pointer to the version string should eventually
         * be pinned at a fixed address when an equivalent of the
         * VECTORS segment (loaded at a fixed load address, and contains
         * a table) is implemented on these architectures, as with PPC.
         * N.B.: x86 now has a low global page, and the version indirection
         * is pinned at 0x201C. We retain the 0x501C address override
         * for compatibility. Future architectures should instead use
         * the KDP_KERNELVERSION request.
         */
        if (rq->address == 0x501C)
            rq->address = (uintptr_t)&pversion;

        cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, n);
        rp->error = KDPERR_NO_ERROR;

        rp->hdr.len += cnt;
    }

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_readmem64(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_readmem64_req_t   *rq = &pkt->readmem64_req;
    size_t                plen = *len;
    kdp_readmem64_reply_t *rp = &pkt->readmem64_reply;
    mach_vm_size_t        cnt;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_readmem64 addr %llx size %d\n", rq->address, rq->nbytes));

        cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes);
        rp->error = KDPERR_NO_ERROR;

        rp->hdr.len += cnt;
    }

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_readphysmem64(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_readphysmem64_req_t   *rq = &pkt->readphysmem64_req;
    size_t                    plen = *len;
    kdp_readphysmem64_reply_t *rp = &pkt->readphysmem64_reply;
    int                       cnt;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_readphysmem64 addr %llx size %d\n", rq->address, rq->nbytes));

        cnt = (int)kdp_machine_phys_read(rq, rp->data, rq->lcpu);
        rp->error = KDPERR_NO_ERROR;

        rp->hdr.len += cnt;
    }

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_maxbytes(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_maxbytes_req_t   *rq = &pkt->maxbytes_req;
    size_t               plen = *len;
    kdp_maxbytes_reply_t *rp = &pkt->maxbytes_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_maxbytes\n"));

    rp->max_bytes = MAX_KDP_DATA_SIZE;

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_version(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_version_req_t   *rq = &pkt->version_req;
    size_t              plen = *len;
    kdp_version_reply_t *rp = &pkt->version_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_version\n"));

    rp->version = KDP_VERSION;
    if (!(kdp_flag & KDP_BP_DIS))
        rp->feature = KDP_FEATURE_BP;
    else
        rp->feature = 0;

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_regions(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_regions_req_t   *rq = &pkt->regions_req;
    size_t              plen = *len;
    kdp_regions_reply_t *rp = &pkt->regions_reply;
    kdp_region_t        *r;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_regions\n"));

    r = rp->regions;
    rp->nregions = 0;

    r->address = 0;
    r->nbytes = 0xffffffff;

    r->protection = VM_PROT_ALL; r++; rp->nregions++;

    rp->hdr.len += rp->nregions * sizeof (kdp_region_t);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_writeregs(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_writeregs_req_t   *rq = &pkt->writeregs_req;
    size_t                plen = *len;
    int                   size;
    kdp_writeregs_reply_t *rp = &pkt->writeregs_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    size = rq->hdr.len - (unsigned)sizeof(kdp_hdr_t) - (unsigned)sizeof(unsigned int);
    rp->error = kdp_machine_write_regs(rq->cpu, rq->flavor, rq->data, &size);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_readregs(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_readregs_req_t   *rq = &pkt->readregs_req;
    size_t               plen = *len;
    kdp_readregs_reply_t *rp = &pkt->readregs_reply;
    int                  size;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    rp->error = kdp_machine_read_regs(rq->cpu, rq->flavor, rp->data, &size);
    rp->hdr.len += size;

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_breakpoint_set(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_breakpoint_req_t   *rq = &pkt->breakpoint_req;
    kdp_breakpoint_reply_t *rp = &pkt->breakpoint_reply;
    size_t                 plen = *len;
    kdp_error_t            kerr;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_breakpoint_set %x\n", rq->address));

    kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address);

    rp->error = kerr;

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);
    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_breakpoint64_set(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_breakpoint64_req_t   *rq = &pkt->breakpoint64_req;
    kdp_breakpoint64_reply_t *rp = &pkt->breakpoint64_reply;
    size_t                   plen = *len;
    kdp_error_t              kerr;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_breakpoint64_set %llx\n", rq->address));

    kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address);

    rp->error = kerr;

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);
    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_breakpoint_remove(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_breakpoint_req_t   *rq = &pkt->breakpoint_req;
    kdp_breakpoint_reply_t *rp = &pkt->breakpoint_reply;
    size_t                 plen = *len;
    kdp_error_t            kerr;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_breakpoint_remove %x\n", rq->address));

    kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address);

    rp->error = kerr;

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);
    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_breakpoint64_remove(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_breakpoint64_req_t   *rq = &pkt->breakpoint64_req;
    kdp_breakpoint64_reply_t *rp = &pkt->breakpoint64_reply;
    size_t                   plen = *len;
    kdp_error_t              kerr;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_breakpoint64_remove %llx\n", rq->address));

    kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address);

    rp->error = kerr;

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);
    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
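/*
 * Common breakpoint-set path for the 32- and 64-bit requests: save the
 * original instruction bytes in a free breakpoint_list slot, then patch the
 * target address with the architecture's breakpoint instruction.
 */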
kdp_error_t
kdp_set_breakpoint_internal(
    mach_vm_address_t   address
)
{
    uint8_t         breakinstr[MAX_BREAKINSN_BYTES], oldinstr[MAX_BREAKINSN_BYTES];
    uint32_t        breakinstrsize = sizeof(breakinstr);
    mach_vm_size_t  cnt;
    int             i;

    kdp_machine_get_breakinsn(breakinstr, &breakinstrsize);

    if(breakpoints_initialized == 0)
    {
        for(i=0;(i < MAX_BREAKPOINTS); breakpoint_list[i].address=0, i++);
        breakpoints_initialized++;
    }

    cnt = kdp_machine_vm_read(address, (caddr_t)&oldinstr, (mach_vm_size_t)breakinstrsize);

    if (0 == memcmp(oldinstr, breakinstr, breakinstrsize)) {
        printf("A trap was already set at that address, not setting new breakpoint\n");

        return KDPERR_BREAKPOINT_ALREADY_SET;
    }

    for(i=0;(i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != 0); i++);

    if (i == MAX_BREAKPOINTS) {
        return KDPERR_MAX_BREAKPOINTS;
    }

    breakpoint_list[i].address = address;
    memcpy(breakpoint_list[i].oldbytes, oldinstr, breakinstrsize);
    breakpoint_list[i].bytesused = breakinstrsize;

    cnt = kdp_machine_vm_write((caddr_t)&breakinstr, address, breakinstrsize);

    return KDPERR_NO_ERROR;
}
kdp_error_t
kdp_remove_breakpoint_internal(
    mach_vm_address_t   address
)
{
    mach_vm_size_t  cnt;
    int             i;

    for(i=0;(i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != address); i++);

    if (i == MAX_BREAKPOINTS)
    {
        return KDPERR_BREAKPOINT_NOT_FOUND;
    }

    breakpoint_list[i].address = 0;
    cnt = kdp_machine_vm_write((caddr_t)&breakpoint_list[i].oldbytes, address, breakpoint_list[i].bytesused);

    return KDPERR_NO_ERROR;
}
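/*
 * Restores the saved instruction bytes for every breakpoint still armed, so
 * no patched instructions are left behind once the debugger goes away.
 */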
boolean_t
kdp_remove_all_breakpoints(void)
{
    int i;
    boolean_t breakpoint_found = FALSE;

    if (breakpoints_initialized)
    {
        for(i=0;i < MAX_BREAKPOINTS; i++)
        {
            if (breakpoint_list[i].address)
            {
                kdp_machine_vm_write((caddr_t)&(breakpoint_list[i].oldbytes), (mach_vm_address_t)breakpoint_list[i].address, (mach_vm_size_t)breakpoint_list[i].bytesused);
                breakpoint_found = TRUE;
                breakpoint_list[i].address = 0;
            }
        }

        if (breakpoint_found)
            printf("kdp_remove_all_breakpoints: found extant breakpoints, removing them.\n");
    }
    return breakpoint_found;
}
static boolean_t
kdp_reboot(
    __unused kdp_pkt_t *pkt,
    __unused int    *len,
    __unused unsigned short *reply_port
)
{
    dprintf(("kdp_reboot\n"));

    kdp_machine_reboot();

    return (TRUE); // no, not really, we won't return
}
#define MAX_FRAMES 1000

static int pid_from_task(task_t task)
{
    int pid = -1;

    if (task->bsd_info)
        pid = proc_pid(task->bsd_info);

    return pid;
}
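/*
 * Copy user memory by physical address, one page-bounded fragment at a time,
 * so the copy cannot fault while the rest of the machine is stopped in the
 * debugger.
 */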
boolean_t
kdp_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size) {
    size_t rem = size;
    char *kvaddr = dest;

    while (rem) {
        ppnum_t upn = pmap_find_phys(p, uaddr);
        uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
        uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
        uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
        uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
        size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
        cur_size = MIN(cur_size, rem);

        if (upn && pmap_valid_page(upn) && phys_dest) {
            bcopy_phys(phys_src, phys_dest, cur_size);
        }
        else
            break;
        uaddr += cur_size;
        kvaddr += cur_size;
        rem -= cur_size;
    }
    return (rem == 0);
}
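/*
 * Fills in the global memory counters reported at the head of a stackshot
 * when STACKSHOT_GET_GLOBAL_MEM_STATS is requested.
 */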
static void
kdp_mem_snapshot(struct mem_snapshot *mem_snap)
{
    mem_snap->snapshot_magic = STACKSHOT_MEM_SNAPSHOT_MAGIC;
    mem_snap->free_pages = vm_page_free_count;
    mem_snap->active_pages = vm_page_active_count;
    mem_snap->inactive_pages = vm_page_inactive_count;
    mem_snap->purgeable_pages = vm_page_purgeable_count;
    mem_snap->wired_pages = vm_page_wire_count;
    mem_snap->speculative_pages = vm_page_speculative_count;
    mem_snap->throttled_pages = vm_page_throttled_count;
}
/*
 * Method for grabbing timer values safely, in the sense that no infinite loop will occur.
 * Certain flavors of the timer_grab function, which would seem to be the thing to use,
 * can loop infinitely if called while the timer is in the process of being updated.
 * Unfortunately, it is (rarely) possible to get inconsistent top and bottom halves of
 * the timer using this method. This seems insoluble, since stackshot runs in a context
 * where the timer might be half-updated, and has no way of yielding control just long
 * enough to finish the update.
 */
static uint64_t safe_grab_timer_value(struct timer *t)
{
#if defined(__LP64__)
    return t->all_bits;
#else
    uint64_t time = t->high_bits;    /* endian independent grab */
    time = (time << 32) | t->low_bits;
    return time;
#endif
}
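/*
 * Walks the active and terminated task lists, emitting a task_snapshot for
 * each matching task followed by a thread_snapshot and kernel/user stack
 * trace for each of its threads; all records are packed back to back into
 * tracebuf. Uses ml_nofault_copy()/kdp_copyin() throughout because the rest
 * of the system is frozen while this runs.
 */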
int
kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced)
{
    char *tracepos = (char *) tracebuf;
    char *tracebound = tracepos + tracebuf_size;
    uint32_t tracebytes = 0;
    int error = 0;

    task_t task = TASK_NULL;
    thread_t thread = THREAD_NULL;
    thread_snapshot_t tsnap = NULL;
    unsigned framesize = 2 * sizeof(vm_offset_t);
    struct task ctask;
    struct thread cthread;
    struct _vm_map cmap;
    struct pmap cpmap;

    queue_head_t *task_list = &tasks;
    boolean_t is_active_list = TRUE;

    boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0);
    boolean_t save_loadinfo_p = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0);

    if(trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) {
        if(tracepos + sizeof(struct mem_snapshot) > tracebound) {
            error = -1;
            goto error_exit;
        }
        kdp_mem_snapshot((struct mem_snapshot *)tracepos);
        tracepos += sizeof(struct mem_snapshot);
    }

walk_list:
    queue_iterate(task_list, task, task_t, tasks) {
        if ((task == NULL) || (ml_nofault_copy((vm_offset_t) task, (vm_offset_t) &ctask, sizeof(struct task)) != sizeof(struct task)))
            goto error_exit;

        int task_pid = pid_from_task(task);
        boolean_t task64 = task_has_64BitAddr(task);

        if (!task->active) {
            /*
             * Not interested in terminated tasks without threads, and
             * at the moment, stackshot can't handle a task without a name.
             */
            if (queue_empty(&task->threads) || task_pid == -1) {
                continue;
            }
        }

        /* Trace everything, unless a process was specified */
        if ((pid == -1) || (pid == task_pid)) {
            task_snapshot_t task_snap;
            uint32_t uuid_info_count = 0;
            mach_vm_address_t uuid_info_addr = 0;
            boolean_t have_map = (task->map != NULL) &&
                (ml_nofault_copy((vm_offset_t)(task->map), (vm_offset_t)&cmap, sizeof(struct _vm_map)) == sizeof(struct _vm_map));
            boolean_t have_pmap = have_map && (cmap.pmap != NULL) &&
                (ml_nofault_copy((vm_offset_t)(cmap.pmap), (vm_offset_t)&cpmap, sizeof(struct pmap)) == sizeof(struct pmap));

            if (have_pmap && task->active && save_loadinfo_p && task_pid > 0) {
                // Read the dyld_all_image_infos struct from the task memory to get UUID array count and location
                if (task64) {
                    struct dyld_all_image_infos64 task_image_infos;
                    if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct dyld_all_image_infos64))) {
                        uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
                        uuid_info_addr = task_image_infos.uuidArray;
                    }
                } else {
                    struct dyld_all_image_infos task_image_infos;
                    if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct dyld_all_image_infos))) {
                        uuid_info_count = task_image_infos.uuidArrayCount;
                        uuid_info_addr = task_image_infos.uuidArray;
                    }
                }

                // If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
                // this data structure), we zero the uuid_info_count so that we won't even try to save load info
                // for this task.
                if (!uuid_info_addr) {
                    uuid_info_count = 0;
                }
            }

            if (tracepos + sizeof(struct task_snapshot) > tracebound) {
                error = -1;
                goto error_exit;
            }

            task_snap = (task_snapshot_t) tracepos;
            task_snap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
            task_snap->pid = task_pid;
            task_snap->nloadinfos = uuid_info_count;
            /* Add the BSD process identifiers */
            if (task_pid != -1)
                proc_name_kdp(task, task_snap->p_comm, sizeof(task_snap->p_comm));
            else
                task_snap->p_comm[0] = '\0';
            task_snap->ss_flags = 0;
            if (task64)
                task_snap->ss_flags |= kUser64_p;
            if (!task->active)
                task_snap->ss_flags |= kTerminatedSnapshot;

            task_snap->suspend_count = task->suspend_count;
            task_snap->task_size = have_pmap ? pmap_resident_count(task->map->pmap) : 0;
            task_snap->faults = task->faults;
            task_snap->pageins = task->pageins;
            task_snap->cow_faults = task->cow_faults;

            task_snap->user_time_in_terminated_threads = task->total_user_time;
            task_snap->system_time_in_terminated_threads = task->total_system_time;
            tracepos += sizeof(struct task_snapshot);

            if (task_pid > 0 && uuid_info_count > 0) {
                uint32_t uuid_info_size = (uint32_t)(task64 ? sizeof(struct dyld_uuid_info64) : sizeof(struct dyld_uuid_info));
                uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;

                if (tracepos + uuid_info_array_size > tracebound) {
                    error = -1;
                    goto error_exit;
                }

                // Copy in the UUID info array
                // It may be nonresident, in which case just fix up nloadinfos to 0 in the task_snap
                if (have_pmap && !kdp_copyin(task->map->pmap, uuid_info_addr, tracepos, uuid_info_array_size))
                    task_snap->nloadinfos = 0;
                else
                    tracepos += uuid_info_array_size;
            }

            queue_iterate(&task->threads, thread, thread_t, task_threads){
                if ((thread == NULL) || (ml_nofault_copy((vm_offset_t) thread, (vm_offset_t) &cthread, sizeof(struct thread)) != sizeof(struct thread)))
                    goto error_exit;

                if (((tracepos + 4 * sizeof(struct thread_snapshot)) > tracebound)) {
                    error = -1;
                    goto error_exit;
                }
                /* Populate the thread snapshot header */
                tsnap = (thread_snapshot_t) tracepos;
                tsnap->thread_id = thread_tid(thread);
                tsnap->state = thread->state;
                tsnap->wait_event = thread->wait_event;
                tsnap->continuation = (uint64_t) (uintptr_t) thread->continuation;
                tsnap->user_time = safe_grab_timer_value(&thread->user_timer);
                tsnap->system_time = safe_grab_timer_value(&thread->system_timer);
                tsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
                tracepos += sizeof(struct thread_snapshot);
                tsnap->ss_flags = 0;

                if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) {
                    uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
                    if (dqkeyaddr != 0) {
                        uint64_t dqaddr = 0;
                        if (kdp_copyin(task->map->pmap, dqkeyaddr, &dqaddr, (task64 ? 8 : 4)) && (dqaddr != 0)) {
                            uint64_t dqserialnumaddr = dqaddr + dispatch_offset;
                            uint64_t dqserialnum = 0;
                            if (kdp_copyin(task->map->pmap, dqserialnumaddr, &dqserialnum, (task64 ? 8 : 4))) {
                                tsnap->ss_flags |= kHasDispatchSerial;
                                *(uint64_t *)tracepos = dqserialnum;
                                tracepos += 8;
                            }
                        }
                    }
                }

                /* Call through to the machine specific trace routines
                 * Frames are added past the snapshot header.
                 */
                if (thread->kernel_stack != 0) {
#if defined(__LP64__)
                    tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
                    tsnap->ss_flags |= kKernel64_p;
                    framesize = 16;
#else
                    tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
                    framesize = 8;
#endif
                }
                tsnap->nkern_frames = tracebytes/framesize;
                tracepos += tracebytes;
                tracebytes = 0;

                /* Trace user stack, if any */
                if (task->active && thread->task->map != kernel_map) {
                    /* 64-bit task? */
                    if (task_has_64BitAddr(thread->task)) {
                        tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
                        tsnap->ss_flags |= kUser64_p;
                        framesize = 16;
                    }
                    else {
                        tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
                        framesize = 8;
                    }
                }
                tsnap->nuser_frames = tracebytes/framesize;
                tracepos += tracebytes;
                tracebytes = 0;
            }
        }
    }

    if (is_active_list) {
        is_active_list = FALSE;
        task_list = &terminated_tasks;
        goto walk_list;
    }

error_exit:
    /* Release stack snapshot wait indicator */
    kdp_snapshot_postflight();

    *pbytesTraced = (uint32_t)(tracepos - (char *) tracebuf);

    return error;
}
static boolean_t
kdp_readioport(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_readioport_req_t   *rq = &pkt->readioport_req;
    kdp_readioport_reply_t *rp = &pkt->readioport_reply;
    size_t                 plen = *len;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
#if KDP_TEST_HARNESS
        uint16_t addr = rq->address;
#endif
        uint16_t size = rq->nbytes;
        dprintf(("kdp_readioport addr %x size %d\n", addr, size));

        rp->error = kdp_machine_ioport_read(rq, rp->data, rq->lcpu);
        if (rp->error == KDPERR_NO_ERROR)
            rp->hdr.len += size;
    }

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_writeioport(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_writeioport_req_t   *rq = &pkt->writeioport_req;
    kdp_writeioport_reply_t *rp = &pkt->writeioport_reply;
    size_t                  plen = *len;

    if (plen < sizeof (*rq))
        return (FALSE);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_writeioport addr %x size %d\n", rq->address,
            rq->nbytes));

        rp->error = kdp_machine_ioport_write(rq, rq->data, rq->lcpu);
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_readmsr64(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_readmsr64_req_t   *rq = &pkt->readmsr64_req;
    kdp_readmsr64_reply_t *rp = &pkt->readmsr64_reply;
    size_t                plen = *len;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_readmsr64 lcpu %x addr %x\n", rq->lcpu, rq->address));

    rp->error = kdp_machine_msr64_read(rq, rp->data, rq->lcpu);
    if (rp->error == KDPERR_NO_ERROR)
        rp->hdr.len += sizeof(uint64_t);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
static boolean_t
kdp_writemsr64(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_writemsr64_req_t   *rq = &pkt->writemsr64_req;
    kdp_writemsr64_reply_t *rp = &pkt->writemsr64_reply;
    size_t                 plen = *len;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_writemsr64 lcpu %x addr %x\n", rq->lcpu, rq->address));

    rp->error = kdp_machine_msr64_write(rq, rq->data, rq->lcpu);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
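/*
 * Queries or updates the core-dump settings (dump file name, destination and
 * router IP) used by the KDP core-dump transport, depending on the request
 * type.
 */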
static boolean_t
kdp_dumpinfo(
    kdp_pkt_t       *pkt,
    int             *len,
    unsigned short  *reply_port
)
{
    kdp_dumpinfo_req_t   *rq = &pkt->dumpinfo_req;
    kdp_dumpinfo_reply_t *rp = &pkt->dumpinfo_reply;
    size_t               plen = *len;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_dumpinfo file=%s destip=%s routerip=%s\n", rq->name, rq->destip, rq->routerip));
    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    if ((rq->type & KDP_DUMPINFO_MASK) != KDP_DUMPINFO_GETINFO) {
        kdp_set_dump_info(rq->type, rq->name, rq->destip, rq->routerip,
                            rq->port);
    }

    /* gather some stats for reply */
    kdp_get_dump_info(&rp->type, rp->name, rp->destip, rp->routerip,
                        &rp->port);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}