2 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <mach/mach_types.h>
30 #include <mach/vm_param.h>
31 #include <sys/appleapiopts.h>
32 #include <kern/debug.h>
33 #include <uuid/uuid.h>
35 #include <kdp/kdp_internal.h>
36 #include <kdp/kdp_private.h>
37 #include <kdp/kdp_core.h>
38 #include <kdp/kdp_dyld.h>
40 #include <libsa/types.h>
41 #include <libkern/version.h>
43 #include <string.h> /* bcopy */
45 #include <kern/processor.h>
46 #include <kern/thread.h>
47 #include <kern/clock.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_kern.h>
50 #include <vm/vm_pageout.h>
/* Must track with the declaration in bsd/sys/buf_internal.h. */
extern int count_busy_buffers(void);

#define DO_ALIGN    1   /* align all packet data accesses */

#define KDP_TEST_HARNESS 0

/*
 * NOTE(review): reconstructed from a garbled extraction. Upstream wraps this
 * in `#if KDP_TEST_HARNESS` and defines dprintf(x) to nothing when the
 * harness is off; the guard lines are not visible here -- confirm against
 * the original file before relying on dprintf being compiled in.
 */
#define dprintf(x) kprintf x
/*
 * KDP request dispatch table: one handler per request code, indexed by
 * (request - KDP_CONNECT).  kdp_packet() bounds-checks the request code
 * against KDP_INVALID_REQUEST before indexing.
 *
 * NOTE(review): this extraction is garbled -- the declaration line
 * ("static kdp_dispatch_t dispatch_table[...]") and the entries for
 * indices 0, 2-7, 9-B, D-E and 11-13 are missing from this view.
 * Do not edit this table without consulting the upstream file.
 */
64 dispatch_table
[KDP_INVALID_REQUEST
-KDP_CONNECT
] =
/* entry 0 is missing from this extraction */
67 /* 1 */ kdp_disconnect
,
/* entries 2-7 are missing from this extraction */
74 /* 8 */ kdp_writeregs
,
/* entries 9-B are missing from this extraction */
78 /* C */ kdp_resumecpus
,
/* entries D-E are missing from this extraction */
81 /* F */ kdp_breakpoint_set
,
82 /*10 */ kdp_breakpoint_remove
,
/* entries 11-13 are missing from this extraction */
86 /*14 */ kdp_readmem64
,
87 /*15 */ kdp_writemem64
,
88 /*16 */ kdp_breakpoint64_set
,
89 /*17 */ kdp_breakpoint64_remove
,
90 /*18 */ kdp_kernelversion
,
91 /*19 */ kdp_readphysmem64
,
92 /*1A */ kdp_writephysmem64
,
93 /*1B */ kdp_readioport
,
94 /*1C */ kdp_writeioport
,
95 /*1D */ kdp_readmsr64
,
96 /*1E */ kdp_writemsr64
,
#define MAX_BREAKPOINTS 100

/*
 * Version 11 of the KDP Protocol adds support for 64-bit wide memory
 * addresses (read/write and breakpoints) as well as a dedicated
 * kernelversion request. Version 12 adds read/writing of physical
 * memory with 64-bit wide memory addresses.
 */
#define KDP_VERSION 12
113 mach_vm_address_t address
;
115 uint8_t oldbytes
[MAX_BREAKINSN_BYTES
];
116 } kdp_breakpoint_record_t
;
118 static kdp_breakpoint_record_t breakpoint_list
[MAX_BREAKPOINTS
];
119 static unsigned int breakpoints_initialized
= 0;
121 int reattach_wait
= 0;
122 int noresume_on_disconnect
= 0;
123 extern unsigned int return_on_panic
;
125 typedef struct thread_snapshot
*thread_snapshot_t
;
126 typedef struct task_snapshot
*task_snapshot_t
;
129 machine_trace_thread(thread_t thread
, char *tracepos
, char *tracebound
, int nframes
, boolean_t user_p
);
131 machine_trace_thread64(thread_t thread
, char *tracepos
, char *tracebound
, int nframes
, boolean_t user_p
);
135 proc_name_kdp(task_t task
, char *buf
, int size
);
138 kdp_snapshot_postflight(void);
141 pid_from_task(task_t task
);
144 kdp_set_breakpoint_internal(
145 mach_vm_address_t address
149 kdp_remove_breakpoint_internal(
150 mach_vm_address_t address
155 kdp_stackshot(int pid
, void *tracebuf
, uint32_t tracebuf_size
, uint32_t trace_flags
, uint32_t dispatch_offset
, uint32_t *pbytesTraced
);
157 boolean_t
kdp_copyin(pmap_t
, uint64_t, void *, size_t);
158 extern void bcopy_phys(addr64_t
, addr64_t
, vm_size_t
);
164 unsigned short *reply_port
167 static unsigned aligned_pkt
[1538/sizeof(unsigned)+1]; // max ether pkt
168 kdp_pkt_t
*rd
= (kdp_pkt_t
*)&aligned_pkt
;
174 bcopy((char *)pkt
, (char *)rd
, sizeof(aligned_pkt
));
176 rd
= (kdp_pkt_t
*)pkt
;
178 if (plen
< sizeof (rd
->hdr
) || rd
->hdr
.len
!= plen
) {
179 printf("kdp_packet bad len pkt %lu hdr %d\n", plen
, rd
->hdr
.len
);
184 if (rd
->hdr
.is_reply
) {
185 printf("kdp_packet reply recvd req %x seq %x\n",
186 rd
->hdr
.request
, rd
->hdr
.seq
);
191 req
= rd
->hdr
.request
;
192 if (req
>= KDP_INVALID_REQUEST
) {
193 printf("kdp_packet bad request %x len %d seq %x key %x\n",
194 rd
->hdr
.request
, rd
->hdr
.len
, rd
->hdr
.seq
, rd
->hdr
.key
);
199 ret
= ((*dispatch_table
[req
- KDP_CONNECT
])(rd
, len
, reply_port
));
201 bcopy((char *)rd
, (char *) pkt
, *len
);
210 __unused
unsigned short *reply_port
213 kdp_pkt_t
*rd
= (kdp_pkt_t
*)pkt
;
215 printf("kdp_unknown request %x len %d seq %x key %x\n",
216 rd
->hdr
.request
, rd
->hdr
.len
, rd
->hdr
.seq
, rd
->hdr
.key
);
225 unsigned short *reply_port
228 kdp_connect_req_t
*rq
= &pkt
->connect_req
;
230 kdp_connect_reply_t
*rp
= &pkt
->connect_reply
;
231 uint16_t rport
, eport
;
235 if (plen
< sizeof (*rq
))
238 dprintf(("kdp_connect seq %x greeting %s\n", rq
->hdr
.seq
, rq
->greeting
));
240 rport
= rq
->req_reply_port
;
241 eport
= rq
->exc_note_port
;
245 if ((seq
== kdp
.conn_seq
) && /* duplicate request */
246 (rport
== kdp
.reply_port
) &&
247 (eport
== kdp
.exception_port
) &&
248 (key
== kdp
.session_key
))
249 rp
->error
= KDPERR_NO_ERROR
;
251 rp
->error
= KDPERR_ALREADY_CONNECTED
;
254 kdp
.reply_port
= rport
;
255 kdp
.exception_port
= eport
;
258 kdp
.session_key
= key
;
260 rp
->error
= KDPERR_NO_ERROR
;
263 rp
->hdr
.is_reply
= 1;
264 rp
->hdr
.len
= sizeof (*rp
);
269 if (current_debugger
== KDP_CUR_DB
)
279 unsigned short *reply_port
282 kdp_disconnect_req_t
*rq
= &pkt
->disconnect_req
;
284 kdp_disconnect_reply_t
*rp
= &pkt
->disconnect_reply
;
286 if (plen
< sizeof (*rq
))
292 dprintf(("kdp_disconnect\n"));
294 *reply_port
= kdp
.reply_port
;
296 kdp
.reply_port
= kdp
.exception_port
= 0;
297 kdp
.is_halted
= kdp
.is_conn
= FALSE
;
298 kdp
.exception_seq
= kdp
.conn_seq
= 0;
301 if ((panicstr
!= NULL
) && (return_on_panic
== 0))
304 if (noresume_on_disconnect
== 1) {
306 noresume_on_disconnect
= 0;
309 rp
->hdr
.is_reply
= 1;
310 rp
->hdr
.len
= sizeof (*rp
);
314 if (current_debugger
== KDP_CUR_DB
)
324 unsigned short *reply_port
327 kdp_reattach_req_t
*rq
= &pkt
->reattach_req
;
330 kdp_disconnect(pkt
, len
, reply_port
);
331 *reply_port
= rq
->req_reply_port
;
340 unsigned short *reply_port
343 kdp_hostinfo_req_t
*rq
= &pkt
->hostinfo_req
;
345 kdp_hostinfo_reply_t
*rp
= &pkt
->hostinfo_reply
;
347 if (plen
< sizeof (*rq
))
350 dprintf(("kdp_hostinfo\n"));
352 rp
->hdr
.is_reply
= 1;
353 rp
->hdr
.len
= sizeof (*rp
);
355 kdp_machine_hostinfo(&rp
->hostinfo
);
357 *reply_port
= kdp
.reply_port
;
367 unsigned short *reply_port
370 kdp_kernelversion_req_t
*rq
= &pkt
->kernelversion_req
;
372 kdp_kernelversion_reply_t
*rp
= &pkt
->kernelversion_reply
;
375 if (plen
< sizeof (*rq
))
378 rp
->hdr
.is_reply
= 1;
379 rp
->hdr
.len
= sizeof (*rp
);
381 dprintf(("kdp_kernelversion\n"));
382 slen
= strlcpy(rp
->version
, kdp_kernelversion_string
, MAX_KDP_DATA_SIZE
);
384 rp
->hdr
.len
+= slen
+ 1; /* strlcpy returns the amount copied with NUL */
386 *reply_port
= kdp
.reply_port
;
396 unsigned short *reply_port
399 kdp_suspend_req_t
*rq
= &pkt
->suspend_req
;
401 kdp_suspend_reply_t
*rp
= &pkt
->suspend_reply
;
403 if (plen
< sizeof (*rq
))
406 rp
->hdr
.is_reply
= 1;
407 rp
->hdr
.len
= sizeof (*rp
);
409 dprintf(("kdp_suspend\n"));
411 kdp
.is_halted
= TRUE
;
413 *reply_port
= kdp
.reply_port
;
423 unsigned short *reply_port
426 kdp_resumecpus_req_t
*rq
= &pkt
->resumecpus_req
;
428 kdp_resumecpus_reply_t
*rp
= &pkt
->resumecpus_reply
;
430 if (plen
< sizeof (*rq
))
433 rp
->hdr
.is_reply
= 1;
434 rp
->hdr
.len
= sizeof (*rp
);
436 dprintf(("kdp_resumecpus %x\n", rq
->cpu_mask
));
438 kdp
.is_halted
= FALSE
;
440 *reply_port
= kdp
.reply_port
;
450 unsigned short *reply_port
453 kdp_writemem_req_t
*rq
= &pkt
->writemem_req
;
455 kdp_writemem_reply_t
*rp
= &pkt
->writemem_reply
;
458 if (plen
< sizeof (*rq
))
461 if (rq
->nbytes
> MAX_KDP_DATA_SIZE
)
462 rp
->error
= KDPERR_BAD_NBYTES
;
464 dprintf(("kdp_writemem addr %x size %d\n", rq
->address
, rq
->nbytes
));
466 cnt
= kdp_machine_vm_write((caddr_t
)rq
->data
, (mach_vm_address_t
)rq
->address
, rq
->nbytes
);
467 rp
->error
= KDPERR_NO_ERROR
;
470 rp
->hdr
.is_reply
= 1;
471 rp
->hdr
.len
= sizeof (*rp
);
473 *reply_port
= kdp
.reply_port
;
483 unsigned short *reply_port
486 kdp_writemem64_req_t
*rq
= &pkt
->writemem64_req
;
488 kdp_writemem64_reply_t
*rp
= &pkt
->writemem64_reply
;
491 if (plen
< sizeof (*rq
))
494 if (rq
->nbytes
> MAX_KDP_DATA_SIZE
)
495 rp
->error
= KDPERR_BAD_NBYTES
;
497 dprintf(("kdp_writemem64 addr %llx size %d\n", rq
->address
, rq
->nbytes
));
499 cnt
= kdp_machine_vm_write((caddr_t
)rq
->data
, (mach_vm_address_t
)rq
->address
, (mach_vm_size_t
)rq
->nbytes
);
500 rp
->error
= KDPERR_NO_ERROR
;
503 rp
->hdr
.is_reply
= 1;
504 rp
->hdr
.len
= sizeof (*rp
);
506 *reply_port
= kdp
.reply_port
;
516 unsigned short *reply_port
519 kdp_writephysmem64_req_t
*rq
= &pkt
->writephysmem64_req
;
521 kdp_writephysmem64_reply_t
*rp
= &pkt
->writephysmem64_reply
;
523 if (plen
< sizeof (*rq
))
526 if (rq
->nbytes
> MAX_KDP_DATA_SIZE
)
527 rp
->error
= KDPERR_BAD_NBYTES
;
529 dprintf(("kdp_writephysmem64 addr %llx size %d\n", rq
->address
, rq
->nbytes
));
530 kdp_machine_phys_write(rq
, rq
->data
, rq
->lcpu
);
531 rp
->error
= KDPERR_NO_ERROR
;
534 rp
->hdr
.is_reply
= 1;
535 rp
->hdr
.len
= sizeof (*rp
);
537 *reply_port
= kdp
.reply_port
;
547 unsigned short *reply_port
550 kdp_readmem_req_t
*rq
= &pkt
->readmem_req
;
552 kdp_readmem_reply_t
*rp
= &pkt
->readmem_reply
;
555 void *pversion
= &kdp_kernelversion_string
;
558 if (plen
< sizeof (*rq
))
561 rp
->hdr
.is_reply
= 1;
562 rp
->hdr
.len
= sizeof (*rp
);
564 if (rq
->nbytes
> MAX_KDP_DATA_SIZE
)
565 rp
->error
= KDPERR_BAD_NBYTES
;
567 unsigned int n
= rq
->nbytes
;
569 dprintf(("kdp_readmem addr %x size %d\n", rq
->address
, n
));
571 /* XXX This is a hack to facilitate the "showversion" macro
572 * on i386, which is used to obtain the kernel version without
573 * symbols - a pointer to the version string should eventually
574 * be pinned at a fixed address when an equivalent of the
575 * VECTORS segment (loaded at a fixed load address, and contains
576 * a table) is implemented on these architectures, as with PPC.
577 * N.B.: x86 now has a low global page, and the version indirection
578 * is pinned at 0x201C. We retain the 0x501C address override
579 * for compatibility. Future architectures should instead use
580 * the KDP_KERNELVERSION request.
582 if (rq
->address
== 0x501C)
583 rq
->address
= (uintptr_t)&pversion
;
585 cnt
= kdp_machine_vm_read((mach_vm_address_t
)rq
->address
, (caddr_t
)rp
->data
, n
);
586 rp
->error
= KDPERR_NO_ERROR
;
591 *reply_port
= kdp
.reply_port
;
601 unsigned short *reply_port
604 kdp_readmem64_req_t
*rq
= &pkt
->readmem64_req
;
606 kdp_readmem64_reply_t
*rp
= &pkt
->readmem64_reply
;
609 if (plen
< sizeof (*rq
))
612 rp
->hdr
.is_reply
= 1;
613 rp
->hdr
.len
= sizeof (*rp
);
615 if (rq
->nbytes
> MAX_KDP_DATA_SIZE
)
616 rp
->error
= KDPERR_BAD_NBYTES
;
619 dprintf(("kdp_readmem64 addr %llx size %d\n", rq
->address
, rq
->nbytes
));
621 cnt
= kdp_machine_vm_read((mach_vm_address_t
)rq
->address
, (caddr_t
)rp
->data
, rq
->nbytes
);
622 rp
->error
= KDPERR_NO_ERROR
;
627 *reply_port
= kdp
.reply_port
;
637 unsigned short *reply_port
640 kdp_readphysmem64_req_t
*rq
= &pkt
->readphysmem64_req
;
642 kdp_readphysmem64_reply_t
*rp
= &pkt
->readphysmem64_reply
;
645 if (plen
< sizeof (*rq
))
648 rp
->hdr
.is_reply
= 1;
649 rp
->hdr
.len
= sizeof (*rp
);
651 if (rq
->nbytes
> MAX_KDP_DATA_SIZE
)
652 rp
->error
= KDPERR_BAD_NBYTES
;
655 dprintf(("kdp_readphysmem64 addr %llx size %d\n", rq
->address
, rq
->nbytes
));
657 cnt
= (int)kdp_machine_phys_read(rq
, rp
->data
, rq
->lcpu
);
658 rp
->error
= KDPERR_NO_ERROR
;
663 *reply_port
= kdp
.reply_port
;
673 unsigned short *reply_port
676 kdp_maxbytes_req_t
*rq
= &pkt
->maxbytes_req
;
678 kdp_maxbytes_reply_t
*rp
= &pkt
->maxbytes_reply
;
680 if (plen
< sizeof (*rq
))
683 rp
->hdr
.is_reply
= 1;
684 rp
->hdr
.len
= sizeof (*rp
);
686 dprintf(("kdp_maxbytes\n"));
688 rp
->max_bytes
= MAX_KDP_DATA_SIZE
;
690 *reply_port
= kdp
.reply_port
;
700 unsigned short *reply_port
703 kdp_version_req_t
*rq
= &pkt
->version_req
;
705 kdp_version_reply_t
*rp
= &pkt
->version_reply
;
707 if (plen
< sizeof (*rq
))
710 rp
->hdr
.is_reply
= 1;
711 rp
->hdr
.len
= sizeof (*rp
);
713 dprintf(("kdp_version\n"));
715 rp
->version
= KDP_VERSION
;
716 if (!(kdp_flag
& KDP_BP_DIS
))
717 rp
->feature
= KDP_FEATURE_BP
;
721 *reply_port
= kdp
.reply_port
;
731 unsigned short *reply_port
734 kdp_regions_req_t
*rq
= &pkt
->regions_req
;
736 kdp_regions_reply_t
*rp
= &pkt
->regions_reply
;
739 if (plen
< sizeof (*rq
))
742 rp
->hdr
.is_reply
= 1;
743 rp
->hdr
.len
= sizeof (*rp
);
745 dprintf(("kdp_regions\n"));
751 r
->nbytes
= 0xffffffff;
753 r
->protection
= VM_PROT_ALL
; r
++; rp
->nregions
++;
755 rp
->hdr
.len
+= rp
->nregions
* sizeof (kdp_region_t
);
757 *reply_port
= kdp
.reply_port
;
767 unsigned short *reply_port
770 kdp_writeregs_req_t
*rq
= &pkt
->writeregs_req
;
773 kdp_writeregs_reply_t
*rp
= &pkt
->writeregs_reply
;
775 if (plen
< sizeof (*rq
))
778 size
= rq
->hdr
.len
- (unsigned)sizeof(kdp_hdr_t
) - (unsigned)sizeof(unsigned int);
779 rp
->error
= kdp_machine_write_regs(rq
->cpu
, rq
->flavor
, rq
->data
, &size
);
781 rp
->hdr
.is_reply
= 1;
782 rp
->hdr
.len
= sizeof (*rp
);
784 *reply_port
= kdp
.reply_port
;
794 unsigned short *reply_port
797 kdp_readregs_req_t
*rq
= &pkt
->readregs_req
;
799 kdp_readregs_reply_t
*rp
= &pkt
->readregs_reply
;
802 if (plen
< sizeof (*rq
))
805 rp
->hdr
.is_reply
= 1;
806 rp
->hdr
.len
= sizeof (*rp
);
808 rp
->error
= kdp_machine_read_regs(rq
->cpu
, rq
->flavor
, rp
->data
, &size
);
811 *reply_port
= kdp
.reply_port
;
822 unsigned short *reply_port
825 kdp_breakpoint_req_t
*rq
= &pkt
->breakpoint_req
;
826 kdp_breakpoint_reply_t
*rp
= &pkt
->breakpoint_reply
;
830 if (plen
< sizeof (*rq
))
833 dprintf(("kdp_breakpoint_set %x\n", rq
->address
));
835 kerr
= kdp_set_breakpoint_internal((mach_vm_address_t
)rq
->address
);
839 rp
->hdr
.is_reply
= 1;
840 rp
->hdr
.len
= sizeof (*rp
);
841 *reply_port
= kdp
.reply_port
;
848 kdp_breakpoint64_set(
851 unsigned short *reply_port
854 kdp_breakpoint64_req_t
*rq
= &pkt
->breakpoint64_req
;
855 kdp_breakpoint64_reply_t
*rp
= &pkt
->breakpoint64_reply
;
859 if (plen
< sizeof (*rq
))
862 dprintf(("kdp_breakpoint64_set %llx\n", rq
->address
));
864 kerr
= kdp_set_breakpoint_internal((mach_vm_address_t
)rq
->address
);
868 rp
->hdr
.is_reply
= 1;
869 rp
->hdr
.len
= sizeof (*rp
);
870 *reply_port
= kdp
.reply_port
;
877 kdp_breakpoint_remove(
880 unsigned short *reply_port
883 kdp_breakpoint_req_t
*rq
= &pkt
->breakpoint_req
;
884 kdp_breakpoint_reply_t
*rp
= &pkt
->breakpoint_reply
;
887 if (plen
< sizeof (*rq
))
890 dprintf(("kdp_breakpoint_remove %x\n", rq
->address
));
892 kerr
= kdp_remove_breakpoint_internal((mach_vm_address_t
)rq
->address
);
896 rp
->hdr
.is_reply
= 1;
897 rp
->hdr
.len
= sizeof (*rp
);
898 *reply_port
= kdp
.reply_port
;
905 kdp_breakpoint64_remove(
908 unsigned short *reply_port
911 kdp_breakpoint64_req_t
*rq
= &pkt
->breakpoint64_req
;
912 kdp_breakpoint64_reply_t
*rp
= &pkt
->breakpoint64_reply
;
916 if (plen
< sizeof (*rq
))
919 dprintf(("kdp_breakpoint64_remove %llx\n", rq
->address
));
921 kerr
= kdp_remove_breakpoint_internal((mach_vm_address_t
)rq
->address
);
925 rp
->hdr
.is_reply
= 1;
926 rp
->hdr
.len
= sizeof (*rp
);
927 *reply_port
= kdp
.reply_port
;
935 kdp_set_breakpoint_internal(
936 mach_vm_address_t address
940 uint8_t breakinstr
[MAX_BREAKINSN_BYTES
], oldinstr
[MAX_BREAKINSN_BYTES
];
941 uint32_t breakinstrsize
= sizeof(breakinstr
);
945 kdp_machine_get_breakinsn(breakinstr
, &breakinstrsize
);
947 if(breakpoints_initialized
== 0)
949 for(i
=0;(i
< MAX_BREAKPOINTS
); breakpoint_list
[i
].address
=0, i
++);
950 breakpoints_initialized
++;
953 cnt
= kdp_machine_vm_read(address
, (caddr_t
)&oldinstr
, (mach_vm_size_t
)breakinstrsize
);
955 if (0 == memcmp(oldinstr
, breakinstr
, breakinstrsize
)) {
956 printf("A trap was already set at that address, not setting new breakpoint\n");
958 return KDPERR_BREAKPOINT_ALREADY_SET
;
961 for(i
=0;(i
< MAX_BREAKPOINTS
) && (breakpoint_list
[i
].address
!= 0); i
++);
963 if (i
== MAX_BREAKPOINTS
) {
964 return KDPERR_MAX_BREAKPOINTS
;
967 breakpoint_list
[i
].address
= address
;
968 memcpy(breakpoint_list
[i
].oldbytes
, oldinstr
, breakinstrsize
);
969 breakpoint_list
[i
].bytesused
= breakinstrsize
;
971 cnt
= kdp_machine_vm_write((caddr_t
)&breakinstr
, address
, breakinstrsize
);
973 return KDPERR_NO_ERROR
;
977 kdp_remove_breakpoint_internal(
978 mach_vm_address_t address
984 for(i
=0;(i
< MAX_BREAKPOINTS
) && (breakpoint_list
[i
].address
!= address
); i
++);
986 if (i
== MAX_BREAKPOINTS
)
988 return KDPERR_BREAKPOINT_NOT_FOUND
;
991 breakpoint_list
[i
].address
= 0;
992 cnt
= kdp_machine_vm_write((caddr_t
)&breakpoint_list
[i
].oldbytes
, address
, breakpoint_list
[i
].bytesused
);
994 return KDPERR_NO_ERROR
;
998 kdp_remove_all_breakpoints(void)
1001 boolean_t breakpoint_found
= FALSE
;
1003 if (breakpoints_initialized
)
1005 for(i
=0;i
< MAX_BREAKPOINTS
; i
++)
1007 if (breakpoint_list
[i
].address
)
1009 kdp_machine_vm_write((caddr_t
)&(breakpoint_list
[i
].oldbytes
), (mach_vm_address_t
)breakpoint_list
[i
].address
, (mach_vm_size_t
)breakpoint_list
[i
].bytesused
);
1010 breakpoint_found
= TRUE
;
1011 breakpoint_list
[i
].address
= 0;
1015 if (breakpoint_found
)
1016 printf("kdp_remove_all_breakpoints: found extant breakpoints, removing them.\n");
1018 return breakpoint_found
;
1023 __unused kdp_pkt_t
*pkt
,
1025 __unused
unsigned short *reply_port
1028 dprintf(("kdp_reboot\n"));
1030 kdp_machine_reboot();
1032 return (TRUE
); // no, not really, we won't return
1035 #define MAX_FRAMES 1000
1037 static int pid_from_task(task_t task
)
1042 pid
= proc_pid(task
->bsd_info
);
1048 kdp_copyin(pmap_t p
, uint64_t uaddr
, void *dest
, size_t size
) {
1050 char *kvaddr
= dest
;
1053 ppnum_t upn
= pmap_find_phys(p
, uaddr
);
1054 uint64_t phys_src
= ptoa_64(upn
) | (uaddr
& PAGE_MASK
);
1055 uint64_t phys_dest
= kvtophys((vm_offset_t
)kvaddr
);
1056 uint64_t src_rem
= PAGE_SIZE
- (phys_src
& PAGE_MASK
);
1057 uint64_t dst_rem
= PAGE_SIZE
- (phys_dest
& PAGE_MASK
);
1058 size_t cur_size
= (uint32_t) MIN(src_rem
, dst_rem
);
1059 cur_size
= MIN(cur_size
, rem
);
1061 if (upn
&& pmap_valid_page(upn
) && phys_dest
) {
1062 bcopy_phys(phys_src
, phys_dest
, cur_size
);
1075 kdp_mem_and_io_snapshot(struct mem_and_io_snapshot
*memio_snap
)
1077 unsigned int pages_reclaimed
;
1078 unsigned int pages_wanted
;
1081 memio_snap
->snapshot_magic
= STACKSHOT_MEM_AND_IO_SNAPSHOT_MAGIC
;
1082 memio_snap
->free_pages
= vm_page_free_count
;
1083 memio_snap
->active_pages
= vm_page_active_count
;
1084 memio_snap
->inactive_pages
= vm_page_inactive_count
;
1085 memio_snap
->purgeable_pages
= vm_page_purgeable_count
;
1086 memio_snap
->wired_pages
= vm_page_wire_count
;
1087 memio_snap
->speculative_pages
= vm_page_speculative_count
;
1088 memio_snap
->throttled_pages
= vm_page_throttled_count
;
1089 memio_snap
->busy_buffer_count
= count_busy_buffers();
1090 kErr
= mach_vm_pressure_monitor(FALSE
, VM_PRESSURE_TIME_WINDOW
, &pages_reclaimed
, &pages_wanted
);
1092 memio_snap
->pages_wanted
= (uint32_t)pages_wanted
;
1093 memio_snap
->pages_reclaimed
= (uint32_t)pages_reclaimed
;
1094 memio_snap
->pages_wanted_reclaimed_valid
= 1;
1096 memio_snap
->pages_wanted
= 0;
1097 memio_snap
->pages_reclaimed
= 0;
1098 memio_snap
->pages_wanted_reclaimed_valid
= 0;
1105 * Method for grabbing timer values safely, in the sense that no infinite loop will occur
1106 * Certain flavors of the timer_grab function, which would seem to be the thing to use,
1107 * can loop infinitely if called while the timer is in the process of being updated.
1108 * Unfortunately, it is (rarely) possible to get inconsistent top and bottom halves of
1109 * the timer using this method. This seems insoluble, since stackshot runs in a context
1110 * where the timer might be half-updated, and has no way of yielding control just long
1111 * enough to finish the update.
1114 static uint64_t safe_grab_timer_value(struct timer
*t
)
1116 #if defined(__LP64__)
1119 uint64_t time
= t
->high_bits
; /* endian independent grab */
1120 time
= (time
<< 32) | t
->low_bits
;
1126 kdp_stackshot(int pid
, void *tracebuf
, uint32_t tracebuf_size
, uint32_t trace_flags
, uint32_t dispatch_offset
, uint32_t *pbytesTraced
)
1128 char *tracepos
= (char *) tracebuf
;
1129 char *tracebound
= tracepos
+ tracebuf_size
;
1130 uint32_t tracebytes
= 0;
1133 task_t task
= TASK_NULL
;
1134 thread_t thread
= THREAD_NULL
;
1135 thread_snapshot_t tsnap
= NULL
;
1136 unsigned framesize
= 2 * sizeof(vm_offset_t
);
1138 struct thread cthread
;
1139 struct _vm_map cmap
;
1142 queue_head_t
*task_list
= &tasks
;
1143 boolean_t is_active_list
= TRUE
;
1145 boolean_t dispatch_p
= ((trace_flags
& STACKSHOT_GET_DQ
) != 0);
1146 boolean_t save_loadinfo_p
= ((trace_flags
& STACKSHOT_SAVE_LOADINFO
) != 0);
1148 if(trace_flags
& STACKSHOT_GET_GLOBAL_MEM_STATS
) {
1149 if(tracepos
+ sizeof(struct mem_and_io_snapshot
) > tracebound
) {
1153 kdp_mem_and_io_snapshot((struct mem_and_io_snapshot
*)tracepos
);
1154 tracepos
+= sizeof(struct mem_and_io_snapshot
);
1158 queue_iterate(task_list
, task
, task_t
, tasks
) {
1159 if ((task
== NULL
) || (ml_nofault_copy((vm_offset_t
) task
, (vm_offset_t
) &ctask
, sizeof(struct task
)) != sizeof(struct task
)))
1162 int task_pid
= pid_from_task(task
);
1163 boolean_t task64
= task_has_64BitAddr(task
);
1165 if (!task
->active
) {
1167 * Not interested in terminated tasks without threads, and
1168 * at the moment, stackshot can't handle a task without a name.
1170 if (queue_empty(&task
->threads
) || task_pid
== -1) {
1175 /* Trace everything, unless a process was specified */
1176 if ((pid
== -1) || (pid
== task_pid
)) {
1177 task_snapshot_t task_snap
;
1178 uint32_t uuid_info_count
= 0;
1179 mach_vm_address_t uuid_info_addr
= 0;
1180 boolean_t have_map
= (task
->map
!= NULL
) &&
1181 (ml_nofault_copy((vm_offset_t
)(task
->map
), (vm_offset_t
)&cmap
, sizeof(struct _vm_map
)) == sizeof(struct _vm_map
));
1182 boolean_t have_pmap
= have_map
&& (cmap
.pmap
!= NULL
) &&
1183 (ml_nofault_copy((vm_offset_t
)(cmap
.pmap
), (vm_offset_t
)&cpmap
, sizeof(struct pmap
)) == sizeof(struct pmap
));
1185 if (have_pmap
&& task
->active
&& save_loadinfo_p
&& task_pid
> 0) {
1186 // Read the dyld_all_image_infos struct from the task memory to get UUID array count and location
1188 struct user64_dyld_all_image_infos task_image_infos
;
1189 if (kdp_copyin(task
->map
->pmap
, task
->all_image_info_addr
, &task_image_infos
, sizeof(struct user64_dyld_all_image_infos
))) {
1190 uuid_info_count
= (uint32_t)task_image_infos
.uuidArrayCount
;
1191 uuid_info_addr
= task_image_infos
.uuidArray
;
1194 struct user32_dyld_all_image_infos task_image_infos
;
1195 if (kdp_copyin(task
->map
->pmap
, task
->all_image_info_addr
, &task_image_infos
, sizeof(struct user32_dyld_all_image_infos
))) {
1196 uuid_info_count
= task_image_infos
.uuidArrayCount
;
1197 uuid_info_addr
= task_image_infos
.uuidArray
;
1201 // If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
1202 // this data structure), we zero the uuid_info_count so that we won't even try to save load info
1204 if (!uuid_info_addr
) {
1205 uuid_info_count
= 0;
1209 if (tracepos
+ sizeof(struct task_snapshot
) > tracebound
) {
1214 task_snap
= (task_snapshot_t
) tracepos
;
1215 task_snap
->snapshot_magic
= STACKSHOT_TASK_SNAPSHOT_MAGIC
;
1216 task_snap
->pid
= task_pid
;
1217 task_snap
->nloadinfos
= uuid_info_count
;
1218 /* Add the BSD process identifiers */
1220 proc_name_kdp(task
, task_snap
->p_comm
, sizeof(task_snap
->p_comm
));
1222 task_snap
->p_comm
[0] = '\0';
1223 task_snap
->ss_flags
= 0;
1225 task_snap
->ss_flags
|= kUser64_p
;
1227 task_snap
->ss_flags
|= kTerminatedSnapshot
;
1228 if(task
->pidsuspended
) task_snap
->ss_flags
|= kPidSuspended
;
1229 if(task
->frozen
) task_snap
->ss_flags
|= kFrozen
;
1231 task_snap
->suspend_count
= task
->suspend_count
;
1232 task_snap
->task_size
= have_pmap
? pmap_resident_count(task
->map
->pmap
) : 0;
1233 task_snap
->faults
= task
->faults
;
1234 task_snap
->pageins
= task
->pageins
;
1235 task_snap
->cow_faults
= task
->cow_faults
;
1237 task_snap
->user_time_in_terminated_threads
= task
->total_user_time
;
1238 task_snap
->system_time_in_terminated_threads
= task
->total_system_time
;
1239 tracepos
+= sizeof(struct task_snapshot
);
1241 if (task_pid
> 0 && uuid_info_count
> 0) {
1242 uint32_t uuid_info_size
= (uint32_t)(task64
? sizeof(struct user64_dyld_uuid_info
) : sizeof(struct user32_dyld_uuid_info
));
1243 uint32_t uuid_info_array_size
= uuid_info_count
* uuid_info_size
;
1245 if (tracepos
+ uuid_info_array_size
> tracebound
) {
1250 // Copy in the UUID info array
1251 // It may be nonresident, in which case just fix up nloadinfos to 0 in the task_snap
1252 if (have_pmap
&& !kdp_copyin(task
->map
->pmap
, uuid_info_addr
, tracepos
, uuid_info_array_size
))
1253 task_snap
->nloadinfos
= 0;
1255 tracepos
+= uuid_info_array_size
;
1258 queue_iterate(&task
->threads
, thread
, thread_t
, task_threads
){
1261 if ((thread
== NULL
) || (ml_nofault_copy((vm_offset_t
) thread
, (vm_offset_t
) &cthread
, sizeof(struct thread
)) != sizeof(struct thread
)))
1264 if (((tracepos
+ 4 * sizeof(struct thread_snapshot
)) > tracebound
)) {
1268 /* Populate the thread snapshot header */
1269 tsnap
= (thread_snapshot_t
) tracepos
;
1270 tsnap
->thread_id
= thread_tid(thread
);
1271 tsnap
->state
= thread
->state
;
1272 tsnap
->sched_pri
= thread
->sched_pri
;
1273 tsnap
->sched_flags
= thread
->sched_flags
;
1274 tsnap
->wait_event
= VM_KERNEL_UNSLIDE(thread
->wait_event
);
1275 tsnap
->continuation
= VM_KERNEL_UNSLIDE(thread
->continuation
);
1276 tval
= safe_grab_timer_value(&thread
->user_timer
);
1277 tsnap
->user_time
= tval
;
1278 tval
= safe_grab_timer_value(&thread
->system_timer
);
1279 if (thread
->precise_user_kernel_time
) {
1280 tsnap
->system_time
= tval
;
1282 tsnap
->user_time
+= tval
;
1283 tsnap
->system_time
= 0;
1285 tsnap
->snapshot_magic
= STACKSHOT_THREAD_SNAPSHOT_MAGIC
;
1286 tracepos
+= sizeof(struct thread_snapshot
);
1287 tsnap
->ss_flags
= 0;
1289 if (dispatch_p
&& (task
!= kernel_task
) && (task
->active
) && have_pmap
) {
1290 uint64_t dqkeyaddr
= thread_dispatchqaddr(thread
);
1291 if (dqkeyaddr
!= 0) {
1292 uint64_t dqaddr
= 0;
1293 if (kdp_copyin(task
->map
->pmap
, dqkeyaddr
, &dqaddr
, (task64
? 8 : 4)) && (dqaddr
!= 0)) {
1294 uint64_t dqserialnumaddr
= dqaddr
+ dispatch_offset
;
1295 uint64_t dqserialnum
= 0;
1296 if (kdp_copyin(task
->map
->pmap
, dqserialnumaddr
, &dqserialnum
, (task64
? 8 : 4))) {
1297 tsnap
->ss_flags
|= kHasDispatchSerial
;
1298 *(uint64_t *)tracepos
= dqserialnum
;
1304 /* Call through to the machine specific trace routines
1305 * Frames are added past the snapshot header.
1308 if (thread
->kernel_stack
!= 0) {
1309 #if defined(__LP64__)
1310 tracebytes
= machine_trace_thread64(thread
, tracepos
, tracebound
, MAX_FRAMES
, FALSE
);
1311 tsnap
->ss_flags
|= kKernel64_p
;
1314 tracebytes
= machine_trace_thread(thread
, tracepos
, tracebound
, MAX_FRAMES
, FALSE
);
1318 tsnap
->nkern_frames
= tracebytes
/framesize
;
1319 tracepos
+= tracebytes
;
1321 /* Trace user stack, if any */
1322 if (task
->active
&& thread
->task
->map
!= kernel_map
) {
1324 if (task_has_64BitAddr(thread
->task
)) {
1325 tracebytes
= machine_trace_thread64(thread
, tracepos
, tracebound
, MAX_FRAMES
, TRUE
);
1326 tsnap
->ss_flags
|= kUser64_p
;
1330 tracebytes
= machine_trace_thread(thread
, tracepos
, tracebound
, MAX_FRAMES
, TRUE
);
1334 tsnap
->nuser_frames
= tracebytes
/framesize
;
1335 tracepos
+= tracebytes
;
1341 if (is_active_list
) {
1342 is_active_list
= FALSE
;
1343 task_list
= &terminated_tasks
;
1348 /* Release stack snapshot wait indicator */
1349 kdp_snapshot_postflight();
1351 *pbytesTraced
= (uint32_t)(tracepos
- (char *) tracebuf
);
1357 kdp_readioport(kdp_pkt_t
*pkt
,
1359 unsigned short *reply_port
1362 kdp_readioport_req_t
*rq
= &pkt
->readioport_req
;
1363 kdp_readioport_reply_t
*rp
= &pkt
->readioport_reply
;
1366 if (plen
< sizeof (*rq
))
1369 rp
->hdr
.is_reply
= 1;
1370 rp
->hdr
.len
= sizeof (*rp
);
1372 if (rq
->nbytes
> MAX_KDP_DATA_SIZE
)
1373 rp
->error
= KDPERR_BAD_NBYTES
;
1375 #if KDP_TEST_HARNESS
1376 uint16_t addr
= rq
->address
;
1378 uint16_t size
= rq
->nbytes
;
1379 dprintf(("kdp_readioport addr %x size %d\n", addr
, size
));
1381 rp
->error
= kdp_machine_ioport_read(rq
, rp
->data
, rq
->lcpu
);
1382 if (rp
->error
== KDPERR_NO_ERROR
)
1383 rp
->hdr
.len
+= size
;
1386 *reply_port
= kdp
.reply_port
;
1396 unsigned short *reply_port
1399 kdp_writeioport_req_t
*rq
= &pkt
->writeioport_req
;
1400 kdp_writeioport_reply_t
*rp
= &pkt
->writeioport_reply
;
1403 if (plen
< sizeof (*rq
))
1406 if (rq
->nbytes
> MAX_KDP_DATA_SIZE
)
1407 rp
->error
= KDPERR_BAD_NBYTES
;
1409 dprintf(("kdp_writeioport addr %x size %d\n", rq
->address
,
1412 rp
->error
= kdp_machine_ioport_write(rq
, rq
->data
, rq
->lcpu
);
1415 rp
->hdr
.is_reply
= 1;
1416 rp
->hdr
.len
= sizeof (*rp
);
1418 *reply_port
= kdp
.reply_port
;
1425 kdp_readmsr64(kdp_pkt_t
*pkt
,
1427 unsigned short *reply_port
1430 kdp_readmsr64_req_t
*rq
= &pkt
->readmsr64_req
;
1431 kdp_readmsr64_reply_t
*rp
= &pkt
->readmsr64_reply
;
1434 if (plen
< sizeof (*rq
))
1437 rp
->hdr
.is_reply
= 1;
1438 rp
->hdr
.len
= sizeof (*rp
);
1440 dprintf(("kdp_readmsr64 lcpu %x addr %x\n", rq
->lcpu
, rq
->address
));
1441 rp
->error
= kdp_machine_msr64_read(rq
, rp
->data
, rq
->lcpu
);
1442 if (rp
->error
== KDPERR_NO_ERROR
)
1443 rp
->hdr
.len
+= sizeof(uint64_t);
1445 *reply_port
= kdp
.reply_port
;
1455 unsigned short *reply_port
1458 kdp_writemsr64_req_t
*rq
= &pkt
->writemsr64_req
;
1459 kdp_writemsr64_reply_t
*rp
= &pkt
->writemsr64_reply
;
1462 if (plen
< sizeof (*rq
))
1465 dprintf(("kdp_writemsr64 lcpu %x addr %x\n", rq
->lcpu
, rq
->address
));
1466 rp
->error
= kdp_machine_msr64_write(rq
, rq
->data
, rq
->lcpu
);
1468 rp
->hdr
.is_reply
= 1;
1469 rp
->hdr
.len
= sizeof (*rp
);
1471 *reply_port
= kdp
.reply_port
;
1481 unsigned short *reply_port
1484 kdp_dumpinfo_req_t
*rq
= &pkt
->dumpinfo_req
;
1485 kdp_dumpinfo_reply_t
*rp
= &pkt
->dumpinfo_reply
;
1488 if (plen
< sizeof (*rq
))
1491 dprintf(("kdp_dumpinfo file=%s destip=%s routerip=%s\n", rq
->name
, rq
->destip
, rq
->routerip
));
1492 rp
->hdr
.is_reply
= 1;
1493 rp
->hdr
.len
= sizeof (*rp
);
1495 if ((rq
->type
& KDP_DUMPINFO_MASK
) != KDP_DUMPINFO_GETINFO
) {
1496 kdp_set_dump_info(rq
->type
, rq
->name
, rq
->destip
, rq
->routerip
,
1500 /* gather some stats for reply */
1501 kdp_get_dump_info(&rp
->type
, rp
->name
, rp
->destip
, rp
->routerip
,
1504 *reply_port
= kdp
.reply_port
;