/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <sys/appleapiopts.h>
#include <kern/debug.h>
#include <uuid/uuid.h>

#include <kdp/kdp_internal.h>
#include <kdp/kdp_private.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_dyld.h>

#include <libsa/types.h>
#include <libkern/version.h>

#include <string.h> /* bcopy */

#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_shared_region.h>
#include <libkern/OSKextLibPrivate.h>
extern int count_busy_buffers(void);	/* must track with declaration in bsd/sys/buf_internal.h */

#define DO_ALIGN	1	/* align all packet data accesses */

#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x)
#endif
static kdp_dispatch_t
    dispatch_table[KDP_INVALID_REQUEST-KDP_CONNECT] =
    {
/* 0 */	kdp_connect,
/* 1 */	kdp_disconnect,
/* 2 */	kdp_hostinfo,
/* 3 */	kdp_version,
/* 4 */	kdp_maxbytes,
/* 5 */	kdp_readmem,
/* 6 */	kdp_writemem,
/* 7 */	kdp_readregs,
/* 8 */	kdp_writeregs,
/* 9 */	kdp_unknown,
/* A */	kdp_unknown,
/* B */	kdp_suspend,
/* C */	kdp_resumecpus,
/* D */	kdp_unknown,
/* E */	kdp_unknown,
/* F */	kdp_breakpoint_set,
/*10 */	kdp_breakpoint_remove,
/*11 */	kdp_regions,
/*12 */	kdp_reattach,
/*13 */	kdp_reboot,
/*14 */	kdp_readmem64,
/*15 */	kdp_writemem64,
/*16 */	kdp_breakpoint64_set,
/*17 */	kdp_breakpoint64_remove,
/*18 */	kdp_kernelversion,
/*19 */	kdp_readphysmem64,
/*1A */	kdp_writephysmem64,
/*1B */	kdp_readioport,
/*1C */	kdp_writeioport,
/*1D */	kdp_readmsr64,
/*1E */	kdp_writemsr64,
/*1F */	kdp_dumpinfo,
    };

kdp_glob_t	kdp;
#define MAX_BREAKPOINTS 100

/*
 * Version 11 of the KDP Protocol adds support for 64-bit wide memory
 * addresses (read/write and breakpoints) as well as a dedicated
 * kernelversion request. Version 12 adds read/writing of physical
 * memory with 64-bit wide memory addresses.
 */
#define KDP_VERSION 12
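
/*
 * Illustrative sketch, not part of XNU: a remote debugger would typically
 * gate its use of the newer requests on the version reported in the
 * KDP_VERSION reply. The helper name below is hypothetical.
 */
#if 0
static inline boolean_t
kdp_remote_supports_64bit_access(unsigned int remote_version)
{
	/* 64-bit memory access and KDP_KERNELVERSION arrived in protocol v11;
	 * 64-bit physical memory access arrived in v12 (see comment above). */
	return (remote_version >= 11);
}
#endif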
typedef struct{
	mach_vm_address_t	address;
	uint32_t	bytesused;
	uint8_t		oldbytes[MAX_BREAKINSN_BYTES];
} kdp_breakpoint_record_t;

static kdp_breakpoint_record_t breakpoint_list[MAX_BREAKPOINTS];
static unsigned int breakpoints_initialized = 0;

int reattach_wait = 0;
int noresume_on_disconnect = 0;
extern unsigned int return_on_panic;

typedef struct thread_snapshot *thread_snapshot_t;
typedef struct task_snapshot *task_snapshot_t;
extern int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
extern int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);

extern int
proc_pid(void *p);
extern uint64_t
proc_uniqueid(void *p);
extern uint64_t
proc_was_throttled(void *p);
extern uint64_t
proc_did_throttle(void *p);
extern void
proc_name_kdp(task_t task, char *buf, int size);

extern void
kdp_snapshot_postflight(void);

static int
pid_from_task(task_t task);

static uint64_t
proc_uniqueid_from_task(task_t task);

kdp_error_t
kdp_set_breakpoint_internal(
	mach_vm_address_t	address
	);

kdp_error_t
kdp_remove_breakpoint_internal(
	mach_vm_address_t	address
	);

int
kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced);

boolean_t kdp_copyin(pmap_t, uint64_t, void *, size_t);
extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);
boolean_t
kdp_packet(
	unsigned char	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	static unsigned	aligned_pkt[1538/sizeof(unsigned)+1]; // max ether pkt
	kdp_pkt_t	*rd = (kdp_pkt_t *)&aligned_pkt;
	size_t		plen = *len;
	kdp_req_t	req;
	boolean_t	ret;

#if DO_ALIGN
	bcopy((char *)pkt, (char *)rd, sizeof(aligned_pkt));
#else
	rd = (kdp_pkt_t *)pkt;
#endif
	if (plen < sizeof (rd->hdr) || rd->hdr.len != plen) {
		printf("kdp_packet bad len pkt %lu hdr %d\n", plen, rd->hdr.len);

		return (FALSE);
	}

	if (rd->hdr.is_reply) {
		printf("kdp_packet reply recvd req %x seq %x\n",
			rd->hdr.request, rd->hdr.seq);

		return (FALSE);
	}

	req = rd->hdr.request;
	if (req >= KDP_INVALID_REQUEST) {
		printf("kdp_packet bad request %x len %d seq %x key %x\n",
			rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);

		return (FALSE);
	}

	ret = ((*dispatch_table[req - KDP_CONNECT])(rd, len, reply_port));
#if DO_ALIGN
	bcopy((char *)rd, (char *) pkt, *len);
#endif
	return ret;
}
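
/*
 * Illustrative sketch, not part of XNU: every entry in dispatch_table above
 * follows the shape kdp_packet expects -- validate the request length, fill
 * in the reply header, and select the reply port. The request name
 * kdp_example and its packet members are hypothetical.
 */
#if 0
static boolean_t
kdp_example(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_example_req_t	*rq = &pkt->example_req;	/* hypothetical member */
	kdp_example_reply_t	*rp = &pkt->example_reply;	/* hypothetical member */
	size_t			plen = *len;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
#endif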
static boolean_t
kdp_unknown(
	kdp_pkt_t			*pkt,
	__unused int			*len,
	__unused unsigned short	*reply_port
)
{
	kdp_pkt_t	*rd = (kdp_pkt_t *)pkt;

	printf("kdp_unknown request %x len %d seq %x key %x\n",
		rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);

	return (FALSE);
}
static boolean_t
kdp_connect(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_connect_req_t	*rq = &pkt->connect_req;
	size_t			plen = *len;
	kdp_connect_reply_t	*rp = &pkt->connect_reply;
	uint16_t		rport, eport;
	uint32_t		key;
	uint8_t			seq;

	if (plen < sizeof (*rq))
		return (FALSE);

	dprintf(("kdp_connect seq %x greeting %s\n", rq->hdr.seq, rq->greeting));

	rport = rq->req_reply_port;
	eport = rq->exc_note_port;
	key   = rq->hdr.key;
	seq   = rq->hdr.seq;
	if (kdp.is_conn) {
		if ((seq == kdp.conn_seq) &&	/* duplicate request */
		    (rport == kdp.reply_port) &&
		    (eport == kdp.exception_port) &&
		    (key == kdp.session_key))
			rp->error = KDPERR_NO_ERROR;
		else
			rp->error = KDPERR_ALREADY_CONNECTED;
	}
	else {
		kdp.reply_port     = rport;
		kdp.exception_port = eport;
		kdp.is_conn        = TRUE;
		kdp.conn_seq       = seq;
		kdp.session_key    = key;

		rp->error = KDPERR_NO_ERROR;
	}

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	*reply_port = rport;
	*len = rp->hdr.len;

	if (current_debugger == KDP_CUR_DB)
		active_debugger = 1;

	return (TRUE);
}
static boolean_t
kdp_disconnect(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_disconnect_req_t	*rq = &pkt->disconnect_req;
	size_t			plen = *len;
	kdp_disconnect_reply_t	*rp = &pkt->disconnect_reply;

	if (plen < sizeof (*rq))
		return (FALSE);

	if (!kdp.is_conn)
		return (FALSE);

	dprintf(("kdp_disconnect\n"));

	*reply_port = kdp.reply_port;

	kdp.reply_port = kdp.exception_port = 0;
	kdp.is_halted = kdp.is_conn = FALSE;
	kdp.exception_seq = kdp.conn_seq = 0;
	kdp.session_key = 0;

	if ((panicstr != NULL) && (return_on_panic == 0))
		reattach_wait = 1;

	if (noresume_on_disconnect == 1) {
		reattach_wait = 1;
		noresume_on_disconnect = 0;
	}

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	*len = rp->hdr.len;

	if (current_debugger == KDP_CUR_DB)
		active_debugger = 0;

	return (TRUE);
}
static boolean_t
kdp_reattach(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_reattach_req_t	*rq = &pkt->reattach_req;

	kdp.is_conn = TRUE;
	kdp_disconnect(pkt, len, reply_port);
	*reply_port = rq->req_reply_port;
	reattach_wait = 1;
	return (TRUE);
}
static boolean_t
kdp_hostinfo(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_hostinfo_req_t	*rq = &pkt->hostinfo_req;
	size_t			plen = *len;
	kdp_hostinfo_reply_t	*rp = &pkt->hostinfo_reply;

	if (plen < sizeof (*rq))
		return (FALSE);

	dprintf(("kdp_hostinfo\n"));

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	kdp_machine_hostinfo(&rp->hostinfo);

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_kernelversion(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_kernelversion_req_t		*rq = &pkt->kernelversion_req;
	size_t				plen = *len;
	kdp_kernelversion_reply_t	*rp = &pkt->kernelversion_reply;
	size_t				slen;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	dprintf(("kdp_kernelversion\n"));
	slen = strlcpy(rp->version, kdp_kernelversion_string, MAX_KDP_DATA_SIZE);

	rp->hdr.len += slen + 1; /* strlcpy returns the amount copied with NUL */

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_suspend(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_suspend_req_t	*rq = &pkt->suspend_req;
	size_t			plen = *len;
	kdp_suspend_reply_t	*rp = &pkt->suspend_reply;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	dprintf(("kdp_suspend\n"));

	kdp.is_halted = TRUE;

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_resumecpus(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_resumecpus_req_t	*rq = &pkt->resumecpus_req;
	size_t			plen = *len;
	kdp_resumecpus_reply_t	*rp = &pkt->resumecpus_reply;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	dprintf(("kdp_resumecpus %x\n", rq->cpu_mask));

	kdp.is_halted = FALSE;

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_writemem(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_writemem_req_t	*rq = &pkt->writemem_req;
	size_t			plen = *len;
	kdp_writemem_reply_t	*rp = &pkt->writemem_reply;
	mach_vm_size_t		cnt;

	if (plen < sizeof (*rq))
		return (FALSE);

	if (rq->nbytes > MAX_KDP_DATA_SIZE)
		rp->error = KDPERR_BAD_NBYTES;
	else {
		dprintf(("kdp_writemem addr %x size %d\n", rq->address, rq->nbytes));
		cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, rq->nbytes);
		rp->error = KDPERR_ACCESS(rq->nbytes, cnt);
		dprintf(("  cnt %lld error %d\n", cnt, rp->error));
	}

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_writemem64(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_writemem64_req_t	*rq = &pkt->writemem64_req;
	size_t			plen = *len;
	kdp_writemem64_reply_t	*rp = &pkt->writemem64_reply;
	mach_vm_size_t		cnt;

	if (plen < sizeof (*rq))
		return (FALSE);

	if (rq->nbytes > MAX_KDP_DATA_SIZE)
		rp->error = KDPERR_BAD_NBYTES;
	else {
		dprintf(("kdp_writemem64 addr %llx size %d\n", rq->address, rq->nbytes));
		cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, (mach_vm_size_t)rq->nbytes);
		rp->error = KDPERR_ACCESS(rq->nbytes, cnt);
		dprintf(("  cnt %lld error %d\n", cnt, rp->error));
	}

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_writephysmem64(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_writephysmem64_req_t	*rq = &pkt->writephysmem64_req;
	size_t				plen = *len;
	kdp_writephysmem64_reply_t	*rp = &pkt->writephysmem64_reply;
	mach_vm_size_t			cnt;
	unsigned int			size;

	if (plen < sizeof (*rq))
		return (FALSE);

	size = rq->nbytes;
	if (size > MAX_KDP_DATA_SIZE)
		rp->error = KDPERR_BAD_NBYTES;
	else {
		dprintf(("kdp_writephysmem64 addr %llx size %d\n", rq->address, size));
		cnt = kdp_machine_phys_write(rq, rq->data, rq->lcpu);
		rp->error = KDPERR_ACCESS(size, cnt);
		dprintf(("  cnt %lld error %d\n", cnt, rp->error));
	}

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_readmem(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_readmem_req_t	*rq = &pkt->readmem_req;
	size_t			plen = *len;
	kdp_readmem_reply_t	*rp = &pkt->readmem_reply;
	mach_vm_size_t		cnt;
	unsigned int		size;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	size = rq->nbytes;
	if (size > MAX_KDP_DATA_SIZE)
		rp->error = KDPERR_BAD_NBYTES;
	else {
		dprintf(("kdp_readmem addr %x size %d\n", rq->address, size));
		cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes);
		rp->error = KDPERR_ACCESS(size, cnt);
		dprintf(("  cnt %lld error %d\n", cnt, rp->error));

		rp->hdr.len += cnt;
	}

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_readmem64(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_readmem64_req_t	*rq = &pkt->readmem64_req;
	size_t			plen = *len;
	kdp_readmem64_reply_t	*rp = &pkt->readmem64_reply;
	mach_vm_size_t		cnt;
	unsigned int		size;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	size = rq->nbytes;
	if (size > MAX_KDP_DATA_SIZE)
		rp->error = KDPERR_BAD_NBYTES;
	else {
		dprintf(("kdp_readmem64 addr %llx size %d\n", rq->address, size));
		cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes);
		rp->error = KDPERR_ACCESS(size, cnt);
		dprintf(("  cnt %lld error %d\n", cnt, rp->error));

		rp->hdr.len += cnt;
	}

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_readphysmem64(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_readphysmem64_req_t		*rq = &pkt->readphysmem64_req;
	size_t				plen = *len;
	kdp_readphysmem64_reply_t	*rp = &pkt->readphysmem64_reply;
	mach_vm_size_t			cnt;
	unsigned int			size;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	size = rq->nbytes;
	if (size > MAX_KDP_DATA_SIZE)
		rp->error = KDPERR_BAD_NBYTES;
	else {
		dprintf(("kdp_readphysmem64 addr %llx size %d\n", rq->address, size));
		cnt = kdp_machine_phys_read(rq, rp->data, rq->lcpu);
		rp->error = KDPERR_ACCESS(size, cnt);
		dprintf(("  cnt %lld error %d\n", cnt, rp->error));

		rp->hdr.len += cnt;
	}

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_maxbytes(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_maxbytes_req_t	*rq = &pkt->maxbytes_req;
	size_t			plen = *len;
	kdp_maxbytes_reply_t	*rp = &pkt->maxbytes_reply;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	dprintf(("kdp_maxbytes\n"));

	rp->max_bytes = MAX_KDP_DATA_SIZE;

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_version(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_version_req_t	*rq = &pkt->version_req;
	size_t			plen = *len;
	kdp_version_reply_t	*rp = &pkt->version_reply;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	dprintf(("kdp_version\n"));

	rp->version = KDP_VERSION;
	if (!(kdp_flag & KDP_BP_DIS))
		rp->feature = KDP_FEATURE_BP;
	else
		rp->feature = 0;

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_regions(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_regions_req_t	*rq = &pkt->regions_req;
	size_t			plen = *len;
	kdp_regions_reply_t	*rp = &pkt->regions_reply;
	kdp_region_t		*r;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	dprintf(("kdp_regions\n"));

	r = rp->regions;
	rp->nregions = 0;

	r->address = 0;
	r->nbytes = 0xffffffff;

	r->protection = VM_PROT_ALL; r++; rp->nregions++;

	rp->hdr.len += rp->nregions * sizeof (kdp_region_t);

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_writeregs(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_writeregs_req_t	*rq = &pkt->writeregs_req;
	size_t			plen = *len;
	unsigned int		size;
	kdp_writeregs_reply_t	*rp = &pkt->writeregs_reply;

	if (plen < sizeof (*rq))
		return (FALSE);

	size = rq->hdr.len - (unsigned)sizeof(kdp_hdr_t) - (unsigned)sizeof(unsigned int);
	rp->error = kdp_machine_write_regs(rq->cpu, rq->flavor, rq->data, &size);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_readregs(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_readregs_req_t	*rq = &pkt->readregs_req;
	size_t			plen = *len;
	kdp_readregs_reply_t	*rp = &pkt->readregs_reply;
	int			size;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	rp->error = kdp_machine_read_regs(rq->cpu, rq->flavor, rp->data, &size);
	rp->hdr.len += size;

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_breakpoint_set(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_breakpoint_req_t	*rq = &pkt->breakpoint_req;
	kdp_breakpoint_reply_t	*rp = &pkt->breakpoint_reply;
	size_t			plen = *len;
	kdp_error_t		kerr;

	if (plen < sizeof (*rq))
		return (FALSE);

	dprintf(("kdp_breakpoint_set %x\n", rq->address));

	kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address);

	rp->error = kerr;

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);
	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_breakpoint64_set(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_breakpoint64_req_t		*rq = &pkt->breakpoint64_req;
	kdp_breakpoint64_reply_t	*rp = &pkt->breakpoint64_reply;
	size_t				plen = *len;
	kdp_error_t			kerr;

	if (plen < sizeof (*rq))
		return (FALSE);

	dprintf(("kdp_breakpoint64_set %llx\n", rq->address));

	kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address);

	rp->error = kerr;

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);
	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_breakpoint_remove(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_breakpoint_req_t	*rq = &pkt->breakpoint_req;
	kdp_breakpoint_reply_t	*rp = &pkt->breakpoint_reply;
	size_t			plen = *len;
	kdp_error_t		kerr;

	if (plen < sizeof (*rq))
		return (FALSE);

	dprintf(("kdp_breakpoint_remove %x\n", rq->address));

	kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address);

	rp->error = kerr;

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);
	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_breakpoint64_remove(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
)
{
	kdp_breakpoint64_req_t		*rq = &pkt->breakpoint64_req;
	kdp_breakpoint64_reply_t	*rp = &pkt->breakpoint64_reply;
	size_t				plen = *len;
	kdp_error_t			kerr;

	if (plen < sizeof (*rq))
		return (FALSE);

	dprintf(("kdp_breakpoint64_remove %llx\n", rq->address));

	kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address);

	rp->error = kerr;

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);
	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
kdp_error_t
kdp_set_breakpoint_internal(
	mach_vm_address_t	address
	)
{
	uint8_t		breakinstr[MAX_BREAKINSN_BYTES], oldinstr[MAX_BREAKINSN_BYTES];
	uint32_t	breakinstrsize = sizeof(breakinstr);
	mach_vm_size_t	cnt;
	int		i;

	kdp_machine_get_breakinsn(breakinstr, &breakinstrsize);

	if(breakpoints_initialized == 0)
	{
		for(i=0;(i < MAX_BREAKPOINTS); breakpoint_list[i].address=0, i++);
		breakpoints_initialized++;
	}

	cnt = kdp_machine_vm_read(address, (caddr_t)&oldinstr, (mach_vm_size_t)breakinstrsize);

	if (0 == memcmp(oldinstr, breakinstr, breakinstrsize)) {
		printf("A trap was already set at that address, not setting new breakpoint\n");

		return KDPERR_BREAKPOINT_ALREADY_SET;
	}

	for(i=0;(i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != 0); i++);

	if (i == MAX_BREAKPOINTS) {
		return KDPERR_MAX_BREAKPOINTS;
	}

	breakpoint_list[i].address = address;
	memcpy(breakpoint_list[i].oldbytes, oldinstr, breakinstrsize);
	breakpoint_list[i].bytesused = breakinstrsize;

	cnt = kdp_machine_vm_write((caddr_t)&breakinstr, address, breakinstrsize);

	return KDPERR_NO_ERROR;
}
kdp_error_t
kdp_remove_breakpoint_internal(
	mach_vm_address_t	address
	)
{
	mach_vm_size_t	cnt;
	int		i;

	for(i=0;(i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != address); i++);

	if (i == MAX_BREAKPOINTS)
	{
		return KDPERR_BREAKPOINT_NOT_FOUND;
	}

	breakpoint_list[i].address = 0;
	cnt = kdp_machine_vm_write((caddr_t)&breakpoint_list[i].oldbytes, address, breakpoint_list[i].bytesused);

	return KDPERR_NO_ERROR;
}
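
/*
 * Illustrative sketch, not part of XNU: the two internal routines above are
 * symmetric, so a transient breakpoint amounts to a set/remove pair. 'addr'
 * here is a hypothetical address supplied by the debugger.
 */
#if 0
	kdp_error_t err = kdp_set_breakpoint_internal(addr);
	if (err == KDPERR_NO_ERROR) {
		/* ... resume, wait for the trap to fire ... */
		err = kdp_remove_breakpoint_internal(addr);
	}
#endif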
boolean_t
kdp_remove_all_breakpoints(void)
{
	int i;
	boolean_t breakpoint_found = FALSE;

	if (breakpoints_initialized)
	{
		for(i=0;i < MAX_BREAKPOINTS; i++)
		{
			if (breakpoint_list[i].address)
			{
				kdp_machine_vm_write((caddr_t)&(breakpoint_list[i].oldbytes), (mach_vm_address_t)breakpoint_list[i].address, (mach_vm_size_t)breakpoint_list[i].bytesused);
				breakpoint_found = TRUE;
				breakpoint_list[i].address = 0;
			}
		}

		if (breakpoint_found)
			printf("kdp_remove_all_breakpoints: found extant breakpoints, removing them.\n");
	}
	return breakpoint_found;
}
boolean_t
kdp_reboot(
	__unused kdp_pkt_t *pkt,
	__unused int	*len,
	__unused unsigned short *reply_port
	)
{
	dprintf(("kdp_reboot\n"));

	kdp_machine_reboot();

	return (TRUE); // no, not really, we won't return
}

#define MAX_FRAMES 1000
static int pid_from_task(task_t task)
{
	int pid = -1;

	if (task->bsd_info)
		pid = proc_pid(task->bsd_info);

	return pid;
}
static uint64_t
proc_uniqueid_from_task(task_t task)
{
	uint64_t uniqueid = ~(0ULL);

	if (task->bsd_info)
		uniqueid = proc_uniqueid(task->bsd_info);

	return uniqueid;
}
static uint64_t
proc_was_throttled_from_task(task_t task)
{
	uint64_t was_throttled = 0;

	if (task->bsd_info)
		was_throttled = proc_was_throttled(task->bsd_info);

	return was_throttled;
}
static uint64_t
proc_did_throttle_from_task(task_t task)
{
	uint64_t did_throttle = 0;

	if (task->bsd_info)
		did_throttle = proc_did_throttle(task->bsd_info);

	return did_throttle;
}
boolean_t
kdp_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size) {
	size_t rem = size;
	char *kvaddr = dest;

	while (rem) {
		ppnum_t upn = pmap_find_phys(p, uaddr);
		uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
		uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
		uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
		uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
		size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
		cur_size = MIN(cur_size, rem);

		if (upn && pmap_valid_page(upn) && phys_dest) {
			bcopy_phys(phys_src, phys_dest, cur_size);
		}
		else
			break;
		uaddr += cur_size;
		kvaddr += cur_size;
		rem -= cur_size;
	}
	return (rem == 0);
}
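
/*
 * Illustrative sketch, not part of XNU: kdp_copyin is how the stackshot code
 * below reads user memory page-by-page through the physical map, since a
 * normal copyin() cannot be trusted in debugger context. 'some_task' and
 * 'user_va' are hypothetical.
 */
#if 0
	uint64_t value = 0;
	if (kdp_copyin(some_task->map->pmap, user_va, &value, sizeof(value))) {
		/* all pages were resident; 'value' is valid */
	}
#endif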
static void
kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap)
{
	unsigned int pages_reclaimed;
	unsigned int pages_wanted;
	kern_return_t kErr;

	processor_t processor;
	vm_statistics64_t stat;
	vm_statistics64_data_t host_vm_stat;

	processor = processor_list;
	stat = &PROCESSOR_DATA(processor, vm_stat);
	host_vm_stat = *stat;

	if (processor_count > 1) {
		simple_lock(&processor_list_lock);

		while ((processor = processor->processor_list) != NULL) {
			stat = &PROCESSOR_DATA(processor, vm_stat);
			host_vm_stat.compressions += stat->compressions;
			host_vm_stat.decompressions += stat->decompressions;
		}

		simple_unlock(&processor_list_lock);
	}

	memio_snap->snapshot_magic = STACKSHOT_MEM_AND_IO_SNAPSHOT_MAGIC;
	memio_snap->free_pages = vm_page_free_count;
	memio_snap->active_pages = vm_page_active_count;
	memio_snap->inactive_pages = vm_page_inactive_count;
	memio_snap->purgeable_pages = vm_page_purgeable_count;
	memio_snap->wired_pages = vm_page_wire_count;
	memio_snap->speculative_pages = vm_page_speculative_count;
	memio_snap->throttled_pages = vm_page_throttled_count;
	memio_snap->busy_buffer_count = count_busy_buffers();
	memio_snap->filebacked_pages = vm_page_external_count;
	memio_snap->compressions = (uint32_t)host_vm_stat.compressions;
	memio_snap->decompressions = (uint32_t)host_vm_stat.decompressions;
	memio_snap->compressor_size = VM_PAGE_COMPRESSOR_COUNT;
	kErr = mach_vm_pressure_monitor(FALSE, VM_PRESSURE_TIME_WINDOW, &pages_reclaimed, &pages_wanted);

	if ( ! kErr ) {
		memio_snap->pages_wanted = (uint32_t)pages_wanted;
		memio_snap->pages_reclaimed = (uint32_t)pages_reclaimed;
		memio_snap->pages_wanted_reclaimed_valid = 1;
	} else {
		memio_snap->pages_wanted = 0;
		memio_snap->pages_reclaimed = 0;
		memio_snap->pages_wanted_reclaimed_valid = 0;
	}
}
/*
 * Method for grabbing timer values safely, in the sense that no infinite loop will occur.
 * Certain flavors of the timer_grab function, which would seem to be the thing to use,
 * can loop infinitely if called while the timer is in the process of being updated.
 * Unfortunately, it is (rarely) possible to get inconsistent top and bottom halves of
 * the timer using this method. This seems insoluble, since stackshot runs in a context
 * where the timer might be half-updated, and has no way of yielding control just long
 * enough to finish the update.
 */

static uint64_t safe_grab_timer_value(struct timer *t)
{
#if   defined(__LP64__)
	return t->all_bits;
#else
	uint64_t time = t->high_bits;	/* endian independent grab */
	time = (time << 32) | t->low_bits;
	return time;
#endif
}
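
/*
 * Illustrative sketch, not part of XNU: this mirrors how kdp_stackshot below
 * samples a thread's timers without risking a spin in timer_grab().
 */
#if 0
	uint64_t utime = safe_grab_timer_value(&thread->user_timer);
	uint64_t stime = safe_grab_timer_value(&thread->system_timer);
#endif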
int
kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced)
{
	char *tracepos = (char *) tracebuf;
	char *tracebound = tracepos + tracebuf_size;
	uint32_t tracebytes = 0;
	int error = 0;

	task_t task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	thread_snapshot_t tsnap = NULL;
	unsigned framesize = 2 * sizeof(vm_offset_t);

	queue_head_t *task_list = &tasks;
	boolean_t is_active_list = TRUE;

	boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0);
	boolean_t save_loadinfo_p = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0);
	boolean_t save_kextloadinfo_p = ((trace_flags & STACKSHOT_SAVE_KEXT_LOADINFO) != 0);
	boolean_t save_userframes_p = ((trace_flags & STACKSHOT_SAVE_KERNEL_FRAMES_ONLY) == 0);

	if(trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) {
		if(tracepos + sizeof(struct mem_and_io_snapshot) > tracebound) {
			error = -1;
			goto error_exit;
		}
		kdp_mem_and_io_snapshot((struct mem_and_io_snapshot *)tracepos);
		tracepos += sizeof(struct mem_and_io_snapshot);
	}

walk_list:
	queue_iterate(task_list, task, task_t, tasks) {
		if ((task == NULL) || !ml_validate_nofault((vm_offset_t) task, sizeof(struct task)))
			goto error_exit;

		int task_pid = pid_from_task(task);
		uint64_t task_uniqueid = proc_uniqueid_from_task(task);
		boolean_t task64 = task_has_64BitAddr(task);

		if (!task->active) {
			/*
			 * Not interested in terminated tasks without threads, and
			 * at the moment, stackshot can't handle a task without a name.
			 */
			if (queue_empty(&task->threads) || task_pid == -1) {
				continue;
			}
		}

		/* Trace everything, unless a process was specified */
		if ((pid == -1) || (pid == task_pid)) {
			task_snapshot_t task_snap;
			uint32_t uuid_info_count = 0;
			mach_vm_address_t uuid_info_addr = 0;
			boolean_t have_map = (task->map != NULL) &&
				(ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
			boolean_t have_pmap = have_map && (task->map->pmap != NULL) &&
				(ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
			uint64_t shared_cache_base_address = 0;

			if (have_pmap && task->active && save_loadinfo_p && task_pid > 0) {
				// Read the dyld_all_image_infos struct from the task memory to get UUID array count and location
				if (task64) {
					struct user64_dyld_all_image_infos task_image_infos;
					if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
						uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
						uuid_info_addr = task_image_infos.uuidArray;
					}
				} else {
					struct user32_dyld_all_image_infos task_image_infos;
					if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user32_dyld_all_image_infos))) {
						uuid_info_count = task_image_infos.uuidArrayCount;
						uuid_info_addr = task_image_infos.uuidArray;
					}
				}

				// If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
				// this data structure), we zero the uuid_info_count so that we won't even try to save load info
				// for this task.
				if (!uuid_info_addr) {
					uuid_info_count = 0;
				}
			}

			if (have_pmap && save_kextloadinfo_p && task_pid == 0) {
				if (ml_validate_nofault((vm_offset_t)(gLoadedKextSummaries), sizeof(OSKextLoadedKextSummaryHeader))) {
					uuid_info_count = gLoadedKextSummaries->numSummaries + 1; /* include main kernel UUID */
				}
			}

			if (tracepos + sizeof(struct task_snapshot) > tracebound) {
				error = -1;
				goto error_exit;
			}

			task_snap = (task_snapshot_t) tracepos;
			task_snap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
			task_snap->pid = task_pid;
			task_snap->uniqueid = task_uniqueid;
			task_snap->nloadinfos = uuid_info_count;
			/* Add the BSD process identifiers */
			if (task_pid != -1)
				proc_name_kdp(task, task_snap->p_comm, sizeof(task_snap->p_comm));
			else
				task_snap->p_comm[0] = '\0';
			task_snap->ss_flags = 0;
			if (task64)
				task_snap->ss_flags |= kUser64_p;
			if (task64 && task_pid == 0)
				task_snap->ss_flags |= kKernel64_p;
			if (!task->active)
				task_snap->ss_flags |= kTerminatedSnapshot;
			if(task->pidsuspended) task_snap->ss_flags |= kPidSuspended;
			if(task->frozen) task_snap->ss_flags |= kFrozen;

			if (task->effective_policy.t_sup_active == 1)
				task_snap->ss_flags |= kTaskIsSuppressed;

			task_snap->latency_qos = (task->effective_policy.t_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED) ?
				LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | task->effective_policy.t_latency_qos);

			task_snap->suspend_count = task->suspend_count;
			task_snap->task_size = have_pmap ? pmap_resident_count(task->map->pmap) : 0;
			task_snap->faults = task->faults;
			task_snap->pageins = task->pageins;
			task_snap->cow_faults = task->cow_faults;

			task_snap->user_time_in_terminated_threads = task->total_user_time;
			task_snap->system_time_in_terminated_threads = task->total_system_time;
			/*
			 * The throttling counters are maintained as 64-bit counters in the proc
			 * structure. However, we reserve 32-bits (each) for them in the task_snapshot
			 * struct to save space and since we do not expect them to overflow 32-bits. If we
			 * find these values overflowing in the future, the fix would be to simply
			 * upgrade these counters to 64-bit in the task_snapshot struct
			 */
			task_snap->was_throttled = (uint32_t) proc_was_throttled_from_task(task);
			task_snap->did_throttle = (uint32_t) proc_did_throttle_from_task(task);

			if (task->shared_region && ml_validate_nofault((vm_offset_t)task->shared_region,
					sizeof(struct vm_shared_region))) {
				struct vm_shared_region *sr = task->shared_region;

				shared_cache_base_address = sr->sr_base_address + sr->sr_first_mapping;
			}
			if (!shared_cache_base_address
					|| !kdp_copyin(task->map->pmap, shared_cache_base_address, task_snap->shared_cache_identifier, sizeof(task_snap->shared_cache_identifier))) {
				memset(task_snap->shared_cache_identifier, 0x0, sizeof(task_snap->shared_cache_identifier));
			}
			if (task->shared_region) {
				/*
				 * No refcounting here, but we are in debugger
				 * context, so that should be safe.
				 */
				task_snap->shared_cache_slide = task->shared_region->sr_slide_info.slide;
			} else {
				task_snap->shared_cache_slide = 0;
			}

			tracepos += sizeof(struct task_snapshot);

			if (task_pid > 0 && uuid_info_count > 0) {
				uint32_t uuid_info_size = (uint32_t)(task64 ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
				uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;

				if (tracepos + uuid_info_array_size > tracebound) {
					error = -1;
					goto error_exit;
				}

				// Copy in the UUID info array
				// It may be nonresident, in which case just fix up nloadinfos to 0 in the task_snap
				if (have_pmap && !kdp_copyin(task->map->pmap, uuid_info_addr, tracepos, uuid_info_array_size))
					task_snap->nloadinfos = 0;
				else
					tracepos += uuid_info_array_size;
			} else if (task_pid == 0 && uuid_info_count > 0) {
				uint32_t uuid_info_size = (uint32_t)sizeof(kernel_uuid_info);
				uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
				kernel_uuid_info *output_uuids;

				if (tracepos + uuid_info_array_size > tracebound) {
					error = -1;
					goto error_exit;
				}

				output_uuids = (kernel_uuid_info *)tracepos;

				do {
					if (!kernel_uuid || !ml_validate_nofault((vm_offset_t)kernel_uuid, sizeof(uuid_t))) {
						/* Kernel UUID not found or inaccessible */
						task_snap->nloadinfos = 0;
						break;
					}

					output_uuids[0].imageLoadAddress = (uintptr_t)VM_KERNEL_UNSLIDE(vm_kernel_stext);
					memcpy(&output_uuids[0].imageUUID, kernel_uuid, sizeof(uuid_t));

					if (ml_validate_nofault((vm_offset_t)(&gLoadedKextSummaries->summaries[0]),
							gLoadedKextSummaries->entry_size * gLoadedKextSummaries->numSummaries)) {
						uint32_t kexti;

						for (kexti=0 ; kexti < gLoadedKextSummaries->numSummaries; kexti++) {
							output_uuids[1+kexti].imageLoadAddress = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].address);
							memcpy(&output_uuids[1+kexti].imageUUID, &gLoadedKextSummaries->summaries[kexti].uuid, sizeof(uuid_t));
						}

						tracepos += uuid_info_array_size;
					} else {
						/* kext summary invalid, but kernel UUID was copied */
						task_snap->nloadinfos = 1;
						tracepos += uuid_info_size;
					}
				} while(0);
			}

			queue_iterate(&task->threads, thread, thread_t, task_threads){
				uint64_t tval;

				if ((thread == NULL) || !ml_validate_nofault((vm_offset_t) thread, sizeof(struct thread)))
					goto error_exit;

				if (((tracepos + 4 * sizeof(struct thread_snapshot)) > tracebound)) {
					error = -1;
					goto error_exit;
				}
				if (!save_userframes_p && thread->kernel_stack == 0)
					continue;

				/* Populate the thread snapshot header */
				tsnap = (thread_snapshot_t) tracepos;
				tsnap->thread_id = thread_tid(thread);
				tsnap->state = thread->state;
				tsnap->priority = thread->priority;
				tsnap->sched_pri = thread->sched_pri;
				tsnap->sched_flags = thread->sched_flags;
				tsnap->wait_event = VM_KERNEL_UNSLIDE(thread->wait_event);
				tsnap->continuation = VM_KERNEL_UNSLIDE(thread->continuation);
				tval = safe_grab_timer_value(&thread->user_timer);
				tsnap->user_time = tval;
				tval = safe_grab_timer_value(&thread->system_timer);
				if (thread->precise_user_kernel_time) {
					tsnap->system_time = tval;
				} else {
					tsnap->user_time += tval;
					tsnap->system_time = 0;
				}
				tsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
				tracepos += sizeof(struct thread_snapshot);
				tsnap->ss_flags = 0;

				if (thread->effective_policy.darwinbg) {
					tsnap->ss_flags |= kThreadDarwinBG;
				}

				if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) {
					uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
					if (dqkeyaddr != 0) {
						uint64_t dqaddr = 0;
						if (kdp_copyin(task->map->pmap, dqkeyaddr, &dqaddr, (task64 ? 8 : 4)) && (dqaddr != 0)) {
							uint64_t dqserialnumaddr = dqaddr + dispatch_offset;
							uint64_t dqserialnum = 0;
							if (kdp_copyin(task->map->pmap, dqserialnumaddr, &dqserialnum, (task64 ? 8 : 4))) {
								tsnap->ss_flags |= kHasDispatchSerial;
								*(uint64_t *)tracepos = dqserialnum;
								tracepos += 8;
							}
						}
					}
				}
				/* Call through to the machine specific trace routines
				 * Frames are added past the snapshot header.
				 */
				tracebytes = 0;
				if (thread->kernel_stack != 0) {
#if defined(__LP64__)
					tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
					tsnap->ss_flags |= kKernel64_p;
					framesize = 16;
#else
					tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
					framesize = 8;
#endif
				}
				tsnap->nkern_frames = tracebytes/framesize;
				tracepos += tracebytes;
				tracebytes = 0;
				/* Trace user stack, if any */
				if (save_userframes_p && task->active && thread->task->map != kernel_map) {
					/* 64-bit task? */
					if (task_has_64BitAddr(thread->task)) {
						tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
						tsnap->ss_flags |= kUser64_p;
						framesize = 16;
					}
					else {
						tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
						framesize = 8;
					}
				}
				tsnap->nuser_frames = tracebytes/framesize;
				tracepos += tracebytes;
				tracebytes = 0;
			}
		}
	}

	if (is_active_list) {
		is_active_list = FALSE;
		task_list = &terminated_tasks;
		goto walk_list;
	}

error_exit:
	/* Release stack snapshot wait indicator */
	kdp_snapshot_postflight();

	*pbytesTraced = (uint32_t)(tracepos - (char *) tracebuf);

	return error;
}
static boolean_t
kdp_readioport(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
	)
{
	kdp_readioport_req_t	*rq = &pkt->readioport_req;
	kdp_readioport_reply_t	*rp = &pkt->readioport_reply;
	size_t			plen = *len;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	if (rq->nbytes > MAX_KDP_DATA_SIZE)
		rp->error = KDPERR_BAD_NBYTES;
	else {
#if KDP_TEST_HARNESS
		uint16_t addr = rq->address;
#endif
		uint16_t size = rq->nbytes;
		dprintf(("kdp_readioport addr %x size %d\n", addr, size));

		rp->error = kdp_machine_ioport_read(rq, rp->data, rq->lcpu);
		if (rp->error == KDPERR_NO_ERROR)
			rp->hdr.len += size;
	}

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_writeioport(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
	)
{
	kdp_writeioport_req_t	*rq = &pkt->writeioport_req;
	kdp_writeioport_reply_t	*rp = &pkt->writeioport_reply;
	size_t			plen = *len;

	if (plen < sizeof (*rq))
		return (FALSE);

	if (rq->nbytes > MAX_KDP_DATA_SIZE)
		rp->error = KDPERR_BAD_NBYTES;
	else {
		dprintf(("kdp_writeioport addr %x size %d\n", rq->address,
			rq->nbytes));

		rp->error = kdp_machine_ioport_write(rq, rq->data, rq->lcpu);
	}

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_readmsr64(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
	)
{
	kdp_readmsr64_req_t	*rq = &pkt->readmsr64_req;
	kdp_readmsr64_reply_t	*rp = &pkt->readmsr64_reply;
	size_t			plen = *len;

	if (plen < sizeof (*rq))
		return (FALSE);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	dprintf(("kdp_readmsr64 lcpu %x addr %x\n", rq->lcpu, rq->address));
	rp->error = kdp_machine_msr64_read(rq, rp->data, rq->lcpu);
	if (rp->error == KDPERR_NO_ERROR)
		rp->hdr.len += sizeof(uint64_t);

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_writemsr64(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
	)
{
	kdp_writemsr64_req_t	*rq = &pkt->writemsr64_req;
	kdp_writemsr64_reply_t	*rp = &pkt->writemsr64_reply;
	size_t			plen = *len;

	if (plen < sizeof (*rq))
		return (FALSE);

	dprintf(("kdp_writemsr64 lcpu %x addr %x\n", rq->lcpu, rq->address));
	rp->error = kdp_machine_msr64_write(rq, rq->data, rq->lcpu);

	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}
static boolean_t
kdp_dumpinfo(
	kdp_pkt_t	*pkt,
	int		*len,
	unsigned short	*reply_port
	)
{
	kdp_dumpinfo_req_t	*rq = &pkt->dumpinfo_req;
	kdp_dumpinfo_reply_t	*rp = &pkt->dumpinfo_reply;
	size_t			plen = *len;

	if (plen < sizeof (*rq))
		return (FALSE);

	dprintf(("kdp_dumpinfo file=%s destip=%s routerip=%s\n", rq->name, rq->destip, rq->routerip));
	rp->hdr.is_reply = 1;
	rp->hdr.len = sizeof (*rp);

	if ((rq->type & KDP_DUMPINFO_MASK) != KDP_DUMPINFO_GETINFO) {
		kdp_set_dump_info(rq->type, rq->name, rq->destip, rq->routerip,
			rq->port);
	}

	/* gather some stats for reply */
	kdp_get_dump_info(&rp->type, rp->name, rp->destip, rp->routerip,
		&rp->port);

	*reply_port = kdp.reply_port;
	*len = rp->hdr.len;

	return (TRUE);
}