/*
 * osfmk/kdp/kdp.c — Apple XNU kernel debugging protocol (KDP) core,
 * from release xnu-2050.7.9.
 */
1 /*
2 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/vm_param.h>
31 #include <sys/appleapiopts.h>
32 #include <kern/debug.h>
33 #include <uuid/uuid.h>
34
35 #include <kdp/kdp_internal.h>
36 #include <kdp/kdp_private.h>
37 #include <kdp/kdp_core.h>
38 #include <kdp/kdp_dyld.h>
39
40 #include <libsa/types.h>
41 #include <libkern/version.h>
42
43 #include <string.h> /* bcopy */
44
45 #include <kern/processor.h>
46 #include <kern/thread.h>
47 #include <kern/clock.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_kern.h>
50 #include <vm/vm_pageout.h>
51
extern int count_busy_buffers(void);   /* must track with declaration in bsd/sys/buf_internal.h */

#define DO_ALIGN	1	/* align all packet data accesses */

/*
 * Debug tracing for the KDP handlers themselves: dprintf() compiles to
 * nothing unless KDP_TEST_HARNESS is enabled, in which case it forwards
 * to kprintf.
 */
#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x)
#endif
62
/*
 * Request dispatch table, indexed by (request code - KDP_CONNECT).
 * Slot order must track the KDP request enumeration exactly; codes that
 * are unassigned in this protocol version point at kdp_unknown, which
 * logs and rejects the request.
 */
static kdp_dispatch_t
    dispatch_table[KDP_INVALID_REQUEST-KDP_CONNECT] =
    {
/* 0 */	kdp_connect,
/* 1 */	kdp_disconnect,
/* 2 */	kdp_hostinfo,
/* 3 */	kdp_version,
/* 4 */	kdp_maxbytes,
/* 5 */	kdp_readmem,
/* 6 */	kdp_writemem,
/* 7 */	kdp_readregs,
/* 8 */	kdp_writeregs,
/* 9 */	kdp_unknown,
/* A */	kdp_unknown,
/* B */	kdp_suspend,
/* C */	kdp_resumecpus,
/* D */	kdp_unknown,
/* E */	kdp_unknown,
/* F */	kdp_breakpoint_set,
/*10 */	kdp_breakpoint_remove,
/*11 */	kdp_regions,
/*12 */	kdp_reattach,
/*13 */	kdp_reboot,
/*14 */	kdp_readmem64,
/*15 */	kdp_writemem64,
/*16 */	kdp_breakpoint64_set,
/*17 */	kdp_breakpoint64_remove,
/*18 */	kdp_kernelversion,
/*19 */	kdp_readphysmem64,
/*1A */	kdp_writephysmem64,
/*1B */	kdp_readioport,
/*1C */	kdp_writeioport,
/*1D */	kdp_readmsr64,
/*1E */	kdp_writemsr64,
/*1F */	kdp_dumpinfo,
    };
99
/* Global KDP session state (connection flags, ports, sequence numbers). */
kdp_glob_t	kdp;

#define MAX_BREAKPOINTS 100

/*
 * Version 11 of the KDP Protocol adds support for 64-bit wide memory
 * addresses (read/write and breakpoints) as well as a dedicated
 * kernelversion request. Version 12 adds read/writing of physical
 * memory with 64-bit wide memory addresses.
 */
#define KDP_VERSION 12

/* One planted breakpoint: where it is and the instruction bytes it replaced. */
typedef struct{
	mach_vm_address_t	address;		/* 0 => slot is free */
	uint32_t	bytesused;			/* valid length of oldbytes */
	uint8_t		oldbytes[MAX_BREAKINSN_BYTES];	/* saved original instruction */
} kdp_breakpoint_record_t;

static kdp_breakpoint_record_t breakpoint_list[MAX_BREAKPOINTS];
static unsigned int breakpoints_initialized = 0;	/* lazy table init flag */

int reattach_wait = 0;			/* hold the machine for debugger re-attach */
int noresume_on_disconnect = 0;
extern unsigned int return_on_panic;

typedef struct thread_snapshot *thread_snapshot_t;
typedef struct task_snapshot *task_snapshot_t;

extern int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
extern int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
extern int
proc_pid(void *p);
extern void
proc_name_kdp(task_t task, char *buf, int size);

extern void
kdp_snapshot_postflight(void);

static int
pid_from_task(task_t task);

kdp_error_t
kdp_set_breakpoint_internal(
    mach_vm_address_t	address
);

kdp_error_t
kdp_remove_breakpoint_internal(
    mach_vm_address_t	address
);


int
kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced);

boolean_t kdp_copyin(pmap_t, uint64_t, void *, size_t);
extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);
159
160 boolean_t
161 kdp_packet(
162 unsigned char *pkt,
163 int *len,
164 unsigned short *reply_port
165 )
166 {
167 static unsigned aligned_pkt[1538/sizeof(unsigned)+1]; // max ether pkt
168 kdp_pkt_t *rd = (kdp_pkt_t *)&aligned_pkt;
169 size_t plen = *len;
170 kdp_req_t req;
171 boolean_t ret;
172
173 #if DO_ALIGN
174 bcopy((char *)pkt, (char *)rd, sizeof(aligned_pkt));
175 #else
176 rd = (kdp_pkt_t *)pkt;
177 #endif
178 if (plen < sizeof (rd->hdr) || rd->hdr.len != plen) {
179 printf("kdp_packet bad len pkt %lu hdr %d\n", plen, rd->hdr.len);
180
181 return (FALSE);
182 }
183
184 if (rd->hdr.is_reply) {
185 printf("kdp_packet reply recvd req %x seq %x\n",
186 rd->hdr.request, rd->hdr.seq);
187
188 return (FALSE);
189 }
190
191 req = rd->hdr.request;
192 if (req >= KDP_INVALID_REQUEST) {
193 printf("kdp_packet bad request %x len %d seq %x key %x\n",
194 rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);
195
196 return (FALSE);
197 }
198
199 ret = ((*dispatch_table[req - KDP_CONNECT])(rd, len, reply_port));
200 #if DO_ALIGN
201 bcopy((char *)rd, (char *) pkt, *len);
202 #endif
203 return ret;
204 }
205
206 static boolean_t
207 kdp_unknown(
208 kdp_pkt_t *pkt,
209 __unused int *len,
210 __unused unsigned short *reply_port
211 )
212 {
213 kdp_pkt_t *rd = (kdp_pkt_t *)pkt;
214
215 printf("kdp_unknown request %x len %d seq %x key %x\n",
216 rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);
217
218 return (FALSE);
219 }
220
221 static boolean_t
222 kdp_connect(
223 kdp_pkt_t *pkt,
224 int *len,
225 unsigned short *reply_port
226 )
227 {
228 kdp_connect_req_t *rq = &pkt->connect_req;
229 size_t plen = *len;
230 kdp_connect_reply_t *rp = &pkt->connect_reply;
231 uint16_t rport, eport;
232 uint32_t key;
233 uint8_t seq;
234
235 if (plen < sizeof (*rq))
236 return (FALSE);
237
238 dprintf(("kdp_connect seq %x greeting %s\n", rq->hdr.seq, rq->greeting));
239
240 rport = rq->req_reply_port;
241 eport = rq->exc_note_port;
242 key = rq->hdr.key;
243 seq = rq->hdr.seq;
244 if (kdp.is_conn) {
245 if ((seq == kdp.conn_seq) && /* duplicate request */
246 (rport == kdp.reply_port) &&
247 (eport == kdp.exception_port) &&
248 (key == kdp.session_key))
249 rp->error = KDPERR_NO_ERROR;
250 else
251 rp->error = KDPERR_ALREADY_CONNECTED;
252 }
253 else {
254 kdp.reply_port = rport;
255 kdp.exception_port = eport;
256 kdp.is_conn = TRUE;
257 kdp.conn_seq = seq;
258 kdp.session_key = key;
259
260 rp->error = KDPERR_NO_ERROR;
261 }
262
263 rp->hdr.is_reply = 1;
264 rp->hdr.len = sizeof (*rp);
265
266 *reply_port = rport;
267 *len = rp->hdr.len;
268
269 if (current_debugger == KDP_CUR_DB)
270 active_debugger=1;
271
272 return (TRUE);
273 }
274
275 static boolean_t
276 kdp_disconnect(
277 kdp_pkt_t *pkt,
278 int *len,
279 unsigned short *reply_port
280 )
281 {
282 kdp_disconnect_req_t *rq = &pkt->disconnect_req;
283 size_t plen = *len;
284 kdp_disconnect_reply_t *rp = &pkt->disconnect_reply;
285
286 if (plen < sizeof (*rq))
287 return (FALSE);
288
289 if (!kdp.is_conn)
290 return (FALSE);
291
292 dprintf(("kdp_disconnect\n"));
293
294 *reply_port = kdp.reply_port;
295
296 kdp.reply_port = kdp.exception_port = 0;
297 kdp.is_halted = kdp.is_conn = FALSE;
298 kdp.exception_seq = kdp.conn_seq = 0;
299 kdp.session_key = 0;
300
301 if ((panicstr != NULL) && (return_on_panic == 0))
302 reattach_wait = 1;
303
304 if (noresume_on_disconnect == 1) {
305 reattach_wait = 1;
306 noresume_on_disconnect = 0;
307 }
308
309 rp->hdr.is_reply = 1;
310 rp->hdr.len = sizeof (*rp);
311
312 *len = rp->hdr.len;
313
314 if (current_debugger == KDP_CUR_DB)
315 active_debugger=0;
316
317 return (TRUE);
318 }
319
320 static boolean_t
321 kdp_reattach(
322 kdp_pkt_t *pkt,
323 int *len,
324 unsigned short *reply_port
325 )
326 {
327 kdp_reattach_req_t *rq = &pkt->reattach_req;
328
329 kdp.is_conn = TRUE;
330 kdp_disconnect(pkt, len, reply_port);
331 *reply_port = rq->req_reply_port;
332 reattach_wait = 1;
333 return (TRUE);
334 }
335
336 static boolean_t
337 kdp_hostinfo(
338 kdp_pkt_t *pkt,
339 int *len,
340 unsigned short *reply_port
341 )
342 {
343 kdp_hostinfo_req_t *rq = &pkt->hostinfo_req;
344 size_t plen = *len;
345 kdp_hostinfo_reply_t *rp = &pkt->hostinfo_reply;
346
347 if (plen < sizeof (*rq))
348 return (FALSE);
349
350 dprintf(("kdp_hostinfo\n"));
351
352 rp->hdr.is_reply = 1;
353 rp->hdr.len = sizeof (*rp);
354
355 kdp_machine_hostinfo(&rp->hostinfo);
356
357 *reply_port = kdp.reply_port;
358 *len = rp->hdr.len;
359
360 return (TRUE);
361 }
362
363 static boolean_t
364 kdp_kernelversion(
365 kdp_pkt_t *pkt,
366 int *len,
367 unsigned short *reply_port
368 )
369 {
370 kdp_kernelversion_req_t *rq = &pkt->kernelversion_req;
371 size_t plen = *len;
372 kdp_kernelversion_reply_t *rp = &pkt->kernelversion_reply;
373 size_t slen;
374
375 if (plen < sizeof (*rq))
376 return (FALSE);
377
378 rp->hdr.is_reply = 1;
379 rp->hdr.len = sizeof (*rp);
380
381 dprintf(("kdp_kernelversion\n"));
382 slen = strlcpy(rp->version, kdp_kernelversion_string, MAX_KDP_DATA_SIZE);
383
384 rp->hdr.len += slen + 1; /* strlcpy returns the amount copied with NUL */
385
386 *reply_port = kdp.reply_port;
387 *len = rp->hdr.len;
388
389 return (TRUE);
390 }
391
392 static boolean_t
393 kdp_suspend(
394 kdp_pkt_t *pkt,
395 int *len,
396 unsigned short *reply_port
397 )
398 {
399 kdp_suspend_req_t *rq = &pkt->suspend_req;
400 size_t plen = *len;
401 kdp_suspend_reply_t *rp = &pkt->suspend_reply;
402
403 if (plen < sizeof (*rq))
404 return (FALSE);
405
406 rp->hdr.is_reply = 1;
407 rp->hdr.len = sizeof (*rp);
408
409 dprintf(("kdp_suspend\n"));
410
411 kdp.is_halted = TRUE;
412
413 *reply_port = kdp.reply_port;
414 *len = rp->hdr.len;
415
416 return (TRUE);
417 }
418
419 static boolean_t
420 kdp_resumecpus(
421 kdp_pkt_t *pkt,
422 int *len,
423 unsigned short *reply_port
424 )
425 {
426 kdp_resumecpus_req_t *rq = &pkt->resumecpus_req;
427 size_t plen = *len;
428 kdp_resumecpus_reply_t *rp = &pkt->resumecpus_reply;
429
430 if (plen < sizeof (*rq))
431 return (FALSE);
432
433 rp->hdr.is_reply = 1;
434 rp->hdr.len = sizeof (*rp);
435
436 dprintf(("kdp_resumecpus %x\n", rq->cpu_mask));
437
438 kdp.is_halted = FALSE;
439
440 *reply_port = kdp.reply_port;
441 *len = rp->hdr.len;
442
443 return (TRUE);
444 }
445
446 static boolean_t
447 kdp_writemem(
448 kdp_pkt_t *pkt,
449 int *len,
450 unsigned short *reply_port
451 )
452 {
453 kdp_writemem_req_t *rq = &pkt->writemem_req;
454 size_t plen = *len;
455 kdp_writemem_reply_t *rp = &pkt->writemem_reply;
456 mach_vm_size_t cnt;
457
458 if (plen < sizeof (*rq))
459 return (FALSE);
460
461 if (rq->nbytes > MAX_KDP_DATA_SIZE)
462 rp->error = KDPERR_BAD_NBYTES;
463 else {
464 dprintf(("kdp_writemem addr %x size %d\n", rq->address, rq->nbytes));
465
466 cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, rq->nbytes);
467 rp->error = KDPERR_NO_ERROR;
468 }
469
470 rp->hdr.is_reply = 1;
471 rp->hdr.len = sizeof (*rp);
472
473 *reply_port = kdp.reply_port;
474 *len = rp->hdr.len;
475
476 return (TRUE);
477 }
478
479 static boolean_t
480 kdp_writemem64(
481 kdp_pkt_t *pkt,
482 int *len,
483 unsigned short *reply_port
484 )
485 {
486 kdp_writemem64_req_t *rq = &pkt->writemem64_req;
487 size_t plen = *len;
488 kdp_writemem64_reply_t *rp = &pkt->writemem64_reply;
489 mach_vm_size_t cnt;
490
491 if (plen < sizeof (*rq))
492 return (FALSE);
493
494 if (rq->nbytes > MAX_KDP_DATA_SIZE)
495 rp->error = KDPERR_BAD_NBYTES;
496 else {
497 dprintf(("kdp_writemem64 addr %llx size %d\n", rq->address, rq->nbytes));
498
499 cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, (mach_vm_size_t)rq->nbytes);
500 rp->error = KDPERR_NO_ERROR;
501 }
502
503 rp->hdr.is_reply = 1;
504 rp->hdr.len = sizeof (*rp);
505
506 *reply_port = kdp.reply_port;
507 *len = rp->hdr.len;
508
509 return (TRUE);
510 }
511
512 static boolean_t
513 kdp_writephysmem64(
514 kdp_pkt_t *pkt,
515 int *len,
516 unsigned short *reply_port
517 )
518 {
519 kdp_writephysmem64_req_t *rq = &pkt->writephysmem64_req;
520 size_t plen = *len;
521 kdp_writephysmem64_reply_t *rp = &pkt->writephysmem64_reply;
522
523 if (plen < sizeof (*rq))
524 return (FALSE);
525
526 if (rq->nbytes > MAX_KDP_DATA_SIZE)
527 rp->error = KDPERR_BAD_NBYTES;
528 else {
529 dprintf(("kdp_writephysmem64 addr %llx size %d\n", rq->address, rq->nbytes));
530 kdp_machine_phys_write(rq, rq->data, rq->lcpu);
531 rp->error = KDPERR_NO_ERROR;
532 }
533
534 rp->hdr.is_reply = 1;
535 rp->hdr.len = sizeof (*rp);
536
537 *reply_port = kdp.reply_port;
538 *len = rp->hdr.len;
539
540 return (TRUE);
541 }
542
543 static boolean_t
544 kdp_readmem(
545 kdp_pkt_t *pkt,
546 int *len,
547 unsigned short *reply_port
548 )
549 {
550 kdp_readmem_req_t *rq = &pkt->readmem_req;
551 size_t plen = *len;
552 kdp_readmem_reply_t *rp = &pkt->readmem_reply;
553 mach_vm_size_t cnt;
554 #if __i386__
555 void *pversion = &kdp_kernelversion_string;
556 #endif
557
558 if (plen < sizeof (*rq))
559 return (FALSE);
560
561 rp->hdr.is_reply = 1;
562 rp->hdr.len = sizeof (*rp);
563
564 if (rq->nbytes > MAX_KDP_DATA_SIZE)
565 rp->error = KDPERR_BAD_NBYTES;
566 else {
567 unsigned int n = rq->nbytes;
568
569 dprintf(("kdp_readmem addr %x size %d\n", rq->address, n));
570 #if __i386__
571 /* XXX This is a hack to facilitate the "showversion" macro
572 * on i386, which is used to obtain the kernel version without
573 * symbols - a pointer to the version string should eventually
574 * be pinned at a fixed address when an equivalent of the
575 * VECTORS segment (loaded at a fixed load address, and contains
576 * a table) is implemented on these architectures, as with PPC.
577 * N.B.: x86 now has a low global page, and the version indirection
578 * is pinned at 0x201C. We retain the 0x501C address override
579 * for compatibility. Future architectures should instead use
580 * the KDP_KERNELVERSION request.
581 */
582 if (rq->address == 0x501C)
583 rq->address = (uintptr_t)&pversion;
584 #endif
585 cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, n);
586 rp->error = KDPERR_NO_ERROR;
587
588 rp->hdr.len += cnt;
589 }
590
591 *reply_port = kdp.reply_port;
592 *len = rp->hdr.len;
593
594 return (TRUE);
595 }
596
597 static boolean_t
598 kdp_readmem64(
599 kdp_pkt_t *pkt,
600 int *len,
601 unsigned short *reply_port
602 )
603 {
604 kdp_readmem64_req_t *rq = &pkt->readmem64_req;
605 size_t plen = *len;
606 kdp_readmem64_reply_t *rp = &pkt->readmem64_reply;
607 mach_vm_size_t cnt;
608
609 if (plen < sizeof (*rq))
610 return (FALSE);
611
612 rp->hdr.is_reply = 1;
613 rp->hdr.len = sizeof (*rp);
614
615 if (rq->nbytes > MAX_KDP_DATA_SIZE)
616 rp->error = KDPERR_BAD_NBYTES;
617 else {
618
619 dprintf(("kdp_readmem64 addr %llx size %d\n", rq->address, rq->nbytes));
620
621 cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes);
622 rp->error = KDPERR_NO_ERROR;
623
624 rp->hdr.len += cnt;
625 }
626
627 *reply_port = kdp.reply_port;
628 *len = rp->hdr.len;
629
630 return (TRUE);
631 }
632
633 static boolean_t
634 kdp_readphysmem64(
635 kdp_pkt_t *pkt,
636 int *len,
637 unsigned short *reply_port
638 )
639 {
640 kdp_readphysmem64_req_t *rq = &pkt->readphysmem64_req;
641 size_t plen = *len;
642 kdp_readphysmem64_reply_t *rp = &pkt->readphysmem64_reply;
643 int cnt;
644
645 if (plen < sizeof (*rq))
646 return (FALSE);
647
648 rp->hdr.is_reply = 1;
649 rp->hdr.len = sizeof (*rp);
650
651 if (rq->nbytes > MAX_KDP_DATA_SIZE)
652 rp->error = KDPERR_BAD_NBYTES;
653 else {
654
655 dprintf(("kdp_readphysmem64 addr %llx size %d\n", rq->address, rq->nbytes));
656
657 cnt = (int)kdp_machine_phys_read(rq, rp->data, rq->lcpu);
658 rp->error = KDPERR_NO_ERROR;
659
660 rp->hdr.len += cnt;
661 }
662
663 *reply_port = kdp.reply_port;
664 *len = rp->hdr.len;
665
666 return (TRUE);
667 }
668
669 static boolean_t
670 kdp_maxbytes(
671 kdp_pkt_t *pkt,
672 int *len,
673 unsigned short *reply_port
674 )
675 {
676 kdp_maxbytes_req_t *rq = &pkt->maxbytes_req;
677 size_t plen = *len;
678 kdp_maxbytes_reply_t *rp = &pkt->maxbytes_reply;
679
680 if (plen < sizeof (*rq))
681 return (FALSE);
682
683 rp->hdr.is_reply = 1;
684 rp->hdr.len = sizeof (*rp);
685
686 dprintf(("kdp_maxbytes\n"));
687
688 rp->max_bytes = MAX_KDP_DATA_SIZE;
689
690 *reply_port = kdp.reply_port;
691 *len = rp->hdr.len;
692
693 return (TRUE);
694 }
695
696 static boolean_t
697 kdp_version(
698 kdp_pkt_t *pkt,
699 int *len,
700 unsigned short *reply_port
701 )
702 {
703 kdp_version_req_t *rq = &pkt->version_req;
704 size_t plen = *len;
705 kdp_version_reply_t *rp = &pkt->version_reply;
706
707 if (plen < sizeof (*rq))
708 return (FALSE);
709
710 rp->hdr.is_reply = 1;
711 rp->hdr.len = sizeof (*rp);
712
713 dprintf(("kdp_version\n"));
714
715 rp->version = KDP_VERSION;
716 if (!(kdp_flag & KDP_BP_DIS))
717 rp->feature = KDP_FEATURE_BP;
718 else
719 rp->feature = 0;
720
721 *reply_port = kdp.reply_port;
722 *len = rp->hdr.len;
723
724 return (TRUE);
725 }
726
727 static boolean_t
728 kdp_regions(
729 kdp_pkt_t *pkt,
730 int *len,
731 unsigned short *reply_port
732 )
733 {
734 kdp_regions_req_t *rq = &pkt->regions_req;
735 size_t plen = *len;
736 kdp_regions_reply_t *rp = &pkt->regions_reply;
737 kdp_region_t *r;
738
739 if (plen < sizeof (*rq))
740 return (FALSE);
741
742 rp->hdr.is_reply = 1;
743 rp->hdr.len = sizeof (*rp);
744
745 dprintf(("kdp_regions\n"));
746
747 r = rp->regions;
748 rp->nregions = 0;
749
750 r->address = 0;
751 r->nbytes = 0xffffffff;
752
753 r->protection = VM_PROT_ALL; r++; rp->nregions++;
754
755 rp->hdr.len += rp->nregions * sizeof (kdp_region_t);
756
757 *reply_port = kdp.reply_port;
758 *len = rp->hdr.len;
759
760 return (TRUE);
761 }
762
763 static boolean_t
764 kdp_writeregs(
765 kdp_pkt_t *pkt,
766 int *len,
767 unsigned short *reply_port
768 )
769 {
770 kdp_writeregs_req_t *rq = &pkt->writeregs_req;
771 size_t plen = *len;
772 int size;
773 kdp_writeregs_reply_t *rp = &pkt->writeregs_reply;
774
775 if (plen < sizeof (*rq))
776 return (FALSE);
777
778 size = rq->hdr.len - (unsigned)sizeof(kdp_hdr_t) - (unsigned)sizeof(unsigned int);
779 rp->error = kdp_machine_write_regs(rq->cpu, rq->flavor, rq->data, &size);
780
781 rp->hdr.is_reply = 1;
782 rp->hdr.len = sizeof (*rp);
783
784 *reply_port = kdp.reply_port;
785 *len = rp->hdr.len;
786
787 return (TRUE);
788 }
789
790 static boolean_t
791 kdp_readregs(
792 kdp_pkt_t *pkt,
793 int *len,
794 unsigned short *reply_port
795 )
796 {
797 kdp_readregs_req_t *rq = &pkt->readregs_req;
798 size_t plen = *len;
799 kdp_readregs_reply_t *rp = &pkt->readregs_reply;
800 int size;
801
802 if (plen < sizeof (*rq))
803 return (FALSE);
804
805 rp->hdr.is_reply = 1;
806 rp->hdr.len = sizeof (*rp);
807
808 rp->error = kdp_machine_read_regs(rq->cpu, rq->flavor, rp->data, &size);
809 rp->hdr.len += size;
810
811 *reply_port = kdp.reply_port;
812 *len = rp->hdr.len;
813
814 return (TRUE);
815 }
816
817
818 boolean_t
819 kdp_breakpoint_set(
820 kdp_pkt_t *pkt,
821 int *len,
822 unsigned short *reply_port
823 )
824 {
825 kdp_breakpoint_req_t *rq = &pkt->breakpoint_req;
826 kdp_breakpoint_reply_t *rp = &pkt->breakpoint_reply;
827 size_t plen = *len;
828 kdp_error_t kerr;
829
830 if (plen < sizeof (*rq))
831 return (FALSE);
832
833 dprintf(("kdp_breakpoint_set %x\n", rq->address));
834
835 kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address);
836
837 rp->error = kerr;
838
839 rp->hdr.is_reply = 1;
840 rp->hdr.len = sizeof (*rp);
841 *reply_port = kdp.reply_port;
842 *len = rp->hdr.len;
843
844 return (TRUE);
845 }
846
847 boolean_t
848 kdp_breakpoint64_set(
849 kdp_pkt_t *pkt,
850 int *len,
851 unsigned short *reply_port
852 )
853 {
854 kdp_breakpoint64_req_t *rq = &pkt->breakpoint64_req;
855 kdp_breakpoint64_reply_t *rp = &pkt->breakpoint64_reply;
856 size_t plen = *len;
857 kdp_error_t kerr;
858
859 if (plen < sizeof (*rq))
860 return (FALSE);
861
862 dprintf(("kdp_breakpoint64_set %llx\n", rq->address));
863
864 kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address);
865
866 rp->error = kerr;
867
868 rp->hdr.is_reply = 1;
869 rp->hdr.len = sizeof (*rp);
870 *reply_port = kdp.reply_port;
871 *len = rp->hdr.len;
872
873 return (TRUE);
874 }
875
876 boolean_t
877 kdp_breakpoint_remove(
878 kdp_pkt_t *pkt,
879 int *len,
880 unsigned short *reply_port
881 )
882 {
883 kdp_breakpoint_req_t *rq = &pkt->breakpoint_req;
884 kdp_breakpoint_reply_t *rp = &pkt->breakpoint_reply;
885 size_t plen = *len;
886 kdp_error_t kerr;
887 if (plen < sizeof (*rq))
888 return (FALSE);
889
890 dprintf(("kdp_breakpoint_remove %x\n", rq->address));
891
892 kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address);
893
894 rp->error = kerr;
895
896 rp->hdr.is_reply = 1;
897 rp->hdr.len = sizeof (*rp);
898 *reply_port = kdp.reply_port;
899 *len = rp->hdr.len;
900
901 return (TRUE);
902 }
903
904 boolean_t
905 kdp_breakpoint64_remove(
906 kdp_pkt_t *pkt,
907 int *len,
908 unsigned short *reply_port
909 )
910 {
911 kdp_breakpoint64_req_t *rq = &pkt->breakpoint64_req;
912 kdp_breakpoint64_reply_t *rp = &pkt->breakpoint64_reply;
913 size_t plen = *len;
914 kdp_error_t kerr;
915
916 if (plen < sizeof (*rq))
917 return (FALSE);
918
919 dprintf(("kdp_breakpoint64_remove %llx\n", rq->address));
920
921 kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address);
922
923 rp->error = kerr;
924
925 rp->hdr.is_reply = 1;
926 rp->hdr.len = sizeof (*rp);
927 *reply_port = kdp.reply_port;
928 *len = rp->hdr.len;
929
930 return (TRUE);
931 }
932
933
934 kdp_error_t
935 kdp_set_breakpoint_internal(
936 mach_vm_address_t address
937 )
938 {
939
940 uint8_t breakinstr[MAX_BREAKINSN_BYTES], oldinstr[MAX_BREAKINSN_BYTES];
941 uint32_t breakinstrsize = sizeof(breakinstr);
942 mach_vm_size_t cnt;
943 int i;
944
945 kdp_machine_get_breakinsn(breakinstr, &breakinstrsize);
946
947 if(breakpoints_initialized == 0)
948 {
949 for(i=0;(i < MAX_BREAKPOINTS); breakpoint_list[i].address=0, i++);
950 breakpoints_initialized++;
951 }
952
953 cnt = kdp_machine_vm_read(address, (caddr_t)&oldinstr, (mach_vm_size_t)breakinstrsize);
954
955 if (0 == memcmp(oldinstr, breakinstr, breakinstrsize)) {
956 printf("A trap was already set at that address, not setting new breakpoint\n");
957
958 return KDPERR_BREAKPOINT_ALREADY_SET;
959 }
960
961 for(i=0;(i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != 0); i++);
962
963 if (i == MAX_BREAKPOINTS) {
964 return KDPERR_MAX_BREAKPOINTS;
965 }
966
967 breakpoint_list[i].address = address;
968 memcpy(breakpoint_list[i].oldbytes, oldinstr, breakinstrsize);
969 breakpoint_list[i].bytesused = breakinstrsize;
970
971 cnt = kdp_machine_vm_write((caddr_t)&breakinstr, address, breakinstrsize);
972
973 return KDPERR_NO_ERROR;
974 }
975
976 kdp_error_t
977 kdp_remove_breakpoint_internal(
978 mach_vm_address_t address
979 )
980 {
981 mach_vm_size_t cnt;
982 int i;
983
984 for(i=0;(i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != address); i++);
985
986 if (i == MAX_BREAKPOINTS)
987 {
988 return KDPERR_BREAKPOINT_NOT_FOUND;
989 }
990
991 breakpoint_list[i].address = 0;
992 cnt = kdp_machine_vm_write((caddr_t)&breakpoint_list[i].oldbytes, address, breakpoint_list[i].bytesused);
993
994 return KDPERR_NO_ERROR;
995 }
996
997 boolean_t
998 kdp_remove_all_breakpoints(void)
999 {
1000 int i;
1001 boolean_t breakpoint_found = FALSE;
1002
1003 if (breakpoints_initialized)
1004 {
1005 for(i=0;i < MAX_BREAKPOINTS; i++)
1006 {
1007 if (breakpoint_list[i].address)
1008 {
1009 kdp_machine_vm_write((caddr_t)&(breakpoint_list[i].oldbytes), (mach_vm_address_t)breakpoint_list[i].address, (mach_vm_size_t)breakpoint_list[i].bytesused);
1010 breakpoint_found = TRUE;
1011 breakpoint_list[i].address = 0;
1012 }
1013 }
1014
1015 if (breakpoint_found)
1016 printf("kdp_remove_all_breakpoints: found extant breakpoints, removing them.\n");
1017 }
1018 return breakpoint_found;
1019 }
1020
1021 boolean_t
1022 kdp_reboot(
1023 __unused kdp_pkt_t *pkt,
1024 __unused int *len,
1025 __unused unsigned short *reply_port
1026 )
1027 {
1028 dprintf(("kdp_reboot\n"));
1029
1030 kdp_machine_reboot();
1031
1032 return (TRUE); // no, not really, we won't return
1033 }
1034
1035 #define MAX_FRAMES 1000
1036
1037 static int pid_from_task(task_t task)
1038 {
1039 int pid = -1;
1040
1041 if (task->bsd_info)
1042 pid = proc_pid(task->bsd_info);
1043
1044 return pid;
1045 }
1046
/*
 * Copy 'size' bytes from user virtual address 'uaddr' (in pmap 'p') to
 * the kernel buffer 'dest', one physically-contiguous chunk at a time.
 * Uses physical copies (bcopy_phys) so it is safe in the debugger
 * context, where faulting is not an option.
 *
 * Returns TRUE only if the entire range was copied; stops early (and
 * returns FALSE) at the first user page that is unmapped or invalid,
 * or if the destination has no physical translation.
 */
boolean_t
kdp_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size) {
	size_t rem = size;
	char *kvaddr = dest;

	while (rem) {
		/* Translate both endpoints of this chunk to physical. */
		ppnum_t upn = pmap_find_phys(p, uaddr);
		uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
		uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
		/* Chunk may not cross a page boundary on either side. */
		uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
		uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
		size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
		cur_size = MIN(cur_size, rem);

		if (upn && pmap_valid_page(upn) && phys_dest) {
			bcopy_phys(phys_src, phys_dest, cur_size);
		}
		else
			break;	/* unmapped/invalid page: give up, report partial */
		uaddr += cur_size;
		kvaddr += cur_size;
		rem -= cur_size;
	}
	return (rem == 0);
}
1072
1073
1074 static void
1075 kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap)
1076 {
1077 unsigned int pages_reclaimed;
1078 unsigned int pages_wanted;
1079 kern_return_t kErr;
1080
1081 memio_snap->snapshot_magic = STACKSHOT_MEM_AND_IO_SNAPSHOT_MAGIC;
1082 memio_snap->free_pages = vm_page_free_count;
1083 memio_snap->active_pages = vm_page_active_count;
1084 memio_snap->inactive_pages = vm_page_inactive_count;
1085 memio_snap->purgeable_pages = vm_page_purgeable_count;
1086 memio_snap->wired_pages = vm_page_wire_count;
1087 memio_snap->speculative_pages = vm_page_speculative_count;
1088 memio_snap->throttled_pages = vm_page_throttled_count;
1089 memio_snap->busy_buffer_count = count_busy_buffers();
1090 kErr = mach_vm_pressure_monitor(FALSE, VM_PRESSURE_TIME_WINDOW, &pages_reclaimed, &pages_wanted);
1091 if ( ! kErr ) {
1092 memio_snap->pages_wanted = (uint32_t)pages_wanted;
1093 memio_snap->pages_reclaimed = (uint32_t)pages_reclaimed;
1094 memio_snap->pages_wanted_reclaimed_valid = 1;
1095 } else {
1096 memio_snap->pages_wanted = 0;
1097 memio_snap->pages_reclaimed = 0;
1098 memio_snap->pages_wanted_reclaimed_valid = 0;
1099 }
1100 }
1101
1102
1103
/*
 * Method for grabbing timer values safely, in the sense that no infinite loop will occur
 * Certain flavors of the timer_grab function, which would seem to be the thing to use,
 * can loop infinitely if called while the timer is in the process of being updated.
 * Unfortunately, it is (rarely) possible to get inconsistent top and bottom halves of
 * the timer using this method. This seems insoluble, since stackshot runs in a context
 * where the timer might be half-updated, and has no way of yielding control just long
 * enough to finish the update.
 */

static uint64_t safe_grab_timer_value(struct timer *t)
{
#if   defined(__LP64__)
	/* 64-bit: a single load of the full value. */
	return t->all_bits;
#else
	/* 32-bit: compose from the two halves; may tear (see above). */
	uint64_t time = t->high_bits;    /* endian independent grab */
	time = (time << 32) | t->low_bits;
	return time;
#endif
}
1124
1125 int
1126 kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced)
1127 {
1128 char *tracepos = (char *) tracebuf;
1129 char *tracebound = tracepos + tracebuf_size;
1130 uint32_t tracebytes = 0;
1131 int error = 0;
1132
1133 task_t task = TASK_NULL;
1134 thread_t thread = THREAD_NULL;
1135 thread_snapshot_t tsnap = NULL;
1136 unsigned framesize = 2 * sizeof(vm_offset_t);
1137 struct task ctask;
1138 struct thread cthread;
1139 struct _vm_map cmap;
1140 struct pmap cpmap;
1141
1142 queue_head_t *task_list = &tasks;
1143 boolean_t is_active_list = TRUE;
1144
1145 boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0);
1146 boolean_t save_loadinfo_p = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0);
1147
1148 if(trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) {
1149 if(tracepos + sizeof(struct mem_and_io_snapshot) > tracebound) {
1150 error = -1;
1151 goto error_exit;
1152 }
1153 kdp_mem_and_io_snapshot((struct mem_and_io_snapshot *)tracepos);
1154 tracepos += sizeof(struct mem_and_io_snapshot);
1155 }
1156
1157 walk_list:
1158 queue_iterate(task_list, task, task_t, tasks) {
1159 if ((task == NULL) || (ml_nofault_copy((vm_offset_t) task, (vm_offset_t) &ctask, sizeof(struct task)) != sizeof(struct task)))
1160 goto error_exit;
1161
1162 int task_pid = pid_from_task(task);
1163 boolean_t task64 = task_has_64BitAddr(task);
1164
1165 if (!task->active) {
1166 /*
1167 * Not interested in terminated tasks without threads, and
1168 * at the moment, stackshot can't handle a task without a name.
1169 */
1170 if (queue_empty(&task->threads) || task_pid == -1) {
1171 continue;
1172 }
1173 }
1174
1175 /* Trace everything, unless a process was specified */
1176 if ((pid == -1) || (pid == task_pid)) {
1177 task_snapshot_t task_snap;
1178 uint32_t uuid_info_count = 0;
1179 mach_vm_address_t uuid_info_addr = 0;
1180 boolean_t have_map = (task->map != NULL) &&
1181 (ml_nofault_copy((vm_offset_t)(task->map), (vm_offset_t)&cmap, sizeof(struct _vm_map)) == sizeof(struct _vm_map));
1182 boolean_t have_pmap = have_map && (cmap.pmap != NULL) &&
1183 (ml_nofault_copy((vm_offset_t)(cmap.pmap), (vm_offset_t)&cpmap, sizeof(struct pmap)) == sizeof(struct pmap));
1184
1185 if (have_pmap && task->active && save_loadinfo_p && task_pid > 0) {
1186 // Read the dyld_all_image_infos struct from the task memory to get UUID array count and location
1187 if (task64) {
1188 struct user64_dyld_all_image_infos task_image_infos;
1189 if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
1190 uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1191 uuid_info_addr = task_image_infos.uuidArray;
1192 }
1193 } else {
1194 struct user32_dyld_all_image_infos task_image_infos;
1195 if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user32_dyld_all_image_infos))) {
1196 uuid_info_count = task_image_infos.uuidArrayCount;
1197 uuid_info_addr = task_image_infos.uuidArray;
1198 }
1199 }
1200
1201 // If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
1202 // this data structure), we zero the uuid_info_count so that we won't even try to save load info
1203 // for this task.
1204 if (!uuid_info_addr) {
1205 uuid_info_count = 0;
1206 }
1207 }
1208
1209 if (tracepos + sizeof(struct task_snapshot) > tracebound) {
1210 error = -1;
1211 goto error_exit;
1212 }
1213
1214 task_snap = (task_snapshot_t) tracepos;
1215 task_snap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
1216 task_snap->pid = task_pid;
1217 task_snap->nloadinfos = uuid_info_count;
1218 /* Add the BSD process identifiers */
1219 if (task_pid != -1)
1220 proc_name_kdp(task, task_snap->p_comm, sizeof(task_snap->p_comm));
1221 else
1222 task_snap->p_comm[0] = '\0';
1223 task_snap->ss_flags = 0;
1224 if (task64)
1225 task_snap->ss_flags |= kUser64_p;
1226 if (!task->active)
1227 task_snap->ss_flags |= kTerminatedSnapshot;
1228 if(task->pidsuspended) task_snap->ss_flags |= kPidSuspended;
1229 if(task->frozen) task_snap->ss_flags |= kFrozen;
1230
1231 task_snap->suspend_count = task->suspend_count;
1232 task_snap->task_size = have_pmap ? pmap_resident_count(task->map->pmap) : 0;
1233 task_snap->faults = task->faults;
1234 task_snap->pageins = task->pageins;
1235 task_snap->cow_faults = task->cow_faults;
1236
1237 task_snap->user_time_in_terminated_threads = task->total_user_time;
1238 task_snap->system_time_in_terminated_threads = task->total_system_time;
1239 tracepos += sizeof(struct task_snapshot);
1240
1241 if (task_pid > 0 && uuid_info_count > 0) {
1242 uint32_t uuid_info_size = (uint32_t)(task64 ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
1243 uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
1244
1245 if (tracepos + uuid_info_array_size > tracebound) {
1246 error = -1;
1247 goto error_exit;
1248 }
1249
1250 // Copy in the UUID info array
1251 // It may be nonresident, in which case just fix up nloadinfos to 0 in the task_snap
1252 if (have_pmap && !kdp_copyin(task->map->pmap, uuid_info_addr, tracepos, uuid_info_array_size))
1253 task_snap->nloadinfos = 0;
1254 else
1255 tracepos += uuid_info_array_size;
1256 }
1257
1258 queue_iterate(&task->threads, thread, thread_t, task_threads){
1259 uint64_t tval;
1260
1261 if ((thread == NULL) || (ml_nofault_copy((vm_offset_t) thread, (vm_offset_t) &cthread, sizeof(struct thread)) != sizeof(struct thread)))
1262 goto error_exit;
1263
1264 if (((tracepos + 4 * sizeof(struct thread_snapshot)) > tracebound)) {
1265 error = -1;
1266 goto error_exit;
1267 }
1268 /* Populate the thread snapshot header */
1269 tsnap = (thread_snapshot_t) tracepos;
1270 tsnap->thread_id = thread_tid(thread);
1271 tsnap->state = thread->state;
1272 tsnap->sched_pri = thread->sched_pri;
1273 tsnap->sched_flags = thread->sched_flags;
1274 tsnap->wait_event = VM_KERNEL_UNSLIDE(thread->wait_event);
1275 tsnap->continuation = VM_KERNEL_UNSLIDE(thread->continuation);
1276 tval = safe_grab_timer_value(&thread->user_timer);
1277 tsnap->user_time = tval;
1278 tval = safe_grab_timer_value(&thread->system_timer);
1279 if (thread->precise_user_kernel_time) {
1280 tsnap->system_time = tval;
1281 } else {
1282 tsnap->user_time += tval;
1283 tsnap->system_time = 0;
1284 }
1285 tsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
1286 tracepos += sizeof(struct thread_snapshot);
1287 tsnap->ss_flags = 0;
1288
1289 if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) {
1290 uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
1291 if (dqkeyaddr != 0) {
1292 uint64_t dqaddr = 0;
1293 if (kdp_copyin(task->map->pmap, dqkeyaddr, &dqaddr, (task64 ? 8 : 4)) && (dqaddr != 0)) {
1294 uint64_t dqserialnumaddr = dqaddr + dispatch_offset;
1295 uint64_t dqserialnum = 0;
1296 if (kdp_copyin(task->map->pmap, dqserialnumaddr, &dqserialnum, (task64 ? 8 : 4))) {
1297 tsnap->ss_flags |= kHasDispatchSerial;
1298 *(uint64_t *)tracepos = dqserialnum;
1299 tracepos += 8;
1300 }
1301 }
1302 }
1303 }
1304 /* Call through to the machine specific trace routines
1305 * Frames are added past the snapshot header.
1306 */
1307 tracebytes = 0;
1308 if (thread->kernel_stack != 0) {
1309 #if defined(__LP64__)
1310 tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
1311 tsnap->ss_flags |= kKernel64_p;
1312 framesize = 16;
1313 #else
1314 tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
1315 framesize = 8;
1316 #endif
1317 }
1318 tsnap->nkern_frames = tracebytes/framesize;
1319 tracepos += tracebytes;
1320 tracebytes = 0;
1321 /* Trace user stack, if any */
1322 if (task->active && thread->task->map != kernel_map) {
1323 /* 64-bit task? */
1324 if (task_has_64BitAddr(thread->task)) {
1325 tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
1326 tsnap->ss_flags |= kUser64_p;
1327 framesize = 16;
1328 }
1329 else {
1330 tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
1331 framesize = 8;
1332 }
1333 }
1334 tsnap->nuser_frames = tracebytes/framesize;
1335 tracepos += tracebytes;
1336 tracebytes = 0;
1337 }
1338 }
1339 }
1340
1341 if (is_active_list) {
1342 is_active_list = FALSE;
1343 task_list = &terminated_tasks;
1344 goto walk_list;
1345 }
1346
1347 error_exit:
1348 /* Release stack snapshot wait indicator */
1349 kdp_snapshot_postflight();
1350
1351 *pbytesTraced = (uint32_t)(tracepos - (char *) tracebuf);
1352
1353 return error;
1354 }
1355
1356 static boolean_t
1357 kdp_readioport(kdp_pkt_t *pkt,
1358 int *len,
1359 unsigned short *reply_port
1360 )
1361 {
1362 kdp_readioport_req_t *rq = &pkt->readioport_req;
1363 kdp_readioport_reply_t *rp = &pkt->readioport_reply;
1364 size_t plen = *len;
1365
1366 if (plen < sizeof (*rq))
1367 return (FALSE);
1368
1369 rp->hdr.is_reply = 1;
1370 rp->hdr.len = sizeof (*rp);
1371
1372 if (rq->nbytes > MAX_KDP_DATA_SIZE)
1373 rp->error = KDPERR_BAD_NBYTES;
1374 else {
1375 #if KDP_TEST_HARNESS
1376 uint16_t addr = rq->address;
1377 #endif
1378 uint16_t size = rq->nbytes;
1379 dprintf(("kdp_readioport addr %x size %d\n", addr, size));
1380
1381 rp->error = kdp_machine_ioport_read(rq, rp->data, rq->lcpu);
1382 if (rp->error == KDPERR_NO_ERROR)
1383 rp->hdr.len += size;
1384 }
1385
1386 *reply_port = kdp.reply_port;
1387 *len = rp->hdr.len;
1388
1389 return (TRUE);
1390 }
1391
1392 static boolean_t
1393 kdp_writeioport(
1394 kdp_pkt_t *pkt,
1395 int *len,
1396 unsigned short *reply_port
1397 )
1398 {
1399 kdp_writeioport_req_t *rq = &pkt->writeioport_req;
1400 kdp_writeioport_reply_t *rp = &pkt->writeioport_reply;
1401 size_t plen = *len;
1402
1403 if (plen < sizeof (*rq))
1404 return (FALSE);
1405
1406 if (rq->nbytes > MAX_KDP_DATA_SIZE)
1407 rp->error = KDPERR_BAD_NBYTES;
1408 else {
1409 dprintf(("kdp_writeioport addr %x size %d\n", rq->address,
1410 rq->nbytes));
1411
1412 rp->error = kdp_machine_ioport_write(rq, rq->data, rq->lcpu);
1413 }
1414
1415 rp->hdr.is_reply = 1;
1416 rp->hdr.len = sizeof (*rp);
1417
1418 *reply_port = kdp.reply_port;
1419 *len = rp->hdr.len;
1420
1421 return (TRUE);
1422 }
1423
1424 static boolean_t
1425 kdp_readmsr64(kdp_pkt_t *pkt,
1426 int *len,
1427 unsigned short *reply_port
1428 )
1429 {
1430 kdp_readmsr64_req_t *rq = &pkt->readmsr64_req;
1431 kdp_readmsr64_reply_t *rp = &pkt->readmsr64_reply;
1432 size_t plen = *len;
1433
1434 if (plen < sizeof (*rq))
1435 return (FALSE);
1436
1437 rp->hdr.is_reply = 1;
1438 rp->hdr.len = sizeof (*rp);
1439
1440 dprintf(("kdp_readmsr64 lcpu %x addr %x\n", rq->lcpu, rq->address));
1441 rp->error = kdp_machine_msr64_read(rq, rp->data, rq->lcpu);
1442 if (rp->error == KDPERR_NO_ERROR)
1443 rp->hdr.len += sizeof(uint64_t);
1444
1445 *reply_port = kdp.reply_port;
1446 *len = rp->hdr.len;
1447
1448 return (TRUE);
1449 }
1450
1451 static boolean_t
1452 kdp_writemsr64(
1453 kdp_pkt_t *pkt,
1454 int *len,
1455 unsigned short *reply_port
1456 )
1457 {
1458 kdp_writemsr64_req_t *rq = &pkt->writemsr64_req;
1459 kdp_writemsr64_reply_t *rp = &pkt->writemsr64_reply;
1460 size_t plen = *len;
1461
1462 if (plen < sizeof (*rq))
1463 return (FALSE);
1464
1465 dprintf(("kdp_writemsr64 lcpu %x addr %x\n", rq->lcpu, rq->address));
1466 rp->error = kdp_machine_msr64_write(rq, rq->data, rq->lcpu);
1467
1468 rp->hdr.is_reply = 1;
1469 rp->hdr.len = sizeof (*rp);
1470
1471 *reply_port = kdp.reply_port;
1472 *len = rp->hdr.len;
1473
1474 return (TRUE);
1475 }
1476
1477 static boolean_t
1478 kdp_dumpinfo(
1479 kdp_pkt_t *pkt,
1480 int *len,
1481 unsigned short *reply_port
1482 )
1483 {
1484 kdp_dumpinfo_req_t *rq = &pkt->dumpinfo_req;
1485 kdp_dumpinfo_reply_t *rp = &pkt->dumpinfo_reply;
1486 size_t plen = *len;
1487
1488 if (plen < sizeof (*rq))
1489 return (FALSE);
1490
1491 dprintf(("kdp_dumpinfo file=%s destip=%s routerip=%s\n", rq->name, rq->destip, rq->routerip));
1492 rp->hdr.is_reply = 1;
1493 rp->hdr.len = sizeof (*rp);
1494
1495 if ((rq->type & KDP_DUMPINFO_MASK) != KDP_DUMPINFO_GETINFO) {
1496 kdp_set_dump_info(rq->type, rq->name, rq->destip, rq->routerip,
1497 rq->port);
1498 }
1499
1500 /* gather some stats for reply */
1501 kdp_get_dump_info(&rp->type, rp->name, rp->destip, rp->routerip,
1502 &rp->port);
1503
1504 *reply_port = kdp.reply_port;
1505 *len = rp->hdr.len;
1506
1507 return (TRUE);
1508 }