/* osfmk/kdp/kdp.c, from apple/xnu xnu-2422.1.72 */
/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <sys/appleapiopts.h>
#include <kern/debug.h>
#include <uuid/uuid.h>

#include <kdp/kdp_internal.h>
#include <kdp/kdp_private.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_dyld.h>

#include <libsa/types.h>
#include <libkern/version.h>

#include <string.h> /* bcopy */

#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_shared_region.h>
#include <libkern/OSKextLibPrivate.h>

extern int count_busy_buffers(void); /* must track with declaration in bsd/sys/buf_internal.h */

#define DO_ALIGN 1 /* align all packet data accesses */

#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x)
#endif

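/*
 * Request dispatch table, indexed by (request code - KDP_CONNECT).
 * Each handler decodes the request in place, overwrites the packet
 * buffer with its reply, and returns TRUE if that reply should be
 * sent; codes with no handler map to kdp_unknown.
 */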
static kdp_dispatch_t
    dispatch_table[KDP_INVALID_REQUEST-KDP_CONNECT] =
    {
/* 0 */  kdp_connect,
/* 1 */  kdp_disconnect,
/* 2 */  kdp_hostinfo,
/* 3 */  kdp_version,
/* 4 */  kdp_maxbytes,
/* 5 */  kdp_readmem,
/* 6 */  kdp_writemem,
/* 7 */  kdp_readregs,
/* 8 */  kdp_writeregs,
/* 9 */  kdp_unknown,
/* A */  kdp_unknown,
/* B */  kdp_suspend,
/* C */  kdp_resumecpus,
/* D */  kdp_unknown,
/* E */  kdp_unknown,
/* F */  kdp_breakpoint_set,
/* 10 */ kdp_breakpoint_remove,
/* 11 */ kdp_regions,
/* 12 */ kdp_reattach,
/* 13 */ kdp_reboot,
/* 14 */ kdp_readmem64,
/* 15 */ kdp_writemem64,
/* 16 */ kdp_breakpoint64_set,
/* 17 */ kdp_breakpoint64_remove,
/* 18 */ kdp_kernelversion,
/* 19 */ kdp_readphysmem64,
/* 1A */ kdp_writephysmem64,
/* 1B */ kdp_readioport,
/* 1C */ kdp_writeioport,
/* 1D */ kdp_readmsr64,
/* 1E */ kdp_writemsr64,
/* 1F */ kdp_dumpinfo,
    };

kdp_glob_t kdp;

#define MAX_BREAKPOINTS 100

/*
 * Version 11 of the KDP Protocol adds support for 64-bit wide memory
 * addresses (read/write and breakpoints) as well as a dedicated
 * kernelversion request. Version 12 adds read/writing of physical
 * memory with 64-bit wide memory addresses.
 */
#define KDP_VERSION 12

typedef struct {
    mach_vm_address_t address;
    uint32_t bytesused;
    uint8_t  oldbytes[MAX_BREAKINSN_BYTES];
} kdp_breakpoint_record_t;

static kdp_breakpoint_record_t breakpoint_list[MAX_BREAKPOINTS];
static unsigned int breakpoints_initialized = 0;

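/*
 * reattach_wait keeps the machine halted across a disconnect (after a
 * panic, or when noresume_on_disconnect is set) so that a debugger can
 * reattach; see kdp_disconnect() and kdp_reattach() below.
 */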
int reattach_wait = 0;
int noresume_on_disconnect = 0;
extern unsigned int return_on_panic;

typedef struct thread_snapshot *thread_snapshot_t;
typedef struct task_snapshot *task_snapshot_t;

extern int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
extern int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
extern int
proc_pid(void *p);
extern uint64_t
proc_uniqueid(void *p);
extern uint64_t
proc_was_throttled(void *p);
extern uint64_t
proc_did_throttle(void *p);

extern void
proc_name_kdp(task_t task, char *buf, int size);

extern void
kdp_snapshot_postflight(void);

static int
pid_from_task(task_t task);

static uint64_t
proc_uniqueid_from_task(task_t task);

kdp_error_t
kdp_set_breakpoint_internal(
    mach_vm_address_t address
);

kdp_error_t
kdp_remove_breakpoint_internal(
    mach_vm_address_t address
);

int
kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced);

boolean_t kdp_copyin(pmap_t, uint64_t, void *, size_t);
extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);

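/*
 * Top-level handler for an incoming KDP request: validate the header,
 * dispatch to the handler table above, and copy the in-place reply
 * back into the caller's buffer. Returns TRUE if a reply should be
 * transmitted to *reply_port.
 */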
boolean_t
kdp_packet(
    unsigned char *pkt,
    int *len,
    unsigned short *reply_port
)
{
    static unsigned aligned_pkt[1538/sizeof(unsigned)+1]; // max ether pkt
    kdp_pkt_t *rd = (kdp_pkt_t *)&aligned_pkt;
    size_t plen = *len;
    kdp_req_t req;
    boolean_t ret;

#if DO_ALIGN
    bcopy((char *)pkt, (char *)rd, sizeof(aligned_pkt));
#else
    rd = (kdp_pkt_t *)pkt;
#endif
    if (plen < sizeof (rd->hdr) || rd->hdr.len != plen) {
        printf("kdp_packet bad len pkt %lu hdr %d\n", plen, rd->hdr.len);

        return (FALSE);
    }

    if (rd->hdr.is_reply) {
        printf("kdp_packet reply recvd req %x seq %x\n",
               rd->hdr.request, rd->hdr.seq);

        return (FALSE);
    }

    req = rd->hdr.request;
    if (req >= KDP_INVALID_REQUEST) {
        printf("kdp_packet bad request %x len %d seq %x key %x\n",
               rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);

        return (FALSE);
    }

    ret = ((*dispatch_table[req - KDP_CONNECT])(rd, len, reply_port));
#if DO_ALIGN
    bcopy((char *)rd, (char *)pkt, *len);
#endif
    return ret;
}
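
/*
 * For illustration, a minimal client-side framing sketch (hypothetical
 * field values; the actual kdp_hdr_t wire layout and byte order are
 * defined by the KDP protocol headers, not by this file):
 *
 *     kdp_hostinfo_req_t req;
 *     req.hdr.request  = KDP_HOSTINFO;   // index into dispatch_table
 *     req.hdr.is_reply = 0;              // replies are never dispatched
 *     req.hdr.seq      = seq++;          // echoed back by the handler
 *     req.hdr.key      = session_key;    // session key from KDP_CONNECT
 *     req.hdr.len      = sizeof(req);    // must equal the datagram length
 *
 * kdp_packet() above rejects any datagram whose length disagrees with
 * hdr.len, any packet marked as a reply, and any out-of-range request.
 */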

static boolean_t
kdp_unknown(
    kdp_pkt_t *pkt,
    __unused int *len,
    __unused unsigned short *reply_port
)
{
    kdp_pkt_t *rd = (kdp_pkt_t *)pkt;

    printf("kdp_unknown request %x len %d seq %x key %x\n",
           rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);

    return (FALSE);
}

static boolean_t
kdp_connect(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_connect_req_t *rq = &pkt->connect_req;
    size_t plen = *len;
    kdp_connect_reply_t *rp = &pkt->connect_reply;
    uint16_t rport, eport;
    uint32_t key;
    uint8_t seq;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_connect seq %x greeting %s\n", rq->hdr.seq, rq->greeting));

    rport = rq->req_reply_port;
    eport = rq->exc_note_port;
    key   = rq->hdr.key;
    seq   = rq->hdr.seq;
    if (kdp.is_conn) {
        if ((seq == kdp.conn_seq) &&    /* duplicate request */
            (rport == kdp.reply_port) &&
            (eport == kdp.exception_port) &&
            (key == kdp.session_key))
            rp->error = KDPERR_NO_ERROR;
        else
            rp->error = KDPERR_ALREADY_CONNECTED;
    }
    else {
        kdp.reply_port     = rport;
        kdp.exception_port = eport;
        kdp.is_conn        = TRUE;
        kdp.conn_seq       = seq;
        kdp.session_key    = key;

        rp->error = KDPERR_NO_ERROR;
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = rport;
    *len = rp->hdr.len;

    if (current_debugger == KDP_CUR_DB)
        active_debugger = 1;

    return (TRUE);
}

static boolean_t
kdp_disconnect(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_disconnect_req_t *rq = &pkt->disconnect_req;
    size_t plen = *len;
    kdp_disconnect_reply_t *rp = &pkt->disconnect_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    if (!kdp.is_conn)
        return (FALSE);

    dprintf(("kdp_disconnect\n"));

    *reply_port = kdp.reply_port;

    kdp.reply_port = kdp.exception_port = 0;
    kdp.is_halted = kdp.is_conn = FALSE;
    kdp.exception_seq = kdp.conn_seq = 0;
    kdp.session_key = 0;

    if ((panicstr != NULL) && (return_on_panic == 0))
        reattach_wait = 1;

    if (noresume_on_disconnect == 1) {
        reattach_wait = 1;
        noresume_on_disconnect = 0;
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *len = rp->hdr.len;

    if (current_debugger == KDP_CUR_DB)
        active_debugger = 0;

    return (TRUE);
}

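/*
 * Reattach: force the connection state to "connected", run the normal
 * disconnect path to reset sequence numbers and ports, and leave the
 * machine halted (reattach_wait) so the debugger can reconnect cleanly.
 */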
static boolean_t
kdp_reattach(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_reattach_req_t *rq = &pkt->reattach_req;

    kdp.is_conn = TRUE;
    kdp_disconnect(pkt, len, reply_port);
    *reply_port = rq->req_reply_port;
    reattach_wait = 1;
    return (TRUE);
}

static boolean_t
kdp_hostinfo(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_hostinfo_req_t *rq = &pkt->hostinfo_req;
    size_t plen = *len;
    kdp_hostinfo_reply_t *rp = &pkt->hostinfo_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_hostinfo\n"));

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    kdp_machine_hostinfo(&rp->hostinfo);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_kernelversion(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_kernelversion_req_t *rq = &pkt->kernelversion_req;
    size_t plen = *len;
    kdp_kernelversion_reply_t *rp = &pkt->kernelversion_reply;
    size_t slen;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_kernelversion\n"));
    slen = strlcpy(rp->version, kdp_kernelversion_string, MAX_KDP_DATA_SIZE);

    rp->hdr.len += slen + 1; /* strlcpy returns the source string length, which excludes the NUL; add 1 for the terminator */

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_suspend(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_suspend_req_t *rq = &pkt->suspend_req;
    size_t plen = *len;
    kdp_suspend_reply_t *rp = &pkt->suspend_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_suspend\n"));

    kdp.is_halted = TRUE;

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_resumecpus(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_resumecpus_req_t *rq = &pkt->resumecpus_req;
    size_t plen = *len;
    kdp_resumecpus_reply_t *rp = &pkt->resumecpus_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_resumecpus %x\n", rq->cpu_mask));

    kdp.is_halted = FALSE;

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_writemem(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_writemem_req_t *rq = &pkt->writemem_req;
    size_t plen = *len;
    kdp_writemem_reply_t *rp = &pkt->writemem_reply;
    mach_vm_size_t cnt;

    if (plen < sizeof (*rq))
        return (FALSE);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_writemem addr %x size %d\n", rq->address, rq->nbytes));
        cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, rq->nbytes);
        rp->error = KDPERR_ACCESS(rq->nbytes, cnt);
        dprintf((" cnt %lld error %d\n", cnt, rp->error));
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_writemem64(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_writemem64_req_t *rq = &pkt->writemem64_req;
    size_t plen = *len;
    kdp_writemem64_reply_t *rp = &pkt->writemem64_reply;
    mach_vm_size_t cnt;

    if (plen < sizeof (*rq))
        return (FALSE);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_writemem64 addr %llx size %d\n", rq->address, rq->nbytes));
        cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, (mach_vm_size_t)rq->nbytes);
        rp->error = KDPERR_ACCESS(rq->nbytes, cnt);
        dprintf((" cnt %lld error %d\n", cnt, rp->error));
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_writephysmem64(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_writephysmem64_req_t *rq = &pkt->writephysmem64_req;
    size_t plen = *len;
    kdp_writephysmem64_reply_t *rp = &pkt->writephysmem64_reply;
    mach_vm_size_t cnt;
    unsigned int size;

    if (plen < sizeof (*rq))
        return (FALSE);

    size = rq->nbytes;
    if (size > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_writephysmem64 addr %llx size %d\n", rq->address, size));
        cnt = kdp_machine_phys_write(rq, rq->data, rq->lcpu);
        rp->error = KDPERR_ACCESS(size, cnt);
        dprintf((" cnt %lld error %d\n", cnt, rp->error));
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_readmem(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_readmem_req_t *rq = &pkt->readmem_req;
    size_t plen = *len;
    kdp_readmem_reply_t *rp = &pkt->readmem_reply;
    mach_vm_size_t cnt;
    unsigned int size;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    size = rq->nbytes;
    if (size > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_readmem addr %x size %d\n", rq->address, size));
        cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes);
        rp->error = KDPERR_ACCESS(size, cnt);
        dprintf((" cnt %lld error %d\n", cnt, rp->error));

        rp->hdr.len += cnt;
    }

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_readmem64(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_readmem64_req_t *rq = &pkt->readmem64_req;
    size_t plen = *len;
    kdp_readmem64_reply_t *rp = &pkt->readmem64_reply;
    mach_vm_size_t cnt;
    unsigned int size;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    size = rq->nbytes;
    if (size > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_readmem64 addr %llx size %d\n", rq->address, size));
        cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes);
        rp->error = KDPERR_ACCESS(size, cnt);
        dprintf((" cnt %lld error %d\n", cnt, rp->error));

        rp->hdr.len += cnt;
    }

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_readphysmem64(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_readphysmem64_req_t *rq = &pkt->readphysmem64_req;
    size_t plen = *len;
    kdp_readphysmem64_reply_t *rp = &pkt->readphysmem64_reply;
    mach_vm_size_t cnt;
    unsigned int size;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    size = rq->nbytes;
    if (size > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_readphysmem64 addr %llx size %d\n", rq->address, size));
        cnt = kdp_machine_phys_read(rq, rp->data, rq->lcpu);
        rp->error = KDPERR_ACCESS(size, cnt);
        dprintf((" cnt %lld error %d\n", cnt, rp->error));

        rp->hdr.len += cnt;
    }

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_maxbytes(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_maxbytes_req_t *rq = &pkt->maxbytes_req;
    size_t plen = *len;
    kdp_maxbytes_reply_t *rp = &pkt->maxbytes_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_maxbytes\n"));

    rp->max_bytes = MAX_KDP_DATA_SIZE;

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_version(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_version_req_t *rq = &pkt->version_req;
    size_t plen = *len;
    kdp_version_reply_t *rp = &pkt->version_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_version\n"));

    rp->version = KDP_VERSION;
    if (!(kdp_flag & KDP_BP_DIS))
        rp->feature = KDP_FEATURE_BP;
    else
        rp->feature = 0;

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_regions(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_regions_req_t *rq = &pkt->regions_req;
    size_t plen = *len;
    kdp_regions_reply_t *rp = &pkt->regions_reply;
    kdp_region_t *r;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_regions\n"));

    r = rp->regions;
    rp->nregions = 0;

    r->address = 0;
    r->nbytes = 0xffffffff;

    r->protection = VM_PROT_ALL;
    r++;
    rp->nregions++;

    rp->hdr.len += rp->nregions * sizeof (kdp_region_t);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_writeregs(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_writeregs_req_t *rq = &pkt->writeregs_req;
    size_t plen = *len;
    int size;
    kdp_writeregs_reply_t *rp = &pkt->writeregs_reply;

    if (plen < sizeof (*rq))
        return (FALSE);

    size = rq->hdr.len - (unsigned)sizeof(kdp_hdr_t) - (unsigned)sizeof(unsigned int);
    rp->error = kdp_machine_write_regs(rq->cpu, rq->flavor, rq->data, &size);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_readregs(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_readregs_req_t *rq = &pkt->readregs_req;
    size_t plen = *len;
    kdp_readregs_reply_t *rp = &pkt->readregs_reply;
    int size;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    rp->error = kdp_machine_read_regs(rq->cpu, rq->flavor, rp->data, &size);
    rp->hdr.len += size;

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

boolean_t
kdp_breakpoint_set(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_breakpoint_req_t *rq = &pkt->breakpoint_req;
    kdp_breakpoint_reply_t *rp = &pkt->breakpoint_reply;
    size_t plen = *len;
    kdp_error_t kerr;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_breakpoint_set %x\n", rq->address));

    kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address);

    rp->error = kerr;

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);
    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

boolean_t
kdp_breakpoint64_set(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_breakpoint64_req_t *rq = &pkt->breakpoint64_req;
    kdp_breakpoint64_reply_t *rp = &pkt->breakpoint64_reply;
    size_t plen = *len;
    kdp_error_t kerr;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_breakpoint64_set %llx\n", rq->address));

    kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address);

    rp->error = kerr;

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);
    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

boolean_t
kdp_breakpoint_remove(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_breakpoint_req_t *rq = &pkt->breakpoint_req;
    kdp_breakpoint_reply_t *rp = &pkt->breakpoint_reply;
    size_t plen = *len;
    kdp_error_t kerr;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_breakpoint_remove %x\n", rq->address));

    kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address);

    rp->error = kerr;

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);
    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

boolean_t
kdp_breakpoint64_remove(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_breakpoint64_req_t *rq = &pkt->breakpoint64_req;
    kdp_breakpoint64_reply_t *rp = &pkt->breakpoint64_reply;
    size_t plen = *len;
    kdp_error_t kerr;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_breakpoint64_remove %llx\n", rq->address));

    kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address);

    rp->error = kerr;

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);
    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

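/*
 * Install a breakpoint instruction at 'address', saving the original
 * instruction bytes in breakpoint_list so they can be restored on
 * removal. The list is a fixed-size array scanned linearly; an entry
 * with a zero address is a free slot.
 */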
kdp_error_t
kdp_set_breakpoint_internal(
    mach_vm_address_t address
)
{
    uint8_t breakinstr[MAX_BREAKINSN_BYTES], oldinstr[MAX_BREAKINSN_BYTES];
    uint32_t breakinstrsize = sizeof(breakinstr);
    mach_vm_size_t cnt;
    int i;

    kdp_machine_get_breakinsn(breakinstr, &breakinstrsize);

    if (breakpoints_initialized == 0) {
        for (i = 0; i < MAX_BREAKPOINTS; i++)
            breakpoint_list[i].address = 0;
        breakpoints_initialized++;
    }

    cnt = kdp_machine_vm_read(address, (caddr_t)&oldinstr, (mach_vm_size_t)breakinstrsize);

    if (0 == memcmp(oldinstr, breakinstr, breakinstrsize)) {
        printf("A trap was already set at that address, not setting new breakpoint\n");

        return KDPERR_BREAKPOINT_ALREADY_SET;
    }

    for (i = 0; (i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != 0); i++);

    if (i == MAX_BREAKPOINTS) {
        return KDPERR_MAX_BREAKPOINTS;
    }

    breakpoint_list[i].address = address;
    memcpy(breakpoint_list[i].oldbytes, oldinstr, breakinstrsize);
    breakpoint_list[i].bytesused = breakinstrsize;

    cnt = kdp_machine_vm_write((caddr_t)&breakinstr, address, breakinstrsize);

    return KDPERR_NO_ERROR;
}

kdp_error_t
kdp_remove_breakpoint_internal(
    mach_vm_address_t address
)
{
    mach_vm_size_t cnt;
    int i;

    for (i = 0; (i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != address); i++);

    if (i == MAX_BREAKPOINTS) {
        return KDPERR_BREAKPOINT_NOT_FOUND;
    }

    breakpoint_list[i].address = 0;
    cnt = kdp_machine_vm_write((caddr_t)&breakpoint_list[i].oldbytes, address, breakpoint_list[i].bytesused);

    return KDPERR_NO_ERROR;
}

boolean_t
kdp_remove_all_breakpoints(void)
{
    int i;
    boolean_t breakpoint_found = FALSE;

    if (breakpoints_initialized) {
        for (i = 0; i < MAX_BREAKPOINTS; i++) {
            if (breakpoint_list[i].address) {
                kdp_machine_vm_write((caddr_t)&(breakpoint_list[i].oldbytes), (mach_vm_address_t)breakpoint_list[i].address, (mach_vm_size_t)breakpoint_list[i].bytesused);
                breakpoint_found = TRUE;
                breakpoint_list[i].address = 0;
            }
        }

        if (breakpoint_found)
            printf("kdp_remove_all_breakpoints: found extant breakpoints, removing them.\n");
    }
    return breakpoint_found;
}

boolean_t
kdp_reboot(
    __unused kdp_pkt_t *pkt,
    __unused int *len,
    __unused unsigned short *reply_port
)
{
    dprintf(("kdp_reboot\n"));

    kdp_machine_reboot();

    return (TRUE); // no, not really, we won't return
}

#define MAX_FRAMES 1000

static int pid_from_task(task_t task)
{
    int pid = -1;

    if (task->bsd_info)
        pid = proc_pid(task->bsd_info);

    return pid;
}

static uint64_t
proc_uniqueid_from_task(task_t task)
{
    uint64_t uniqueid = ~(0ULL);

    if (task->bsd_info)
        uniqueid = proc_uniqueid(task->bsd_info);

    return uniqueid;
}

static uint64_t
proc_was_throttled_from_task(task_t task)
{
    uint64_t was_throttled = 0;

    if (task->bsd_info)
        was_throttled = proc_was_throttled(task->bsd_info);

    return was_throttled;
}

static uint64_t
proc_did_throttle_from_task(task_t task)
{
    uint64_t did_throttle = 0;

    if (task->bsd_info)
        did_throttle = proc_did_throttle(task->bsd_info);

    return did_throttle;
}

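/*
 * Copy 'size' bytes from user virtual address 'uaddr' in pmap 'p' into
 * the kernel buffer 'dest' without taking faults, by translating both
 * sides to physical addresses and copying at most one page fragment at
 * a time. Returns TRUE only if the entire range was resident and
 * copied, so callers can use it as a validity check, e.g.:
 *
 *     uint64_t dqaddr = 0;
 *     if (kdp_copyin(task->map->pmap, dqkeyaddr, &dqaddr, 8)) ...
 */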
boolean_t
kdp_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size) {
    size_t rem = size;
    char *kvaddr = dest;

    while (rem) {
        ppnum_t upn = pmap_find_phys(p, uaddr);
        uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
        uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
        uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
        uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
        size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
        cur_size = MIN(cur_size, rem);

        if (upn && pmap_valid_page(upn) && phys_dest) {
            bcopy_phys(phys_src, phys_dest, cur_size);
        }
        else
            break;
        uaddr += cur_size;
        kvaddr += cur_size;
        rem -= cur_size;
    }
    return (rem == 0);
}

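/*
 * Fill in the global memory and I/O statistics header of a stackshot:
 * VM page counts, busy buffer count, compressor activity summed across
 * the per-processor vm_stat structures, and memory-pressure figures
 * when mach_vm_pressure_monitor() can supply them.
 */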
static void
kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap)
{
    unsigned int pages_reclaimed;
    unsigned int pages_wanted;
    kern_return_t kErr;

    processor_t processor;
    vm_statistics64_t stat;
    vm_statistics64_data_t host_vm_stat;

    processor = processor_list;
    stat = &PROCESSOR_DATA(processor, vm_stat);
    host_vm_stat = *stat;

    if (processor_count > 1) {
        simple_lock(&processor_list_lock);

        while ((processor = processor->processor_list) != NULL) {
            stat = &PROCESSOR_DATA(processor, vm_stat);
            host_vm_stat.compressions += stat->compressions;
            host_vm_stat.decompressions += stat->decompressions;
        }

        simple_unlock(&processor_list_lock);
    }

    memio_snap->snapshot_magic = STACKSHOT_MEM_AND_IO_SNAPSHOT_MAGIC;
    memio_snap->free_pages = vm_page_free_count;
    memio_snap->active_pages = vm_page_active_count;
    memio_snap->inactive_pages = vm_page_inactive_count;
    memio_snap->purgeable_pages = vm_page_purgeable_count;
    memio_snap->wired_pages = vm_page_wire_count;
    memio_snap->speculative_pages = vm_page_speculative_count;
    memio_snap->throttled_pages = vm_page_throttled_count;
    memio_snap->busy_buffer_count = count_busy_buffers();
    memio_snap->filebacked_pages = vm_page_external_count;
    memio_snap->compressions = (uint32_t)host_vm_stat.compressions;
    memio_snap->decompressions = (uint32_t)host_vm_stat.decompressions;
    memio_snap->compressor_size = VM_PAGE_COMPRESSOR_COUNT;
    kErr = mach_vm_pressure_monitor(FALSE, VM_PRESSURE_TIME_WINDOW, &pages_reclaimed, &pages_wanted);
    if (!kErr) {
        memio_snap->pages_wanted = (uint32_t)pages_wanted;
        memio_snap->pages_reclaimed = (uint32_t)pages_reclaimed;
        memio_snap->pages_wanted_reclaimed_valid = 1;
    } else {
        memio_snap->pages_wanted = 0;
        memio_snap->pages_reclaimed = 0;
        memio_snap->pages_wanted_reclaimed_valid = 0;
    }
}


/*
 * Method for grabbing timer values safely, in the sense that no infinite
 * loop will occur. Certain flavors of the timer_grab function, which would
 * seem to be the thing to use, can loop infinitely if called while the
 * timer is in the process of being updated. Unfortunately, it is (rarely)
 * possible to get inconsistent top and bottom halves of the timer using
 * this method. This seems insoluble, since stackshot runs in a context
 * where the timer might be half-updated, and has no way of yielding
 * control just long enough to finish the update.
 */

static uint64_t safe_grab_timer_value(struct timer *t)
{
#if defined(__LP64__)
    return t->all_bits;
#else
    uint64_t time = t->high_bits;    /* endian independent grab */
    time = (time << 32) | t->low_bits;
    return time;
#endif
}

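/*
 * Core stackshot walker. The output written to 'tracebuf' is a packed
 * stream: an optional mem_and_io_snapshot, then for each task (active
 * list first, then terminated) a task_snapshot followed by its
 * dyld/kext UUID load-info array, then for each thread a
 * thread_snapshot, an optional dispatch-queue serial number, and the
 * traced kernel/user frame records. Runs in debugger context, so every
 * structure is probed with ml_validate_nofault() before being touched.
 */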
int
kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced)
{
    char *tracepos = (char *) tracebuf;
    char *tracebound = tracepos + tracebuf_size;
    uint32_t tracebytes = 0;
    int error = 0;

    task_t task = TASK_NULL;
    thread_t thread = THREAD_NULL;
    thread_snapshot_t tsnap = NULL;
    unsigned framesize = 2 * sizeof(vm_offset_t);

    queue_head_t *task_list = &tasks;
    boolean_t is_active_list = TRUE;

    boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0);
    boolean_t save_loadinfo_p = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0);
    boolean_t save_kextloadinfo_p = ((trace_flags & STACKSHOT_SAVE_KEXT_LOADINFO) != 0);
    boolean_t save_userframes_p = ((trace_flags & STACKSHOT_SAVE_KERNEL_FRAMES_ONLY) == 0);

    if (trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) {
        if (tracepos + sizeof(struct mem_and_io_snapshot) > tracebound) {
            error = -1;
            goto error_exit;
        }
        kdp_mem_and_io_snapshot((struct mem_and_io_snapshot *)tracepos);
        tracepos += sizeof(struct mem_and_io_snapshot);
    }

walk_list:
    queue_iterate(task_list, task, task_t, tasks) {
        if ((task == NULL) || !ml_validate_nofault((vm_offset_t) task, sizeof(struct task)))
            goto error_exit;

        int task_pid = pid_from_task(task);
        uint64_t task_uniqueid = proc_uniqueid_from_task(task);
        boolean_t task64 = task_has_64BitAddr(task);

        if (!task->active) {
            /*
             * Not interested in terminated tasks without threads, and
             * at the moment, stackshot can't handle a task without a name.
             */
            if (queue_empty(&task->threads) || task_pid == -1) {
                continue;
            }
        }

        /* Trace everything, unless a process was specified */
        if ((pid == -1) || (pid == task_pid)) {
            task_snapshot_t task_snap;
            uint32_t uuid_info_count = 0;
            mach_vm_address_t uuid_info_addr = 0;
            boolean_t have_map = (task->map != NULL) &&
                (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
            boolean_t have_pmap = have_map && (task->map->pmap != NULL) &&
                (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
            uint64_t shared_cache_base_address = 0;

            if (have_pmap && task->active && save_loadinfo_p && task_pid > 0) {
                // Read the dyld_all_image_infos struct from the task memory to get UUID array count and location
                if (task64) {
                    struct user64_dyld_all_image_infos task_image_infos;
                    if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
                        uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
                        uuid_info_addr = task_image_infos.uuidArray;
                    }
                } else {
                    struct user32_dyld_all_image_infos task_image_infos;
                    if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user32_dyld_all_image_infos))) {
                        uuid_info_count = task_image_infos.uuidArrayCount;
                        uuid_info_addr = task_image_infos.uuidArray;
                    }
                }

                // If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
                // this data structure), we zero the uuid_info_count so that we won't even try to save load info
                // for this task.
                if (!uuid_info_addr) {
                    uuid_info_count = 0;
                }
            }

            if (have_pmap && save_kextloadinfo_p && task_pid == 0) {
                if (ml_validate_nofault((vm_offset_t)(gLoadedKextSummaries), sizeof(OSKextLoadedKextSummaryHeader))) {
                    uuid_info_count = gLoadedKextSummaries->numSummaries + 1; /* include main kernel UUID */
                }
            }

            if (tracepos + sizeof(struct task_snapshot) > tracebound) {
                error = -1;
                goto error_exit;
            }

            task_snap = (task_snapshot_t) tracepos;
            task_snap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
            task_snap->pid = task_pid;
            task_snap->uniqueid = task_uniqueid;
            task_snap->nloadinfos = uuid_info_count;
            /* Add the BSD process identifiers */
            if (task_pid != -1)
                proc_name_kdp(task, task_snap->p_comm, sizeof(task_snap->p_comm));
            else
                task_snap->p_comm[0] = '\0';
            task_snap->ss_flags = 0;
            if (task64)
                task_snap->ss_flags |= kUser64_p;
            if (task64 && task_pid == 0)
                task_snap->ss_flags |= kKernel64_p;
            if (!task->active)
                task_snap->ss_flags |= kTerminatedSnapshot;
            if (task->pidsuspended) task_snap->ss_flags |= kPidSuspended;
            if (task->frozen) task_snap->ss_flags |= kFrozen;

            if (task->effective_policy.t_sup_active == 1)
                task_snap->ss_flags |= kTaskIsSuppressed;

            task_snap->latency_qos = (task->effective_policy.t_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED) ?
                LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | task->effective_policy.t_latency_qos);

            task_snap->suspend_count = task->suspend_count;
            task_snap->task_size = have_pmap ? pmap_resident_count(task->map->pmap) : 0;
            task_snap->faults = task->faults;
            task_snap->pageins = task->pageins;
            task_snap->cow_faults = task->cow_faults;

            task_snap->user_time_in_terminated_threads = task->total_user_time;
            task_snap->system_time_in_terminated_threads = task->total_system_time;
            /*
             * The throttling counters are maintained as 64-bit counters in the proc
             * structure. However, we reserve 32-bits (each) for them in the task_snapshot
             * struct to save space and since we do not expect them to overflow 32-bits. If we
             * find these values overflowing in the future, the fix would be to simply
             * upgrade these counters to 64-bit in the task_snapshot struct
             */
            task_snap->was_throttled = (uint32_t) proc_was_throttled_from_task(task);
            task_snap->did_throttle = (uint32_t) proc_did_throttle_from_task(task);

            if (task->shared_region && ml_validate_nofault((vm_offset_t)task->shared_region,
                                                           sizeof(struct vm_shared_region))) {
                struct vm_shared_region *sr = task->shared_region;

                shared_cache_base_address = sr->sr_base_address + sr->sr_first_mapping;
            }
            if (!shared_cache_base_address
                || !kdp_copyin(task->map->pmap, shared_cache_base_address, task_snap->shared_cache_identifier, sizeof(task_snap->shared_cache_identifier))) {
                memset(task_snap->shared_cache_identifier, 0x0, sizeof(task_snap->shared_cache_identifier));
            }
            if (task->shared_region) {
                /*
                 * No refcounting here, but we are in debugger
                 * context, so that should be safe.
                 */
                task_snap->shared_cache_slide = task->shared_region->sr_slide_info.slide;
            } else {
                task_snap->shared_cache_slide = 0;
            }

            tracepos += sizeof(struct task_snapshot);

            if (task_pid > 0 && uuid_info_count > 0) {
                uint32_t uuid_info_size = (uint32_t)(task64 ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
                uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;

                if (tracepos + uuid_info_array_size > tracebound) {
                    error = -1;
                    goto error_exit;
                }

                // Copy in the UUID info array
                // It may be nonresident, in which case just fix up nloadinfos to 0 in the task_snap
                if (have_pmap && !kdp_copyin(task->map->pmap, uuid_info_addr, tracepos, uuid_info_array_size))
                    task_snap->nloadinfos = 0;
                else
                    tracepos += uuid_info_array_size;
            } else if (task_pid == 0 && uuid_info_count > 0) {
                uint32_t uuid_info_size = (uint32_t)sizeof(kernel_uuid_info);
                uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
                kernel_uuid_info *output_uuids;

                if (tracepos + uuid_info_array_size > tracebound) {
                    error = -1;
                    goto error_exit;
                }

                output_uuids = (kernel_uuid_info *)tracepos;

                do {
                    if (!kernel_uuid || !ml_validate_nofault((vm_offset_t)kernel_uuid, sizeof(uuid_t))) {
                        /* Kernel UUID not found or inaccessible */
                        task_snap->nloadinfos = 0;
                        break;
                    }

                    output_uuids[0].imageLoadAddress = (uintptr_t)VM_KERNEL_UNSLIDE(vm_kernel_stext);
                    memcpy(&output_uuids[0].imageUUID, kernel_uuid, sizeof(uuid_t));

                    if (ml_validate_nofault((vm_offset_t)(&gLoadedKextSummaries->summaries[0]),
                                            gLoadedKextSummaries->entry_size * gLoadedKextSummaries->numSummaries)) {
                        uint32_t kexti;

                        for (kexti = 0; kexti < gLoadedKextSummaries->numSummaries; kexti++) {
                            output_uuids[1+kexti].imageLoadAddress = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].address);
                            memcpy(&output_uuids[1+kexti].imageUUID, &gLoadedKextSummaries->summaries[kexti].uuid, sizeof(uuid_t));
                        }

                        tracepos += uuid_info_array_size;
                    } else {
                        /* kext summary invalid, but kernel UUID was copied */
                        task_snap->nloadinfos = 1;
                        tracepos += uuid_info_size;
                        break;
                    }
                } while (0);
            }

            queue_iterate(&task->threads, thread, thread_t, task_threads) {
                uint64_t tval;

                if ((thread == NULL) || !ml_validate_nofault((vm_offset_t) thread, sizeof(struct thread)))
                    goto error_exit;

                if ((tracepos + 4 * sizeof(struct thread_snapshot)) > tracebound) {
                    error = -1;
                    goto error_exit;
                }
                if (!save_userframes_p && thread->kernel_stack == 0)
                    continue;

                /* Populate the thread snapshot header */
                tsnap = (thread_snapshot_t) tracepos;
                tsnap->thread_id = thread_tid(thread);
                tsnap->state = thread->state;
                tsnap->priority = thread->priority;
                tsnap->sched_pri = thread->sched_pri;
                tsnap->sched_flags = thread->sched_flags;
                tsnap->wait_event = VM_KERNEL_UNSLIDE(thread->wait_event);
                tsnap->continuation = VM_KERNEL_UNSLIDE(thread->continuation);
                tval = safe_grab_timer_value(&thread->user_timer);
                tsnap->user_time = tval;
                tval = safe_grab_timer_value(&thread->system_timer);
                if (thread->precise_user_kernel_time) {
                    tsnap->system_time = tval;
                } else {
                    tsnap->user_time += tval;
                    tsnap->system_time = 0;
                }
                tsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
                tracepos += sizeof(struct thread_snapshot);
                tsnap->ss_flags = 0;

                if (thread->effective_policy.darwinbg) {
                    tsnap->ss_flags |= kThreadDarwinBG;
                }

                if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) {
                    uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
                    if (dqkeyaddr != 0) {
                        uint64_t dqaddr = 0;
                        if (kdp_copyin(task->map->pmap, dqkeyaddr, &dqaddr, (task64 ? 8 : 4)) && (dqaddr != 0)) {
                            uint64_t dqserialnumaddr = dqaddr + dispatch_offset;
                            uint64_t dqserialnum = 0;
                            if (kdp_copyin(task->map->pmap, dqserialnumaddr, &dqserialnum, (task64 ? 8 : 4))) {
                                tsnap->ss_flags |= kHasDispatchSerial;
                                *(uint64_t *)tracepos = dqserialnum;
                                tracepos += 8;
                            }
                        }
                    }
                }
                /* Call through to the machine specific trace routines.
                 * Frames are added past the snapshot header.
                 */
                tracebytes = 0;
                if (thread->kernel_stack != 0) {
#if defined(__LP64__)
                    tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
                    tsnap->ss_flags |= kKernel64_p;
                    framesize = 16;
#else
                    tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
                    framesize = 8;
#endif
                }
                tsnap->nkern_frames = tracebytes/framesize;
                tracepos += tracebytes;
                tracebytes = 0;
                /* Trace user stack, if any */
                if (save_userframes_p && task->active && thread->task->map != kernel_map) {
                    /* 64-bit task? */
                    if (task_has_64BitAddr(thread->task)) {
                        tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
                        tsnap->ss_flags |= kUser64_p;
                        framesize = 16;
                    }
                    else {
                        tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
                        framesize = 8;
                    }
                }
                tsnap->nuser_frames = tracebytes/framesize;
                tracepos += tracebytes;
                tracebytes = 0;
            }
        }
    }

    if (is_active_list) {
        is_active_list = FALSE;
        task_list = &terminated_tasks;
        goto walk_list;
    }

error_exit:
    /* Release stack snapshot wait indicator */
    kdp_snapshot_postflight();

    *pbytesTraced = (uint32_t)(tracepos - (char *) tracebuf);

    return error;
}

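/*
 * The I/O-port and MSR requests below are thin wrappers: they bounds-
 * check the transfer size and delegate to the machine-specific
 * backends (kdp_machine_ioport_read/write, kdp_machine_msr64_read/write).
 */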
static boolean_t
kdp_readioport(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_readioport_req_t *rq = &pkt->readioport_req;
    kdp_readioport_reply_t *rp = &pkt->readioport_reply;
    size_t plen = *len;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
#if KDP_TEST_HARNESS
        uint16_t addr = rq->address;
#endif
        uint16_t size = rq->nbytes;
        dprintf(("kdp_readioport addr %x size %d\n", addr, size));

        rp->error = kdp_machine_ioport_read(rq, rp->data, rq->lcpu);
        if (rp->error == KDPERR_NO_ERROR)
            rp->hdr.len += size;
    }

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_writeioport(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_writeioport_req_t *rq = &pkt->writeioport_req;
    kdp_writeioport_reply_t *rp = &pkt->writeioport_reply;
    size_t plen = *len;

    if (plen < sizeof (*rq))
        return (FALSE);

    if (rq->nbytes > MAX_KDP_DATA_SIZE)
        rp->error = KDPERR_BAD_NBYTES;
    else {
        dprintf(("kdp_writeioport addr %x size %d\n", rq->address,
                 rq->nbytes));

        rp->error = kdp_machine_ioport_write(rq, rq->data, rq->lcpu);
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_readmsr64(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_readmsr64_req_t *rq = &pkt->readmsr64_req;
    kdp_readmsr64_reply_t *rp = &pkt->readmsr64_reply;
    size_t plen = *len;

    if (plen < sizeof (*rq))
        return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_readmsr64 lcpu %x addr %x\n", rq->lcpu, rq->address));
    rp->error = kdp_machine_msr64_read(rq, rp->data, rq->lcpu);
    if (rp->error == KDPERR_NO_ERROR)
        rp->hdr.len += sizeof(uint64_t);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_writemsr64(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_writemsr64_req_t *rq = &pkt->writemsr64_req;
    kdp_writemsr64_reply_t *rp = &pkt->writemsr64_reply;
    size_t plen = *len;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_writemsr64 lcpu %x addr %x\n", rq->lcpu, rq->address));
    rp->error = kdp_machine_msr64_write(rq, rq->data, rq->lcpu);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}

static boolean_t
kdp_dumpinfo(
    kdp_pkt_t *pkt,
    int *len,
    unsigned short *reply_port
)
{
    kdp_dumpinfo_req_t *rq = &pkt->dumpinfo_req;
    kdp_dumpinfo_reply_t *rp = &pkt->dumpinfo_reply;
    size_t plen = *len;

    if (plen < sizeof (*rq))
        return (FALSE);

    dprintf(("kdp_dumpinfo file=%s destip=%s routerip=%s\n", rq->name, rq->destip, rq->routerip));
    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    if ((rq->type & KDP_DUMPINFO_MASK) != KDP_DUMPINFO_GETINFO) {
        kdp_set_dump_info(rq->type, rq->name, rq->destip, rq->routerip,
                          rq->port);
    }

    /* gather some stats for reply */
    kdp_get_dump_info(&rp->type, rp->name, rp->destip, rp->routerip,
                      &rp->port);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}