/* osfmk/kdp/ml/i386/kdp_x86_common.c, from apple/xnu (xnu-2050.24.15) */
/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <libsa/types.h>

#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <i386/pmap_internal.h> /* pmap_pde */
#include <i386/mp.h>
#include <i386/misc_protos.h>
#include <i386/pio.h>
#include <i386/proc_reg.h>

#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <mach/vm_map.h>

#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <machine/pal_routines.h>
#include <libkern/kernel_mach_header.h>

// #define KDP_VM_READ_DEBUG 1
// #define KDP_VM_WRITE_DEBUG 1

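/*
 * kdp_read_io: when TRUE, permit physical reads of pages that are not
 * managed RAM (e.g. memory-mapped I/O); otherwise such pages are rejected.
 * kdp_trans_off: when TRUE, skip virtual-to-physical translation and treat
 * debugger-supplied addresses as physical.
 */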
boolean_t kdp_read_io;
boolean_t kdp_trans_off;

static addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);

int kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                               vm_map_offset_t end,
                                               void *context);
int kern_dump_pmap_traverse_send_callback(vm_map_offset_t start,
                                          vm_map_offset_t end,
                                          void *context);

pmap_t kdp_pmap = 0;

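/*
 * Translate a virtual address through the given pmap, returning the full
 * 64-bit physical address, or 0 if no valid mapping exists.
 */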
static addr64_t
kdp_vtophys(
    pmap_t pmap,
    addr64_t va)
{
    addr64_t pa;
    ppnum_t pp;

    pp = pmap_find_phys(pmap, va);
    if (!pp)
        return 0;

    pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);

    return (pa);
}

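/*
 * Copy len bytes of virtual memory, page fragment by page fragment, from
 * src (translated through kdp_pmap if set, else kernel_pmap) into the
 * kernel buffer dst. Returns the number of bytes actually copied.
 */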
mach_vm_size_t
kdp_machine_vm_read(mach_vm_address_t src, caddr_t dst, mach_vm_size_t len)
{
    addr64_t cur_virt_src = PAL_KDP_ADDR((addr64_t)src);
    addr64_t cur_virt_dst = PAL_KDP_ADDR((addr64_t)(intptr_t)dst);
    addr64_t cur_phys_dst, cur_phys_src;
    mach_vm_size_t resid = len;
    mach_vm_size_t cnt = 0, cnt_src, cnt_dst;
    pmap_t src_pmap = kernel_pmap;

#ifdef KDP_VM_READ_DEBUG
    printf("kdp_vm_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

    if (kdp_trans_off) {
        kdp_readphysmem64_req_t rq;
        mach_vm_size_t ret;

        rq.address = src;
        rq.nbytes = (uint32_t)len;
        ret = kdp_machine_phys_read(&rq, dst, KDP_CURRENT_LCPU);
        return ret;
    }

    /* If a different pmap has been specified with kdp_pmap, use it to
     * translate the source (cur_virt_src); otherwise, the source is
     * translated using the kernel_pmap.
     */
    if (kdp_pmap)
        src_pmap = kdp_pmap;

    while (resid != 0) {
        if (!(cur_phys_src = kdp_vtophys(src_pmap, cur_virt_src)))
            goto exit;

        /* Always translate the destination buffer using the kernel_pmap */
        if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
            goto exit;

        /* Validate physical page numbers unless kdp_read_io is set */
        if (kdp_read_io == FALSE)
            if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src)))
                goto exit;

        /* Get length left on page */
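        /* e.g. with 4 KiB pages, a physical offset of 0x234 leaves
         * 0x1000 - 0x234 = 0xdcc bytes before the next page boundary.
         */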
        cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
        cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
        if (cnt_src > cnt_dst)
            cnt = cnt_dst;
        else
            cnt = cnt_src;
        if (cnt > resid)
            cnt = resid;

        /* Do a physical copy */
        ml_copy_phys(cur_phys_src, cur_phys_dst, (vm_size_t)cnt);

        cur_virt_src += cnt;
        cur_virt_dst += cnt;
        resid -= cnt;
    }
exit:
    return (len - resid);
}

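/*
 * Read len bytes of physical memory starting at rq->address into dst.
 * If the request names a logical CPU other than the current one, the
 * operation is re-invoked on that CPU.
 */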
mach_vm_size_t
kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst,
                      uint16_t lcpu)
{
    mach_vm_address_t src = rq->address;
    mach_vm_size_t len = rq->nbytes;

    addr64_t cur_virt_dst;
    addr64_t cur_phys_dst, cur_phys_src;
    mach_vm_size_t resid = len;
    mach_vm_size_t cnt = 0, cnt_src, cnt_dst;
    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (mach_vm_size_t)
            kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_read, rq, dst);
    }

#ifdef KDP_VM_READ_DEBUG
    printf("kdp_phys_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

    cur_virt_dst = (addr64_t)(intptr_t)dst;
    cur_phys_src = (addr64_t)src;

    while (resid != 0) {

        if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
            goto exit;

        /* Get length left on page */
        cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
        cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
        if (cnt_src > cnt_dst)
            cnt = cnt_dst;
        else
            cnt = cnt_src;
        if (cnt > resid)
            cnt = resid;

        /* Do a physical copy; use ml_copy_phys() in the event this is
         * a short read with potential side effects.
         */
        ml_copy_phys(cur_phys_src, cur_phys_dst, (vm_size_t)cnt);
        cur_phys_src += cnt;
        cur_virt_dst += cnt;
        resid -= cnt;
    }
exit:
    return (len - resid);
}

/*
 * Copy len bytes from the kernel buffer src into kernel virtual memory
 * at dst, one page fragment at a time. Returns the number of bytes
 * actually written.
 */
mach_vm_size_t
kdp_machine_vm_write(caddr_t src, mach_vm_address_t dst, mach_vm_size_t len)
{
    addr64_t cur_virt_src, cur_virt_dst;
    addr64_t cur_phys_src, cur_phys_dst;
    unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
    printf("kdp_vm_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

    cur_virt_src = PAL_KDP_ADDR((addr64_t)(intptr_t)src);
    cur_virt_dst = PAL_KDP_ADDR((addr64_t)dst);

    resid = (unsigned)len;

    while (resid != 0) {
        if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
            goto exit;

        if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
            goto exit;

        /* Copy as many bytes as possible without crossing a page */
        cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
        cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

        if (cnt_src > cnt_dst)
            cnt = cnt_dst;
        else
            cnt = cnt_src;
        if (cnt > resid)
            cnt = resid;

        ml_copy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */

        cur_virt_src += cnt;
        cur_virt_dst += cnt;
        resid -= cnt;
    }
exit:
    return (len - resid);
}

/*
 * Write rq->nbytes bytes from src to physical memory at rq->address,
 * redirecting to the requested logical CPU when necessary.
 */
mach_vm_size_t
kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t src,
                       uint16_t lcpu)
{
    mach_vm_address_t dst = rq->address;
    mach_vm_size_t len = rq->nbytes;
    addr64_t cur_virt_src;
    addr64_t cur_phys_src, cur_phys_dst;
    unsigned resid, cnt, cnt_src, cnt_dst;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (mach_vm_size_t)
            kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_write, rq, src);
    }

#ifdef KDP_VM_WRITE_DEBUG
    printf("kdp_phys_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

    cur_virt_src = (addr64_t)(intptr_t)src;
    cur_phys_dst = (addr64_t)dst;

    resid = (unsigned)len;

    while (resid != 0) {
        if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
            goto exit;

        /* Copy as many bytes as possible without crossing a page */
        cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
        cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

        if (cnt_src > cnt_dst)
            cnt = cnt_dst;
        else
            cnt = cnt_src;
        if (cnt > resid)
            cnt = resid;

        ml_copy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */

        cur_virt_src += cnt;
        cur_phys_dst += cnt;
        resid -= cnt;
    }

exit:
    return (len - resid);
}

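/*
 * Read a 1-, 2-, or 4-byte value from the x86 I/O port rq->address into
 * data; any other width is rejected with KDPERR_BADFLAVOR.
 */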
int
kdp_machine_ioport_read(kdp_readioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint16_t addr = rq->address;
    uint16_t size = rq->nbytes;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_read, rq, data);
    }

    switch (size)
    {
    case 1:
        *((uint8_t *) data) = inb(addr);
        break;
    case 2:
        *((uint16_t *) data) = inw(addr);
        break;
    case 4:
        *((uint32_t *) data) = inl(addr);
        break;
    default:
        return KDPERR_BADFLAVOR;
    }

    return KDPERR_NO_ERROR;
}

int
kdp_machine_ioport_write(kdp_writeioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint16_t addr = rq->address;
    uint16_t size = rq->nbytes;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_write, rq, data);
    }

    switch (size)
    {
    case 1:
        outb(addr, *((uint8_t *) data));
        break;
    case 2:
        outw(addr, *((uint16_t *) data));
        break;
    case 4:
        outl(addr, *((uint32_t *) data));
        break;
    default:
        return KDPERR_BADFLAVOR;
    }

    return KDPERR_NO_ERROR;
}

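/*
 * Read or write a 64-bit model-specific register (rq->address is the MSR
 * number) on the requested logical CPU.
 */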
int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint64_t *value = (uint64_t *) data;
    uint32_t msr = rq->address;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_read, rq, data);
    }

    *value = rdmsr64(msr);
    return KDPERR_NO_ERROR;
}

int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint64_t *value = (uint64_t *) data;
    uint32_t msr = rq->address;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_write, rq, data);
    }

    wrmsr64(msr, *value);
    return KDPERR_NO_ERROR;
}

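/*
 * Walk the virtual range [start, end) in pmap, coalescing runs of
 * contiguous present (and pmap-valid) pages, and invoke callback once
 * per run. Wholly unmapped page directories are skipped 2MB at a time.
 */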
int
pmap_traverse_present_mappings(pmap_t pmap,
                               vm_map_offset_t start,
                               vm_map_offset_t end,
                               pmap_traverse_callback callback,
                               void *context)
{
    int ret = KERN_SUCCESS;
    vm_map_offset_t vcurstart, vcur;
    boolean_t lastvavalid = FALSE;

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) {
        return (KERN_INVALID_ARGUMENT);
    }

    if (start & PAGE_MASK_64) {
        return (KERN_INVALID_ARGUMENT);
    }

    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
        ppnum_t ppn = pmap_find_phys(pmap, vcur);

        if (ppn != 0 && !pmap_valid_page(ppn)) {
            /* not something we want */
            ppn = 0;
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */

                ret = callback(vcurstart, vcur, context);

                lastvavalid = FALSE;
            }

            /* Try to skip by 2MB if possible */
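            /* (A clear PDE covers NBPD bytes of virtual space, so none
             * of the pages beneath it can be mapped.)
             */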
            if (((vcur & PDMASK) == 0) && cpu_64bit) {
                pd_entry_t *pde;

                pde = pmap_pde(pmap, vcur);
                if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
                    /* Make sure we wouldn't overflow */
                    if (vcur < (end - NBPD)) {
                        vcur += NBPD;
                        continue;
                    }
                }
            }
        }

        vcur += PAGE_SIZE_64;
    }

    if ((ret == KERN_SUCCESS) && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }
    return (ret);
}

struct kern_dump_preflight_context {
    uint32_t region_count;
    uint64_t dumpable_bytes;
};

struct kern_dump_send_context {
    uint64_t hoffset;
    uint64_t foffset;
    uint64_t header_size;
};

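/*
 * kern_dump() makes two traversal passes: a preflight pass that only
 * counts regions and bytes (to size the Mach-O header), then a send pass
 * that emits one segment command plus the region's data per run.
 */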
int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context)
{
    struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
    int ret = KERN_SUCCESS;

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    return (ret);
}

int
kern_dump_pmap_traverse_send_callback(vm_map_offset_t start,
                                      vm_map_offset_t end,
                                      void *context)
{
    struct kern_dump_send_context *kdc = (struct kern_dump_send_context *)context;
    int ret = KERN_SUCCESS;
    kernel_segment_command_t sc;
    vm_size_t size = (vm_size_t)(end - start);

    if (kdc->hoffset + sizeof(sc) > kdc->header_size) {
        return (KERN_NO_SPACE);
    }

    /*
     * Fill in segment command structure.
     */

    sc.cmd = LC_SEGMENT_KERNEL;
    sc.cmdsize = sizeof(kernel_segment_command_t);
    sc.segname[0] = 0;
    sc.vmaddr = (vm_address_t)start;
    sc.vmsize = size;
    sc.fileoff = (vm_address_t)kdc->foffset;
    sc.filesize = size;
    sc.maxprot = VM_PROT_READ;
    sc.initprot = VM_PROT_READ;
    sc.nsects = 0;
    sc.flags = 0;

    if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(kdc->hoffset), &kdc->hoffset)) < 0) {
        printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
        goto out;
    }

    if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, sizeof(kernel_segment_command_t), (caddr_t) &sc)) < 0) {
        printf("kdp_send_crashdump_data failed with error %d\n", ret);
        goto out;
    }

    kdc->hoffset += sizeof(kernel_segment_command_t);

    if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(kdc->foffset), &kdc->foffset)) < 0) {
        printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
        goto out;
    }

    if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, (unsigned int)size, (caddr_t)(uintptr_t)start)) < 0) {
        printf("kdp_send_crashdump_data failed with error %d\n", ret);
        goto out;
    }

    kdc->foffset += size;

out:
    return (ret);
}

int
kern_dump(void)
{
    int ret;
    struct kern_dump_preflight_context kdc_preflight;
    struct kern_dump_send_context kdc_send;
    uint32_t segment_count;
    size_t command_size = 0, header_size = 0, tstate_size = 0;
    uint64_t hoffset = 0, foffset = 0;
    kernel_mach_header_t mh;

    kdc_preflight.region_count = 0;
    kdc_preflight.dumpable_bytes = 0;

    ret = pmap_traverse_present_mappings(kernel_pmap,
                                         VM_MIN_KERNEL_AND_KEXT_ADDRESS,
                                         VM_MAX_KERNEL_ADDRESS,
                                         kern_dump_pmap_traverse_preflight_callback,
                                         &kdc_preflight);
    if (ret) {
        printf("pmap traversal failed: %d\n", ret);
        return (ret);
    }

    printf("Kernel dump region count: %u\n", kdc_preflight.region_count);
    printf("Kernel dump byte count: %llu\n", kdc_preflight.dumpable_bytes);

    segment_count = kdc_preflight.region_count;

    tstate_size = sizeof(struct thread_command) + kern_collectth_state_size();

    command_size = segment_count * sizeof(kernel_segment_command_t) +
        tstate_size;

    header_size = command_size + sizeof(kernel_mach_header_t);

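    /*
     * Resulting core file layout (hoffset tracks the position within the
     * header area, foffset the position within the file proper):
     *
     *   [kernel_mach_header_t]
     *   [segment_count x kernel_segment_command_t]
     *   [LC_THREAD command]
     *   ...zero pad to the next page boundary...
     *   [segment 0 data][segment 1 data]...
     */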
    /*
     * Set up Mach-O header for currently executing kernel.
     */
    printf("Generated Mach-O header size was %lu\n", header_size);

    mh.magic = _mh_execute_header.magic;
    mh.cputype = _mh_execute_header.cputype;
    mh.cpusubtype = _mh_execute_header.cpusubtype;
    mh.filetype = MH_CORE;
    mh.ncmds = segment_count + 1 /* thread */;
    mh.sizeofcmds = (uint32_t)command_size;
    mh.flags = 0;
#if defined(__LP64__)
    mh.reserved = 0;
#endif


    hoffset = 0;                                 /* offset into header */
    foffset = (uint32_t)round_page(header_size); /* offset into file */

    /* Transmit the Mach-O MH_CORE header, and seek forward past the
     * area reserved for the segment and thread commands
     * to begin data transmission
     */
    if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
        printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
        goto out;
    }
    if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, sizeof(kernel_mach_header_t), (caddr_t) &mh)) < 0) {
        printf("kdp_send_crashdump_data failed with error %d\n", ret);
        goto out;
    }

    hoffset += sizeof(kernel_mach_header_t);

    if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
        printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
        goto out;
    }

    printf("Transmitting kernel state, please wait: ");

    kdc_send.hoffset = hoffset;
    kdc_send.foffset = foffset;
    kdc_send.header_size = header_size;

    ret = pmap_traverse_present_mappings(kernel_pmap,
                                         VM_MIN_KERNEL_AND_KEXT_ADDRESS,
                                         VM_MAX_KERNEL_ADDRESS,
                                         kern_dump_pmap_traverse_send_callback,
                                         &kdc_send);
    if (ret) {
        kprintf("pmap traversal failed: %d\n", ret);
        return (ret);
    }

    /* Reload mutated offsets */
    hoffset = kdc_send.hoffset;
    foffset = kdc_send.foffset;

    /*
     * Now send out the LC_THREAD load command, with the thread information
     * for the current activation.
     */
    if (tstate_size > 0) {
        char tstate[tstate_size];

        kern_collectth_state(current_thread(), tstate, tstate_size);

        if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
            printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
            goto out;
        }

        if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, tstate_size, tstate)) < 0) {
            printf("kdp_send_crashdump_data failed with error %d\n", ret);
            goto out;
        }

        hoffset += tstate_size;
    }

    /* last packet */
    if ((ret = kdp_send_crashdump_pkt(KDP_EOF, NULL, 0, ((void *) 0))) < 0) {
        printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
        goto out;
    }

out:
    return (ret);
}


pt_entry_t *debugger_ptep;
vm_map_offset_t debugger_window_kva;

/* Establish a pagetable window that can be remapped on demand.
 * This is utilized by the debugger to address regions outside
 * the physical map.
 */

void
kdp_machine_init(void) {
    if (debug_boot_arg == 0)
        return;

    vm_map_entry_t e;
    kern_return_t kr = vm_map_find_space(kernel_map,
                                         &debugger_window_kva,
                                         PAGE_SIZE, 0,
                                         VM_MAKE_TAG(VM_MEMORY_IOKIT), &e);

    if (kr != KERN_SUCCESS) {
        panic("%s: vm_map_find_space failed with %d\n", __FUNCTION__, kr);
    }

    vm_map_unlock(kernel_map);

    debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);

    if (debugger_ptep == NULL) {
        pmap_expand(kernel_pmap, debugger_window_kva, PMAP_EXPAND_OPTIONS_NONE);
        debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);
    }
}
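
/* A minimal usage sketch (illustrative only; the helper below is
 * hypothetical and not part of this file): the debugger can point the
 * window at an arbitrary physical page by rewriting debugger_ptep and
 * flushing the stale TLB entry, then read through debugger_window_kva.
 */
#if 0
static void
kdp_remap_debug_window(ppnum_t pn)
{
    /* Install the new translation, mirroring the PFN shift in kdp_vtophys() */
    pmap_store_pte(debugger_ptep, ((pt_entry_t)pn << 12) | INTEL_PTE_VALID);
    /* Invalidate the old mapping of the window's virtual address */
    invlpg((uintptr_t)debugger_window_kva);
}
#endif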