/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>

#include <vm/pmap.h>

#include <ppc/proc_reg.h>
#include <ppc/machparam.h>
#include <ppc/mem.h>
#include <ppc/pmap.h>
#include <ppc/mappings.h>
#include <ppc/cpu_data.h>

#include <mach/thread_status.h>
#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_protos.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_udp.h>
#include <kdp/kdp_internal.h>

#include <ppc/misc_protos.h>
#include <mach/vm_map.h>

pmap_t kdp_pmap = 0;
boolean_t kdp_trans_off = 0;
boolean_t kdp_read_io = 0;

unsigned kdp_vm_read(caddr_t, caddr_t, unsigned);
unsigned kdp_vm_write(caddr_t, caddr_t, unsigned);

extern vm_offset_t sectTEXTB, sectDATAB, sectLINKB, sectPRELINKB;
extern int sectSizeTEXT, sectSizeDATA, sectSizeLINK, sectSizePRELINK;

/* XXX prototypes which should be in a common header file */
addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);
int kern_dump(void);
int kdp_dump_trap(int type, struct savearea *regs);
/*
 * XXX the following prototype doesn't match the declaration because the
 * XXX actual declaration is wrong.
 */
extern int kdp_send_panic_packets(unsigned int request, char *corename,
    unsigned int length, caddr_t txstart);

typedef struct {
	int flavor;			/* the number for this flavor */
	int count;			/* count of ints in this flavor */
} mythread_state_flavor_t;

/* These will need to be uncommented and completed
 * if we support other architectures
 */

/*
#if defined (__ppc__)
*/
static mythread_state_flavor_t thread_flavor_array[] = {
	{PPC_THREAD_STATE, PPC_THREAD_STATE_COUNT},
};
/*
#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array [] = {
	{i386_THREAD_STATE, i386_THREAD_STATE_COUNT},
};
#else
#error architecture not supported
#endif
*/
static int kdp_mynum_flavors = 1;
static int MAX_TSTATE_FLAVORS = 1;

typedef struct {
	vm_offset_t header;			/* buffer holding the core file's Mach-O header */
	int hoffset;				/* current write offset into that buffer */
	mythread_state_flavor_t *flavors;	/* thread state flavors to collect */
	int tstate_size;			/* total bytes of thread state per thread */
} tir_t;

unsigned int not_in_kdp = 1;	/* Cleared when we begin to access vm functions in kdp */

char command_buffer[512];

// XXX static struct vm_object test_object;

/*
 * Translate a virtual address through the given pmap to a 64-bit
 * physical address; returns 0 if there is no valid translation.
 */
addr64_t
kdp_vtophys(
	pmap_t pmap,
	addr64_t va)
{
	addr64_t pa;
	ppnum_t pp;

	pp = pmap_find_phys(pmap, va);		/* Get the page number */
	if(!pp) return 0;			/* Just return if no translation */

	pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);	/* Shove in the page offset */
	return(pa);
}
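
/*
 * Illustrative sketch, not part of the original file: the same page-frame
 * arithmetic kdp_vtophys() performs above, assuming 4KB (2^12-byte) pages.
 * The guard macro and function name are hypothetical.
 */
#ifdef KDP_VM_EXAMPLES
static addr64_t
kdp_vtophys_example(ppnum_t pp, addr64_t va)
{
	/* The physical page number supplies bits 63..12; the VA supplies the low 12 bits. */
	return ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);
}
#endif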
/* Verify that src is valid, and physically copy len bytes from src to
 * dst, translating if necessary. If translation is enabled
 * (kdp_trans_off is 0), a non-zero kdp_pmap specifies the pmap to use
 * when translating src.
 */

unsigned kdp_vm_read(
	caddr_t src,
	caddr_t dst,
	unsigned len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt;
	unsigned int dummy;
	pmap_t pmap;

#ifdef KDP_VM_READ_DEBUG
	kprintf("kdp_vm_read1: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
#endif

	cur_virt_src = (addr64_t)((unsigned int)src);
	cur_virt_dst = (addr64_t)((unsigned int)dst);

	if (kdp_trans_off) {

		resid = len;				/* Get the length to copy */

		while (resid != 0) {

			if((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
				goto exit;

			if(kdp_read_io == 0)
				if(!mapping_phys_lookup((ppnum_t)(cur_virt_src >> 12), &dummy)) return 0;	/* Can't read where there's not any memory */

			cnt = 4096 - (cur_virt_src & 0xFFF);	/* Get length left on page */
			if (cnt > (4096 - (cur_virt_dst & 0xFFF)))
				cnt = 4096 - (cur_virt_dst & 0xFFF);

			if (cnt > resid) cnt = resid;

			bcopy_phys(cur_virt_src, cur_phys_dst, cnt);	/* Copy stuff over */

			cur_virt_src += cnt;
			cur_virt_dst += cnt;
			resid -= cnt;
		}

	} else {

		resid = len;

		if(kdp_pmap) pmap = kdp_pmap;		/* If special pmap, use it */
		else pmap = kernel_pmap;		/* otherwise, use kernel's */

		while (resid != 0) {
			/* Always translate the destination using the kernel_pmap. */
			if((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
				goto exit;

			if((cur_phys_src = kdp_vtophys(pmap, cur_virt_src)) == 0)
				goto exit;

			if(kdp_read_io == 0)
				if(!mapping_phys_lookup((ppnum_t)(cur_phys_src >> 12), &dummy)) goto exit;	/* Can't read where there's not any memory */

			cnt = 4096 - (cur_virt_src & 0xFFF);	/* Get length left on page */
			if (cnt > (4096 - (cur_virt_dst & 0xFFF)))
				cnt = 4096 - (cur_virt_dst & 0xFFF);

			if (cnt > resid) cnt = resid;

#ifdef KDP_VM_READ_DEBUG
			kprintf("kdp_vm_read2: pmap %08X, virt %016LLX, phys %016LLX\n",
			    pmap, cur_virt_src, cur_phys_src);
#endif

			bcopy_phys(cur_phys_src, cur_phys_dst, cnt);	/* Copy stuff over */

			cur_virt_src += cnt;
			cur_virt_dst += cnt;
			resid -= cnt;
		}
	}
exit:
#ifdef KDP_VM_READ_DEBUG
	kprintf("kdp_vm_read: ret %08X\n", len - resid);
#endif
	return (len - resid);
}
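
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * read from another address space by pointing the kdp_pmap global at that
 * pmap around the copy.  The guard macro and function name are hypothetical.
 */
#ifdef KDP_VM_EXAMPLES
static unsigned
kdp_vm_read_pmap_example(pmap_t pmap, caddr_t src, caddr_t dst, unsigned len)
{
	unsigned copied;

	kdp_pmap = pmap;			/* translate src through this pmap */
	copied = kdp_vm_read(src, dst, len);
	kdp_pmap = 0;				/* back to the default (kernel_pmap) */
	return copied;
}
#endif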

/*
 * Physically copy len bytes from src to dst; both addresses are
 * translated through the kernel pmap.
 */
unsigned kdp_vm_write(
	caddr_t src,
	caddr_t dst,
	unsigned len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_vm_write: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
#endif

	cur_virt_src = (addr64_t)((unsigned int)src);
	cur_virt_dst = (addr64_t)((unsigned int)dst);

	resid = len;

	while (resid != 0) {
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
			goto exit;

		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;

		cnt_src = ((cur_phys_src + NBPG) & (-NBPG)) - cur_phys_src;
		cnt_dst = ((cur_phys_dst + NBPG) & (-NBPG)) - cur_phys_dst;

		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);	/* Copy stuff over */
		sync_cache64(cur_phys_dst, cnt);		/* Sync caches */

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}
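
/*
 * Illustrative sketch, not part of the original file: the rounding used in
 * kdp_vm_write() above to find how many bytes remain before the next
 * NBPG-aligned page boundary.  The guard macro and function name are
 * hypothetical.
 */
#ifdef KDP_VM_EXAMPLES
static unsigned
kdp_bytes_to_page_end_example(addr64_t pa)
{
	/* Round pa up to the next page boundary, then measure the distance back to pa. */
	return (unsigned)(((pa + NBPG) & (-NBPG)) - pa);
}
#endif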

static void
kern_collectth_state(thread_t thread, tir_t *t)
{
	vm_offset_t header;
	int hoffset, i;
	mythread_state_flavor_t *flavors;
	struct thread_command *tc;
	/*
	 * Fill in thread command structure.
	 */
	header = t->header;
	hoffset = t->hoffset;
	flavors = t->flavors;

	tc = (struct thread_command *) (header + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = sizeof(struct thread_command)
	    + t->tstate_size;
	hoffset += sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < kdp_mynum_flavors; i++) {
		*(mythread_state_flavor_t *)(header + hoffset) =
		    flavors[i];
		hoffset += sizeof(mythread_state_flavor_t);

		if (machine_thread_get_kern_state(thread, flavors[i].flavor,
		    (thread_state_t) (header + hoffset),
		    &flavors[i].count) != KERN_SUCCESS)
			printf ("Failure in machine_thread_get_kern_state()\n");
		hoffset += flavors[i].count * sizeof(int);
	}

	t->hoffset = hoffset;
}
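
/*
 * Sketch (derived from the code above) of the record kern_collectth_state()
 * appends at header+hoffset for each thread:
 *
 *	struct thread_command		cmd = LC_THREAD, cmdsize
 *	mythread_state_flavor_t		{ PPC_THREAD_STATE, PPC_THREAD_STATE_COUNT }
 *	int state[count]		filled by machine_thread_get_kern_state()
 *	...				repeated for each flavor in thread_flavor_array
 */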

int
kdp_dump_trap(
	int type,
	__unused struct savearea *regs)
{
	printf ("An unexpected trap (type %d) occurred during the kernel dump, terminating.\n", type);
	kdp_send_panic_pkt (KDP_EOF, NULL, 0, ((void *) 0));
	abort_panic_transfer();
	kdp_flag &= ~KDP_PANIC_DUMP_ENABLED;
	kdp_flag &= ~PANIC_CORE_ON_NMI;
	kdp_flag &= ~PANIC_LOG_DUMP;

	kdp_reset();

	kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
	return( 0 );
}

/*
 * Kernel dump (limited to currently executing 32 bit mach_kernel only)
 */
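/*
 * Rough sketch, derived from the code below, of the corefile kern_dump()
 * transmits over KDP:
 *
 *	struct mach_header			MH_CORE
 *	struct segment_command x (N + 4)	kernel text/data/prelink/link, then
 *						one per kernel_map region
 *	LC_THREAD command + thread state	for the current thread
 *	--- padding out to round_page_32(header_size) ---
 *	segment contents, each at the fileoff recorded in its segment_command
 *	(regions tagged VM_MEMORY_IOKIT are skipped, leaving holes in the file)
 */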
int
kern_dump(void)
{
	int error = 0;
	vm_map_t map;
	unsigned int thread_count, segment_count;
	unsigned int command_size = 0, header_size = 0, tstate_size = 0;
	unsigned int hoffset = 0, foffset = 0, nfoffset = 0, vmoffset = 0;
	unsigned int max_header_size = 0;
	vm_offset_t header;
	struct mach_header *mh;
	struct segment_command *sc;
	vm_size_t size;
	vm_prot_t prot = 0;
	vm_prot_t maxprot = 0;
	vm_inherit_t inherit = 0;
	int error1 = 0;
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	vm_size_t nflavors;
	vm_size_t i;
	int nesting_depth = 0;
	kern_return_t kret = 0;
	struct vm_region_submap_info_64 vbr;
	int vbrcount = 0;
	tir_t tir1;

	int panic_error = 0;
	unsigned int txstart = 0;
	unsigned int mach_section_count = 4;
	unsigned int num_sects_txed = 0;

	map = kernel_map;
	not_in_kdp = 0;		/* Tell vm functions not to acquire locks */

	thread_count = 1;
	segment_count = get_vmmap_entries(map);

	printf("Kernel map has %d entries\n", segment_count);

	nflavors = kdp_mynum_flavors;
	bcopy((char *)thread_flavor_array, (char *)flavors, sizeof(thread_flavor_array));

	for (i = 0; i < nflavors; i++)
		tstate_size += sizeof(mythread_state_flavor_t) +
		    (flavors[i].count * sizeof(int));

	command_size = (segment_count + mach_section_count) *
	    sizeof(struct segment_command) +
	    thread_count * sizeof(struct thread_command) +
	    tstate_size * thread_count;

	header_size = command_size + sizeof(struct mach_header);
	header = (vm_offset_t) command_buffer;

	/*
	 * Set up Mach-O header for currently executing 32 bit kernel.
	 */
	printf ("Generated Mach-O header size was %d\n", header_size);

	mh = (struct mach_header *) header;
	mh->magic = MH_MAGIC;
	mh->cputype = cpu_type();
	mh->cpusubtype = cpu_subtype();	/* XXX incorrect; should match kernel */
	mh->filetype = MH_CORE;
	mh->ncmds = segment_count + thread_count + mach_section_count;
	mh->sizeofcmds = command_size;
	mh->flags = 0;

	hoffset = sizeof(struct mach_header);	/* offset into header */
	foffset = round_page_32(header_size);	/* offset into file */
	/* Padding.. */
	if ((foffset - header_size) < (4*sizeof(struct segment_command))) {
		/* Hack */
		foffset += ((4*sizeof(struct segment_command)) - (foffset - header_size));
	}

	max_header_size = foffset;

	vmoffset = VM_MIN_ADDRESS;		/* offset into VM */

	/* Transmit the Mach-O MH_CORE header, and seek forward past the
	 * area reserved for the segment and thread commands
	 * to begin data transmission
	 */

	if ((panic_error = kdp_send_panic_pkt (KDP_SEEK, NULL, sizeof(nfoffset), &nfoffset)) < 0) {
		printf ("kdp_send_panic_pkt failed with error %d\n", panic_error);
		return -1;
	}

	if ((panic_error = kdp_send_panic_packets (KDP_DATA, NULL, sizeof(struct mach_header), (caddr_t) mh)) < 0) {
		printf ("kdp_send_panic_packets failed with error %d\n", panic_error);
		return -1;
	}

	if ((panic_error = kdp_send_panic_pkt (KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
		printf ("kdp_send_panic_pkt failed with error %d\n", panic_error);
		return (-1);
	}
	printf ("Transmitting kernel state, please wait: ");

	while ((segment_count > 0) || (kret == KERN_SUCCESS)) {
		/* Check if we've transmitted all the kernel sections */
		if (num_sects_txed == mach_section_count - 1) {

			while (1) {

				/*
				 * Get region information for next region.
				 */

				vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
				if((kret = vm_region_recurse_64(map,
				    &vmoffset, &size, &nesting_depth,
				    (vm_region_recurse_info_t)&vbr,
				    &vbrcount)) != KERN_SUCCESS) {
					break;
				}

				if(vbr.is_submap) {
					nesting_depth++;
					continue;
				} else {
					break;
				}
			}

			if(kret != KERN_SUCCESS)
				break;

			prot = vbr.protection;
			maxprot = vbr.max_protection;
			inherit = vbr.inheritance;
		}
		else
		{
			switch (num_sects_txed) {
			case 0:
				{
					/* Transmit the kernel text section */
					vmoffset = sectTEXTB;
					size = sectSizeTEXT;
				}
				break;
			case 1:
				{
					vmoffset = sectDATAB;
					size = sectSizeDATA;
				}
				break;
			case 2:
				{
					vmoffset = sectPRELINKB;
					size = sectSizePRELINK;
				}
				break;
			case 3:
				{
					vmoffset = sectLINKB;
					size = sectSizeLINK;
				}
				break;
			/* TODO the lowmem vector area may be useful, but its transmission is
			 * disabled for now. The traceback table area should be transmitted
			 * as well - that's indirected from 0x5080.
			 */
			}
			num_sects_txed++;
		}
		/*
		 * Fill in segment command structure.
		 */

		if (hoffset > max_header_size)
			break;
		sc = (struct segment_command *) (header);
		sc->cmd = LC_SEGMENT;
		sc->cmdsize = sizeof(struct segment_command);
		sc->segname[0] = 0;
		sc->vmaddr = vmoffset;
		sc->vmsize = size;
		sc->fileoff = foffset;
		sc->filesize = size;
		sc->maxprot = maxprot;
		sc->initprot = prot;
		sc->nsects = 0;

		if ((panic_error = kdp_send_panic_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
			printf ("kdp_send_panic_pkt failed with error %d\n", panic_error);
			return -1;
		}

		if ((panic_error = kdp_send_panic_packets (KDP_DATA, NULL, sizeof(struct segment_command), (caddr_t) sc)) < 0) {
			printf ("kdp_send_panic_packets failed with error %d\n", panic_error);
			return -1;
		}

		/* Do not transmit memory tagged VM_MEMORY_IOKIT - instead, seek past that
		 * region on the server - this creates a hole in the file
		 */

		if ((vbr.user_tag != VM_MEMORY_IOKIT)) {

			if ((panic_error = kdp_send_panic_pkt (KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
				printf ("kdp_send_panic_pkt failed with error %d\n", panic_error);
				return (-1);
			}

			txstart = vmoffset;

			if ((panic_error = kdp_send_panic_packets (KDP_DATA, NULL, size, (caddr_t) txstart)) < 0) {
				printf ("kdp_send_panic_packets failed with error %d\n", panic_error);
				return -1;
			}
		}

		hoffset += sizeof(struct segment_command);
		foffset += size;
		vmoffset += size;
		segment_count--;
	}

	tir1.header = header;
	tir1.hoffset = 0;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;

	/* Now send out the LC_THREAD load command, with the thread information
	 * for the current activation.
	 * Note that the corefile can contain LC_SEGMENT commands with file offsets
	 * that point past the edge of the corefile, in the event that the last N
	 * VM regions were all I/O mapped or otherwise non-transferable memory,
	 * not followed by a normal VM region; i.e. there will be no hole that
	 * reaches to the end of the core file.
	 */
	kern_collectth_state (current_thread(), &tir1);

	if ((panic_error = kdp_send_panic_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
		printf ("kdp_send_panic_pkt failed with error %d\n", panic_error);
		return -1;
	}

	if ((panic_error = kdp_send_panic_packets (KDP_DATA, NULL, tir1.hoffset, (caddr_t) header)) < 0) {
		printf ("kdp_send_panic_packets failed with error %d\n", panic_error);
		return -1;
	}

	/* last packet */
	if ((panic_error = kdp_send_panic_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0) {
		printf ("kdp_send_panic_pkt failed with error %d\n", panic_error);
		return (-1);
	}

	if (error == 0)
		error = error1;
	return (error);
}