/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>

#include <vm/pmap.h>

#include <ppc/proc_reg.h>
#include <ppc/machparam.h>
#include <ppc/mem.h>
#include <ppc/pmap.h>
#include <ppc/mappings.h>
#include <ppc/cpu_data.h>

#include <mach/thread_status.h>
#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_protos.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_udp.h>
#include <kdp/kdp_internal.h>

#include <ppc/misc_protos.h>
#include <mach/vm_map.h>

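/*
 * Debugger-controlled globals: kdp_pmap selects an alternate pmap for
 * translating read sources (0 means kernel_pmap), kdp_trans_off treats
 * source addresses as physical, and kdp_read_io permits reads from
 * pages that are not known RAM.
 */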
pmap_t kdp_pmap = 0;
boolean_t kdp_trans_off = 0;
boolean_t kdp_read_io = 0;

unsigned kdp_vm_read(caddr_t, caddr_t, unsigned);
unsigned kdp_vm_write(caddr_t, caddr_t, unsigned);

extern vm_offset_t sectTEXTB, sectDATAB, sectLINKB, sectPRELINKB;
extern int sectSizeTEXT, sectSizeDATA, sectSizeLINK, sectSizePRELINK;

/* XXX prototypes which should be in a common header file */
addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);
int kern_dump(void);
int kdp_dump_trap(int type, struct savearea *regs);
/*
 * XXX the following prototype doesn't match the declaration because the
 * XXX actual declaration is wrong.
 */
extern int kdp_send_panic_packets(unsigned int request, char *corename,
                                  unsigned int length, caddr_t txstart);

typedef struct {
    int flavor;         /* the number for this flavor */
    int count;          /* count of ints in this flavor */
} mythread_state_flavor_t;

/* These will need to be uncommented and completed
 * if we support other architectures.
 */

/*
#if defined (__ppc__)
*/
static mythread_state_flavor_t thread_flavor_array[] = {
    {PPC_THREAD_STATE, PPC_THREAD_STATE_COUNT},
};
/*
#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array[] = {
    {i386_THREAD_STATE, i386_THREAD_STATE_COUNT},
};
#else
#error architecture not supported
#endif
*/
static int kdp_mynum_flavors = 1;
static int MAX_TSTATE_FLAVORS = 1;

typedef struct {
    vm_offset_t header;
    int hoffset;
    mythread_state_flavor_t *flavors;
    int tstate_size;
} tir_t;

unsigned int not_in_kdp = 1;    /* Cleared when we begin to access vm functions in kdp */

char command_buffer[512];

// XXX static struct vm_object test_object;

/*
 * Translate a virtual address to a physical address using the given pmap.
 * Returns 0 if no valid translation exists.
 */
addr64_t
kdp_vtophys(
    pmap_t pmap,
    addr64_t va)
{
    addr64_t pa;
    ppnum_t pp;

    pp = pmap_find_phys(pmap, va);          /* Get the page number */
    if (!pp) return 0;                      /* Just return if no translation */

    pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);  /* Shove in the page offset */
    return (pa);
}
/* Verify that src is valid, and physically copy len bytes from src to
 * dst, translating if necessary. If translation is enabled
 * (kdp_trans_off is 0), a non-zero kdp_pmap specifies the pmap to use
 * when translating src.
 */

unsigned kdp_vm_read(
    caddr_t src,
    caddr_t dst,
    unsigned len)
{
    addr64_t cur_virt_src, cur_virt_dst;
    addr64_t cur_phys_src, cur_phys_dst;
    unsigned resid, cnt;
    unsigned int dummy;
    pmap_t pmap;

#ifdef KDP_VM_READ_DEBUG
    kprintf("kdp_vm_read1: src %x dst %x len %x - %08X %08X\n", src, dst, len,
            ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
#endif

    cur_virt_src = (addr64_t)((unsigned int)src);
    cur_virt_dst = (addr64_t)((unsigned int)dst);

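    /*
     * Two modes: with kdp_trans_off set, the source address is treated as a
     * physical address and copied directly; otherwise the source is translated
     * page by page through kdp_pmap (if set) or kernel_pmap. The destination
     * is always translated through kernel_pmap.
     */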
    if (kdp_trans_off) {

        resid = len;                            /* Get the length to copy */

        while (resid != 0) {

            if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
                goto exit;

            if (kdp_read_io == 0)
                if (!mapping_phys_lookup((ppnum_t)(cur_virt_src >> 12), &dummy))
                    return 0;                   /* Can't read where there's not any memory */

            cnt = 4096 - (cur_virt_src & 0xFFF);    /* Get length left on page */
            if (cnt > (4096 - (cur_virt_dst & 0xFFF)))
                cnt = 4096 - (cur_virt_dst & 0xFFF);

            if (cnt > resid)
                cnt = resid;

            bcopy_phys(cur_virt_src, cur_phys_dst, cnt);    /* Copy stuff over */

            cur_virt_src += cnt;
            cur_virt_dst += cnt;
            resid -= cnt;
        }

    } else {

        resid = len;

        if (kdp_pmap)
            pmap = kdp_pmap;                    /* If special pmap, use it */
        else
            pmap = kernel_pmap;                 /* otherwise, use kernel's */

        while (resid != 0) {
            /* Always translate the destination using the kernel_pmap. */
            if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
                goto exit;

            if ((cur_phys_src = kdp_vtophys(pmap, cur_virt_src)) == 0)
                goto exit;

            if (kdp_read_io == 0)
                if (!mapping_phys_lookup((ppnum_t)(cur_phys_src >> 12), &dummy))
                    goto exit;                  /* Can't read where there's not any memory */

            cnt = 4096 - (cur_virt_src & 0xFFF);    /* Get length left on page */
            if (cnt > (4096 - (cur_virt_dst & 0xFFF)))
                cnt = 4096 - (cur_virt_dst & 0xFFF);

            if (cnt > resid)
                cnt = resid;

#ifdef KDP_VM_READ_DEBUG
            kprintf("kdp_vm_read2: pmap %08X, virt %016LLX, phys %016LLX\n",
                    pmap, cur_virt_src, cur_phys_src);
#endif

            bcopy_phys(cur_phys_src, cur_phys_dst, cnt);    /* Copy stuff over */

            cur_virt_src += cnt;
            cur_virt_dst += cnt;
            resid -= cnt;
        }
    }
exit:
#ifdef KDP_VM_READ_DEBUG
    kprintf("kdp_vm_read: ret %08X\n", len - resid);
#endif
    return (len - resid);
}

/*
 * Physically copy len bytes from src to dst, translating both addresses
 * through kernel_pmap, and synchronize the caches for the destination range.
 */
unsigned kdp_vm_write(
    caddr_t src,
    caddr_t dst,
    unsigned len)
{
    addr64_t cur_virt_src, cur_virt_dst;
    addr64_t cur_phys_src, cur_phys_dst;
    unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
    printf("kdp_vm_write: src %x dst %x len %x - %08X %08X\n", src, dst, len,
           ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
#endif

    cur_virt_src = (addr64_t)((unsigned int)src);
    cur_virt_dst = (addr64_t)((unsigned int)dst);

    resid = len;

    while (resid != 0) {
        if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
            goto exit;

        if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
            goto exit;

        cnt_src = ((cur_phys_src + NBPG) & (-NBPG)) - cur_phys_src;
        cnt_dst = ((cur_phys_dst + NBPG) & (-NBPG)) - cur_phys_dst;

        if (cnt_src > cnt_dst)
            cnt = cnt_dst;
        else
            cnt = cnt_src;
        if (cnt > resid)
            cnt = resid;

        bcopy_phys(cur_phys_src, cur_phys_dst, cnt);    /* Copy stuff over */
        sync_cache64(cur_phys_dst, cnt);                /* Sync caches */

        cur_virt_src += cnt;
        cur_virt_dst += cnt;
        resid -= cnt;
    }
exit:
    return (len - resid);
}


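/*
 * Append an LC_THREAD command for the given thread to the core header that
 * t describes: the thread_command itself, then a flavor/count pair followed
 * by the corresponding register state for each flavor in t->flavors.
 */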
static void
kern_collectth_state(thread_t thread, tir_t *t)
{
    vm_offset_t header;
    int hoffset, i;
    mythread_state_flavor_t *flavors;
    struct thread_command *tc;
    /*
     * Fill in thread command structure.
     */
    header = t->header;
    hoffset = t->hoffset;
    flavors = t->flavors;

    tc = (struct thread_command *) (header + hoffset);
    tc->cmd = LC_THREAD;
    tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
    hoffset += sizeof(struct thread_command);
    /*
     * Follow with a struct thread_state_flavor and
     * the appropriate thread state struct for each
     * thread state flavor.
     */
    for (i = 0; i < kdp_mynum_flavors; i++) {
        *(mythread_state_flavor_t *)(header + hoffset) = flavors[i];
        hoffset += sizeof(mythread_state_flavor_t);

        if (machine_thread_get_kern_state(thread, flavors[i].flavor,
                                          (thread_state_t)(header + hoffset),
                                          &flavors[i].count) != KERN_SUCCESS)
            printf("Failure in machine_thread_get_kern_state()\n");
        hoffset += flavors[i].count * sizeof(int);
    }

    t->hoffset = hoffset;
}

int
kdp_dump_trap(
    int type,
    __unused struct savearea *regs)
{
    printf("An unexpected trap (type %d) occurred during the kernel dump, terminating.\n", type);
    kdp_send_panic_pkt(KDP_EOF, NULL, 0, ((void *) 0));
    abort_panic_transfer();
    kdp_flag &= ~KDP_PANIC_DUMP_ENABLED;
    kdp_flag &= ~PANIC_CORE_ON_NMI;
    kdp_flag &= ~PANIC_LOG_DUMP;

    kdp_reset();

    kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
    return (0);
}

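/*
 * kern_dump() streams a Mach-O MH_CORE image of the running kernel over KDP
 * to the remote panic dump server: the Mach-O header and load commands are
 * staged in command_buffer and sent piecewise with KDP_SEEK/KDP_DATA
 * requests, followed by the contents of the kernel's sections and VM regions.
 */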
/*
 * Kernel dump (limited to currently executing 32 bit mach_kernel only)
 */
int
kern_dump(void)
{
    int error = 0;
    vm_map_t map;
    unsigned int thread_count, segment_count;
    unsigned int command_size = 0, header_size = 0, tstate_size = 0;
    unsigned int hoffset = 0, foffset = 0, nfoffset = 0, vmoffset = 0;
    unsigned int max_header_size = 0;
    vm_offset_t header;
    struct mach_header *mh;
    struct segment_command *sc;
    vm_size_t size;
    vm_prot_t prot = 0;
    vm_prot_t maxprot = 0;
    vm_inherit_t inherit = 0;
    int error1 = 0;
    mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
    vm_size_t nflavors;
    vm_size_t i;
    int nesting_depth = 0;
    kern_return_t kret = 0;
    struct vm_region_submap_info_64 vbr;
    int vbrcount = 0;
    tir_t tir1;

    int panic_error = 0;
    unsigned int txstart = 0;
    unsigned int mach_section_count = 4;
    unsigned int num_sects_txed = 0;

    map = kernel_map;
    not_in_kdp = 0;     /* Tell vm functions not to acquire locks */

    thread_count = 1;
    segment_count = get_vmmap_entries(map);

    printf("Kernel map has %d entries\n", segment_count);

    nflavors = kdp_mynum_flavors;
    bcopy((char *)thread_flavor_array, (char *)flavors, sizeof(thread_flavor_array));

    for (i = 0; i < nflavors; i++)
        tstate_size += sizeof(mythread_state_flavor_t) +
            (flavors[i].count * sizeof(int));

    command_size = (segment_count + mach_section_count) *
        sizeof(struct segment_command) +
        thread_count * sizeof(struct thread_command) +
        tstate_size * thread_count;

    header_size = command_size + sizeof(struct mach_header);
    header = (vm_offset_t) command_buffer;

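    /*
     * Note: command_buffer is only 512 bytes; it is reused to stage the
     * Mach-O header and each load command in turn, and each piece is
     * transmitted before the buffer is overwritten.
     */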
    /*
     * Set up Mach-O header for currently executing 32 bit kernel.
     */
    printf("Generated Mach-O header size was %d\n", header_size);

    mh = (struct mach_header *) header;
    mh->magic = MH_MAGIC;
    mh->cputype = cpu_type();
    mh->cpusubtype = cpu_subtype();     /* XXX incorrect; should match kernel */
    mh->filetype = MH_CORE;
    mh->ncmds = segment_count + thread_count + mach_section_count;
    mh->sizeofcmds = command_size;
    mh->flags = 0;

    hoffset = sizeof(struct mach_header);       /* offset into header */
    foffset = round_page_32(header_size);       /* offset into file */
    /* Padding.. */
    if ((foffset - header_size) < (4 * sizeof(struct segment_command))) {
        /* Hack */
        foffset += ((4 * sizeof(struct segment_command)) - (foffset - header_size));
    }

    max_header_size = foffset;

    vmoffset = VM_MIN_ADDRESS;          /* offset into VM */

    /* Transmit the Mach-O MH_CORE header, and seek forward past the
     * area reserved for the segment and thread commands
     * to begin data transmission
     */

    if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(nfoffset), &nfoffset)) < 0) {
        printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
        return -1;
    }

    if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, sizeof(struct mach_header), (caddr_t) mh)) < 0) {
        printf("kdp_send_panic_packets failed with error %d\n", panic_error);
        return -1;
    }

    if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
        printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
        return (-1);
    }
    printf("Transmitting kernel state, please wait: ");

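    /*
     * Main transmit loop: the kernel's linked sections are handled first,
     * then the kernel map's remaining VM regions found via
     * vm_region_recurse_64(). Each pass emits one LC_SEGMENT command and,
     * unless the region is tagged VM_MEMORY_IOKIT, the region's data.
     */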
    while ((segment_count > 0) || (kret == KERN_SUCCESS)) {
        /* Check if we've transmitted all the kernel sections */
        if (num_sects_txed == mach_section_count - 1) {

            while (1) {

                /*
                 * Get region information for next region.
                 */

                vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
                if ((kret = vm_region_recurse_64(map,
                        &vmoffset, &size, &nesting_depth,
                        (vm_region_recurse_info_t)&vbr,
                        &vbrcount)) != KERN_SUCCESS) {
                    break;
                }

                if (vbr.is_submap) {
                    nesting_depth++;
                    continue;
                } else {
                    break;
                }
            }

            if (kret != KERN_SUCCESS)
                break;

            prot = vbr.protection;
            maxprot = vbr.max_protection;
            inherit = vbr.inheritance;
        } else {
            switch (num_sects_txed) {
            case 0:
                /* Transmit the kernel text section */
                vmoffset = sectTEXTB;
                size = sectSizeTEXT;
                break;
            case 1:
                vmoffset = sectDATAB;
                size = sectSizeDATA;
                break;
            case 2:
                vmoffset = sectPRELINKB;
                size = sectSizePRELINK;
                break;
            case 3:
                vmoffset = sectLINKB;
                size = sectSizeLINK;
                break;
            /* TODO the lowmem vector area may be useful, but its transmission is
             * disabled for now. The traceback table area should be transmitted
             * as well - that's indirected from 0x5080.
             */
            }
            num_sects_txed++;
        }
        /*
         * Fill in segment command structure.
         */

        if (hoffset > max_header_size)
            break;
        sc = (struct segment_command *) (header);
        sc->cmd = LC_SEGMENT;
        sc->cmdsize = sizeof(struct segment_command);
        sc->segname[0] = 0;
        sc->vmaddr = vmoffset;
        sc->vmsize = size;
        sc->fileoff = foffset;
        sc->filesize = size;
        sc->maxprot = maxprot;
        sc->initprot = prot;
        sc->nsects = 0;

        if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
            printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
            return -1;
        }

        if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, sizeof(struct segment_command), (caddr_t) sc)) < 0) {
            printf("kdp_send_panic_packets failed with error %d\n", panic_error);
            return -1;
        }

        /* Do not transmit memory tagged VM_MEMORY_IOKIT - instead, seek past that
         * region on the server - this creates a hole in the file
         */

        if ((vbr.user_tag != VM_MEMORY_IOKIT)) {

            if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
                printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
                return (-1);
            }

            txstart = vmoffset;

            if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, size, (caddr_t) txstart)) < 0) {
                printf("kdp_send_panic_packets failed with error %d\n", panic_error);
                return -1;
            }
        }

        hoffset += sizeof(struct segment_command);
        foffset += size;
        vmoffset += size;
        segment_count--;
    }
    tir1.header = header;
    tir1.hoffset = 0;
    tir1.flavors = flavors;
    tir1.tstate_size = tstate_size;

    /* Now send out the LC_THREAD load command, with the thread information
     * for the current activation.
     * Note that the corefile can contain LC_SEGMENT commands with file offsets
     * that point past the edge of the corefile, in the event that the last N
     * VM regions were all I/O mapped or otherwise non-transferable memory,
     * not followed by a normal VM region; i.e. there will be no hole that
     * reaches to the end of the core file.
     */
    kern_collectth_state(current_thread(), &tir1);

    if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
        printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
        return -1;
    }

    if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, tir1.hoffset, (caddr_t) header)) < 0) {
        printf("kdp_send_panic_packets failed with error %d\n", panic_error);
        return -1;
    }

    /* last packet */
    if ((panic_error = kdp_send_panic_pkt(KDP_EOF, NULL, 0, ((void *) 0))) < 0) {
        printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
        return (-1);
    }

    if (error == 0)
        error = error1;
    return (error);
}