/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>

#include <vm/pmap.h>

#include <ppc/proc_reg.h>
#include <ppc/machparam.h>
#include <ppc/mem.h>
#include <ppc/pmap.h>
#include <ppc/mappings.h>
#include <ppc/cpu_data.h>

#include <mach/thread_status.h>
#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_protos.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_udp.h>
#include <kdp/kdp_internal.h>

#include <ppc/misc_protos.h>
#include <mach/vm_map.h>

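/*
 * Debugger-controlled state: a non-zero kdp_pmap selects the address space
 * used to translate source addresses in kdp_vm_read(); kdp_trans_off
 * disables translation entirely (addresses are treated as physical); and
 * kdp_read_io permits reads from addresses that have no physical-memory
 * backing, which are otherwise refused.
 */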
pmap_t kdp_pmap = 0;
boolean_t kdp_trans_off = 0;
boolean_t kdp_read_io = 0;

unsigned kdp_vm_read(caddr_t, caddr_t, unsigned);
unsigned kdp_vm_write(caddr_t, caddr_t, unsigned);

extern vm_offset_t sectTEXTB, sectDATAB, sectLINKB, sectPRELINKB;
extern int sectSizeTEXT, sectSizeDATA, sectSizeLINK, sectSizePRELINK;

/* XXX prototypes which should be in a common header file */
addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);
int kern_dump(void);
int kdp_dump_trap(int type, struct savearea *regs);
/*
 * XXX the following prototype doesn't match the declaration because the
 * XXX actual declaration is wrong.
 */
extern int kdp_send_panic_packets(unsigned int request, char *corename,
    unsigned int length, caddr_t txstart);

typedef struct {
    int flavor;    /* the number for this flavor */
    int count;     /* count of ints in this flavor */
} mythread_state_flavor_t;

/*
 * These will need to be uncommented and completed
 * if we support other architectures.
 */

/*
#if defined (__ppc__)
*/
static mythread_state_flavor_t thread_flavor_array[] = {
    {PPC_THREAD_STATE, PPC_THREAD_STATE_COUNT},
};
/*
#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array [] = {
    {i386_THREAD_STATE, i386_THREAD_STATE_COUNT},
};
#else
#error architecture not supported
#endif
*/
static int kdp_mynum_flavors = 1;
static int MAX_TSTATE_FLAVORS = 1;

typedef struct {
    vm_offset_t header;
    int hoffset;
    mythread_state_flavor_t *flavors;
    int tstate_size;
} tir_t;

unsigned int not_in_kdp = 1;    /* Cleared when we begin to access vm functions in kdp */

char command_buffer[512];

// XXX static struct vm_object test_object;

/*
 * Translate a virtual address to a physical address using the given pmap,
 * returning 0 if no translation exists.
 */
addr64_t
kdp_vtophys(
    pmap_t pmap,
    addr64_t va)
{
    addr64_t pa;
    ppnum_t pp;

    pp = pmap_find_phys(pmap, va);      /* Get the page number */
    if (!pp)
        return 0;                       /* Just return if no translation */

    pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);  /* Shove in the page offset */
    return (pa);
}

/*
 * Verify that src is valid, and physically copy len bytes from src to
 * dst, translating if necessary. If translation is enabled
 * (kdp_trans_off is 0), a non-zero kdp_pmap specifies the pmap to use
 * when translating src.
 */
unsigned kdp_vm_read(
    caddr_t src,
    caddr_t dst,
    unsigned len)
{
    addr64_t cur_virt_src, cur_virt_dst;
    addr64_t cur_phys_src, cur_phys_dst;
    unsigned resid, cnt;
    unsigned int dummy;
    pmap_t pmap;

#ifdef KDP_VM_READ_DEBUG
    kprintf("kdp_vm_read1: src %x dst %x len %x - %08X %08X\n", src, dst, len,
        ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
#endif

    cur_virt_src = (addr64_t)((unsigned int)src);
    cur_virt_dst = (addr64_t)((unsigned int)dst);

    if (kdp_trans_off) {

        resid = len;                                    /* Get the length to copy */

        while (resid != 0) {

            if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
                goto exit;

            if (kdp_read_io == 0)
                if (!mapping_phys_lookup((ppnum_t)(cur_virt_src >> 12), &dummy))
                    return 0;                           /* Can't read where there's not any memory */

            cnt = 4096 - (cur_virt_src & 0xFFF);        /* Get length left on page */
            if (cnt > (4096 - (cur_virt_dst & 0xFFF)))
                cnt = 4096 - (cur_virt_dst & 0xFFF);

            if (cnt > resid)
                cnt = resid;

            bcopy_phys(cur_virt_src, cur_phys_dst, cnt);    /* Copy stuff over */

            cur_virt_src += cnt;
            cur_virt_dst += cnt;
            resid -= cnt;
        }

    } else {

        resid = len;

        if (kdp_pmap)
            pmap = kdp_pmap;                            /* If special pmap, use it */
        else
            pmap = kernel_pmap;                         /* otherwise, use kernel's */

        while (resid != 0) {
            /* Always translate the destination using the kernel_pmap. */
            if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
                goto exit;

            if ((cur_phys_src = kdp_vtophys(pmap, cur_virt_src)) == 0)
                goto exit;

            if (kdp_read_io == 0)
                if (!mapping_phys_lookup((ppnum_t)(cur_phys_src >> 12), &dummy))
                    goto exit;                          /* Can't read where there's not any memory */

            cnt = 4096 - (cur_virt_src & 0xFFF);        /* Get length left on page */
            if (cnt > (4096 - (cur_virt_dst & 0xFFF)))
                cnt = 4096 - (cur_virt_dst & 0xFFF);

            if (cnt > resid)
                cnt = resid;

#ifdef KDP_VM_READ_DEBUG
            kprintf("kdp_vm_read2: pmap %08X, virt %016LLX, phys %016LLX\n",
                pmap, cur_virt_src, cur_phys_src);
#endif

            bcopy_phys(cur_phys_src, cur_phys_dst, cnt);    /* Copy stuff over */

            cur_virt_src += cnt;
            cur_virt_dst += cnt;
            resid -= cnt;
        }
    }
exit:
#ifdef KDP_VM_READ_DEBUG
    kprintf("kdp_vm_read: ret %08X\n", len - resid);
#endif
    return (len - resid);
}

/*
 * Physically copy len bytes from src to dst, translating both addresses
 * through the kernel pmap, and flush the destination from the caches.
 */
unsigned kdp_vm_write(
    caddr_t src,
    caddr_t dst,
    unsigned len)
{
    addr64_t cur_virt_src, cur_virt_dst;
    addr64_t cur_phys_src, cur_phys_dst;
    unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
    printf("kdp_vm_write: src %x dst %x len %x - %08X %08X\n", src, dst, len,
        ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
#endif

    cur_virt_src = (addr64_t)((unsigned int)src);
    cur_virt_dst = (addr64_t)((unsigned int)dst);

    resid = len;

    while (resid != 0) {
        if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
            goto exit;

        if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
            goto exit;

        cnt_src = ((cur_phys_src + NBPG) & (-NBPG)) - cur_phys_src;
        cnt_dst = ((cur_phys_dst + NBPG) & (-NBPG)) - cur_phys_dst;

        if (cnt_src > cnt_dst)
            cnt = cnt_dst;
        else
            cnt = cnt_src;
        if (cnt > resid)
            cnt = resid;

        bcopy_phys(cur_phys_src, cur_phys_dst, cnt);    /* Copy stuff over */
        sync_cache64(cur_phys_dst, cnt);                /* Sync caches */

        cur_virt_src += cnt;
        cur_virt_dst += cnt;
        resid -= cnt;
    }
exit:
    return (len - resid);
}

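/*
 * Fill in the LC_THREAD command for the given thread in the Mach-O header
 * buffer described by t: the thread_command itself, followed by a flavor
 * descriptor and the corresponding kernel thread state for each flavor.
 * t->hoffset is advanced past what was written.
 */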
static void
kern_collectth_state(thread_t thread, tir_t *t)
{
    vm_offset_t header;
    int hoffset, i;
    mythread_state_flavor_t *flavors;
    struct thread_command *tc;

    /*
     * Fill in thread command structure.
     */
    header = t->header;
    hoffset = t->hoffset;
    flavors = t->flavors;

    tc = (struct thread_command *) (header + hoffset);
    tc->cmd = LC_THREAD;
    tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
    hoffset += sizeof(struct thread_command);
    /*
     * Follow with a struct thread_state_flavor and
     * the appropriate thread state struct for each
     * thread state flavor.
     */
    for (i = 0; i < kdp_mynum_flavors; i++) {
        *(mythread_state_flavor_t *)(header + hoffset) = flavors[i];
        hoffset += sizeof(mythread_state_flavor_t);

        if (machine_thread_get_kern_state(thread, flavors[i].flavor,
            (thread_state_t) (header + hoffset),
            &flavors[i].count) != KERN_SUCCESS)
            printf("Failure in machine_thread_get_kern_state()\n");
        hoffset += flavors[i].count * sizeof(int);
    }

    t->hoffset = hoffset;
}

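/*
 * Called if an unexpected trap is taken while the dump is in progress:
 * abort the transfer, clear the panic-dump flags, reset KDP, and re-enter
 * the debugger.
 */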
int
kdp_dump_trap(
    int type,
    __unused struct savearea *regs)
{
    printf("An unexpected trap (type %d) occurred during the kernel dump, terminating.\n", type);
    kdp_send_panic_pkt(KDP_EOF, NULL, 0, ((void *) 0));
    abort_panic_transfer();
    kdp_flag &= ~KDP_PANIC_DUMP_ENABLED;
    kdp_flag &= ~PANIC_CORE_ON_NMI;
    kdp_flag &= ~PANIC_LOG_DUMP;

    kdp_reset();

    kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
    return (0);
}

/*
 * Kernel dump (limited to currently executing 32 bit mach_kernel only).
 */
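/*
 * The core file produced is a 32-bit Mach-O MH_CORE image: a mach_header
 * followed by LC_SEGMENT commands for the kernel's own sections and the
 * entries of the kernel VM map, then an LC_THREAD command for the current
 * thread, with the segment data placed at the file offsets recorded in
 * each command. Everything is streamed to the remote core server via
 * kdp_send_panic_pkt()/kdp_send_panic_packets().
 */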
int
kern_dump(void)
{
    int error = 0;
    vm_map_t map;
    unsigned int thread_count, segment_count;
    unsigned int command_size = 0, header_size = 0, tstate_size = 0;
    unsigned int hoffset = 0, foffset = 0, nfoffset = 0, vmoffset = 0;
    unsigned int max_header_size = 0;
    vm_offset_t header;
    struct mach_header *mh;
    struct segment_command *sc;
    vm_size_t size;
    vm_prot_t prot = 0;
    vm_prot_t maxprot = 0;
    vm_inherit_t inherit = 0;
    int error1 = 0;
    mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
    vm_size_t nflavors;
    vm_size_t i;
    int nesting_depth = 0;
    kern_return_t kret = 0;
    struct vm_region_submap_info_64 vbr;
    int vbrcount = 0;
    tir_t tir1;

    int panic_error = 0;
    unsigned int txstart = 0;
    unsigned int mach_section_count = 4;
    unsigned int num_sects_txed = 0;

    map = kernel_map;
    not_in_kdp = 0;        /* Tell vm functions not to acquire locks */

    thread_count = 1;
    segment_count = get_vmmap_entries(map);

    printf("Kernel map has %d entries\n", segment_count);

    nflavors = kdp_mynum_flavors;
    bcopy((char *)thread_flavor_array, (char *)flavors, sizeof(thread_flavor_array));

    for (i = 0; i < nflavors; i++)
        tstate_size += sizeof(mythread_state_flavor_t) +
            (flavors[i].count * sizeof(int));

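    /*
     * Reserve command space for one segment command per kernel VM map entry
     * plus one for each of the mach_section_count (TEXT, DATA, PRELINK,
     * LINK) kernel sections, plus the thread command and its thread state.
     */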
    command_size = (segment_count + mach_section_count) *
        sizeof(struct segment_command) +
        thread_count * sizeof(struct thread_command) +
        tstate_size * thread_count;

    header_size = command_size + sizeof(struct mach_header);
    header = (vm_offset_t) command_buffer;

    /*
     * Set up Mach-O header for currently executing 32 bit kernel.
     */
    printf("Generated Mach-O header size was %d\n", header_size);

    mh = (struct mach_header *) header;
    mh->magic = MH_MAGIC;
    mh->cputype = cpu_type();
    mh->cpusubtype = cpu_subtype();    /* XXX incorrect; should match kernel */
    mh->filetype = MH_CORE;
    mh->ncmds = segment_count + thread_count + mach_section_count;
    mh->sizeofcmds = command_size;
    mh->flags = 0;

    hoffset = sizeof(struct mach_header);     /* offset into header */
    foffset = round_page_32(header_size);     /* offset into file */
    /* Padding.. */
    if ((foffset - header_size) < (4 * sizeof(struct segment_command))) {
        /* Hack */
        foffset += ((4 * sizeof(struct segment_command)) - (foffset - header_size));
    }

    max_header_size = foffset;

    vmoffset = VM_MIN_ADDRESS;                /* offset into VM */

    /*
     * Transmit the Mach-O MH_CORE header, and seek forward past the
     * area reserved for the segment and thread commands
     * to begin data transmission.
     */
    if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(nfoffset), &nfoffset)) < 0) {
        printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
        return -1;
    }

    if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, sizeof(struct mach_header), (caddr_t) mh)) < 0) {
        printf("kdp_send_panic_packets failed with error %d\n", panic_error);
        return -1;
    }

    if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
        printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
        return (-1);
    }
    printf("Transmitting kernel state, please wait: ");

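    /*
     * Main transmit loop: the first few iterations send the fixed kernel
     * sections via the switch below; once num_sects_txed reaches its limit,
     * each iteration instead asks vm_region_recurse_64() for the next
     * region of the kernel map. For every region a segment command is
     * written at the current header offset, and the region's contents are
     * transmitted at the current file offset unless the region is tagged
     * VM_MEMORY_IOKIT.
     */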
    while ((segment_count > 0) || (kret == KERN_SUCCESS)) {
        /* Check if we've transmitted all the kernel sections */
        if (num_sects_txed == mach_section_count - 1) {

            while (1) {

                /*
                 * Get region information for next region.
                 */
                vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
                if ((kret = vm_region_recurse_64(map,
                    &vmoffset, &size, &nesting_depth,
                    (vm_region_recurse_info_t)&vbr,
                    &vbrcount)) != KERN_SUCCESS) {
                    break;
                }

                if (vbr.is_submap) {
                    nesting_depth++;
                    continue;
                } else {
                    break;
                }
            }

            if (kret != KERN_SUCCESS)
                break;

            prot = vbr.protection;
            maxprot = vbr.max_protection;
            inherit = vbr.inheritance;
        } else {
            switch (num_sects_txed) {
            case 0:
                /* Transmit the kernel text section */
                vmoffset = sectTEXTB;
                size = sectSizeTEXT;
                break;
            case 1:
                vmoffset = sectDATAB;
                size = sectSizeDATA;
                break;
            case 2:
                vmoffset = sectPRELINKB;
                size = sectSizePRELINK;
                break;
            case 3:
                vmoffset = sectLINKB;
                size = sectSizeLINK;
                break;
            /*
             * TODO: the lowmem vector area may be useful, but its transmission is
             * disabled for now. The traceback table area should be transmitted
             * as well - that's indirected from 0x5080.
             */
            }
            num_sects_txed++;
        }
        /*
         * Fill in segment command structure.
         */
        if (hoffset > max_header_size)
            break;
        sc = (struct segment_command *) (header);
        sc->cmd = LC_SEGMENT;
        sc->cmdsize = sizeof(struct segment_command);
        sc->segname[0] = 0;
        sc->vmaddr = vmoffset;
        sc->vmsize = size;
        sc->fileoff = foffset;
        sc->filesize = size;
        sc->maxprot = maxprot;
        sc->initprot = prot;
        sc->nsects = 0;

        if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
            printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
            return -1;
        }

        if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, sizeof(struct segment_command), (caddr_t) sc)) < 0) {
            printf("kdp_send_panic_packets failed with error %d\n", panic_error);
            return -1;
        }

        /*
         * Do not transmit memory tagged VM_MEMORY_IOKIT - instead, seek past that
         * region on the server - this creates a hole in the file.
         */
        if ((vbr.user_tag != VM_MEMORY_IOKIT)) {

            if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
                printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
                return (-1);
            }

            txstart = vmoffset;

            if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, size, (caddr_t) txstart)) < 0) {
                printf("kdp_send_panic_packets failed with error %d\n", panic_error);
                return -1;
            }
        }

        hoffset += sizeof(struct segment_command);
        foffset += size;
        vmoffset += size;
        segment_count--;
    }
    tir1.header = header;
    tir1.hoffset = 0;
    tir1.flavors = flavors;
    tir1.tstate_size = tstate_size;

    /*
     * Now send out the LC_THREAD load command, with the thread information
     * for the current activation.
     * Note that the corefile can contain LC_SEGMENT commands with file offsets
     * that point past the edge of the corefile, in the event that the last N
     * VM regions were all I/O mapped or otherwise non-transferable memory,
     * not followed by a normal VM region; i.e. there will be no hole that
     * reaches to the end of the core file.
     */
    kern_collectth_state(current_thread(), &tir1);

    if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
        printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
        return -1;
    }

    if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, tir1.hoffset, (caddr_t) header)) < 0) {
        printf("kdp_send_panic_packets failed with error %d\n", panic_error);
        return -1;
    }

    /* last packet */
    if ((panic_error = kdp_send_panic_pkt(KDP_EOF, NULL, 0, ((void *) 0))) < 0) {
        printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
        return (-1);
    }

    if (error == 0)
        error = error1;
    return (error);
}