/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>

#include <ppc/proc_reg.h>
#include <ppc/machparam.h>

#include <ppc/mappings.h>
#include <ppc/cpu_data.h>

#include <mach/thread_status.h>
#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_protos.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_udp.h>
#include <kdp/kdp_internal.h>

#include <ppc/misc_protos.h>
#include <mach/vm_map.h>
boolean_t kdp_trans_off = 0;
boolean_t kdp_read_io = 0;
unsigned kdp_vm_read(caddr_t, caddr_t, unsigned);
unsigned kdp_vm_write(caddr_t, caddr_t, unsigned);
extern vm_offset_t sectTEXTB, sectDATAB, sectLINKB, sectPRELINKB;
extern int sectSizeTEXT, sectSizeDATA, sectSizeLINK, sectSizePRELINK;
/* XXX prototypes which should be in a common header file */
addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);
int kdp_dump_trap(int type, struct savearea *regs);
/*
 * XXX the following prototype doesn't match the declaration because the
 * XXX actual declaration is wrong.
 */
extern int kdp_send_panic_packets(unsigned int request, char *corename,
    unsigned int length, caddr_t txstart);
typedef struct {
	int flavor;			/* the number for this flavor */
	int count;			/* count of ints in this flavor */
} mythread_state_flavor_t;
/* These will need to be uncommented and completed
 * if we support other architectures
 */
#if defined (__ppc__)
static mythread_state_flavor_t thread_flavor_array[] = {
	{PPC_THREAD_STATE, PPC_THREAD_STATE_COUNT},
};
#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array[] = {
	{i386_THREAD_STATE, i386_THREAD_STATE_COUNT},
};
#else
#error architecture not supported
#endif

static int kdp_mynum_flavors = 1;
static int MAX_TSTATE_FLAVORS = 1;
typedef struct {
	vm_offset_t header;
	int hoffset;
	mythread_state_flavor_t *flavors;
	int tstate_size;
} tir_t;

unsigned int not_in_kdp = 1;	/* Cleared when we begin to access vm functions in kdp */

char command_buffer[512];
// XXX static struct vm_object test_object;
	pp = pmap_find_phys(pmap, va);		/* Get the page number */
	if (!pp) return 0;			/* Just return if no translation */

	pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);	/* Shove in the page offset */
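	/* Illustrative arithmetic (not part of the original comments): if
	 * pmap_find_phys() returns physical page 0x1234 for a va ending in 0xA5C,
	 * the result is (0x1234 << 12) | 0xA5C = 0x1234A5C - the physical page
	 * base with the low 12 bits of the virtual address appended.
	 */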
/* Verify that src is valid, and physically copy len bytes from src to
 * dst, translating if necessary. If translation is enabled
 * (kdp_trans_off is 0), a non-zero kdp_pmap specifies the pmap to use
 * when translating src.
 */
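/* Illustrative use (not from the original source): a hypothetical caller that
 * wants to read eight bytes out of another task's address space might do
 *
 *	kdp_pmap = task_pmap;				// hypothetical pmap pointer
 *	n = kdp_vm_read((caddr_t)user_va, (caddr_t)buf, 8);
 *	kdp_pmap = 0;					// back to kernel_pmap
 *
 * A return value smaller than the request means the copy stopped at an
 * unmapped or untranslatable page.
 */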
unsigned kdp_vm_read(
	caddr_t src,
	caddr_t dst,
	unsigned len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
#ifdef KDP_VM_READ_DEBUG
	kprintf("kdp_vm_read1: src %x dst %x len %x - %08X %08X\n", src, dst, len,
	    ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
#endif

	cur_virt_src = (addr64_t)((unsigned int)src);
	cur_virt_dst = (addr64_t)((unsigned int)dst);
		resid = len;	/* Get the length to copy */
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
			goto exit;

		if (!mapping_phys_lookup((ppnum_t)(cur_virt_src >> 12), &dummy))
			return 0;	/* Can't read where there's not any memory */
		cnt = 4096 - (cur_virt_src & 0xFFF);	/* Get length left on page */
		if (cnt > (4096 - (cur_virt_dst & 0xFFF)))
			cnt = 4096 - (cur_virt_dst & 0xFFF);

		if (cnt > resid)
			cnt = resid;
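		/* Illustrative arithmetic (not part of the original comments): if
		 * cur_virt_src ends in offset 0xF80 there are 0x80 bytes left on the
		 * source page, and if cur_virt_dst ends in 0xFF0 only 0x10 bytes remain
		 * on the destination page, so cnt is clamped to 0x10 (and further to
		 * resid if fewer bytes are left to copy). Each pass therefore never
		 * crosses a page boundary on either side.
		 */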
		bcopy_phys(cur_virt_src, cur_phys_dst, cnt);	/* Copy stuff over */
		if (kdp_pmap) pmap = kdp_pmap;		/* If special pmap, use it */
		else pmap = kernel_pmap;		/* otherwise, use kernel's */
		/* Always translate the destination using the kernel_pmap. */
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
			goto exit;

		if ((cur_phys_src = kdp_vtophys(pmap, cur_virt_src)) == 0)
			goto exit;

		if (!mapping_phys_lookup((ppnum_t)(cur_phys_src >> 12), &dummy))
			goto exit;	/* Can't read where there's not any memory */
		cnt = 4096 - (cur_virt_src & 0xFFF);	/* Get length left on page */
		if (cnt > (4096 - (cur_virt_dst & 0xFFF)))
			cnt = 4096 - (cur_virt_dst & 0xFFF);

		if (cnt > resid)
			cnt = resid;
#ifdef KDP_VM_READ_DEBUG
		kprintf("kdp_vm_read2: pmap %08X, virt %016LLX, phys %016LLX\n",
		    pmap, cur_virt_src, cur_phys_src);
#endif

		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);	/* Copy stuff over */
exit:
#ifdef KDP_VM_READ_DEBUG
	kprintf("kdp_vm_read: ret %08X\n", len - resid);
#endif
	return (len - resid);
}
unsigned kdp_vm_write(
	caddr_t src,
	caddr_t dst,
	unsigned len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;
#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_vm_write: src %x dst %x len %x - %08X %08X\n", src, dst, len,
	    ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
#endif

	cur_virt_src = (addr64_t)((unsigned int)src);
	cur_virt_dst = (addr64_t)((unsigned int)dst);
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
			goto exit;

		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;
		cnt_src = ((cur_phys_src + NBPG) & (-NBPG)) - cur_phys_src;
		cnt_dst = ((cur_phys_dst + NBPG) & (-NBPG)) - cur_phys_dst;
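		/* Illustrative arithmetic (not part of the original comments): with
		 * NBPG = 4096, a physical address of 0x1234F00 gives
		 * ((0x1234F00 + 0x1000) & ~0xFFF) - 0x1234F00 = 0x100, i.e. the bytes
		 * remaining on its 4 KB page (a page-aligned address yields a full
		 * 0x1000). The copy below is then limited to the smaller remainder.
		 */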
		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);	/* Copy stuff over */
		sync_cache64(cur_phys_dst, cnt);		/* Sync caches */
exit:
	return (len - resid);
}
void
kern_collectth_state(thread_t thread, tir_t *t)
{
	mythread_state_flavor_t *flavors;
	struct thread_command *tc;
	/*
	 * Fill in thread command structure.
	 */
	hoffset = t->hoffset;
	flavors = t->flavors;

	tc = (struct thread_command *) (header + hoffset);
	tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
	hoffset += sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < kdp_mynum_flavors; i++) {
		*(mythread_state_flavor_t *)(header + hoffset) = flavors[i];
		hoffset += sizeof(mythread_state_flavor_t);

		if (machine_thread_get_kern_state(thread, flavors[i].flavor,
		    (thread_state_t)(header + hoffset),
		    &flavors[i].count) != KERN_SUCCESS)
			printf("Failure in machine_thread_get_kern_state()\n");
		hoffset += flavors[i].count * sizeof(int);
	}
	t->hoffset = hoffset;
}
int
kdp_dump_trap(
	int type,
	__unused struct savearea *regs)
{
	printf("An unexpected trap (type %d) occurred during the kernel dump, terminating.\n", type);
	kdp_send_panic_pkt(KDP_EOF, NULL, 0, ((void *) 0));
	abort_panic_transfer();
	kdp_flag &= ~KDP_PANIC_DUMP_ENABLED;
	kdp_flag &= ~PANIC_CORE_ON_NMI;
	kdp_flag &= ~PANIC_LOG_DUMP;

	kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
	return 0;
}
/*
 * Kernel dump (limited to currently executing 32 bit mach_kernel only)
 */
	unsigned int thread_count, segment_count;
	unsigned int command_size = 0, header_size = 0, tstate_size = 0;
	unsigned int hoffset = 0, foffset = 0, nfoffset = 0, vmoffset = 0;
	unsigned int max_header_size = 0;

	struct mach_header *mh;
	struct segment_command *sc;

	vm_prot_t maxprot = 0;
	vm_inherit_t inherit = 0;

	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];

	int nesting_depth = 0;
	kern_return_t kret = 0;
	struct vm_region_submap_info_64 vbr;

	unsigned int txstart = 0;
	unsigned int mach_section_count = 4;
	unsigned int num_sects_txed = 0;
	not_in_kdp = 0;		/* Tell vm functions not to acquire locks */
	segment_count = get_vmmap_entries(map);

	printf("Kernel map has %d entries\n", segment_count);
	nflavors = kdp_mynum_flavors;
	bcopy((char *)thread_flavor_array, (char *)flavors, sizeof(thread_flavor_array));

	for (i = 0; i < nflavors; i++)
		tstate_size += sizeof(mythread_state_flavor_t) +
		    (flavors[i].count * sizeof(int));
	command_size = (segment_count + mach_section_count) *
	    sizeof(struct segment_command) +
	    thread_count * sizeof(struct thread_command) +
	    tstate_size * thread_count;
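	/* Illustrative numbers (not part of the original comments): a 32-bit
	 * segment_command is 56 bytes and a thread_command is 8 bytes, so a map
	 * with 200 entries plus the 4 kernel sections and a single thread needs
	 * 204 * 56 + 1 * 8 + tstate_size bytes of load commands.
	 */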
	header_size = command_size + sizeof(struct mach_header);
	header = (vm_offset_t) command_buffer;
	/*
	 * Set up Mach-O header for currently executing 32 bit kernel.
	 */
	printf("Generated Mach-O header size was %d\n", header_size);
	mh = (struct mach_header *) header;
	mh->magic = MH_MAGIC;
	mh->cputype = cpu_type();
	mh->cpusubtype = cpu_subtype();		/* XXX incorrect; should match kernel */
	mh->filetype = MH_CORE;
	mh->ncmds = segment_count + thread_count + mach_section_count;
	mh->sizeofcmds = command_size;
	hoffset = sizeof(struct mach_header);	/* offset into header */
	foffset = round_page_32(header_size);	/* offset into file */

	if ((foffset - header_size) < (4 * sizeof(struct segment_command))) {
		foffset += ((4 * sizeof(struct segment_command)) - (foffset - header_size));
	}

	max_header_size = foffset;
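	/* Illustrative numbers (not part of the original comments): a 32-bit
	 * struct segment_command is 56 bytes, so four of them need 224 bytes of
	 * slack. If header_size were 0x1F90, round_page_32() makes foffset 0x2000,
	 * leaving 0x70 (112) bytes of slack, so foffset is bumped by another 0x70
	 * to 0x2070 to guarantee room for four extra segment commands.
	 */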
	vmoffset = VM_MIN_ADDRESS;		/* offset into VM */
	/* Transmit the Mach-O MH_CORE header, and seek forward past the
	 * area reserved for the segment and thread commands
	 * to begin data transmission
	 */
	if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(nfoffset), &nfoffset)) < 0) {
		printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
	if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, sizeof(struct mach_header), (caddr_t) mh)) < 0) {
		printf("kdp_send_panic_packets failed with error %d\n", panic_error);
	if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
		printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
	printf("Transmitting kernel state, please wait: ");
	while ((segment_count > 0) || (kret == KERN_SUCCESS)) {
		/* Check if we've transmitted all the kernel sections */
		if (num_sects_txed == mach_section_count - 1) {
			/*
			 * Get region information for next region.
			 */
			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
			if ((kret = vm_region_recurse_64(map,
			    &vmoffset, &size, &nesting_depth,
			    (vm_region_recurse_info_t)&vbr,
			    &vbrcount)) != KERN_SUCCESS) {
			if (kret != KERN_SUCCESS)
				break;
			prot = vbr.protection;
			maxprot = vbr.max_protection;
			inherit = vbr.inheritance;
			switch (num_sects_txed) {
			case 0:
				/* Transmit the kernel text section */
				vmoffset = sectTEXTB;
				size = sectSizeTEXT;
				break;
			case 1:
				vmoffset = sectDATAB;
				size = sectSizeDATA;
				break;
			case 2:
				vmoffset = sectPRELINKB;
				size = sectSizePRELINK;
				break;
			case 3:
				vmoffset = sectLINKB;
				size = sectSizeLINK;
				break;
			}
		/* TODO the lowmem vector area may be useful, but its transmission is
		 * disabled for now. The traceback table area should be transmitted
		 * as well - that's indirected from 0x5080.
		 */
		/*
		 * Fill in segment command structure.
		 */
		if (hoffset > max_header_size)
			break;
		sc = (struct segment_command *) (header);
		sc->cmd = LC_SEGMENT;
		sc->cmdsize = sizeof(struct segment_command);
		sc->vmaddr = vmoffset;
		sc->fileoff = foffset;
		sc->maxprot = maxprot;
		if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
			printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
		if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, sizeof(struct segment_command), (caddr_t) sc)) < 0) {
			printf("kdp_send_panic_packets failed with error %d\n", panic_error);
		/* Do not transmit memory tagged VM_MEMORY_IOKIT - instead, seek past that
		 * region on the server - this creates a hole in the file
		 */

		if (vbr.user_tag != VM_MEMORY_IOKIT) {
			if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
				printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
			if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, size, (caddr_t) txstart)) < 0) {
				printf("kdp_send_panic_packets failed with error %d\n", panic_error);
		hoffset += sizeof(struct segment_command);
	tir1.header = header;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;
	/* Now send out the LC_THREAD load command, with the thread information
	 * for the current activation.
	 * Note that the corefile can contain LC_SEGMENT commands with file offsets
	 * that point past the edge of the corefile, in the event that the last N
	 * VM regions were all I/O mapped or otherwise non-transferable memory,
	 * not followed by a normal VM region; i.e. there will be no hole that
	 * reaches to the end of the core file.
	 */
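	/* Rough layout of the resulting corefile (illustrative sketch, not part of
	 * the original comments):
	 *
	 *	file offset 0:		struct mach_header (MH_CORE)
	 *				segment_command for each VM region / kernel section
	 *				thread_command plus the thread state flavors
	 *	max_header_size:	raw segment data, one run per LC_SEGMENT, with
	 *				holes where VM_MEMORY_IOKIT regions were skipped
	 */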
	kern_collectth_state(current_thread(), &tir1);
	if ((panic_error = kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
		printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
	if ((panic_error = kdp_send_panic_packets(KDP_DATA, NULL, tir1.hoffset, (caddr_t) header)) < 0) {
		printf("kdp_send_panic_packets failed with error %d\n", panic_error);
	if ((panic_error = kdp_send_panic_pkt(KDP_EOF, NULL, 0, ((void *) 0))) < 0)
		printf("kdp_send_panic_pkt failed with error %d\n", panic_error);