/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <libsa/types.h>

#include <vm/vm_map.h>
#include <i386/pmap.h>

#include <kdp/kdp_core.h>
#include <kdp/kdp_internal.h>
#include <mach-o/loader.h>
#include <mach/vm_map.h>
#include <mach/vm_statistics.h>
#include <mach/thread_status.h>
#include <i386/thread.h>

#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

unsigned kdp_vm_read( caddr_t, caddr_t, unsigned);
unsigned kdp_vm_write( caddr_t, caddr_t, unsigned);

boolean_t kdp_trans_off = 0;
uint32_t kdp_src_high32 = 0;
extern pmap_paddr_t avail_start, avail_end;
extern void bcopy_phys(addr64_t from, addr64_t to, int size);
static addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);
unsigned int not_in_kdp = 1; /* Cleared when we begin to access vm functions in kdp */

extern vm_offset_t sectTEXTB, sectDATAB, sectLINKB, sectPRELINKB;
extern int sectSizeTEXT, sectSizeDATA, sectSizeLINK, sectSizePRELINK;
int	kdp_dump_trap(int type, x86_saved_state32_t *regs);

typedef struct {
	int			flavor;	/* the number for this flavor */
	mach_msg_type_number_t	count;	/* count of ints in this flavor */
} mythread_state_flavor_t;

static mythread_state_flavor_t thread_flavor_array[] = {
	{x86_THREAD_STATE32, x86_THREAD_STATE32_COUNT}
};

static int kdp_mynum_flavors = 1;
static int MAX_TSTATE_FLAVORS = 1;
typedef struct {
	vm_offset_t		header;		/* staging buffer for the Mach-O header/commands */
	int			hoffset;	/* current offset into that buffer */
	mythread_state_flavor_t	*flavors;
	int			tstate_size;
} tir_t;

char command_buffer[512];
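
/*
 * Note (illustrative, not in the original source): command_buffer is the
 * staging area that the dump path points tir_t.header at. The Mach-O header,
 * each LC_SEGMENT command and the LC_THREAD command are built here in place
 * and then handed to kdp_send_crashdump_data() for transmission.
 */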
static addr64_t
kdp_vtophys(
	pmap_t pmap,
	addr64_t va)
{
	addr64_t	pa;
	ppnum_t		pp;

	/* Clear high 32 - pmap_find_phys() may panic() otherwise */
	va &= 0xFFFFFFFFULL;

	pp = pmap_find_phys(pmap, va);
	if (!pp)
		return 0;

	pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);

	return (pa);
}
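
/*
 * Note (illustrative, hypothetical values): pmap_find_phys() returns a
 * physical page number, so the physical address is rebuilt by shifting the
 * page number into place and re-attaching the low 12 bits of the virtual
 * address. For example, pp == 0x1A2B and va == 0xC0123456 would yield
 * (0x1A2B << 12) | 0x456 == 0x1A2B456.
 */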
unsigned kdp_vm_read(
	caddr_t src,
	caddr_t dst,
	unsigned len)
{
	addr64_t cur_virt_src = (addr64_t)((unsigned int)src | (((uint64_t)kdp_src_high32) << 32));
	addr64_t cur_virt_dst = (addr64_t)((unsigned int)dst);
	addr64_t cur_phys_dst, cur_phys_src;
	unsigned resid = len;
	unsigned cnt = 0;
	pmap_t src_pmap = kernel_pmap;

	/* If a different pmap has been specified with kdp_pmap, use it to translate the
	 * source (cur_virt_src); otherwise, the source is translated using the
	 * kernel_pmap.
	 */
	if (kdp_pmap)
		src_pmap = kdp_pmap;

	while (resid != 0) {
		/* Translate, unless kdp_trans_off is set */
		if (!kdp_trans_off) {
			if (!(cur_phys_src = kdp_vtophys(src_pmap,
				    cur_virt_src)))
				goto exit;
		} else
			cur_phys_src = cur_virt_src;

		/* Always translate the destination buffer using the kernel_pmap */
		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
			goto exit;

		/* Validate physical page numbers when performing a crashdump */
		if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src)))
			goto exit;

		/* Get length left on page */
		cnt = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (cnt > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			cnt = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt > resid)
			cnt = resid;

		/* Do a physical copy */
		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}
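
/*
 * Note (illustrative): kdp_vm_read() copies at most one physical page per
 * loop iteration, since a virtually contiguous range need not be physically
 * contiguous. The return value is the number of bytes actually copied
 * (len - resid); a short return means a translation or validation step
 * failed part way through the request.
 */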
unsigned kdp_vm_write(
	caddr_t src,
	caddr_t dst,
	unsigned len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_vm_write: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
#endif

	cur_virt_src = (addr64_t)((unsigned int)src);
	cur_virt_dst = (addr64_t)((unsigned int)dst);

	resid = len;

	while (resid != 0) {
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
			goto exit;

		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;

		/* Bytes remaining on the source and destination pages */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);

		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);	/* Copy stuff over */

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}
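
/*
 * Note (illustrative): unlike kdp_vm_read(), kdp_vm_write() translates both
 * the source and the destination with kernel_pmap and never consults
 * kdp_pmap or kdp_trans_off, so debugger writes are limited to mapped
 * kernel-virtual addresses.
 */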
static void
kern_collectth_state(thread_t thread, tir_t *t)
{
	vm_offset_t	header;
	int		hoffset, i;
	mythread_state_flavor_t	*flavors;
	struct thread_command	*tc;

	/*
	 *	Fill in thread command structure.
	 */
	header = t->header;
	hoffset = t->hoffset;
	flavors = t->flavors;

	tc = (struct thread_command *) (header + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
	hoffset += sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < kdp_mynum_flavors; i++) {
		*(mythread_state_flavor_t *)(header + hoffset) =
		    flavors[i];
		hoffset += sizeof(mythread_state_flavor_t);
		/* Locate and obtain the non-volatile register context
		 * for this kernel thread. This should ideally be
		 * encapsulated in machine_thread_get_kern_state()
		 * but that routine appears to have been co-opted
		 * by CHUD to obtain pre-interrupt state.
		 */
		if (flavors[i].flavor == x86_THREAD_STATE32) {
			x86_thread_state32_t *tstate = (x86_thread_state32_t *) (header + hoffset);
			vm_offset_t kstack;

			bzero(tstate, x86_THREAD_STATE32_COUNT * sizeof(int));
			if ((kstack = thread->kernel_stack) != 0) {
				struct x86_kernel_state32 *iks = STACK_IKS(kstack);
				tstate->ebx = iks->k_ebx;
				tstate->esp = iks->k_esp;
				tstate->ebp = iks->k_ebp;
				tstate->edi = iks->k_edi;
				tstate->esi = iks->k_esi;
				tstate->eip = iks->k_eip;
			}
		}
		else if (machine_thread_get_kern_state(thread,
			flavors[i].flavor, (thread_state_t) (header + hoffset),
			&flavors[i].count) != KERN_SUCCESS)
			printf ("Failure in machine_thread_get_kern_state()\n");
		hoffset += flavors[i].count * sizeof(int);
	}

	t->hoffset = hoffset;
}
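
/*
 * Note (illustrative): for the single x86_THREAD_STATE32 flavor used here,
 * kern_collectth_state() lays the data out in the header buffer as
 *
 *	struct thread_command			(cmd = LC_THREAD)
 *	mythread_state_flavor_t			(flavor, count)
 *	int[x86_THREAD_STATE32_COUNT]		(the register state itself)
 *
 * which is the (flavor, count, state) tuple layout an LC_THREAD command
 * carries in a Mach-O core file.
 */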
/* Intended to be called from the kernel trap handler if an unrecoverable fault
 * occurs during a crashdump (which shouldn't happen since we validate mappings
 * and so on). This should be reworked to attempt some form of recovery.
 */
int
kdp_dump_trap(
	int type,
	__unused x86_saved_state32_t	*saved_state)
{
	printf ("An unexpected trap (type %d) occurred during the system dump, terminating.\n", type);
	kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0));
	abort_panic_transfer();
	kdp_flag &= ~KDP_PANIC_DUMP_ENABLED;
	kdp_flag &= ~PANIC_CORE_ON_NMI;
	kdp_flag &= ~PANIC_LOG_DUMP;

	kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
	return (0);
}
int
kdp_panic_dump(void)
{
	unsigned int thread_count, segment_count;
	unsigned int command_size = 0, header_size = 0, tstate_size = 0;
	unsigned int hoffset = 0, foffset = 0, nfoffset = 0, vmoffset = 0;
	unsigned int max_header_size = 0;
	vm_offset_t	header;
	struct mach_header	*mh;
	struct segment_command	*sc;
	vm_size_t	size;
	vm_prot_t	prot = 0;
	vm_prot_t	maxprot = 0;
	vm_inherit_t	inherit = 0;
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	tir_t		tir1;
	int		error = 0, panic_error = 0;
	unsigned int	i;
	int		nflavors;
	uint32_t	nesting_depth = 0;
	kern_return_t	kret = 0;
	struct vm_region_submap_info_64	vbr;
	mach_msg_type_number_t	vbrcount = 0;
	vm_map_t	map = kernel_map;
	unsigned int	txstart = 0;
	unsigned int	mach_section_count = 4;
	unsigned int	num_sects_txed = 0;

	not_in_kdp = 0; /* Signal vm functions not to acquire locks */

	thread_count = 1;
	segment_count = get_vmmap_entries(map);

	printf("Kernel map has %d entries\n", segment_count);

	nflavors = kdp_mynum_flavors;
	bcopy((char *)thread_flavor_array, (char *)flavors, sizeof(thread_flavor_array));
	for (i = 0; i < nflavors; i++)
		tstate_size += sizeof(mythread_state_flavor_t) +
		    (flavors[i].count * sizeof(int));

	command_size = (segment_count + mach_section_count) *
	    sizeof(struct segment_command) +
	    thread_count * sizeof(struct thread_command) +
	    tstate_size * thread_count;

	header_size = command_size + sizeof(struct mach_header);
	header = (vm_offset_t) command_buffer;
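
	/*
	 * Note (illustrative): with the single x86_THREAD_STATE32 flavor,
	 * tstate_size is sizeof(mythread_state_flavor_t) +
	 * x86_THREAD_STATE32_COUNT * sizeof(int), and command_size budgets one
	 * LC_SEGMENT per kernel VM map entry, plus one for each of the four
	 * preferentially dumped kernel sections (TEXT, DATA, PRELINK, LINK),
	 * plus a single LC_THREAD and its thread state payload.
	 */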
	/*
	 *	Set up Mach-O header for currently executing 32 bit kernel.
	 */
	printf ("Generated Mach-O header size was %d\n", header_size);

	mh = (struct mach_header *) header;
	mh->magic = MH_MAGIC;
	mh->cputype = cpu_type();
	mh->cpusubtype = cpu_subtype();
	mh->filetype = MH_CORE;
	mh->ncmds = segment_count + thread_count + mach_section_count;
	mh->sizeofcmds = command_size;
	mh->flags = 0;

	hoffset = sizeof(struct mach_header);		/* offset into header */
	foffset = round_page_32(header_size);		/* offset into file */

	if ((foffset - header_size) < (4*sizeof(struct segment_command))) {
		foffset += ((4*sizeof(struct segment_command)) - (foffset - header_size));
	}

	max_header_size = foffset;

	vmoffset = VM_MIN_ADDRESS;			/* offset into VM */
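
	/*
	 * Note (illustrative): the core file is laid out as the header region
	 * (mach_header plus load commands) followed, at the page-rounded
	 * offset foffset, by the segment data. foffset is padded so there is
	 * always room for at least four additional segment commands beyond the
	 * estimated header_size, and max_header_size is later used as the cap
	 * that stops load commands from being written past the reserved area.
	 */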
	/* Transmit the Mach-O MH_CORE header, and seek forward past the
	 * area reserved for the segment and thread commands
	 * to begin data transmission
	 */
	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(nfoffset), &nfoffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct mach_header), (caddr_t) mh)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	printf ("Transmitting kernel state, please wait: ");
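
	/*
	 * Note (illustrative): the dump is driven by three KDP crashdump
	 * operations, as used above and below -- KDP_SEEK repositions the file
	 * offset on the remote dump server, KDP_DATA appends a buffer at the
	 * current offset, and KDP_EOF (sent at the end, or from kdp_dump_trap()
	 * on an unexpected fault) closes out the core file.
	 */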
	while ((segment_count > 0) || (kret == KERN_SUCCESS)) {
		/* Check if we've transmitted all the kernel sections */
		if (num_sects_txed == mach_section_count) {

			while (1) {
				/*
				 *	Get region information for next region.
				 */
				vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
				if((kret = vm_region_recurse_64(map,
					    &vmoffset, &size, &nesting_depth,
					    (vm_region_recurse_info_t)&vbr,
					    &vbrcount)) != KERN_SUCCESS) {
					break;
				}

				if(vbr.is_submap) {
					nesting_depth++;
					continue;
				} else {
					break;
				}
			}

			if(kret != KERN_SUCCESS)
				break;

			prot = vbr.protection;
			maxprot = vbr.max_protection;
			inherit = vbr.inheritance;
		} else {
			switch (num_sects_txed) {
			case 0:
				/* Transmit the kernel text section */
				vmoffset = sectTEXTB;
				size = sectSizeTEXT;
				break;
			case 1:
				vmoffset = sectDATAB;
				size = sectSizeDATA;
				break;
			case 2:
				vmoffset = sectPRELINKB;
				size = sectSizePRELINK;
				break;
			case 3:
				vmoffset = sectLINKB;
				size = sectSizeLINK;
				break;
			}
			num_sects_txed++;
		}
		/*
		 *	Fill in segment command structure.
		 */
		if (hoffset > max_header_size)
			break;
		sc = (struct segment_command *) (header);
		sc->cmd = LC_SEGMENT;
		sc->cmdsize = sizeof(struct segment_command);
		sc->segname[0] = 0;
		sc->vmaddr = vmoffset;
		sc->vmsize = size;
		sc->fileoff = foffset;
		sc->filesize = size;
		sc->maxprot = maxprot;
		sc->initprot = prot;
		sc->nsects = 0;

		if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
			printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
			error = panic_error;
			goto out;
		}

		if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct segment_command), (caddr_t) sc)) < 0) {
			printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
			error = panic_error;
			goto out;
		}
		/* Do not transmit memory tagged VM_MEMORY_IOKIT - instead,
		 * seek past that region on the server - this creates a
		 * hole in the file.
		 */
		if ((vbr.user_tag != VM_MEMORY_IOKIT)) {

			if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
				printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
				error = panic_error;
				goto out;
			}

			txstart = vmoffset;

			if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, size, (caddr_t) txstart)) < 0) {
				printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
				error = panic_error;
				goto out;
			}
		}

		hoffset += sizeof(struct segment_command);
		foffset += size;
		vmoffset += size;
		segment_count--;
	}
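
	/*
	 * Note (illustrative): regions tagged VM_MEMORY_IOKIT above still get
	 * a segment command and still advance foffset, but their contents are
	 * never transmitted, so the resulting core file simply contains a hole
	 * where that data would have been.
	 */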
	tir1.header = header;
	tir1.hoffset = 0;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;

	/* Now send out the LC_THREAD load command, with the thread information
	 * for the current activation.
	 * Note that the corefile can contain LC_SEGMENT commands with file
	 * offsets that point past the edge of the corefile, in the event that
	 * the last N VM regions were all I/O mapped or otherwise
	 * non-transferable memory, not followed by a normal VM region;
	 * i.e. there will be no hole that reaches to the end of the core file.
	 */
	kern_collectth_state (current_thread(), &tir1);
	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, tir1.hoffset, (caddr_t) header)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	if ((panic_error = kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

out:
	return (error);
}