/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <libsa/types.h>

#include <vm/vm_map.h>
#include <i386/pmap.h>

#include <kdp/kdp_core.h>
#include <kdp/kdp_internal.h>
#include <mach-o/loader.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/vm_statistics.h>
#include <mach/thread_status.h>
#include <i386/thread.h>

#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
int	kdp_dump_trap(int type, x86_saved_state64_t *regs);
typedef struct {
	int			flavor;		/* the number for this flavor */
	mach_msg_type_number_t	count;		/* count of ints in this flavor */
} mythread_state_flavor_t;
static mythread_state_flavor_t thread_flavor_array[] = {
	{x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT}
};
static int kdp_mynum_flavors = 1;
static int MAX_TSTATE_FLAVORS = 1;
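/* With a single entry in thread_flavor_array, each dumped thread is described
 * by exactly one flavor: the full x86_THREAD_STATE64 register set.
 */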
typedef struct {
	vm_offset_t		header;
	int			hoffset;
	mythread_state_flavor_t	*flavors;
	int			tstate_size;
} tir_t;
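/* kern_collectth_state() appends one record per thread to the header buffer,
 * laid out as:
 *
 *	struct thread_command		(cmd = LC_THREAD)
 *	mythread_state_flavor_t		(flavor, count)
 *	int state[count]		(the x86_thread_state64_t registers)
 *
 * with one flavor/state pair for each entry in thread_flavor_array.
 */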
char command_buffer[512];
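/* Static staging area for the Mach-O header and load commands; the dump path
 * runs at panic time, so nothing here is dynamically allocated.
 */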
void
kern_collectth_state(thread_t thread, tir_t *t)
{
	vm_offset_t	header;
	int		hoffset, i;
	mythread_state_flavor_t *flavors;
	struct thread_command	*tc;
	vm_offset_t	kstack = 0;
	/*
	 *	Fill in thread command structure.
	 */
	header = t->header;
	hoffset = t->hoffset;
	flavors = t->flavors;

	tc = (struct thread_command *) (header + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = (uint32_t)sizeof(struct thread_command) + t->tstate_size;
	hoffset += (uint32_t)sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < kdp_mynum_flavors; i++) {
		*(mythread_state_flavor_t *)(header + hoffset) =
		    flavors[i];
		hoffset += (uint32_t)sizeof(mythread_state_flavor_t);
		/* Locate and obtain the non-volatile register context
		 * for this kernel thread. This should ideally be
		 * encapsulated in machine_thread_get_kern_state()
		 * but that routine appears to have been co-opted
		 * by CHUD to obtain pre-interrupt state.
		 */
		if (flavors[i].flavor == x86_THREAD_STATE64) {
			x86_thread_state64_t *tstate = (x86_thread_state64_t *) (header + hoffset);
			x86_saved_state64_t *cpstate = current_cpu_datap()->cpu_fatal_trap_state;

			bzero(tstate, x86_THREAD_STATE64_COUNT * sizeof(int));
			if ((current_thread() == thread) && (cpstate != NULL)) {
				tstate->rax = cpstate->rax;
				tstate->rbx = cpstate->rbx;
				tstate->rcx = cpstate->rcx;
				tstate->rdx = cpstate->rdx;
				tstate->rdi = cpstate->rdi;
				tstate->rsi = cpstate->rsi;
				tstate->rbp = cpstate->rbp;
				tstate->r8 = cpstate->r8;
				tstate->r9 = cpstate->r9;
				tstate->r10 = cpstate->r10;
				tstate->r11 = cpstate->r11;
				tstate->r12 = cpstate->r12;
				tstate->r13 = cpstate->r13;
				tstate->r14 = cpstate->r14;
				tstate->r15 = cpstate->r15;
				tstate->rip = cpstate->isf.rip;
				tstate->rsp = cpstate->isf.rsp;
				tstate->rflags = cpstate->isf.rflags;
				tstate->cs = cpstate->isf.cs;
				tstate->fs = cpstate->fs;
				tstate->gs = cpstate->gs;
			} else if ((kstack = thread->kernel_stack) != 0) {
				/* Fall back to the callee-saved registers preserved
				 * in this thread's x86_kernel_state on its kernel
				 * stack; caller-saved registers stay zeroed.
				 */
				struct x86_kernel_state *iks = STACK_IKS(kstack);
				tstate->rbx = iks->k_rbx;
				tstate->rsp = iks->k_rsp;
				tstate->rbp = iks->k_rbp;
				tstate->r12 = iks->k_r12;
				tstate->r13 = iks->k_r13;
				tstate->r14 = iks->k_r14;
				tstate->r15 = iks->k_r15;
				tstate->rip = iks->k_rip;
			}
		}
		else if (machine_thread_get_kern_state(thread,
			flavors[i].flavor, (thread_state_t) (header + hoffset),
			&flavors[i].count) != KERN_SUCCESS)
			printf ("Failure in machine_thread_get_kern_state()\n");
		hoffset += (uint32_t)(flavors[i].count * sizeof(int));
	}

	t->hoffset = hoffset;
}
/* Intended to be called from the kernel trap handler if an unrecoverable fault
 * occurs during a crashdump (which shouldn't happen since we validate mappings
 * and so on). This should be reworked to attempt some form of recovery.
 */
int
kdp_dump_trap(int type, __unused x86_saved_state64_t *saved_state)
{
	printf ("An unexpected trap (type %d) occurred during the system dump, terminating.\n", type);
	kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0));
	abort_panic_transfer();
	kdp_flag &= ~KDP_PANIC_DUMP_ENABLED;
	kdp_flag &= ~PANIC_CORE_ON_NMI;
	kdp_flag &= ~PANIC_LOG_DUMP;

	kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
	return (0);
}
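/* kern_dump() streams a Mach-O MH_CORE image of the kernel map to the remote
 * debugger over KDP: first the mach header, then one LC_SEGMENT_64 per VM
 * region plus that region's contents, and finally an LC_THREAD command for
 * the current thread.
 */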
int
kern_dump(void)
{
	vm_map_t	map = kernel_map;
	unsigned int	thread_count, segment_count;
	unsigned int	command_size = 0, header_size = 0, tstate_size = 0;
	uint64_t	hoffset = 0, foffset = 0, nfoffset = 0;
	unsigned int	max_header_size = 0;
	vm_offset_t	header, txstart;
	vm_map_offset_t	vmoffset;
	struct mach_header_64		*mh64;
	struct segment_command_64	*sc64;
	mach_vm_size_t	size = 0;
	vm_prot_t	prot = 0;
	vm_prot_t	maxprot = 0;
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	int		nflavors;
	tir_t		tir1;

	uint32_t	nesting_depth = 0;
	kern_return_t	kret = 0;
	struct vm_region_submap_info_64 vbr;
	mach_msg_type_number_t	vbrcount = 0;
	int		panic_error = 0;
	unsigned int	i;
	thread_count = 1;
	segment_count = get_vmmap_entries(map);

	printf("Kernel map has %d entries\n", segment_count);

	nflavors = kdp_mynum_flavors;
	bcopy((char *) thread_flavor_array, (char *) flavors, sizeof(thread_flavor_array));
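	/* The flavor descriptors are copied to a local array because
	 * machine_thread_get_kern_state() can rewrite the per-flavor count in
	 * place; the static template must stay untouched.
	 */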
	for (i = 0; i < nflavors; i++)
		tstate_size += (uint32_t)(sizeof(mythread_state_flavor_t) +
		    (flavors[i].count * sizeof(int)));
	command_size = (uint32_t)((segment_count) *
	    sizeof(struct segment_command_64) +
	    thread_count * sizeof(struct thread_command) +
	    tstate_size * thread_count);

	header_size = command_size + (uint32_t)sizeof(struct mach_header_64);
	header = (vm_offset_t) command_buffer;
	/*
	 *	Set up Mach-O header for the currently executing 64-bit kernel.
	 */
	printf ("Generated Mach-O header size was %d\n", header_size);
	mh64 = (struct mach_header_64 *) header;
	mh64->magic = MH_MAGIC_64;
	mh64->cputype = cpu_type();
	mh64->cpusubtype = cpu_subtype();
	mh64->filetype = MH_CORE;
	mh64->ncmds = segment_count + thread_count;
	mh64->sizeofcmds = command_size;
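	/* ncmds/sizeofcmds account for one LC_SEGMENT_64 per kernel map entry
	 * plus one LC_THREAD (with its state) per dumped thread.
	 */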
	hoffset = sizeof(struct mach_header_64);	/* offset into header */
	foffset = (uint32_t)round_page(header_size);	/* offset into file */

	if ((foffset - header_size) < (4 * sizeof(struct segment_command_64))) {
		foffset += (uint32_t)((4 * sizeof(struct segment_command_64)) - (foffset - header_size));
	}

	max_header_size = (unsigned int)foffset;
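	/* The data area starts at a page boundary beyond the header, padded so
	 * at least four spare segment_command_64 slots remain in the reserved
	 * header area, presumably as headroom if the map gains entries after
	 * segment_count was sampled.
	 */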
	vmoffset = vm_map_min(map);
	/* Transmit the Mach-O MH_CORE header, and seek forward past the
	 * area reserved for the segment and thread commands
	 * to begin data transmission
	 */
	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(nfoffset), &nfoffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
	}
	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct mach_header_64), (caddr_t) mh64)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
	}
	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
	}
	printf ("Transmitting kernel state, please wait: ");
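	/* Walk the kernel map: for each region, write an LC_SEGMENT_64 command
	 * into the reserved header area and stream the region's contents into
	 * the data area of the core file.
	 */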
	while ((segment_count > 0) || (kret == KERN_SUCCESS)) {
		/*
		 *	Get region information for next region.
		 */
		vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
		if ((kret = mach_vm_region_recurse(map,
			    &vmoffset, &size, &nesting_depth,
			    (vm_region_recurse_info_t)&vbr,
			    &vbrcount)) != KERN_SUCCESS) {
			break;
		}
		if (kret != KERN_SUCCESS)
			break;

		prot = vbr.protection;
		maxprot = vbr.max_protection;
		/*
		 *	Fill in segment command structure.
		 */
		if (hoffset > max_header_size)
			break;
		sc64 = (struct segment_command_64 *) (header);
		sc64->cmd = LC_SEGMENT_64;
		sc64->cmdsize = sizeof(struct segment_command_64);
		sc64->segname[0] = 0;
		sc64->vmaddr = vmoffset;
		sc64->vmsize = size;
		sc64->fileoff = foffset;
		sc64->filesize = size;
		sc64->maxprot = maxprot;
		sc64->initprot = prot;
		if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
			printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		}
		if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct segment_command_64), (caddr_t) sc64)) < 0) {
			printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
		}
		/* Do not transmit memory tagged VM_MEMORY_IOKIT - instead,
		 * seek past that region on the server - this creates a
		 * hole in the file.
		 */
		if ((vbr.user_tag != VM_MEMORY_IOKIT)) {
			if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
				printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
			}
			txstart = vmoffset;

			if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, (unsigned int)size, (caddr_t) txstart)) < 0) {
				printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
			}
		}
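		/* File offsets advance even for regions that were skipped above,
		 * so an untransmitted IOKIT region shows up as a hole in the
		 * resulting corefile.
		 */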
		hoffset += (unsigned int)sizeof(struct segment_command_64);
		foffset += (unsigned int)size;
		vmoffset += size;
		segment_count--;
	}
	tir1.header = header;
	tir1.hoffset = 0;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;
	/* Now send out the LC_THREAD load command, with the thread information
	 * for the current activation.
	 * Note that the corefile can contain LC_SEGMENT commands with file
	 * offsets that point past the edge of the corefile, in the event that
	 * the last N VM regions were all I/O mapped or otherwise
	 * non-transferable memory, not followed by a normal VM region;
	 * i.e. there will be no hole that reaches to the end of the core file.
	 */
	kern_collectth_state (current_thread(), &tir1);
	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
	}
	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, tir1.hoffset, (caddr_t) header)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
	}
	/* Mark the end of the core dump. */
	if ((panic_error = kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0)
	{
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
	}