/*
 * Source: apple/xnu (xnu-1699.22.73) — osfmk/kdp/ml/x86_64/kdp_vm.c
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <mach/mach_types.h>
29 #include <mach/vm_attributes.h>
30 #include <mach/vm_param.h>
31 #include <libsa/types.h>
32
33 #include <vm/vm_map.h>
34 #include <i386/pmap.h>
35
36 #include <kdp/kdp_core.h>
37 #include <kdp/kdp_internal.h>
38 #include <mach-o/loader.h>
39 #include <mach/vm_map.h>
40 #include <mach/mach_vm.h>
41 #include <mach/vm_statistics.h>
42 #include <mach/thread_status.h>
43 #include <i386/thread.h>
44
45 #include <vm/vm_protos.h>
46 #include <vm/vm_kern.h>
47
/* Forward declarations: these entry points are called from the KDP
 * machine-dependent panic/trap path. */
int kern_dump(void);
int kdp_dump_trap(int type, x86_saved_state64_t *regs);
50
/*
 * One thread-state "flavor" descriptor as it is laid out inside an
 * LC_THREAD load command: the flavor identifier followed by the number
 * of 32-bit words of register state for that flavor.
 */
typedef struct {
	int flavor;			/* the number for this flavor */
	mach_msg_type_number_t count;	/* count of ints in this flavor */
} mythread_state_flavor_t;
55
/* The only flavor emitted into the core file: the full 64-bit
 * general-purpose register state. */
static mythread_state_flavor_t thread_flavor_array [] = {
	{x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT}
};
59
/*
 * Number of entries in thread_flavor_array, and the capacity of the
 * on-stack flavor array in kern_dump().  These were previously mutable
 * "static int"s; as enum constants they are true compile-time constants,
 * which also keeps "flavors[MAX_TSTATE_FLAVORS]" a fixed-size array
 * rather than a VLA.
 */
enum {
	kdp_mynum_flavors  = 1,
	MAX_TSTATE_FLAVORS = 1
};
62
/*
 * Cursor handed to kern_collectth_state(): the in-memory Mach-O header
 * being assembled, the current write offset into it, the flavor table,
 * and the precomputed size of one thread's state block.
 */
typedef struct {
	vm_offset_t header;		/* base of the header staging buffer */
	int hoffset;			/* current write offset into header */
	mythread_state_flavor_t *flavors;
	int tstate_size;		/* bytes of thread state per thread */
} tir_t;

/* Staging buffer used by kern_dump() to build the mach_header_64 and each
 * load command before transmitting it over KDP. */
char command_buffer[512];
71
/*
 * Append an LC_THREAD load command for "thread" into the Mach-O header
 * buffer described by "t": a struct thread_command, then for each flavor
 * a (flavor, count) pair followed by the register block itself.
 * Advances t->hoffset past everything written.  The caller must have
 * reserved t->tstate_size bytes after the thread_command.
 */
static void
kern_collectth_state(thread_t thread, tir_t *t)
{
	vm_offset_t header;
	int hoffset, i ;
	mythread_state_flavor_t *flavors;
	struct thread_command *tc;
	/*
	 * Fill in thread command structure.
	 */
	header = t->header;
	hoffset = t->hoffset;
	flavors = t->flavors;

	tc = (struct thread_command *) (header + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = (uint32_t)sizeof(struct thread_command) + t->tstate_size;
	hoffset += (uint32_t)sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < kdp_mynum_flavors; i++) {
		*(mythread_state_flavor_t *)(header+hoffset) =
			flavors[i];
		hoffset += (uint32_t)sizeof(mythread_state_flavor_t);
		/* Locate and obtain the non-volatile register context
		 * for this kernel thread. This should ideally be
		 * encapsulated in machine_thread_get_kern_state()
		 * but that routine appears to have been co-opted
		 * by CHUD to obtain pre-interrupt state.
		 */
		if (flavors[i].flavor == x86_THREAD_STATE64) {
			x86_thread_state64_t *tstate = (x86_thread_state64_t *) (header + hoffset);
			vm_offset_t kstack;
			x86_saved_state64_t *cpstate = current_cpu_datap()->cpu_fatal_trap_state;
			/* Zero first so registers we cannot recover read as 0. */
			bzero(tstate, x86_THREAD_STATE64_COUNT * sizeof(int));
			if ((current_thread() == thread) && (cpstate != NULL)) {
				/* This is the thread that took the fatal trap:
				 * the full register set saved at trap time is
				 * available from the per-CPU data. */
				tstate->rax = cpstate->rax;
				tstate->rbx = cpstate->rbx;
				tstate->rcx = cpstate->rcx;
				tstate->rdx = cpstate->rdx;
				tstate->rdi = cpstate->rdi;
				tstate->rsi = cpstate->rsi;
				tstate->rbp = cpstate->rbp;
				tstate->r8 = cpstate->r8;
				tstate->r9 = cpstate->r9;
				tstate->r10 = cpstate->r10;
				tstate->r11 = cpstate->r11;
				tstate->r12 = cpstate->r12;
				tstate->r13 = cpstate->r13;
				tstate->r14 = cpstate->r14;
				tstate->r15 = cpstate->r15;
				tstate->rip = cpstate->isf.rip;
				tstate->rsp = cpstate->isf.rsp;
				tstate->rflags = cpstate->isf.rflags;
				tstate->cs = cpstate->isf.cs;
				tstate->fs = cpstate->fs;
				tstate->gs = cpstate->gs;
			} else if ((kstack = thread->kernel_stack) != 0){
				/* For other threads, only the non-volatile
				 * registers preserved in the kernel stack's
				 * saved context are recoverable. */
				struct x86_kernel_state *iks = STACK_IKS(kstack);
				tstate->rbx = iks->k_rbx;
				tstate->rsp = iks->k_rsp;
				tstate->rbp = iks->k_rbp;
				tstate->r12 = iks->k_r12;
				tstate->r13 = iks->k_r13;
				tstate->r14 = iks->k_r14;
				tstate->r15 = iks->k_r15;
				tstate->rip = iks->k_rip;
			}
		}
		else if (machine_thread_get_kern_state(thread,
			flavors[i].flavor, (thread_state_t) (header+hoffset),
			&flavors[i].count) != KERN_SUCCESS)
			printf ("Failure in machine_thread_get_kern_state()\n");
		hoffset += (uint32_t)(flavors[i].count*sizeof(int));
	}

	t->hoffset = hoffset;
}
153
154 /* Intended to be called from the kernel trap handler if an unrecoverable fault
155 * occurs during a crashdump (which shouldn't happen since we validate mappings
156 * and so on). This should be reworked to attempt some form of recovery.
157 */
158 int
159 kdp_dump_trap(
160 int type,
161 __unused x86_saved_state64_t *saved_state)
162 {
163 printf ("An unexpected trap (type %d) occurred during the system dump, terminating.\n", type);
164 kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0));
165 abort_panic_transfer();
166 kdp_flag &= ~KDP_PANIC_DUMP_ENABLED;
167 kdp_flag &= ~PANIC_CORE_ON_NMI;
168 kdp_flag &= ~PANIC_LOG_DUMP;
169
170 kdp_reset();
171
172 kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
173 return( 0 );
174 }
175
176 int
177 kern_dump(void)
178 {
179 vm_map_t map;
180 unsigned int thread_count, segment_count;
181 unsigned int command_size = 0, header_size = 0, tstate_size = 0;
182 uint64_t hoffset = 0, foffset = 0, nfoffset = 0;
183 unsigned int max_header_size = 0;
184 vm_offset_t header, txstart;
185 vm_map_offset_t vmoffset;
186 struct mach_header_64 *mh64;
187 struct segment_command_64 *sc64;
188 mach_vm_size_t size = 0;
189 vm_prot_t prot = 0;
190 vm_prot_t maxprot = 0;
191 mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
192 vm_size_t nflavors;
193 vm_size_t i;
194 uint32_t nesting_depth = 0;
195 kern_return_t kret = 0;
196 struct vm_region_submap_info_64 vbr;
197 mach_msg_type_number_t vbrcount = 0;
198 tir_t tir1;
199
200 int error = 0;
201 int panic_error = 0;
202
203 map = kernel_map;
204
205 thread_count = 1;
206 segment_count = get_vmmap_entries(map);
207
208 printf("Kernel map has %d entries\n", segment_count);
209
210 nflavors = kdp_mynum_flavors;
211 bcopy((char *)thread_flavor_array,(char *) flavors,sizeof(thread_flavor_array));
212
213 for (i = 0; i < nflavors; i++)
214 tstate_size += (uint32_t)(sizeof(mythread_state_flavor_t) +
215 (flavors[i].count * sizeof(int)));
216
217 command_size = (uint32_t)((segment_count) *
218 sizeof(struct segment_command_64) +
219 thread_count * sizeof(struct thread_command) +
220 tstate_size * thread_count);
221
222 header_size = command_size + (uint32_t)sizeof(struct mach_header_64);
223 header = (vm_offset_t) command_buffer;
224
225 /*
226 * Set up Mach-O header for currently executing 32 bit kernel.
227 */
228 printf ("Generated Mach-O header size was %d\n", header_size);
229
230 mh64 = (struct mach_header_64 *) header;
231 mh64->magic = MH_MAGIC_64;
232 mh64->cputype = cpu_type();
233 mh64->cpusubtype = cpu_subtype();
234 mh64->filetype = MH_CORE;
235 mh64->ncmds = segment_count + thread_count;
236 mh64->sizeofcmds = command_size;
237 mh64->flags = 0;
238 mh64->reserved = 0;
239
240 hoffset = sizeof(struct mach_header_64); /* offset into header */
241 foffset = (uint32_t)round_page(header_size); /* offset into file */
242 /* Padding */
243 if ((foffset - header_size) < (4*sizeof(struct segment_command_64))) {
244 foffset += (uint32_t)((4*sizeof(struct segment_command_64)) - (foffset-header_size));
245 }
246
247 max_header_size = (unsigned int)foffset;
248
249 vmoffset = vm_map_min(map);
250
251 /* Transmit the Mach-O MH_CORE header, and seek forward past the
252 * area reserved for the segment and thread commands
253 * to begin data transmission
254 */
255 if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(nfoffset) , &nfoffset)) < 0) {
256 printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
257 error = panic_error;
258 goto out;
259 }
260
261 if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct mach_header_64), (caddr_t) mh64) < 0)) {
262 printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
263 error = panic_error;
264 goto out;
265 }
266 if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset) < 0)) {
267 printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
268 error = panic_error;
269 goto out;
270 }
271 printf ("Transmitting kernel state, please wait: ");
272
273 while ((segment_count > 0) || (kret == KERN_SUCCESS)){
274
275 while (1) {
276
277 /*
278 * Get region information for next region.
279 */
280
281 vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
282 if((kret = mach_vm_region_recurse(map,
283 &vmoffset, &size, &nesting_depth,
284 (vm_region_recurse_info_t)&vbr,
285 &vbrcount)) != KERN_SUCCESS) {
286 break;
287 }
288
289 if(vbr.is_submap) {
290 nesting_depth++;
291 continue;
292 } else {
293 break;
294 }
295 }
296
297 if(kret != KERN_SUCCESS)
298 break;
299
300 prot = vbr.protection;
301 maxprot = vbr.max_protection;
302
303 /*
304 * Fill in segment command structure.
305 */
306
307 if (hoffset > max_header_size)
308 break;
309 sc64 = (struct segment_command_64 *) (header);
310 sc64->cmd = LC_SEGMENT_64;
311 sc64->cmdsize = sizeof(struct segment_command_64);
312 sc64->segname[0] = 0;
313 sc64->vmaddr = vmoffset;
314 sc64->vmsize = size;
315 sc64->fileoff = foffset;
316 sc64->filesize = size;
317 sc64->maxprot = maxprot;
318 sc64->initprot = prot;
319 sc64->nsects = 0;
320
321 if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) {
322 printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
323 error = panic_error;
324 goto out;
325 }
326
327 if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct segment_command_64) , (caddr_t) sc64)) < 0) {
328 printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
329 error = panic_error;
330 goto out;
331 }
332
333 /* Do not transmit memory tagged VM_MEMORY_IOKIT - instead,
334 * seek past that region on the server - this creates a
335 * hole in the file.
336 */
337
338 if ((vbr.user_tag != VM_MEMORY_IOKIT)) {
339
340 if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset)) < 0) {
341 printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
342 error = panic_error;
343 goto out;
344 }
345
346 txstart = vmoffset;
347
348 if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, (unsigned int)size, (caddr_t) txstart)) < 0) {
349 printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
350 error = panic_error;
351 goto out;
352 }
353 }
354
355 hoffset += (unsigned int)sizeof(struct segment_command_64);
356 foffset += (unsigned int)size;
357 vmoffset += size;
358 segment_count--;
359 }
360 tir1.header = header;
361 tir1.hoffset = 0;
362 tir1.flavors = flavors;
363 tir1.tstate_size = tstate_size;
364
365 /* Now send out the LC_THREAD load command, with the thread information
366 * for the current activation.
367 * Note that the corefile can contain LC_SEGMENT commands with file
368 * offsets that point past the edge of the corefile, in the event that
369 * the last N VM regions were all I/O mapped or otherwise
370 * non-transferable memory, not followed by a normal VM region;
371 * i.e. there will be no hole that reaches to the end of the core file.
372 */
373 kern_collectth_state (current_thread(), &tir1);
374
375 if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) {
376 printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
377 error = panic_error;
378 goto out;
379 }
380
381 if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, tir1.hoffset , (caddr_t) header)) < 0) {
382 printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
383 error = panic_error;
384 goto out;
385 }
386
387 /* last packet */
388 if ((panic_error = kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0)
389 {
390 printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
391 error = panic_error;
392 goto out;
393 }
394 out:
395 return (error);
396 }