/*
 * Source: git.saurik.com mirror of apple/xnu.git
 * blob 5d149c7dbcc3bca77eec17701ed7d90cf5661f1e — bsd/kern/kern_core.c
 */
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved.
29 *
30 * File: bsd/kern/kern_core.c
31 *
32 * This file contains machine independent code for performing core dumps.
33 *
34 */
35
36 #include <mach/vm_param.h>
37 #include <mach/thread_status.h>
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/signalvar.h>
42 #include <sys/resourcevar.h>
43 #include <sys/namei.h>
44 #include <sys/vnode_internal.h>
45 #include <sys/proc_internal.h>
46 #include <sys/kauth.h>
47 #include <sys/timeb.h>
48 #include <sys/times.h>
49 #include <sys/acct.h>
50 #include <sys/file_internal.h>
51 #include <sys/uio.h>
52 #include <sys/kernel.h>
53 #include <sys/stat.h>
54
55 #include <mach-o/loader.h>
56 #include <mach/vm_region.h>
57 #include <mach/vm_statistics.h>
58
59 #include <vm/vm_kern.h>
60 #include <vm/vm_protos.h> /* last */
61 #include <vm/vm_map.h> /* current_map() */
62 #include <mach/mach_vm.h> /* mach_vm_region_recurse() */
63 #include <mach/task.h> /* task_suspend() */
64 #include <kern/task.h> /* get_task_numacts() */
65
/*
 * One Mach thread-state "flavor" as recorded in the core file: the flavor
 * identifier plus how many 32-bit words of state thread_getstatus()
 * returns for that flavor.
 */
typedef struct {
	int flavor;			/* the number for this flavor */
	mach_msg_type_number_t count;	/* count of ints in this flavor */
} mythread_state_flavor_t;

#if defined (__ppc__)
/* 64 bit */
mythread_state_flavor_t thread_flavor_array64[]={
		{PPC_THREAD_STATE64 , PPC_THREAD_STATE64_COUNT},
		{PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT},
		{PPC_EXCEPTION_STATE64, PPC_EXCEPTION_STATE64_COUNT},
		{PPC_VECTOR_STATE, PPC_VECTOR_STATE_COUNT}
		};

/* 32 bit */
mythread_state_flavor_t thread_flavor_array[]={
		{PPC_THREAD_STATE , PPC_THREAD_STATE_COUNT},
		{PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT},
		{PPC_EXCEPTION_STATE, PPC_EXCEPTION_STATE_COUNT},
		{PPC_VECTOR_STATE, PPC_VECTOR_STATE_COUNT}
		};

#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array [] = {
		{x86_THREAD_STATE, x86_THREAD_STATE_COUNT},
		{x86_FLOAT_STATE, x86_FLOAT_STATE_COUNT},
		{x86_EXCEPTION_STATE, x86_EXCEPTION_STATE_COUNT},
		};
int mynum_flavors=3;
#elif defined (__arm__)
mythread_state_flavor_t thread_flavor_array[]={
		{ARM_THREAD_STATE , ARM_THREAD_STATE_COUNT},
		{ARM_VFP_STATE, ARM_VFP_STATE_COUNT},
		{ARM_EXCEPTION_STATE, ARM_EXCEPTION_STATE_COUNT}
		};
int mynum_flavors=3;

#else
#error architecture not supported
#endif


/*
 * Iteration state handed to collectth_state() while walking the task's
 * threads: the in-kernel header buffer, the running write offset into it,
 * and the flavor table to record for each thread.
 */
typedef struct {
	vm_offset_t header;	/* base of the in-kernel Mach-O header buffer */
	int  hoffset;		/* current write offset into that buffer */
	mythread_state_flavor_t *flavors;	/* flavors dumped per thread */
	int tstate_size;	/* total per-thread state size, all flavors */
	int flavor_count;	/* number of entries in flavors[] */
} tir_t;

/* XXX should be static */
void collectth_state(thread_t th_act, void *tirp);

/* XXX not in a Mach header anywhere */
kern_return_t thread_getstatus(register thread_t act, int flavor,
			thread_state_t tstate, mach_msg_type_number_t *count);
void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);


/* Tunable: 0 disables all core dumps (see coredump() gate). */
__private_extern__ int do_coredump = 1;	/* default: dump cores */
/* Tunable: 1 also dumps processes whose saved ids differ from real ids. */
__private_extern__ int sugid_coredump = 0; /* default: but not SGUID binaries */
127
128 void
129 collectth_state(thread_t th_act, void *tirp)
130 {
131 vm_offset_t header;
132 int hoffset, i ;
133 mythread_state_flavor_t *flavors;
134 struct thread_command *tc;
135 tir_t *t = (tir_t *)tirp;
136
137 /*
138 * Fill in thread command structure.
139 */
140 header = t->header;
141 hoffset = t->hoffset;
142 flavors = t->flavors;
143
144 tc = (struct thread_command *) (header + hoffset);
145 tc->cmd = LC_THREAD;
146 tc->cmdsize = sizeof(struct thread_command)
147 + t->tstate_size;
148 hoffset += sizeof(struct thread_command);
149 /*
150 * Follow with a struct thread_state_flavor and
151 * the appropriate thread state struct for each
152 * thread state flavor.
153 */
154 for (i = 0; i < t->flavor_count; i++) {
155 *(mythread_state_flavor_t *)(header+hoffset) =
156 flavors[i];
157 hoffset += sizeof(mythread_state_flavor_t);
158 thread_getstatus(th_act, flavors[i].flavor,
159 (thread_state_t)(header+hoffset),
160 &flavors[i].count);
161 hoffset += flavors[i].count*sizeof(int);
162 }
163
164 t->hoffset = hoffset;
165 }
166
167
168 /*
169 * coredump
170 *
171 * Description: Create a core image on the file "core" for the process
172 * indicated
173 *
174 * Parameters: core_proc Process to dump core [*]
175 *
176 * Returns: 0 Success
177 * EFAULT Failed
178 *
179 * IMPORTANT: This function can only be called on the current process, due
180 * to assumptions below; see variable declaration section for
181 * details.
182 */
183 #define MAX_TSTATE_FLAVORS 10
184 int
185 coredump(proc_t core_proc)
186 {
187 /* Begin assumptions that limit us to only the current process */
188 vfs_context_t ctx = vfs_context_current();
189 vm_map_t map = current_map();
190 task_t task = current_task();
191 /* End assumptions */
192 kauth_cred_t cred = vfs_context_ucred(ctx);
193 int error = 0;
194 struct vnode_attr va;
195 int thread_count, segment_count;
196 int command_size, header_size, tstate_size;
197 int hoffset;
198 off_t foffset;
199 vm_map_offset_t vmoffset;
200 vm_offset_t header;
201 vm_map_size_t vmsize;
202 vm_prot_t prot;
203 vm_prot_t maxprot;
204 vm_inherit_t inherit;
205 int error1 = 0;
206 char stack_name[MAXCOMLEN+6];
207 char *alloced_name = NULL;
208 char *name;
209 mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
210 vm_size_t mapsize;
211 int i;
212 uint32_t nesting_depth = 0;
213 kern_return_t kret;
214 struct vm_region_submap_info_64 vbr;
215 mach_msg_type_number_t vbrcount = 0;
216 tir_t tir1;
217 struct vnode * vp;
218 struct mach_header *mh = NULL; /* protected by is_64 */
219 struct mach_header_64 *mh64 = NULL; /* protected by is_64 */
220 int is_64 = 0;
221 size_t mach_header_sz = sizeof(struct mach_header);
222 size_t segment_command_sz = sizeof(struct segment_command);
223
224 if (do_coredump == 0 || /* Not dumping at all */
225 ( (sugid_coredump == 0) && /* Not dumping SUID/SGID binaries */
226 ( (cred->cr_svuid != cred->cr_ruid) ||
227 (cred->cr_svgid != cred->cr_rgid)))) {
228
229 return (EFAULT);
230 }
231
232 if (IS_64BIT_PROCESS(core_proc)) {
233 is_64 = 1;
234 mach_header_sz = sizeof(struct mach_header_64);
235 segment_command_sz = sizeof(struct segment_command_64);
236 }
237
238 mapsize = get_vmmap_size(map);
239
240 if (mapsize >= core_proc->p_rlimit[RLIMIT_CORE].rlim_cur)
241 return (EFAULT);
242 (void) task_suspend(task);
243
244 MALLOC(alloced_name, char *, MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
245
246 /* create name according to sysctl'able format string */
247 /* if name creation fails, fall back to historical behaviour... */
248 if (proc_core_name(core_proc->p_comm, kauth_cred_getuid(cred),
249 core_proc->p_pid, alloced_name, MAXPATHLEN)) {
250 snprintf(stack_name, sizeof(stack_name),
251 "/cores/core.%d", core_proc->p_pid);
252 name = stack_name;
253 } else
254 name = alloced_name;
255
256 if ((error = vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), S_IRUSR, VNODE_LOOKUP_NOFOLLOW, &vp, ctx)))
257 goto out2;
258
259 VATTR_INIT(&va);
260 VATTR_WANTED(&va, va_nlink);
261 /* Don't dump to non-regular files or files with links. */
262 if (vp->v_type != VREG ||
263 vnode_getattr(vp, &va, ctx) || va.va_nlink != 1) {
264 error = EFAULT;
265 goto out;
266 }
267
268 VATTR_INIT(&va); /* better to do it here than waste more stack in vnode_setsize */
269 VATTR_SET(&va, va_data_size, 0);
270 vnode_setattr(vp, &va, ctx);
271 core_proc->p_acflag |= ACORE;
272
273 /*
274 * If the task is modified while dumping the file
275 * (e.g., changes in threads or VM, the resulting
276 * file will not necessarily be correct.
277 */
278
279 thread_count = get_task_numacts(task);
280 segment_count = get_vmmap_entries(map); /* XXX */
281 #if defined (__ppc__)
282 if (is_64) {
283 tir1.flavor_count = sizeof(thread_flavor_array64)/sizeof(mythread_state_flavor_t);
284 bcopy(thread_flavor_array64, flavors,sizeof(thread_flavor_array64));
285 } else {
286 #endif /* __ppc __ */
287 tir1.flavor_count = sizeof(thread_flavor_array)/sizeof(mythread_state_flavor_t);
288 bcopy(thread_flavor_array, flavors,sizeof(thread_flavor_array));
289 #if defined (__ppc__)
290 }
291 #endif /* __ppc __ */
292 tstate_size = 0;
293 for (i = 0; i < tir1.flavor_count; i++)
294 tstate_size += sizeof(mythread_state_flavor_t) +
295 (flavors[i].count * sizeof(int));
296 command_size = segment_count * segment_command_sz +
297 thread_count*sizeof(struct thread_command) +
298 tstate_size*thread_count;
299
300 header_size = command_size + mach_header_sz;
301
302 (void) kmem_alloc(kernel_map,
303 (vm_offset_t *)&header,
304 (vm_size_t)header_size);
305
306 /*
307 * Set up Mach-O header.
308 */
309 if (is_64) {
310 mh64 = (struct mach_header_64 *)header;
311 mh64->magic = MH_MAGIC_64;
312 mh64->cputype = cpu_type();
313 mh64->cpusubtype = cpu_subtype();
314 mh64->filetype = MH_CORE;
315 mh64->ncmds = segment_count + thread_count;
316 mh64->sizeofcmds = command_size;
317 mh64->reserved = 0; /* 8 byte alignment */
318 } else {
319 mh = (struct mach_header *)header;
320 mh->magic = MH_MAGIC;
321 mh->cputype = cpu_type();
322 mh->cpusubtype = cpu_subtype();
323 mh->filetype = MH_CORE;
324 mh->ncmds = segment_count + thread_count;
325 mh->sizeofcmds = command_size;
326 }
327
328 hoffset = mach_header_sz; /* offset into header */
329 foffset = round_page(header_size); /* offset into file */
330 vmoffset = MACH_VM_MIN_ADDRESS; /* offset into VM */
331
332 /*
333 * We use to check for an error, here, now we try and get
334 * as much as we can
335 */
336 while (segment_count > 0) {
337 struct segment_command *sc;
338 struct segment_command_64 *sc64;
339
340 /*
341 * Get region information for next region.
342 */
343
344 while (1) {
345 vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
346 if((kret = mach_vm_region_recurse(map,
347 &vmoffset, &vmsize, &nesting_depth,
348 (vm_region_recurse_info_t)&vbr,
349 &vbrcount)) != KERN_SUCCESS) {
350 break;
351 }
352 /*
353 * If we get a valid mapping back, but we're dumping
354 * a 32 bit process, and it's over the allowable
355 * address space of a 32 bit process, it's the same
356 * as if mach_vm_region_recurse() failed.
357 */
358 if (!(is_64) &&
359 (vmoffset + vmsize > VM_MAX_ADDRESS)) {
360 kret = KERN_INVALID_ADDRESS;
361 break;
362 }
363 if(vbr.is_submap) {
364 nesting_depth++;
365 continue;
366 } else {
367 break;
368 }
369 }
370 if(kret != KERN_SUCCESS)
371 break;
372
373 prot = vbr.protection;
374 maxprot = vbr.max_protection;
375 inherit = vbr.inheritance;
376 /*
377 * Fill in segment command structure.
378 */
379 if (is_64) {
380 sc64 = (struct segment_command_64 *)(header + hoffset);
381 sc64->cmd = LC_SEGMENT_64;
382 sc64->cmdsize = sizeof(struct segment_command_64);
383 /* segment name is zeroed by kmem_alloc */
384 sc64->segname[0] = 0;
385 sc64->vmaddr = vmoffset;
386 sc64->vmsize = vmsize;
387 sc64->fileoff = foffset;
388 sc64->filesize = vmsize;
389 sc64->maxprot = maxprot;
390 sc64->initprot = prot;
391 sc64->nsects = 0;
392 } else {
393 sc = (struct segment_command *) (header + hoffset);
394 sc->cmd = LC_SEGMENT;
395 sc->cmdsize = sizeof(struct segment_command);
396 /* segment name is zeroed by kmem_alloc */
397 sc->segname[0] = 0;
398 sc->vmaddr = CAST_DOWN(vm_offset_t,vmoffset);
399 sc->vmsize = CAST_DOWN(vm_size_t,vmsize);
400 sc->fileoff = CAST_DOWN(uint32_t,foffset);
401 sc->filesize = CAST_DOWN(uint32_t,vmsize);
402 sc->maxprot = maxprot;
403 sc->initprot = prot;
404 sc->nsects = 0;
405 }
406
407 /*
408 * Write segment out. Try as hard as possible to
409 * get read access to the data.
410 */
411 if ((prot & VM_PROT_READ) == 0) {
412 mach_vm_protect(map, vmoffset, vmsize, FALSE,
413 prot|VM_PROT_READ);
414 }
415 /*
416 * Only actually perform write if we can read.
417 * Note: if we can't read, then we end up with
418 * a hole in the file.
419 */
420 if ((maxprot & VM_PROT_READ) == VM_PROT_READ
421 && vbr.user_tag != VM_MEMORY_IOKIT
422 && coredumpok(map,vmoffset)) {
423 vm_map_size_t tmp_vmsize = vmsize;
424 off_t xfer_foffset = foffset;
425
426 //LP64todo - works around vn_rdwr_64() 2G limit
427 while (tmp_vmsize > 0) {
428 vm_map_size_t xfer_vmsize = tmp_vmsize;
429 if (xfer_vmsize > INT_MAX)
430 xfer_vmsize = INT_MAX;
431 error = vn_rdwr_64(UIO_WRITE, vp,
432 vmoffset, xfer_vmsize, xfer_foffset,
433 (IS_64BIT_PROCESS(core_proc) ? UIO_USERSPACE64 : UIO_USERSPACE32),
434 IO_NODELOCKED|IO_UNIT, cred, (int *) 0, core_proc);
435 tmp_vmsize -= xfer_vmsize;
436 xfer_foffset += xfer_vmsize;
437 }
438 }
439
440 hoffset += segment_command_sz;
441 foffset += vmsize;
442 vmoffset += vmsize;
443 segment_count--;
444 }
445
446 /*
447 * If there are remaining segments which have not been written
448 * out because break in the loop above, then they were not counted
449 * because they exceed the real address space of the executable
450 * type: remove them from the header's count. This is OK, since
451 * we are allowed to have a sparse area following the segments.
452 */
453 if (is_64) {
454 mh64->ncmds -= segment_count;
455 mh64->sizeofcmds -= segment_count * segment_command_sz;
456 } else {
457 mh->ncmds -= segment_count;
458 mh->sizeofcmds -= segment_count * segment_command_sz;
459 }
460
461 tir1.header = header;
462 tir1.hoffset = hoffset;
463 tir1.flavors = flavors;
464 tir1.tstate_size = tstate_size;
465 task_act_iterate_wth_args(task, collectth_state,&tir1);
466
467 /*
468 * Write out the Mach header at the beginning of the
469 * file. OK to use a 32 bit write for this.
470 */
471 error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0,
472 UIO_SYSSPACE32, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, core_proc);
473 kmem_free(kernel_map, header, header_size);
474 out:
475 error1 = vnode_close(vp, FWRITE, ctx);
476 out2:
477 if (alloced_name != NULL)
478 FREE(alloced_name, M_TEMP);
479 if (error == 0)
480 error = error1;
481
482 return (error);
483 }