/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved.
 *
 *	File:	bsd/kern/kern_core.c
 *
 *	This file contains machine independent code for performing core dumps.
 *
 */

#include <mach/vm_param.h>
#include <mach/thread_status.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/acct.h>
#include <sys/file_internal.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>

#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h>   /* last */
#include <vm/vm_map.h>      /* current_map() */
#include <mach/mach_vm.h>   /* mach_vm_region_recurse() */
#include <mach/task.h>      /* task_suspend() */
#include <kern/task.h>      /* get_task_numacts() */

#include <security/audit/audit.h>

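/*
 * Per-flavor descriptor for the register state written with each
 * LC_THREAD command: the flavor number and the size of that flavor's
 * state in 32-bit words.
 */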
typedef struct {
    int flavor;                     /* the number for this flavor */
    mach_msg_type_number_t count;   /* count of ints in this flavor */
} mythread_state_flavor_t;

#if defined (__i386__) || defined (__x86_64__)
mythread_state_flavor_t thread_flavor_array[] = {
    {x86_THREAD_STATE, x86_THREAD_STATE_COUNT},
    {x86_FLOAT_STATE, x86_FLOAT_STATE_COUNT},
    {x86_EXCEPTION_STATE, x86_EXCEPTION_STATE_COUNT},
};
int mynum_flavors = 3;
#else
#error architecture not supported
#endif


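/*
 * State shared with the per-thread iterator callback: the in-kernel
 * header buffer, the current write offset within it, the table of
 * thread-state flavors to collect, and the precomputed size of one
 * thread's state.
 */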
typedef struct {
    vm_offset_t header;
    int hoffset;
    mythread_state_flavor_t *flavors;
    int tstate_size;
    int flavor_count;
} tir_t;

/* XXX should be static */
void collectth_state(thread_t th_act, void *tirp);

/* XXX not in a Mach header anywhere */
kern_return_t thread_getstatus(register thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);

static cpu_type_t process_cpu_type(proc_t proc);
static cpu_type_t process_cpu_subtype(proc_t proc);

#ifdef SECURE_KERNEL
__private_extern__ int do_coredump = 0;     /* default: don't dump cores */
#else
__private_extern__ int do_coredump = 1;     /* default: dump cores */
#endif
__private_extern__ int sugid_coredump = 0;  /* default: but not SUID/SGID binaries */


/*
 * cpu_type returns only the most generic indication of the current CPU;
 * in a core file we want to record the kind of process.
 */

static cpu_type_t
process_cpu_type(proc_t core_proc)
{
    cpu_type_t what_we_think;
#if defined (__i386__) || defined (__x86_64__)
    if (IS_64BIT_PROCESS(core_proc)) {
        what_we_think = CPU_TYPE_X86_64;
    } else {
        what_we_think = CPU_TYPE_I386;
    }
#endif
    return what_we_think;
}

static cpu_type_t
process_cpu_subtype(proc_t core_proc)
{
    cpu_type_t what_we_think;
#if defined (__i386__) || defined (__x86_64__)
    if (IS_64BIT_PROCESS(core_proc)) {
        what_we_think = CPU_SUBTYPE_X86_64_ALL;
    } else {
        what_we_think = CPU_SUBTYPE_I386_ALL;
    }
#endif
    return what_we_think;
}

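/*
 * Called once per thread via task_act_iterate_wth_args().  Appends an
 * LC_THREAD command to the header buffer, followed by each register-state
 * flavor descriptor and the state returned by thread_getstatus().
 */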
void
collectth_state(thread_t th_act, void *tirp)
{
    vm_offset_t header;
    int hoffset, i;
    mythread_state_flavor_t *flavors;
    struct thread_command *tc;
    tir_t *t = (tir_t *)tirp;

    /*
     * Fill in thread command structure.
     */
    header = t->header;
    hoffset = t->hoffset;
    flavors = t->flavors;

    tc = (struct thread_command *)(header + hoffset);
    tc->cmd = LC_THREAD;
    tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
    hoffset += sizeof(struct thread_command);
    /*
     * Follow with a struct thread_state_flavor and
     * the appropriate thread state struct for each
     * thread state flavor.
     */
    for (i = 0; i < t->flavor_count; i++) {
        *(mythread_state_flavor_t *)(header + hoffset) = flavors[i];
        hoffset += sizeof(mythread_state_flavor_t);
        thread_getstatus(th_act, flavors[i].flavor,
            (thread_state_t)(header + hoffset), &flavors[i].count);
        hoffset += flavors[i].count * sizeof(int);
    }

    t->hoffset = hoffset;
}


/*
 * coredump
 *
 * Description: Create a core image on the file "core" for the process
 *              indicated
 *
 * Parameters:  core_proc       Process to dump core [*]
 *
 * Returns:     0               Success
 *              EFAULT          Failed
 *              ENOMEM          Insufficient memory to build the in-kernel
 *                              Mach-O header
 *              (other)         Errors from vnode_open() and the writes to
 *                              the core file are returned unchanged
 *
 * IMPORTANT: This function can only be called on the current process, due
 *            to assumptions below; see variable declaration section for
 *            details.
 */
#define MAX_TSTATE_FLAVORS 10
int
coredump(proc_t core_proc)
{
    /* Begin assumptions that limit us to only the current process */
    vfs_context_t ctx = vfs_context_current();
    vm_map_t map = current_map();
    task_t task = current_task();
    /* End assumptions */
    kauth_cred_t cred = vfs_context_ucred(ctx);
    int error = 0;
    struct vnode_attr va;
    int thread_count, segment_count;
    int command_size, header_size, tstate_size;
    int hoffset;
    off_t foffset;
    vm_map_offset_t vmoffset;
    vm_offset_t header;
    vm_map_size_t vmsize;
    vm_prot_t prot;
    vm_prot_t maxprot;
    vm_inherit_t inherit;
    int error1 = 0;
    char stack_name[MAXCOMLEN + 6];
    char *alloced_name = NULL;
    char *name;
    mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
    vm_size_t mapsize;
    int i;
    uint32_t nesting_depth = 0;
    kern_return_t kret;
    struct vm_region_submap_info_64 vbr;
    mach_msg_type_number_t vbrcount = 0;
    tir_t tir1;
    struct vnode *vp;
    struct mach_header *mh = NULL;          /* protected by is_64 */
    struct mach_header_64 *mh64 = NULL;     /* protected by is_64 */
    int is_64 = 0;
    size_t mach_header_sz = sizeof(struct mach_header);
    size_t segment_command_sz = sizeof(struct segment_command);

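    /*
     * Refuse outright if core dumps are disabled, or if the process has
     * mismatched saved and real user/group IDs and SUID/SGID dumps have
     * not been explicitly enabled.
     */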
    if (do_coredump == 0 ||                 /* Not dumping at all */
        ((sugid_coredump == 0) &&           /* Not dumping SUID/SGID binaries */
         ((kauth_cred_getsvuid(cred) != kauth_cred_getruid(cred)) ||
          (kauth_cred_getsvgid(cred) != kauth_cred_getrgid(cred))))) {

#if CONFIG_AUDIT
        audit_proc_coredump(core_proc, NULL, EFAULT);
#endif
        return (EFAULT);
    }

    if (IS_64BIT_PROCESS(core_proc)) {
        is_64 = 1;
        mach_header_sz = sizeof(struct mach_header_64);
        segment_command_sz = sizeof(struct segment_command_64);
    }

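    /*
     * Give up if the address map is larger than the core file resource
     * limit, then suspend the task so its threads and VM stay reasonably
     * stable while the image is written.
     */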
    mapsize = get_vmmap_size(map);

    if (mapsize >= core_proc->p_rlimit[RLIMIT_CORE].rlim_cur)
        return (EFAULT);
    (void) task_suspend(task);

    MALLOC(alloced_name, char *, MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);

    /* create name according to sysctl'able format string */
    /* if name creation fails, fall back to historical behaviour... */
    if (alloced_name == NULL ||
        proc_core_name(core_proc->p_comm, kauth_cred_getuid(cred),
            core_proc->p_pid, alloced_name, MAXPATHLEN)) {
        snprintf(stack_name, sizeof(stack_name),
            "/cores/core.%d", core_proc->p_pid);
        name = stack_name;
    } else
        name = alloced_name;

    if ((error = vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), S_IRUSR, VNODE_LOOKUP_NOFOLLOW, &vp, ctx)))
        goto out2;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_nlink);
    /* Don't dump to non-regular files or files with links. */
    if (vp->v_type != VREG ||
        vnode_getattr(vp, &va, ctx) || va.va_nlink != 1) {
        error = EFAULT;
        goto out;
    }

    VATTR_INIT(&va);    /* better to do it here than waste more stack in vnode_setsize */
    VATTR_SET(&va, va_data_size, 0);
    vnode_setattr(vp, &va, ctx);
    core_proc->p_acflag |= ACORE;

    /*
     * If the task is modified while dumping the file
     * (e.g., changes in threads or VM), the resulting
     * file will not necessarily be correct.
     */

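    /*
     * Size the header buffer: one segment command per VM map entry plus,
     * for every thread, a thread command and the full set of register
     * state flavors.
     */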
    thread_count = get_task_numacts(task);
    segment_count = get_vmmap_entries(map);     /* XXX */
    tir1.flavor_count = sizeof(thread_flavor_array) / sizeof(mythread_state_flavor_t);
    bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
    tstate_size = 0;
    for (i = 0; i < tir1.flavor_count; i++)
        tstate_size += sizeof(mythread_state_flavor_t) +
            (flavors[i].count * sizeof(int));
    command_size = segment_count * segment_command_sz +
        thread_count * sizeof(struct thread_command) +
        tstate_size * thread_count;

    header_size = command_size + mach_header_sz;

    if (kmem_alloc(kernel_map, &header, (vm_size_t)header_size) != KERN_SUCCESS) {
        error = ENOMEM;
        goto out;
    }

    /*
     * Set up Mach-O header.
     */
    if (is_64) {
        mh64 = (struct mach_header_64 *)header;
        mh64->magic = MH_MAGIC_64;
        mh64->cputype = process_cpu_type(core_proc);
        mh64->cpusubtype = process_cpu_subtype(core_proc);
        mh64->filetype = MH_CORE;
        mh64->ncmds = segment_count + thread_count;
        mh64->sizeofcmds = command_size;
        mh64->reserved = 0;     /* 8 byte alignment */
    } else {
        mh = (struct mach_header *)header;
        mh->magic = MH_MAGIC;
        mh->cputype = process_cpu_type(core_proc);
        mh->cpusubtype = process_cpu_subtype(core_proc);
        mh->filetype = MH_CORE;
        mh->ncmds = segment_count + thread_count;
        mh->sizeofcmds = command_size;
    }

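    /*
     * Load commands follow the Mach header at the front of the file;
     * segment contents are written starting at the next page boundary.
     */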
    hoffset = mach_header_sz;           /* offset into header */
    foffset = round_page(header_size);  /* offset into file */
    vmoffset = MACH_VM_MIN_ADDRESS;     /* offset into VM */

    /*
     * We used to check for an error here; now we try to get
     * as much as we can.
     */
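    /*
     * Walk the task's address space region by region, emitting one
     * segment command per region and copying the region's contents to
     * the file at the corresponding offset.
     */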
    while (segment_count > 0) {
        struct segment_command *sc;
        struct segment_command_64 *sc64;

        /*
         * Get region information for next region.
         */

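        /*
         * mach_vm_region_recurse() is retried with an increased nesting
         * depth whenever it lands on a submap, so the leaf mapping is
         * what ends up being described and dumped.
         */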
        while (1) {
            vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
            if ((kret = mach_vm_region_recurse(map,
                    &vmoffset, &vmsize, &nesting_depth,
                    (vm_region_recurse_info_t)&vbr,
                    &vbrcount)) != KERN_SUCCESS) {
                break;
            }
            /*
             * If we get a valid mapping back, but we're dumping
             * a 32 bit process, and it's over the allowable
             * address space of a 32 bit process, it's the same
             * as if mach_vm_region_recurse() failed.
             */
            if (!(is_64) &&
                (vmoffset + vmsize > VM_MAX_ADDRESS)) {
                kret = KERN_INVALID_ADDRESS;
                break;
            }
            if (vbr.is_submap) {
                nesting_depth++;
                continue;
            } else {
                break;
            }
        }
        if (kret != KERN_SUCCESS)
            break;

        prot = vbr.protection;
        maxprot = vbr.max_protection;
        inherit = vbr.inheritance;
        /*
         * Fill in segment command structure.
         */
        if (is_64) {
            sc64 = (struct segment_command_64 *)(header + hoffset);
            sc64->cmd = LC_SEGMENT_64;
            sc64->cmdsize = sizeof(struct segment_command_64);
            /* segment name is zeroed by kmem_alloc */
            sc64->segname[0] = 0;
            sc64->vmaddr = vmoffset;
            sc64->vmsize = vmsize;
            sc64->fileoff = foffset;
            sc64->filesize = vmsize;
            sc64->maxprot = maxprot;
            sc64->initprot = prot;
            sc64->nsects = 0;
        } else {
            sc = (struct segment_command *)(header + hoffset);
            sc->cmd = LC_SEGMENT;
            sc->cmdsize = sizeof(struct segment_command);
            /* segment name is zeroed by kmem_alloc */
            sc->segname[0] = 0;
            sc->vmaddr = CAST_DOWN_EXPLICIT(vm_offset_t, vmoffset);
            sc->vmsize = CAST_DOWN_EXPLICIT(vm_size_t, vmsize);
            sc->fileoff = CAST_DOWN_EXPLICIT(uint32_t, foffset);    /* will never truncate */
            sc->filesize = CAST_DOWN_EXPLICIT(uint32_t, vmsize);    /* will never truncate */
            sc->maxprot = maxprot;
            sc->initprot = prot;
            sc->nsects = 0;
        }

        /*
         * Write segment out.  Try as hard as possible to
         * get read access to the data.
         */
        if ((prot & VM_PROT_READ) == 0) {
            mach_vm_protect(map, vmoffset, vmsize, FALSE,
                prot | VM_PROT_READ);
        }
        /*
         * Only actually perform write if we can read.
         * Note: if we can't read, then we end up with
         * a hole in the file.
         */
        if ((maxprot & VM_PROT_READ) == VM_PROT_READ
            && vbr.user_tag != VM_MEMORY_IOKIT
            && coredumpok(map, vmoffset)) {

            error = vn_rdwr_64(UIO_WRITE, vp, vmoffset, vmsize, foffset,
                (IS_64BIT_PROCESS(core_proc) ? UIO_USERSPACE64 : UIO_USERSPACE32),
                IO_NOCACHE | IO_NODELOCKED | IO_UNIT, cred, (int64_t *) 0, core_proc);

        }

        hoffset += segment_command_sz;
        foffset += vmsize;
        vmoffset += vmsize;
        segment_count--;
    }

    /*
     * If there are remaining segments which have not been written
     * out because of a break in the loop above, then they were not
     * counted because they exceed the real address space of the
     * executable type: remove them from the header's count.  This is
     * OK, since we are allowed to have a sparse area following the
     * segments.
     */
    if (is_64) {
        mh64->ncmds -= segment_count;
        mh64->sizeofcmds -= segment_count * segment_command_sz;
    } else {
        mh->ncmds -= segment_count;
        mh->sizeofcmds -= segment_count * segment_command_sz;
    }

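    /*
     * Append the per-thread LC_THREAD commands and register state after
     * the segment commands, then write the completed header to the start
     * of the file.
     */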
    tir1.header = header;
    tir1.hoffset = hoffset;
    tir1.flavors = flavors;
    tir1.tstate_size = tstate_size;
    task_act_iterate_wth_args(task, collectth_state, &tir1);

    /*
     * Write out the Mach header at the beginning of the
     * file.  OK to use a 32 bit write for this.
     */
    error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0,
        UIO_SYSSPACE, IO_NOCACHE | IO_NODELOCKED | IO_UNIT, cred, (int *) 0, core_proc);
    kmem_free(kernel_map, header, header_size);
out:
    error1 = vnode_close(vp, FWRITE, ctx);
out2:
#if CONFIG_AUDIT
    audit_proc_coredump(core_proc, name, error);
#endif
    if (alloced_name != NULL)
        FREE(alloced_name, M_TEMP);
    if (error == 0)
        error = error1;

    return (error);
}