/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved.
 *
 *	File:	bsd/kern/kern_core.c
 *
 *	This file contains machine independent code for performing core dumps.
 *
 */
#include <mach/vm_param.h>
#include <mach/thread_status.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/file_internal.h>
#include <sys/kernel.h>

#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h>	/* last */
#include <vm/vm_map.h>		/* current_map() */
#include <mach/mach_vm.h>	/* mach_vm_region_recurse() */
#include <mach/task.h>		/* task_suspend() */
#include <kern/task.h>		/* get_task_numacts() */
typedef struct {
	int			flavor;	/* the number for this flavor */
	mach_msg_type_number_t	count;	/* count of ints in this flavor */
} mythread_state_flavor_t;
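
/*
 * Sketch of how these tables are consumed (derived from the code below): each
 * entry pairs a thread_getstatus() flavor constant with the size of that
 * flavor's state, counted in 32-bit words.  collectth_state() copies the pair
 * itself into the core header and follows it with `count` ints of the
 * corresponding register state for the thread being dumped.
 */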
#if defined (__ppc__)
/* 64 bit */
mythread_state_flavor_t thread_flavor_array64[] = {
		{PPC_THREAD_STATE64, PPC_THREAD_STATE64_COUNT},
		{PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT},
		{PPC_EXCEPTION_STATE64, PPC_EXCEPTION_STATE64_COUNT},
		{PPC_VECTOR_STATE, PPC_VECTOR_STATE_COUNT}
		};
/* 32 bit */
mythread_state_flavor_t thread_flavor_array[] = {
		{PPC_THREAD_STATE, PPC_THREAD_STATE_COUNT},
		{PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT},
		{PPC_EXCEPTION_STATE, PPC_EXCEPTION_STATE_COUNT},
		{PPC_VECTOR_STATE, PPC_VECTOR_STATE_COUNT}
		};
#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array[] = {
		{x86_THREAD_STATE, x86_THREAD_STATE_COUNT},
		{x86_FLOAT_STATE, x86_FLOAT_STATE_COUNT},
		{x86_EXCEPTION_STATE, x86_EXCEPTION_STATE_COUNT},
		};
#elif defined (__arm__)
mythread_state_flavor_t thread_flavor_array[] = {
		{ARM_THREAD_STATE, ARM_THREAD_STATE_COUNT},
		{ARM_VFP_STATE, ARM_VFP_STATE_COUNT},
		{ARM_EXCEPTION_STATE, ARM_EXCEPTION_STATE_COUNT}
		};

#else
#error architecture not supported
#endif
typedef struct {
	vm_offset_t		header;
	int			hoffset;
	mythread_state_flavor_t	*flavors;
	int			tstate_size;
	int			flavor_count;
} tir_t;
/* XXX should be static */
void collectth_state(thread_t th_act, void *tirp);

/* XXX not in a Mach header anywhere */
kern_return_t thread_getstatus(register thread_t act, int flavor,
		thread_state_t tstate, mach_msg_type_number_t *count);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
__private_extern__ int do_coredump = 1;		/* default: dump cores */
__private_extern__ int sugid_coredump = 0;	/* default: but not SUID/SGID binaries */
void
collectth_state(thread_t th_act, void *tirp)
{
	vm_offset_t	header;
	int		hoffset, i;
	mythread_state_flavor_t	*flavors;
	struct thread_command	*tc;
	tir_t		*t = (tir_t *)tirp;
	/*
	 *	Fill in thread command structure.
	 */
	header = t->header;
	hoffset = t->hoffset;
	flavors = t->flavors;
	tc = (struct thread_command *)(header + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
	hoffset += sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < t->flavor_count; i++) {
		*(mythread_state_flavor_t *)(header + hoffset) =
			flavors[i];
		hoffset += sizeof(mythread_state_flavor_t);
		thread_getstatus(th_act, flavors[i].flavor,
				(thread_state_t)(header + hoffset),
				&flavors[i].count);
		hoffset += flavors[i].count * sizeof(int);
	}

	t->hoffset = hoffset;
}
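
/*
 * Per-thread record layout produced above (sketch): starting at t->hoffset,
 * a struct thread_command (cmd LC_THREAD, cmdsize covering the state that
 * follows), then for each flavor a mythread_state_flavor_t header followed by
 * flavors[i].count ints of register state returned by thread_getstatus().
 */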
/*
 * Description:	Create a core image on the file "core" for the process
 *		indicated.
 *
 * Parameters:	core_proc			Process to dump core [*]
 *
 * IMPORTANT:	This function can only be called on the current process, due
 *		to assumptions below; see the variable declaration section
 *		for details.
 */
#define	MAX_TSTATE_FLAVORS	10

int
coredump(proc_t core_proc)
{
/* Begin assumptions that limit us to only the current process */
	vfs_context_t	ctx = vfs_context_current();
	vm_map_t	map = current_map();
	task_t		task = current_task();
/* End assumptions */
	kauth_cred_t	cred = vfs_context_ucred(ctx);
	int		error = 0;
	struct vnode_attr va;
	int		thread_count, segment_count;
	int		command_size, header_size, tstate_size;
	int		hoffset;
	off_t		foffset;
	vm_map_offset_t	vmoffset;
	vm_offset_t	header;
	vm_map_size_t	vmsize;
	vm_prot_t	prot;
	vm_prot_t	maxprot;
	vm_inherit_t	inherit;
	int		error1 = 0;
	char		stack_name[MAXCOMLEN+6];
	char		*alloced_name = NULL;
	char		*name;
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	vm_size_t	mapsize;
	int		i;
	uint32_t	nesting_depth = 0;
	kern_return_t	kret;
	struct vm_region_submap_info_64 vbr;
	mach_msg_type_number_t	vbrcount = 0;
	tir_t		tir1;
	struct vnode	*vp;
	struct mach_header	*mh = NULL;	/* protected by is_64 */
	struct mach_header_64	*mh64 = NULL;	/* protected by is_64 */
	int		is_64 = 0;
	size_t		mach_header_sz = sizeof(struct mach_header);
	size_t		segment_command_sz = sizeof(struct segment_command);
	if (do_coredump == 0 ||		/* Not dumping at all */
	    ( (sugid_coredump == 0) &&	/* Not dumping SUID/SGID binaries */
	      ( (cred->cr_svuid != cred->cr_ruid) ||
	        (cred->cr_svgid != cred->cr_rgid)))) {

		return (EFAULT);
	}
	if (IS_64BIT_PROCESS(core_proc)) {
		is_64 = 1;
		mach_header_sz = sizeof(struct mach_header_64);
		segment_command_sz = sizeof(struct segment_command_64);
	}
	mapsize = get_vmmap_size(map);

	if (mapsize >= core_proc->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFAULT);
	(void) task_suspend(task);
	MALLOC(alloced_name, char *, MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);

	/* create name according to sysctl'able format string */
	/* if name creation fails, fall back to historical behaviour... */
	if (proc_core_name(core_proc->p_comm, kauth_cred_getuid(cred),
			core_proc->p_pid, alloced_name, MAXPATHLEN)) {
		snprintf(stack_name, sizeof(stack_name),
				"/cores/core.%d", core_proc->p_pid);
		name = stack_name;
	} else
		name = alloced_name;
	if ((error = vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), S_IRUSR, VNODE_LOOKUP_NOFOLLOW, &vp, ctx)))
		goto out2;
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_nlink);
	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    vnode_getattr(vp, &va, ctx) || va.va_nlink != 1) {
		error = EFAULT;
		goto out;
	}
	VATTR_INIT(&va);	/* better to do it here than waste more stack in vnode_setsize */
	VATTR_SET(&va, va_data_size, 0);
	vnode_setattr(vp, &va, ctx);
	core_proc->p_acflag |= ACORE;
	/*
	 *	If the task is modified while dumping the file
	 *	(e.g., changes in threads or VM), the resulting
	 *	file will not necessarily be correct.
	 */
	thread_count = get_task_numacts(task);
	segment_count = get_vmmap_entries(map);	/* XXX */
#if defined (__ppc__)
	if (is_64) {
		tir1.flavor_count = sizeof(thread_flavor_array64)/sizeof(mythread_state_flavor_t);
		bcopy(thread_flavor_array64, flavors, sizeof(thread_flavor_array64));
	} else {
#endif /* __ppc__ */
		tir1.flavor_count = sizeof(thread_flavor_array)/sizeof(mythread_state_flavor_t);
		bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
#if defined (__ppc__)
	}
#endif /* __ppc__ */
	tstate_size = 0;
	for (i = 0; i < tir1.flavor_count; i++)
		tstate_size += sizeof(mythread_state_flavor_t) +
			(flavors[i].count * sizeof(int));

	command_size = segment_count * segment_command_sz +
		thread_count * sizeof(struct thread_command) +
		tstate_size * thread_count;

	header_size = command_size + mach_header_sz;
	(void) kmem_alloc(kernel_map,
			  (vm_offset_t *)&header,
			  (vm_size_t)header_size);
	/*
	 *	Set up Mach-O header.
	 */
	if (is_64) {
		mh64 = (struct mach_header_64 *)header;
		mh64->magic = MH_MAGIC_64;
		mh64->cputype = cpu_type();
		mh64->cpusubtype = cpu_subtype();
		mh64->filetype = MH_CORE;
		mh64->ncmds = segment_count + thread_count;
		mh64->sizeofcmds = command_size;
		mh64->reserved = 0;		/* 8 byte alignment */
	} else {
		mh = (struct mach_header *)header;
		mh->magic = MH_MAGIC;
		mh->cputype = cpu_type();
		mh->cpusubtype = cpu_subtype();
		mh->filetype = MH_CORE;
		mh->ncmds = segment_count + thread_count;
		mh->sizeofcmds = command_size;
	}
	hoffset = mach_header_sz;		/* offset into header */
	foffset = round_page(header_size);	/* offset into file */
	vmoffset = MACH_VM_MIN_ADDRESS;		/* offset into VM */
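
	/*
	 * Layout sketch implied by the offsets above: the Mach-O header and
	 * all load commands are built in the `header` buffer and written at
	 * file offset 0; raw segment contents start at round_page(header_size),
	 * with each segment command recording its own fileoff as foffset
	 * advances through the file and vmoffset walks the task address space.
	 */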
	/*
	 * We used to check for an error here; now we try to get
	 * as much as we can.
	 */
	while (segment_count > 0) {
		struct segment_command		*sc;
		struct segment_command_64	*sc64;
		/*
		 *	Get region information for next region.
		 */
		vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
		if ((kret = mach_vm_region_recurse(map,
				&vmoffset, &vmsize, &nesting_depth,
				(vm_region_recurse_info_t)&vbr,
				&vbrcount)) != KERN_SUCCESS) {
			break;
		}
		/*
		 * If we get a valid mapping back, but we're dumping
		 * a 32 bit process, and it's over the allowable
		 * address space of a 32 bit process, it's the same
		 * as if mach_vm_region_recurse() failed.
		 */
		if (!(is_64) &&
		    (vmoffset + vmsize > VM_MAX_ADDRESS)) {
			kret = KERN_INVALID_ADDRESS;
		}

		if (kret != KERN_SUCCESS)
			break;
		prot = vbr.protection;
		maxprot = vbr.max_protection;
		inherit = vbr.inheritance;
		/*
		 *	Fill in segment command structure.
		 */
		if (is_64) {
			sc64 = (struct segment_command_64 *)(header + hoffset);
			sc64->cmd = LC_SEGMENT_64;
			sc64->cmdsize = sizeof(struct segment_command_64);
			/* segment name is zeroed by kmem_alloc */
			sc64->segname[0] = 0;
			sc64->vmaddr = vmoffset;
			sc64->vmsize = vmsize;
			sc64->fileoff = foffset;
			sc64->filesize = vmsize;
			sc64->maxprot = maxprot;
			sc64->initprot = prot;
		} else {
			sc = (struct segment_command *)(header + hoffset);
			sc->cmd = LC_SEGMENT;
			sc->cmdsize = sizeof(struct segment_command);
			/* segment name is zeroed by kmem_alloc */
			sc->segname[0] = 0;
			sc->vmaddr = CAST_DOWN(vm_offset_t, vmoffset);
			sc->vmsize = CAST_DOWN(vm_size_t, vmsize);
			sc->fileoff = CAST_DOWN(uint32_t, foffset);
			sc->filesize = CAST_DOWN(uint32_t, vmsize);
			sc->maxprot = maxprot;
			sc->initprot = prot;
		}
		/*
		 *	Write segment out.  Try as hard as possible to
		 *	get read access to the data.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			mach_vm_protect(map, vmoffset, vmsize, FALSE,
					prot | VM_PROT_READ);
		}
		/*
		 *	Only actually perform write if we can read.
		 *	Note: if we can't read, then we end up with
		 *	a hole in the file.
		 */
		if ((maxprot & VM_PROT_READ) == VM_PROT_READ
			&& vbr.user_tag != VM_MEMORY_IOKIT
			&& coredumpok(map, vmoffset)) {
			vm_map_size_t	tmp_vmsize = vmsize;
			off_t		xfer_foffset = foffset;

			//LP64todo - works around vn_rdwr_64() 2G limit
			while (tmp_vmsize > 0) {
				vm_map_size_t	xfer_vmsize = tmp_vmsize;
				if (xfer_vmsize > INT_MAX)
					xfer_vmsize = INT_MAX;
				error = vn_rdwr_64(UIO_WRITE, vp,
					vmoffset, xfer_vmsize, xfer_foffset,
					(IS_64BIT_PROCESS(core_proc) ? UIO_USERSPACE64 : UIO_USERSPACE32),
					IO_NODELOCKED|IO_UNIT, cred, (int *) 0, core_proc);
				tmp_vmsize -= xfer_vmsize;
				xfer_foffset += xfer_vmsize;
			}
		}

		hoffset += segment_command_sz;
		foffset += vmsize;
		vmoffset += vmsize;
		segment_count--;
	}
	/*
	 * If there are remaining segments which have not been written
	 * out because of a break in the loop above, then they were not
	 * counted because they exceed the real address space of the
	 * executable type: remove them from the header's count.  This is
	 * OK, since we are allowed to have a sparse area following the
	 * segments.
	 */
	if (is_64) {
		mh64->ncmds -= segment_count;
		mh64->sizeofcmds -= segment_count * segment_command_sz;
	} else {
		mh->ncmds -= segment_count;
		mh->sizeofcmds -= segment_count * segment_command_sz;
	}
	tir1.header = header;
	tir1.hoffset = hoffset;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;
	task_act_iterate_wth_args(task, collectth_state, &tir1);
	/*
	 *	Write out the Mach header at the beginning of the
	 *	file.  OK to use a 32 bit write for this.
	 */
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0,
			UIO_SYSSPACE32, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, core_proc);
	kmem_free(kernel_map, header, header_size);
out:
	error1 = vnode_close(vp, FWRITE, ctx);
out2:
	if (alloced_name != NULL)
		FREE(alloced_name, M_TEMP);
	if (error == 0)
		error = error1;

	return (error);
}