/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1991 NeXT Computer, Inc.  All rights reserved.
 *
 *	File:	bsd/kern/kern_core.c
 *
 *	This file contains machine independent code for performing core dumps.
 *
 */
#include <mach/vm_param.h>
#include <mach/thread_status.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/file_internal.h>
#include <sys/kernel.h>

#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h>	/* last */
#include <vm/vm_map.h>		/* current_map() */
#include <mach/mach_vm.h>	/* mach_vm_region_recurse() */
#include <mach/task.h>		/* task_suspend() */
#include <kern/task.h>		/* get_task_numacts() */
/*
 * One thread-state flavor entry: the Mach flavor identifier and the
 * size of that flavor's state structure, counted in 32-bit words
 * (matching the mach_msg_type_number_t convention used by
 * thread_getstatus()).
 */
typedef struct mythread_state_flavor {
	int flavor;	/* the number for this flavor */
	int count;	/* count of ints in this flavor */
} mythread_state_flavor_t;
67 mythread_state_flavor_t thread_flavor_array
[]={
68 {PPC_THREAD_STATE
, PPC_THREAD_STATE_COUNT
},
69 {PPC_FLOAT_STATE
, PPC_FLOAT_STATE_COUNT
},
70 {PPC_EXCEPTION_STATE
, PPC_EXCEPTION_STATE_COUNT
},
71 {PPC_VECTOR_STATE
, PPC_VECTOR_STATE_COUNT
}
74 #elif defined (__i386__)
75 mythread_state_flavor_t thread_flavor_array
[] = {
76 {i386_THREAD_STATE
, i386_THREAD_STATE_COUNT
},
77 {i386_THREAD_FPSTATE
, i386_THREAD_FPSTATE_COUNT
},
78 {i386_THREAD_EXCEPTSTATE
, i386_THREAD_EXCEPTSTATE_COUNT
},
79 {i386_THREAD_CTHREADSTATE
, i386_THREAD_CTHREADSTATE_COUNT
},
80 {i386_NEW_THREAD_STATE
, i386_NEW_THREAD_STATE_COUNT
},
81 {i386_FLOAT_STATE
, i386_FLOAT_STATE_COUNT
},
82 {i386_ISA_PORT_MAP_STATE
, i386_ISA_PORT_MAP_STATE_COUNT
},
83 {i386_V86_ASSIST_STATE
, i386_V86_ASSIST_STATE_COUNT
},
84 {THREAD_SYSCALL_STATE
, i386_THREAD_SYSCALL_STATE_COUNT
}
89 #error architecture not supported
96 mythread_state_flavor_t
*flavors
;
100 /* XXX should be static */
101 void collectth_state(thread_t th_act
, void *tirp
);
103 /* XXX not in a Mach header anywhere */
104 kern_return_t
thread_getstatus(register thread_t act
, int flavor
,
105 thread_state_t tstate
, mach_msg_type_number_t
*count
);
106 void task_act_iterate_wth_args(task_t
, void(*)(thread_t
, void *), void *);
109 __private_extern__
int do_coredump
= 1; /* default: dump cores */
110 __private_extern__
int sugid_coredump
= 0; /* default: but not SGUID binaries */
113 collectth_state(thread_t th_act
, void *tirp
)
117 mythread_state_flavor_t
*flavors
;
118 struct thread_command
*tc
;
119 tir_t
*t
= (tir_t
*)tirp
;
122 * Fill in thread command structure.
125 hoffset
= t
->hoffset
;
126 flavors
= t
->flavors
;
128 tc
= (struct thread_command
*) (header
+ hoffset
);
130 tc
->cmdsize
= sizeof(struct thread_command
)
132 hoffset
+= sizeof(struct thread_command
);
134 * Follow with a struct thread_state_flavor and
135 * the appropriate thread state struct for each
136 * thread state flavor.
138 for (i
= 0; i
< mynum_flavors
; i
++) {
139 *(mythread_state_flavor_t
*)(header
+hoffset
) =
141 hoffset
+= sizeof(mythread_state_flavor_t
);
142 thread_getstatus(th_act
, flavors
[i
].flavor
,
143 (thread_state_t
)(header
+hoffset
),
145 hoffset
+= flavors
[i
].count
*sizeof(int);
148 t
->hoffset
= hoffset
;
/*
 * Create a core image on the file "core".
 */
/* Upper bound on thread-state flavor table entries copied onto the
 * stack in coredump(); must be >= mynum_flavors for every arch. */
#define MAX_TSTATE_FLAVORS	10
156 coredump(struct proc
*p
)
159 kauth_cred_t cred
= kauth_cred_get();
160 struct vnode_attr va
;
161 struct vfs_context context
;
163 int thread_count
, segment_count
;
164 int command_size
, header_size
, tstate_size
;
167 vm_map_offset_t vmoffset
;
169 vm_map_size_t vmsize
;
172 vm_inherit_t inherit
;
175 char core_name
[MAXCOMLEN
+6];
177 mythread_state_flavor_t flavors
[MAX_TSTATE_FLAVORS
];
180 int nesting_depth
= 0;
182 struct vm_region_submap_info_64 vbr
;
186 struct mach_header
*mh
;
187 struct mach_header_64
*mh64
;
189 size_t mach_header_sz
= sizeof(struct mach_header
);
190 size_t segment_command_sz
= sizeof(struct segment_command
);
192 if (do_coredump
== 0 || /* Not dumping at all */
193 ( (sugid_coredump
== 0) && /* Not dumping SUID/SGID binaries */
194 ( (cred
->cr_svuid
!= cred
->cr_ruid
) ||
195 (cred
->cr_svgid
!= cred
->cr_rgid
)))) {
200 if (IS_64BIT_PROCESS(p
)) {
202 mach_header_sz
= sizeof(struct mach_header_64
);
203 segment_command_sz
= sizeof(struct segment_command_64
);
206 task
= current_task();
208 mapsize
= get_vmmap_size(map
);
210 if (mapsize
>= p
->p_rlimit
[RLIMIT_CORE
].rlim_cur
)
212 (void) task_suspend(task
);
214 /* create name according to sysctl'able format string */
215 name
= proc_core_name(p
->p_comm
, kauth_cred_getuid(cred
), p
->p_pid
);
217 /* if name creation fails, fall back to historical behaviour... */
219 sprintf(core_name
, "/cores/core.%d", p
->p_pid
);
223 context
.vc_ucred
= cred
;
225 if ((error
= vnode_open(name
, (O_CREAT
| FWRITE
| O_NOFOLLOW
), S_IRUSR
, VNODE_LOOKUP_NOFOLLOW
, &vp
, &context
)))
229 VATTR_WANTED(&va
, va_nlink
);
230 /* Don't dump to non-regular files or files with links. */
231 if (vp
->v_type
!= VREG
||
232 vnode_getattr(vp
, &va
, &context
) || va
.va_nlink
!= 1) {
237 VATTR_INIT(&va
); /* better to do it here than waste more stack in vnode_setsize */
238 VATTR_SET(&va
, va_data_size
, 0);
239 vnode_setattr(vp
, &va
, &context
);
240 p
->p_acflag
|= ACORE
;
243 * If the task is modified while dumping the file
244 * (e.g., changes in threads or VM, the resulting
245 * file will not necessarily be correct.
248 thread_count
= get_task_numacts(task
);
249 segment_count
= get_vmmap_entries(map
); /* XXX */
250 bcopy(thread_flavor_array
,flavors
,sizeof(thread_flavor_array
));
252 for (i
= 0; i
< mynum_flavors
; i
++)
253 tstate_size
+= sizeof(mythread_state_flavor_t
) +
254 (flavors
[i
].count
* sizeof(int));
256 command_size
= segment_count
* segment_command_sz
+
257 thread_count
*sizeof(struct thread_command
) +
258 tstate_size
*thread_count
;
260 header_size
= command_size
+ mach_header_sz
;
262 (void) kmem_alloc(kernel_map
,
263 (vm_offset_t
*)&header
,
264 (vm_size_t
)header_size
);
267 * Set up Mach-O header.
270 mh64
= (struct mach_header_64
*)header
;
271 mh64
->magic
= MH_MAGIC_64
;
272 mh64
->cputype
= cpu_type();
273 mh64
->cpusubtype
= cpu_subtype();
274 mh64
->filetype
= MH_CORE
;
275 mh64
->ncmds
= segment_count
+ thread_count
;
276 mh64
->sizeofcmds
= command_size
;
277 mh64
->reserved
= 0; /* 8 byte alignment */
279 mh
= (struct mach_header
*)header
;
280 mh
->magic
= MH_MAGIC
;
281 mh
->cputype
= cpu_type();
282 mh
->cpusubtype
= cpu_subtype();
283 mh
->filetype
= MH_CORE
;
284 mh
->ncmds
= segment_count
+ thread_count
;
285 mh
->sizeofcmds
= command_size
;
288 hoffset
= mach_header_sz
; /* offset into header */
289 foffset
= round_page(header_size
); /* offset into file */
290 vmoffset
= MACH_VM_MIN_ADDRESS
; /* offset into VM */
293 * We use to check for an error, here, now we try and get
296 while (segment_count
> 0) {
297 struct segment_command
*sc
;
298 struct segment_command_64
*sc64
;
301 * Get region information for next region.
305 vbrcount
= VM_REGION_SUBMAP_INFO_COUNT_64
;
306 if((kret
= mach_vm_region_recurse(map
,
307 &vmoffset
, &vmsize
, &nesting_depth
,
308 (vm_region_recurse_info_t
)&vbr
,
309 &vbrcount
)) != KERN_SUCCESS
) {
313 * If we get a valid mapping back, but we're dumping
314 * a 32 bit process, and it's over the allowable
315 * address space of a 32 bit process, it's the same
316 * as if mach_vm_region_recurse() failed.
319 (vmoffset
+ vmsize
> VM_MAX_ADDRESS
)) {
320 kret
= KERN_INVALID_ADDRESS
;
330 if(kret
!= KERN_SUCCESS
)
333 prot
= vbr
.protection
;
334 maxprot
= vbr
.max_protection
;
335 inherit
= vbr
.inheritance
;
337 * Fill in segment command structure.
340 sc64
= (struct segment_command_64
*)(header
+ hoffset
);
341 sc64
->cmd
= LC_SEGMENT_64
;
342 sc64
->cmdsize
= sizeof(struct segment_command_64
);
343 /* segment name is zeroed by kmem_alloc */
344 sc64
->segname
[0] = 0;
345 sc64
->vmaddr
= vmoffset
;
346 sc64
->vmsize
= vmsize
;
347 sc64
->fileoff
= foffset
;
348 sc64
->filesize
= vmsize
;
349 sc64
->maxprot
= maxprot
;
350 sc64
->initprot
= prot
;
353 sc
= (struct segment_command
*) (header
+ hoffset
);
354 sc
->cmd
= LC_SEGMENT
;
355 sc
->cmdsize
= sizeof(struct segment_command
);
356 /* segment name is zeroed by kmem_alloc */
358 sc
->vmaddr
= CAST_DOWN(vm_offset_t
,vmoffset
);
359 sc
->vmsize
= CAST_DOWN(vm_size_t
,vmsize
);
360 sc
->fileoff
= CAST_DOWN(uint32_t,foffset
);
361 sc
->filesize
= CAST_DOWN(uint32_t,vmsize
);
362 sc
->maxprot
= maxprot
;
368 * Write segment out. Try as hard as possible to
369 * get read access to the data.
371 if ((prot
& VM_PROT_READ
) == 0) {
372 mach_vm_protect(map
, vmoffset
, vmsize
, FALSE
,
376 * Only actually perform write if we can read.
377 * Note: if we can't read, then we end up with
378 * a hole in the file.
380 if ((maxprot
& VM_PROT_READ
) == VM_PROT_READ
381 && vbr
.user_tag
!= VM_MEMORY_IOKIT
382 && coredumpok(map
,vmoffset
)) {
383 vm_map_size_t tmp_vmsize
= vmsize
;
384 off_t xfer_foffset
= foffset
;
386 //LP64todo - works around vn_rdwr_64() 2G limit
387 while (tmp_vmsize
> 0) {
388 vm_map_size_t xfer_vmsize
= tmp_vmsize
;
389 if (xfer_vmsize
> INT_MAX
)
390 xfer_vmsize
= INT_MAX
;
391 error
= vn_rdwr_64(UIO_WRITE
, vp
,
392 vmoffset
, xfer_vmsize
, xfer_foffset
,
393 (IS_64BIT_PROCESS(p
) ? UIO_USERSPACE64
: UIO_USERSPACE32
),
394 IO_NODELOCKED
|IO_UNIT
, cred
, (int *) 0, p
);
395 tmp_vmsize
-= xfer_vmsize
;
396 xfer_foffset
+= xfer_vmsize
;
400 hoffset
+= segment_command_sz
;
407 * If there are remaining segments which have not been written
408 * out because break in the loop above, then they were not counted
409 * because they exceed the real address space of the executable
410 * type: remove them from the header's count. This is OK, since
411 * we are allowed to have a sparse area following the segments.
414 mh64
->ncmds
-= segment_count
;
416 mh
->ncmds
-= segment_count
;
419 tir1
.header
= header
;
420 tir1
.hoffset
= hoffset
;
421 tir1
.flavors
= flavors
;
422 tir1
.tstate_size
= tstate_size
;
423 task_act_iterate_wth_args(task
, collectth_state
,&tir1
);
426 * Write out the Mach header at the beginning of the
427 * file. OK to use a 32 bit write for this.
429 error
= vn_rdwr(UIO_WRITE
, vp
, (caddr_t
)header
, header_size
, (off_t
)0,
430 UIO_SYSSPACE32
, IO_NODELOCKED
|IO_UNIT
, cred
, (int *) 0, p
);
431 kmem_free(kernel_map
, header
, header_size
);
433 error1
= vnode_close(vp
, FWRITE
, &context
);