/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1991 NeXT Computer, Inc.  All rights reserved.
 *
 *	File:	bsd/kern/kern_core.c
 *
 *	This file contains machine independent code for performing core dumps.
 *
 * HISTORY
 * 16-Feb-91  Mike DeMoney (mike@next.com)
 *	Massaged into MI form from m68k/core.c.
 */

#include <mach/vm_param.h>
#include <mach/thread_status.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>

#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>

typedef struct {
        int     flavor;                 /* the number for this flavor */
        int     count;                  /* count of ints in this flavor */
} mythread_state_flavor_t;

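/*
 * Per-architecture table of the thread-state flavors recorded for each
 * thread, together with the size of each flavor in 32-bit words.  One
 * (flavor, count) pair precedes every block of register state written
 * into the core file.
 */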
#if defined (__ppc__)

mythread_state_flavor_t thread_flavor_array[] = {
        {PPC_THREAD_STATE, PPC_THREAD_STATE_COUNT},
        {PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT},
        {PPC_EXCEPTION_STATE, PPC_EXCEPTION_STATE_COUNT}
};
int mynum_flavors = 3;
#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array[] = {
        {i386_THREAD_STATE, i386_THREAD_STATE_COUNT},
        {i386_THREAD_FPSTATE, i386_THREAD_FPSTATE_COUNT},
        {i386_THREAD_EXCEPTSTATE, i386_THREAD_EXCEPTSTATE_COUNT},
        {i386_THREAD_CTHREADSTATE, i386_THREAD_CTHREADSTATE_COUNT},
        {i386_NEW_THREAD_STATE, i386_NEW_THREAD_STATE_COUNT},
        {i386_FLOAT_STATE, i386_FLOAT_STATE_COUNT},
        {i386_ISA_PORT_MAP_STATE, i386_ISA_PORT_MAP_STATE_COUNT},
        {i386_V86_ASSIST_STATE, i386_V86_ASSIST_STATE_COUNT},
        {THREAD_SYSCALL_STATE, i386_THREAD_SYSCALL_STATE_COUNT}
};
int mynum_flavors = 9;

#else
#error architecture not supported
#endif

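/*
 * Iteration context handed to collectth_state() by
 * task_act_iterate_wth_args(): the wired header buffer, the current
 * write offset into it, the flavor table, and the per-thread state size.
 */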
typedef struct {
        vm_offset_t     header;
        int             hoffset;
        mythread_state_flavor_t *flavors;
        int             tstate_size;
} tir_t;

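/*
 * Append one LC_THREAD command for the given thread activation: a
 * thread_command header followed, for each flavor, by its
 * (flavor, count) pair and the raw state returned by thread_getstatus().
 * t->hoffset is advanced past everything written.
 */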
void
collectth_state(thread_act_t th_act, tir_t *t)
{
        vm_offset_t     header;
        int             hoffset, i;
        mythread_state_flavor_t *flavors;
        struct thread_command   *tc;

        /*
         *      Fill in thread command structure.
         */
        header = t->header;
        hoffset = t->hoffset;
        flavors = t->flavors;

        tc = (struct thread_command *)(header + hoffset);
        tc->cmd = LC_THREAD;
        tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
        hoffset += sizeof(struct thread_command);

        /*
         *      Follow with a struct thread_state_flavor and
         *      the appropriate thread state struct for each
         *      thread state flavor.
         */
        for (i = 0; i < mynum_flavors; i++) {
                *(mythread_state_flavor_t *)(header + hoffset) = flavors[i];
                hoffset += sizeof(mythread_state_flavor_t);
                thread_getstatus(th_act, flavors[i].flavor,
                                 (thread_state_t *)(header + hoffset),
                                 &flavors[i].count);
                hoffset += flavors[i].count * sizeof(int);
        }

        t->hoffset = hoffset;
}

/*
 * Create a core image on the file "core".
 */
#define MAX_TSTATE_FLAVORS      10

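/*
 * Layout of the resulting core file: a mach_header plus all load commands
 * (one LC_SEGMENT per VM region, one LC_THREAD per thread) are assembled
 * in a wired kernel buffer and written at offset 0; the contents of each
 * readable region follow, starting at round_page(header_size), with every
 * segment command's fileoff pointing at its data.
 */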
int
coredump(p)
        register struct proc *p;
{
        register struct pcred *pcred = p->p_cred;
        register struct ucred *cred = pcred->pc_ucred;
        int             error = 0;
        int             error1;
        struct nameidata nd;
        struct vattr    vattr;
        struct vnode    *vp;
        vm_map_t        map;
        task_t          task;
        int             thread_count, segment_count;
        int             command_size, header_size, tstate_size;
        int             hoffset, foffset, vmoffset;
        vm_offset_t     header;
        struct machine_slot     *ms;
        struct mach_header      *mh;
        struct segment_command  *sc;
        struct thread_command   *tc;
        vm_size_t       size;
        vm_prot_t       prot;
        vm_prot_t       maxprot;
        vm_inherit_t    inherit;
        char            core_name[MAXCOMLEN+6];
        mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
        vm_size_t       nflavors, mapsize;
        int             i;
        int             nesting_depth = 0;
        kern_return_t   kret;
        struct vm_region_submap_info_64 vbr;
        int             vbrcount = 0;
        tir_t           tir1;
        extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);     /* temp fix */

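        /*
         * Refuse to dump if the saved and real IDs differ (the image may
         * hold privileged data) or if the address space is larger than the
         * RLIMIT_CORE limit; otherwise suspend the task so its user-level
         * state stays put while the dump is taken.
         */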
        if (pcred->p_svuid != pcred->p_ruid || pcred->p_svgid != pcred->p_rgid)
                return (EFAULT);

        task = current_task();
        map = current_map();
        mapsize = get_vmmap_size(map);

        if (mapsize >= p->p_rlimit[RLIMIT_CORE].rlim_cur)
                return (EFAULT);
        (void) task_suspend(task);

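        /*
         * The core file is always created at /cores/core.<pid>; the path
         * is built in kernel space, so the lookup uses UIO_SYSSPACE.
         */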
        sprintf(core_name, "/cores/core.%d", p->p_pid);
        NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, core_name, p);
        if ((error = vn_open(&nd, O_CREAT | FWRITE, S_IRUSR)))
                return (error);
        vp = nd.ni_vp;

        /* Don't dump to non-regular files or files with links. */
        if (vp->v_type != VREG ||
            VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
                error = EFAULT;
                goto out;
        }

        VATTR_NULL(&vattr);
        vattr.va_size = 0;
        VOP_LEASE(vp, p, cred, LEASE_WRITE);
        VOP_SETATTR(vp, &vattr, cred, p);
        p->p_acflag |= ACORE;

        /*
         *      If the task is modified while dumping the file
         *      (e.g., changes in threads or VM), the resulting
         *      file will not necessarily be correct.
         */
        thread_count = get_task_numacts(task);
        segment_count = get_vmmap_entries(map); /* XXX */

        /*
         *      nflavors here is really the number of ints in flavors
         *      to meet the thread_getstatus() calling convention.
         */
#if 0
        nflavors = sizeof(flavors) / sizeof(int);
        if (thread_getstatus(current_thread(), THREAD_STATE_FLAVOR_LIST,
                             (thread_state_t)(flavors),
                             &nflavors) != KERN_SUCCESS)
                panic("core flavor list");
        /* now convert to number of flavors */
        nflavors /= sizeof(mythread_state_flavor_t) / sizeof(int);
#else
        nflavors = mynum_flavors;
        bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
#endif
        tstate_size = 0;
        for (i = 0; i < nflavors; i++)
                tstate_size += sizeof(mythread_state_flavor_t) +
                        (flavors[i].count * sizeof(int));

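        /*
         * tstate_size now covers the flavor headers plus register state
         * appended to each thread command; it feeds both the command_size
         * estimate below and the cmdsize of every LC_THREAD written later.
         */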
        command_size = segment_count * sizeof(struct segment_command) +
                thread_count * sizeof(struct thread_command) +
                tstate_size * thread_count;

        header_size = command_size + sizeof(struct mach_header);

        (void) kmem_alloc_wired(kernel_map,
                                (vm_offset_t *)&header,
                                (vm_size_t)header_size);

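        /*
         * The wired buffer accumulates the Mach header and every load
         * command; it is flushed to the start of the file with a single
         * vn_rdwr() once all commands have been filled in.
         */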
        /*
         *      Set up Mach-O header.
         */
        mh = (struct mach_header *) header;
        ms = &machine_slot[cpu_number()];
        mh->magic = MH_MAGIC;
        mh->cputype = ms->cpu_type;
        mh->cpusubtype = ms->cpu_subtype;
        mh->filetype = MH_CORE;
        mh->ncmds = segment_count + thread_count;
        mh->sizeofcmds = command_size;

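        /*
         * Two passes follow: the segment pass walks the task's address map,
         * writing region contents to the file and an LC_SEGMENT command into
         * the header buffer for each region; the thread pass then appends an
         * LC_THREAD command for every thread activation.
         */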
        hoffset = sizeof(struct mach_header);   /* offset into header */
        foffset = round_page(header_size);      /* offset into file */
        vmoffset = VM_MIN_ADDRESS;              /* offset into VM */

        /*
         * We used to check for an error here; now we try to get
         * as much of the address space as we can.
         */
        while (segment_count > 0) {
                /*
                 *      Get region information for next region.
                 */
                while (1) {
                        vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
                        if ((kret = vm_region_recurse_64(map,
                                        &vmoffset, &size, &nesting_depth,
                                        &vbr, &vbrcount)) != KERN_SUCCESS)
                                break;
                        if (vbr.is_submap) {
                                nesting_depth++;
                                continue;
                        } else
                                break;
                }
                if (kret != KERN_SUCCESS)
                        break;

                prot = vbr.protection;
                maxprot = vbr.max_protection;
                inherit = vbr.inheritance;

                /*
                 *      Fill in segment command structure.
                 */
                sc = (struct segment_command *)(header + hoffset);
                sc->cmd = LC_SEGMENT;
                sc->cmdsize = sizeof(struct segment_command);
                /* segment name is zeroed by kmem_alloc */
                sc->vmaddr = vmoffset;
                sc->vmsize = size;
                sc->fileoff = foffset;
                sc->filesize = size;
                sc->maxprot = maxprot;
                sc->initprot = prot;
                sc->nsects = 0;

                /*
                 *      Write segment out.  Try as hard as possible to
                 *      get read access to the data.
                 */
                if ((prot & VM_PROT_READ) == 0) {
                        vm_protect(map, vmoffset, size, FALSE,
                                   prot | VM_PROT_READ);
                }
                /*
                 *      Only actually perform write if we can read.
                 *      Note: if we can't read, then we end up with
                 *      a hole in the file.
                 */
                if ((maxprot & VM_PROT_READ) == VM_PROT_READ &&
                    vbr.user_tag != VM_MEMORY_IOKIT &&
                    coredumpok(map, vmoffset)) {
                        error = vn_rdwr(UIO_WRITE, vp, (caddr_t)vmoffset, size, foffset,
                                        UIO_USERSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p);
                }

                hoffset += sizeof(struct segment_command);
                foffset += size;
                vmoffset += size;
                segment_count--;
        }

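        /*
         * Segment pass complete; record per-thread register state.  The
         * disabled loop below is the older hand-rolled walk of the thread
         * list; the live path uses task_act_iterate_wth_args() further down.
         */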
#if 0   /* superseded by the task_act_iterate_wth_args() walk below */
        thread = (thread_t) queue_first(&task->thread_list);
        while (thread_count > 0) {
                /*
                 *      Fill in thread command structure.
                 */
                tc = (struct thread_command *)(header + hoffset);
                tc->cmd = LC_THREAD;
                tc->cmdsize = sizeof(struct thread_command) + tstate_size;
                hoffset += sizeof(struct thread_command);
                /*
                 *      Follow with a struct thread_state_flavor and
                 *      the appropriate thread state struct for each
                 *      thread state flavor.
                 */
                for (i = 0; i < nflavors; i++) {
                        *(mythread_state_flavor_t *)(header + hoffset) = flavors[i];
                        hoffset += sizeof(mythread_state_flavor_t);
                        thread_getstatus(thread, flavors[i].flavor,
                                         (thread_state_t *)(header + hoffset),
                                         &flavors[i].count);
                        hoffset += flavors[i].count * sizeof(int);
                }
                thread = (thread_t) queue_next(&thread->thread_list);
                thread_count--;
        }
#endif

        tir1.header = header;
        tir1.hoffset = hoffset;
        tir1.flavors = flavors;
        tir1.tstate_size = tstate_size;
        task_act_iterate_wth_args(task, collectth_state, &tir1);

        /*
         *      Write out the Mach header at the beginning of the file.
         */
        error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0,
                        UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p);
        kmem_free(kernel_map, header, header_size);

out:
        VOP_UNLOCK(vp, 0, p);
        error1 = vn_close(vp, FWRITE, cred, p);
        if (error == 0)
                error = error1;

        return (error);
}