/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved.
 *
 * File: bsd/kern/kern_core.c
 *
 * This file contains machine independent code for performing core dumps.
 *
 * HISTORY
 * 16-Feb-91  Mike DeMoney (mike@next.com)
 *	Massaged into MI form from m68k/core.c.
 */

#include <mach/vm_param.h>
#include <mach/thread_status.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>

#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>

typedef struct {
	int	flavor;		/* the number for this flavor */
	int	count;		/* count of ints in this flavor */
} mythread_state_flavor_t;

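/*
 * Per-architecture table of the thread-state flavors written into each
 * LC_THREAD command, together with their sizes in 32-bit words.  The
 * same table is used below both to size the thread commands and to
 * fill them in via thread_getstatus().
 */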
#if defined (__ppc__)

mythread_state_flavor_t thread_flavor_array[]={
	{PPC_THREAD_STATE , PPC_THREAD_STATE_COUNT},
	{PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT},
	{PPC_EXCEPTION_STATE, PPC_EXCEPTION_STATE_COUNT},
	{PPC_VECTOR_STATE, PPC_VECTOR_STATE_COUNT}
	};
int mynum_flavors=4;
#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array [] = {
	{i386_THREAD_STATE, i386_THREAD_STATE_COUNT},
	{i386_THREAD_FPSTATE, i386_THREAD_FPSTATE_COUNT},
	{i386_THREAD_EXCEPTSTATE, i386_THREAD_EXCEPTSTATE_COUNT},
	{i386_THREAD_CTHREADSTATE, i386_THREAD_CTHREADSTATE_COUNT},
	{i386_NEW_THREAD_STATE, i386_NEW_THREAD_STATE_COUNT},
	{i386_FLOAT_STATE, i386_FLOAT_STATE_COUNT},
	{i386_ISA_PORT_MAP_STATE, i386_ISA_PORT_MAP_STATE_COUNT},
	{i386_V86_ASSIST_STATE, i386_V86_ASSIST_STATE_COUNT},
	{THREAD_SYSCALL_STATE, i386_THREAD_SYSCALL_STATE_COUNT}
	};
int mynum_flavors=9;

#else
#error architecture not supported
#endif


typedef struct {
	vm_offset_t header;
	int hoffset;
	mythread_state_flavor_t *flavors;
	int tstate_size;
} tir_t;

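/*
 * Per-thread callback, run for each activation in the task via
 * task_act_iterate_wth_args():  appends one LC_THREAD command, carrying
 * every flavor of that thread's register state, to the Mach-O header
 * being built up in t->header, and advances t->hoffset past it.
 */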
collectth_state(thread_act_t th_act, tir_t *t)
{
	vm_offset_t header;
	int hoffset, i;
	mythread_state_flavor_t *flavors;
	struct thread_command *tc;
	/*
	 * Fill in thread command structure.
	 */
	header = t->header;
	hoffset = t->hoffset;
	flavors = t->flavors;

	tc = (struct thread_command *) (header + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = sizeof(struct thread_command)
			+ t->tstate_size;
	hoffset += sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < mynum_flavors; i++) {
		*(mythread_state_flavor_t *)(header+hoffset) =
			flavors[i];
		hoffset += sizeof(mythread_state_flavor_t);
		thread_getstatus(th_act, flavors[i].flavor,
				(thread_state_t *)(header+hoffset),
				&flavors[i].count);
		hoffset += flavors[i].count*sizeof(int);
	}

	t->hoffset = hoffset;
}
/*
 * Create a core image on the file "/cores/core.<pid>".
 */
#define MAX_TSTATE_FLAVORS 10
int
coredump(p)
	register struct proc *p;
{
	int error = 0;
	register struct pcred *pcred = p->p_cred;
	register struct ucred *cred = pcred->pc_ucred;
	struct nameidata nd;
	struct vattr vattr;
	vm_map_t map;
	int thread_count, segment_count;
	int command_size, header_size, tstate_size;
	int hoffset, foffset, vmoffset;
	vm_offset_t header;
	struct machine_slot *ms;
	struct mach_header *mh;
	struct segment_command *sc;
	struct thread_command *tc;
	vm_size_t size;
	vm_prot_t prot;
	vm_prot_t maxprot;
	vm_inherit_t inherit;
	vm_offset_t offset;
	int error1;
	task_t task;
	char core_name[MAXCOMLEN+6];
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	vm_size_t nflavors, mapsize;
	int i;
	int nesting_depth = 0;
	kern_return_t kret;
	struct vm_region_submap_info_64 vbr;
	int vbrcount = 0;
	tir_t tir1;
	struct vnode *vp;
	extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);	/* temp fix */

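	/*
	 * A process whose saved uid/gid differ from its real ids (e.g. one
	 * that executed a set-id image) is refused a core dump.
	 */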
	if (pcred->p_svuid != pcred->p_ruid || pcred->p_svgid != pcred->p_rgid)
		return (EFAULT);

	task = current_task();
	map = current_map();
	mapsize = get_vmmap_size(map);

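	/*
	 * Dumping is skipped when the address space is already at least as
	 * large as the core-file resource limit; otherwise the task is
	 * suspended for the duration of the dump.
	 */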
	if (mapsize >= p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFAULT);
	(void) task_suspend(task);

	sprintf(core_name, "/cores/core.%d", p->p_pid);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, core_name, p);
	if(error = vn_open(&nd, O_CREAT | FWRITE, S_IRUSR ))
		return (error);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
		error = EFAULT;
		goto out;
	}

	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;

	/*
	 * If the task is modified while dumping the file
	 * (e.g., changes in threads or VM), the resulting
	 * file will not necessarily be correct.
	 */

	thread_count = get_task_numacts(task);
	segment_count = get_vmmap_entries(map);	/* XXX */
	/*
	 * nflavors here is really the number of ints in flavors
	 * to meet the thread_getstatus() calling convention
	 */
#if 0
	nflavors = sizeof(flavors)/sizeof(int);
	if (thread_getstatus(current_thread(), THREAD_STATE_FLAVOR_LIST,
				(thread_state_t)(flavors),
				&nflavors) != KERN_SUCCESS)
		panic("core flavor list");
	/* now convert to number of flavors */
	nflavors /= sizeof(mythread_state_flavor_t)/sizeof(int);
#else
	nflavors = mynum_flavors;
	bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
#endif
	tstate_size = 0;
	for (i = 0; i < nflavors; i++)
		tstate_size += sizeof(mythread_state_flavor_t) +
				(flavors[i].count * sizeof(int));

	command_size = segment_count*sizeof(struct segment_command) +
		thread_count*sizeof(struct thread_command) +
		tstate_size*thread_count;

	header_size = command_size + sizeof(struct mach_header);
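	/*
	 * Resulting core file layout:  a mach_header, followed by one
	 * LC_SEGMENT command per VM region and one LC_THREAD command
	 * (flavor headers plus register state) per thread, all built in
	 * the kernel buffer allocated below.  Segment contents are then
	 * written starting at the first page boundary past header_size.
	 */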

	(void) kmem_alloc_wired(kernel_map,
			(vm_offset_t *)&header,
			(vm_size_t)header_size);

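	/*
	 * The wired kernel buffer holds the mach_header and every load
	 * command; it comes back zero-filled (see the "segment name is
	 * zeroed by kmem_alloc" note below).
	 */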
	/*
	 *	Set up Mach-O header.
	 */
	mh = (struct mach_header *) header;
	ms = &machine_slot[cpu_number()];
	mh->magic = MH_MAGIC;
	mh->cputype = ms->cpu_type;
	mh->cpusubtype = ms->cpu_subtype;
	mh->filetype = MH_CORE;
	mh->ncmds = segment_count + thread_count;
	mh->sizeofcmds = command_size;

	hoffset = sizeof(struct mach_header);	/* offset into header */
	foffset = round_page_32(header_size);	/* offset into file */
	vmoffset = VM_MIN_ADDRESS;		/* offset into VM */
	/* We used to stop at the first error here; now we try to get
	 * as much as we can
	 */
	while (segment_count > 0){
		/*
		 *	Get region information for next region.
		 */

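		/*
		 * vm_region_recurse_64() is asked for the region at or above
		 * vmoffset; whenever it lands on a submap entry, nesting_depth
		 * is bumped and the lookup retried, so the dump sees the
		 * terminal (leaf) mappings rather than the submap itself.
		 */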
		while (1) {
			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
			if((kret = vm_region_recurse_64(map,
				&vmoffset, &size, &nesting_depth,
				&vbr, &vbrcount)) != KERN_SUCCESS) {
				break;
			}
			if(vbr.is_submap) {
				nesting_depth++;
				continue;
			} else {
				break;
			}
		}
		if(kret != KERN_SUCCESS)
			break;

		prot = vbr.protection;
		maxprot = vbr.max_protection;
		inherit = vbr.inheritance;
		/*
		 *	Fill in segment command structure.
		 */
		sc = (struct segment_command *) (header + hoffset);
		sc->cmd = LC_SEGMENT;
		sc->cmdsize = sizeof(struct segment_command);
		/* segment name is zeroed by kmem_alloc */
		sc->segname[0] = 0;
		sc->vmaddr = vmoffset;
		sc->vmsize = size;
		sc->fileoff = foffset;
		sc->filesize = size;
		sc->maxprot = maxprot;
		sc->initprot = prot;
		sc->nsects = 0;

		/*
		 *	Write segment out.  Try as hard as possible to
		 *	get read access to the data.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			vm_protect(map, vmoffset, size, FALSE,
				   prot|VM_PROT_READ);
		}
		/*
		 *	Only actually perform write if we can read.
		 *	Note: if we can't read, then we end up with
		 *	a hole in the file.
		 */
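		/*
		 * Regions tagged VM_MEMORY_IOKIT, and addresses rejected by
		 * coredumpok(), are likewise skipped and show up as holes.
		 * The data itself is copied straight out of the user address
		 * space (UIO_USERSPACE with vmoffset as the buffer).
		 */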
		if ((maxprot & VM_PROT_READ) == VM_PROT_READ
				&& vbr.user_tag != VM_MEMORY_IOKIT
				&& coredumpok(map, vmoffset)) {
			error = vn_rdwr(UIO_WRITE, vp, (caddr_t)vmoffset, size, foffset,
				UIO_USERSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p);
		}

		hoffset += sizeof(struct segment_command);
		foffset += size;
		vmoffset += size;
		segment_count--;
	}

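	/*
	 * Thread commands follow the segment commands in the header.  The
	 * #if 0 block below is the older inline walk of the task's thread
	 * list; the live path packages the header state in tir1 and lets
	 * task_act_iterate_wth_args() call collectth_state() once per
	 * thread activation.
	 */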
#if 0 /* [ */
	task_lock(task);
	thread = (thread_t) queue_first(&task->thread_list);
	while (thread_count > 0) {
		/*
		 *	Fill in thread command structure.
		 */
		tc = (struct thread_command *) (header + hoffset);
		tc->cmd = LC_THREAD;
		tc->cmdsize = sizeof(struct thread_command)
				+ tstate_size;
		hoffset += sizeof(struct thread_command);
		/*
		 * Follow with a struct thread_state_flavor and
		 * the appropriate thread state struct for each
		 * thread state flavor.
		 */
		for (i = 0; i < nflavors; i++) {
			*(mythread_state_flavor_t *)(header+hoffset) =
				flavors[i];
			hoffset += sizeof(mythread_state_flavor_t);
			thread_getstatus(thread, flavors[i].flavor,
					(thread_state_t *)(header+hoffset),
					&flavors[i].count);
			hoffset += flavors[i].count*sizeof(int);
		}
		thread = (thread_t) queue_next(&thread->thread_list);
		thread_count--;
	}
	task_unlock(task);
#else /* 0 ][ */
	tir1.header = header;
	tir1.hoffset = hoffset;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;
	task_act_iterate_wth_args(task, collectth_state, &tir1);

#endif /* 0 ] */
	/*
	 *	Write out the Mach header at the beginning of the
	 *	file.
	 */
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0,
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p);
	kmem_free(kernel_map, header, header_size);
out:
	VOP_UNLOCK(vp, 0, p);
	error1 = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = error1;

	return (error);
}