/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1991 NeXT Computer, Inc.  All rights reserved.
 *
 *	File:	bsd/kern/kern_core.c
 *
 *	This file contains machine independent code for performing core dumps.
 *
 * HISTORY
 * 16-Feb-91  Mike DeMoney (mike@next.com)
 *	Massaged into MI form from m68k/core.c.
 */

#include <mach/vm_param.h>
#include <mach/thread_status.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>

#include <mach-o/loader.h>
#include <mach/vm_region.h>

#include <vm/vm_kern.h>

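/*
 * Each LC_THREAD command in the core file carries one or more register
 * sets, each preceded by a (flavor, count) pair; this struct matches
 * that on-disk pair (see collectth_state() below).
 */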
typedef struct {
	int	flavor;			/* the number for this flavor */
	int	count;			/* count of ints in this flavor */
} mythread_state_flavor_t;

#if defined (__ppc__)

mythread_state_flavor_t thread_flavor_array[]={
		{PPC_THREAD_STATE, PPC_THREAD_STATE_COUNT},
		{PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT},
		{PPC_EXCEPTION_STATE, PPC_EXCEPTION_STATE_COUNT}
		};
int mynum_flavors=3;
#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array [] = {
		{i386_THREAD_STATE, i386_THREAD_STATE_COUNT},
		{i386_THREAD_FPSTATE, i386_THREAD_FPSTATE_COUNT},
		{i386_THREAD_EXCEPTSTATE, i386_THREAD_EXCEPTSTATE_COUNT},
		{i386_THREAD_CTHREADSTATE, i386_THREAD_CTHREADSTATE_COUNT},
		{i386_NEW_THREAD_STATE, i386_NEW_THREAD_STATE_COUNT},
		{i386_FLOAT_STATE, i386_FLOAT_STATE_COUNT},
		{i386_ISA_PORT_MAP_STATE, i386_ISA_PORT_MAP_STATE_COUNT},
		{i386_V86_ASSIST_STATE, i386_V86_ASSIST_STATE_COUNT},
		{THREAD_SYSCALL_STATE, i386_THREAD_SYSCALL_STATE_COUNT}
		};
int mynum_flavors=9;

#else
#error architecture not supported
#endif

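/*
 * Per-task state handed to collectth_state() through
 * task_act_iterate_wth_args(): the wired header buffer, the running
 * offset into it, the flavor list, and the per-thread state size.
 */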
typedef struct {
	vm_offset_t	header;
	int		hoffset;
	mythread_state_flavor_t *flavors;
	int		tstate_size;
} tir_t;

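/*
 * Append an LC_THREAD command for one thread activation to the core
 * header: the thread_command itself, then a (flavor, count) pair and
 * the corresponding register state for each flavor in the array above.
 * t->hoffset is advanced past everything written.
 */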
collectth_state(thread_act_t th_act, tir_t *t)
{
	vm_offset_t	header;
	int		hoffset, i;
	mythread_state_flavor_t *flavors;
	struct thread_command	*tc;
	/*
	 *	Fill in thread command structure.
	 */
	header = t->header;
	hoffset = t->hoffset;
	flavors = t->flavors;

	tc = (struct thread_command *) (header + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = sizeof(struct thread_command)
			+ t->tstate_size;
	hoffset += sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < mynum_flavors; i++) {
		*(mythread_state_flavor_t *)(header+hoffset) =
			flavors[i];
		hoffset += sizeof(mythread_state_flavor_t);
		thread_getstatus(th_act, flavors[i].flavor,
				(thread_state_t *)(header+hoffset),
				&flavors[i].count);
		hoffset += flavors[i].count*sizeof(int);
	}

	t->hoffset = hoffset;
}
/*
 * Create a core image on the file "core".
 */
#define	MAX_TSTATE_FLAVORS	10
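/*
 * Overall flow: refuse to dump if the saved and real IDs differ or the
 * address space exceeds RLIMIT_CORE, suspend the task, open
 * /cores/core.<pid>, build a Mach-O MH_CORE header with one LC_SEGMENT
 * per VM region and one LC_THREAD per thread, write the segment
 * contents, then write the header at the front of the file.
 */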
int
coredump(p)
	register struct proc *p;
{
	int error=0;
	register struct pcred *pcred = p->p_cred;
	register struct ucred *cred = pcred->pc_ucred;
	struct nameidata nd;
	struct vattr vattr;
	vm_map_t	map;
	int		thread_count, segment_count;
	int		command_size, header_size, tstate_size;
	int		hoffset, foffset, vmoffset;
	vm_offset_t	header;
	struct machine_slot	*ms;
	struct mach_header	*mh;
	struct segment_command	*sc;
	struct thread_command	*tc;
	vm_size_t	size;
	vm_prot_t	prot;
	vm_prot_t	maxprot;
	vm_inherit_t	inherit;
	vm_offset_t	offset;
	int		error1;
	task_t		task;
	char		core_name[MAXCOMLEN+6];
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	vm_size_t	nflavors, mapsize;
	int		i;
	int		nesting_depth = 0;
	kern_return_t	kret;
	struct vm_region_submap_info_64 vbr;
	int		vbrcount = 0;
	tir_t		tir1;
	struct vnode	*vp;


	if (pcred->p_svuid != pcred->p_ruid || pcred->p_svgid != pcred->p_rgid)
		return (EFAULT);

	task = current_task();
	map = current_map();
	mapsize = get_vmmap_size(map);

	if (mapsize >= p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFAULT);
	(void) task_suspend(task);

	/*
	 *	Make sure all registers, etc. are in pcb so they get
	 *	into core file.
	 */
#if defined (__ppc__)
	fpu_save(current_act());
	vec_save(current_act());
#endif
	sprintf(core_name, "/cores/core.%d", p->p_pid);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, core_name, p);
	if(error = vn_open(&nd, O_CREAT | FWRITE, S_IRUSR))
		return (error);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
		error = EFAULT;
		goto out;
	}

	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;

	/*
	 *	If the task is modified while dumping the file
	 *	(e.g., changes in threads or VM), the resulting
	 *	file will not necessarily be correct.
	 */

	thread_count = get_task_numacts(task);
	segment_count = get_vmmap_entries(map);	/* XXX */
	/*
	 *	nflavors here is really the number of ints in flavors
	 *	to meet the thread_getstatus() calling convention
	 */
#if 0
	nflavors = sizeof(flavors)/sizeof(int);
	if (thread_getstatus(current_thread(), THREAD_STATE_FLAVOR_LIST,
				(thread_state_t)(flavors),
				&nflavors) != KERN_SUCCESS)
		panic("core flavor list");
	/* now convert to number of flavors */
	nflavors /= sizeof(mythread_state_flavor_t)/sizeof(int);
#else
	nflavors = mynum_flavors;
	bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
#endif
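	/*
	 * tstate_size is the per-thread payload of an LC_THREAD command:
	 * a (flavor, count) pair plus count ints of register state for
	 * each flavor being dumped.
	 */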
	tstate_size = 0;
	for (i = 0; i < nflavors; i++)
		tstate_size += sizeof(mythread_state_flavor_t) +
		    (flavors[i].count * sizeof(int));

	command_size = segment_count*sizeof(struct segment_command) +
	    thread_count*sizeof(struct thread_command) +
	    tstate_size*thread_count;

	header_size = command_size + sizeof(struct mach_header);

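	/*
	 * Core file layout: the mach_header and all load commands sit at
	 * the front of the file; segment contents follow, starting at the
	 * next page boundary (foffset below starts at round_page(header_size)).
	 */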
	(void) kmem_alloc_wired(kernel_map,
				(vm_offset_t *)&header,
				(vm_size_t)header_size);

	/*
	 *	Set up Mach-O header.
	 */
	mh = (struct mach_header *) header;
	ms = &machine_slot[cpu_number()];
	mh->magic = MH_MAGIC;
	mh->cputype = ms->cpu_type;
	mh->cpusubtype = ms->cpu_subtype;
	mh->filetype = MH_CORE;
	mh->ncmds = segment_count + thread_count;
	mh->sizeofcmds = command_size;

	hoffset = sizeof(struct mach_header);	/* offset into header */
	foffset = round_page(header_size);	/* offset into file */
	vmoffset = VM_MIN_ADDRESS;		/* offset into VM */
	/* We used to check for an error here; now we try to get
	 * as much as we can
	 */
	while (segment_count > 0){
		/*
		 *	Get region information for next region.
		 */

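		/*
		 * vm_region_recurse_64() reports submaps as it descends;
		 * when one is returned, bump the nesting depth and retry so
		 * the dump records the mappings inside the submap rather
		 * than the submap itself.
		 */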
		while (1) {
			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
			if((kret = vm_region_recurse_64(map,
				&vmoffset, &size, &nesting_depth,
				&vbr, &vbrcount)) != KERN_SUCCESS) {
				break;
			}
			if(vbr.is_submap) {
				nesting_depth++;
				continue;
			} else {
				break;
			}
		}
		if(kret != KERN_SUCCESS)
			break;

		prot = vbr.protection;
		maxprot = vbr.max_protection;
		inherit = vbr.inheritance;
		/*
		 *	Fill in segment command structure.
		 */
		sc = (struct segment_command *) (header + hoffset);
		sc->cmd = LC_SEGMENT;
		sc->cmdsize = sizeof(struct segment_command);
		/* segment name is zeroed by kmem_alloc */
		sc->vmaddr = vmoffset;
		sc->vmsize = size;
		sc->fileoff = foffset;
		sc->filesize = size;
		sc->maxprot = maxprot;
		sc->initprot = prot;
		sc->nsects = 0;

		/*
		 *	Write segment out.  Try as hard as possible to
		 *	get read access to the data.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			vm_protect(map, vmoffset, size, FALSE,
					prot|VM_PROT_READ);
		}
		/*
		 *	Only actually perform write if we can read.
		 *	Note: if we can't read, then we end up with
		 *	a hole in the file.
		 */
		if ((maxprot & VM_PROT_READ) == VM_PROT_READ) {
			error = vn_rdwr(UIO_WRITE, vp, (caddr_t)vmoffset, size, foffset,
				UIO_USERSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p);
		}

		hoffset += sizeof(struct segment_command);
		foffset += size;
		vmoffset += size;
		segment_count--;
	}

#if 0 /* [ */
	task_lock(task);
	thread = (thread_t) queue_first(&task->thread_list);
	while (thread_count > 0) {
		/*
		 *	Fill in thread command structure.
		 */
		tc = (struct thread_command *) (header + hoffset);
		tc->cmd = LC_THREAD;
		tc->cmdsize = sizeof(struct thread_command)
				+ tstate_size;
		hoffset += sizeof(struct thread_command);
		/*
		 * Follow with a struct thread_state_flavor and
		 * the appropriate thread state struct for each
		 * thread state flavor.
		 */
		for (i = 0; i < nflavors; i++) {
			*(mythread_state_flavor_t *)(header+hoffset) =
				flavors[i];
			hoffset += sizeof(mythread_state_flavor_t);
			thread_getstatus(thread, flavors[i].flavor,
					(thread_state_t *)(header+hoffset),
					&flavors[i].count);
			hoffset += flavors[i].count*sizeof(int);
		}
		thread = (thread_t) queue_next(&thread->thread_list);
		thread_count--;
	}
	task_unlock(task);
#else /* 0 ][ */
	tir1.header = header;
	tir1.hoffset = hoffset;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;
	task_act_iterate_wth_args(task, collectth_state, &tir1);

#endif /* 0 ] */
	/*
	 *	Write out the Mach header at the beginning of the
	 *	file.
	 */
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0,
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p);
	kmem_free(kernel_map, header, header_size);
out:
	VOP_UNLOCK(vp, 0, p);
	error1 = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = error1;

	return (error);
}