/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved.
 *
 *	File:	bsd/kern/kern_core.c
 *
 *	This file contains machine independent code for performing core dumps.
 *
 * HISTORY
 * 16-Feb-91  Mike DeMoney (mike@next.com)
 *	Massaged into MI form from m68k/core.c.
 */

#include <mach/vm_param.h>
#include <mach/thread_status.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>

#include <mach-o/loader.h>
#include <mach/vm_region.h>

#include <vm/vm_kern.h>

typedef struct {
	int	flavor;		/* the number for this flavor */
	int	count;		/* count of ints in this flavor */
} mythread_state_flavor_t;

#if defined (__ppc__)

mythread_state_flavor_t thread_flavor_array[] = {
	{PPC_THREAD_STATE, PPC_THREAD_STATE_COUNT},
	{PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT},
	{PPC_EXCEPTION_STATE, PPC_EXCEPTION_STATE_COUNT}
};
int mynum_flavors = 3;
#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array[] = {
	{i386_THREAD_STATE, i386_THREAD_STATE_COUNT},
	{i386_THREAD_FPSTATE, i386_THREAD_FPSTATE_COUNT},
	{i386_THREAD_EXCEPTSTATE, i386_THREAD_EXCEPTSTATE_COUNT},
	{i386_THREAD_CTHREADSTATE, i386_THREAD_CTHREADSTATE_COUNT},
	{i386_NEW_THREAD_STATE, i386_NEW_THREAD_STATE_COUNT},
	{i386_FLOAT_STATE, i386_FLOAT_STATE_COUNT},
	{i386_ISA_PORT_MAP_STATE, i386_ISA_PORT_MAP_STATE_COUNT},
	{i386_V86_ASSIST_STATE, i386_V86_ASSIST_STATE_COUNT},
	{THREAD_SYSCALL_STATE, i386_THREAD_SYSCALL_STATE_COUNT}
};
int mynum_flavors = 9;

#else
#error architecture not supported
#endif

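/*
 * Per-task iteration state handed to collectth_state() as it is applied
 * to each thread: the core header buffer, the current offset into it,
 * the flavor table, and the precomputed size of one thread's state.
 */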
typedef struct {
	vm_offset_t	header;
	int		hoffset;
	mythread_state_flavor_t *flavors;
	int		tstate_size;
} tir_t;

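/*
 * Append an LC_THREAD command for one thread to the in-memory core
 * header: the thread_command itself, then, for each flavor in the
 * table, the flavor descriptor followed by the thread state returned
 * by thread_getstatus().
 */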
void
collectth_state(thread_act_t th_act, tir_t *t)
{
	vm_offset_t	header;
	int		hoffset, i;
	mythread_state_flavor_t *flavors;
	struct thread_command	*tc;

	/*
	 * Fill in thread command structure.
	 */
	header = t->header;
	hoffset = t->hoffset;
	flavors = t->flavors;

	tc = (struct thread_command *) (header + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = sizeof(struct thread_command)
			+ t->tstate_size;
	hoffset += sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < mynum_flavors; i++) {
		*(mythread_state_flavor_t *)(header+hoffset) =
			flavors[i];
		hoffset += sizeof(mythread_state_flavor_t);
		thread_getstatus(th_act, flavors[i].flavor,
				(thread_state_t *)(header+hoffset),
				&flavors[i].count);
		hoffset += flavors[i].count*sizeof(int);
	}

	t->hoffset = hoffset;
}
/*
 * Create a core image on the file "/cores/core.<pid>".
 */
#define	MAX_TSTATE_FLAVORS	10
int
coredump(p)
	register struct proc *p;
{
	int error = 0;
	register struct pcred *pcred = p->p_cred;
	register struct ucred *cred = pcred->pc_ucred;
	struct nameidata nd;
	struct vattr vattr;
	vm_map_t map;
	int thread_count, segment_count;
	int command_size, header_size, tstate_size;
	int hoffset, foffset, vmoffset;
	vm_offset_t header;
	struct machine_slot *ms;
	struct mach_header *mh;
	struct segment_command *sc;
	struct thread_command *tc;
	vm_size_t size;
	vm_prot_t prot;
	vm_prot_t maxprot;
	vm_inherit_t inherit;
	vm_offset_t offset;
	int error1;
	task_t task;
	char core_name[MAXCOMLEN+6];
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	vm_size_t nflavors, mapsize;
	int i;
	int nesting_depth = 0;
	kern_return_t kret;
	struct vm_region_submap_info_64 vbr;
	int vbrcount = 0;
	tir_t tir1;
	struct vnode *vp;

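	/*
	 * Refuse to dump if the process has changed credentials (set-uid or
	 * set-gid image): the address space may contain privileged data that
	 * the real user should not be able to read back out of a core file.
	 */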
	if (pcred->p_svuid != pcred->p_ruid || pcred->p_svgid != pcred->p_rgid)
		return (EFAULT);

	task = current_task();
	map = current_map();
	mapsize = get_vmmap_size(map);

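	/*
	 * Skip the dump entirely if the address space is already larger than
	 * the per-process core file size limit (RLIMIT_CORE).
	 */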
	if (mapsize >= p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFAULT);
	(void) task_suspend(task);

	/*
	 * Make sure all registers, etc. are in pcb so they get
	 * into core file.
	 */
#if defined (__ppc__)
	fpu_save();
#endif
	sprintf(core_name, "/cores/core.%d", p->p_pid);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, core_name, p);
	if ((error = vn_open(&nd, O_CREAT | FWRITE, S_IRUSR)) != 0)
		return (error);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
		error = EFAULT;
		goto out;
	}

	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	VOP_SETATTR(vp, &vattr, cred, p);
	p->p_acflag |= ACORE;

	/*
	 * If the task is modified while dumping the file (e.g., changes
	 * in threads or VM), the resulting file will not necessarily be
	 * correct.
	 */

	thread_count = get_task_numacts(task);
	segment_count = get_vmmap_entries(map);	/* XXX */
	/*
	 * nflavors here is really the number of ints in flavors
	 * to meet the thread_getstatus() calling convention
	 */
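	/*
	 * The dynamic THREAD_STATE_FLAVOR_LIST query below is compiled out;
	 * the static per-architecture thread_flavor_array is used instead.
	 */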
#if 0
	nflavors = sizeof(flavors)/sizeof(int);
	if (thread_getstatus(current_thread(), THREAD_STATE_FLAVOR_LIST,
				(thread_state_t)(flavors),
				&nflavors) != KERN_SUCCESS)
		panic("core flavor list");
	/* now convert to number of flavors */
	nflavors /= sizeof(mythread_state_flavor_t)/sizeof(int);
#else
	nflavors = mynum_flavors;
	bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
#endif
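	/*
	 * Size the core file: a mach_header, followed by one segment_command
	 * per VM region and one thread_command (plus flavored thread state)
	 * per thread; the raw segment data follows the load commands,
	 * starting at the next page boundary.
	 */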
	tstate_size = 0;
	for (i = 0; i < nflavors; i++)
		tstate_size += sizeof(mythread_state_flavor_t) +
				(flavors[i].count * sizeof(int));

	command_size = segment_count*sizeof(struct segment_command) +
			thread_count*sizeof(struct thread_command) +
			tstate_size*thread_count;

	header_size = command_size + sizeof(struct mach_header);

	(void) kmem_alloc_wired(kernel_map,
				(vm_offset_t *)&header,
				(vm_size_t)header_size);

	/*
	 * Set up Mach-O header.
	 */
	mh = (struct mach_header *) header;
	ms = &machine_slot[cpu_number()];
	mh->magic = MH_MAGIC;
	mh->cputype = ms->cpu_type;
	mh->cpusubtype = ms->cpu_subtype;
	mh->filetype = MH_CORE;
	mh->ncmds = segment_count + thread_count;
	mh->sizeofcmds = command_size;

	hoffset = sizeof(struct mach_header);	/* offset into header */
	foffset = round_page(header_size);	/* offset into file */
	vmoffset = VM_MIN_ADDRESS;		/* offset into VM */
	/*
	 * We used to check for an error here; now we try to get
	 * as much as we can.
	 */
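	/*
	 * Walk the task's address space with vm_region_recurse_64(),
	 * descending into submaps, and emit one LC_SEGMENT command plus the
	 * region's data for each region found.
	 */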
	while (segment_count > 0) {
		/*
		 * Get region information for next region.
		 */

		while (1) {
			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
			if ((kret = vm_region_recurse_64(map,
					&vmoffset, &size, &nesting_depth,
					&vbr, &vbrcount)) != KERN_SUCCESS) {
				break;
			}
			if (vbr.is_submap) {
				nesting_depth++;
				continue;
			} else {
				break;
			}
		}
		if (kret != KERN_SUCCESS)
			break;

		prot = vbr.protection;
		maxprot = vbr.max_protection;
		inherit = vbr.inheritance;
		/*
		 * Fill in segment command structure.
		 */
		sc = (struct segment_command *) (header + hoffset);
		sc->cmd = LC_SEGMENT;
		sc->cmdsize = sizeof(struct segment_command);
		/* segment name is zeroed by kmem_alloc */
		sc->vmaddr = vmoffset;
		sc->vmsize = size;
		sc->fileoff = foffset;
		sc->filesize = size;
		sc->maxprot = maxprot;
		sc->initprot = prot;
		sc->nsects = 0;

		/*
		 * Write segment out. Try as hard as possible to
		 * get read access to the data.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			vm_protect(map, vmoffset, size, FALSE,
					prot|VM_PROT_READ);
		}
		/*
		 * Only actually perform write if we can read.
		 * Note: if we can't read, then we end up with
		 * a hole in the file.
		 */
		if ((maxprot & VM_PROT_READ) == VM_PROT_READ) {
			error = vn_rdwr(UIO_WRITE, vp, (caddr_t)vmoffset, size, foffset,
				UIO_USERSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p);
		}

		hoffset += sizeof(struct segment_command);
		foffset += size;
		vmoffset += size;
		segment_count--;
	}

#if 0 /* [ */
	task_lock(task);
	thread = (thread_t) queue_first(&task->thread_list);
	while (thread_count > 0) {
		/*
		 * Fill in thread command structure.
		 */
		tc = (struct thread_command *) (header + hoffset);
		tc->cmd = LC_THREAD;
		tc->cmdsize = sizeof(struct thread_command)
				+ tstate_size;
		hoffset += sizeof(struct thread_command);
		/*
		 * Follow with a struct thread_state_flavor and
		 * the appropriate thread state struct for each
		 * thread state flavor.
		 */
		for (i = 0; i < nflavors; i++) {
			*(mythread_state_flavor_t *)(header+hoffset) =
				flavors[i];
			hoffset += sizeof(mythread_state_flavor_t);
			thread_getstatus(thread, flavors[i].flavor,
					(thread_state_t *)(header+hoffset),
					&flavors[i].count);
			hoffset += flavors[i].count*sizeof(int);
		}
		thread = (thread_t) queue_next(&thread->thread_list);
		thread_count--;
	}
	task_unlock(task);
#else /* 0 ][ */
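	/*
	 * Gather thread state by iterating over the task's activations;
	 * collectth_state() appends an LC_THREAD command for each one and
	 * advances tir1.hoffset as it goes.
	 */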
	tir1.header = header;
	tir1.hoffset = hoffset;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;
	task_act_iterate_wth_args(task, collectth_state, &tir1);

#endif /* 0 ] */
	/*
	 * Write out the Mach header at the beginning of the
	 * file.
	 */
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0,
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p);
	kmem_free(kernel_map, header, header_size);
out:
	VOP_UNLOCK(vp, 0, p);
	error1 = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = error1;
	return (error);
}