/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved.
 *
 * File: bsd/kern/kern_core.c
 *
 * This file contains machine independent code for performing core dumps.
 *
 * HISTORY
 * 16-Feb-91 Mike DeMoney (mike@next.com)
 *     Massaged into MI form from m68k/core.c.
 */

#include <mach/vm_param.h>
#include <mach/thread_status.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>

#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>

typedef struct {
    int flavor;     /* the number for this flavor */
    int count;      /* count of ints in this flavor */
} mythread_state_flavor_t;

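/*
 * Per-architecture table of the thread state flavors written into the
 * core file for each thread, in the order they appear in its LC_THREAD
 * command.
 */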
#if defined (__ppc__)

mythread_state_flavor_t thread_flavor_array[] = {
    {PPC_THREAD_STATE, PPC_THREAD_STATE_COUNT},
    {PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT},
    {PPC_EXCEPTION_STATE, PPC_EXCEPTION_STATE_COUNT}
};
int mynum_flavors = 3;
#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array[] = {
    {i386_THREAD_STATE, i386_THREAD_STATE_COUNT},
    {i386_THREAD_FPSTATE, i386_THREAD_FPSTATE_COUNT},
    {i386_THREAD_EXCEPTSTATE, i386_THREAD_EXCEPTSTATE_COUNT},
    {i386_THREAD_CTHREADSTATE, i386_THREAD_CTHREADSTATE_COUNT},
    {i386_NEW_THREAD_STATE, i386_NEW_THREAD_STATE_COUNT},
    {i386_FLOAT_STATE, i386_FLOAT_STATE_COUNT},
    {i386_ISA_PORT_MAP_STATE, i386_ISA_PORT_MAP_STATE_COUNT},
    {i386_V86_ASSIST_STATE, i386_V86_ASSIST_STATE_COUNT},
    {THREAD_SYSCALL_STATE, i386_THREAD_SYSCALL_STATE_COUNT}
};
int mynum_flavors = 9;

#else
#error architecture not supported
#endif


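/*
 * Per-dump state handed to collectth_state() for each thread activation:
 * the wired header buffer, the current offset into it, the flavor table,
 * and the total size of thread state carried by each LC_THREAD command.
 */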
typedef struct {
    vm_offset_t header;
    int hoffset;
    mythread_state_flavor_t *flavors;
    int tstate_size;
} tir_t;

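/*
 * Append an LC_THREAD command for one thread activation to the core
 * header: the thread_command itself, then a flavor/count pair followed by
 * the corresponding register state for each flavor in the table.
 */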
void
collectth_state(thread_act_t th_act, tir_t *t)
{
    vm_offset_t header;
    int hoffset, i;
    mythread_state_flavor_t *flavors;
    struct thread_command *tc;

    /*
     * Fill in thread command structure.
     */
    header = t->header;
    hoffset = t->hoffset;
    flavors = t->flavors;

    tc = (struct thread_command *) (header + hoffset);
    tc->cmd = LC_THREAD;
    tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
    hoffset += sizeof(struct thread_command);
    /*
     * Follow with a struct thread_state_flavor and
     * the appropriate thread state struct for each
     * thread state flavor.
     */
    for (i = 0; i < mynum_flavors; i++) {
        *(mythread_state_flavor_t *)(header + hoffset) = flavors[i];
        hoffset += sizeof(mythread_state_flavor_t);
        thread_getstatus(th_act, flavors[i].flavor,
            (thread_state_t *)(header + hoffset), &flavors[i].count);
        hoffset += flavors[i].count * sizeof(int);
    }

    t->hoffset = hoffset;
}
/*
 * Create a core image on the file "/cores/core.<pid>".
 */
#define MAX_TSTATE_FLAVORS 10
int
coredump(p)
    register struct proc *p;
{
    int error = 0;
    register struct pcred *pcred = p->p_cred;
    register struct ucred *cred = pcred->pc_ucred;
    struct nameidata nd;
    struct vattr vattr;
    vm_map_t map;
    int thread_count, segment_count;
    int command_size, header_size, tstate_size;
    int hoffset, foffset, vmoffset;
    vm_offset_t header;
    struct machine_slot *ms;
    struct mach_header *mh;
    struct segment_command *sc;
    struct thread_command *tc;
    vm_size_t size;
    vm_prot_t prot;
    vm_prot_t maxprot;
    vm_inherit_t inherit;
    vm_offset_t offset;
    int error1;
    task_t task;
    char core_name[MAXCOMLEN+6];
    mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
    vm_size_t nflavors, mapsize;
    int i;
    int nesting_depth = 0;
    kern_return_t kret;
    struct vm_region_submap_info_64 vbr;
    int vbrcount = 0;
    tir_t tir1;
    struct vnode *vp;


    if (pcred->p_svuid != pcred->p_ruid || pcred->p_svgid != pcred->p_rgid)
        return (EFAULT);

    task = current_task();
    map = current_map();
    mapsize = get_vmmap_size(map);

    if (mapsize >= p->p_rlimit[RLIMIT_CORE].rlim_cur)
        return (EFAULT);
    (void) task_suspend(task);

    /*
     * Make sure all registers, etc. are in pcb so they get
     * into core file.
     */
#if defined (__ppc__)
    fpu_save(current_act());
    vec_save(current_act());
#endif
    sprintf(core_name, "/cores/core.%d", p->p_pid);
    NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, core_name, p);
    if ((error = vn_open(&nd, O_CREAT | FWRITE, S_IRUSR)))
        return (error);
    vp = nd.ni_vp;

    /* Don't dump to non-regular files or files with links. */
    if (vp->v_type != VREG ||
        VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
        error = EFAULT;
        goto out;
    }

    VATTR_NULL(&vattr);
    vattr.va_size = 0;
    VOP_LEASE(vp, p, cred, LEASE_WRITE);
    VOP_SETATTR(vp, &vattr, cred, p);
    p->p_acflag |= ACORE;

    /*
     * If the task is modified while dumping the file
     * (e.g., changes in threads or VM), the resulting
     * file will not necessarily be correct.
     */

    thread_count = get_task_numacts(task);
    segment_count = get_vmmap_entries(map);    /* XXX */
    /*
     * nflavors here is really the number of ints in flavors
     * to meet the thread_getstatus() calling convention
     */
#if 0
    nflavors = sizeof(flavors) / sizeof(int);
    if (thread_getstatus(current_thread(), THREAD_STATE_FLAVOR_LIST,
            (thread_state_t)(flavors), &nflavors) != KERN_SUCCESS)
        panic("core flavor list");
    /* now convert to number of flavors */
    nflavors /= sizeof(mythread_state_flavor_t) / sizeof(int);
#else
    nflavors = mynum_flavors;
    bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
#endif
    tstate_size = 0;
    for (i = 0; i < nflavors; i++)
        tstate_size += sizeof(mythread_state_flavor_t) +
            (flavors[i].count * sizeof(int));

    command_size = segment_count * sizeof(struct segment_command) +
        thread_count * sizeof(struct thread_command) +
        tstate_size * thread_count;

    header_size = command_size + sizeof(struct mach_header);

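    /*
     * The wired header buffer holds the mach_header followed by all of
     * the load commands; segment contents are written to the file
     * starting at the first page boundary past the header (see the
     * round_page(header_size) used for foffset below).
     */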
    (void) kmem_alloc_wired(kernel_map,
        (vm_offset_t *)&header,
        (vm_size_t)header_size);

    /*
     * Set up Mach-O header.
     */
    mh = (struct mach_header *) header;
    ms = &machine_slot[cpu_number()];
    mh->magic = MH_MAGIC;
    mh->cputype = ms->cpu_type;
    mh->cpusubtype = ms->cpu_subtype;
    mh->filetype = MH_CORE;
    mh->ncmds = segment_count + thread_count;
    mh->sizeofcmds = command_size;

    hoffset = sizeof(struct mach_header);    /* offset into header */
    foffset = round_page(header_size);       /* offset into file */
    vmoffset = VM_MIN_ADDRESS;               /* offset into VM */
    /*
     * We used to check for an error here; now we try to get
     * as much as we can.
     */
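    /*
     * Walk the task's address space with vm_region_recurse_64(), emitting
     * one LC_SEGMENT command per region and appending the region's
     * contents to the file at foffset.
     */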
    while (segment_count > 0) {
        /*
         * Get region information for next region.
         */

        while (1) {
            vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
            if ((kret = vm_region_recurse_64(map,
                    &vmoffset, &size, &nesting_depth,
                    &vbr, &vbrcount)) != KERN_SUCCESS) {
                break;
            }
            if (vbr.is_submap) {
                nesting_depth++;
                continue;
            } else {
                break;
            }
        }
        if (kret != KERN_SUCCESS)
            break;

        prot = vbr.protection;
        maxprot = vbr.max_protection;
        inherit = vbr.inheritance;
        /*
         * Fill in segment command structure.
         */
        sc = (struct segment_command *) (header + hoffset);
        sc->cmd = LC_SEGMENT;
        sc->cmdsize = sizeof(struct segment_command);
        /* segment name is zeroed by kmem_alloc */
        sc->vmaddr = vmoffset;
        sc->vmsize = size;
        sc->fileoff = foffset;
        sc->filesize = size;
        sc->maxprot = maxprot;
        sc->initprot = prot;
        sc->nsects = 0;

        /*
         * Write segment out. Try as hard as possible to
         * get read access to the data.
         */
        if ((prot & VM_PROT_READ) == 0) {
            vm_protect(map, vmoffset, size, FALSE,
                prot | VM_PROT_READ);
        }
        /*
         * Only actually perform write if we can read.
         * Note: if we can't read, then we end up with
         * a hole in the file.
         */
        if ((maxprot & VM_PROT_READ) == VM_PROT_READ &&
            vbr.user_tag != VM_MEMORY_IOKIT) {
            error = vn_rdwr(UIO_WRITE, vp, (caddr_t)vmoffset, size, foffset,
                UIO_USERSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p);
        }

        hoffset += sizeof(struct segment_command);
        foffset += size;
        vmoffset += size;
        segment_count--;
    }

#if 0 /* [ */
    task_lock(task);
    thread = (thread_t) queue_first(&task->thread_list);
    while (thread_count > 0) {
        /*
         * Fill in thread command structure.
         */
        tc = (struct thread_command *) (header + hoffset);
        tc->cmd = LC_THREAD;
        tc->cmdsize = sizeof(struct thread_command) + tstate_size;
        hoffset += sizeof(struct thread_command);
        /*
         * Follow with a struct thread_state_flavor and
         * the appropriate thread state struct for each
         * thread state flavor.
         */
        for (i = 0; i < nflavors; i++) {
            *(mythread_state_flavor_t *)(header + hoffset) = flavors[i];
            hoffset += sizeof(mythread_state_flavor_t);
            thread_getstatus(thread, flavors[i].flavor,
                (thread_state_t *)(header + hoffset), &flavors[i].count);
            hoffset += flavors[i].count * sizeof(int);
        }
        thread = (thread_t) queue_next(&thread->thread_list);
        thread_count--;
    }
    task_unlock(task);
#else /* 0 ][ */
    tir1.header = header;
    tir1.hoffset = hoffset;
    tir1.flavors = flavors;
    tir1.tstate_size = tstate_size;
    task_act_iterate_wth_args(task, collectth_state, &tir1);

#endif /* 0 ] */
    /*
     * Write out the Mach header at the beginning of the
     * file.
     */
    error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0,
        UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p);
    kmem_free(kernel_map, header, header_size);
out:
    VOP_UNLOCK(vp, 0, p);
    error1 = vn_close(vp, FWRITE, cred, p);
    if (error == 0)
        error = error1;
    return (error);
}