/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved.
 *
 *	File:	bsd/kern/kern_core.c
 *
 *	This file contains machine independent code for performing core dumps.
 *
 */

#include <mach/vm_param.h>
#include <mach/thread_status.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/acct.h>
#include <sys/file_internal.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>

#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h>   /* last */
#include <vm/vm_map.h>      /* current_map() */
#include <mach/mach_vm.h>   /* mach_vm_region_recurse() */
#include <mach/task.h>      /* task_suspend() */
#include <kern/task.h>      /* get_task_numacts() */

typedef struct {
    int flavor;             /* the number for this flavor */
    int count;              /* count of ints in this flavor */
} mythread_state_flavor_t;

#if defined (__ppc__)

mythread_state_flavor_t thread_flavor_array[] = {
    {PPC_THREAD_STATE, PPC_THREAD_STATE_COUNT},
    {PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT},
    {PPC_EXCEPTION_STATE, PPC_EXCEPTION_STATE_COUNT},
    {PPC_VECTOR_STATE, PPC_VECTOR_STATE_COUNT}
};
int mynum_flavors = 4;
#elif defined (__i386__)
mythread_state_flavor_t thread_flavor_array[] = {
    {i386_THREAD_STATE, i386_THREAD_STATE_COUNT},
    {i386_THREAD_FPSTATE, i386_THREAD_FPSTATE_COUNT},
    {i386_THREAD_EXCEPTSTATE, i386_THREAD_EXCEPTSTATE_COUNT},
    {i386_THREAD_CTHREADSTATE, i386_THREAD_CTHREADSTATE_COUNT},
    {i386_NEW_THREAD_STATE, i386_NEW_THREAD_STATE_COUNT},
    {i386_FLOAT_STATE, i386_FLOAT_STATE_COUNT},
    {i386_ISA_PORT_MAP_STATE, i386_ISA_PORT_MAP_STATE_COUNT},
    {i386_V86_ASSIST_STATE, i386_V86_ASSIST_STATE_COUNT},
    {THREAD_SYSCALL_STATE, i386_THREAD_SYSCALL_STATE_COUNT}
};
int mynum_flavors = 9;

#else
#error architecture not supported
#endif


typedef struct {
    vm_offset_t header;
    int hoffset;
    mythread_state_flavor_t *flavors;
    int tstate_size;
} tir_t;
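
/*
 * Shape of the core file built below (an MH_CORE Mach-O image):
 *
 *   mach_header (or mach_header_64)
 *   one LC_SEGMENT (or LC_SEGMENT_64) command per VM region dumped
 *   one LC_THREAD command, plus flavor/state records, per thread
 *   ...padding up to round_page(header_size)...
 *   the segment contents, written back to back; each segment
 *   command's fileoff points at its data
 */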

/* XXX should be static */
void collectth_state(thread_t th_act, void *tirp);

/* XXX not in a Mach header anywhere */
kern_return_t thread_getstatus(register thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);


__private_extern__ int do_coredump = 1;     /* default: dump cores */
__private_extern__ int sugid_coredump = 0;  /* default: but not SUID/SGID binaries */
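/*
 * These are only the compiled-in defaults; both flags are tunable at run
 * time (elsewhere in the kernel they are exposed as the kern.coredump and
 * kern.sugid_coredump sysctls).
 */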

void
collectth_state(thread_t th_act, void *tirp)
{
    vm_offset_t header;
    int hoffset, i;
    mythread_state_flavor_t *flavors;
    struct thread_command *tc;
    tir_t *t = (tir_t *)tirp;

    /*
     * Fill in thread command structure.
     */
    header = t->header;
    hoffset = t->hoffset;
    flavors = t->flavors;

    tc = (struct thread_command *)(header + hoffset);
    tc->cmd = LC_THREAD;
    tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
    hoffset += sizeof(struct thread_command);
    /*
     * Follow with a struct thread_state_flavor and
     * the appropriate thread state struct for each
     * thread state flavor.
     */
    for (i = 0; i < mynum_flavors; i++) {
        *(mythread_state_flavor_t *)(header + hoffset) = flavors[i];
        hoffset += sizeof(mythread_state_flavor_t);
        thread_getstatus(th_act, flavors[i].flavor,
            (thread_state_t)(header + hoffset), &flavors[i].count);
        hoffset += flavors[i].count * sizeof(int);
    }

    t->hoffset = hoffset;
}
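
/*
 * For each thread, collectth_state() thus appends to the header buffer:
 *
 *   a struct thread_command (LC_THREAD), followed by, for every entry
 *   in thread_flavor_array, a { flavor, count } pair and `count'
 *   32-bit words of state returned by thread_getstatus().
 */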

/*
 * Create a core image on the file "core".
 */
#define MAX_TSTATE_FLAVORS 10
int
coredump(struct proc *p)
{
    int error = 0;
    kauth_cred_t cred = kauth_cred_get();
    struct vnode_attr va;
    struct vfs_context context;
    vm_map_t map;
    int thread_count, segment_count;
    int command_size, header_size, tstate_size;
    int hoffset;
    off_t foffset;
    vm_map_offset_t vmoffset;
    vm_offset_t header;
    vm_map_size_t vmsize;
    vm_prot_t prot;
    vm_prot_t maxprot;
    vm_inherit_t inherit;
    int error1;
    task_t task;
    char core_name[MAXCOMLEN+6];
    char *name;
    mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
    vm_size_t mapsize;
    int i;
    int nesting_depth = 0;
    kern_return_t kret;
    struct vm_region_submap_info_64 vbr;
    int vbrcount = 0;
    tir_t tir1;
    struct vnode *vp;
    struct mach_header *mh;
    struct mach_header_64 *mh64;
    int is_64 = 0;
    size_t mach_header_sz = sizeof(struct mach_header);
    size_t segment_command_sz = sizeof(struct segment_command);

    if (do_coredump == 0 ||                 /* Not dumping at all */
        ((sugid_coredump == 0) &&           /* Not dumping SUID/SGID binaries */
         ((cred->cr_svuid != cred->cr_ruid) ||
          (cred->cr_svgid != cred->cr_rgid)))) {

        return (EFAULT);
    }

    if (IS_64BIT_PROCESS(p)) {
        is_64 = 1;
        mach_header_sz = sizeof(struct mach_header_64);
        segment_command_sz = sizeof(struct segment_command_64);
    }

    task = current_task();
    map = current_map();
    mapsize = get_vmmap_size(map);

    if (mapsize >= p->p_rlimit[RLIMIT_CORE].rlim_cur)
        return (EFAULT);
    (void) task_suspend(task);

    /* create name according to sysctl'able format string */
    name = proc_core_name(p->p_comm, kauth_cred_getuid(cred), p->p_pid);

    /* if name creation fails, fall back to historical behaviour... */
    if (name == NULL) {
        sprintf(core_name, "/cores/core.%d", p->p_pid);
        name = core_name;
    }
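
    /*
     * The fallback above yields, e.g., "/cores/core.1234" for pid 1234;
     * the normal path lets proc_core_name() expand the sysctl'able
     * format string mentioned above instead.
     */
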
    context.vc_proc = p;
    context.vc_ucred = cred;

    if ((error = vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), S_IRUSR, VNODE_LOOKUP_NOFOLLOW, &vp, &context)))
        return (error);

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_nlink);
    /* Don't dump to non-regular files or files with links. */
    if (vp->v_type != VREG ||
        vnode_getattr(vp, &va, &context) || va.va_nlink != 1) {
        error = EFAULT;
        goto out;
    }

    VATTR_INIT(&va);    /* better to do it here than waste more stack in vnode_setsize */
    VATTR_SET(&va, va_data_size, 0);
    vnode_setattr(vp, &va, &context);
    p->p_acflag |= ACORE;

    /*
     * If the task is modified while dumping the file (e.g., changes in
     * threads or VM), the resulting file will not necessarily be correct.
     */

    thread_count = get_task_numacts(task);
    segment_count = get_vmmap_entries(map);    /* XXX */
    bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
    tstate_size = 0;
    for (i = 0; i < mynum_flavors; i++)
        tstate_size += sizeof(mythread_state_flavor_t) +
            (flavors[i].count * sizeof(int));

    command_size = segment_count * segment_command_sz +
        thread_count * sizeof(struct thread_command) +
        tstate_size * thread_count;

    header_size = command_size + mach_header_sz;

    (void) kmem_alloc(kernel_map,
        (vm_offset_t *)&header,
        (vm_size_t)header_size);
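
    /*
     * Note: the return value of kmem_alloc() is ignored here, and the
     * code below relies on the allocation coming back zero-filled (see
     * the "segment name is zeroed by kmem_alloc" comments further down).
     */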

    /*
     * Set up Mach-O header.
     */
    if (is_64) {
        mh64 = (struct mach_header_64 *)header;
        mh64->magic = MH_MAGIC_64;
        mh64->cputype = cpu_type();
        mh64->cpusubtype = cpu_subtype();
        mh64->filetype = MH_CORE;
        mh64->ncmds = segment_count + thread_count;
        mh64->sizeofcmds = command_size;
        mh64->reserved = 0;     /* 8 byte alignment */
    } else {
        mh = (struct mach_header *)header;
        mh->magic = MH_MAGIC;
        mh->cputype = cpu_type();
        mh->cpusubtype = cpu_subtype();
        mh->filetype = MH_CORE;
        mh->ncmds = segment_count + thread_count;
        mh->sizeofcmds = command_size;
    }

    hoffset = mach_header_sz;           /* offset into header */
    foffset = round_page(header_size);  /* offset into file */
    vmoffset = MACH_VM_MIN_ADDRESS;     /* offset into VM */

    /*
     * We used to stop on the first error here; now we try to dump
     * as much as we can.
     */
    while (segment_count > 0) {
        struct segment_command *sc;
        struct segment_command_64 *sc64;

        /*
         * Get region information for next region.
         */

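        /*
         * mach_vm_region_recurse() is retried at a deeper nesting level
         * whenever it reports a submap, so the inner loop below ends
         * with either a leaf mapping in vbr or an error in kret.
         */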
        while (1) {
            vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
            if ((kret = mach_vm_region_recurse(map,
                    &vmoffset, &vmsize, &nesting_depth,
                    (vm_region_recurse_info_t)&vbr,
                    &vbrcount)) != KERN_SUCCESS) {
                break;
            }
            /*
             * If we get a valid mapping back, but we're dumping
             * a 32 bit process, and it's over the allowable
             * address space of a 32 bit process, it's the same
             * as if mach_vm_region_recurse() failed.
             */
            if (!(is_64) &&
                (vmoffset + vmsize > VM_MAX_ADDRESS)) {
                kret = KERN_INVALID_ADDRESS;
                break;
            }
            if (vbr.is_submap) {
                nesting_depth++;
                continue;
            } else {
                break;
            }
        }
        if (kret != KERN_SUCCESS)
            break;

        prot = vbr.protection;
        maxprot = vbr.max_protection;
        inherit = vbr.inheritance;
        /*
         * Fill in segment command structure.
         */
        if (is_64) {
            sc64 = (struct segment_command_64 *)(header + hoffset);
            sc64->cmd = LC_SEGMENT_64;
            sc64->cmdsize = sizeof(struct segment_command_64);
            /* segment name is zeroed by kmem_alloc */
            sc64->segname[0] = 0;
            sc64->vmaddr = vmoffset;
            sc64->vmsize = vmsize;
            sc64->fileoff = foffset;
            sc64->filesize = vmsize;
            sc64->maxprot = maxprot;
            sc64->initprot = prot;
            sc64->nsects = 0;
        } else {
            sc = (struct segment_command *)(header + hoffset);
            sc->cmd = LC_SEGMENT;
            sc->cmdsize = sizeof(struct segment_command);
            /* segment name is zeroed by kmem_alloc */
            sc->segname[0] = 0;
            sc->vmaddr = CAST_DOWN(vm_offset_t, vmoffset);
            sc->vmsize = CAST_DOWN(vm_size_t, vmsize);
            sc->fileoff = CAST_DOWN(uint32_t, foffset);
            sc->filesize = CAST_DOWN(uint32_t, vmsize);
            sc->maxprot = maxprot;
            sc->initprot = prot;
            sc->nsects = 0;
        }

        /*
         * Write segment out. Try as hard as possible to
         * get read access to the data.
         */
        if ((prot & VM_PROT_READ) == 0) {
            mach_vm_protect(map, vmoffset, vmsize, FALSE,
                prot | VM_PROT_READ);
        }
        /*
         * Only actually perform write if we can read.
         * Note: if we can't read, then we end up with
         * a hole in the file.
         */
        if ((maxprot & VM_PROT_READ) == VM_PROT_READ
            && vbr.user_tag != VM_MEMORY_IOKIT
            && coredumpok(map, vmoffset)) {
            vm_map_size_t tmp_vmsize = vmsize;
            off_t xfer_foffset = foffset;

            /* LP64todo - works around vn_rdwr_64() 2G limit */
            while (tmp_vmsize > 0) {
                vm_map_size_t xfer_vmsize = tmp_vmsize;
                if (xfer_vmsize > INT_MAX)
                    xfer_vmsize = INT_MAX;
                error = vn_rdwr_64(UIO_WRITE, vp,
                    vmoffset, xfer_vmsize, xfer_foffset,
                    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
                    IO_NODELOCKED | IO_UNIT, cred, (int *) 0, p);
                tmp_vmsize -= xfer_vmsize;
                xfer_foffset += xfer_vmsize;
            }
        }
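
        /*
         * Whether or not anything was written, advance the file and VM
         * offsets by the full region size: unreadable regions simply end
         * up as holes in the file, while the segment commands stay
         * consistent with the task's address space layout.
         */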

        hoffset += segment_command_sz;
        foffset += vmsize;
        vmoffset += vmsize;
        segment_count--;
    }

    /*
     * If any segments remain that were not written out because we broke
     * out of the loop above (for example, because they lie beyond the
     * real address space of the executable type), remove them from the
     * header's command count. This is OK, since we are allowed to have
     * a sparse area following the segments.
     */
    if (is_64) {
        mh64->ncmds -= segment_count;
    } else {
        mh->ncmds -= segment_count;
    }

    tir1.header = header;
    tir1.hoffset = hoffset;
    tir1.flavors = flavors;
    tir1.tstate_size = tstate_size;
    task_act_iterate_wth_args(task, collectth_state, &tir1);

    /*
     * Write out the Mach header at the beginning of the
     * file. OK to use a 32 bit write for this.
     */
    error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0,
        UIO_SYSSPACE32, IO_NODELOCKED | IO_UNIT, cred, (int *) 0, p);
    kmem_free(kernel_map, header, header_size);
out:
    error1 = vnode_close(vp, FWRITE, &context);
    if (error == 0)
        error = error1;

    return (error);
}
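
/*
 * coredump() is reached from the BSD signal-delivery code (postsig() in
 * bsd/kern/kern_sig.c) when a process takes a signal whose disposition
 * calls for creating a core image.
 */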