]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_ktrace.c
xnu-344.12.2.tar.gz
[apple/xnu.git] / bsd / kern / kern_ktrace.c
1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23 /*
24 * Copyright (c) 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)kern_ktrace.c 8.2 (Berkeley) 9/23/93
56 * $FreeBSD: src/sys/kern/kern_ktrace.c,v 1.35.2.4 2001/03/05 13:09:01 obrien Exp $
57 */
58
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/types.h>
63 #include <sys/proc.h>
64 #include <sys/file.h>
65 #include <sys/namei.h>
66 #include <sys/vnode.h>
67 #if KTRACE
68 #include <sys/ktrace.h>
69 #endif
70 #include <sys/malloc.h>
71 #include <sys/syslog.h>
72
73 #if KTRACE
74 static struct ktr_header *ktrgetheader __P((int type));
75 static void ktrwrite __P((struct vnode *, struct ktr_header *,
76 struct uio *, int));
77 static int ktrcanset __P((struct proc *,struct proc *));
78 static int ktrsetchildren __P((struct proc *,struct proc *,
79 int, int, struct vnode *));
80 static int ktrops __P((struct proc *,struct proc *,int,int,struct vnode *));
81
82
83 static struct ktr_header *
84 ktrgetheader(type)
85 int type;
86 {
87 register struct ktr_header *kth;
88 struct proc *p = current_proc(); /* XXX */
89
90 MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
91 M_KTRACE, M_WAITOK);
92 kth->ktr_type = type;
93 microtime(&kth->ktr_time);
94 kth->ktr_pid = p->p_pid;
95 bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
96 return (kth);
97 }
98 #endif
99
100 void
101 ktrsyscall(p, code, narg, args, funnel_type)
102 struct proc *p;
103 int code, narg;
104 register_t args[];
105 int funnel_type;
106 {
107 #if KTRACE
108 struct vnode *vp;
109 struct ktr_header *kth;
110 struct ktr_syscall *ktp;
111 register int len;
112 register_t *argp;
113 int i;
114
115 if (!KTRPOINT(p, KTR_SYSCALL))
116 return;
117
118 vp = p->p_tracep;
119 len = __offsetof(struct ktr_syscall, ktr_args) +
120 (narg * sizeof(register_t));
121 p->p_traceflag |= KTRFAC_ACTIVE;
122 kth = ktrgetheader(KTR_SYSCALL);
123 MALLOC(ktp, struct ktr_syscall *, len, M_KTRACE, M_WAITOK);
124 ktp->ktr_code = code;
125 ktp->ktr_narg = narg;
126 argp = &ktp->ktr_args[0];
127 for (i = 0; i < narg; i++)
128 *argp++ = args[i];
129 kth->ktr_buf = (caddr_t)ktp;
130 kth->ktr_len = len;
131 ktrwrite(vp, kth, NULL, funnel_type);
132 FREE(ktp, M_KTRACE);
133 FREE(kth, M_KTRACE);
134 p->p_traceflag &= ~KTRFAC_ACTIVE;
135 #else
136 return;
137 #endif
138 }
139
140 void
141 ktrsysret(p, code, error, retval, funnel_type)
142 struct proc *p;
143 int code, error;
144 register_t retval;
145 int funnel_type;
146 {
147 #if KTRACE
148 struct vnode *vp;
149 struct ktr_header *kth;
150 struct ktr_sysret ktp;
151
152 if (!KTRPOINT(p, KTR_SYSRET))
153 return;
154
155 vp = p->p_tracep;
156 p->p_traceflag |= KTRFAC_ACTIVE;
157 kth = ktrgetheader(KTR_SYSRET);
158 ktp.ktr_code = code;
159 ktp.ktr_error = error;
160 ktp.ktr_retval = retval; /* what about val2 ? */
161
162 kth->ktr_buf = (caddr_t)&ktp;
163 kth->ktr_len = sizeof(struct ktr_sysret);
164
165 ktrwrite(vp, kth, NULL, funnel_type);
166 FREE(kth, M_KTRACE);
167 p->p_traceflag &= ~KTRFAC_ACTIVE;
168 #else
169 return;
170 #endif
171 }
172
173 #if KTRACE
174 void
175 ktrnamei(vp, path)
176 struct vnode *vp;
177 char *path;
178 {
179 struct ktr_header *kth;
180 struct proc *p = current_proc(); /* XXX */
181
182 p->p_traceflag |= KTRFAC_ACTIVE;
183 kth = ktrgetheader(KTR_NAMEI);
184 kth->ktr_len = strlen(path);
185 kth->ktr_buf = path;
186
187 ktrwrite(vp, kth, NULL, KERNEL_FUNNEL);
188 FREE(kth, M_KTRACE);
189 p->p_traceflag &= ~KTRFAC_ACTIVE;
190 }
191
192 void
193 ktrgenio(vp, fd, rw, uio, error, funnel_type)
194 struct vnode *vp;
195 int fd;
196 enum uio_rw rw;
197 struct uio *uio;
198 int error;
199 int funnel_type;
200 {
201 struct ktr_header *kth;
202 struct ktr_genio ktg;
203 struct proc *p = current_proc(); /* XXX */
204
205 if (error)
206 return;
207
208 p->p_traceflag |= KTRFAC_ACTIVE;
209 kth = ktrgetheader(KTR_GENIO);
210 ktg.ktr_fd = fd;
211 ktg.ktr_rw = rw;
212 kth->ktr_buf = (caddr_t)&ktg;
213 kth->ktr_len = sizeof(struct ktr_genio);
214 uio->uio_offset = 0;
215 uio->uio_rw = UIO_WRITE;
216
217 ktrwrite(vp, kth, uio, funnel_type);
218 FREE(kth, M_KTRACE);
219 p->p_traceflag &= ~KTRFAC_ACTIVE;
220 }
221
222 void
223 ktrpsig(vp, sig, action, mask, code, funnel_type)
224 struct vnode *vp;
225 int sig;
226 sig_t action;
227 sigset_t *mask;
228 int code;
229 int funnel_type;
230 {
231 struct ktr_header *kth;
232 struct ktr_psig kp;
233 struct proc *p = current_proc(); /* XXX */
234
235 p->p_traceflag |= KTRFAC_ACTIVE;
236 kth = ktrgetheader(KTR_PSIG);
237 kp.signo = (char)sig;
238 kp.action = action;
239 kp.mask = *mask;
240 kp.code = code;
241 kth->ktr_buf = (caddr_t)&kp;
242 kth->ktr_len = sizeof (struct ktr_psig);
243
244 ktrwrite(vp, kth, NULL, funnel_type);
245 FREE(kth, M_KTRACE);
246 p->p_traceflag &= ~KTRFAC_ACTIVE;
247 }
248
249 void
250 ktrcsw(vp, out, user, funnel_type)
251 struct vnode *vp;
252 int out, user;
253 int funnel_type;
254 {
255 struct ktr_header *kth;
256 struct ktr_csw kc;
257 struct proc *p = current_proc(); /* XXX */
258
259 p->p_traceflag |= KTRFAC_ACTIVE;
260 kth = ktrgetheader(KTR_CSW);
261 kc.out = out;
262 kc.user = user;
263 kth->ktr_buf = (caddr_t)&kc;
264 kth->ktr_len = sizeof (struct ktr_csw);
265
266 ktrwrite(vp, kth, NULL, funnel_type);
267 FREE(kth, M_KTRACE);
268 p->p_traceflag &= ~KTRFAC_ACTIVE;
269 }
270 #endif /* KTRACE */
271
272 /* Interface and common routines */
273
274 /*
275 * ktrace system call
276 */
/* Argument structure for the ktrace() system call. */
struct ktrace_args {
	char *fname;	/* user-space pathname of the trace log file */
	int ops;	/* KTROP_* request, optionally | KTRFLAG_DESCEND */
	int facs;	/* KTRFAC_* facility mask to trace/untrace */
	int pid;	/* target pid; negative value means process group */
};
283 /* ARGSUSED */
284 int
285 ktrace(curp, uap, retval)
286 struct proc *curp;
287 register struct ktrace_args *uap;
288 register_t *retval;
289 {
290 #if KTRACE
291 register struct vnode *vp = NULL;
292 register struct proc *p;
293 struct pgrp *pg;
294 int facs = uap->facs & ~KTRFAC_ROOT;
295 int ops = KTROP(uap->ops);
296 int descend = uap->ops & KTRFLAG_DESCEND;
297 int ret = 0;
298 int error = 0;
299 struct nameidata nd;
300
301 curp->p_traceflag |= KTRFAC_ACTIVE;
302 if (ops != KTROP_CLEAR) {
303 /*
304 * an operation which requires a file argument.
305 */
306 NDINIT(&nd, LOOKUP, (NOFOLLOW|LOCKLEAF), UIO_USERSPACE, uap->fname, curp);
307 error = vn_open(&nd, FREAD|FWRITE|O_NOFOLLOW, 0);
308 if (error) {
309 curp->p_traceflag &= ~KTRFAC_ACTIVE;
310 return (error);
311 }
312 vp = nd.ni_vp;
313 VOP_UNLOCK(vp, 0, curp);
314 if (vp->v_type != VREG) {
315 (void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
316 curp->p_traceflag &= ~KTRFAC_ACTIVE;
317 return (EACCES);
318 }
319 }
320 /*
321 * Clear all uses of the tracefile
322 */
323 if (ops == KTROP_CLEARFILE) {
324 LIST_FOREACH(p, &allproc, p_list) {
325 if (p->p_tracep == vp) {
326 if (ktrcanset(curp, p)) {
327 struct vnode *tvp = p->p_tracep;
328 /* no more tracing */
329 p->p_traceflag = 0;
330 if (tvp != NULL) {
331 p->p_tracep = NULL;
332 vrele(tvp);
333 }
334 } else
335 error = EPERM;
336 }
337 }
338 goto done;
339 }
340
341 /*
342 * need something to (un)trace (XXX - why is this here?)
343 */
344 if (!facs) {
345 error = EINVAL;
346 goto done;
347 }
348 /*
349 * do it
350 */
351 if (uap->pid < 0) {
352 /*
353 * by process group
354 */
355 pg = pgfind(-uap->pid);
356 if (pg == NULL) {
357 error = ESRCH;
358 goto done;
359 }
360 LIST_FOREACH(p, &pg->pg_members, p_pglist)
361 if (descend)
362 ret |= ktrsetchildren(curp, p, ops, facs, vp);
363 else
364 ret |= ktrops(curp, p, ops, facs, vp);
365
366 } else {
367 /*
368 * by pid
369 */
370 p = pfind(uap->pid);
371 if (p == NULL) {
372 error = ESRCH;
373 goto done;
374 }
375 if (descend)
376 ret |= ktrsetchildren(curp, p, ops, facs, vp);
377 else
378 ret |= ktrops(curp, p, ops, facs, vp);
379 }
380 if (!ret)
381 error = EPERM;
382 done:
383 if (vp != NULL)
384 (void) vn_close(vp, FWRITE, curp->p_ucred, curp);
385 curp->p_traceflag &= ~KTRFAC_ACTIVE;
386 return (error);
387 #else
388 return ENOSYS;
389 #endif
390 }
391
392 /*
393 * utrace system call
394 */
/* Argument structure for the utrace() system call. */
struct utrace_args {
	const void *	addr;	/* user-space buffer to log as a KTR_USER record */
	size_t	len;	/* buffer length; must not exceed KTR_USER_MAXLEN */
};
399
400 /* ARGSUSED */
401 int
402 utrace(curp, uap, retval)
403 struct proc *curp;
404 register struct utrace_args *uap;
405 register_t *retval;
406 {
407 #if KTRACE
408 struct ktr_header *kth;
409 struct proc *p = current_proc(); /* XXX */
410 register caddr_t cp;
411
412 if (!KTRPOINT(p, KTR_USER))
413 return (0);
414 if (uap->len > KTR_USER_MAXLEN)
415 return (EINVAL);
416 p->p_traceflag |= KTRFAC_ACTIVE;
417 kth = ktrgetheader(KTR_USER);
418 MALLOC(cp, caddr_t, uap->len, M_KTRACE, M_WAITOK);
419 if (!copyin(uap->addr, cp, uap->len)) {
420 kth->ktr_buf = cp;
421 kth->ktr_len = uap->len;
422 ktrwrite(p->p_tracep, kth, NULL, KERNEL_FUNNEL);
423 }
424 FREE(kth, M_KTRACE);
425 FREE(cp, M_KTRACE);
426 p->p_traceflag &= ~KTRFAC_ACTIVE;
427
428 return (0);
429 #else
430 return (ENOSYS);
431 #endif
432 }
433
434 #if KTRACE
435 static int
436 ktrops(curp, p, ops, facs, vp)
437 struct proc *p, *curp;
438 int ops, facs;
439 struct vnode *vp;
440 {
441 struct vnode *tvp;
442
443 if (!ktrcanset(curp, p))
444 return (0);
445 if (ops == KTROP_SET) {
446 if (p->p_tracep != vp) {
447 /*
448 * if trace file already in use, relinquish
449 */
450 tvp = p->p_tracep;
451 VREF(vp);
452 p->p_tracep = vp;
453 if (tvp != NULL)
454 vrele(tvp);
455 }
456 p->p_traceflag |= facs;
457 if (curp->p_ucred->cr_uid == 0)
458 p->p_traceflag |= KTRFAC_ROOT;
459 } else {
460 /* KTROP_CLEAR */
461 if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
462 /* no more tracing */
463 tvp = p->p_tracep;
464 p->p_traceflag = 0;
465 if (tvp != NULL) {
466 p->p_tracep = NULL;
467 vrele(tvp);
468 }
469 }
470 }
471
472 return (1);
473 }
474
/*
 * Apply ktrops() to `top' and to every descendant of `top' in the
 * process tree, via an iterative pre-order walk (children first, then
 * siblings, climbing back up — but never above top).  Returns the OR
 * of all ktrops() results, i.e. nonzero if at least one process was
 * successfully (un)traced.
 */
static int
ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			/* back at the root of the walk: done */
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			/* no more siblings: climb one level toward top */
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}
506
507 static void
508 ktrwrite(vp, kth, uio, funnel_type)
509 struct vnode *vp;
510 register struct ktr_header *kth;
511 struct uio *uio;
512 {
513 struct uio auio;
514 struct iovec aiov[2];
515 register struct proc *p = current_proc(); /* XXX */
516 int error;
517
518 if (vp == NULL)
519 return;
520
521 if (funnel_type == -1) {
522 funnel_t *f = thread_funnel_get();
523 if(f == THR_FUNNEL_NULL)
524 funnel_type = NO_FUNNEL;
525 else if (f == (funnel_t *)network_flock)
526 funnel_type = NETWORK_FUNNEL;
527 else if (f == (funnel_t *)kernel_flock)
528 funnel_type = KERNEL_FUNNEL;
529 }
530
531 switch (funnel_type) {
532 case KERNEL_FUNNEL:
533 /* Nothing more to do */
534 break;
535 case NETWORK_FUNNEL:
536 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
537 break;
538 case NO_FUNNEL:
539 (void) thread_funnel_set(kernel_flock, TRUE);
540 break;
541 default:
542 panic("Invalid funnel (%)", funnel_type);
543 }
544 auio.uio_iov = &aiov[0];
545 auio.uio_offset = 0;
546 auio.uio_segflg = UIO_SYSSPACE;
547 auio.uio_rw = UIO_WRITE;
548 aiov[0].iov_base = (caddr_t)kth;
549 aiov[0].iov_len = sizeof(struct ktr_header);
550 auio.uio_resid = sizeof(struct ktr_header);
551 auio.uio_iovcnt = 1;
552 auio.uio_procp = current_proc();
553 if (kth->ktr_len > 0) {
554 auio.uio_iovcnt++;
555 aiov[1].iov_base = kth->ktr_buf;
556 aiov[1].iov_len = kth->ktr_len;
557 auio.uio_resid += kth->ktr_len;
558 if (uio != NULL)
559 kth->ktr_len += uio->uio_resid;
560 }
561 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
562 if (error)
563 goto bad;
564 (void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
565 error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, p->p_ucred);
566 if (error == 0 && uio != NULL) {
567 (void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
568 error = VOP_WRITE(vp, uio, IO_UNIT | IO_APPEND, p->p_ucred);
569 }
570 VOP_UNLOCK(vp, 0, p);
571 if (!error) {
572 switch (funnel_type) {
573 case KERNEL_FUNNEL:
574 /* Nothing more to do */
575 break;
576 case NETWORK_FUNNEL:
577 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
578 /* switch funnel to NETWORK_FUNNEL */
579 break;
580 case NO_FUNNEL:
581 (void) thread_funnel_set(kernel_flock, FALSE);
582 break;
583 default:
584 panic("Invalid funnel (%)", funnel_type);
585 }
586 return;
587 }
588
589 bad:
590 /*
591 * If error encountered, give up tracing on this vnode.
592 */
593 log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
594 error);
595 LIST_FOREACH(p, &allproc, p_list) {
596 if (p->p_tracep == vp) {
597 p->p_tracep = NULL;
598 p->p_traceflag = 0;
599 vrele(vp);
600 }
601 }
602
603 switch (funnel_type) {
604 case KERNEL_FUNNEL:
605 /* Nothing more to do */
606 break;
607 case NETWORK_FUNNEL:
608 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
609 /* switch funnel to NETWORK_FUNNEL */
610 break;
611 case NO_FUNNEL:
612 (void) thread_funnel_set(kernel_flock, FALSE);
613 break;
614 default:
615 panic("Invalid funnel (%)", funnel_type);
616 }
617 }
618
619 /*
620 * Return true if caller has permission to set the ktracing state
621 * of target. Essentially, the target can't possess any
622 * more permissions than the caller. KTRFAC_ROOT signifies that
623 * root previously set the tracing status on the target process, and
624 * so, only root may further change it.
625 *
626 * TODO: check groups. use caller effective gid.
627 */
628 static int
629 ktrcanset(callp, targetp)
630 struct proc *callp, *targetp;
631 {
632 register struct pcred *caller = callp->p_cred;
633 register struct pcred *target = targetp->p_cred;
634
635 if (!PRISON_CHECK(callp, targetp))
636 return (0);
637 if ((caller->pc_ucred->cr_uid == target->p_ruid &&
638 target->p_ruid == target->p_svuid &&
639 caller->p_rgid == target->p_rgid && /* XXX */
640 target->p_rgid == target->p_svgid &&
641 (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
642 caller->pc_ucred->cr_uid == 0)
643 return (1);
644
645 return (0);
646 }
647
648 #endif /* KTRACE */