]> git.saurik.com Git - apple/xnu.git/blame_incremental - bsd/kern/kern_ktrace.c
xnu-517.12.7.tar.gz
[apple/xnu.git] / bsd / kern / kern_ktrace.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23/*
24 * Copyright (c) 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)kern_ktrace.c 8.2 (Berkeley) 9/23/93
56 * $FreeBSD: src/sys/kern/kern_ktrace.c,v 1.35.2.4 2001/03/05 13:09:01 obrien Exp $
57 */
58
59
60#include <sys/param.h>
61#include <sys/systm.h>
62#include <sys/types.h>
63#include <sys/proc.h>
64#include <sys/file.h>
65#include <sys/namei.h>
66#include <sys/vnode.h>
67#if KTRACE
68#include <sys/ktrace.h>
69#endif
70#include <sys/malloc.h>
71#include <sys/syslog.h>
72#include <sys/ubc.h>
73
74#include <bsm/audit_kernel.h>
75
#if KTRACE
/* Forward declarations for the KTRACE-only internal helpers below. */
static struct ktr_header *ktrgetheader __P((int type));
static void ktrwrite __P((struct vnode *, struct ktr_header *,
	struct uio *, int));
static int ktrcanset __P((struct proc *,struct proc *));
static int ktrsetchildren __P((struct proc *,struct proc *,
	int, int, struct vnode *));
static int ktrops __P((struct proc *,struct proc *,int,int,struct vnode *));
84
85
86static struct ktr_header *
87ktrgetheader(type)
88 int type;
89{
90 register struct ktr_header *kth;
91 struct proc *p = current_proc(); /* XXX */
92
93 MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
94 M_KTRACE, M_WAITOK);
95 kth->ktr_type = type;
96 microtime(&kth->ktr_time);
97 kth->ktr_pid = p->p_pid;
98 bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
99 return (kth);
100}
101#endif
102
103void
104ktrsyscall(p, code, narg, args, funnel_type)
105 struct proc *p;
106 int code, narg;
107 register_t args[];
108 int funnel_type;
109{
110#if KTRACE
111 struct vnode *vp;
112 struct ktr_header *kth;
113 struct ktr_syscall *ktp;
114 register int len;
115 register_t *argp;
116 int i;
117
118 if (!KTRPOINT(p, KTR_SYSCALL))
119 return;
120
121 vp = p->p_tracep;
122 len = __offsetof(struct ktr_syscall, ktr_args) +
123 (narg * sizeof(register_t));
124 p->p_traceflag |= KTRFAC_ACTIVE;
125 kth = ktrgetheader(KTR_SYSCALL);
126 MALLOC(ktp, struct ktr_syscall *, len, M_KTRACE, M_WAITOK);
127 ktp->ktr_code = code;
128 ktp->ktr_narg = narg;
129 argp = &ktp->ktr_args[0];
130 for (i = 0; i < narg; i++)
131 *argp++ = args[i];
132 kth->ktr_buf = (caddr_t)ktp;
133 kth->ktr_len = len;
134 ktrwrite(vp, kth, NULL, funnel_type);
135 FREE(ktp, M_KTRACE);
136 FREE(kth, M_KTRACE);
137 p->p_traceflag &= ~KTRFAC_ACTIVE;
138#else
139 return;
140#endif
141}
142
143void
144ktrsysret(p, code, error, retval, funnel_type)
145 struct proc *p;
146 int code, error;
147 register_t retval;
148 int funnel_type;
149{
150#if KTRACE
151 struct vnode *vp;
152 struct ktr_header *kth;
153 struct ktr_sysret ktp;
154
155 if (!KTRPOINT(p, KTR_SYSRET))
156 return;
157
158 vp = p->p_tracep;
159 p->p_traceflag |= KTRFAC_ACTIVE;
160 kth = ktrgetheader(KTR_SYSRET);
161 ktp.ktr_code = code;
162 ktp.ktr_error = error;
163 ktp.ktr_retval = retval; /* what about val2 ? */
164
165 kth->ktr_buf = (caddr_t)&ktp;
166 kth->ktr_len = sizeof(struct ktr_sysret);
167
168 ktrwrite(vp, kth, NULL, funnel_type);
169 FREE(kth, M_KTRACE);
170 p->p_traceflag &= ~KTRFAC_ACTIVE;
171#else
172 return;
173#endif
174}
175
176#if KTRACE
177void
178ktrnamei(vp, path)
179 struct vnode *vp;
180 char *path;
181{
182 struct ktr_header *kth;
183 struct proc *p = current_proc(); /* XXX */
184
185 p->p_traceflag |= KTRFAC_ACTIVE;
186 kth = ktrgetheader(KTR_NAMEI);
187 kth->ktr_len = strlen(path);
188 kth->ktr_buf = path;
189
190 ktrwrite(vp, kth, NULL, KERNEL_FUNNEL);
191 FREE(kth, M_KTRACE);
192 p->p_traceflag &= ~KTRFAC_ACTIVE;
193}
194
195void
196ktrgenio(vp, fd, rw, uio, error, funnel_type)
197 struct vnode *vp;
198 int fd;
199 enum uio_rw rw;
200 struct uio *uio;
201 int error;
202 int funnel_type;
203{
204 struct ktr_header *kth;
205 struct ktr_genio ktg;
206 struct proc *p = current_proc(); /* XXX */
207
208 if (error)
209 return;
210
211 p->p_traceflag |= KTRFAC_ACTIVE;
212 kth = ktrgetheader(KTR_GENIO);
213 ktg.ktr_fd = fd;
214 ktg.ktr_rw = rw;
215 kth->ktr_buf = (caddr_t)&ktg;
216 kth->ktr_len = sizeof(struct ktr_genio);
217 uio->uio_offset = 0;
218 uio->uio_rw = UIO_WRITE;
219
220 ktrwrite(vp, kth, uio, funnel_type);
221 FREE(kth, M_KTRACE);
222 p->p_traceflag &= ~KTRFAC_ACTIVE;
223}
224
225void
226ktrpsig(vp, sig, action, mask, code, funnel_type)
227 struct vnode *vp;
228 int sig;
229 sig_t action;
230 sigset_t *mask;
231 int code;
232 int funnel_type;
233{
234 struct ktr_header *kth;
235 struct ktr_psig kp;
236 struct proc *p = current_proc(); /* XXX */
237
238 p->p_traceflag |= KTRFAC_ACTIVE;
239 kth = ktrgetheader(KTR_PSIG);
240 kp.signo = (char)sig;
241 kp.action = action;
242 kp.mask = *mask;
243 kp.code = code;
244 kth->ktr_buf = (caddr_t)&kp;
245 kth->ktr_len = sizeof (struct ktr_psig);
246
247 ktrwrite(vp, kth, NULL, funnel_type);
248 FREE(kth, M_KTRACE);
249 p->p_traceflag &= ~KTRFAC_ACTIVE;
250}
251
252void
253ktrcsw(vp, out, user, funnel_type)
254 struct vnode *vp;
255 int out, user;
256 int funnel_type;
257{
258 struct ktr_header *kth;
259 struct ktr_csw kc;
260 struct proc *p = current_proc(); /* XXX */
261
262 p->p_traceflag |= KTRFAC_ACTIVE;
263 kth = ktrgetheader(KTR_CSW);
264 kc.out = out;
265 kc.user = user;
266 kth->ktr_buf = (caddr_t)&kc;
267 kth->ktr_len = sizeof (struct ktr_csw);
268
269 ktrwrite(vp, kth, NULL, funnel_type);
270 FREE(kth, M_KTRACE);
271 p->p_traceflag &= ~KTRFAC_ACTIVE;
272}
273#endif /* KTRACE */
274
275/* Interface and common routines */
276
277/*
278 * ktrace system call
279 */
/*
 * User-visible argument structure for the ktrace() system call.
 */
struct ktrace_args {
	char *fname;	/* user-space path of the trace log file */
	int ops;	/* KTROP_* operation, optionally | KTRFLAG_DESCEND */
	int facs;	/* KTRFAC_* facility mask to trace or untrace */
	int pid;	/* target pid; negative value means -pgid */
};
286/* ARGSUSED */
/*
 * ktrace system call: enable, disable, or clear syscall tracing for a
 * process, a process group (negative pid), or a whole process subtree
 * (KTRFLAG_DESCEND), writing records to the file named by uap->fname.
 * Returns 0 on success or an errno value.
 */
int
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct ktrace_args *uap;
	register_t *retval;
{
#if KTRACE
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	/* callers may never set KTRFAC_ROOT themselves; see ktrops() */
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	AUDIT_ARG(cmd, uap->ops);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(value, uap->facs);
	/* presumably suppresses tracing of this syscall itself — confirm
	 * against the KTRPOINT() definition */
	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 * NOFOLLOW/O_NOFOLLOW: refuse to trace through a symlink.
		 */
		NDINIT(&nd, LOOKUP, (NOFOLLOW|LOCKLEAF), UIO_USERSPACE, uap->fname, curp);
		error = vn_open(&nd, FREAD|FWRITE|O_NOFOLLOW, 0);
		if (error) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0, curp);
		/* only regular files may be trace targets */
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile: stop every process that is
	 * currently tracing to this vnode (permission checked per process).
	 */
	if (ops == KTROP_CLEARFILE) {
		LIST_FOREACH(p, &allproc, p_list) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					struct vnode *tvp = p->p_tracep;
					/* no more tracing */
					p->p_traceflag = 0;
					if (tvp != NULL) {
						p->p_tracep = NULL;

						/* NOTE(review): closes `vp' but releases `tvp';
						 * they are equal inside this branch, yet the
						 * mixed naming is error-prone — confirm. */
						VOP_CLOSE(vp, FREAD|FWRITE, curp->p_ucred, curp);
						ubc_rele(tvp);
						vrele(tvp);
					}
				} else
					error = EPERM;
			}
		}
		goto done;
	}

	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		LIST_FOREACH(p, &pg->pg_members, p_pglist)
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, vp);
			else
				ret |= ktrops(curp, p, ops, facs, vp);

	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		AUDIT_ARG(process, p);
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, vp);
		else
			ret |= ktrops(curp, p, ops, facs, vp);
	}
	/* ret is the OR of per-process successes; all-denied => EPERM */
	if (!ret)
		error = EPERM;
done:
	/* drop the open reference taken above; traced processes hold
	 * their own references via ktrops().
	 * NOTE(review): opened with FREAD|FWRITE but closed with FWRITE
	 * only — confirm this asymmetry is intentional. */
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
#else
	return ENOSYS;
#endif
}
401
402/*
403 * utrace system call
404 */
/*
 * User-visible argument structure for the utrace() system call.
 */
struct utrace_args {
	const void * addr;	/* user-space buffer to record */
	size_t len;		/* buffer length; capped at KTR_USER_MAXLEN */
};
409
410/* ARGSUSED */
/*
 * utrace system call: let a traced process append an arbitrary
 * (bounded) chunk of its own data to its ktrace log as a KTR_USER
 * record.  Returns 0 (even if the copyin failed) or EINVAL/ENOSYS.
 */
int
utrace(curp, uap, retval)
	struct proc *curp;
	register struct utrace_args *uap;
	register_t *retval;
{
#if KTRACE
	struct ktr_header *kth;
	struct proc *p = current_proc();	/* XXX */
	register caddr_t cp;

	/* nothing to do unless KTR_USER tracing is enabled */
	if (!KTRPOINT(p, KTR_USER))
		return (0);
	/* refuse oversized records */
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_USER);
	MALLOC(cp, caddr_t, uap->len, M_KTRACE, M_WAITOK);
	/* copyin returns 0 on success: only emit the record if the user
	 * buffer copied in cleanly; a fault is silently ignored */
	if (!copyin((caddr_t)uap->addr, cp, uap->len)) {
		kth->ktr_buf = cp;
		kth->ktr_len = uap->len;
		ktrwrite(p->p_tracep, kth, NULL, KERNEL_FUNNEL);
	}
	FREE(kth, M_KTRACE);
	FREE(cp, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;

	return (0);
#else
	return (ENOSYS);
#endif
}
443
444#if KTRACE
/*
 * Apply a trace operation to a single process: on KTROP_SET, point the
 * process at trace vnode `vp' and enable facilities `facs'; on
 * KTROP_CLEAR, disable them and release the vnode when no facilities
 * remain.  Returns 1 on success, 0 if the caller lacks permission.
 */
static int
ktrops(curp, p, ops, facs, vp)
	struct proc *p, *curp;
	int ops, facs;
	struct vnode *vp;
{
	struct vnode *tvp;

	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			tvp = p->p_tracep;

			/* take this process' own references on the new vnode */
			if (UBCINFOEXISTS(vp))
				ubc_hold(vp);
			VREF(vp);

			p->p_tracep = vp;
			if (tvp != NULL) {
				/* drop the references held on the old vnode */
				VOP_CLOSE(tvp, FREAD|FWRITE, p->p_ucred, p);
				ubc_rele(tvp);
				vrele(tvp);
			}
		}
		p->p_traceflag |= facs;
		/* record that root enabled tracing; see ktrcanset() */
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing: detach and release the vnode */
			tvp = p->p_tracep;
			p->p_traceflag = 0;
			if (tvp != NULL) {
				p->p_tracep = NULL;

				VOP_CLOSE(tvp, FREAD|FWRITE, p->p_ucred, p);
				ubc_rele(tvp);
				vrele(tvp);
			}
		}
	}

	return (1);
}
494
/*
 * Apply ktrops() to `top' and every descendant of `top' via an
 * iterative pre-order walk of the process tree (no recursion, so no
 * kernel stack depth concerns).  Returns the OR of the per-process
 * ktrops() results: nonzero if at least one process was changed.
 */
static int
ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			/* back at the root: the whole subtree is done */
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}
526
527static void
528ktrwrite(vp, kth, uio, funnel_type)
529 struct vnode *vp;
530 register struct ktr_header *kth;
531 struct uio *uio;
532{
533 struct uio auio;
534 struct iovec aiov[2];
535 register struct proc *p = current_proc(); /* XXX */
536 int error;
537
538 if (vp == NULL)
539 return;
540
541 if (funnel_type == -1) {
542 funnel_t *f = thread_funnel_get();
543 if(f == THR_FUNNEL_NULL)
544 funnel_type = NO_FUNNEL;
545 else if (f == (funnel_t *)network_flock)
546 funnel_type = NETWORK_FUNNEL;
547 else if (f == (funnel_t *)kernel_flock)
548 funnel_type = KERNEL_FUNNEL;
549 }
550
551 switch (funnel_type) {
552 case KERNEL_FUNNEL:
553 /* Nothing more to do */
554 break;
555 case NETWORK_FUNNEL:
556 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
557 break;
558 case NO_FUNNEL:
559 (void) thread_funnel_set(kernel_flock, TRUE);
560 break;
561 default:
562 panic("Invalid funnel (%)", funnel_type);
563 }
564 auio.uio_iov = &aiov[0];
565 auio.uio_offset = 0;
566 auio.uio_segflg = UIO_SYSSPACE;
567 auio.uio_rw = UIO_WRITE;
568 aiov[0].iov_base = (caddr_t)kth;
569 aiov[0].iov_len = sizeof(struct ktr_header);
570 auio.uio_resid = sizeof(struct ktr_header);
571 auio.uio_iovcnt = 1;
572 auio.uio_procp = current_proc();
573 if (kth->ktr_len > 0) {
574 auio.uio_iovcnt++;
575 aiov[1].iov_base = kth->ktr_buf;
576 aiov[1].iov_len = kth->ktr_len;
577 auio.uio_resid += kth->ktr_len;
578 if (uio != NULL)
579 kth->ktr_len += uio->uio_resid;
580 }
581 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
582 if (error)
583 goto bad;
584 (void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
585 error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, p->p_ucred);
586 if (error == 0 && uio != NULL) {
587 (void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
588 error = VOP_WRITE(vp, uio, IO_UNIT | IO_APPEND, p->p_ucred);
589 }
590 VOP_UNLOCK(vp, 0, p);
591 if (!error) {
592 switch (funnel_type) {
593 case KERNEL_FUNNEL:
594 /* Nothing more to do */
595 break;
596 case NETWORK_FUNNEL:
597 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
598 /* switch funnel to NETWORK_FUNNEL */
599 break;
600 case NO_FUNNEL:
601 (void) thread_funnel_set(kernel_flock, FALSE);
602 break;
603 default:
604 panic("Invalid funnel (%)", funnel_type);
605 }
606 return;
607 }
608
609bad:
610 /*
611 * If error encountered, give up tracing on this vnode.
612 */
613 log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
614 error);
615 LIST_FOREACH(p, &allproc, p_list) {
616 if (p->p_tracep == vp) {
617 p->p_tracep = NULL;
618 p->p_traceflag = 0;
619
620 VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
621 ubc_rele(vp);
622 vrele(vp);
623 }
624 }
625
626 switch (funnel_type) {
627 case KERNEL_FUNNEL:
628 /* Nothing more to do */
629 break;
630 case NETWORK_FUNNEL:
631 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
632 /* switch funnel to NETWORK_FUNNEL */
633 break;
634 case NO_FUNNEL:
635 (void) thread_funnel_set(kernel_flock, FALSE);
636 break;
637 default:
638 panic("Invalid funnel (%)", funnel_type);
639 }
640}
641
642/*
643 * Return true if caller has permission to set the ktracing state
644 * of target. Essentially, the target can't possess any
645 * more permissions than the caller. KTRFAC_ROOT signifies that
646 * root previously set the tracing status on the target process, and
647 * so, only root may further change it.
648 *
649 * TODO: check groups. use caller effective gid.
650 */
651static int
652ktrcanset(callp, targetp)
653 struct proc *callp, *targetp;
654{
655 register struct pcred *caller = callp->p_cred;
656 register struct pcred *target = targetp->p_cred;
657
658 if (!PRISON_CHECK(callp, targetp))
659 return (0);
660 if ((caller->pc_ucred->cr_uid == target->p_ruid &&
661 target->p_ruid == target->p_svuid &&
662 caller->p_rgid == target->p_rgid && /* XXX */
663 target->p_rgid == target->p_svgid &&
664 (targetp->p_traceflag & KTRFAC_ROOT) == 0 &&
665 (targetp->p_flag & P_SUGID) == 0) ||
666 caller->pc_ucred->cr_uid == 0)
667 return (1);
668
669 return (0);
670}
671
672#endif /* KTRACE */