/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 * $FreeBSD: src/sys/kern/kern_ktrace.c,v 1.35.2.4 2001/03/05 13:09:01 obrien Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
static struct ktr_header *ktrgetheader __P((int type));
static void ktrwrite __P((struct vnode *, struct ktr_header *,
        struct uio *, int));
static int ktrcanset __P((struct proc *, struct proc *));
static int ktrsetchildren __P((struct proc *, struct proc *,
        int, int, struct vnode *));
static int ktrops __P((struct proc *, struct proc *, int, int, struct vnode *));
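/*
 * Allocate a ktrace record header and stamp it with the current time,
 * the tracing process's pid and its command name.
 */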
static struct ktr_header *
ktrgetheader(type)
        int type;
{
        register struct ktr_header *kth;
        struct proc *p = current_proc();        /* XXX */

        MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
                M_KTRACE, M_WAITOK);
        kth->ktr_type = type;
        microtime(&kth->ktr_time);
        kth->ktr_pid = p->p_pid;
        bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
        return (kth);
}
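/*
 * Record the entry to a system call: the call number and a copy of
 * its arguments.
 */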
void
ktrsyscall(p, code, narg, args, funnel_type)
        struct proc *p;
        int code, narg;
        register_t args[];
        int funnel_type;
{
        struct vnode *vp = p->p_tracep;
        struct ktr_header *kth;
        struct ktr_syscall *ktp;
        register int len;
        register_t *argp;
        int i;

        if (!KTRPOINT(p, KTR_SYSCALL))
                return;

        len = __offsetof(struct ktr_syscall, ktr_args) +
            (narg * sizeof(register_t));
        p->p_traceflag |= KTRFAC_ACTIVE;
        kth = ktrgetheader(KTR_SYSCALL);
        MALLOC(ktp, struct ktr_syscall *, len, M_KTRACE, M_WAITOK);
        ktp->ktr_code = code;
        ktp->ktr_narg = narg;
        argp = &ktp->ktr_args[0];
        for (i = 0; i < narg; i++)
                *argp++ = args[i];
        kth->ktr_buf = (caddr_t)ktp;
        kth->ktr_len = len;
        ktrwrite(vp, kth, NULL, funnel_type);
        FREE(ktp, M_KTRACE);
        FREE(kth, M_KTRACE);
        p->p_traceflag &= ~KTRFAC_ACTIVE;
}
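/*
 * Record the return from a system call: the error status and the
 * primary return value.
 */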
void
ktrsysret(p, code, error, retval, funnel_type)
        struct proc *p;
        int code, error;
        register_t retval;
        int funnel_type;
{
        struct vnode *vp = p->p_tracep;
        struct ktr_header *kth;
        struct ktr_sysret ktp;

        if (!KTRPOINT(p, KTR_SYSRET))
                return;

        p->p_traceflag |= KTRFAC_ACTIVE;
        kth = ktrgetheader(KTR_SYSRET);
        ktp.ktr_code = code;
        ktp.ktr_error = error;
        ktp.ktr_retval = retval;        /* what about val2 ? */

        kth->ktr_buf = (caddr_t)&ktp;
        kth->ktr_len = sizeof(struct ktr_sysret);

        ktrwrite(vp, kth, NULL, funnel_type);
        FREE(kth, M_KTRACE);
        p->p_traceflag &= ~KTRFAC_ACTIVE;
}
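/*
 * Record a pathname looked up by namei() on behalf of the traced
 * process.
 */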
void
ktrnamei(vp, path)
        struct vnode *vp;
        char *path;
{
        struct ktr_header *kth;
        struct proc *p = current_proc();        /* XXX */

        p->p_traceflag |= KTRFAC_ACTIVE;
        kth = ktrgetheader(KTR_NAMEI);
        kth->ktr_len = strlen(path);
        kth->ktr_buf = path;

        ktrwrite(vp, kth, NULL, KERNEL_FUNNEL);
        FREE(kth, M_KTRACE);
        p->p_traceflag &= ~KTRFAC_ACTIVE;
}
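/*
 * Record generic I/O: the file descriptor, the transfer direction and
 * the data itself (appended from the supplied uio).
 */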
void
ktrgenio(vp, fd, rw, uio, error, funnel_type)
        struct vnode *vp;
        int fd;
        enum uio_rw rw;
        struct uio *uio;
        int error;
        int funnel_type;
{
        struct ktr_header *kth;
        struct ktr_genio ktg;
        struct proc *p = current_proc();        /* XXX */

        if (error)
                return;

        p->p_traceflag |= KTRFAC_ACTIVE;
        kth = ktrgetheader(KTR_GENIO);
        ktg.ktr_fd = fd;
        ktg.ktr_rw = rw;
        kth->ktr_buf = (caddr_t)&ktg;
        kth->ktr_len = sizeof(struct ktr_genio);
        uio->uio_offset = 0;
        uio->uio_rw = UIO_WRITE;

        ktrwrite(vp, kth, uio, funnel_type);
        FREE(kth, M_KTRACE);
        p->p_traceflag &= ~KTRFAC_ACTIVE;
}
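/*
 * Record the delivery of a signal: the signal number, the handler,
 * the blocked mask and the signal code.
 */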
void
ktrpsig(vp, sig, action, mask, code, funnel_type)
        struct vnode *vp;
        int sig;
        sig_t action;
        sigset_t *mask;
        int code;
        int funnel_type;
{
        struct ktr_header *kth;
        struct ktr_psig kp;
        struct proc *p = current_proc();        /* XXX */

        p->p_traceflag |= KTRFAC_ACTIVE;
        kth = ktrgetheader(KTR_PSIG);
        kp.signo = (char)sig;
        kp.action = action;
        kp.mask = *mask;
        kp.code = code;
        kth->ktr_buf = (caddr_t)&kp;
        kth->ktr_len = sizeof (struct ktr_psig);

        ktrwrite(vp, kth, NULL, funnel_type);
        FREE(kth, M_KTRACE);
        p->p_traceflag &= ~KTRFAC_ACTIVE;
}
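/*
 * Record a context-switch event: whether the process is being switched
 * out and whether it was running in user mode at the time.
 */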
void
ktrcsw(vp, out, user, funnel_type)
        struct vnode *vp;
        int out, user;
        int funnel_type;
{
        struct ktr_header *kth;
        struct ktr_csw kc;
        struct proc *p = current_proc();        /* XXX */

        p->p_traceflag |= KTRFAC_ACTIVE;
        kth = ktrgetheader(KTR_CSW);
        kc.out = out;
        kc.user = user;
        kth->ktr_buf = (caddr_t)&kc;
        kth->ktr_len = sizeof (struct ktr_csw);

        ktrwrite(vp, kth, NULL, funnel_type);
        FREE(kth, M_KTRACE);
        p->p_traceflag &= ~KTRFAC_ACTIVE;
}
/* Interface and common routines */
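/*
 * ktrace system call: enable, disable or clear tracing for a process,
 * a process group (negative pid), or, with KTRFLAG_DESCEND, an entire
 * process tree.
 */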
int
ktrace(curp, uap, retval)
        struct proc *curp;
        register struct ktrace_args *uap;
        register_t *retval;
{
        register struct vnode *vp = NULL;
        register struct proc *p;
        struct pgrp *pg;
        int facs = uap->facs & ~KTRFAC_ROOT;
        int ops = KTROP(uap->ops);
        int descend = uap->ops & KTRFLAG_DESCEND;
        int ret = 0;
        int error = 0;
        struct nameidata nd;

        curp->p_traceflag |= KTRFAC_ACTIVE;
        if (ops != KTROP_CLEAR) {
                /*
                 * an operation which requires a file argument.
                 */
                NDINIT(&nd, LOOKUP, (NOFOLLOW|LOCKLEAF), UIO_USERSPACE, uap->fname, curp);
                error = vn_open(&nd, FREAD|FWRITE|O_NOFOLLOW, 0);
                if (error) {
                        curp->p_traceflag &= ~KTRFAC_ACTIVE;
                        return (error);
                }
                vp = nd.ni_vp;
                VOP_UNLOCK(vp, 0, curp);
                if (vp->v_type != VREG) {
                        (void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
                        curp->p_traceflag &= ~KTRFAC_ACTIVE;
                        return (EACCES);
                }
        }
        /*
         * Clear all uses of the tracefile
         */
        if (ops == KTROP_CLEARFILE) {
                LIST_FOREACH(p, &allproc, p_list) {
                        if (p->p_tracep == vp) {
                                if (ktrcanset(curp, p)) {
                                        struct vnode *tvp = p->p_tracep;
                                        /* no more tracing */
                                        p->p_traceflag = 0;
                                        if (tvp != NULL) {
                                                p->p_tracep = NULL;
                                                vrele(tvp);
                                        }
                                } else
                                        error = EPERM;
                        }
                }
                goto done;
        }
        /*
         * need something to (un)trace (XXX - why is this here?)
         */
        if (!facs) {
                error = EINVAL;
                goto done;
        }
        /*
         * do it
         */
        if (uap->pid < 0) {
                /*
                 * by process group
                 */
                pg = pgfind(-uap->pid);
                if (pg == NULL) {
                        error = ESRCH;
                        goto done;
                }
                LIST_FOREACH(p, &pg->pg_members, p_pglist)
                        if (descend)
                                ret |= ktrsetchildren(curp, p, ops, facs, vp);
                        else
                                ret |= ktrops(curp, p, ops, facs, vp);
        } else {
                /*
                 * by pid
                 */
                p = pfind(uap->pid);
                if (p == NULL) {
                        error = ESRCH;
                        goto done;
                }
                if (descend)
                        ret |= ktrsetchildren(curp, p, ops, facs, vp);
                else
                        ret |= ktrops(curp, p, ops, facs, vp);
        }
        if (!ret)
                error = EPERM;
done:
        if (vp != NULL)
                (void) vn_close(vp, FWRITE, curp->p_ucred, curp);
        curp->p_traceflag &= ~KTRFAC_ACTIVE;
        return (error);
}
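/*
 * utrace system call: let a user process append its own data record
 * (at most KTR_USER_MAXLEN bytes) to its ktrace file.
 */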
int
utrace(curp, uap, retval)
        struct proc *curp;
        register struct utrace_args *uap;
        register_t *retval;
{
        struct ktr_header *kth;
        struct proc *p = current_proc();        /* XXX */
        register caddr_t cp;

        if (!KTRPOINT(p, KTR_USER))
                return (0);
        if (uap->len > KTR_USER_MAXLEN)
                return (EINVAL);
        p->p_traceflag |= KTRFAC_ACTIVE;
        kth = ktrgetheader(KTR_USER);
        MALLOC(cp, caddr_t, uap->len, M_KTRACE, M_WAITOK);
        if (!copyin((caddr_t)uap->addr, cp, uap->len)) {
                kth->ktr_buf = cp;
                kth->ktr_len = uap->len;
                ktrwrite(p->p_tracep, kth, NULL, KERNEL_FUNNEL);
        }
        FREE(kth, M_KTRACE);
        FREE(cp, M_KTRACE);
        p->p_traceflag &= ~KTRFAC_ACTIVE;

        return (0);
}
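/*
 * Apply a single SET or CLEAR operation to one process: attach or
 * release the trace vnode and update the process's trace facility
 * flags.
 */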
static int
ktrops(curp, p, ops, facs, vp)
        struct proc *p, *curp;
        int ops, facs;
        struct vnode *vp;
{
        struct vnode *tvp;

        if (!ktrcanset(curp, p))
                return (0);
        if (ops == KTROP_SET) {
                if (p->p_tracep != vp) {
                        /*
                         * if trace file already in use, relinquish
                         */
                        tvp = p->p_tracep;
                        VREF(vp);
                        p->p_tracep = vp;
                        if (tvp != NULL)
                                vrele(tvp);
                }
                p->p_traceflag |= facs;
                if (curp->p_ucred->cr_uid == 0)
                        p->p_traceflag |= KTRFAC_ROOT;
        } else {
                /* KTROP_CLEAR */
                if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
                        /* no more tracing */
                        p->p_traceflag = 0;
                        tvp = p->p_tracep;
                        p->p_tracep = NULL;
                        if (tvp != NULL)
                                vrele(tvp);
                }
        }

        return (1);
}
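/*
 * Apply a ktrace operation to a process and all of its descendants by
 * walking the process tree rooted at top.
 */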
static int
ktrsetchildren(curp, top, ops, facs, vp)
        struct proc *curp, *top;
        int ops, facs;
        struct vnode *vp;
{
        register struct proc *p;
        register int ret = 0;

        p = top;
        for (;;) {
                ret |= ktrops(curp, p, ops, facs, vp);
                /*
                 * If this process has children, descend to them next,
                 * otherwise do any siblings, and if done with this level,
                 * follow back up the tree (but not past top).
                 */
                if (!LIST_EMPTY(&p->p_children))
                        p = LIST_FIRST(&p->p_children);
                else for (;;) {
                        if (p == top)
                                return (ret);
                        if (LIST_NEXT(p, p_sibling)) {
                                p = LIST_NEXT(p, p_sibling);
                                break;
                        }
                        p = p->p_pptr;
                }
        }
        /*NOTREACHED*/
}
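/*
 * Write one ktrace record (header plus optional payload) to the trace
 * vnode, making sure the VOP calls run under the kernel funnel and
 * restoring the caller's funnel state afterwards.
 */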
static void
ktrwrite(vp, kth, uio, funnel_type)
        struct vnode *vp;
        register struct ktr_header *kth;
        struct uio *uio;
        int funnel_type;
{
        struct uio auio;
        struct iovec aiov[2];
        register struct proc *p = current_proc();       /* XXX */
        int error;

        if (vp == NULL)
                return;

        if (funnel_type == -1) {
                funnel_t *f = thread_funnel_get();

                if (f == THR_FUNNEL_NULL)
                        funnel_type = NO_FUNNEL;
                else if (f == (funnel_t *)network_flock)
                        funnel_type = NETWORK_FUNNEL;
                else if (f == (funnel_t *)kernel_flock)
                        funnel_type = KERNEL_FUNNEL;
        }

        switch (funnel_type) {
        case KERNEL_FUNNEL:
                /* Nothing more to do */
                break;
        case NETWORK_FUNNEL:
                /* switch funnel to KERNEL_FUNNEL */
                thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
                break;
        case NO_FUNNEL:
                (void) thread_funnel_set(kernel_flock, TRUE);
                break;
        default:
                panic("Invalid funnel (%d)", funnel_type);
        }

        auio.uio_iov = &aiov[0];
        auio.uio_offset = 0;
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_rw = UIO_WRITE;
        aiov[0].iov_base = (caddr_t)kth;
        aiov[0].iov_len = sizeof(struct ktr_header);
        auio.uio_resid = sizeof(struct ktr_header);
        auio.uio_iovcnt = 1;
        auio.uio_procp = current_proc();
        if (kth->ktr_len > 0) {
                auio.uio_iovcnt++;
                aiov[1].iov_base = kth->ktr_buf;
                aiov[1].iov_len = kth->ktr_len;
                auio.uio_resid += kth->ktr_len;
                if (uio != NULL)
                        kth->ktr_len += uio->uio_resid;
        }
        error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
        if (error)
                goto bad;
        (void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
        error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, p->p_ucred);
        if (error == 0 && uio != NULL) {
                (void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
                error = VOP_WRITE(vp, uio, IO_UNIT | IO_APPEND, p->p_ucred);
        }
        VOP_UNLOCK(vp, 0, p);
        if (!error) {
                switch (funnel_type) {
                case KERNEL_FUNNEL:
                        /* Nothing more to do */
                        break;
                case NETWORK_FUNNEL:
                        /* switch funnel back to NETWORK_FUNNEL */
                        thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
                        break;
                case NO_FUNNEL:
                        (void) thread_funnel_set(kernel_flock, FALSE);
                        break;
                default:
                        panic("Invalid funnel (%d)", funnel_type);
                }
                return;
        }

bad:
        /*
         * If error encountered, give up tracing on this vnode.
         */
        log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
            error);
        LIST_FOREACH(p, &allproc, p_list) {
                if (p->p_tracep == vp) {
                        p->p_tracep = NULL;
                        p->p_traceflag = 0;
                        vrele(vp);
                }
        }

        switch (funnel_type) {
        case KERNEL_FUNNEL:
                /* Nothing more to do */
                break;
        case NETWORK_FUNNEL:
                /* switch funnel back to NETWORK_FUNNEL */
                thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
                break;
        case NO_FUNNEL:
                (void) thread_funnel_set(kernel_flock, FALSE);
                break;
        default:
                panic("Invalid funnel (%d)", funnel_type);
        }
}
/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
static int
ktrcanset(callp, targetp)
        struct proc *callp, *targetp;
{
        register struct pcred *caller = callp->p_cred;
        register struct pcred *target = targetp->p_cred;

        if (!PRISON_CHECK(callp, targetp))
                return (0);
        if ((caller->pc_ucred->cr_uid == target->p_ruid &&
             target->p_ruid == target->p_svuid &&
             caller->p_rgid == target->p_rgid &&        /* XXX */
             target->p_rgid == target->p_svgid &&
             (targetp->p_traceflag & KTRFAC_ROOT) == 0 &&
             (targetp->p_flag & P_SUGID) == 0) ||
             caller->pc_ucred->cr_uid == 0)
                return (1);

        return (0);
}