/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 * $FreeBSD: src/sys/kern/kern_ktrace.c,v 1.35.2.4 2001/03/05 13:09:01 obrien Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
static struct ktr_header *ktrgetheader __P((int type));
static void ktrwrite __P((struct vnode *, struct ktr_header *,
	struct uio *, int));
static int ktrcanset __P((struct proc *,struct proc *));
static int ktrsetchildren __P((struct proc *,struct proc *,
	int, int, struct vnode *));
static int ktrops __P((struct proc *,struct proc *,int,int,struct vnode *));
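
/*
 * Allocate a ktr_header and stamp it with the trace record type, the
 * current time, and the calling process's pid and command name.  Every
 * trace point below fills in one of these headers before handing it to
 * ktrwrite().
 */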
static struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;
	struct proc *p = current_proc();	/* XXX */

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
		M_KTRACE, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}
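
/*
 * Record a system call entry: the syscall code, the argument count and
 * the argument words themselves are packed into a ktr_syscall record and
 * appended to the trace vnode.
 */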
void
ktrsyscall(p, code, narg, args, funnel_type)
	struct proc *p;
	int code, narg;
	register_t args[];
	int funnel_type;
{
	struct vnode *vp;
	struct ktr_header *kth;
	struct ktr_syscall *ktp;
	register int len;
	register_t *argp;
	int i;

	if (!KTRPOINT(p, KTR_SYSCALL))
		return;

	vp = p->p_tracep;
	len = __offsetof(struct ktr_syscall, ktr_args) +
	    (narg * sizeof(register_t));
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSCALL);
	MALLOC(ktp, struct ktr_syscall *, len, M_KTRACE, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = &ktp->ktr_args[0];
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth, NULL, funnel_type);
	FREE(ktp, M_KTRACE);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}
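
/*
 * Record a system call return: the error code and primary return value
 * go out in a fixed-size ktr_sysret record.
 */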
void
ktrsysret(p, code, error, retval, funnel_type)
	struct proc *p;
	int code, error, retval, funnel_type;
{
	struct vnode *vp;
	struct ktr_header *kth;
	struct ktr_sysret ktp;

	if (!KTRPOINT(p, KTR_SYSRET))
		return;

	vp = p->p_tracep;
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth->ktr_buf = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth, NULL, funnel_type);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}
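
/*
 * Record a pathname looked up by namei(); the path string itself is the
 * record payload.
 */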
void
ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth;
	struct proc *p = current_proc();	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_NAMEI);
	kth->ktr_len = strlen(path);
	kth->ktr_buf = path;

	ktrwrite(vp, kth, NULL, KERNEL_FUNNEL);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}
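
/*
 * Record generic process I/O: a ktr_genio record describing the file
 * descriptor and transfer direction, followed by the I/O data itself,
 * which ktrwrite() takes from the supplied uio.
 */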
void
ktrgenio(vp, fd, rw, uio, error, funnel_type)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	struct uio *uio;
	int error, funnel_type;
{
	struct ktr_header *kth;
	struct ktr_genio ktg;
	struct proc *p = current_proc();	/* XXX */

	if (error)
		return;

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_GENIO);
	ktg.ktr_fd = fd;
	ktg.ktr_rw = rw;
	kth->ktr_buf = (caddr_t)&ktg;
	kth->ktr_len = sizeof(struct ktr_genio);
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;

	ktrwrite(vp, kth, uio, funnel_type);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}
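
/*
 * Record delivery of a signal: the signal number, the handler that will
 * run, the blocked signal mask and the signal code are packed into a
 * ktr_psig record.
 */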
void
ktrpsig(vp, sig, action, mask, code, funnel_type)
	struct vnode *vp;
	int sig;
	sig_t action;
	sigset_t mask;
	int code, funnel_type;
{
	struct ktr_header *kth;
	struct ktr_psig kp;
	struct proc *p = current_proc();	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth->ktr_buf = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth, NULL, funnel_type);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}
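
/*
 * Record a context switch: whether the process is being switched out or
 * back in, and whether it was running in user mode at the time.
 */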
void
ktrcsw(vp, out, user, funnel_type)
	struct vnode *vp;
	int out, user, funnel_type;
{
	struct ktr_header *kth;
	struct ktr_csw kc;
	struct proc *p = current_proc();	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth->ktr_buf = (caddr_t)&kc;
	kth->ktr_len = sizeof (struct ktr_csw);

	ktrwrite(vp, kth, NULL, funnel_type);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}
/* Interface and common routines */
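
/*
 * ktrace(2) system call.  The ops word selects KTROP_SET, KTROP_CLEAR or
 * KTROP_CLEARFILE and may carry KTRFLAG_DESCEND to apply the change to a
 * whole process subtree; facs is the mask of trace points to enable or
 * disable; a negative pid addresses a process group.  From user space the
 * call is typically made roughly like this (illustrative sketch only):
 *
 *	ktrace("ktrace.out", KTROP_SET | KTRFLAG_DESCEND,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, pid);
 */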
int
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct ktrace_args *uap;
	register_t *retval;
{
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, (NOFOLLOW|LOCKLEAF), UIO_USERSPACE, uap->fname, curp);
		error = vn_open(&nd, FREAD|FWRITE|O_NOFOLLOW, 0);
		if (error) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0, curp);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		LIST_FOREACH(p, &allproc, p_list) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					struct vnode *tvp = p->p_tracep;
					/* no more tracing */
					p->p_traceflag = 0;
					if (tvp != NULL) {
						p->p_tracep = NULL;
						vrele(tvp);
					}
				} else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		LIST_FOREACH(p, &pg->pg_members, p_pglist)
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, vp);
			else
				ret |= ktrops(curp, p, ops, facs, vp);
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, vp);
		else
			ret |= ktrops(curp, p, ops, facs, vp);
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}
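
/*
 * utrace(2) system call: lets a traced process append up to
 * KTR_USER_MAXLEN bytes of its own data to the trace as a KTR_USER
 * record, e.g. utrace(buf, len) from user space (illustrative sketch).
 */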
int
utrace(curp, uap, retval)
	struct proc *curp;
	register struct utrace_args *uap;
	register_t *retval;
{
	struct ktr_header *kth;
	struct proc *p = current_proc();	/* XXX */
	register caddr_t cp;

	if (!KTRPOINT(p, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_USER);
	MALLOC(cp, caddr_t, uap->len, M_KTRACE, M_WAITOK);
	if (!copyin(uap->addr, cp, uap->len)) {
		kth->ktr_buf = cp;
		kth->ktr_len = uap->len;
		ktrwrite(p->p_tracep, kth, NULL, KERNEL_FUNNEL);
	}
	FREE(kth, M_KTRACE);
	FREE(cp, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;

	return (0);
}
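
/*
 * Apply a single set/clear operation to one process: point p_tracep at
 * the new trace vnode (releasing any old one), update p_traceflag, and
 * note KTRFAC_ROOT when root is the one enabling tracing.  Returns 1 on
 * success, 0 if the caller lacks permission.
 */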
static int
ktrops(curp, p, ops, facs, vp)
	struct proc *p, *curp;
	int ops, facs;
	struct vnode *vp;
{
	struct vnode *tvp;

	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			tvp = p->p_tracep;
			VREF(vp);
			p->p_tracep = vp;
			if (tvp != NULL)
				vrele(tvp);
		}
		p->p_traceflag |= facs;
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			tvp = p->p_tracep;
			p->p_tracep = NULL;
			if (tvp != NULL)
				vrele(tvp);
		}
	}

	return (1);
}
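
/*
 * Apply ktrops() to a process and every descendant beneath it, walking
 * the child/sibling links iteratively rather than recursing.
 */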
static int
ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}
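
/*
 * Common writer for all trace points.  Builds an iovec pair (the
 * ktr_header followed by its payload, plus any caller-supplied uio for
 * KTR_GENIO data), takes the kernel funnel if it is not already held,
 * appends the record to the trace vnode, and on a write error turns off
 * tracing for every process still pointing at that vnode.
 */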
static void
ktrwrite(vp, kth, uio, funnel_type)
	struct vnode *vp;
	register struct ktr_header *kth;
	struct uio *uio;
	int funnel_type;
{
	struct uio auio;
	struct iovec aiov[2];
	register struct proc *p = current_proc();	/* XXX */
	int error;

	if (vp == NULL)
		return;

	if (funnel_type == -1) {
		funnel_t *f = thread_funnel_get();
		if (f == THR_FUNNEL_NULL)
			funnel_type = NO_FUNNEL;
		else if (f == (funnel_t *)network_flock)
			funnel_type = NETWORK_FUNNEL;
		else if (f == (funnel_t *)kernel_flock)
			funnel_type = KERNEL_FUNNEL;
	}

	switch (funnel_type) {
	case KERNEL_FUNNEL:
		/* Nothing more to do */
		break;
	case NETWORK_FUNNEL:
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		break;
	case NO_FUNNEL:
		(void) thread_funnel_set(kernel_flock, TRUE);
		break;
	default:
		panic("Invalid funnel (%d)", funnel_type);
	}

	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = current_proc();
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
		if (uio != NULL)
			kth->ktr_len += uio->uio_resid;
	}
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	if (error)
		goto bad;
	(void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, p->p_ucred);
	if (error == 0 && uio != NULL) {
		(void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
		error = VOP_WRITE(vp, uio, IO_UNIT | IO_APPEND, p->p_ucred);
	}
	VOP_UNLOCK(vp, 0, p);
	if (!error) {
		switch (funnel_type) {
		case KERNEL_FUNNEL:
			/* Nothing more to do */
			break;
		case NETWORK_FUNNEL:
			thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
			/* switch funnel to NETWORK_FUNNEL */
			break;
		case NO_FUNNEL:
			(void) thread_funnel_set(kernel_flock, FALSE);
			break;
		default:
			panic("Invalid funnel (%d)", funnel_type);
		}
		return;
	}

bad:
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}

	switch (funnel_type) {
	case KERNEL_FUNNEL:
		/* Nothing more to do */
		break;
	case NETWORK_FUNNEL:
		thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
		/* switch funnel to NETWORK_FUNNEL */
		break;
	case NO_FUNNEL:
		(void) thread_funnel_set(kernel_flock, FALSE);
		break;
	default:
		panic("Invalid funnel (%d)", funnel_type);
	}
}
/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
static int
ktrcanset(callp, targetp)
	struct proc *callp, *targetp;
{
	register struct pcred *caller = callp->p_cred;
	register struct pcred *target = targetp->p_cred;

	if (!PRISON_CHECK(callp, targetp))
		return (0);
	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}