/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_prof.c	8.3 (Berkeley) 9/23/93
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <machine/spl.h>
#include <sys/mount.h>
#include <kern/cpu_number.h>
#include <sys/malloc.h>
#include <kern/mach_header.h>
#include <machine/profile.h>
decl_simple_lock_data(,mcount_lock);

/*
 * Froms is actually a bunch of unsigned shorts indexing tos
 */
struct gmonparam _gmonparam = { GMON_PROF_OFF };
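
/*
 * How the call-graph arcs are stored (a summary of the mcount() logic
 * below): froms[] is a hash table indexed by caller pc; each entry holds
 * the index of the head of a chain of tostruct entries in tos[].  Each
 * tostruct records one callee (selfpc), the arc count, and a link to the
 * next arc that hashed to the same caller slot.  tos[0].link serves as
 * the allocation high-water mark and is bounded by tolimit.
 */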

void
kmstartup(void)
{
	char *cp;
	u_long fromssize, tossize;
	struct segment_command *sgp;
	struct gmonparam *p = &_gmonparam;

	sgp = getsegbyname("__TEXT");
	p->lowpc = (u_long)sgp->vmaddr;
	p->highpc = (u_long)(sgp->vmaddr + sgp->vmsize);

	/*
	 * Round lowpc and highpc to multiples of the density we're using
	 * so the rest of the scaling (here and in gprof) stays in ints.
	 */
	p->lowpc = ROUNDDOWN(p->lowpc, HISTFRACTION * sizeof(HISTCOUNTER));
	p->highpc = ROUNDUP(p->highpc, HISTFRACTION * sizeof(HISTCOUNTER));
	p->textsize = p->highpc - p->lowpc;
	printf("Profiling kernel, textsize=%d [0x%08x..0x%08x]\n",
	    p->textsize, p->lowpc, p->highpc);
	p->kcountsize = p->textsize / HISTFRACTION;
	p->hashfraction = HASHFRACTION;
	p->fromssize = p->textsize / HASHFRACTION;
	p->tolimit = p->textsize * ARCDENSITY / 100;
	if (p->tolimit < MINARCS)
		p->tolimit = MINARCS;
	else if (p->tolimit > MAXARCS)
		p->tolimit = MAXARCS;
	p->tossize = p->tolimit * sizeof(struct tostruct);
	/* Why not use MALLOC with M_GPROF ? */
	cp = (char *)kalloc(p->kcountsize + p->fromssize + p->tossize);
	if (cp == 0) {
		printf("No memory for profiling.\n");
		return;
	}
	bzero(cp, p->kcountsize + p->tossize + p->fromssize);
	/* carve the single allocation into the three arrays */
	p->tos = (struct tostruct *)cp;
	cp += p->tossize;
	p->kcount = (u_short *)cp;
	cp += p->kcountsize;
	p->froms = (u_short *)cp;
	simple_lock_init(&mcount_lock);
}
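
/*
 * Rough sizing example with illustrative values (the real constants come
 * from <machine/profile.h> and <sys/gmon.h>): for a 4 MB __TEXT segment
 * with HISTFRACTION 2, HASHFRACTION 2 and ARCDENSITY 2, kcountsize and
 * fromssize are 2 MB each, tolimit is roughly 84,000 arcs (clamped to
 * [MINARCS, MAXARCS]), and tossize is tolimit * sizeof(struct tostruct),
 * all carved out of the single kalloc() block above in the order
 * tos, kcount, froms.
 */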

/*
 * Return kernel profiling information.
 */
int
sysctl_doprof(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	struct gmonparam *gp = &_gmonparam;
	int error;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case GPROF_STATE:
		error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state);
		if (error)
			return (error);
		if (gp->state == GMON_PROF_OFF)
			stopprofclock(kernproc);
		else
			startprofclock(kernproc);
		return (0);
	case GPROF_COUNT:
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    gp->kcount, gp->kcountsize));
	case GPROF_FROMS:
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    gp->froms, gp->fromssize));
	case GPROF_TOS:
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    gp->tos, gp->tossize));
	case GPROF_GMONPARAM:
		return (sysctl_rdstruct(oldp, oldlenp, newp, gp, sizeof *gp));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
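
/*
 * These GPROF_* nodes are normally driven from user space via sysctl(3);
 * historically kgmon(8) is the consumer that toggles GPROF_STATE and dumps
 * kcount/froms/tos into a gmon.out file for gprof(1) to analyze.
 */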

/*
 * mcount() called with interrupts disabled.
 */
void
mcount(
    register u_long frompc,
    register u_long selfpc
)
{
	unsigned short *frompcindex;
	register struct tostruct *top, *prevtop;
	struct gmonparam *p = &_gmonparam;
	register long toindex;

	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;

	usimple_lock(&mcount_lock);

	/*
	 * check that frompcindex is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	frompc -= p->lowpc;
	if (frompc > p->textsize)
		goto done;

	frompcindex = &p->froms[frompc / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit) {
			/* halt further profiling */
			goto overflow;
		}
		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit) {
				goto overflow;
			}
			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
	}

done:
	usimple_unlock(&mcount_lock);
	return;

overflow:
	p->state = GMON_PROF_ERROR;
	usimple_unlock(&mcount_lock);
	printf("mcount: tos overflow\n");
	return;
}
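
/*
 * Note: once the tos[] pool is exhausted, the overflow path above sets
 * p->state to GMON_PROF_ERROR, so every later mcount() call bails out at
 * the GMON_PROF_ON check and no further arcs are recorded.
 */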

#define PROFILE_LOCK(x)		simple_lock(x)
#define PROFILE_UNLOCK(x)	simple_unlock(x)

int
profil(p, uap, retval)
	struct proc *p;
	register struct profil_args *uap;
	register_t *retval;
{
	register struct uprof *upp = &p->p_stats->p_prof;
	struct uprof *upc, *nupc;
	boolean_t s;

	if (uap->pcscale > (1 << 16))
		return (EINVAL);
	if (uap->pcscale == 0) {
		stopprofclock(p);
		return (0);
	}

	/* Block profile interrupts while changing state. */
	s = ml_set_interrupts_enabled(FALSE);
	PROFILE_LOCK(&upp->pr_lock);
	upp->pr_base = (caddr_t)uap->bufbase;
	upp->pr_size = uap->bufsize;
	upp->pr_off = uap->pcoffset;
	upp->pr_scale = uap->pcscale;

	/* remove buffers previously allocated with add_profil() */
	for (upc = upp->pr_next; upc; upc = nupc) {
		nupc = upc->pr_next;
		kfree(upc, sizeof (struct uprof));
	}
	upp->pr_next = 0;
	PROFILE_UNLOCK(&upp->pr_lock);

	startprofclock(p);
	ml_set_interrupts_enabled(s);
	return (0);
}

struct add_profile_args {
	/* ... bufbase, bufsize, pcoffset, pcscale fields (elided; see uses below) ... */
};

int
add_profil(p, uap, retval)
	struct proc *p;
	register struct add_profile_args *uap;
	register_t *retval;
{
	struct uprof *upp = &p->p_stats->p_prof, *upc;
	boolean_t s;

	if (upp->pr_scale == 0)
		return (0);

	s = ml_set_interrupts_enabled(FALSE);
	upc = (struct uprof *) kalloc(sizeof (struct uprof));
	upc->pr_base = (caddr_t)uap->bufbase;
	upc->pr_size = uap->bufsize;
	upc->pr_off = uap->pcoffset;
	upc->pr_scale = uap->pcscale;
	PROFILE_LOCK(&upp->pr_lock);
	upc->pr_next = upp->pr_next;
	upp->pr_next = upc;
	PROFILE_UNLOCK(&upp->pr_lock);
	ml_set_interrupts_enabled(s);
	return (0);
}
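
/*
 * Unlike profil(), which resets the primary buffer and frees any chained
 * ones, add_profil() only prepends an extra uprof record to upp->pr_next,
 * so several pc ranges can be sampled at once; addupc_task() below walks
 * the whole chain on every profiling tick.
 */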

/*
 * Scale is a fixed-point number with the binary point 16 bits
 * into the value, and is <= 1.0.  pc is at most 32 bits, so the
 * intermediate result is at most 48 bits.
 */
#define PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
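
/*
 * Worked example (illustrative numbers): with pr_scale = 0x8000 (the
 * fixed-point encoding of 0.5) and pc - pr_off = 0x1000, the 48-bit
 * product is 0x08000000; shifting right by 16 gives 0x800, and the
 * final & ~1 keeps the byte offset aligned to the 2-byte counters,
 * so each counter covers 4 bytes of text at this scale.
 */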

/*
 * Collect user-level profiling statistics; called on a profiling tick,
 * when a process is running in user-mode.  We use
 * an AST that will vector us to trap() with a context in which copyin
 * and copyout will work.  Trap will then call addupc_task().
 *
 * Note that we may (rarely) not get around to the AST soon enough, and
 * lose profile ticks when the next tick overwrites this one, but in this
 * case the system is overloaded and the profile is probably already
 * inaccurate.
 *
 * We can afford to take faults here.  If the
 * update fails, we simply turn off profiling.
 */
void
addupc_task(p, pc, ticks)
	register struct proc *p;
	register u_long pc;
	u_int ticks;
{
	register struct uprof *prof;
	register short *cell;
	register u_int off;
	u_short count;

	/* Testing P_PROFIL may be unnecessary, but is certainly safe. */
	if ((p->p_flag & P_PROFIL) == 0 || ticks == 0)
		return;

	for (prof = &p->p_stats->p_prof; prof; prof = prof->pr_next) {
		off = PC_TO_INDEX(pc, prof);
		cell = (short *)(prof->pr_base + off);
		if (cell >= (short *)prof->pr_base &&
		    cell < (short *)(prof->pr_size + (int)prof->pr_base)) {
			if (copyin((caddr_t)cell, (caddr_t)&count, sizeof(count)) == 0) {
				count += ticks;
				if (copyout((caddr_t)&count, (caddr_t)cell, sizeof(count)) == 0)
					return;
			}
			/* the copy faulted: give up on profiling this process */
			p->p_stats->p_prof.pr_scale = 0;
			stopprofclock(p);
			break;
		}
	}
}