[apple/xnu.git] / bsd / kern / subr_prof.c (xnu-344.49)
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_prof.c	8.3 (Berkeley) 9/23/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <machine/spl.h>

#include <sys/mount.h>

#include <kern/cpu_number.h>

#ifdef GPROF
#include <sys/malloc.h>
#include <sys/gmon.h>
#include <kern/mach_header.h>
#include <machine/profile.h>

/*
 * Froms is actually a bunch of unsigned shorts indexing tos
 */
struct gmonparam _gmonparam = { GMON_PROF_OFF };

kmstartup()
{
	char *cp;
	struct segment_command *sgp;
	struct gmonparam *p = &_gmonparam;

	sgp = getsegbyname("__TEXT");
	p->lowpc = (u_long)sgp->vmaddr;
	p->highpc = (u_long)(sgp->vmaddr + sgp->vmsize);

	/*
	 * Round lowpc and highpc to multiples of the density we're using
	 * so the rest of the scaling (here and in gprof) stays in ints.
	 */
	p->lowpc = ROUNDDOWN(p->lowpc, HISTFRACTION * sizeof(HISTCOUNTER));
	p->highpc = ROUNDUP(p->highpc, HISTFRACTION * sizeof(HISTCOUNTER));
	p->textsize = p->highpc - p->lowpc;
	printf("Profiling kernel, textsize=%d [0x%08x..0x%08x]\n",
	       p->textsize, p->lowpc, p->highpc);
	p->kcountsize = p->textsize / HISTFRACTION;
	p->hashfraction = HASHFRACTION;
	p->fromssize = p->textsize / HASHFRACTION;
	p->tolimit = p->textsize * ARCDENSITY / 100;
	if (p->tolimit < MINARCS)
		p->tolimit = MINARCS;
	else if (p->tolimit > MAXARCS)
		p->tolimit = MAXARCS;
	p->tossize = p->tolimit * sizeof(struct tostruct);
	/* Why not use MALLOC with M_GPROF ? */
	cp = (char *)kalloc(p->kcountsize + p->fromssize + p->tossize);
	if (cp == 0) {
		printf("No memory for profiling.\n");
		return;
	}
	bzero(cp, p->kcountsize + p->tossize + p->fromssize);
	p->tos = (struct tostruct *)cp;
	cp += p->tossize;
	p->kcount = (u_short *)cp;
	cp += p->kcountsize;
	p->froms = (u_short *)cp;
}
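
/*
 * A sketch of how the single block allocated in kmstartup() is carved up,
 * following the pointer assignments above:
 *
 *	cp -> +------------------------+
 *	      | tos[]    (tossize)     |  arc records; tos[0].link holds the
 *	      +------------------------+  index of the next free entry
 *	      | kcount[] (kcountsize)  |  pc-sample histogram counters
 *	      +------------------------+
 *	      | froms[]  (fromssize)   |  caller-pc hash; entries index tos[]
 *	      +------------------------+
 */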

/*
 * Return kernel profiling information.
 */
int
sysctl_doprof(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	struct gmonparam *gp = &_gmonparam;
	int error;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case GPROF_STATE:
		error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state);
		if (error)
			return (error);
		if (gp->state == GMON_PROF_OFF)
			stopprofclock(kernproc);
		else
			startprofclock(kernproc);
		return (0);
	case GPROF_COUNT:
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    gp->kcount, gp->kcountsize));
	case GPROF_FROMS:
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    gp->froms, gp->fromssize));
	case GPROF_TOS:
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    gp->tos, gp->tossize));
	case GPROF_GMONPARAM:
		return (sysctl_rdstruct(oldp, oldlenp, newp, gp, sizeof *gp));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
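
/*
 * Usage sketch: from user space the tables above are read through
 * sysctl(3).  The mib layout assumed here is the usual 4.4BSD
 * kern.profiling node (CTL_KERN, KERN_PROF) with the GPROF_* second-level
 * names from <sys/gmon.h>; a minimal reader of the profiling state could
 * look like:
 *
 *	int mib[3] = { CTL_KERN, KERN_PROF, GPROF_STATE };
 *	int state;
 *	size_t len = sizeof(state);
 *
 *	if (sysctl(mib, 3, &state, &len, NULL, 0) == 0)
 *		printf("kernel profiling is %s\n",
 *		    state == GMON_PROF_ON ? "on" : "off");
 */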


/*
 * mcount() called with interrupts disabled.
 */
void
mcount(
	register u_long frompc,
	register u_long selfpc
)
{
	unsigned short *frompcindex;
	register struct tostruct *top, *prevtop;
	struct gmonparam *p = &_gmonparam;
	register long toindex;
	MCOUNT_INIT;

	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;

	MCOUNT_ENTER;

	/*
	 * check that frompcindex is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	frompc -= p->lowpc;
	if (frompc > p->textsize)
		goto done;

	frompcindex = &p->froms[frompc / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit) {
			/* halt further profiling */
			goto overflow;
		}
		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit) {
				goto overflow;
			}
			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
	MCOUNT_EXIT;
	return;

overflow:
	p->state = GMON_PROF_ERROR;
	MCOUNT_EXIT;
	printf("mcount: tos overflow\n");
	return;
}
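
/*
 * Note on the arc bookkeeping above: froms[] is a hash keyed on the caller
 * pc (one slot per hashfraction * sizeof(u_short) bytes of text), and each
 * slot holds the tos[] index of the head of a chain of (selfpc, count)
 * records for callees reached from that slot.  tos[0].link doubles as the
 * allocation cursor for new chain entries, which is why the code
 * pre-increments it and compares the result against tolimit.
 */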

#endif /* GPROF */

#if NCPUS > 1
#define PROFILE_LOCK(x)		simple_lock(x)
#define PROFILE_UNLOCK(x)	simple_unlock(x)
#else
#define PROFILE_LOCK(x)
#define PROFILE_UNLOCK(x)
#endif

struct profil_args {
	short	*bufbase;
	u_int	bufsize;
	u_int	pcoffset;
	u_int	pcscale;
};
int
profil(p, uap, retval)
	struct proc *p;
	register struct profil_args *uap;
	register_t *retval;
{
	register struct uprof *upp = &p->p_stats->p_prof;
	struct uprof *upc, *nupc;
	int s;

	if (uap->pcscale > (1 << 16))
		return (EINVAL);
	if (uap->pcscale == 0) {
		stopprofclock(p);
		return (0);
	}

	/* Block profile interrupts while changing state. */
	s = splstatclock();
	PROFILE_LOCK(&upp->pr_lock);
	upp->pr_base = (caddr_t)uap->bufbase;
	upp->pr_size = uap->bufsize;
	upp->pr_off = uap->pcoffset;
	upp->pr_scale = uap->pcscale;

	/* remove buffers previously allocated with add_profil() */
	for (upc = upp->pr_next; upc; upc = nupc) {
		nupc = upc->pr_next;
		kfree(upc, sizeof (struct uprof));
	}

	upp->pr_next = 0;
	PROFILE_UNLOCK(&upp->pr_lock);
	startprofclock(p);
	splx(s);
	return (0);
}
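
/*
 * Usage sketch, assuming the BSD profil(2) libc wrapper
 * (int profil(char *samples, size_t size, u_long offset, u_int scale)):
 *
 *	u_short counters[8192];
 *
 *	profil((char *)counters, sizeof(counters), text_start, 0x8000);
 *	... run the code to be profiled ...
 *	profil((char *)0, 0, 0, 0);
 *
 * A scale of 0x10000 maps the pc range one-to-one onto the buffer, 0x8000
 * maps two bytes of pc range onto each buffer byte, and a scale of 0 stops
 * profiling, matching the pcscale checks at the top of profil() above.
 * text_start is a stand-in for the lowest pc of interest.
 */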

struct add_profile_args {
	short	*bufbase;
	u_int	bufsize;
	u_int	pcoffset;
	u_int	pcscale;
};
int
add_profil(p, uap, retval)
	struct proc *p;
	register struct add_profile_args *uap;
	register_t *retval;
{
	struct uprof *upp = &p->p_stats->p_prof, *upc;
	int s;

	if (upp->pr_scale == 0)
		return (0);
	s = splstatclock();
	upc = (struct uprof *) kalloc(sizeof (struct uprof));
	upc->pr_base = (caddr_t)uap->bufbase;
	upc->pr_size = uap->bufsize;
	upc->pr_off = uap->pcoffset;
	upc->pr_scale = uap->pcscale;
	PROFILE_LOCK(&upp->pr_lock);
	upc->pr_next = upp->pr_next;
	upp->pr_next = upc;
	PROFILE_UNLOCK(&upp->pr_lock);
	splx(s);
	return (0);
}

/*
 * Scale is a fixed-point number with the binary point 16 bits
 * into the value, and is <= 1.0.  pc is at most 32 bits, so the
 * intermediate result is at most 48 bits.
 */
#define	PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
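
/*
 * Worked example of the macro above (values chosen for illustration): with
 * pr_off = 0x1000 and pr_scale = 0x8000 (0.5 in 16.16 fixed point), a
 * sample at pc = 0x1234 gives
 *
 *	((0x1234 - 0x1000) * 0x8000) >> 16 = 0x11a,	& ~1 = 0x11a
 *
 * so the tick lands in the 16-bit counter at byte offset 0x11a of the
 * buffer.  The final & ~1 keeps the offset even so it always addresses a
 * whole u_short cell.
 */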

/*
 * Collect user-level profiling statistics; called on a profiling tick,
 * when a process is running in user-mode.  We use
 * an AST that will vector us to trap() with a context in which copyin
 * and copyout will work.  Trap will then call addupc_task().
 *
 * Note that we may (rarely) not get around to the AST soon enough, and
 * lose profile ticks when the next tick overwrites this one, but in this
 * case the system is overloaded and the profile is probably already
 * inaccurate.
 *
 * We can afford to take faults here.  If the
 * update fails, we simply turn off profiling.
 */
void
addupc_task(p, pc, ticks)
	register struct proc *p;
	register u_long pc;
	u_int ticks;
{
	register struct uprof *prof;
	register short *cell;
	register u_int off;
	u_short count;

	/* Testing P_PROFIL may be unnecessary, but is certainly safe. */
	if ((p->p_flag & P_PROFIL) == 0 || ticks == 0)
		return;

	for (prof = &p->p_stats->p_prof; prof; prof = prof->pr_next) {
		off = PC_TO_INDEX(pc, prof);
		cell = (short *)(prof->pr_base + off);
		if (cell >= (short *)prof->pr_base &&
		    cell < (short *)(prof->pr_size + (int)prof->pr_base)) {
			if (copyin((caddr_t)cell, (caddr_t)&count, sizeof(count)) == 0) {
				count += ticks;
				if (copyout((caddr_t)&count, (caddr_t)cell, sizeof(count)) == 0)
					return;
			}
			p->p_stats->p_prof.pr_scale = 0;
			stopprofclock(p);
			break;
		}
	}
}