]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/subr_prof.c
xnu-517.9.4.tar.gz
[apple/xnu.git] / bsd / kern / subr_prof.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
e5568f75
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
1c79356b 11 *
e5568f75
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
e5568f75
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
1c79356b
A
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23/*-
24 * Copyright (c) 1982, 1986, 1993
25 * The Regents of the University of California. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)subr_prof.c 8.3 (Berkeley) 9/23/93
56 */
57
58#include <sys/param.h>
59#include <sys/systm.h>
60#include <sys/kernel.h>
61#include <sys/proc.h>
62#include <sys/user.h>
63#include <machine/spl.h>
64
65#include <sys/mount.h>
66
67#include <kern/cpu_number.h>
68
69#ifdef GPROF
70#include <sys/malloc.h>
71#include <sys/gmon.h>
72#include <kern/mach_header.h>
73#include <machine/profile.h>
74
55e303ae
A
/* Serializes mcount()'s updates to the gmon froms/tos arc tables. */
decl_simple_lock_data(,mcount_lock);
76
1c79356b
A
/*
 * Global kernel gprof state.  Starts in GMON_PROF_OFF (profiling
 * disabled) until toggled through sysctl (GPROF_STATE below).
 * Froms is actually a bunch of unsigned shorts indexing tos.
 */
struct gmonparam _gmonparam = { GMON_PROF_OFF };
81
82kmstartup()
83{
84 char *cp;
85 u_long fromssize, tossize;
86 struct segment_command *sgp;
87 struct gmonparam *p = &_gmonparam;
88
89 sgp = getsegbyname("__TEXT");
90 p->lowpc = (u_long)sgp->vmaddr;
91 p->highpc = (u_long)(sgp->vmaddr + sgp->vmsize);
92
93 /*
94 * Round lowpc and highpc to multiples of the density we're using
95 * so the rest of the scaling (here and in gprof) stays in ints.
96 */
97 p->lowpc = ROUNDDOWN(p->lowpc, HISTFRACTION * sizeof(HISTCOUNTER));
98 p->highpc = ROUNDUP(p->highpc, HISTFRACTION * sizeof(HISTCOUNTER));
99 p->textsize = p->highpc - p->lowpc;
100 printf("Profiling kernel, textsize=%d [0x%08x..0x%08x]\n",
101 p->textsize, p->lowpc, p->highpc);
102 p->kcountsize = p->textsize / HISTFRACTION;
103 p->hashfraction = HASHFRACTION;
104 p->fromssize = p->textsize / HASHFRACTION;
105 p->tolimit = p->textsize * ARCDENSITY / 100;
106 if (p->tolimit < MINARCS)
107 p->tolimit = MINARCS;
108 else if (p->tolimit > MAXARCS)
109 p->tolimit = MAXARCS;
110 p->tossize = p->tolimit * sizeof(struct tostruct);
111 /* Why not use MALLOC with M_GPROF ? */
112 cp = (char *)kalloc(p->kcountsize + p->fromssize + p->tossize);
113 if (cp == 0) {
114 printf("No memory for profiling.\n");
115 return;
116 }
117 bzero(cp, p->kcountsize + p->tossize + p->fromssize);
118 p->tos = (struct tostruct *)cp;
119 cp += p->tossize;
120 p->kcount = (u_short *)cp;
121 cp += p->kcountsize;
122 p->froms = (u_short *)cp;
55e303ae 123 simple_lock_init(&mcount_lock);
1c79356b
A
124}
125
126/*
127 * Return kernel profiling information.
128 */
9bccf70c
A
129int
130sysctl_doprof(name, namelen, oldp, oldlenp, newp, newlen)
1c79356b
A
131 int *name;
132 u_int namelen;
133 void *oldp;
134 size_t *oldlenp;
135 void *newp;
136 size_t newlen;
137{
138 struct gmonparam *gp = &_gmonparam;
139 int error;
140
141 /* all sysctl names at this level are terminal */
142 if (namelen != 1)
143 return (ENOTDIR); /* overloaded */
144
145 switch (name[0]) {
146 case GPROF_STATE:
147 error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state);
148 if (error)
149 return (error);
150 if (gp->state == GMON_PROF_OFF)
151 stopprofclock(kernproc);
152 else
153 startprofclock(kernproc);
154 return (0);
155 case GPROF_COUNT:
156 return (sysctl_struct(oldp, oldlenp, newp, newlen,
157 gp->kcount, gp->kcountsize));
158 case GPROF_FROMS:
159 return (sysctl_struct(oldp, oldlenp, newp, newlen,
160 gp->froms, gp->fromssize));
161 case GPROF_TOS:
162 return (sysctl_struct(oldp, oldlenp, newp, newlen,
163 gp->tos, gp->tossize));
164 case GPROF_GMONPARAM:
165 return (sysctl_rdstruct(oldp, oldlenp, newp, gp, sizeof *gp));
166 default:
167 return (EOPNOTSUPP);
168 }
169 /* NOTREACHED */
170}
171
172
/*
 * mcount() called with interrupts disabled.
 *
 * Records one caller->callee arc (frompc -> selfpc) in the gmon
 * tables: froms[] hashes the call site to a chain of tostruct
 * entries, each counting calls to one distinct callee from that
 * site.  All table updates are serialized by mcount_lock.
 */
void
mcount(
    register u_long frompc,
    register u_long selfpc
)
{
	unsigned short *frompcindex;
	register struct tostruct *top, *prevtop;
	struct gmonparam *p = &_gmonparam;
	register long toindex;

	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;

	usimple_lock(&mcount_lock);

	/*
	 * check that frompcindex is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 * (frompc is now an offset from lowpc; being unsigned, a
	 * caller below lowpc wraps to a huge value and is rejected too.)
	 */
	frompc -= p->lowpc;
	if (frompc > p->textsize)
		goto done;

	/* Hash the call site into the froms[] table. */
	frompcindex = &p->froms[frompc / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc
		 * (tos[0].link is the bump allocator: index of the
		 * most recently allocated tostruct)
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit) {
			/* halt further profiling */
			goto overflow;
		}
		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit) {
				goto overflow;
			}
			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
	usimple_unlock(&mcount_lock);
	return;

overflow:
	/* Arc table exhausted: disable profiling permanently and report. */
	p->state = GMON_PROF_ERROR;
	usimple_unlock(&mcount_lock);
	printf("mcount: tos overflow\n");
	return;
}
286
287#endif /* GPROF */
288
1c79356b
A
/*
 * Guard the per-process profil() buffer list (struct uprof chain).
 * NOTE(review): thin wrappers over simple_lock/simple_unlock; pr_lock
 * is presumably a simple lock embedded in struct uprof -- confirm.
 */
#define PROFILE_LOCK(x) simple_lock(x)
#define PROFILE_UNLOCK(x) simple_unlock(x)
1c79356b
A
291
/* User arguments to the profil() system call. */
struct profil_args {
	short *bufbase;		/* user-space sample buffer */
	u_int bufsize;		/* buffer size in bytes */
	u_int pcoffset;		/* subtracted from each pc before scaling */
	u_int pcscale;		/* 16-bit fixed-point scale, <= 1.0 (1<<16) */
};
/*
 * profil system call: enable pc-sample profiling of the current
 * process with the given buffer and scale, or disable it when
 * pcscale == 0.  Replaces (and frees) any extra sample buffers
 * previously chained on by add_profil().
 *
 * Returns 0 on success, EINVAL if pcscale exceeds 1.0 in 16-bit
 * fixed point (i.e. > 1 << 16).
 */
int
profil(p, uap, retval)
	struct proc *p;
	register struct profil_args *uap;
	register_t *retval;
{
	register struct uprof *upp = &p->p_stats->p_prof;
	struct uprof *upc, *nupc;
	int s;

	if (uap->pcscale > (1 << 16))
		return (EINVAL);
	if (uap->pcscale == 0) {
		/* Scale of zero means "turn profiling off". */
		stopprofclock(p);
		return (0);
	}

	/* Block profile interrupts while changing state. */
	s = ml_set_interrupts_enabled(FALSE);
	PROFILE_LOCK(&upp->pr_lock);
	upp->pr_base = (caddr_t)uap->bufbase;
	upp->pr_size = uap->bufsize;
	upp->pr_off = uap->pcoffset;
	upp->pr_scale = uap->pcscale;

	/* remove buffers previously allocated with add_profil() */
	for (upc = upp->pr_next; upc; upc = nupc) {
		nupc = upc->pr_next;
		kfree(upc, sizeof (struct uprof));
	}

	upp->pr_next = 0;
	PROFILE_UNLOCK(&upp->pr_lock);
	startprofclock(p);
	ml_set_interrupts_enabled(s);
	return(0);
}
335
/* User arguments to add_profil(); same layout as struct profil_args. */
struct add_profile_args {
	short *bufbase;		/* additional user-space sample buffer */
	u_int bufsize;		/* buffer size in bytes */
	u_int pcoffset;		/* subtracted from each pc before scaling */
	u_int pcscale;		/* 16-bit fixed-point scale factor */
};
342int
343add_profil(p, uap, retval)
344 struct proc *p;
345 register struct add_profile_args *uap;
346 register_t *retval;
347{
348 struct uprof *upp = &p->p_stats->p_prof, *upc;
349 int s;
350
351 if (upp->pr_scale == 0)
352 return (0);
55e303ae 353 s = ml_set_interrupts_enabled(FALSE);
1c79356b
A
354 upc = (struct uprof *) kalloc(sizeof (struct uprof));
355 upc->pr_base = (caddr_t)uap->bufbase;
356 upc->pr_size = uap->bufsize;
357 upc->pr_off = uap->pcoffset;
358 upc->pr_scale = uap->pcscale;
359 PROFILE_LOCK(&upp->pr_lock);
360 upc->pr_next = upp->pr_next;
361 upp->pr_next = upc;
362 PROFILE_UNLOCK(&upp->pr_lock);
55e303ae 363 ml_set_interrupts_enabled(s);
1c79356b
A
364 return(0);
365}
366
/*
 * Scale is a fixed-point number with the binary point 16 bits
 * into the value, and is <= 1.0.  pc is at most 32 bits, so the
 * intermediate result is at most 48 bits.
 * The final "& ~1" clears the low bit so the resulting byte offset
 * is even, i.e. aligned for the 16-bit counter cells.
 */
#define PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
375
/*
 * Collect user-level profiling statistics; called on a profiling tick,
 * when a process is running in user-mode.  We use
 * an AST that will vector us to trap() with a context in which copyin
 * and copyout will work.  Trap will then call addupc_task().
 *
 * Note that we may (rarely) not get around to the AST soon enough, and
 * lose profile ticks when the next tick overwrites this one, but in this
 * case the system is overloaded and the profile is probably already
 * inaccurate.
 *
 * We can afford to take faults here.  If the
 * update fails, we simply turn off profiling.
 */
void
addupc_task(p, pc, ticks)
	register struct proc *p;
	register u_long pc;
	u_int ticks;
{
	register struct uprof *prof;
	register short *cell;
	register u_int off;
	u_short count;

	/* Testing P_PROFIL may be unnecessary, but is certainly safe. */
	if ((p->p_flag & P_PROFIL) == 0 || ticks == 0)
		return;

	/*
	 * Walk the chain of profiling buffers (head plus any added by
	 * add_profil()); credit the tick to the first buffer whose
	 * range contains the scaled pc.
	 */
	for (prof = &p->p_stats->p_prof; prof; prof = prof->pr_next) {
		off = PC_TO_INDEX(pc,prof);
		cell = (short *)(prof->pr_base + off);
		if (cell >= (short *)prof->pr_base &&
		    cell < (short*)(prof->pr_size + (int) prof->pr_base)) {
			/* Read-modify-write the user-space counter cell. */
			if (copyin((caddr_t)cell, (caddr_t) &count, sizeof(count)) == 0) {
				count += ticks;
				if(copyout((caddr_t) &count, (caddr_t)cell, sizeof(count)) == 0)
					return;
			}
			/* copyin or copyout faulted: disable profiling. */
			p->p_stats->p_prof.pr_scale = 0;
			stopprofclock(p);
			break;
		}
	}
}