/*
 * bsd/kern/subr_prof.c — Apple XNU kernel (release xnu-792.6.56).
 */
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
1c79356b
A
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
ff6e181a
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
1c79356b 12 *
ff6e181a
A
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
ff6e181a
A
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
1c79356b
A
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
24/*-
25 * Copyright (c) 1982, 1986, 1993
26 * The Regents of the University of California. All rights reserved.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 * 1. Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * 2. Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in the
35 * documentation and/or other materials provided with the distribution.
36 * 3. All advertising materials mentioning features or use of this software
37 * must display the following acknowledgement:
38 * This product includes software developed by the University of
39 * California, Berkeley and its contributors.
40 * 4. Neither the name of the University nor the names of its contributors
41 * may be used to endorse or promote products derived from this software
42 * without specific prior written permission.
43 *
44 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
45 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
46 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
47 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
48 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
49 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
50 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
51 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
53 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
54 * SUCH DAMAGE.
55 *
56 * @(#)subr_prof.c 8.3 (Berkeley) 9/23/93
57 */
58
59#include <sys/param.h>
60#include <sys/systm.h>
61#include <sys/kernel.h>
91447636 62#include <sys/proc_internal.h>
1c79356b
A
63#include <sys/user.h>
64#include <machine/spl.h>
91447636 65#include <machine/machine_routines.h>
1c79356b 66
91447636
A
67#include <sys/mount_internal.h>
68#include <sys/sysproto.h>
1c79356b 69
91447636
A
70#include <mach/mach_types.h>
71#include <kern/kern_types.h>
1c79356b 72#include <kern/cpu_number.h>
91447636
A
73#include <kern/kalloc.h>
74
75extern boolean_t ml_set_interrupts_enabled(boolean_t enable);
1c79356b
A
76
77#ifdef GPROF
78#include <sys/malloc.h>
79#include <sys/gmon.h>
80#include <kern/mach_header.h>
81#include <machine/profile.h>
82
91447636
A
/* Spin lock serializing mcount()'s updates to the gmon arc tables. */
lck_spin_t * mcount_lock;
lck_grp_t * mcount_lock_grp;
lck_attr_t * mcount_lock_attr;

/*
 * Froms is actually a bunch of unsigned shorts indexing tos
 */
/* Kernel profiling state; profiling is disabled until kmstartup() runs. */
struct gmonparam _gmonparam = { GMON_PROF_OFF };
91
91447636
A
92/*
93 * This code uses 32 bit mach object segment information from the currently
94 * running kernel.
95 */
96void
97kmstartup(void)
1c79356b
A
98{
99 char *cp;
100 u_long fromssize, tossize;
91447636 101 struct segment_command *sgp; /* 32 bit mach object file segment */
1c79356b
A
102 struct gmonparam *p = &_gmonparam;
103
104 sgp = getsegbyname("__TEXT");
105 p->lowpc = (u_long)sgp->vmaddr;
106 p->highpc = (u_long)(sgp->vmaddr + sgp->vmsize);
107
108 /*
109 * Round lowpc and highpc to multiples of the density we're using
110 * so the rest of the scaling (here and in gprof) stays in ints.
111 */
112 p->lowpc = ROUNDDOWN(p->lowpc, HISTFRACTION * sizeof(HISTCOUNTER));
113 p->highpc = ROUNDUP(p->highpc, HISTFRACTION * sizeof(HISTCOUNTER));
114 p->textsize = p->highpc - p->lowpc;
115 printf("Profiling kernel, textsize=%d [0x%08x..0x%08x]\n",
116 p->textsize, p->lowpc, p->highpc);
117 p->kcountsize = p->textsize / HISTFRACTION;
118 p->hashfraction = HASHFRACTION;
119 p->fromssize = p->textsize / HASHFRACTION;
120 p->tolimit = p->textsize * ARCDENSITY / 100;
121 if (p->tolimit < MINARCS)
122 p->tolimit = MINARCS;
123 else if (p->tolimit > MAXARCS)
124 p->tolimit = MAXARCS;
125 p->tossize = p->tolimit * sizeof(struct tostruct);
126 /* Why not use MALLOC with M_GPROF ? */
127 cp = (char *)kalloc(p->kcountsize + p->fromssize + p->tossize);
128 if (cp == 0) {
129 printf("No memory for profiling.\n");
130 return;
131 }
132 bzero(cp, p->kcountsize + p->tossize + p->fromssize);
133 p->tos = (struct tostruct *)cp;
134 cp += p->tossize;
135 p->kcount = (u_short *)cp;
136 cp += p->kcountsize;
137 p->froms = (u_short *)cp;
91447636
A
138
139 mcount_lock_grp = lck_grp_alloc_init("MCOUNT", LCK_GRP_ATTR_NULL);
140 mcount_lock_attr = lck_attr_alloc_init();
141 //lck_attr_setdebug(mcount_lock_attr);
142 mcount_lock = lck_spin_alloc_init(mcount_lock_grp, mcount_lock_attr);
143
1c79356b
A
144}
145
146/*
147 * Return kernel profiling information.
148 */
9bccf70c 149int
91447636
A
150sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
151 user_addr_t newp, size_t newlen)
1c79356b
A
152{
153 struct gmonparam *gp = &_gmonparam;
154 int error;
155
156 /* all sysctl names at this level are terminal */
157 if (namelen != 1)
158 return (ENOTDIR); /* overloaded */
159
160 switch (name[0]) {
161 case GPROF_STATE:
162 error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state);
163 if (error)
164 return (error);
165 if (gp->state == GMON_PROF_OFF)
166 stopprofclock(kernproc);
167 else
168 startprofclock(kernproc);
169 return (0);
170 case GPROF_COUNT:
91447636
A
171 return (sysctl_struct(oldp, oldlenp, newp, newlen,
172 gp->kcount, gp->kcountsize));
1c79356b
A
173 case GPROF_FROMS:
174 return (sysctl_struct(oldp, oldlenp, newp, newlen,
91447636 175 gp->froms, gp->fromssize));
1c79356b
A
176 case GPROF_TOS:
177 return (sysctl_struct(oldp, oldlenp, newp, newlen,
91447636 178 gp->tos, gp->tossize));
1c79356b
A
179 case GPROF_GMONPARAM:
180 return (sysctl_rdstruct(oldp, oldlenp, newp, gp, sizeof *gp));
181 default:
91447636 182 return (ENOTSUP);
1c79356b
A
183 }
184 /* NOTREACHED */
185}
186
187
/*
 * mcount() called with interrupts disabled.
 *
 * Compiler-inserted call-graph hook: records one traversal of the arc
 * frompc -> selfpc in the gmon tables.  froms[] hashes the caller pc to
 * the head of a chain of tostruct arcs; tos[0].link is the allocation
 * cursor for new arcs.  All table updates are serialized by mcount_lock.
 */
void
mcount(
	register u_long frompc,
	register u_long selfpc
)
{
	unsigned short *frompcindex;
	register struct tostruct *top, *prevtop;
	struct gmonparam *p = &_gmonparam;
	register long toindex;

	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;

	lck_spin_lock(mcount_lock);

	/*
	 * check that frompcindex is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	frompc -= p->lowpc;
	/* unsigned compare also rejects pcs below lowpc (they wrap huge) */
	if (frompc > p->textsize)
		goto done;

	/* hash the caller pc into its froms[] chain-head slot */
	frompcindex = &p->froms[frompc / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc
		 */
		toindex = ++p->tos[0].link;	/* claim the next free tostruct */
		if (toindex >= p->tolimit) {
			/* halt further profiling */
			goto overflow;
		}
		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit) {
				goto overflow;
			}
			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			/* unlink from mid-chain, relink at the head (MRU order) */
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
	lck_spin_unlock(mcount_lock);
	return;

overflow:
	/* arc table exhausted: disable profiling permanently */
	p->state = GMON_PROF_ERROR;
	lck_spin_unlock(mcount_lock);
	printf("mcount: tos overflow\n");
	return;
}
301
302#endif /* GPROF */
303
91447636
A
/*
 * Per-uprof buffer-list lock hooks.  Currently no-ops: the callers rely
 * on ml_set_interrupts_enabled(FALSE) to exclude the profile interrupt.
 */
#define PROFILE_LOCK(x)
#define PROFILE_UNLOCK(x)
1c79356b 306
1c79356b 307int
91447636 308profil(struct proc *p, register struct profil_args *uap, __unused register_t *retval)
1c79356b 309{
91447636
A
310 struct uprof *upp = &p->p_stats->p_prof;
311 int s;
1c79356b
A
312
313 if (uap->pcscale > (1 << 16))
314 return (EINVAL);
315 if (uap->pcscale == 0) {
316 stopprofclock(p);
317 return (0);
318 }
319
320 /* Block profile interrupts while changing state. */
91447636
A
321 s = ml_set_interrupts_enabled(FALSE);
322
323 if (proc_is64bit(p)) {
324 struct user_uprof *user_upp = &p->p_stats->user_p_prof;
325 struct user_uprof *upc, *nupc;
326
327 PROFILE_LOCK(&user_upp->pr_lock);
328 user_upp->pr_base = uap->bufbase;
329 user_upp->pr_size = uap->bufsize;
330 user_upp->pr_off = uap->pcoffset;
331 user_upp->pr_scale = uap->pcscale;
332 upp->pr_base = NULL;
333 upp->pr_size = 0;
334 upp->pr_scale = 0;
335
336 /* remove buffers previously allocated with add_profil() */
337 for (upc = user_upp->pr_next; upc; upc = nupc) {
338 nupc = upc->pr_next;
339 kfree(upc, sizeof (*upc));
340 }
341 user_upp->pr_next = 0;
342 PROFILE_UNLOCK(&user_upp->pr_lock);
343 }
344 else {
345 struct uprof *upc, *nupc;
346
347 PROFILE_LOCK(&upp->pr_lock);
348 upp->pr_base = CAST_DOWN(caddr_t, uap->bufbase);
349 upp->pr_size = uap->bufsize;
350 upp->pr_off = uap->pcoffset;
351 upp->pr_scale = uap->pcscale;
352
353 /* remove buffers previously allocated with add_profil() */
354 for (upc = upp->pr_next; upc; upc = nupc) {
355 nupc = upc->pr_next;
356 kfree(upc, sizeof (struct uprof));
357 }
358 upp->pr_next = 0;
359 PROFILE_UNLOCK(&upp->pr_lock);
1c79356b
A
360 }
361
1c79356b 362 startprofclock(p);
55e303ae 363 ml_set_interrupts_enabled(s);
1c79356b
A
364 return(0);
365}
366
1c79356b 367int
91447636 368add_profil(struct proc *p, register struct add_profil_args *uap, __unused register_t *retval)
1c79356b
A
369{
370 struct uprof *upp = &p->p_stats->p_prof, *upc;
91447636 371 struct user_uprof *user_upp = NULL, *user_upc;
1c79356b 372 int s;
91447636 373 boolean_t is64bit = proc_is64bit(p);
1c79356b 374
91447636
A
375 if (is64bit) {
376 user_upp = &p->p_stats->user_p_prof;
377 if (user_upp->pr_scale == 0)
378 return (0);
379 }
380 else {
381 if (upp->pr_scale == 0)
382 return (0);
383 }
384
385 s = ml_set_interrupts_enabled(FALSE);
386
387 if (is64bit) {
388 user_upc = (struct user_uprof *) kalloc(sizeof (struct user_uprof));
389 user_upc->pr_base = uap->bufbase;
390 user_upc->pr_size = uap->bufsize;
391 user_upc->pr_off = uap->pcoffset;
392 user_upc->pr_scale = uap->pcscale;
393 PROFILE_LOCK(&user_upp->pr_lock);
394 user_upc->pr_next = user_upp->pr_next;
395 user_upp->pr_next = user_upc;
396 PROFILE_UNLOCK(&user_upp->pr_lock);
397 }
398 else {
399 upc = (struct uprof *) kalloc(sizeof (struct uprof));
400 upc->pr_base = CAST_DOWN(caddr_t, uap->bufbase);
401 upc->pr_size = uap->bufsize;
402 upc->pr_off = uap->pcoffset;
403 upc->pr_scale = uap->pcscale;
404 PROFILE_LOCK(&upp->pr_lock);
405 upc->pr_next = upp->pr_next;
406 upp->pr_next = upc;
407 PROFILE_UNLOCK(&upp->pr_lock);
408 }
409
55e303ae 410 ml_set_interrupts_enabled(s);
1c79356b
A
411 return(0);
412}
413
/*
 * Scale is a fixed-point number with the binary point 16 bits
 * into the value, and is <= 1.0.  pc is at most 32 bits, so the
 * intermediate result is at most 48 bits.
 *
 * The result is masked with ~1 so it is always an even (u_short-aligned)
 * byte offset into the profiling buffer.
 */
#define PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
422
423/*
424 * Collect user-level profiling statistics; called on a profiling tick,
425 * when a process is running in user-mode. We use
426 * an AST that will vector us to trap() with a context in which copyin
427 * and copyout will work. Trap will then call addupc_task().
428 *
429 * Note that we may (rarely) not get around to the AST soon enough, and
430 * lose profile ticks when the next tick overwrites this one, but in this
431 * case the system is overloaded and the profile is probably already
432 * inaccurate.
433 *
434 * We can afford to take faults here. If the
435 * update fails, we simply turn off profiling.
436 */
437void
438addupc_task(p, pc, ticks)
439 register struct proc *p;
91447636 440 user_addr_t pc;
1c79356b
A
441 u_int ticks;
442{
1c79356b
A
443 register u_int off;
444 u_short count;
445
446 /* Testing P_PROFIL may be unnecessary, but is certainly safe. */
447 if ((p->p_flag & P_PROFIL) == 0 || ticks == 0)
448 return;
449
91447636
A
450 if (proc_is64bit(p)) {
451 struct user_uprof *prof;
452 user_addr_t cell;
453
454 for (prof = &p->p_stats->user_p_prof; prof; prof = prof->pr_next) {
455 off = PC_TO_INDEX(pc, prof);
456 cell = (prof->pr_base + off);
457 if (cell >= prof->pr_base &&
458 cell < (prof->pr_size + prof->pr_base)) {
459 if (copyin(cell, (caddr_t) &count, sizeof(count)) == 0) {
460 count += ticks;
461 if(copyout((caddr_t) &count, cell, sizeof(count)) == 0)
462 return;
463 }
464 p->p_stats->user_p_prof.pr_scale = 0;
465 stopprofclock(p);
466 break;
467 }
468 }
469 }
470 else {
471 struct uprof *prof;
472 short *cell;
473
474 for (prof = &p->p_stats->p_prof; prof; prof = prof->pr_next) {
475 off = PC_TO_INDEX(CAST_DOWN(uint, pc),prof);
476 cell = (short *)(prof->pr_base + off);
477 if (cell >= (short *)prof->pr_base &&
478 cell < (short*)(prof->pr_size + (int) prof->pr_base)) {
479 if (copyin(CAST_USER_ADDR_T(cell), (caddr_t) &count, sizeof(count)) == 0) {
480 count += ticks;
481 if(copyout((caddr_t) &count, CAST_USER_ADDR_T(cell), sizeof(count)) == 0)
482 return;
483 }
484 p->p_stats->p_prof.pr_scale = 0;
485 stopprofclock(p);
486 break;
487 }
488 }
1c79356b
A
489 }
490}