]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/subr_prof.c
xnu-1504.3.12.tar.gz
[apple/xnu.git] / bsd / kern / subr_prof.c
1 /*
2 * Copyright (c) 2000-2008 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)subr_prof.c 8.3 (Berkeley) 9/23/93
62 */
63
64 #ifdef GPROF
65 #include <libkern/kernel_mach_header.h>
66 #endif
67
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/kernel.h>
71 #include <sys/proc_internal.h>
72 #include <sys/user.h>
73 #include <machine/spl.h>
74 #include <machine/machine_routines.h>
75
76 #include <sys/mount_internal.h>
77 #include <sys/sysproto.h>
78
79 #include <mach/mach_types.h>
80 #include <kern/kern_types.h>
81 #include <kern/cpu_number.h>
82 #include <kern/kalloc.h>
83
84 #ifdef GPROF
85 #include <sys/malloc.h>
86 #include <sys/gmon.h>
87
88 extern int sysctl_doprof(int *, u_int, user_addr_t, size_t *,
89 user_addr_t, size_t newlen);
90 extern int sysctl_struct(user_addr_t, size_t *,
91 user_addr_t, size_t, void *, int);
92
93 lck_spin_t * mcount_lock;
94 lck_grp_t * mcount_lock_grp;
95 lck_attr_t * mcount_lock_attr;
96
97 /*
98 * Froms is actually a bunch of unsigned shorts indexing tos
99 */
100 struct gmonparam _gmonparam = { .state = GMON_PROF_OFF };
101
102 /*
103 * This code uses 32 bit mach object segment information from the currently
104 * running kernel.
105 */
106 void
107 kmstartup(void)
108 {
109 tostruct_t *cp;
110 kernel_segment_command_t *sgp; /* 32 bit mach object file segment */
111 struct gmonparam *p = &_gmonparam;
112
113 sgp = getsegbyname("__TEXT");
114 p->lowpc = (u_int32_t)sgp->vmaddr;
115 p->highpc = (u_int32_t)(sgp->vmaddr + sgp->vmsize);
116
117 /*
118 * Round lowpc and highpc to multiples of the density we're using
119 * so the rest of the scaling (here and in gprof) stays in ints.
120 */
121 p->lowpc = ROUNDDOWN(p->lowpc, HISTFRACTION * sizeof(HISTCOUNTER));
122 p->highpc = ROUNDUP(p->highpc, HISTFRACTION * sizeof(HISTCOUNTER));
123 p->textsize = p->highpc - p->lowpc;
124 printf("Profiling kernel, textsize=%lu [0x%016lx..0x%016lx]\n",
125 p->textsize, p->lowpc, p->highpc);
126 p->kcountsize = p->textsize / HISTFRACTION;
127 p->hashfraction = HASHFRACTION;
128 p->fromssize = p->textsize / HASHFRACTION;
129 p->tolimit = p->textsize * ARCDENSITY / 100;
130 if (p->tolimit < MINARCS)
131 p->tolimit = MINARCS;
132 else if (p->tolimit > MAXARCS)
133 p->tolimit = MAXARCS;
134 p->tossize = p->tolimit * sizeof(tostruct_t);
135 /* Why not use MALLOC with M_GPROF ? */
136 cp = (tostruct_t *)kalloc(p->kcountsize + p->fromssize + p->tossize);
137 if (cp == 0) {
138 printf("No memory for profiling.\n");
139 return;
140 }
141 bzero(cp, p->kcountsize + p->tossize + p->fromssize);
142 p->tos = cp;
143 cp = (tostruct_t *)((vm_offset_t)cp + p->tossize);
144 p->kcount = (u_short *)cp;
145 cp = (tostruct_t *)((vm_offset_t)cp + p->kcountsize);
146 p->froms = (u_short *)cp;
147
148 mcount_lock_grp = lck_grp_alloc_init("MCOUNT", LCK_GRP_ATTR_NULL);
149 mcount_lock_attr = lck_attr_alloc_init();
150 mcount_lock = lck_spin_alloc_init(mcount_lock_grp, mcount_lock_attr);
151
152 }
153
154 /*
155 * Return kernel profiling information.
156 */
157 int
158 sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
159 user_addr_t newp, size_t newlen)
160 {
161 struct gmonparam *gp = &_gmonparam;
162 int error;
163
164 /* all sysctl names at this level are terminal */
165 if (namelen != 1)
166 return (ENOTDIR); /* overloaded */
167
168 switch (name[0]) {
169 case GPROF_STATE:
170 error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state);
171 if (error)
172 return (error);
173 if (gp->state == GMON_PROF_OFF)
174 stopprofclock(kernproc);
175 else
176 startprofclock(kernproc);
177 return (0);
178 case GPROF_COUNT:
179 return (sysctl_struct(oldp, oldlenp, newp, newlen,
180 gp->kcount, gp->kcountsize));
181 case GPROF_FROMS:
182 return (sysctl_struct(oldp, oldlenp, newp, newlen,
183 gp->froms, gp->fromssize));
184 case GPROF_TOS:
185 return (sysctl_struct(oldp, oldlenp, newp, newlen,
186 gp->tos, gp->tossize));
187 case GPROF_GMONPARAM:
188 return (sysctl_rdstruct(oldp, oldlenp, newp, gp, sizeof *gp));
189 default:
190 return (ENOTSUP);
191 }
192 /* NOTREACHED */
193 }
194
195
196 /*
197 * mcount() called with interrupts disabled.
198 */
/*
 * mcount() called with interrupts disabled.
 *
 * Record one traversal of the call-graph arc frompc -> selfpc in the
 * gmon tables: froms[] hashes the caller pc to the head of a chain of
 * tostruct_t arcs (indices into tos[]), and each arc carries the callee
 * pc and a traversal count.  tos[0].link doubles as the high-water
 * allocation index for new arcs.  The whole update is serialized on
 * mcount_lock; on arc-table exhaustion profiling is latched into
 * GMON_PROF_ERROR.
 */
void
mcount(
    uintptr_t frompc,
    uintptr_t selfpc
)
{
	unsigned short *frompcindex;
	tostruct_t *top, *prevtop;
	struct gmonparam *p = &_gmonparam;
	long toindex;

	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;

	lck_spin_lock(mcount_lock);

	/*
	 * check that frompcindex is a reasonable pc value.
	 * for example: signal catchers get called from the stack,
	 * not from text space. too bad.
	 * (frompc is unsigned, so a caller below lowpc wraps to a huge
	 * value and is rejected by the same comparison.)
	 */
	frompc -= p->lowpc;
	if (frompc > p->textsize)
		goto done;

	/* Hash the caller pc into its froms[] bucket. */
	frompcindex = &p->froms[frompc / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc
		 */
		toindex = ++p->tos[0].link;	/* allocate a fresh arc slot */
		if (toindex >= p->tolimit) {
			/* halt further profiling */
			goto overflow;
		}
		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit) {
				goto overflow;
			}
			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
	lck_spin_unlock(mcount_lock);
	return;

overflow:
	/* Out of arc slots: disable profiling permanently (until reset). */
	p->state = GMON_PROF_ERROR;
	lck_spin_unlock(mcount_lock);
	printf("mcount: tos overflow\n");
	return;
}
309
310 #endif /* GPROF */
311
312 #define PROFILE_LOCK(x)
313 #define PROFILE_UNLOCK(x)
314
315
/*
 * profil() system call.
 *
 * User-level PC-sampling via this interface is not supported here;
 * the call always fails with EINVAL.
 */
int
profil(struct proc *p, struct profil_args *uap, int32_t *retval)
{
	/* Touch the arguments so they are not flagged as unused. */
	(void)p;
	(void)uap;
	(void)retval;

	return EINVAL;
}
327
/*
 * add_profil() system call.
 *
 * Adding extra profiling buffers via this interface is not supported
 * here; the call always fails with EINVAL.
 */
int
add_profil(struct proc *p, struct add_profil_args *uap, int32_t *retval)
{
	/* Touch the arguments so they are not flagged as unused. */
	(void)p;
	(void)uap;
	(void)retval;

	return EINVAL;
}
339
340 /*
341 * Scale is a fixed-point number with the binary point 16 bits
342 * into the value, and is <= 1.0. pc is at most 32 bits, so the
343 * intermediate result is at most 48 bits.
344 */
345 //K64todo - this doesn't fit into 64 bit any more, it needs 64+16
346 #define PC_TO_INDEX(pc, prof) \
347 ((user_addr_t)(((u_quad_t)((pc) - (prof)->pr_off) * \
348 (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
349
350 /*
351 * Collect user-level profiling statistics; called on a profiling tick,
352 * when a process is running in user-mode. We use
353 * an AST that will vector us to trap() with a context in which copyin
354 * and copyout will work. Trap will then call addupc_task().
355 *
356 * Note that we may (rarely) not get around to the AST soon enough, and
357 * lose profile ticks when the next tick overwrites this one, but in this
358 * case the system is overloaded and the profile is probably already
359 * inaccurate.
360 *
361 * We can afford to take faults here. If the
362 * update fails, we simply turn off profiling.
363 */
void
addupc_task(struct proc *p, user_addr_t pc, u_int ticks)
{
	user_addr_t off;
	u_short count;

	/* Testing P_PROFIL may be unnecessary, but is certainly safe. */
	if ((p->p_flag & P_PROFIL) == 0 || ticks == 0)
		return;

	if (proc_is64bit(p)) {
		/* 64-bit process: profile buffers described by user_uprof. */
		struct user_uprof *prof;
		user_addr_t cell;

		/* Walk all profiling buffers; credit the first one covering pc. */
		for (prof = &p->p_stats->user_p_prof; prof; prof = prof->pr_next) {
			off = PC_TO_INDEX(pc, prof);
			cell = (prof->pr_base + off);
			if (cell >= prof->pr_base &&
			    cell < (prof->pr_size + prof->pr_base)) {
				/* Read-modify-write the u_short counter in user space. */
				if (copyin(cell, (caddr_t) &count, sizeof(count)) == 0) {
					count += ticks;
					if(copyout((caddr_t) &count, cell, sizeof(count)) == 0)
						return;
				}
				/*
				 * copyin or copyout faulted: turn off profiling
				 * for this process rather than retrying.
				 */
				p->p_stats->user_p_prof.pr_scale = 0;
				stopprofclock(p);
				break;
			}
		}
	}
	else {
		/* 32-bit process: same walk over the ILP32 uprof list. */
		struct uprof *prof;
		short *cell;

		for (prof = &p->p_stats->p_prof; prof; prof = prof->pr_next) {
			off = PC_TO_INDEX(pc,prof);
			cell = (short *)(prof->pr_base + off);
			if (cell >= (short *)prof->pr_base &&
			    cell < (short*)(prof->pr_size + prof->pr_base)) {
				/* NOTE(review): count is u_short but cell is short* —
				 * presumably the user buffer holds unsigned counters;
				 * only the raw bytes are copied either way. */
				if (copyin(CAST_USER_ADDR_T(cell), (caddr_t) &count, sizeof(count)) == 0) {
					count += ticks;
					if(copyout((caddr_t) &count, CAST_USER_ADDR_T(cell), sizeof(count)) == 0)
						return;
				}
				/* Fault while updating: disable profiling for this proc. */
				p->p_stats->p_prof.pr_scale = 0;
				stopprofclock(p);
				break;
			}
		}
	}
}