bsd/kern/kern_pcsamples.c (from xnu-517.7.7)

/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/kdebug.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <vm/vm_kern.h>

unsigned int pc_buftomem = 0;
u_long *pc_buffer = 0;          /* buffer that holds each pc */
u_long *pc_bufptr = 0;
u_long *pc_buflast = 0;
unsigned int npcbufs = 8192;    /* number of pc entries in buffer */
unsigned int pc_bufsize = 0;
unsigned int pcsample_flags = 0;
unsigned int pcsample_enable = 0;

pid_t pc_sample_pid = 0;
boolean_t pc_trace_frameworks = FALSE;

char pcsample_comm[MAXCOMLEN + 1];

/* Set the default framework boundaries */
u_long pcsample_beg = 0;
u_long pcsample_end = 0;

static pid_t global_state_pid = -1;     /* Used to control exclusive use of pc_buffer */

extern int pc_trace_buf[];
extern int pc_trace_cnt;
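
/*
 * enable_branch_tracing: mark the sampling target.  If pc_sample_pid
 * names a process, set P_BTRACE on it so branch_tracing_enabled()
 * reports TRUE while that process runs; a pid of -1 selects framework
 * tracing for all processes instead.  Returns 1 on success, 0 on i386
 * where branch tracing is not supported here.
 */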
int
enable_branch_tracing()
{
#ifndef i386
    struct proc *p;

    if (-1 != pc_sample_pid) {
        p = pfind(pc_sample_pid);
        if (p) {
            p->p_flag |= P_BTRACE;
        }
    }
    else {
        pc_trace_frameworks = TRUE;
    }

    return 1;
#else
    return 0;
#endif
}
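
/*
 * disable_branch_tracing: the inverse of enable_branch_tracing().
 * Clears P_BTRACE on the traced process (or the frameworks flag when
 * pc_sample_pid is -1) and clears the branch-trace enable bit in the
 * processor via clr_be_bit().
 */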
int
disable_branch_tracing()
{
    struct proc *p;

    switch (pc_sample_pid) {
    case -1:
        pc_trace_frameworks = FALSE;
        break;
    case 0:
        break;
    default:
        p = pfind(pc_sample_pid);
        if (p) {
            p->p_flag &= ~P_BTRACE;
        }
        break;
    }
    clr_be_bit();
    return 1;
}

/*
 * this only works for the current proc as it
 * is called from context_switch in the scheduler
 */
int
branch_tracing_enabled()
{
    struct proc *p = current_proc();

    if (TRUE == pc_trace_frameworks) return TRUE;
    if (p) {
        return (P_BTRACE == (p->p_flag & P_BTRACE));
    }
    return 0;
}
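
/*
 * add_pcbuffer: drain the low-level trace buffer (pc_trace_buf) into
 * pc_buffer.  Only samples inside the configured range
 * [pcsample_beg, pcsample_end) are kept, and a sample identical to the
 * previous one is dropped on the assumption that the thread is
 * spinning.  When fewer than pc_trace_cnt slots remain, sampling and
 * branch tracing are turned off and anyone sleeping on pcsample_enable
 * in pcsamples_control() is woken up.
 */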
void
add_pcbuffer()
{
    int i;
    u_long pc;
    struct proc *curproc;
    extern unsigned int kdebug_flags;

    if (!pcsample_enable)
        return;

    for (i = 0; i < pc_trace_cnt; i++)
    {
        pc = pc_trace_buf[i];

        if ((pcsample_beg <= pc) && (pc < pcsample_end))
        {
            if (pc_bufptr > pc_buffer)
            {
                if ((*(pc_bufptr - 1)) == pc)
                    continue;   /* Ignore, probably spinning */
            }

            /* Then the sample is in our range */
            *pc_bufptr = (u_long)pc;
            pc_bufptr++;
        }
    }

    /* We never wrap the buffer */
    if ((pc_bufptr + pc_trace_cnt) >= pc_buflast)
    {
        pcsample_enable = 0;
        (void)disable_branch_tracing();
        wakeup(&pcsample_enable);
    }
    return;
}
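
/*
 * pcsamples_bootstrap: allocate the sample buffer.  Branch tracing is
 * turned off first; npcbufs entries of sizeof(*pc_buffer) bytes are
 * allocated from kernel_map and the buffer pointers are reset.
 * Returns 0 on success, ENOTSUP if branch tracing cannot be disabled,
 * and EINVAL if the allocation fails.
 */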
int
pcsamples_bootstrap()
{
    if (!disable_branch_tracing())
        return(ENOTSUP);

    pc_bufsize = npcbufs * sizeof(*pc_buffer);
    if (kmem_alloc(kernel_map, &pc_buftomem,
                   (vm_size_t)pc_bufsize) == KERN_SUCCESS)
        pc_buffer = (u_long *)pc_buftomem;
    else
        pc_buffer = (u_long *)0;

    if (pc_buffer) {
        pc_bufptr = pc_buffer;
        pc_buflast = &pc_bufptr[npcbufs];
        pcsample_enable = 0;
        return(0);
    } else {
        pc_bufsize = 0;
        return(EINVAL);
    }
}
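
/*
 * pcsamples_reinit: release any existing sample buffer and allocate a
 * fresh one via pcsamples_bootstrap().  Sampling is disabled while the
 * buffer is replaced.
 */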
int
pcsamples_reinit()
{
    int ret = 0;

    pcsample_enable = 0;

    if (pc_bufsize && pc_buffer)
        kmem_free(kernel_map, (vm_offset_t)pc_buffer, pc_bufsize);

    ret = pcsamples_bootstrap();
    return(ret);
}
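
/*
 * pcsamples_clear: tear down sampling entirely.  The buffer is freed,
 * the framework boundaries and the target pid/command are reset, and
 * the exclusive claim recorded in global_state_pid is given up.
 */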
void
pcsamples_clear()
{
    /* Clean up the sample buffer, set defaults */
    global_state_pid = -1;
    pcsample_enable = 0;
    if (pc_bufsize && pc_buffer)
        kmem_free(kernel_map, (vm_offset_t)pc_buffer, pc_bufsize);
    pc_buffer = (u_long *)0;
    pc_bufptr = (u_long *)0;
    pc_buflast = (u_long *)0;
    pc_bufsize = 0;
    pcsample_beg = 0;
    pcsample_end = 0;
    bzero((void *)pcsample_comm, sizeof(pcsample_comm));
    (void)disable_branch_tracing();
    pc_sample_pid = 0;
    pc_trace_frameworks = FALSE;
}
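
/*
 * pcsamples_control: the control interface for PC sampling.
 * name[0] selects the operation (PCSAMPLE_DISABLE, PCSAMPLE_SETNUMBUF,
 * PCSAMPLE_GETNUMBUF, PCSAMPLE_SETUP, PCSAMPLE_REMOVE, PCSAMPLE_READBUF,
 * PCSAMPLE_SETREG, PCSAMPLE_COMM) and name[1] carries an integer
 * argument (used by PCSAMPLE_SETNUMBUF and PCSAMPLE_READBUF); larger
 * arguments and results move through the 'where'/'sizep' user buffer.
 * Except for PCSAMPLE_GETNUMBUF, only one process at a time may issue
 * control requests; ownership is tracked in global_state_pid and other
 * callers get EBUSY.
 */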
int
pcsamples_control(int *name, u_int namelen, char *where, size_t *sizep)
{
    int ret = 0;
    int size = *sizep;
    unsigned int value = name[1];
    pcinfo_t pc_bufinfo;
    pid_t *pidcheck;

    pid_t curpid;
    struct proc *p, *curproc;

    if (name[0] != PCSAMPLE_GETNUMBUF)
    {
        if (curproc = current_proc())
            curpid = curproc->p_pid;
        else
            return (ESRCH);

        if (global_state_pid == -1)
            global_state_pid = curpid;
        else if (global_state_pid != curpid)
        {
            if ((p = pfind(global_state_pid)) == NULL)
            {
                /* The global pid no longer exists */
                global_state_pid = curpid;
            }
            else
            {
                /* The global pid exists, deny this request */
                return(EBUSY);
            }
        }
    }

    switch (name[0]) {
    case PCSAMPLE_DISABLE:      /* used to disable */
        pcsample_enable = 0;
        break;
    case PCSAMPLE_SETNUMBUF:
        /* The buffer size is bounded by a min and max number of samples */
        if (value < pc_trace_cnt) {
            ret = EINVAL;
            break;
        }
        if (value <= MAX_PCSAMPLES)
            /* npcbufs = value & ~(PC_TRACE_CNT-1); */
            npcbufs = value;
        else
            npcbufs = MAX_PCSAMPLES;
        break;
    case PCSAMPLE_GETNUMBUF:
        if (size < sizeof(pcinfo_t)) {
            ret = EINVAL;
            break;
        }
        pc_bufinfo.npcbufs = npcbufs;
        pc_bufinfo.bufsize = pc_bufsize;
        pc_bufinfo.enable = pcsample_enable;
        pc_bufinfo.pcsample_beg = pcsample_beg;
        pc_bufinfo.pcsample_end = pcsample_end;
        if (copyout(&pc_bufinfo, where, sizeof(pc_bufinfo)))
        {
            ret = EINVAL;
        }
        break;
    case PCSAMPLE_SETUP:
        ret = pcsamples_reinit();
        break;
    case PCSAMPLE_REMOVE:
        pcsamples_clear();
        break;
    case PCSAMPLE_READBUF:
        /* A nonzero value says enable and wait on the buffer */
        /* A zero value says read up the buffer immediately */
        if (value == 0)
        {
            /* Do not wait on the buffer */
            pcsample_enable = 0;
            (void)disable_branch_tracing();
            ret = pcsamples_read(where, sizep);
            break;
        }
        else if ((pc_bufsize <= 0) || (!pc_buffer))
        {
            /* Enable only if the buffer is initialized */
            ret = EINVAL;
            break;
        }

        /* Turn on branch tracing */
        if (!enable_branch_tracing())
        {
            ret = ENOTSUP;
            break;
        }

        /* Enable sampling */
        pcsample_enable = 1;

        ret = tsleep(&pcsample_enable, PRIBIO | PCATCH, "pcsample", 0);
        pcsample_enable = 0;
        (void)disable_branch_tracing();

        if (ret)
        {
            /* Eventually fix this... if (ret != EINTR) */
            if (ret)
            {
                /* On errors, except EINTR, we want to clean up buffer ptrs */
                /* pc_bufptr = pc_buffer; */
                *sizep = 0;
            }
        }
        else
        {
            /* The only way to get here is if the buffer is full */
            ret = pcsamples_read(where, sizep);
        }

        break;
    case PCSAMPLE_SETREG:
        if (size < sizeof(pcinfo_t))
        {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &pc_bufinfo, sizeof(pcinfo_t)))
        {
            ret = EINVAL;
            break;
        }

        pcsample_beg = pc_bufinfo.pcsample_beg;
        pcsample_end = pc_bufinfo.pcsample_end;
        break;
    case PCSAMPLE_COMM:
        if (!(sizeof(pcsample_comm) > size))
        {
            ret = EINVAL;
            break;
        }
        bzero((void *)pcsample_comm, sizeof(pcsample_comm));
        if (copyin(where, pcsample_comm, size))
        {
            ret = EINVAL;
            break;
        }

        /* Check for command name or pid */
        if (pcsample_comm[0] != '\0')
        {
            ret = EOPNOTSUPP;
            break;
        }
        else
        {
            if (size != (2 * sizeof(pid_t)))
            {
                ret = EINVAL;
                break;
            }
            else
            {
                pidcheck = (pid_t *)pcsample_comm;
                pc_sample_pid = pidcheck[1];
            }
        }
        break;
    default:
        ret = EOPNOTSUPP;
        break;
    }
    return(ret);
}

/*
   This buffer must be read up in one call.
   If the buffer isn't big enough to hold
   all the samples, it will copy up enough
   to fill the buffer and throw the rest away.
   This buffer never wraps.
*/
int
pcsamples_read(u_long *buffer, size_t *number)
{
    int count = 0;
    int ret = 0;
    int copycount;

    count = (*number) / sizeof(u_long);

    if (count && pc_bufsize && pc_buffer)
    {
        copycount = pc_bufptr - pc_buffer;

        if (copycount <= 0)
        {
            *number = 0;
            return(0);
        }

        if (copycount > count)
            copycount = count;

        /* We actually have data to send up */
        if (copyout(pc_buffer, buffer, copycount * sizeof(u_long)))
        {
            *number = 0;
            return(EINVAL);
        }
        *number = copycount;
        pc_bufptr = pc_buffer;
        return(0);
    }
    else
    {
        *number = 0;
        return(0);
    }
}
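
/*
 * Typical sequence for a client, a sketch inferred from the switch in
 * pcsamples_control() above (the MIB that routes requests to this file
 * is defined elsewhere, e.g. in sys/sysctl.h, and is not shown here):
 *
 *   PCSAMPLE_SETNUMBUF  - choose the number of samples to keep
 *   PCSAMPLE_SETUP      - (re)allocate the kernel sample buffer
 *   PCSAMPLE_SETREG     - set the pcsample_beg/pcsample_end PC range
 *   PCSAMPLE_COMM       - optionally select a target pid (a pid_t pair
 *                         with the pid in the second slot; a command
 *                         name is rejected with EOPNOTSUPP)
 *   PCSAMPLE_READBUF    - with a nonzero argument, enable sampling and
 *                         sleep until the buffer fills, then copy the
 *                         samples out; with zero, read whatever has
 *                         been collected so far
 *   PCSAMPLE_REMOVE     - free the buffer and reset all state
 */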