/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Called from a trigger. Actually takes the data from the different
 * modules and puts them in a buffer.
 */
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
// #include <libkern/libkern.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>

#include <chud/chud_xnu.h>
#include <kperf/kperf.h>

#include <kperf/buffer.h>
#include <kperf/timetrigger.h>
#include <kperf/threadinfo.h>
#include <kperf/callstack.h>
#include <kperf/sample.h>
#include <kperf/action.h>
#include <kperf/context.h>
#include <kperf/ast.h>
/* cap on the number of actions (assumed bound, referenced below in
 * kperf_action_set_count)
 */
#define ACTION_MAX 32

/* the list of different actions to take */
struct action
{
	uint32_t sample;     /* samplers to run */
	uint32_t userdata;   /* opaque tag emitted with each sample */
	int      pid_filter; /* pid to sample, -1 for any */
};

/* the list of actions */
static unsigned actionc = 0;
static struct action *actionv = NULL;
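
/*
 * NB: the actionid values passed to the kperf_action_* calls below are
 * 1-based; actionid N lives at actionv[N-1], and actionid 0 is the
 * "NULL" action that samples nothing. For example (illustrative):
 *
 *	kperf_action_set_samplers( 1, SAMPLER_TINFO | SAMPLER_KSTACK );
 *
 * configures the first slot, actionv[0].
 */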
/* whether to record callstacks on kdebug events */
static int kdebug_callstack_action = 0;

/* whether we get a callback on a thread switch */
int kperf_cswitch_hook = 0;
/* indirect hooks to play nice with CHUD for the transition to kperf */
kern_return_t chudxnu_kdebug_callback_enter(chudxnu_kdebug_callback_func_t fn);
kern_return_t chudxnu_kdebug_callback_cancel(void);
/* Do the real work! */
/* this can be called in any context ... right? */
static kern_return_t
kperf_sample_internal( struct kperf_sample *sbuf,
                       struct kperf_context *context,
                       unsigned sample_what, unsigned sample_flags,
                       unsigned actionid )
{
	boolean_t enabled;
	int did_ucallstack = 0, did_tinfo_extra = 0;
	uint32_t userdata = 0;
	/* Not much point continuing here, but what to do? Return
	 * SAMPLE_SHUTDOWN? Cut a tracepoint and continue?
	 */
	if( sample_what == 0 )
		return SAMPLE_CONTINUE;
	int is_kernel = (context->cur_pid == 0);

	sbuf->kcallstack.nframes = 0;
	sbuf->kcallstack.flags = CALLSTACK_VALID;
	sbuf->ucallstack.nframes = 0;
	sbuf->ucallstack.flags = CALLSTACK_VALID;
	/* an event occurred. Sample everything and dump it in a
	 * buffer.
	 */

	/* collect data from samplers */
	if( sample_what & SAMPLER_TINFO ) {
		kperf_threadinfo_sample( &sbuf->threadinfo, context );

		/* See if we should drop idle thread samples */
		if( !(sample_flags & SAMPLE_FLAG_IDLE_THREADS) )
			if (sbuf->threadinfo.runmode & 0x40)
				return SAMPLE_CONTINUE;
	}
	if( (sample_what & SAMPLER_KSTACK)
	    && !(sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) )
		kperf_kcallstack_sample( &sbuf->kcallstack, context );
	/* sample user stuff only when there is a user context */
	if( !is_kernel )
	{
		if( sample_flags & SAMPLE_FLAG_PEND_USER )
		{
			if( (sample_what & SAMPLER_USTACK)
			    && !(sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) )
				did_ucallstack = kperf_ucallstack_pend( context );

			if( sample_what & SAMPLER_TINFOEX )
				did_tinfo_extra = kperf_threadinfo_extra_pend( context );
		}
		else
		{
			if( (sample_what & SAMPLER_USTACK)
			    && !(sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) )
				kperf_ucallstack_sample( &sbuf->ucallstack, context );

			if( sample_what & SAMPLER_TINFOEX )
				kperf_threadinfo_extra_sample( &sbuf->tinfo_ex,
				                               context );
		}
	}
	if ( sample_what & SAMPLER_PMC_CPU )
		kperf_kpc_cpu_sample( &sbuf->kpcdata,
		                      (sample_what & SAMPLER_PMC_CPU) != 0 );
	/* lookup the user tag, if any */
	if( (actionid > 0)
	    && (actionid <= actionc) )
		userdata = actionv[actionid-1].userdata;
	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA( PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	          actionid, userdata, sample_flags );
	/* dump threadinfo */
	if( sample_what & SAMPLER_TINFO )
		kperf_threadinfo_log( &sbuf->threadinfo );

	/* dump kcallstack */
	if( sample_what & SAMPLER_KSTACK )
		kperf_kcallstack_log( &sbuf->kcallstack );
	/* dump user stuff */
	if ( !is_kernel )
	{
		/* the user-side work was pended; just note that here */
		if ( sample_flags & SAMPLE_FLAG_PEND_USER )
		{
			if ( did_ucallstack )
				BUF_INFO1( PERF_CS_UPEND, 0 );

			if ( did_tinfo_extra )
				BUF_INFO1( PERF_TI_XPEND, 0 );
		}
		else
		{
			if( sample_what & SAMPLER_USTACK )
				kperf_ucallstack_log( &sbuf->ucallstack );

			if( sample_what & SAMPLER_TINFOEX )
				kperf_threadinfo_extra_log( &sbuf->tinfo_ex );
		}
	}
	if ( sample_what & SAMPLER_PMC_CPU )
		kperf_kpc_cpu_log( &sbuf->kpcdata );

	BUF_DATA1( PERF_GEN_EVENT | DBG_FUNC_END, sample_what );

	/* intrs back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}
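
/*
 * The result of the above is one bracketed run of trace entries per
 * sample, cut with interrupts off so a sample's entries are never split:
 *
 *	PERF_GEN_EVENT | DBG_FUNC_START  (sample_what, actionid, userdata, flags)
 *	  ... one entry per sampler that ran (threadinfo, kcallstack, ...)
 *	PERF_GEN_EVENT | DBG_FUNC_END    (sample_what)
 */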
/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample( struct kperf_sample *sbuf,
              struct kperf_context *context,
              unsigned actionid, unsigned sample_flags )
{
	unsigned sample_what = 0;
	int pid_filter;
	/* work out what to sample, if anything */
	if( (actionid > actionc) || (actionid == 0) )
		return SAMPLE_SHUTDOWN;
	/* check the pid filter against the context's current pid.
	 * filter pid == -1 means any pid
	 */
	pid_filter = actionv[actionid-1].pid_filter;
	if( (pid_filter != -1)
	    && (pid_filter != context->cur_pid) )
		return SAMPLE_CONTINUE;
	/* the samplers to run */
	sample_what = actionv[actionid-1].sample;

	/* do the actual sample operation */
	return kperf_sample_internal( sbuf, context, sample_what,
	                              sample_flags, actionid );
}
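
/*
 * Illustrative caller (a sketch, not code from this file): a timer
 * trigger would fill in a context and hand it here, along the lines of
 *
 *	struct kperf_context ctx;
 *	ctx.cur_thread = chudxnu_current_thread();
 *	ctx.cur_pid = chudxnu_pid_for_task(
 *	                  chudxnu_task_for_thread(ctx.cur_thread) );
 *	ctx.trigger_type = TRIGGER_TYPE_TIMER;
 *	kperf_sample( kperf_intr_sample_buffer(), &ctx, actionid, 0 );
 *
 * TRIGGER_TYPE_TIMER is assumed here; this file itself only sets
 * TRIGGER_TYPE_TRACE.
 */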
/* ast callback on a thread */
void
kperf_thread_ast_handler( thread_t thread )
{
	int r;
	uint32_t t_chud;
	unsigned sample_what = 0;
	/* we know we're on a thread, so let's do stuff */
	task_t task = NULL;

	BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_START, thread);
	/* use ~2kb of the stack for the sample, should be ok since we're in the ast */
	struct kperf_sample sbuf;
	bzero(&sbuf, sizeof(struct kperf_sample));
	/* make a context, take a sample */
	struct kperf_context ctx;
	ctx.cur_thread = thread;
	ctx.cur_pid = -1;

	task = chudxnu_task_for_thread(thread);
	if(task)
		ctx.cur_pid = chudxnu_pid_for_task(task);
	/* decode the chud bits so we know what to sample */
	t_chud = kperf_get_thread_bits(thread);

	if (t_chud & T_AST_NAME)
		sample_what |= SAMPLER_TINFOEX;

	if (t_chud & T_AST_CALLSTACK)
	{
		sample_what |= SAMPLER_USTACK;
		sample_what |= SAMPLER_TINFO;
	}
	/* do the sample, just of the user stuff */
	r = kperf_sample_internal( &sbuf, &ctx, sample_what, 0, 0 );

	BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_END, r);
}
/* register AST bits */
int
kperf_ast_pend( thread_t cur_thread, uint32_t check_bits,
                uint32_t set_bits )
{
	/* pend on the thread */
	uint32_t t_chud, set_done = 0;
	/* can only pend on the current thread */
	if( cur_thread != chudxnu_current_thread() )
		panic("pending to non-current thread");

	/* get our current bits */
	t_chud = kperf_get_thread_bits(cur_thread);

	/* see if it's already been done or pended */
	if( !(t_chud & check_bits) )
	{
		/* set the bit on the thread */
		t_chud |= set_bits;
		kperf_set_thread_bits(cur_thread, t_chud);

		/* set the actual AST */
		kperf_set_thread_ast( cur_thread );
		set_done = 1;
	}

	return set_done;

//	BUF_INFO3( dbg_code, (uintptr_t)cur_thread, t_chud, set_done );
}
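
/*
 * Usage sketch: the kdebug callback below pends a user callstack on the
 * current thread, and the bits are consumed by kperf_thread_ast_handler
 * when the thread next heads back to user space:
 *
 *	kperf_ast_pend( chudxnu_current_thread(),
 *	                T_AST_CALLSTACK, T_AST_CALLSTACK );
 */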
/*
 * kdebug callback & stack management
 */
#define IS_END(debugid)           ((debugid & 3) == DBG_FUNC_END)
#define IS_MIG(debugid)           (IS_END(debugid) && ((debugid & 0xff000000U) == KDBG_CLASS_ENCODE((unsigned)DBG_MIG, 0U)))
#define IS_MACH_SYSCALL(debugid)  (IS_END(debugid) && (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_MACH, DBG_MACH_EXCP_SC)))
#define IS_VM_FAULT(debugid)      (IS_END(debugid) && (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_MACH, DBG_MACH_VM)))
#define IS_BSD_SYSCALL(debugid)   (IS_END(debugid) && (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_BSD, DBG_BSD_EXCP_SC)))
#define IS_APPS_SIGNPOST(debugid) (IS_END(debugid) && (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_APPS, DBG_MACH_CHUD)))
#define IS_MACH_SIGNPOST(debugid) (IS_END(debugid) && (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_MACH, DBG_MACH_CHUD)))
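
/*
 * For reference, a kdebug debugid packs class, subclass, code and a
 * function qualifier into 32 bits (see sys/kdebug.h):
 *
 *	bits 31-24: class     (DBG_MACH, DBG_BSD, ...)
 *	bits 23-16: subclass  (DBG_MACH_EXCP_SC, ...)
 *	bits 15-2:  code
 *	bits 1-0:   function  (DBG_FUNC_START / NONE / END)
 *
 * so the macros above match only the END event of the selected classes.
 */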
static void
kperf_kdebug_callback(uint32_t debugid)
{
	int cur_pid = 0;
	task_t task = NULL;
	/* if we're not doing kperf callback stacks, return */
	if( !kdebug_callstack_action )
		return;

	/* if we're looking at a kperf tracepoint, don't recurse */
	if( (debugid & 0xff000000) == KDBG_CLASS_ENCODE(DBG_PERF, 0) )
		return;

	/* ensure interrupts are already off thanks to kdebug */
	if( ml_get_interrupts_enabled() )
		return;
	/* make sure we're not being called recursively. */
	if( kperf_kdbg_recurse(KPERF_RECURSE_IN) )
		return;

	/* check the happy list of trace codes */
	if( !( IS_MIG(debugid)
	       || IS_MACH_SYSCALL(debugid)
	       || IS_VM_FAULT(debugid)
	       || IS_BSD_SYSCALL(debugid)
	       || IS_MACH_SIGNPOST(debugid)
	       || IS_APPS_SIGNPOST(debugid) ) )
		goto done;
	/* check for kernel */
	thread_t thread = chudxnu_current_thread();
	task = chudxnu_task_for_thread(thread);
	if(task)
		cur_pid = chudxnu_pid_for_task(task);

	/* only sample user threads; the kernel has no user callstack */
	if( !cur_pid )
		goto done;
	/* setup a context */
	struct kperf_context ctx;
	struct kperf_sample *intbuf = NULL;

	ctx.cur_thread = thread;
	ctx.cur_pid = cur_pid;
	ctx.trigger_type = TRIGGER_TYPE_TRACE;
	/* CPU sample buffer -- only valid with interrupts off (above).
	 * Technically this isn't true -- tracepoints can be, and often
	 * are, cut from interrupt handlers, but none of those tracepoints
	 * should make it this far.
	 */
	intbuf = kperf_intr_sample_buffer();

	/* do the sample, pending the user-side work */
	kperf_sample( intbuf, &ctx, kdebug_callstack_action,
	              SAMPLE_FLAG_PEND_USER );
done:
	/* no longer recursive */
	kperf_kdbg_recurse(KPERF_RECURSE_OUT);

	/* dicing with death */
	BUF_INFO2(PERF_KDBG_HNDLR, debugid, cur_pid);

	/* pend the AST to pick up the user callstack on the way out */
	if( cur_pid )
		kperf_ast_pend( thread, T_AST_CALLSTACK, T_AST_CALLSTACK );
}
int
kperf_kdbg_get_stacks(void)
{
	return kdebug_callstack_action;
}
int
kperf_kdbg_set_stacks(int newval)
{
	/* set the value */
	kdebug_callstack_action = newval;

	/* enable or disable the callback from kdebug */
	if( newval )
		chudxnu_kdebug_callback_enter(NULL);
	else
		chudxnu_kdebug_callback_cancel();

	return 0;
}
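
/*
 * A sketch of the intended flow (the sysctl plumbing that normally
 * drives this lives elsewhere in kperf): setting a non-zero actionid,
 * e.g.
 *
 *	kperf_kdbg_set_stacks( 1 );
 *
 * routes matching kdebug events through kperf_kdebug_callback() above,
 * which samples that actionid with SAMPLE_FLAG_PEND_USER.
 */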
/* called from context switch handler */
void
kperf_switch_context( __unused thread_t old, thread_t new )
{
	task_t task = get_threadtask(new);
	int pid = chudxnu_pid_for_task(task);

	/* cut a tracepoint to tell us what the new thread's PID is */
	BUF_DATA2( PERF_TI_CSWITCH, thread_tid(new), pid );
}
/*
 * Action configuration
 */
unsigned
kperf_action_get_count(void)
{
	return actionc;
}
int
kperf_action_set_samplers( unsigned actionid, uint32_t samplers )
{
	if( (actionid > actionc) || (actionid == 0) )
		return EINVAL;

	actionv[actionid-1].sample = samplers;

	return 0;
}
int
kperf_action_get_samplers( unsigned actionid, uint32_t *samplers_out )
{
	if( (actionid > actionc) )
		return EINVAL;

	if( actionid == 0 )
		*samplers_out = 0; /* "NULL" action */
	else
		*samplers_out = actionv[actionid-1].sample;

	return 0;
}
int
kperf_action_set_userdata( unsigned actionid, uint32_t userdata )
{
	if( (actionid > actionc) || (actionid == 0) )
		return EINVAL;

	actionv[actionid-1].userdata = userdata;

	return 0;
}
int
kperf_action_get_userdata( unsigned actionid, uint32_t *userdata_out )
{
	if( (actionid > actionc) )
		return EINVAL;

	if( actionid == 0 )
		*userdata_out = 0; /* "NULL" action */
	else
		*userdata_out = actionv[actionid-1].userdata;

	return 0;
}
int
kperf_action_set_filter( unsigned actionid,
                         int pid )
{
	if( (actionid > actionc) || (actionid == 0) )
		return EINVAL;

	actionv[actionid-1].pid_filter = pid;

	return 0;
}
int
kperf_action_get_filter( unsigned actionid,
                         int *pid_out )
{
	if( (actionid > actionc) )
		return EINVAL;

	if( actionid == 0 )
		*pid_out = -1; /* "NULL" action */
	else
		*pid_out = actionv[actionid-1].pid_filter;

	return 0;
}
int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count, i;

	/* easy no-op */
	if( count == actionc )
		return 0;

	/* TODO: allow shrinking? */
	if( count < actionc )
		return EINVAL;

	/* cap it for good measure */
	if( count > ACTION_MAX )
		return EINVAL;
	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if( actionc == 0 )
	{
		int r = kperf_init();
		if( r )
			return r;
	}
	/* create a new array */
	new_actionv = kalloc( count * sizeof(*new_actionv) );
	if( new_actionv == NULL )
		return ENOMEM;

	old_actionv = actionv;
	old_count = actionc;
	if( old_actionv != NULL )
		bcopy( actionv, new_actionv, actionc * sizeof(*actionv) );

	bzero( &new_actionv[actionc], (count - old_count) * sizeof(*actionv) );

	for( i = old_count; i < count; i++ )
		new_actionv[i].pid_filter = -1;
	/* swap the new array in */
	actionv = new_actionv;
	actionc = count;

	/* free the old array, if any */
	if( old_actionv != NULL )
		kfree( old_actionv, old_count * sizeof(*actionv) );

	return 0;
}