/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger: takes the data from the different sampler modules
 * and puts it in a buffer.
 */

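/*
 * Rough flow, as implemented below: a trigger (timer, kdebug tracepoint, or
 * thread AST) builds a kperf_context, kperf_sample() translates the action id
 * into a mask of samplers, and kperf_sample_internal() runs each enabled
 * sampler into a struct kperf_sample before logging the results with
 * interrupts disabled.
 */
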
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>

#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <kperf/action.h>
#include <kperf/ast.h>
#include <kperf/buffer.h>
#include <kperf/callstack.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_kpc.h>
#include <kperf/kperf_timer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
#include <kperf/thread_samplers.h>

#define ACTION_MAX (32)

/* the list of different actions to take */
struct action {
	uint32_t sample;           /* mask of samplers to run */
	uint32_t ucallstack_depth;
	uint32_t kcallstack_depth;
	uint32_t userdata;
	int pid_filter;
};

/* the list of actions */
static unsigned actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;

bool
kperf_sample_has_non_system(unsigned actionid)
{
	if (actionid > actionc) {
		return false;
	}

	if (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) {
		/* non-system samplers set */
		return true;
	} else {
		return false;
	}
}

static void
kperf_system_memory_log(void)
{
	BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
	    (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
	    (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
	    vm_page_speculative_count));
}

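/*
 * Gather the data requested by sample_what into sbuf for the thread and task
 * described by context, then log it to the trace buffer. Logging is skipped
 * when the sample landed on an idle thread (unless requested) or when the
 * sample only pended user-space work.
 */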
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned sample_what, unsigned sample_flags,
    unsigned actionid, uint32_t ucallstack_depth)
{
	int pended_ucallstack = 0;
	int pended_th_dispatch = 0;
	bool on_idle_thread = false;
	uint32_t userdata = actionid;

	/* not much point continuing here, but what to do ? return
	 * Shutdown? cut a tracepoint and continue?
	 */
	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}

	if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
		sample_what &= SAMPLER_SYS_MEM;
	}

	context->cur_thread->kperf_pet_gen = kperf_pet_gen;
	boolean_t is_kernel = (context->cur_pid == 0);

	if (actionid && actionid <= actionc) {
		sbuf->kcallstack.nframes = actionv[actionid - 1].kcallstack_depth;
	} else {
		sbuf->kcallstack.nframes = MAX_CALLSTACK_FRAMES;
	}

	if (ucallstack_depth) {
		sbuf->ucallstack.nframes = ucallstack_depth;
	} else {
		sbuf->ucallstack.nframes = MAX_CALLSTACK_FRAMES;
	}

	sbuf->kcallstack.flags = CALLSTACK_VALID;
	sbuf->ucallstack.flags = CALLSTACK_VALID;

	/* an event occurred. Sample everything and dump it in a
	 * buffer.
	 */

	/* collect data from samplers */
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);

		/* See if we should drop idle thread samples */
		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			if (sbuf->th_info.kpthi_runmode & 0x40) {
				on_idle_thread = true;
				goto log_sample;
			}
		}
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
	}

	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
	}

	if (sample_what & SAMPLER_KSTACK) {
		if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
			kperf_continuation_sample(&(sbuf->kcallstack), context);
		/* outside of interrupt context, backtrace the current thread */
		} else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
			kperf_backtrace_sample(&(sbuf->kcallstack), context);
		} else {
			kperf_kcallstack_sample(&(sbuf->kcallstack), context);
		}
	}

	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_sample(&(sbuf->tk_snapshot), context);
	}

	/* these samplers only apply to user threads */
	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(&(sbuf->meminfo), context);
		}
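
		/*
		 * With SAMPLE_FLAG_PEND_USER, the user-space samplers (user
		 * callstack and thread dispatch info) are not run here; they
		 * are pended and gathered later from the thread's AST handler,
		 * kperf_thread_ast_handler(), where user memory can be touched
		 * safely.
		 */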
		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (sample_what & SAMPLER_USTACK) {
				pended_ucallstack = kperf_ucallstack_pend(context,
				    sbuf->ucallstack.nframes);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				pended_th_dispatch = kperf_thread_dispatch_pend(context);
			}
		} else {
			if (sample_what & SAMPLER_USTACK) {
				kperf_ucallstack_sample(&(sbuf->ucallstack), context);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				kperf_thread_dispatch_sample(&(sbuf->th_dispatch), context);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}

log_sample:
	/* lookup the user tag, if any */
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	}

	/* avoid logging if this sample only pended samples */
	if (sample_flags & SAMPLE_FLAG_PEND_USER &&
	    !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) {
		return SAMPLE_CONTINUE;
	}

	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	    actionid, userdata, sample_flags);

	if (sample_flags & SAMPLE_FLAG_SYSTEM) {
		if (sample_what & SAMPLER_SYS_MEM) {
			kperf_system_memory_log();
		}
	}

	if (on_idle_thread) {
		goto log_sample_end;
	}

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}

	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_log(&(sbuf->th_scheduling));
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_log(&(sbuf->th_snapshot));
	}

	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}

	if (sample_what & SAMPLER_TH_INSCYC) {
		kperf_thread_inscyc_log(context);
	}

	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_log(&(sbuf->tk_snapshot));
	}

	/* dump user stuff */

	if (sample_what & SAMPLER_MEMINFO) {
		kperf_meminfo_log(&(sbuf->meminfo));
	}

	if (sample_flags & SAMPLE_FLAG_PEND_USER) {
		if (pended_ucallstack) {
			BUF_INFO(PERF_CS_UPEND);
		}

		if (pended_th_dispatch) {
			BUF_INFO(PERF_TI_DISPPEND);
		}
	} else {
		if (sample_what & SAMPLER_USTACK) {
			kperf_ucallstack_log(&(sbuf->ucallstack));
		}

		if (sample_what & SAMPLER_TH_DISPATCH) {
			kperf_thread_dispatch_log(&(sbuf->th_dispatch));
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}

log_sample_end:
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, on_idle_thread ? 1 : 0);

	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}

/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned actionid, unsigned sample_flags)
{
	/* work out what to sample, if anything */
	if ((actionid > actionc) || (actionid == 0)) {
		return SAMPLE_SHUTDOWN;
	}

	/* check the pid filter against the context's current pid.
	 * filter pid == -1 means any pid
	 */
	int pid_filter = actionv[actionid - 1].pid_filter;
	if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
		return SAMPLE_CONTINUE;
	}

	/* the samplers to run */
	unsigned int sample_what = actionv[actionid - 1].sample;

	/* do the actual sample operation */
	return kperf_sample_internal(sbuf, context, sample_what,
	    sample_flags, actionid,
	    actionv[actionid - 1].ucallstack_depth);
}
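
/*
 * Entry point for the kdebug trigger: invoked for tracepoints that match the
 * configured kdebug filter, it samples the current thread with the action
 * returned by kperf_kdebug_get_action().
 */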
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
	uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
	struct kperf_context ctx;
	struct kperf_sample *sample = NULL;
	kern_return_t kr = KERN_SUCCESS;
	int s;

	if (!kperf_kdebug_should_trigger(debugid)) {
		return;
	}

	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

	ctx.cur_thread = current_thread();
	ctx.cur_pid = task_pid(get_threadtask(ctx.cur_thread));
	ctx.trigger_type = TRIGGER_TYPE_KDEBUG;

	s = ml_set_interrupts_enabled(0);

	sample = kperf_intr_sample_buffer();

	if (!ml_at_interrupt_context()) {
		sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
		ctx.starting_fp = starting_fp;
	}

	kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

	ml_set_interrupts_enabled(s);
	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}

/*
 * This function allocates >2.3KB of the stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, kperf_get_thread_flags(thread));

	/* ~2KB of the stack for the sample since this is called from AST */
	struct kperf_sample sbuf;
	memset(&sbuf, 0, sizeof(struct kperf_sample));

	task_t task = get_threadtask(thread);

	if (task_did_exec(task) || task_is_exec_copy(task)) {
		BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
		return;
	}

	/* make a context, take a sample */
	struct kperf_context ctx;
	ctx.cur_thread = thread;
	ctx.cur_pid = task_pid(task);

	/* decode the flags to determine what to sample */
	unsigned int sample_what = 0;
	uint32_t flags = kperf_get_thread_flags(thread);

	if (flags & T_KPERF_AST_DISPATCH) {
		sample_what |= SAMPLER_TH_DISPATCH;
	}

	if (flags & T_KPERF_AST_CALLSTACK) {
		sample_what |= SAMPLER_USTACK;
		sample_what |= SAMPLER_TH_INFO;
	}

	uint32_t ucallstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(flags);

	int r = kperf_sample_internal(&sbuf, &ctx, sample_what, 0, 0, ucallstack_depth);

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, r);
}

/* register AST bits */
int
kperf_ast_pend(thread_t thread, uint32_t set_flags)
{
	/* can only pend on the current thread */
	if (thread != current_thread()) {
		panic("pending to non-current thread");
	}

	/* get our current bits */
	uint32_t flags = kperf_get_thread_flags(thread);

	/* see if it's already been done or pended */
	if (!(flags & set_flags)) {
		/* set the bit on the thread */
		flags |= set_flags;
		kperf_set_thread_flags(thread, flags);

		/* set the actual AST */
		act_set_kperf(thread);
		return 1;
	}

	return 0;
}
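
/*
 * The requested user callstack depth rides in the same per-thread kperf flags
 * word, encoded with T_KPERF_SET_CALLSTACK_DEPTH() and decoded by the AST
 * handler with T_KPERF_GET_CALLSTACK_DEPTH().
 */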
void
kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
{
	uint32_t ast_flags = kperf_get_thread_flags(thread);
	uint32_t existing_callstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast_flags);

	if (existing_callstack_depth != depth) {
		/* clear the old depth bits before encoding the new depth */
		ast_flags &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_callstack_depth);
		ast_flags |= T_KPERF_SET_CALLSTACK_DEPTH(depth);

		kperf_set_thread_flags(thread, ast_flags);
	}
}

int
kperf_kdbg_cswitch_get(void)
{
	return kperf_kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
	kperf_kdebug_cswitch = newval;
	kperf_on_cpu_update();

	return 0;
}

/*
 * Action configuration
 */
unsigned
kperf_action_get_count(void)
{
	return actionc;
}
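
/*
 * A minimal configuration sketch (hypothetical caller, e.g. the kperf sysctl
 * interface), composed only of setters defined in this file:
 *
 *	kperf_action_set_count(1);
 *	kperf_action_set_samplers(1, SAMPLER_TH_INFO | SAMPLER_KSTACK);
 *	kperf_action_set_filter(1, -1);
 *	kperf_action_set_kcallstack_depth(1, MAX_CALLSTACK_FRAMES);
 */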
int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	/* disallow both CPU and thread counters to be sampled in the same
	 * action */
	if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
		return EINVAL;
	}

	actionv[actionid - 1].sample = samplers;

	return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
	if ((actionid > actionc)) { /* allow zero for the NULL action */
		return EINVAL;
	}

	if (actionid == 0) {
		*samplers_out = 0; /* "NULL" action */
	} else {
		*samplers_out = actionv[actionid - 1].sample;
	}

	return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].userdata = userdata;

	return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
	if ((actionid > actionc)) { /* allow zero for the NULL action */
		return EINVAL;
	}

	if (actionid == 0) {
		*userdata_out = 0; /* "NULL" action */
	} else {
		*userdata_out = actionv[actionid - 1].userdata;
	}

	return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].pid_filter = pid;

	return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
	if ((actionid > actionc)) { /* allow zero for the NULL action */
		return EINVAL;
	}

	if (actionid == 0) {
		*pid_out = -1; /* "NULL" action */
	} else {
		*pid_out = actionv[actionid - 1].pid_filter;
	}

	return 0;
}

void
kperf_action_reset(void)
{
	for (unsigned int i = 0; i < actionc; i++) {
		kperf_action_set_samplers(i + 1, 0);
		kperf_action_set_userdata(i + 1, 0);
		kperf_action_set_filter(i + 1, -1);
		kperf_action_set_ucallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
		kperf_action_set_kcallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
	}
}
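
/*
 * The action array only grows: resizing allocates a new array, copies any
 * existing entries, initializes the new entries to their defaults (pid_filter
 * of -1, maximum callstack depths), and then frees the old array.
 */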
int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		int r;
		if ((r = kperf_init())) {
			return r;
		}
	}

	/* create a new array */
	new_actionv = kalloc_tag(count * sizeof(*new_actionv), VM_KERN_MEMORY_DIAG);
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

	for (unsigned int i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
		new_actionv[i].ucallstack_depth = MAX_CALLSTACK_FRAMES;
		new_actionv[i].kcallstack_depth = MAX_CALLSTACK_FRAMES;
	}

	actionv = new_actionv;
	actionc = count;

	if (old_actionv != NULL) {
		kfree(old_actionv, old_count * sizeof(*actionv));
	}

	return 0;
}

int
kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_CALLSTACK_FRAMES) {
		return EINVAL;
	}

	actionv[action_id - 1].ucallstack_depth = depth;

	return 0;
}

int
kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_CALLSTACK_FRAMES) {
		return EINVAL;
	}

	actionv[action_id - 1].kcallstack_depth = depth;

	return 0;
}

int
kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	if (action_id == 0) {
		*depth_out = MAX_CALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].ucallstack_depth;
	}

	return 0;
}

int
kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	if (action_id == 0) {
		*depth_out = MAX_CALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].kcallstack_depth;
	}

	return 0;
}