/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger. Takes the data from the different sampler
 * modules and puts it in a buffer.
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>
#include <sys/vm.h>

#include <kperf/action.h>
#include <kperf/ast.h>
#include <kperf/buffer.h>
#include <kperf/callstack.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_kpc.h>
#include <kperf/kperf_timer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
#include <kperf/thread_samplers.h>

#define ACTION_MAX (32)

/* an action describes what to sample when a trigger fires */
struct action
{
	uint32_t sample;           /* mask of SAMPLER_* bits to collect */
	uint32_t ucallstack_depth; /* max user callstack frames */
	uint32_t kcallstack_depth; /* max kernel callstack frames */
	uint32_t userdata;         /* opaque tag logged with each sample */
	int pid_filter;            /* only sample this pid; -1 means any pid */
};

/* the list of actions */
static unsigned actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;

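/*
 * Gather everything requested by sample_what into sbuf and then log it to
 * the trace buffer.  actionid picks the action whose kernel callstack depth
 * and userdata tag apply (0 or an out-of-range id falls back to
 * MAX_CALLSTACK_FRAMES and logs the actionid itself as the tag); the user
 * callstack depth is passed separately so the AST handler can supply its own.
 */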
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
                      struct kperf_context *context,
                      unsigned sample_what, unsigned sample_flags,
                      unsigned actionid, uint32_t ucallstack_depth)
{
	int pended_ucallstack = 0;
	int pended_th_dispatch = 0;

	/* nothing to sample, so there is not much point continuing;
	 * treat the sample as done
	 */
	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}

	/* mark the thread as sampled for the current PET generation */
	context->cur_thread->kperf_pet_gen = kperf_pet_gen;
	boolean_t is_kernel = (context->cur_pid == 0);

	if (actionid && actionid <= actionc) {
		sbuf->kcallstack.nframes = actionv[actionid - 1].kcallstack_depth;
	} else {
		sbuf->kcallstack.nframes = MAX_CALLSTACK_FRAMES;
	}

	if (ucallstack_depth) {
		sbuf->ucallstack.nframes = ucallstack_depth;
	} else {
		sbuf->ucallstack.nframes = MAX_CALLSTACK_FRAMES;
	}

	sbuf->kcallstack.flags = CALLSTACK_VALID;
	sbuf->ucallstack.flags = CALLSTACK_VALID;

	/* an event occurred. Sample everything and dump it in a
	 * buffer.
	 */

	/* collect data from samplers */
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);

		/* See if we should drop idle thread samples */
		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			if (sbuf->th_info.kpthi_runmode & 0x40) {
				return SAMPLE_CONTINUE;
			}
		}
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
	}
	if (sample_what & SAMPLER_KSTACK) {
		if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
			kperf_continuation_sample(&(sbuf->kcallstack), context);
		/* outside of interrupt context, backtrace the current thread */
		} else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
			kperf_backtrace_sample(&(sbuf->kcallstack), context);
		} else {
			kperf_kcallstack_sample(&(sbuf->kcallstack), context);
		}
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_sample(&(sbuf->tk_snapshot), context);
	}

	/* sensitive samplers that only apply to user tasks */
	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(&(sbuf->meminfo), context);
		}

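		/*
		 * User memory can't be touched safely from interrupt context, so
		 * when the caller sets SAMPLE_FLAG_PEND_USER the user callstack and
		 * dispatch queue samplers are deferred: an AST is pended on the
		 * thread and kperf_thread_ast_handler collects them on the way back
		 * to user space.
		 */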
		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (sample_what & SAMPLER_USTACK) {
				pended_ucallstack = kperf_ucallstack_pend(context, sbuf->ucallstack.nframes);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				pended_th_dispatch = kperf_thread_dispatch_pend(context);
			}
		} else {
			if (sample_what & SAMPLER_USTACK) {
				kperf_ucallstack_sample(&(sbuf->ucallstack), context);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				kperf_thread_dispatch_sample(&(sbuf->th_dispatch), context);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}

	/* lookup the user tag, if any */
	uint32_t userdata;
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	} else {
		userdata = actionid;
	}

	/* avoid logging if this sample only pended samples */
	if ((sample_flags & SAMPLE_FLAG_PEND_USER) &&
	    !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH)))
	{
		return SAMPLE_CONTINUE;
	}

	/* stash the data into the trace buffer, with interrupts off so the
	 * sample's records don't get split by another sample
	 */
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	         actionid, userdata, sample_flags);

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_log(&(sbuf->th_scheduling));
	}
	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_log(&(sbuf->th_snapshot));
	}
	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_log(&(sbuf->tk_snapshot));
	}

	/* dump user stuff */
	if (!is_kernel) {
		/* dump meminfo */
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_log(&(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (pended_ucallstack) {
				BUF_INFO(PERF_CS_UPEND);
			}

			if (pended_th_dispatch) {
				BUF_INFO(PERF_TI_DISPPEND);
			}
		} else {
			if (sample_what & SAMPLER_USTACK) {
				kperf_ucallstack_log(&(sbuf->ucallstack));
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				kperf_thread_dispatch_log(&(sbuf->th_dispatch));
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what);

	/* intrs back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}

/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
             struct kperf_context *context,
             unsigned actionid, unsigned sample_flags)
{
	/* work out what to sample, if anything */
	if ((actionid > actionc) || (actionid == 0)) {
		return SAMPLE_SHUTDOWN;
	}

	/* check the pid filter against the context's current pid.
	 * filter pid == -1 means any pid
	 */
	int pid_filter = actionv[actionid - 1].pid_filter;
	if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
		return SAMPLE_CONTINUE;
	}

	/* the samplers to run */
	unsigned int sample_what = actionv[actionid - 1].sample;

	/* do the actual sample operation */
	return kperf_sample_internal(sbuf, context, sample_what,
	                             sample_flags, actionid,
	                             actionv[actionid - 1].ucallstack_depth);
}

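/*
 * Called from the kdebug path when an emitted debugid matches the kperf
 * kdebug trigger filter.  Runs the configured kdebug action against the
 * current thread, with user-space samplers pended to the AST.  Outside of
 * interrupt context, the kernel callstack is walked from starting_fp.
 */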
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
	uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
	struct kperf_context ctx;
	struct kperf_sample *sample = NULL;
	kern_return_t kr = KERN_SUCCESS;
	int s;

	if (!kperf_kdebug_should_trigger(debugid)) {
		return;
	}

	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

	ctx.cur_thread = current_thread();
	ctx.cur_pid = task_pid(get_threadtask(ctx.cur_thread));
	ctx.trigger_type = TRIGGER_TYPE_KDEBUG;
	ctx.trigger_id = 0;

	s = ml_set_interrupts_enabled(0);

	sample = kperf_intr_sample_buffer();

	if (!ml_at_interrupt_context()) {
		sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
		ctx.starting_fp = starting_fp;
	}

	kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

	ml_set_interrupts_enabled(s);
	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}

/*
 * This function allocates >2.3KB of the stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, kperf_get_thread_flags(thread));

	/* ~2KB of the stack for the sample since this is called from AST */
	struct kperf_sample sbuf;
	memset(&sbuf, 0, sizeof(struct kperf_sample));

	task_t task = get_threadtask(thread);

	/* make a context, take a sample */
	struct kperf_context ctx;
	ctx.cur_thread = thread;
	ctx.cur_pid = task_pid(task);

	/* decode the flags to determine what to sample */
	unsigned int sample_what = 0;
	uint32_t flags = kperf_get_thread_flags(thread);

	if (flags & T_KPERF_AST_DISPATCH) {
		sample_what |= SAMPLER_TH_DISPATCH;
	}
	if (flags & T_KPERF_AST_CALLSTACK) {
		sample_what |= SAMPLER_USTACK;
		sample_what |= SAMPLER_TH_INFO;
	}

	uint32_t ucallstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(flags);

	int r = kperf_sample_internal(&sbuf, &ctx, sample_what, 0, 0, ucallstack_depth);

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, r);
}

/*
 * Register AST bits on the current thread.  Returns 1 if the bits were newly
 * set (and the kperf AST armed), 0 if they were already pending.
 */
int
kperf_ast_pend(thread_t thread, uint32_t set_flags)
{
	/* can only pend on the current thread */
	if (thread != current_thread()) {
		panic("pending to non-current thread");
	}

	/* get our current bits */
	uint32_t flags = kperf_get_thread_flags(thread);

	/* see if it's already been done or pended */
	if (!(flags & set_flags)) {
		/* set the bit on the thread */
		flags |= set_flags;
		kperf_set_thread_flags(thread, flags);

		/* set the actual AST */
		act_set_kperf(thread);
		return 1;
	}

	return 0;
}

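/*
 * Encode the requested user callstack depth into the thread's kperf AST
 * flags, so the AST handler knows how many frames to capture.
 */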
void
kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
{
	uint32_t ast_flags = kperf_get_thread_flags(thread);
	uint32_t existing_callstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast_flags);

	if (existing_callstack_depth != depth) {
		/* clear the old depth bits before encoding the new depth */
		ast_flags &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_callstack_depth);
		ast_flags |= T_KPERF_SET_CALLSTACK_DEPTH(depth);

		kperf_set_thread_flags(thread, ast_flags);
	}
}

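/*
 * Get and set whether context switches emit kperf tracepoints; the setter
 * calls kperf_on_cpu_update() so the context switch hook state is refreshed.
 */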
int
kperf_kdbg_cswitch_get(void)
{
	return kperf_kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
	kperf_kdebug_cswitch = newval;
	kperf_on_cpu_update();

	return 0;
}

/*
 * Action configuration.  Action ids are 1-based indexes into actionv;
 * id 0 is reserved as the "NULL" action.
 */
unsigned int
kperf_action_get_count(void)
{
	return actionc;
}

int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	/* disallow both CPU and thread counters to be sampled in the same
	 * action */
	if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
		return EINVAL;
	}

	actionv[actionid - 1].sample = samplers;

	return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*samplers_out = 0; /* "NULL" action */
	} else {
		*samplers_out = actionv[actionid - 1].sample;
	}

	return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].userdata = userdata;

	return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*userdata_out = 0; /* "NULL" action */
	} else {
		*userdata_out = actionv[actionid - 1].userdata;
	}

	return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].pid_filter = pid;

	return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*pid_out = -1; /* "NULL" action */
	} else {
		*pid_out = actionv[actionid - 1].pid_filter;
	}

	return 0;
}

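/*
 * Reset every action to its default state: no samplers, no userdata, no pid
 * filter, and the maximum callstack depths.
 */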
void
kperf_action_reset(void)
{
	for (unsigned int i = 0; i < actionc; i++) {
		kperf_action_set_samplers(i + 1, 0);
		kperf_action_set_userdata(i + 1, 0);
		kperf_action_set_filter(i + 1, -1);
		kperf_action_set_ucallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
		kperf_action_set_kcallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
	}
}

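/*
 * The action array can only grow, up to ACTION_MAX.  Growing preserves the
 * existing actions and initializes the new slots with no samplers, no pid
 * filter, and the maximum callstack depths.
 */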
int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		int r;
		if ((r = kperf_init())) {
			return r;
		}
	}

	/* create a new array */
	new_actionv = kalloc_tag(count * sizeof(*new_actionv), VM_KERN_MEMORY_DIAG);
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

	for (unsigned int i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
		new_actionv[i].ucallstack_depth = MAX_CALLSTACK_FRAMES;
		new_actionv[i].kcallstack_depth = MAX_CALLSTACK_FRAMES;
	}

	actionv = new_actionv;
	actionc = count;

	if (old_actionv != NULL) {
		kfree(old_actionv, old_count * sizeof(*actionv));
	}

	return 0;
}

int
kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_CALLSTACK_FRAMES) {
		return EINVAL;
	}

	actionv[action_id - 1].ucallstack_depth = depth;

	return 0;
}

int
kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_CALLSTACK_FRAMES) {
		return EINVAL;
	}

	actionv[action_id - 1].kcallstack_depth = depth;

	return 0;
}

int
kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_CALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].ucallstack_depth;
	}

	return 0;
}

int
kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_CALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].kcallstack_depth;
	}

	return 0;
}