osfmk/kperf/action.c (apple/xnu, tag xnu-3789.21.4)
/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger. Collects the data from the requested sampler
 * modules and puts it in a buffer.
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>
#include <sys/vm.h>

#include <kperf/action.h>
#include <kperf/ast.h>
#include <kperf/buffer.h>
#include <kperf/callstack.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_kpc.h>
#include <kperf/kperf_timer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
#include <kperf/thread_samplers.h>

#define ACTION_MAX (32)

/* the list of different actions to take */
struct action
{
	uint32_t sample;
	uint32_t ucallstack_depth;
	uint32_t kcallstack_depth;
	uint32_t userdata;
	int pid_filter;
};

/* the list of actions */
static unsigned actionc = 0;
static struct action *actionv = NULL;
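/*
 * actionid values used by kperf are 1-based indices into actionv;
 * actionid 0 is reserved as the "NULL" action, meaning nothing is configured.
 */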

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;

static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
                      struct kperf_context *context,
                      unsigned sample_what, unsigned sample_flags,
                      unsigned actionid, uint32_t ucallstack_depth)
{
	int pended_ucallstack = 0;
	int pended_th_dispatch = 0;

	/* not much point continuing here, but what to do? return
	 * shutdown? cut a tracepoint and continue?
	 */
	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}

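	/*
	 * Record the current PET generation on the thread so that on-CPU
	 * (lightweight) PET sampling can skip threads already sampled in this
	 * period.
	 */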
	context->cur_thread->kperf_pet_gen = kperf_pet_gen;
	boolean_t is_kernel = (context->cur_pid == 0);

	if (actionid && actionid <= actionc) {
		sbuf->kcallstack.nframes = actionv[actionid - 1].kcallstack_depth;
	} else {
		sbuf->kcallstack.nframes = MAX_CALLSTACK_FRAMES;
	}

	if (ucallstack_depth) {
		sbuf->ucallstack.nframes = ucallstack_depth;
	} else {
		sbuf->ucallstack.nframes = MAX_CALLSTACK_FRAMES;
	}

	sbuf->kcallstack.flags = CALLSTACK_VALID;
	sbuf->ucallstack.flags = CALLSTACK_VALID;

	/* an event occurred. Sample everything and dump it in a
	 * buffer.
	 */

	/* collect data from samplers */
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);

		/* See if we should drop idle thread samples (0x40 is the idle
		 * bit in kpthi_runmode).
		 */
		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			if (sbuf->th_info.kpthi_runmode & 0x40) {
				return SAMPLE_CONTINUE;
			}
		}
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
	}
	if (sample_what & SAMPLER_KSTACK) {
		if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
			kperf_continuation_sample(&(sbuf->kcallstack), context);
		/* outside of interrupt context, backtrace the current thread */
		} else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
			kperf_backtrace_sample(&(sbuf->kcallstack), context);
		} else {
			kperf_kcallstack_sample(&(sbuf->kcallstack), context);
		}
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_sample(&(sbuf->tk_snapshot), context);
	}

	/* sensitive ones */
	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(&(sbuf->meminfo), context);
		}

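		/*
		 * With SAMPLE_FLAG_PEND_USER, user callstacks and dispatch queue
		 * information are not collected here; an AST is set on the thread
		 * and the data is gathered later by kperf_thread_ast_handler().
		 */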
		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (sample_what & SAMPLER_USTACK) {
				pended_ucallstack = kperf_ucallstack_pend(context, sbuf->ucallstack.nframes);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				pended_th_dispatch = kperf_thread_dispatch_pend(context);
			}
		} else {
			if (sample_what & SAMPLER_USTACK) {
				kperf_ucallstack_sample(&(sbuf->ucallstack), context);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				kperf_thread_dispatch_sample(&(sbuf->th_dispatch), context);
			}
		}
	}

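	/* thread and CPU PMC sampling are mutually exclusive within an action
	 * (enforced in kperf_action_set_samplers()), so at most one branch runs
	 */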
	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}

	/* lookup the user tag, if any */
	uint32_t userdata;
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	} else {
		userdata = actionid;
	}

	/* avoid logging if this sample only pended user-mode work */
	if (sample_flags & SAMPLE_FLAG_PEND_USER &&
	    !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH)))
	{
		return SAMPLE_CONTINUE;
	}

	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	         actionid, userdata, sample_flags);

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_log(&(sbuf->th_scheduling));
	}
	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_log(&(sbuf->th_snapshot));
	}
	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_log(&(sbuf->tk_snapshot));
	}

	/* dump user stuff */
	if (!is_kernel) {
		/* dump meminfo */
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_log(&(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (pended_ucallstack) {
				BUF_INFO(PERF_CS_UPEND);
			}

			if (pended_th_dispatch) {
				BUF_INFO(PERF_TI_DISPPEND);
			}
		} else {
			if (sample_what & SAMPLER_USTACK) {
				kperf_ucallstack_log(&(sbuf->ucallstack));
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				kperf_thread_dispatch_log(&(sbuf->th_dispatch));
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what);

	/* intrs back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}

/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
             struct kperf_context *context,
             unsigned actionid, unsigned sample_flags)
{
	/* work out what to sample, if anything */
	if ((actionid > actionc) || (actionid == 0)) {
		return SAMPLE_SHUTDOWN;
	}

	/* check the pid filter against the context's current pid.
	 * filter pid == -1 means any pid
	 */
	int pid_filter = actionv[actionid - 1].pid_filter;
	if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
		return SAMPLE_CONTINUE;
	}

	/* the samplers to run */
	unsigned int sample_what = actionv[actionid - 1].sample;

	/* do the actual sample operation */
	return kperf_sample_internal(sbuf, context, sample_what,
	                             sample_flags, actionid,
	                             actionv[actionid - 1].ucallstack_depth);
}

void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
	uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
	struct kperf_context ctx;
	struct kperf_sample *sample = NULL;
	kern_return_t kr = KERN_SUCCESS;
	int s;

	if (!kperf_kdebug_should_trigger(debugid)) {
		return;
	}

	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

	ctx.cur_thread = current_thread();
	ctx.cur_pid = task_pid(get_threadtask(ctx.cur_thread));
	ctx.trigger_type = TRIGGER_TYPE_KDEBUG;
	ctx.trigger_id = 0;

	s = ml_set_interrupts_enabled(0);

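	/*
	 * Keeping interrupts disabled protects the per-CPU interrupt sample
	 * buffer from being reused by another trigger firing on this CPU.
	 */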
	sample = kperf_intr_sample_buffer();

	if (!ml_at_interrupt_context()) {
		sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
		ctx.starting_fp = starting_fp;
	}

	kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

	ml_set_interrupts_enabled(s);
	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}

/*
 * This function allocates >2.3KB of the stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, kperf_get_thread_flags(thread));

	/* ~2KB of the stack for the sample since this is called from AST */
	struct kperf_sample sbuf;
	memset(&sbuf, 0, sizeof(struct kperf_sample));

	task_t task = get_threadtask(thread);

	if (task_did_exec(task) || task_is_exec_copy(task)) {
		BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
		return;
	}

	/* make a context, take a sample */
	struct kperf_context ctx;
	ctx.cur_thread = thread;
	ctx.cur_pid = task_pid(task);

	/* decode the flags to determine what to sample */
	unsigned int sample_what = 0;
	uint32_t flags = kperf_get_thread_flags(thread);

	if (flags & T_KPERF_AST_DISPATCH) {
		sample_what |= SAMPLER_TH_DISPATCH;
	}
	if (flags & T_KPERF_AST_CALLSTACK) {
		sample_what |= SAMPLER_USTACK;
		sample_what |= SAMPLER_TH_INFO;
	}

	uint32_t ucallstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(flags);

	int r = kperf_sample_internal(&sbuf, &ctx, sample_what, 0, 0, ucallstack_depth);

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, r);
}

/* register AST bits */
int
kperf_ast_pend(thread_t thread, uint32_t set_flags)
{
	/* can only pend on the current thread */
	if (thread != current_thread()) {
		panic("pending to non-current thread");
	}

	/* get our current bits */
	uint32_t flags = kperf_get_thread_flags(thread);

	/* see if it's already been done or pended */
	if (!(flags & set_flags)) {
		/* set the bit on the thread */
		flags |= set_flags;
		kperf_set_thread_flags(thread, flags);

		/* set the actual AST */
		act_set_kperf(thread);
		return 1;
	}

	return 0;
}

void
kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
{
	uint32_t ast_flags = kperf_get_thread_flags(thread);
	uint32_t existing_callstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast_flags);

	if (existing_callstack_depth != depth) {
		/* clear the old depth bits before setting the new depth */
		ast_flags &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_callstack_depth);
		ast_flags |= T_KPERF_SET_CALLSTACK_DEPTH(depth);

		kperf_set_thread_flags(thread, ast_flags);
	}
}

int
kperf_kdbg_cswitch_get(void)
{
	return kperf_kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
	kperf_kdebug_cswitch = newval;
	kperf_on_cpu_update();

	return 0;
}

/*
 * Action configuration
 */
unsigned int
kperf_action_get_count(void)
{
	return actionc;
}

int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	/* disallow both CPU and thread counters to be sampled in the same
	 * action */
	if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
		return EINVAL;
	}

	actionv[actionid - 1].sample = samplers;

	return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*samplers_out = 0; /* "NULL" action */
	} else {
		*samplers_out = actionv[actionid - 1].sample;
	}

	return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].userdata = userdata;

	return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*userdata_out = 0; /* "NULL" action */
	} else {
		*userdata_out = actionv[actionid - 1].userdata;
	}

	return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].pid_filter = pid;

	return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*pid_out = -1; /* "NULL" action */
	} else {
		*pid_out = actionv[actionid - 1].pid_filter;
	}

	return 0;
}

void
kperf_action_reset(void)
{
	for (unsigned int i = 0; i < actionc; i++) {
		kperf_action_set_samplers(i + 1, 0);
		kperf_action_set_userdata(i + 1, 0);
		kperf_action_set_filter(i + 1, -1);
		kperf_action_set_ucallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
		kperf_action_set_kcallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
	}
}

int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		int r;
		if ((r = kperf_init())) {
			return r;
		}
	}

	/* create a new array */
	new_actionv = kalloc_tag(count * sizeof(*new_actionv), VM_KERN_MEMORY_DIAG);
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

	for (unsigned int i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
		new_actionv[i].ucallstack_depth = MAX_CALLSTACK_FRAMES;
		new_actionv[i].kcallstack_depth = MAX_CALLSTACK_FRAMES;
	}

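	/* install the new array before freeing the old one */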
	actionv = new_actionv;
	actionc = count;

	if (old_actionv != NULL) {
		kfree(old_actionv, old_count * sizeof(*actionv));
	}

	return 0;
}

int
kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_CALLSTACK_FRAMES) {
		return EINVAL;
	}

	actionv[action_id - 1].ucallstack_depth = depth;

	return 0;
}

int
kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_CALLSTACK_FRAMES) {
		return EINVAL;
	}

	actionv[action_id - 1].kcallstack_depth = depth;

	return 0;
}

int
kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_CALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].ucallstack_depth;
	}

	return 0;
}

int
kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_CALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].kcallstack_depth;
	}

	return 0;
}