/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger: collects the data from the configured sampler
 * modules and emits it into the trace buffer.
 */
33
34 #include <mach/mach_types.h>
35 #include <machine/machine_routines.h>
36 #include <kern/kalloc.h>
37 #include <kern/debug.h> /* panic */
38 #include <kern/thread.h>
39 #include <sys/errno.h>
40 #include <sys/vm.h>
41 #include <vm/vm_object.h>
42 #include <vm/vm_page.h>
43 #include <vm/vm_pageout.h>
44
45 #include <kperf/action.h>
46 #include <kperf/ast.h>
47 #include <kperf/buffer.h>
48 #include <kperf/callstack.h>
49 #include <kperf/context.h>
50 #include <kperf/kdebug_trigger.h>
51 #include <kperf/kperf.h>
52 #include <kperf/kperf_kpc.h>
53 #include <kperf/kperf_timer.h>
54 #include <kperf/pet.h>
55 #include <kperf/sample.h>
56 #include <kperf/thread_samplers.h>
57
#define ACTION_MAX (32)

/* the configuration of a single action to take when sampling */
struct action {
    uint32_t sample;
    uint32_t ucallstack_depth;
    uint32_t kcallstack_depth;
    uint32_t userdata;
    int pid_filter;
};

/* the list of actions */
static unsigned int actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;

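/*
 * Helpers for checking whether an action's configured samplers require more
 * than system-wide data, or touch task- or thread-scoped state.
 */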
bool
kperf_action_has_non_system(unsigned int actionid)
{
    if (actionid > actionc) {
        return false;
    }

    if (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) {
        return true;
    } else {
        return false;
    }
}

bool
kperf_action_has_task(unsigned int actionid)
{
    if (actionid > actionc) {
        return false;
    }

    return actionv[actionid - 1].sample & SAMPLER_TASK_MASK;
}

bool
kperf_action_has_thread(unsigned int actionid)
{
    if (actionid > actionc) {
        return false;
    }

    return actionv[actionid - 1].sample & SAMPLER_THREAD_MASK;
}

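/* log the current system-wide memory counters to the trace buffer */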
static void
kperf_system_memory_log(void)
{
    BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
        (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
        (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
        vm_page_speculative_count));
    BUF_DATA(PERF_MI_SYS_DATA_2, (uintptr_t)vm_page_anonymous_count,
        (uintptr_t)vm_page_internal_count,
        (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions,
        (uintptr_t)VM_PAGE_COMPRESSOR_COUNT);
}

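/*
 * Run the requested user-space samplers and log their results as a single
 * PERF_GEN_EVENT, with interrupts disabled so the event cannot be split.
 */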
static void
kperf_sample_user_internal(struct kperf_usample *sbuf,
    struct kperf_context *context, unsigned int actionid,
    unsigned int sample_what)
{
    if (sample_what & SAMPLER_USTACK) {
        kperf_ucallstack_sample(&sbuf->ucallstack, context);
    }
    if (sample_what & SAMPLER_TH_DISPATCH) {
        kperf_thread_dispatch_sample(&sbuf->th_dispatch, context);
    }
    if (sample_what & SAMPLER_TH_INFO) {
        kperf_thread_info_sample(&sbuf->th_info, context);
    }

    boolean_t intren = ml_set_interrupts_enabled(FALSE);

    /*
     * No userdata or sample_flags for this one.
     */
    BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what, actionid);

    if (sample_what & SAMPLER_USTACK) {
        kperf_ucallstack_log(&sbuf->ucallstack);
    }
    if (sample_what & SAMPLER_TH_DISPATCH) {
        kperf_thread_dispatch_log(&sbuf->th_dispatch);
    }
    if (sample_what & SAMPLER_TH_INFO) {
        kperf_thread_info_log(&sbuf->th_info);
    }

    BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what);

    ml_set_interrupts_enabled(intren);
}

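/*
 * Take a sample of only the user-space samplers configured for an action,
 * honoring the sample_flags restrictions (no callstacks, system-only,
 * thread-only, or task-only).
 */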
void
kperf_sample_user(struct kperf_usample *sbuf, struct kperf_context *context,
    unsigned int actionid, unsigned int sample_flags)
{
    if (actionid == 0 || actionid > actionc) {
        return;
    }

    unsigned int sample_what = actionv[actionid - 1].sample;
    unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;

    /* callers can explicitly request that callstacks be ignored */
    if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
        sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
    }
    if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
        sample_what &= SAMPLER_SYS_MEM;
    }
    assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
        != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
    if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
        sample_what &= SAMPLER_THREAD_MASK;
    }
    if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
        sample_what &= SAMPLER_TASK_MASK;
    }

    if (sample_what == 0) {
        return;
    }

    sbuf->ucallstack.kpuc_nframes = ucallstack_depth ?:
        MAX_UCALLSTACK_FRAMES;

    kperf_sample_user_internal(sbuf, context, actionid, sample_what);
}

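/*
 * Run the samplers for a kernel-context sample: filter sample_what by the
 * sample_flags, collect the data, then log it with interrupts disabled.
 * User-space samplers are only pended here (SAMPLE_FLAG_PEND_USER) and run
 * later from the AST handler.
 */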
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned sample_what, unsigned sample_flags,
    unsigned actionid, unsigned ucallstack_depth)
{
    int pended_ucallstack = 0;
    int pended_th_dispatch = 0;
    bool on_idle_thread = false;
    uint32_t userdata = actionid;
    bool task_only = false;

    if (sample_what == 0) {
        return SAMPLE_CONTINUE;
    }

    /* callers can explicitly request that callstacks be ignored */
    if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
        sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
    }

    if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
        sample_what &= SAMPLER_SYS_MEM;
    }

    assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
        != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
    if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
        sample_what &= SAMPLER_THREAD_MASK;
    }
    if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
        task_only = true;
        sample_what &= SAMPLER_TASK_MASK;
    }

    if (!task_only) {
        /* note the current PET generation, so PET does not immediately
         * re-sample this thread
         */
        context->cur_thread->kperf_pet_gen = kperf_pet_gen;
    }
    bool is_kernel = (context->cur_pid == 0);

    if (actionid && actionid <= actionc) {
        sbuf->kcallstack.kpkc_nframes =
            actionv[actionid - 1].kcallstack_depth;
    } else {
        sbuf->kcallstack.kpkc_nframes = MAX_KCALLSTACK_FRAMES;
    }

    ucallstack_depth = ucallstack_depth ?: MAX_UCALLSTACK_FRAMES;
    sbuf->kcallstack.kpkc_flags = 0;
    sbuf->usample.ucallstack.kpuc_flags = 0;

    if (sample_what & SAMPLER_TH_INFO) {
        kperf_thread_info_sample(&sbuf->th_info, context);

        if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
            if (sbuf->th_info.kpthi_runmode & 0x40) {
                /* the thread is idle, so skip the remaining samplers */
                on_idle_thread = true;
                goto log_sample;
            }
        }
    }

    if (sample_what & SAMPLER_TH_SNAPSHOT) {
        kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
    }
    if (sample_what & SAMPLER_TH_SCHEDULING) {
        kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
    }
    if (sample_what & SAMPLER_KSTACK) {
        if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
            kperf_continuation_sample(&(sbuf->kcallstack), context);
        } else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
            /* outside of interrupt context, backtrace the current thread */
            kperf_backtrace_sample(&(sbuf->kcallstack), context);
        } else {
            kperf_kcallstack_sample(&(sbuf->kcallstack), context);
        }
    }
    if (sample_what & SAMPLER_TK_SNAPSHOT) {
        kperf_task_snapshot_sample(context->cur_task, &(sbuf->tk_snapshot));
    }

    if (!is_kernel) {
        if (sample_what & SAMPLER_MEMINFO) {
            kperf_meminfo_sample(context->cur_task, &(sbuf->meminfo));
        }

        if (sample_flags & SAMPLE_FLAG_PEND_USER) {
            if (sample_what & SAMPLER_USTACK) {
                pended_ucallstack = kperf_ucallstack_pend(context,
                    ucallstack_depth, actionid);
            }

            if (sample_what & SAMPLER_TH_DISPATCH) {
                pended_th_dispatch =
                    kperf_thread_dispatch_pend(context, actionid);
            }
        }
    }

    if (sample_what & SAMPLER_PMC_THREAD) {
        kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
    } else if (sample_what & SAMPLER_PMC_CPU) {
        kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
    }

log_sample:
    /* lookup the user tag, if any */
    if (actionid && (actionid <= actionc)) {
        userdata = actionv[actionid - 1].userdata;
    }

    /* avoid logging if this sample only pended samples */
    if (sample_flags & SAMPLE_FLAG_PEND_USER &&
        !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) {
        return SAMPLE_CONTINUE;
    }

    /* stash the data into the buffer
     * interrupts off to ensure we don't get split
     */
    boolean_t enabled = ml_set_interrupts_enabled(FALSE);

    BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
        actionid, userdata, sample_flags);

    if (sample_flags & SAMPLE_FLAG_SYSTEM) {
        if (sample_what & SAMPLER_SYS_MEM) {
            kperf_system_memory_log();
        }
    }
    if (on_idle_thread) {
        goto log_sample_end;
    }

    if (sample_what & SAMPLER_TH_INFO) {
        kperf_thread_info_log(&sbuf->th_info);
    }
    if (sample_what & SAMPLER_TH_SCHEDULING) {
        kperf_thread_scheduling_log(&(sbuf->th_scheduling));
    }
    if (sample_what & SAMPLER_TH_SNAPSHOT) {
        kperf_thread_snapshot_log(&(sbuf->th_snapshot));
    }
    if (sample_what & SAMPLER_KSTACK) {
        kperf_kcallstack_log(&sbuf->kcallstack);
    }
    if (sample_what & SAMPLER_TH_INSCYC) {
        kperf_thread_inscyc_log(context);
    }
    if (sample_what & SAMPLER_TK_SNAPSHOT) {
        kperf_task_snapshot_log(&(sbuf->tk_snapshot));
    }
    if (sample_what & SAMPLER_TK_INFO) {
        kperf_task_info_log(context);
    }

    /* dump user stuff */
    if (!is_kernel) {
        /* dump meminfo */
        if (sample_what & SAMPLER_MEMINFO) {
            kperf_meminfo_log(&(sbuf->meminfo));
        }

        if (sample_flags & SAMPLE_FLAG_PEND_USER) {
            if (pended_ucallstack) {
                BUF_INFO(PERF_CS_UPEND);
            }

            if (pended_th_dispatch) {
                BUF_INFO(PERF_TI_DISPPEND);
            }
        }
    }

    if (sample_what & SAMPLER_PMC_CONFIG) {
        kperf_kpc_config_log(&(sbuf->kpcdata));
    }
    if (sample_what & SAMPLER_PMC_THREAD) {
        kperf_kpc_thread_log(&(sbuf->kpcdata));
    } else if (sample_what & SAMPLER_PMC_CPU) {
        kperf_kpc_cpu_log(&(sbuf->kpcdata));
    }

log_sample_end:
    BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, on_idle_thread ? 1 : 0);

    /* intrs back on */
    ml_set_interrupts_enabled(enabled);

    return SAMPLE_CONTINUE;
}

/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned actionid, unsigned sample_flags)
{
    /* work out what to sample, if anything */
    if ((actionid > actionc) || (actionid == 0)) {
        return SAMPLE_SHUTDOWN;
    }

    /* check the pid filter against the context's current pid.
     * filter pid == -1 means any pid
     */
    int pid_filter = actionv[actionid - 1].pid_filter;
    if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
        return SAMPLE_CONTINUE;
    }

    /* the samplers to run */
    unsigned int sample_what = actionv[actionid - 1].sample;
    unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;

    /* do the actual sample operation */
    return kperf_sample_internal(sbuf, context, sample_what,
        sample_flags, actionid, ucallstack_depth);
}

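/*
 * Called when a kdebug tracepoint configured as a kperf trigger fires: take
 * a sample with the configured kdebug action in the current thread's context.
 */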
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
    uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
    struct kperf_sample *sample = NULL;
    kern_return_t kr = KERN_SUCCESS;
    int s;

    if (!kperf_kdebug_should_trigger(debugid)) {
        return;
    }

    BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

    thread_t thread = current_thread();
    task_t task = get_threadtask(thread);
    struct kperf_context ctx = {
        .cur_thread = thread,
        .cur_task = task,
        .cur_pid = task_pid(task),
        .trigger_type = TRIGGER_TYPE_KDEBUG,
        .trigger_id = 0,
    };

    s = ml_set_interrupts_enabled(0);

    sample = kperf_intr_sample_buffer();

    if (!ml_at_interrupt_context()) {
        sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
        ctx.starting_fp = starting_fp;
    }

    kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

    ml_set_interrupts_enabled(s);
    BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}

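/*
 * Run from the AST in the target thread's own context to take the
 * user-space samples (user callstack, dispatch queue information) that
 * were pended by kperf_sample_internal.
 */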
/*
 * This function allocates >2.3KB of the stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
    uint32_t ast = thread->kperf_ast;

    BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, ast);

    struct kperf_usample sbuf = {};

    task_t task = get_threadtask(thread);

    if (task_did_exec(task) || task_is_exec_copy(task)) {
        BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
        return;
    }

    struct kperf_context ctx = {
        .cur_thread = thread,
        .cur_task = task,
        .cur_pid = task_pid(task),
    };

    unsigned int sample_what = 0;
    if (ast & T_KPERF_AST_DISPATCH) {
        sample_what |= SAMPLER_TH_DISPATCH;
    }
    if (ast & T_KPERF_AST_CALLSTACK) {
        /* TH_INFO for backwards compatibility */
        sample_what |= SAMPLER_USTACK | SAMPLER_TH_INFO;
    }

    sbuf.ucallstack.kpuc_nframes =
        T_KPERF_GET_CALLSTACK_DEPTH(ast) ?: MAX_UCALLSTACK_FRAMES;
    unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
    kperf_sample_user_internal(&sbuf, &ctx, actionid, sample_what);

    BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END);
}

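/*
 * Pend a kperf AST on the current thread, merging the requested flags and
 * actionid into thread->kperf_ast. Returns 1 if the AST was newly set or
 * changed, 0 if an equivalent AST was already pending.
 */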
int
kperf_ast_pend(thread_t thread, uint32_t set_flags, unsigned int set_actionid)
{
    if (thread != current_thread()) {
        panic("kperf: pending AST to non-current thread");
    }

    uint32_t ast = thread->kperf_ast;
    unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
    uint32_t flags = ast & T_KPERF_AST_ALL;

    if ((flags | set_flags) != flags || actionid != set_actionid) {
        ast &= ~T_KPERF_SET_ACTIONID(actionid);
        ast |= T_KPERF_SET_ACTIONID(set_actionid);
        ast |= set_flags;

        thread->kperf_ast = ast;

        /* set the actual AST */
        act_set_kperf(thread);
        return 1;
    }

    return 0;
}

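/*
 * Raise the user callstack depth encoded in a pending kperf AST, never
 * lowering a depth that another trigger already requested.
 */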
void
kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
{
    uint32_t ast = thread->kperf_ast;
    uint32_t existing_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast);
    if (existing_depth < depth) {
        ast &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_depth);
        ast |= T_KPERF_SET_CALLSTACK_DEPTH(depth);
        thread->kperf_ast = ast;
    }
}

int
kperf_kdbg_cswitch_get(void)
{
    return kperf_kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
    kperf_kdebug_cswitch = newval;
    kperf_on_cpu_update();

    return 0;
}

/*
 * Action configuration
 */
unsigned int
kperf_action_get_count(void)
{
    return actionc;
}

int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
    if ((actionid > actionc) || (actionid == 0)) {
        return EINVAL;
    }

    /* disallow both CPU and thread counters to be sampled in the same
     * action */
    if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
        return EINVAL;
    }

    actionv[actionid - 1].sample = samplers;

    return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
    if ((actionid > actionc)) {
        return EINVAL;
    }

    if (actionid == 0) {
        *samplers_out = 0; /* "NULL" action */
    } else {
        *samplers_out = actionv[actionid - 1].sample;
    }

    return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
    if ((actionid > actionc) || (actionid == 0)) {
        return EINVAL;
    }

    actionv[actionid - 1].userdata = userdata;

    return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
    if ((actionid > actionc)) {
        return EINVAL;
    }

    if (actionid == 0) {
        *userdata_out = 0; /* "NULL" action */
    } else {
        *userdata_out = actionv[actionid - 1].userdata;
    }

    return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
    if ((actionid > actionc) || (actionid == 0)) {
        return EINVAL;
    }

    actionv[actionid - 1].pid_filter = pid;

    return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
    if ((actionid > actionc)) {
        return EINVAL;
    }

    if (actionid == 0) {
        *pid_out = -1; /* "NULL" action */
    } else {
        *pid_out = actionv[actionid - 1].pid_filter;
    }

    return 0;
}

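/* reset every action to its default, empty configuration */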
void
kperf_action_reset(void)
{
    for (unsigned int i = 0; i < actionc; i++) {
        kperf_action_set_samplers(i + 1, 0);
        kperf_action_set_userdata(i + 1, 0);
        kperf_action_set_filter(i + 1, -1);
        kperf_action_set_ucallstack_depth(i + 1, MAX_UCALLSTACK_FRAMES);
        kperf_action_set_kcallstack_depth(i + 1, MAX_KCALLSTACK_FRAMES);
    }
}

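/*
 * Grow the action array to `count` entries, preserving any existing actions
 * and initializing the new entries to their defaults. Shrinking is not
 * supported.
 */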
int
kperf_action_set_count(unsigned count)
{
    struct action *new_actionv = NULL, *old_actionv = NULL;
    unsigned old_count;

    /* easy no-op */
    if (count == actionc) {
        return 0;
    }

    /* TODO: allow shrinking? */
    if (count < actionc) {
        return EINVAL;
    }

    /* cap it for good measure */
    if (count > ACTION_MAX) {
        return EINVAL;
    }

    /* creating the action array for the first time, so initialize the rest
     * of kperf, too
     */
    if (actionc == 0) {
        int r;
        if ((r = kperf_init())) {
            return r;
        }
    }

    /* create a new array */
    new_actionv = kalloc_tag(count * sizeof(*new_actionv), VM_KERN_MEMORY_DIAG);
    if (new_actionv == NULL) {
        return ENOMEM;
    }

    old_actionv = actionv;
    old_count = actionc;

    if (old_actionv != NULL) {
        memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
    }

    memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

    for (unsigned int i = old_count; i < count; i++) {
        new_actionv[i].pid_filter = -1;
        new_actionv[i].ucallstack_depth = MAX_UCALLSTACK_FRAMES;
        new_actionv[i].kcallstack_depth = MAX_KCALLSTACK_FRAMES;
    }

    actionv = new_actionv;
    actionc = count;

    if (old_actionv != NULL) {
        kfree(old_actionv, old_count * sizeof(*actionv));
    }

    return 0;
}

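/*
 * Configure how many frames to capture for an action's user and kernel
 * callstacks, bounded by the MAX_UCALLSTACK_FRAMES and
 * MAX_KCALLSTACK_FRAMES limits.
 */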
int
kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
{
    if ((action_id > actionc) || (action_id == 0)) {
        return EINVAL;
    }

    if (depth > MAX_UCALLSTACK_FRAMES) {
        return EINVAL;
    }
    if (depth < 2) {
        return EINVAL;
    }

    actionv[action_id - 1].ucallstack_depth = depth;

    return 0;
}

int
kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
{
    if ((action_id > actionc) || (action_id == 0)) {
        return EINVAL;
    }

    if (depth > MAX_KCALLSTACK_FRAMES) {
        return EINVAL;
    }
    if (depth < 1) {
        return EINVAL;
    }

    actionv[action_id - 1].kcallstack_depth = depth;

    return 0;
}

int
kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t *depth_out)
{
    if ((action_id > actionc)) {
        return EINVAL;
    }

    assert(depth_out);

    if (action_id == 0) {
        *depth_out = MAX_UCALLSTACK_FRAMES;
    } else {
        *depth_out = actionv[action_id - 1].ucallstack_depth;
    }

    return 0;
}

int
kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t *depth_out)
{
    if ((action_id > actionc)) {
        return EINVAL;
    }

    assert(depth_out);

    if (action_id == 0) {
        *depth_out = MAX_KCALLSTACK_FRAMES;
    } else {
        *depth_out = actionv[action_id - 1].kcallstack_depth;
    }

    return 0;
}