/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger. Collects the data from the different samplers and
 * puts it in a buffer.
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <kperf/action.h>
#include <kperf/ast.h>
#include <kperf/buffer.h>
#include <kperf/callstack.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_kpc.h>
#include <kperf/kptimer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
#include <kperf/thread_samplers.h>

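/* upper bound on the number of actions that can be configured at once */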
#define ACTION_MAX (32)

/* the configuration for a single action */
struct action {
	uint32_t sample;           /* bitmask of SAMPLER_* samplers to run */
	uint32_t ucallstack_depth; /* maximum user callstack frames to record */
	uint32_t kcallstack_depth; /* maximum kernel callstack frames to record */
	uint32_t userdata;         /* opaque tag logged with each sample */
	int pid_filter;            /* only sample this pid, or -1 for any pid */
};

/* the list of actions */
static unsigned int actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;

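/* Does this action sample anything beyond the system-wide memory counters? */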
bool
kperf_action_has_non_system(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	if (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) {
		return true;
	} else {
		return false;
	}
}

bool
kperf_action_has_task(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	return actionv[actionid - 1].sample & SAMPLER_TASK_MASK;
}

bool
kperf_action_has_thread(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	return actionv[actionid - 1].sample & SAMPLER_THREAD_MASK;
}

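/* Log the system-wide memory counters to the trace buffer. */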
static void
kperf_system_memory_log(void)
{
	BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
	    (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
	    (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
	    vm_page_speculative_count));
	BUF_DATA(PERF_MI_SYS_DATA_2, (uintptr_t)vm_page_anonymous_count,
	    (uintptr_t)vm_page_internal_count,
	    (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions,
	    (uintptr_t)VM_PAGE_COMPRESSOR_COUNT);
}

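/*
 * Gather and log the user space samplers for the current thread.  Sampling
 * runs with interrupts in their current state; the results are then logged
 * between PERF_GEN_EVENT start/end events with interrupts disabled so the
 * record isn't split by other trace activity.
 */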
static void
kperf_sample_user_internal(struct kperf_usample *sbuf,
    struct kperf_context *context, unsigned int actionid,
    unsigned int sample_what)
{
	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_sample(&sbuf->ucallstack, context);
	}
	if (sample_what & SAMPLER_TH_DISPATCH) {
		kperf_thread_dispatch_sample(&sbuf->th_dispatch, context);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);
	}

	boolean_t intren = ml_set_interrupts_enabled(FALSE);

	/*
	 * No userdata or sample_flags for this one.
	 */
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what, actionid);

	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_log(&sbuf->ucallstack);
	}
	if (sample_what & SAMPLER_TH_DISPATCH) {
		kperf_thread_dispatch_log(&sbuf->th_dispatch);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what);

	ml_set_interrupts_enabled(intren);
}

void
kperf_sample_user(struct kperf_usample *sbuf, struct kperf_context *context,
    unsigned int actionid, unsigned int sample_flags)
{
	if (actionid == 0 || actionid > actionc) {
		return;
	}

	unsigned int sample_what = actionv[actionid - 1].sample;
	unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;

	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}
	if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
		sample_what &= SAMPLER_SYS_MEM;
	}
	assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
	    != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
	if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
		sample_what &= SAMPLER_THREAD_MASK;
	}
	if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
		sample_what &= SAMPLER_TASK_MASK;
	}

	if (sample_what == 0) {
		return;
	}

	sbuf->ucallstack.kpuc_nframes = ucallstack_depth ?:
	    MAX_UCALLSTACK_FRAMES;

	kperf_sample_user_internal(sbuf, context, actionid, sample_what);
}

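/*
 * Take the kernel-visible portion of a sample for the given context.  The
 * samplers run first; with SAMPLE_FLAG_PEND_USER, the user callstack and
 * dispatch queue samplers are deferred to the thread's AST rather than taken
 * here.  The collected data is then logged with interrupts disabled so the
 * record isn't split.
 */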
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned sample_what, unsigned sample_flags,
    unsigned actionid, unsigned ucallstack_depth)
{
	int pended_ucallstack = 0;
	int pended_th_dispatch = 0;
	bool on_idle_thread = false;
	uint32_t userdata = actionid;
	bool task_only = false;

	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}

	if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
		sample_what &= SAMPLER_SYS_MEM;
	}

	assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
	    != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
	if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
		sample_what &= SAMPLER_THREAD_MASK;
	}
	if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
		task_only = true;
		sample_what &= SAMPLER_TASK_MASK;
	}

	if (!task_only) {
		context->cur_thread->kperf_pet_gen =
		    os_atomic_load(&kppet_gencount, relaxed);
	}
	bool is_kernel = (context->cur_pid == 0);

	if (actionid && actionid <= actionc) {
		sbuf->kcallstack.kpkc_nframes =
		    actionv[actionid - 1].kcallstack_depth;
	} else {
		sbuf->kcallstack.kpkc_nframes = MAX_KCALLSTACK_FRAMES;
	}

	ucallstack_depth = ucallstack_depth ?: MAX_UCALLSTACK_FRAMES;
	sbuf->kcallstack.kpkc_flags = 0;
	sbuf->usample.ucallstack.kpuc_flags = 0;

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);

		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			/* 0x40 is the idle bit in the legacy runmode */
			if (sbuf->th_info.kpthi_runmode & 0x40) {
				on_idle_thread = true;
				goto log_sample;
			}
		}
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
	}
	if (sample_what & SAMPLER_KSTACK) {
		if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
			kperf_continuation_sample(&(sbuf->kcallstack), context);
		} else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
			/* outside of interrupt context, backtrace the current thread */
			kperf_backtrace_sample(&(sbuf->kcallstack), context);
		} else {
			kperf_kcallstack_sample(&(sbuf->kcallstack), context);
		}
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_sample(context->cur_task, &(sbuf->tk_snapshot));
	}

	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(context->cur_task, &(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (sample_what & SAMPLER_USTACK) {
				pended_ucallstack = kperf_ucallstack_pend(context,
				    ucallstack_depth, actionid);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				pended_th_dispatch =
				    kperf_thread_dispatch_pend(context, actionid);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}

log_sample:
	/* lookup the user tag, if any */
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	}

	/* avoid logging if this sample only pended samples */
	if (sample_flags & SAMPLE_FLAG_PEND_USER &&
	    !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) {
		return SAMPLE_CONTINUE;
	}

	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	    actionid, userdata, sample_flags);

	if (sample_flags & SAMPLE_FLAG_SYSTEM) {
		if (sample_what & SAMPLER_SYS_MEM) {
			kperf_system_memory_log();
		}
	}
	if (on_idle_thread) {
		goto log_sample_end;
	}

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_log(&(sbuf->th_scheduling));
	}
	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_log(&(sbuf->th_snapshot));
	}
	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}
	if (sample_what & SAMPLER_TH_INSCYC) {
		kperf_thread_inscyc_log(context);
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_log(&(sbuf->tk_snapshot));
	}
	if (sample_what & SAMPLER_TK_INFO) {
		kperf_task_info_log(context);
	}

	/* dump user stuff */
	if (!is_kernel) {
		/* dump meminfo */
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_log(&(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (pended_ucallstack) {
				BUF_INFO(PERF_CS_UPEND);
			}

			if (pended_th_dispatch) {
				BUF_INFO(PERF_TI_DISPPEND);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_CONFIG) {
		kperf_kpc_config_log(&(sbuf->kpcdata));
	}
	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}

log_sample_end:
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, on_idle_thread ? 1 : 0);

	/* intrs back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}

/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned actionid, unsigned sample_flags)
{
	/* work out what to sample, if anything */
	if ((actionid > actionc) || (actionid == 0)) {
		return SAMPLE_SHUTDOWN;
	}

	/* check the pid filter against the context's current pid.
	 * filter pid == -1 means any pid
	 */
	int pid_filter = actionv[actionid - 1].pid_filter;
	if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
		return SAMPLE_CONTINUE;
	}

	/* the samplers to run */
	unsigned int sample_what = actionv[actionid - 1].sample;
	unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;

	/* do the actual sample operation */
	return kperf_sample_internal(sbuf, context, sample_what,
	    sample_flags, actionid, ucallstack_depth);
}

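/*
 * kdebug trigger: if the traced debugid should trigger a sample (per
 * kperf_kdebug_should_trigger), take a sample with the action configured for
 * kdebug triggers, pending the user space samplers to the thread's AST.
 */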
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
	uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
	struct kperf_sample *sample = NULL;
	kern_return_t kr = KERN_SUCCESS;
	int s;

	if (!kperf_kdebug_should_trigger(debugid)) {
		return;
	}

	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_KDEBUG,
		.trigger_id = 0,
	};

	s = ml_set_interrupts_enabled(0);

	sample = kperf_intr_sample_buffer();

	if (!ml_at_interrupt_context()) {
		sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
		ctx.starting_fp = starting_fp;
	}

	kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

	ml_set_interrupts_enabled(s);
	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}

/*
 * This function allocates >2.3KB of the stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
	uint32_t ast = thread->kperf_ast;

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, ast);

	struct kperf_usample sbuf = {};

	task_t task = get_threadtask(thread);

	if (task_did_exec(task) || task_is_exec_copy(task)) {
		BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
		return;
	}

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
	};

	unsigned int sample_what = 0;
	if (ast & T_KPERF_AST_DISPATCH) {
		sample_what |= SAMPLER_TH_DISPATCH;
	}
	if (ast & T_KPERF_AST_CALLSTACK) {
		/* TH_INFO for backwards compatibility */
		sample_what |= SAMPLER_USTACK | SAMPLER_TH_INFO;
	}

	sbuf.ucallstack.kpuc_nframes =
	    T_KPERF_GET_CALLSTACK_DEPTH(ast) ?: MAX_UCALLSTACK_FRAMES;
	unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
	kperf_sample_user_internal(&sbuf, &ctx, actionid, sample_what);

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END);
}

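/*
 * Request a kperf AST on the current thread.  If the requested flags or
 * actionid differ from what is already pending, update the thread's AST
 * state, set the AST, and return 1; otherwise return 0.
 */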
int
kperf_ast_pend(thread_t thread, uint32_t set_flags, unsigned int set_actionid)
{
	if (thread != current_thread()) {
		panic("kperf: pending AST to non-current thread");
	}

	uint32_t ast = thread->kperf_ast;
	unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
	uint32_t flags = ast & T_KPERF_AST_ALL;

	if ((flags | set_flags) != flags || actionid != set_actionid) {
		ast &= ~T_KPERF_SET_ACTIONID(actionid);
		ast |= T_KPERF_SET_ACTIONID(set_actionid);
		ast |= set_flags;

		thread->kperf_ast = ast;

		/* set the actual AST */
		act_set_kperf(thread);
		return 1;
	}

	return 0;
}

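/*
 * Raise the user callstack depth recorded in the thread's pending AST; the
 * depth is only ever increased, never decreased.
 */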
void
kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
{
	uint32_t ast = thread->kperf_ast;
	uint32_t existing_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast);
	if (existing_depth < depth) {
		ast &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_depth);
		ast |= T_KPERF_SET_CALLSTACK_DEPTH(depth);
		thread->kperf_ast = ast;
	}
}

int
kperf_kdbg_cswitch_get(void)
{
	return kperf_kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
	kperf_kdebug_cswitch = newval;
	kperf_on_cpu_update();

	return 0;
}

/*
 * Action configuration
 */
unsigned int
kperf_action_get_count(void)
{
	return actionc;
}

int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	/* disallow both CPU and thread counters to be sampled in the same
	 * action */
	if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
		return EINVAL;
	}

	actionv[actionid - 1].sample = samplers;

	return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*samplers_out = 0; /* "NULL" action */
	} else {
		*samplers_out = actionv[actionid - 1].sample;
	}

	return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].userdata = userdata;

	return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*userdata_out = 0; /* "NULL" action */
	} else {
		*userdata_out = actionv[actionid - 1].userdata;
	}

	return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].pid_filter = pid;

	return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*pid_out = -1; /* "NULL" action */
	} else {
		*pid_out = actionv[actionid - 1].pid_filter;
	}

	return 0;
}

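/* Reset every action back to its default, empty configuration. */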
void
kperf_action_reset(void)
{
	for (unsigned int i = 0; i < actionc; i++) {
		kperf_action_set_samplers(i + 1, 0);
		kperf_action_set_userdata(i + 1, 0);
		kperf_action_set_filter(i + 1, -1);
		kperf_action_set_ucallstack_depth(i + 1, MAX_UCALLSTACK_FRAMES);
		kperf_action_set_kcallstack_depth(i + 1, MAX_KCALLSTACK_FRAMES);
	}
}

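/*
 * Grow the action array to hold `count` entries, preserving any existing
 * configuration and initializing the new entries to their defaults.
 * Shrinking is not supported.
 */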
int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		kperf_setup();
	}

	/* create a new array */
	new_actionv = kalloc_tag(count * sizeof(*new_actionv), VM_KERN_MEMORY_DIAG);
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

	for (unsigned int i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
		new_actionv[i].ucallstack_depth = MAX_UCALLSTACK_FRAMES;
		new_actionv[i].kcallstack_depth = MAX_KCALLSTACK_FRAMES;
	}

	actionv = new_actionv;
	actionc = count;

	if (old_actionv != NULL) {
		kfree(old_actionv, old_count * sizeof(*actionv));
	}

	return 0;
}

int
kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_UCALLSTACK_FRAMES) {
		return EINVAL;
	}
	if (depth < 2) {
		return EINVAL;
	}

	actionv[action_id - 1].ucallstack_depth = depth;

	return 0;
}

int
kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_KCALLSTACK_FRAMES) {
		return EINVAL;
	}
	if (depth < 1) {
		return EINVAL;
	}

	actionv[action_id - 1].kcallstack_depth = depth;

	return 0;
}

int
kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t * depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_UCALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].ucallstack_depth;
	}

	return 0;
}

int
kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t * depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_KCALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].kcallstack_depth;
	}

	return 0;
}