/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger. Takes the data from the different sampling
 * modules and puts it in a buffer.
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
// #include <libkern/libkern.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>

#include <chud/chud_xnu.h>
#include <kperf/kperf.h>

#include <kperf/buffer.h>
#include <kperf/timetrigger.h>
#include <kperf/threadinfo.h>
#include <kperf/callstack.h>
#include <kperf/sample.h>
#include <kperf/action.h>
#include <kperf/context.h>
#include <kperf/ast.h>
#include <kperf/kperf_kpc.h>

#define ACTION_MAX 32

/* the list of different actions to take */
struct action
{
	uint32_t sample;
	uint32_t userdata;
	int pid_filter;
};

/* the list of actions */
static unsigned actionc = 0;
static struct action *actionv = NULL;
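
/* note: action IDs are 1-based in the interfaces below -- ID 0 is the
 * "NULL" action, and action n is stored at actionv[n - 1]
 */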

/* manage callbacks from system */

/* callback set for kdebug */
static int kperf_kdbg_callback_set = 0;
/* whether to record callstacks on kdebug events */
static int kdebug_callstacks = 0;
/* the action ID to trigger on signposts */
static int kperf_signpost_action = 0;

/* callback set for context-switch */
int kperf_cswitch_callback_set = 0;
/* should emit tracepoint on context switch */
static int kdebug_cswitch = 0;
/* the action ID to trigger on context switches */
static int kperf_cswitch_action = 0;

/* indirect hooks to play nice with CHUD for the transition to kperf */
kern_return_t chudxnu_kdebug_callback_enter(chudxnu_kdebug_callback_func_t fn);
kern_return_t chudxnu_kdebug_callback_cancel(void);

/* Do the real work! */
/* this can be called in any context ... right? */
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
                      struct kperf_context *context,
                      unsigned sample_what, unsigned sample_flags,
                      unsigned actionid)
{
	boolean_t enabled;
	int did_ucallstack = 0, did_tinfo_extra = 0;
	uint32_t userdata;

	/* not much point continuing here, but what should we do?
	 * return shutdown? cut a tracepoint and continue?
	 */
	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	int is_kernel = (context->cur_pid == 0);

	sbuf->kcallstack.nframes = 0;
	sbuf->kcallstack.flags = CALLSTACK_VALID;
	sbuf->ucallstack.nframes = 0;
	sbuf->ucallstack.flags = CALLSTACK_VALID;

	/* an event occurred. Sample everything and dump it in a
	 * buffer.
	 */

	/* collect data from samplers */
	if (sample_what & SAMPLER_TINFO) {
		kperf_threadinfo_sample(&sbuf->threadinfo, context);

		/* see if we should drop idle thread samples */
		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			/* bit 0x40 in the sampled runmode marks an idle thread */
			if (sbuf->threadinfo.runmode & 0x40) {
				return SAMPLE_CONTINUE;
			}
		}
	}

	if ((sample_what & SAMPLER_KSTACK) && !(sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK)) {
		kperf_kcallstack_sample(&(sbuf->kcallstack), context);
	}

	/* sensitive ones */
	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(&(sbuf->meminfo), context);
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if ((sample_what & SAMPLER_USTACK)
			    && !(sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK))
			{
				did_ucallstack = kperf_ucallstack_pend(context);
			}

			if (sample_what & SAMPLER_TINFOEX) {
				did_tinfo_extra = kperf_threadinfo_extra_pend(context);
			}
		} else {
			if ((sample_what & SAMPLER_USTACK)
			    && !(sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK))
			{
				kperf_ucallstack_sample(&(sbuf->ucallstack), context);
			}

			if (sample_what & SAMPLER_TINFOEX) {
				kperf_threadinfo_extra_sample(&(sbuf->tinfo_ex),
				                              context);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}

	/* lookup the user tag, if any */
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	} else {
		userdata = actionid;
	}

	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	         actionid, userdata, sample_flags);

	/* dump threadinfo */
	if (sample_what & SAMPLER_TINFO) {
		kperf_threadinfo_log(&sbuf->threadinfo);
	}

	/* dump kcallstack */
	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}

	/* dump user stuff */
	if (!is_kernel) {
		/* dump meminfo */
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_log(&(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (did_ucallstack) {
				BUF_INFO1(PERF_CS_UPEND, 0);
			}

			if (did_tinfo_extra) {
				BUF_INFO1(PERF_TI_XPEND, 0);
			}
		} else {
			if (sample_what & SAMPLER_USTACK) {
				kperf_ucallstack_log(&(sbuf->ucallstack));
			}

			if (sample_what & SAMPLER_TINFOEX) {
				kperf_threadinfo_extra_log(&(sbuf->tinfo_ex));
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}

	BUF_DATA1(PERF_GEN_EVENT | DBG_FUNC_END, sample_what);

	/* intrs back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}

/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
             struct kperf_context *context,
             unsigned actionid, unsigned sample_flags)
{
	unsigned sample_what = 0;
	int pid_filter;

	/* work out what to sample, if anything */
	if ((actionid > actionc) || (actionid == 0)) {
		return SAMPLE_SHUTDOWN;
	}

	/* check the pid filter against the context's current pid.
	 * filter pid == -1 means any pid
	 */
	pid_filter = actionv[actionid - 1].pid_filter;
	if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
		return SAMPLE_CONTINUE;
	}

	/* the samplers to run */
	sample_what = actionv[actionid - 1].sample;

	/* do the actual sample operation */
	return kperf_sample_internal(sbuf, context, sample_what,
	                             sample_flags, actionid);
}
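
/*
 * A sketch of how a trigger is expected to drive this path (hypothetical
 * caller and values -- the real callers are the timer/PMI/kdebug/cswitch
 * triggers, e.g. kperf_kdebug_callback() below):
 *
 *	struct kperf_context ctx;
 *	ctx.cur_thread = chudxnu_current_thread();
 *	ctx.cur_pid = ...;
 *	ctx.trigger_type = TRIGGER_TYPE_TIMER;
 *	ctx.trigger_id = 0;
 *	kperf_sample(sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);
 */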

/* ast callback on a thread */
void
kperf_thread_ast_handler(thread_t thread)
{
	int r;
	uint32_t t_chud;
	unsigned sample_what = 0;
	/* we know we're on a thread, so let's do stuff */
	task_t task = NULL;

	BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_START, thread);

	/* use ~2kb of the stack for the sample, should be ok since we're in the ast */
	struct kperf_sample sbuf;
	memset(&sbuf, 0, sizeof(struct kperf_sample));

	/* make a context, take a sample */
	struct kperf_context ctx;
	ctx.cur_thread = thread;
	ctx.cur_pid = -1;

	task = chudxnu_task_for_thread(thread);
	if (task) {
		ctx.cur_pid = chudxnu_pid_for_task(task);
	}

	/* decode the chud bits so we know what to sample */
	t_chud = kperf_get_thread_bits(thread);

	if (t_chud & T_AST_NAME) {
		sample_what |= SAMPLER_TINFOEX;
	}

	if (t_chud & T_AST_CALLSTACK) {
		sample_what |= SAMPLER_USTACK;
		sample_what |= SAMPLER_TINFO;
	}

	/* do the sample, just of the user stuff */
	r = kperf_sample_internal(&sbuf, &ctx, sample_what, 0, 0);

	BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_END, r);
}

/* register AST bits */
int
kperf_ast_pend(thread_t cur_thread, uint32_t check_bits,
               uint32_t set_bits)
{
	/* pend on the thread */
	uint32_t t_chud, set_done = 0;

	/* can only pend on the current thread */
	if (cur_thread != chudxnu_current_thread()) {
		panic("pending to non-current thread");
	}

	/* get our current bits */
	t_chud = kperf_get_thread_bits(cur_thread);

	/* see if it's already been done or pended */
	if (!(t_chud & check_bits)) {
		/* set the bit on the thread */
		t_chud |= set_bits;
		kperf_set_thread_bits(cur_thread, t_chud);

		/* set the actual AST */
		kperf_set_thread_ast(cur_thread);

		set_done = 1;
	}

	return set_done;
}
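
/*
 * For example, the kdebug callback below pends a user callstack with
 *
 *	kperf_ast_pend(thread, T_AST_CALLSTACK, T_AST_CALLSTACK);
 *
 * check_bits guards against pending twice; set_bits is what actually gets
 * ORed into the thread's chud bits before the AST is set.
 */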

/*
 * kdebug callback & stack management
 */

#define IS_END(debugid) ((debugid & 3) == DBG_FUNC_END)
#define IS_MIG(debugid) (IS_END(debugid) && ((debugid & 0xff000000U) == KDBG_CLASS_ENCODE((unsigned)DBG_MIG, 0U)))
#define IS_MACH_SYSCALL(debugid) (IS_END(debugid) && (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_MACH, DBG_MACH_EXCP_SC)))
#define IS_VM_FAULT(debugid) (IS_END(debugid) && (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_MACH, DBG_MACH_VM)))
#define IS_BSD_SYSCALL(debugid) (IS_END(debugid) && (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_BSD, DBG_BSD_EXCP_SC)))
#define IS_APPS_SIGNPOST(debugid) (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_APPS, DBG_MACH_CHUD))
#define IS_MACH_SIGNPOST(debugid) (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_MACH, DBG_MACH_CHUD))
#define IS_ENERGYTRACE(debugid) ((debugid & 0xff000000U) == KDBG_CLASS_ENCODE((unsigned)DBG_ENERGYTRACE, 0U))
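
/*
 * KDBG_CLASS_ENCODE() packs a (class, subclass) pair into the top 16 bits
 * of a debugid, so e.g. IS_VM_FAULT() matches any DBG_FUNC_END tracepoint
 * cut from the DBG_MACH/DBG_MACH_VM subclass.
 */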

void
kperf_kdebug_callback(uint32_t debugid)
{
	int cur_pid = 0;
	task_t task = NULL;

	if (!kdebug_callstacks && !kperf_signpost_action) {
		return;
	}

	/* if we're looking at a kperf tracepoint, don't recurse */
	if ((debugid & 0xff000000) == KDBG_CLASS_ENCODE(DBG_PERF, 0)) {
		return;
	}

	/* ensure interrupts are already off thanks to kdebug */
	if (ml_get_interrupts_enabled()) {
		return;
	}

	/* make sure we're not being called recursively. */
#if NOTYET
	if (kperf_kdbg_recurse(KPERF_RECURSE_IN)) {
		return;
	}
#endif

	/* check the happy list of trace codes */
	if (!(IS_MIG(debugid)
	      || IS_MACH_SYSCALL(debugid)
	      || IS_VM_FAULT(debugid)
	      || IS_BSD_SYSCALL(debugid)
	      || IS_MACH_SIGNPOST(debugid)
	      || IS_ENERGYTRACE(debugid)
	      || IS_APPS_SIGNPOST(debugid)))
	{
		return;
	}

	/* check for kernel */
	thread_t thread = chudxnu_current_thread();
	task = chudxnu_task_for_thread(thread);
	if (task) {
		cur_pid = chudxnu_pid_for_task(task);
	}
	if (!cur_pid) {
		return;
	}

	if (kdebug_callstacks) {
		/* dicing with death */
		BUF_INFO2(PERF_KDBG_HNDLR, debugid, cur_pid);

		/* pend the AST */
		kperf_ast_pend(thread, T_AST_CALLSTACK, T_AST_CALLSTACK);
	}

	if (kperf_signpost_action && (IS_MACH_SIGNPOST(debugid)
	    || IS_APPS_SIGNPOST(debugid)))
	{
#if NOTYET
		/* make sure we're not being called recursively. */
		if (kperf_kdbg_recurse(KPERF_RECURSE_IN)) {
			return;
		}
#endif

		/* setup a context */
		struct kperf_context ctx;
		struct kperf_sample *intbuf = NULL;
		BUF_INFO2(PERF_SIGNPOST_HNDLR | DBG_FUNC_START, debugid, cur_pid);

		ctx.cur_thread = thread;
		ctx.cur_pid = cur_pid;
		ctx.trigger_type = TRIGGER_TYPE_TRACE;
		ctx.trigger_id = 0;

		/* CPU sample buffer -- only valid with interrupts off (above).
		 * Technically this isn't true -- tracepoints can be, and often
		 * are, cut from interrupt handlers, but none of those tracepoints
		 * should make it this far.
		 */
		intbuf = kperf_intr_sample_buffer();

		/* do the sample */
		kperf_sample(intbuf, &ctx, kperf_signpost_action,
		             SAMPLE_FLAG_PEND_USER);

		BUF_INFO2(PERF_SIGNPOST_HNDLR | DBG_FUNC_END, debugid, cur_pid);
#if NOTYET
		/* no longer recursive */
		kperf_kdbg_recurse(KPERF_RECURSE_OUT);
#endif
	}
}

static void
kperf_kdbg_callback_update(void)
{
	unsigned old_callback_set = kperf_kdbg_callback_set;

	/* compute new callback state */
	kperf_kdbg_callback_set = kdebug_callstacks || kperf_signpost_action;

	if (old_callback_set && !kperf_kdbg_callback_set) {
		/* callback should no longer be set */
		chudxnu_kdebug_callback_cancel();
	} else if (!old_callback_set && kperf_kdbg_callback_set) {
		/* callback must now be set */
		chudxnu_kdebug_callback_enter(NULL);
	}
}

int
kperf_kdbg_get_stacks(void)
{
	return kdebug_callstacks;
}

int
kperf_kdbg_set_stacks(int newval)
{
	kdebug_callstacks = newval;
	kperf_kdbg_callback_update();

	return 0;
}

int
kperf_signpost_action_get(void)
{
	return kperf_signpost_action;
}

int
kperf_signpost_action_set(int newval)
{
	kperf_signpost_action = newval;
	kperf_kdbg_callback_update();

	return 0;
}

/*
 * Thread switch
 */

/* called from context switch handler */
void
kperf_switch_context(__unused thread_t old, thread_t new)
{
	task_t task = get_threadtask(new);
	int pid = chudxnu_pid_for_task(task);

	/* cut a tracepoint to tell us what the new thread's PID is
	 * for Instruments
	 */
	BUF_DATA2(PERF_TI_CSWITCH, thread_tid(new), pid);

	/* trigger action after counters have been updated */
	if (kperf_cswitch_action) {
		struct kperf_sample sbuf;
		struct kperf_context ctx;
		int r;

		BUF_DATA1(PERF_CSWITCH_HNDLR | DBG_FUNC_START, 0);

		ctx.cur_pid = 0;
		ctx.cur_thread = old;

		/* get PID for context */
		task_t old_task = chudxnu_task_for_thread(ctx.cur_thread);
		if (old_task) {
			ctx.cur_pid = chudxnu_pid_for_task(old_task);
		}

		ctx.trigger_type = TRIGGER_TYPE_CSWITCH;
		ctx.trigger_id = 0;

		r = kperf_sample(&sbuf, &ctx, kperf_cswitch_action,
		                 SAMPLE_FLAG_PEND_USER);

		BUF_INFO1(PERF_CSWITCH_HNDLR | DBG_FUNC_END, r);
	}
}
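
/*
 * Note that both the signpost and context-switch triggers sample with
 * SAMPLE_FLAG_PEND_USER: user-space state is not sampled inline, but
 * pended by kperf_sample_internal() and picked up later by
 * kperf_thread_ast_handler() when the thread takes its AST.
 */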

static void
kperf_cswitch_callback_update(void)
{
	unsigned old_callback_set = kperf_cswitch_callback_set;

	unsigned new_callback_set = kdebug_cswitch || kperf_cswitch_action;

	if (old_callback_set && !new_callback_set) {
		kperf_cswitch_callback_set = 0;
	} else if (!old_callback_set && new_callback_set) {
		kperf_cswitch_callback_set = 1;
	} else {
		return;
	}

	kperf_kpc_cswitch_callback_update();
}

int
kperf_kdbg_cswitch_get(void)
{
	return kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
	kdebug_cswitch = newval;
	kperf_cswitch_callback_update();

	return 0;
}

int
kperf_cswitch_action_get(void)
{
	return kperf_cswitch_action;
}

int
kperf_cswitch_action_set(int newval)
{
	kperf_cswitch_action = newval;
	kperf_cswitch_callback_update();

	return 0;
}

/*
 * Action configuration
 */
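
/*
 * A typical configuration sequence might look like this (hypothetical
 * values; the real callers are the kperf sysctls, see kperfbsd.c):
 *
 *	kperf_action_set_count(1);
 *	kperf_action_set_samplers(1, SAMPLER_TINFO | SAMPLER_KSTACK);
 *	kperf_action_set_userdata(1, 0xff00ff00);
 *	kperf_action_set_filter(1, -1);		(a pid of -1 means any pid)
 */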

unsigned
kperf_action_get_count(void)
{
	return actionc;
}

int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	/* disallow both CPU and thread counters to be sampled in the same
	 * action */
	if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
		return EINVAL;
	}

	actionv[actionid - 1].sample = samplers;

	return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
	if (actionid > actionc) {
		return EINVAL;
	}

	if (actionid == 0) {
		*samplers_out = 0; /* "NULL" action */
	} else {
		*samplers_out = actionv[actionid - 1].sample;
	}

	return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].userdata = userdata;

	return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
	if (actionid > actionc) {
		return EINVAL;
	}

	if (actionid == 0) {
		*userdata_out = 0; /* "NULL" action */
	} else {
		*userdata_out = actionv[actionid - 1].userdata;
	}

	return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].pid_filter = pid;

	return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
	if (actionid > actionc) {
		return EINVAL;
	}

	if (actionid == 0) {
		*pid_out = -1; /* "NULL" action */
	} else {
		*pid_out = actionv[actionid - 1].pid_filter;
	}

	return 0;
}

int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count, i;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		int r;
		r = kperf_init();

		if (r != 0) {
			return r;
		}
	}

	/* create a new array */
	new_actionv = kalloc(count * sizeof(*new_actionv));
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

	/* new slots start with no samplers and an open pid filter */
	for (i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
	}

	actionv = new_actionv;
	actionc = count;

	if (old_actionv != NULL) {
		kfree(old_actionv, old_count * sizeof(*actionv));
	}

	return 0;
}