2 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
/* sysctl interface for parameters from user-land */
31 #include <kern/debug.h>
32 #include <libkern/libkern.h>
33 #include <pexpert/pexpert.h>
34 #include <sys/param.h>
37 #include <sys/sysctl.h>
38 #include <sys/kauth.h>
40 #include <kperf/action.h>
41 #include <kperf/context.h>
42 #include <kperf/kdebug_trigger.h>
43 #include <kperf/kperf.h>
44 #include <kperf/kperfbsd.h>
45 #include <kperf/kperf_timer.h>
46 #include <kperf/pet.h>
47 #include <kperf/lazy.h>
49 #include <sys/ktrace.h>
51 /* Requests from kperf sysctls. */
59 REQ_ACTION_FILTER_BY_TASK
,
60 REQ_ACTION_FILTER_BY_PID
,
61 REQ_ACTION_UCALLSTACK_DEPTH
,
62 REQ_ACTION_KCALLSTACK_DEPTH
,
80 REQ_LAZY_WAIT_TIME_THRESHOLD
,
82 REQ_LAZY_CPU_TIME_THRESHOLD
,
/* Debug verbosity for kperf; exposed read-write as kperf.debug_level. */
int kperf_debug_level = 0;
#if DEVELOPMENT || DEBUG
/*
 * Diagnostic counter, development kernels only; presumably counts IPIs
 * requested while one was already outstanding (see users elsewhere).
 */
_Atomic long long kperf_pending_ipis = 0;
#endif /* DEVELOPMENT || DEBUG */
93 * kperf has unique requirements from sysctl.
95 * For simple queries like the number of actions, the normal sysctl style
96 * of get/set works well.
98 * However, when requesting information about something specific, like an
99 * action, user space needs to provide some contextual information. This
100 * information is stored in a uint64_t array that includes the context, like
101 * the action ID it is interested in. If user space is getting the value from
102 * the kernel, then the get side of the sysctl is valid. If it is setting the
103 * value, then the get pointers are left NULL.
105 * These functions handle marshalling and unmarshalling data from sysctls.
109 kperf_sysctl_get_set_uint32(struct sysctl_req
*req
,
110 uint32_t (*get
)(void), int (*set
)(uint32_t))
121 int error
= sysctl_io_number(req
, value
, sizeof(value
), &value
, NULL
);
123 if (error
|| !req
->newptr
) {
131 kperf_sysctl_get_set_int(struct sysctl_req
*req
,
132 int (*get
)(void), int (*set
)(int))
143 int error
= sysctl_io_number(req
, value
, sizeof(value
), &value
, NULL
);
145 if (error
|| !req
->newptr
) {
153 kperf_sysctl_get_set_uint64(struct sysctl_req
*req
,
154 uint64_t (*get
)(void), int (*set
)(uint64_t))
165 int error
= sysctl_io_number(req
, value
, sizeof(value
), &value
, NULL
);
167 if (error
|| !req
->newptr
) {
175 kperf_sysctl_get_set_unsigned_uint32(struct sysctl_req
*req
,
176 int (*get
)(unsigned int, uint32_t *), int (*set
)(unsigned int, uint32_t))
183 uint64_t inputs
[2] = {};
185 if (req
->newptr
== USER_ADDR_NULL
) {
189 if ((error
= copyin(req
->newptr
, inputs
, sizeof(inputs
)))) {
193 unsigned int action_id
= (unsigned int)inputs
[0];
194 uint32_t new_value
= (uint32_t)inputs
[1];
196 if (req
->oldptr
!= USER_ADDR_NULL
) {
197 uint32_t value_out
= 0;
198 if ((error
= get(action_id
, &value_out
))) {
202 inputs
[1] = value_out
;
204 return copyout(inputs
, req
->oldptr
, sizeof(inputs
));
206 return set(action_id
, new_value
);
211 * These functions are essentially the same as the generic
212 * kperf_sysctl_get_set_unsigned_uint32, except they have unique input sizes.
216 sysctl_timer_period(struct sysctl_req
*req
)
219 uint64_t inputs
[2] = {};
223 if (req
->newptr
== USER_ADDR_NULL
) {
227 if ((error
= copyin(req
->newptr
, inputs
, sizeof(inputs
)))) {
231 unsigned int timer
= (unsigned int)inputs
[0];
232 uint64_t new_period
= inputs
[1];
234 if (req
->oldptr
!= USER_ADDR_NULL
) {
235 uint64_t period_out
= 0;
236 if ((error
= kperf_timer_get_period(timer
, &period_out
))) {
240 inputs
[1] = period_out
;
242 return copyout(inputs
, req
->oldptr
, sizeof(inputs
));
244 return kperf_timer_set_period(timer
, new_period
);
249 sysctl_action_filter(struct sysctl_req
*req
, bool is_task_t
)
252 uint64_t inputs
[2] = {};
256 if (req
->newptr
== USER_ADDR_NULL
) {
260 if ((error
= copyin(req
->newptr
, inputs
, sizeof(inputs
)))) {
264 unsigned int actionid
= (unsigned int)inputs
[0];
265 int new_filter
= (int)inputs
[1];
267 if (req
->oldptr
!= USER_ADDR_NULL
) {
269 if ((error
= kperf_action_get_filter(actionid
, &filter_out
))) {
273 inputs
[1] = filter_out
;
274 return copyout(inputs
, req
->oldptr
, sizeof(inputs
));
276 int pid
= is_task_t
? kperf_port_to_pid((mach_port_name_t
)new_filter
)
279 return kperf_action_set_filter(actionid
, pid
);
284 sysctl_bless(struct sysctl_req
*req
)
286 int value
= ktrace_get_owning_pid();
287 int error
= sysctl_io_number(req
, value
, sizeof(value
), &value
, NULL
);
289 if (error
|| !req
->newptr
) {
293 return ktrace_set_owning_pid(value
);
296 /* sysctl handlers that use the generic functions */
299 sysctl_action_samplers(struct sysctl_req
*req
)
301 return kperf_sysctl_get_set_unsigned_uint32(req
,
302 kperf_action_get_samplers
, kperf_action_set_samplers
);
306 sysctl_action_userdata(struct sysctl_req
*req
)
308 return kperf_sysctl_get_set_unsigned_uint32(req
,
309 kperf_action_get_userdata
, kperf_action_set_userdata
);
313 sysctl_action_ucallstack_depth(struct sysctl_req
*req
)
315 return kperf_sysctl_get_set_unsigned_uint32(req
,
316 kperf_action_get_ucallstack_depth
, kperf_action_set_ucallstack_depth
);
320 sysctl_action_kcallstack_depth(struct sysctl_req
*req
)
322 return kperf_sysctl_get_set_unsigned_uint32(req
,
323 kperf_action_get_kcallstack_depth
, kperf_action_set_kcallstack_depth
);
327 sysctl_kdebug_action(struct sysctl_req
*req
)
329 return kperf_sysctl_get_set_int(req
, kperf_kdebug_get_action
,
330 kperf_kdebug_set_action
);
334 sysctl_kdebug_filter(struct sysctl_req
*req
)
338 if (req
->oldptr
!= USER_ADDR_NULL
) {
339 struct kperf_kdebug_filter
*filter
= NULL
;
340 uint32_t n_debugids
= kperf_kdebug_get_filter(&filter
);
341 size_t filter_size
= KPERF_KDEBUG_FILTER_SIZE(n_debugids
);
343 if (n_debugids
== 0) {
347 return SYSCTL_OUT(req
, filter
, filter_size
);
348 } else if (req
->newptr
!= USER_ADDR_NULL
) {
349 return kperf_kdebug_set_filter(req
->newptr
, (uint32_t)req
->newlen
);
356 kperf_sampling_set(uint32_t sample_start
)
359 return kperf_sampling_enable();
361 return kperf_sampling_disable();
366 sysctl_sampling(struct sysctl_req
*req
)
368 return kperf_sysctl_get_set_uint32(req
, kperf_sampling_status
,
373 sysctl_action_count(struct sysctl_req
*req
)
375 return kperf_sysctl_get_set_uint32(req
, kperf_action_get_count
,
376 kperf_action_set_count
);
380 sysctl_timer_count(struct sysctl_req
*req
)
382 return kperf_sysctl_get_set_uint32(req
, kperf_timer_get_count
,
383 kperf_timer_set_count
);
387 sysctl_timer_action(struct sysctl_req
*req
)
389 return kperf_sysctl_get_set_unsigned_uint32(req
, kperf_timer_get_action
,
390 kperf_timer_set_action
);
394 sysctl_timer_pet(struct sysctl_req
*req
)
396 return kperf_sysctl_get_set_uint32(req
, kperf_timer_get_petid
,
397 kperf_timer_set_petid
);
401 sysctl_bless_preempt(struct sysctl_req
*req
)
403 return sysctl_io_number(req
, ktrace_root_set_owner_allowed
,
404 sizeof(ktrace_root_set_owner_allowed
),
405 &ktrace_root_set_owner_allowed
, NULL
);
409 sysctl_kperf_reset(struct sysctl_req
*req
)
411 int should_reset
= 0;
413 int error
= sysctl_io_number(req
, should_reset
, sizeof(should_reset
),
414 &should_reset
, NULL
);
420 ktrace_reset(KTRACE_KPERF
);
426 sysctl_pet_idle_rate(struct sysctl_req
*req
)
428 return kperf_sysctl_get_set_int(req
, kperf_get_pet_idle_rate
,
429 kperf_set_pet_idle_rate
);
433 sysctl_lightweight_pet(struct sysctl_req
*req
)
435 return kperf_sysctl_get_set_int(req
, kperf_get_lightweight_pet
,
436 kperf_set_lightweight_pet
);
440 sysctl_kdbg_cswitch(struct sysctl_req
*req
)
442 return kperf_sysctl_get_set_int(req
, kperf_kdbg_cswitch_get
,
443 kperf_kdbg_cswitch_set
);
447 sysctl_lazy_wait_time_threshold(struct sysctl_req
*req
)
449 return kperf_sysctl_get_set_uint64(req
, kperf_lazy_get_wait_time_threshold
,
450 kperf_lazy_set_wait_time_threshold
);
454 sysctl_lazy_wait_action(struct sysctl_req
*req
)
456 return kperf_sysctl_get_set_int(req
, kperf_lazy_get_wait_action
,
457 kperf_lazy_set_wait_action
);
461 sysctl_lazy_cpu_time_threshold(struct sysctl_req
*req
)
463 return kperf_sysctl_get_set_uint64(req
, kperf_lazy_get_cpu_time_threshold
,
464 kperf_lazy_set_cpu_time_threshold
);
468 sysctl_lazy_cpu_action(struct sysctl_req
*req
)
470 return kperf_sysctl_get_set_int(req
, kperf_lazy_get_cpu_action
,
471 kperf_lazy_set_cpu_action
);
475 kperf_sysctl SYSCTL_HANDLER_ARGS
477 #pragma unused(oidp, arg2)
479 enum kperf_request type
= (enum kperf_request
)arg1
;
483 if (req
->oldptr
== USER_ADDR_NULL
&& req
->newptr
!= USER_ADDR_NULL
) {
484 if ((ret
= ktrace_configure(KTRACE_KPERF
))) {
489 if ((ret
= ktrace_read_check())) {
497 case REQ_ACTION_COUNT
:
498 ret
= sysctl_action_count(req
);
500 case REQ_ACTION_SAMPLERS
:
501 ret
= sysctl_action_samplers(req
);
503 case REQ_ACTION_USERDATA
:
504 ret
= sysctl_action_userdata(req
);
506 case REQ_TIMER_COUNT
:
507 ret
= sysctl_timer_count(req
);
509 case REQ_TIMER_PERIOD
:
510 ret
= sysctl_timer_period(req
);
513 ret
= sysctl_timer_pet(req
);
515 case REQ_TIMER_ACTION
:
516 ret
= sysctl_timer_action(req
);
519 ret
= sysctl_sampling(req
);
521 case REQ_KDBG_CSWITCH
:
522 ret
= sysctl_kdbg_cswitch(req
);
524 case REQ_ACTION_FILTER_BY_TASK
:
525 ret
= sysctl_action_filter(req
, true);
527 case REQ_ACTION_FILTER_BY_PID
:
528 ret
= sysctl_action_filter(req
, false);
530 case REQ_KDEBUG_ACTION
:
531 ret
= sysctl_kdebug_action(req
);
533 case REQ_KDEBUG_FILTER
:
534 ret
= sysctl_kdebug_filter(req
);
536 case REQ_PET_IDLE_RATE
:
537 ret
= sysctl_pet_idle_rate(req
);
539 case REQ_BLESS_PREEMPT
:
540 ret
= sysctl_bless_preempt(req
);
543 ret
= sysctl_kperf_reset(req
);
545 case REQ_ACTION_UCALLSTACK_DEPTH
:
546 ret
= sysctl_action_ucallstack_depth(req
);
548 case REQ_ACTION_KCALLSTACK_DEPTH
:
549 ret
= sysctl_action_kcallstack_depth(req
);
551 case REQ_LIGHTWEIGHT_PET
:
552 ret
= sysctl_lightweight_pet(req
);
554 case REQ_LAZY_WAIT_TIME_THRESHOLD
:
555 ret
= sysctl_lazy_wait_time_threshold(req
);
557 case REQ_LAZY_WAIT_ACTION
:
558 ret
= sysctl_lazy_wait_action(req
);
560 case REQ_LAZY_CPU_TIME_THRESHOLD
:
561 ret
= sysctl_lazy_cpu_time_threshold(req
);
563 case REQ_LAZY_CPU_ACTION
:
564 ret
= sysctl_lazy_cpu_action(req
);
577 kperf_sysctl_bless_handler SYSCTL_HANDLER_ARGS
579 #pragma unused(oidp, arg2)
584 /* if setting a new "blessed pid" (ktrace owning pid) */
585 if (req
->newptr
!= USER_ADDR_NULL
) {
587 * root can bypass the ktrace check when a flag is set (for
588 * backwards compatibility) or when ownership is maintained over
589 * subsystems resets (to allow the user space process that set
590 * ownership to unset it).
592 if (!((ktrace_root_set_owner_allowed
||
593 ktrace_keep_ownership_on_reset
) &&
594 kauth_cred_issuser(kauth_cred_get()))) {
595 if ((ret
= ktrace_configure(KTRACE_KPERF
))) {
601 if ((ret
= ktrace_read_check())) {
608 if ((uintptr_t)arg1
== REQ_BLESS
) {
609 ret
= sysctl_bless(req
);
619 /* root kperf node */
621 SYSCTL_NODE(, OID_AUTO
, kperf
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0,
626 SYSCTL_NODE(_kperf
, OID_AUTO
, action
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0,
629 SYSCTL_PROC(_kperf_action
, OID_AUTO
, count
,
630 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
|
632 (void *)REQ_ACTION_COUNT
,
633 sizeof(int), kperf_sysctl
, "I", "Number of actions");
635 SYSCTL_PROC(_kperf_action
, OID_AUTO
, samplers
,
636 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
637 (void *)REQ_ACTION_SAMPLERS
,
638 3 * sizeof(uint64_t), kperf_sysctl
, "UQ",
639 "What to sample when a trigger fires an action");
641 SYSCTL_PROC(_kperf_action
, OID_AUTO
, userdata
,
642 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
643 (void *)REQ_ACTION_USERDATA
,
644 3 * sizeof(uint64_t), kperf_sysctl
, "UQ",
645 "User data to attribute to action");
647 SYSCTL_PROC(_kperf_action
, OID_AUTO
, filter_by_task
,
648 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
649 (void *)REQ_ACTION_FILTER_BY_TASK
,
650 3 * sizeof(uint64_t), kperf_sysctl
, "UQ",
651 "Apply a task filter to the action");
653 SYSCTL_PROC(_kperf_action
, OID_AUTO
, filter_by_pid
,
654 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
655 (void *)REQ_ACTION_FILTER_BY_PID
,
656 3 * sizeof(uint64_t), kperf_sysctl
, "UQ",
657 "Apply a pid filter to the action");
659 SYSCTL_PROC(_kperf_action
, OID_AUTO
, ucallstack_depth
,
660 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
661 (void *)REQ_ACTION_UCALLSTACK_DEPTH
,
662 sizeof(int), kperf_sysctl
, "I",
663 "Maximum number of frames to include in user callstacks");
665 SYSCTL_PROC(_kperf_action
, OID_AUTO
, kcallstack_depth
,
666 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
667 (void *)REQ_ACTION_KCALLSTACK_DEPTH
,
668 sizeof(int), kperf_sysctl
, "I",
669 "Maximum number of frames to include in kernel callstacks");
673 SYSCTL_NODE(_kperf
, OID_AUTO
, timer
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0,
676 SYSCTL_PROC(_kperf_timer
, OID_AUTO
, count
,
677 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
679 (void *)REQ_TIMER_COUNT
,
680 sizeof(int), kperf_sysctl
, "I", "Number of time triggers");
682 SYSCTL_PROC(_kperf_timer
, OID_AUTO
, period
,
683 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
684 (void *)REQ_TIMER_PERIOD
,
685 2 * sizeof(uint64_t), kperf_sysctl
, "UQ",
686 "Timer number and period");
688 SYSCTL_PROC(_kperf_timer
, OID_AUTO
, action
,
689 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
690 (void *)REQ_TIMER_ACTION
,
691 2 * sizeof(uint64_t), kperf_sysctl
, "UQ",
692 "Timer number and actionid");
694 SYSCTL_PROC(_kperf_timer
, OID_AUTO
, pet_timer
,
695 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
697 (void *)REQ_TIMER_PET
,
698 sizeof(int), kperf_sysctl
, "I", "Which timer ID does PET");
702 SYSCTL_NODE(_kperf
, OID_AUTO
, kdebug
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0,
705 SYSCTL_PROC(_kperf_kdebug
, OID_AUTO
, action
,
706 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
708 (void*)REQ_KDEBUG_ACTION
,
709 sizeof(int), kperf_sysctl
, "I", "ID of action to trigger on kdebug events");
711 SYSCTL_PROC(_kperf_kdebug
, OID_AUTO
, filter
,
712 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
713 (void*)REQ_KDEBUG_FILTER
,
714 sizeof(int), kperf_sysctl
, "P", "The filter that determines which kdebug events trigger a sample");
718 SYSCTL_NODE(_kperf
, OID_AUTO
, lazy
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0,
721 SYSCTL_PROC(_kperf_lazy
, OID_AUTO
, wait_time_threshold
,
722 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
723 (void *)REQ_LAZY_WAIT_TIME_THRESHOLD
,
724 sizeof(uint64_t), kperf_sysctl
, "UQ",
725 "How many ticks a thread must wait to take a sample");
727 SYSCTL_PROC(_kperf_lazy
, OID_AUTO
, wait_action
,
728 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
729 (void *)REQ_LAZY_WAIT_ACTION
,
730 sizeof(uint64_t), kperf_sysctl
, "UQ",
731 "Which action to fire when a thread waits longer than threshold");
733 SYSCTL_PROC(_kperf_lazy
, OID_AUTO
, cpu_time_threshold
,
734 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
735 (void *)REQ_LAZY_CPU_TIME_THRESHOLD
,
736 sizeof(uint64_t), kperf_sysctl
, "UQ",
737 "Minimum number of ticks a CPU must run between samples");
739 SYSCTL_PROC(_kperf_lazy
, OID_AUTO
, cpu_action
,
740 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
741 (void *)REQ_LAZY_CPU_ACTION
,
742 sizeof(uint64_t), kperf_sysctl
, "UQ",
743 "Which action to fire for lazy CPU samples");
747 SYSCTL_PROC(_kperf
, OID_AUTO
, sampling
,
748 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
750 (void *)REQ_SAMPLING
,
751 sizeof(int), kperf_sysctl
, "I", "Sampling running");
753 SYSCTL_PROC(_kperf
, OID_AUTO
, reset
,
754 CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_MASKED
| CTLFLAG_LOCKED
,
756 0, kperf_sysctl
, "-", "Reset kperf");
758 SYSCTL_PROC(_kperf
, OID_AUTO
, blessed_pid
,
759 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
/* must be root */
762 sizeof(int), kperf_sysctl_bless_handler
, "I", "Blessed pid");
764 SYSCTL_PROC(_kperf
, OID_AUTO
, blessed_preempt
,
765 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
|
767 (void *)REQ_BLESS_PREEMPT
,
768 sizeof(int), kperf_sysctl
, "I", "Blessed preemption");
770 SYSCTL_PROC(_kperf
, OID_AUTO
, kdbg_cswitch
,
771 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
773 (void *)REQ_KDBG_CSWITCH
,
774 sizeof(int), kperf_sysctl
, "I", "Generate context switch info");
776 SYSCTL_PROC(_kperf
, OID_AUTO
, pet_idle_rate
,
777 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
779 (void *)REQ_PET_IDLE_RATE
,
780 sizeof(int), kperf_sysctl
, "I",
781 "Rate at which unscheduled threads are forced to be sampled in "
784 SYSCTL_PROC(_kperf
, OID_AUTO
, lightweight_pet
,
785 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
787 (void *)REQ_LIGHTWEIGHT_PET
,
788 sizeof(int), kperf_sysctl
, "I",
789 "Status of lightweight PET mode");
793 SYSCTL_NODE(_kperf
, OID_AUTO
, limits
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0,
/*
 * Contexts for the kperf.limits.* sysctls; passed as `arg1` to
 * kperf_sysctl_limits.  REQ_LIM_PERIOD_NS is required first -- it is
 * referenced by kperf_sysctl_limits and the timer_min_period_ns node.
 */
enum kperf_limit_request {
	REQ_LIM_PERIOD_NS,
	REQ_LIM_BG_PERIOD_NS,
	REQ_LIM_PET_PERIOD_NS,
	REQ_LIM_BG_PET_PERIOD_NS,
};
804 kperf_sysctl_limits SYSCTL_HANDLER_ARGS
806 #pragma unused(oidp, arg2)
807 enum kperf_limit_request type
= (enum kperf_limit_request
)arg1
;
811 case REQ_LIM_PERIOD_NS
:
812 limit
= KP_MIN_PERIOD_NS
;
815 case REQ_LIM_BG_PERIOD_NS
:
816 limit
= KP_MIN_PERIOD_BG_NS
;
819 case REQ_LIM_PET_PERIOD_NS
:
820 limit
= KP_MIN_PERIOD_PET_NS
;
823 case REQ_LIM_BG_PET_PERIOD_NS
:
824 limit
= KP_MIN_PERIOD_PET_BG_NS
;
831 return sysctl_io_number(req
, limit
, sizeof(limit
), &limit
, NULL
);
834 SYSCTL_PROC(_kperf_limits
, OID_AUTO
, timer_min_period_ns
,
835 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
836 (void *)REQ_LIM_PERIOD_NS
, sizeof(uint64_t), kperf_sysctl_limits
,
837 "Q", "Minimum timer period in nanoseconds");
838 SYSCTL_PROC(_kperf_limits
, OID_AUTO
, timer_min_bg_period_ns
,
839 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
840 (void *)REQ_LIM_BG_PERIOD_NS
, sizeof(uint64_t), kperf_sysctl_limits
,
841 "Q", "Minimum background timer period in nanoseconds");
842 SYSCTL_PROC(_kperf_limits
, OID_AUTO
, timer_min_pet_period_ns
,
843 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
844 (void *)REQ_LIM_PET_PERIOD_NS
, sizeof(uint64_t), kperf_sysctl_limits
,
845 "Q", "Minimum PET timer period in nanoseconds");
846 SYSCTL_PROC(_kperf_limits
, OID_AUTO
, timer_min_bg_pet_period_ns
,
847 CTLTYPE_QUAD
| CTLFLAG_RD
| CTLFLAG_ANYBODY
| CTLFLAG_LOCKED
,
848 (void *)REQ_LIM_BG_PET_PERIOD_NS
, sizeof(uint64_t), kperf_sysctl_limits
,
849 "Q", "Minimum background PET timer period in nanoseconds");
852 SYSCTL_INT(_kperf
, OID_AUTO
, debug_level
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
853 &kperf_debug_level
, 0, "debug level");
855 #if DEVELOPMENT || DEBUG
856 SYSCTL_QUAD(_kperf
, OID_AUTO
, already_pending_ipis
,
857 CTLFLAG_RD
| CTLFLAG_LOCKED
,
858 &kperf_pending_ipis
, "");
859 #endif /* DEVELOPMENT || DEBUG */