/* osfmk/kperf/kperfbsd.c (apple/xnu, xnu-3789.1.32) */
/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* sysctl interface for parameters from user-land */

#include <kern/debug.h>
#include <libkern/libkern.h>
#include <pexpert/pexpert.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>

#include <kperf/action.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperfbsd.h>
#include <kperf/kperf_timer.h>
#include <kperf/pet.h>

#include <sys/ktrace.h>

/* IDs for dispatch from SYSCTL macros */
#define REQ_SAMPLING (1)
#define REQ_ACTION_COUNT (2)
#define REQ_ACTION_SAMPLERS (3)
#define REQ_TIMER_COUNT (4)
#define REQ_TIMER_PERIOD (5)
#define REQ_TIMER_PET (6)
#define REQ_TIMER_ACTION (7)
#define REQ_BLESS (8)
#define REQ_ACTION_USERDATA (9)
#define REQ_ACTION_FILTER_BY_TASK (10)
#define REQ_ACTION_FILTER_BY_PID (11)
/* 12 unused */
#define REQ_PET_IDLE_RATE (13)
#define REQ_BLESS_PREEMPT (14)
#define REQ_KDBG_CSWITCH (15)
#define REQ_RESET (16)
/* 17 unused */
#define REQ_ACTION_UCALLSTACK_DEPTH (18)
#define REQ_ACTION_KCALLSTACK_DEPTH (19)
#define REQ_LIGHTWEIGHT_PET (20)
#define REQ_KDEBUG_ACTION (21)
#define REQ_KDEBUG_FILTER (22)

int kperf_debug_level = 0;

#if DEVELOPMENT || DEBUG
_Atomic long long kperf_pending_ipis = 0;
#endif /* DEVELOPMENT || DEBUG */

/*
 * kperf has a different sysctl model than others.
 *
 * For simple queries like the number of actions, the normal sysctl style
 * of get/set works well.
 *
 * However, when requesting information about something specific, like an
 * action, user space needs to provide some contextual information. This
 * information is stored in a uint64_t array that includes the context, like
 * the action ID it is interested in. If user space is getting the value from
 * the kernel, then the get side of the sysctl is valid. If it is setting the
 * value, then the get pointers are left NULL.
 *
 * These functions handle marshalling and unmarshalling data from sysctls.
 */

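/*
 * Purely illustrative sketch (not compiled as part of this file): a
 * user-space client could drive one of these context-carrying sysctls
 * roughly as follows, using the standard sysctlbyname(3) call against the
 * kperf.action.samplers node declared later in this file.  "action_id" and
 * "new_samplers" are placeholder variables, not kperf definitions.
 *
 *	// get: the old buffer receives { action_id, samplers }, but the
 *	// context (which action) still has to be passed via the new buffer
 *	uint64_t in[2] = { action_id, 0 };
 *	uint64_t out[2] = { 0, 0 };
 *	size_t out_size = sizeof(out);
 *	sysctlbyname("kperf.action.samplers", out, &out_size, in, sizeof(in));
 *
 *	// set: the old pointers are left NULL and the new buffer carries
 *	// both the context and the value to store
 *	uint64_t set_in[2] = { action_id, new_samplers };
 *	sysctlbyname("kperf.action.samplers", NULL, NULL, set_in, sizeof(set_in));
 */
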
static int
kperf_sysctl_get_set_uint32(struct sysctl_req *req,
    uint32_t (*get)(void), int (*set)(uint32_t))
{
	assert(req != NULL);
	assert(get != NULL);
	assert(set != NULL);

	uint32_t value = 0;
	if (req->oldptr) {
		value = get();
	}

	int error = sysctl_io_number(req, value, sizeof(value), &value, NULL);

	if (error || !req->newptr) {
		return error;
	}

	return set(value);
}

static int
kperf_sysctl_get_set_int(struct sysctl_req *req,
    int (*get)(void), int (*set)(int))
{
	assert(req != NULL);
	assert(get != NULL);
	assert(set != NULL);

	int value = 0;
	if (req->oldptr) {
		value = get();
	}

	int error = sysctl_io_number(req, value, sizeof(value), &value, NULL);

	if (error || !req->newptr) {
		return error;
	}

	return set(value);
}

static int
kperf_sysctl_get_set_unsigned_uint32(struct sysctl_req *req,
    int (*get)(unsigned int, uint32_t *), int (*set)(unsigned int, uint32_t))
{
	assert(req != NULL);
	assert(get != NULL);
	assert(set != NULL);

	int error;
	uint64_t inputs[2];
	if ((error = SYSCTL_IN(req, inputs, sizeof(inputs)))) {
		return error;
	}

	unsigned int action_id = (unsigned int)inputs[0];
	uint32_t new_value = (uint32_t)inputs[1];

	if (req->oldptr != USER_ADDR_NULL) {
		uint32_t value_out = 0;
		if ((error = get(action_id, &value_out))) {
			return error;
		}

		inputs[1] = value_out;
	} else {
		if ((error = set(action_id, new_value))) {
			return error;
		}
	}

	if (req->oldptr != USER_ADDR_NULL) {
		error = SYSCTL_OUT(req, inputs, sizeof(inputs));
		return error;
	} else {
		return 0;
	}
}

/*
 * These functions are essentially the same as the generic
 * kperf_sysctl_get_set_unsigned_uint32, except they have unique input sizes.
 */

static int
sysctl_timer_period(struct sysctl_req *req)
{
	assert(req != NULL);

	int error;
	uint64_t inputs[2];
	if ((error = SYSCTL_IN(req, inputs, sizeof(inputs)))) {
		return error;
	}

	unsigned int timer = (unsigned int)inputs[0];
	uint64_t new_period = inputs[1];

	if (req->oldptr != USER_ADDR_NULL) {
		uint64_t period_out = 0;
		if ((error = kperf_timer_get_period(timer, &period_out))) {
			return error;
		}

		inputs[1] = period_out;
	} else {
		if ((error = kperf_timer_set_period(timer, new_period))) {
			return error;
		}
	}

	return SYSCTL_OUT(req, inputs, sizeof(inputs));
}

static int
sysctl_action_filter(struct sysctl_req *req, boolean_t is_task_t)
{
	assert(req != NULL);

	int error;
	uint64_t inputs[2];
	if ((error = SYSCTL_IN(req, inputs, sizeof(inputs)))) {
		return error;
	}

	unsigned int actionid = (unsigned int)inputs[0];
	int new_filter = (int)inputs[1];

	if (req->oldptr != USER_ADDR_NULL) {
		int filter_out;
		if ((error = kperf_action_get_filter(actionid, &filter_out))) {
			return error;
		}

		inputs[1] = filter_out;
	} else {
		int pid = is_task_t ? kperf_port_to_pid((mach_port_name_t)new_filter)
		                    : new_filter;

		if ((error = kperf_action_set_filter(actionid, pid))) {
			return error;
		}
	}

	return SYSCTL_OUT(req, inputs, sizeof(inputs));
}

static int
sysctl_bless(struct sysctl_req *req)
{
	int value = ktrace_get_owning_pid();
	int error = sysctl_io_number(req, value, sizeof(value), &value, NULL);

	if (error || !req->newptr) {
		return error;
	}

	return ktrace_set_owning_pid(value);
}
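
/*
 * Illustrative sketch only: since kperf.blessed_pid is a plain integer
 * sysctl (declared near the bottom of this file), a suitably privileged
 * tool could hand ktrace ownership to another process with something like
 * the following, where "target_pid" is a placeholder for the pid to bless.
 *
 *	int pid = target_pid;
 *	sysctlbyname("kperf.blessed_pid", NULL, NULL, &pid, sizeof(pid));
 */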

/* sysctl handlers that use the generic functions */

static int
sysctl_action_samplers(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_unsigned_uint32(req,
	    kperf_action_get_samplers, kperf_action_set_samplers);
}

static int
sysctl_action_userdata(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_unsigned_uint32(req,
	    kperf_action_get_userdata, kperf_action_set_userdata);
}

static int
sysctl_action_ucallstack_depth(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_unsigned_uint32(req,
	    kperf_action_get_ucallstack_depth, kperf_action_set_ucallstack_depth);
}

static int
sysctl_action_kcallstack_depth(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_unsigned_uint32(req,
	    kperf_action_get_kcallstack_depth, kperf_action_set_kcallstack_depth);
}

static int
sysctl_kdebug_action(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_int(req, kperf_kdebug_get_action,
	    kperf_kdebug_set_action);
}

static int
sysctl_kdebug_filter(struct sysctl_req *req)
{
	assert(req != NULL);

	if (req->oldptr != USER_ADDR_NULL) {
		struct kperf_kdebug_filter *filter = NULL;
		uint32_t n_debugids = kperf_kdebug_get_filter(&filter);
		size_t filter_size = KPERF_KDEBUG_FILTER_SIZE(n_debugids);

		if (n_debugids == 0) {
			return EINVAL;
		}

		return SYSCTL_OUT(req, filter, filter_size);
	}

	return kperf_kdebug_set_filter(req->newptr, (uint32_t)req->newlen);
}

static int
kperf_sampling_set(uint32_t sample_start)
{
	if (sample_start) {
		return kperf_sampling_enable();
	} else {
		return kperf_sampling_disable();
	}
}

static int
sysctl_sampling(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_uint32(req, kperf_sampling_status,
	    kperf_sampling_set);
}

static int
sysctl_action_count(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_uint32(req, kperf_action_get_count,
	    kperf_action_set_count);
}

static int
sysctl_timer_count(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_uint32(req, kperf_timer_get_count,
	    kperf_timer_set_count);
}

static int
sysctl_timer_action(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_unsigned_uint32(req, kperf_timer_get_action,
	    kperf_timer_set_action);
}

static int
sysctl_timer_pet(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_uint32(req, kperf_timer_get_petid,
	    kperf_timer_set_petid);
}

static int
sysctl_bless_preempt(struct sysctl_req *req)
{
	return sysctl_io_number(req, ktrace_root_set_owner_allowed,
	    sizeof(ktrace_root_set_owner_allowed),
	    &ktrace_root_set_owner_allowed, NULL);
}

static int
sysctl_kperf_reset(struct sysctl_req *req)
{
	int should_reset = 0;

	int error = sysctl_io_number(req, should_reset, sizeof(should_reset),
	    &should_reset, NULL);
	if (error) {
		return error;
	}

	if (should_reset) {
		ktrace_reset(KTRACE_KPERF);
	}
	return 0;
}

static int
sysctl_pet_idle_rate(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_int(req, kperf_get_pet_idle_rate,
	    kperf_set_pet_idle_rate);
}

static int
sysctl_lightweight_pet(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_int(req, kperf_get_lightweight_pet,
	    kperf_set_lightweight_pet);
}

static int
sysctl_kdbg_cswitch(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_int(req, kperf_kdbg_cswitch_get,
	    kperf_kdbg_cswitch_set);
}

static int
kperf_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	int ret;
	uintptr_t type = (uintptr_t)arg1;

	lck_mtx_lock(ktrace_lock);

	if (req->oldptr == USER_ADDR_NULL && req->newptr != USER_ADDR_NULL) {
		if ((ret = ktrace_configure(KTRACE_KPERF))) {
			lck_mtx_unlock(ktrace_lock);
			return ret;
		}
	} else {
		if ((ret = ktrace_read_check())) {
			lck_mtx_unlock(ktrace_lock);
			return ret;
		}
	}

	/* which request */
	switch (type) {
	case REQ_ACTION_COUNT:
		ret = sysctl_action_count(req);
		break;
	case REQ_ACTION_SAMPLERS:
		ret = sysctl_action_samplers(req);
		break;
	case REQ_ACTION_USERDATA:
		ret = sysctl_action_userdata(req);
		break;
	case REQ_TIMER_COUNT:
		ret = sysctl_timer_count(req);
		break;
	case REQ_TIMER_PERIOD:
		ret = sysctl_timer_period(req);
		break;
	case REQ_TIMER_PET:
		ret = sysctl_timer_pet(req);
		break;
	case REQ_TIMER_ACTION:
		ret = sysctl_timer_action(req);
		break;
	case REQ_SAMPLING:
		ret = sysctl_sampling(req);
		break;
	case REQ_KDBG_CSWITCH:
		ret = sysctl_kdbg_cswitch(req);
		break;
	case REQ_ACTION_FILTER_BY_TASK:
		ret = sysctl_action_filter(req, TRUE);
		break;
	case REQ_ACTION_FILTER_BY_PID:
		ret = sysctl_action_filter(req, FALSE);
		break;
	case REQ_KDEBUG_ACTION:
		ret = sysctl_kdebug_action(req);
		break;
	case REQ_KDEBUG_FILTER:
		ret = sysctl_kdebug_filter(req);
		break;
	case REQ_PET_IDLE_RATE:
		ret = sysctl_pet_idle_rate(req);
		break;
	case REQ_BLESS_PREEMPT:
		ret = sysctl_bless_preempt(req);
		break;
	case REQ_RESET:
		ret = sysctl_kperf_reset(req);
		break;
	case REQ_ACTION_UCALLSTACK_DEPTH:
		ret = sysctl_action_ucallstack_depth(req);
		break;
	case REQ_ACTION_KCALLSTACK_DEPTH:
		ret = sysctl_action_kcallstack_depth(req);
		break;
	case REQ_LIGHTWEIGHT_PET:
		ret = sysctl_lightweight_pet(req);
		break;
	default:
		ret = ENOENT;
		break;
	}

	lck_mtx_unlock(ktrace_lock);

	return ret;
}

static int
kperf_sysctl_bless_handler SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	int ret;

	lck_mtx_lock(ktrace_lock);

	/* if setting a new "blessed pid" (ktrace owning pid) */
	if (req->newptr != USER_ADDR_NULL) {
		/*
		 * root can bypass the ktrace check when a flag is set (for
		 * backwards compatibility) or when ownership is maintained over
		 * subsystem resets (to allow the user space process that set
		 * ownership to unset it).
		 */
		if (!((ktrace_root_set_owner_allowed ||
		    ktrace_keep_ownership_on_reset) &&
		    kauth_cred_issuser(kauth_cred_get())))
		{
			if ((ret = ktrace_configure(KTRACE_KPERF))) {
				lck_mtx_unlock(ktrace_lock);
				return ret;
			}
		}
	} else {
		if ((ret = ktrace_read_check())) {
			lck_mtx_unlock(ktrace_lock);
			return ret;
		}
	}

	/* which request */
	if ((uintptr_t)arg1 == REQ_BLESS) {
		ret = sysctl_bless(req);
	} else {
		ret = ENOENT;
	}

	lck_mtx_unlock(ktrace_lock);

	return ret;
}

/* root kperf node */

SYSCTL_NODE(, OID_AUTO, kperf, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "kperf");

/* actions */

SYSCTL_NODE(_kperf, OID_AUTO, action, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "action");

SYSCTL_PROC(_kperf_action, OID_AUTO, count,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_COUNT,
    sizeof(int), kperf_sysctl, "I", "Number of actions");

SYSCTL_PROC(_kperf_action, OID_AUTO, samplers,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_SAMPLERS,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "What to sample when a trigger fires an action");

SYSCTL_PROC(_kperf_action, OID_AUTO, userdata,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_USERDATA,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "User data to attribute to action");

SYSCTL_PROC(_kperf_action, OID_AUTO, filter_by_task,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_FILTER_BY_TASK,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Apply a task filter to the action");

SYSCTL_PROC(_kperf_action, OID_AUTO, filter_by_pid,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_FILTER_BY_PID,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Apply a pid filter to the action");

SYSCTL_PROC(_kperf_action, OID_AUTO, ucallstack_depth,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_UCALLSTACK_DEPTH,
    sizeof(int), kperf_sysctl, "I",
    "Maximum number of frames to include in user callstacks");

SYSCTL_PROC(_kperf_action, OID_AUTO, kcallstack_depth,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_KCALLSTACK_DEPTH,
    sizeof(int), kperf_sysctl, "I",
    "Maximum number of frames to include in kernel callstacks");

/* timers */

SYSCTL_NODE(_kperf, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "timer");

SYSCTL_PROC(_kperf_timer, OID_AUTO, count,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_TIMER_COUNT,
    sizeof(int), kperf_sysctl, "I", "Number of time triggers");

SYSCTL_PROC(_kperf_timer, OID_AUTO, period,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_TIMER_PERIOD,
    2 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Timer number and period");

SYSCTL_PROC(_kperf_timer, OID_AUTO, action,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_TIMER_ACTION,
    2 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Timer number and actionid");

SYSCTL_PROC(_kperf_timer, OID_AUTO, pet_timer,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_TIMER_PET,
    sizeof(int), kperf_sysctl, "I", "Which timer ID does PET");

/* kdebug trigger */

SYSCTL_NODE(_kperf, OID_AUTO, kdebug, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "kdebug");

SYSCTL_PROC(_kperf_kdebug, OID_AUTO, action,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_KDEBUG_ACTION,
    sizeof(int), kperf_sysctl, "I", "ID of action to trigger on kdebug events");

SYSCTL_PROC(_kperf_kdebug, OID_AUTO, filter,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_KDEBUG_FILTER,
    sizeof(int), kperf_sysctl, "P",
    "The filter that determines which kdebug events trigger a sample");

/* misc */

SYSCTL_PROC(_kperf, OID_AUTO, sampling,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_SAMPLING,
    sizeof(int), kperf_sysctl, "I", "Sampling running");

SYSCTL_PROC(_kperf, OID_AUTO, reset,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_RESET,
    0, kperf_sysctl, "-", "Reset kperf");

SYSCTL_PROC(_kperf, OID_AUTO, blessed_pid,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, /* must be root */
    (void *)REQ_BLESS,
    sizeof(int), kperf_sysctl_bless_handler, "I", "Blessed pid");

SYSCTL_PROC(_kperf, OID_AUTO, blessed_preempt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_BLESS_PREEMPT,
    sizeof(int), kperf_sysctl, "I", "Blessed preemption");

SYSCTL_PROC(_kperf, OID_AUTO, kdbg_cswitch,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_KDBG_CSWITCH,
    sizeof(int), kperf_sysctl, "I", "Generate context switch info");

SYSCTL_PROC(_kperf, OID_AUTO, pet_idle_rate,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_PET_IDLE_RATE,
    sizeof(int), kperf_sysctl, "I",
    "Rate at which unscheduled threads are forced to be sampled in "
    "PET mode");

SYSCTL_PROC(_kperf, OID_AUTO, lightweight_pet,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_LIGHTWEIGHT_PET,
    sizeof(int), kperf_sysctl, "I",
    "Status of lightweight PET mode");

/* debug */
SYSCTL_INT(_kperf, OID_AUTO, debug_level, CTLFLAG_RW | CTLFLAG_LOCKED,
    &kperf_debug_level, 0, "debug level");

#if DEVELOPMENT || DEBUG
SYSCTL_QUAD(_kperf, OID_AUTO, already_pending_ipis,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &kperf_pending_ipis, "");
#endif /* DEVELOPMENT || DEBUG */