osfmk/kperf/kperfbsd.c (apple/xnu, xnu-4570.71.2)
/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* sysctl interface for parameters from user-land */

#include <kern/debug.h>
#include <libkern/libkern.h>
#include <pexpert/pexpert.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>

#include <kperf/action.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperfbsd.h>
#include <kperf/kperf_timer.h>
#include <kperf/pet.h>

#include <sys/ktrace.h>

/* IDs for dispatch from SYSCTL macros */
#define REQ_SAMPLING                 (1)
#define REQ_ACTION_COUNT             (2)
#define REQ_ACTION_SAMPLERS          (3)
#define REQ_TIMER_COUNT              (4)
#define REQ_TIMER_PERIOD             (5)
#define REQ_TIMER_PET                (6)
#define REQ_TIMER_ACTION             (7)
#define REQ_BLESS                    (8)
#define REQ_ACTION_USERDATA          (9)
#define REQ_ACTION_FILTER_BY_TASK    (10)
#define REQ_ACTION_FILTER_BY_PID     (11)
/* 12 unused */
#define REQ_PET_IDLE_RATE            (13)
#define REQ_BLESS_PREEMPT            (14)
#define REQ_KDBG_CSWITCH             (15)
#define REQ_RESET                    (16)
/* 17 unused */
#define REQ_ACTION_UCALLSTACK_DEPTH  (18)
#define REQ_ACTION_KCALLSTACK_DEPTH  (19)
#define REQ_LIGHTWEIGHT_PET          (20)
#define REQ_KDEBUG_ACTION            (21)
#define REQ_KDEBUG_FILTER            (22)

int kperf_debug_level = 0;

#if DEVELOPMENT || DEBUG
_Atomic long long kperf_pending_ipis = 0;
#endif /* DEVELOPMENT || DEBUG */

/*
 * kperf has a different sysctl model than others.
 *
 * For simple queries like the number of actions, the normal sysctl style
 * of get/set works well.
 *
 * However, when requesting information about something specific, like an
 * action, user space needs to provide some contextual information. This
 * information is stored in a uint64_t array that includes the context, like
 * the action ID it is interested in. If user space is getting the value from
 * the kernel, then the get side of the sysctl is valid. If it is setting the
 * value, then the get pointers are left NULL.
 *
 * These functions handle marshalling and unmarshalling data from sysctls.
 */
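
/*
 * For illustration, a user-space caller might drive this convention roughly
 * as follows, using the kperf.action.samplers sysctl declared below (sketch
 * only; action_id and new_samplers are placeholders):
 *
 *	uint64_t in[2] = { action_id, 0 };
 *	uint64_t out[2] = { 0, 0 };
 *	size_t out_size = sizeof(out);
 *
 *	// get: the "new" buffer supplies the action ID and the "old" buffer
 *	// comes back as { action_id, current value }
 *	sysctlbyname("kperf.action.samplers", out, &out_size, in, sizeof(in));
 *
 *	// set: pass only the "new" buffer, with the value in the second slot
 *	in[1] = new_samplers;
 *	sysctlbyname("kperf.action.samplers", NULL, NULL, in, sizeof(in));
 */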

static int
kperf_sysctl_get_set_uint32(struct sysctl_req *req,
    uint32_t (*get)(void), int (*set)(uint32_t))
{
	assert(req != NULL);
	assert(get != NULL);
	assert(set != NULL);

	uint32_t value = 0;
	if (req->oldptr) {
		value = get();
	}

	int error = sysctl_io_number(req, value, sizeof(value), &value, NULL);

	if (error || !req->newptr) {
		return error;
	}

	return set(value);
}

static int
kperf_sysctl_get_set_int(struct sysctl_req *req,
    int (*get)(void), int (*set)(int))
{
	assert(req != NULL);
	assert(get != NULL);
	assert(set != NULL);

	int value = 0;
	if (req->oldptr) {
		value = get();
	}

	int error = sysctl_io_number(req, value, sizeof(value), &value, NULL);

	if (error || !req->newptr) {
		return error;
	}

	return set(value);
}

static int
kperf_sysctl_get_set_unsigned_uint32(struct sysctl_req *req,
    int (*get)(unsigned int, uint32_t *), int (*set)(unsigned int, uint32_t))
{
	assert(req != NULL);
	assert(get != NULL);
	assert(set != NULL);

	int error = 0;
	uint64_t inputs[2] = {};

	if (req->newptr == USER_ADDR_NULL) {
		return EFAULT;
	}

	if ((error = copyin(req->newptr, inputs, sizeof(inputs)))) {
		return error;
	}

	unsigned int action_id = (unsigned int)inputs[0];
	uint32_t new_value = (uint32_t)inputs[1];

	if (req->oldptr != USER_ADDR_NULL) {
		uint32_t value_out = 0;
		if ((error = get(action_id, &value_out))) {
			return error;
		}

		inputs[1] = value_out;

		return copyout(inputs, req->oldptr, sizeof(inputs));
	} else {
		return set(action_id, new_value);
	}
}

/*
 * These functions are essentially the same as the generic
 * kperf_sysctl_get_set_unsigned_uint32, except the second element of the
 * input array carries a differently sized or differently typed value.
 */
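
/*
 * The kperf.timer.period sysctl below follows the same two-element layout,
 * with the timer number first and the period second; for example (sketch
 * only, timer_id and new_period are placeholders):
 *
 *	uint64_t in[2] = { timer_id, new_period };
 *	sysctlbyname("kperf.timer.period", NULL, NULL, in, sizeof(in));
 */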

static int
sysctl_timer_period(struct sysctl_req *req)
{
	int error;
	uint64_t inputs[2] = {};

	assert(req != NULL);

	if (req->newptr == USER_ADDR_NULL) {
		return EFAULT;
	}

	if ((error = copyin(req->newptr, inputs, sizeof(inputs)))) {
		return error;
	}

	unsigned int timer = (unsigned int)inputs[0];
	uint64_t new_period = inputs[1];

	if (req->oldptr != USER_ADDR_NULL) {
		uint64_t period_out = 0;
		if ((error = kperf_timer_get_period(timer, &period_out))) {
			return error;
		}

		inputs[1] = period_out;

		return copyout(inputs, req->oldptr, sizeof(inputs));
	} else {
		return kperf_timer_set_period(timer, new_period);
	}
}

static int
sysctl_action_filter(struct sysctl_req *req, bool is_task_t)
{
	int error = 0;
	uint64_t inputs[2] = {};

	assert(req != NULL);

	if (req->newptr == USER_ADDR_NULL) {
		return EFAULT;
	}

	if ((error = copyin(req->newptr, inputs, sizeof(inputs)))) {
		return error;
	}

	unsigned int actionid = (unsigned int)inputs[0];
	int new_filter = (int)inputs[1];

	if (req->oldptr != USER_ADDR_NULL) {
		int filter_out;
		if ((error = kperf_action_get_filter(actionid, &filter_out))) {
			return error;
		}

		inputs[1] = filter_out;
		return copyout(inputs, req->oldptr, sizeof(inputs));
	} else {
		int pid = is_task_t ? kperf_port_to_pid((mach_port_name_t)new_filter)
		    : new_filter;

		return kperf_action_set_filter(actionid, pid);
	}
}

static int
sysctl_bless(struct sysctl_req *req)
{
	int value = ktrace_get_owning_pid();
	int error = sysctl_io_number(req, value, sizeof(value), &value, NULL);

	if (error || !req->newptr) {
		return error;
	}

	return ktrace_set_owning_pid(value);
}

/* sysctl handlers that use the generic functions */

static int
sysctl_action_samplers(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_unsigned_uint32(req,
	    kperf_action_get_samplers, kperf_action_set_samplers);
}

static int
sysctl_action_userdata(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_unsigned_uint32(req,
	    kperf_action_get_userdata, kperf_action_set_userdata);
}

static int
sysctl_action_ucallstack_depth(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_unsigned_uint32(req,
	    kperf_action_get_ucallstack_depth, kperf_action_set_ucallstack_depth);
}

static int
sysctl_action_kcallstack_depth(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_unsigned_uint32(req,
	    kperf_action_get_kcallstack_depth, kperf_action_set_kcallstack_depth);
}

static int
sysctl_kdebug_action(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_int(req, kperf_kdebug_get_action,
	    kperf_kdebug_set_action);
}

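/*
 * The kdebug filter is variable-length, so this handler copies it in and out
 * directly instead of using the fixed-size helpers above.
 */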
static int
sysctl_kdebug_filter(struct sysctl_req *req)
{
	assert(req != NULL);

	if (req->oldptr != USER_ADDR_NULL) {
		struct kperf_kdebug_filter *filter = NULL;
		uint32_t n_debugids = kperf_kdebug_get_filter(&filter);
		size_t filter_size = KPERF_KDEBUG_FILTER_SIZE(n_debugids);

		if (n_debugids == 0) {
			return EINVAL;
		}

		return SYSCTL_OUT(req, filter, filter_size);
	}

	return kperf_kdebug_set_filter(req->newptr, (uint32_t)req->newlen);
}

static int
kperf_sampling_set(uint32_t sample_start)
{
	if (sample_start) {
		return kperf_sampling_enable();
	} else {
		return kperf_sampling_disable();
	}
}

static int
sysctl_sampling(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_uint32(req, kperf_sampling_status,
	    kperf_sampling_set);
}

static int
sysctl_action_count(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_uint32(req, kperf_action_get_count,
	    kperf_action_set_count);
}

static int
sysctl_timer_count(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_uint32(req, kperf_timer_get_count,
	    kperf_timer_set_count);
}

static int
sysctl_timer_action(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_unsigned_uint32(req, kperf_timer_get_action,
	    kperf_timer_set_action);
}

static int
sysctl_timer_pet(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_uint32(req, kperf_timer_get_petid,
	    kperf_timer_set_petid);
}

static int
sysctl_bless_preempt(struct sysctl_req *req)
{
	return sysctl_io_number(req, ktrace_root_set_owner_allowed,
	    sizeof(ktrace_root_set_owner_allowed),
	    &ktrace_root_set_owner_allowed, NULL);
}

static int
sysctl_kperf_reset(struct sysctl_req *req)
{
	int should_reset = 0;

	int error = sysctl_io_number(req, should_reset, sizeof(should_reset),
	    &should_reset, NULL);
	if (error) {
		return error;
	}

	if (should_reset) {
		ktrace_reset(KTRACE_KPERF);
	}
	return 0;
}

static int
sysctl_pet_idle_rate(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_int(req, kperf_get_pet_idle_rate,
	    kperf_set_pet_idle_rate);
}

static int
sysctl_lightweight_pet(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_int(req, kperf_get_lightweight_pet,
	    kperf_set_lightweight_pet);
}

static int
sysctl_kdbg_cswitch(struct sysctl_req *req)
{
	return kperf_sysctl_get_set_int(req, kperf_kdbg_cswitch_get,
	    kperf_kdbg_cswitch_set);
}

static int
kperf_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	int ret;
	uintptr_t type = (uintptr_t)arg1;

	ktrace_lock();

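	/*
	 * A request that only supplies new data reconfigures kperf, so it
	 * must be able to claim ktrace; a request that reads state only
	 * needs to pass the ktrace read check.
	 */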
	if (req->oldptr == USER_ADDR_NULL && req->newptr != USER_ADDR_NULL) {
		if ((ret = ktrace_configure(KTRACE_KPERF))) {
			ktrace_unlock();
			return ret;
		}
	} else {
		if ((ret = ktrace_read_check())) {
			ktrace_unlock();
			return ret;
		}
	}

	/* which request */
	switch (type) {
	case REQ_ACTION_COUNT:
		ret = sysctl_action_count(req);
		break;
	case REQ_ACTION_SAMPLERS:
		ret = sysctl_action_samplers(req);
		break;
	case REQ_ACTION_USERDATA:
		ret = sysctl_action_userdata(req);
		break;
	case REQ_TIMER_COUNT:
		ret = sysctl_timer_count(req);
		break;
	case REQ_TIMER_PERIOD:
		ret = sysctl_timer_period(req);
		break;
	case REQ_TIMER_PET:
		ret = sysctl_timer_pet(req);
		break;
	case REQ_TIMER_ACTION:
		ret = sysctl_timer_action(req);
		break;
	case REQ_SAMPLING:
		ret = sysctl_sampling(req);
		break;
	case REQ_KDBG_CSWITCH:
		ret = sysctl_kdbg_cswitch(req);
		break;
	case REQ_ACTION_FILTER_BY_TASK:
		ret = sysctl_action_filter(req, true);
		break;
	case REQ_ACTION_FILTER_BY_PID:
		ret = sysctl_action_filter(req, false);
		break;
	case REQ_KDEBUG_ACTION:
		ret = sysctl_kdebug_action(req);
		break;
	case REQ_KDEBUG_FILTER:
		ret = sysctl_kdebug_filter(req);
		break;
	case REQ_PET_IDLE_RATE:
		ret = sysctl_pet_idle_rate(req);
		break;
	case REQ_BLESS_PREEMPT:
		ret = sysctl_bless_preempt(req);
		break;
	case REQ_RESET:
		ret = sysctl_kperf_reset(req);
		break;
	case REQ_ACTION_UCALLSTACK_DEPTH:
		ret = sysctl_action_ucallstack_depth(req);
		break;
	case REQ_ACTION_KCALLSTACK_DEPTH:
		ret = sysctl_action_kcallstack_depth(req);
		break;
	case REQ_LIGHTWEIGHT_PET:
		ret = sysctl_lightweight_pet(req);
		break;
	default:
		ret = ENOENT;
		break;
	}

	ktrace_unlock();

	return ret;
}

static int
kperf_sysctl_bless_handler SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	int ret;

	ktrace_lock();

	/* if setting a new "blessed pid" (ktrace owning pid) */
	if (req->newptr != USER_ADDR_NULL) {
		/*
		 * root can bypass the ktrace check when a flag is set (for
		 * backwards compatibility) or when ownership is maintained
		 * across subsystem resets (to allow the user space process
		 * that set ownership to unset it).
		 */
		if (!((ktrace_root_set_owner_allowed ||
		    ktrace_keep_ownership_on_reset) &&
		    kauth_cred_issuser(kauth_cred_get())))
		{
			if ((ret = ktrace_configure(KTRACE_KPERF))) {
				ktrace_unlock();
				return ret;
			}
		}
	} else {
		if ((ret = ktrace_read_check())) {
			ktrace_unlock();
			return ret;
		}
	}

	/* which request */
	if ((uintptr_t)arg1 == REQ_BLESS) {
		ret = sysctl_bless(req);
	} else {
		ret = ENOENT;
	}

	ktrace_unlock();

	return ret;
}

/* root kperf node */

SYSCTL_NODE(, OID_AUTO, kperf, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "kperf");

/* actions */

SYSCTL_NODE(_kperf, OID_AUTO, action, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "action");

SYSCTL_PROC(_kperf_action, OID_AUTO, count,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_COUNT,
    sizeof(int), kperf_sysctl, "I", "Number of actions");

SYSCTL_PROC(_kperf_action, OID_AUTO, samplers,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_SAMPLERS,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "What to sample when a trigger fires an action");

SYSCTL_PROC(_kperf_action, OID_AUTO, userdata,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_USERDATA,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "User data to attribute to action");

SYSCTL_PROC(_kperf_action, OID_AUTO, filter_by_task,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_FILTER_BY_TASK,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Apply a task filter to the action");

SYSCTL_PROC(_kperf_action, OID_AUTO, filter_by_pid,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_FILTER_BY_PID,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Apply a pid filter to the action");

SYSCTL_PROC(_kperf_action, OID_AUTO, ucallstack_depth,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_UCALLSTACK_DEPTH,
    sizeof(int), kperf_sysctl, "I",
    "Maximum number of frames to include in user callstacks");

SYSCTL_PROC(_kperf_action, OID_AUTO, kcallstack_depth,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_KCALLSTACK_DEPTH,
    sizeof(int), kperf_sysctl, "I",
    "Maximum number of frames to include in kernel callstacks");

/* timers */

SYSCTL_NODE(_kperf, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "timer");

SYSCTL_PROC(_kperf_timer, OID_AUTO, count,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_TIMER_COUNT,
    sizeof(int), kperf_sysctl, "I", "Number of time triggers");

SYSCTL_PROC(_kperf_timer, OID_AUTO, period,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_TIMER_PERIOD,
    2 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Timer number and period");

SYSCTL_PROC(_kperf_timer, OID_AUTO, action,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_TIMER_ACTION,
    2 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Timer number and actionid");

SYSCTL_PROC(_kperf_timer, OID_AUTO, pet_timer,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_TIMER_PET,
    sizeof(int), kperf_sysctl, "I", "Which timer ID does PET");

/* kdebug trigger */

SYSCTL_NODE(_kperf, OID_AUTO, kdebug, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "kdebug");

SYSCTL_PROC(_kperf_kdebug, OID_AUTO, action,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_KDEBUG_ACTION,
    sizeof(int), kperf_sysctl, "I", "ID of action to trigger on kdebug events");

SYSCTL_PROC(_kperf_kdebug, OID_AUTO, filter,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_KDEBUG_FILTER,
    sizeof(int), kperf_sysctl, "P",
    "The filter that determines which kdebug events trigger a sample");

/* misc */

SYSCTL_PROC(_kperf, OID_AUTO, sampling,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_SAMPLING,
    sizeof(int), kperf_sysctl, "I", "Sampling running");

SYSCTL_PROC(_kperf, OID_AUTO, reset,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_RESET,
    0, kperf_sysctl, "-", "Reset kperf");

SYSCTL_PROC(_kperf, OID_AUTO, blessed_pid,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, /* must be root */
    (void *)REQ_BLESS,
    sizeof(int), kperf_sysctl_bless_handler, "I", "Blessed pid");

SYSCTL_PROC(_kperf, OID_AUTO, blessed_preempt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_BLESS_PREEMPT,
    sizeof(int), kperf_sysctl, "I", "Blessed preemption");

SYSCTL_PROC(_kperf, OID_AUTO, kdbg_cswitch,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_KDBG_CSWITCH,
    sizeof(int), kperf_sysctl, "I", "Generate context switch info");

SYSCTL_PROC(_kperf, OID_AUTO, pet_idle_rate,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_PET_IDLE_RATE,
    sizeof(int), kperf_sysctl, "I",
    "Rate at which unscheduled threads are forced to be sampled in "
    "PET mode");

SYSCTL_PROC(_kperf, OID_AUTO, lightweight_pet,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_LIGHTWEIGHT_PET,
    sizeof(int), kperf_sysctl, "I",
    "Status of lightweight PET mode");

/* limits */

SYSCTL_NODE(_kperf, OID_AUTO, limits, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "limits");

#define REQ_LIM_PERIOD_NS         (1)
#define REQ_LIM_BG_PERIOD_NS      (2)
#define REQ_LIM_PET_PERIOD_NS     (3)
#define REQ_LIM_BG_PET_PERIOD_NS  (4)

static int
kperf_sysctl_limits SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	int type = (int)arg1;
	uint64_t limit = 0;

	switch (type) {
	case REQ_LIM_PERIOD_NS:
		limit = KP_MIN_PERIOD_NS;
		break;

	case REQ_LIM_BG_PERIOD_NS:
		limit = KP_MIN_PERIOD_BG_NS;
		break;

	case REQ_LIM_PET_PERIOD_NS:
		limit = KP_MIN_PERIOD_PET_NS;
		break;

	case REQ_LIM_BG_PET_PERIOD_NS:
		limit = KP_MIN_PERIOD_PET_BG_NS;
		break;

	default:
		return ENOENT;
	}

	return sysctl_io_number(req, limit, sizeof(limit), &limit, NULL);
}

SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_period_ns,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_LIM_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits,
    "Q", "Minimum timer period in nanoseconds");
SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_bg_period_ns,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_LIM_BG_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits,
    "Q", "Minimum background timer period in nanoseconds");
SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_pet_period_ns,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_LIM_PET_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits,
    "Q", "Minimum PET timer period in nanoseconds");
SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_bg_pet_period_ns,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_LIM_BG_PET_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits,
    "Q", "Minimum background PET timer period in nanoseconds");

/* debug */
SYSCTL_INT(_kperf, OID_AUTO, debug_level, CTLFLAG_RW | CTLFLAG_LOCKED,
    &kperf_debug_level, 0, "debug level");

#if DEVELOPMENT || DEBUG
SYSCTL_QUAD(_kperf, OID_AUTO, already_pending_ipis,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &kperf_pending_ipis, "");
#endif /* DEVELOPMENT || DEBUG */