/*
 * osfmk/kperf/kperfbsd.c — from apple/xnu (xnu-6153.61.1).
 */
1 /*
2 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
/* sysctl interface for parameters from user-land */
30
31 #include <kern/debug.h>
32 #include <libkern/libkern.h>
33 #include <pexpert/pexpert.h>
34 #include <sys/param.h>
35 #include <sys/mman.h>
36 #include <sys/stat.h>
37 #include <sys/sysctl.h>
38 #include <sys/kauth.h>
39
40 #include <kperf/action.h>
41 #include <kperf/context.h>
42 #include <kperf/kdebug_trigger.h>
43 #include <kperf/kperf.h>
44 #include <kperf/kperfbsd.h>
45 #include <kperf/kperf_timer.h>
46 #include <kperf/pet.h>
47 #include <kperf/lazy.h>
48
49 #include <sys/ktrace.h>
50
51 /* Requests from kperf sysctls. */
/*
 * Requests from kperf sysctls.  Each enumerator is passed as `arg1` of a
 * SYSCTL_PROC below and dispatched in kperf_sysctl() (REQ_BLESS goes through
 * kperf_sysctl_bless_handler()).  Values are baked into the sysctl OID
 * definitions, so do not reorder.
 */
enum kperf_request {
	REQ_SAMPLING,                   /* kperf.sampling */
	REQ_RESET,                      /* kperf.reset */

	REQ_ACTION_COUNT,               /* kperf.action.count */
	REQ_ACTION_SAMPLERS,            /* kperf.action.samplers */
	REQ_ACTION_USERDATA,            /* kperf.action.userdata */
	REQ_ACTION_FILTER_BY_TASK,      /* kperf.action.filter_by_task */
	REQ_ACTION_FILTER_BY_PID,       /* kperf.action.filter_by_pid */
	REQ_ACTION_UCALLSTACK_DEPTH,    /* kperf.action.ucallstack_depth */
	REQ_ACTION_KCALLSTACK_DEPTH,    /* kperf.action.kcallstack_depth */

	REQ_TIMER_COUNT,                /* kperf.timer.count */
	REQ_TIMER_PERIOD,               /* kperf.timer.period */
	REQ_TIMER_PET,                  /* kperf.timer.pet_timer */
	REQ_TIMER_ACTION,               /* kperf.timer.action */

	REQ_KDBG_CSWITCH,               /* kperf.kdbg_cswitch */

	REQ_BLESS,                      /* kperf.blessed_pid */
	REQ_BLESS_PREEMPT,              /* kperf.blessed_preempt */

	REQ_PET_IDLE_RATE,              /* kperf.pet_idle_rate */
	REQ_LIGHTWEIGHT_PET,            /* kperf.lightweight_pet */

	REQ_KDEBUG_FILTER,              /* kperf.kdebug.filter */
	REQ_KDEBUG_ACTION,              /* kperf.kdebug.action */

	REQ_LAZY_WAIT_TIME_THRESHOLD,   /* kperf.lazy.wait_time_threshold */
	REQ_LAZY_WAIT_ACTION,           /* kperf.lazy.wait_action */
	REQ_LAZY_CPU_TIME_THRESHOLD,    /* kperf.lazy.cpu_time_threshold */
	REQ_LAZY_CPU_ACTION,            /* kperf.lazy.cpu_action */
};
85
/* Debug verbosity, exported read-write as kperf.debug_level (see below). */
int kperf_debug_level = 0;

#if DEVELOPMENT || DEBUG
/*
 * Exported read-only as kperf.already_pending_ipis below; presumably counts
 * IPIs kperf found already pending — maintained elsewhere in kperf (TODO:
 * confirm against the code that updates it).
 */
_Atomic long long kperf_pending_ipis = 0;
#endif /* DEVELOPMENT || DEBUG */
91
92 /*
93 * kperf has unique requirements from sysctl.
94 *
95 * For simple queries like the number of actions, the normal sysctl style
96 * of get/set works well.
97 *
98 * However, when requesting information about something specific, like an
99 * action, user space needs to provide some contextual information. This
100 * information is stored in a uint64_t array that includes the context, like
101 * the action ID it is interested in. If user space is getting the value from
102 * the kernel, then the get side of the sysctl is valid. If it is setting the
103 * value, then the get pointers are left NULL.
104 *
105 * These functions handle marshalling and unmarshalling data from sysctls.
106 */
107
108 static int
109 kperf_sysctl_get_set_uint32(struct sysctl_req *req,
110 uint32_t (*get)(void), int (*set)(uint32_t))
111 {
112 assert(req != NULL);
113 assert(get != NULL);
114 assert(set != NULL);
115
116 uint32_t value = 0;
117 if (req->oldptr) {
118 value = get();
119 }
120
121 int error = sysctl_io_number(req, value, sizeof(value), &value, NULL);
122
123 if (error || !req->newptr) {
124 return error;
125 }
126
127 return set(value);
128 }
129
130 static int
131 kperf_sysctl_get_set_int(struct sysctl_req *req,
132 int (*get)(void), int (*set)(int))
133 {
134 assert(req != NULL);
135 assert(get != NULL);
136 assert(set != NULL);
137
138 int value = 0;
139 if (req->oldptr) {
140 value = get();
141 }
142
143 int error = sysctl_io_number(req, value, sizeof(value), &value, NULL);
144
145 if (error || !req->newptr) {
146 return error;
147 }
148
149 return set(value);
150 }
151
152 static int
153 kperf_sysctl_get_set_uint64(struct sysctl_req *req,
154 uint64_t (*get)(void), int (*set)(uint64_t))
155 {
156 assert(req != NULL);
157 assert(get != NULL);
158 assert(set != NULL);
159
160 uint64_t value = 0;
161 if (req->oldptr) {
162 value = get();
163 }
164
165 int error = sysctl_io_number(req, value, sizeof(value), &value, NULL);
166
167 if (error || !req->newptr) {
168 return error;
169 }
170
171 return set(value);
172 }
173
174 static int
175 kperf_sysctl_get_set_unsigned_uint32(struct sysctl_req *req,
176 int (*get)(unsigned int, uint32_t *), int (*set)(unsigned int, uint32_t))
177 {
178 assert(req != NULL);
179 assert(get != NULL);
180 assert(set != NULL);
181
182 int error = 0;
183 uint64_t inputs[2] = {};
184
185 if (req->newptr == USER_ADDR_NULL) {
186 return EFAULT;
187 }
188
189 if ((error = copyin(req->newptr, inputs, sizeof(inputs)))) {
190 return error;
191 }
192
193 unsigned int action_id = (unsigned int)inputs[0];
194 uint32_t new_value = (uint32_t)inputs[1];
195
196 if (req->oldptr != USER_ADDR_NULL) {
197 uint32_t value_out = 0;
198 if ((error = get(action_id, &value_out))) {
199 return error;
200 }
201
202 inputs[1] = value_out;
203
204 return copyout(inputs, req->oldptr, sizeof(inputs));
205 } else {
206 return set(action_id, new_value);
207 }
208 }
209
210 /*
211 * These functions are essentially the same as the generic
212 * kperf_sysctl_get_set_unsigned_uint32, except they have unique input sizes.
213 */
214
215 static int
216 sysctl_timer_period(struct sysctl_req *req)
217 {
218 int error;
219 uint64_t inputs[2] = {};
220
221 assert(req != NULL);
222
223 if (req->newptr == USER_ADDR_NULL) {
224 return EFAULT;
225 }
226
227 if ((error = copyin(req->newptr, inputs, sizeof(inputs)))) {
228 return error;
229 }
230
231 unsigned int timer = (unsigned int)inputs[0];
232 uint64_t new_period = inputs[1];
233
234 if (req->oldptr != USER_ADDR_NULL) {
235 uint64_t period_out = 0;
236 if ((error = kperf_timer_get_period(timer, &period_out))) {
237 return error;
238 }
239
240 inputs[1] = period_out;
241
242 return copyout(inputs, req->oldptr, sizeof(inputs));
243 } else {
244 return kperf_timer_set_period(timer, new_period);
245 }
246 }
247
248 static int
249 sysctl_action_filter(struct sysctl_req *req, bool is_task_t)
250 {
251 int error = 0;
252 uint64_t inputs[2] = {};
253
254 assert(req != NULL);
255
256 if (req->newptr == USER_ADDR_NULL) {
257 return EFAULT;
258 }
259
260 if ((error = copyin(req->newptr, inputs, sizeof(inputs)))) {
261 return error;
262 }
263
264 unsigned int actionid = (unsigned int)inputs[0];
265 int new_filter = (int)inputs[1];
266
267 if (req->oldptr != USER_ADDR_NULL) {
268 int filter_out;
269 if ((error = kperf_action_get_filter(actionid, &filter_out))) {
270 return error;
271 }
272
273 inputs[1] = filter_out;
274 return copyout(inputs, req->oldptr, sizeof(inputs));
275 } else {
276 int pid = is_task_t ? kperf_port_to_pid((mach_port_name_t)new_filter)
277 : new_filter;
278
279 return kperf_action_set_filter(actionid, pid);
280 }
281 }
282
283 static int
284 sysctl_bless(struct sysctl_req *req)
285 {
286 int value = ktrace_get_owning_pid();
287 int error = sysctl_io_number(req, value, sizeof(value), &value, NULL);
288
289 if (error || !req->newptr) {
290 return error;
291 }
292
293 return ktrace_set_owning_pid(value);
294 }
295
296 /* sysctl handlers that use the generic functions */
297
298 static int
299 sysctl_action_samplers(struct sysctl_req *req)
300 {
301 return kperf_sysctl_get_set_unsigned_uint32(req,
302 kperf_action_get_samplers, kperf_action_set_samplers);
303 }
304
305 static int
306 sysctl_action_userdata(struct sysctl_req *req)
307 {
308 return kperf_sysctl_get_set_unsigned_uint32(req,
309 kperf_action_get_userdata, kperf_action_set_userdata);
310 }
311
312 static int
313 sysctl_action_ucallstack_depth(struct sysctl_req *req)
314 {
315 return kperf_sysctl_get_set_unsigned_uint32(req,
316 kperf_action_get_ucallstack_depth, kperf_action_set_ucallstack_depth);
317 }
318
319 static int
320 sysctl_action_kcallstack_depth(struct sysctl_req *req)
321 {
322 return kperf_sysctl_get_set_unsigned_uint32(req,
323 kperf_action_get_kcallstack_depth, kperf_action_set_kcallstack_depth);
324 }
325
326 static int
327 sysctl_kdebug_action(struct sysctl_req *req)
328 {
329 return kperf_sysctl_get_set_int(req, kperf_kdebug_get_action,
330 kperf_kdebug_set_action);
331 }
332
333 static int
334 sysctl_kdebug_filter(struct sysctl_req *req)
335 {
336 assert(req != NULL);
337
338 if (req->oldptr != USER_ADDR_NULL) {
339 struct kperf_kdebug_filter *filter = NULL;
340 uint32_t n_debugids = kperf_kdebug_get_filter(&filter);
341 size_t filter_size = KPERF_KDEBUG_FILTER_SIZE(n_debugids);
342
343 if (n_debugids == 0) {
344 return EINVAL;
345 }
346
347 return SYSCTL_OUT(req, filter, filter_size);
348 } else if (req->newptr != USER_ADDR_NULL) {
349 return kperf_kdebug_set_filter(req->newptr, (uint32_t)req->newlen);
350 } else {
351 return EINVAL;
352 }
353 }
354
355 static int
356 kperf_sampling_set(uint32_t sample_start)
357 {
358 if (sample_start) {
359 return kperf_sampling_enable();
360 } else {
361 return kperf_sampling_disable();
362 }
363 }
364
365 static int
366 sysctl_sampling(struct sysctl_req *req)
367 {
368 return kperf_sysctl_get_set_uint32(req, kperf_sampling_status,
369 kperf_sampling_set);
370 }
371
372 static int
373 sysctl_action_count(struct sysctl_req *req)
374 {
375 return kperf_sysctl_get_set_uint32(req, kperf_action_get_count,
376 kperf_action_set_count);
377 }
378
379 static int
380 sysctl_timer_count(struct sysctl_req *req)
381 {
382 return kperf_sysctl_get_set_uint32(req, kperf_timer_get_count,
383 kperf_timer_set_count);
384 }
385
386 static int
387 sysctl_timer_action(struct sysctl_req *req)
388 {
389 return kperf_sysctl_get_set_unsigned_uint32(req, kperf_timer_get_action,
390 kperf_timer_set_action);
391 }
392
393 static int
394 sysctl_timer_pet(struct sysctl_req *req)
395 {
396 return kperf_sysctl_get_set_uint32(req, kperf_timer_get_petid,
397 kperf_timer_set_petid);
398 }
399
/*
 * kperf.blessed_preempt: whether root may set the ktrace-owning ("blessed")
 * pid without owning ktrace (see kperf_sysctl_bless_handler).  Reads and
 * writes the ktrace global directly, so sysctl_io_number must be given the
 * global's address.
 */
static int
sysctl_bless_preempt(struct sysctl_req *req)
{
	return sysctl_io_number(req, ktrace_root_set_owner_allowed,
	    sizeof(ktrace_root_set_owner_allowed),
	    &ktrace_root_set_owner_allowed, NULL);
}
407
408 static int
409 sysctl_kperf_reset(struct sysctl_req *req)
410 {
411 int should_reset = 0;
412
413 int error = sysctl_io_number(req, should_reset, sizeof(should_reset),
414 &should_reset, NULL);
415 if (error) {
416 return error;
417 }
418
419 if (should_reset) {
420 ktrace_reset(KTRACE_KPERF);
421 }
422 return 0;
423 }
424
425 static int
426 sysctl_pet_idle_rate(struct sysctl_req *req)
427 {
428 return kperf_sysctl_get_set_int(req, kperf_get_pet_idle_rate,
429 kperf_set_pet_idle_rate);
430 }
431
432 static int
433 sysctl_lightweight_pet(struct sysctl_req *req)
434 {
435 return kperf_sysctl_get_set_int(req, kperf_get_lightweight_pet,
436 kperf_set_lightweight_pet);
437 }
438
439 static int
440 sysctl_kdbg_cswitch(struct sysctl_req *req)
441 {
442 return kperf_sysctl_get_set_int(req, kperf_kdbg_cswitch_get,
443 kperf_kdbg_cswitch_set);
444 }
445
446 static int
447 sysctl_lazy_wait_time_threshold(struct sysctl_req *req)
448 {
449 return kperf_sysctl_get_set_uint64(req, kperf_lazy_get_wait_time_threshold,
450 kperf_lazy_set_wait_time_threshold);
451 }
452
453 static int
454 sysctl_lazy_wait_action(struct sysctl_req *req)
455 {
456 return kperf_sysctl_get_set_int(req, kperf_lazy_get_wait_action,
457 kperf_lazy_set_wait_action);
458 }
459
460 static int
461 sysctl_lazy_cpu_time_threshold(struct sysctl_req *req)
462 {
463 return kperf_sysctl_get_set_uint64(req, kperf_lazy_get_cpu_time_threshold,
464 kperf_lazy_set_cpu_time_threshold);
465 }
466
467 static int
468 sysctl_lazy_cpu_action(struct sysctl_req *req)
469 {
470 return kperf_sysctl_get_set_int(req, kperf_lazy_get_cpu_action,
471 kperf_lazy_set_cpu_action);
472 }
473
474 static int
475 kperf_sysctl SYSCTL_HANDLER_ARGS
476 {
477 #pragma unused(oidp, arg2)
478 int ret;
479 enum kperf_request type = (enum kperf_request)arg1;
480
481 ktrace_lock();
482
483 if (req->oldptr == USER_ADDR_NULL && req->newptr != USER_ADDR_NULL) {
484 if ((ret = ktrace_configure(KTRACE_KPERF))) {
485 ktrace_unlock();
486 return ret;
487 }
488 } else {
489 if ((ret = ktrace_read_check())) {
490 ktrace_unlock();
491 return ret;
492 }
493 }
494
495 /* which request */
496 switch (type) {
497 case REQ_ACTION_COUNT:
498 ret = sysctl_action_count(req);
499 break;
500 case REQ_ACTION_SAMPLERS:
501 ret = sysctl_action_samplers(req);
502 break;
503 case REQ_ACTION_USERDATA:
504 ret = sysctl_action_userdata(req);
505 break;
506 case REQ_TIMER_COUNT:
507 ret = sysctl_timer_count(req);
508 break;
509 case REQ_TIMER_PERIOD:
510 ret = sysctl_timer_period(req);
511 break;
512 case REQ_TIMER_PET:
513 ret = sysctl_timer_pet(req);
514 break;
515 case REQ_TIMER_ACTION:
516 ret = sysctl_timer_action(req);
517 break;
518 case REQ_SAMPLING:
519 ret = sysctl_sampling(req);
520 break;
521 case REQ_KDBG_CSWITCH:
522 ret = sysctl_kdbg_cswitch(req);
523 break;
524 case REQ_ACTION_FILTER_BY_TASK:
525 ret = sysctl_action_filter(req, true);
526 break;
527 case REQ_ACTION_FILTER_BY_PID:
528 ret = sysctl_action_filter(req, false);
529 break;
530 case REQ_KDEBUG_ACTION:
531 ret = sysctl_kdebug_action(req);
532 break;
533 case REQ_KDEBUG_FILTER:
534 ret = sysctl_kdebug_filter(req);
535 break;
536 case REQ_PET_IDLE_RATE:
537 ret = sysctl_pet_idle_rate(req);
538 break;
539 case REQ_BLESS_PREEMPT:
540 ret = sysctl_bless_preempt(req);
541 break;
542 case REQ_RESET:
543 ret = sysctl_kperf_reset(req);
544 break;
545 case REQ_ACTION_UCALLSTACK_DEPTH:
546 ret = sysctl_action_ucallstack_depth(req);
547 break;
548 case REQ_ACTION_KCALLSTACK_DEPTH:
549 ret = sysctl_action_kcallstack_depth(req);
550 break;
551 case REQ_LIGHTWEIGHT_PET:
552 ret = sysctl_lightweight_pet(req);
553 break;
554 case REQ_LAZY_WAIT_TIME_THRESHOLD:
555 ret = sysctl_lazy_wait_time_threshold(req);
556 break;
557 case REQ_LAZY_WAIT_ACTION:
558 ret = sysctl_lazy_wait_action(req);
559 break;
560 case REQ_LAZY_CPU_TIME_THRESHOLD:
561 ret = sysctl_lazy_cpu_time_threshold(req);
562 break;
563 case REQ_LAZY_CPU_ACTION:
564 ret = sysctl_lazy_cpu_action(req);
565 break;
566 default:
567 ret = ENOENT;
568 break;
569 }
570
571 ktrace_unlock();
572
573 return ret;
574 }
575
576 static int
577 kperf_sysctl_bless_handler SYSCTL_HANDLER_ARGS
578 {
579 #pragma unused(oidp, arg2)
580 int ret;
581
582 ktrace_lock();
583
584 /* if setting a new "blessed pid" (ktrace owning pid) */
585 if (req->newptr != USER_ADDR_NULL) {
586 /*
587 * root can bypass the ktrace check when a flag is set (for
588 * backwards compatibility) or when ownership is maintained over
589 * subsystems resets (to allow the user space process that set
590 * ownership to unset it).
591 */
592 if (!((ktrace_root_set_owner_allowed ||
593 ktrace_keep_ownership_on_reset) &&
594 kauth_cred_issuser(kauth_cred_get()))) {
595 if ((ret = ktrace_configure(KTRACE_KPERF))) {
596 ktrace_unlock();
597 return ret;
598 }
599 }
600 } else {
601 if ((ret = ktrace_read_check())) {
602 ktrace_unlock();
603 return ret;
604 }
605 }
606
607 /* which request */
608 if ((uintptr_t)arg1 == REQ_BLESS) {
609 ret = sysctl_bless(req);
610 } else {
611 ret = ENOENT;
612 }
613
614 ktrace_unlock();
615
616 return ret;
617 }
618
/* root kperf node */

SYSCTL_NODE(, OID_AUTO, kperf, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "kperf");

/* actions */

SYSCTL_NODE(_kperf, OID_AUTO, action, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "action");

/* kperf.action.count — handled by sysctl_action_count */
SYSCTL_PROC(_kperf_action, OID_AUTO, count,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED |
    CTLFLAG_MASKED,
    (void *)REQ_ACTION_COUNT,
    sizeof(int), kperf_sysctl, "I", "Number of actions");

/* kperf.action.samplers — (action ID, samplers) pair */
SYSCTL_PROC(_kperf_action, OID_AUTO, samplers,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_SAMPLERS,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "What to sample when a trigger fires an action");

/* kperf.action.userdata — (action ID, userdata) pair */
SYSCTL_PROC(_kperf_action, OID_AUTO, userdata,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_USERDATA,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "User data to attribute to action");

/* kperf.action.filter_by_task — filter word is a task port name */
SYSCTL_PROC(_kperf_action, OID_AUTO, filter_by_task,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_FILTER_BY_TASK,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Apply a task filter to the action");

/* kperf.action.filter_by_pid — filter word is a pid */
SYSCTL_PROC(_kperf_action, OID_AUTO, filter_by_pid,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_FILTER_BY_PID,
    3 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Apply a pid filter to the action");

/* kperf.action.ucallstack_depth */
SYSCTL_PROC(_kperf_action, OID_AUTO, ucallstack_depth,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_UCALLSTACK_DEPTH,
    sizeof(int), kperf_sysctl, "I",
    "Maximum number of frames to include in user callstacks");

/* kperf.action.kcallstack_depth */
SYSCTL_PROC(_kperf_action, OID_AUTO, kcallstack_depth,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_ACTION_KCALLSTACK_DEPTH,
    sizeof(int), kperf_sysctl, "I",
    "Maximum number of frames to include in kernel callstacks");
670
/* timers */

SYSCTL_NODE(_kperf, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "timer");

/* kperf.timer.count — handled by sysctl_timer_count */
SYSCTL_PROC(_kperf_timer, OID_AUTO, count,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED
    | CTLFLAG_MASKED,
    (void *)REQ_TIMER_COUNT,
    sizeof(int), kperf_sysctl, "I", "Number of time triggers");

/* kperf.timer.period — (timer number, period) pair */
SYSCTL_PROC(_kperf_timer, OID_AUTO, period,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_TIMER_PERIOD,
    2 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Timer number and period");

/* kperf.timer.action — (timer number, action ID) pair */
SYSCTL_PROC(_kperf_timer, OID_AUTO, action,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_TIMER_ACTION,
    2 * sizeof(uint64_t), kperf_sysctl, "UQ",
    "Timer number and actionid");

/* kperf.timer.pet_timer — which timer ID drives PET */
SYSCTL_PROC(_kperf_timer, OID_AUTO, pet_timer,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED
    | CTLFLAG_MASKED,
    (void *)REQ_TIMER_PET,
    sizeof(int), kperf_sysctl, "I", "Which timer ID does PET");
699
/* kdebug trigger */

SYSCTL_NODE(_kperf, OID_AUTO, kdebug, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "kdebug");

/* kperf.kdebug.action — handled by sysctl_kdebug_action */
SYSCTL_PROC(_kperf_kdebug, OID_AUTO, action,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED
    | CTLFLAG_MASKED,
    (void*)REQ_KDEBUG_ACTION,
    sizeof(int), kperf_sysctl, "I", "ID of action to trigger on kdebug events");

/* kperf.kdebug.filter — variable-length filter, handled by sysctl_kdebug_filter */
SYSCTL_PROC(_kperf_kdebug, OID_AUTO, filter,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void*)REQ_KDEBUG_FILTER,
    sizeof(int), kperf_sysctl, "P", "The filter that determines which kdebug events trigger a sample");
715
/* lazy sampling */

SYSCTL_NODE(_kperf, OID_AUTO, lazy, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "lazy");

/* kperf.lazy.wait_time_threshold */
SYSCTL_PROC(_kperf_lazy, OID_AUTO, wait_time_threshold,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_LAZY_WAIT_TIME_THRESHOLD,
    sizeof(uint64_t), kperf_sysctl, "UQ",
    "How many ticks a thread must wait to take a sample");

/* kperf.lazy.wait_action */
SYSCTL_PROC(_kperf_lazy, OID_AUTO, wait_action,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_LAZY_WAIT_ACTION,
    sizeof(uint64_t), kperf_sysctl, "UQ",
    "Which action to fire when a thread waits longer than threshold");

/* kperf.lazy.cpu_time_threshold */
SYSCTL_PROC(_kperf_lazy, OID_AUTO, cpu_time_threshold,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_LAZY_CPU_TIME_THRESHOLD,
    sizeof(uint64_t), kperf_sysctl, "UQ",
    "Minimum number of ticks a CPU must run between samples");

/* kperf.lazy.cpu_action */
SYSCTL_PROC(_kperf_lazy, OID_AUTO, cpu_action,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_LAZY_CPU_ACTION,
    sizeof(uint64_t), kperf_sysctl, "UQ",
    "Which action to fire for lazy CPU samples");
744
/* misc */

/* kperf.sampling — handled by sysctl_sampling */
SYSCTL_PROC(_kperf, OID_AUTO, sampling,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED
    | CTLFLAG_MASKED,
    (void *)REQ_SAMPLING,
    sizeof(int), kperf_sysctl, "I", "Sampling running");

/* kperf.reset — handled by sysctl_kperf_reset */
SYSCTL_PROC(_kperf, OID_AUTO, reset,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)REQ_RESET,
    0, kperf_sysctl, "-", "Reset kperf");

/* kperf.blessed_pid — goes through the bless handler, not kperf_sysctl */
SYSCTL_PROC(_kperf, OID_AUTO, blessed_pid,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED /* must be root */
    | CTLFLAG_MASKED,
    (void *)REQ_BLESS,
    sizeof(int), kperf_sysctl_bless_handler, "I", "Blessed pid");

/* kperf.blessed_preempt — handled by sysctl_bless_preempt */
SYSCTL_PROC(_kperf, OID_AUTO, blessed_preempt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED |
    CTLFLAG_MASKED,
    (void *)REQ_BLESS_PREEMPT,
    sizeof(int), kperf_sysctl, "I", "Blessed preemption");

/* kperf.kdbg_cswitch — handled by sysctl_kdbg_cswitch */
SYSCTL_PROC(_kperf, OID_AUTO, kdbg_cswitch,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED
    | CTLFLAG_MASKED,
    (void *)REQ_KDBG_CSWITCH,
    sizeof(int), kperf_sysctl, "I", "Generate context switch info");

/* kperf.pet_idle_rate — handled by sysctl_pet_idle_rate */
SYSCTL_PROC(_kperf, OID_AUTO, pet_idle_rate,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED
    | CTLFLAG_MASKED,
    (void *)REQ_PET_IDLE_RATE,
    sizeof(int), kperf_sysctl, "I",
    "Rate at which unscheduled threads are forced to be sampled in "
    "PET mode");

/* kperf.lightweight_pet — handled by sysctl_lightweight_pet */
SYSCTL_PROC(_kperf, OID_AUTO, lightweight_pet,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED
    | CTLFLAG_MASKED,
    (void *)REQ_LIGHTWEIGHT_PET,
    sizeof(int), kperf_sysctl, "I",
    "Status of lightweight PET mode");
790
/* limits */

SYSCTL_NODE(_kperf, OID_AUTO, limits, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "limits");

/*
 * Requests from kperf.limits.* sysctls; passed as `arg1` of the
 * kperf_sysctl_limits SYSCTL_PROCs below.
 */
enum kperf_limit_request {
	REQ_LIM_PERIOD_NS,        /* kperf.limits.timer_min_period_ns */
	REQ_LIM_BG_PERIOD_NS,     /* kperf.limits.timer_min_bg_period_ns */
	REQ_LIM_PET_PERIOD_NS,    /* kperf.limits.timer_min_pet_period_ns */
	REQ_LIM_BG_PET_PERIOD_NS, /* kperf.limits.timer_min_bg_pet_period_ns */
};
802
803 static int
804 kperf_sysctl_limits SYSCTL_HANDLER_ARGS
805 {
806 #pragma unused(oidp, arg2)
807 enum kperf_limit_request type = (enum kperf_limit_request)arg1;
808 uint64_t limit = 0;
809
810 switch (type) {
811 case REQ_LIM_PERIOD_NS:
812 limit = KP_MIN_PERIOD_NS;
813 break;
814
815 case REQ_LIM_BG_PERIOD_NS:
816 limit = KP_MIN_PERIOD_BG_NS;
817 break;
818
819 case REQ_LIM_PET_PERIOD_NS:
820 limit = KP_MIN_PERIOD_PET_NS;
821 break;
822
823 case REQ_LIM_BG_PET_PERIOD_NS:
824 limit = KP_MIN_PERIOD_PET_BG_NS;
825 break;
826
827 default:
828 return ENOENT;
829 }
830
831 return sysctl_io_number(req, limit, sizeof(limit), &limit, NULL);
832 }
833
/* kperf.limits.* — read-only minimum periods, served by kperf_sysctl_limits */
SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_period_ns,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_LIM_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits,
    "Q", "Minimum timer period in nanoseconds");
SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_bg_period_ns,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_LIM_BG_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits,
    "Q", "Minimum background timer period in nanoseconds");
SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_pet_period_ns,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_LIM_PET_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits,
    "Q", "Minimum PET timer period in nanoseconds");
SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_bg_pet_period_ns,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_LIM_BG_PET_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits,
    "Q", "Minimum background PET timer period in nanoseconds");

/* debug */
SYSCTL_INT(_kperf, OID_AUTO, debug_level, CTLFLAG_RW | CTLFLAG_LOCKED,
    &kperf_debug_level, 0, "debug level");

#if DEVELOPMENT || DEBUG
/* read-only counter backed by kperf_pending_ipis above */
SYSCTL_QUAD(_kperf, OID_AUTO, already_pending_ipis,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &kperf_pending_ipis, "");
#endif /* DEVELOPMENT || DEBUG */