]> git.saurik.com Git - apple/xnu.git/blame_incremental - bsd/dev/monotonic.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / bsd / dev / monotonic.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <kern/monotonic.h>
30#include <machine/machine_routines.h>
31#include <machine/monotonic.h>
32#include <pexpert/pexpert.h>
33#include <sys/param.h> /* NULL */
34#include <sys/stat.h> /* dev_t */
35#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
36#include <sys/conf.h> /* must come after sys/stat.h */
37#include <sys/sysctl.h>
38#include <sys/sysproto.h>
39#include <sys/systm.h>
40#include <sys/types.h>
41#include <sys/monotonic.h>
42
/* Character-device entry points for the monotonic devices (see mt_cdevsw). */
static int mt_cdev_open(dev_t dev, int flags, int devtype, proc_t p);
static int mt_cdev_close(dev_t dev, int flags, int devtype, proc_t p);
static int mt_cdev_ioctl(dev_t dev, unsigned long cmd, char *uptr, int fflag,
    proc_t p);

/* devfs directory under which the per-device nodes are created. */
#define MT_NODE "monotonic"
49
/*
 * Character-device switch table: only open, close, and ioctl are
 * implemented; all other operations are filled with the standard
 * eno_* error stubs.
 */
static const struct cdevsw mt_cdevsw = {
	.d_open = mt_cdev_open,
	.d_close = mt_cdev_close,
	.d_ioctl = mt_cdev_ioctl,

	.d_read = eno_rdwrt, .d_write = eno_rdwrt, .d_stop = eno_stop,
	.d_reset = eno_reset, .d_ttys = NULL, .d_select = eno_select,
	.d_mmap = eno_mmap, .d_strategy = eno_strat, .d_type = 0
};
59
60/*
61 * Written at initialization, read-only thereafter.
62 */
63LCK_GRP_DECLARE(mt_lock_grp, MT_NODE);
64static int mt_dev_major;
65
66static mt_device_t
67mt_get_device(dev_t devnum)
68{
69 return &mt_devices[minor(devnum)];
70}
71
/* Acquire a device's mutex. */
static void
mt_device_lock(mt_device_t dev)
{
	lck_mtx_lock(&dev->mtd_lock);
}
77
/* Release a device's mutex. */
static void
mt_device_unlock(mt_device_t dev)
{
	lck_mtx_unlock(&dev->mtd_lock);
}
83
/* Debug-only check that the caller holds the device's mutex. */
static void
mt_device_assert_lock_held(__assert_only mt_device_t dev)
{
	LCK_MTX_ASSERT(&dev->mtd_lock, LCK_MTX_ASSERT_OWNED);
}
89
/* Debug-only check that the device has been opened (marked in-use). */
static void
mt_device_assert_inuse(__assert_only mt_device_t dev)
{
	assert(dev->mtd_inuse == true);
}
95
/*
 * Register the monotonic character devices.  Allocates a major number,
 * then creates one /dev/monotonic/<name> node per entry in mt_devices
 * whose mtd_init callback succeeds, and initializes each registered
 * device's mutex.  Called once at startup; always returns 0 (failures to
 * register are fatal via panic).
 */
int
mt_dev_init(void)
{
	mt_dev_major = cdevsw_add(-1 /* allocate a major number */, &mt_cdevsw);
	if (mt_dev_major < 0) {
		panic("monotonic: cdevsw_add failed: %d", mt_dev_major);
		__builtin_unreachable();
	}

	for (int i = 0; i < MT_NDEVS; i++) {
		/* skip devices whose init callback reports failure */
		if (mt_devices[i].mtd_init(&mt_devices[i])) {
			continue;
		}

		assert(mt_devices[i].mtd_ncounters > 0);

		dev_t dev = makedev(mt_dev_major, i);
		char name[128];
		snprintf(name, sizeof(name), MT_NODE "/%s", mt_devices[i].mtd_name);
		/* 0666: node is openable by anyone; exclusivity is enforced in open */
		void *node = devfs_make_node(dev, DEVFS_CHAR, UID_ROOT,
		    GID_WINDOWSERVER, 0666, name);
		if (!node) {
			panic("monotonic: devfs_make_node failed for '%s'",
			    mt_devices[i].mtd_name);
			__builtin_unreachable();
		}

		lck_mtx_init(&mt_devices[i].mtd_lock, &mt_lock_grp, LCK_ATTR_NULL);
	}

	return 0;
}
128
129static int
130mt_cdev_open(dev_t devnum, __unused int flags, __unused int devtype,
131 __unused proc_t p)
132{
133 int error = 0;
134
135 mt_device_t dev = mt_get_device(devnum);
136 mt_device_lock(dev);
137 if (dev->mtd_inuse) {
138 error = EBUSY;
139 } else {
140 dev->mtd_inuse = true;
141 }
142 mt_device_unlock(dev);
143
144 return error;
145}
146
147static int
148mt_cdev_close(dev_t devnum, __unused int flags, __unused int devtype,
149 __unused struct proc *p)
150{
151 mt_device_t dev = mt_get_device(devnum);
152
153 mt_device_lock(dev);
154 mt_device_assert_inuse(dev);
155 dev->mtd_inuse = false;
156 dev->mtd_reset();
157 mt_device_unlock(dev);
158
159 return 0;
160}
161
162static int
163mt_ctl_add(mt_device_t dev, user_addr_t uptr)
164{
165 int error;
166 uint32_t ctr;
167 union monotonic_ctl_add ctl;
168
169 mt_device_assert_lock_held(dev);
170
171 error = copyin(uptr, &ctl, sizeof(ctl.in));
172 if (error) {
173 return error;
174 }
175
176 error = dev->mtd_add(&ctl.in.config, &ctr);
177 if (error) {
178 return error;
179 }
180
181 ctl.out.ctr = ctr;
182
183 error = copyout(&ctl, uptr, sizeof(ctl.out));
184 if (error) {
185 return error;
186 }
187
188 return 0;
189}
190
191static int
192mt_ctl_counts(mt_device_t dev, user_addr_t uptr)
193{
194 int error;
195 union monotonic_ctl_counts ctl;
196
197 mt_device_assert_lock_held(dev);
198
199 error = copyin(uptr, &ctl, sizeof(ctl.in));
200 if (error) {
201 return error;
202 }
203
204 if (ctl.in.ctr_mask == 0) {
205 return EINVAL;
206 }
207
208 {
209 uint64_t counts[dev->mtd_nmonitors][dev->mtd_ncounters];
210 memset(counts, 0,
211 dev->mtd_ncounters * dev->mtd_nmonitors * sizeof(counts[0][0]));
212 error = dev->mtd_read(ctl.in.ctr_mask, (uint64_t *)counts);
213 if (error) {
214 return error;
215 }
216
217 error = copyout(&counts, uptr, sizeof(counts));
218 if (error) {
219 return error;
220 }
221 }
222
223 return 0;
224}
225
226static int
227mt_ctl_enable(mt_device_t dev, user_addr_t uptr)
228{
229 int error;
230 union monotonic_ctl_enable ctl;
231
232 mt_device_assert_lock_held(dev);
233
234 error = copyin(uptr, &ctl, sizeof(ctl));
235 if (error) {
236 return error;
237 }
238
239 dev->mtd_enable(ctl.in.enable);
240
241 return 0;
242}
243
/*
 * MT_IOC_RESET: forward a reset request to the device.  Caller must hold
 * the device lock.  Always succeeds.
 */
static int
mt_ctl_reset(mt_device_t dev)
{
	mt_device_assert_lock_held(dev);
	dev->mtd_reset();
	return 0;
}
251
/*
 * ioctl dispatcher for the monotonic devices.  All commands run with the
 * device lock held; unknown commands return ENODEV.
 */
static int
mt_cdev_ioctl(dev_t devnum, unsigned long cmd, char *arg, __unused int flags,
    __unused proc_t p)
{
	int error = ENODEV;
	/*
	 * arg points at the kernel copy of the ioctl argument, which here is
	 * a user-space pointer the individual handlers copyin/copyout through.
	 */
	user_addr_t uptr = *(user_addr_t *)(void *)arg;

	mt_device_t dev = mt_get_device(devnum);
	mt_device_lock(dev);

	switch (cmd) {
	case MT_IOC_RESET:
		error = mt_ctl_reset(dev);
		break;

	case MT_IOC_ADD:
		error = mt_ctl_add(dev, uptr);
		break;

	case MT_IOC_ENABLE:
		error = mt_ctl_enable(dev, uptr);
		break;

	case MT_IOC_COUNTS:
		error = mt_ctl_counts(dev, uptr);
		break;

	case MT_IOC_GET_INFO: {
		/* report the device's monitor/counter geometry */
		union monotonic_ctl_info info = {
			.out = {
				.nmonitors = dev->mtd_nmonitors,
				.ncounters = dev->mtd_ncounters,
			},
		};
		error = copyout(&info, uptr, sizeof(info));
		break;
	}

	default:
		error = ENODEV;
		break;
	}

	mt_device_unlock(dev);

	return error;
}
299
300int
301thread_selfcounts(__unused struct proc *p,
302 struct thread_selfcounts_args *uap, __unused int *ret_out)
303{
304 switch (uap->type) {
305 case 1: {
306 uint64_t counts[2] = { 0 };
307 uint64_t thread_counts[MT_CORE_NFIXED] = { 0 };
308
309 mt_cur_thread_fixed_counts(thread_counts);
310
311#ifdef MT_CORE_INSTRS
312 counts[0] = thread_counts[MT_CORE_INSTRS];
313#endif /* defined(MT_CORE_INSTRS) */
314 counts[1] = thread_counts[MT_CORE_CYCLES];
315
316 return copyout(counts, uap->buf, MIN(sizeof(counts), uap->nbytes));
317 }
318 default:
319 return EINVAL;
320 }
321}
322
/* Selector values passed through arg1 to the shared mt_sysctl handler. */
enum mt_sysctl {
	MT_SUPPORTED,           /* whether monotonic counting is supported */
	MT_PMIS,                /* number of PMIs seen */
	MT_RETROGRADE,          /* count of backwards-moving counter observations */
	MT_TASK_THREAD,         /* task/thread counting availability */
	MT_DEBUG,               /* read/write the debug flag */
	MT_KDBG_TEST,           /* emit test kdebug events */
	MT_FIX_CPU_PERF,        /* measure CPU counter access overhead */
	MT_FIX_THREAD_PERF,     /* measure thread counter access overhead */
	MT_FIX_TASK_PERF,       /* measure task counter access overhead */
};
334
/*
 * Shared sysctl handler for all kern.monotonic.* nodes.  arg1 carries an
 * enum mt_sysctl selector identifying which node is being serviced.  The
 * MT_FIX_*_PERF cases measure the overhead of two back-to-back counter
 * reads (with interrupts disabled) and copy out the instruction/cycle
 * deltas via the shared copyout_counts tail.
 */
static int
mt_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	uint64_t start[MT_CORE_NFIXED] = { 0 }, end[MT_CORE_NFIXED] = { 0 };
	uint64_t counts[2] = { 0 };

	switch ((enum mt_sysctl)arg1) {
	case MT_SUPPORTED:
		return sysctl_io_number(req, (int)mt_core_supported, sizeof(int), NULL, NULL);
	case MT_PMIS:
		return sysctl_io_number(req, mt_count_pmis(), sizeof(uint64_t), NULL, NULL);
	case MT_RETROGRADE: {
		uint64_t value = os_atomic_load_wide(&mt_retrograde, relaxed);
		return sysctl_io_number(req, value, sizeof(mt_retrograde), NULL, NULL);
	}
	case MT_TASK_THREAD:
		return sysctl_io_number(req, (int)mt_core_supported, sizeof(int), NULL, NULL);
	case MT_DEBUG: {
		int value = mt_debug;

		/* read-modify-write through a local so a failed write leaves mt_debug intact */
		int r = sysctl_io_number(req, value, sizeof(value), &value, NULL);
		if (r) {
			return r;
		}
		mt_debug = value;

		return 0;
	}
	case MT_KDBG_TEST: {
		/* write-only trigger: require a new value to be supplied */
		if (req->newptr == USER_ADDR_NULL) {
			return EINVAL;
		}

		/* emit paired start/end test events with interrupts masked */
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		MT_KDBG_TMPCPU_START(0x3fff);
		MT_KDBG_TMPCPU_END(0x3fff);

		MT_KDBG_TMPTH_START(0x3fff);
		MT_KDBG_TMPTH_END(0x3fff);
		ml_set_interrupts_enabled(intrs_en);

		return 0;
	}
	case MT_FIX_CPU_PERF: {
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		mt_fixed_counts(start);
		mt_fixed_counts(end);
		ml_set_interrupts_enabled(intrs_en);

		goto copyout_counts;
	}
	case MT_FIX_THREAD_PERF: {
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		mt_cur_thread_fixed_counts(start);
		mt_cur_thread_fixed_counts(end);
		ml_set_interrupts_enabled(intrs_en);

		goto copyout_counts;
	}
	case MT_FIX_TASK_PERF: {
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		mt_cur_task_fixed_counts(start);
		mt_cur_task_fixed_counts(end);
		ml_set_interrupts_enabled(intrs_en);

		goto copyout_counts;
	}
	default:
		return ENOENT;
	}

copyout_counts:

	/* deltas between the two reads: [0] instructions (if present), [1] cycles */
#ifdef MT_CORE_INSTRS
	counts[0] = end[MT_CORE_INSTRS] - start[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
	counts[1] = end[MT_CORE_CYCLES] - start[MT_CORE_CYCLES];

	return copyout(counts, req->oldptr, MIN(req->oldlen, sizeof(counts)));
}
416
/* kern.monotonic sysctl subtree. */
SYSCTL_DECL(_kern_monotonic);
SYSCTL_NODE(_kern, OID_AUTO, monotonic, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "monotonic");

/*
 * Declare a kern.monotonic.NAME node serviced by mt_sysctl, passing ARG as
 * the enum mt_sysctl selector via arg1.
 */
#define MT_SYSCTL(NAME, ARG, FLAGS, SIZE, SIZESTR, DESC) \
	SYSCTL_PROC(_kern_monotonic, OID_AUTO, NAME, \
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | (FLAGS), \
	    (void *)(ARG), SIZE, mt_sysctl, SIZESTR, DESC)
425
426MT_SYSCTL(supported, MT_SUPPORTED, 0, sizeof(int), "I",
427 "whether monotonic is supported");
428MT_SYSCTL(debug, MT_DEBUG, CTLFLAG_MASKED, sizeof(int), "I",
429 "whether monotonic is printing debug messages");
430MT_SYSCTL(pmis, MT_PMIS, 0, sizeof(uint64_t), "Q",
431 "number of PMIs seen");
432MT_SYSCTL(retrograde_updates, MT_RETROGRADE, 0, sizeof(uint64_t), "Q",
433 "number of times a counter appeared to go backwards");
434MT_SYSCTL(task_thread_counting, MT_TASK_THREAD, 0, sizeof(int), "I",
435 "whether task and thread counting is enabled");
436MT_SYSCTL(kdebug_test, MT_KDBG_TEST, CTLFLAG_MASKED, sizeof(int), "O",
437 "whether task and thread counting is enabled");
438MT_SYSCTL(fixed_cpu_perf, MT_FIX_CPU_PERF, CTLFLAG_MASKED,
439 sizeof(uint64_t) * 2, "O",
440 "overhead of accessing the current CPU's counters");
441MT_SYSCTL(fixed_thread_perf, MT_FIX_THREAD_PERF, CTLFLAG_MASKED,
442 sizeof(uint64_t) * 2, "O",
443 "overhead of accessing the current thread's counters");
444MT_SYSCTL(fixed_task_perf, MT_FIX_TASK_PERF, CTLFLAG_MASKED,
445 sizeof(uint64_t) * 2, "O",
446 "overhead of accessing the current task's counters");