/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/monotonic.h>
#include <machine/machine_routines.h>
#include <machine/monotonic.h>
#include <pexpert/pexpert.h>
#include <sys/param.h> /* NULL */
#include <sys/stat.h> /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h> /* must come after sys/stat.h */
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/monotonic.h>

static int mt_cdev_open(dev_t dev, int flags, int devtype, proc_t p);
static int mt_cdev_close(dev_t dev, int flags, int devtype, proc_t p);
static int mt_cdev_ioctl(dev_t dev, unsigned long cmd, char *uptr, int fflag,
    proc_t p);

#define MT_NODE "monotonic"

static const struct cdevsw mt_cdevsw = {
    .d_open = mt_cdev_open,
    .d_close = mt_cdev_close,
    .d_ioctl = mt_cdev_ioctl,

    .d_read = eno_rdwrt, .d_write = eno_rdwrt, .d_stop = eno_stop,
    .d_reset = eno_reset, .d_ttys = NULL, .d_select = eno_select,
    .d_mmap = eno_mmap, .d_strategy = eno_strat, .d_type = 0
};

/*
 * Written at initialization, read-only thereafter.
 */
lck_grp_t *mt_lock_grp = NULL;
static int mt_dev_major;

static mt_device_t
mt_get_device(dev_t devnum)
{
    return &mt_devices[minor(devnum)];
}

static void
mt_device_lock(mt_device_t dev)
{
    lck_mtx_lock(&dev->mtd_lock);
}

static void
mt_device_unlock(mt_device_t dev)
{
    lck_mtx_unlock(&dev->mtd_lock);
}

static void
mt_device_assert_lock_held(__assert_only mt_device_t dev)
{
    LCK_MTX_ASSERT(&dev->mtd_lock, LCK_MTX_ASSERT_OWNED);
}

static void
mt_device_assert_inuse(__assert_only mt_device_t dev)
{
    assert(dev->mtd_inuse == true);
}

int
mt_dev_init(void)
{
    mt_lock_grp = lck_grp_alloc_init(MT_NODE, LCK_GRP_ATTR_NULL);
    assert(mt_lock_grp != NULL);

    mt_dev_major = cdevsw_add(-1 /* allocate a major number */, &mt_cdevsw);
    if (mt_dev_major < 0) {
        panic("monotonic: cdevsw_add failed: %d", mt_dev_major);
        __builtin_unreachable();
    }

    for (int i = 0; i < MT_NDEVS; i++) {
        if (mt_devices[i].mtd_init(&mt_devices[i])) {
            /* skip devices that fail to initialize */
            continue;
        }

        assert(mt_devices[i].mtd_ncounters > 0);

        dev_t dev = makedev(mt_dev_major, i);
        char name[128];
        snprintf(name, sizeof(name), MT_NODE "/%s", mt_devices[i].mtd_name);
        void *node = devfs_make_node(dev, DEVFS_CHAR, UID_ROOT,
            GID_WINDOWSERVER, 0666, name);
        if (!node) {
            panic("monotonic: devfs_make_node failed for '%s'",
                mt_devices[i].mtd_name);
            __builtin_unreachable();
        }

        lck_mtx_init(&mt_devices[i].mtd_lock, mt_lock_grp, LCK_ATTR_NULL);
    }

    return 0;
}
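
/*
 * Usage sketch (illustrative, not part of this file): each device
 * registered above shows up as /dev/monotonic/<mtd_name>. The node name
 * "core" below is an assumption for the example; the real names come from
 * the mt_devices table.
 *
 *     #include <fcntl.h>
 *     #include <stdio.h>
 *
 *     int
 *     open_monotonic_device(void)
 *     {
 *         int fd = open("/dev/monotonic/core", O_RDWR);
 *         if (fd < 0) {
 *             perror("open"); // EBUSY if another client already has it open
 *         }
 *         return fd;
 *     }
 */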

static int
mt_cdev_open(dev_t devnum, __unused int flags, __unused int devtype,
    __unused proc_t p)
{
    int error = 0;

    mt_device_t dev = mt_get_device(devnum);
    mt_device_lock(dev);
    if (dev->mtd_inuse) {
        /* each device allows only one client at a time */
        error = EBUSY;
    } else {
        dev->mtd_inuse = true;
    }
    mt_device_unlock(dev);

    return error;
}

static int
mt_cdev_close(dev_t devnum, __unused int flags, __unused int devtype,
    __unused struct proc *p)
{
    mt_device_t dev = mt_get_device(devnum);

    mt_device_lock(dev);
    mt_device_assert_inuse(dev);
    dev->mtd_inuse = false;
    /* reset on close so no state leaks to the next client */
    dev->mtd_reset();
    mt_device_unlock(dev);

    return 0;
}

static int
mt_ctl_add(mt_device_t dev, user_addr_t uptr)
{
    int error;
    uint32_t ctr;
    union monotonic_ctl_add ctl;

    mt_device_assert_lock_held(dev);

    error = copyin(uptr, &ctl, sizeof(ctl.in));
    if (error) {
        return error;
    }

    error = dev->mtd_add(&ctl.in.config, &ctr);
    if (error) {
        return error;
    }

    ctl.out.ctr = ctr;

    error = copyout(&ctl, uptr, sizeof(ctl.out));
    if (error) {
        return error;
    }

    return 0;
}
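
/*
 * Usage sketch (illustrative): adding a counter from userspace. The in/out
 * union lets the same user buffer carry a monotonic_config in and the
 * allocated counter index back out, mirroring the copyin/copyout pair
 * above. The .event field and its value are assumptions for the example;
 * see the definitions in <sys/monotonic.h>.
 *
 *     #include <sys/ioctl.h>
 *     #include <sys/monotonic.h>
 *
 *     int
 *     add_counter(int fd, uint64_t event, uint32_t *ctr_out)
 *     {
 *         union monotonic_ctl_add ctl = {
 *             .in = { .config = { .event = event } },
 *         };
 *         if (ioctl(fd, MT_IOC_ADD, &ctl) < 0) {
 *             return -1;
 *         }
 *         *ctr_out = ctl.out.ctr;
 *         return 0;
 *     }
 */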

static int
mt_ctl_counts(mt_device_t dev, user_addr_t uptr)
{
    int error;
    union monotonic_ctl_counts ctl;

    mt_device_assert_lock_held(dev);

    error = copyin(uptr, &ctl, sizeof(ctl.in));
    if (error) {
        return error;
    }

    if (ctl.in.ctr_mask == 0) {
        return EINVAL;
    }

    {
        /* stack-allocate a [nmonitors][ncounters] matrix and copy it out whole */
        uint64_t counts[dev->mtd_nmonitors][dev->mtd_ncounters];
        memset(counts, 0,
            dev->mtd_ncounters * dev->mtd_nmonitors * sizeof(counts[0][0]));
        error = dev->mtd_read(ctl.in.ctr_mask, (uint64_t *)counts);
        if (error) {
            return error;
        }

        error = copyout(&counts, uptr, sizeof(counts));
        if (error) {
            return error;
        }
    }

    return 0;
}
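
/*
 * Usage sketch (illustrative): the handler above copies a full
 * [nmonitors][ncounters] matrix of uint64_t back to uptr, so the caller
 * must size its buffer from the dimensions reported by MT_IOC_GET_INFO,
 * not just sizeof(union monotonic_ctl_counts).
 *
 *     union monotonic_ctl_info info = { 0 };
 *     ioctl(fd, MT_IOC_GET_INFO, &info);
 *     size_t size = info.out.nmonitors * info.out.ncounters *
 *         sizeof(uint64_t);
 *     if (size < sizeof(union monotonic_ctl_counts)) {
 *         size = sizeof(union monotonic_ctl_counts);
 *     }
 *     union monotonic_ctl_counts *cts = calloc(1, size);
 *     cts->in.ctr_mask = UINT64_MAX; // request every counter
 *     ioctl(fd, MT_IOC_COUNTS, cts);
 *     uint64_t *counts = (uint64_t *)cts; // nmonitors x ncounters values
 */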

static int
mt_ctl_enable(mt_device_t dev, user_addr_t uptr)
{
    int error;
    union monotonic_ctl_enable ctl;

    mt_device_assert_lock_held(dev);

    error = copyin(uptr, &ctl, sizeof(ctl));
    if (error) {
        return error;
    }

    dev->mtd_enable(ctl.in.enable);

    return 0;
}

static int
mt_ctl_reset(mt_device_t dev)
{
    mt_device_assert_lock_held(dev);
    dev->mtd_reset();
    return 0;
}

static int
mt_cdev_ioctl(dev_t devnum, unsigned long cmd, char *arg, __unused int flags,
    __unused proc_t p)
{
    int error = ENODEV;
    user_addr_t uptr = *(user_addr_t *)(void *)arg;

    mt_device_t dev = mt_get_device(devnum);
    mt_device_lock(dev);

    switch (cmd) {
    case MT_IOC_RESET:
        error = mt_ctl_reset(dev);
        break;

    case MT_IOC_ADD:
        error = mt_ctl_add(dev, uptr);
        break;

    case MT_IOC_ENABLE:
        error = mt_ctl_enable(dev, uptr);
        break;

    case MT_IOC_COUNTS:
        error = mt_ctl_counts(dev, uptr);
        break;

    case MT_IOC_GET_INFO: {
        union monotonic_ctl_info info = {
            .out = {
                .nmonitors = dev->mtd_nmonitors,
                .ncounters = dev->mtd_ncounters,
            },
        };
        error = copyout(&info, uptr, sizeof(info));
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    mt_device_unlock(dev);

    return error;
}
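
/*
 * Usage sketch (illustrative): a whole session against this interface, in
 * the order the commands are meant to be used. Every command runs under
 * the device lock, so a client's calls serialize naturally. Error handling
 * is omitted, and add_ctl/cts are the buffers from the sketches above.
 *
 *     ioctl(fd, MT_IOC_RESET);          // start from a clean state
 *     ioctl(fd, MT_IOC_ADD, &add_ctl);  // configure a counter
 *     union monotonic_ctl_enable en = { .in = { .enable = 1 } };
 *     ioctl(fd, MT_IOC_ENABLE, &en);    // start counting
 *     // ... workload runs ...
 *     ioctl(fd, MT_IOC_COUNTS, cts);    // snapshot the counts
 *     close(fd);                        // close resets the device
 */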

int
thread_selfcounts(__unused struct proc *p,
    struct thread_selfcounts_args *uap, __unused int *ret_out)
{
    switch (uap->type) {
    case 1: { /* fixed counters: instructions and cycles */
        uint64_t counts[2] = { 0 };
        uint64_t thread_counts[MT_CORE_NFIXED] = { 0 };

        mt_cur_thread_fixed_counts(thread_counts);

#ifdef MT_CORE_INSTRS
        counts[0] = thread_counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
        counts[1] = thread_counts[MT_CORE_CYCLES];

        return copyout(counts, uap->buf, MIN(sizeof(counts), uap->nbytes));
    }
    default:
        return EINVAL;
    }
}
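
/*
 * Usage sketch (illustrative): type 1 fills a two-element buffer with the
 * calling thread's fixed counts: instructions in slot 0 (where
 * MT_CORE_INSTRS exists) and cycles in slot 1. The copyout is truncated to
 * nbytes, so passing sizeof(uint64_t) returns instructions alone. The
 * __thread_selfcounts wrapper name is an assumption; this syscall is
 * private and has no stable userspace interface.
 *
 *     extern int __thread_selfcounts(int type, void *buf, size_t nbytes);
 *
 *     uint64_t counts[2] = { 0 };
 *     __thread_selfcounts(1, counts, sizeof(counts));
 *     // counts[0] = instructions, counts[1] = cycles
 */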

enum mt_sysctl {
    MT_SUPPORTED,
    MT_PMIS,
    MT_RETROGRADE,
    MT_TASK_THREAD,
    MT_DEBUG,
    MT_KDBG_TEST,
    MT_FIX_CPU_PERF,
    MT_FIX_THREAD_PERF,
    MT_FIX_TASK_PERF,
};

static int
mt_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
    uint64_t start[MT_CORE_NFIXED] = { 0 }, end[MT_CORE_NFIXED] = { 0 };
    uint64_t counts[2] = { 0 };

    switch ((enum mt_sysctl)arg1) {
    case MT_SUPPORTED:
        return sysctl_io_number(req, (int)mt_core_supported, sizeof(int), NULL, NULL);
    case MT_PMIS:
        return sysctl_io_number(req, mt_count_pmis(), sizeof(uint64_t), NULL, NULL);
    case MT_RETROGRADE: {
        uint64_t value = os_atomic_load_wide(&mt_retrograde, relaxed);
        return sysctl_io_number(req, value, sizeof(mt_retrograde), NULL, NULL);
    }
    case MT_TASK_THREAD:
        return sysctl_io_number(req, (int)mt_core_supported, sizeof(int), NULL, NULL);
    case MT_DEBUG: {
        int value = mt_debug;

        int r = sysctl_io_number(req, value, sizeof(value), &value, NULL);
        if (r) {
            return r;
        }
        mt_debug = value;

        return 0;
    }
    case MT_KDBG_TEST: {
        if (req->newptr == USER_ADDR_NULL) {
            return EINVAL;
        }

        /* emit one matched pair of each test tracepoint */
        int intrs_en = ml_set_interrupts_enabled(FALSE);
        MT_KDBG_TMPCPU_START(0x3fff);
        MT_KDBG_TMPCPU_END(0x3fff);

        MT_KDBG_TMPTH_START(0x3fff);
        MT_KDBG_TMPTH_END(0x3fff);
        ml_set_interrupts_enabled(intrs_en);

        return 0;
    }
    case MT_FIX_CPU_PERF: {
        /* read twice back-to-back; the delta approximates access overhead */
        int intrs_en = ml_set_interrupts_enabled(FALSE);
        mt_fixed_counts(start);
        mt_fixed_counts(end);
        ml_set_interrupts_enabled(intrs_en);

        goto copyout_counts;
    }
    case MT_FIX_THREAD_PERF: {
        int intrs_en = ml_set_interrupts_enabled(FALSE);
        mt_cur_thread_fixed_counts(start);
        mt_cur_thread_fixed_counts(end);
        ml_set_interrupts_enabled(intrs_en);

        goto copyout_counts;
    }
    case MT_FIX_TASK_PERF: {
        int intrs_en = ml_set_interrupts_enabled(FALSE);
        mt_cur_task_fixed_counts(start);
        mt_cur_task_fixed_counts(end);
        ml_set_interrupts_enabled(intrs_en);

        goto copyout_counts;
    }
    default:
        return ENOENT;
    }

copyout_counts:

#ifdef MT_CORE_INSTRS
    counts[0] = end[MT_CORE_INSTRS] - start[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
    counts[1] = end[MT_CORE_CYCLES] - start[MT_CORE_CYCLES];

    return copyout(counts, req->oldptr, MIN(req->oldlen, sizeof(counts)));
}

SYSCTL_DECL(_kern_monotonic);
SYSCTL_NODE(_kern, OID_AUTO, monotonic, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "monotonic");

#define MT_SYSCTL(NAME, ARG, FLAGS, SIZE, SIZESTR, DESC) \
    SYSCTL_PROC(_kern_monotonic, OID_AUTO, NAME, \
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | (FLAGS), \
        (void *)(ARG), SIZE, mt_sysctl, SIZESTR, DESC)

MT_SYSCTL(supported, MT_SUPPORTED, 0, sizeof(int), "I",
    "whether monotonic is supported");
MT_SYSCTL(debug, MT_DEBUG, CTLFLAG_MASKED, sizeof(int), "I",
    "whether monotonic is printing debug messages");
MT_SYSCTL(pmis, MT_PMIS, 0, sizeof(uint64_t), "Q",
    "number of PMIs seen");
MT_SYSCTL(retrograde_updates, MT_RETROGRADE, 0, sizeof(uint64_t), "Q",
    "number of times a counter appeared to go backwards");
MT_SYSCTL(task_thread_counting, MT_TASK_THREAD, 0, sizeof(int), "I",
    "whether task and thread counting is enabled");
MT_SYSCTL(kdebug_test, MT_KDBG_TEST, CTLFLAG_MASKED, sizeof(int), "O",
    "emit kdebug events to test monotonic's tracepoints");
MT_SYSCTL(fixed_cpu_perf, MT_FIX_CPU_PERF, CTLFLAG_MASKED,
    sizeof(uint64_t) * 2, "O",
    "overhead of accessing the current CPU's counters");
MT_SYSCTL(fixed_thread_perf, MT_FIX_THREAD_PERF, CTLFLAG_MASKED,
    sizeof(uint64_t) * 2, "O",
    "overhead of accessing the current thread's counters");
MT_SYSCTL(fixed_task_perf, MT_FIX_TASK_PERF, CTLFLAG_MASKED,
    sizeof(uint64_t) * 2, "O",
    "overhead of accessing the current task's counters");