apple/xnu.git — bsd/dev/monotonic.c (xnu-4570.71.2)
/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/monotonic.h>
#include <machine/machine_routines.h>
#include <machine/monotonic.h>
#include <pexpert/pexpert.h>
#include <sys/param.h> /* NULL */
#include <sys/stat.h> /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h> /* must come after sys/stat.h */
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/monotonic.h>

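/*
 * This file exposes the "monotonic" fixed hardware counters to user space:
 * it registers a character device for each entry in monotonic_devs[]
 * (driven through open/close/ioctl), implements the thread_selfcounts()
 * syscall, and publishes the kern.monotonic sysctl tree.
 */
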
static int mt_dev_open(dev_t dev, int flags, int devtype, struct proc *p);
static int mt_dev_close(dev_t dev, int flags, int devtype, struct proc *p);
static int mt_dev_ioctl(dev_t dev, unsigned long cmd, char *uptr, int fflag,
    struct proc *p);

static struct cdevsw mt_cdevsw = {
    .d_open = mt_dev_open,
    .d_close = mt_dev_close,
    .d_read = eno_rdwrt,
    .d_write = eno_rdwrt,
    .d_ioctl = mt_dev_ioctl,
    .d_stop = eno_stop,
    .d_reset = eno_reset,
    .d_ttys = NULL,
    .d_select = eno_select,
    .d_mmap = eno_mmap,
    .d_strategy = eno_strat,
    .d_type = 0
};

/*
 * Written at initialization, read-only thereafter.
 */
lck_grp_t *mt_lock_grp = NULL;

static int mt_dev_major;
decl_lck_mtx_data(static, mt_dev_mtxs[MT_NDEVS]);
static bool mt_dev_owned[MT_NDEVS];

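/*
 * Per-minor-device locking: mt_dev_mtxs serializes the open, close, and
 * ioctl paths for each device, and mt_dev_owned tracks whether a client
 * currently has the device open.
 */
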
static void
mt_dev_lock(dev_t dev)
{
    lck_mtx_lock(&mt_dev_mtxs[minor(dev)]);
}

static void
mt_dev_unlock(dev_t dev)
{
    lck_mtx_unlock(&mt_dev_mtxs[minor(dev)]);
}

static void
mt_dev_assert_lock_held(__assert_only dev_t dev)
{
    LCK_MTX_ASSERT(&mt_dev_mtxs[minor(dev)], LCK_MTX_ASSERT_OWNED);
}

int
mt_dev_init(void)
{
    lck_grp_attr_t *lock_grp_attr = NULL;
    int devices = 0;

    lock_grp_attr = lck_grp_attr_alloc_init();
    mt_lock_grp = lck_grp_alloc_init("monotonic", lock_grp_attr);
    lck_grp_attr_free(lock_grp_attr);

    mt_dev_major = cdevsw_add(-1 /* allocate a major number */, &mt_cdevsw);
    if (mt_dev_major < 0) {
        panic("monotonic: cdevsw_add failed: %d", mt_dev_major);
        __builtin_trap();
    }

    for (int i = 0; i < MT_NDEVS; i++) {
        dev_t dev;
        void *dn;
        int error;

        error = monotonic_devs[i].mtd_init();
        if (error) {
            continue;
        }

        dev = makedev(mt_dev_major, i);
        dn = devfs_make_node(dev,
            DEVFS_CHAR, UID_ROOT, GID_WINDOWSERVER, 0666,
            monotonic_devs[i].mtd_name);
        if (dn == NULL) {
            panic("monotonic: devfs_make_node failed for '%s'",
                monotonic_devs[i].mtd_name);
            __builtin_trap();
        }

        lck_mtx_init(&mt_dev_mtxs[i], mt_lock_grp, LCK_ATTR_NULL);

        devices++;
    }

    return 0;
}

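/*
 * Opening a monotonic device grants exclusive ownership: a second open of
 * the same minor device fails with EBUSY until the owner closes it.
 */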
static int
mt_dev_open(dev_t dev, __unused int flags, __unused int devtype,
    __unused struct proc *p)
{
    int error = 0;

    mt_dev_lock(dev);

    if (mt_dev_owned[minor(dev)]) {
        error = EBUSY;
        goto out;
    }

    mt_dev_owned[minor(dev)] = true;

out:
    mt_dev_unlock(dev);
    return error;
}

static int
mt_dev_close(dev_t dev, __unused int flags, __unused int devtype,
    __unused struct proc *p)
{
    mt_dev_lock(dev);

    assert(mt_dev_owned[minor(dev)]);
    mt_dev_owned[minor(dev)] = false;

    monotonic_devs[minor(dev)].mtd_reset();

    mt_dev_unlock(dev);

    return 0;
}

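/*
 * MT_IOC_ADD: copy in a counter configuration, ask the device to allocate a
 * counter for it, and copy the assigned counter index back to user space.
 */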
static int
mt_ctl_add(dev_t dev, user_addr_t uptr, __unused int flags,
    __unused struct proc *p)
{
    int error;
    uint32_t ctr;
    union monotonic_ctl_add ctl;

    mt_dev_assert_lock_held(dev);

    error = copyin(uptr, &ctl, sizeof(ctl.in));
    if (error) {
        return error;
    }

    error = monotonic_devs[minor(dev)].mtd_add(&ctl.in.config, &ctr);
    if (error) {
        return error;
    }

    ctl.out.ctr = ctr;

    error = copyout(&ctl, uptr, sizeof(ctl.out));
    if (error) {
        return error;
    }

    return 0;
}

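/*
 * MT_IOC_COUNTS: copy in a mask of the counters to read, read them through
 * the device's mtd_read callback, and copy the counter values back out over
 * the same user buffer.
 */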
static int
mt_ctl_counts(dev_t dev, user_addr_t uptr, __unused int flags,
    __unused struct proc *p)
{
    int error;
    uint64_t ctrs;
    union monotonic_ctl_counts ctl;

    mt_dev_assert_lock_held(dev);

    error = copyin(uptr, &ctl, sizeof(ctl.in));
    if (error) {
        return error;
    }

    if (ctl.in.ctr_mask == 0) {
        return EINVAL;
    }
    ctrs = __builtin_popcountll(ctl.in.ctr_mask);

    {
        uint64_t counts[ctrs];
        error = monotonic_devs[minor(dev)].mtd_read(ctl.in.ctr_mask, counts);
        if (error) {
            return error;
        }

        error = copyout(&counts, uptr, sizeof(counts));
        if (error) {
            return error;
        }
    }

    return 0;
}

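/*
 * MT_IOC_ENABLE: copy in a flag and turn counting on or off through the
 * device's mtd_enable callback.
 */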
static int
mt_ctl_enable(dev_t dev, user_addr_t uptr)
{
    int error;
    union monotonic_ctl_enable ctl;

    mt_dev_assert_lock_held(dev);

    error = copyin(uptr, &ctl, sizeof(ctl));
    if (error) {
        return error;
    }

    monotonic_devs[minor(dev)].mtd_enable(ctl.in.enable);

    return 0;
}

static int
mt_ctl_reset(dev_t dev)
{
    mt_dev_assert_lock_held(dev);
    monotonic_devs[minor(dev)].mtd_reset();
    return 0;
}

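/*
 * ioctl entry point: the argument for each MT_IOC_* command is a user
 * pointer (note the user_addr_t dereference below), which the individual
 * mt_ctl_* handlers copyin from and copyout to.  All commands run with the
 * per-device mutex held.
 */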
static int
mt_dev_ioctl(dev_t dev, unsigned long cmd, char *arg, int flags,
    struct proc *p)
{
    int error;
    user_addr_t uptr = *(user_addr_t *)(void *)arg;

    mt_dev_lock(dev);

    switch (cmd) {
    case MT_IOC_RESET:
        error = mt_ctl_reset(dev);
        break;

    case MT_IOC_ADD:
        error = mt_ctl_add(dev, uptr, flags, p);
        break;

    case MT_IOC_ENABLE:
        error = mt_ctl_enable(dev, uptr);
        break;

    case MT_IOC_COUNTS:
        error = mt_ctl_counts(dev, uptr, flags, p);
        break;

    default:
        error = ENODEV;
        break;
    }

    mt_dev_unlock(dev);

    return error;
}
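
/*
 * Illustrative sketch (not part of the driver): how a user-space client
 * might exercise these ioctls.  It assumes the MT_IOC_* commands from
 * <sys/monotonic.h> pass the caller's buffer address straight through as
 * the argument (which is what the user_addr_t dereference above requires),
 * and that the device node path is whatever monotonic_devs[i].mtd_name
 * produced under /dev; the path below is a placeholder, and error handling
 * is omitted.
 *
 *     int fd = open("/dev/monotonic/...", O_RDWR);
 *
 *     union monotonic_ctl_enable en = { .in = { .enable = true } };
 *     ioctl(fd, MT_IOC_ENABLE, &en);
 *
 *     // The same buffer carries the request in and the counter values out,
 *     // so it must be large enough for popcount(ctr_mask) uint64_t values.
 *     union monotonic_ctl_counts ct = { .in = { .ctr_mask = 0x3 } };
 *     ioctl(fd, MT_IOC_COUNTS, &ct);
 *
 *     close(fd);    // closing also resets the device (see mt_dev_close())
 */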
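/*
 * thread_selfcounts() syscall: copies the current thread's accumulated
 * fixed counter values out to the caller as a pair of uint64_t values,
 * [instructions, cycles] (instructions only where MT_CORE_INSTRS exists).
 */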
int thread_selfcounts(__unused struct proc *p,
    struct thread_selfcounts_args *uap, __unused int *ret_out)
{
    switch (uap->type) {
    case 1: {
        uint64_t counts[2] = {};
        uint64_t thread_counts[MT_CORE_NFIXED];

        mt_cur_thread_fixed_counts(thread_counts);

#ifdef MT_CORE_INSTRS
        counts[0] = thread_counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
        counts[1] = thread_counts[MT_CORE_CYCLES];

        return copyout(counts, uap->buf, MIN(sizeof(counts), uap->nbytes));
    }
    default:
        return EINVAL;
    }
}

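/*
 * Selectors for the kern.monotonic sysctls, dispatched in mt_sysctl() below.
 */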
enum mt_sysctl {
    MT_SUPPORTED,
    MT_PMIS,
    MT_RETROGRADE,
    MT_TASK_THREAD,
    MT_DEBUG,
    MT_KDBG_TEST,
    MT_FIX_CPU_PERF,
    MT_FIX_THREAD_PERF,
    MT_FIX_TASK_PERF,
};

static int
mt_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
    uint64_t start[MT_CORE_NFIXED], end[MT_CORE_NFIXED];
    uint64_t counts[2] = {};

    switch ((enum mt_sysctl)arg1) {
    case MT_SUPPORTED:
        return sysctl_io_number(req, (int)mt_core_supported, sizeof(int), NULL, NULL);
    case MT_PMIS:
        return sysctl_io_number(req, mt_pmis, sizeof(mt_pmis), NULL, NULL);
    case MT_RETROGRADE:
        return sysctl_io_number(req, mt_retrograde, sizeof(mt_retrograde), NULL, NULL);
    case MT_TASK_THREAD:
        return sysctl_io_number(req, (int)mt_core_supported, sizeof(int), NULL, NULL);
    case MT_DEBUG: {
        int value = mt_debug;

        int r = sysctl_io_number(req, value, sizeof(value), &value, NULL);
        if (r) {
            return r;
        }
        mt_debug = value;

        return 0;
    }
    case MT_KDBG_TEST: {
        if (req->newptr == USER_ADDR_NULL) {
            return EINVAL;
        }

        int intrs_en = ml_set_interrupts_enabled(FALSE);
        MT_KDBG_TMPCPU_START(0x3fff);
        MT_KDBG_TMPCPU_END(0x3fff);

        MT_KDBG_TMPTH_START(0x3fff);
        MT_KDBG_TMPTH_END(0x3fff);
        ml_set_interrupts_enabled(intrs_en);

        return 0;
    }
    case MT_FIX_CPU_PERF: {
        int intrs_en = ml_set_interrupts_enabled(FALSE);
        mt_fixed_counts(start);
        mt_fixed_counts(end);
        ml_set_interrupts_enabled(intrs_en);

        goto copyout_counts;
    }
    case MT_FIX_THREAD_PERF: {
        int intrs_en = ml_set_interrupts_enabled(FALSE);
        mt_cur_thread_fixed_counts(start);
        mt_cur_thread_fixed_counts(end);
        ml_set_interrupts_enabled(intrs_en);

        goto copyout_counts;
    }
    case MT_FIX_TASK_PERF: {
        int intrs_en = ml_set_interrupts_enabled(FALSE);
        mt_cur_task_fixed_counts(start);
        mt_cur_task_fixed_counts(end);
        ml_set_interrupts_enabled(intrs_en);

        goto copyout_counts;
    }
    default:
        return ENOENT;
    }

copyout_counts:

#ifdef MT_CORE_INSTRS
    counts[0] = end[MT_CORE_INSTRS] - start[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
    counts[1] = end[MT_CORE_CYCLES] - start[MT_CORE_CYCLES];

    return copyout(counts, req->oldptr, MIN(req->oldlen, sizeof(counts)));
}

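/*
 * kern.monotonic sysctl tree.  The fixed_*_perf entries report the overhead
 * of two back-to-back reads of the relevant counters, returned from the
 * copyout_counts path above as a pair of uint64_t deltas:
 * [instructions, cycles].
 */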
SYSCTL_DECL(_kern_monotonic);
SYSCTL_NODE(_kern, OID_AUTO, monotonic, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "monotonic");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, supported,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)MT_SUPPORTED, sizeof(int), mt_sysctl, "I",
    "whether monotonic is supported");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, debug,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED,
    (void *)MT_DEBUG, sizeof(int), mt_sysctl, "I",
    "whether monotonic is printing debug messages");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, pmis,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)MT_PMIS, sizeof(uint64_t), mt_sysctl, "Q",
    "how many PMIs have been seen");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, retrograde_updates,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)MT_RETROGRADE, sizeof(uint64_t), mt_sysctl, "Q",
    "how many times a counter appeared to go backwards");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, task_thread_counting,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED,
    (void *)MT_TASK_THREAD, sizeof(int), mt_sysctl, "I",
    "task and thread counting enabled");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, kdebug_test,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)MT_KDBG_TEST, sizeof(int), mt_sysctl, "O",
    "test that kdebug integration works");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, fixed_cpu_perf,
    CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)MT_FIX_CPU_PERF, sizeof(uint64_t) * 2, mt_sysctl, "O",
    "overhead of accessing the current CPU's counters");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, fixed_thread_perf,
    CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)MT_FIX_THREAD_PERF, sizeof(uint64_t) * 2, mt_sysctl, "O",
    "overhead of accessing the current thread's counters");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, fixed_task_perf,
    CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    (void *)MT_FIX_TASK_PERF, sizeof(uint64_t) * 2, mt_sysctl, "O",
    "overhead of accessing the current task's counters");
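
/*
 * Illustrative sketch (not part of this file): reading one of the sysctls
 * above from user space with sysctlbyname(3).  kern.monotonic.fixed_cpu_perf
 * is assumed to return the two uint64_t deltas produced by mt_sysctl()'s
 * copyout_counts path.
 *
 *     #include <stdint.h>
 *     #include <stdio.h>
 *     #include <sys/sysctl.h>
 *
 *     int
 *     main(void)
 *     {
 *         uint64_t counts[2] = { 0 };
 *         size_t size = sizeof(counts);
 *
 *         if (sysctlbyname("kern.monotonic.fixed_cpu_perf", counts, &size,
 *             NULL, 0) == 0) {
 *             printf("instructions = %llu, cycles = %llu\n",
 *                 (unsigned long long)counts[0],
 *                 (unsigned long long)counts[1]);
 *         }
 *         return 0;
 *     }
 */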