/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/monotonic.h>
#include <machine/machine_routines.h>
#include <machine/monotonic.h>
#include <pexpert/pexpert.h>
#include <sys/param.h> /* NULL */
#include <sys/stat.h> /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h> /* must come after sys/stat.h */
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/monotonic.h>
static int mt_dev_open(dev_t dev, int flags, int devtype, struct proc *p);
static int mt_dev_close(dev_t dev, int flags, int devtype, struct proc *p);
static int mt_dev_ioctl(dev_t dev, unsigned long cmd, char *uptr, int fflag,
		struct proc *p);
static struct cdevsw mt_cdevsw = {
	.d_open = mt_dev_open,
	.d_close = mt_dev_close,
	.d_ioctl = mt_dev_ioctl,
	.d_select = eno_select,
	.d_strategy = eno_strat,
};
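/*
 * A minimal userspace sketch of driving one of these device nodes, for
 * illustration only.  The node path ("/dev/monotonic/core"), the MT_IOC_*
 * request macro, and the union monotonic_ctl_enable layout are assumptions
 * taken to come from <sys/monotonic.h>; they are not defined in this file.
 *
 *	#include <fcntl.h>
 *	#include <stdbool.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/monotonic.h>
 *	#include <unistd.h>
 *
 *	int
 *	enable_fixed_counters(void)
 *	{
 *		// Open the node created by the init path via devfs_make_node().
 *		int fd = open("/dev/monotonic/core", O_RDWR);
 *		if (fd < 0) {
 *			return -1;
 *		}
 *
 *		// mt_dev_ioctl() copies this structure in and calls mtd_enable().
 *		union monotonic_ctl_enable ctl = { .in = { .enable = true } };
 *		int error = ioctl(fd, MT_IOC_ENABLE, &ctl);
 *
 *		close(fd);
 *		return error;
 *	}
 */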
/*
 * Written at initialization, read-only thereafter.
 */
lck_grp_t *mt_lock_grp = NULL;

static int mt_dev_major;
decl_lck_mtx_data(static, mt_dev_mtxs[MT_NDEVS]);
static bool mt_dev_owned[MT_NDEVS];
static void
mt_dev_lock(dev_t dev)
{
	lck_mtx_lock(&mt_dev_mtxs[minor(dev)]);
}

static void
mt_dev_unlock(dev_t dev)
{
	lck_mtx_unlock(&mt_dev_mtxs[minor(dev)]);
}

static void
mt_dev_assert_lock_held(__assert_only dev_t dev)
{
	LCK_MTX_ASSERT(&mt_dev_mtxs[minor(dev)], LCK_MTX_ASSERT_OWNED);
}
int
mt_dev_init(void)
{
	lck_grp_attr_t *lock_grp_attr = NULL;

	lock_grp_attr = lck_grp_attr_alloc_init();
	mt_lock_grp = lck_grp_alloc_init("monotonic", lock_grp_attr);
	lck_grp_attr_free(lock_grp_attr);

	mt_dev_major = cdevsw_add(-1 /* allocate a major number */, &mt_cdevsw);
	if (mt_dev_major < 0) {
		panic("monotonic: cdevsw_add failed: %d", mt_dev_major);
	}

	for (int i = 0; i < MT_NDEVS; i++) {
		dev_t dev;
		void *dn;
		int error;

		error = monotonic_devs[i].mtd_init();
		if (error) {
			continue;
		}

		dev = makedev(mt_dev_major, i);
		dn = devfs_make_node(dev,
				DEVFS_CHAR, UID_ROOT, GID_WINDOWSERVER, 0666,
				monotonic_devs[i].mtd_name);
		if (dn == NULL) {
			panic("monotonic: devfs_make_node failed for '%s'",
					monotonic_devs[i].mtd_name);
		}

		lck_mtx_init(&mt_dev_mtxs[i], mt_lock_grp, LCK_ATTR_NULL);
	}

	return 0;
}
static int
mt_dev_open(dev_t dev, __unused int flags, __unused int devtype,
		__unused struct proc *p)
{
	int error = 0;

	mt_dev_lock(dev);

	if (mt_dev_owned[minor(dev)]) {
		error = EBUSY;
		goto out;
	}

	mt_dev_owned[minor(dev)] = true;

out:
	mt_dev_unlock(dev);
	return error;
}
static int
mt_dev_close(dev_t dev, __unused int flags, __unused int devtype,
		__unused struct proc *p)
{
	mt_dev_lock(dev);

	assert(mt_dev_owned[minor(dev)]);
	mt_dev_owned[minor(dev)] = false;

	monotonic_devs[minor(dev)].mtd_reset();

	mt_dev_unlock(dev);

	return 0;
}
static int
mt_ctl_add(dev_t dev, user_addr_t uptr, __unused int flags,
		__unused struct proc *p)
{
	int error;
	uint32_t ctr;
	union monotonic_ctl_add ctl;

	mt_dev_assert_lock_held(dev);

	error = copyin(uptr, &ctl, sizeof(ctl.in));
	if (error) {
		return error;
	}

	error = monotonic_devs[minor(dev)].mtd_add(&ctl.in.config, &ctr);
	if (error) {
		return error;
	}

	ctl.out.ctr = ctr;

	error = copyout(&ctl, uptr, sizeof(ctl.out));
	if (error) {
		return error;
	}

	return 0;
}
static int
mt_ctl_counts(dev_t dev, user_addr_t uptr, __unused int flags,
		__unused struct proc *p)
{
	int error;
	uint64_t ctrs;
	union monotonic_ctl_counts ctl;

	mt_dev_assert_lock_held(dev);

	error = copyin(uptr, &ctl, sizeof(ctl.in));
	if (error) {
		return error;
	}

	if (ctl.in.ctr_mask == 0) {
		return EINVAL;
	}
	ctrs = __builtin_popcountll(ctl.in.ctr_mask);

	{
		uint64_t counts[ctrs];

		error = monotonic_devs[minor(dev)].mtd_read(ctl.in.ctr_mask, counts);
		if (error) {
			return error;
		}

		error = copyout(&counts, uptr, sizeof(counts));
		if (error) {
			return error;
		}
	}

	return 0;
}
static int
mt_ctl_enable(dev_t dev, user_addr_t uptr)
{
	int error;
	union monotonic_ctl_enable ctl;

	mt_dev_assert_lock_held(dev);

	error = copyin(uptr, &ctl, sizeof(ctl));
	if (error) {
		return error;
	}

	monotonic_devs[minor(dev)].mtd_enable(ctl.in.enable);

	return 0;
}
static int
mt_ctl_reset(dev_t dev)
{
	mt_dev_assert_lock_held(dev);
	monotonic_devs[minor(dev)].mtd_reset();
	return 0;
}
static int
mt_dev_ioctl(dev_t dev, unsigned long cmd, char *arg, int flags,
		struct proc *p)
{
	int error;
	user_addr_t uptr = *(user_addr_t *)(void *)arg;

	mt_dev_lock(dev);

	switch (cmd) {
	case MT_IOC_RESET:
		error = mt_ctl_reset(dev);
		break;

	case MT_IOC_ADD:
		error = mt_ctl_add(dev, uptr, flags, p);
		break;

	case MT_IOC_ENABLE:
		error = mt_ctl_enable(dev, uptr);
		break;

	case MT_IOC_COUNTS:
		error = mt_ctl_counts(dev, uptr, flags, p);
		break;

	default:
		error = ENODEV;
		break;
	}

	mt_dev_unlock(dev);

	return error;
}
int thread_selfcounts(__unused struct proc *p,
		struct thread_selfcounts_args *uap, __unused int *ret_out)
{
	switch (uap->type) {
	case 1: {
		uint64_t counts[2] = {};
		uint64_t thread_counts[MT_CORE_NFIXED];

		mt_cur_thread_fixed_counts(thread_counts);

#ifdef MT_CORE_INSTRS
		counts[0] = thread_counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
		counts[1] = thread_counts[MT_CORE_CYCLES];

		return copyout(counts, uap->buf, MIN(sizeof(counts), uap->nbytes));
	}
	default:
		return EINVAL;
	}
}
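/*
 * A hedged userspace sketch of invoking the syscall above, for
 * illustration only.  It assumes the syscall is exported to userspace as
 * SYS_thread_selfcounts (generated from syscalls.master) and that type 1
 * selects the fixed instructions/cycles pair handled above.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		// counts[0] holds instructions (when supported), counts[1] cycles.
 *		uint64_t counts[2] = { 0 };
 *
 *		if (syscall(SYS_thread_selfcounts, 1, counts, sizeof(counts)) != 0) {
 *			perror("thread_selfcounts");
 *			return 1;
 *		}
 *		printf("instructions: %llu cycles: %llu\n",
 *				(unsigned long long)counts[0],
 *				(unsigned long long)counts[1]);
 *		return 0;
 *	}
 */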
static int
mt_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	uint64_t start[MT_CORE_NFIXED], end[MT_CORE_NFIXED];
	uint64_t counts[2] = {};

	switch ((enum mt_sysctl)arg1) {
	case MT_SUPPORTED:
		return sysctl_io_number(req, (int)mt_core_supported, sizeof(int), NULL, NULL);
	case MT_PMIS:
		return sysctl_io_number(req, mt_pmis, sizeof(mt_pmis), NULL, NULL);
	case MT_RETROGRADE:
		return sysctl_io_number(req, mt_retrograde, sizeof(mt_retrograde), NULL, NULL);
	case MT_TASK_THREAD:
		return sysctl_io_number(req, (int)mt_core_supported, sizeof(int), NULL, NULL);
	case MT_DEBUG: {
		int value = mt_debug;

		int r = sysctl_io_number(req, value, sizeof(value), &value, NULL);
		if (r) {
			return r;
		}
		mt_debug = value;

		return 0;
	}
	case MT_KDBG_TEST: {
		if (req->newptr == USER_ADDR_NULL) {
			return EINVAL;
		}

		int intrs_en = ml_set_interrupts_enabled(FALSE);
		MT_KDBG_TMPCPU_START(0x3fff);
		MT_KDBG_TMPCPU_END(0x3fff);

		MT_KDBG_TMPTH_START(0x3fff);
		MT_KDBG_TMPTH_END(0x3fff);
		ml_set_interrupts_enabled(intrs_en);

		return 0;
	}
	case MT_FIX_CPU_PERF: {
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		mt_fixed_counts(start);
		mt_fixed_counts(end);
		ml_set_interrupts_enabled(intrs_en);

		break;
	}
	case MT_FIX_THREAD_PERF: {
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		mt_cur_thread_fixed_counts(start);
		mt_cur_thread_fixed_counts(end);
		ml_set_interrupts_enabled(intrs_en);

		break;
	}
	case MT_FIX_TASK_PERF: {
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		mt_cur_task_fixed_counts(start);
		mt_cur_task_fixed_counts(end);
		ml_set_interrupts_enabled(intrs_en);

		break;
	}
	default:
		return ENOENT;
	}

#ifdef MT_CORE_INSTRS
	counts[0] = end[MT_CORE_INSTRS] - start[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
	counts[1] = end[MT_CORE_CYCLES] - start[MT_CORE_CYCLES];

	return copyout(counts, req->oldptr, MIN(req->oldlen, sizeof(counts)));
}
SYSCTL_DECL(_kern_monotonic);
SYSCTL_NODE(_kern, OID_AUTO, monotonic, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
		"monotonic");
SYSCTL_PROC(_kern_monotonic, OID_AUTO, supported,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
		(void *)MT_SUPPORTED, sizeof(int), mt_sysctl, "I",
		"whether monotonic is supported");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, debug,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED,
		(void *)MT_DEBUG, sizeof(int), mt_sysctl, "I",
		"whether monotonic is printing debug messages");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, pmis,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
		(void *)MT_PMIS, sizeof(uint64_t), mt_sysctl, "Q",
		"how many PMIs have been seen");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, retrograde_updates,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
		(void *)MT_RETROGRADE, sizeof(uint64_t), mt_sysctl, "Q",
		"how many times a counter appeared to go backwards");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, task_thread_counting,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED,
		(void *)MT_TASK_THREAD, sizeof(int), mt_sysctl, "I",
		"task and thread counting enabled");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, kdebug_test,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
		(void *)MT_KDBG_TEST, sizeof(int), mt_sysctl, "O",
		"test that kdebug integration works");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, fixed_cpu_perf,
		CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
		(void *)MT_FIX_CPU_PERF, sizeof(uint64_t) * 2, mt_sysctl, "O",
		"overhead of accessing the current CPU's counters");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, fixed_thread_perf,
		CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
		(void *)MT_FIX_THREAD_PERF, sizeof(uint64_t) * 2, mt_sysctl, "O",
		"overhead of accessing the current thread's counters");

SYSCTL_PROC(_kern_monotonic, OID_AUTO, fixed_task_perf,
		CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
		(void *)MT_FIX_TASK_PERF, sizeof(uint64_t) * 2, mt_sysctl, "O",
		"overhead of accessing the current task's counters");