/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/monotonic.h>
#include <machine/machine_routines.h>
#include <machine/monotonic.h>
#include <pexpert/pexpert.h>
#include <sys/param.h> /* NULL */
#include <sys/stat.h> /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h> /* must come after sys/stat.h */
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/monotonic.h>
static int mt_cdev_open(dev_t dev, int flags, int devtype, proc_t p);
static int mt_cdev_close(dev_t dev, int flags, int devtype, proc_t p);
static int mt_cdev_ioctl(dev_t dev, unsigned long cmd, char *uptr, int fflag,
		proc_t p);

#define MT_NODE "monotonic"
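
/*
 * Character device switch for the /dev/monotonic/* nodes: only open, close,
 * and ioctl are implemented; every other entry point is stubbed out with the
 * generic eno_* error routines.
 */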
static const struct cdevsw mt_cdevsw = {
	.d_open = mt_cdev_open,
	.d_close = mt_cdev_close,
	.d_ioctl = mt_cdev_ioctl,

	.d_read = eno_rdwrt, .d_write = eno_rdwrt, .d_stop = eno_stop,
	.d_reset = eno_reset, .d_ttys = NULL, .d_select = eno_select,
	.d_mmap = eno_mmap, .d_strategy = eno_strat, .d_type = 0
};
/*
 * Written at initialization, read-only thereafter.
 */
lck_grp_t *mt_lock_grp = NULL;

static int mt_dev_major;
static mt_device_t
mt_get_device(dev_t devnum)
{
	return &mt_devices[minor(devnum)];
}
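
/*
 * Per-device locking helpers; the assert variants spell out the invariants
 * that the ioctl paths below rely on.
 */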
static void
mt_device_lock(mt_device_t dev)
{
	lck_mtx_lock(&dev->mtd_lock);
}

static void
mt_device_unlock(mt_device_t dev)
{
	lck_mtx_unlock(&dev->mtd_lock);
}

static void
mt_device_assert_lock_held(__assert_only mt_device_t dev)
{
	LCK_MTX_ASSERT(&dev->mtd_lock, LCK_MTX_ASSERT_OWNED);
}

static void
mt_device_assert_inuse(__assert_only mt_device_t dev)
{
	assert(dev->mtd_inuse == true);
}
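
/*
 * Register the monotonic character devices: allocate a major number for the
 * cdevsw, then create a devfs node and a mutex for each device whose
 * mtd_init callback succeeds.
 */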
int
mt_dev_init(void)
{
	mt_lock_grp = lck_grp_alloc_init(MT_NODE, LCK_GRP_ATTR_NULL);
	assert(mt_lock_grp != NULL);

	mt_dev_major = cdevsw_add(-1 /* allocate a major number */, &mt_cdevsw);
	if (mt_dev_major < 0) {
		panic("monotonic: cdevsw_add failed: %d", mt_dev_major);
		__builtin_unreachable();
	}

	for (int i = 0; i < MT_NDEVS; i++) {
		if (mt_devices[i].mtd_init(&mt_devices[i])) {
			/* skip devices that fail to initialize */
			continue;
		}

		assert(mt_devices[i].mtd_ncounters > 0);

		dev_t dev = makedev(mt_dev_major, i);
		char name[128];
		snprintf(name, sizeof(name), MT_NODE "/%s", mt_devices[i].mtd_name);
		void *node = devfs_make_node(dev, DEVFS_CHAR, UID_ROOT,
				GID_WINDOWSERVER, 0666, name);
		if (!node) {
			panic("monotonic: devfs_make_node failed for '%s'",
					mt_devices[i].mtd_name);
			__builtin_unreachable();
		}

		lck_mtx_init(&mt_devices[i].mtd_lock, mt_lock_grp, LCK_ATTR_NULL);
	}

	return 0;
}
static int
mt_cdev_open(dev_t devnum, __unused int flags, __unused int devtype,
		__unused proc_t p)
{
	int error = 0;

	mt_device_t dev = mt_get_device(devnum);
	mt_device_lock(dev);

	if (dev->mtd_inuse) {
		error = EBUSY;
	} else {
		dev->mtd_inuse = true;
	}

	mt_device_unlock(dev);

	return error;
}
static int
mt_cdev_close(dev_t devnum, __unused int flags, __unused int devtype,
		__unused struct proc *p)
{
	mt_device_t dev = mt_get_device(devnum);

	mt_device_lock(dev);
	mt_device_assert_inuse(dev);
	dev->mtd_inuse = false;
	dev->mtd_reset();
	mt_device_unlock(dev);

	return 0;
}
static int
mt_ctl_add(mt_device_t dev, user_addr_t uptr)
{
	int error;
	uint32_t ctr;
	union monotonic_ctl_add ctl;

	mt_device_assert_lock_held(dev);

	error = copyin(uptr, &ctl, sizeof(ctl.in));
	if (error) {
		return error;
	}

	error = dev->mtd_add(&ctl.in.config, &ctr);
	if (error) {
		return error;
	}

	ctl.out.ctr = ctr;

	error = copyout(&ctl, uptr, sizeof(ctl.out));
	if (error) {
		return error;
	}

	return 0;
}
static int
mt_ctl_counts(mt_device_t dev, user_addr_t uptr)
{
	int error;
	union monotonic_ctl_counts ctl;

	mt_device_assert_lock_held(dev);

	error = copyin(uptr, &ctl, sizeof(ctl.in));
	if (error) {
		return error;
	}

	if (ctl.in.ctr_mask == 0) {
		return EINVAL;
	}

	{
		uint64_t counts[dev->mtd_nmonitors][dev->mtd_ncounters];
		memset(counts, 0,
				dev->mtd_ncounters * dev->mtd_nmonitors * sizeof(counts[0][0]));
		error = dev->mtd_read(ctl.in.ctr_mask, (uint64_t *)counts);
		if (error) {
			return error;
		}

		error = copyout(&counts, uptr, sizeof(counts));
		if (error) {
			return error;
		}
	}

	return 0;
}
static int
mt_ctl_enable(mt_device_t dev, user_addr_t uptr)
{
	int error;
	union monotonic_ctl_enable ctl;

	mt_device_assert_lock_held(dev);

	error = copyin(uptr, &ctl, sizeof(ctl));
	if (error) {
		return error;
	}

	dev->mtd_enable(ctl.in.enable);

	return 0;
}
static int
mt_ctl_reset(mt_device_t dev)
{
	mt_device_assert_lock_held(dev);
	dev->mtd_reset();

	return 0;
}
static int
mt_cdev_ioctl(dev_t devnum, unsigned long cmd, char *arg, __unused int flags,
		__unused proc_t p)
{
	int error = ENODEV;
	user_addr_t uptr = *(user_addr_t *)(void *)arg;

	mt_device_t dev = mt_get_device(devnum);
	mt_device_lock(dev);

	switch (cmd) {
	case MT_IOC_RESET:
		error = mt_ctl_reset(dev);
		break;

	case MT_IOC_ADD:
		error = mt_ctl_add(dev, uptr);
		break;

	case MT_IOC_ENABLE:
		error = mt_ctl_enable(dev, uptr);
		break;

	case MT_IOC_COUNTS:
		error = mt_ctl_counts(dev, uptr);
		break;

	case MT_IOC_GET_INFO: {
		union monotonic_ctl_info info = {
			.out = {
				.nmonitors = dev->mtd_nmonitors,
				.ncounters = dev->mtd_ncounters,
			},
		};

		error = copyout(&info, uptr, sizeof(info));
		break;
	}

	default:
		error = ENODEV;
		break;
	}

	mt_device_unlock(dev);

	return error;
}
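
/*
 * System call handler: copy the current thread's fixed counter values
 * (instructions when available, and cycles) out to the caller's buffer.
 */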
int
thread_selfcounts(__unused struct proc *p,
		struct thread_selfcounts_args *uap, __unused int *ret_out)
{
	uint64_t counts[2] = { 0 };
	uint64_t thread_counts[MT_CORE_NFIXED] = { 0 };

	mt_cur_thread_fixed_counts(thread_counts);

#ifdef MT_CORE_INSTRS
	counts[0] = thread_counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
	counts[1] = thread_counts[MT_CORE_CYCLES];

	return copyout(counts, uap->buf, MIN(sizeof(counts), uap->nbytes));
}
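
/*
 * Selector values and handler for the kern.monotonic.* sysctls declared at
 * the bottom of the file; arg1 carries the selector for each node.
 */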
enum mt_sysctl {
	MT_SUPPORTED,
	MT_PMIS,
	MT_RETROGRADE,
	MT_TASK_THREAD,
	MT_DEBUG,
	MT_KDBG_TEST,
	MT_FIX_CPU_PERF,
	MT_FIX_THREAD_PERF,
	MT_FIX_TASK_PERF,
};

static int
mt_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	uint64_t start[MT_CORE_NFIXED] = { 0 }, end[MT_CORE_NFIXED] = { 0 };
	uint64_t counts[2] = { 0 };

	switch ((enum mt_sysctl)arg1) {
	case MT_SUPPORTED:
		return sysctl_io_number(req, (int)mt_core_supported, sizeof(int), NULL, NULL);
	case MT_PMIS:
		return sysctl_io_number(req, mt_count_pmis(), sizeof(uint64_t), NULL, NULL);
	case MT_RETROGRADE: {
		uint64_t value = os_atomic_load_wide(&mt_retrograde, relaxed);
		return sysctl_io_number(req, value, sizeof(mt_retrograde), NULL, NULL);
	}
	case MT_TASK_THREAD:
		return sysctl_io_number(req, (int)mt_core_supported, sizeof(int), NULL, NULL);
	case MT_DEBUG: {
		int value = mt_debug;

		int r = sysctl_io_number(req, value, sizeof(value), &value, NULL);
		if (r) {
			return r;
		}
		mt_debug = value;

		return 0;
	}
	case MT_KDBG_TEST: {
		if (req->newptr == USER_ADDR_NULL) {
			return EINVAL;
		}

		int intrs_en = ml_set_interrupts_enabled(FALSE);
		MT_KDBG_TMPCPU_START(0x3fff);
		MT_KDBG_TMPCPU_END(0x3fff);

		MT_KDBG_TMPTH_START(0x3fff);
		MT_KDBG_TMPTH_END(0x3fff);
		ml_set_interrupts_enabled(intrs_en);

		return 0;
	}
	case MT_FIX_CPU_PERF: {
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		mt_fixed_counts(start);
		mt_fixed_counts(end);
		ml_set_interrupts_enabled(intrs_en);

		break;
	}
	case MT_FIX_THREAD_PERF: {
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		mt_cur_thread_fixed_counts(start);
		mt_cur_thread_fixed_counts(end);
		ml_set_interrupts_enabled(intrs_en);

		break;
	}
	case MT_FIX_TASK_PERF: {
		int intrs_en = ml_set_interrupts_enabled(FALSE);
		mt_cur_task_fixed_counts(start);
		mt_cur_task_fixed_counts(end);
		ml_set_interrupts_enabled(intrs_en);

		break;
	}
	default:
		return ENOENT;
	}

#ifdef MT_CORE_INSTRS
	counts[0] = end[MT_CORE_INSTRS] - start[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
	counts[1] = end[MT_CORE_CYCLES] - start[MT_CORE_CYCLES];

	return copyout(counts, req->oldptr, MIN(req->oldlen, sizeof(counts)));
}
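
/*
 * kern.monotonic sysctl nodes, all routed through mt_sysctl with their
 * enum mt_sysctl value as arg1.
 */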
SYSCTL_DECL(_kern_monotonic);
SYSCTL_NODE(_kern, OID_AUTO, monotonic, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
		"monotonic counters");

#define MT_SYSCTL(NAME, ARG, FLAGS, SIZE, SIZESTR, DESC) \
	SYSCTL_PROC(_kern_monotonic, OID_AUTO, NAME, \
			CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | (FLAGS), \
			(void *)(ARG), SIZE, mt_sysctl, SIZESTR, DESC)

MT_SYSCTL(supported, MT_SUPPORTED, 0, sizeof(int), "I",
		"whether monotonic is supported");
MT_SYSCTL(debug, MT_DEBUG, CTLFLAG_MASKED, sizeof(int), "I",
		"whether monotonic is printing debug messages");
MT_SYSCTL(pmis, MT_PMIS, 0, sizeof(uint64_t), "Q",
		"number of PMIs seen");
MT_SYSCTL(retrograde_updates, MT_RETROGRADE, 0, sizeof(uint64_t), "Q",
		"number of times a counter appeared to go backwards");
MT_SYSCTL(task_thread_counting, MT_TASK_THREAD, 0, sizeof(int), "I",
		"whether task and thread counting is enabled");
MT_SYSCTL(kdebug_test, MT_KDBG_TEST, CTLFLAG_MASKED, sizeof(int), "O",
		"emit kdebug events to test monotonic's tracing integration");
MT_SYSCTL(fixed_cpu_perf, MT_FIX_CPU_PERF, CTLFLAG_MASKED,
		sizeof(uint64_t) * 2, "O",
		"overhead of accessing the current CPU's counters");
MT_SYSCTL(fixed_thread_perf, MT_FIX_THREAD_PERF, CTLFLAG_MASKED,
		sizeof(uint64_t) * 2, "O",
		"overhead of accessing the current thread's counters");
MT_SYSCTL(fixed_task_perf, MT_FIX_TASK_PERF, CTLFLAG_MASKED,
		sizeof(uint64_t) * 2, "O",
		"overhead of accessing the current task's counters");