/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef KERN_MONOTONIC_H
#define KERN_MONOTONIC_H

#if MONOTONIC

#include <stdbool.h>
#include <stdint.h>
#include <sys/cdefs.h>

extern _Atomic uint64_t mt_pmis;
extern _Atomic uint64_t mt_retrograde;

void mt_fixed_counts(uint64_t *counts);
void mt_cur_thread_fixed_counts(uint64_t *counts);
void mt_cur_task_fixed_counts(uint64_t *counts);
uint64_t mt_cur_cpu_instrs(void);
uint64_t mt_cur_cpu_cycles(void);
uint64_t mt_cur_thread_instrs(void);
uint64_t mt_cur_thread_cycles(void);
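
/*
 * Illustrative usage sketch (not part of the original header): measuring a
 * code region with the per-CPU fixed counters.  `do_work()` is a hypothetical
 * stand-in for the measured code, and the sketch assumes the thread stays on
 * the same CPU for the duration (e.g. preemption disabled) so that the deltas
 * are meaningful.
 *
 *	uint64_t instrs_before = mt_cur_cpu_instrs();
 *	uint64_t cycles_before = mt_cur_cpu_cycles();
 *	do_work();
 *	uint64_t instrs = mt_cur_cpu_instrs() - instrs_before;
 *	uint64_t cycles = mt_cur_cpu_cycles() - cycles_before;
 */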

#if MACH_KERNEL_PRIVATE

#include <kern/thread.h>
#include <kern/task.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data_internal.h>
#elif defined(__x86_64__)
#include <i386/cpu_data.h>
#else /* !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__) */
#error unsupported architecture
#endif /* !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__) */

void mt_update_fixed_counts(void);
void mt_update_task(task_t task, thread_t thread);
bool mt_update_thread(thread_t thread);
int mt_fixed_thread_counts(thread_t thread, uint64_t *counts_out);
int mt_fixed_task_counts(task_t task, uint64_t *counts_out);
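
/*
 * Illustrative sketch (not part of the original header): snapshotting a
 * thread's fixed counters.  The buffer size `MT_CORE_NFIXED` stands in for
 * however many fixed counters the architecture reports, and treating a
 * non-zero return as failure is assumed from the `int` return type; both are
 * assumptions here, not guarantees from this header.
 *
 *	uint64_t counts[MT_CORE_NFIXED] = { 0 };
 *	int err = mt_fixed_thread_counts(thread, counts);
 *	if (err != 0) {
 *		// counters are unavailable for this thread
 *	}
 */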

/*
 * Private API for the platform layers.
 */

/*
 * Called once early in boot, before CPU initialization occurs (where
 * `mt_cpu_up` is called).
 *
 * This allows monotonic to detect if the hardware supports performance counters
 * and install the global PMI handler.
 */
void mt_early_init(void);
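
/*
 * Illustrative call-ordering sketch (not from the original header); the
 * surrounding platform routines are hypothetical:
 *
 *	// once, early in boot, before any CPU is initialized:
 *	mt_early_init();
 *	// later, as each CPU is brought up:
 *	mt_cpu_up(cpu);
 */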

/*
 * Called when a core is idling and exiting from idle.
 */
void mt_cpu_idle(cpu_data_t *cpu);
void mt_cpu_run(cpu_data_t *cpu);

/*
 * Called when a core is shutting down or powering up.
 */
void mt_cpu_down(cpu_data_t *cpu);
void mt_cpu_up(cpu_data_t *cpu);
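
/*
 * Illustrative sketch (not part of the original header) of how a platform
 * layer might wrap its idle and power transitions around these hooks.  The
 * `platform_*` routine names and the use of `current_cpu_datap()` are
 * assumptions for illustration only.
 *
 *	cpu_data_t *cpu = current_cpu_datap();
 *
 *	mt_cpu_idle(cpu);              // core is entering idle
 *	platform_wait_for_interrupt();
 *	mt_cpu_run(cpu);               // core is exiting idle
 *
 *	mt_cpu_down(cpu);              // core is shutting down
 *	platform_power_cycle_core();
 *	mt_cpu_up(cpu);                // core is powering back up
 */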

/*
 * Called while single-threaded when the system is going to sleep.
 */
void mt_sleep(void);

/*
 * Called on each CPU as the system is waking from sleep.
 */
void mt_wake_per_core(void);

#if __ARM_CLUSTER_COUNT__
/*
 * Called when a cluster is initialized.
 */
void mt_cluster_init(void);
#endif /* __ARM_CLUSTER_COUNT__ */

/*
 * "Up-call" to the Mach layer to update counters from a PMI.
 */
uint64_t mt_cpu_update_count(cpu_data_t *cpu, unsigned int ctr);
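
/*
 * Illustrative sketch (not part of the original header): a platform PMI
 * handler forwarding a counter overflow to the Mach layer.  The handler name,
 * how the CPU data and counter index are obtained, and what is done with the
 * returned value are all assumptions for illustration.
 *
 *	static void
 *	hypothetical_pmi_handler(cpu_data_t *cpu, unsigned int ctr)
 *	{
 *		uint64_t count = mt_cpu_update_count(cpu, ctr);
 *		// `count` is the updated value of fixed counter `ctr`
 *	}
 */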

/*
 * Private API for the scheduler.
 */

/*
 * Called when a thread is switching off-core or expires its quantum.
 */
void mt_sched_update(thread_t thread);
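
/*
 * Illustrative sketch (not part of the original header): the context-switch
 * path would pass the outgoing thread before it leaves the core.  The
 * surrounding routine and the `old_thread` variable are hypothetical.
 *
 *	// in the context-switch path, before `old_thread` goes off-core:
 *	mt_sched_update(old_thread);
 */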

/*
 * Called when a thread is terminating to save its counters into the task. The
 * task lock must be held and the thread should be removed from the task's
 * thread list in that same critical section.
 */
void mt_terminate_update(task_t task, thread_t thread);
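
/*
 * Illustrative sketch (not part of the original header) of the locking
 * discipline described above.  The thread-list removal is shown only
 * schematically; the exact queue manipulation in the termination path is not
 * reproduced here.
 *
 *	task_lock(task);
 *	// remove `thread` from the task's thread list here, in this same
 *	// critical section
 *	mt_terminate_update(task, thread);
 *	task_unlock(task);
 */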

/*
 * Private API for the performance controller callout.
 */
void mt_perfcontrol(uint64_t *instrs, uint64_t *cycles);
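
/*
 * Illustrative sketch (not part of the original header): the callout fills
 * instruction and cycle counts through out-parameters; the surrounding
 * performance-controller code is not shown.
 *
 *	uint64_t instrs = 0, cycles = 0;
 *	mt_perfcontrol(&instrs, &cycles);
 */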

/*
 * Private API for stackshot.
 */
void mt_stackshot_thread(thread_t thread, uint64_t *instrs, uint64_t *cycles);
void mt_stackshot_task(task_t task, uint64_t *instrs, uint64_t *cycles);
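
/*
 * Illustrative sketch (not part of the original header): recording a thread's
 * counters while stackshot walks it.  The `record` structure and its fields
 * are hypothetical.
 *
 *	uint64_t instrs = 0, cycles = 0;
 *	mt_stackshot_thread(thread, &instrs, &cycles);
 *	record->thread_instrs = instrs;
 *	record->thread_cycles = cycles;
 */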

/*
 * Private API for microstackshot.
 */
typedef void (*mt_pmi_fn)(bool user_mode, void *ctx);
int mt_microstackshot_start(unsigned int ctr, uint64_t period, mt_pmi_fn fn,
    void *ctx);
int mt_microstackshot_stop(void);
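
/*
 * Illustrative sketch (not part of the original header): registering a PMI
 * callback that fires every `period` counts of fixed counter `ctr`.  The
 * callback name, the counter index, the period value, and the 0-on-success
 * convention are assumptions for illustration.
 *
 *	static void
 *	hypothetical_pmi_cb(bool user_mode, void *ctx)
 *	{
 *		// sample the interrupted context; `user_mode` indicates
 *		// whether the PMI landed in user space
 *	}
 *
 *	int err = mt_microstackshot_start(0, 1000000, hypothetical_pmi_cb, NULL);
 *	if (err == 0) {
 *		// later, when sampling is no longer needed:
 *		mt_microstackshot_stop();
 *	}
 */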

#endif /* MACH_KERNEL_PRIVATE */

#endif /* MONOTONIC */

#endif /* !defined(KERN_MONOTONIC_H) */