/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef __x86_64__
#error This file is only needed on weakly-ordered systems!
#endif

#include <machine/atomic.h>
#include <machine/commpage.h>
#include <machine/machine_cpu.h>

#include <kern/sched_prim.h>
#include <kern/processor.h>
#include <kern/ast.h>

#include <kern/cpu_quiesce.h>

/*
 * CPU quiescing generation counter implemented with a checkin mask
 *
 * A tri-state bitfield, with 2 bits for each processor:
 * 1) 'checkin' bit, saying this processor has 'checked in', i.e. executed the acqrel barrier
 * 2) 'expected' bit, saying this processor is expected to check in, i.e. not idle.
 *
 * When a processor causes the 'expected' bits to equal the 'checkin' bits, which
 * indicates that all processors have executed the barrier, it ticks the algorithm
 * and resets the state.
 *
 * Idle CPUs won't check in, because they don't run, so the algorithm won't tick.
 * However, an idle CPU can't be running userspace code, so we don't need it to
 * execute barriers; instead we have it 'leave' the counter so that it doesn't
 * delay the tick while idle.
 *
 * This bitfield currently limits MAX_CPUS to 32 on LP64.
 * In the future, we can use double-wide atomics and int128 if we need 64 CPUs.
 *
 * The mask only guarantees ordering to code running in userspace.
 * We defer joining the counter until we actually reach userspace, allowing
 * processors that come out of idle and only run kernel code to avoid the overhead
 * of participation.
 *
 * We additionally defer updating the counter for a minimum interval to
 * reduce the frequency of executing the exclusive atomic operations.
 *
 * The longest delay between two checkins, assuming that at least one processor
 * joins, is <checkin delay> + (<thread quantum> * 2)
 */
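
/*
 * Worked example (illustrative only): on a hypothetical 2-CPU system,
 * cpu 0 owns bit 0 (checkin) and bit 1 (expected); cpu 1 owns bits 2 and 3.
 * With both CPUs running and only cpu 1 checked in, the state is 0b1110.
 * When cpu 0 checks in, the state becomes 0b1111, and
 * (state & CPU_CHECKIN_MASK) == ((state & CPU_EXPECTED_MASK) >> 1)
 * holds (0b0101 == 0b1010 >> 1), so cpu 0 ticks the generation counter
 * and clears the checkin bits, leaving 0b1010 for the next round.
 */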

typedef unsigned long checkin_mask_t;

static _Atomic checkin_mask_t cpu_quiescing_checkin_state;

static uint64_t cpu_checkin_last_commit;

#define CPU_CHECKIN_MIN_INTERVAL_US     4000 /* 4ms */
#define CPU_CHECKIN_MIN_INTERVAL_MAX_US USEC_PER_SEC /* 1s */
static uint64_t cpu_checkin_min_interval;
uint32_t cpu_checkin_min_interval_us;

#if __LP64__
static_assert(MAX_CPUS <= 32);
#define CPU_CHECKIN_MASK        0x5555555555555555UL
#define CPU_EXPECTED_MASK       (~CPU_CHECKIN_MASK)
#else
/* Avoid double-wide CAS on 32-bit platforms by using a 32-bit state and mask */
static_assert(MAX_CPUS <= 16);
#define CPU_CHECKIN_MASK        0x55555555UL
#define CPU_EXPECTED_MASK       (~CPU_CHECKIN_MASK)
#endif

static_assert(CPU_CHECKIN_MASK == CPU_EXPECTED_MASK >> 1);

static inline checkin_mask_t
cpu_checked_in_bit(int cpuid)
{
        return 1UL << (2 * cpuid);
}

static inline checkin_mask_t
cpu_expected_bit(int cpuid)
{
        return 1UL << (2 * cpuid + 1);
}
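
/*
 * For example, for cpuid 3: cpu_checked_in_bit(3) == 1UL << 6 == 0x40,
 * which lands in CPU_CHECKIN_MASK, and cpu_expected_bit(3) == 1UL << 7
 * == 0x80, which lands in CPU_EXPECTED_MASK.
 */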

void
cpu_quiescent_counter_init(void)
{
        /* sanity-check that the highest valid cpuid's bits land in the right masks */
        assert(CPU_CHECKIN_MASK & cpu_checked_in_bit(MAX_CPUS - 1));
        assert(CPU_EXPECTED_MASK & cpu_expected_bit(MAX_CPUS - 1));
        assert((CPU_CHECKIN_MASK & cpu_expected_bit(MAX_CPUS - 1)) == 0);
        assert((CPU_EXPECTED_MASK & cpu_checked_in_bit(MAX_CPUS - 1)) == 0);

        cpu_quiescent_counter_set_min_interval_us(CPU_CHECKIN_MIN_INTERVAL_US);
}

void
cpu_quiescent_counter_set_min_interval_us(uint32_t new_value_us)
{
        /* clamp to something vaguely sane */
        if (new_value_us > CPU_CHECKIN_MIN_INTERVAL_MAX_US) {
                new_value_us = CPU_CHECKIN_MIN_INTERVAL_MAX_US;
        }

        cpu_checkin_min_interval_us = new_value_us;

        uint64_t abstime = 0;
        clock_interval_to_absolutetime_interval(cpu_checkin_min_interval_us,
            NSEC_PER_USEC, &abstime);
        cpu_checkin_min_interval = abstime;
}
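
/*
 * For example, the default CPU_CHECKIN_MIN_INTERVAL_US of 4000 yields a
 * 4ms minimum interval, while a (hypothetical) request of 5 * USEC_PER_SEC
 * would be clamped to the 1s maximum before conversion to absolute time.
 */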

/*
 * Called when all running CPUs have checked in.
 *
 * The commpage increment is protected by the 'lock' of having caused the tick,
 * and it is published by the state reset release barrier.
 */
static void
cpu_quiescent_counter_commit(uint64_t ctime)
{
        __kdebug_only uint64_t old_gen;
        __kdebug_only checkin_mask_t old_state;

        old_gen = commpage_increment_cpu_quiescent_counter();

        cpu_checkin_last_commit = ctime;

        old_state = os_atomic_and(&cpu_quiescing_checkin_state, ~CPU_CHECKIN_MASK, release);

        KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUIESCENT_COUNTER), old_gen, old_state, ctime, 0);
}

/*
 * Have all the expected CPUs checked in?
 */
static bool
cpu_quiescent_counter_needs_commit(checkin_mask_t state)
{
        return (state & CPU_CHECKIN_MASK) == ((state & CPU_EXPECTED_MASK) >> 1);
}
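
/*
 * For example, with 2 CPUs: needs_commit(0b1111) is true (0b0101 == 0b0101)
 * and needs_commit(0b1110) is false (0b0100 != 0b0101). Note that an empty
 * state also 'needs commit' vacuously (0 == 0), which is why the first
 * processor joining an empty mask only sets its expected bit; see
 * cpu_quiescent_counter_ast below.
 */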

/*
 * Called when a processor wants to start participating in the counter, e.g.
 * 1) when context switching away from the idle thread
 * 2) when coming up for the first time
 * 3) when coming up after a shutdown
 *
 * Called with interrupts disabled.
 */
void
cpu_quiescent_counter_join(__unused uint64_t ctime)
{
        processor_t processor = current_processor();
        __assert_only int cpuid = processor->cpu_id;

        assert(processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_NONE ||
            processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_LEFT);

        assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) &
            (cpu_expected_bit(cpuid) | cpu_checked_in_bit(cpuid))) == 0);

        processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_PENDING_JOIN;

        /*
         * Mark the processor to call cpu_quiescent_counter_ast before it
         * ever returns to userspace.
         */
        ast_on(AST_UNQUIESCE);
}

/*
 * Called with interrupts disabled from the userspace boundary at the
 * AST_UNQUIESCE callback. It needs an acquire barrier so that this processor
 * observes the data and counter values published by other CPUs.
 */
void
cpu_quiescent_counter_ast(void)
{
        processor_t processor = current_processor();
        int cpuid = processor->cpu_id;

        assert(processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_PENDING_JOIN);

        /* We had better not already be joined. */
        assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) &
            (cpu_expected_bit(cpuid) | cpu_checked_in_bit(cpuid))) == 0);

        /*
         * No release barrier needed because we have no prior state to publish.
         * Acquire barrier needed because we need this processor to see
         * the latest counter value.
         *
         * The state may be in 'needs checkin' both before and after
         * this atomic or.
         *
         * Additionally, if this is the first processor to come out of idle,
         * it may need to kickstart the algorithm, otherwise it would
         * stay in 'needs commit' perpetually with no processor assigned to
         * actually do the commit. To do that, the first processor only adds
         * its expected bit.
         */

        processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_JOINED;
        processor->cpu_quiesce_last_checkin = mach_absolute_time();

        checkin_mask_t old_mask, new_mask;
        os_atomic_rmw_loop(&cpu_quiescing_checkin_state, old_mask, new_mask, acquire, {
                if (old_mask == 0) {
                        new_mask = old_mask | cpu_expected_bit(cpuid);
                } else {
                        new_mask = old_mask | cpu_expected_bit(cpuid) | cpu_checked_in_bit(cpuid);
                }
        });
}
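
/*
 * A minimal sketch of what the rmw loop above amounts to, written with C11
 * <stdatomic.h> primitives instead of xnu's os_atomic_rmw_loop macro
 * (illustrative only, not the actual expansion):
 *
 *      checkin_mask_t old_mask =
 *          atomic_load_explicit(&cpu_quiescing_checkin_state, memory_order_relaxed);
 *      checkin_mask_t new_mask;
 *      do {
 *              new_mask = old_mask | cpu_expected_bit(cpuid);
 *              if (old_mask != 0) {
 *                      new_mask |= cpu_checked_in_bit(cpuid);
 *              }
 *      } while (!atomic_compare_exchange_weak_explicit(&cpu_quiescing_checkin_state,
 *          &old_mask, new_mask, memory_order_acquire, memory_order_relaxed));
 */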

/*
 * Called when a processor no longer wants to participate in the counter,
 * i.e. when a processor is on its way to idle or shutdown.
 *
 * Called with interrupts disabled.
 *
 * The processor needs to remove itself from the expected mask, to allow the
 * algorithm to continue ticking without its participation.
 * However, it needs to ensure that anything it has done since the last time
 * it checked in has been published before the next tick is allowed to commit.
 */
void
cpu_quiescent_counter_leave(uint64_t ctime)
{
        processor_t processor = current_processor();
        int cpuid = processor->cpu_id;

        assert(processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_JOINED ||
            processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_PENDING_JOIN);

        /* We no longer need the cpu_quiescent_counter_ast callback to be armed */
        ast_off(AST_UNQUIESCE);

        if (processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_PENDING_JOIN) {
                /* We never actually joined, so we don't have to do the work to leave. */
                processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_LEFT;
                return;
        }

        /* Leaving can't be deferred, even if we're within the min interval */
        processor->cpu_quiesce_last_checkin = ctime;

        checkin_mask_t mask = cpu_checked_in_bit(cpuid) | cpu_expected_bit(cpuid);

        checkin_mask_t orig_state = os_atomic_and_orig(&cpu_quiescing_checkin_state,
            ~mask, acq_rel);

        assert((orig_state & cpu_expected_bit(cpuid)));

        processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_LEFT;

        if (cpu_quiescent_counter_needs_commit(orig_state)) {
                /*
                 * The old state indicates someone else was already doing a commit
                 * but hadn't finished resetting the bitfield yet. Our acq_rel was
                 * ordered ahead of that reset, so the in-progress commit covers
                 * this processor's published state and we're done here.
                 */
                return;
        }

        checkin_mask_t new_state = orig_state & ~mask;

        if (cpu_quiescent_counter_needs_commit(new_state)) {
                cpu_quiescent_counter_commit(ctime);
        }
}
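
/*
 * Worked example (illustrative only): with 2 CPUs, suppose cpu 1 has checked
 * in (state 0b1110) and cpu 0 is expected but hasn't checked in. If cpu 0
 * leaves, clearing its bits yields 0b1100, which satisfies
 * cpu_quiescent_counter_needs_commit(), so the leaving cpu 0 performs the
 * commit on behalf of the remaining checked-in CPUs.
 */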

/*
 * Called when a processor wants to check in to the counter.
 * If it hasn't yet fully joined, it doesn't need to check in.
 *
 * Called with interrupts disabled.
 */
void
cpu_quiescent_counter_checkin(uint64_t ctime)
{
        processor_t processor = current_processor();
        int cpuid = processor->cpu_id;

        assert(processor->cpu_quiesce_state != CPU_QUIESCE_COUNTER_NONE);

        /* If we're not joined yet, we don't need to check in */
        if (__probable(processor->cpu_quiesce_state != CPU_QUIESCE_COUNTER_JOINED)) {
                return;
        }

        /* If we've checked in recently, we don't need to check in yet. */
        if (__probable((ctime - processor->cpu_quiesce_last_checkin) <= cpu_checkin_min_interval)) {
                return;
        }

        processor->cpu_quiesce_last_checkin = ctime;

        checkin_mask_t state = os_atomic_load(&cpu_quiescing_checkin_state, relaxed);

        assert((state & cpu_expected_bit(cpuid)));

        if (__probable((state & cpu_checked_in_bit(cpuid)))) {
                /*
                 * Processor has already checked in for this round, no need to
                 * acquire the cacheline exclusive.
                 */
                return;
        }

        checkin_mask_t orig_state = os_atomic_or_orig(&cpu_quiescing_checkin_state,
            cpu_checked_in_bit(cpuid), acq_rel);

        checkin_mask_t new_state = orig_state | cpu_checked_in_bit(cpuid);

        if (cpu_quiescent_counter_needs_commit(new_state)) {
                assertf(!cpu_quiescent_counter_needs_commit(orig_state),
                    "old: 0x%lx, new: 0x%lx", orig_state, new_state);
                cpu_quiescent_counter_commit(ctime);
        }
}
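
/*
 * Worked example (illustrative only): with 2 CPUs, suppose cpu 0 has already
 * checked in and cpu 1 has not (state 0b1011). When cpu 1 checks in, the
 * atomic or transitions the state to 0b1111, which needs a commit while the
 * prior state did not, so cpu 1 ticks the generation counter and the reset
 * leaves 0b1010 for the next round.
 */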

#if MACH_ASSERT
/*
 * Called on all AST exits to userspace to assert this processor actually joined
 *
 * Called with interrupts disabled after the AST should have been handled
 */
void
cpu_quiescent_counter_assert_ast(void)
{
        processor_t processor = current_processor();
        int cpuid = processor->cpu_id;

        assert(processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_JOINED);

        checkin_mask_t state = os_atomic_load(&cpu_quiescing_checkin_state, relaxed);
        assert((state & cpu_expected_bit(cpuid)));
}
#endif /* MACH_ASSERT */