/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:    arm/rtclock.c
 * Purpose: Routines for handling the machine dependent
 *          real-time clock.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
#include <kern/timer_queue.h>

#include <kern/host_notify.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>
#include <machine/config.h>
#include <arm/exception.h>
#include <arm/cpu_data_internal.h>
#if __arm64__
#include <arm64/proc_reg.h>
#elif __arm__
#include <arm/proc_reg.h>
#else
#error Unsupported arch
#endif
#include <arm/rtclock.h>

#include <IOKit/IOPlatformExpert.h>
#include <libkern/OSAtomic.h>

#include <sys/kdebug.h>

#define MAX_TIMEBASE_TRIES 10

int rtclock_init(void);

static int
deadline_to_decrementer(uint64_t deadline,
    uint64_t now);
static void
timebase_callback(struct timebase_freq_t * freq);

#if DEVELOPMENT || DEBUG
uint32_t absolute_time_validation = 0;
#endif

/*
 * Configure the real-time clock device at boot
 */
void
rtclock_early_init(void)
{
    PE_register_timebase_callback(timebase_callback);
#if DEVELOPMENT || DEBUG
    uint32_t tmp_mv = 1;

#if defined(APPLE_ARM64_ARCH_FAMILY)
    /* Enable MAT validation on A0 hardware by default. */
    absolute_time_validation = ml_get_topology_info()->chip_revision == CPU_VERSION_A0;
#endif

    if (kern_feature_override(KF_MATV_OVRD)) {
        absolute_time_validation = 0;
    }
    if (PE_parse_boot_argn("timebase_validation", &tmp_mv, sizeof(tmp_mv))) {
        absolute_time_validation = tmp_mv;
    }
#endif
}

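/*
 * Platform expert timebase callback: derive the mach_timebase_info
 * conversion constants (absolute-time ticks to nanoseconds) and the
 * per-second/per-microsecond divisors from the reported timebase frequency.
 */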
static void
timebase_callback(struct timebase_freq_t * freq)
{
    unsigned long numer, denom;
    uint64_t t64_1, t64_2;
    uint32_t divisor;

    if (freq->timebase_den < 1 || freq->timebase_den > 4 ||
        freq->timebase_num < freq->timebase_den) {
        panic("rtclock timebase_callback: invalid constant %ld / %ld",
            freq->timebase_num, freq->timebase_den);
    }

    denom = freq->timebase_num;
    numer = freq->timebase_den * NSEC_PER_SEC;
    // reduce by the greatest common divisor to minimize overflow
    if (numer > denom) {
        t64_1 = numer;
        t64_2 = denom;
    } else {
        t64_1 = denom;
        t64_2 = numer;
    }
    while (t64_2 != 0) {
        uint64_t temp = t64_2;
        t64_2 = t64_1 % t64_2;
        t64_1 = temp;
    }
    numer /= t64_1;
    denom /= t64_1;

    rtclock_timebase_const.numer = (uint32_t)numer;
    rtclock_timebase_const.denom = (uint32_t)denom;
    divisor = (uint32_t)(freq->timebase_num / freq->timebase_den);

    rtclock_sec_divisor = divisor;
    rtclock_usec_divisor = divisor / USEC_PER_SEC;
}

/*
 * Initialize the system clock device for the current cpu
 */
int
rtclock_init(void)
{
    uint64_t abstime;
    cpu_data_t * cdp;

    clock_timebase_init();
    ml_init_lock_timeout();

    cdp = getCpuDatap();

    abstime = mach_absolute_time();
    cdp->rtcPop = EndOfAllTime;     /* Init Pop time */
    timer_resync_deadlines();       /* Start the timers going */

    return 1;
}

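/*
 * Return the current absolute (monotonic) time in timebase ticks.
 * On DEVELOPMENT/DEBUG kernels, when timebase validation is enabled,
 * the value is cross-checked against the last globally observed
 * timestamp to catch the timebase appearing to run backwards.
 */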
uint64_t
mach_absolute_time(void)
{
#if DEVELOPMENT || DEBUG
    if (__improbable(absolute_time_validation == 1)) {
        static volatile uint64_t s_last_absolute_time = 0;
        uint64_t new_absolute_time, old_absolute_time;
        int attempts = 0;

        /* ARM 64: We need a dsb here to ensure that the load of s_last_absolute_time
         * completes before the timebase read. Were the load to complete after the
         * timebase read, there would be a window for another CPU to update
         * s_last_absolute_time and leave us in an inconsistent state. Consider the
         * following interleaving:
         *
         *   Let s_last_absolute_time = t0
         *   CPU0: Read timebase at t1
         *   CPU1: Read timebase at t2
         *   CPU1: Update s_last_absolute_time to t2
         *   CPU0: Load completes
         *   CPU0: Update s_last_absolute_time to t1
         *
         * This would cause the assertion to fail even though time did not go
         * backwards. Thus, we use a dsb to guarantee completion of the load before
         * the timebase read.
         */
        do {
            attempts++;
            old_absolute_time = s_last_absolute_time;

#if __arm64__
            __asm__ volatile ("dsb ld" ::: "memory");
#else
            OSSynchronizeIO(); // See osfmk/arm64/rtclock.c
#endif

            new_absolute_time = ml_get_timebase();
        } while (attempts < MAX_TIMEBASE_TRIES && !OSCompareAndSwap64(old_absolute_time, new_absolute_time, &s_last_absolute_time));

        if (attempts < MAX_TIMEBASE_TRIES && old_absolute_time > new_absolute_time) {
            panic("mach_absolute_time returning non-monotonically increasing value 0x%llx (old value 0x%llx)\n",
                new_absolute_time, old_absolute_time);
        }
        return new_absolute_time;
    } else {
        return ml_get_timebase();
    }
#else
    return ml_get_timebase();
#endif
}

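/*
 * Return a low-cost, possibly coarse timestamp.  When the hardware
 * timebase is cheap to read this is just the timebase; otherwise fall
 * back to the current processor's last dispatch time.
 */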
uint64_t
mach_approximate_time(void)
{
#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ || __arm64__
    /* Hardware supports a fast timestamp, so grab it without asserting monotonicity */
    return ml_get_timebase();
#else
    processor_t processor;
    uint64_t approx_time;

    disable_preemption();
    processor = current_processor();
    approx_time = processor->last_dispatch;
    enable_preemption();

    return approx_time;
#endif
}

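/*
 * Return the current system uptime, split into whole seconds and
 * microseconds (or nanoseconds), derived from the monotonic timebase.
 */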
void
clock_get_system_microtime(clock_sec_t * secs,
    clock_usec_t * microsecs)
{
    absolutetime_to_microtime(mach_absolute_time(), secs, microsecs);
}

void
clock_get_system_nanotime(clock_sec_t * secs,
    clock_nsec_t * nanosecs)
{
    uint64_t abstime;
    uint64_t t64;

    abstime = mach_absolute_time();
    *secs = (t64 = abstime / rtclock_sec_divisor);
    abstime -= (t64 * rtclock_sec_divisor);

    *nanosecs = (clock_nsec_t)((abstime * NSEC_PER_SEC) / rtclock_sec_divisor);
}

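/*
 * Publish the current gettimeofday() parameters to the commpage so that
 * user space can compute wall-clock time without entering the kernel.
 */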
void
clock_gettimeofday_set_commpage(uint64_t abstime,
    uint64_t sec,
    uint64_t frac,
    uint64_t scale,
    uint64_t tick_per_sec)
{
    commpage_set_timestamp(abstime, sec, frac, scale, tick_per_sec);
}

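/*
 * Report the timebase conversion constants computed in timebase_callback().
 */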
void
clock_timebase_info(mach_timebase_info_t info)
{
    *info = rtclock_timebase_const;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(__unused unsigned int is_user_context)
{
    uint64_t abstime;
    cpu_data_t * cdp;
    struct arm_saved_state * regs;
    unsigned int user_mode;
    uintptr_t pc;

    cdp = getCpuDatap();

    cdp->cpu_stat.timer_cnt++;
    SCHED_STATS_INC(timer_pop_count);

    assert(!ml_get_interrupts_enabled());

    abstime = mach_absolute_time();

    if (cdp->cpu_idle_pop != 0x0ULL) {
        if ((cdp->rtcPop - abstime) < cdp->cpu_idle_latency) {
            cdp->cpu_idle_pop = 0x0ULL;
            while (abstime < cdp->rtcPop) {
                abstime = mach_absolute_time();
            }
        } else {
            ClearIdlePop(FALSE);
        }
    }

    if ((regs = cdp->cpu_int_state)) {
        pc = get_saved_state_pc(regs);

#if __arm64__
        user_mode = PSR64_IS_USER(get_saved_state_cpsr(regs));
#else
        user_mode = (regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE;
#endif
    } else {
        pc = 0;
        user_mode = 0;
    }
    if (abstime >= cdp->rtcPop) {
        /* Log the interrupt service latency (negative value expected by the tool) */
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
            -(abstime - cdp->rtcPop),
            user_mode ? pc : VM_KERNEL_UNSLIDE(pc), user_mode, 0, 0);
    }

    /* call the generic etimer */
    timer_intr(user_mode, pc);
}

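/*
 * Convert a deadline into a decrementer count relative to 'now',
 * clamped to the [DECREMENTER_MIN, DECREMENTER_MAX] range.
 */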
static int
deadline_to_decrementer(uint64_t deadline,
    uint64_t now)
{
    uint64_t delt;

    if (deadline <= now) {
        return DECREMENTER_MIN;
    } else {
        delt = deadline - now;

        return (delt >= (DECREMENTER_MAX + 1)) ? DECREMENTER_MAX : ((delt >= (DECREMENTER_MIN + 1)) ? (int)delt : DECREMENTER_MIN);
    }
}

/*
 * Request a decrementer pop
 */
int
setPop(uint64_t time)
{
    int delay_time;
    uint64_t current_time;
    cpu_data_t * cdp;

    cdp = getCpuDatap();
    current_time = mach_absolute_time();

    delay_time = deadline_to_decrementer(time, current_time);
    cdp->rtcPop = delay_time + current_time;

    ml_set_decrementer((uint32_t) delay_time);

    return delay_time;
}

/*
 * Request decrementer Idle Pop. Return true if set
 */
boolean_t
SetIdlePop(void)
{
    int delay_time;
    uint64_t time;
    uint64_t current_time;
    cpu_data_t * cdp;

    cdp = getCpuDatap();
    current_time = mach_absolute_time();

    if (((cdp->rtcPop < current_time) ||
        (cdp->rtcPop - current_time) < cdp->cpu_idle_latency)) {
        return FALSE;
    }

    time = cdp->rtcPop - cdp->cpu_idle_latency;

    delay_time = deadline_to_decrementer(time, current_time);
    cdp->cpu_idle_pop = delay_time + current_time;
    ml_set_decrementer((uint32_t) delay_time);

    return TRUE;
}

/*
 * Clear decrementer Idle Pop
 */
void
ClearIdlePop(
    boolean_t wfi)
{
#if !__arm64__
#pragma unused(wfi)
#endif
    cpu_data_t * cdp;

    cdp = getCpuDatap();
    cdp->cpu_idle_pop = 0x0ULL;

#if __arm64__
    /*
     * Don't update the HW timer if there's a pending
     * interrupt (we can lose interrupt assertion);
     * we want to take the interrupt right now and update
     * the deadline from the handler.
     *
     * ARM64_TODO: consider this more carefully.
     */
    if (!(wfi && ml_get_timer_pending()))
#endif
    {
        setPop(cdp->rtcPop);
    }
}

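/*
 * Convert an absolute time into whole seconds and residual microseconds.
 */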
void
absolutetime_to_microtime(uint64_t abstime,
    clock_sec_t * secs,
    clock_usec_t * microsecs)
{
    uint64_t t64;

    *secs = t64 = abstime / rtclock_sec_divisor;
    abstime -= (t64 * rtclock_sec_divisor);

    *microsecs = (uint32_t)(abstime / rtclock_usec_divisor);
}

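/*
 * Convert between absolute time (timebase ticks) and nanoseconds.  The
 * conversion is split into a whole-second part and a sub-second remainder
 * so that the intermediate multiplication cannot overflow 64 bits.
 */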
void
absolutetime_to_nanoseconds(uint64_t abstime,
    uint64_t * result)
{
    uint64_t t64;

    *result = (t64 = abstime / rtclock_sec_divisor) * NSEC_PER_SEC;
    abstime -= (t64 * rtclock_sec_divisor);
    *result += (abstime * NSEC_PER_SEC) / rtclock_sec_divisor;
}

void
nanoseconds_to_absolutetime(uint64_t nanosecs,
    uint64_t * result)
{
    uint64_t t64;

    *result = (t64 = nanosecs / NSEC_PER_SEC) * rtclock_sec_divisor;
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

void
nanotime_to_absolutetime(clock_sec_t secs,
    clock_nsec_t nanosecs,
    uint64_t * result)
{
    *result = ((uint64_t) secs * rtclock_sec_divisor) +
        ((uint64_t) nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

void
clock_interval_to_absolutetime_interval(uint32_t interval,
    uint32_t scale_factor,
    uint64_t * result)
{
    uint64_t nanosecs = (uint64_t) interval * scale_factor;
    uint64_t t64;

    *result = (t64 = nanosecs / NSEC_PER_SEC) * rtclock_sec_divisor;
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

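/*
 * Spin until the given absolute-time deadline has passed, optionally
 * pausing in WFE between timebase reads where the platform supports it.
 */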
void
machine_delay_until(uint64_t interval,
    uint64_t deadline)
{
#pragma unused(interval)
    uint64_t now;

    do {
#if __ARM_ENABLE_WFE_
        __builtin_arm_wfe();
#endif /* __ARM_ENABLE_WFE_ */

        now = mach_absolute_time();
    } while (now < deadline);
}