/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:    arm/rtclock.c
 * Purpose: Routines for handling the machine dependent
 *          real-time clock.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
#include <kern/timer_queue.h>

#include <kern/host_notify.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>
#include <machine/config.h>
#include <arm/exception.h>
#include <arm/cpu_data_internal.h>
#if __arm64__
#include <arm64/proc_reg.h>
#elif __arm__
#include <arm/proc_reg.h>
#else
#error Unsupported arch
#endif
#include <arm/rtclock.h>

#include <IOKit/IOPlatformExpert.h>
#include <libkern/OSAtomic.h>

#include <sys/kdebug.h>

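/*
 * Bound on compare-and-swap retries in mach_absolute_time()'s
 * monotonicity check (DEVELOPMENT || DEBUG builds only).
 */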
#define MAX_TIMEBASE_TRIES 10

int rtclock_init(void);

static int
deadline_to_decrementer(uint64_t deadline,
    uint64_t now);
static void
timebase_callback(struct timebase_freq_t * freq);

#if DEVELOPMENT || DEBUG
uint32_t absolute_time_validation = 0;
#endif

/*
 * Configure the real-time clock device at boot
 */
void
rtclock_early_init(void)
{
	PE_register_timebase_callback(timebase_callback);
#if DEVELOPMENT || DEBUG
	uint32_t tmp_mv = 1;

#if defined(APPLE_ARM64_ARCH_FAMILY)
	/* Enable MAT validation on A0 hardware by default. */
	absolute_time_validation = ml_get_topology_info()->chip_revision == CPU_VERSION_A0;
#endif

	if (kern_feature_override(KF_MATV_OVRD)) {
		absolute_time_validation = 0;
	}
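	/*
	 * The "timebase_validation" boot-arg (e.g. timebase_validation=1)
	 * overrides the defaults above in either direction.
	 */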
	if (PE_parse_boot_argn("timebase_validation", &tmp_mv, sizeof(tmp_mv))) {
		absolute_time_validation = tmp_mv;
	}
#endif
}

static void
timebase_callback(struct timebase_freq_t * freq)
{
	unsigned long numer, denom;
	uint64_t t64_1, t64_2;
	uint32_t divisor;

	if (freq->timebase_den < 1 || freq->timebase_den > 4 ||
	    freq->timebase_num < freq->timebase_den) {
		panic("rtclock timebase_callback: invalid constant %ld / %ld",
		    freq->timebase_num, freq->timebase_den);
	}

	denom = freq->timebase_num;
	numer = freq->timebase_den * NSEC_PER_SEC;
	// reduce by the greatest common divisor to minimize overflow
	if (numer > denom) {
		t64_1 = numer;
		t64_2 = denom;
	} else {
		t64_1 = denom;
		t64_2 = numer;
	}
	while (t64_2 != 0) {
		uint64_t temp = t64_2;
		t64_2 = t64_1 % t64_2;
		t64_1 = temp;
	}
	numer /= t64_1;
	denom /= t64_1;

	rtclock_timebase_const.numer = (uint32_t)numer;
	rtclock_timebase_const.denom = (uint32_t)denom;
	divisor = (uint32_t)(freq->timebase_num / freq->timebase_den);

	rtclock_sec_divisor = divisor;
	rtclock_usec_divisor = divisor / USEC_PER_SEC;
}
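
/*
 * Worked example (hypothetical 24 MHz timebase, timebase_num/timebase_den
 * = 24000000/1): denom = 24000000 and numer = 1 * NSEC_PER_SEC = 1000000000;
 * their GCD is 8000000, so rtclock_timebase_const becomes 125/3, while
 * rtclock_sec_divisor is 24000000 ticks per second and rtclock_usec_divisor
 * is 24 ticks per microsecond.
 */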

/*
 * Initialize the system clock device for the current cpu
 */
int
rtclock_init(void)
{
	uint64_t abstime;
	cpu_data_t * cdp;

	clock_timebase_init();

	if (cpu_number() == master_cpu) {
		ml_init_lock_timeout();
	}

	cdp = getCpuDatap();

	abstime = mach_absolute_time();
	cdp->rtcPop = EndOfAllTime;     /* Init Pop time */
	timer_resync_deadlines();       /* Start the timers going */

	return 1;
}

uint64_t
mach_absolute_time(void)
{
#if DEVELOPMENT || DEBUG
	if (__improbable(absolute_time_validation == 1)) {
		static volatile uint64_t s_last_absolute_time = 0;
		uint64_t new_absolute_time, old_absolute_time;
		int attempts = 0;

		/* ARM 64: We need a dsb here to ensure that the load of s_last_absolute_time
		 * completes before the timebase read. Were the load to complete after the
		 * timebase read, there would be a window for another CPU to update
		 * s_last_absolute_time and leave us in an inconsistent state. Consider the
		 * following interleaving:
		 *
		 * Let s_last_absolute_time = t0
		 * CPU0: Read timebase at t1
		 * CPU1: Read timebase at t2
		 * CPU1: Update s_last_absolute_time to t2
		 * CPU0: Load completes
		 * CPU0: Update s_last_absolute_time to t1
		 *
		 * This would cause the assertion to fail even though time did not go
		 * backwards. Thus, we use a dsb to guarantee completion of the load before
		 * the timebase read.
		 */
		do {
			attempts++;
			old_absolute_time = s_last_absolute_time;

#if __arm64__
			__asm__ volatile ("dsb ld" ::: "memory");
#else
			OSSynchronizeIO(); // See osfmk/arm64/rtclock.c
#endif

			new_absolute_time = ml_get_timebase();
		} while (attempts < MAX_TIMEBASE_TRIES && !OSCompareAndSwap64(old_absolute_time, new_absolute_time, &s_last_absolute_time));

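		/*
		 * If the CAS failed MAX_TIMEBASE_TRIES times in a row, other
		 * CPUs kept updating s_last_absolute_time between our load and
		 * swap, so old/new no longer form a meaningful pair; the
		 * monotonicity check below is then skipped rather than risk a
		 * false panic.
		 */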
		if (attempts < MAX_TIMEBASE_TRIES && old_absolute_time > new_absolute_time) {
			panic("mach_absolute_time returning non-monotonically increasing value 0x%llx (old value 0x%llx)\n",
			    new_absolute_time, old_absolute_time);
		}
		return new_absolute_time;
	} else {
		return ml_get_timebase();
	}
#else
	return ml_get_timebase();
#endif
}

uint64_t
mach_approximate_time(void)
{
#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ || __arm64__
	/* Hardware supports a fast timestamp, so grab it without asserting monotonicity */
	return ml_get_timebase();
#else
	processor_t processor;
	uint64_t approx_time;

	disable_preemption();
	processor = current_processor();
	approx_time = processor->last_dispatch;
	enable_preemption();

	return approx_time;
#endif
}

void
clock_get_system_microtime(clock_sec_t * secs,
    clock_usec_t * microsecs)
{
	absolutetime_to_microtime(mach_absolute_time(), secs, microsecs);
}

void
clock_get_system_nanotime(clock_sec_t * secs,
    clock_nsec_t * nanosecs)
{
	uint64_t abstime;
	uint64_t t64;

	abstime = mach_absolute_time();
	*secs = (t64 = abstime / rtclock_sec_divisor);
	abstime -= (t64 * rtclock_sec_divisor);

	*nanosecs = (clock_nsec_t)((abstime * NSEC_PER_SEC) / rtclock_sec_divisor);
}

void
clock_gettimeofday_set_commpage(uint64_t abstime,
    uint64_t sec,
    uint64_t frac,
    uint64_t scale,
    uint64_t tick_per_sec)
{
	commpage_set_timestamp(abstime, sec, frac, scale, tick_per_sec);
}

void
clock_timebase_info(mach_timebase_info_t info)
{
	*info = rtclock_timebase_const;
}
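
/*
 * Usage sketch (user-space side, for illustration): callers typically scale
 * timebase ticks to nanoseconds with the constant reported here, e.g.
 *
 *     mach_timebase_info_data_t tb;
 *     mach_timebase_info(&tb);
 *     uint64_t ns = mach_absolute_time() * tb.numer / tb.denom;
 */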

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(__unused unsigned int is_user_context)
{
	uint64_t abstime;
	cpu_data_t * cdp;
	struct arm_saved_state * regs;
	unsigned int user_mode;
	uintptr_t pc;

	cdp = getCpuDatap();

	cdp->cpu_stat.timer_cnt++;
	SCHED_STATS_INC(timer_pop_count);

	assert(!ml_get_interrupts_enabled());

	abstime = mach_absolute_time();

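	/*
	 * If an early idle pop was armed and the true deadline (rtcPop) is now
	 * within cpu_idle_latency, spin until the deadline passes instead of
	 * reprogramming the decrementer; otherwise clear the idle pop and
	 * re-arm for the real deadline.
	 */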
	if (cdp->cpu_idle_pop != 0x0ULL) {
		if ((cdp->rtcPop - abstime) < cdp->cpu_idle_latency) {
			cdp->cpu_idle_pop = 0x0ULL;
			while (abstime < cdp->rtcPop) {
				abstime = mach_absolute_time();
			}
		} else {
			ClearIdlePop(FALSE);
		}
	}

	if ((regs = cdp->cpu_int_state)) {
		pc = get_saved_state_pc(regs);

#if __arm64__
		user_mode = PSR64_IS_USER(get_saved_state_cpsr(regs));
#else
		user_mode = (regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE;
#endif
	} else {
		pc = 0;
		user_mode = 0;
	}
	if (abstime >= cdp->rtcPop) {
		/* Log the interrupt service latency (-ve value expected by tool) */
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		    -(abstime - cdp->rtcPop),
		    user_mode ? pc : VM_KERNEL_UNSLIDE(pc), user_mode, 0, 0);
	}

	/* call the generic etimer */
	timer_intr(user_mode, pc);
}

static int
deadline_to_decrementer(uint64_t deadline,
    uint64_t now)
{
	uint64_t delt;

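	/*
	 * Clamp the returned value to [DECREMENTER_MIN, DECREMENTER_MAX]:
	 * past-due deadlines get the minimum so the pop fires almost
	 * immediately, and far-off deadlines are capped and re-armed from the
	 * timer interrupt when the maximum pop expires.
	 */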
	if (deadline <= now) {
		return DECREMENTER_MIN;
	} else {
		delt = deadline - now;

		return (delt >= (DECREMENTER_MAX + 1)) ? DECREMENTER_MAX : ((delt >= (DECREMENTER_MIN + 1)) ? (int)delt : DECREMENTER_MIN);
	}
}

/*
 * Request a decrementer pop
 */
int
setPop(uint64_t time)
{
	int delay_time;
	uint64_t current_time;
	cpu_data_t * cdp;

	cdp = getCpuDatap();
	current_time = mach_absolute_time();

	delay_time = deadline_to_decrementer(time, current_time);
	cdp->rtcPop = delay_time + current_time;

	ml_set_decrementer((uint32_t) delay_time);

	return delay_time;
}
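
/*
 * Note: because deadline_to_decrementer() clamps the delta, rtcPop may land
 * earlier than a very distant requested deadline; the decrementer simply
 * pops and is re-armed repeatedly until the real deadline is reached.
 */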

/*
 * Request decrementer Idle Pop. Return true if set.
 *
 * The pop is pulled in cpu_idle_latency ticks ahead of the true deadline so
 * that a CPU waking from idle has time to meet it.
 */
boolean_t
SetIdlePop(void)
{
	int delay_time;
	uint64_t time;
	uint64_t current_time;
	cpu_data_t * cdp;

	cdp = getCpuDatap();
	current_time = mach_absolute_time();

	if (((cdp->rtcPop < current_time) ||
	    (cdp->rtcPop - current_time) < cdp->cpu_idle_latency)) {
		return FALSE;
	}

	time = cdp->rtcPop - cdp->cpu_idle_latency;

	delay_time = deadline_to_decrementer(time, current_time);
	cdp->cpu_idle_pop = delay_time + current_time;
	ml_set_decrementer((uint32_t) delay_time);

	return TRUE;
}

/*
 * Clear decrementer Idle Pop
 */
void
ClearIdlePop(
	boolean_t wfi)
{
#if !__arm64__
#pragma unused(wfi)
#endif
	cpu_data_t * cdp;

	cdp = getCpuDatap();
	cdp->cpu_idle_pop = 0x0ULL;

#if __arm64__
	/*
	 * Don't update the HW timer if there's a pending
	 * interrupt (we can lose the interrupt assertion);
	 * we want to take the interrupt right now and update
	 * the deadline from the handler.
	 *
	 * ARM64_TODO: consider this more carefully.
	 */
	if (!(wfi && ml_get_timer_pending()))
#endif
	{
		setPop(cdp->rtcPop);
	}
}

void
absolutetime_to_microtime(uint64_t abstime,
    clock_sec_t * secs,
    clock_usec_t * microsecs)
{
	uint64_t t64;

	*secs = t64 = abstime / rtclock_sec_divisor;
	abstime -= (t64 * rtclock_sec_divisor);

	*microsecs = (uint32_t)(abstime / rtclock_usec_divisor);
}

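/*
 * The conversions below split the value into whole seconds plus a
 * sub-second remainder before scaling, so the intermediate multiplication
 * by NSEC_PER_SEC (or rtclock_sec_divisor) stays within 64 bits.
 */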
void
absolutetime_to_nanoseconds(uint64_t abstime,
    uint64_t * result)
{
	uint64_t t64;

	*result = (t64 = abstime / rtclock_sec_divisor) * NSEC_PER_SEC;
	abstime -= (t64 * rtclock_sec_divisor);
	*result += (abstime * NSEC_PER_SEC) / rtclock_sec_divisor;
}

void
nanoseconds_to_absolutetime(uint64_t nanosecs,
    uint64_t * result)
{
	uint64_t t64;

	*result = (t64 = nanosecs / NSEC_PER_SEC) * rtclock_sec_divisor;
	nanosecs -= (t64 * NSEC_PER_SEC);
	*result += (nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

void
nanotime_to_absolutetime(clock_sec_t secs,
    clock_nsec_t nanosecs,
    uint64_t * result)
{
	*result = ((uint64_t) secs * rtclock_sec_divisor) +
	    ((uint64_t) nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

void
clock_interval_to_absolutetime_interval(uint32_t interval,
    uint32_t scale_factor,
    uint64_t * result)
{
	uint64_t nanosecs = (uint64_t) interval * scale_factor;
	uint64_t t64;

	*result = (t64 = nanosecs / NSEC_PER_SEC) * rtclock_sec_divisor;
	nanosecs -= (t64 * NSEC_PER_SEC);
	*result += (nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

void
machine_delay_until(uint64_t interval,
    uint64_t deadline)
{
#pragma unused(interval)
	uint64_t now;

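	/*
	 * When WFE is enabled, each iteration parks the CPU in a low-power
	 * wait-for-event state until an event wakes it, then rechecks the
	 * deadline; otherwise this degenerates to a plain spin on
	 * mach_absolute_time().
	 */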
	do {
#if __ARM_ENABLE_WFE_
		__builtin_arm_wfe();
#endif /* __ARM_ENABLE_WFE_ */

		now = mach_absolute_time();
	} while (now < deadline);
}