/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * Purpose: Routines for handling the machine dependent real-time clock.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/timer_queue.h>

#include <kern/host_notify.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>
#include <machine/config.h>
#include <arm/exception.h>
#include <arm/cpu_data_internal.h>
#if __arm64__
#include <arm64/proc_reg.h>
#elif __arm__
#include <arm/proc_reg.h>
#else
#error Unsupported arch
#endif
#include <arm/rtclock.h>

#include <IOKit/IOPlatformExpert.h>
#include <libkern/OSAtomic.h>

#include <sys/kdebug.h>

#define MAX_TIMEBASE_TRIES 10

int rtclock_init(void);

static int
deadline_to_decrementer(uint64_t deadline,
    uint64_t now);
static void
timebase_callback(struct timebase_freq_t * freq);

#if DEVELOPMENT || DEBUG
uint32_t absolute_time_validation = 0;
#endif

/*
 * Configure the real-time clock device at boot
 */
void
rtclock_early_init(void)
{
	PE_register_timebase_callback(timebase_callback);
#if DEVELOPMENT || DEBUG
	uint32_t tmp_mv = 1;

#if defined(APPLE_ARM64_ARCH_FAMILY)
	/* Enable MAT validation on A0 hardware by default. */
	absolute_time_validation = (ml_get_topology_info()->chip_revision == CPU_VERSION_A0);
#endif

	if (kern_feature_override(KF_MATV_OVRD)) {
		absolute_time_validation = 0;
	}
	if (PE_parse_boot_argn("timebase_validation", &tmp_mv, sizeof(tmp_mv))) {
		absolute_time_validation = tmp_mv;
	}
#endif
}

static void
timebase_callback(struct timebase_freq_t * freq)
{
	unsigned long numer, denom;
	uint64_t      t64_1, t64_2;
	uint32_t      divisor;

	if (freq->timebase_den < 1 || freq->timebase_den > 4 ||
	    freq->timebase_num < freq->timebase_den) {
		panic("rtclock timebase_callback: invalid constant %ld / %ld",
		    freq->timebase_num, freq->timebase_den);
	}

	denom = freq->timebase_num;
	numer = freq->timebase_den * NSEC_PER_SEC;
	// reduce by the greatest common divisor to minimize overflow
	if (numer > denom) {
		t64_1 = numer;
		t64_2 = denom;
	} else {
		t64_1 = denom;
		t64_2 = numer;
	}
	while (t64_2 != 0) {
		uint64_t temp = t64_2;
		t64_2 = t64_1 % t64_2;
		t64_1 = temp;
	}
	numer /= t64_1;
	denom /= t64_1;

	rtclock_timebase_const.numer = (uint32_t)numer;
	rtclock_timebase_const.denom = (uint32_t)denom;
	divisor = (uint32_t)(freq->timebase_num / freq->timebase_den);

	rtclock_sec_divisor = divisor;
	rtclock_usec_divisor = divisor / USEC_PER_SEC;
}

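/*
 * Worked example (hypothetical 24 MHz timebase: timebase_num = 24000000,
 * timebase_den = 1): denom = 24000000 and numer = 1 * NSEC_PER_SEC =
 * 1000000000. Their GCD is 8000000, so rtclock_timebase_const becomes
 * 125/3 (~41.67 ns per tick), rtclock_sec_divisor = 24000000 ticks per
 * second, and rtclock_usec_divisor = 24 ticks per microsecond.
 */
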
/*
 * Initialize the system clock device for the current cpu
 */
int
rtclock_init(void)
{
	uint64_t     abstime;
	cpu_data_t * cdp;

	clock_timebase_init();

	if (cpu_number() == master_cpu) {
		ml_init_lock_timeout();
	}

	cdp = getCpuDatap();

	abstime = mach_absolute_time();
	cdp->rtcPop = EndOfAllTime;     /* Init Pop time */
	timer_resync_deadlines();       /* Start the timers going */

	return 1;
}

uint64_t
mach_absolute_time(void)
{
#if DEVELOPMENT || DEBUG
	if (__improbable(absolute_time_validation == 1)) {
		static volatile uint64_t s_last_absolute_time = 0;
		uint64_t                 new_absolute_time, old_absolute_time;
		int                      attempts = 0;

		/* ARM 64: We need a dsb here to ensure that the load of s_last_absolute_time
		 * completes before the timebase read. Were the load to complete after the
		 * timebase read, there would be a window for another CPU to update
		 * s_last_absolute_time and leave us in an inconsistent state. Consider the
		 * following interleaving:
		 *
		 *   Let s_last_absolute_time = t0
		 *   CPU0: Read timebase at t1
		 *   CPU1: Read timebase at t2
		 *   CPU1: Update s_last_absolute_time to t2
		 *   CPU0: Load completes
		 *   CPU0: Update s_last_absolute_time to t1
		 *
		 * This would cause the assertion to fail even though time did not go
		 * backwards. Thus, we use a dsb to guarantee completion of the load before
		 * the timebase read.
		 */
		do {
			attempts++;
			old_absolute_time = s_last_absolute_time;

#if __arm64__
			__asm__ volatile ("dsb ld" ::: "memory");
#else
			OSSynchronizeIO(); // See osfmk/arm64/rtclock.c
#endif

			new_absolute_time = ml_get_timebase();
		} while (attempts < MAX_TIMEBASE_TRIES &&
		    !OSCompareAndSwap64(old_absolute_time, new_absolute_time, &s_last_absolute_time));

		if (attempts < MAX_TIMEBASE_TRIES && old_absolute_time > new_absolute_time) {
			panic("mach_absolute_time returning non-monotonically increasing value 0x%llx (old value 0x%llx)\n",
			    new_absolute_time, old_absolute_time);
		}
		return new_absolute_time;
	} else {
		return ml_get_timebase();
	}
#else
	return ml_get_timebase();
#endif
}

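/*
 * Note on the validation loop above: the monotonicity panic also requires
 * attempts < MAX_TIMEBASE_TRIES. If the loop instead exits because every
 * CAS attempt failed (heavy contention on s_last_absolute_time), the final
 * sample was never published, old_absolute_time may be stale, and comparing
 * it against new_absolute_time could produce a false positive.
 */
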
uint64_t
mach_approximate_time(void)
{
#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ || __arm64__
	/* Hardware supports a fast timestamp, so grab it without asserting monotonicity */
	return ml_get_timebase();
#else
	processor_t processor;
	uint64_t    approx_time;

	disable_preemption();
	processor = current_processor();
	approx_time = processor->last_dispatch;
	enable_preemption();

	return approx_time;
#endif
}

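/*
 * On the fallback path above, the result is only as fresh as the
 * processor's last dispatch timestamp; mach_approximate_time() trades that
 * coarser granularity for avoiding a hardware timebase read.
 */
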
void
clock_get_system_microtime(clock_sec_t * secs,
    clock_usec_t * microsecs)
{
	absolutetime_to_microtime(mach_absolute_time(), secs, microsecs);
}

void
clock_get_system_nanotime(clock_sec_t * secs,
    clock_nsec_t * nanosecs)
{
	uint64_t abstime;
	uint64_t t64;

	abstime = mach_absolute_time();
	*secs = (t64 = abstime / rtclock_sec_divisor);
	abstime -= (t64 * rtclock_sec_divisor);

	*nanosecs = (clock_nsec_t)((abstime * NSEC_PER_SEC) / rtclock_sec_divisor);
}

void
clock_gettimeofday_set_commpage(uint64_t abstime,
    uint64_t sec,
    uint64_t frac,
    uint64_t scale,
    uint64_t tick_per_sec)
{
	commpage_set_timestamp(abstime, sec, frac, scale, tick_per_sec);
}

void
clock_timebase_info(mach_timebase_info_t info)
{
	*info = rtclock_timebase_const;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(__unused unsigned int is_user_context)
{
	uint64_t                 abstime;
	cpu_data_t *             cdp;
	struct arm_saved_state * regs;
	unsigned int             user_mode;
	uintptr_t                pc;

	cdp = getCpuDatap();

	cdp->cpu_stat.timer_cnt++;
	SCHED_STATS_INC(timer_pop_count);

	assert(!ml_get_interrupts_enabled());

	abstime = mach_absolute_time();

	if (cdp->cpu_idle_pop != 0x0ULL) {
		if ((cdp->rtcPop - abstime) < cdp->cpu_idle_latency) {
			cdp->cpu_idle_pop = 0x0ULL;
			while (abstime < cdp->rtcPop) {
				abstime = mach_absolute_time();
			}
		} else {
			ClearIdlePop(FALSE);
		}
	}

	if ((regs = cdp->cpu_int_state)) {
		pc = get_saved_state_pc(regs);

#if __arm64__
		user_mode = PSR64_IS_USER(get_saved_state_cpsr(regs));
#else
		user_mode = (regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE;
#endif
	} else {
		pc = 0;
		user_mode = 0;
	}

	if (abstime >= cdp->rtcPop) {
		/* Log the interrupt service latency (-ve value expected by tool) */
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		    -(abstime - cdp->rtcPop),
		    user_mode ? pc : VM_KERNEL_UNSLIDE(pc), user_mode, 0, 0);
	}

	/* call the generic etimer */
	timer_intr(user_mode, pc);
}

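/*
 * Design note on the idle-pop handling above: SetIdlePop() (below) arms the
 * decrementer cpu_idle_latency ticks early to hide wake-from-idle latency.
 * When that early pop lands within the latency window of the true deadline
 * (rtcPop), the handler busy-waits on mach_absolute_time() for the short
 * residue instead of paying to re-arm the hardware timer.
 */
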
static int
deadline_to_decrementer(uint64_t deadline,
    uint64_t now)
{
	uint64_t delt;

	if (deadline <= now) {
		return DECREMENTER_MIN;
	} else {
		delt = deadline - now;

		return (delt >= (DECREMENTER_MAX + 1)) ? DECREMENTER_MAX
		       : ((delt >= (DECREMENTER_MIN + 1)) ? (int)delt : DECREMENTER_MIN);
	}
}

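/*
 * Illustrative clamping (DECREMENTER_MIN/MAX values are platform-defined):
 * a deadline at or before `now` yields DECREMENTER_MIN (fire as soon as
 * possible); a deadline 1000 ticks out yields 1000; anything at or beyond
 * DECREMENTER_MAX is clamped so the result fits the 32-bit decrementer.
 */
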
/*
 * Request a decrementer pop
 */
void
setPop(uint64_t time)
{
	int          delay_time;
	uint64_t     current_time;
	cpu_data_t * cdp;

	cdp = getCpuDatap();
	current_time = mach_absolute_time();

	delay_time = deadline_to_decrementer(time, current_time);
	cdp->rtcPop = delay_time + current_time;

	ml_set_decrementer((uint32_t) delay_time);
}

/*
 * Request decrementer Idle Pop. Return true if set
 */
boolean_t
SetIdlePop(void)
{
	int          delay_time;
	uint64_t     time;
	uint64_t     current_time;
	cpu_data_t * cdp;

	cdp = getCpuDatap();
	current_time = mach_absolute_time();

	if ((cdp->rtcPop < current_time) ||
	    ((cdp->rtcPop - current_time) < cdp->cpu_idle_latency)) {
		return FALSE;
	}

	time = cdp->rtcPop - cdp->cpu_idle_latency;

	delay_time = deadline_to_decrementer(time, current_time);
	cdp->cpu_idle_pop = delay_time + current_time;
	ml_set_decrementer((uint32_t) delay_time);

	return TRUE;
}

/*
 * Clear decrementer Idle Pop
 */
void
ClearIdlePop(boolean_t wfi)
{
	cpu_data_t * cdp;

	cdp = getCpuDatap();
	cdp->cpu_idle_pop = 0x0ULL;

	/*
	 * Don't update the HW timer if there's a pending
	 * interrupt (we can lose the interrupt assertion);
	 * we want to take the interrupt right now and update
	 * the deadline from the handler.
	 *
	 * ARM64_TODO: consider this more carefully.
	 */
	if (!(wfi && ml_get_timer_pending())) {
		setPop(cdp->rtcPop);
	}
}

void
absolutetime_to_microtime(uint64_t abstime,
    clock_sec_t * secs,
    clock_usec_t * microsecs)
{
	uint64_t t64;

	*secs = t64 = abstime / rtclock_sec_divisor;
	abstime -= (t64 * rtclock_sec_divisor);

	*microsecs = (uint32_t)(abstime / rtclock_usec_divisor);
}

void
absolutetime_to_nanoseconds(uint64_t abstime,
    uint64_t * result)
{
	uint64_t t64;

	*result = (t64 = abstime / rtclock_sec_divisor) * NSEC_PER_SEC;
	abstime -= (t64 * rtclock_sec_divisor);
	*result += (abstime * NSEC_PER_SEC) / rtclock_sec_divisor;
}

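/*
 * Splitting into whole seconds plus a sub-second remainder keeps the
 * multiply from overflowing: naively computing abstime * NSEC_PER_SEC
 * would wrap 64 bits once abstime exceeds ~1.8e10 ticks (about 12.8
 * minutes of uptime on a hypothetical 24 MHz timebase), whereas the
 * remainder multiplied here is always < rtclock_sec_divisor.
 */
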
void
nanoseconds_to_absolutetime(uint64_t nanosecs,
    uint64_t * result)
{
	uint64_t t64;

	*result = (t64 = nanosecs / NSEC_PER_SEC) * rtclock_sec_divisor;
	nanosecs -= (t64 * NSEC_PER_SEC);
	*result += (nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

void
nanotime_to_absolutetime(clock_sec_t secs,
    clock_nsec_t nanosecs,
    uint64_t * result)
{
	*result = ((uint64_t) secs * rtclock_sec_divisor) +
	    ((uint64_t) nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

void
clock_interval_to_absolutetime_interval(uint32_t interval,
    uint32_t scale_factor,
    uint64_t * result)
{
	uint64_t nanosecs = (uint64_t) interval * scale_factor;
	uint64_t t64;

	*result = (t64 = nanosecs / NSEC_PER_SEC) * rtclock_sec_divisor;
	nanosecs -= (t64 * NSEC_PER_SEC);
	*result += (nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

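/*
 * Usage sketch (hypothetical caller): to convert a 10 ms interval into
 * timebase ticks,
 *
 *     uint64_t abs_interval;
 *     clock_interval_to_absolutetime_interval(10, NSEC_PER_MSEC, &abs_interval);
 *
 * i.e. `interval` is expressed in units of `scale_factor` nanoseconds.
 */
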
void
machine_delay_until(uint64_t interval,
    uint64_t deadline)
{
#pragma unused(interval)
	uint64_t now;

	do {
#if __ARM_ENABLE_WFE_
		__builtin_arm_wfe();
#endif /* __ARM_ENABLE_WFE_ */

		now = mach_absolute_time();
	} while (now < deadline);
}
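
/*
 * With __ARM_ENABLE_WFE_, the wait-for-event instruction parks the core in
 * a low-power state between polls (woken by events such as the timer event
 * stream, where enabled); without it the loop degrades to a plain
 * mach_absolute_time() polling spin.
 */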