apple/xnu (xnu-6153.121.1): osfmk/arm/rtclock.c
/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:        arm/rtclock.c
 * Purpose:     Routines for handling the machine dependent
 *              real-time clock.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
#include <kern/timer_queue.h>

#include <kern/host_notify.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>
#include <arm/exception.h>
#include <arm/cpu_data_internal.h>
#if __arm64__
#include <arm64/proc_reg.h>
#elif __arm__
#include <arm/proc_reg.h>
#else
#error Unsupported arch
#endif
#include <arm/rtclock.h>

#include <IOKit/IOPlatformExpert.h>
#include <libkern/OSAtomic.h>

#include <sys/kdebug.h>

#define MAX_TIMEBASE_TRIES 10

int rtclock_init(void);

static int
deadline_to_decrementer(uint64_t deadline,
    uint64_t now);
static void
timebase_callback(struct timebase_freq_t * freq);

#if DEVELOPMENT || DEBUG
uint32_t absolute_time_validation = 0;
#endif

/*
 * Configure the real-time clock device at boot
 */
void
rtclock_early_init(void)
{
        PE_register_timebase_callback(timebase_callback);
#if DEVELOPMENT || DEBUG
        uint32_t tmp_mv = 1;

        /* Enable mach_absolute_time() (MAT) validation on A0 hardware by default. */
        absolute_time_validation = (get_arm_cpu_version() == 0x00);

        if (kern_feature_override(KF_MATV_OVRD)) {
                absolute_time_validation = 0;
        }
        if (PE_parse_boot_argn("timebase_validation", &tmp_mv, sizeof(tmp_mv))) {
                absolute_time_validation = tmp_mv;
        }
#endif
}

static void
timebase_callback(struct timebase_freq_t * freq)
{
        unsigned long numer, denom;
        uint64_t t64_1, t64_2;
        uint32_t divisor;

        if (freq->timebase_den < 1 || freq->timebase_den > 4 ||
            freq->timebase_num < freq->timebase_den) {
                panic("rtclock timebase_callback: invalid constant %lu / %lu",
                    freq->timebase_num, freq->timebase_den);
        }

        denom = freq->timebase_num;
        numer = freq->timebase_den * NSEC_PER_SEC;
        // reduce by the greatest common divisor to minimize overflow
        if (numer > denom) {
                t64_1 = numer;
                t64_2 = denom;
        } else {
                t64_1 = denom;
                t64_2 = numer;
        }
        while (t64_2 != 0) {
                uint64_t temp = t64_2;
                t64_2 = t64_1 % t64_2;
                t64_1 = temp;
        }
        numer /= t64_1;
        denom /= t64_1;

        rtclock_timebase_const.numer = (uint32_t)numer;
        rtclock_timebase_const.denom = (uint32_t)denom;
        divisor = (uint32_t)(freq->timebase_num / freq->timebase_den);

        rtclock_sec_divisor = divisor;
        rtclock_usec_divisor = divisor / USEC_PER_SEC;
}
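
/*
 * Example (illustrative, not part of xnu): for a hypothetical 24 MHz
 * timebase (timebase_num = 24000000, timebase_den = 1), the callback above
 * computes numer = 1 * NSEC_PER_SEC = 1000000000 and denom = 24000000;
 * Euclid's algorithm finds gcd = 8000000, so the published constant reduces
 * to 125/3, i.e. one tick is 125/3 ns (about 41.67 ns). A minimal
 * user-space sketch of the same reduction:
 */
#if 0   /* illustrative sketch; not compiled into the kernel */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static void
reduce_timebase(uint64_t ticks_per_sec, uint32_t *numer, uint32_t *denom)
{
        uint64_t n = NSEC_PER_SEC;      /* timebase_den (= 1) * NSEC_PER_SEC */
        uint64_t d = ticks_per_sec;     /* timebase_num */
        uint64_t a = n, b = d;

        while (b != 0) {                /* Euclid's algorithm */
                uint64_t t = b;
                b = a % b;
                a = t;
        }
        *numer = (uint32_t)(n / a);
        *denom = (uint32_t)(d / a);
}

int
main(void)
{
        uint32_t numer, denom;

        reduce_timebase(24000000, &numer, &denom);
        printf("%u/%u ns per tick\n", numer, denom);    /* prints 125/3 */
        return 0;
}
#endif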

/*
 * Initialize the system clock device for the current cpu
 */
int
rtclock_init(void)
{
        uint64_t abstime;
        cpu_data_t * cdp;

        clock_timebase_init();
        ml_init_lock_timeout();

        cdp = getCpuDatap();

        abstime = mach_absolute_time();
        cdp->rtcPop = EndOfAllTime;     /* Init Pop time */
        timer_resync_deadlines();       /* Start the timers going */

        return 1;
}

uint64_t
mach_absolute_time(void)
{
#if DEVELOPMENT || DEBUG
        if (__improbable(absolute_time_validation == 1)) {
                static volatile uint64_t s_last_absolute_time = 0;
                uint64_t new_absolute_time, old_absolute_time;
                int attempts = 0;

                /* ARM 64: We need a dsb here to ensure that the load of
                 * s_last_absolute_time completes before the timebase read.
                 * Were the load to complete after the timebase read, there
                 * would be a window for another CPU to update
                 * s_last_absolute_time and leave us in an inconsistent
                 * state. Consider the following interleaving:
                 *
                 *   Let s_last_absolute_time = t0
                 *   CPU0: Read timebase at t1
                 *   CPU1: Read timebase at t2
                 *   CPU1: Update s_last_absolute_time to t2
                 *   CPU0: Load completes
                 *   CPU0: Update s_last_absolute_time to t1
                 *
                 * This would cause the assertion to fail even though time
                 * did not go backwards. Thus, we use a dsb to guarantee
                 * completion of the load before the timebase read.
                 */
                do {
                        attempts++;
                        old_absolute_time = s_last_absolute_time;

#if __arm64__
                        __asm__ volatile ("dsb ld" ::: "memory");
#else
                        OSSynchronizeIO(); // See osfmk/arm64/rtclock.c
#endif

                        new_absolute_time = ml_get_timebase();
                } while (attempts < MAX_TIMEBASE_TRIES &&
                    !OSCompareAndSwap64(old_absolute_time, new_absolute_time,
                    &s_last_absolute_time));

                if (attempts < MAX_TIMEBASE_TRIES &&
                    old_absolute_time > new_absolute_time) {
                        panic("mach_absolute_time returning non-monotonically increasing value 0x%llx (old value 0x%llx)\n",
                            new_absolute_time, old_absolute_time);
                }
                return new_absolute_time;
        } else {
                return ml_get_timebase();
        }
#else
        return ml_get_timebase();
#endif
}
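
/*
 * Illustrative user-space analogue (not part of xnu) of the validation
 * above: C11 atomics stand in for the explicit "dsb ld" +
 * OSCompareAndSwap64 pairing (the kernel needs the raw barrier because the
 * timebase read itself is not an atomic operation). All names here are
 * hypothetical.
 */
#if 0   /* illustrative sketch; not compiled into the kernel */
#include <mach/mach_time.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TRIES 10

static _Atomic uint64_t s_last;

static uint64_t
checked_absolute_time(void)
{
        uint64_t prev, now;
        int attempts = 0;

        do {
                attempts++;
                /* seq_cst load is ordered before the timebase read below */
                prev = atomic_load(&s_last);
                now = mach_absolute_time();
        } while (attempts < MAX_TRIES &&
            !atomic_compare_exchange_strong(&s_last, &prev, now));

        if (attempts < MAX_TRIES && prev > now) {
                fprintf(stderr, "time went backwards: 0x%llx < 0x%llx\n",
                    (unsigned long long)now, (unsigned long long)prev);
        }
        return now;
}
#endif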

uint64_t
mach_approximate_time(void)
{
#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ || __arm64__
        /* Hardware supports a fast timestamp, so grab it without asserting
         * monotonicity */
        return ml_get_timebase();
#else
        processor_t processor;
        uint64_t approx_time;

        disable_preemption();
        processor = current_processor();
        approx_time = processor->last_dispatch;
        enable_preemption();

        return approx_time;
#endif
}
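
/*
 * User-space note (illustrative, not part of xnu): both routines above are
 * exported to user space through <mach/mach_time.h>. On ARM both read the
 * hardware timebase, so the two values are close; on configurations that
 * fall back to last_dispatch, the approximate value can lag by up to a
 * scheduler tick.
 */
#if 0   /* illustrative sketch; not compiled into the kernel */
#include <mach/mach_time.h>
#include <stdio.h>

int
main(void)
{
        uint64_t exact = mach_absolute_time();
        uint64_t approx = mach_approximate_time();

        printf("exact=%llu approx=%llu\n",
            (unsigned long long)exact, (unsigned long long)approx);
        return 0;
}
#endif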

void
clock_get_system_microtime(clock_sec_t * secs,
    clock_usec_t * microsecs)
{
        absolutetime_to_microtime(mach_absolute_time(), secs, microsecs);
}

void
clock_get_system_nanotime(clock_sec_t * secs,
    clock_nsec_t * nanosecs)
{
        uint64_t abstime;
        uint64_t t64;

        abstime = mach_absolute_time();
        *secs = (t64 = abstime / rtclock_sec_divisor);
        abstime -= (t64 * rtclock_sec_divisor);

        *nanosecs = (clock_nsec_t)((abstime * NSEC_PER_SEC) / rtclock_sec_divisor);
}

void
clock_gettimeofday_set_commpage(uint64_t abstime,
    uint64_t sec,
    uint64_t frac,
    uint64_t scale,
    uint64_t tick_per_sec)
{
        commpage_set_timestamp(abstime, sec, frac, scale, tick_per_sec);
}

void
clock_timebase_info(mach_timebase_info_t info)
{
        *info = rtclock_timebase_const;
}
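
/*
 * Usage sketch (illustrative, not part of xnu): rtclock_timebase_const is
 * what user space ultimately receives from mach_timebase_info(), and the
 * usual pattern multiplies before dividing to keep precision (at the cost
 * of overflowing for very long intervals, roughly 2^64 / numer ticks).
 */
#if 0   /* illustrative sketch; not compiled into the kernel */
#include <mach/mach_time.h>
#include <stdio.h>

int
main(void)
{
        mach_timebase_info_data_t tb;
        mach_timebase_info(&tb);

        uint64_t start = mach_absolute_time();
        /* ... work being measured ... */
        uint64_t end = mach_absolute_time();

        /* ticks -> nanoseconds */
        uint64_t ns = (end - start) * tb.numer / tb.denom;
        printf("elapsed: %llu ns\n", (unsigned long long)ns);
        return 0;
}
#endif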

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(__unused unsigned int is_user_context)
{
        uint64_t abstime;
        cpu_data_t * cdp;
        struct arm_saved_state * regs;
        unsigned int user_mode;
        uintptr_t pc;

        cdp = getCpuDatap();

        cdp->cpu_stat.timer_cnt++;
        cdp->cpu_stat.timer_cnt_wake++;
        SCHED_STATS_TIMER_POP(current_processor());

        assert(!ml_get_interrupts_enabled());

        abstime = mach_absolute_time();

        if (cdp->cpu_idle_pop != 0x0ULL) {
                if ((cdp->rtcPop - abstime) < cdp->cpu_idle_latency) {
                        cdp->cpu_idle_pop = 0x0ULL;
                        while (abstime < cdp->rtcPop) {
                                abstime = mach_absolute_time();
                        }
                } else {
                        ClearIdlePop(FALSE);
                }
        }

        if ((regs = cdp->cpu_int_state)) {
                pc = get_saved_state_pc(regs);

#if __arm64__
                user_mode = PSR64_IS_USER(get_saved_state_cpsr(regs));
#else
                user_mode = (regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE;
#endif
        } else {
                pc = 0;
                user_mode = 0;
        }
        if (abstime >= cdp->rtcPop) {
                /* Log the interrupt service latency (-ve value expected by tool) */
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                    MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
                    -(abstime - cdp->rtcPop),
                    user_mode ? pc : VM_KERNEL_UNSLIDE(pc), user_mode, 0, 0);
        }

        /* call the generic etimer */
        timer_intr(user_mode, pc);
}

static int
deadline_to_decrementer(uint64_t deadline,
    uint64_t now)
{
        uint64_t delt;

        if (deadline <= now) {
                return DECREMENTER_MIN;
        } else {
                delt = deadline - now;

                return (delt >= (DECREMENTER_MAX + 1)) ? DECREMENTER_MAX
                    : ((delt >= (DECREMENTER_MIN + 1)) ? (int)delt : DECREMENTER_MIN);
        }
}
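
/*
 * Behavior sketch (illustrative, not part of xnu): the clamp keeps the
 * decrementer inside [DECREMENTER_MIN, DECREMENTER_MAX]. The values below
 * are assumptions for illustration only; the real definitions live in
 * arm/rtclock.h.
 */
#if 0   /* illustrative sketch; not compiled into the kernel */
#include <assert.h>
#include <stdint.h>

#define DECREMENTER_MIN 0xAULL          /* assumed for illustration */
#define DECREMENTER_MAX 0x7FFFFFFFULL   /* assumed for illustration */

static int
clamp_to_decrementer(uint64_t deadline, uint64_t now)
{
        if (deadline <= now) {
                return (int)DECREMENTER_MIN;
        }
        uint64_t delt = deadline - now;
        return (delt >= DECREMENTER_MAX + 1) ? (int)DECREMENTER_MAX
            : ((delt >= DECREMENTER_MIN + 1) ? (int)delt : (int)DECREMENTER_MIN);
}

int
main(void)
{
        assert(clamp_to_decrementer(100, 200) == 0xA);          /* already past */
        assert(clamp_to_decrementer(205, 200) == 0xA);          /* too soon */
        assert(clamp_to_decrementer(1200, 200) == 1000);        /* in range */
        assert(clamp_to_decrementer(UINT64_MAX, 0) == 0x7FFFFFFF); /* clamped */
        return 0;
}
#endif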

/*
 * Request a decrementer pop
 */
int
setPop(uint64_t time)
{
        int delay_time;
        uint64_t current_time;
        cpu_data_t * cdp;

        cdp = getCpuDatap();
        current_time = mach_absolute_time();

        delay_time = deadline_to_decrementer(time, current_time);
        cdp->rtcPop = delay_time + current_time;

        ml_set_decrementer((uint32_t) delay_time);

        return delay_time;
}

/*
 * Request decrementer Idle Pop. Return true if set
 */
boolean_t
SetIdlePop(void)
{
        int delay_time;
        uint64_t time;
        uint64_t current_time;
        cpu_data_t * cdp;

        cdp = getCpuDatap();
        current_time = mach_absolute_time();

        if (((cdp->rtcPop < current_time) ||
            (cdp->rtcPop - current_time) < cdp->cpu_idle_latency)) {
                return FALSE;
        }

        time = cdp->rtcPop - cdp->cpu_idle_latency;

        delay_time = deadline_to_decrementer(time, current_time);
        cdp->cpu_idle_pop = delay_time + current_time;
        ml_set_decrementer((uint32_t) delay_time);

        return TRUE;
}

/*
 * Clear decrementer Idle Pop
 */
void
ClearIdlePop(
        boolean_t wfi)
{
#if !__arm64__
#pragma unused(wfi)
#endif
        cpu_data_t * cdp;

        cdp = getCpuDatap();
        cdp->cpu_idle_pop = 0x0ULL;

#if __arm64__
        /*
         * Don't update the HW timer if there's a pending
         * interrupt (we can lose interrupt assertion);
         * we want to take the interrupt right now and update
         * the deadline from the handler.
         *
         * ARM64_TODO: consider this more carefully.
         */
        if (!(wfi && ml_get_timer_pending()))
#endif
        {
                setPop(cdp->rtcPop);
        }
}

void
absolutetime_to_microtime(uint64_t abstime,
    clock_sec_t * secs,
    clock_usec_t * microsecs)
{
        uint64_t t64;

        *secs = t64 = abstime / rtclock_sec_divisor;
        abstime -= (t64 * rtclock_sec_divisor);

        *microsecs = (uint32_t)(abstime / rtclock_usec_divisor);
}

void
absolutetime_to_nanoseconds(uint64_t abstime,
    uint64_t * result)
{
        uint64_t t64;

        *result = (t64 = abstime / rtclock_sec_divisor) * NSEC_PER_SEC;
        abstime -= (t64 * rtclock_sec_divisor);
        *result += (abstime * NSEC_PER_SEC) / rtclock_sec_divisor;
}

void
nanoseconds_to_absolutetime(uint64_t nanosecs,
    uint64_t * result)
{
        uint64_t t64;

        *result = (t64 = nanosecs / NSEC_PER_SEC) * rtclock_sec_divisor;
        nanosecs -= (t64 * NSEC_PER_SEC);
        *result += (nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}
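
/*
 * Why the split (illustrative, not part of xnu): converting whole seconds
 * first and then scaling only the sub-second remainder keeps every
 * intermediate product inside 64 bits. A naive abstime * NSEC_PER_SEC
 * overflows once abstime exceeds about 1.8e10 ticks (~13 minutes on a
 * hypothetical 24 MHz timebase).
 */
#if 0   /* illustrative sketch; not compiled into the kernel */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int
main(void)
{
        uint64_t divisor = 24000000;    /* assumed ticks/sec, cf. rtclock_sec_divisor */
        uint64_t abstime = 3ULL * 3600 * divisor;       /* three hours of ticks */

        /* Naive conversion: the product needs ~68 bits, so it wraps. */
        uint64_t naive = abstime * NSEC_PER_SEC / divisor;

        /* Split conversion, as in absolutetime_to_nanoseconds() above. */
        uint64_t t64 = abstime / divisor;
        uint64_t ns = t64 * NSEC_PER_SEC +
            (abstime - t64 * divisor) * NSEC_PER_SEC / divisor;

        printf("naive=%llu split=%llu\n",
            (unsigned long long)naive, (unsigned long long)ns);
        return 0;
}
#endif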

void
nanotime_to_absolutetime(clock_sec_t secs,
    clock_nsec_t nanosecs,
    uint64_t * result)
{
        *result = ((uint64_t) secs * rtclock_sec_divisor) +
            ((uint64_t) nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

void
clock_interval_to_absolutetime_interval(uint32_t interval,
    uint32_t scale_factor,
    uint64_t * result)
{
        uint64_t nanosecs = (uint64_t) interval * scale_factor;
        uint64_t t64;

        *result = (t64 = nanosecs / NSEC_PER_SEC) * rtclock_sec_divisor;
        nanosecs -= (t64 * NSEC_PER_SEC);
        *result += (nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

void
machine_delay_until(uint64_t interval,
    uint64_t deadline)
{
#pragma unused(interval)
        uint64_t now;

        do {
#if __ARM_ENABLE_WFE_
#if __arm64__
                if (arm64_wfe_allowed())
#endif /* __arm64__ */
                {
                        __builtin_arm_wfe();
                }
#endif /* __ARM_ENABLE_WFE_ */

                now = mach_absolute_time();
        } while (now < deadline);
}
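
/*
 * User-space note (illustrative, not part of xnu): machine_delay_until() is
 * the kernel's spin-until-deadline primitive. User space should block
 * instead, via mach_wait_until() from <mach/mach_time.h>, which takes the
 * same mach_absolute_time() units.
 */
#if 0   /* illustrative sketch; not compiled into the kernel */
#include <mach/mach_time.h>
#include <stdio.h>

int
main(void)
{
        mach_timebase_info_data_t tb;
        mach_timebase_info(&tb);

        /* Convert 10 ms to timebase ticks: ns * denom / numer. */
        uint64_t ticks = 10ULL * 1000 * 1000 * tb.denom / tb.numer;
        uint64_t deadline = mach_absolute_time() + ticks;

        mach_wait_until(deadline);      /* sleeps instead of WFE-spinning */
        printf("woke at or after the deadline\n");
        return 0;
}
#endif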