/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:    arm/rtclock.c
 * Purpose: Routines for handling the machine dependent
 *          real-time clock.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
#include <kern/timer_queue.h>

#include <kern/host_notify.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>
#include <arm/exception.h>
#include <arm/cpu_data_internal.h>
#if __arm64__
#include <arm64/proc_reg.h>
#elif __arm__
#include <arm/proc_reg.h>
#else
#error Unsupported arch
#endif
#include <arm/rtclock.h>

#include <IOKit/IOPlatformExpert.h>
#include <libkern/OSAtomic.h>

#include <sys/kdebug.h>

#define MAX_TIMEBASE_TRIES 10

int rtclock_init(void);

static int
deadline_to_decrementer(uint64_t deadline,
                        uint64_t now);
static void
timebase_callback(struct timebase_freq_t * freq);

#if DEVELOPMENT || DEBUG
uint32_t absolute_time_validation = 1;
#endif

/*
 * Configure the real-time clock device at boot
 */
void
rtclock_early_init(void)
{
    PE_register_timebase_callback(timebase_callback);
#if DEVELOPMENT || DEBUG
    uint32_t tmp_mv = 1;
    if (kern_feature_override(KF_MATV_OVRD)) {
        absolute_time_validation = 0;
    }
    if (PE_parse_boot_argn("timebase_validation", &tmp_mv, sizeof(tmp_mv))) {
        if (tmp_mv == 0) {
            absolute_time_validation = 0;
        }
    }
#endif
}

static void
timebase_callback(struct timebase_freq_t * freq)
{
    unsigned long numer, denom;
    uint64_t t64_1, t64_2;
    uint32_t divisor;

    if (freq->timebase_den < 1 || freq->timebase_den > 4 ||
        freq->timebase_num < freq->timebase_den)
        panic("rtclock timebase_callback: invalid constant %lu / %lu",
              freq->timebase_num, freq->timebase_den);

    denom = freq->timebase_num;
    numer = freq->timebase_den * NSEC_PER_SEC;
    // reduce by the greatest common divisor to minimize overflow
    if (numer > denom) {
        t64_1 = numer;
        t64_2 = denom;
    } else {
        t64_1 = denom;
        t64_2 = numer;
    }
    while (t64_2 != 0) {
        uint64_t temp = t64_2;
        t64_2 = t64_1 % t64_2;
        t64_1 = temp;
    }
    numer /= t64_1;
    denom /= t64_1;
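
    /*
     * Worked example (hypothetical 24 MHz timebase: timebase_num =
     * 24000000, timebase_den = 1): numer starts at 1000000000 and denom
     * at 24000000; their GCD is 8000000, so the reduced ratio is 125 / 3.
     * Ticks then convert to nanoseconds as ticks * 125 / 3, with far
     * less risk of intermediate overflow than the unreduced ratio.
     */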

    rtclock_timebase_const.numer = (uint32_t)numer;
    rtclock_timebase_const.denom = (uint32_t)denom;
    divisor = (uint32_t)(freq->timebase_num / freq->timebase_den);

    rtclock_sec_divisor = divisor;
    rtclock_usec_divisor = divisor / USEC_PER_SEC;
}

/*
 * Initialize the system clock device for the current cpu
 */
int
rtclock_init(void)
{
    uint64_t abstime;
    cpu_data_t * cdp;

    clock_timebase_init();
    ml_init_lock_timeout();

    cdp = getCpuDatap();

    abstime = mach_absolute_time();
    cdp->rtcPop = EndOfAllTime; /* Init Pop time */
    timer_resync_deadlines();   /* Start the timers going */

    return (1);
}

uint64_t
mach_absolute_time(void)
{
#if DEVELOPMENT || DEBUG
    if (__improbable(absolute_time_validation == 1)) {
        static volatile uint64_t s_last_absolute_time = 0;
        uint64_t new_absolute_time, old_absolute_time;
        int attempts = 0;

        /* ARM 64: We need a dsb here to ensure that the load of s_last_absolute_time
         * completes before the timebase read. Were the load to complete after the
         * timebase read, there would be a window for another CPU to update
         * s_last_absolute_time and leave us in an inconsistent state. Consider the
         * following interleaving:
         *
         *   Let s_last_absolute_time = t0
         *   CPU0: Read timebase at t1
         *   CPU1: Read timebase at t2
         *   CPU1: Update s_last_absolute_time to t2
         *   CPU0: Load completes
         *   CPU0: Update s_last_absolute_time to t1
         *
         * This would cause the assertion to fail even though time did not go
         * backwards. Thus, we use a dsb to guarantee completion of the load before
         * the timebase read.
         */
        do {
            attempts++;
            old_absolute_time = s_last_absolute_time;

#if __arm64__
            __asm__ volatile("dsb ld" ::: "memory");
#else
            OSSynchronizeIO(); // See osfmk/arm64/rtclock.c
#endif

            new_absolute_time = ml_get_timebase();
        } while (attempts < MAX_TIMEBASE_TRIES && !OSCompareAndSwap64(old_absolute_time, new_absolute_time, &s_last_absolute_time));
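
        /*
         * Exiting with attempts < MAX_TIMEBASE_TRIES means the final CAS
         * succeeded, so old_absolute_time and new_absolute_time are a
         * consistent pair and the monotonicity check below is valid; if
         * the CAS never succeeded, the check is skipped.
         */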
        if (attempts < MAX_TIMEBASE_TRIES && old_absolute_time > new_absolute_time) {
            panic("mach_absolute_time returning non-monotonically increasing value 0x%llx (old value 0x%llx)\n",
                  new_absolute_time, old_absolute_time);
        }
        return new_absolute_time;
    } else {
        return ml_get_timebase();
    }
#else
    return ml_get_timebase();
#endif
}

uint64_t
mach_approximate_time(void)
{
#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__ || __arm64__
    /* Hardware supports a fast timestamp, so grab it without asserting monotonicity */
    return ml_get_timebase();
#else
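    /*
     * No fast timestamp source here, so approximate with the time
     * recorded at this processor's last dispatch rather than reading
     * the timebase.
     */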
    processor_t processor;
    uint64_t approx_time;

    disable_preemption();
    processor = current_processor();
    approx_time = processor->last_dispatch;
    enable_preemption();

    return approx_time;
#endif
}

void
clock_get_system_microtime(clock_sec_t * secs,
                           clock_usec_t * microsecs)
{
    absolutetime_to_microtime(mach_absolute_time(), secs, microsecs);
}

void
clock_get_system_nanotime(clock_sec_t * secs,
                          clock_nsec_t * nanosecs)
{
    uint64_t abstime;
    uint64_t t64;

    abstime = mach_absolute_time();
    *secs = (t64 = abstime / rtclock_sec_divisor);
    abstime -= (t64 * rtclock_sec_divisor);

    *nanosecs = (clock_nsec_t)((abstime * NSEC_PER_SEC) / rtclock_sec_divisor);
}

void
clock_gettimeofday_set_commpage(uint64_t abstime,
                                uint64_t sec,
                                uint64_t frac,
                                uint64_t scale,
                                uint64_t tick_per_sec)
{
    commpage_set_timestamp(abstime, sec, frac, scale, tick_per_sec);
}

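/*
 * Report the timebase conversion ratio: callers compute
 * nanoseconds = ticks * numer / denom. With the hypothetical 24 MHz
 * timebase from the example above, this would be ticks * 125 / 3.
 */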
void
clock_timebase_info(mach_timebase_info_t info)
{
    *info = rtclock_timebase_const;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(__unused unsigned int is_user_context)
{
    uint64_t abstime;
    cpu_data_t * cdp;
    struct arm_saved_state * regs;
    unsigned int user_mode;
    uintptr_t pc;

    cdp = getCpuDatap();

    cdp->cpu_stat.timer_cnt++;
    cdp->cpu_stat.timer_cnt_wake++;
    SCHED_STATS_TIMER_POP(current_processor());

    assert(!ml_get_interrupts_enabled());

    abstime = mach_absolute_time();

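    /*
     * An idle pop, if armed, fires cpu_idle_latency ticks before the
     * real deadline (see SetIdlePop). If the real deadline is now closer
     * than that latency, spin until it passes; otherwise re-arm the
     * original pop via ClearIdlePop.
     */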
    if (cdp->cpu_idle_pop != 0x0ULL) {
        if ((cdp->rtcPop - abstime) < cdp->cpu_idle_latency) {
            cdp->cpu_idle_pop = 0x0ULL;
            while (abstime < cdp->rtcPop)
                abstime = mach_absolute_time();
        } else {
            ClearIdlePop(FALSE);
        }
    }

    if ((regs = cdp->cpu_int_state)) {
        pc = get_saved_state_pc(regs);

#if __arm64__
        user_mode = PSR64_IS_USER(get_saved_state_cpsr(regs));
#else
        user_mode = (regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE;
#endif
    } else {
        pc = 0;
        user_mode = 0;
    }
    if (abstime >= cdp->rtcPop) {
        /* Log the interrupt service latency (negative value expected by the tracing tool) */
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
            -(abstime - cdp->rtcPop),
            user_mode ? pc : VM_KERNEL_UNSLIDE(pc), user_mode, 0, 0);
    }

    /* call the generic etimer */
    timer_intr(user_mode, pc);
}

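/*
 * Convert a deadline to a decrementer count relative to now, clamped to
 * the range [DECREMENTER_MIN, DECREMENTER_MAX].
 */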
static int
deadline_to_decrementer(uint64_t deadline,
                        uint64_t now)
{
    uint64_t delt;

    if (deadline <= now)
        return DECREMENTER_MIN;

    delt = deadline - now;
    if (delt >= (DECREMENTER_MAX + 1))
        return DECREMENTER_MAX;
    if (delt >= (DECREMENTER_MIN + 1))
        return (int)delt;
    return DECREMENTER_MIN;
}

/*
 * Request a decrementer pop
 */
int
setPop(uint64_t time)
{
    int delay_time;
    uint64_t current_time;
    cpu_data_t * cdp;

    cdp = getCpuDatap();
    current_time = mach_absolute_time();

    delay_time = deadline_to_decrementer(time, current_time);
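    /* Record the pop actually programmed: deadline_to_decrementer() may
     * have clamped the requested deadline to the decrementer's range. */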
    cdp->rtcPop = delay_time + current_time;

    ml_set_decrementer((uint32_t) delay_time);

    return (delay_time);
}

/*
 * Request decrementer Idle Pop. Return true if set
 */
boolean_t
SetIdlePop(void)
{
    int delay_time;
    uint64_t time;
    uint64_t current_time;
    cpu_data_t * cdp;

    cdp = getCpuDatap();
    current_time = mach_absolute_time();

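    /*
     * If the scheduled pop is already due, or closer than the idle-exit
     * latency, an early idle pop gains nothing; leave the decrementer
     * alone.
     */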
    if ((cdp->rtcPop < current_time) ||
        ((cdp->rtcPop - current_time) < cdp->cpu_idle_latency))
        return FALSE;

    time = cdp->rtcPop - cdp->cpu_idle_latency;

    delay_time = deadline_to_decrementer(time, current_time);
    cdp->cpu_idle_pop = delay_time + current_time;
    ml_set_decrementer((uint32_t) delay_time);

    return TRUE;
}

/*
 * Clear decrementer Idle Pop
 */
void
ClearIdlePop(
    boolean_t wfi)
{
#if !__arm64__
#pragma unused(wfi)
#endif
    cpu_data_t * cdp;

    cdp = getCpuDatap();
    cdp->cpu_idle_pop = 0x0ULL;

#if __arm64__
    /*
     * Don't update the HW timer if there's a pending interrupt
     * (we can lose the interrupt assertion); we want to take the
     * interrupt right now and update the deadline from the handler.
     *
     * ARM64_TODO: consider this more carefully.
     */
    if (!(wfi && ml_get_timer_pending()))
#endif
    {
        setPop(cdp->rtcPop);
    }
}

void
absolutetime_to_microtime(uint64_t abstime,
                          clock_sec_t * secs,
                          clock_usec_t * microsecs)
{
    uint64_t t64;

    *secs = t64 = abstime / rtclock_sec_divisor;
    abstime -= (t64 * rtclock_sec_divisor);

    *microsecs = (uint32_t)(abstime / rtclock_usec_divisor);
}

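/*
 * The conversions below split the value into whole seconds plus a
 * sub-second remainder so the intermediate multiply cannot overflow:
 * the remainder is strictly less than the per-second divisor (a 32-bit
 * value), so remainder * NSEC_PER_SEC stays well within 64 bits.
 */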
void
absolutetime_to_nanoseconds(uint64_t abstime,
                            uint64_t * result)
{
    uint64_t t64;

    *result = (t64 = abstime / rtclock_sec_divisor) * NSEC_PER_SEC;
    abstime -= (t64 * rtclock_sec_divisor);
    *result += (abstime * NSEC_PER_SEC) / rtclock_sec_divisor;
}

void
nanoseconds_to_absolutetime(uint64_t nanosecs,
                            uint64_t * result)
{
    uint64_t t64;

    *result = (t64 = nanosecs / NSEC_PER_SEC) * rtclock_sec_divisor;
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

void
nanotime_to_absolutetime(clock_sec_t secs,
                         clock_nsec_t nanosecs,
                         uint64_t * result)
{
    *result = ((uint64_t) secs * rtclock_sec_divisor) +
        ((uint64_t) nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

void
clock_interval_to_absolutetime_interval(uint32_t interval,
                                        uint32_t scale_factor,
                                        uint64_t * result)
{
    uint64_t nanosecs = (uint64_t) interval * scale_factor;
    uint64_t t64;

    *result = (t64 = nanosecs / NSEC_PER_SEC) * rtclock_sec_divisor;
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC;
}

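/*
 * Spin until the timebase reaches the given deadline, using WFE (where
 * enabled) to reduce power while waiting.
 */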
void
machine_delay_until(uint64_t interval,
                    uint64_t deadline)
{
#pragma unused(interval)
    uint64_t now;

    do {
#if __ARM_ENABLE_WFE_
#if __arm64__
        if (arm64_wfe_allowed())
#endif /* __arm64__ */
        {
            __builtin_arm_wfe();
        }
#endif /* __ARM_ENABLE_WFE_ */

        now = mach_absolute_time();
    } while (now < deadline);
}