/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:		i386/rtclock.c
 *	Purpose:	Routines for handling the machine-dependent
 *			real-time clock. Historically, this clock was
 *			generated by the Intel 8254 Programmable Interval
 *			Timer, but local APIC timers are now used for
 *			this purpose, with the master time reference being
 *			the cpu clock counted by the timestamp MSR.
 */
#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>			/* for spl_t, splclock() */
#include <kern/assert.h>
#include <kern/timer_queue.h>
#include <mach/vm_prot.h>
#include <vm/vm_kern.h>			/* for kernel_map */
#include <architecture/i386/pio.h>
#include <i386/machine_cpu.h>
#include <i386/cpuid.h>
#include <i386/cpu_threads.h>
#include <i386/machine_routines.h>
#include <i386/pal_routines.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
#include <i386/tsc.h>			/* for tscFreq, tsc_at_boot */
#include <i386/rtclock_protos.h>

#define UI_CPUFREQ_ROUNDING_FACTOR	10000000
int		rtclock_init(void);

uint64_t	tsc_rebase_abs_time = 0;

volatile uint64_t gAcpiLastSleepTscBase = 0;
volatile uint64_t gAcpiLastSleepNanoBase = 0;
volatile uint64_t gAcpiLastWakeTscBase = 0;
volatile uint64_t gAcpiLastWakeNanoBase = 0;

static void	rtc_set_timescale(uint64_t cycles);
static uint64_t	rtc_export_speed(uint64_t cycles);
void
rtc_timer_start(void)
{
	/*
	 * Force a complete re-evaluation of timer deadlines.
	 */
	x86_lcpu()->rtcDeadline = EndOfAllTime;
	timer_resync_deadlines();
}
static inline uint32_t
_absolutetime_to_microtime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *microsecs)
{
	uint32_t remain;

	*secs = abstime / (uint64_t)NSEC_PER_SEC;
	remain = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC);
	*microsecs = remain / NSEC_PER_USEC;

	return remain;
}
static inline void
_absolutetime_to_nanotime(uint64_t abstime, clock_sec_t *secs, clock_nsec_t *nanosecs)
{
	*secs = abstime / (uint64_t)NSEC_PER_SEC;
	*nanosecs = (clock_nsec_t)(abstime % (uint64_t)NSEC_PER_SEC);
}
/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * The cpu clock rate is platform-dependent and may stop or be reset when the
 * processor is napped/slept.  As a result, nanotime is the software abstraction
 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
 *
 * The kernel maintains nanotime information recording:
 *	- the ratio of tsc to nanoseconds
 *	  with this ratio expressed as a 32-bit scale and shift
 *	  (power of 2 divider);
 *	- { tsc_base, ns_base } pair of corresponding timestamps.
 *
 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read.
 *
 * All of the routines which update the nanotime data are non-reentrant.  This
 * must be guaranteed by the caller.
 */
static inline void
rtc_nanotime_set_commpage(pal_rtc_nanotime_t *rntp)
{
	commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}
/*
 * _rtc_nanotime_init:
 *
 * Initialize the nanotime info from the base time.
 */
static inline void
_rtc_nanotime_init(pal_rtc_nanotime_t *rntp, uint64_t base)
{
	uint64_t	tsc = rdtsc64();

	_pal_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
}
void
rtc_nanotime_init(uint64_t base)
{
	gAcpiLastSleepTscBase = pal_rtc_nanotime_info.tsc_base;
	gAcpiLastSleepNanoBase = pal_rtc_nanotime_info.ns_base;

	_rtc_nanotime_init(&pal_rtc_nanotime_info, base);

	gAcpiLastWakeTscBase = pal_rtc_nanotime_info.tsc_base;
	gAcpiLastWakeNanoBase = pal_rtc_nanotime_info.ns_base;

	rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
}
/*
 * rtc_nanotime_init_commpage:
 *
 * Call back from the commpage initialization to
 * cause the commpage data to be filled in once the
 * commpages have been created.
 */
void
rtc_nanotime_init_commpage(void)
{
	spl_t s = splclock();

	rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
	splx(s);
}
/*
 * rtc_nanotime_read:
 *
 * Returns the current nanotime value, accessible from any
 * context.
 */
static inline uint64_t
rtc_nanotime_read(void)
{
	return _rtc_nanotime_read(&pal_rtc_nanotime_info);
}
/*
 * rtc_clock_napped:
 *
 * Invoked from power management when we exit from a low C-State (>= C4)
 * and the TSC has stopped counting.  The nanotime data is updated according
 * to the provided value which represents the new value for nanotime.
 */
void
rtc_clock_napped(uint64_t base, uint64_t tsc_base)
{
	pal_rtc_nanotime_t	*rntp = &pal_rtc_nanotime_info;
	uint64_t	oldnsecs;
	uint64_t	newnsecs;
	uint64_t	tsc;

	assert(!ml_get_interrupts_enabled());
	tsc = rdtsc64();
	oldnsecs = rntp->ns_base + _rtc_tsc_to_nanoseconds(tsc - rntp->tsc_base, rntp);
	newnsecs = base + _rtc_tsc_to_nanoseconds(tsc - tsc_base, rntp);

	/*
	 * Only update the base values if time using the new base values
	 * is later than the time using the old base values.
	 */
	if (oldnsecs < newnsecs) {
		_pal_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
		rtc_nanotime_set_commpage(rntp);
	}
}
/*
 * Invoked from power management to correct the SFLM TSC entry drift problem:
 * a small delta is added to the tsc_base.  This is equivalent to nudging time
 * backwards.  We require this to be on the order of a TSC quantum, which won't
 * cause callers of mach_absolute_time() to see time going backwards!
 */
void
rtc_clock_adjust(uint64_t tsc_base_delta)
{
	pal_rtc_nanotime_t	*rntp = &pal_rtc_nanotime_info;

	assert(!ml_get_interrupts_enabled());
	assert(tsc_base_delta < 100ULL);	/* i.e. it's small */
	_rtc_nanotime_adjust(tsc_base_delta, rntp);
	rtc_nanotime_set_commpage(rntp);
}
/*
 * rtc_sleep_wakeup:
 *
 * Invoked from power management when we have awoken from a sleep (S3)
 * and the TSC has been reset, or from Deep Idle (S0) sleep when the TSC
 * has progressed.  The nanotime data is updated based on the passed-in value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_sleep_wakeup(uint64_t base)
{
	/* Set fixed configuration for lapic timers */
	rtc_timer->rtc_config();

	/*
	 * Reset nanotime.
	 * The timestamp counter will have been reset
	 * but nanotime (uptime) marches onward.
	 */
	rtc_nanotime_init(base);
}
void
rtc_decrementer_configure(void)
{
	rtc_timer->rtc_config();
}
/*
 * rtclock_early_init() is called very early at boot to
 * establish mach_absolute_time() and set it to zero.
 */
void
rtclock_early_init(void)
{
	assert(tscFreq);
	rtc_set_timescale(tscFreq);
}
/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
rtclock_init(void)
{
	uint64_t	cycles;

	assert(!ml_get_interrupts_enabled());

	if (cpu_number() == master_cpu) {
		/*
		 * Adjust and set the exported cpu speed.
		 */
		cycles = rtc_export_speed(tscFreq);

		/*
		 * Set min/max to actual.
		 * ACPI may update these later if speed-stepping is detected.
		 */
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;

		rtc_timer_init();
		clock_timebase_init();
		ml_init_lock_timeout();
		ml_init_delay_spin_threshold(10);
	}

	/* Set fixed configuration for lapic timers */
	rtc_timer->rtc_config();
	rtc_timer_start();

	return 1;
}
// Code to calculate how many processor cycles are in a second...

static void
rtc_set_timescale(uint64_t cycles)
{
	pal_rtc_nanotime_t	*rntp = &pal_rtc_nanotime_info;
	uint32_t		shift = 0;

	/* the "scale" factor will overflow unless cycles > SLOW_TSC_THRESHOLD */
	while (cycles <= SLOW_TSC_THRESHOLD) {
		shift++;
		cycles <<= 1;
	}

	rntp->scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles);
	rntp->shift = shift;

	/*
	 * On some platforms, the TSC is not reset at warm boot. But the
	 * rebase time must be relative to the current boot so we can't use
	 * mach_absolute_time(). Instead, we convert the TSC delta since boot
	 * to nanoseconds.
	 */
	if (tsc_rebase_abs_time == 0) {
		tsc_rebase_abs_time = _rtc_tsc_to_nanoseconds(
			rdtsc64() - tsc_at_boot, rntp);
	}

	rtc_nanotime_init(0);
}
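/*
 * Worked example (editor's addition, assumed numbers): for a 2.4 GHz TSC,
 * cycles = 2400000000 is already above SLOW_TSC_THRESHOLD, so shift = 0 and
 *
 *	scale = (NSEC_PER_SEC << 32) / 2400000000
 *	      = (10^9 * 2^32) / (2.4 * 10^9) ~= 0x6AAAAAAA
 *
 * i.e. each TSC tick contributes scale / 2^32 ~= 0.4167 ns, recovering the
 * expected 1 / 2.4 GHz period.
 */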
static uint64_t
rtc_export_speed(uint64_t cyc_per_sec)
{
	pal_rtc_nanotime_t	*rntp = &pal_rtc_nanotime_info;
	uint64_t	cycles;

	if (rntp->shift != 0) {
		printf("Slow TSC, rtc_nanotime.shift == %d\n", rntp->shift);
	}

	/* Round: */
	cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR / 2))
	          / UI_CPUFREQ_ROUNDING_FACTOR)
	         * UI_CPUFREQ_ROUNDING_FACTOR;

	/*
	 * Set current measured speed.
	 */
	if (cycles >= 0x100000000ULL) {
		/* Saturate: the exported clock rate field is only 32 bits wide */
		gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
	} else {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
	}
	gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

	kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);

	return cycles;
}
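/*
 * Rounding example (editor's addition, assumed numbers): with
 * UI_CPUFREQ_ROUNDING_FACTOR = 10000000 (10 MHz), a measured cyc_per_sec
 * of 2395033402 Hz becomes
 *
 *	((2395033402 + 5000000) / 10000000) * 10000000 = 2400000000
 *
 * so the UI reports a clean 2.4 GHz instead of the raw calibration value.
 */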
void
clock_get_system_microtime(
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	uint64_t	now = rtc_nanotime_read();

	_absolutetime_to_microtime(now, secs, microsecs);
}
void
clock_get_system_nanotime(
	clock_sec_t	*secs,
	clock_nsec_t	*nanosecs)
{
	uint64_t	now = rtc_nanotime_read();

	_absolutetime_to_nanotime(now, secs, nanosecs);
}
void
clock_gettimeofday_set_commpage(uint64_t abstime, uint64_t sec, uint64_t frac,
    uint64_t scale, uint64_t tick_per_sec)
{
	commpage_set_timestamp(abstime, sec, frac, scale, tick_per_sec);
}
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	info->numer = info->denom = 1;
}
/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
	x86_saved_state_t	*tregs)
{
	uint64_t	rip;
	boolean_t	user_mode = FALSE;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	if (is_saved_state64(tregs) == TRUE) {
		x86_saved_state64_t	*regs;

		regs = saved_state64(tregs);

		/* Low two bits of CS hold the CPL; non-zero means user mode */
		if (regs->isf.cs & 0x03) {
			user_mode = TRUE;
		}
		rip = regs->isf.rip;
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(tregs);

		if (regs->cs & 0x03) {
			user_mode = TRUE;
		}
		rip = regs->eip;
	}

	/* call the generic etimer */
	timer_intr(user_mode, rip);
}
/*
 * Request timer pop from the hardware
 */
uint64_t
setPop(uint64_t time)
{
	uint64_t	now;
	uint64_t	pop;

	/* 0 and EndOfAllTime are special-cases for "clear the timer" */
	if (time == 0 || time == EndOfAllTime) {
		time = EndOfAllTime;
		now = 0;
		pop = rtc_timer->rtc_set(0, 0);
	} else {
		now = rtc_nanotime_read();	/* The time in nanoseconds */
		pop = rtc_timer->rtc_set(time, now);
	}

	/* Record requested and actual deadlines set */
	x86_lcpu()->rtcDeadline = time;
	x86_lcpu()->rtcPop = pop;

	return pop;
}
uint64_t
mach_absolute_time(void)
{
	return rtc_nanotime_read();
}

uint64_t
mach_approximate_time(void)
{
	return rtc_nanotime_read();
}
void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	*result = (uint64_t)interval * scale_factor;
}
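/*
 * Usage sketch (editor's addition, hypothetical helper): arming a deadline
 * some number of microseconds from now.  Because clock_timebase_info()
 * above reports a 1:1 timebase, absolute time on this platform is already
 * in nanoseconds.
 */
static inline uint64_t
illustrative_deadline_after_usecs(uint32_t usecs)
{
	uint64_t	interval;

	clock_interval_to_absolutetime_interval(usecs, NSEC_PER_USEC, &interval);
	return mach_absolute_time() + interval;
}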
void
absolutetime_to_microtime(
	uint64_t	abstime,
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	_absolutetime_to_microtime(abstime, secs, microsecs);
}
void
nanotime_to_absolutetime(
	clock_sec_t	secs,
	clock_nsec_t	nanosecs,
	uint64_t	*result)
{
	*result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
}
void
absolutetime_to_nanoseconds(uint64_t abstime, uint64_t *result)
{
	/* Identity: the timebase is 1:1, so abstime is already nanoseconds */
	*result = abstime;
}

void
nanoseconds_to_absolutetime(uint64_t nanoseconds, uint64_t *result)
{
	*result = nanoseconds;
}

void
machine_delay_until(uint64_t interval, uint64_t deadline)
{
	(void)interval;
	while (mach_absolute_time() < deadline) {
		cpu_pause();
	}
}