2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
33 * File: i386/rtclock.c
34 * Purpose: Routines for handling the machine dependent
35 * real-time clock. Historically, this clock is
36 * generated by the Intel 8254 Programmable Interval
37 * Timer, but local apic timers are now used for
38 * this purpose with the master time reference being
39 * the cpu clock counted by the timestamp MSR.
42 #include <platforms.h>
45 #include <mach/mach_types.h>
47 #include <kern/cpu_data.h>
48 #include <kern/cpu_number.h>
49 #include <kern/clock.h>
50 #include <kern/host_notify.h>
51 #include <kern/macro_help.h>
52 #include <kern/misc_protos.h>
54 #include <kern/assert.h>
55 #include <mach/vm_prot.h>
57 #include <vm/vm_kern.h> /* for kernel_map */
60 #include <architecture/i386/pio.h>
61 #include <i386/misc_protos.h>
62 #include <i386/proc_reg.h>
63 #include <i386/machine_cpu.h>
65 #include <i386/cpuid.h>
66 #include <i386/cpu_data.h>
67 #include <i386/cpu_threads.h>
68 #include <i386/perfmon.h>
69 #include <i386/machine_routines.h>
70 #include <pexpert/pexpert.h>
71 #include <machine/limits.h>
72 #include <machine/commpage.h>
73 #include <sys/kdebug.h>
75 #include <i386/hpet.h>
76 #include <i386/rtclock.h>
/*
 * Local MIN/MAX helpers.
 *
 * The previous single-expression forms evaluated one argument twice,
 * a latent hazard if a caller ever passes an expression with side
 * effects.  Rewritten as GCC statement expressions (this file already
 * depends on GCC extensions, e.g. extended inline asm) so each
 * argument is evaluated exactly once.  Usage in this file is runtime
 * arithmetic only, so statement expressions are safe here.
 */
#define MAX(a,b) ({ __typeof__(a) _max_a = (a); __typeof__(b) _max_b = (b); \
		    _max_a > _max_b ? _max_a : _max_b; })
#define MIN(a,b) ({ __typeof__(a) _min_a = (a); __typeof__(b) _min_b = (b); \
		    _min_a > _min_b ? _min_b : _min_a; })
/*
 * File-scope constants, prototypes and state for the rtclock.
 * NOTE(review): this extract is garbled -- stray listing numbers
 * prefix lines and some original lines are missing; tokens are
 * preserved as-is below.
 */
/* Tick period: the rtclock tick runs at 100Hz in this configuration. */
81 #define NSEC_PER_HZ (NSEC_PER_SEC / 100) /* nsec per tick */
/* Exported cpu frequency is rounded to a multiple of 10MHz (see rtc_export_speed). */
83 #define UI_CPUFREQ_ROUNDING_FACTOR 10000000
85 int rtclock_config(void);
87 int rtclock_init(void);
/* Smallest decrementer value we will program (computed in the init path). */
89 uint64_t rtc_decrementer_min
;
91 void rtclock_intr(x86_saved_state_t
*regs
);
92 static uint64_t maxDec
; /* longest interval our hardware timer can handle (nsec) */
94 /* XXX this should really be in a header somewhere */
95 extern clock_timer_func_t rtclock_timer_expire
;
97 static void rtc_set_timescale(uint64_t cycles
);
98 static uint64_t rtc_export_speed(uint64_t cycles
);
/* External helpers that store/load the nanotime tuple; presumably
 * implemented in assembly for atomicity -- TODO confirm against full source. */
100 extern void rtc_nanotime_store(
105 rtc_nanotime_t
*dst
);
107 extern void rtc_nanotime_load(
109 rtc_nanotime_t
*dst
);
/* The kernel's master nanotime record (also mirrored into the commpage). */
111 rtc_nanotime_t rtc_nanotime_info
;
114 * tsc_to_nanoseconds:
116 * Basic routine to convert a raw 64 bit TSC value to a
117 * 64 bit nanosecond value. The conversion is implemented
118 * based on the scale factor and an implicit 32 bit shift.
120 static inline uint64_t
121 _tsc_to_nanoseconds(uint64_t value
)
/*
 * Multiply the 64-bit TSC value by the 32-bit scale from
 * rtc_nanotime_info and drop the low 32 bits of the product (the
 * "implicit 32 bit shift" described above).  "+A" binds value to the
 * edx:eax register pair; "c" passes the scale in ecx; esi/edi are
 * scratch.  NOTE(review): the interior asm lines are missing from
 * this extract -- confirm the full multiply sequence against the
 * original source.
 */
123 asm volatile("movl %%edx,%%esi ;"
130 : "+A" (value
) : "c" (rtc_nanotime_info
.scale
) : "esi", "edi");
/*
 * Public wrapper around the inline conversion helper above.
 * NOTE(review): the return-type line and braces are missing from this
 * extract.
 */
136 tsc_to_nanoseconds(uint64_t value
)
138 return _tsc_to_nanoseconds(value
);
/*
 * Convert an absolute deadline into a decrementer interval, clamped
 * to [rtc_decrementer_min, maxDec].  The early return of
 * rtc_decrementer_min presumably handles a deadline already in the
 * past -- the guarding condition is missing from this extract, TODO
 * confirm.
 */
142 deadline_to_decrementer(
149 return rtc_decrementer_min
;
/* Deadline is in the future: clamp the remaining time. */
151 delta
= deadline
- now
;
152 return MIN(MAX(rtc_decrementer_min
,delta
),maxDec
);
/*
 * Start periodic rtclock ticks on the current cpu: record the first
 * tick deadline (now + one tick interval) in this cpu's data, then
 * force the event-timer layer to recompute deadlines.
 */
157 rtc_lapic_start_ticking(void)
161 cpu_data_t
*cdp
= current_cpu_datap();
163 abstime
= mach_absolute_time();
/* One tick every NSEC_PER_HZ nanoseconds (100Hz). */
164 rtclock_tick_interval
= NSEC_PER_HZ
;
166 first_tick
= abstime
+ rtclock_tick_interval
;
167 cdp
->rtclock_intr_deadline
= first_tick
;
/* Invalidate any previously-programmed pop so it is recomputed. */
170 * Force a complete re-evaluation of timer deadlines.
172 cdp
->rtcPop
= EndOfAllTime
;
173 etimer_resync_deadlines();
177 * Configure the real-time clock device. Return success (1)
190 * Nanotime/mach_absolute_time
191 * -----------------------------
192 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
193 * efficiently by the kernel and in userspace - is the reference for all timing.
194 * The cpu clock rate is platform-dependent and may stop or be reset when the
195 * processor is napped/slept. As a result, nanotime is the software abstraction
196 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
198 * The kernel maintains nanotime information recording:
199 * - the ratio of tsc to nanoseconds
200 * with this ratio expressed as a 32-bit scale and shift
201 * (power of 2 divider);
202 * - { tsc_base, ns_base } pair of corresponding timestamps.
204 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
205 * for the userspace nanotime routine to read.
207 * All of the routines which update the nanotime data are non-reentrant. This must
208 * be guaranteed by the caller.
/*
 * Publish the current nanotime tuple to the commpage so userspace can
 * compute nanotime directly.
 */
211 rtc_nanotime_set_commpage(rtc_nanotime_t
*rntp
)
213 commpage_set_nanotime(rntp
->tsc_base
, rntp
->ns_base
, rntp
->scale
, rntp
->shift
);
219 * Initialize the nanotime info from the base time. Since
220 * the base value might be from a lower resolution clock,
221 * we compare it to the TSC derived value, and use the
222 * greater of the two values.
225 _rtc_nanotime_init(rtc_nanotime_t
*rntp
, uint64_t base
)
227 uint64_t nsecs
, tsc
= rdtsc64();
/* Nanoseconds implied by the current raw TSC reading. */
229 nsecs
= _tsc_to_nanoseconds(tsc
);
/* Record {tsc, max(tsc-derived ns, caller's base)} as the new bases. */
230 rtc_nanotime_store(tsc
, MAX(nsecs
, base
), rntp
->scale
, rntp
->shift
, rntp
);
/*
 * Initialize the master nanotime record from a base nanosecond value
 * and publish the result to the commpage.
 */
234 rtc_nanotime_init(uint64_t base
)
236 rtc_nanotime_t
*rntp
= &rtc_nanotime_info
;
238 _rtc_nanotime_init(rntp
, base
);
239 rtc_nanotime_set_commpage(rntp
);
245 * Call back from the commpage initialization to
246 * cause the commpage data to be filled in once the
247 * commpages have been created.
250 rtc_nanotime_init_commpage(void)
/* Raise to clock spl while touching the shared nanotime record.
 * NOTE(review): the matching splx(s) is not visible in this extract. */
252 spl_t s
= splclock();
254 rtc_nanotime_set_commpage(&rtc_nanotime_info
);
260 * rtc_nanotime_update:
262 * Update the nanotime info from the base time. Since
263 * the base value might be from a lower resolution clock,
264 * we compare it to the TSC derived value, and use the
265 * greater of the two values.
267 * N.B. In comparison to the above init routine, this assumes
268 * that the TSC has remained monotonic compared to the tsc_base
269 * value, which is not the case after S3 sleep.
272 _rtc_nanotime_update(rtc_nanotime_t
*rntp
, uint64_t base
)
274 uint64_t nsecs
, tsc
= rdtsc64();
/* Advance ns_base by the nanoseconds elapsed since tsc_base. */
276 nsecs
= rntp
->ns_base
+ _tsc_to_nanoseconds(tsc
- rntp
->tsc_base
);
/* Re-anchor the tuple at the current TSC, never moving time backward
 * past the caller-supplied base. */
277 rtc_nanotime_store(tsc
, MAX(nsecs
, base
), rntp
->scale
, rntp
->shift
, rntp
);
/*
 * Body fragment of rtc_nanotime_update() -- the function header is
 * missing from this extract.  Updates the master nanotime record
 * (interrupts must be disabled, per the assert) and republishes it to
 * the commpage.
 */
284 rtc_nanotime_t
*rntp
= &rtc_nanotime_info
;
286 assert(!ml_get_interrupts_enabled());
288 _rtc_nanotime_update(rntp
, base
);
289 rtc_nanotime_set_commpage(rntp
);
295 * Returns the current nanotime value, accessible from any
299 rtc_nanotime_read(void)
301 rtc_nanotime_t rnt
, *rntp
= &rtc_nanotime_info
;
/*
 * Snapshot the nanotime tuple and compute ns_base plus the
 * nanoseconds elapsed since its tsc_base.  The loop retries if
 * tsc_base changed mid-read (a concurrent update), so the result is
 * computed from a consistent snapshot.
 */
305 rtc_nanotime_load(rntp
, &rnt
);
306 result
= rnt
.ns_base
+ _tsc_to_nanoseconds(rdtsc64() - rnt
.tsc_base
);
307 } while (rntp
->tsc_base
!= rnt
.tsc_base
);
315 * Invoked from power management when we have awoken from a nap (C3/C4)
316 * during which the TSC lost counts. The nanotime data is updated according
317 * to the provided nanosecond base value.
319 * The caller must guarantee non-reentrancy.
/* Body fragment -- the function header is missing from this extract
 * (presumably rtc_clock_napped; confirm against full source). */
325 rtc_nanotime_update(base
);
/*
 * Called on a cpu clock frequency step; frequency stepping is not
 * supported by this implementation, so this unconditionally panics.
 */
329 rtc_clock_stepping(__unused
uint32_t new_frequency
,
330 __unused
uint32_t old_frequency
)
332 panic("rtc_clock_stepping unsupported");
336 rtc_clock_stepped(__unused
uint32_t new_frequency
,
337 __unused
uint32_t old_frequency
)
339 panic("rtc_clock_stepping unsupported");
345 * Invoked from power management when we have awoken from a sleep (S3)
346 * and the TSC has been reset. The nanotime data is updated based on
349 * The caller must guarantee non-reentrancy.
352 rtc_sleep_wakeup(void)
/* Disable interrupts while re-basing time; restored below. */
356 istate
= ml_set_interrupts_enabled(FALSE
);
360 * The timestamp counter will have been reset
361 * but nanotime (uptime) marches onward.
/* Re-base nanotime from the HPET (converted from HPET ticks to ns),
 * since the TSC can no longer be trusted across S3. */
363 rtc_nanotime_init(tmrCvt(rdHPET(), hpetCvtt2n
));
365 /* Restart tick interrupts from the LAPIC timer */
366 rtc_lapic_start_ticking();
368 ml_set_interrupts_enabled(istate
);
372 * Initialize the real-time clock device.
373 * In addition, various variables used to support the clock are initialized.
/* NOTE(review): the function header is missing from this extract; the
 * body below matches the rtclock init path (master-cpu one-time setup
 * followed by per-cpu tick start). */
380 assert(!ml_get_interrupts_enabled());
/* One-time setup performed only on the boot (master) cpu. */
382 if (cpu_number() == master_cpu
) {
/* Derive the TSC->ns scale from the measured TSC frequency. */
385 rtc_set_timescale(tscFreq
);
388 * Adjust and set the exported cpu speed.
390 cycles
= rtc_export_speed(tscFreq
);
393 * Set min/max to actual.
394 * ACPI may update these later if speed-stepping is detected.
396 gPEClockFrequencyInfo
.cpu_frequency_min_hz
= cycles
;
397 gPEClockFrequencyInfo
.cpu_frequency_max_hz
= cycles
;
400 * Compute the longest interval we can represent.
/* 0x7fffffff is the largest value the 32-bit LAPIC timer can count;
 * convert from bus ticks to nanoseconds. */
402 maxDec
= tmrCvt(0x7fffffffULL
, busFCvtt2n
);
403 kprintf("maxDec: %lld\n", maxDec
);
405 /* Minimum interval is 1usec */
406 rtc_decrementer_min
= deadline_to_decrementer(NSEC_PER_USEC
, 0ULL);
407 /* Point LAPIC interrupts to hardclock() */
408 lapic_set_timer_func((i386_intr_func_t
) rtclock_intr
);
410 clock_timebase_init();
411 ml_init_lock_timeout();
/* Per-cpu: begin periodic ticking on this processor. */
414 rtc_lapic_start_ticking();
420 // Code to calculate how many processor cycles are in a second...
/*
 * Set the TSC->nanosecond conversion: scale = (10^9 << 32) / cycles
 * with a fixed 32-bit shift, then (re)initialize nanotime from zero.
 */
423 rtc_set_timescale(uint64_t cycles
)
425 rtc_nanotime_info
.scale
= ((uint64_t)NSEC_PER_SEC
<< 32) / cycles
;
426 rtc_nanotime_info
.shift
= 32;
428 rtc_nanotime_init(0);
/*
 * Round the measured cpu frequency to a multiple of
 * UI_CPUFREQ_ROUNDING_FACTOR (10MHz) and export it via
 * gPEClockFrequencyInfo.  Returns the rounded value; the 32-bit
 * cpu_clock_rate_hz field saturates at 0xFFFFFFFF for >4GHz parts.
 */
432 rtc_export_speed(uint64_t cyc_per_sec
)
/* Round-half-up to the nearest 10MHz. */
437 cycles
= ((cyc_per_sec
+ (UI_CPUFREQ_ROUNDING_FACTOR
/2))
438 / UI_CPUFREQ_ROUNDING_FACTOR
)
439 * UI_CPUFREQ_ROUNDING_FACTOR
;
442 * Set current measured speed.
444 if (cycles
>= 0x100000000ULL
) {
/* Doesn't fit in 32 bits: clamp the legacy field. */
445 gPEClockFrequencyInfo
.cpu_clock_rate_hz
= 0xFFFFFFFFUL
;
447 gPEClockFrequencyInfo
.cpu_clock_rate_hz
= (unsigned long)cycles
;
449 gPEClockFrequencyInfo
.cpu_frequency_hz
= cycles
;
451 kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles
, cyc_per_sec
);
/*
 * Split the current nanotime into seconds and microseconds using
 * inline-asm 64/32 divides ("A" = edx:eax): first divide by
 * NSEC_PER_SEC for *secs with the nanosecond remainder in remain,
 * then divide remain by NSEC_PER_USEC.  NOTE(review): the asm
 * mnemonic lines and parameter list are missing from this extract.
 */
456 clock_get_system_microtime(
460 uint64_t now
= rtc_nanotime_read();
465 : "=a" (*secs
), "=d" (remain
)
466 : "A" (now
), "r" (NSEC_PER_SEC
));
470 : "0" (remain
), "d" (0), "r" (NSEC_PER_USEC
));
/*
 * Split the current nanotime into seconds (*secs) and leftover
 * nanoseconds (*nanosecs) with one inline-asm 64/32 divide by
 * NSEC_PER_SEC.  NOTE(review): asm mnemonic lines and the parameter
 * list are missing from this extract.
 */
474 clock_get_system_nanotime(
478 uint64_t now
= rtc_nanotime_read();
482 : "=a" (*secs
), "=d" (*nanosecs
)
483 : "A" (now
), "r" (NSEC_PER_SEC
));
/*
 * Update the commpage gettimeofday timestamp from the supplied
 * abstime: divide into seconds/remainder as above, then publish the
 * second-aligned abstime (abstime - remain), the seconds value, and
 * the tick length.  NOTE(review): asm mnemonic lines and the
 * parameter list are missing from this extract.
 */
487 clock_gettimeofday_set_commpage(
494 uint64_t now
= abstime
;
501 : "=a" (*secs
), "=d" (remain
)
502 : "A" (now
), "r" (NSEC_PER_SEC
));
506 : "0" (remain
), "d" (0), "r" (NSEC_PER_USEC
));
510 commpage_set_timestamp(abstime
- remain
, *secs
, NSEC_PER_SEC
);
/*
 * Fragment -- the function name line is missing from this extract
 * (presumably clock_timebase_info; confirm against full source).
 * Reports a 1:1 timebase ratio: on this platform mach_absolute_time
 * is already in nanoseconds.
 */
515 mach_timebase_info_t info
)
517 info
->numer
= info
->denom
= 1;
/*
 * Register the timer-expiry callback.  Write-once: the callback is
 * only installed if none has been registered yet.
 */
521 clock_set_timer_func(
522 clock_timer_func_t func
)
524 if (rtclock_timer_expire
== NULL
)
525 rtclock_timer_expire
= func
;
529 * Real-time clock device interrupt.
/* NOTE(review): the function name line and portions of the body
 * (including where user_mode and rip are derived from the saved
 * state) are missing from this extract. */
533 x86_saved_state_t
*tregs
)
536 boolean_t user_mode
= FALSE
;
539 cpu_data_t
*pp
= current_cpu_datap();
541 assert(get_preemption_level() > 0);
542 assert(!ml_get_interrupts_enabled());
/* Interrupt-service latency: how far past the programmed pop we are. */
544 abstime
= rtc_nanotime_read();
545 latency
= (uint32_t) abstime
- pp
->rtcPop
;
/* Pick the 64- or 32-bit saved state to recover the interrupted rip. */
547 if (is_saved_state64(tregs
) == TRUE
) {
548 x86_saved_state64_t
*regs
;
550 regs
= saved_state64(tregs
);
555 x86_saved_state32_t
*regs
;
557 regs
= saved_state32(tregs
);
564 /* Log the interrupt service latency (-ve value expected by tool) */
565 KERNEL_DEBUG_CONSTANT(
566 MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 0) | DBG_FUNC_NONE
,
567 -latency
, (uint32_t)rip
, user_mode
, 0, 0);
569 /* call the generic etimer */
570 etimer_intr(user_mode
, rip
);
574 * Request timer pop from the hardware
/* NOTE(review): the function header is missing from this extract
 * (presumably setPop(uint64_t time); confirm against full source).
 * Converts the deadline to a clamped decrementer interval, programs
 * the LAPIC one-shot timer in bus ticks, and returns the interval. */
585 now
= rtc_nanotime_read(); /* The time in nanoseconds */
586 decr
= deadline_to_decrementer(time
, now
);
/* Convert nanoseconds to bus-clock ticks for the LAPIC counter. */
588 count
= tmrCvt(decr
, busFCvtn2t
);
589 lapic_set_timer(TRUE
, one_shot
, divide_by_1
, (uint32_t) count
);
591 return decr
; /* Pass back what we set */
/*
 * Fragment -- the function header is missing from this extract
 * (presumably resetPop, which reprograms the LAPIC timer from this
 * cpu's recorded rtcPop deadline; confirm against full source).
 */
601 cpu_data_t
*cdp
= current_cpu_datap();
603 now
= rtc_nanotime_read();
/* Recompute the interval to the existing per-cpu deadline. */
605 decr
= deadline_to_decrementer(cdp
->rtcPop
, now
);
607 count
= tmrCvt(decr
, busFCvtn2t
);
608 lapic_set_timer(TRUE
, one_shot
, divide_by_1
, (uint32_t)count
);
/*
 * On this platform mach_absolute_time is simply nanotime: the
 * abstime unit is the nanosecond.  NOTE(review): return-type line and
 * braces are missing from this extract.
 */
613 mach_absolute_time(void)
615 return rtc_nanotime_read();
/*
 * Scale a (count, unit) interval into abstime (nanoseconds):
 * *result = interval * scale_factor.  NOTE(review): the first
 * parameter line is missing from this extract.
 */
619 clock_interval_to_absolutetime_interval(
621 uint32_t scale_factor
,
624 *result
= (uint64_t)interval
* scale_factor
;
/*
 * Split abstime (nanoseconds) into seconds and microseconds via
 * inline-asm 64/32 divides, as in clock_get_system_microtime above.
 * NOTE(review): asm mnemonic lines and the parameter list are missing
 * from this extract.
 */
628 absolutetime_to_microtime(
637 : "=a" (*secs
), "=d" (remain
)
638 : "A" (abstime
), "r" (NSEC_PER_SEC
));
642 : "0" (remain
), "d" (0), "r" (NSEC_PER_USEC
));
/*
 * Split abstime (nanoseconds) into seconds and leftover nanoseconds
 * with one inline-asm divide by NSEC_PER_SEC.  NOTE(review): asm
 * mnemonic lines and the parameter list are missing from this
 * extract.
 */
646 absolutetime_to_nanotime(
653 : "=a" (*secs
), "=d" (*nanosecs
)
654 : "A" (abstime
), "r" (NSEC_PER_SEC
));
/*
 * Combine (secs, nanosecs) into a single abstime value in
 * nanoseconds.  NOTE(review): the parameter list is missing from this
 * extract.
 */
658 nanotime_to_absolutetime(
663 *result
= ((uint64_t)secs
* NSEC_PER_SEC
) + nanosecs
;
/* Header fragment only -- the body is missing from this extract.
 * Presumably an identity copy, since abstime is already nanoseconds
 * here (see nanoseconds_to_absolutetime below); confirm. */
667 absolutetime_to_nanoseconds(
/*
 * Identity conversion: abstime is in nanoseconds on this platform.
 * NOTE(review): the result-parameter line is missing from this
 * extract.
 */
675 nanoseconds_to_absolutetime(
676 uint64_t nanoseconds
,
679 *result
= nanoseconds
;
/*
 * Tail fragment of a spin-wait routine (header missing from this
 * extract): busy-loops reading mach_absolute_time() until the
 * deadline has passed.
 */
690 now
= mach_absolute_time();
691 } while (now
< deadline
);