/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * File:	i386/rtclock.c
 * Purpose:	Routines for handling the machine dependent
 *		real-time clock. Historically, this clock is
 *		generated by the Intel 8254 Programmable Interval
 *		Timer, but local apic timers are now used for
 *		this purpose with the master time reference being
 *		the cpu clock counted by the timestamp MSR.
 */
#include <platforms.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <mach/vm_prot.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/cpu_threads.h>
#include <i386/perfmon.h>
#include <i386/machine_routines.h>
#include <i386/AT386/bbclock_entries.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
#define MAX(a,b)	(((a)>(b))?(a):(b))
#define MIN(a,b)	(((a)>(b))?(b):(a))

#define NSEC_PER_HZ	(NSEC_PER_SEC / 100)	/* nsec per tick */

#define UI_CPUFREQ_ROUNDING_FACTOR	10000000
int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
	mach_timespec_t		*cur_time);

kern_return_t	sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);

void		sysclk_setalarm(
	mach_timespec_t		*alarm_time);

/*
 * Lists of clock routines.
 */
struct clock_ops sysclk_ops = {
	sysclk_config,		sysclk_init,
	sysclk_gettime,		0,
	sysclk_getattr,		0,
	sysclk_setalarm,
};
int		calend_config(void);

int		calend_init(void);

kern_return_t	calend_gettime(
	mach_timespec_t		*cur_time);

kern_return_t	calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);

struct clock_ops calend_ops = {
	calend_config,		calend_init,
	calend_gettime,		0,
	calend_getattr,		0,
	0,
};
/* local data declarations */

static clock_timer_func_t	rtclock_timer_expire;

static timer_call_data_t	rtclock_alarm_timer;

static void	rtclock_alarm_expire(
			timer_call_param_t	p0,
			timer_call_param_t	p1);

static struct {
	mach_timespec_t		calend_offset;
	boolean_t		calend_is_set;

	int64_t			calend_adjtotal;
	int32_t			calend_adjdelta;

	uint32_t		boottime;

	mach_timebase_info_data_t	timebase_const;

	decl_simple_lock_data(,lock)	/* real-time clock device lock */
} rtclock;

boolean_t	rtc_initialized = FALSE;
clock_res_t	rtc_intr_nsec = NSEC_PER_HZ;	/* interrupt res */
uint64_t	rtc_cycle_count;	/* clocks in 1/20th second */
uint64_t	rtc_cyc_per_sec;	/* processor cycles per sec */
uint32_t	rtc_boot_frequency;	/* provided by 1st speed-step */
uint32_t	rtc_quant_scale;	/* clock to nanos multiplier */
uint32_t	rtc_quant_shift;	/* clock to nanos right shift */
uint64_t	rtc_decrementer_min;

static mach_timebase_info_data_t	rtc_lapic_scale; /* nsec to lapic count */
/*
 * Macros to lock/unlock real-time clock data.
 */
#define RTC_INTRS_OFF(s)		\
	(s) = splclock()

#define RTC_INTRS_ON(s)			\
	splx(s)

#define RTC_LOCK(s)			\
MACRO_BEGIN				\
	RTC_INTRS_OFF(s);		\
	simple_lock(&rtclock.lock);	\
MACRO_END

#define RTC_UNLOCK(s)			\
MACRO_BEGIN				\
	simple_unlock(&rtclock.lock);	\
	RTC_INTRS_ON(s);		\
MACRO_END
/*
 * i8254 control.  ** MONUMENT **
 *
 * The i8254 is a traditional PC device with some arbitrary characteristics.
 * Basically, it is a register that counts at a fixed rate and can be
 * programmed to generate an interrupt every N counts. The count rate is
 * clknum counts per sec (see pit.h), historically 1193167=14.318MHz/12
 * but the more accurate value is 1193182=14.31818MHz/12. [14.31818 MHz being
 * the master crystal oscillator reference frequency since the very first PC.]
 * Various constants are computed based on this value, and we calculate
 * them at init time for execution efficiency. To obtain sufficient
 * accuracy, some of the calculations are most easily done in floating
 * point and then converted to int.
 */
static uint64_t	rtc_set_cyc_per_sec(uint64_t cycles);
uint64_t	rtc_nanotime_read(void);
/*
 * create_mul_quant_GHZ
 *	create a constant used to multiply the TSC by to convert to nanoseconds.
 *	This is a 32-bit number and the TSC *MUST* have a frequency higher than
 *	1000MHz for this routine to work.
 *
 * The theory here is that we know how many TSCs-per-sec the processor runs at.
 * Normally to convert this to nanoseconds you would multiply the current
 * timestamp by 1000000000 (a billion) then divide by TSCs-per-sec.
 * Unfortunately the TSC is 64 bits which would leave us with 96-bit
 * intermediate results from the multiply that must then be divided:
 *
 *	uint96 = tsc * numer
 *	nanos = uint96 / denom
 *
 * Instead, we create this quant constant and it becomes the numerator;
 * the denominator can then be 0x100000000 which makes our division as simple as
 * forgetting the lower 32 bits of the result. We can also pass this number to
 * user space as the numer and pass 0xFFFFFFFF (RTC_FAST_DENOM) as the denom to
 * convert raw counts to nanos. The difference is so small as to be
 * undetectable by anything.
 *
 * Unfortunately we cannot do this for sub-GHz processors. In this case, all
 * we do is pass the CPU speed in raw as the denom and we pass in 1000000000
 * as the numerator. No shortcuts allowed.
 */
#define RTC_FAST_DENOM	0xFFFFFFFF
inline static uint32_t
create_mul_quant_GHZ(int shift, uint32_t quant)
{
	return (uint32_t)((((uint64_t)NSEC_PER_SEC/20) << shift) / quant);
}
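
/*
 * Worked example (illustrative only, assuming a hypothetical 2GHz
 * processor): timeRDTSC() would measure quant = 100,000,000 TSC ticks
 * per 1/20 second. With shift = 32:
 *
 *	scale = ((1,000,000,000/20) << 32) / 100,000,000 = 0x80000000
 *	nanos = (tsc * 0x80000000) >> 32 = tsc / 2
 *
 * which is exact: a 2GHz clock ticks twice per nanosecond.
 */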
/*
 * This routine takes a value of raw TSC ticks and applies the passed mul_quant
 * generated by create_mul_quant(). This is our internal routine for creating
 * nanoseconds.
 * Since we don't really have uint96_t this routine basically does this:
 *	uint96_t intermediate = (*value) * scale
 *	return (intermediate >> 32)
 */
inline static uint64_t
fast_get_nano_from_abs(uint64_t value, int scale)
{
	asm ("	movl	%%edx,%%esi	\n\t"
	     "	mull	%%ecx		\n\t"
	     "	movl	%%edx,%%edi	\n\t"
	     "	movl	%%esi,%%eax	\n\t"
	     "	mull	%%ecx		\n\t"
	     "	xorl	%%ecx,%%ecx	\n\t"
	     "	addl	%%edi,%%eax	\n\t"
	     "	adcl	%%ecx,%%edx	    "
		: "+A" (value)
		: "c" (scale)
		: "%esi", "%edi");
	return value;
}
/*
 * This routine basically does this...
 *	ts.tv_sec = nanos / 1000000000;		create seconds
 *	ts.tv_nsec = nanos % 1000000000;	create remainder nanos
 */
inline static mach_timespec_t
nanos_to_timespec(uint64_t nanos)
{
	union {
		mach_timespec_t	ts;
		uint64_t	u64;
	} ret;

	ret.u64 = nanos;
	asm volatile("divl %1" : "+A" (ret.u64) : "r" (NSEC_PER_SEC));
	return ret.ts;
}
/*
 * The following two routines perform the 96-bit arithmetic we need to
 * convert generic absolute<->nanoseconds.
 * The multiply routine takes a uint64_t and a uint32_t and returns the result
 * in a uint32_t[3] array.
 * The divide routine takes this uint32_t[3] array and divides it by a uint32_t
 * returning a uint64_t.
 */
inline static void
longmul(uint64_t *abstime, uint32_t multiplicand, uint32_t *result)
{
	asm volatile(
		" pushl	%%ebx		\n\t"
		" movl	%%eax,%%ebx	\n\t"
		" movl	(%%eax),%%eax	\n\t"
		" mull	%%ecx		\n\t"
		" xchg	%%eax,%%ebx	\n\t"
		" pushl	%%edx		\n\t"
		" movl	4(%%eax),%%eax	\n\t"
		" mull	%%ecx		\n\t"
		" movl	%2,%%ecx	\n\t"
		" movl	%%ebx,(%%ecx)	\n\t"
		" popl	%%ebx		\n\t"
		" addl	%%ebx,%%eax	\n\t"
		" popl	%%ebx		\n\t"
		" movl	%%eax,4(%%ecx)	\n\t"
		" adcl	$0,%%edx	\n\t"
		" movl	%%edx,8(%%ecx)	// and save it"
		: : "a"(abstime), "c"(multiplicand), "m"(result));
}

inline static uint64_t
longdiv(uint32_t *numer, uint32_t denom)
{
	uint64_t	result;

	asm volatile(
		" pushl	%%ebx		\n\t"
		" movl	%%eax,%%ebx	\n\t"
		" movl	8(%%eax),%%edx	\n\t"
		" movl	4(%%eax),%%eax	\n\t"
		" divl	%%ecx		\n\t"
		" xchg	%%ebx,%%eax	\n\t"
		" movl	(%%eax),%%eax	\n\t"
		" divl	%%ecx		\n\t"
		" xchg	%%ebx,%%edx	\n\t"
		" popl	%%ebx		\n\t"
		: "=A"(result) : "a"(numer), "c"(denom));
	return result;
}
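
/*
 * Illustrative sketch, not original source: the same 96-bit multiply and
 * divide expressed in portable C, using the little-endian uint32_t[3]
 * layout that longmul()/longdiv() above share. Like the divl-based
 * longdiv(), longdiv_c() assumes the quotient fits in 64 bits (i.e.
 * numer[2] < denom).
 */
#if 0
static void
longmul_c(const uint64_t *abstime, uint32_t multiplicand, uint32_t *result)
{
	uint64_t lo = (uint64_t)(uint32_t)*abstime * multiplicand;
	uint64_t hi = (*abstime >> 32) * multiplicand;

	result[0] = (uint32_t)lo;
	hi += lo >> 32;			/* carry the low product upward */
	result[1] = (uint32_t)hi;
	result[2] = (uint32_t)(hi >> 32);
}

static uint64_t
longdiv_c(const uint32_t *numer, uint32_t denom)
{
	/* divide the upper 64 bits, then remainder:lower 32 bits */
	uint64_t hi  = ((uint64_t)numer[2] << 32) | numer[1];
	uint64_t rem = hi % denom;
	uint64_t lo  = (rem << 32) | numer[0];

	return ((hi / denom) << 32) | (lo / denom);
}
#endif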
/*
 * Enable or disable timer 2.
 * Port 0x61 controls timer 2:
 *   bit 0 gates the clock,
 *   bit 1 gates output to speaker.
 */
inline static void
enable_PIT2(void)
{
	asm volatile(
		" inb	$0x61,%%al	\n\t"
		" and	$0xFC,%%al	\n\t"
		" or	$1,%%al		\n\t"
		" outb	%%al,$0x61	\n\t"
		: : : "%al");
}

inline static void
disable_PIT2(void)
{
	asm volatile(
		" inb	$0x61,%%al	\n\t"
		" and	$0xFC,%%al	\n\t"
		" outb	%%al,$0x61	\n\t"
		: : : "%al");
}
/*
 * First, tell the clock we are going to write 16 bits to the counter
 *   and enable one-shot mode (command 0xB8 to port 0x43).
 * Then write the two bytes into the PIT2 clock register (port 0x42).
 * Loop until the value is "realized" in the clock;
 * this happens on the next tick.
 */
inline static void
set_PIT2(unsigned int value)
{
	asm volatile(
		" movb	$0xB8,%%al	\n\t"
		" outb	%%al,$0x43	\n\t"
		" movb	%%dl,%%al	\n\t"
		" outb	%%al,$0x42	\n\t"
		" movb	%%dh,%%al	\n\t"
		" outb	%%al,$0x42	\n"
		"1:	inb	$0x42,%%al	\n\t"
		"	inb	$0x42,%%al	\n\t"
		"	cmp	%%al,%%dh	\n\t"
		"	jne	1b		"
		: : "d"(value) : "%al");
}
inline static uint64_t
get_PIT2(unsigned int *value)
{
	register uint64_t	result;

	/*
	 * This routine first latches the time (command 0x80 to port 0x43),
	 * then gets the time stamp so we know how long the read will take later.
	 * Read (from port 0x42) and return the current value of the timer.
	 */
	asm volatile(
		" xorl	%%ecx,%%ecx	\n\t"
		" movb	$0x80,%%al	\n\t"
		" outb	%%al,$0x43	\n\t"
		" rdtsc			\n\t"
		" pushl	%%eax		\n\t"
		" inb	$0x42,%%al	\n\t"
		" movb	%%al,%%cl	\n\t"
		" inb	$0x42,%%al	\n\t"
		" movb	%%al,%%ch	\n\t"
		" popl	%%eax		"
		: "=A"(result), "=c"(*value));
	return result;
}
/*
 * This routine sets up PIT counter 2 to count down 1/20 of a second.
 * It pauses until the value is latched in the counter
 * and then reads the time stamp counter to return to the caller.
 */
static uint64_t
timeRDTSC(void)
{
	int		attempts = 0;
	uint64_t	latchTime;
	uint64_t	saveTime, intermediate;
	unsigned int	timerValue, lastValue;
	boolean_t	int_enabled;
	/*
	 * Table of correction factors to account for
	 *	- timer counter quantization errors, and
	 *	- undercounts 0..5
	 */
#define SAMPLE_CLKS_EXACT	(((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT		((int) CLKNUM / 20)
#define SAMPLE_NSECS		(2000000000LL)
#define SAMPLE_MULTIPLIER	(((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)		((uint64_t)((x) + 0.5))
	uint64_t	scale[6] = {
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
	};

	int_enabled = ml_set_interrupts_enabled(FALSE);

restart:
	if (attempts >= 2)
		panic("timeRDTSC() calibration failed with %d attempts\n",
		      attempts);
	attempts++;
	enable_PIT2();		// turn on PIT2
	set_PIT2(0);		// reset timer 2 to be zero
	latchTime = rdtsc64();	// get the time stamp to time
	latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
	set_PIT2(SAMPLE_CLKS_INT);	// set up the timer for (almost) 1/20th a second
	saveTime = rdtsc64();	// now time how long a 20th a second is...
	get_PIT2(&lastValue);
	get_PIT2(&lastValue);	// read twice, first value may be unreliable
	do {
		intermediate = get_PIT2(&timerValue);
		if (timerValue > lastValue) {
			printf("Hey we are going backwards! %u -> %u, restarting timing\n",
			       timerValue, lastValue);
			set_PIT2(0);
			disable_PIT2();
			goto restart;
		}
		lastValue = timerValue;
	} while (timerValue > 5);
	kprintf("timerValue   %d\n", timerValue);
	kprintf("intermediate 0x%016llx\n", intermediate);
	kprintf("saveTime     0x%016llx\n", saveTime);

	intermediate -= saveTime;		// raw count for about 1/20 second
	intermediate *= scale[timerValue];	// rescale measured time spent
	intermediate /= SAMPLE_NSECS;		// so it's exactly 1/20 a second
	intermediate += latchTime;		// add on our save fudge

	set_PIT2(0);		// reset timer 2 to be zero
	disable_PIT2();		// turn off PIT 2

	ml_set_interrupts_enabled(int_enabled);
	return intermediate;
}
static uint64_t
tsc_to_nanoseconds(uint64_t abstime)
{
	uint32_t	numer;
	uint32_t	denom;
	uint32_t	intermediate[3];

	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	if (denom == RTC_FAST_DENOM) {
		abstime = fast_get_nano_from_abs(abstime, numer);
	} else {
		longmul(&abstime, numer, intermediate);
		abstime = longdiv(intermediate, denom);
	}
	return abstime;
}
inline static mach_timespec_t
tsc_to_timespec(void)
{
	uint64_t	currNanos;

	currNanos = rtc_nanotime_read();
	return nanos_to_timespec(currNanos);
}
#define DECREMENTER_MAX		UINT_MAX
static uint32_t
deadline_to_decrementer(
	uint64_t	deadline,
	uint64_t	now)
{
	uint64_t	delta;

	if (deadline <= now)
		return rtc_decrementer_min;
	else {
		delta = deadline - now;
		return MIN(MAX(rtc_decrementer_min, delta), DECREMENTER_MAX);
	}
}
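
/*
 * Illustrative numbers, assuming rtc_decrementer_min has been computed
 * for 1usec (1000ns) as in sysclk_init(): a deadline 5msec in the future
 * yields a decrement of 5,000,000ns; a deadline already in the past
 * clamps up to 1000ns; a deadline more than UINT_MAX nanoseconds out
 * clamps down to DECREMENTER_MAX.
 */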
static inline uint64_t
lapic_time_countdown(uint32_t initial_count)
{
	boolean_t		state;
	uint64_t		start_time;
	uint64_t		stop_time;
	lapic_timer_count_t	count;

	state = ml_set_interrupts_enabled(FALSE);
	lapic_set_timer(FALSE, one_shot, divide_by_1, initial_count);
	start_time = rdtsc64();
	do {
		lapic_get_timer(NULL, NULL, NULL, &count);
	} while (count > 0);
	stop_time = rdtsc64();
	ml_set_interrupts_enabled(state);

	return tsc_to_nanoseconds(stop_time - start_time);
}
static void
rtc_lapic_timer_calibrate(void)
{
	uint32_t	nsecs;
	uint64_t	countdown;

	if (!(cpuid_features() & CPUID_FEATURE_APIC))
		return;

	/*
	 * Set the local apic timer counting down to zero without an interrupt.
	 * Use the timestamp to calculate how long this takes.
	 */
	nsecs = (uint32_t) lapic_time_countdown(rtc_intr_nsec);

	/*
	 * Compute a countdown ratio for a given time in nanoseconds.
	 * That is, countdown = time * numer / denom.
	 */
	countdown = (uint64_t)rtc_intr_nsec * (uint64_t)rtc_intr_nsec / nsecs;

	nsecs = (uint32_t) lapic_time_countdown((uint32_t) countdown);

	rtc_lapic_scale.numer = countdown;
	rtc_lapic_scale.denom = nsecs;

	kprintf("rtc_lapic_timer_calibrate() scale: %d/%d\n",
		(uint32_t) countdown, nsecs);
}
static void
rtc_lapic_set_timer(uint32_t interval)
{
	uint64_t	count;

	assert(rtc_lapic_scale.denom);

	count = interval * (uint64_t) rtc_lapic_scale.numer;
	count /= rtc_lapic_scale.denom;

	lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);
}
static void
rtc_lapic_start_ticking(void)
{
	uint64_t	abstime;
	uint64_t	first_tick;
	uint64_t	decr;

	abstime = mach_absolute_time();
	first_tick = abstime + NSEC_PER_HZ;
	current_cpu_datap()->cpu_rtc_tick_deadline = first_tick;
	decr = deadline_to_decrementer(first_tick, abstime);
	rtc_lapic_set_timer(decr);
}
/*
 * Configure the real-time clock device. Return success (1)
 * or failure (0).
 */
int
sysclk_config(void)
{
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return (1);
	}
	mp_enable_preemption();

	timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

	simple_lock_init(&rtclock.lock, 0);

	return (1);
}
/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (tsc) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * However, the cpu clock rate is not only platform-dependent but can change
 * (speed-step) dynamically. Hence tsc is converted into nanoseconds which is
 * identical to mach_absolute_time. The conversion of tsc to nanoseconds is
 * encapsulated by nanotime.
 *
 * The kernel maintains nanotime information recording:
 *	- the current ratio of tsc to nanoseconds
 *	  with this ratio expressed as a 32-bit scale and shift
 *	  (power of 2 divider);
 *	- the tsc (step_tsc) and nanotime (step_ns) at which the current
 *	  ratio (clock speed) began.
 * So a tsc value can be converted to nanotime by:
 *
 *	nanotime = (((tsc - step_tsc)*scale) >> shift) + step_ns
 *
 * In general, (tsc - step_tsc) is a 64-bit quantity with the scaling
 * involving a 96-bit intermediate value. However, by saving the converted
 * values at each tick (or at any intervening speed-step) - base_tsc and
 * base_ns - we can perform conversions relative to these and be assured that
 * (tsc - base_tsc) is 32-bit. Hence:
 *
 *	fast_nanotime = (((tsc - base_tsc)*scale) >> shift) + base_ns
 *
 * The tuple {base_tsc, base_ns, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read. A duplicate check_tsc is
 * appended so that the consistency of the read can be verified. Note that
 * this scheme is essential for MP systems in which the commpage is updated
 * by the master cpu but may be read concurrently by other cpus.
 */
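
/*
 * Illustrative sketch of how a userspace nanotime routine could consume
 * the exported tuple. Field names follow the commpage_nanotime_t usage
 * below; nt_check_tsc is a hypothetical name for the duplicate check_tsc,
 * and rdtsc64() stands in for a raw rdtsc read. The re-check is what
 * makes the lock-free snapshot safe against a concurrent update by the
 * master cpu.
 */
#if 0
static uint64_t
example_commpage_nanotime(volatile commpage_nanotime_t *cp)
{
	uint64_t	base_tsc, base_ns, tsc;
	uint32_t	scale, shift;

	do {
		base_tsc = cp->nt_base_tsc;
		base_ns  = cp->nt_base_ns;
		scale    = cp->nt_scale;
		shift    = cp->nt_shift;
		tsc      = rdtsc64();
		/* retry if the master cpu updated the tuple mid-read */
	} while (base_tsc != cp->nt_check_tsc);

	return base_ns + (((tsc - base_tsc) * scale) >> shift);
}
#endif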
static void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
	commpage_nanotime_t	cp_nanotime;

	/* Only the master cpu updates the commpage */
	if (cpu_number() != master_cpu)
		return;

	cp_nanotime.nt_base_tsc = rntp->rnt_tsc;
	cp_nanotime.nt_base_ns = rntp->rnt_nanos;
	cp_nanotime.nt_scale = rntp->rnt_scale;
	cp_nanotime.nt_shift = rntp->rnt_shift;

	commpage_set_nanotime(&cp_nanotime);
}
static void
rtc_nanotime_init(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	rtc_nanotime_t	*master_rntp = &cpu_datap(master_cpu)->cpu_rtc_nanotime;

	if (cpu_number() == master_cpu) {
		rntp->rnt_tsc = rdtsc64();
		rntp->rnt_nanos = tsc_to_nanoseconds(rntp->rnt_tsc);
		rntp->rnt_scale = rtc_quant_scale;
		rntp->rnt_shift = rtc_quant_shift;
		rntp->rnt_step_tsc = 0ULL;
		rntp->rnt_step_nanos = 0ULL;
	} else {
		/*
		 * Copy master processor's nanotime info.
		 * Loop required in case this changes while copying.
		 */
		do {
			*rntp = *master_rntp;
		} while (rntp->rnt_tsc != master_rntp->rnt_tsc);
	}
}
static inline void
_rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t tsc)
{
	uint64_t	tsc_delta;
	uint64_t	ns_delta;

	tsc_delta = tsc - rntp->rnt_step_tsc;
	ns_delta = tsc_to_nanoseconds(tsc_delta);
	rntp->rnt_nanos = rntp->rnt_step_nanos + ns_delta;
	rntp->rnt_tsc = tsc;
}
static void
rtc_nanotime_update(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	_rtc_nanotime_update(rntp, rdtsc64());
	rtc_nanotime_set_commpage(rntp);
}
static void
rtc_nanotime_scale_update(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	uint64_t	tsc = rdtsc64();

	assert(!ml_get_interrupts_enabled());

	/*
	 * Update time based on past scale.
	 */
	_rtc_nanotime_update(rntp, tsc);

	/*
	 * Update scale and timestamp this update.
	 */
	rntp->rnt_scale = rtc_quant_scale;
	rntp->rnt_shift = rtc_quant_shift;
	rntp->rnt_step_tsc = rntp->rnt_tsc;
	rntp->rnt_step_nanos = rntp->rnt_nanos;

	/* Export update to userland */
	rtc_nanotime_set_commpage(rntp);
}
static uint64_t
_rtc_nanotime_read(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	uint64_t	rnt_tsc;
	uint32_t	rnt_scale;
	uint32_t	rnt_shift;
	uint64_t	rnt_nanos;
	uint64_t	tsc;
	uint64_t	tsc_delta;

	rnt_scale = rntp->rnt_scale;
	if (rnt_scale == 0)
		return 0ULL;

	rnt_shift = rntp->rnt_shift;
	rnt_nanos = rntp->rnt_nanos;
	rnt_tsc = rntp->rnt_tsc;
	tsc = rdtsc64();

	tsc_delta = tsc - rnt_tsc;
	if ((tsc_delta >> 32) != 0)
		return rnt_nanos + tsc_to_nanoseconds(tsc_delta);

	/* Let the compiler optimize(?): */
	if (rnt_shift == 32)
		return rnt_nanos + ((tsc_delta * rnt_scale) >> 32);
	else
		return rnt_nanos + ((tsc_delta * rnt_scale) >> rnt_shift);
}
uint64_t
rtc_nanotime_read(void)
{
	uint64_t	result;
	uint64_t	rnt_tsc;
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	/*
	 * Use timestamp to ensure the uptime record isn't changed.
	 * This avoids disabling interrupts.
	 * And note this is a per-cpu structure hence no locking.
	 */
	do {
		rnt_tsc = rntp->rnt_tsc;
		result = _rtc_nanotime_read();
	} while (rnt_tsc != rntp->rnt_tsc);

	return result;
}
/*
 * This function is called by the speed-step driver when a
 * change of cpu clock frequency is about to occur.
 * The scale is not changed until rtc_clock_stepped() is called.
 * Between these times there is an uncertainty in exactly when
 * the change takes effect. FIXME: by using another timing source
 * we could eliminate this error.
 */
void
rtc_clock_stepping(__unused uint32_t new_frequency,
		   __unused uint32_t old_frequency)
{
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);
	rtc_nanotime_scale_update();
	ml_set_interrupts_enabled(istate);
}
/*
 * This function is called by the speed-step driver when a
 * change of cpu clock frequency has just occurred. This change
 * is expressed as a ratio relative to the boot clock rate.
 */
void
rtc_clock_stepped(uint32_t new_frequency, uint32_t old_frequency)
{
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);
	if (rtc_boot_frequency == 0) {
		/*
		 * At the first ever stepping, old frequency is the real
		 * initial clock rate. This step and all others are based
		 * relative to this initial frequency at which the tsc
		 * calibration was made. Hence we must remember this base
		 * frequency as reference.
		 */
		rtc_boot_frequency = old_frequency;
	}
	rtc_set_cyc_per_sec(rtc_cycle_count * new_frequency /
				rtc_boot_frequency);
	rtc_nanotime_scale_update();
	ml_set_interrupts_enabled(istate);
}
/*
 * rtc_sleep_wakeup() is called from acpi on awakening from an S3 sleep
 */
void
rtc_sleep_wakeup(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);

	/*
	 * The timestamp counter will have been reset
	 * but nanotime (uptime) marches onward.
	 * We assume that we're still at the former cpu frequency.
	 */
	rntp->rnt_tsc = rdtsc64();
	rntp->rnt_step_tsc = 0ULL;
	rntp->rnt_step_nanos = rntp->rnt_nanos;
	rtc_nanotime_set_commpage(rntp);

	/* Restart tick interrupts from the LAPIC timer */
	rtc_lapic_start_ticking();

	ml_set_interrupts_enabled(istate);
}
/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
sysclk_init(void)
{
	uint64_t	cycles;

	mp_disable_preemption();
	if (cpu_number() == master_cpu) {
		/*
		 * Perform calibration.
		 * The PIT is used as the reference to compute how many
		 * TSC counts (cpu clock cycles) occur per second.
		 */
		rtc_cycle_count = timeRDTSC();
		cycles = rtc_set_cyc_per_sec(rtc_cycle_count);

		/*
		 * Set min/max to actual.
		 * ACPI may update these later if speed-stepping is detected.
		 */
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
		printf("[RTCLOCK] frequency %llu (%llu)\n",
		       cycles, rtc_cyc_per_sec);

		rtc_lapic_timer_calibrate();

		/* Minimum interval is 1usec */
		rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC,
							      0ULL);
		/* Point LAPIC interrupts to hardclock() */
		lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

		clock_timebase_init();
		rtc_initialized = TRUE;
	}

	rtc_nanotime_init();

	rtc_lapic_start_ticking();

	mp_enable_preemption();

	return (1);
}
/*
 * Get the clock device time. This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 */
static kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*cur_time)	/* OUT */
{
	*cur_time = tsc_to_timespec();
	return (KERN_SUCCESS);
}

kern_return_t
sysclk_gettime(
	mach_timespec_t	*cur_time)	/* OUT */
{
	return sysclk_gettime_internal(cur_time);
}

void
sysclk_gettime_interrupts_disabled(
	mach_timespec_t	*cur_time)	/* OUT */
{
	(void) sysclk_gettime_internal(cur_time);
}
// Code to calculate how many processor cycles are in a second...

static uint64_t
rtc_set_cyc_per_sec(uint64_t cycles)
{
	if (cycles > (NSEC_PER_SEC/20)) {
		// we can use just a "fast" multiply to get nanos
		rtc_quant_shift = 32;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
		rtclock.timebase_const.numer = rtc_quant_scale;	// timeRDTSC is 1/20
		rtclock.timebase_const.denom = RTC_FAST_DENOM;
	} else {
		rtc_quant_shift = 26;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
		rtclock.timebase_const.numer = NSEC_PER_SEC/20;	// timeRDTSC is 1/20
		rtclock.timebase_const.denom = cycles;
	}
	rtc_cyc_per_sec = cycles*20;	// multiply it by 20 and we are done..
	// BUT we also want to calculate...

	cycles = ((rtc_cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
			/ UI_CPUFREQ_ROUNDING_FACTOR)
				* UI_CPUFREQ_ROUNDING_FACTOR;

	/*
	 * Set current measured speed.
	 */
	if (cycles >= 0x100000000ULL) {
		/* clamp to 32 bits for a >4GHz clock */
		gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
	} else {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
	}
	gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

	kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, rtc_cyc_per_sec);

	return (cycles);
}
void
clock_get_system_microtime(
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	mach_timespec_t	now;

	(void) sysclk_gettime_internal(&now);

	*secs = now.tv_sec;
	*microsecs = now.tv_nsec / NSEC_PER_USEC;
}

void
clock_get_system_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	mach_timespec_t	now;

	(void) sysclk_gettime_internal(&now);

	*secs = now.tv_sec;
	*nanosecs = now.tv_nsec;
}
/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	if (*count != 1)
		return (KERN_FAILURE);

	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
		*(clock_res_t *) attr = rtc_intr_nsec;
		break;

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MAXRES:
	case CLOCK_ALARM_MINRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}
/*
 * Set next alarm time for the clock device. This call
 * always resets the time to deliver an alarm for the
 * clock.
 */
void
sysclk_setalarm(
	mach_timespec_t	*alarm_time)
{
	timer_call_enter(&rtclock_alarm_timer,
			 (uint64_t) alarm_time->tv_sec * NSEC_PER_SEC
				+ alarm_time->tv_nsec);
}
/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
	return bbc_config();
}

/*
 * Initialize calendar clock.
 */
int
calend_init(void)
{
	return (0);
}
/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*cur_time)	/* OUT */
{
	spl_t	s;

	RTC_LOCK(s);
	if (!rtclock.calend_is_set) {
		RTC_UNLOCK(s);
		return (KERN_FAILURE);
	}

	(void) sysclk_gettime_internal(cur_time);
	ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
	RTC_UNLOCK(s);

	return (KERN_SUCCESS);
}
void
clock_get_calendar_microtime(
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	mach_timespec_t	now;

	calend_gettime(&now);

	*secs = now.tv_sec;
	*microsecs = now.tv_nsec / NSEC_PER_USEC;
}

void
clock_get_calendar_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	mach_timespec_t	now;

	calend_gettime(&now);

	*secs = now.tv_sec;
	*nanosecs = now.tv_nsec;
}
void
clock_set_calendar_microtime(
	uint32_t	secs,
	uint32_t	microsecs)
{
	mach_timespec_t	new_time, curr_time;
	uint32_t	old_offset;
	spl_t		s;

	new_time.tv_sec = secs;
	new_time.tv_nsec = microsecs * NSEC_PER_USEC;

	RTC_LOCK(s);
	old_offset = rtclock.calend_offset.tv_sec;
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset = new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.boottime += rtclock.calend_offset.tv_sec - old_offset;
	rtclock.calend_is_set = TRUE;
	RTC_UNLOCK(s);

	(void) bbc_settime(&new_time);

	host_notify_calendar_change();
}
/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	if (*count != 1)
		return (KERN_FAILURE);

	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
		*(clock_res_t *) attr = rtc_intr_nsec;
		break;

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}
#define tickadj	(40*NSEC_PER_USEC)	/* "standard" skew, ns / tick */
#define bigadj	(NSEC_PER_SEC)		/* use 10x skew above bigadj ns */
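
/*
 * Illustrative numbers, assuming the constants above: a requested
 * adjustment of +0.5sec is below bigadj, so it is skewed in at tickadj
 * (40usec) per 10msec tick and completes in about 125 seconds; a +10sec
 * adjustment exceeds bigadj and is skewed 10x faster, at 400usec per tick.
 */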
uint32_t
clock_set_calendar_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	int64_t		total, ototal;
	uint32_t	interval = 0;
	spl_t		s;

	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	RTC_LOCK(s);
	ototal = rtclock.calend_adjtotal;

	if (total != 0) {
		int32_t delta = tickadj;

		if (total > 0) {
			if (total > bigadj)
				delta *= 10;
			if (delta > total)
				delta = total;
		} else {
			if (total < -bigadj)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;
		}

		rtclock.calend_adjtotal = total;
		rtclock.calend_adjdelta = delta;

		interval = NSEC_PER_HZ;
	} else
		rtclock.calend_adjdelta = rtclock.calend_adjtotal = 0;

	RTC_UNLOCK(s);

	if (ototal == 0)
		*secs = *microsecs = 0;
	else {
		*secs = ototal / NSEC_PER_SEC;
		*microsecs = ototal % NSEC_PER_SEC;
	}

	return (interval);
}
uint32_t
clock_adjust_calendar(void)
{
	uint32_t	interval = 0;
	int32_t		delta;
	spl_t		s;

	RTC_LOCK(s);
	delta = rtclock.calend_adjdelta;
	ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, delta);

	rtclock.calend_adjtotal -= delta;

	if (delta > 0) {
		if (delta > rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	} else if (delta < 0) {
		if (delta < rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}

	if (rtclock.calend_adjdelta != 0)
		interval = NSEC_PER_HZ;

	RTC_UNLOCK(s);

	return (interval);
}
void
clock_initialize_calendar(void)
{
	mach_timespec_t	bbc_time, curr_time;
	spl_t		s;

	if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
		return;

	RTC_LOCK(s);
	if (rtclock.boottime == 0)
		rtclock.boottime = bbc_time.tv_sec;
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset = bbc_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	RTC_UNLOCK(s);

	host_notify_calendar_change();
}
void
clock_get_boottime_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	*secs = rtclock.boottime;
	*nanosecs = 0;
}
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	info->numer = info->denom = 1;
}
void
clock_set_timer_deadline(
	uint64_t	deadline)
{
	spl_t		s;
	cpu_data_t	*pp = current_cpu_datap();
	rtclock_timer_t	*mytimer = &pp->cpu_rtc_timer;
	uint64_t	abstime;
	uint64_t	decr;

	assert(get_preemption_level() > 0);
	assert(rtclock_timer_expire);

	s = splclock();
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if (!mytimer->has_expired) {
		abstime = mach_absolute_time();
		if (mytimer->deadline < pp->cpu_rtc_tick_deadline) {
			decr = deadline_to_decrementer(mytimer->deadline,
						       abstime);
			rtc_lapic_set_timer(decr);
			pp->cpu_rtc_intr_deadline = mytimer->deadline;

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) |
					DBG_FUNC_NONE, decr, 2, 0, 0, 0);
		}
	}
	splx(s);
}
void
clock_set_timer_func(
	clock_timer_func_t	func)
{
	if (rtclock_timer_expire == NULL)
		rtclock_timer_expire = func;
}
/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(struct i386_interrupt_state *regs)
{
	uint64_t	abstime;
	uint32_t	latency;
	uint64_t	decr;
	uint64_t	decr_tick;
	uint64_t	decr_timer;
	cpu_data_t	*pp = current_cpu_datap();
	rtclock_timer_t	*mytimer = &pp->cpu_rtc_timer;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	abstime = _rtc_nanotime_read();
	latency = (uint32_t)(abstime - pp->cpu_rtc_intr_deadline);
	if (pp->cpu_rtc_tick_deadline <= abstime) {
		rtc_nanotime_update();
		clock_deadline_for_periodic_event(
			NSEC_PER_HZ, abstime, &pp->cpu_rtc_tick_deadline);
		hertz_tick(
#if STAT_TIME
			   NSEC_PER_HZ,
#endif
			   (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0),
			   regs->eip);
	}

	abstime = _rtc_nanotime_read();
	if (mytimer->is_set && mytimer->deadline <= abstime) {
		mytimer->has_expired = TRUE;
		mytimer->is_set = FALSE;
		(*rtclock_timer_expire)(abstime);
		assert(!ml_get_interrupts_enabled());
		mytimer->has_expired = FALSE;
	}

	/* Log the interrupt service latency (-ve value expected by tool) */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		-latency, (uint32_t)regs->eip, 0, 0, 0);

	abstime = _rtc_nanotime_read();
	decr_tick = deadline_to_decrementer(pp->cpu_rtc_tick_deadline, abstime);
	decr_timer = (mytimer->is_set) ?
			deadline_to_decrementer(mytimer->deadline, abstime) :
			DECREMENTER_MAX;
	decr = MIN(decr_tick, decr_timer);
	pp->cpu_rtc_intr_deadline = abstime + decr;

	rtc_lapic_set_timer(decr);

	/* Log the new decrementer value */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE,
		decr, 3, 0, 0, 0);
}
static void
rtclock_alarm_expire(
	__unused timer_call_param_t	p0,
	__unused timer_call_param_t	p1)
{
	mach_timespec_t	clock_time;

	(void) sysclk_gettime_internal(&clock_time);

	clock_alarm_intr(SYSTEM_CLOCK, &clock_time);
}
void
clock_get_uptime(
	uint64_t	*result)
{
	*result = rtc_nanotime_read();
}

uint64_t
mach_absolute_time(void)
{
	return rtc_nanotime_read();
}
void
absolutetime_to_microtime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint32_t	remain;

	asm volatile(
		"divl %3"
			: "=a" (*secs), "=d" (remain)
			: "A" (abstime), "r" (NSEC_PER_SEC));
	asm volatile(
		"divl %3"
			: "=a" (*microsecs)
			: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
void
clock_interval_to_deadline(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t	abstime;

	clock_get_uptime(result);

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result += abstime;
}

void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	*result = (uint64_t)interval * scale_factor;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t	abstime,
	uint64_t	*result)
{
	clock_get_uptime(result);

	*result += abstime;
}

void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	*result = abstime;
}

void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	*result = nanoseconds;
}
void
machine_delay_until(
	uint64_t	deadline)
{
	uint64_t	now;

	do {
		now = mach_absolute_time();
	} while (now < deadline);
}