2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
35 * File: i386/rtclock.c
36 * Purpose: Routines for handling the machine dependent
37 * real-time clock. Historically, this clock is
38 * generated by the Intel 8254 Programmable Interval
39 * Timer, but local apic timers are now used for
40 * this purpose with the master time reference being
41 * the cpu clock counted by the timestamp MSR.
44 #include <platforms.h>
47 #include <mach/mach_types.h>
49 #include <kern/cpu_data.h>
50 #include <kern/cpu_number.h>
51 #include <kern/clock.h>
52 #include <kern/host_notify.h>
53 #include <kern/macro_help.h>
54 #include <kern/misc_protos.h>
56 #include <kern/assert.h>
57 #include <mach/vm_prot.h>
59 #include <vm/vm_kern.h> /* for kernel_map */
63 #include <i386/misc_protos.h>
64 #include <i386/proc_reg.h>
65 #include <i386/machine_cpu.h>
67 #include <i386/cpuid.h>
68 #include <i386/cpu_data.h>
69 #include <i386/cpu_threads.h>
70 #include <i386/perfmon.h>
71 #include <i386/machine_routines.h>
72 #include <i386/AT386/bbclock_entries.h>
73 #include <pexpert/pexpert.h>
74 #include <machine/limits.h>
75 #include <machine/commpage.h>
76 #include <sys/kdebug.h>
/*
 * Simple min/max helpers.  NOTE: arguments may be evaluated twice,
 * so avoid side effects in the operands.
 */
#define MAX(a,b)	(((a)>(b))?(a):(b))
#define MIN(a,b)	(((a)>(b))?(b):(a))

#define NSEC_PER_HZ	(NSEC_PER_SEC / 100)	/* nsec per tick */

/* Rounding granularity for the user-visible cpu frequency value */
#define UI_CPUFREQ_ROUNDING_FACTOR	10000000
85 int sysclk_config(void);
87 int sysclk_init(void);
89 kern_return_t
sysclk_gettime(
90 mach_timespec_t
*cur_time
);
92 kern_return_t
sysclk_getattr(
93 clock_flavor_t flavor
,
95 mach_msg_type_number_t
*count
);
98 mach_timespec_t
*alarm_time
);
101 * Lists of clock routines.
103 struct clock_ops sysclk_ops
= {
104 sysclk_config
, sysclk_init
,
110 int calend_config(void);
112 int calend_init(void);
114 kern_return_t
calend_gettime(
115 mach_timespec_t
*cur_time
);
117 kern_return_t
calend_getattr(
118 clock_flavor_t flavor
,
120 mach_msg_type_number_t
*count
);
122 struct clock_ops calend_ops
= {
123 calend_config
, calend_init
,
129 /* local data declarations */
131 static clock_timer_func_t rtclock_timer_expire
;
133 static timer_call_data_t rtclock_alarm_timer
;
135 static void rtclock_alarm_expire(
136 timer_call_param_t p0
,
137 timer_call_param_t p1
);
140 mach_timespec_t calend_offset
;
141 boolean_t calend_is_set
;
143 int64_t calend_adjtotal
;
144 int32_t calend_adjdelta
;
148 mach_timebase_info_data_t timebase_const
;
150 decl_simple_lock_data(,lock
) /* real-time clock device lock */
153 boolean_t rtc_initialized
= FALSE
;
154 clock_res_t rtc_intr_nsec
= NSEC_PER_HZ
; /* interrupt res */
155 uint64_t rtc_cycle_count
; /* clocks in 1/20th second */
156 uint64_t rtc_cyc_per_sec
; /* processor cycles per sec */
157 uint32_t rtc_boot_frequency
; /* provided by 1st speed-step */
158 uint32_t rtc_quant_scale
; /* clock to nanos multiplier */
159 uint32_t rtc_quant_shift
; /* clock to nanos right shift */
160 uint64_t rtc_decrementer_min
;
162 static mach_timebase_info_data_t rtc_lapic_scale
; /* nsec to lapic count */
165 * Macros to lock/unlock real-time clock data.
167 #define RTC_INTRS_OFF(s) \
170 #define RTC_INTRS_ON(s) \
173 #define RTC_LOCK(s) \
176 simple_lock(&rtclock.lock); \
179 #define RTC_UNLOCK(s) \
181 simple_unlock(&rtclock.lock); \
186 * i8254 control. ** MONUMENT **
188 * The i8254 is a traditional PC device with some arbitrary characteristics.
189 * Basically, it is a register that counts at a fixed rate and can be
190 * programmed to generate an interrupt every N counts. The count rate is
191 * clknum counts per sec (see pit.h), historically 1193167=14.318MHz/12
192 * but the more accurate value is 1193182=14.31818MHz/12. [14.31818 MHz being
193 * the master crystal oscillator reference frequency since the very first PC.]
194 * Various constants are computed based on this value, and we calculate
195 * them at init time for execution efficiency. To obtain sufficient
196 * accuracy, some of the calculation are most easily done in floating
197 * point and then converted to int.
205 static uint64_t rtc_set_cyc_per_sec(uint64_t cycles
);
206 uint64_t rtc_nanotime_read(void);
209 * create_mul_quant_GHZ
210 * create a constant used to multiply the TSC by to convert to nanoseconds.
211 * This is a 32 bit number and the TSC *MUST* have a frequency higher than
212 * 1000Mhz for this routine to work.
214 * The theory here is that we know how many TSCs-per-sec the processor runs at.
215 * Normally to convert this to nanoseconds you would multiply the current
216 * timestamp by 1000000000 (a billion) then divide by TSCs-per-sec.
 * Unfortunately the TSC is 64 bits which would leave us with 96 bit intermediate
218 * results from the multiply that must be divided by.
220 * uint96 = tsc * numer
221 * nanos = uint96 / denom
222 * Instead, we create this quant constant and it becomes the numerator,
223 * the denominator can then be 0x100000000 which makes our division as simple as
224 * forgetting the lower 32 bits of the result. We can also pass this number to
225 * user space as the numer and pass 0xFFFFFFFF (RTC_FAST_DENOM) as the denom to
226 * convert raw counts * to nanos. The difference is so small as to be
227 * undetectable by anything.
 * Unfortunately we can not do this for sub GHZ processors. In this case, all
230 * we do is pass the CPU speed in raw as the denom and we pass in 1000000000
231 * as the numerator. No short cuts allowed
233 #define RTC_FAST_DENOM 0xFFFFFFFF
234 inline static uint32_t
235 create_mul_quant_GHZ(int shift
, uint32_t quant
)
237 return (uint32_t)((((uint64_t)NSEC_PER_SEC
/20) << shift
) / quant
);
240 * This routine takes a value of raw TSC ticks and applies the passed mul_quant
241 * generated by create_mul_quant() This is our internal routine for creating
243 * Since we don't really have uint96_t this routine basically does this....
244 * uint96_t intermediate = (*value) * scale
245 * return (intermediate >> 32)
247 inline static uint64_t
248 fast_get_nano_from_abs(uint64_t value
, int scale
)
250 asm (" movl %%edx,%%esi \n\t"
252 " movl %%edx,%%edi \n\t"
253 " movl %%esi,%%eax \n\t"
255 " xorl %%ecx,%%ecx \n\t"
256 " addl %%edi,%%eax \n\t"
265 * This routine basically does this...
266 * ts.tv_sec = nanos / 1000000000; create seconds
267 * ts.tv_nsec = nanos % 1000000000; create remainder nanos
269 inline static mach_timespec_t
270 nanos_to_timespec(uint64_t nanos
)
277 asm volatile("divl %1" : "+A" (ret
.u64
) : "r" (NSEC_PER_SEC
));
282 * The following two routines perform the 96 bit arithmetic we need to
283 * convert generic absolute<->nanoseconds
284 * The multiply routine takes a uint64_t and a uint32_t and returns the result
285 * in a uint32_t[3] array.
286 * The divide routine takes this uint32_t[3] array and divides it by a uint32_t
287 * returning a uint64_t
290 longmul(uint64_t *abstime
, uint32_t multiplicand
, uint32_t *result
)
294 " movl %%eax,%%ebx \n\t"
295 " movl (%%eax),%%eax \n\t"
297 " xchg %%eax,%%ebx \n\t"
299 " movl 4(%%eax),%%eax \n\t"
301 " movl %2,%%ecx \n\t"
302 " movl %%ebx,(%%ecx) \n\t"
304 " addl %%ebx,%%eax \n\t"
306 " movl %%eax,4(%%ecx) \n\t"
307 " adcl $0,%%edx \n\t"
308 " movl %%edx,8(%%ecx) // and save it"
309 : : "a"(abstime
), "c"(multiplicand
), "m"(result
));
313 inline static uint64_t
314 longdiv(uint32_t *numer
, uint32_t denom
)
319 " movl %%eax,%%ebx \n\t"
320 " movl 8(%%eax),%%edx \n\t"
321 " movl 4(%%eax),%%eax \n\t"
323 " xchg %%ebx,%%eax \n\t"
324 " movl (%%eax),%%eax \n\t"
326 " xchg %%ebx,%%edx \n\t"
328 : "=A"(result
) : "a"(numer
),"c"(denom
));
333 * Enable or disable timer 2.
334 * Port 0x61 controls timer 2:
335 * bit 0 gates the clock,
336 * bit 1 gates output to speaker.
342 " inb $0x61,%%al \n\t"
343 " and $0xFC,%%al \n\t"
345 " outb %%al,$0x61 \n\t"
353 " inb $0x61,%%al \n\t"
354 " and $0xFC,%%al \n\t"
355 " outb %%al,$0x61 \n\t"
363 * First, tell the clock we are going to write 16 bits to the counter
364 * and enable one-shot mode (command 0xB8 to port 0x43)
365 * Then write the two bytes into the PIT2 clock register (port 0x42).
366 * Loop until the value is "realized" in the clock,
367 * this happens on the next tick.
370 " movb $0xB8,%%al \n\t"
371 " outb %%al,$0x43 \n\t"
372 " movb %%dl,%%al \n\t"
373 " outb %%al,$0x42 \n\t"
374 " movb %%dh,%%al \n\t"
375 " outb %%al,$0x42 \n"
376 "1: inb $0x42,%%al \n\t"
377 " inb $0x42,%%al \n\t"
378 " cmp %%al,%%dh \n\t"
380 : : "d"(value
) : "%al");
383 inline static uint64_t
384 get_PIT2(unsigned int *value
)
386 register uint64_t result
;
388 * This routine first latches the time (command 0x80 to port 0x43),
389 * then gets the time stamp so we know how long the read will take later.
390 * Read (from port 0x42) and return the current value of the timer.
393 " xorl %%ecx,%%ecx \n\t"
394 " movb $0x80,%%al \n\t"
395 " outb %%al,$0x43 \n\t"
398 " inb $0x42,%%al \n\t"
399 " movb %%al,%%cl \n\t"
400 " inb $0x42,%%al \n\t"
401 " movb %%al,%%ch \n\t"
403 : "=A"(result
), "=c"(*value
));
409 * This routine sets up PIT counter 2 to count down 1/20 of a second.
410 * It pauses until the value is latched in the counter
411 * and then reads the time stamp counter to return to the caller.
418 uint64_t saveTime
,intermediate
;
419 unsigned int timerValue
, lastValue
;
420 boolean_t int_enabled
;
422 * Table of correction factors to account for
423 * - timer counter quantization errors, and
426 #define SAMPLE_CLKS_EXACT (((double) CLKNUM) / 20.0)
427 #define SAMPLE_CLKS_INT ((int) CLKNUM / 20)
428 #define SAMPLE_NSECS (2000000000LL)
429 #define SAMPLE_MULTIPLIER (((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
430 #define ROUND64(x) ((uint64_t)((x) + 0.5))
431 uint64_t scale
[6] = {
432 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-0)),
433 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-1)),
434 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-2)),
435 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-3)),
436 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-4)),
437 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-5))
440 int_enabled
= ml_set_interrupts_enabled(FALSE
);
444 panic("timeRDTSC() calibation failed with %d attempts\n", attempts
);
446 enable_PIT2(); // turn on PIT2
447 set_PIT2(0); // reset timer 2 to be zero
448 latchTime
= rdtsc64(); // get the time stamp to time
449 latchTime
= get_PIT2(&timerValue
) - latchTime
; // time how long this takes
450 set_PIT2(SAMPLE_CLKS_INT
); // set up the timer for (almost) 1/20th a second
451 saveTime
= rdtsc64(); // now time how long a 20th a second is...
452 get_PIT2(&lastValue
);
453 get_PIT2(&lastValue
); // read twice, first value may be unreliable
455 intermediate
= get_PIT2(&timerValue
);
456 if (timerValue
> lastValue
) {
457 printf("Hey we are going backwards! %u -> %u, restarting timing\n",
458 timerValue
,lastValue
);
463 lastValue
= timerValue
;
464 } while (timerValue
> 5);
465 kprintf("timerValue %d\n",timerValue
);
466 kprintf("intermediate 0x%016llx\n",intermediate
);
467 kprintf("saveTime 0x%016llx\n",saveTime
);
469 intermediate
-= saveTime
; // raw count for about 1/20 second
470 intermediate
*= scale
[timerValue
]; // rescale measured time spent
471 intermediate
/= SAMPLE_NSECS
; // so its exactly 1/20 a second
472 intermediate
+= latchTime
; // add on our save fudge
474 set_PIT2(0); // reset timer 2 to be zero
475 disable_PIT2(); // turn off PIT 2
477 ml_set_interrupts_enabled(int_enabled
);
482 tsc_to_nanoseconds(uint64_t abstime
)
486 uint32_t intermediate
[3];
488 numer
= rtclock
.timebase_const
.numer
;
489 denom
= rtclock
.timebase_const
.denom
;
490 if (denom
== RTC_FAST_DENOM
) {
491 abstime
= fast_get_nano_from_abs(abstime
, numer
);
493 longmul(&abstime
, numer
, intermediate
);
494 abstime
= longdiv(intermediate
, denom
);
499 inline static mach_timespec_t
500 tsc_to_timespec(void)
503 currNanos
= rtc_nanotime_read();
504 return nanos_to_timespec(currNanos
);
507 #define DECREMENTER_MAX UINT_MAX
509 deadline_to_decrementer(
516 return rtc_decrementer_min
;
518 delta
= deadline
- now
;
519 return MIN(MAX(rtc_decrementer_min
,delta
),DECREMENTER_MAX
);
523 static inline uint64_t
524 lapic_time_countdown(uint32_t initial_count
)
529 lapic_timer_count_t count
;
531 state
= ml_set_interrupts_enabled(FALSE
);
532 lapic_set_timer(FALSE
, one_shot
, divide_by_1
, initial_count
);
533 start_time
= rdtsc64();
535 lapic_get_timer(NULL
, NULL
, NULL
, &count
);
537 stop_time
= rdtsc64();
538 ml_set_interrupts_enabled(state
);
540 return tsc_to_nanoseconds(stop_time
- start_time
);
544 rtc_lapic_timer_calibrate(void)
549 if (!(cpuid_features() & CPUID_FEATURE_APIC
))
553 * Set the local apic timer counting down to zero without an interrupt.
554 * Use the timestamp to calculate how long this takes.
556 nsecs
= (uint32_t) lapic_time_countdown(rtc_intr_nsec
);
559 * Compute a countdown ratio for a given time in nanoseconds.
560 * That is, countdown = time * numer / denom.
562 countdown
= (uint64_t)rtc_intr_nsec
* (uint64_t)rtc_intr_nsec
/ nsecs
;
564 nsecs
= (uint32_t) lapic_time_countdown((uint32_t) countdown
);
566 rtc_lapic_scale
.numer
= countdown
;
567 rtc_lapic_scale
.denom
= nsecs
;
569 kprintf("rtc_lapic_timer_calibrate() scale: %d/%d\n",
570 (uint32_t) countdown
, nsecs
);
579 assert(rtc_lapic_scale
.denom
);
581 count
= interval
* (uint64_t) rtc_lapic_scale
.numer
;
582 count
/= rtc_lapic_scale
.denom
;
584 lapic_set_timer(TRUE
, one_shot
, divide_by_1
, (uint32_t) count
);
588 rtc_lapic_start_ticking(void)
594 abstime
= mach_absolute_time();
595 first_tick
= abstime
+ NSEC_PER_HZ
;
596 current_cpu_datap()->cpu_rtc_tick_deadline
= first_tick
;
597 decr
= deadline_to_decrementer(first_tick
, abstime
);
598 rtc_lapic_set_timer(decr
);
602 * Configure the real-time clock device. Return success (1)
610 mp_disable_preemption();
611 if (cpu_number() != master_cpu
) {
612 mp_enable_preemption();
615 mp_enable_preemption();
617 timer_call_setup(&rtclock_alarm_timer
, rtclock_alarm_expire
, NULL
);
619 simple_lock_init(&rtclock
.lock
, 0);
626 * Nanotime/mach_absolutime_time
627 * -----------------------------
628 * The timestamp counter (tsc) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
630 * However, the cpu clock rate is not only platform-dependent but can change
631 * (speed-step) dynamically. Hence tsc is converted into nanoseconds which is
632 * identical to mach_absolute_time. The conversion to tsc to nanoseconds is
633 * encapsulated by nanotime.
635 * The kernel maintains nanotime information recording:
636 * - the current ratio of tsc to nanoseconds
637 * with this ratio expressed as a 32-bit scale and shift
638 * (power of 2 divider);
639 * - the tsc (step_tsc) and nanotime (step_ns) at which the current
640 * ratio (clock speed) began.
641 * So a tsc value can be converted to nanotime by:
643 * nanotime = (((tsc - step_tsc)*scale) >> shift) + step_ns
645 * In general, (tsc - step_tsc) is a 64-bit quantity with the scaling
646 * involving a 96-bit intermediate value. However, by saving the converted
647 * values at each tick (or at any intervening speed-step) - base_tsc and
648 * base_ns - we can perform conversions relative to these and be assured that
649 * (tsc - tick_tsc) is 32-bits. Hence:
651 * fast_nanotime = (((tsc - base_tsc)*scale) >> shift) + base_ns
653 * The tuple {base_tsc, base_ns, scale, shift} is exported in the commpage
654 * for the userspace nanotime routine to read. A duplicate check_tsc is
655 * appended so that the consistency of the read can be verified. Note that
656 * this scheme is essential for MP systems in which the commpage is updated
657 * by the master cpu but may be read concurrently by other cpus.
661 rtc_nanotime_set_commpage(rtc_nanotime_t
*rntp
)
663 commpage_nanotime_t cp_nanotime
;
665 /* Only the master cpu updates the commpage */
666 if (cpu_number() != master_cpu
)
669 cp_nanotime
.nt_base_tsc
= rntp
->rnt_tsc
;
670 cp_nanotime
.nt_base_ns
= rntp
->rnt_nanos
;
671 cp_nanotime
.nt_scale
= rntp
->rnt_scale
;
672 cp_nanotime
.nt_shift
= rntp
->rnt_shift
;
674 commpage_set_nanotime(&cp_nanotime
);
678 rtc_nanotime_init(void)
680 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
681 rtc_nanotime_t
*master_rntp
= &cpu_datap(master_cpu
)->cpu_rtc_nanotime
;
683 if (cpu_number() == master_cpu
) {
684 rntp
->rnt_tsc
= rdtsc64();
685 rntp
->rnt_nanos
= tsc_to_nanoseconds(rntp
->rnt_tsc
);
686 rntp
->rnt_scale
= rtc_quant_scale
;
687 rntp
->rnt_shift
= rtc_quant_shift
;
688 rntp
->rnt_step_tsc
= 0ULL;
689 rntp
->rnt_step_nanos
= 0ULL;
692 * Copy master processor's nanotime info.
693 * Loop required in case this changes while copying.
696 *rntp
= *master_rntp
;
697 } while (rntp
->rnt_tsc
!= master_rntp
->rnt_tsc
);
702 _rtc_nanotime_update(rtc_nanotime_t
*rntp
, uint64_t tsc
)
707 tsc_delta
= tsc
- rntp
->rnt_step_tsc
;
708 ns_delta
= tsc_to_nanoseconds(tsc_delta
);
709 rntp
->rnt_nanos
= rntp
->rnt_step_nanos
+ ns_delta
;
714 rtc_nanotime_update(void)
716 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
718 assert(get_preemption_level() > 0);
719 assert(!ml_get_interrupts_enabled());
721 _rtc_nanotime_update(rntp
, rdtsc64());
722 rtc_nanotime_set_commpage(rntp
);
726 rtc_nanotime_scale_update(void)
728 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
729 uint64_t tsc
= rdtsc64();
731 assert(!ml_get_interrupts_enabled());
734 * Update time based on past scale.
736 _rtc_nanotime_update(rntp
, tsc
);
739 * Update scale and timestamp this update.
741 rntp
->rnt_scale
= rtc_quant_scale
;
742 rntp
->rnt_shift
= rtc_quant_shift
;
743 rntp
->rnt_step_tsc
= rntp
->rnt_tsc
;
744 rntp
->rnt_step_nanos
= rntp
->rnt_nanos
;
746 /* Export update to userland */
747 rtc_nanotime_set_commpage(rntp
);
751 _rtc_nanotime_read(void)
753 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
761 rnt_scale
= rntp
->rnt_scale
;
765 rnt_shift
= rntp
->rnt_shift
;
766 rnt_nanos
= rntp
->rnt_nanos
;
767 rnt_tsc
= rntp
->rnt_tsc
;
770 tsc_delta
= tsc
- rnt_tsc
;
771 if ((tsc_delta
>> 32) != 0)
772 return rnt_nanos
+ tsc_to_nanoseconds(tsc_delta
);
774 /* Let the compiler optimize(?): */
776 return rnt_nanos
+ ((tsc_delta
* rnt_scale
) >> 32);
778 return rnt_nanos
+ ((tsc_delta
* rnt_scale
) >> rnt_shift
);
782 rtc_nanotime_read(void)
786 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
789 * Use timestamp to ensure the uptime record isn't changed.
790 * This avoids disabling interrupts.
791 * And not this is a per-cpu structure hence no locking.
794 rnt_tsc
= rntp
->rnt_tsc
;
795 result
= _rtc_nanotime_read();
796 } while (rnt_tsc
!= rntp
->rnt_tsc
);
803 * This function is called by the speed-step driver when a
804 * change of cpu clock frequency is about to occur.
805 * The scale is not changed until rtc_clock_stepped() is called.
 * Between these times there is uncertainty about exactly when
807 * the change takes effect. FIXME: by using another timing source
808 * we could eliminate this error.
811 rtc_clock_stepping(__unused
uint32_t new_frequency
,
812 __unused
uint32_t old_frequency
)
816 istate
= ml_set_interrupts_enabled(FALSE
);
817 rtc_nanotime_scale_update();
818 ml_set_interrupts_enabled(istate
);
822 * This function is called by the speed-step driver when a
823 * change of cpu clock frequency has just occured. This change
824 * is expressed as a ratio relative to the boot clock rate.
827 rtc_clock_stepped(uint32_t new_frequency
, uint32_t old_frequency
)
831 istate
= ml_set_interrupts_enabled(FALSE
);
832 if (rtc_boot_frequency
== 0) {
834 * At the first ever stepping, old frequency is the real
835 * initial clock rate. This step and all others are based
836 * relative to this initial frequency at which the tsc
837 * calibration was made. Hence we must remember this base
838 * frequency as reference.
840 rtc_boot_frequency
= old_frequency
;
842 rtc_set_cyc_per_sec(rtc_cycle_count
* new_frequency
/
844 rtc_nanotime_scale_update();
845 ml_set_interrupts_enabled(istate
);
849 * rtc_sleep_wakeup() is called from acpi on awakening from a S3 sleep
852 rtc_sleep_wakeup(void)
854 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
858 istate
= ml_set_interrupts_enabled(FALSE
);
862 * The timestamp counter will have been reset
863 * but nanotime (uptime) marches onward.
864 * We assume that we're still at the former cpu frequency.
866 rntp
->rnt_tsc
= rdtsc64();
867 rntp
->rnt_step_tsc
= 0ULL;
868 rntp
->rnt_step_nanos
= rntp
->rnt_nanos
;
869 rtc_nanotime_set_commpage(rntp
);
871 /* Restart tick interrupts from the LAPIC timer */
872 rtc_lapic_start_ticking();
874 ml_set_interrupts_enabled(istate
);
878 * Initialize the real-time clock device.
879 * In addition, various variables used to support the clock are initialized.
886 mp_disable_preemption();
887 if (cpu_number() == master_cpu
) {
889 * Perform calibration.
890 * The PIT is used as the reference to compute how many
891 * TCS counts (cpu clock cycles) occur per second.
893 rtc_cycle_count
= timeRDTSC();
894 cycles
= rtc_set_cyc_per_sec(rtc_cycle_count
);
897 * Set min/max to actual.
898 * ACPI may update these later if speed-stepping is detected.
900 gPEClockFrequencyInfo
.cpu_frequency_min_hz
= cycles
;
901 gPEClockFrequencyInfo
.cpu_frequency_max_hz
= cycles
;
902 printf("[RTCLOCK] frequency %llu (%llu)\n",
903 cycles
, rtc_cyc_per_sec
);
905 rtc_lapic_timer_calibrate();
907 /* Minimum interval is 1usec */
908 rtc_decrementer_min
= deadline_to_decrementer(NSEC_PER_USEC
,
910 /* Point LAPIC interrupts to hardclock() */
911 lapic_set_timer_func((i386_intr_func_t
) rtclock_intr
);
913 clock_timebase_init();
914 rtc_initialized
= TRUE
;
919 rtc_lapic_start_ticking();
921 mp_enable_preemption();
927 * Get the clock device time. This routine is responsible
928 * for converting the device's machine dependent time value
929 * into a canonical mach_timespec_t value.
932 sysclk_gettime_internal(
933 mach_timespec_t
*cur_time
) /* OUT */
935 *cur_time
= tsc_to_timespec();
936 return (KERN_SUCCESS
);
941 mach_timespec_t
*cur_time
) /* OUT */
943 return sysclk_gettime_internal(cur_time
);
947 sysclk_gettime_interrupts_disabled(
948 mach_timespec_t
*cur_time
) /* OUT */
950 (void) sysclk_gettime_internal(cur_time
);
954 // Code to calculate how many processor cycles are in a second...
957 rtc_set_cyc_per_sec(uint64_t cycles
)
960 if (cycles
> (NSEC_PER_SEC
/20)) {
961 // we can use just a "fast" multiply to get nanos
962 rtc_quant_shift
= 32;
963 rtc_quant_scale
= create_mul_quant_GHZ(rtc_quant_shift
, cycles
);
964 rtclock
.timebase_const
.numer
= rtc_quant_scale
; // timeRDTSC is 1/20
965 rtclock
.timebase_const
.denom
= RTC_FAST_DENOM
;
967 rtc_quant_shift
= 26;
968 rtc_quant_scale
= create_mul_quant_GHZ(rtc_quant_shift
, cycles
);
969 rtclock
.timebase_const
.numer
= NSEC_PER_SEC
/20; // timeRDTSC is 1/20
970 rtclock
.timebase_const
.denom
= cycles
;
972 rtc_cyc_per_sec
= cycles
*20; // multiply it by 20 and we are done..
973 // BUT we also want to calculate...
975 cycles
= ((rtc_cyc_per_sec
+ (UI_CPUFREQ_ROUNDING_FACTOR
/2))
976 / UI_CPUFREQ_ROUNDING_FACTOR
)
977 * UI_CPUFREQ_ROUNDING_FACTOR
;
980 * Set current measured speed.
982 if (cycles
>= 0x100000000ULL
) {
983 gPEClockFrequencyInfo
.cpu_clock_rate_hz
= 0xFFFFFFFFUL
;
985 gPEClockFrequencyInfo
.cpu_clock_rate_hz
= (unsigned long)cycles
;
987 gPEClockFrequencyInfo
.cpu_frequency_hz
= cycles
;
989 kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles
, rtc_cyc_per_sec
);
994 clock_get_system_microtime(
1000 (void) sysclk_gettime_internal(&now
);
1003 *microsecs
= now
.tv_nsec
/ NSEC_PER_USEC
;
1007 clock_get_system_nanotime(
1011 mach_timespec_t now
;
1013 (void) sysclk_gettime_internal(&now
);
1016 *nanosecs
= now
.tv_nsec
;
1020 * Get clock device attributes.
1024 clock_flavor_t flavor
,
1025 clock_attr_t attr
, /* OUT */
1026 mach_msg_type_number_t
*count
) /* IN/OUT */
1029 return (KERN_FAILURE
);
1032 case CLOCK_GET_TIME_RES
: /* >0 res */
1033 *(clock_res_t
*) attr
= rtc_intr_nsec
;
1036 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
1037 case CLOCK_ALARM_MAXRES
:
1038 case CLOCK_ALARM_MINRES
:
1039 *(clock_res_t
*) attr
= 0;
1043 return (KERN_INVALID_VALUE
);
1045 return (KERN_SUCCESS
);
1049 * Set next alarm time for the clock device. This call
1050 * always resets the time to deliver an alarm for the
1055 mach_timespec_t
*alarm_time
)
1057 timer_call_enter(&rtclock_alarm_timer
,
1058 (uint64_t) alarm_time
->tv_sec
* NSEC_PER_SEC
1059 + alarm_time
->tv_nsec
);
1063 * Configure the calendar clock.
1068 return bbc_config();
1072 * Initialize calendar clock.
1081 * Get the current clock time.
1085 mach_timespec_t
*cur_time
) /* OUT */
1090 if (!rtclock
.calend_is_set
) {
1092 return (KERN_FAILURE
);
1095 (void) sysclk_gettime_internal(cur_time
);
1096 ADD_MACH_TIMESPEC(cur_time
, &rtclock
.calend_offset
);
1099 return (KERN_SUCCESS
);
1103 clock_get_calendar_microtime(
1105 uint32_t *microsecs
)
1107 mach_timespec_t now
;
1109 calend_gettime(&now
);
1112 *microsecs
= now
.tv_nsec
/ NSEC_PER_USEC
;
1116 clock_get_calendar_nanotime(
1120 mach_timespec_t now
;
1122 calend_gettime(&now
);
1125 *nanosecs
= now
.tv_nsec
;
1129 clock_set_calendar_microtime(
1133 mach_timespec_t new_time
, curr_time
;
1134 uint32_t old_offset
;
1137 new_time
.tv_sec
= secs
;
1138 new_time
.tv_nsec
= microsecs
* NSEC_PER_USEC
;
1141 old_offset
= rtclock
.calend_offset
.tv_sec
;
1142 (void) sysclk_gettime_internal(&curr_time
);
1143 rtclock
.calend_offset
= new_time
;
1144 SUB_MACH_TIMESPEC(&rtclock
.calend_offset
, &curr_time
);
1145 rtclock
.boottime
+= rtclock
.calend_offset
.tv_sec
- old_offset
;
1146 rtclock
.calend_is_set
= TRUE
;
1149 (void) bbc_settime(&new_time
);
1151 host_notify_calendar_change();
1155 * Get clock device attributes.
1159 clock_flavor_t flavor
,
1160 clock_attr_t attr
, /* OUT */
1161 mach_msg_type_number_t
*count
) /* IN/OUT */
1164 return (KERN_FAILURE
);
1167 case CLOCK_GET_TIME_RES
: /* >0 res */
1168 *(clock_res_t
*) attr
= rtc_intr_nsec
;
1171 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
1172 case CLOCK_ALARM_MINRES
:
1173 case CLOCK_ALARM_MAXRES
:
1174 *(clock_res_t
*) attr
= 0;
1178 return (KERN_INVALID_VALUE
);
1180 return (KERN_SUCCESS
);
1183 #define tickadj (40*NSEC_PER_USEC) /* "standard" skew, ns / tick */
1184 #define bigadj (NSEC_PER_SEC) /* use 10x skew above bigadj ns */
1187 clock_set_calendar_adjtime(
1191 int64_t total
, ototal
;
1192 uint32_t interval
= 0;
1195 total
= (int64_t)*secs
* NSEC_PER_SEC
+ *microsecs
* NSEC_PER_USEC
;
1198 ototal
= rtclock
.calend_adjtotal
;
1201 int32_t delta
= tickadj
;
1210 if (total
< -bigadj
)
1217 rtclock
.calend_adjtotal
= total
;
1218 rtclock
.calend_adjdelta
= delta
;
1220 interval
= NSEC_PER_HZ
;
1223 rtclock
.calend_adjdelta
= rtclock
.calend_adjtotal
= 0;
1228 *secs
= *microsecs
= 0;
1230 *secs
= ototal
/ NSEC_PER_SEC
;
1231 *microsecs
= ototal
% NSEC_PER_SEC
;
1238 clock_adjust_calendar(void)
1240 uint32_t interval
= 0;
1245 delta
= rtclock
.calend_adjdelta
;
1246 ADD_MACH_TIMESPEC_NSEC(&rtclock
.calend_offset
, delta
);
1248 rtclock
.calend_adjtotal
-= delta
;
1251 if (delta
> rtclock
.calend_adjtotal
)
1252 rtclock
.calend_adjdelta
= rtclock
.calend_adjtotal
;
1256 if (delta
< rtclock
.calend_adjtotal
)
1257 rtclock
.calend_adjdelta
= rtclock
.calend_adjtotal
;
1260 if (rtclock
.calend_adjdelta
!= 0)
1261 interval
= NSEC_PER_HZ
;
1269 clock_initialize_calendar(void)
1271 mach_timespec_t bbc_time
, curr_time
;
1274 if (bbc_gettime(&bbc_time
) != KERN_SUCCESS
)
1278 if (rtclock
.boottime
== 0)
1279 rtclock
.boottime
= bbc_time
.tv_sec
;
1280 (void) sysclk_gettime_internal(&curr_time
);
1281 rtclock
.calend_offset
= bbc_time
;
1282 SUB_MACH_TIMESPEC(&rtclock
.calend_offset
, &curr_time
);
1283 rtclock
.calend_is_set
= TRUE
;
1286 host_notify_calendar_change();
1290 clock_get_boottime_nanotime(
1294 *secs
= rtclock
.boottime
;
1299 clock_timebase_info(
1300 mach_timebase_info_t info
)
1302 info
->numer
= info
->denom
= 1;
1306 clock_set_timer_deadline(
1310 cpu_data_t
*pp
= current_cpu_datap();
1311 rtclock_timer_t
*mytimer
= &pp
->cpu_rtc_timer
;
1315 assert(get_preemption_level() > 0);
1316 assert(rtclock_timer_expire
);
1319 mytimer
->deadline
= deadline
;
1320 mytimer
->is_set
= TRUE
;
1321 if (!mytimer
->has_expired
) {
1322 abstime
= mach_absolute_time();
1323 if (mytimer
->deadline
< pp
->cpu_rtc_tick_deadline
) {
1324 decr
= deadline_to_decrementer(mytimer
->deadline
,
1326 rtc_lapic_set_timer(decr
);
1327 pp
->cpu_rtc_intr_deadline
= mytimer
->deadline
;
1328 KERNEL_DEBUG_CONSTANT(
1329 MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1) |
1330 DBG_FUNC_NONE
, decr
, 2, 0, 0, 0);
1337 clock_set_timer_func(
1338 clock_timer_func_t func
)
1340 if (rtclock_timer_expire
== NULL
)
1341 rtclock_timer_expire
= func
;
1345 * Real-time clock device interrupt.
1348 rtclock_intr(struct i386_interrupt_state
*regs
)
1354 uint64_t decr_timer
;
1355 cpu_data_t
*pp
= current_cpu_datap();
1356 rtclock_timer_t
*mytimer
= &pp
->cpu_rtc_timer
;
1358 assert(get_preemption_level() > 0);
1359 assert(!ml_get_interrupts_enabled());
1361 abstime
= _rtc_nanotime_read();
1362 latency
= (uint32_t) abstime
- pp
->cpu_rtc_intr_deadline
;
1363 if (pp
->cpu_rtc_tick_deadline
<= abstime
) {
1364 rtc_nanotime_update();
1365 clock_deadline_for_periodic_event(
1366 NSEC_PER_HZ
, abstime
, &pp
->cpu_rtc_tick_deadline
);
1371 (regs
->efl
& EFL_VM
) || ((regs
->cs
& 0x03) != 0),
1375 abstime
= _rtc_nanotime_read();
1376 if (mytimer
->is_set
&& mytimer
->deadline
<= abstime
) {
1377 mytimer
->has_expired
= TRUE
;
1378 mytimer
->is_set
= FALSE
;
1379 (*rtclock_timer_expire
)(abstime
);
1380 assert(!ml_get_interrupts_enabled());
1381 mytimer
->has_expired
= FALSE
;
1384 /* Log the interrupt service latency (-ve value expected by tool) */
1385 KERNEL_DEBUG_CONSTANT(
1386 MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 0) | DBG_FUNC_NONE
,
1387 -latency
, (uint32_t)regs
->eip
, 0, 0, 0);
1389 abstime
= _rtc_nanotime_read();
1390 decr_tick
= deadline_to_decrementer(pp
->cpu_rtc_tick_deadline
, abstime
);
1391 decr_timer
= (mytimer
->is_set
) ?
1392 deadline_to_decrementer(mytimer
->deadline
, abstime
) :
1394 decr
= MIN(decr_tick
, decr_timer
);
1395 pp
->cpu_rtc_intr_deadline
= abstime
+ decr
;
1397 rtc_lapic_set_timer(decr
);
1399 /* Log the new decrementer value */
1400 KERNEL_DEBUG_CONSTANT(
1401 MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1) | DBG_FUNC_NONE
,
1407 rtclock_alarm_expire(
1408 __unused timer_call_param_t p0
,
1409 __unused timer_call_param_t p1
)
1411 mach_timespec_t clock_time
;
1413 (void) sysclk_gettime_internal(&clock_time
);
1415 clock_alarm_intr(SYSTEM_CLOCK
, &clock_time
);
void
clock_get_uptime(
	uint64_t	*result)
{
	*result = rtc_nanotime_read();
}
uint64_t
mach_absolute_time(void)
{
	return rtc_nanotime_read();
}
1432 absolutetime_to_microtime(
1435 uint32_t *microsecs
)
1441 : "=a" (*secs
), "=d" (remain
)
1442 : "A" (abstime
), "r" (NSEC_PER_SEC
));
1446 : "0" (remain
), "d" (0), "r" (NSEC_PER_USEC
));
1450 clock_interval_to_deadline(
1452 uint32_t scale_factor
,
1457 clock_get_uptime(result
);
1459 clock_interval_to_absolutetime_interval(interval
, scale_factor
, &abstime
);
/*
 * Convert an interval of (interval * scale_factor) nanoseconds into
 * absolute-time units.  Absolute time is nanoseconds here, so this is
 * a simple widening multiply.
 */
void
clock_interval_to_absolutetime_interval(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	*result = (uint64_t)interval * scale_factor;
}
1474 clock_absolutetime_interval_to_deadline(
1478 clock_get_uptime(result
);
1484 absolutetime_to_nanoseconds(
/*
 * Absolute time is maintained in nanoseconds on this platform, so
 * this conversion is the identity.
 */
void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	*result = nanoseconds;
}
1500 machine_delay_until(
1507 now
= mach_absolute_time();
1508 } while (now
< deadline
);