2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
33 * File: i386/rtclock.c
34 * Purpose: Routines for handling the machine dependent
35 * real-time clock. Historically, this clock is
36 * generated by the Intel 8254 Programmable Interval
37 * Timer, but local apic timers are now used for
38 * this purpose with the master time reference being
39 * the cpu clock counted by the timestamp MSR.
42 #include <platforms.h>
45 #include <mach/mach_types.h>
47 #include <kern/cpu_data.h>
48 #include <kern/cpu_number.h>
49 #include <kern/clock.h>
50 #include <kern/host_notify.h>
51 #include <kern/macro_help.h>
52 #include <kern/misc_protos.h>
54 #include <kern/assert.h>
55 #include <mach/vm_prot.h>
57 #include <vm/vm_kern.h> /* for kernel_map */
61 #include <i386/misc_protos.h>
62 #include <i386/proc_reg.h>
63 #include <i386/machine_cpu.h>
65 #include <i386/cpuid.h>
66 #include <i386/cpu_data.h>
67 #include <i386/cpu_threads.h>
68 #include <i386/perfmon.h>
69 #include <i386/machine_routines.h>
70 #include <i386/AT386/bbclock_entries.h>
71 #include <pexpert/pexpert.h>
72 #include <machine/limits.h>
73 #include <machine/commpage.h>
74 #include <sys/kdebug.h>
76 #define MAX(a,b) (((a)>(b))?(a):(b))
77 #define MIN(a,b) (((a)>(b))?(b):(a))
79 #define NSEC_PER_HZ (NSEC_PER_SEC / 100) /* nsec per tick */
81 #define UI_CPUFREQ_ROUNDING_FACTOR 10000000
83 int sysclk_config(void);
85 int sysclk_init(void);
87 kern_return_t
sysclk_gettime(
88 mach_timespec_t
*cur_time
);
90 kern_return_t
sysclk_getattr(
91 clock_flavor_t flavor
,
93 mach_msg_type_number_t
*count
);
96 mach_timespec_t
*alarm_time
);
99 * Lists of clock routines.
101 struct clock_ops sysclk_ops
= {
102 sysclk_config
, sysclk_init
,
108 int calend_config(void);
110 int calend_init(void);
112 kern_return_t
calend_gettime(
113 mach_timespec_t
*cur_time
);
115 kern_return_t
calend_getattr(
116 clock_flavor_t flavor
,
118 mach_msg_type_number_t
*count
);
120 struct clock_ops calend_ops
= {
121 calend_config
, calend_init
,
127 /* local data declarations */
129 static clock_timer_func_t rtclock_timer_expire
;
131 static timer_call_data_t rtclock_alarm_timer
;
133 static void rtclock_alarm_expire(
134 timer_call_param_t p0
,
135 timer_call_param_t p1
);
138 mach_timespec_t calend_offset
;
139 boolean_t calend_is_set
;
141 int64_t calend_adjtotal
;
142 int32_t calend_adjdelta
;
146 mach_timebase_info_data_t timebase_const
;
148 decl_simple_lock_data(,lock
) /* real-time clock device lock */
151 boolean_t rtc_initialized
= FALSE
;
152 clock_res_t rtc_intr_nsec
= NSEC_PER_HZ
; /* interrupt res */
153 uint64_t rtc_cycle_count
; /* clocks in 1/20th second */
154 uint64_t rtc_cyc_per_sec
; /* processor cycles per sec */
155 uint32_t rtc_boot_frequency
; /* provided by 1st speed-step */
156 uint32_t rtc_quant_scale
; /* clock to nanos multiplier */
157 uint32_t rtc_quant_shift
; /* clock to nanos right shift */
158 uint64_t rtc_decrementer_min
;
160 static mach_timebase_info_data_t rtc_lapic_scale
; /* nsec to lapic count */
163 * Macros to lock/unlock real-time clock data.
165 #define RTC_INTRS_OFF(s) \
168 #define RTC_INTRS_ON(s) \
171 #define RTC_LOCK(s) \
174 simple_lock(&rtclock.lock); \
177 #define RTC_UNLOCK(s) \
179 simple_unlock(&rtclock.lock); \
184 * i8254 control. ** MONUMENT **
186 * The i8254 is a traditional PC device with some arbitrary characteristics.
187 * Basically, it is a register that counts at a fixed rate and can be
188 * programmed to generate an interrupt every N counts. The count rate is
189 * clknum counts per sec (see pit.h), historically 1193167=14.318MHz/12
190 * but the more accurate value is 1193182=14.31818MHz/12. [14.31818 MHz being
191 * the master crystal oscillator reference frequency since the very first PC.]
192 * Various constants are computed based on this value, and we calculate
193 * them at init time for execution efficiency. To obtain sufficient
194 * accuracy, some of the calculation are most easily done in floating
195 * point and then converted to int.
203 static uint64_t rtc_set_cyc_per_sec(uint64_t cycles
);
204 uint64_t rtc_nanotime_read(void);
207 * create_mul_quant_GHZ
208 * create a constant used to multiply the TSC by to convert to nanoseconds.
209 * This is a 32 bit number and the TSC *MUST* have a frequency higher than
210 * 1000Mhz for this routine to work.
212 * The theory here is that we know how many TSCs-per-sec the processor runs at.
213 * Normally to convert this to nanoseconds you would multiply the current
214 * timestamp by 1000000000 (a billion) then divide by TSCs-per-sec.
215 * Unfortunately the TSC is 64 bits which would leave us with 96 bit intermediate
216 * results from the multiply that must be divided by.
218 * uint96 = tsc * numer
219 * nanos = uint96 / denom
220 * Instead, we create this quant constant and it becomes the numerator,
221 * the denominator can then be 0x100000000 which makes our division as simple as
222 * forgetting the lower 32 bits of the result. We can also pass this number to
223 * user space as the numer and pass 0xFFFFFFFF (RTC_FAST_DENOM) as the denom to
224 * convert raw counts to nanos. The difference is so small as to be
225 * undetectable by anything.
227 * Unfortunately we cannot do this for sub GHZ processors. In this case, all
228 * we do is pass the CPU speed in raw as the denom and we pass in 1000000000
229 * as the numerator. No short cuts allowed
231 #define RTC_FAST_DENOM 0xFFFFFFFF
232 inline static uint32_t
233 create_mul_quant_GHZ(int shift
, uint32_t quant
)
235 return (uint32_t)((((uint64_t)NSEC_PER_SEC
/20) << shift
) / quant
);
238 * This routine takes a value of raw TSC ticks and applies the passed mul_quant
239 * generated by create_mul_quant() This is our internal routine for creating
241 * Since we don't really have uint96_t this routine basically does this....
242 * uint96_t intermediate = (*value) * scale
243 * return (intermediate >> 32)
245 inline static uint64_t
246 fast_get_nano_from_abs(uint64_t value
, int scale
)
248 asm (" movl %%edx,%%esi \n\t"
250 " movl %%edx,%%edi \n\t"
251 " movl %%esi,%%eax \n\t"
253 " xorl %%ecx,%%ecx \n\t"
254 " addl %%edi,%%eax \n\t"
263 * This routine basically does this...
264 * ts.tv_sec = nanos / 1000000000; create seconds
265 * ts.tv_nsec = nanos % 1000000000; create remainder nanos
267 inline static mach_timespec_t
268 nanos_to_timespec(uint64_t nanos
)
275 asm volatile("divl %1" : "+A" (ret
.u64
) : "r" (NSEC_PER_SEC
));
280 * The following two routines perform the 96 bit arithmetic we need to
281 * convert generic absolute<->nanoseconds
282 * The multiply routine takes a uint64_t and a uint32_t and returns the result
283 * in a uint32_t[3] array.
284 * The divide routine takes this uint32_t[3] array and divides it by a uint32_t
285 * returning a uint64_t
288 longmul(uint64_t *abstime
, uint32_t multiplicand
, uint32_t *result
)
292 " movl %%eax,%%ebx \n\t"
293 " movl (%%eax),%%eax \n\t"
295 " xchg %%eax,%%ebx \n\t"
297 " movl 4(%%eax),%%eax \n\t"
299 " movl %2,%%ecx \n\t"
300 " movl %%ebx,(%%ecx) \n\t"
302 " addl %%ebx,%%eax \n\t"
304 " movl %%eax,4(%%ecx) \n\t"
305 " adcl $0,%%edx \n\t"
306 " movl %%edx,8(%%ecx) // and save it"
307 : : "a"(abstime
), "c"(multiplicand
), "m"(result
));
311 inline static uint64_t
312 longdiv(uint32_t *numer
, uint32_t denom
)
317 " movl %%eax,%%ebx \n\t"
318 " movl 8(%%eax),%%edx \n\t"
319 " movl 4(%%eax),%%eax \n\t"
321 " xchg %%ebx,%%eax \n\t"
322 " movl (%%eax),%%eax \n\t"
324 " xchg %%ebx,%%edx \n\t"
326 : "=A"(result
) : "a"(numer
),"c"(denom
));
331 * Enable or disable timer 2.
332 * Port 0x61 controls timer 2:
333 * bit 0 gates the clock,
334 * bit 1 gates output to speaker.
340 " inb $0x61,%%al \n\t"
341 " and $0xFC,%%al \n\t"
343 " outb %%al,$0x61 \n\t"
351 " inb $0x61,%%al \n\t"
352 " and $0xFC,%%al \n\t"
353 " outb %%al,$0x61 \n\t"
361 * First, tell the clock we are going to write 16 bits to the counter
362 * and enable one-shot mode (command 0xB8 to port 0x43)
363 * Then write the two bytes into the PIT2 clock register (port 0x42).
364 * Loop until the value is "realized" in the clock,
365 * this happens on the next tick.
368 " movb $0xB8,%%al \n\t"
369 " outb %%al,$0x43 \n\t"
370 " movb %%dl,%%al \n\t"
371 " outb %%al,$0x42 \n\t"
372 " movb %%dh,%%al \n\t"
373 " outb %%al,$0x42 \n"
374 "1: inb $0x42,%%al \n\t"
375 " inb $0x42,%%al \n\t"
376 " cmp %%al,%%dh \n\t"
378 : : "d"(value
) : "%al");
381 inline static uint64_t
382 get_PIT2(unsigned int *value
)
384 register uint64_t result
;
386 * This routine first latches the time (command 0x80 to port 0x43),
387 * then gets the time stamp so we know how long the read will take later.
388 * Read (from port 0x42) and return the current value of the timer.
391 " xorl %%ecx,%%ecx \n\t"
392 " movb $0x80,%%al \n\t"
393 " outb %%al,$0x43 \n\t"
396 " inb $0x42,%%al \n\t"
397 " movb %%al,%%cl \n\t"
398 " inb $0x42,%%al \n\t"
399 " movb %%al,%%ch \n\t"
401 : "=A"(result
), "=c"(*value
));
407 * This routine sets up PIT counter 2 to count down 1/20 of a second.
408 * It pauses until the value is latched in the counter
409 * and then reads the time stamp counter to return to the caller.
416 uint64_t saveTime
,intermediate
;
417 unsigned int timerValue
, lastValue
;
418 boolean_t int_enabled
;
420 * Table of correction factors to account for
421 * - timer counter quantization errors, and
424 #define SAMPLE_CLKS_EXACT (((double) CLKNUM) / 20.0)
425 #define SAMPLE_CLKS_INT ((int) CLKNUM / 20)
426 #define SAMPLE_NSECS (2000000000LL)
427 #define SAMPLE_MULTIPLIER (((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
428 #define ROUND64(x) ((uint64_t)((x) + 0.5))
429 uint64_t scale
[6] = {
430 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-0)),
431 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-1)),
432 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-2)),
433 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-3)),
434 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-4)),
435 ROUND64(SAMPLE_MULTIPLIER
/(double)(SAMPLE_CLKS_INT
-5))
438 int_enabled
= ml_set_interrupts_enabled(FALSE
);
442 panic("timeRDTSC() calibation failed with %d attempts\n", attempts
);
444 enable_PIT2(); // turn on PIT2
445 set_PIT2(0); // reset timer 2 to be zero
446 latchTime
= rdtsc64(); // get the time stamp to time
447 latchTime
= get_PIT2(&timerValue
) - latchTime
; // time how long this takes
448 set_PIT2(SAMPLE_CLKS_INT
); // set up the timer for (almost) 1/20th a second
449 saveTime
= rdtsc64(); // now time how long a 20th a second is...
450 get_PIT2(&lastValue
);
451 get_PIT2(&lastValue
); // read twice, first value may be unreliable
453 intermediate
= get_PIT2(&timerValue
);
454 if (timerValue
> lastValue
) {
455 printf("Hey we are going backwards! %u -> %u, restarting timing\n",
456 timerValue
,lastValue
);
461 lastValue
= timerValue
;
462 } while (timerValue
> 5);
463 kprintf("timerValue %d\n",timerValue
);
464 kprintf("intermediate 0x%016llx\n",intermediate
);
465 kprintf("saveTime 0x%016llx\n",saveTime
);
467 intermediate
-= saveTime
; // raw count for about 1/20 second
468 intermediate
*= scale
[timerValue
]; // rescale measured time spent
469 intermediate
/= SAMPLE_NSECS
; // so its exactly 1/20 a second
470 intermediate
+= latchTime
; // add on our save fudge
472 set_PIT2(0); // reset timer 2 to be zero
473 disable_PIT2(); // turn off PIT 2
475 ml_set_interrupts_enabled(int_enabled
);
480 tsc_to_nanoseconds(uint64_t abstime
)
484 uint32_t intermediate
[3];
486 numer
= rtclock
.timebase_const
.numer
;
487 denom
= rtclock
.timebase_const
.denom
;
488 if (denom
== RTC_FAST_DENOM
) {
489 abstime
= fast_get_nano_from_abs(abstime
, numer
);
491 longmul(&abstime
, numer
, intermediate
);
492 abstime
= longdiv(intermediate
, denom
);
497 inline static mach_timespec_t
498 tsc_to_timespec(void)
501 currNanos
= rtc_nanotime_read();
502 return nanos_to_timespec(currNanos
);
505 #define DECREMENTER_MAX UINT_MAX
507 deadline_to_decrementer(
514 return rtc_decrementer_min
;
516 delta
= deadline
- now
;
517 return MIN(MAX(rtc_decrementer_min
,delta
),DECREMENTER_MAX
);
521 static inline uint64_t
522 lapic_time_countdown(uint32_t initial_count
)
527 lapic_timer_count_t count
;
529 state
= ml_set_interrupts_enabled(FALSE
);
530 lapic_set_timer(FALSE
, one_shot
, divide_by_1
, initial_count
);
531 start_time
= rdtsc64();
533 lapic_get_timer(NULL
, NULL
, NULL
, &count
);
535 stop_time
= rdtsc64();
536 ml_set_interrupts_enabled(state
);
538 return tsc_to_nanoseconds(stop_time
- start_time
);
542 rtc_lapic_timer_calibrate(void)
547 if (!(cpuid_features() & CPUID_FEATURE_APIC
))
551 * Set the local apic timer counting down to zero without an interrupt.
552 * Use the timestamp to calculate how long this takes.
554 nsecs
= (uint32_t) lapic_time_countdown(rtc_intr_nsec
);
557 * Compute a countdown ratio for a given time in nanoseconds.
558 * That is, countdown = time * numer / denom.
560 countdown
= (uint64_t)rtc_intr_nsec
* (uint64_t)rtc_intr_nsec
/ nsecs
;
562 nsecs
= (uint32_t) lapic_time_countdown((uint32_t) countdown
);
564 rtc_lapic_scale
.numer
= countdown
;
565 rtc_lapic_scale
.denom
= nsecs
;
567 kprintf("rtc_lapic_timer_calibrate() scale: %d/%d\n",
568 (uint32_t) countdown
, nsecs
);
577 assert(rtc_lapic_scale
.denom
);
579 count
= interval
* (uint64_t) rtc_lapic_scale
.numer
;
580 count
/= rtc_lapic_scale
.denom
;
582 lapic_set_timer(TRUE
, one_shot
, divide_by_1
, (uint32_t) count
);
586 rtc_lapic_start_ticking(void)
592 abstime
= mach_absolute_time();
593 first_tick
= abstime
+ NSEC_PER_HZ
;
594 current_cpu_datap()->cpu_rtc_tick_deadline
= first_tick
;
595 decr
= deadline_to_decrementer(first_tick
, abstime
);
596 rtc_lapic_set_timer(decr
);
600 * Configure the real-time clock device. Return success (1)
608 mp_disable_preemption();
609 if (cpu_number() != master_cpu
) {
610 mp_enable_preemption();
613 mp_enable_preemption();
615 timer_call_setup(&rtclock_alarm_timer
, rtclock_alarm_expire
, NULL
);
617 simple_lock_init(&rtclock
.lock
, 0);
624 * Nanotime/mach_absolutime_time
625 * -----------------------------
626 * The timestamp counter (tsc) - which counts cpu clock cycles and can be read
627 * efficiently by the kernel and in userspace - is the reference for all timing.
628 * However, the cpu clock rate is not only platform-dependent but can change
629 * (speed-step) dynamically. Hence tsc is converted into nanoseconds which is
630 * identical to mach_absolute_time. The conversion of tsc to nanoseconds is
631 * encapsulated by nanotime.
633 * The kernel maintains nanotime information recording:
634 * - the current ratio of tsc to nanoseconds
635 * with this ratio expressed as a 32-bit scale and shift
636 * (power of 2 divider);
637 * - the tsc (step_tsc) and nanotime (step_ns) at which the current
638 * ratio (clock speed) began.
639 * So a tsc value can be converted to nanotime by:
641 * nanotime = (((tsc - step_tsc)*scale) >> shift) + step_ns
643 * In general, (tsc - step_tsc) is a 64-bit quantity with the scaling
644 * involving a 96-bit intermediate value. However, by saving the converted
645 * values at each tick (or at any intervening speed-step) - base_tsc and
646 * base_ns - we can perform conversions relative to these and be assured that
647 * (tsc - tick_tsc) is 32-bits. Hence:
649 * fast_nanotime = (((tsc - base_tsc)*scale) >> shift) + base_ns
651 * The tuple {base_tsc, base_ns, scale, shift} is exported in the commpage
652 * for the userspace nanotime routine to read. A duplicate check_tsc is
653 * appended so that the consistency of the read can be verified. Note that
654 * this scheme is essential for MP systems in which the commpage is updated
655 * by the master cpu but may be read concurrently by other cpus.
659 rtc_nanotime_set_commpage(rtc_nanotime_t
*rntp
)
661 commpage_nanotime_t cp_nanotime
;
663 /* Only the master cpu updates the commpage */
664 if (cpu_number() != master_cpu
)
667 cp_nanotime
.nt_base_tsc
= rntp
->rnt_tsc
;
668 cp_nanotime
.nt_base_ns
= rntp
->rnt_nanos
;
669 cp_nanotime
.nt_scale
= rntp
->rnt_scale
;
670 cp_nanotime
.nt_shift
= rntp
->rnt_shift
;
672 commpage_set_nanotime(&cp_nanotime
);
676 rtc_nanotime_init(void)
678 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
679 rtc_nanotime_t
*master_rntp
= &cpu_datap(master_cpu
)->cpu_rtc_nanotime
;
681 if (cpu_number() == master_cpu
) {
682 rntp
->rnt_tsc
= rdtsc64();
683 rntp
->rnt_nanos
= tsc_to_nanoseconds(rntp
->rnt_tsc
);
684 rntp
->rnt_scale
= rtc_quant_scale
;
685 rntp
->rnt_shift
= rtc_quant_shift
;
686 rntp
->rnt_step_tsc
= 0ULL;
687 rntp
->rnt_step_nanos
= 0ULL;
690 * Copy master processor's nanotime info.
691 * Loop required in case this changes while copying.
694 *rntp
= *master_rntp
;
695 } while (rntp
->rnt_tsc
!= master_rntp
->rnt_tsc
);
700 _rtc_nanotime_update(rtc_nanotime_t
*rntp
, uint64_t tsc
)
705 tsc_delta
= tsc
- rntp
->rnt_step_tsc
;
706 ns_delta
= tsc_to_nanoseconds(tsc_delta
);
707 rntp
->rnt_nanos
= rntp
->rnt_step_nanos
+ ns_delta
;
712 rtc_nanotime_update(void)
714 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
716 assert(get_preemption_level() > 0);
717 assert(!ml_get_interrupts_enabled());
719 _rtc_nanotime_update(rntp
, rdtsc64());
720 rtc_nanotime_set_commpage(rntp
);
724 rtc_nanotime_scale_update(void)
726 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
727 uint64_t tsc
= rdtsc64();
729 assert(!ml_get_interrupts_enabled());
732 * Update time based on past scale.
734 _rtc_nanotime_update(rntp
, tsc
);
737 * Update scale and timestamp this update.
739 rntp
->rnt_scale
= rtc_quant_scale
;
740 rntp
->rnt_shift
= rtc_quant_shift
;
741 rntp
->rnt_step_tsc
= rntp
->rnt_tsc
;
742 rntp
->rnt_step_nanos
= rntp
->rnt_nanos
;
744 /* Export update to userland */
745 rtc_nanotime_set_commpage(rntp
);
749 _rtc_nanotime_read(void)
751 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
759 rnt_scale
= rntp
->rnt_scale
;
763 rnt_shift
= rntp
->rnt_shift
;
764 rnt_nanos
= rntp
->rnt_nanos
;
765 rnt_tsc
= rntp
->rnt_tsc
;
768 tsc_delta
= tsc
- rnt_tsc
;
769 if ((tsc_delta
>> 32) != 0)
770 return rnt_nanos
+ tsc_to_nanoseconds(tsc_delta
);
772 /* Let the compiler optimize(?): */
774 return rnt_nanos
+ ((tsc_delta
* rnt_scale
) >> 32);
776 return rnt_nanos
+ ((tsc_delta
* rnt_scale
) >> rnt_shift
);
780 rtc_nanotime_read(void)
784 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
787 * Use timestamp to ensure the uptime record isn't changed.
788 * This avoids disabling interrupts.
789 * And note this is a per-cpu structure, hence no locking.
792 rnt_tsc
= rntp
->rnt_tsc
;
793 result
= _rtc_nanotime_read();
794 } while (rnt_tsc
!= rntp
->rnt_tsc
);
801 * This function is called by the speed-step driver when a
802 * change of cpu clock frequency is about to occur.
803 * The scale is not changed until rtc_clock_stepped() is called.
804 * Between these times there is uncertainty as to exactly when
805 * the change takes effect. FIXME: by using another timing source
806 * we could eliminate this error.
809 rtc_clock_stepping(__unused
uint32_t new_frequency
,
810 __unused
uint32_t old_frequency
)
814 istate
= ml_set_interrupts_enabled(FALSE
);
815 rtc_nanotime_scale_update();
816 ml_set_interrupts_enabled(istate
);
820 * This function is called by the speed-step driver when a
821 * change of cpu clock frequency has just occurred. This change
822 * is expressed as a ratio relative to the boot clock rate.
825 rtc_clock_stepped(uint32_t new_frequency
, uint32_t old_frequency
)
829 istate
= ml_set_interrupts_enabled(FALSE
);
830 if (rtc_boot_frequency
== 0) {
832 * At the first ever stepping, old frequency is the real
833 * initial clock rate. This step and all others are based
834 * relative to this initial frequency at which the tsc
835 * calibration was made. Hence we must remember this base
836 * frequency as reference.
838 rtc_boot_frequency
= old_frequency
;
840 rtc_set_cyc_per_sec(rtc_cycle_count
* new_frequency
/
842 rtc_nanotime_scale_update();
843 ml_set_interrupts_enabled(istate
);
847 * rtc_sleep_wakeup() is called from acpi on awakening from a S3 sleep
850 rtc_sleep_wakeup(void)
852 rtc_nanotime_t
*rntp
= ¤t_cpu_datap()->cpu_rtc_nanotime
;
856 istate
= ml_set_interrupts_enabled(FALSE
);
860 * The timestamp counter will have been reset
861 * but nanotime (uptime) marches onward.
862 * We assume that we're still at the former cpu frequency.
864 rntp
->rnt_tsc
= rdtsc64();
865 rntp
->rnt_step_tsc
= 0ULL;
866 rntp
->rnt_step_nanos
= rntp
->rnt_nanos
;
867 rtc_nanotime_set_commpage(rntp
);
869 /* Restart tick interrupts from the LAPIC timer */
870 rtc_lapic_start_ticking();
872 ml_set_interrupts_enabled(istate
);
876 * Initialize the real-time clock device.
877 * In addition, various variables used to support the clock are initialized.
884 mp_disable_preemption();
885 if (cpu_number() == master_cpu
) {
887 * Perform calibration.
888 * The PIT is used as the reference to compute how many
889 * TCS counts (cpu clock cycles) occur per second.
891 rtc_cycle_count
= timeRDTSC();
892 cycles
= rtc_set_cyc_per_sec(rtc_cycle_count
);
895 * Set min/max to actual.
896 * ACPI may update these later if speed-stepping is detected.
898 gPEClockFrequencyInfo
.cpu_frequency_min_hz
= cycles
;
899 gPEClockFrequencyInfo
.cpu_frequency_max_hz
= cycles
;
900 printf("[RTCLOCK] frequency %llu (%llu)\n",
901 cycles
, rtc_cyc_per_sec
);
903 rtc_lapic_timer_calibrate();
905 /* Minimum interval is 1usec */
906 rtc_decrementer_min
= deadline_to_decrementer(NSEC_PER_USEC
,
908 /* Point LAPIC interrupts to hardclock() */
909 lapic_set_timer_func((i386_intr_func_t
) rtclock_intr
);
911 clock_timebase_init();
912 rtc_initialized
= TRUE
;
917 rtc_lapic_start_ticking();
919 mp_enable_preemption();
925 * Get the clock device time. This routine is responsible
926 * for converting the device's machine dependent time value
927 * into a canonical mach_timespec_t value.
930 sysclk_gettime_internal(
931 mach_timespec_t
*cur_time
) /* OUT */
933 *cur_time
= tsc_to_timespec();
934 return (KERN_SUCCESS
);
939 mach_timespec_t
*cur_time
) /* OUT */
941 return sysclk_gettime_internal(cur_time
);
945 sysclk_gettime_interrupts_disabled(
946 mach_timespec_t
*cur_time
) /* OUT */
948 (void) sysclk_gettime_internal(cur_time
);
952 // Code to calculate how many processor cycles are in a second...
955 rtc_set_cyc_per_sec(uint64_t cycles
)
958 if (cycles
> (NSEC_PER_SEC
/20)) {
959 // we can use just a "fast" multiply to get nanos
960 rtc_quant_shift
= 32;
961 rtc_quant_scale
= create_mul_quant_GHZ(rtc_quant_shift
, cycles
);
962 rtclock
.timebase_const
.numer
= rtc_quant_scale
; // timeRDTSC is 1/20
963 rtclock
.timebase_const
.denom
= RTC_FAST_DENOM
;
965 rtc_quant_shift
= 26;
966 rtc_quant_scale
= create_mul_quant_GHZ(rtc_quant_shift
, cycles
);
967 rtclock
.timebase_const
.numer
= NSEC_PER_SEC
/20; // timeRDTSC is 1/20
968 rtclock
.timebase_const
.denom
= cycles
;
970 rtc_cyc_per_sec
= cycles
*20; // multiply it by 20 and we are done..
971 // BUT we also want to calculate...
973 cycles
= ((rtc_cyc_per_sec
+ (UI_CPUFREQ_ROUNDING_FACTOR
/2))
974 / UI_CPUFREQ_ROUNDING_FACTOR
)
975 * UI_CPUFREQ_ROUNDING_FACTOR
;
978 * Set current measured speed.
980 if (cycles
>= 0x100000000ULL
) {
981 gPEClockFrequencyInfo
.cpu_clock_rate_hz
= 0xFFFFFFFFUL
;
983 gPEClockFrequencyInfo
.cpu_clock_rate_hz
= (unsigned long)cycles
;
985 gPEClockFrequencyInfo
.cpu_frequency_hz
= cycles
;
987 kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles
, rtc_cyc_per_sec
);
992 clock_get_system_microtime(
998 (void) sysclk_gettime_internal(&now
);
1001 *microsecs
= now
.tv_nsec
/ NSEC_PER_USEC
;
1005 clock_get_system_nanotime(
1009 mach_timespec_t now
;
1011 (void) sysclk_gettime_internal(&now
);
1014 *nanosecs
= now
.tv_nsec
;
1018 * Get clock device attributes.
1022 clock_flavor_t flavor
,
1023 clock_attr_t attr
, /* OUT */
1024 mach_msg_type_number_t
*count
) /* IN/OUT */
1027 return (KERN_FAILURE
);
1030 case CLOCK_GET_TIME_RES
: /* >0 res */
1031 *(clock_res_t
*) attr
= rtc_intr_nsec
;
1034 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
1035 case CLOCK_ALARM_MAXRES
:
1036 case CLOCK_ALARM_MINRES
:
1037 *(clock_res_t
*) attr
= 0;
1041 return (KERN_INVALID_VALUE
);
1043 return (KERN_SUCCESS
);
1047 * Set next alarm time for the clock device. This call
1048 * always resets the time to deliver an alarm for the
1053 mach_timespec_t
*alarm_time
)
1055 timer_call_enter(&rtclock_alarm_timer
,
1056 (uint64_t) alarm_time
->tv_sec
* NSEC_PER_SEC
1057 + alarm_time
->tv_nsec
);
1061 * Configure the calendar clock.
1066 return bbc_config();
1070 * Initialize calendar clock.
1079 * Get the current clock time.
1083 mach_timespec_t
*cur_time
) /* OUT */
1088 if (!rtclock
.calend_is_set
) {
1090 return (KERN_FAILURE
);
1093 (void) sysclk_gettime_internal(cur_time
);
1094 ADD_MACH_TIMESPEC(cur_time
, &rtclock
.calend_offset
);
1097 return (KERN_SUCCESS
);
1101 clock_get_calendar_microtime(
1103 uint32_t *microsecs
)
1105 mach_timespec_t now
;
1107 calend_gettime(&now
);
1110 *microsecs
= now
.tv_nsec
/ NSEC_PER_USEC
;
1114 clock_get_calendar_nanotime(
1118 mach_timespec_t now
;
1120 calend_gettime(&now
);
1123 *nanosecs
= now
.tv_nsec
;
1127 clock_set_calendar_microtime(
1131 mach_timespec_t new_time
, curr_time
;
1132 uint32_t old_offset
;
1135 new_time
.tv_sec
= secs
;
1136 new_time
.tv_nsec
= microsecs
* NSEC_PER_USEC
;
1139 old_offset
= rtclock
.calend_offset
.tv_sec
;
1140 (void) sysclk_gettime_internal(&curr_time
);
1141 rtclock
.calend_offset
= new_time
;
1142 SUB_MACH_TIMESPEC(&rtclock
.calend_offset
, &curr_time
);
1143 rtclock
.boottime
+= rtclock
.calend_offset
.tv_sec
- old_offset
;
1144 rtclock
.calend_is_set
= TRUE
;
1147 (void) bbc_settime(&new_time
);
1149 host_notify_calendar_change();
1153 * Get clock device attributes.
1157 clock_flavor_t flavor
,
1158 clock_attr_t attr
, /* OUT */
1159 mach_msg_type_number_t
*count
) /* IN/OUT */
1162 return (KERN_FAILURE
);
1165 case CLOCK_GET_TIME_RES
: /* >0 res */
1166 *(clock_res_t
*) attr
= rtc_intr_nsec
;
1169 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
1170 case CLOCK_ALARM_MINRES
:
1171 case CLOCK_ALARM_MAXRES
:
1172 *(clock_res_t
*) attr
= 0;
1176 return (KERN_INVALID_VALUE
);
1178 return (KERN_SUCCESS
);
1181 #define tickadj (40*NSEC_PER_USEC) /* "standard" skew, ns / tick */
1182 #define bigadj (NSEC_PER_SEC) /* use 10x skew above bigadj ns */
1185 clock_set_calendar_adjtime(
1189 int64_t total
, ototal
;
1190 uint32_t interval
= 0;
1193 total
= (int64_t)*secs
* NSEC_PER_SEC
+ *microsecs
* NSEC_PER_USEC
;
1196 ototal
= rtclock
.calend_adjtotal
;
1199 int32_t delta
= tickadj
;
1208 if (total
< -bigadj
)
1215 rtclock
.calend_adjtotal
= total
;
1216 rtclock
.calend_adjdelta
= delta
;
1218 interval
= NSEC_PER_HZ
;
1221 rtclock
.calend_adjdelta
= rtclock
.calend_adjtotal
= 0;
1226 *secs
= *microsecs
= 0;
1228 *secs
= ototal
/ NSEC_PER_SEC
;
1229 *microsecs
= ototal
% NSEC_PER_SEC
;
1236 clock_adjust_calendar(void)
1238 uint32_t interval
= 0;
1243 delta
= rtclock
.calend_adjdelta
;
1244 ADD_MACH_TIMESPEC_NSEC(&rtclock
.calend_offset
, delta
);
1246 rtclock
.calend_adjtotal
-= delta
;
1249 if (delta
> rtclock
.calend_adjtotal
)
1250 rtclock
.calend_adjdelta
= rtclock
.calend_adjtotal
;
1254 if (delta
< rtclock
.calend_adjtotal
)
1255 rtclock
.calend_adjdelta
= rtclock
.calend_adjtotal
;
1258 if (rtclock
.calend_adjdelta
!= 0)
1259 interval
= NSEC_PER_HZ
;
1267 clock_initialize_calendar(void)
1269 mach_timespec_t bbc_time
, curr_time
;
1272 if (bbc_gettime(&bbc_time
) != KERN_SUCCESS
)
1276 if (rtclock
.boottime
== 0)
1277 rtclock
.boottime
= bbc_time
.tv_sec
;
1278 (void) sysclk_gettime_internal(&curr_time
);
1279 rtclock
.calend_offset
= bbc_time
;
1280 SUB_MACH_TIMESPEC(&rtclock
.calend_offset
, &curr_time
);
1281 rtclock
.calend_is_set
= TRUE
;
1284 host_notify_calendar_change();
1288 clock_get_boottime_nanotime(
1292 *secs
= rtclock
.boottime
;
1297 clock_timebase_info(
1298 mach_timebase_info_t info
)
1300 info
->numer
= info
->denom
= 1;
1304 clock_set_timer_deadline(
1308 cpu_data_t
*pp
= current_cpu_datap();
1309 rtclock_timer_t
*mytimer
= &pp
->cpu_rtc_timer
;
1313 assert(get_preemption_level() > 0);
1314 assert(rtclock_timer_expire
);
1317 mytimer
->deadline
= deadline
;
1318 mytimer
->is_set
= TRUE
;
1319 if (!mytimer
->has_expired
) {
1320 abstime
= mach_absolute_time();
1321 if (mytimer
->deadline
< pp
->cpu_rtc_tick_deadline
) {
1322 decr
= deadline_to_decrementer(mytimer
->deadline
,
1324 rtc_lapic_set_timer(decr
);
1325 pp
->cpu_rtc_intr_deadline
= mytimer
->deadline
;
1326 KERNEL_DEBUG_CONSTANT(
1327 MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1) |
1328 DBG_FUNC_NONE
, decr
, 2, 0, 0, 0);
1335 clock_set_timer_func(
1336 clock_timer_func_t func
)
1338 if (rtclock_timer_expire
== NULL
)
1339 rtclock_timer_expire
= func
;
1343 * Real-time clock device interrupt.
1346 rtclock_intr(struct i386_interrupt_state
*regs
)
1352 uint64_t decr_timer
;
1353 cpu_data_t
*pp
= current_cpu_datap();
1354 rtclock_timer_t
*mytimer
= &pp
->cpu_rtc_timer
;
1356 assert(get_preemption_level() > 0);
1357 assert(!ml_get_interrupts_enabled());
1359 abstime
= _rtc_nanotime_read();
1360 latency
= (uint32_t) abstime
- pp
->cpu_rtc_intr_deadline
;
1361 if (pp
->cpu_rtc_tick_deadline
<= abstime
) {
1362 rtc_nanotime_update();
1363 clock_deadline_for_periodic_event(
1364 NSEC_PER_HZ
, abstime
, &pp
->cpu_rtc_tick_deadline
);
1369 (regs
->efl
& EFL_VM
) || ((regs
->cs
& 0x03) != 0),
1373 abstime
= _rtc_nanotime_read();
1374 if (mytimer
->is_set
&& mytimer
->deadline
<= abstime
) {
1375 mytimer
->has_expired
= TRUE
;
1376 mytimer
->is_set
= FALSE
;
1377 (*rtclock_timer_expire
)(abstime
);
1378 assert(!ml_get_interrupts_enabled());
1379 mytimer
->has_expired
= FALSE
;
1382 /* Log the interrupt service latency (-ve value expected by tool) */
1383 KERNEL_DEBUG_CONSTANT(
1384 MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 0) | DBG_FUNC_NONE
,
1385 -latency
, (uint32_t)regs
->eip
, 0, 0, 0);
1387 abstime
= _rtc_nanotime_read();
1388 decr_tick
= deadline_to_decrementer(pp
->cpu_rtc_tick_deadline
, abstime
);
1389 decr_timer
= (mytimer
->is_set
) ?
1390 deadline_to_decrementer(mytimer
->deadline
, abstime
) :
1392 decr
= MIN(decr_tick
, decr_timer
);
1393 pp
->cpu_rtc_intr_deadline
= abstime
+ decr
;
1395 rtc_lapic_set_timer(decr
);
1397 /* Log the new decrementer value */
1398 KERNEL_DEBUG_CONSTANT(
1399 MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1) | DBG_FUNC_NONE
,
1405 rtclock_alarm_expire(
1406 __unused timer_call_param_t p0
,
1407 __unused timer_call_param_t p1
)
1409 mach_timespec_t clock_time
;
1411 (void) sysclk_gettime_internal(&clock_time
);
1413 clock_alarm_intr(SYSTEM_CLOCK
, &clock_time
);
1420 *result
= rtc_nanotime_read();
1424 mach_absolute_time(void)
1426 return rtc_nanotime_read();
1430 absolutetime_to_microtime(
1433 uint32_t *microsecs
)
1439 : "=a" (*secs
), "=d" (remain
)
1440 : "A" (abstime
), "r" (NSEC_PER_SEC
));
1444 : "0" (remain
), "d" (0), "r" (NSEC_PER_USEC
));
1448 clock_interval_to_deadline(
1450 uint32_t scale_factor
,
1455 clock_get_uptime(result
);
1457 clock_interval_to_absolutetime_interval(interval
, scale_factor
, &abstime
);
1463 clock_interval_to_absolutetime_interval(
1465 uint32_t scale_factor
,
1468 *result
= (uint64_t)interval
* scale_factor
;
1472 clock_absolutetime_interval_to_deadline(
1476 clock_get_uptime(result
);
1482 absolutetime_to_nanoseconds(
1490 nanoseconds_to_absolutetime(
1491 uint64_t nanoseconds
,
1494 *result
= nanoseconds
;
1498 machine_delay_until(
1505 now
= mach_absolute_time();
1506 } while (now
< deadline
);