]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/rtclock.c
xnu-792.17.14.tar.gz
[apple/xnu.git] / osfmk / i386 / rtclock.c
CommitLineData
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
8f6c56a5 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
8f6c56a5
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
8ad349bb 24 * limitations under the License.
8f6c56a5
A
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31
32/*
33 * File: i386/rtclock.c
34 * Purpose: Routines for handling the machine dependent
91447636
A
35 * real-time clock. Historically, this clock is
36 * generated by the Intel 8254 Programmable Interval
37 * Timer, but local apic timers are now used for
38 * this purpose with the master time reference being
39 * the cpu clock counted by the timestamp MSR.
1c79356b
A
40 */
41
1c79356b 42#include <platforms.h>
1c79356b 43#include <mach_kdb.h>
55e303ae
A
44
45#include <mach/mach_types.h>
46
1c79356b 47#include <kern/cpu_data.h>
91447636 48#include <kern/cpu_number.h>
1c79356b 49#include <kern/clock.h>
55e303ae 50#include <kern/host_notify.h>
1c79356b
A
51#include <kern/macro_help.h>
52#include <kern/misc_protos.h>
53#include <kern/spl.h>
91447636 54#include <kern/assert.h>
1c79356b
A
55#include <mach/vm_prot.h>
56#include <vm/pmap.h>
57#include <vm/vm_kern.h> /* for kernel_map */
58#include <i386/ipl.h>
59#include <i386/pit.h>
8f6c56a5 60#include <i386/pio.h>
1c79356b 61#include <i386/misc_protos.h>
55e303ae
A
62#include <i386/proc_reg.h>
63#include <i386/machine_cpu.h>
91447636
A
64#include <i386/mp.h>
65#include <i386/cpuid.h>
66#include <i386/cpu_data.h>
67#include <i386/cpu_threads.h>
68#include <i386/perfmon.h>
69#include <i386/machine_routines.h>
8f6c56a5 70#include <i386/AT386/bbclock_entries.h>
55e303ae 71#include <pexpert/pexpert.h>
91447636
A
72#include <machine/limits.h>
73#include <machine/commpage.h>
74#include <sys/kdebug.h>
75
76#define MAX(a,b) (((a)>(b))?(a):(b))
77#define MIN(a,b) (((a)>(b))?(b):(a))
55e303ae 78
91447636
A
79#define NSEC_PER_HZ (NSEC_PER_SEC / 100) /* nsec per tick */
80
81#define UI_CPUFREQ_ROUNDING_FACTOR 10000000
1c79356b 82
8f6c56a5
A
83int sysclk_config(void);
84
85int sysclk_init(void);
86
87kern_return_t sysclk_gettime(
88 mach_timespec_t *cur_time);
89
90kern_return_t sysclk_getattr(
91 clock_flavor_t flavor,
92 clock_attr_t attr,
93 mach_msg_type_number_t *count);
94
95void sysclk_setalarm(
96 mach_timespec_t *alarm_time);
97
98/*
99 * Lists of clock routines.
100 */
101struct clock_ops sysclk_ops = {
102 sysclk_config, sysclk_init,
103 sysclk_gettime, 0,
104 sysclk_getattr, 0,
105 sysclk_setalarm,
106};
8ad349bb 107
8f6c56a5 108int calend_config(void);
8ad349bb 109
8f6c56a5 110int calend_init(void);
8ad349bb 111
8f6c56a5
A
112kern_return_t calend_gettime(
113 mach_timespec_t *cur_time);
8ad349bb 114
8f6c56a5
A
115kern_return_t calend_getattr(
116 clock_flavor_t flavor,
117 clock_attr_t attr,
118 mach_msg_type_number_t *count);
8ad349bb 119
8f6c56a5
A
120struct clock_ops calend_ops = {
121 calend_config, calend_init,
122 calend_gettime, 0,
123 calend_getattr, 0,
124 0,
125};
8ad349bb 126
8f6c56a5 127/* local data declarations */
8ad349bb 128
8f6c56a5 129static clock_timer_func_t rtclock_timer_expire;
8ad349bb 130
8f6c56a5
A
131static timer_call_data_t rtclock_alarm_timer;
132
133static void rtclock_alarm_expire(
134 timer_call_param_t p0,
135 timer_call_param_t p1);
136
137struct {
138 mach_timespec_t calend_offset;
139 boolean_t calend_is_set;
140
141 int64_t calend_adjtotal;
142 int32_t calend_adjdelta;
143
144 uint32_t boottime;
145
146 mach_timebase_info_data_t timebase_const;
147
148 decl_simple_lock_data(,lock) /* real-time clock device lock */
149} rtclock;
150
151boolean_t rtc_initialized = FALSE;
152clock_res_t rtc_intr_nsec = NSEC_PER_HZ; /* interrupt res */
153uint64_t rtc_cycle_count; /* clocks in 1/20th second */
154uint64_t rtc_cyc_per_sec; /* processor cycles per sec */
155uint32_t rtc_boot_frequency; /* provided by 1st speed-step */
156uint32_t rtc_quant_scale; /* clock to nanos multiplier */
157uint32_t rtc_quant_shift; /* clock to nanos right shift */
158uint64_t rtc_decrementer_min;
159
160static mach_timebase_info_data_t rtc_lapic_scale; /* nsec to lapic count */
8ad349bb
A
161
162/*
8f6c56a5
A
163 * Macros to lock/unlock real-time clock data.
164 */
165#define RTC_INTRS_OFF(s) \
166 (s) = splclock()
167
168#define RTC_INTRS_ON(s) \
169 splx(s)
170
171#define RTC_LOCK(s) \
172MACRO_BEGIN \
173 RTC_INTRS_OFF(s); \
174 simple_lock(&rtclock.lock); \
175MACRO_END
176
177#define RTC_UNLOCK(s) \
178MACRO_BEGIN \
179 simple_unlock(&rtclock.lock); \
180 RTC_INTRS_ON(s); \
181MACRO_END
182
183/*
184 * i8254 control. ** MONUMENT **
185 *
186 * The i8254 is a traditional PC device with some arbitrary characteristics.
187 * Basically, it is a register that counts at a fixed rate and can be
188 * programmed to generate an interrupt every N counts. The count rate is
189 * clknum counts per sec (see pit.h), historically 1193167=14.318MHz/12
190 * but the more accurate value is 1193182=14.31818MHz/12. [14.31818 MHz being
191 * the master crystal oscillator reference frequency since the very first PC.]
192 * Various constants are computed based on this value, and we calculate
193 * them at init time for execution efficiency. To obtain sufficient
194 * accuracy, some of the calculation are most easily done in floating
195 * point and then converted to int.
91447636 196 *
91447636 197 */
8f6c56a5
A
198
199/*
200 * Forward decl.
201 */
202
203static uint64_t rtc_set_cyc_per_sec(uint64_t cycles);
204uint64_t rtc_nanotime_read(void);
205
206/*
207 * create_mul_quant_GHZ
208 * create a constant used to multiply the TSC by to convert to nanoseconds.
209 * This is a 32 bit number and the TSC *MUST* have a frequency higher than
210 * 1000Mhz for this routine to work.
211 *
212 * The theory here is that we know how many TSCs-per-sec the processor runs at.
213 * Normally to convert this to nanoseconds you would multiply the current
214 * timestamp by 1000000000 (a billion) then divide by TSCs-per-sec.
215 * Unfortunatly the TSC is 64 bits which would leave us with 96 bit intermediate
216 * results from the multiply that must be divided by.
217 * Usually thats
218 * uint96 = tsc * numer
219 * nanos = uint96 / denom
220 * Instead, we create this quant constant and it becomes the numerator,
221 * the denominator can then be 0x100000000 which makes our division as simple as
222 * forgetting the lower 32 bits of the result. We can also pass this number to
223 * user space as the numer and pass 0xFFFFFFFF (RTC_FAST_DENOM) as the denom to
224 * convert raw counts * to nanos. The difference is so small as to be
225 * undetectable by anything.
226 *
227 * Unfortunatly we can not do this for sub GHZ processors. In this case, all
228 * we do is pass the CPU speed in raw as the denom and we pass in 1000000000
229 * as the numerator. No short cuts allowed
230 */
231#define RTC_FAST_DENOM 0xFFFFFFFF
232inline static uint32_t
233create_mul_quant_GHZ(int shift, uint32_t quant)
234{
235 return (uint32_t)((((uint64_t)NSEC_PER_SEC/20) << shift) / quant);
236}
237/*
238 * This routine takes a value of raw TSC ticks and applies the passed mul_quant
239 * generated by create_mul_quant() This is our internal routine for creating
240 * nanoseconds.
241 * Since we don't really have uint96_t this routine basically does this....
242 * uint96_t intermediate = (*value) * scale
243 * return (intermediate >> 32)
244 */
245inline static uint64_t
246fast_get_nano_from_abs(uint64_t value, int scale)
8ad349bb 247{
8f6c56a5
A
248 asm (" movl %%edx,%%esi \n\t"
249 " mull %%ecx \n\t"
250 " movl %%edx,%%edi \n\t"
251 " movl %%esi,%%eax \n\t"
252 " mull %%ecx \n\t"
253 " xorl %%ecx,%%ecx \n\t"
254 " addl %%edi,%%eax \n\t"
255 " adcl %%ecx,%%edx "
256 : "+A" (value)
257 : "c" (scale)
258 : "%esi", "%edi");
259 return value;
260}
8ad349bb 261
8f6c56a5
A
262/*
263 * This routine basically does this...
264 * ts.tv_sec = nanos / 1000000000; create seconds
265 * ts.tv_nsec = nanos % 1000000000; create remainder nanos
266 */
267inline static mach_timespec_t
268nanos_to_timespec(uint64_t nanos)
269{
270 union {
271 mach_timespec_t ts;
272 uint64_t u64;
273 } ret;
274 ret.u64 = nanos;
275 asm volatile("divl %1" : "+A" (ret.u64) : "r" (NSEC_PER_SEC));
276 return ret.ts;
8ad349bb
A
277}
278
8f6c56a5
A
279/*
280 * The following two routines perform the 96 bit arithmetic we need to
281 * convert generic absolute<->nanoseconds
282 * The multiply routine takes a uint64_t and a uint32_t and returns the result
283 * in a uint32_t[3] array.
284 * The divide routine takes this uint32_t[3] array and divides it by a uint32_t
285 * returning a uint64_t
286 */
287inline static void
288longmul(uint64_t *abstime, uint32_t multiplicand, uint32_t *result)
8ad349bb 289{
8f6c56a5
A
290 asm volatile(
291 " pushl %%ebx \n\t"
292 " movl %%eax,%%ebx \n\t"
293 " movl (%%eax),%%eax \n\t"
294 " mull %%ecx \n\t"
295 " xchg %%eax,%%ebx \n\t"
296 " pushl %%edx \n\t"
297 " movl 4(%%eax),%%eax \n\t"
298 " mull %%ecx \n\t"
299 " movl %2,%%ecx \n\t"
300 " movl %%ebx,(%%ecx) \n\t"
301 " popl %%ebx \n\t"
302 " addl %%ebx,%%eax \n\t"
303 " popl %%ebx \n\t"
304 " movl %%eax,4(%%ecx) \n\t"
305 " adcl $0,%%edx \n\t"
306 " movl %%edx,8(%%ecx) // and save it"
307 : : "a"(abstime), "c"(multiplicand), "m"(result));
308
8ad349bb
A
309}
310
8f6c56a5
A
311inline static uint64_t
312longdiv(uint32_t *numer, uint32_t denom)
313{
314 uint64_t result;
315 asm volatile(
316 " pushl %%ebx \n\t"
317 " movl %%eax,%%ebx \n\t"
318 " movl 8(%%eax),%%edx \n\t"
319 " movl 4(%%eax),%%eax \n\t"
320 " divl %%ecx \n\t"
321 " xchg %%ebx,%%eax \n\t"
322 " movl (%%eax),%%eax \n\t"
323 " divl %%ecx \n\t"
324 " xchg %%ebx,%%edx \n\t"
325 " popl %%ebx \n\t"
326 : "=A"(result) : "a"(numer),"c"(denom));
327 return result;
328}
329
330/*
331 * Enable or disable timer 2.
332 * Port 0x61 controls timer 2:
333 * bit 0 gates the clock,
334 * bit 1 gates output to speaker.
335 */
336inline static void
337enable_PIT2(void)
338{
339 asm volatile(
340 " inb $0x61,%%al \n\t"
341 " and $0xFC,%%al \n\t"
342 " or $1,%%al \n\t"
343 " outb %%al,$0x61 \n\t"
344 : : : "%al" );
345}
346
347inline static void
348disable_PIT2(void)
349{
350 asm volatile(
351 " inb $0x61,%%al \n\t"
352 " and $0xFC,%%al \n\t"
353 " outb %%al,$0x61 \n\t"
354 : : : "%al" );
355}
356
357inline static void
358set_PIT2(int value)
359{
360/*
361 * First, tell the clock we are going to write 16 bits to the counter
362 * and enable one-shot mode (command 0xB8 to port 0x43)
363 * Then write the two bytes into the PIT2 clock register (port 0x42).
364 * Loop until the value is "realized" in the clock,
365 * this happens on the next tick.
366 */
367 asm volatile(
368 " movb $0xB8,%%al \n\t"
369 " outb %%al,$0x43 \n\t"
370 " movb %%dl,%%al \n\t"
371 " outb %%al,$0x42 \n\t"
372 " movb %%dh,%%al \n\t"
373 " outb %%al,$0x42 \n"
374"1: inb $0x42,%%al \n\t"
375 " inb $0x42,%%al \n\t"
376 " cmp %%al,%%dh \n\t"
377 " jne 1b"
378 : : "d"(value) : "%al");
379}
380
381inline static uint64_t
382get_PIT2(unsigned int *value)
383{
384 register uint64_t result;
385/*
386 * This routine first latches the time (command 0x80 to port 0x43),
387 * then gets the time stamp so we know how long the read will take later.
388 * Read (from port 0x42) and return the current value of the timer.
389 */
390 asm volatile(
391 " xorl %%ecx,%%ecx \n\t"
392 " movb $0x80,%%al \n\t"
393 " outb %%al,$0x43 \n\t"
394 " rdtsc \n\t"
395 " pushl %%eax \n\t"
396 " inb $0x42,%%al \n\t"
397 " movb %%al,%%cl \n\t"
398 " inb $0x42,%%al \n\t"
399 " movb %%al,%%ch \n\t"
400 " popl %%eax "
401 : "=A"(result), "=c"(*value));
402 return result;
403}
404
405/*
406 * timeRDTSC()
407 * This routine sets up PIT counter 2 to count down 1/20 of a second.
408 * It pauses until the value is latched in the counter
409 * and then reads the time stamp counter to return to the caller.
410 */
411static uint64_t
412timeRDTSC(void)
413{
414 int attempts = 0;
415 uint64_t latchTime;
416 uint64_t saveTime,intermediate;
417 unsigned int timerValue, lastValue;
418 boolean_t int_enabled;
419 /*
420 * Table of correction factors to account for
421 * - timer counter quantization errors, and
422 * - undercounts 0..5
423 */
424#define SAMPLE_CLKS_EXACT (((double) CLKNUM) / 20.0)
425#define SAMPLE_CLKS_INT ((int) CLKNUM / 20)
426#define SAMPLE_NSECS (2000000000LL)
427#define SAMPLE_MULTIPLIER (((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
428#define ROUND64(x) ((uint64_t)((x) + 0.5))
429 uint64_t scale[6] = {
430 ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
431 ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
432 ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
433 ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
434 ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
435 ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
436 };
437
438 int_enabled = ml_set_interrupts_enabled(FALSE);
439
440restart:
441 if (attempts >= 2)
442 panic("timeRDTSC() calibation failed with %d attempts\n", attempts);
443 attempts++;
444 enable_PIT2(); // turn on PIT2
445 set_PIT2(0); // reset timer 2 to be zero
446 latchTime = rdtsc64(); // get the time stamp to time
447 latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
448 set_PIT2(SAMPLE_CLKS_INT); // set up the timer for (almost) 1/20th a second
449 saveTime = rdtsc64(); // now time how long a 20th a second is...
450 get_PIT2(&lastValue);
451 get_PIT2(&lastValue); // read twice, first value may be unreliable
452 do {
453 intermediate = get_PIT2(&timerValue);
454 if (timerValue > lastValue) {
455 printf("Hey we are going backwards! %u -> %u, restarting timing\n",
456 timerValue,lastValue);
457 set_PIT2(0);
458 disable_PIT2();
459 goto restart;
460 }
461 lastValue = timerValue;
462 } while (timerValue > 5);
463 kprintf("timerValue %d\n",timerValue);
464 kprintf("intermediate 0x%016llx\n",intermediate);
465 kprintf("saveTime 0x%016llx\n",saveTime);
466
467 intermediate -= saveTime; // raw count for about 1/20 second
468 intermediate *= scale[timerValue]; // rescale measured time spent
469 intermediate /= SAMPLE_NSECS; // so its exactly 1/20 a second
470 intermediate += latchTime; // add on our save fudge
471
472 set_PIT2(0); // reset timer 2 to be zero
473 disable_PIT2(); // turn off PIT 2
474
475 ml_set_interrupts_enabled(int_enabled);
476 return intermediate;
477}
478
479static uint64_t
480tsc_to_nanoseconds(uint64_t abstime)
481{
482 uint32_t numer;
483 uint32_t denom;
484 uint32_t intermediate[3];
485
486 numer = rtclock.timebase_const.numer;
487 denom = rtclock.timebase_const.denom;
488 if (denom == RTC_FAST_DENOM) {
489 abstime = fast_get_nano_from_abs(abstime, numer);
490 } else {
491 longmul(&abstime, numer, intermediate);
492 abstime = longdiv(intermediate, denom);
493 }
494 return abstime;
495}
496
497inline static mach_timespec_t
498tsc_to_timespec(void)
499{
500 uint64_t currNanos;
501 currNanos = rtc_nanotime_read();
502 return nanos_to_timespec(currNanos);
503}
504
505#define DECREMENTER_MAX UINT_MAX
91447636
A
506static uint32_t
507deadline_to_decrementer(
508 uint64_t deadline,
509 uint64_t now)
510{
511 uint64_t delta;
512
513 if (deadline <= now)
514 return rtc_decrementer_min;
515 else {
516 delta = deadline - now;
8f6c56a5 517 return MIN(MAX(rtc_decrementer_min,delta),DECREMENTER_MAX);
91447636
A
518 }
519}
520
8f6c56a5
A
521static inline uint64_t
522lapic_time_countdown(uint32_t initial_count)
523{
524 boolean_t state;
525 uint64_t start_time;
526 uint64_t stop_time;
527 lapic_timer_count_t count;
528
529 state = ml_set_interrupts_enabled(FALSE);
530 lapic_set_timer(FALSE, one_shot, divide_by_1, initial_count);
531 start_time = rdtsc64();
532 do {
533 lapic_get_timer(NULL, NULL, NULL, &count);
534 } while (count > 0);
535 stop_time = rdtsc64();
536 ml_set_interrupts_enabled(state);
537
538 return tsc_to_nanoseconds(stop_time - start_time);
539}
540
8ad349bb 541static void
8f6c56a5 542rtc_lapic_timer_calibrate(void)
8ad349bb 543{
8f6c56a5
A
544 uint32_t nsecs;
545 uint64_t countdown;
8ad349bb 546
8f6c56a5
A
547 if (!(cpuid_features() & CPUID_FEATURE_APIC))
548 return;
5d5c5d0d 549
8f6c56a5
A
550 /*
551 * Set the local apic timer counting down to zero without an interrupt.
552 * Use the timestamp to calculate how long this takes.
553 */
554 nsecs = (uint32_t) lapic_time_countdown(rtc_intr_nsec);
5d5c5d0d
A
555
556 /*
8f6c56a5
A
557 * Compute a countdown ratio for a given time in nanoseconds.
558 * That is, countdown = time * numer / denom.
5d5c5d0d 559 */
8f6c56a5
A
560 countdown = (uint64_t)rtc_intr_nsec * (uint64_t)rtc_intr_nsec / nsecs;
561
562 nsecs = (uint32_t) lapic_time_countdown((uint32_t) countdown);
563
564 rtc_lapic_scale.numer = countdown;
565 rtc_lapic_scale.denom = nsecs;
566
567 kprintf("rtc_lapic_timer_calibrate() scale: %d/%d\n",
568 (uint32_t) countdown, nsecs);
569}
570
571static void
572rtc_lapic_set_timer(
573 uint32_t interval)
574{
575 uint64_t count;
576
577 assert(rtc_lapic_scale.denom);
578
579 count = interval * (uint64_t) rtc_lapic_scale.numer;
580 count /= rtc_lapic_scale.denom;
581
582 lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);
583}
584
585static void
586rtc_lapic_start_ticking(void)
587{
588 uint64_t abstime;
589 uint64_t first_tick;
590 uint64_t decr;
591
592 abstime = mach_absolute_time();
593 first_tick = abstime + NSEC_PER_HZ;
594 current_cpu_datap()->cpu_rtc_tick_deadline = first_tick;
595 decr = deadline_to_decrementer(first_tick, abstime);
596 rtc_lapic_set_timer(decr);
1c79356b
A
597}
598
599/*
600 * Configure the real-time clock device. Return success (1)
601 * or failure (0).
602 */
603
604int
8f6c56a5 605sysclk_config(void)
1c79356b 606{
8f6c56a5
A
607
608 mp_disable_preemption();
609 if (cpu_number() != master_cpu) {
610 mp_enable_preemption();
611 return(1);
612 }
613 mp_enable_preemption();
614
615 timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);
616
617 simple_lock_init(&rtclock.lock, 0);
618
91447636
A
619 return (1);
620}
621
622
623/*
624 * Nanotime/mach_absolutime_time
625 * -----------------------------
8f6c56a5
A
626 * The timestamp counter (tsc) - which counts cpu clock cycles and can be read
627 * efficient by the kernel and in userspace - is the reference for all timing.
628 * However, the cpu clock rate is not only platform-dependent but can change
629 * (speed-step) dynamically. Hence tsc is converted into nanoseconds which is
630 * identical to mach_absolute_time. The conversion to tsc to nanoseconds is
631 * encapsulated by nanotime.
91447636
A
632 *
633 * The kernel maintains nanotime information recording:
8f6c56a5 634 * - the current ratio of tsc to nanoseconds
91447636
A
635 * with this ratio expressed as a 32-bit scale and shift
636 * (power of 2 divider);
8f6c56a5
A
637 * - the tsc (step_tsc) and nanotime (step_ns) at which the current
638 * ratio (clock speed) began.
639 * So a tsc value can be converted to nanotime by:
640 *
641 * nanotime = (((tsc - step_tsc)*scale) >> shift) + step_ns
642 *
643 * In general, (tsc - step_tsc) is a 64-bit quantity with the scaling
644 * involving a 96-bit intermediate value. However, by saving the converted
645 * values at each tick (or at any intervening speed-step) - base_tsc and
646 * base_ns - we can perform conversions relative to these and be assured that
647 * (tsc - tick_tsc) is 32-bits. Hence:
8ad349bb 648 *
8f6c56a5 649 * fast_nanotime = (((tsc - base_tsc)*scale) >> shift) + base_ns
8ad349bb 650 *
8f6c56a5
A
651 * The tuple {base_tsc, base_ns, scale, shift} is exported in the commpage
652 * for the userspace nanotime routine to read. A duplicate check_tsc is
653 * appended so that the consistency of the read can be verified. Note that
654 * this scheme is essential for MP systems in which the commpage is updated
655 * by the master cpu but may be read concurrently by other cpus.
656 *
91447636
A
657 */
658static inline void
659rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
660{
8f6c56a5 661 commpage_nanotime_t cp_nanotime;
8ad349bb 662
8f6c56a5
A
663 /* Only the master cpu updates the commpage */
664 if (cpu_number() != master_cpu)
665 return;
666
667 cp_nanotime.nt_base_tsc = rntp->rnt_tsc;
668 cp_nanotime.nt_base_ns = rntp->rnt_nanos;
669 cp_nanotime.nt_scale = rntp->rnt_scale;
670 cp_nanotime.nt_shift = rntp->rnt_shift;
91447636 671
8f6c56a5 672 commpage_set_nanotime(&cp_nanotime);
91447636
A
673}
674
675static void
8f6c56a5 676rtc_nanotime_init(void)
91447636 677{
8f6c56a5
A
678 rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;
679 rtc_nanotime_t *master_rntp = &cpu_datap(master_cpu)->cpu_rtc_nanotime;
91447636 680
8f6c56a5
A
681 if (cpu_number() == master_cpu) {
682 rntp->rnt_tsc = rdtsc64();
683 rntp->rnt_nanos = tsc_to_nanoseconds(rntp->rnt_tsc);
684 rntp->rnt_scale = rtc_quant_scale;
685 rntp->rnt_shift = rtc_quant_shift;
686 rntp->rnt_step_tsc = 0ULL;
687 rntp->rnt_step_nanos = 0ULL;
688 } else {
689 /*
690 * Copy master processor's nanotime info.
691 * Loop required in case this changes while copying.
692 */
693 do {
694 *rntp = *master_rntp;
695 } while (rntp->rnt_tsc != master_rntp->rnt_tsc);
696 }
91447636
A
697}
698
8f6c56a5
A
699static inline void
700_rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t tsc)
91447636 701{
8f6c56a5
A
702 uint64_t tsc_delta;
703 uint64_t ns_delta;
5d5c5d0d 704
8f6c56a5
A
705 tsc_delta = tsc - rntp->rnt_step_tsc;
706 ns_delta = tsc_to_nanoseconds(tsc_delta);
707 rntp->rnt_nanos = rntp->rnt_step_nanos + ns_delta;
708 rntp->rnt_tsc = tsc;
91447636
A
709}
710
8f6c56a5
A
711static void
712rtc_nanotime_update(void)
91447636 713{
8f6c56a5 714 rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;
91447636 715
8f6c56a5
A
716 assert(get_preemption_level() > 0);
717 assert(!ml_get_interrupts_enabled());
718
719 _rtc_nanotime_update(rntp, rdtsc64());
720 rtc_nanotime_set_commpage(rntp);
91447636
A
721}
722
723static void
8f6c56a5 724rtc_nanotime_scale_update(void)
91447636 725{
8f6c56a5
A
726 rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;
727 uint64_t tsc = rdtsc64();
91447636
A
728
729 assert(!ml_get_interrupts_enabled());
730
8f6c56a5
A
731 /*
732 * Update time based on past scale.
733 */
734 _rtc_nanotime_update(rntp, tsc);
735
736 /*
737 * Update scale and timestamp this update.
738 */
739 rntp->rnt_scale = rtc_quant_scale;
740 rntp->rnt_shift = rtc_quant_shift;
741 rntp->rnt_step_tsc = rntp->rnt_tsc;
742 rntp->rnt_step_nanos = rntp->rnt_nanos;
743
744 /* Export update to userland */
91447636
A
745 rtc_nanotime_set_commpage(rntp);
746}
747
748static uint64_t
8f6c56a5
A
749_rtc_nanotime_read(void)
750{
751 rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;
752 uint64_t rnt_tsc;
753 uint32_t rnt_scale;
754 uint32_t rnt_shift;
755 uint64_t rnt_nanos;
756 uint64_t tsc;
757 uint64_t tsc_delta;
758
759 rnt_scale = rntp->rnt_scale;
760 if (rnt_scale == 0)
761 return 0ULL;
762
763 rnt_shift = rntp->rnt_shift;
764 rnt_nanos = rntp->rnt_nanos;
765 rnt_tsc = rntp->rnt_tsc;
766 tsc = rdtsc64();
767
768 tsc_delta = tsc - rnt_tsc;
769 if ((tsc_delta >> 32) != 0)
770 return rnt_nanos + tsc_to_nanoseconds(tsc_delta);
771
772 /* Let the compiler optimize(?): */
773 if (rnt_shift == 32)
774 return rnt_nanos + ((tsc_delta * rnt_scale) >> 32);
775 else
776 return rnt_nanos + ((tsc_delta * rnt_scale) >> rnt_shift);
777}
778
779uint64_t
91447636
A
780rtc_nanotime_read(void)
781{
8f6c56a5
A
782 uint64_t result;
783 uint64_t rnt_tsc;
784 rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;
1c79356b 785
8f6c56a5
A
786 /*
787 * Use timestamp to ensure the uptime record isn't changed.
788 * This avoids disabling interrupts.
789 * And not this is a per-cpu structure hence no locking.
790 */
91447636 791 do {
8f6c56a5
A
792 rnt_tsc = rntp->rnt_tsc;
793 result = _rtc_nanotime_read();
794 } while (rnt_tsc != rntp->rnt_tsc);
91447636 795
8f6c56a5 796 return result;
91447636
A
797}
798
8f6c56a5 799
91447636 800/*
8f6c56a5
A
801 * This function is called by the speed-step driver when a
802 * change of cpu clock frequency is about to occur.
803 * The scale is not changed until rtc_clock_stepped() is called.
804 * Between these times there is an uncertainty is exactly when
805 * the change takes effect. FIXME: by using another timing source
806 * we could eliminate this error.
91447636
A
807 */
808void
809rtc_clock_stepping(__unused uint32_t new_frequency,
810 __unused uint32_t old_frequency)
811{
8f6c56a5
A
812 boolean_t istate;
813
814 istate = ml_set_interrupts_enabled(FALSE);
815 rtc_nanotime_scale_update();
816 ml_set_interrupts_enabled(istate);
91447636
A
817}
818
8f6c56a5
A
819/*
820 * This function is called by the speed-step driver when a
821 * change of cpu clock frequency has just occured. This change
822 * is expressed as a ratio relative to the boot clock rate.
823 */
91447636 824void
8f6c56a5 825rtc_clock_stepped(uint32_t new_frequency, uint32_t old_frequency)
91447636 826{
8f6c56a5
A
827 boolean_t istate;
828
829 istate = ml_set_interrupts_enabled(FALSE);
830 if (rtc_boot_frequency == 0) {
831 /*
832 * At the first ever stepping, old frequency is the real
833 * initial clock rate. This step and all others are based
834 * relative to this initial frequency at which the tsc
835 * calibration was made. Hence we must remember this base
836 * frequency as reference.
837 */
838 rtc_boot_frequency = old_frequency;
839 }
840 rtc_set_cyc_per_sec(rtc_cycle_count * new_frequency /
841 rtc_boot_frequency);
842 rtc_nanotime_scale_update();
843 ml_set_interrupts_enabled(istate);
1c79356b
A
844}
845
846/*
8f6c56a5 847 * rtc_sleep_wakeup() is called from acpi on awakening from a S3 sleep
91447636
A
848 */
849void
850rtc_sleep_wakeup(void)
851{
8f6c56a5
A
852 rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;
853
854 boolean_t istate;
91447636
A
855
856 istate = ml_set_interrupts_enabled(FALSE);
857
858 /*
859 * Reset nanotime.
860 * The timestamp counter will have been reset
861 * but nanotime (uptime) marches onward.
8f6c56a5 862 * We assume that we're still at the former cpu frequency.
91447636 863 */
8f6c56a5
A
864 rntp->rnt_tsc = rdtsc64();
865 rntp->rnt_step_tsc = 0ULL;
866 rntp->rnt_step_nanos = rntp->rnt_nanos;
867 rtc_nanotime_set_commpage(rntp);
91447636
A
868
869 /* Restart tick interrupts from the LAPIC timer */
870 rtc_lapic_start_ticking();
871
872 ml_set_interrupts_enabled(istate);
873}
874
875/*
876 * Initialize the real-time clock device.
877 * In addition, various variables used to support the clock are initialized.
1c79356b
A
878 */
879int
8f6c56a5 880sysclk_init(void)
1c79356b 881{
91447636
A
882 uint64_t cycles;
883
8f6c56a5 884 mp_disable_preemption();
91447636
A
885 if (cpu_number() == master_cpu) {
886 /*
8f6c56a5
A
887 * Perform calibration.
888 * The PIT is used as the reference to compute how many
889 * TCS counts (cpu clock cycles) occur per second.
91447636 890 */
8f6c56a5
A
891 rtc_cycle_count = timeRDTSC();
892 cycles = rtc_set_cyc_per_sec(rtc_cycle_count);
91447636
A
893
894 /*
895 * Set min/max to actual.
896 * ACPI may update these later if speed-stepping is detected.
897 */
8f6c56a5
A
898 gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
899 gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
900 printf("[RTCLOCK] frequency %llu (%llu)\n",
901 cycles, rtc_cyc_per_sec);
91447636 902
8f6c56a5 903 rtc_lapic_timer_calibrate();
91447636
A
904
905 /* Minimum interval is 1usec */
8f6c56a5
A
906 rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC,
907 0ULL);
91447636
A
908 /* Point LAPIC interrupts to hardclock() */
909 lapic_set_timer_func((i386_intr_func_t) rtclock_intr);
910
911 clock_timebase_init();
8f6c56a5 912 rtc_initialized = TRUE;
1c79356b 913 }
91447636 914
8f6c56a5
A
915 rtc_nanotime_init();
916
91447636
A
917 rtc_lapic_start_ticking();
918
8f6c56a5
A
919 mp_enable_preemption();
920
1c79356b
A
921 return (1);
922}
923
8f6c56a5
A
924/*
925 * Get the clock device time. This routine is responsible
926 * for converting the device's machine dependent time value
927 * into a canonical mach_timespec_t value.
928 */
929static kern_return_t
930sysclk_gettime_internal(
931 mach_timespec_t *cur_time) /* OUT */
932{
933 *cur_time = tsc_to_timespec();
934 return (KERN_SUCCESS);
935}
1c79356b 936
8f6c56a5
A
937kern_return_t
938sysclk_gettime(
939 mach_timespec_t *cur_time) /* OUT */
1c79356b 940{
8f6c56a5
A
941 return sysclk_gettime_internal(cur_time);
942}
1c79356b 943
8f6c56a5
A
944void
945sysclk_gettime_interrupts_disabled(
946 mach_timespec_t *cur_time) /* OUT */
947{
948 (void) sysclk_gettime_internal(cur_time);
1c79356b
A
949}
950
8f6c56a5
A
951// utility routine
952// Code to calculate how many processor cycles are in a second...
953
91447636 954static uint64_t
8f6c56a5 955rtc_set_cyc_per_sec(uint64_t cycles)
9bccf70c 956{
1c79356b 957
8f6c56a5
A
958 if (cycles > (NSEC_PER_SEC/20)) {
959 // we can use just a "fast" multiply to get nanos
960 rtc_quant_shift = 32;
961 rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
962 rtclock.timebase_const.numer = rtc_quant_scale; // timeRDTSC is 1/20
963 rtclock.timebase_const.denom = RTC_FAST_DENOM;
964 } else {
965 rtc_quant_shift = 26;
966 rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
967 rtclock.timebase_const.numer = NSEC_PER_SEC/20; // timeRDTSC is 1/20
968 rtclock.timebase_const.denom = cycles;
969 }
970 rtc_cyc_per_sec = cycles*20; // multiply it by 20 and we are done..
971 // BUT we also want to calculate...
972
973 cycles = ((rtc_cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
91447636
A
974 / UI_CPUFREQ_ROUNDING_FACTOR)
975 * UI_CPUFREQ_ROUNDING_FACTOR;
9bccf70c 976
91447636
A
977 /*
978 * Set current measured speed.
979 */
980 if (cycles >= 0x100000000ULL) {
981 gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
55e303ae 982 } else {
91447636 983 gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
9bccf70c 984 }
91447636 985 gPEClockFrequencyInfo.cpu_frequency_hz = cycles;
55e303ae 986
8f6c56a5 987 kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, rtc_cyc_per_sec);
91447636 988 return(cycles);
9bccf70c 989}
1c79356b 990
55e303ae
A
991void
992clock_get_system_microtime(
993 uint32_t *secs,
994 uint32_t *microsecs)
9bccf70c 995{
8f6c56a5 996 mach_timespec_t now;
8ad349bb 997
8f6c56a5
A
998 (void) sysclk_gettime_internal(&now);
999
1000 *secs = now.tv_sec;
1001 *microsecs = now.tv_nsec / NSEC_PER_USEC;
1c79356b
A
1002}
1003
55e303ae
A
1004void
1005clock_get_system_nanotime(
1006 uint32_t *secs,
1007 uint32_t *nanosecs)
1008{
8f6c56a5 1009 mach_timespec_t now;
8ad349bb 1010
8f6c56a5
A
1011 (void) sysclk_gettime_internal(&now);
1012
1013 *secs = now.tv_sec;
1014 *nanosecs = now.tv_nsec;
1015}
1016
1017/*
1018 * Get clock device attributes.
1019 */
1020kern_return_t
1021sysclk_getattr(
1022 clock_flavor_t flavor,
1023 clock_attr_t attr, /* OUT */
1024 mach_msg_type_number_t *count) /* IN/OUT */
1025{
1026 if (*count != 1)
1027 return (KERN_FAILURE);
1028 switch (flavor) {
1029
1030 case CLOCK_GET_TIME_RES: /* >0 res */
1031 *(clock_res_t *) attr = rtc_intr_nsec;
1032 break;
1033
1034 case CLOCK_ALARM_CURRES: /* =0 no alarm */
1035 case CLOCK_ALARM_MAXRES:
1036 case CLOCK_ALARM_MINRES:
1037 *(clock_res_t *) attr = 0;
1038 break;
1039
1040 default:
1041 return (KERN_INVALID_VALUE);
1042 }
1043 return (KERN_SUCCESS);
8ad349bb
A
1044}
1045
8f6c56a5
A
1046/*
1047 * Set next alarm time for the clock device. This call
1048 * always resets the time to deliver an alarm for the
1049 * clock.
1050 */
8ad349bb 1051void
8f6c56a5
A
1052sysclk_setalarm(
1053 mach_timespec_t *alarm_time)
1054{
1055 timer_call_enter(&rtclock_alarm_timer,
1056 (uint64_t) alarm_time->tv_sec * NSEC_PER_SEC
1057 + alarm_time->tv_nsec);
1058}
1059
1060/*
1061 * Configure the calendar clock.
1062 */
int
calend_config(void)
{
	/* Configuration is delegated to the battery-backed clock driver. */
	return (bbc_config());
}
8ad349bb 1068
8f6c56a5
A
1069/*
1070 * Initialize calendar clock.
1071 */
int
calend_init(void)
{
	/* No hardware initialization required; always report success. */
	return (1);
}
8ad349bb 1077
8f6c56a5
A
1078/*
1079 * Get the current clock time.
1080 */
1081kern_return_t
1082calend_gettime(
1083 mach_timespec_t *cur_time) /* OUT */
1084{
1085 spl_t s;
1086
1087 RTC_LOCK(s);
1088 if (!rtclock.calend_is_set) {
1089 RTC_UNLOCK(s);
1090 return (KERN_FAILURE);
1091 }
8ad349bb 1092
8f6c56a5
A
1093 (void) sysclk_gettime_internal(cur_time);
1094 ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
1095 RTC_UNLOCK(s);
8ad349bb 1096
8f6c56a5
A
1097 return (KERN_SUCCESS);
1098}
1099
1100void
1101clock_get_calendar_microtime(
1102 uint32_t *secs,
1103 uint32_t *microsecs)
1104{
1105 mach_timespec_t now;
1106
1107 calend_gettime(&now);
1108
1109 *secs = now.tv_sec;
1110 *microsecs = now.tv_nsec / NSEC_PER_USEC;
1111}
1112
1113void
1114clock_get_calendar_nanotime(
1115 uint32_t *secs,
1116 uint32_t *nanosecs)
1117{
1118 mach_timespec_t now;
1119
1120 calend_gettime(&now);
1121
1122 *secs = now.tv_sec;
1123 *nanosecs = now.tv_nsec;
1124}
1125
/*
 * clock_set_calendar_microtime:
 *
 * Step the calendar clock to the given time-of-day.  The calendar
 * is maintained as an offset from the free-running system clock, so
 * setting it recomputes that offset; boottime is bumped by the same
 * number of seconds so that (boottime + uptime) remains consistent.
 * The new time is also written to the battery-backed clock, and
 * host_notify listeners are told of the change.
 */
void
clock_set_calendar_microtime(
	uint32_t	secs,
	uint32_t	microsecs)
{
	mach_timespec_t		new_time, curr_time;
	uint32_t		old_offset;
	spl_t			s;

	new_time.tv_sec = secs;
	new_time.tv_nsec = microsecs * NSEC_PER_USEC;

	RTC_LOCK(s);
	old_offset = rtclock.calend_offset.tv_sec;
	(void) sysclk_gettime_internal(&curr_time);
	/* offset = requested calendar time - current uptime */
	rtclock.calend_offset = new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.boottime += rtclock.calend_offset.tv_sec - old_offset;
	rtclock.calend_is_set = TRUE;
	RTC_UNLOCK(s);

	/* Persist to the battery-backed clock outside the lock. */
	(void) bbc_settime(&new_time);

	host_notify_calendar_change();
}
1151
1152/*
1153 * Get clock device attributes.
1154 */
1155kern_return_t
1156calend_getattr(
1157 clock_flavor_t flavor,
1158 clock_attr_t attr, /* OUT */
1159 mach_msg_type_number_t *count) /* IN/OUT */
1160{
1161 if (*count != 1)
1162 return (KERN_FAILURE);
1163 switch (flavor) {
1164
1165 case CLOCK_GET_TIME_RES: /* >0 res */
1166 *(clock_res_t *) attr = rtc_intr_nsec;
1167 break;
1168
1169 case CLOCK_ALARM_CURRES: /* =0 no alarm */
1170 case CLOCK_ALARM_MINRES:
1171 case CLOCK_ALARM_MAXRES:
1172 *(clock_res_t *) attr = 0;
1173 break;
1174
1175 default:
1176 return (KERN_INVALID_VALUE);
1177 }
1178 return (KERN_SUCCESS);
1179}
1180
1181#define tickadj (40*NSEC_PER_USEC) /* "standard" skew, ns / tick */
1182#define bigadj (NSEC_PER_SEC) /* use 10x skew above bigadj ns */
1183
1184uint32_t
1185clock_set_calendar_adjtime(
1186 int32_t *secs,
1187 int32_t *microsecs)
1188{
1189 int64_t total, ototal;
1190 uint32_t interval = 0;
1191 spl_t s;
1192
1193 total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;
1194
1195 RTC_LOCK(s);
1196 ototal = rtclock.calend_adjtotal;
1197
1198 if (total != 0) {
1199 int32_t delta = tickadj;
1200
1201 if (total > 0) {
1202 if (total > bigadj)
1203 delta *= 10;
1204 if (delta > total)
1205 delta = total;
1206 }
1207 else {
1208 if (total < -bigadj)
1209 delta *= 10;
1210 delta = -delta;
1211 if (delta < total)
1212 delta = total;
1213 }
1214
1215 rtclock.calend_adjtotal = total;
1216 rtclock.calend_adjdelta = delta;
1217
1218 interval = NSEC_PER_HZ;
1219 }
1220 else
1221 rtclock.calend_adjdelta = rtclock.calend_adjtotal = 0;
1222
1223 RTC_UNLOCK(s);
1224
1225 if (ototal == 0)
1226 *secs = *microsecs = 0;
1227 else {
1228 *secs = ototal / NSEC_PER_SEC;
1229 *microsecs = ototal % NSEC_PER_SEC;
1230 }
1231
1232 return (interval);
1233}
1234
/*
 * clock_adjust_calendar:
 *
 * Apply one increment of a pending adjtime() slew: add the current
 * per-tick delta to the calendar offset and subtract it from the
 * outstanding total.  Once less than a full delta remains (in the
 * direction of the slew), the delta is clamped to the remainder so
 * the final tick lands the total exactly on zero.
 *
 * Returns the interval (ns) until the next adjustment tick, or 0
 * when the slew is complete.
 */
uint32_t
clock_adjust_calendar(void)
{
	uint32_t		interval = 0;
	int32_t			delta;
	spl_t			s;

	RTC_LOCK(s);
	delta = rtclock.calend_adjdelta;
	ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, delta);

	rtclock.calend_adjtotal -= delta;

	/* Clamp the next delta when less than a full step remains. */
	if (delta > 0) {
		if (delta > rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}
	else
	if (delta < 0) {
		if (delta < rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}

	if (rtclock.calend_adjdelta != 0)
		interval = NSEC_PER_HZ;

	RTC_UNLOCK(s);

	return (interval);
}
1265
1266void
1267clock_initialize_calendar(void)
1268{
1269 mach_timespec_t bbc_time, curr_time;
1270 spl_t s;
1271
1272 if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
1273 return;
1274
1275 RTC_LOCK(s);
1276 if (rtclock.boottime == 0)
1277 rtclock.boottime = bbc_time.tv_sec;
1278 (void) sysclk_gettime_internal(&curr_time);
1279 rtclock.calend_offset = bbc_time;
1280 SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
1281 rtclock.calend_is_set = TRUE;
1282 RTC_UNLOCK(s);
1283
1284 host_notify_calendar_change();
1285}
1286
/*
 * clock_get_boottime_nanotime:
 *
 * Return the boot time (calendar seconds at boot).  The nanosecond
 * part is not tracked and is always reported as zero.
 *
 * NOTE(review): rtclock.boottime is read without RTC_LOCK; a
 * concurrent clock_set_calendar_microtime() can update it.
 * Presumably benign for a single aligned 32-bit read — confirm.
 */
void
clock_get_boottime_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	*secs = rtclock.boottime;
	*nanosecs = 0;
}
1295
1c79356b
A
/*
 * clock_timebase_info:
 *
 * Report the numer/denom ratio converting mach absolute time units
 * to nanoseconds.  Absolute time is already kept in nanoseconds
 * here (see mach_absolute_time), so the ratio is exactly 1/1.
 */
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	info->numer = info->denom = 1;
}
1302
8f6c56a5
A
/*
 * clock_set_timer_deadline:
 *
 * Set this CPU's one-shot timer deadline.  Must be called with
 * preemption disabled and only after clock_set_timer_func() has
 * registered an expiration handler (both asserted below).
 *
 * If the new deadline precedes the next periodic tick deadline, the
 * local apic timer is reprogrammed immediately so the interrupt
 * fires early; otherwise the already-armed tick interrupt will pick
 * the timer up in rtclock_intr().  Reprogramming is skipped while
 * the expiration handler is running (has_expired): rtclock_intr()
 * re-arms the hardware itself when the handler returns.
 */
void
clock_set_timer_deadline(
	uint64_t		deadline)
{
	spl_t			s;
	cpu_data_t		*pp = current_cpu_datap();
	rtclock_timer_t		*mytimer = &pp->cpu_rtc_timer;
	uint64_t		abstime;
	uint64_t		decr;

	assert(get_preemption_level() > 0);
	assert(rtclock_timer_expire);

	/* Interrupts off: this state is shared with rtclock_intr(). */
	RTC_INTRS_OFF(s);
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if (!mytimer->has_expired) {
		abstime = mach_absolute_time();
		if (mytimer->deadline < pp->cpu_rtc_tick_deadline) {
			decr = deadline_to_decrementer(mytimer->deadline,
							abstime);
			rtc_lapic_set_timer(decr);
			pp->cpu_rtc_intr_deadline = mytimer->deadline;
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) |
					DBG_FUNC_NONE, decr, 2, 0, 0, 0);
		}
	}
	RTC_INTRS_ON(s);
}
1333
1c79356b
A
/*
 * clock_set_timer_func:
 *
 * Register the timer expiration callback invoked by rtclock_intr().
 * Only the first registration takes effect; later calls are
 * silently ignored.
 */
void
clock_set_timer_func(
	clock_timer_func_t		func)
{
	if (rtclock_timer_expire == NULL)
		rtclock_timer_expire = func;
}
1341
1c79356b 1342/*
91447636 1343 * Real-time clock device interrupt.
1c79356b 1344 */
/*
 * rtclock_intr:
 *
 * Local apic timer interrupt handler.  Services the periodic hertz
 * tick and/or this CPU's one-shot timer (whichever deadlines have
 * passed), then re-arms the apic timer for the earlier of the two
 * remaining deadlines.  Runs with interrupts disabled and
 * preemption off (asserted below).
 */
void
rtclock_intr(struct i386_interrupt_state *regs)
{
	uint64_t	abstime;
	uint32_t	latency;
	uint64_t	decr;
	uint64_t	decr_tick;
	uint64_t	decr_timer;
	cpu_data_t	*pp = current_cpu_datap();
	rtclock_timer_t	*mytimer = &pp->cpu_rtc_timer;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	abstime = _rtc_nanotime_read();
	/*
	 * NOTE(review): the cast binds to abstime only, but since the
	 * result is truncated to 32 bits anyway, the low 32 bits equal
	 * those of (abstime - deadline) — behavior is unaffected.
	 */
	latency = (uint32_t) abstime - pp->cpu_rtc_intr_deadline;
	/* Periodic tick deadline passed: run the hertz tick. */
	if (pp->cpu_rtc_tick_deadline <= abstime) {
		rtc_nanotime_update();
		clock_deadline_for_periodic_event(
			NSEC_PER_HZ, abstime, &pp->cpu_rtc_tick_deadline);
		hertz_tick(
#if STAT_TIME
			NSEC_PER_HZ,
#endif
			(regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0),
			regs->eip);
	}

	/* One-shot timer deadline passed: invoke the expire callback. */
	abstime = _rtc_nanotime_read();
	if (mytimer->is_set && mytimer->deadline <= abstime) {
		mytimer->has_expired = TRUE;
		mytimer->is_set = FALSE;
		(*rtclock_timer_expire)(abstime);
		assert(!ml_get_interrupts_enabled());
		mytimer->has_expired = FALSE;
	}

	/* Log the interrupt service latency (-ve value expected by tool) */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		-latency, (uint32_t)regs->eip, 0, 0, 0);

	/* Re-arm the apic timer for the earlier remaining deadline. */
	abstime = _rtc_nanotime_read();
	decr_tick = deadline_to_decrementer(pp->cpu_rtc_tick_deadline, abstime);
	decr_timer = (mytimer->is_set) ?
			deadline_to_decrementer(mytimer->deadline, abstime) :
			DECREMENTER_MAX;
	decr = MIN(decr_tick, decr_timer);
	pp->cpu_rtc_intr_deadline = abstime + decr;

	rtc_lapic_set_timer(decr);

	/* Log the new decrementer value */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE,
		decr, 3, 0, 0, 0);
}
1403
8f6c56a5
A
1404static void
1405rtclock_alarm_expire(
1406 __unused timer_call_param_t p0,
1407 __unused timer_call_param_t p1)
1c79356b 1408{
8f6c56a5 1409 mach_timespec_t clock_time;
5d5c5d0d 1410
8f6c56a5 1411 (void) sysclk_gettime_internal(&clock_time);
5d5c5d0d 1412
8f6c56a5 1413 clock_alarm_intr(SYSTEM_CLOCK, &clock_time);
8ad349bb
A
1414}
1415
8f6c56a5
A
/*
 * clock_get_uptime:
 *
 * Return the current absolute (uptime) clock value — nanoseconds
 * on this platform — from the tsc-based nanotime reader.
 */
void
clock_get_uptime(
	uint64_t		*result)
{
	*result = rtc_nanotime_read();
}
1422
8f6c56a5
A
/*
 * mach_absolute_time:
 *
 * Return the current absolute time.  On this platform absolute
 * time units are nanoseconds (see clock_timebase_info's 1/1 ratio).
 */
uint64_t
mach_absolute_time(void)
{
	return rtc_nanotime_read();
}
1428
/*
 * absolutetime_to_microtime:
 *
 * Split a 64-bit nanosecond count into whole seconds and
 * microseconds using two 32-bit divides (i386 divl) instead of a
 * 64/64 software division.
 *
 * First divl: divides edx:eax (= abstime, via the "A" constraint)
 * by NSEC_PER_SEC, leaving seconds in eax and the sub-second ns
 * remainder in edx.  Valid while abstime / NSEC_PER_SEC fits in
 * 32 bits (~136 years of uptime); beyond that divl faults (#DE).
 *
 * Second divl: divides that remainder (edx zeroed) by
 * NSEC_PER_USEC to yield microseconds.
 */
void
absolutetime_to_microtime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint32_t	remain;

	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (abstime), "r" (NSEC_PER_SEC));
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
1446
/*
 * clock_interval_to_deadline:
 *
 * Compute an absolute deadline: the current uptime plus the given
 * interval scaled into absolute time units.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	span;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &span);

	clock_get_uptime(result);

	*result += span;
}
1461
/*
 * clock_interval_to_absolutetime_interval:
 *
 * Scale an interval (e.g. count * NSEC_PER_USEC) into absolute
 * time units, widening to 64 bits before multiplying.
 */
void
clock_interval_to_absolutetime_interval(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	scaled = interval;

	scaled *= scale_factor;
	*result = scaled;
}
1470
/*
 * clock_absolutetime_interval_to_deadline:
 *
 * Compute an absolute deadline: current uptime plus the given
 * absolute-time interval.
 */
void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	uint64_t	now;

	clock_get_uptime(&now);
	*result = now + abstime;
}
1480
/*
 * absolutetime_to_nanoseconds:
 *
 * Absolute time is already kept in nanoseconds on this platform,
 * so the conversion is the identity.
 */
void
absolutetime_to_nanoseconds(
	uint64_t		abstime,
	uint64_t		*result)
{
	*result = abstime;
}
1488
/*
 * nanoseconds_to_absolutetime:
 *
 * Absolute time is already kept in nanoseconds on this platform,
 * so the conversion is the identity.
 */
void
nanoseconds_to_absolutetime(
	uint64_t		nanoseconds,
	uint64_t		*result)
{
	*result = nanoseconds;
}
1496
/*
 * machine_delay_until:
 *
 * Busy-wait (pausing the pipeline between samples) until the
 * absolute time deadline has passed.  Always pauses at least once.
 */
void
machine_delay_until(
	uint64_t		deadline)
{
	uint64_t	t;

	do {
		cpu_pause();
		t = mach_absolute_time();
	} while (t < deadline);
}