/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * File:	i386/rtclock.c
 * Purpose:	Routines for handling the machine dependent
 *		real-time clock. Historically, this clock is
 *		generated by the Intel 8254 Programmable Interval
 *		Timer, but local apic timers are now used for
 *		this purpose with the master time reference being
 *		the cpu clock counted by the timestamp MSR.
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <i386/ipl.h>
#include <i386/pit.h>
#include <i386/pio.h>
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/cpu_threads.h>
#include <i386/perfmon.h>
#include <i386/machine_routines.h>
#include <i386/AT386/bbclock_entries.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>

#define MAX(a,b)	(((a)>(b))?(a):(b))
#define MIN(a,b)	(((a)>(b))?(b):(a))

#define NSEC_PER_HZ	(NSEC_PER_SEC / 100)	/* nsec per tick */

#define UI_CPUFREQ_ROUNDING_FACTOR	10000000

int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
	mach_timespec_t		*cur_time);

kern_return_t	sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);

void		sysclk_setalarm(
	mach_timespec_t		*alarm_time);

/*
 * Lists of clock routines.
 */
struct clock_ops sysclk_ops = {
	sysclk_config,		sysclk_init,
	sysclk_gettime,		0,
	sysclk_getattr,		0,
	sysclk_setalarm,
};

int		calend_config(void);

int		calend_init(void);

kern_return_t	calend_gettime(
	mach_timespec_t		*cur_time);

kern_return_t	calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);

struct clock_ops calend_ops = {
	calend_config,		calend_init,
	calend_gettime,		0,
	calend_getattr,		0,
	0,
};

/* local data declarations */

static clock_timer_func_t	rtclock_timer_expire;

static timer_call_data_t	rtclock_alarm_timer;

static void	rtclock_alarm_expire(
	timer_call_param_t	p0,
	timer_call_param_t	p1);

struct {
	mach_timespec_t		calend_offset;
	boolean_t		calend_is_set;

	int64_t			calend_adjtotal;
	int32_t			calend_adjdelta;

	uint32_t		boottime;

	mach_timebase_info_data_t	timebase_const;

	decl_simple_lock_data(,lock)	/* real-time clock device lock */
} rtclock;

boolean_t	rtc_initialized = FALSE;
clock_res_t	rtc_intr_nsec = NSEC_PER_HZ;	/* interrupt res */
uint64_t	rtc_cycle_count;	/* clocks in 1/20th second */
uint64_t	rtc_cyc_per_sec;	/* processor cycles per sec */
uint32_t	rtc_boot_frequency;	/* provided by 1st speed-step */
uint32_t	rtc_quant_scale;	/* clock to nanos multiplier */
uint32_t	rtc_quant_shift;	/* clock to nanos right shift */
uint64_t	rtc_decrementer_min;

static mach_timebase_info_data_t	rtc_lapic_scale; /* nsec to lapic count */

/*
 * Macros to lock/unlock real-time clock data.
 */
#define RTC_INTRS_OFF(s)		\
	(s) = splclock()

#define RTC_INTRS_ON(s)			\
	splx(s)

#define RTC_LOCK(s)			\
MACRO_BEGIN				\
	RTC_INTRS_OFF(s);		\
	simple_lock(&rtclock.lock);	\
MACRO_END

#define RTC_UNLOCK(s)			\
MACRO_BEGIN				\
	simple_unlock(&rtclock.lock);	\
	RTC_INTRS_ON(s);		\
MACRO_END

/*
 * i8254 control.  ** MONUMENT **
 *
 * The i8254 is a traditional PC device with some arbitrary characteristics.
 * Basically, it is a register that counts at a fixed rate and can be
 * programmed to generate an interrupt every N counts.  The count rate is
 * clknum counts per sec (see pit.h), historically 1193167=14.318MHz/12
 * but the more accurate value is 1193182=14.31818MHz/12.  [14.31818 MHz being
 * the master crystal oscillator reference frequency since the very first PC.]
 * Various constants are computed based on this value, and we calculate
 * them at init time for execution efficiency.  To obtain sufficient
 * accuracy, some of the calculations are most easily done in floating
 * point and then converted to int.
 */
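
/*
 * Worked numbers (editor's illustration, not in the original source):
 * with CLKNUM = 1193182 counts/sec, a 1/20 second sample window is
 * 1193182 / 20 = 59659 counts -- the SAMPLE_CLKS_INT value that
 * timeRDTSC() below programs into PIT counter 2.
 */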

/*
 * Forward decl.
 */

static uint64_t	rtc_set_cyc_per_sec(uint64_t cycles);
uint64_t	rtc_nanotime_read(void);

/*
 * create_mul_quant_GHZ
 * Create a constant used to multiply the TSC by to convert to nanoseconds.
 * This is a 32 bit number and the TSC *MUST* have a frequency higher than
 * 1000MHz for this routine to work.
 *
 * The theory here is that we know how many TSCs-per-sec the processor runs at.
 * Normally to convert this to nanoseconds you would multiply the current
 * timestamp by 1000000000 (a billion) then divide by TSCs-per-sec.
 * Unfortunately the TSC is 64 bits, which would leave us with 96-bit
 * intermediate results from the multiply that must then be divided.
 * Usually that's
 *	uint96 = tsc * numer
 *	nanos = uint96 / denom
 * Instead, we create this quant constant and it becomes the numerator;
 * the denominator can then be 0x100000000, which makes our division as
 * simple as forgetting the lower 32 bits of the result.  We can also pass
 * this number to user space as the numer and pass 0xFFFFFFFF
 * (RTC_FAST_DENOM) as the denom to convert raw counts to nanos.  The
 * difference is so small as to be undetectable by anything.
 *
 * Unfortunately we can not do this for sub-GHz processors.  In this case,
 * all we do is pass the CPU speed in raw as the denom and we pass in
 * 1000000000 as the numerator.  No short cuts allowed.
 */
#define RTC_FAST_DENOM	0xFFFFFFFF
inline static uint32_t
create_mul_quant_GHZ(int shift, uint32_t quant)
{
	return (uint32_t)((((uint64_t)NSEC_PER_SEC/20) << shift) / quant);
}
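
/*
 * Worked example (editor's illustration, not in the original source):
 * on a hypothetical 2GHz processor, quant = cycles per 1/20 sec =
 * 100000000, so with shift = 32:
 *	scale = (50000000 << 32) / 100000000 = 0x80000000
 * and the conversion (tsc * scale) >> 32 yields tsc/2 -- correct, since
 * each 2GHz cycle is exactly 0.5 nanoseconds.
 */
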
/*
 * This routine takes a value of raw TSC ticks and applies the passed
 * mul_quant generated by create_mul_quant_GHZ().  This is our internal
 * routine for creating nanoseconds.
 * Since we don't really have uint96_t this routine basically does this...
 *	uint96_t intermediate = (*value) * scale
 *	return (intermediate >> 32)
 */
inline static uint64_t
fast_get_nano_from_abs(uint64_t value, int scale)
{
	asm ("	movl	%%edx,%%esi	\n\t"
	     "	mull	%%ecx		\n\t"
	     "	movl	%%edx,%%edi	\n\t"
	     "	movl	%%esi,%%eax	\n\t"
	     "	mull	%%ecx		\n\t"
	     "	xorl	%%ecx,%%ecx	\n\t"
	     "	addl	%%edi,%%eax	\n\t"
	     "	adcl	%%ecx,%%edx	"
		: "+A" (value)
		: "c" (scale)
		: "%esi", "%edi");
	return value;
}
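
/*
 * Editor's note: a portable C sketch of the assembly above (illustrative
 * only, not in the original source; the original uses inline assembly to
 * stay on 32-bit registers).  It computes nanos = (value * scale) >> 32
 * without needing a uint96_t.
 */
#if 0
inline static uint64_t
fast_get_nano_from_abs_c(uint64_t value, uint32_t scale)
{
	uint64_t lo = (value & 0xFFFFFFFFULL) * scale;	/* low partial product */
	uint64_t hi = (value >> 32) * scale;		/* high partial product */

	return hi + (lo >> 32);		/* drop the low 32 bits of the product */
}
#endif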

/*
 * This routine basically does this...
 *	ts.tv_sec = nanos / 1000000000;		create seconds
 *	ts.tv_nsec = nanos % 1000000000;	create remainder nanos
 */
inline static mach_timespec_t
nanos_to_timespec(uint64_t nanos)
{
	union {
		mach_timespec_t ts;
		uint64_t u64;
	} ret;
	ret.u64 = nanos;
	asm volatile("divl %1" : "+A" (ret.u64) : "r" (NSEC_PER_SEC));
	return ret.ts;
}

/*
 * The following two routines perform the 96 bit arithmetic we need to
 * convert generic absolute<->nanoseconds
 * The multiply routine takes a uint64_t and a uint32_t and returns the result
 * in a uint32_t[3] array.
 * The divide routine takes this uint32_t[3] array and divides it by a uint32_t
 * returning a uint64_t
 */
inline static void
longmul(uint64_t *abstime, uint32_t multiplicand, uint32_t *result)
{
	asm volatile(
		"	pushl	%%ebx		\n\t"
		"	movl	%%eax,%%ebx	\n\t"
		"	movl	(%%eax),%%eax	\n\t"
		"	mull	%%ecx		\n\t"
		"	xchg	%%eax,%%ebx	\n\t"
		"	pushl	%%edx		\n\t"
		"	movl	4(%%eax),%%eax	\n\t"
		"	mull	%%ecx		\n\t"
		"	movl	%2,%%ecx	\n\t"
		"	movl	%%ebx,(%%ecx)	\n\t"
		"	popl	%%ebx		\n\t"
		"	addl	%%ebx,%%eax	\n\t"
		"	popl	%%ebx		\n\t"
		"	movl	%%eax,4(%%ecx)	\n\t"
		"	adcl	$0,%%edx	\n\t"
		"	movl	%%edx,8(%%ecx)	// and save it"
		: : "a"(abstime), "c"(multiplicand), "m"(result));

}

inline static uint64_t
longdiv(uint32_t *numer, uint32_t denom)
{
	uint64_t	result;
	asm volatile(
		"	pushl	%%ebx		\n\t"
		"	movl	%%eax,%%ebx	\n\t"
		"	movl	8(%%eax),%%edx	\n\t"
		"	movl	4(%%eax),%%eax	\n\t"
		"	divl	%%ecx		\n\t"
		"	xchg	%%ebx,%%eax	\n\t"
		"	movl	(%%eax),%%eax	\n\t"
		"	divl	%%ecx		\n\t"
		"	xchg	%%ebx,%%edx	\n\t"
		"	popl	%%ebx		\n\t"
		: "=A"(result) : "a"(numer),"c"(denom));
	return result;
}
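
/*
 * Editor's note: a portable C sketch of the longmul()/longdiv() pair
 * (illustrative only, not in the original source).  Like the assembly,
 * it assumes the final quotient fits in 64 bits, i.e. the top 32-bit
 * word of the 96-bit product is less than denom.
 */
#if 0
inline static uint64_t
longmul_longdiv_c(uint64_t abstime, uint32_t multiplicand, uint32_t denom)
{
	/* 96-bit product as hi (upper 64 bits) : lo32 (lower 32 bits) */
	uint64_t lo = (abstime & 0xFFFFFFFFULL) * multiplicand;
	uint64_t hi = (abstime >> 32) * multiplicand + (lo >> 32);
	uint32_t lo32 = (uint32_t)lo;

	/* divide the high part first, as longdiv() does */
	uint64_t q_hi = hi / denom;
	uint64_t rem  = hi % denom;
	uint64_t q_lo = ((rem << 32) | lo32) / denom;

	return (q_hi << 32) | q_lo;
}
#endif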

/*
 * Enable or disable timer 2.
 * Port 0x61 controls timer 2:
 *   bit 0 gates the clock,
 *   bit 1 gates output to speaker.
 */
inline static void
enable_PIT2(void)
{
	asm volatile(
		" inb	$0x61,%%al	\n\t"
		" and	$0xFC,%%al	\n\t"
		" or	$1,%%al		\n\t"
		" outb	%%al,$0x61	\n\t"
		: : : "%al" );
}

inline static void
disable_PIT2(void)
{
	asm volatile(
		" inb	$0x61,%%al	\n\t"
		" and	$0xFC,%%al	\n\t"
		" outb	%%al,$0x61	\n\t"
		: : : "%al" );
}

inline static void
set_PIT2(int value)
{
	/*
	 * First, tell the clock we are going to write 16 bits to the counter
	 * and enable one-shot mode (command 0xB8 to port 0x43)
	 * Then write the two bytes into the PIT2 clock register (port 0x42).
	 * Loop until the value is "realized" in the clock,
	 * this happens on the next tick.
	 */
	asm volatile(
		" movb	$0xB8,%%al	\n\t"
		" outb	%%al,$0x43	\n\t"
		" movb	%%dl,%%al	\n\t"
		" outb	%%al,$0x42	\n\t"
		" movb	%%dh,%%al	\n\t"
		" outb	%%al,$0x42	\n"
		"1:	inb	$0x42,%%al	\n\t"
		" inb	$0x42,%%al	\n\t"
		" cmp	%%al,%%dh	\n\t"
		" jne	1b"
		: : "d"(value) : "%al");
}

inline static uint64_t
get_PIT2(unsigned int *value)
{
	register uint64_t	result;
	/*
	 * This routine first latches the time (command 0x80 to port 0x43),
	 * then gets the time stamp so we know how long the read will take later.
	 * Read (from port 0x42) and return the current value of the timer.
	 */
	asm volatile(
		" xorl	%%ecx,%%ecx	\n\t"
		" movb	$0x80,%%al	\n\t"
		" outb	%%al,$0x43	\n\t"
		" rdtsc			\n\t"
		" pushl	%%eax		\n\t"
		" inb	$0x42,%%al	\n\t"
		" movb	%%al,%%cl	\n\t"
		" inb	$0x42,%%al	\n\t"
		" movb	%%al,%%ch	\n\t"
		" popl	%%eax	"
		: "=A"(result), "=c"(*value));
	return result;
}

/*
 * timeRDTSC()
 * This routine sets up PIT counter 2 to count down 1/20 of a second.
 * It pauses until the value is latched in the counter
 * and then reads the time stamp counter to return to the caller.
 */
static uint64_t
timeRDTSC(void)
{
	int		attempts = 0;
	uint64_t	latchTime;
	uint64_t	saveTime,intermediate;
	unsigned int	timerValue, lastValue;
	boolean_t	int_enabled;
	/*
	 * Table of correction factors to account for
	 *	- timer counter quantization errors, and
	 *	- undercounts 0..5
	 */
#define SAMPLE_CLKS_EXACT	(((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT		((int) CLKNUM / 20)
#define SAMPLE_NSECS		(2000000000LL)
#define SAMPLE_MULTIPLIER	(((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)		((uint64_t)((x) + 0.5))
	uint64_t	scale[6] = {
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
	};

	int_enabled = ml_set_interrupts_enabled(FALSE);

restart:
	if (attempts >= 2)
		panic("timeRDTSC() calibration failed with %d attempts\n", attempts);
	attempts++;
	enable_PIT2();		// turn on PIT2
	set_PIT2(0);		// reset timer 2 to be zero
	latchTime = rdtsc64();	// get the time stamp to time
	latchTime = get_PIT2(&timerValue) - latchTime;	// time how long this takes
	set_PIT2(SAMPLE_CLKS_INT);	// set up the timer for (almost) 1/20th of a second
	saveTime = rdtsc64();	// now time how long a 20th of a second is...
	get_PIT2(&lastValue);
	get_PIT2(&lastValue);	// read twice, first value may be unreliable
	do {
		intermediate = get_PIT2(&timerValue);
		if (timerValue > lastValue) {
			printf("Hey we are going backwards! %u -> %u, restarting timing\n",
			       lastValue,timerValue);
			set_PIT2(0);
			disable_PIT2();
			goto restart;
		}
		lastValue = timerValue;
	} while (timerValue > 5);
	kprintf("timerValue   %d\n",timerValue);
	kprintf("intermediate 0x%016llx\n",intermediate);
	kprintf("saveTime     0x%016llx\n",saveTime);

	intermediate -= saveTime;		// raw count for about 1/20 second
	intermediate *= scale[timerValue];	// rescale measured time spent
	intermediate /= SAMPLE_NSECS;		// so it's exactly 1/20 of a second
	intermediate += latchTime;		// add on our save fudge

	set_PIT2(0);		// reset timer 2 to be zero
	disable_PIT2();		// turn off PIT 2

	ml_set_interrupts_enabled(int_enabled);
	return intermediate;
}
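
/*
 * Worked example (editor's illustration, not in the original source):
 * on a 2GHz processor the 59659-count window above lasts 1/20 sec, so
 * (intermediate - saveTime) measures about 100000000 TSC ticks.  With a
 * clean latch (timerValue undercount of 0), scale[0] is about 2e9, and
 * 100000000 * scale[0] / SAMPLE_NSECS returns about 100000000 -- i.e.
 * the TSC ticks per exact 1/20 second handed to rtc_set_cyc_per_sec().
 */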

static uint64_t
tsc_to_nanoseconds(uint64_t abstime)
{
	uint32_t	numer;
	uint32_t	denom;
	uint32_t	intermediate[3];

	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	if (denom == RTC_FAST_DENOM) {
		abstime = fast_get_nano_from_abs(abstime, numer);
	} else {
		longmul(&abstime, numer, intermediate);
		abstime = longdiv(intermediate, denom);
	}
	return abstime;
}

inline static mach_timespec_t
tsc_to_timespec(void)
{
	uint64_t	currNanos;
	currNanos = rtc_nanotime_read();
	return nanos_to_timespec(currNanos);
}

#define DECREMENTER_MAX		UINT_MAX
static uint32_t
deadline_to_decrementer(
	uint64_t	deadline,
	uint64_t	now)
{
	uint64_t	delta;

	if (deadline <= now)
		return rtc_decrementer_min;
	else {
		delta = deadline - now;
		return MIN(MAX(rtc_decrementer_min,delta),DECREMENTER_MAX);
	}
}

static inline uint64_t
lapic_time_countdown(uint32_t initial_count)
{
	boolean_t		state;
	uint64_t		start_time;
	uint64_t		stop_time;
	lapic_timer_count_t	count;

	state = ml_set_interrupts_enabled(FALSE);
	lapic_set_timer(FALSE, one_shot, divide_by_1, initial_count);
	start_time = rdtsc64();
	do {
		lapic_get_timer(NULL, NULL, NULL, &count);
	} while (count > 0);
	stop_time = rdtsc64();
	ml_set_interrupts_enabled(state);

	return tsc_to_nanoseconds(stop_time - start_time);
}

static void
rtc_lapic_timer_calibrate(void)
{
	uint32_t	nsecs;
	uint64_t	countdown;

	if (!(cpuid_features() & CPUID_FEATURE_APIC))
		return;

	/*
	 * Set the local apic timer counting down to zero without an interrupt.
	 * Use the timestamp to calculate how long this takes.
	 */
	nsecs = (uint32_t) lapic_time_countdown(rtc_intr_nsec);

	/*
	 * Compute a countdown ratio for a given time in nanoseconds.
	 * That is, countdown = time * numer / denom.
	 */
	countdown = (uint64_t)rtc_intr_nsec * (uint64_t)rtc_intr_nsec / nsecs;

	nsecs = (uint32_t) lapic_time_countdown((uint32_t) countdown);

	rtc_lapic_scale.numer = countdown;
	rtc_lapic_scale.denom = nsecs;

	kprintf("rtc_lapic_timer_calibrate() scale: %d/%d\n",
		(uint32_t) countdown, nsecs);
}
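
/*
 * Worked example (editor's illustration, not in the original source):
 * suppose the local apic timer runs at 100MHz.  The first countdown of
 * rtc_intr_nsec = 10000000 counts takes 100000000 ns, so
 *	countdown = 10000000 * 10000000 / 100000000 = 1000000
 * counts, which the second pass measures at roughly 10000000 ns.  The
 * resulting scale 1000000/10000000 encodes 0.1 counts per nanosecond,
 * i.e. the 100MHz timer rate.
 */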

static void
rtc_lapic_set_timer(
	uint32_t	interval)
{
	uint64_t	count;

	assert(rtc_lapic_scale.denom);

	count = interval * (uint64_t) rtc_lapic_scale.numer;
	count /= rtc_lapic_scale.denom;

	lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);
}

static void
rtc_lapic_start_ticking(void)
{
	uint64_t	abstime;
	uint64_t	first_tick;
	uint64_t	decr;

	abstime = mach_absolute_time();
	first_tick = abstime + NSEC_PER_HZ;
	current_cpu_datap()->cpu_rtc_tick_deadline = first_tick;
	decr = deadline_to_decrementer(first_tick, abstime);
	rtc_lapic_set_timer(decr);
}

/*
 * Configure the real-time clock device. Return success (1)
 * or failure (0).
 */

int
sysclk_config(void)
{

	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return(1);
	}
	mp_enable_preemption();

	timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

	simple_lock_init(&rtclock.lock, 0);

	return (1);
}

/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (tsc) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * However, the cpu clock rate is not only platform-dependent but can change
 * (speed-step) dynamically.  Hence tsc is converted into nanoseconds which is
 * identical to mach_absolute_time.  The conversion of tsc to nanoseconds is
 * encapsulated by nanotime.
 *
 * The kernel maintains nanotime information recording:
 *	- the current ratio of tsc to nanoseconds
 *	  with this ratio expressed as a 32-bit scale and shift
 *	  (power of 2 divider);
 *	- the tsc (step_tsc) and nanotime (step_ns) at which the current
 *	  ratio (clock speed) began.
 * So a tsc value can be converted to nanotime by:
 *
 *	nanotime = (((tsc - step_tsc)*scale) >> shift) + step_ns
 *
 * In general, (tsc - step_tsc) is a 64-bit quantity with the scaling
 * involving a 96-bit intermediate value.  However, by saving the converted
 * values at each tick (or at any intervening speed-step) - base_tsc and
 * base_ns - we can perform conversions relative to these and be assured that
 * (tsc - base_tsc) is 32-bits.  Hence:
 *
 *	fast_nanotime = (((tsc - base_tsc)*scale) >> shift) + base_ns
 *
 * The tuple {base_tsc, base_ns, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read.  A duplicate check_tsc is
 * appended so that the consistency of the read can be verified.  Note that
 * this scheme is essential for MP systems in which the commpage is updated
 * by the master cpu but may be read concurrently by other cpus.
 */
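
/*
 * Editor's sketch (not in the original source) of how a commpage reader
 * uses the duplicated tsc for a consistent lock-free snapshot.  Field
 * names follow the commpage_nanotime_t fields set below; nt_check_tsc is
 * assumed here to be the duplicate check_tsc described above.
 */
#if 0
static uint64_t
commpage_nanotime_read_sketch(volatile commpage_nanotime_t *cp)
{
	uint64_t	base_tsc, base_ns, tsc;
	uint32_t	scale, shift;

	do {
		base_tsc = cp->nt_base_tsc;
		base_ns  = cp->nt_base_ns;
		scale    = cp->nt_scale;
		shift    = cp->nt_shift;
		tsc      = rdtsc64();
	} while (base_tsc != cp->nt_check_tsc);	/* retry if updated mid-read */

	return base_ns + (((tsc - base_tsc) * scale) >> shift);
}
#endif
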
static inline void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
	commpage_nanotime_t	cp_nanotime;

	/* Only the master cpu updates the commpage */
	if (cpu_number() != master_cpu)
		return;

	cp_nanotime.nt_base_tsc = rntp->rnt_tsc;
	cp_nanotime.nt_base_ns = rntp->rnt_nanos;
	cp_nanotime.nt_scale = rntp->rnt_scale;
	cp_nanotime.nt_shift = rntp->rnt_shift;

	commpage_set_nanotime(&cp_nanotime);
}

static void
rtc_nanotime_init(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	rtc_nanotime_t	*master_rntp = &cpu_datap(master_cpu)->cpu_rtc_nanotime;

	if (cpu_number() == master_cpu) {
		rntp->rnt_tsc = rdtsc64();
		rntp->rnt_nanos = tsc_to_nanoseconds(rntp->rnt_tsc);
		rntp->rnt_scale = rtc_quant_scale;
		rntp->rnt_shift = rtc_quant_shift;
		rntp->rnt_step_tsc = 0ULL;
		rntp->rnt_step_nanos = 0ULL;
	} else {
		/*
		 * Copy master processor's nanotime info.
		 * Loop required in case this changes while copying.
		 */
		do {
			*rntp = *master_rntp;
		} while (rntp->rnt_tsc != master_rntp->rnt_tsc);
	}
}

static inline void
_rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t tsc)
{
	uint64_t	tsc_delta;
	uint64_t	ns_delta;

	tsc_delta = tsc - rntp->rnt_step_tsc;
	ns_delta = tsc_to_nanoseconds(tsc_delta);
	rntp->rnt_nanos = rntp->rnt_step_nanos + ns_delta;
	rntp->rnt_tsc = tsc;
}

static void
rtc_nanotime_update(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	_rtc_nanotime_update(rntp, rdtsc64());
	rtc_nanotime_set_commpage(rntp);
}

static void
rtc_nanotime_scale_update(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	uint64_t	tsc = rdtsc64();

	assert(!ml_get_interrupts_enabled());

	/*
	 * Update time based on past scale.
	 */
	_rtc_nanotime_update(rntp, tsc);

	/*
	 * Update scale and timestamp this update.
	 */
	rntp->rnt_scale = rtc_quant_scale;
	rntp->rnt_shift = rtc_quant_shift;
	rntp->rnt_step_tsc = rntp->rnt_tsc;
	rntp->rnt_step_nanos = rntp->rnt_nanos;

	/* Export update to userland */
	rtc_nanotime_set_commpage(rntp);
}

static uint64_t
_rtc_nanotime_read(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	uint64_t	rnt_tsc;
	uint32_t	rnt_scale;
	uint32_t	rnt_shift;
	uint64_t	rnt_nanos;
	uint64_t	tsc;
	uint64_t	tsc_delta;

	rnt_scale = rntp->rnt_scale;
	if (rnt_scale == 0)
		return 0ULL;

	rnt_shift = rntp->rnt_shift;
	rnt_nanos = rntp->rnt_nanos;
	rnt_tsc = rntp->rnt_tsc;
	tsc = rdtsc64();

	tsc_delta = tsc - rnt_tsc;
	if ((tsc_delta >> 32) != 0)
		return rnt_nanos + tsc_to_nanoseconds(tsc_delta);

	/* Let the compiler optimize(?): */
	if (rnt_shift == 32)
		return rnt_nanos + ((tsc_delta * rnt_scale) >> 32);
	else
		return rnt_nanos + ((tsc_delta * rnt_scale) >> rnt_shift);
}

uint64_t
rtc_nanotime_read(void)
{
	uint64_t	result;
	uint64_t	rnt_tsc;
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	/*
	 * Use timestamp to ensure the uptime record isn't changed.
	 * This avoids disabling interrupts.
	 * And note this is a per-cpu structure, hence no locking.
	 */
	do {
		rnt_tsc = rntp->rnt_tsc;
		result = _rtc_nanotime_read();
	} while (rnt_tsc != rntp->rnt_tsc);

	return result;
}


/*
 * This function is called by the speed-step driver when a
 * change of cpu clock frequency is about to occur.
 * The scale is not changed until rtc_clock_stepped() is called.
 * Between these times there is an uncertainty as to exactly when
 * the change takes effect.  FIXME: by using another timing source
 * we could eliminate this error.
 */
void
rtc_clock_stepping(__unused uint32_t new_frequency,
		   __unused uint32_t old_frequency)
{
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);
	rtc_nanotime_scale_update();
	ml_set_interrupts_enabled(istate);
}

/*
 * This function is called by the speed-step driver when a
 * change of cpu clock frequency has just occurred.  This change
 * is expressed as a ratio relative to the boot clock rate.
 */
void
rtc_clock_stepped(uint32_t new_frequency, uint32_t old_frequency)
{
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);
	if (rtc_boot_frequency == 0) {
		/*
		 * At the first ever stepping, old frequency is the real
		 * initial clock rate.  This step and all others are based
		 * relative to this initial frequency at which the tsc
		 * calibration was made.  Hence we must remember this base
		 * frequency as reference.
		 */
		rtc_boot_frequency = old_frequency;
	}
	rtc_set_cyc_per_sec(rtc_cycle_count * new_frequency /
				rtc_boot_frequency);
	rtc_nanotime_scale_update();
	ml_set_interrupts_enabled(istate);
}

/*
 * rtc_sleep_wakeup() is called from ACPI on awakening from S3 sleep
 */
void
rtc_sleep_wakeup(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);

	/*
	 * Reset nanotime.
	 * The timestamp counter will have been reset
	 * but nanotime (uptime) marches onward.
	 * We assume that we're still at the former cpu frequency.
	 */
	rntp->rnt_tsc = rdtsc64();
	rntp->rnt_step_tsc = 0ULL;
	rntp->rnt_step_nanos = rntp->rnt_nanos;
	rtc_nanotime_set_commpage(rntp);

	/* Restart tick interrupts from the LAPIC timer */
	rtc_lapic_start_ticking();

	ml_set_interrupts_enabled(istate);
}

/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
sysclk_init(void)
{
	uint64_t	cycles;

	mp_disable_preemption();
	if (cpu_number() == master_cpu) {
		/*
		 * Perform calibration.
		 * The PIT is used as the reference to compute how many
		 * TSC counts (cpu clock cycles) occur per second.
		 */
		rtc_cycle_count = timeRDTSC();
		cycles = rtc_set_cyc_per_sec(rtc_cycle_count);

		/*
		 * Set min/max to actual.
		 * ACPI may update these later if speed-stepping is detected.
		 */
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
		printf("[RTCLOCK] frequency %llu (%llu)\n",
		       cycles, rtc_cyc_per_sec);

		rtc_lapic_timer_calibrate();

		/* Minimum interval is 1usec */
		rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC,
							      0ULL);
		/* Point LAPIC interrupts to hardclock() */
		lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

		clock_timebase_init();
		rtc_initialized = TRUE;
	}

	rtc_nanotime_init();

	rtc_lapic_start_ticking();

	mp_enable_preemption();

	return (1);
}

/*
 * Get the clock device time. This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 */
static kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*cur_time)	/* OUT */
{
	*cur_time = tsc_to_timespec();
	return (KERN_SUCCESS);
}

kern_return_t
sysclk_gettime(
	mach_timespec_t	*cur_time)	/* OUT */
{
	return sysclk_gettime_internal(cur_time);
}

void
sysclk_gettime_interrupts_disabled(
	mach_timespec_t	*cur_time)	/* OUT */
{
	(void) sysclk_gettime_internal(cur_time);
}

// utility routine
// Code to calculate how many processor cycles are in a second...

static uint64_t
rtc_set_cyc_per_sec(uint64_t cycles)
{

	if (cycles > (NSEC_PER_SEC/20)) {
		// we can use just a "fast" multiply to get nanos
		rtc_quant_shift = 32;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
		rtclock.timebase_const.numer = rtc_quant_scale;	// timeRDTSC is 1/20
		rtclock.timebase_const.denom = RTC_FAST_DENOM;
	} else {
		rtc_quant_shift = 26;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
		rtclock.timebase_const.numer = NSEC_PER_SEC/20;	// timeRDTSC is 1/20
		rtclock.timebase_const.denom = cycles;
	}
	rtc_cyc_per_sec = cycles*20;	// multiply it by 20 and we are done..
					// BUT we also want to calculate...

	cycles = ((rtc_cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
			/ UI_CPUFREQ_ROUNDING_FACTOR)
				* UI_CPUFREQ_ROUNDING_FACTOR;

	/*
	 * Set current measured speed.
	 */
	if (cycles >= 0x100000000ULL) {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
	} else {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
	}
	gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

	kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, rtc_cyc_per_sec);
	return(cycles);
}

void
clock_get_system_microtime(
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	mach_timespec_t	now;

	(void) sysclk_gettime_internal(&now);

	*secs = now.tv_sec;
	*microsecs = now.tv_nsec / NSEC_PER_USEC;
}

void
clock_get_system_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	mach_timespec_t	now;

	(void) sysclk_gettime_internal(&now);

	*secs = now.tv_sec;
	*nanosecs = now.tv_nsec;
}

/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
		*(clock_res_t *) attr = rtc_intr_nsec;
		break;

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MAXRES:
	case CLOCK_ALARM_MINRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}

/*
 * Set next alarm time for the clock device. This call
 * always resets the time to deliver an alarm for the
 * clock.
 */
void
sysclk_setalarm(
	mach_timespec_t	*alarm_time)
{
	timer_call_enter(&rtclock_alarm_timer,
			 (uint64_t) alarm_time->tv_sec * NSEC_PER_SEC
				+ alarm_time->tv_nsec);
}

/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
	return bbc_config();
}

/*
 * Initialize calendar clock.
 */
int
calend_init(void)
{
	return (1);
}

/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*cur_time)	/* OUT */
{
	spl_t	s;

	RTC_LOCK(s);
	if (!rtclock.calend_is_set) {
		RTC_UNLOCK(s);
		return (KERN_FAILURE);
	}

	(void) sysclk_gettime_internal(cur_time);
	ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
	RTC_UNLOCK(s);

	return (KERN_SUCCESS);
}

void
clock_get_calendar_microtime(
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	mach_timespec_t	now;

	calend_gettime(&now);

	*secs = now.tv_sec;
	*microsecs = now.tv_nsec / NSEC_PER_USEC;
}

void
clock_get_calendar_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	mach_timespec_t	now;

	calend_gettime(&now);

	*secs = now.tv_sec;
	*nanosecs = now.tv_nsec;
}

void
clock_set_calendar_microtime(
	uint32_t	secs,
	uint32_t	microsecs)
{
	mach_timespec_t	new_time, curr_time;
	uint32_t	old_offset;
	spl_t		s;

	new_time.tv_sec = secs;
	new_time.tv_nsec = microsecs * NSEC_PER_USEC;

	RTC_LOCK(s);
	old_offset = rtclock.calend_offset.tv_sec;
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset = new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.boottime += rtclock.calend_offset.tv_sec - old_offset;
	rtclock.calend_is_set = TRUE;
	RTC_UNLOCK(s);

	(void) bbc_settime(&new_time);

	host_notify_calendar_change();
}

/*
 * Get calendar clock attributes.
 */
kern_return_t
calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
		*(clock_res_t *) attr = rtc_intr_nsec;
		break;

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}

#define tickadj		(40*NSEC_PER_USEC)	/* "standard" skew, ns / tick */
#define bigadj		(NSEC_PER_SEC)		/* use 10x skew above bigadj ns */

uint32_t
clock_set_calendar_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	int64_t		total, ototal;
	uint32_t	interval = 0;
	spl_t		s;

	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	RTC_LOCK(s);
	ototal = rtclock.calend_adjtotal;

	if (total != 0) {
		int32_t		delta = tickadj;

		if (total > 0) {
			if (total > bigadj)
				delta *= 10;
			if (delta > total)
				delta = total;
		}
		else {
			if (total < -bigadj)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;
		}

		rtclock.calend_adjtotal = total;
		rtclock.calend_adjdelta = delta;

		interval = NSEC_PER_HZ;
	}
	else
		rtclock.calend_adjdelta = rtclock.calend_adjtotal = 0;

	RTC_UNLOCK(s);

	if (ototal == 0)
		*secs = *microsecs = 0;
	else {
		*secs = ototal / NSEC_PER_SEC;
		*microsecs = (ototal % NSEC_PER_SEC) / NSEC_PER_USEC;
	}

	return (interval);
}
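
/*
 * Worked example (editor's illustration, not in the original source):
 * an adjtime() of +2 seconds gives total = 2000000000 ns > bigadj, so
 * delta becomes 10 * tickadj = 400000 ns.  The calendar is then slewed
 * 400000 ns per 10 ms tick by clock_adjust_calendar() below, completing
 * the adjustment in 2000000000 / 400000 = 5000 ticks, i.e. 50 seconds.
 */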

uint32_t
clock_adjust_calendar(void)
{
	uint32_t	interval = 0;
	int32_t		delta;
	spl_t		s;

	RTC_LOCK(s);
	delta = rtclock.calend_adjdelta;
	ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, delta);

	rtclock.calend_adjtotal -= delta;

	if (delta > 0) {
		if (delta > rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}
	else
	if (delta < 0) {
		if (delta < rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}

	if (rtclock.calend_adjdelta != 0)
		interval = NSEC_PER_HZ;

	RTC_UNLOCK(s);

	return (interval);
}

void
clock_initialize_calendar(void)
{
	mach_timespec_t	bbc_time, curr_time;
	spl_t		s;

	if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
		return;

	RTC_LOCK(s);
	if (rtclock.boottime == 0)
		rtclock.boottime = bbc_time.tv_sec;
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset = bbc_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	RTC_UNLOCK(s);

	host_notify_calendar_change();
}

void
clock_get_boottime_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	*secs = rtclock.boottime;
	*nanosecs = 0;
}

void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	info->numer = info->denom = 1;
}

void
clock_set_timer_deadline(
	uint64_t	deadline)
{
	spl_t		s;
	cpu_data_t	*pp = current_cpu_datap();
	rtclock_timer_t	*mytimer = &pp->cpu_rtc_timer;
	uint64_t	abstime;
	uint64_t	decr;

	assert(get_preemption_level() > 0);
	assert(rtclock_timer_expire);

	RTC_INTRS_OFF(s);
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if (!mytimer->has_expired) {
		abstime = mach_absolute_time();
		if (mytimer->deadline < pp->cpu_rtc_tick_deadline) {
			decr = deadline_to_decrementer(mytimer->deadline,
						       abstime);
			rtc_lapic_set_timer(decr);
			pp->cpu_rtc_intr_deadline = mytimer->deadline;
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) |
					DBG_FUNC_NONE, decr, 2, 0, 0, 0);
		}
	}
	RTC_INTRS_ON(s);
}

void
clock_set_timer_func(
	clock_timer_func_t	func)
{
	if (rtclock_timer_expire == NULL)
		rtclock_timer_expire = func;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(struct i386_interrupt_state *regs)
{
	uint64_t	abstime;
	uint32_t	latency;
	uint64_t	decr;
	uint64_t	decr_tick;
	uint64_t	decr_timer;
	cpu_data_t	*pp = current_cpu_datap();
	rtclock_timer_t	*mytimer = &pp->cpu_rtc_timer;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	abstime = _rtc_nanotime_read();
	latency = (uint32_t)(abstime - pp->cpu_rtc_intr_deadline);
	if (pp->cpu_rtc_tick_deadline <= abstime) {
		rtc_nanotime_update();
		clock_deadline_for_periodic_event(
			NSEC_PER_HZ, abstime, &pp->cpu_rtc_tick_deadline);
		hertz_tick(
#if STAT_TIME
			NSEC_PER_HZ,
#endif
			(regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0),
			regs->eip);
	}

	abstime = _rtc_nanotime_read();
	if (mytimer->is_set && mytimer->deadline <= abstime) {
		mytimer->has_expired = TRUE;
		mytimer->is_set = FALSE;
		(*rtclock_timer_expire)(abstime);
		assert(!ml_get_interrupts_enabled());
		mytimer->has_expired = FALSE;
	}

	/* Log the interrupt service latency (-ve value expected by tool) */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		-latency, (uint32_t)regs->eip, 0, 0, 0);

	abstime = _rtc_nanotime_read();
	decr_tick = deadline_to_decrementer(pp->cpu_rtc_tick_deadline, abstime);
	decr_timer = (mytimer->is_set) ?
			deadline_to_decrementer(mytimer->deadline, abstime) :
			DECREMENTER_MAX;
	decr = MIN(decr_tick, decr_timer);
	pp->cpu_rtc_intr_deadline = abstime + decr;

	rtc_lapic_set_timer(decr);

	/* Log the new decrementer value */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE,
		decr, 3, 0, 0, 0);

}

static void
rtclock_alarm_expire(
	__unused timer_call_param_t	p0,
	__unused timer_call_param_t	p1)
{
	mach_timespec_t	clock_time;

	(void) sysclk_gettime_internal(&clock_time);

	clock_alarm_intr(SYSTEM_CLOCK, &clock_time);
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = rtc_nanotime_read();
}

uint64_t
mach_absolute_time(void)
{
	return rtc_nanotime_read();
}

void
absolutetime_to_microtime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint32_t	remain;

	asm volatile(
		"divl %3"
			: "=a" (*secs), "=d" (remain)
			: "A" (abstime), "r" (NSEC_PER_SEC));
	asm volatile(
		"divl %3"
			: "=a" (*microsecs)
			: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}

void
clock_interval_to_deadline(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t	abstime;

	clock_get_uptime(result);

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result += abstime;
}

void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	*result = (uint64_t)interval * scale_factor;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t	abstime,
	uint64_t	*result)
{
	clock_get_uptime(result);

	*result += abstime;
}

void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	*result = abstime;
}

void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	*result = nanoseconds;
}

void
machine_delay_until(
	uint64_t	deadline)
{
	uint64_t	now;

	do {
		cpu_pause();
		now = mach_absolute_time();
	} while (now < deadline);
}