/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * File:     i386/rtclock.c
 * Purpose:  Routines for handling the machine dependent
 *           real-time clock. Historically, this clock is
 *           generated by the Intel 8254 Programmable Interval
 *           Timer, but local apic timers are now used for
 *           this purpose with the master time reference being
 *           the cpu clock counted by the timestamp MSR.
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>         /* for kernel_map */
#include <i386/ipl.h>
#include <i386/pit.h>
#include <i386/pio.h>
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/cpu_threads.h>
#include <i386/perfmon.h>
#include <i386/machine_routines.h>
#include <i386/AT386/bbclock_entries.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>

#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)>(b))?(b):(a))

#define NSEC_PER_HZ (NSEC_PER_SEC / 100) /* nsec per tick */

#define UI_CPUFREQ_ROUNDING_FACTOR 10000000

int sysclk_config(void);

int sysclk_init(void);

kern_return_t sysclk_gettime(
    mach_timespec_t *cur_time);

kern_return_t sysclk_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,
    mach_msg_type_number_t *count);

void sysclk_setalarm(
    mach_timespec_t *alarm_time);

/*
 * Lists of clock routines.
 */
struct clock_ops sysclk_ops = {
    sysclk_config,   sysclk_init,
    sysclk_gettime,  0,
    sysclk_getattr,  0,
    sysclk_setalarm,
};

int calend_config(void);

int calend_init(void);

kern_return_t calend_gettime(
    mach_timespec_t *cur_time);

kern_return_t calend_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,
    mach_msg_type_number_t *count);

struct clock_ops calend_ops = {
    calend_config,   calend_init,
    calend_gettime,  0,
    calend_getattr,  0,
    0,
};

/* local data declarations */

static clock_timer_func_t rtclock_timer_expire;

static timer_call_data_t rtclock_alarm_timer;

static void rtclock_alarm_expire(
    timer_call_param_t p0,
    timer_call_param_t p1);

struct {
    mach_timespec_t calend_offset;
    boolean_t       calend_is_set;

    int64_t         calend_adjtotal;
    int32_t         calend_adjdelta;

    uint32_t        boottime;

    mach_timebase_info_data_t timebase_const;

    decl_simple_lock_data(,lock)    /* real-time clock device lock */
} rtclock;

boolean_t   rtc_initialized = FALSE;
clock_res_t rtc_intr_nsec = NSEC_PER_HZ;    /* interrupt res */
uint64_t    rtc_cycle_count;        /* clocks in 1/20th second */
uint64_t    rtc_cyc_per_sec;        /* processor cycles per sec */
uint32_t    rtc_boot_frequency;     /* provided by 1st speed-step */
uint32_t    rtc_quant_scale;        /* clock to nanos multiplier */
uint32_t    rtc_quant_shift;        /* clock to nanos right shift */
uint64_t    rtc_decrementer_min;

static mach_timebase_info_data_t rtc_lapic_scale; /* nsec to lapic count */

/*
 * Macros to lock/unlock real-time clock data.
 */
#define RTC_INTRS_OFF(s)    \
    (s) = splclock()

#define RTC_INTRS_ON(s)     \
    splx(s)

#define RTC_LOCK(s)                     \
MACRO_BEGIN                             \
    RTC_INTRS_OFF(s);                   \
    simple_lock(&rtclock.lock);         \
MACRO_END

#define RTC_UNLOCK(s)                   \
MACRO_BEGIN                             \
    simple_unlock(&rtclock.lock);       \
    RTC_INTRS_ON(s);                    \
MACRO_END

/*
 * i8254 control.  ** MONUMENT **
 *
 * The i8254 is a traditional PC device with some arbitrary characteristics.
 * Basically, it is a register that counts at a fixed rate and can be
 * programmed to generate an interrupt every N counts. The count rate is
 * clknum counts per sec (see pit.h), historically 1193167=14.318MHz/12
 * but the more accurate value is 1193182=14.31818MHz/12. [14.31818 MHz being
 * the master crystal oscillator reference frequency since the very first PC.]
 * Various constants are computed based on this value, and we calculate
 * them at init time for execution efficiency. To obtain sufficient
 * accuracy, some of the calculations are most easily done in floating
 * point and then converted to int.
 *
 */

/*
 * Forward decl.
 */

static uint64_t rtc_set_cyc_per_sec(uint64_t cycles);
uint64_t rtc_nanotime_read(void);

/*
 * create_mul_quant_GHZ
 * Create the constant used to multiply the TSC by to convert it to
 * nanoseconds. This is a 32-bit number and the TSC *MUST* have a frequency
 * higher than 1000MHz for this routine to work.
 *
 * The theory here is that we know how many TSCs-per-sec the processor runs at.
 * Normally to convert this to nanoseconds you would multiply the current
 * timestamp by 1000000000 (a billion) then divide by TSCs-per-sec.
 * Unfortunately the TSC is 64 bits, which would leave us with 96-bit
 * intermediate results from the multiply that must then be divided.
 * Usually that's
 *     uint96 = tsc * numer
 *     nanos  = uint96 / denom
 * Instead, we create this quant constant and it becomes the numerator;
 * the denominator can then be 0x100000000, which makes our division as simple
 * as forgetting the lower 32 bits of the result. We can also pass this number
 * to user space as the numer and pass 0xFFFFFFFF (RTC_FAST_DENOM) as the
 * denom to convert raw counts to nanos. The difference is so small as to be
 * undetectable by anything.
 *
 * Unfortunately we cannot do this for sub-GHZ processors. In that case, all
 * we do is pass the CPU speed in raw as the denom and we pass in 1000000000
 * as the numerator. No shortcuts allowed.
 */
#define RTC_FAST_DENOM 0xFFFFFFFF
inline static uint32_t
create_mul_quant_GHZ(int shift, uint32_t quant)
{
    return (uint32_t)((((uint64_t)NSEC_PER_SEC/20) << shift) / quant);
}
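
/*
 * Worked example (illustrative values, not from the source): for a 2GHz
 * processor, quant (cycles per 1/20 second) is 100000000. With shift = 32,
 *     scale = ((1000000000/20) << 32) / 100000000 = 0x80000000
 * and converting 2000000000 ticks (one second at 2GHz) gives
 *     nanos = (2000000000 * 0x80000000) >> 32 = 1000000000.
 */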
/*
 * This routine takes a value of raw TSC ticks and applies the passed
 * mul_quant generated by create_mul_quant_GHZ(). This is our internal
 * routine for creating nanoseconds.
 * Since we don't really have uint96_t this routine basically does this...
 *     uint96_t intermediate = (*value) * scale
 *     return (intermediate >> 32)
 */
inline static uint64_t
fast_get_nano_from_abs(uint64_t value, int scale)
{
    asm ("  movl    %%edx,%%esi  \n\t"
         "  mull    %%ecx        \n\t"
         "  movl    %%edx,%%edi  \n\t"
         "  movl    %%esi,%%eax  \n\t"
         "  mull    %%ecx        \n\t"
         "  xorl    %%ecx,%%ecx  \n\t"
         "  addl    %%edi,%%eax  \n\t"
         "  adcl    %%ecx,%%edx  "
         : "+A" (value)
         : "c" (scale)
         : "%esi", "%edi");
    return value;
}
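
/*
 * A portable C sketch of the assembly above (an illustrative equivalent,
 * not part of the original source): split the 64-bit value into 32-bit
 * halves, form the two partial products, and keep bits 32..95:
 *     uint64_t lo = (uint64_t)(uint32_t)value * (uint32_t)scale;
 *     uint64_t hi = (value >> 32) * (uint32_t)scale;
 *     return hi + (lo >> 32);
 */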

/*
 * This routine basically does this...
 *     ts.tv_sec  = nanos / 1000000000;    create seconds
 *     ts.tv_nsec = nanos % 1000000000;    create remainder nanos
 */
inline static mach_timespec_t
nanos_to_timespec(uint64_t nanos)
{
    union {
        mach_timespec_t ts;
        uint64_t        u64;
    } ret;
    ret.u64 = nanos;
    asm volatile("divl %1" : "+A" (ret.u64) : "r" (NSEC_PER_SEC));
    return ret.ts;
}
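
/*
 * The divl leaves the quotient (seconds) in %eax and the remainder
 * (nanoseconds) in %edx, which line up with tv_sec and tv_nsec in the
 * union. An illustrative plain-C equivalent:
 *     ret.ts.tv_sec  = nanos / NSEC_PER_SEC;
 *     ret.ts.tv_nsec = nanos % NSEC_PER_SEC;
 */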

/*
 * The following two routines perform the 96 bit arithmetic we need to
 * convert generic absolute<->nanoseconds.
 * The multiply routine takes a uint64_t and a uint32_t and returns the result
 * in a uint32_t[3] array.
 * The divide routine takes this uint32_t[3] array and divides it by a
 * uint32_t, returning a uint64_t.
 */
inline static void
longmul(uint64_t *abstime, uint32_t multiplicand, uint32_t *result)
{
    asm volatile(
        "  pushl   %%ebx           \n\t"
        "  movl    %%eax,%%ebx     \n\t"
        "  movl    (%%eax),%%eax   \n\t"
        "  mull    %%ecx           \n\t"
        "  xchg    %%eax,%%ebx     \n\t"
        "  pushl   %%edx           \n\t"
        "  movl    4(%%eax),%%eax  \n\t"
        "  mull    %%ecx           \n\t"
        "  movl    %2,%%ecx        \n\t"
        "  movl    %%ebx,(%%ecx)   \n\t"
        "  popl    %%ebx           \n\t"
        "  addl    %%ebx,%%eax     \n\t"
        "  popl    %%ebx           \n\t"
        "  movl    %%eax,4(%%ecx)  \n\t"
        "  adcl    $0,%%edx        \n\t"
        "  movl    %%edx,8(%%ecx)  // and save it"
        : : "a"(abstime), "c"(multiplicand), "m"(result));
}

inline static uint64_t
longdiv(uint32_t *numer, uint32_t denom)
{
    uint64_t result;
    asm volatile(
        "  pushl   %%ebx           \n\t"
        "  movl    %%eax,%%ebx     \n\t"
        "  movl    8(%%eax),%%edx  \n\t"
        "  movl    4(%%eax),%%eax  \n\t"
        "  divl    %%ecx           \n\t"
        "  xchg    %%ebx,%%eax     \n\t"
        "  movl    (%%eax),%%eax   \n\t"
        "  divl    %%ecx           \n\t"
        "  xchg    %%ebx,%%edx     \n\t"
        "  popl    %%ebx           \n\t"
        : "=A"(result) : "a"(numer), "c"(denom));
    return result;
}
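
/*
 * A portable C sketch of longmul() (an illustrative equivalent, not from
 * the original source); result[] holds the 96-bit product least
 * significant word first:
 *     uint64_t lo = (uint64_t)(uint32_t)*abstime * multiplicand;
 *     uint64_t hi = (*abstime >> 32) * multiplicand + (lo >> 32);
 *     result[0] = (uint32_t)lo;
 *     result[1] = (uint32_t)hi;
 *     result[2] = (uint32_t)(hi >> 32);
 * longdiv() then divides that 96-bit value by a 32-bit divisor using two
 * chained divl instructions, returning the 64-bit quotient.
 */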

/*
 * Enable or disable timer 2.
 * Port 0x61 controls timer 2:
 *   bit 0 gates the clock,
 *   bit 1 gates output to speaker.
 */
inline static void
enable_PIT2(void)
{
    asm volatile(
        "  inb     $0x61,%%al  \n\t"
        "  and     $0xFC,%%al  \n\t"
        "  or      $1,%%al     \n\t"
        "  outb    %%al,$0x61  \n\t"
        : : : "%al" );
}

inline static void
disable_PIT2(void)
{
    asm volatile(
        "  inb     $0x61,%%al  \n\t"
        "  and     $0xFC,%%al  \n\t"
        "  outb    %%al,$0x61  \n\t"
        : : : "%al" );
}

inline static void
set_PIT2(int value)
{
    /*
     * First, tell the clock we are going to write 16 bits to the counter
     * and enable one-shot mode (command 0xB8 to port 0x43).
     * Then write the two bytes into the PIT2 clock register (port 0x42).
     * Loop until the value is "realized" in the clock;
     * this happens on the next tick.
     */
    asm volatile(
        "  movb    $0xB8,%%al  \n\t"
        "  outb    %%al,$0x43  \n\t"
        "  movb    %%dl,%%al   \n\t"
        "  outb    %%al,$0x42  \n\t"
        "  movb    %%dh,%%al   \n\t"
        "  outb    %%al,$0x42  \n"
        "1: inb    $0x42,%%al  \n\t"
        "  inb     $0x42,%%al  \n\t"
        "  cmp     %%al,%%dh   \n\t"
        "  jne     1b"
        : : "d"(value) : "%al");
}

inline static uint64_t
get_PIT2(unsigned int *value)
{
    register uint64_t result;
    /*
     * This routine first latches the time (command 0x80 to port 0x43),
     * then gets the time stamp so we know how long the read will take later.
     * Read (from port 0x42) and return the current value of the timer.
     */
    asm volatile(
        "  xorl    %%ecx,%%ecx \n\t"
        "  movb    $0x80,%%al  \n\t"
        "  outb    %%al,$0x43  \n\t"
        "  rdtsc               \n\t"
        "  pushl   %%eax       \n\t"
        "  inb     $0x42,%%al  \n\t"
        "  movb    %%al,%%cl   \n\t"
        "  inb     $0x42,%%al  \n\t"
        "  movb    %%al,%%ch   \n\t"
        "  popl    %%eax       "
        : "=A"(result), "=c"(*value));
    return result;
}

/*
 * timeRDTSC()
 * This routine sets up PIT counter 2 to count down 1/20 of a second.
 * It pauses until the value is latched in the counter
 * and then reads the time stamp counter to return to the caller.
 */
static uint64_t
timeRDTSC(void)
{
    int          attempts = 0;
    uint64_t     latchTime;
    uint64_t     saveTime, intermediate;
    unsigned int timerValue, lastValue;
    boolean_t    int_enabled;
    /*
     * Table of correction factors to account for
     *   - timer counter quantization errors, and
     *   - undercounts 0..5
     */
#define SAMPLE_CLKS_EXACT   (((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT     ((int) CLKNUM / 20)
#define SAMPLE_NSECS        (2000000000LL)
#define SAMPLE_MULTIPLIER   (((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)          ((uint64_t)((x) + 0.5))
    uint64_t scale[6] = {
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
    };

    int_enabled = ml_set_interrupts_enabled(FALSE);

restart:
    if (attempts >= 2)
        panic("timeRDTSC() calibration failed with %d attempts\n", attempts);
    attempts++;
    enable_PIT2();              // turn on PIT2
    set_PIT2(0);                // reset timer 2 to be zero
    latchTime = rdtsc64();      // get the time stamp to time
    latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
    set_PIT2(SAMPLE_CLKS_INT);  // set up the timer for (almost) 1/20th a second
    saveTime = rdtsc64();       // now time how long a 20th of a second is...
    get_PIT2(&lastValue);
    get_PIT2(&lastValue);       // read twice, first value may be unreliable
    do {
        intermediate = get_PIT2(&timerValue);
        if (timerValue > lastValue) {
            printf("Hey we are going backwards! %u -> %u, restarting timing\n",
                   timerValue, lastValue);
            set_PIT2(0);
            disable_PIT2();
            goto restart;
        }
        lastValue = timerValue;
    } while (timerValue > 5);
    kprintf("timerValue   %d\n", timerValue);
    kprintf("intermediate 0x%016llx\n", intermediate);
    kprintf("saveTime     0x%016llx\n", saveTime);

    intermediate -= saveTime;           // raw count for about 1/20 second
    intermediate *= scale[timerValue];  // rescale measured time spent
    intermediate /= SAMPLE_NSECS;       // so it's exactly 1/20 of a second
    intermediate += latchTime;          // add on our save fudge

    set_PIT2(0);        // reset timer 2 to be zero
    disable_PIT2();     // turn off PIT 2

    ml_set_interrupts_enabled(int_enabled);
    return intermediate;
}

static uint64_t
tsc_to_nanoseconds(uint64_t abstime)
{
    uint32_t numer;
    uint32_t denom;
    uint32_t intermediate[3];

    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    if (denom == RTC_FAST_DENOM) {
        abstime = fast_get_nano_from_abs(abstime, numer);
    } else {
        longmul(&abstime, numer, intermediate);
        abstime = longdiv(intermediate, denom);
    }
    return abstime;
}

inline static mach_timespec_t
tsc_to_timespec(void)
{
    uint64_t currNanos;
    currNanos = rtc_nanotime_read();
    return nanos_to_timespec(currNanos);
}

#define DECREMENTER_MAX UINT_MAX
static uint32_t
deadline_to_decrementer(
    uint64_t deadline,
    uint64_t now)
{
    uint64_t delta;

    if (deadline <= now)
        return rtc_decrementer_min;
    else {
        delta = deadline - now;
        return MIN(MAX(rtc_decrementer_min, delta), DECREMENTER_MAX);
    }
}

static inline uint64_t
lapic_time_countdown(uint32_t initial_count)
{
    boolean_t           state;
    uint64_t            start_time;
    uint64_t            stop_time;
    lapic_timer_count_t count;

    state = ml_set_interrupts_enabled(FALSE);
    lapic_set_timer(FALSE, one_shot, divide_by_1, initial_count);
    start_time = rdtsc64();
    do {
        lapic_get_timer(NULL, NULL, NULL, &count);
    } while (count > 0);
    stop_time = rdtsc64();
    ml_set_interrupts_enabled(state);

    return tsc_to_nanoseconds(stop_time - start_time);
}

static void
rtc_lapic_timer_calibrate(void)
{
    uint32_t nsecs;
    uint64_t countdown;

    if (!(cpuid_features() & CPUID_FEATURE_APIC))
        return;

    /*
     * Set the local apic timer counting down to zero without an interrupt.
     * Use the timestamp to calculate how long this takes.
     */
    nsecs = (uint32_t) lapic_time_countdown(rtc_intr_nsec);

    /*
     * Compute a countdown ratio for a given time in nanoseconds.
     * That is, countdown = time * numer / denom.
     */
    countdown = (uint64_t)rtc_intr_nsec * (uint64_t)rtc_intr_nsec / nsecs;

    nsecs = (uint32_t) lapic_time_countdown((uint32_t) countdown);

    rtc_lapic_scale.numer = countdown;
    rtc_lapic_scale.denom = nsecs;

    kprintf("rtc_lapic_timer_calibrate() scale: %d/%d\n",
            (uint32_t) countdown, nsecs);
}
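
/*
 * Worked example (assumed numbers for illustration, not measured): with
 * rtc_intr_nsec = 10000000 (10ms), suppose counting down 10000000 lapic
 * ticks is timed at 5000000 ns; the lapic then runs at 2 ticks/ns, so
 *     countdown = 10000000 * 10000000 / 5000000 = 20000000
 * is the count for a 10ms interval, and rtc_lapic_scale ends up at
 * roughly 20000000/10000000, i.e. 2 lapic ticks per nanosecond.
 */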

static void
rtc_lapic_set_timer(
    uint32_t interval)
{
    uint64_t count;

    assert(rtc_lapic_scale.denom);

    count = interval * (uint64_t) rtc_lapic_scale.numer;
    count /= rtc_lapic_scale.denom;

    lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);
}

static void
rtc_lapic_start_ticking(void)
{
    uint64_t abstime;
    uint64_t first_tick;
    uint64_t decr;

    abstime = mach_absolute_time();
    first_tick = abstime + NSEC_PER_HZ;
    current_cpu_datap()->cpu_rtc_tick_deadline = first_tick;
    decr = deadline_to_decrementer(first_tick, abstime);
    rtc_lapic_set_timer(decr);
}

/*
 * Configure the real-time clock device. Return success (1)
 * or failure (0).
 */

int
sysclk_config(void)
{
    mp_disable_preemption();
    if (cpu_number() != master_cpu) {
        mp_enable_preemption();
        return (1);
    }
    mp_enable_preemption();

    timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

    simple_lock_init(&rtclock.lock, 0);

    return (1);
}

/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (tsc) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all
 * timing. However, the cpu clock rate is not only platform-dependent but can
 * change (speed-step) dynamically. Hence the tsc is converted into
 * nanoseconds, which is identical to mach_absolute_time. The conversion from
 * tsc to nanoseconds is encapsulated by nanotime.
 *
 * The kernel maintains nanotime information recording:
 *   - the current ratio of tsc to nanoseconds
 *     with this ratio expressed as a 32-bit scale and shift
 *     (power of 2 divider);
 *   - the tsc (step_tsc) and nanotime (step_ns) at which the current
 *     ratio (clock speed) began.
 * So a tsc value can be converted to nanotime by:
 *
 *     nanotime = (((tsc - step_tsc)*scale) >> shift) + step_ns
 *
 * In general, (tsc - step_tsc) is a 64-bit quantity with the scaling
 * involving a 96-bit intermediate value. However, by saving the converted
 * values at each tick (or at any intervening speed-step) - base_tsc and
 * base_ns - we can perform conversions relative to these and be assured that
 * (tsc - base_tsc) is 32 bits. Hence:
 *
 *     fast_nanotime = (((tsc - base_tsc)*scale) >> shift) + base_ns
 *
 * The tuple {base_tsc, base_ns, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read. A duplicate check_tsc is
 * appended so that the consistency of the read can be verified. Note that
 * this scheme is essential for MP systems in which the commpage is updated
 * by the master cpu but may be read concurrently by other cpus.
 *
 */
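/*
 * Worked example of the fast path (assumed values for illustration):
 * at 2GHz each tsc tick is 0.5ns, so scale = 0x80000000 with shift = 32.
 * With base_tsc = 1000 and base_ns = 500, a later tsc of 3000 gives
 *     fast_nanotime = (((3000 - 1000) * 0x80000000) >> 32) + 500
 *                   = 1000 + 500 = 1500 ns.
 */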
static inline void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
    commpage_nanotime_t cp_nanotime;

    /* Only the master cpu updates the commpage */
    if (cpu_number() != master_cpu)
        return;

    cp_nanotime.nt_base_tsc = rntp->rnt_tsc;
    cp_nanotime.nt_base_ns = rntp->rnt_nanos;
    cp_nanotime.nt_scale = rntp->rnt_scale;
    cp_nanotime.nt_shift = rntp->rnt_shift;

    commpage_set_nanotime(&cp_nanotime);
}

static void
rtc_nanotime_init(void)
{
    rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;
    rtc_nanotime_t *master_rntp = &cpu_datap(master_cpu)->cpu_rtc_nanotime;

    if (cpu_number() == master_cpu) {
        rntp->rnt_tsc = rdtsc64();
        rntp->rnt_nanos = tsc_to_nanoseconds(rntp->rnt_tsc);
        rntp->rnt_scale = rtc_quant_scale;
        rntp->rnt_shift = rtc_quant_shift;
        rntp->rnt_step_tsc = 0ULL;
        rntp->rnt_step_nanos = 0ULL;
    } else {
        /*
         * Copy master processor's nanotime info.
         * Loop required in case this changes while copying.
         */
        do {
            *rntp = *master_rntp;
        } while (rntp->rnt_tsc != master_rntp->rnt_tsc);
    }
}

static inline void
_rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t tsc)
{
    uint64_t tsc_delta;
    uint64_t ns_delta;

    tsc_delta = tsc - rntp->rnt_step_tsc;
    ns_delta = tsc_to_nanoseconds(tsc_delta);
    rntp->rnt_nanos = rntp->rnt_step_nanos + ns_delta;
    rntp->rnt_tsc = tsc;
}

static void
rtc_nanotime_update(void)
{
    rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;

    assert(get_preemption_level() > 0);
    assert(!ml_get_interrupts_enabled());

    _rtc_nanotime_update(rntp, rdtsc64());
    rtc_nanotime_set_commpage(rntp);
}

static void
rtc_nanotime_scale_update(void)
{
    rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;
    uint64_t       tsc = rdtsc64();

    assert(!ml_get_interrupts_enabled());

    /*
     * Update time based on past scale.
     */
    _rtc_nanotime_update(rntp, tsc);

    /*
     * Update scale and timestamp this update.
     */
    rntp->rnt_scale = rtc_quant_scale;
    rntp->rnt_shift = rtc_quant_shift;
    rntp->rnt_step_tsc = rntp->rnt_tsc;
    rntp->rnt_step_nanos = rntp->rnt_nanos;

    /* Export update to userland */
    rtc_nanotime_set_commpage(rntp);
}

static uint64_t
_rtc_nanotime_read(void)
{
    rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;
    uint64_t       rnt_tsc;
    uint32_t       rnt_scale;
    uint32_t       rnt_shift;
    uint64_t       rnt_nanos;
    uint64_t       tsc;
    uint64_t       tsc_delta;

    rnt_scale = rntp->rnt_scale;
    if (rnt_scale == 0)
        return 0ULL;

    rnt_shift = rntp->rnt_shift;
    rnt_nanos = rntp->rnt_nanos;
    rnt_tsc = rntp->rnt_tsc;
    tsc = rdtsc64();

    tsc_delta = tsc - rnt_tsc;
    if ((tsc_delta >> 32) != 0)
        return rnt_nanos + tsc_to_nanoseconds(tsc_delta);

    /* Let the compiler optimize(?): */
    if (rnt_shift == 32)
        return rnt_nanos + ((tsc_delta * rnt_scale) >> 32);
    else
        return rnt_nanos + ((tsc_delta * rnt_scale) >> rnt_shift);
}

uint64_t
rtc_nanotime_read(void)
{
    uint64_t       result;
    uint64_t       rnt_tsc;
    rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;

    /*
     * Use the timestamp to ensure the uptime record isn't changed.
     * This avoids disabling interrupts.
     * And note this is a per-cpu structure, hence no locking.
     */
    do {
        rnt_tsc = rntp->rnt_tsc;
        result = _rtc_nanotime_read();
    } while (rnt_tsc != rntp->rnt_tsc);

    return result;
}

/*
 * This function is called by the speed-step driver when a
 * change of cpu clock frequency is about to occur.
 * The scale is not changed until rtc_clock_stepped() is called.
 * Between these times there is uncertainty as to exactly when
 * the change takes effect. FIXME: by using another timing source
 * we could eliminate this error.
 */
void
rtc_clock_stepping(__unused uint32_t new_frequency,
                   __unused uint32_t old_frequency)
{
    boolean_t istate;

    istate = ml_set_interrupts_enabled(FALSE);
    rtc_nanotime_scale_update();
    ml_set_interrupts_enabled(istate);
}

/*
 * This function is called by the speed-step driver when a
 * change of cpu clock frequency has just occurred. This change
 * is expressed as a ratio relative to the boot clock rate.
 */
void
rtc_clock_stepped(uint32_t new_frequency, uint32_t old_frequency)
{
    boolean_t istate;

    istate = ml_set_interrupts_enabled(FALSE);
    if (rtc_boot_frequency == 0) {
        /*
         * At the first ever stepping, old frequency is the real
         * initial clock rate. This step and all others are based
         * relative to this initial frequency at which the tsc
         * calibration was made. Hence we must remember this base
         * frequency as reference.
         */
        rtc_boot_frequency = old_frequency;
    }
    rtc_set_cyc_per_sec(rtc_cycle_count * new_frequency /
                        rtc_boot_frequency);
    rtc_nanotime_scale_update();
    ml_set_interrupts_enabled(istate);
}

/*
 * rtc_sleep_wakeup() is called from ACPI on awakening from an S3 sleep.
 */
void
rtc_sleep_wakeup(void)
{
    rtc_nanotime_t *rntp = &current_cpu_datap()->cpu_rtc_nanotime;

    boolean_t      istate;

    istate = ml_set_interrupts_enabled(FALSE);

    /*
     * Reset nanotime.
     * The timestamp counter will have been reset
     * but nanotime (uptime) marches onward.
     * We assume that we're still at the former cpu frequency.
     */
    rntp->rnt_tsc = rdtsc64();
    rntp->rnt_step_tsc = 0ULL;
    rntp->rnt_step_nanos = rntp->rnt_nanos;
    rtc_nanotime_set_commpage(rntp);

    /* Restart tick interrupts from the LAPIC timer */
    rtc_lapic_start_ticking();

    ml_set_interrupts_enabled(istate);
}

/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
sysclk_init(void)
{
    uint64_t cycles;

    mp_disable_preemption();
    if (cpu_number() == master_cpu) {
        /*
         * Perform calibration.
         * The PIT is used as the reference to compute how many
         * TSC counts (cpu clock cycles) occur per second.
         */
        rtc_cycle_count = timeRDTSC();
        cycles = rtc_set_cyc_per_sec(rtc_cycle_count);

        /*
         * Set min/max to actual.
         * ACPI may update these later if speed-stepping is detected.
         */
        gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
        gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
        printf("[RTCLOCK] frequency %llu (%llu)\n",
               cycles, rtc_cyc_per_sec);

        rtc_lapic_timer_calibrate();

        /* Minimum interval is 1usec */
        rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC, 0ULL);

        /* Point LAPIC interrupts to hardclock() */
        lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

        clock_timebase_init();
        rtc_initialized = TRUE;
    }

    rtc_nanotime_init();

    rtc_lapic_start_ticking();

    mp_enable_preemption();

    return (1);
}

/*
 * Get the clock device time. This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 */
static kern_return_t
sysclk_gettime_internal(
    mach_timespec_t *cur_time)  /* OUT */
{
    *cur_time = tsc_to_timespec();
    return (KERN_SUCCESS);
}

kern_return_t
sysclk_gettime(
    mach_timespec_t *cur_time)  /* OUT */
{
    return sysclk_gettime_internal(cur_time);
}

void
sysclk_gettime_interrupts_disabled(
    mach_timespec_t *cur_time)  /* OUT */
{
    (void) sysclk_gettime_internal(cur_time);
}

// utility routine
// Code to calculate how many processor cycles are in a second...

static uint64_t
rtc_set_cyc_per_sec(uint64_t cycles)
{
    if (cycles > (NSEC_PER_SEC/20)) {
        // we can use just a "fast" multiply to get nanos
        rtc_quant_shift = 32;
        rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
        rtclock.timebase_const.numer = rtc_quant_scale; // timeRDTSC is 1/20
        rtclock.timebase_const.denom = RTC_FAST_DENOM;
    } else {
        rtc_quant_shift = 26;
        rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
        rtclock.timebase_const.numer = NSEC_PER_SEC/20; // timeRDTSC is 1/20
        rtclock.timebase_const.denom = cycles;
    }
    rtc_cyc_per_sec = cycles*20;    // multiply it by 20 and we are done...
                                    // BUT we also want to calculate...

    cycles = ((rtc_cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
              / UI_CPUFREQ_ROUNDING_FACTOR)
             * UI_CPUFREQ_ROUNDING_FACTOR;

    /*
     * Set current measured speed.
     */
    if (cycles >= 0x100000000ULL) {
        gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
    } else {
        gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
    }
    gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

    kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, rtc_cyc_per_sec);
    return (cycles);
}

void
clock_get_system_microtime(
    uint32_t *secs,
    uint32_t *microsecs)
{
    mach_timespec_t now;

    (void) sysclk_gettime_internal(&now);

    *secs = now.tv_sec;
    *microsecs = now.tv_nsec / NSEC_PER_USEC;
}

void
clock_get_system_nanotime(
    uint32_t *secs,
    uint32_t *nanosecs)
{
    mach_timespec_t now;

    (void) sysclk_gettime_internal(&now);

    *secs = now.tv_sec;
    *nanosecs = now.tv_nsec;
}

/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
    clock_flavor_t         flavor,
    clock_attr_t           attr,   /* OUT */
    mach_msg_type_number_t *count) /* IN/OUT */
{
    if (*count != 1)
        return (KERN_FAILURE);
    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
        *(clock_res_t *) attr = rtc_intr_nsec;
        break;

    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MAXRES:
    case CLOCK_ALARM_MINRES:
        *(clock_res_t *) attr = 0;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }
    return (KERN_SUCCESS);
}

/*
 * Set next alarm time for the clock device. This call
 * always resets the time to deliver an alarm for the
 * clock.
 */
void
sysclk_setalarm(
    mach_timespec_t *alarm_time)
{
    timer_call_enter(&rtclock_alarm_timer,
                     (uint64_t) alarm_time->tv_sec * NSEC_PER_SEC
                     + alarm_time->tv_nsec);
}

/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
    return bbc_config();
}

/*
 * Initialize calendar clock.
 */
int
calend_init(void)
{
    return (1);
}

/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
    mach_timespec_t *cur_time)  /* OUT */
{
    spl_t s;

    RTC_LOCK(s);
    if (!rtclock.calend_is_set) {
        RTC_UNLOCK(s);
        return (KERN_FAILURE);
    }

    (void) sysclk_gettime_internal(cur_time);
    ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
    RTC_UNLOCK(s);

    return (KERN_SUCCESS);
}

void
clock_get_calendar_microtime(
    uint32_t *secs,
    uint32_t *microsecs)
{
    mach_timespec_t now;

    calend_gettime(&now);

    *secs = now.tv_sec;
    *microsecs = now.tv_nsec / NSEC_PER_USEC;
}

void
clock_get_calendar_nanotime(
    uint32_t *secs,
    uint32_t *nanosecs)
{
    mach_timespec_t now;

    calend_gettime(&now);

    *secs = now.tv_sec;
    *nanosecs = now.tv_nsec;
}

void
clock_set_calendar_microtime(
    uint32_t secs,
    uint32_t microsecs)
{
    mach_timespec_t new_time, curr_time;
    uint32_t        old_offset;
    spl_t           s;

    new_time.tv_sec = secs;
    new_time.tv_nsec = microsecs * NSEC_PER_USEC;

    RTC_LOCK(s);
    old_offset = rtclock.calend_offset.tv_sec;
    (void) sysclk_gettime_internal(&curr_time);
    rtclock.calend_offset = new_time;
    SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
    rtclock.boottime += rtclock.calend_offset.tv_sec - old_offset;
    rtclock.calend_is_set = TRUE;
    RTC_UNLOCK(s);

    (void) bbc_settime(&new_time);

    host_notify_calendar_change();
}

/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
    clock_flavor_t         flavor,
    clock_attr_t           attr,   /* OUT */
    mach_msg_type_number_t *count) /* IN/OUT */
{
    if (*count != 1)
        return (KERN_FAILURE);
    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
        *(clock_res_t *) attr = rtc_intr_nsec;
        break;

    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        *(clock_res_t *) attr = 0;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }
    return (KERN_SUCCESS);
}

#define tickadj (40*NSEC_PER_USEC)  /* "standard" skew, ns / tick */
#define bigadj  (NSEC_PER_SEC)      /* use 10x skew above bigadj ns */

uint32_t
clock_set_calendar_adjtime(
    int32_t *secs,
    int32_t *microsecs)
{
    int64_t  total, ototal;
    uint32_t interval = 0;
    spl_t    s;

    total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

    RTC_LOCK(s);
    ototal = rtclock.calend_adjtotal;

    if (total != 0) {
        int32_t delta = tickadj;

        if (total > 0) {
            if (total > bigadj)
                delta *= 10;
            if (delta > total)
                delta = total;
        }
        else {
            if (total < -bigadj)
                delta *= 10;
            delta = -delta;
            if (delta < total)
                delta = total;
        }

        rtclock.calend_adjtotal = total;
        rtclock.calend_adjdelta = delta;

        interval = NSEC_PER_HZ;
    }
    else
        rtclock.calend_adjdelta = rtclock.calend_adjtotal = 0;

    RTC_UNLOCK(s);

    if (ototal == 0)
        *secs = *microsecs = 0;
    else {
        *secs = ototal / NSEC_PER_SEC;
        /* convert the nanosecond remainder to microseconds */
        *microsecs = (ototal % NSEC_PER_SEC) / NSEC_PER_USEC;
    }

    return (interval);
}

uint32_t
clock_adjust_calendar(void)
{
    uint32_t interval = 0;
    int32_t  delta;
    spl_t    s;

    RTC_LOCK(s);
    delta = rtclock.calend_adjdelta;
    ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, delta);

    rtclock.calend_adjtotal -= delta;

    if (delta > 0) {
        if (delta > rtclock.calend_adjtotal)
            rtclock.calend_adjdelta = rtclock.calend_adjtotal;
    }
    else
    if (delta < 0) {
        if (delta < rtclock.calend_adjtotal)
            rtclock.calend_adjdelta = rtclock.calend_adjtotal;
    }

    if (rtclock.calend_adjdelta != 0)
        interval = NSEC_PER_HZ;

    RTC_UNLOCK(s);

    return (interval);
}

void
clock_initialize_calendar(void)
{
    mach_timespec_t bbc_time, curr_time;
    spl_t           s;

    if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
        return;

    RTC_LOCK(s);
    if (rtclock.boottime == 0)
        rtclock.boottime = bbc_time.tv_sec;
    (void) sysclk_gettime_internal(&curr_time);
    rtclock.calend_offset = bbc_time;
    SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
    rtclock.calend_is_set = TRUE;
    RTC_UNLOCK(s);

    host_notify_calendar_change();
}

void
clock_get_boottime_nanotime(
    uint32_t *secs,
    uint32_t *nanosecs)
{
    *secs = rtclock.boottime;
    *nanosecs = 0;
}

void
clock_timebase_info(
    mach_timebase_info_t info)
{
    info->numer = info->denom = 1;
}

void
clock_set_timer_deadline(
    uint64_t deadline)
{
    spl_t           s;
    cpu_data_t      *pp = current_cpu_datap();
    rtclock_timer_t *mytimer = &pp->cpu_rtc_timer;
    uint64_t        abstime;
    uint64_t        decr;

    assert(get_preemption_level() > 0);
    assert(rtclock_timer_expire);

    RTC_INTRS_OFF(s);
    mytimer->deadline = deadline;
    mytimer->is_set = TRUE;
    if (!mytimer->has_expired) {
        abstime = mach_absolute_time();
        if (mytimer->deadline < pp->cpu_rtc_tick_deadline) {
            decr = deadline_to_decrementer(mytimer->deadline,
                                           abstime);
            rtc_lapic_set_timer(decr);
            pp->cpu_rtc_intr_deadline = mytimer->deadline;
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) |
                DBG_FUNC_NONE, decr, 2, 0, 0, 0);
        }
    }
    RTC_INTRS_ON(s);
}

void
clock_set_timer_func(
    clock_timer_func_t func)
{
    if (rtclock_timer_expire == NULL)
        rtclock_timer_expire = func;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(struct i386_interrupt_state *regs)
{
    uint64_t        abstime;
    uint32_t        latency;
    uint64_t        decr;
    uint64_t        decr_tick;
    uint64_t        decr_timer;
    cpu_data_t      *pp = current_cpu_datap();
    rtclock_timer_t *mytimer = &pp->cpu_rtc_timer;

    assert(get_preemption_level() > 0);
    assert(!ml_get_interrupts_enabled());

    abstime = _rtc_nanotime_read();
    latency = (uint32_t) abstime - pp->cpu_rtc_intr_deadline;
    if (pp->cpu_rtc_tick_deadline <= abstime) {
        rtc_nanotime_update();
        clock_deadline_for_periodic_event(
            NSEC_PER_HZ, abstime, &pp->cpu_rtc_tick_deadline);
        hertz_tick(
#if STAT_TIME
            NSEC_PER_HZ,
#endif
            (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0),
            regs->eip);
    }

    abstime = _rtc_nanotime_read();
    if (mytimer->is_set && mytimer->deadline <= abstime) {
        mytimer->has_expired = TRUE;
        mytimer->is_set = FALSE;
        (*rtclock_timer_expire)(abstime);
        assert(!ml_get_interrupts_enabled());
        mytimer->has_expired = FALSE;
    }

    /* Log the interrupt service latency (-ve value expected by tool) */
    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
        -latency, (uint32_t)regs->eip, 0, 0, 0);

    abstime = _rtc_nanotime_read();
    decr_tick = deadline_to_decrementer(pp->cpu_rtc_tick_deadline, abstime);
    decr_timer = (mytimer->is_set) ?
        deadline_to_decrementer(mytimer->deadline, abstime) :
        DECREMENTER_MAX;
    decr = MIN(decr_tick, decr_timer);
    pp->cpu_rtc_intr_deadline = abstime + decr;

    rtc_lapic_set_timer(decr);

    /* Log the new decrementer value */
    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE,
        decr, 3, 0, 0, 0);
}

static void
rtclock_alarm_expire(
    __unused timer_call_param_t p0,
    __unused timer_call_param_t p1)
{
    mach_timespec_t clock_time;

    (void) sysclk_gettime_internal(&clock_time);

    clock_alarm_intr(SYSTEM_CLOCK, &clock_time);
}

void
clock_get_uptime(
    uint64_t *result)
{
    *result = rtc_nanotime_read();
}

uint64_t
mach_absolute_time(void)
{
    return rtc_nanotime_read();
}

void
absolutetime_to_microtime(
    uint64_t abstime,
    uint32_t *secs,
    uint32_t *microsecs)
{
    uint32_t remain;

    asm volatile(
        "divl %3"
        : "=a" (*secs), "=d" (remain)
        : "A" (abstime), "r" (NSEC_PER_SEC));
    asm volatile(
        "divl %3"
        : "=a" (*microsecs)
        : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
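
/*
 * The two divides above split nanoseconds into whole seconds and the
 * microseconds of the remainder; an illustrative C equivalent:
 *     *secs      = abstime / NSEC_PER_SEC;
 *     *microsecs = (abstime % NSEC_PER_SEC) / NSEC_PER_USEC;
 */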

void
clock_interval_to_deadline(
    uint32_t interval,
    uint32_t scale_factor,
    uint64_t *result)
{
    uint64_t abstime;

    clock_get_uptime(result);

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    *result += abstime;
}

void
clock_interval_to_absolutetime_interval(
    uint32_t interval,
    uint32_t scale_factor,
    uint64_t *result)
{
    *result = (uint64_t)interval * scale_factor;
}

void
clock_absolutetime_interval_to_deadline(
    uint64_t abstime,
    uint64_t *result)
{
    clock_get_uptime(result);

    *result += abstime;
}

void
absolutetime_to_nanoseconds(
    uint64_t abstime,
    uint64_t *result)
{
    *result = abstime;
}

void
nanoseconds_to_absolutetime(
    uint64_t nanoseconds,
    uint64_t *result)
{
    *result = nanoseconds;
}

void
machine_delay_until(
    uint64_t deadline)
{
    uint64_t now;

    do {
        cpu_pause();
        now = mach_absolute_time();
    } while (now < deadline);
}