]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/rtclock.c
xnu-792.18.15.tar.gz
[apple/xnu.git] / osfmk / i386 / rtclock.c
CommitLineData
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
8f6c56a5 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
8f6c56a5
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
8ad349bb 24 * limitations under the License.
8f6c56a5
A
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31
32/*
33 * File: i386/rtclock.c
34 * Purpose: Routines for handling the machine dependent
91447636
A
35 * real-time clock. Historically, this clock is
36 * generated by the Intel 8254 Programmable Interval
37 * Timer, but local apic timers are now used for
38 * this purpose with the master time reference being
39 * the cpu clock counted by the timestamp MSR.
1c79356b
A
40 */
41
1c79356b 42#include <platforms.h>
1c79356b 43#include <mach_kdb.h>
55e303ae
A
44
45#include <mach/mach_types.h>
46
1c79356b 47#include <kern/cpu_data.h>
91447636 48#include <kern/cpu_number.h>
1c79356b 49#include <kern/clock.h>
55e303ae 50#include <kern/host_notify.h>
1c79356b
A
51#include <kern/macro_help.h>
52#include <kern/misc_protos.h>
53#include <kern/spl.h>
91447636 54#include <kern/assert.h>
1c79356b
A
55#include <mach/vm_prot.h>
56#include <vm/pmap.h>
57#include <vm/vm_kern.h> /* for kernel_map */
58#include <i386/ipl.h>
59#include <i386/pit.h>
89b3af67 60#include <architecture/i386/pio.h>
1c79356b 61#include <i386/misc_protos.h>
55e303ae
A
62#include <i386/proc_reg.h>
63#include <i386/machine_cpu.h>
91447636
A
64#include <i386/mp.h>
65#include <i386/cpuid.h>
66#include <i386/cpu_data.h>
67#include <i386/cpu_threads.h>
68#include <i386/perfmon.h>
69#include <i386/machine_routines.h>
55e303ae 70#include <pexpert/pexpert.h>
91447636
A
71#include <machine/limits.h>
72#include <machine/commpage.h>
73#include <sys/kdebug.h>
89b3af67
A
74#include <i386/tsc.h>
75#include <i386/hpet.h>
76#include <i386/rtclock.h>
91447636
A
/*
 * Local MIN/MAX helpers.
 * NOTE: classic multiple-evaluation macros -- arguments must be
 * side-effect free.
 */
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)>(b))?(b):(a))

#define NSEC_PER_HZ (NSEC_PER_SEC / 100) /* nsec per tick (100 Hz clock) */

/* Exported cpu frequency is rounded to a multiple of this (10 MHz). */
#define UI_CPUFREQ_ROUNDING_FACTOR	10000000

int		rtclock_config(void);

int		rtclock_init(void);

/* Smallest decrementer interval we will program (computed in rtclock_init). */
uint64_t	rtc_decrementer_min;

void		rtclock_intr(x86_saved_state_t *regs);
static uint64_t	maxDec;			/* longest interval our hardware timer can handle (nsec) */

/* XXX this should really be in a header somewhere */
extern clock_timer_func_t	rtclock_timer_expire;

static void	rtc_set_timescale(uint64_t cycles);
static uint64_t	rtc_export_speed(uint64_t cycles);

/*
 * Atomic store/load of the nanotime record, implemented externally
 * (presumably in assembly -- TODO confirm) so readers never observe
 * a half-updated {tsc_base, ns_base} pair.
 */
extern void	rtc_nanotime_store(
			uint64_t		tsc,
			uint64_t		nsec,
			uint32_t		scale,
			uint32_t		shift,
			rtc_nanotime_t	*dst);

extern void	rtc_nanotime_load(
			rtc_nanotime_t	*src,
			rtc_nanotime_t	*dst);

/* Master copy of the tsc->nanoseconds conversion state. */
rtc_nanotime_t	rtc_nanotime_info;
/*
 * tsc_to_nanoseconds:
 *
 * Basic routine to convert a raw 64 bit TSC value to a
 * 64 bit nanosecond value.  The conversion is implemented
 * based on the scale factor and an implicit 32 bit shift.
 */
static inline uint64_t
_tsc_to_nanoseconds(uint64_t value)
{
	/*
	 * Computes (value * scale) >> 32 with 32-bit multiplies:
	 * the 64-bit operand arrives in edx:eax ("+A"), scale in ecx.
	 * low*scale's high half (edi) is added to high*scale, yielding
	 * the product shifted right by 32.  esi/edi are scratch.
	 */
	asm volatile("movl %%edx,%%esi	;"
		     "mull %%ecx	;"
		     "movl %%edx,%%edi	;"
		     "movl %%esi,%%eax	;"
		     "mull %%ecx	;"
		     "addl %%edi,%%eax	;"
		     "adcl $0,%%edx	 "
			: "+A" (value) : "c" (rtc_nanotime_info.scale) : "esi", "edi");

	return (value);
}
134
89b3af67
A
135uint64_t
136tsc_to_nanoseconds(uint64_t value)
8f6c56a5 137{
89b3af67 138 return _tsc_to_nanoseconds(value);
8f6c56a5
A
139}
140
91447636
A
141static uint32_t
142deadline_to_decrementer(
143 uint64_t deadline,
144 uint64_t now)
145{
146 uint64_t delta;
147
148 if (deadline <= now)
149 return rtc_decrementer_min;
150 else {
151 delta = deadline - now;
89b3af67 152 return MIN(MAX(rtc_decrementer_min,delta),maxDec);
91447636
A
153 }
154}
155
8f6c56a5
A
156static void
157rtc_lapic_start_ticking(void)
158{
159 uint64_t abstime;
160 uint64_t first_tick;
89b3af67 161 cpu_data_t *cdp = current_cpu_datap();
8f6c56a5
A
162
163 abstime = mach_absolute_time();
89b3af67
A
164 rtclock_tick_interval = NSEC_PER_HZ;
165
166 first_tick = abstime + rtclock_tick_interval;
167 cdp->rtclock_intr_deadline = first_tick;
168
169 /*
170 * Force a complete re-evaluation of timer deadlines.
171 */
172 cdp->rtcPop = EndOfAllTime;
173 etimer_resync_deadlines();
1c79356b
A
174}
175
/*
 * Configure the real-time clock device.  Return success (1)
 * or failure (0).
 */
int
rtclock_config(void)
{
	/* No device configuration is required on this platform. */
	return 1;
}
187
188
189/*
190 * Nanotime/mach_absolutime_time
191 * -----------------------------
89b3af67
A
192 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
193 * efficiently by the kernel and in userspace - is the reference for all timing.
194 * The cpu clock rate is platform-dependent and may stop or be reset when the
195 * processor is napped/slept. As a result, nanotime is the software abstraction
196 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
91447636
A
197 *
198 * The kernel maintains nanotime information recording:
89b3af67 199 * - the ratio of tsc to nanoseconds
91447636
A
200 * with this ratio expressed as a 32-bit scale and shift
201 * (power of 2 divider);
89b3af67 202 * - { tsc_base, ns_base } pair of corresponding timestamps.
8f6c56a5 203 *
89b3af67
A
204 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
205 * for the userspace nanotime routine to read.
8f6c56a5 206 *
89b3af67
A
207 * All of the routines which update the nanotime data are non-reentrant. This must
208 * be guaranteed by the caller.
91447636
A
209 */
210static inline void
211rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
212{
89b3af67
A
213 commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
214}
8f6c56a5 215
89b3af67
A
216/*
217 * rtc_nanotime_init:
218 *
219 * Intialize the nanotime info from the base time. Since
220 * the base value might be from a lower resolution clock,
221 * we compare it to the TSC derived value, and use the
222 * greater of the two values.
223 */
224static inline void
225_rtc_nanotime_init(rtc_nanotime_t *rntp, uint64_t base)
226{
227 uint64_t nsecs, tsc = rdtsc64();
91447636 228
89b3af67
A
229 nsecs = _tsc_to_nanoseconds(tsc);
230 rtc_nanotime_store(tsc, MAX(nsecs, base), rntp->scale, rntp->shift, rntp);
91447636
A
231}
232
233static void
89b3af67 234rtc_nanotime_init(uint64_t base)
91447636 235{
89b3af67 236 rtc_nanotime_t *rntp = &rtc_nanotime_info;
91447636 237
89b3af67
A
238 _rtc_nanotime_init(rntp, base);
239 rtc_nanotime_set_commpage(rntp);
91447636
A
240}
241
89b3af67
A
242/*
243 * rtc_nanotime_init:
244 *
245 * Call back from the commpage initialization to
246 * cause the commpage data to be filled in once the
247 * commpages have been created.
248 */
249void
250rtc_nanotime_init_commpage(void)
91447636 251{
89b3af67 252 spl_t s = splclock();
5d5c5d0d 253
89b3af67
A
254 rtc_nanotime_set_commpage(&rtc_nanotime_info);
255
256 splx(s);
91447636
A
257}
258
89b3af67
A
259/*
260 * rtc_nanotime_update:
261 *
262 * Update the nanotime info from the base time. Since
263 * the base value might be from a lower resolution clock,
264 * we compare it to the TSC derived value, and use the
265 * greater of the two values.
266 *
267 * N.B. In comparison to the above init routine, this assumes
268 * that the TSC has remained monotonic compared to the tsc_base
269 * value, which is not the case after S3 sleep.
270 */
271static inline void
272_rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t base)
91447636 273{
89b3af67 274 uint64_t nsecs, tsc = rdtsc64();
91447636 275
89b3af67
A
276 nsecs = rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
277 rtc_nanotime_store(tsc, MAX(nsecs, base), rntp->scale, rntp->shift, rntp);
91447636
A
278}
279
280static void
89b3af67
A
281rtc_nanotime_update(
282 uint64_t base)
91447636 283{
89b3af67 284 rtc_nanotime_t *rntp = &rtc_nanotime_info;
91447636
A
285
286 assert(!ml_get_interrupts_enabled());
287
89b3af67 288 _rtc_nanotime_update(rntp, base);
91447636
A
289 rtc_nanotime_set_commpage(rntp);
290}
291
89b3af67
A
/*
 * rtc_nanotime_read:
 *
 * Returns the current nanotime value, accessable from any
 * context.
 */
static uint64_t
rtc_nanotime_read(void)
{
	rtc_nanotime_t	rnt, *rntp = &rtc_nanotime_info;
	uint64_t	result;

	do {
		/* Snapshot the conversion state, then extrapolate from the TSC. */
		rtc_nanotime_load(rntp, &rnt);
		result = rnt.ns_base + _tsc_to_nanoseconds(rdtsc64() - rnt.tsc_base);
		/*
		 * Retry if an updater changed tsc_base while we were
		 * computing -- gives a consistent snapshot without
		 * taking a lock.
		 */
	} while (rntp->tsc_base != rnt.tsc_base);

	return (result);
}
311
/*
 * rtc_clock_napped:
 *
 * Invoked from power management when we have awoken from a nap (C3/C4)
 * during which the TSC lost counts.  The nanotime data is updated
 * according to the provided nanosecond base value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_clock_napped(
	uint64_t	 base)
{
	rtc_nanotime_update(base);
}
327
91447636
A
/* Speed-step transition entry point: not supported on this configuration. */
void
rtc_clock_stepping(__unused uint32_t new_frequency,
		   __unused uint32_t old_frequency)
{
	panic("rtc_clock_stepping unsupported");
}
334
91447636 335void
89b3af67
A
336rtc_clock_stepped(__unused uint32_t new_frequency,
337 __unused uint32_t old_frequency)
91447636 338{
89b3af67 339 panic("rtc_clock_stepping unsupported");
1c79356b
A
340}
341
342/*
89b3af67
A
343 * rtc_sleep_wakeup:
344 *
345 * Invoked from power manageent when we have awoken from a sleep (S3)
346 * and the TSC has been reset. The nanotime data is updated based on
347 * the HPET value.
348 *
349 * The caller must guarantee non-reentrancy.
91447636
A
350 */
351void
352rtc_sleep_wakeup(void)
353{
89b3af67 354 boolean_t istate;
91447636
A
355
356 istate = ml_set_interrupts_enabled(FALSE);
357
358 /*
359 * Reset nanotime.
360 * The timestamp counter will have been reset
361 * but nanotime (uptime) marches onward.
91447636 362 */
89b3af67 363 rtc_nanotime_init(tmrCvt(rdHPET(), hpetCvtt2n));
91447636
A
364
365 /* Restart tick interrupts from the LAPIC timer */
366 rtc_lapic_start_ticking();
367
368 ml_set_interrupts_enabled(istate);
369}
370
371/*
372 * Initialize the real-time clock device.
373 * In addition, various variables used to support the clock are initialized.
1c79356b
A
374 */
375int
89b3af67 376rtclock_init(void)
1c79356b 377{
91447636
A
378 uint64_t cycles;
379
89b3af67
A
380 assert(!ml_get_interrupts_enabled());
381
91447636 382 if (cpu_number() == master_cpu) {
89b3af67
A
383
384 assert(tscFreq);
385 rtc_set_timescale(tscFreq);
386
91447636 387 /*
89b3af67 388 * Adjust and set the exported cpu speed.
91447636 389 */
89b3af67 390 cycles = rtc_export_speed(tscFreq);
91447636
A
391
392 /*
393 * Set min/max to actual.
394 * ACPI may update these later if speed-stepping is detected.
395 */
89b3af67
A
396 gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
397 gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
91447636 398
89b3af67
A
399 /*
400 * Compute the longest interval we can represent.
401 */
402 maxDec = tmrCvt(0x7fffffffULL, busFCvtt2n);
403 kprintf("maxDec: %lld\n", maxDec);
91447636
A
404
405 /* Minimum interval is 1usec */
89b3af67 406 rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC, 0ULL);
91447636
A
407 /* Point LAPIC interrupts to hardclock() */
408 lapic_set_timer_func((i386_intr_func_t) rtclock_intr);
409
410 clock_timebase_init();
89b3af67 411 ml_init_lock_timeout();
1c79356b 412 }
91447636 413
91447636
A
414 rtc_lapic_start_ticking();
415
1c79356b
A
416 return (1);
417}
418
89b3af67
A
419// utility routine
420// Code to calculate how many processor cycles are in a second...
1c79356b 421
89b3af67
A
422static void
423rtc_set_timescale(uint64_t cycles)
1c79356b 424{
89b3af67
A
425 rtc_nanotime_info.scale = ((uint64_t)NSEC_PER_SEC << 32) / cycles;
426 rtc_nanotime_info.shift = 32;
1c79356b 427
89b3af67 428 rtc_nanotime_init(0);
1c79356b
A
429}
430
91447636 431static uint64_t
89b3af67 432rtc_export_speed(uint64_t cyc_per_sec)
9bccf70c 433{
89b3af67 434 uint64_t cycles;
1c79356b 435
89b3af67
A
436 /* Round: */
437 cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
91447636
A
438 / UI_CPUFREQ_ROUNDING_FACTOR)
439 * UI_CPUFREQ_ROUNDING_FACTOR;
9bccf70c 440
91447636
A
441 /*
442 * Set current measured speed.
443 */
444 if (cycles >= 0x100000000ULL) {
445 gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
55e303ae 446 } else {
91447636 447 gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
9bccf70c 448 }
91447636 449 gPEClockFrequencyInfo.cpu_frequency_hz = cycles;
55e303ae 450
89b3af67 451 kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
91447636 452 return(cycles);
9bccf70c 453}
1c79356b 454
55e303ae
A
/*
 * Split the current nanotime into whole seconds and microseconds.
 */
void
clock_get_system_microtime(
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint64_t	now = rtc_nanotime_read();
	uint32_t	remain;

	/*
	 * 64/32 divide: now in edx:eax ("A"); quotient (seconds) to
	 * *secs, nanosecond remainder to remain.
	 * NOTE(review): divl faults if the quotient exceeds 32 bits --
	 * assumes uptime below ~136 years.
	 */
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (now), "r" (NSEC_PER_SEC));
	/* Reduce the nanosecond remainder to microseconds. */
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
472
55e303ae
A
/*
 * Split the current nanotime into whole seconds and the remaining
 * nanoseconds.
 */
void
clock_get_system_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	uint64_t	now = rtc_nanotime_read();

	/*
	 * 64/32 divide: quotient (seconds) in eax, nanosecond
	 * remainder in edx.
	 */
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (*nanosecs)
				: "A" (now), "r" (NSEC_PER_SEC));
}
485
/*
 * Convert an absolute time plus calendar offset/epoch into
 * {secs, microsecs} and publish the result to the commpage for
 * userspace gettimeofday.
 */
void
clock_gettimeofday_set_commpage(
	uint64_t	abstime,
	uint64_t	epoch,
	uint64_t	offset,
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint64_t	now = abstime;
	uint32_t	remain;

	/* Shift uptime into the calendar timescale. */
	now += offset;

	/* Split into seconds (quotient) and nanosecond remainder. */
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (now), "r" (NSEC_PER_SEC));
	/* Convert the nanosecond remainder to microseconds. */
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));

	/* Rebase seconds by the supplied epoch. */
	*secs += epoch;

	/*
	 * Publish the timestamp aligned to the start of the current
	 * second (abstime - remain).
	 */
	commpage_set_timestamp(abstime - remain, *secs, NSEC_PER_SEC);
}
512
1c79356b
A
513void
514clock_timebase_info(
515 mach_timebase_info_t info)
516{
91447636 517 info->numer = info->denom = 1;
1c79356b
A
518}
519
1c79356b
A
520void
521clock_set_timer_func(
522 clock_timer_func_t func)
523{
91447636
A
524 if (rtclock_timer_expire == NULL)
525 rtclock_timer_expire = func;
1c79356b
A
526}
527
/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
	x86_saved_state_t	*tregs)
{
	uint64_t	rip;
	boolean_t	user_mode = FALSE;
	uint64_t	abstime;
	uint32_t	latency;
	cpu_data_t	*pp = current_cpu_datap();

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	abstime = rtc_nanotime_read();
	/* 32-bit difference between now and the scheduled pop. */
	latency = (uint32_t) abstime - pp->rtcPop;

	if (is_saved_state64(tregs) == TRUE) {
		x86_saved_state64_t	*regs;

		regs = saved_state64(tregs);

		/*
		 * 64-bit saved state is treated as user mode
		 * unconditionally -- presumably only user threads run
		 * 64-bit here; TODO confirm.
		 */
		user_mode = TRUE;
		rip = regs->isf.rip;
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(tregs);

		/* Low two bits of CS are the privilege ring. */
		if (regs->cs & 0x03)
			user_mode = TRUE;
		rip = regs->eip;
	}

	/* Log the interrupt service latency (-ve value expected by tool) */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		-latency, (uint32_t)rip, user_mode, 0, 0);

	/* call the generic etimer */
	etimer_intr(user_mode, rip);
}
572
89b3af67
A
573/*
574 * Request timer pop from the hardware
575 */
576
577int
578setPop(
579 uint64_t time)
1c79356b 580{
89b3af67
A
581 uint64_t now;
582 uint32_t decr;
583 uint64_t count;
584
585 now = rtc_nanotime_read(); /* The time in nanoseconds */
586 decr = deadline_to_decrementer(time, now);
5d5c5d0d 587
89b3af67
A
588 count = tmrCvt(decr, busFCvtn2t);
589 lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);
5d5c5d0d 590
89b3af67 591 return decr; /* Pass back what we set */
8ad349bb
A
592}
593
89b3af67 594
8f6c56a5 595void
89b3af67 596resetPop(void)
8ad349bb 597{
89b3af67
A
598 uint64_t now;
599 uint32_t decr;
600 uint64_t count;
601 cpu_data_t *cdp = current_cpu_datap();
602
603 now = rtc_nanotime_read();
604
605 decr = deadline_to_decrementer(cdp->rtcPop, now);
606
607 count = tmrCvt(decr, busFCvtn2t);
608 lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t)count);
5d5c5d0d
A
609}
610
89b3af67 611
8f6c56a5
A
612uint64_t
613mach_absolute_time(void)
5d5c5d0d 614{
89b3af67
A
615 return rtc_nanotime_read();
616}
617
618void
619clock_interval_to_absolutetime_interval(
620 uint32_t interval,
621 uint32_t scale_factor,
622 uint64_t *result)
623{
624 *result = (uint64_t)interval * scale_factor;
91447636
A
625}
626
/*
 * Split an absolute time (nanoseconds) into whole seconds and
 * microseconds.
 */
void
absolutetime_to_microtime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint32_t	remain;

	/*
	 * 64/32 divide: quotient (seconds) to *secs, nanosecond
	 * remainder to remain.
	 * NOTE(review): divl faults if the quotient exceeds 32 bits.
	 */
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (abstime), "r" (NSEC_PER_SEC));
	/* Reduce the nanosecond remainder to microseconds. */
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
644
/*
 * Split an absolute time (nanoseconds) into whole seconds and the
 * remaining nanoseconds.
 */
void
absolutetime_to_nanotime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	/*
	 * Single 64/32 divide: quotient (seconds) in eax, nanosecond
	 * remainder in edx.
	 */
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (*nanosecs)
				: "A" (abstime), "r" (NSEC_PER_SEC));
}
656
657void
89b3af67
A
658nanotime_to_absolutetime(
659 uint32_t secs,
660 uint32_t nanosecs,
661 uint64_t *result)
1c79356b 662{
89b3af67 663 *result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
1c79356b
A
664}
665
666void
667absolutetime_to_nanoseconds(
0b4e3aa0
A
668 uint64_t abstime,
669 uint64_t *result)
1c79356b 670{
0b4e3aa0 671 *result = abstime;
1c79356b
A
672}
673
674void
675nanoseconds_to_absolutetime(
0b4e3aa0
A
676 uint64_t nanoseconds,
677 uint64_t *result)
1c79356b 678{
0b4e3aa0 679 *result = nanoseconds;
1c79356b
A
680}
681
55e303ae 682void
91447636 683machine_delay_until(
55e303ae
A
684 uint64_t deadline)
685{
686 uint64_t now;
687
688 do {
689 cpu_pause();
690 now = mach_absolute_time();
691 } while (now < deadline);
692}