/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * File:	i386/rtclock.c
 * Purpose:	Routines for handling the machine dependent
 *		real-time clock.  Historically, this clock is
 *		generated by the Intel 8254 Programmable Interval
 *		Timer, but local apic timers are now used for
 *		this purpose with the master time reference being
 *		the cpu clock counted by the timestamp MSR.
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <kern/etimer.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <i386/ipl.h>
#include <architecture/i386/pio.h>
#include <i386/machine_cpu.h>
#include <i386/cpuid.h>
#include <i386/cpu_threads.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <i386/lapic.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
#include <i386/tsc.h>
#include <i386/rtclock.h>

#define NSEC_PER_HZ			(NSEC_PER_SEC / 100)	/* nsec per tick */

#define UI_CPUFREQ_ROUNDING_FACTOR	10000000

int		rtclock_config(void);

int		rtclock_init(void);

uint64_t	rtc_decrementer_min;

uint64_t	tsc_rebase_abs_time = 0;

void		rtclock_intr(x86_saved_state_t *regs);
static uint64_t	maxDec;		/* longest interval our hardware timer can handle (nsec) */

static void	rtc_set_timescale(uint64_t cycles);
static uint64_t	rtc_export_speed(uint64_t cycles);

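/*
 * Nanotime snapshot.  The lone non-zero initializer is assumed to be the
 * generation count (see rtc_nanotime_t in i386/rtclock.h): a zero
 * generation marks a snapshot that is being updated, so it must start
 * non-zero.
 */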
rtc_nanotime_t	rtc_nanotime_info = {0,0,0,0,1,0};

/*
 * tsc_to_nanoseconds:
 *
 * Basic routine to convert a raw 64 bit TSC value to a
 * 64 bit nanosecond value.  The conversion is implemented
 * based on the scale factor and an implicit 32 bit shift.
 */
static inline uint64_t
_tsc_to_nanoseconds(uint64_t value)
{
#if defined(__i386__)
	asm volatile("movl %%edx,%%esi	;"
		     "mull %%ecx	;"
		     "movl %%edx,%%edi	;"
		     "movl %%esi,%%eax	;"
		     "mull %%ecx	;"
		     "addl %%edi,%%eax	;"
		     "adcl $0,%%edx	 "
		     : "+A" (value)
		     : "c" (current_cpu_datap()->cpu_nanotime->scale)
		     : "esi", "edi");
#elif defined(__x86_64__)
	asm volatile("mul %%rcx;"
		     "shrq $32, %%rax;"
		     "shlq $32, %%rdx;"
		     "orq %%rdx, %%rax;"
		     : "=a"(value)
		     : "a"(value), "c"(rtc_nanotime_info.scale)
		     : "rdx", "cc" );
#else
#error Unsupported architecture
#endif

	return (value);
}

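/*
 * For reference, a portable C sketch of the conversion above (illustrative
 * only -- the shipped paths are the asm blocks; the name and explicit scale
 * parameter are hypothetical).  The scale factor is nanoseconds per cycle
 * in 32.32 fixed point, so the result is (value * scale) >> 32, computed
 * here as a 64x32-bit multiply split into halves:
 */
static inline uint64_t
_tsc_to_nanoseconds_sketch(uint64_t value, uint32_t scale)
{
	uint64_t lo = (uint64_t)(uint32_t)value * scale;	/* low 32 bits  * scale */
	uint64_t hi = (value >> 32) * scale;			/* high 32 bits * scale */

	return hi + (lo >> 32);			/* == (value * scale) >> 32 */
}
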
static inline uint32_t
_absolutetime_to_microtime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *microsecs)
{
	uint32_t remain;
#if defined(__i386__)
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (abstime), "r" (NSEC_PER_SEC));
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
#elif defined(__x86_64__)
	*secs = abstime / (uint64_t)NSEC_PER_SEC;
	remain = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC);
	*microsecs = remain / NSEC_PER_USEC;
#else
#error Unsupported architecture
#endif
	return remain;
}

static inline void
_absolutetime_to_nanotime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *nanosecs)
{
#if defined(__i386__)
	asm volatile(
			"divl %3"
			: "=a" (*secs), "=d" (*nanosecs)
			: "A" (abstime), "r" (NSEC_PER_SEC));
#elif defined(__x86_64__)
	*secs = abstime / (uint64_t)NSEC_PER_SEC;
	*nanosecs = (clock_usec_t)(abstime % (uint64_t)NSEC_PER_SEC);
#else
#error Unsupported architecture
#endif
}

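/*
 * deadline_to_decrementer (below) converts an absolute deadline into a
 * clamped interval: MIN(MAX(rtc_decrementer_min, deadline - now), maxDec).
 * For example (illustrative numbers), a deadline 5 ms in the future yields
 * 5000000 ns, while a deadline already in the past yields the minimum
 * rather than zero or a huge unsigned wraparound.
 */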
static uint32_t
deadline_to_decrementer(
	uint64_t	deadline,
	uint64_t	now)
{
	uint64_t	delta;

	if (deadline <= now)
		return (uint32_t)rtc_decrementer_min;
	else {
		delta = deadline - now;
		return (uint32_t)MIN(MAX(rtc_decrementer_min,delta),maxDec);
	}
}

void
rtc_lapic_start_ticking(void)
{
	x86_lcpu_t	*lcpu = x86_lcpu();

	/*
	 * Force a complete re-evaluation of timer deadlines.
	 */
	lcpu->rtcPop = EndOfAllTime;
	etimer_resync_deadlines();
}

/*
 * Configure the real-time clock device.  Return success (1)
 * or failure (0).
 */

int
rtclock_config(void)
{
	/* nothing to do */
	return (1);
}


/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * The cpu clock rate is platform-dependent and may stop or be reset when the
 * processor is napped/slept.  As a result, nanotime is the software abstraction
 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
 *
 * The kernel maintains nanotime information recording:
 *	- the ratio of tsc to nanoseconds
 *	  with this ratio expressed as a 32-bit scale and shift
 *	  (power of 2 divider);
 *	- { tsc_base, ns_base } pair of corresponding timestamps.
 *
 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read.
 *
 * All of the routines which update the nanotime data are non-reentrant.  This must
 * be guaranteed by the caller.
 */
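/*
 * Conceptually, then, a nanotime read is (a sketch; the shipped reader,
 * _rtc_nanotime_read, additionally retries on the structure's generation
 * count to obtain a consistent snapshot):
 *
 *	nanotime = ns_base + _tsc_to_nanoseconds(rdtsc64() - tsc_base)
 */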
static inline void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
	commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}

/*
 * rtc_nanotime_init:
 *
 * Initialize the nanotime info from the base time.
 */
static inline void
_rtc_nanotime_init(rtc_nanotime_t *rntp, uint64_t base)
{
	uint64_t	tsc = rdtsc64();

	_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
}

static void
rtc_nanotime_init(uint64_t base)
{
	rtc_nanotime_t	*rntp = current_cpu_datap()->cpu_nanotime;

	_rtc_nanotime_init(rntp, base);
	rtc_nanotime_set_commpage(rntp);
}

/*
 * rtc_nanotime_init_commpage:
 *
 * Call back from the commpage initialization to
 * cause the commpage data to be filled in once the
 * commpages have been created.
 */
void
rtc_nanotime_init_commpage(void)
{
	spl_t	s = splclock();

	rtc_nanotime_set_commpage(current_cpu_datap()->cpu_nanotime);

	splx(s);
}

/*
 * rtc_nanotime_read:
 *
 * Returns the current nanotime value, accessible from any
 * context.
 */
static inline uint64_t
rtc_nanotime_read(void)
{

#if CONFIG_EMBEDDED
	if (gPEClockFrequencyInfo.timebase_frequency_hz > SLOW_TSC_THRESHOLD)
		return	_rtc_nanotime_read(current_cpu_datap()->cpu_nanotime, 1);	/* slow processor */
	else
#endif
	return	_rtc_nanotime_read(current_cpu_datap()->cpu_nanotime, 0);	/* assume fast processor */
}

/*
 * rtc_clock_napped:
 *
 * Invoked from power management when we exit from a low C-State (>= C4)
 * and the TSC has stopped counting.  The nanotime data is updated according
 * to the provided value which represents the new value for nanotime.
 */
void
rtc_clock_napped(uint64_t base, uint64_t tsc_base)
{
	rtc_nanotime_t	*rntp = current_cpu_datap()->cpu_nanotime;
	uint64_t	oldnsecs;
	uint64_t	newnsecs;
	uint64_t	tsc;

	assert(!ml_get_interrupts_enabled());
	tsc = rdtsc64();
	oldnsecs = rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
	newnsecs = base + _tsc_to_nanoseconds(tsc - tsc_base);

	/*
	 * Only update the base values if time using the new base values
	 * is later than the time using the old base values.
	 */
	if (oldnsecs < newnsecs) {
		_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
		rtc_nanotime_set_commpage(rntp);
	}
}


/*
 * Invoked from power management to correct the SFLM TSC entry drift problem:
 * a small delta is added to the tsc_base.  This is equivalent to nudging time
 * backwards.  We require this to be of the order of a TSC quantum, which won't
 * cause callers of mach_absolute_time() to see time going backwards!
 */
void
rtc_clock_adjust(uint64_t tsc_base_delta)
{
	rtc_nanotime_t	*rntp = current_cpu_datap()->cpu_nanotime;

	assert(!ml_get_interrupts_enabled());
	assert(tsc_base_delta < 100ULL);	/* i.e. it's small */
	_rtc_nanotime_adjust(tsc_base_delta, rntp);
	rtc_nanotime_set_commpage(rntp);
}


void
rtc_clock_stepping(__unused uint32_t new_frequency,
		   __unused uint32_t old_frequency)
{
	panic("rtc_clock_stepping unsupported");
}

void
rtc_clock_stepped(__unused uint32_t new_frequency,
		  __unused uint32_t old_frequency)
{
	panic("rtc_clock_stepped unsupported");
}

/*
 * rtc_sleep_wakeup:
 *
 * Invoked from power management when we have awoken from a sleep (S3)
 * and the TSC has been reset.  The nanotime data is updated based on
 * the passed-in value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_sleep_wakeup(
	uint64_t		base)
{
	/*
	 * Reset nanotime.
	 * The timestamp counter will have been reset
	 * but nanotime (uptime) marches onward.
	 */
	rtc_nanotime_init(base);
}

/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
rtclock_init(void)
{
	uint64_t	cycles;

	assert(!ml_get_interrupts_enabled());

	if (cpu_number() == master_cpu) {

		assert(tscFreq);
		rtc_set_timescale(tscFreq);

		/*
		 * Adjust and set the exported cpu speed.
		 */
		cycles = rtc_export_speed(tscFreq);

		/*
		 * Set min/max to actual.
		 * ACPI may update these later if speed-stepping is detected.
		 */
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;

		/*
		 * Compute the longest interval we can represent.
		 */
		maxDec = tmrCvt(0x7fffffffULL, busFCvtt2n);
		kprintf("maxDec: %lld\n", maxDec);

		/* Minimum interval is 1usec */
		rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC, 0ULL);

		/* Point LAPIC interrupts to hardclock() */
		lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

		clock_timebase_init();
		ml_init_lock_timeout();
	}

	rtc_lapic_start_ticking();

	return (1);
}

// Utility routine:
// compute the tsc-to-nanoseconds scale factor from the number of
// processor cycles in a second.

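/*
 * For example (illustrative numbers only), a hypothetical 2 GHz TSC gives
 *
 *	scale = (NSEC_PER_SEC << 32) / 2000000000 = 0x80000000
 *
 * i.e. 0.5 nanoseconds per cycle in 32.32 fixed point.
 */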
static void
rtc_set_timescale(uint64_t cycles)
{
	rtc_nanotime_t	*rntp = current_cpu_datap()->cpu_nanotime;
	rntp->scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles);

	if (cycles <= SLOW_TSC_THRESHOLD)
		rntp->shift = (uint32_t)cycles;
	else
		rntp->shift = 32;

	if (tsc_rebase_abs_time == 0)
		tsc_rebase_abs_time = mach_absolute_time();

	rtc_nanotime_init(0);
}

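/*
 * rtc_export_speed rounds the measured frequency to the nearest multiple
 * of UI_CPUFREQ_ROUNDING_FACTOR (10 MHz) before exporting it; for example
 * (a hypothetical reading), 2394000000 Hz would be exported as
 * 2390000000 Hz.
 */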
static uint64_t
rtc_export_speed(uint64_t cyc_per_sec)
{
	uint64_t	cycles;

	/* Round: */
	cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
			/ UI_CPUFREQ_ROUNDING_FACTOR)
				* UI_CPUFREQ_ROUNDING_FACTOR;

	/*
	 * Set current measured speed.
	 */
	if (cycles >= 0x100000000ULL) {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
	} else {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
	}
	gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

	kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
	return(cycles);
}

void
clock_get_system_microtime(
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	uint64_t	now = rtc_nanotime_read();

	_absolutetime_to_microtime(now, secs, microsecs);
}

void
clock_get_system_nanotime(
	clock_sec_t	*secs,
	clock_nsec_t	*nanosecs)
{
	uint64_t	now = rtc_nanotime_read();

	_absolutetime_to_nanotime(now, secs, nanosecs);
}

void
clock_gettimeofday_set_commpage(
	uint64_t	abstime,
	uint64_t	epoch,
	uint64_t	offset,
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	uint64_t	now = abstime + offset;
	uint32_t	remain;

	remain = _absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)epoch;

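	/*
	 * abstime - remain is the absolute time at which the current second
	 * began, so the commpage gettimeofday() can derive the sub-second
	 * part from mach_absolute_time() alone.
	 */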
	commpage_set_timestamp(abstime - remain, *secs);
}

void
clock_timebase_info(
	mach_timebase_info_t	info)
{
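	/* Absolute time is in nanoseconds on this platform, so the timebase ratio is 1:1 */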
	info->numer = info->denom = 1;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
	x86_saved_state_t	*tregs)
{
	uint64_t	rip;
	boolean_t	user_mode = FALSE;
	uint64_t	abstime;
	uint32_t	latency;
	x86_lcpu_t	*lcpu = x86_lcpu();

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	abstime = rtc_nanotime_read();
	latency = (uint32_t)(abstime - lcpu->rtcDeadline);
	if (abstime < lcpu->rtcDeadline)
		latency = 1;

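	/*
	 * Determine the mode, and the interrupted RIP, from the saved state.
	 * The low two bits of %cs hold the privilege level, so a non-zero
	 * value means the timer fired while in user mode.
	 */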
	if (is_saved_state64(tregs) == TRUE) {
		x86_saved_state64_t	*regs;

		regs = saved_state64(tregs);

		if (regs->isf.cs & 0x03)
			user_mode = TRUE;
		rip = regs->isf.rip;
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(tregs);

		if (regs->cs & 0x03)
			user_mode = TRUE;
		rip = regs->eip;
	}

	/* Log the interrupt service latency (-ve value expected by tool) */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		-(int32_t)latency, (uint32_t)rip, user_mode, 0, 0);

	/* call the generic etimer */
	etimer_intr(user_mode, rip);
}

/*
 * Request timer pop from the hardware
 */

int
setPop(
	uint64_t time)
{
	uint64_t	now;
	uint32_t	decr;
	uint64_t	count;

	now = rtc_nanotime_read();	/* The time in nanoseconds */
	decr = deadline_to_decrementer(time, now);

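	/*
	 * Convert the interval from nanoseconds to bus ticks (busFCvtn2t,
	 * from i386/tsc.h, is the ns-to-bus-tick conversion factor) and arm
	 * the local apic one-shot timer.
	 */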
	count = tmrCvt(decr, busFCvtn2t);
	lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);

	return decr;	/* Pass back what we set */
}


uint64_t
mach_absolute_time(void)
{
	return rtc_nanotime_read();
}

void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	*result = (uint64_t)interval * scale_factor;
}

void
absolutetime_to_microtime(
	uint64_t	abstime,
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	_absolutetime_to_microtime(abstime, secs, microsecs);
}

void
absolutetime_to_nanotime(
	uint64_t	abstime,
	clock_sec_t	*secs,
	clock_nsec_t	*nanosecs)
{
	_absolutetime_to_nanotime(abstime, secs, nanosecs);
}

void
nanotime_to_absolutetime(
	clock_sec_t	secs,
	clock_nsec_t	nanosecs,
	uint64_t	*result)
{
	*result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
}

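/*
 * Since absolute time is maintained in nanoseconds on this platform
 * (see clock_timebase_info above), the two conversions below are
 * identities.
 */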
void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	*result = abstime;
}

void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	*result = nanoseconds;
}

void
machine_delay_until(
	uint64_t	deadline)
{
	uint64_t	now;

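	/* Spin until the deadline passes, using PAUSE (cpu_pause) to be friendly to the pipeline and any hyperthread twin */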
	do {
		cpu_pause();
		now = mach_absolute_time();
	} while (now < deadline);
}