/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * File:    i386/rtclock.c
 * Purpose: Routines for handling the machine-dependent
 *          real-time clock. Historically, this clock was
 *          generated by the Intel 8254 Programmable Interval
 *          Timer, but local APIC timers are now used for
 *          this purpose, with the master time reference being
 *          the CPU clock counted by the timestamp (TSC) MSR.
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>         /* for kernel_map */
#include <i386/ipl.h>
#include <i386/pit.h>
#include <architecture/i386/pio.h>
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/cpu_threads.h>
#include <i386/perfmon.h>
#include <i386/machine_routines.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
#include <i386/tsc.h>
#include <i386/hpet.h>
#include <i386/rtclock.h>

#define MAX(a,b)    (((a)>(b))?(a):(b))
#define MIN(a,b)    (((a)>(b))?(b):(a))

#define NSEC_PER_HZ (NSEC_PER_SEC / 100)    /* nsec per tick */

#define UI_CPUFREQ_ROUNDING_FACTOR  10000000

int rtclock_config(void);

int rtclock_init(void);

uint64_t rtc_decrementer_min;

void rtclock_intr(x86_saved_state_t *regs);
static uint64_t maxDec;     /* longest interval our hardware timer can handle (nsec) */

/* XXX this should really be in a header somewhere */
extern clock_timer_func_t rtclock_timer_expire;

static void rtc_set_timescale(uint64_t cycles);
static uint64_t rtc_export_speed(uint64_t cycles);

extern void rtc_nanotime_store(
    uint64_t        tsc,
    uint64_t        nsec,
    uint32_t        scale,
    uint32_t        shift,
    rtc_nanotime_t  *dst);

extern void rtc_nanotime_load(
    rtc_nanotime_t  *src,
    rtc_nanotime_t  *dst);

rtc_nanotime_t rtc_nanotime_info;

/*
 * tsc_to_nanoseconds:
 *
 * Basic routine to convert a raw 64-bit TSC value to a
 * 64-bit nanosecond value. The conversion is implemented
 * based on the scale factor and an implicit 32-bit shift.
 */
static inline uint64_t
_tsc_to_nanoseconds(uint64_t value)
{
    asm volatile("movl %%edx,%%esi ;"
                 "mull %%ecx ;"
                 "movl %%edx,%%edi ;"
                 "movl %%esi,%%eax ;"
                 "mull %%ecx ;"
                 "addl %%edi,%%eax ;"
                 "adcl $0,%%edx "
                 : "+A" (value) : "c" (rtc_nanotime_info.scale) : "esi", "edi");

    return (value);
}
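
/*
 * The assembly above multiplies the 64-bit TSC value by the 32-bit scale
 * and keeps bits 32..95 of the 96-bit product, which is the implicit
 * 32-bit right shift. A sketch of the equivalent C, assuming a compiler
 * with 128-bit arithmetic (illustrative only, not how this file is built):
 *
 *      static inline uint64_t
 *      tsc_to_ns_sketch(uint64_t value, uint32_t scale)
 *      {
 *          return (uint64_t)(((unsigned __int128)value * scale) >> 32);
 *      }
 */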

uint64_t
tsc_to_nanoseconds(uint64_t value)
{
    return _tsc_to_nanoseconds(value);
}

static uint32_t
deadline_to_decrementer(
    uint64_t    deadline,
    uint64_t    now)
{
    uint64_t    delta;

    if (deadline <= now)
        return rtc_decrementer_min;
    else {
        delta = deadline - now;
        return MIN(MAX(rtc_decrementer_min, delta), maxDec);
    }
}
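
/*
 * Illustrative behavior, assuming the values established in rtclock_init():
 * a deadline 5 ms ahead of now returns 5000000 (nsec); a deadline at or
 * before now returns rtc_decrementer_min (the 1 usec floor); a deadline
 * further out than the hardware timer can count is clamped to maxDec.
 */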

static void
rtc_lapic_start_ticking(void)
{
    uint64_t    abstime;
    uint64_t    first_tick;
    cpu_data_t  *cdp = current_cpu_datap();

    abstime = mach_absolute_time();
    rtclock_tick_interval = NSEC_PER_HZ;

    first_tick = abstime + rtclock_tick_interval;
    cdp->rtclock_intr_deadline = first_tick;

    /*
     * Force a complete re-evaluation of timer deadlines.
     */
    cdp->rtcPop = EndOfAllTime;
    etimer_resync_deadlines();
}

/*
 * Configure the real-time clock device. Return success (1)
 * or failure (0).
 */

int
rtclock_config(void)
{
    /* nothing to do */
    return (1);
}


/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (TSC) - which counts CPU clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * The CPU clock rate is platform-dependent and may stop or be reset when the
 * processor is napped/slept. As a result, nanotime is the software abstraction
 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
 *
 * The kernel maintains nanotime information recording:
 *  - the ratio of TSC ticks to nanoseconds,
 *    with this ratio expressed as a 32-bit scale and shift
 *    (power of 2 divider);
 *  - the { tsc_base, ns_base } pair of corresponding timestamps.
 *
 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read.
 *
 * All of the routines which update the nanotime data are non-reentrant. This must
 * be guaranteed by the caller.
 */
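
/*
 * A minimal sketch of how a consumer recovers the current time from the
 * exported tuple (the actual userspace routine lives in the commpage and
 * is maintained elsewhere; the function name and the 128-bit arithmetic
 * here are illustrative only):
 *
 *      uint64_t
 *      nanotime_sketch(uint64_t tsc_base, uint64_t ns_base, uint32_t scale)
 *      {
 *          uint64_t delta = rdtsc64() - tsc_base;
 *
 *          return ns_base + (uint64_t)(((unsigned __int128)delta * scale) >> 32);
 *      }
 */
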
static inline void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
    commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}

/*
 * rtc_nanotime_init:
 *
 * Initialize the nanotime info from the base time. Since
 * the base value might be from a lower resolution clock,
 * we compare it to the TSC derived value, and use the
 * greater of the two values.
 */
static inline void
_rtc_nanotime_init(rtc_nanotime_t *rntp, uint64_t base)
{
    uint64_t    nsecs, tsc = rdtsc64();

    nsecs = _tsc_to_nanoseconds(tsc);
    rtc_nanotime_store(tsc, MAX(nsecs, base), rntp->scale, rntp->shift, rntp);
}

static void
rtc_nanotime_init(uint64_t base)
{
    rtc_nanotime_t  *rntp = &rtc_nanotime_info;

    _rtc_nanotime_init(rntp, base);
    rtc_nanotime_set_commpage(rntp);
}

/*
 * rtc_nanotime_init_commpage:
 *
 * Call back from the commpage initialization to
 * cause the commpage data to be filled in once the
 * commpages have been created.
 */
void
rtc_nanotime_init_commpage(void)
{
    spl_t   s = splclock();

    rtc_nanotime_set_commpage(&rtc_nanotime_info);

    splx(s);
}

/*
 * rtc_nanotime_update:
 *
 * Update the nanotime info from the base time. Since
 * the base value might be from a lower resolution clock,
 * we compare it to the TSC derived value, and use the
 * greater of the two values.
 *
 * N.B. In comparison to the above init routine, this assumes
 * that the TSC has remained monotonic compared to the tsc_base
 * value, which is not the case after S3 sleep.
 */
static inline void
_rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t base)
{
    uint64_t    nsecs, tsc = rdtsc64();

    nsecs = rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
    rtc_nanotime_store(tsc, MAX(nsecs, base), rntp->scale, rntp->shift, rntp);
}

static void
rtc_nanotime_update(
    uint64_t    base)
{
    rtc_nanotime_t  *rntp = &rtc_nanotime_info;

    assert(!ml_get_interrupts_enabled());

    _rtc_nanotime_update(rntp, base);
    rtc_nanotime_set_commpage(rntp);
}

/*
 * rtc_nanotime_read:
 *
 * Returns the current nanotime value, accessible from any
 * context.
 */
static uint64_t
rtc_nanotime_read(void)
{
    rtc_nanotime_t  rnt, *rntp = &rtc_nanotime_info;
    uint64_t        result;

    do {
        rtc_nanotime_load(rntp, &rnt);      /* take a local snapshot */
        result = rnt.ns_base + _tsc_to_nanoseconds(rdtsc64() - rnt.tsc_base);
    } while (rntp->tsc_base != rnt.tsc_base);   /* retry if an update raced the read */

    return (result);
}

/*
 * rtc_clock_napped:
 *
 * Invoked from power management when we have awoken from a nap (C3/C4)
 * during which the TSC lost counts. The nanotime data is updated according
 * to the provided nanosecond base value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_clock_napped(
    uint64_t    base)
{
    rtc_nanotime_update(base);
}

void
rtc_clock_stepping(__unused uint32_t new_frequency,
                   __unused uint32_t old_frequency)
{
    panic("rtc_clock_stepping unsupported");
}

void
rtc_clock_stepped(__unused uint32_t new_frequency,
                  __unused uint32_t old_frequency)
{
    panic("rtc_clock_stepped unsupported");
}

/*
 * rtc_sleep_wakeup:
 *
 * Invoked from power management when we have awoken from a sleep (S3)
 * and the TSC has been reset. The nanotime data is updated based on
 * the HPET value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_sleep_wakeup(void)
{
    boolean_t   istate;

    istate = ml_set_interrupts_enabled(FALSE);

    /*
     * Reset nanotime.
     * The timestamp counter will have been reset
     * but nanotime (uptime) marches onward.
     */
    rtc_nanotime_init(tmrCvt(rdHPET(), hpetCvtt2n));

    /* Restart tick interrupts from the LAPIC timer */
    rtc_lapic_start_ticking();

    ml_set_interrupts_enabled(istate);
}

/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
rtclock_init(void)
{
    uint64_t    cycles;

    assert(!ml_get_interrupts_enabled());

    if (cpu_number() == master_cpu) {

        assert(tscFreq);
        rtc_set_timescale(tscFreq);

        /*
         * Adjust and set the exported cpu speed.
         */
        cycles = rtc_export_speed(tscFreq);

        /*
         * Set min/max to actual.
         * ACPI may update these later if speed-stepping is detected.
         */
        gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
        gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;

        /*
         * Compute the longest interval we can represent.
         */
        maxDec = tmrCvt(0x7fffffffULL, busFCvtt2n);
        kprintf("maxDec: %lld\n", maxDec);

        /* Minimum interval is 1usec */
        rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC, 0ULL);

        /* Point LAPIC interrupts to hardclock() */
        lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

        clock_timebase_init();
        ml_init_lock_timeout();
    }

    rtc_lapic_start_ticking();

    return (1);
}

/*
 * Utility routine: derive the fixed-point timescale (nanoseconds per
 * cycle) from the number of processor cycles in a second.
 */
static void
rtc_set_timescale(uint64_t cycles)
{
    rtc_nanotime_info.scale = ((uint64_t)NSEC_PER_SEC << 32) / cycles;
    rtc_nanotime_info.shift = 32;

    rtc_nanotime_init(0);
}

static uint64_t
rtc_export_speed(uint64_t cyc_per_sec)
{
    uint64_t    cycles;

    /* Round: */
    cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
                / UI_CPUFREQ_ROUNDING_FACTOR)
                    * UI_CPUFREQ_ROUNDING_FACTOR;

    /*
     * Set current measured speed.
     */
    if (cycles >= 0x100000000ULL) {
        /* Saturate the 32-bit rate field on overflow */
        gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
    } else {
        gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
    }
    gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

    kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
    return (cycles);
}
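
/*
 * Rounding example (illustrative): a measured rate of 2394967296 Hz
 * rounds to the nearest multiple of UI_CPUFREQ_ROUNDING_FACTOR (10 MHz):
 *
 *      ((2394967296 + 5000000) / 10000000) * 10000000 = 2390000000 Hz
 */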

void
clock_get_system_microtime(
    uint32_t    *secs,
    uint32_t    *microsecs)
{
    uint64_t    now = rtc_nanotime_read();
    uint32_t    remain;

    asm volatile(
        "divl %3"
            : "=a" (*secs), "=d" (remain)
            : "A" (now), "r" (NSEC_PER_SEC));
    asm volatile(
        "divl %3"
            : "=a" (*microsecs)
            : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
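
/*
 * The divl pairs above and in the routines below are equivalent to this
 * sketch (valid because each quotient is known to fit in 32 bits, which
 * lets the hand-coded 64/32-bit divide stand in for a runtime 64-bit
 * division):
 *
 *      *secs      = now / NSEC_PER_SEC;
 *      remain     = now % NSEC_PER_SEC;
 *      *microsecs = remain / NSEC_PER_USEC;
 */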

void
clock_get_system_nanotime(
    uint32_t    *secs,
    uint32_t    *nanosecs)
{
    uint64_t    now = rtc_nanotime_read();

    asm volatile(
        "divl %3"
            : "=a" (*secs), "=d" (*nanosecs)
            : "A" (now), "r" (NSEC_PER_SEC));
}

void
clock_gettimeofday_set_commpage(
    uint64_t    abstime,
    uint64_t    epoch,
    uint64_t    offset,
    uint32_t    *secs,
    uint32_t    *microsecs)
{
    uint64_t    now = abstime;
    uint32_t    remain;

    now += offset;

    asm volatile(
        "divl %3"
            : "=a" (*secs), "=d" (remain)
            : "A" (now), "r" (NSEC_PER_SEC));
    asm volatile(
        "divl %3"
            : "=a" (*microsecs)
            : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));

    *secs += epoch;

    commpage_set_timestamp(abstime - remain, *secs, NSEC_PER_SEC);
}

void
clock_timebase_info(
    mach_timebase_info_t    info)
{
    info->numer = info->denom = 1;
}

void
clock_set_timer_func(
    clock_timer_func_t  func)
{
    if (rtclock_timer_expire == NULL)
        rtclock_timer_expire = func;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
    x86_saved_state_t   *tregs)
{
    uint64_t    rip;
    boolean_t   user_mode = FALSE;
    uint64_t    abstime;
    uint32_t    latency;
    cpu_data_t  *pp = current_cpu_datap();

    assert(get_preemption_level() > 0);
    assert(!ml_get_interrupts_enabled());

    abstime = rtc_nanotime_read();
    latency = (uint32_t) abstime - pp->rtcPop;

    if (is_saved_state64(tregs) == TRUE) {
        x86_saved_state64_t *regs;

        regs = saved_state64(tregs);

        user_mode = TRUE;
        rip = regs->isf.rip;
    } else {
        x86_saved_state32_t *regs;

        regs = saved_state32(tregs);

        if (regs->cs & 0x03)
            user_mode = TRUE;
        rip = regs->eip;
    }

    /* Log the interrupt service latency (-ve value expected by tool) */
    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
        -latency, (uint32_t)rip, user_mode, 0, 0);

    /* call the generic etimer */
    etimer_intr(user_mode, rip);
}

/*
 * Request timer pop from the hardware
 */

int
setPop(
    uint64_t    time)
{
    uint64_t    now;
    uint32_t    decr;
    uint64_t    count;

    now = rtc_nanotime_read();      /* The time in nanoseconds */
    decr = deadline_to_decrementer(time, now);

    count = tmrCvt(decr, busFCvtn2t);
    lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);

    return decr;                    /* Pass back what we set */
}
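
/*
 * Example flow (illustrative): a call such as setPop(now + 10000000)
 * computes decr = 10000000 nsec, converts that to LAPIC bus ticks via
 * tmrCvt(decr, busFCvtn2t), and programs the one-shot timer with the
 * resulting count.
 */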


void
resetPop(void)
{
    uint64_t    now;
    uint32_t    decr;
    uint64_t    count;
    cpu_data_t  *cdp = current_cpu_datap();

    now = rtc_nanotime_read();

    decr = deadline_to_decrementer(cdp->rtcPop, now);

    count = tmrCvt(decr, busFCvtn2t);
    lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t)count);
}


uint64_t
mach_absolute_time(void)
{
    return rtc_nanotime_read();
}

void
clock_interval_to_absolutetime_interval(
    uint32_t    interval,
    uint32_t    scale_factor,
    uint64_t    *result)
{
    *result = (uint64_t)interval * scale_factor;
}

void
absolutetime_to_microtime(
    uint64_t    abstime,
    uint32_t    *secs,
    uint32_t    *microsecs)
{
    uint32_t    remain;

    asm volatile(
        "divl %3"
            : "=a" (*secs), "=d" (remain)
            : "A" (abstime), "r" (NSEC_PER_SEC));
    asm volatile(
        "divl %3"
            : "=a" (*microsecs)
            : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}

void
absolutetime_to_nanotime(
    uint64_t    abstime,
    uint32_t    *secs,
    uint32_t    *nanosecs)
{
    asm volatile(
        "divl %3"
            : "=a" (*secs), "=d" (*nanosecs)
            : "A" (abstime), "r" (NSEC_PER_SEC));
}

void
nanotime_to_absolutetime(
    uint32_t    secs,
    uint32_t    nanosecs,
    uint64_t    *result)
{
    *result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
}

void
absolutetime_to_nanoseconds(
    uint64_t    abstime,
    uint64_t    *result)
{
    *result = abstime;
}

void
nanoseconds_to_absolutetime(
    uint64_t    nanoseconds,
    uint64_t    *result)
{
    *result = nanoseconds;
}

void
machine_delay_until(
    uint64_t    deadline)
{
    uint64_t    now;

    do {
        cpu_pause();
        now = mach_absolute_time();
    } while (now < deadline);
}