]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_OSREFERENCE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the | |
10 | * License may not be used to create, or enable the creation or | |
11 | * redistribution of, unlawful or unlicensed copies of an Apple operating | |
12 | * system, or to circumvent, violate, or enable the circumvention or | |
13 | * violation of, any terms of an Apple operating system software license | |
14 | * agreement. | |
15 | * | |
16 | * Please obtain a copy of the License at | |
17 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
18 | * file. | |
19 | * | |
20 | * The Original Code and all software distributed under the License are | |
21 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
22 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
23 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
24 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
25 | * Please see the License for the specific language governing rights and | |
26 | * limitations under the License. | |
27 | * | |
28 | * @APPLE_LICENSE_OSREFERENCE_HEADER_END@ | |
29 | */ | |
30 | /* | |
31 | * @OSF_COPYRIGHT@ | |
32 | */ | |
33 | ||
34 | /* | |
35 | * File: i386/rtclock.c | |
36 | * Purpose: Routines for handling the machine dependent | |
37 | * real-time clock. Historically, this clock is | |
38 | * generated by the Intel 8254 Programmable Interval | |
39 | * Timer, but local apic timers are now used for | |
40 | * this purpose with the master time reference being | |
41 | * the cpu clock counted by the timestamp MSR. | |
42 | */ | |
43 | ||
44 | #include <platforms.h> | |
45 | #include <mach_kdb.h> | |
46 | ||
47 | #include <mach/mach_types.h> | |
48 | ||
49 | #include <kern/cpu_data.h> | |
50 | #include <kern/cpu_number.h> | |
51 | #include <kern/clock.h> | |
52 | #include <kern/host_notify.h> | |
53 | #include <kern/macro_help.h> | |
54 | #include <kern/misc_protos.h> | |
55 | #include <kern/spl.h> | |
56 | #include <kern/assert.h> | |
57 | #include <mach/vm_prot.h> | |
58 | #include <vm/pmap.h> | |
59 | #include <vm/vm_kern.h> /* for kernel_map */ | |
60 | #include <i386/ipl.h> | |
61 | #include <i386/pit.h> | |
62 | #include <architecture/i386/pio.h> | |
63 | #include <i386/misc_protos.h> | |
64 | #include <i386/proc_reg.h> | |
65 | #include <i386/machine_cpu.h> | |
66 | #include <i386/mp.h> | |
67 | #include <i386/cpuid.h> | |
68 | #include <i386/cpu_data.h> | |
69 | #include <i386/cpu_threads.h> | |
70 | #include <i386/perfmon.h> | |
71 | #include <i386/machine_routines.h> | |
72 | #include <pexpert/pexpert.h> | |
73 | #include <machine/limits.h> | |
74 | #include <machine/commpage.h> | |
75 | #include <sys/kdebug.h> | |
76 | #include <i386/tsc.h> | |
77 | #include <i386/hpet.h> | |
78 | #include <i386/rtclock.h> | |
79 | ||
/*
 * Classic function-like min/max macros: the arguments are evaluated
 * more than once, so avoid passing expressions with side effects.
 */
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)>(b))?(b):(a))

#define NSEC_PER_HZ (NSEC_PER_SEC / 100) /* nsec per tick (100Hz) */

/* Exported cpu frequencies are rounded to a multiple of this (10MHz). */
#define UI_CPUFREQ_ROUNDING_FACTOR 10000000
86 | ||
/* Entry points called by the machine-independent clock layer. */
int rtclock_config(void);

int rtclock_init(void);

/* Shortest interval (nsec) we will program into the timer hardware;
 * set to the 1usec equivalent in rtclock_init(). */
uint64_t rtc_decrementer_min;

void rtclock_intr(x86_saved_state_t *regs);
static uint64_t maxDec; /* longest interval our hardware timer can handle (nsec) */

/* XXX this should really be in a header somewhere */
extern clock_timer_func_t rtclock_timer_expire;

static void rtc_set_timescale(uint64_t cycles);
static uint64_t rtc_export_speed(uint64_t cycles);

/*
 * Assembly helpers for the nanotime record.
 * NOTE(review): from usage below, store writes a {tsc, ns, scale, shift}
 * tuple and load copies a snapshot from src to dst — confirm against the
 * assembly definitions.
 */
extern void rtc_nanotime_store(
			uint64_t	tsc,
			uint64_t	nsec,
			uint32_t	scale,
			uint32_t	shift,
			rtc_nanotime_t	*dst);

extern void rtc_nanotime_load(
			rtc_nanotime_t	*src,
			rtc_nanotime_t	*dst);

/* The kernel's master nanotime record (also mirrored to the commpage). */
rtc_nanotime_t rtc_nanotime_info;
114 | ||
115 | /* | |
116 | * tsc_to_nanoseconds: | |
117 | * | |
118 | * Basic routine to convert a raw 64 bit TSC value to a | |
119 | * 64 bit nanosecond value. The conversion is implemented | |
120 | * based on the scale factor and an implicit 32 bit shift. | |
121 | */ | |
/*
 * 64x32-bit multiply of the TSC value (in %edx:%eax via the "+A"
 * constraint, i386 only) by the 32-bit scale factor, keeping the middle
 * 64 bits of the 96-bit product — i.e. an implicit right shift by 32,
 * matching the shift of 32 set in rtc_set_timescale().
 */
static inline uint64_t
_tsc_to_nanoseconds(uint64_t value)
{
	asm volatile("movl %%edx,%%esi ;"	/* save hi half of value */
		     "mull %%ecx ;"		/* lo(value) * scale */
		     "movl %%edx,%%edi ;"	/* save hi word of partial product */
		     "movl %%esi,%%eax ;"	/* hi(value) into %eax */
		     "mull %%ecx ;"		/* hi(value) * scale */
		     "addl %%edi,%%eax ;"	/* combine the partial products */
		     "adcl $0,%%edx "		/* propagate the carry */
			: "+A" (value) : "c" (rtc_nanotime_info.scale) : "esi", "edi");

	return (value);
}
136 | ||
/*
 * tsc_to_nanoseconds:
 *
 * Exported (non-inline) entry point for TSC -> nanosecond conversion.
 */
uint64_t
tsc_to_nanoseconds(uint64_t value)
{
	uint64_t nsecs;

	nsecs = _tsc_to_nanoseconds(value);
	return nsecs;
}
142 | ||
143 | static uint32_t | |
144 | deadline_to_decrementer( | |
145 | uint64_t deadline, | |
146 | uint64_t now) | |
147 | { | |
148 | uint64_t delta; | |
149 | ||
150 | if (deadline <= now) | |
151 | return rtc_decrementer_min; | |
152 | else { | |
153 | delta = deadline - now; | |
154 | return MIN(MAX(rtc_decrementer_min,delta),maxDec); | |
155 | } | |
156 | } | |
157 | ||
/*
 * Arm this cpu's periodic tick: record the first tick deadline and
 * force the etimer layer to reprogram the hardware.
 */
static void
rtc_lapic_start_ticking(void)
{
	uint64_t	abstime;
	uint64_t	first_tick;
	cpu_data_t	*cdp = current_cpu_datap();

	abstime = mach_absolute_time();
	rtclock_tick_interval = NSEC_PER_HZ;	/* 10ms (100Hz) tick */

	first_tick = abstime + rtclock_tick_interval;
	cdp->rtclock_intr_deadline = first_tick;

	/*
	 * Force a complete re-evaluation of timer deadlines.
	 */
	cdp->rtcPop = EndOfAllTime;
	etimer_resync_deadlines();
}
177 | ||
178 | /* | |
179 | * Configure the real-time clock device. Return success (1) | |
180 | * or failure (0). | |
181 | */ | |
182 | ||
int
rtclock_config(void)
{
	/* No machine-dependent configuration is required; report success. */
	return 1;
}
189 | ||
190 | ||
191 | /* | |
192 | * Nanotime/mach_absolute_time | |
193 | * ----------------------------- | |
194 | * The timestamp counter (TSC) - which counts cpu clock cycles and can be read | |
195 | * efficiently by the kernel and in userspace - is the reference for all timing. | |
196 | * The cpu clock rate is platform-dependent and may stop or be reset when the | |
197 | * processor is napped/slept. As a result, nanotime is the software abstraction | |
198 | * used to maintain a monotonic clock, adjusted from an outside reference as needed. | |
199 | * | |
200 | * The kernel maintains nanotime information recording: | |
201 | * - the ratio of tsc to nanoseconds | |
202 | * with this ratio expressed as a 32-bit scale and shift | |
203 | * (power of 2 divider); | |
204 | * - { tsc_base, ns_base } pair of corresponding timestamps. | |
205 | * | |
206 | * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage | |
207 | * for the userspace nanotime routine to read. | |
208 | * | |
209 | * All of the routines which update the nanotime data are non-reentrant. This must | |
210 | * be guaranteed by the caller. | |
211 | */ | |
/*
 * rtc_nanotime_set_commpage:
 *
 * Mirror the kernel's nanotime tuple {tsc_base, ns_base, scale, shift}
 * into the commpage for the userspace nanotime routine.
 */
static inline void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
	commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}
217 | ||
218 | /* | |
219 | * rtc_nanotime_init: | |
220 | * | |
221 | * Initialize the nanotime info from the base time. Since | |
222 | * the base value might be from a lower resolution clock, | |
223 | * we compare it to the TSC derived value, and use the | |
224 | * greater of the two values. | |
225 | */ | |
static inline void
_rtc_nanotime_init(rtc_nanotime_t *rntp, uint64_t base)
{
	uint64_t	nsecs, tsc = rdtsc64();

	/* Use the greater of the TSC-derived time and the supplied base,
	 * since the base may come from a lower-resolution clock. */
	nsecs = _tsc_to_nanoseconds(tsc);
	rtc_nanotime_store(tsc, MAX(nsecs, base), rntp->scale, rntp->shift, rntp);
}
234 | ||
235 | static void | |
236 | rtc_nanotime_init(uint64_t base) | |
237 | { | |
238 | rtc_nanotime_t *rntp = &rtc_nanotime_info; | |
239 | ||
240 | _rtc_nanotime_init(rntp, base); | |
241 | rtc_nanotime_set_commpage(rntp); | |
242 | } | |
243 | ||
244 | /* | |
245 | * rtc_nanotime_init: | |
246 | * | |
247 | * Call back from the commpage initialization to | |
248 | * cause the commpage data to be filled in once the | |
249 | * commpages have been created. | |
250 | */ | |
251 | void | |
252 | rtc_nanotime_init_commpage(void) | |
253 | { | |
254 | spl_t s = splclock(); | |
255 | ||
256 | rtc_nanotime_set_commpage(&rtc_nanotime_info); | |
257 | ||
258 | splx(s); | |
259 | } | |
260 | ||
261 | /* | |
262 | * rtc_nanotime_update: | |
263 | * | |
264 | * Update the nanotime info from the base time. Since | |
265 | * the base value might be from a lower resolution clock, | |
266 | * we compare it to the TSC derived value, and use the | |
267 | * greater of the two values. | |
268 | * | |
269 | * N.B. In comparison to the above init routine, this assumes | |
270 | * that the TSC has remained monotonic compared to the tsc_base | |
271 | * value, which is not the case after S3 sleep. | |
272 | */ | |
static inline void
_rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t base)
{
	uint64_t	nsecs, tsc = rdtsc64();

	/*
	 * Advance ns_base by the nanoseconds elapsed since tsc_base, then
	 * take the greater of that and the supplied base. Assumes the TSC
	 * has stayed monotonic relative to tsc_base (see block comment).
	 */
	nsecs = rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
	rtc_nanotime_store(tsc, MAX(nsecs, base), rntp->scale, rntp->shift, rntp);
}
281 | ||
282 | static void | |
283 | rtc_nanotime_update( | |
284 | uint64_t base) | |
285 | { | |
286 | rtc_nanotime_t *rntp = &rtc_nanotime_info; | |
287 | ||
288 | assert(!ml_get_interrupts_enabled()); | |
289 | ||
290 | _rtc_nanotime_update(rntp, base); | |
291 | rtc_nanotime_set_commpage(rntp); | |
292 | } | |
293 | ||
294 | /* | |
295 | * rtc_nanotime_read: | |
296 | * | |
297 | * Returns the current nanotime value, accessible from any | |
298 | * context. | |
299 | */ | |
static uint64_t
rtc_nanotime_read(void)
{
	rtc_nanotime_t	rnt, *rntp = &rtc_nanotime_info;
	uint64_t	result;

	/*
	 * Lockless read: snapshot the nanotime record, compute the current
	 * time from the snapshot, and retry if an updater changed tsc_base
	 * while we were reading (the snapshot may have been torn).
	 */
	do {
		rtc_nanotime_load(rntp, &rnt);
		result = rnt.ns_base + _tsc_to_nanoseconds(rdtsc64() - rnt.tsc_base);
	} while (rntp->tsc_base != rnt.tsc_base);

	return (result);
}
313 | ||
314 | /* | |
315 | * rtc_clock_napped: | |
316 | * | |
317 | * Invoked from power management when we have awoken from a nap (C3/C4) | |
318 | * during which the TSC lost counts. The nanotime data is updated according | |
319 | * to the provided nanosecond base value. | |
320 | * | |
321 | * The caller must guarantee non-reentrancy. | |
322 | */ | |
void
rtc_clock_napped(uint64_t base)
{
	/* Fold the supplied nanosecond base into the nanotime record. */
	rtc_nanotime_update(base);
}
329 | ||
/* CPU speed-stepping transitions are not handled by this clock code. */
void
rtc_clock_stepping(__unused uint32_t new_frequency,
		   __unused uint32_t old_frequency)
{
	panic("rtc_clock_stepping unsupported");
}
336 | ||
337 | void | |
338 | rtc_clock_stepped(__unused uint32_t new_frequency, | |
339 | __unused uint32_t old_frequency) | |
340 | { | |
341 | panic("rtc_clock_stepping unsupported"); | |
342 | } | |
343 | ||
344 | /* | |
345 | * rtc_sleep_wakeup: | |
346 | * | |
347 | * Invoked from power management when we have awoken from a sleep (S3) | |
348 | * and the TSC has been reset. The nanotime data is updated based on | |
349 | * the HPET value. | |
350 | * | |
351 | * The caller must guarantee non-reentrancy. | |
352 | */ | |
void
rtc_sleep_wakeup(void)
{
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);

	/*
	 * Reset nanotime.
	 * The timestamp counter will have been reset
	 * but nanotime (uptime) marches onward.
	 * Re-base from the HPET reading converted to nanoseconds.
	 */
	rtc_nanotime_init(tmrCvt(rdHPET(), hpetCvtt2n));

	/* Restart tick interrupts from the LAPIC timer */
	rtc_lapic_start_ticking();

	ml_set_interrupts_enabled(istate);
}
372 | ||
373 | /* | |
374 | * Initialize the real-time clock device. | |
375 | * In addition, various variables used to support the clock are initialized. | |
376 | */ | |
int
rtclock_init(void)
{
	uint64_t	cycles;

	assert(!ml_get_interrupts_enabled());

	/* One-time master-cpu setup; secondaries only start ticking below. */
	if (cpu_number() == master_cpu) {

		/* The TSC frequency must already have been determined. */
		assert(tscFreq);
		rtc_set_timescale(tscFreq);

		/*
		 * Adjust and set the exported cpu speed.
		 */
		cycles = rtc_export_speed(tscFreq);

		/*
		 * Set min/max to actual.
		 * ACPI may update these later if speed-stepping is detected.
		 */
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;

		/*
		 * Compute the longest interval we can represent
		 * (0x7fffffff bus ticks converted to nanoseconds).
		 */
		maxDec = tmrCvt(0x7fffffffULL, busFCvtt2n);
		kprintf("maxDec: %lld\n", maxDec);

		/* Minimum interval is 1usec */
		rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC, 0ULL);
		/* Point LAPIC interrupts to hardclock() */
		lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

		clock_timebase_init();
		ml_init_lock_timeout();
	}

	/* Every cpu (including the master) arms its own tick. */
	rtc_lapic_start_ticking();

	return (1);
}
420 | ||
421 | // utility routine | |
422 | // Code to calculate how many processor cycles are in a second... | |
423 | ||
424 | static void | |
425 | rtc_set_timescale(uint64_t cycles) | |
426 | { | |
427 | rtc_nanotime_info.scale = ((uint64_t)NSEC_PER_SEC << 32) / cycles; | |
428 | rtc_nanotime_info.shift = 32; | |
429 | ||
430 | rtc_nanotime_init(0); | |
431 | } | |
432 | ||
433 | static uint64_t | |
434 | rtc_export_speed(uint64_t cyc_per_sec) | |
435 | { | |
436 | uint64_t cycles; | |
437 | ||
438 | /* Round: */ | |
439 | cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2)) | |
440 | / UI_CPUFREQ_ROUNDING_FACTOR) | |
441 | * UI_CPUFREQ_ROUNDING_FACTOR; | |
442 | ||
443 | /* | |
444 | * Set current measured speed. | |
445 | */ | |
446 | if (cycles >= 0x100000000ULL) { | |
447 | gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL; | |
448 | } else { | |
449 | gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles; | |
450 | } | |
451 | gPEClockFrequencyInfo.cpu_frequency_hz = cycles; | |
452 | ||
453 | kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec); | |
454 | return(cycles); | |
455 | } | |
456 | ||
/*
 * Split the current nanotime into whole seconds and microseconds.
 */
void
clock_get_system_microtime(
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint64_t	now = rtc_nanotime_read();
	uint32_t	remain;

	/*
	 * 64/32 divide: seconds (quotient) in %eax, nanosecond remainder
	 * in %edx. NOTE(review): divl faults if the quotient exceeds 32
	 * bits — assumes uptime in seconds fits in uint32_t.
	 */
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (now), "r" (NSEC_PER_SEC));
	/* Nanosecond remainder -> microseconds. */
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
474 | ||
/*
 * Split the current nanotime into whole seconds and the nanosecond
 * remainder (quotient in %eax, remainder in %edx).
 */
void
clock_get_system_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	uint64_t	now = rtc_nanotime_read();

	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (*nanosecs)
				: "A" (now), "r" (NSEC_PER_SEC));
}
487 | ||
/*
 * Compute calendar time from abstime plus the boot offset and epoch,
 * returning seconds/microseconds and publishing the second-aligned
 * timestamp to the commpage for userspace gettimeofday.
 */
void
clock_gettimeofday_set_commpage(
	uint64_t				abstime,
	uint64_t				epoch,
	uint64_t				offset,
	uint32_t				*secs,
	uint32_t				*microsecs)
{
	uint64_t	now = abstime;
	uint32_t	remain;

	now += offset;		/* abstime -> wall-clock nanoseconds */

	/* Split into seconds (quotient) and nanosecond remainder. */
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (now), "r" (NSEC_PER_SEC));
	/* Nanosecond remainder -> microseconds. */
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));

	*secs += epoch;		/* add the epoch to get calendar seconds */

	/*
	 * Publish the timestamp backed off to the last second boundary.
	 * NOTE(review): remain was derived from (abstime + offset) but is
	 * subtracted from abstime here — confirm this is intentional.
	 */
	commpage_set_timestamp(abstime - remain, *secs, NSEC_PER_SEC);
}
514 | ||
515 | void | |
516 | clock_timebase_info( | |
517 | mach_timebase_info_t info) | |
518 | { | |
519 | info->numer = info->denom = 1; | |
520 | } | |
521 | ||
522 | void | |
523 | clock_set_timer_func( | |
524 | clock_timer_func_t func) | |
525 | { | |
526 | if (rtclock_timer_expire == NULL) | |
527 | rtclock_timer_expire = func; | |
528 | } | |
529 | ||
530 | /* | |
531 | * Real-time clock device interrupt. | |
532 | */ | |
void
rtclock_intr(
	x86_saved_state_t	*tregs)
{
        uint64_t	rip;
	boolean_t	user_mode = FALSE;
	uint64_t	abstime;
	uint32_t	latency;
	cpu_data_t	*pp = current_cpu_datap();

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	abstime = rtc_nanotime_read();
	/* Service latency: how far (32-bit) past the programmed pop we are. */
	latency = (uint32_t) abstime - pp->rtcPop;

	if (is_saved_state64(tregs) == TRUE) {
	        x86_saved_state64_t	*regs;

	        regs = saved_state64(tregs);

		/* NOTE(review): assumes a 64-bit save state can only come
		 * from user mode — confirm for this kernel configuration. */
		user_mode = TRUE;
		rip = regs->isf.rip;
	} else {
	        x86_saved_state32_t	*regs;

	        regs = saved_state32(tregs);

		/* CPL in low two bits of CS: nonzero means user mode. */
		if (regs->cs & 0x03)
		        user_mode = TRUE;
		rip = regs->eip;
	}

	/* Log the interrupt service latency (-ve value expected by tool) */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		-latency, (uint32_t)rip, user_mode, 0, 0);

	/* call the generic etimer */
	etimer_intr(user_mode, rip);
}
574 | ||
575 | /* | |
576 | * Request timer pop from the hardware | |
577 | */ | |
578 | ||
579 | int | |
580 | setPop( | |
581 | uint64_t time) | |
582 | { | |
583 | uint64_t now; | |
584 | uint32_t decr; | |
585 | uint64_t count; | |
586 | ||
587 | now = rtc_nanotime_read(); /* The time in nanoseconds */ | |
588 | decr = deadline_to_decrementer(time, now); | |
589 | ||
590 | count = tmrCvt(decr, busFCvtn2t); | |
591 | lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count); | |
592 | ||
593 | return decr; /* Pass back what we set */ | |
594 | } | |
595 | ||
596 | ||
597 | void | |
598 | resetPop(void) | |
599 | { | |
600 | uint64_t now; | |
601 | uint32_t decr; | |
602 | uint64_t count; | |
603 | cpu_data_t *cdp = current_cpu_datap(); | |
604 | ||
605 | now = rtc_nanotime_read(); | |
606 | ||
607 | decr = deadline_to_decrementer(cdp->rtcPop, now); | |
608 | ||
609 | count = tmrCvt(decr, busFCvtn2t); | |
610 | lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t)count); | |
611 | } | |
612 | ||
613 | ||
/*
 * mach_absolute_time:
 *
 * On this platform absolute time is nanotime, derived from the TSC.
 */
uint64_t
mach_absolute_time(void)
{
	return (rtc_nanotime_read());
}
619 | ||
void
clock_interval_to_absolutetime_interval(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	/* Widen before multiplying to avoid 32-bit overflow. */
	uint64_t scaled = (uint64_t)interval;

	scaled *= scale_factor;
	*result = scaled;
}
628 | ||
/*
 * Split an absolute (nanosecond) time into seconds and microseconds.
 */
void
absolutetime_to_microtime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint32_t	remain;

	/*
	 * 64/32 divide: seconds in %eax, nanosecond remainder in %edx.
	 * NOTE(review): divl faults if the quotient exceeds 32 bits —
	 * assumes abstime < 2^32 seconds.
	 */
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (abstime), "r" (NSEC_PER_SEC));
	/* Nanosecond remainder -> microseconds. */
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
646 | ||
/*
 * Split an absolute (nanosecond) time into seconds and the nanosecond
 * remainder (quotient in %eax, remainder in %edx).
 */
void
absolutetime_to_nanotime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (*nanosecs)
				: "A" (abstime), "r" (NSEC_PER_SEC));
}
658 | ||
659 | void | |
660 | nanotime_to_absolutetime( | |
661 | uint32_t secs, | |
662 | uint32_t nanosecs, | |
663 | uint64_t *result) | |
664 | { | |
665 | *result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs; | |
666 | } | |
667 | ||
void
absolutetime_to_nanoseconds(
	uint64_t		abstime,
	uint64_t		*result)
{
	/* Identity: absolute time is already in nanoseconds. */
	*result = abstime;
}
675 | ||
void
nanoseconds_to_absolutetime(
	uint64_t		nanoseconds,
	uint64_t		*result)
{
	/* Identity: absolute time is already in nanoseconds. */
	*result = nanoseconds;
}
683 | ||
void
machine_delay_until(
	uint64_t		deadline)
{
	/* Busy-wait (with pause hints) until nanotime passes the deadline. */
	for (;;) {
		cpu_pause();
		if (mach_absolute_time() >= deadline)
			break;
	}
}