2 * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * @APPLE_FREE_COPYRIGHT@
30 * Purpose: Routines for handling the machine dependent real-time clock.
34 #include <mach/mach_types.h>
36 #include <kern/clock.h>
37 #include <kern/thread.h>
38 #include <kern/macro_help.h>
41 #include <kern/host_notify.h>
43 #include <machine/commpage.h>
44 #include <machine/machine_routines.h>
45 #include <ppc/exception.h>
46 #include <ppc/proc_reg.h>
48 #include <ppc/rtclock.h>
50 #include <IOKit/IOPlatformExpert.h>
52 #include <sys/kdebug.h>
54 int sysclk_config(void);
56 int sysclk_init(void);
58 kern_return_t
sysclk_gettime(
59 mach_timespec_t
*cur_time
);
61 kern_return_t
sysclk_getattr(
62 clock_flavor_t flavor
,
64 mach_msg_type_number_t
*count
);
67 mach_timespec_t
*deadline
);
69 struct clock_ops sysclk_ops
= {
70 sysclk_config
, sysclk_init
,
76 int calend_config(void);
78 kern_return_t
calend_gettime(
79 mach_timespec_t
*cur_time
);
81 kern_return_t
calend_getattr(
82 clock_flavor_t flavor
,
84 mach_msg_type_number_t
*count
);
86 struct clock_ops calend_ops
= {
93 /* local data declarations */
95 static struct rtclock_calend
{
105 static uint32_t rtclock_boottime
;
107 #define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
109 if (((rfrac) += (frac)) >= (unit)) { \
116 #define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
118 if ((int32_t)((rfrac) -= (frac)) < 0) { \
125 #define NSEC_PER_HZ (NSEC_PER_SEC / 100)
126 static uint32_t rtclock_tick_interval
;
128 static uint32_t rtclock_sec_divisor
;
130 static mach_timebase_info_data_t rtclock_timebase_const
;
132 static boolean_t rtclock_timebase_initialized
;
134 static clock_timer_func_t rtclock_timer_expire
;
136 static timer_call_data_t rtclock_alarm_timer
;
138 static void nanotime_to_absolutetime(
143 static void rtclock_alarm_expire(
144 timer_call_param_t p0
,
145 timer_call_param_t p1
);
147 /* global data declarations */
149 decl_simple_lock_data(static,rtclock_lock
)
152 * Macros to lock/unlock real-time clock device.
154 #define LOCK_RTC(s) \
157 simple_lock(&rtclock_lock); \
160 #define UNLOCK_RTC(s) \
162 simple_unlock(&rtclock_lock); \
168 struct timebase_freq_t
*freq
)
170 uint32_t numer
, denom
;
174 if ( freq
->timebase_den
< 1 || freq
->timebase_den
> 4 ||
175 freq
->timebase_num
< freq
->timebase_den
)
176 panic("rtclock timebase_callback: invalid constant %d / %d",
177 freq
->timebase_num
, freq
->timebase_den
);
179 denom
= freq
->timebase_num
;
180 numer
= freq
->timebase_den
* NSEC_PER_SEC
;
183 if (!rtclock_timebase_initialized
) {
184 commpage_set_timestamp(0,0,0,0);
186 rtclock_timebase_const
.numer
= numer
;
187 rtclock_timebase_const
.denom
= denom
;
188 rtclock_sec_divisor
= freq
->timebase_num
/ freq
->timebase_den
;
190 nanoseconds_to_absolutetime(NSEC_PER_HZ
, &abstime
);
191 rtclock_tick_interval
= abstime
;
193 ml_init_lock_timeout();
197 printf("rtclock timebase_callback: late old %d / %d new %d / %d\n",
198 rtclock_timebase_const
.numer
, rtclock_timebase_const
.denom
,
204 clock_timebase_init();
208 * Configure the real-time clock device.
213 timer_call_setup(&rtclock_alarm_timer
, rtclock_alarm_expire
, NULL
);
215 simple_lock_init(&rtclock_lock
, 0);
217 PE_register_timebase_callback(timebase_callback
);
223 * Initialize the system clock device.
229 struct per_proc_info
*pp
;
233 abstime
= mach_absolute_time();
234 pp
->rtclock_tick_deadline
= abstime
+ rtclock_tick_interval
; /* Get the time we need to pop */
235 pp
->rtcPop
= pp
->rtclock_tick_deadline
; /* Set the rtc pop time the same for now */
237 (void)setTimerReq(); /* Start the timers going */
244 mach_timespec_t
*time
) /* OUT */
249 now
= mach_absolute_time();
251 time
->tv_sec
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
252 now
-= (t64
* divisor
);
253 time
->tv_nsec
= (now
* NSEC_PER_SEC
) / divisor
;
255 return (KERN_SUCCESS
);
259 clock_get_system_microtime(
266 now
= mach_absolute_time();
268 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
269 now
-= (t64
* divisor
);
270 *microsecs
= (now
* USEC_PER_SEC
) / divisor
;
274 clock_get_system_nanotime(
281 now
= mach_absolute_time();
283 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
284 now
-= (t64
* divisor
);
285 *nanosecs
= (now
* NSEC_PER_SEC
) / divisor
;
289 * Get clock device attributes.
293 clock_flavor_t flavor
,
294 clock_attr_t attr
, /* OUT */
295 mach_msg_type_number_t
*count
) /* IN/OUT */
300 return (KERN_FAILURE
);
304 case CLOCK_GET_TIME_RES
: /* >0 res */
305 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
306 case CLOCK_ALARM_MINRES
:
307 case CLOCK_ALARM_MAXRES
:
309 *(clock_res_t
*) attr
= NSEC_PER_HZ
;
314 return (KERN_INVALID_VALUE
);
317 return (KERN_SUCCESS
);
321 * Set deadline for the next alarm on the clock device. This call
322 * always resets the time to deliver an alarm for the clock.
326 mach_timespec_t
*deadline
)
330 nanotime_to_absolutetime(deadline
->tv_sec
, deadline
->tv_nsec
, &abstime
);
331 timer_call_enter(&rtclock_alarm_timer
, abstime
);
335 * Configure the calendar clock.
344 * Get the current clock time.
348 mach_timespec_t
*time
) /* OUT */
350 clock_get_calendar_nanotime(
351 &time
->tv_sec
, &time
->tv_nsec
);
353 return (KERN_SUCCESS
);
357 * Get clock device attributes.
361 clock_flavor_t flavor
,
362 clock_attr_t attr
, /* OUT */
363 mach_msg_type_number_t
*count
) /* IN/OUT */
368 return (KERN_FAILURE
);
372 case CLOCK_GET_TIME_RES
: /* >0 res */
374 *(clock_res_t
*) attr
= NSEC_PER_HZ
;
378 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
379 case CLOCK_ALARM_MINRES
:
380 case CLOCK_ALARM_MAXRES
:
381 *(clock_res_t
*) attr
= 0;
385 return (KERN_INVALID_VALUE
);
388 return (KERN_SUCCESS
);
392 clock_get_calendar_microtime(
396 uint32_t epoch
, microepoch
;
398 spl_t s
= splclock();
400 simple_lock(&rtclock_lock
);
402 if (rtclock_calend
.adjdelta
>= 0) {
405 now
= mach_absolute_time();
407 epoch
= rtclock_calend
.epoch
;
408 microepoch
= rtclock_calend
.microepoch
;
410 simple_unlock(&rtclock_lock
);
412 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
413 now
-= (t64
* divisor
);
414 *microsecs
= (now
* USEC_PER_SEC
) / divisor
;
416 TIME_ADD(*secs
, epoch
, *microsecs
, microepoch
, USEC_PER_SEC
);
421 delta
= -rtclock_calend
.adjdelta
;
423 now
= mach_absolute_time();
425 *secs
= rtclock_calend
.epoch
;
426 *microsecs
= rtclock_calend
.microepoch
;
428 if (now
> rtclock_calend
.epoch1
) {
429 t64
= now
- rtclock_calend
.epoch1
;
431 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
434 TIME_ADD(*secs
, 0, *microsecs
, (t32
- delta
), USEC_PER_SEC
);
437 simple_unlock(&rtclock_lock
);
443 /* This is only called from the gettimeofday() syscall. As a side
444 * effect, it updates the commpage timestamp. Otherwise it is
445 * identical to clock_get_calendar_microtime(). Because most
446 * gettimeofday() calls are handled by the commpage in user mode,
447 * this routine should be infrequently used except when slowing down
453 uint32_t *microsecs_p
)
455 uint32_t epoch
, microepoch
;
456 uint32_t secs
, microsecs
;
457 uint64_t now
, t64
, secs_64
, usec_64
;
458 spl_t s
= splclock();
460 simple_lock(&rtclock_lock
);
462 if (rtclock_calend
.adjdelta
>= 0) {
463 now
= mach_absolute_time();
465 epoch
= rtclock_calend
.epoch
;
466 microepoch
= rtclock_calend
.microepoch
;
468 secs
= secs_64
= now
/ rtclock_sec_divisor
;
469 t64
= now
- (secs_64
* rtclock_sec_divisor
);
470 microsecs
= usec_64
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
472 TIME_ADD(secs
, epoch
, microsecs
, microepoch
, USEC_PER_SEC
);
474 /* adjust "now" to be absolute time at _start_ of usecond */
475 now
-= t64
- ((usec_64
* rtclock_sec_divisor
) / USEC_PER_SEC
);
477 commpage_set_timestamp(now
,secs
,microsecs
,rtclock_sec_divisor
);
482 delta
= -rtclock_calend
.adjdelta
;
484 now
= mach_absolute_time();
486 secs
= rtclock_calend
.epoch
;
487 microsecs
= rtclock_calend
.microepoch
;
489 if (now
> rtclock_calend
.epoch1
) {
490 t64
= now
- rtclock_calend
.epoch1
;
492 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
495 TIME_ADD(secs
, 0, microsecs
, (t32
- delta
), USEC_PER_SEC
);
498 /* no need to disable timestamp, it is already off */
501 simple_unlock(&rtclock_lock
);
505 *microsecs_p
= microsecs
;
509 clock_get_calendar_nanotime(
513 uint32_t epoch
, nanoepoch
;
515 spl_t s
= splclock();
517 simple_lock(&rtclock_lock
);
519 if (rtclock_calend
.adjdelta
>= 0) {
522 now
= mach_absolute_time();
524 epoch
= rtclock_calend
.epoch
;
525 nanoepoch
= rtclock_calend
.microepoch
* NSEC_PER_USEC
;
527 simple_unlock(&rtclock_lock
);
529 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
530 now
-= (t64
* divisor
);
531 *nanosecs
= ((now
* USEC_PER_SEC
) / divisor
) * NSEC_PER_USEC
;
533 TIME_ADD(*secs
, epoch
, *nanosecs
, nanoepoch
, NSEC_PER_SEC
);
538 delta
= -rtclock_calend
.adjdelta
;
540 now
= mach_absolute_time();
542 *secs
= rtclock_calend
.epoch
;
543 *nanosecs
= rtclock_calend
.microepoch
* NSEC_PER_USEC
;
545 if (now
> rtclock_calend
.epoch1
) {
546 t64
= now
- rtclock_calend
.epoch1
;
548 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
551 TIME_ADD(*secs
, 0, *nanosecs
, ((t32
- delta
) * NSEC_PER_USEC
), NSEC_PER_SEC
);
554 simple_unlock(&rtclock_lock
);
561 clock_set_calendar_microtime(
565 uint32_t sys
, microsys
;
569 newsecs
= (microsecs
< 500*USEC_PER_SEC
)?
573 simple_lock(&rtclock_lock
);
575 commpage_set_timestamp(0,0,0,0);
578 * Cancel any adjustment in progress.
580 if (rtclock_calend
.adjdelta
< 0) {
584 delta
= -rtclock_calend
.adjdelta
;
586 sys
= rtclock_calend
.epoch
;
587 microsys
= rtclock_calend
.microepoch
;
589 now
= mach_absolute_time();
591 if (now
> rtclock_calend
.epoch1
)
592 t64
= now
- rtclock_calend
.epoch1
;
596 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
599 TIME_ADD(sys
, 0, microsys
, (t32
- delta
), USEC_PER_SEC
);
601 rtclock_calend
.epoch
= sys
;
602 rtclock_calend
.microepoch
= microsys
;
604 sys
= t64
= now
/ rtclock_sec_divisor
;
605 now
-= (t64
* rtclock_sec_divisor
);
606 microsys
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
608 TIME_SUB(rtclock_calend
.epoch
, sys
, rtclock_calend
.microepoch
, microsys
, USEC_PER_SEC
);
611 rtclock_calend
.epoch1
= 0;
612 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
= 0;
615 * Calculate the new calendar epoch based on
616 * the new value and the system clock.
618 clock_get_system_microtime(&sys
, µsys
);
619 TIME_SUB(secs
, sys
, microsecs
, microsys
, USEC_PER_SEC
);
622 * Adjust the boottime based on the delta.
624 rtclock_boottime
+= secs
- rtclock_calend
.epoch
;
627 * Set the new calendar epoch.
629 rtclock_calend
.epoch
= secs
;
630 rtclock_calend
.microepoch
= microsecs
;
632 simple_unlock(&rtclock_lock
);
635 * Set the new value for the platform clock.
637 PESetGMTTimeOfDay(newsecs
);
642 * Send host notifications.
644 host_notify_calendar_change();
647 #define tickadj (40) /* "standard" skew, us / tick */
648 #define bigadj (USEC_PER_SEC) /* use 10x skew above bigadj us */
651 clock_set_calendar_adjtime(
655 int64_t total
, ototal
;
656 uint32_t interval
= 0;
659 total
= (int64_t)*secs
* USEC_PER_SEC
+ *microsecs
;
662 commpage_set_timestamp(0,0,0,0);
664 ototal
= rtclock_calend
.adjtotal
;
666 if (rtclock_calend
.adjdelta
< 0) {
669 uint32_t sys
, microsys
;
671 delta
= -rtclock_calend
.adjdelta
;
673 sys
= rtclock_calend
.epoch
;
674 microsys
= rtclock_calend
.microepoch
;
676 now
= mach_absolute_time();
678 if (now
> rtclock_calend
.epoch1
)
679 t64
= now
- rtclock_calend
.epoch1
;
683 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
686 TIME_ADD(sys
, 0, microsys
, (t32
- delta
), USEC_PER_SEC
);
688 rtclock_calend
.epoch
= sys
;
689 rtclock_calend
.microepoch
= microsys
;
691 sys
= t64
= now
/ rtclock_sec_divisor
;
692 now
-= (t64
* rtclock_sec_divisor
);
693 microsys
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
695 TIME_SUB(rtclock_calend
.epoch
, sys
, rtclock_calend
.microepoch
, microsys
, USEC_PER_SEC
);
699 int32_t delta
= tickadj
;
707 rtclock_calend
.epoch1
= 0;
711 uint32_t sys
, microsys
;
719 rtclock_calend
.epoch1
= now
= mach_absolute_time();
721 sys
= t64
= now
/ rtclock_sec_divisor
;
722 now
-= (t64
* rtclock_sec_divisor
);
723 microsys
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
725 TIME_ADD(rtclock_calend
.epoch
, sys
, rtclock_calend
.microepoch
, microsys
, USEC_PER_SEC
);
728 rtclock_calend
.adjtotal
= total
;
729 rtclock_calend
.adjdelta
= delta
;
731 interval
= rtclock_tick_interval
;
734 rtclock_calend
.epoch1
= 0;
735 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
= 0;
741 *secs
= *microsecs
= 0;
743 *secs
= ototal
/ USEC_PER_SEC
;
744 *microsecs
= ototal
% USEC_PER_SEC
;
751 clock_adjust_calendar(void)
753 uint32_t interval
= 0;
758 commpage_set_timestamp(0,0,0,0);
760 delta
= rtclock_calend
.adjdelta
;
763 TIME_ADD(rtclock_calend
.epoch
, 0, rtclock_calend
.microepoch
, delta
, USEC_PER_SEC
);
765 rtclock_calend
.adjtotal
-= delta
;
766 if (delta
> rtclock_calend
.adjtotal
)
767 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
;
774 now
= mach_absolute_time();
776 if (now
> rtclock_calend
.epoch1
)
777 t64
= now
- rtclock_calend
.epoch1
;
781 rtclock_calend
.epoch1
= now
;
783 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
785 TIME_ADD(rtclock_calend
.epoch
, 0, rtclock_calend
.microepoch
, (t32
+ delta
), USEC_PER_SEC
);
787 rtclock_calend
.adjtotal
-= delta
;
788 if (delta
< rtclock_calend
.adjtotal
)
789 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
;
791 if (rtclock_calend
.adjdelta
== 0) {
792 uint32_t sys
, microsys
;
794 sys
= t64
= now
/ rtclock_sec_divisor
;
795 now
-= (t64
* rtclock_sec_divisor
);
796 microsys
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
798 TIME_SUB(rtclock_calend
.epoch
, sys
, rtclock_calend
.microepoch
, microsys
, USEC_PER_SEC
);
800 rtclock_calend
.epoch1
= 0;
804 if (rtclock_calend
.adjdelta
!= 0)
805 interval
= rtclock_tick_interval
;
813 * clock_initialize_calendar:
815 * Set the calendar and related clocks
816 * from the platform clock at boot or
820 clock_initialize_calendar(void)
822 uint32_t sys
, microsys
;
823 uint32_t microsecs
= 0, secs
= PEGetGMTTimeOfDay();
827 commpage_set_timestamp(0,0,0,0);
829 if ((int32_t)secs
>= (int32_t)rtclock_boottime
) {
831 * Initialize the boot time based on the platform clock.
833 if (rtclock_boottime
== 0)
834 rtclock_boottime
= secs
;
837 * Calculate the new calendar epoch based
838 * on the platform clock and the system
841 clock_get_system_microtime(&sys
, µsys
);
842 TIME_SUB(secs
, sys
, microsecs
, microsys
, USEC_PER_SEC
);
845 * Set the new calendar epoch.
847 rtclock_calend
.epoch
= secs
;
848 rtclock_calend
.microepoch
= microsecs
;
851 * Cancel any adjustment in progress.
853 rtclock_calend
.epoch1
= 0;
854 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
= 0;
860 * Send host notifications.
862 host_notify_calendar_change();
866 clock_get_boottime_nanotime(
870 *secs
= rtclock_boottime
;
876 mach_timebase_info_t info
)
881 rtclock_timebase_initialized
= TRUE
;
882 *info
= rtclock_timebase_const
;
887 clock_set_timer_deadline(
892 rtclock_timer_t
*mytimer
;
893 struct per_proc_info
*pp
;
898 mytimer
= &pp
->rtclock_timer
;
899 mytimer
->deadline
= deadline
;
901 if (!mytimer
->has_expired
&& (deadline
< pp
->rtclock_tick_deadline
)) { /* Has the timer already expired or is less that set? */
902 pp
->rtcPop
= deadline
; /* Yes, set the new rtc pop time */
903 decr
= setTimerReq(); /* Start the timers going */
905 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1)
906 | DBG_FUNC_NONE
, decr
, 2, 0, 0, 0);
913 clock_set_timer_func(
914 clock_timer_func_t func
)
919 if (rtclock_timer_expire
== NULL
)
920 rtclock_timer_expire
= func
;
925 * Real-time clock device interrupt.
928 rtclock_intr(struct savearea
*ssp
) {
932 rtclock_timer_t
*mytimer
;
933 struct per_proc_info
*pp
;
936 mytimer
= &pp
->rtclock_timer
;
938 abstime
= mach_absolute_time();
939 if (pp
->rtclock_tick_deadline
<= abstime
) { /* Have we passed the pop time? */
940 clock_deadline_for_periodic_event(rtclock_tick_interval
, abstime
,
941 &pp
->rtclock_tick_deadline
);
942 hertz_tick(USER_MODE(ssp
->save_srr1
), ssp
->save_srr0
);
943 abstime
= mach_absolute_time(); /* Refresh the current time since we went away */
946 if (mytimer
->deadline
<= abstime
) { /* Have we expired the deadline? */
947 mytimer
->has_expired
= TRUE
; /* Remember that we popped */
948 mytimer
->deadline
= EndOfAllTime
; /* Set timer request to the end of all time in case we have no more events */
949 (*rtclock_timer_expire
)(abstime
); /* Process pop */
950 mytimer
->has_expired
= FALSE
;
953 pp
->rtcPop
= (pp
->rtclock_tick_deadline
< mytimer
->deadline
) ? /* Get shortest pop */
954 pp
->rtclock_tick_deadline
: /* It was the periodic timer */
955 mytimer
->deadline
; /* Actually, an event request */
957 decr
= setTimerReq(); /* Request the timer pop */
959 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1)
960 | DBG_FUNC_NONE
, decr
, 3, 0, 0, 0);
964 * Request an interruption at a specific time
966 * Sets the decrementer to pop at the right time based on the timebase.
967 * The value is chosen by comparing the rtc request with the power management.
968 * request. We may add other values at a future time.
972 int setTimerReq(void) {
974 struct per_proc_info
*pp
;
978 pp
= getPerProc(); /* Get per_proc */
980 nexttime
= pp
->rtcPop
; /* Assume main timer */
982 decr
= setPop((pp
->pms
.pmsPop
< nexttime
) ? pp
->pms
.pmsPop
: nexttime
); /* Schedule timer pop */
984 return decr
; /* Pass back what we actually set */
988 rtclock_alarm_expire(
992 mach_timespec_t timestamp
;
994 (void) sysclk_gettime(×tamp
);
996 clock_alarm_intr(SYSTEM_CLOCK
, ×tamp
);
1000 nanotime_to_absolutetime(
1005 uint32_t divisor
= rtclock_sec_divisor
;
1007 *result
= ((uint64_t)secs
* divisor
) +
1008 ((uint64_t)nanosecs
* divisor
) / NSEC_PER_SEC
;
1012 absolutetime_to_microtime(
1015 uint32_t *microsecs
)
1020 *secs
= t64
= abstime
/ (divisor
= rtclock_sec_divisor
);
1021 abstime
-= (t64
* divisor
);
1022 *microsecs
= (abstime
* USEC_PER_SEC
) / divisor
;
/*
 * clock_interval_to_deadline:
 *
 * Convert a scaled interval into an absolute-time deadline relative
 * to the current uptime: deadline = now + interval * scale_factor.
 *
 * NOTE(review): the final accumulation into *result is reconstructed
 * from the function's contract; the extraction dropped that line —
 * confirm against the canonical source.
 */
void
clock_interval_to_deadline(
	uint32_t			interval,
	uint32_t			scale_factor,
	uint64_t			*result)
{
	uint64_t	abstime;

	/* Start from the current uptime... */
	clock_get_uptime(result);

	/* ...and add the interval converted to timebase units. */
	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result += abstime;
}
1041 clock_interval_to_absolutetime_interval(
1043 uint32_t scale_factor
,
1046 uint64_t nanosecs
= (uint64_t)interval
* scale_factor
;
1050 *result
= (t64
= nanosecs
/ NSEC_PER_SEC
) *
1051 (divisor
= rtclock_sec_divisor
);
1052 nanosecs
-= (t64
* NSEC_PER_SEC
);
1053 *result
+= (nanosecs
* divisor
) / NSEC_PER_SEC
;
1057 clock_absolutetime_interval_to_deadline(
1061 clock_get_uptime(result
);
1067 absolutetime_to_nanoseconds(
1074 *result
= (t64
= abstime
/ (divisor
= rtclock_sec_divisor
)) * NSEC_PER_SEC
;
1075 abstime
-= (t64
* divisor
);
1076 *result
+= (abstime
* NSEC_PER_SEC
) / divisor
;
1080 nanoseconds_to_absolutetime(
1087 *result
= (t64
= nanosecs
/ NSEC_PER_SEC
) *
1088 (divisor
= rtclock_sec_divisor
);
1089 nanosecs
-= (t64
* NSEC_PER_SEC
);
1090 *result
+= (nanosecs
* divisor
) / NSEC_PER_SEC
;
1094 machine_delay_until(
1100 now
= mach_absolute_time();
1101 } while (now
< deadline
);