2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this file.
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
29 * @APPLE_FREE_COPYRIGHT@
33 * Purpose: Routines for handling the machine dependent
37 #include <mach/mach_types.h>
39 #include <kern/clock.h>
40 #include <kern/thread.h>
41 #include <kern/macro_help.h>
44 #include <kern/host_notify.h>
46 #include <machine/mach_param.h> /* HZ */
47 #include <machine/commpage.h>
48 #include <machine/machine_routines.h>
49 #include <ppc/proc_reg.h>
51 #include <pexpert/pexpert.h>
53 #include <sys/kdebug.h>
55 int sysclk_config(void);
57 int sysclk_init(void);
59 kern_return_t
sysclk_gettime(
60 mach_timespec_t
*cur_time
);
62 kern_return_t
sysclk_getattr(
63 clock_flavor_t flavor
,
65 mach_msg_type_number_t
*count
);
68 mach_timespec_t
*deadline
);
70 struct clock_ops sysclk_ops
= {
71 sysclk_config
, sysclk_init
,
77 int calend_config(void);
79 int calend_init(void);
81 kern_return_t
calend_gettime(
82 mach_timespec_t
*cur_time
);
84 kern_return_t
calend_getattr(
85 clock_flavor_t flavor
,
87 mach_msg_type_number_t
*count
);
89 struct clock_ops calend_ops
= {
90 calend_config
, calend_init
,
96 /* local data declarations */
98 static struct rtclock_calend
{
108 static boolean_t rtclock_initialized
;
110 static uint64_t rtclock_tick_deadline
[NCPUS
];
112 #define NSEC_PER_HZ (NSEC_PER_SEC / HZ)
113 static uint32_t rtclock_tick_interval
;
115 static uint32_t rtclock_sec_divisor
;
117 static mach_timebase_info_data_t rtclock_timebase_const
;
119 static boolean_t rtclock_timebase_initialized
;
121 static struct rtclock_timer
{
124 /*boolean_t*/ is_set
:1,
127 } rtclock_timer
[NCPUS
];
129 static clock_timer_func_t rtclock_timer_expire
;
131 static timer_call_data_t rtclock_alarm_timer
;
133 static void timespec_to_absolutetime(
137 static int deadline_to_decrementer(
141 static void rtclock_alarm_expire(
142 timer_call_param_t p0
,
143 timer_call_param_t p1
);
145 /* global data declarations */
147 #define DECREMENTER_MAX 0x7FFFFFFFUL
148 #define DECREMENTER_MIN 0xAUL
150 natural_t rtclock_decrementer_min
;
152 decl_simple_lock_data(static,rtclock_lock
)
155 * Macros to lock/unlock real-time clock device.
157 #define LOCK_RTC(s) \
160 simple_lock(&rtclock_lock); \
163 #define UNLOCK_RTC(s) \
165 simple_unlock(&rtclock_lock); \
171 struct timebase_freq_t
*freq
)
173 uint32_t numer
, denom
;
177 if ( freq
->timebase_den
< 1 || freq
->timebase_den
> 4 ||
178 freq
->timebase_num
< freq
->timebase_den
)
179 panic("rtclock timebase_callback: invalid constant %d / %d",
180 freq
->timebase_num
, freq
->timebase_den
);
182 denom
= freq
->timebase_num
;
183 numer
= freq
->timebase_den
* NSEC_PER_SEC
;
186 if (!rtclock_timebase_initialized
) {
187 commpage_set_timestamp(0,0,0,0);
189 rtclock_timebase_const
.numer
= numer
;
190 rtclock_timebase_const
.denom
= denom
;
191 rtclock_sec_divisor
= freq
->timebase_num
/ freq
->timebase_den
;
193 nanoseconds_to_absolutetime(NSEC_PER_HZ
, &abstime
);
194 rtclock_tick_interval
= abstime
;
196 ml_init_lock_timeout();
200 printf("rtclock timebase_callback: late old %d / %d new %d / %d",
201 rtclock_timebase_const
.numer
, rtclock_timebase_const
.denom
,
207 clock_timebase_init();
211 * Configure the real-time clock device.
216 if (cpu_number() != master_cpu
)
219 timer_call_setup(&rtclock_alarm_timer
, rtclock_alarm_expire
, NULL
);
221 simple_lock_init(&rtclock_lock
, ETAP_MISC_RT_CLOCK
);
223 PE_register_timebase_callback(timebase_callback
);
229 * Initialize the system clock device.
235 int decr
, mycpu
= cpu_number();
237 if (mycpu
!= master_cpu
) {
238 if (rtclock_initialized
== FALSE
) {
239 panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu
);
241 /* Set decrementer and hence our next tick due */
242 abstime
= mach_absolute_time();
243 rtclock_tick_deadline
[mycpu
] = abstime
;
244 rtclock_tick_deadline
[mycpu
] += rtclock_tick_interval
;
245 decr
= deadline_to_decrementer(rtclock_tick_deadline
[mycpu
], abstime
);
251 /* Set decrementer and our next tick due */
252 abstime
= mach_absolute_time();
253 rtclock_tick_deadline
[mycpu
] = abstime
;
254 rtclock_tick_deadline
[mycpu
] += rtclock_tick_interval
;
255 decr
= deadline_to_decrementer(rtclock_tick_deadline
[mycpu
], abstime
);
258 rtclock_initialized
= TRUE
;
265 mach_timespec_t
*time
) /* OUT */
270 now
= mach_absolute_time();
272 time
->tv_sec
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
273 now
-= (t64
* divisor
);
274 time
->tv_nsec
= (now
* NSEC_PER_SEC
) / divisor
;
276 return (KERN_SUCCESS
);
280 clock_get_system_microtime(
287 now
= mach_absolute_time();
289 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
290 now
-= (t64
* divisor
);
291 *microsecs
= (now
* USEC_PER_SEC
) / divisor
;
295 clock_get_system_nanotime(
302 now
= mach_absolute_time();
304 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
305 now
-= (t64
* divisor
);
306 *nanosecs
= (now
* NSEC_PER_SEC
) / divisor
;
310 * Get clock device attributes.
314 clock_flavor_t flavor
,
315 clock_attr_t attr
, /* OUT */
316 mach_msg_type_number_t
*count
) /* IN/OUT */
321 return (KERN_FAILURE
);
325 case CLOCK_GET_TIME_RES
: /* >0 res */
326 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
327 case CLOCK_ALARM_MINRES
:
328 case CLOCK_ALARM_MAXRES
:
330 *(clock_res_t
*) attr
= NSEC_PER_HZ
;
335 return (KERN_INVALID_VALUE
);
338 return (KERN_SUCCESS
);
342 * Set deadline for the next alarm on the clock device. This call
343 * always resets the time to deliver an alarm for the clock.
347 mach_timespec_t
*deadline
)
351 timespec_to_absolutetime(deadline
, &abstime
);
352 timer_call_enter(&rtclock_alarm_timer
, abstime
);
356 * Configure the calendar clock.
365 * Initialize the calendar clock.
370 if (cpu_number() != master_cpu
)
377 * Get the current clock time.
381 mach_timespec_t
*time
) /* OUT */
383 clock_get_calendar_nanotime(
384 &time
->tv_sec
, &time
->tv_nsec
);
386 return (KERN_SUCCESS
);
390 * Get clock device attributes.
394 clock_flavor_t flavor
,
395 clock_attr_t attr
, /* OUT */
396 mach_msg_type_number_t
*count
) /* IN/OUT */
401 return (KERN_FAILURE
);
405 case CLOCK_GET_TIME_RES
: /* >0 res */
407 *(clock_res_t
*) attr
= NSEC_PER_HZ
;
411 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
412 case CLOCK_ALARM_MINRES
:
413 case CLOCK_ALARM_MAXRES
:
414 *(clock_res_t
*) attr
= 0;
418 return (KERN_INVALID_VALUE
);
421 return (KERN_SUCCESS
);
425 clock_get_calendar_microtime(
429 uint32_t epoch
, microepoch
;
431 spl_t s
= splclock();
433 simple_lock(&rtclock_lock
);
435 if (rtclock_calend
.adjdelta
>= 0) {
438 now
= mach_absolute_time();
440 epoch
= rtclock_calend
.epoch
;
441 microepoch
= rtclock_calend
.microepoch
;
443 simple_unlock(&rtclock_lock
);
445 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
446 now
-= (t64
* divisor
);
447 *microsecs
= (now
* USEC_PER_SEC
) / divisor
;
449 if ((*microsecs
+= microepoch
) >= USEC_PER_SEC
) {
450 *microsecs
-= USEC_PER_SEC
;
459 delta
= -rtclock_calend
.adjdelta
;
461 t64
= mach_absolute_time() - rtclock_calend
.epoch1
;
463 *secs
= rtclock_calend
.epoch
;
464 *microsecs
= rtclock_calend
.microepoch
;
466 simple_unlock(&rtclock_lock
);
468 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
471 *microsecs
+= (t32
- delta
);
473 if (*microsecs
>= USEC_PER_SEC
) {
474 *microsecs
-= USEC_PER_SEC
;
482 /* This is only called from the gettimeofday() syscall. As a side
483 * effect, it updates the commpage timestamp. Otherwise it is
484 * identical to clock_get_calendar_microtime(). Because most
485 * gettimeofday() calls are handled by the commpage in user mode,
486 * this routine should be infrequently used except when slowing down
492 uint32_t *microsecs_p
)
494 uint32_t epoch
, microepoch
;
495 uint32_t secs
, microsecs
;
496 uint64_t now
, t64
, secs_64
, usec_64
;
497 spl_t s
= splclock();
499 simple_lock(&rtclock_lock
);
501 if (rtclock_calend
.adjdelta
>= 0) {
502 now
= mach_absolute_time();
504 epoch
= rtclock_calend
.epoch
;
505 microepoch
= rtclock_calend
.microepoch
;
507 secs
= secs_64
= now
/ rtclock_sec_divisor
;
508 t64
= now
- (secs_64
* rtclock_sec_divisor
);
509 microsecs
= usec_64
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
511 if ((microsecs
+= microepoch
) >= USEC_PER_SEC
) {
512 microsecs
-= USEC_PER_SEC
;
518 /* adjust "now" to be absolute time at _start_ of usecond */
519 now
-= t64
- ((usec_64
* rtclock_sec_divisor
) / USEC_PER_SEC
);
521 commpage_set_timestamp(now
,secs
,microsecs
,rtclock_sec_divisor
);
526 delta
= -rtclock_calend
.adjdelta
;
528 now
= mach_absolute_time() - rtclock_calend
.epoch1
;
530 secs
= rtclock_calend
.epoch
;
531 microsecs
= rtclock_calend
.microepoch
;
533 t32
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
536 microsecs
+= (t32
- delta
);
538 if (microsecs
>= USEC_PER_SEC
) {
539 microsecs
-= USEC_PER_SEC
;
542 /* no need to disable timestamp, it is already off */
545 simple_unlock(&rtclock_lock
);
549 *microsecs_p
= microsecs
;
553 clock_get_calendar_nanotime(
557 uint32_t epoch
, nanoepoch
;
559 spl_t s
= splclock();
561 simple_lock(&rtclock_lock
);
563 if (rtclock_calend
.adjdelta
>= 0) {
566 now
= mach_absolute_time();
568 epoch
= rtclock_calend
.epoch
;
569 nanoepoch
= rtclock_calend
.microepoch
* NSEC_PER_USEC
;
571 simple_unlock(&rtclock_lock
);
573 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
574 now
-= (t64
* divisor
);
575 *nanosecs
= ((now
* USEC_PER_SEC
) / divisor
) * NSEC_PER_USEC
;
577 if ((*nanosecs
+= nanoepoch
) >= NSEC_PER_SEC
) {
578 *nanosecs
-= NSEC_PER_SEC
;
587 delta
= -rtclock_calend
.adjdelta
;
589 t64
= mach_absolute_time() - rtclock_calend
.epoch1
;
591 *secs
= rtclock_calend
.epoch
;
592 *nanosecs
= rtclock_calend
.microepoch
* NSEC_PER_USEC
;
594 simple_unlock(&rtclock_lock
);
596 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
599 *nanosecs
+= ((t32
- delta
) * NSEC_PER_USEC
);
601 if (*nanosecs
>= NSEC_PER_SEC
) {
602 *nanosecs
-= NSEC_PER_SEC
;
611 clock_set_calendar_microtime(
615 uint32_t sys
, microsys
;
619 newsecs
= (microsecs
< 500*USEC_PER_SEC
)?
623 commpage_set_timestamp(0,0,0,0);
625 clock_get_system_microtime(&sys
, µsys
);
626 if ((int32_t)(microsecs
-= microsys
) < 0) {
627 microsecs
+= USEC_PER_SEC
;
633 rtclock_calend
.epoch
= secs
;
634 rtclock_calend
.microepoch
= microsecs
;
635 rtclock_calend
.epoch1
= 0;
636 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
= 0;
639 PESetGMTTimeOfDay(newsecs
);
641 host_notify_calendar_change();
644 #define tickadj (40) /* "standard" skew, us / tick */
645 #define bigadj (USEC_PER_SEC) /* use 10x skew above bigadj us */
648 clock_set_calendar_adjtime(
652 int64_t total
, ototal
;
653 uint32_t interval
= 0;
656 total
= (int64_t)*secs
* USEC_PER_SEC
+ *microsecs
;
659 commpage_set_timestamp(0,0,0,0);
661 ototal
= rtclock_calend
.adjtotal
;
663 if (rtclock_calend
.adjdelta
< 0) {
666 uint32_t sys
, microsys
;
668 delta
= -rtclock_calend
.adjdelta
;
670 sys
= rtclock_calend
.epoch
;
671 microsys
= rtclock_calend
.microepoch
;
673 now
= mach_absolute_time();
675 t64
= now
- rtclock_calend
.epoch1
;
676 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
679 microsys
+= (t32
- delta
);
681 if (microsys
>= USEC_PER_SEC
) {
682 microsys
-= USEC_PER_SEC
;
686 rtclock_calend
.epoch
= sys
;
687 rtclock_calend
.microepoch
= microsys
;
689 sys
= t64
= now
/ rtclock_sec_divisor
;
690 now
-= (t64
* rtclock_sec_divisor
);
691 microsys
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
693 if ((int32_t)(rtclock_calend
.microepoch
-= microsys
) < 0) {
694 rtclock_calend
.microepoch
+= USEC_PER_SEC
;
698 rtclock_calend
.epoch
-= sys
;
702 int32_t delta
= tickadj
;
710 rtclock_calend
.epoch1
= 0;
714 uint32_t sys
, microsys
;
722 rtclock_calend
.epoch1
= now
= mach_absolute_time();
724 sys
= t64
= now
/ rtclock_sec_divisor
;
725 now
-= (t64
* rtclock_sec_divisor
);
726 microsys
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
728 if ((rtclock_calend
.microepoch
+= microsys
) >= USEC_PER_SEC
) {
729 rtclock_calend
.microepoch
-= USEC_PER_SEC
;
733 rtclock_calend
.epoch
+= sys
;
736 rtclock_calend
.adjtotal
= total
;
737 rtclock_calend
.adjdelta
= delta
;
739 interval
= rtclock_tick_interval
;
742 rtclock_calend
.epoch1
= 0;
743 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
= 0;
749 *secs
= *microsecs
= 0;
751 *secs
= ototal
/ USEC_PER_SEC
;
752 *microsecs
= ototal
% USEC_PER_SEC
;
759 clock_adjust_calendar(void)
761 uint32_t micronew
, interval
= 0;
766 commpage_set_timestamp(0,0,0,0);
768 delta
= rtclock_calend
.adjdelta
;
771 micronew
= rtclock_calend
.microepoch
+ delta
;
772 if (micronew
>= USEC_PER_SEC
) {
773 micronew
-= USEC_PER_SEC
;
774 rtclock_calend
.epoch
+= 1;
777 rtclock_calend
.microepoch
= micronew
;
779 rtclock_calend
.adjtotal
-= delta
;
780 if (delta
> rtclock_calend
.adjtotal
)
781 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
;
788 now
= mach_absolute_time();
790 t64
= now
- rtclock_calend
.epoch1
;
792 rtclock_calend
.epoch1
= now
;
794 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
796 micronew
= rtclock_calend
.microepoch
+ t32
+ delta
;
797 if (micronew
>= USEC_PER_SEC
) {
798 micronew
-= USEC_PER_SEC
;
799 rtclock_calend
.epoch
+= 1;
802 rtclock_calend
.microepoch
= micronew
;
804 rtclock_calend
.adjtotal
-= delta
;
805 if (delta
< rtclock_calend
.adjtotal
)
806 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
;
808 if (rtclock_calend
.adjdelta
== 0) {
809 uint32_t sys
, microsys
;
811 sys
= t64
= now
/ rtclock_sec_divisor
;
812 now
-= (t64
* rtclock_sec_divisor
);
813 microsys
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
815 if ((int32_t)(rtclock_calend
.microepoch
-= microsys
) < 0) {
816 rtclock_calend
.microepoch
+= USEC_PER_SEC
;
820 rtclock_calend
.epoch
-= sys
;
822 rtclock_calend
.epoch1
= 0;
826 if (rtclock_calend
.adjdelta
!= 0)
827 interval
= rtclock_tick_interval
;
835 clock_initialize_calendar(void)
837 uint32_t sys
, microsys
;
838 uint32_t microsecs
= 0, secs
= PEGetGMTTimeOfDay();
842 commpage_set_timestamp(0,0,0,0);
844 clock_get_system_microtime(&sys
, µsys
);
845 if ((int32_t)(microsecs
-= microsys
) < 0) {
846 microsecs
+= USEC_PER_SEC
;
852 rtclock_calend
.epoch
= secs
;
853 rtclock_calend
.microepoch
= microsecs
;
854 rtclock_calend
.epoch1
= 0;
855 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
= 0;
858 host_notify_calendar_change();
863 mach_timebase_info_t info
)
868 rtclock_timebase_initialized
= TRUE
;
869 *info
= rtclock_timebase_const
;
874 clock_set_timer_deadline(
879 struct rtclock_timer
*mytimer
;
883 mycpu
= cpu_number();
884 mytimer
= &rtclock_timer
[mycpu
];
885 mytimer
->deadline
= deadline
;
886 mytimer
->is_set
= TRUE
;
887 if (!mytimer
->has_expired
) {
888 abstime
= mach_absolute_time();
889 if ( mytimer
->deadline
< rtclock_tick_deadline
[mycpu
] ) {
890 decr
= deadline_to_decrementer(mytimer
->deadline
, abstime
);
891 if ( rtclock_decrementer_min
!= 0 &&
892 rtclock_decrementer_min
< (natural_t
)decr
)
893 decr
= rtclock_decrementer_min
;
897 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1)
898 | DBG_FUNC_NONE
, decr
, 2, 0, 0, 0);
905 clock_set_timer_func(
906 clock_timer_func_t func
)
911 if (rtclock_timer_expire
== NULL
)
912 rtclock_timer_expire
= func
;
917 * Reset the clock device. This causes the realtime clock
918 * device to reload its mode and count value (frequency).
927 * Real-time clock device interrupt.
932 struct savearea
*ssp
,
936 int decr1
, decr2
, mycpu
= cpu_number();
937 struct rtclock_timer
*mytimer
= &rtclock_timer
[mycpu
];
940 * We may receive interrupts too early, we must reject them.
942 if (rtclock_initialized
== FALSE
) {
943 mtdec(DECREMENTER_MAX
); /* Max the decrementer if not init */
947 decr1
= decr2
= DECREMENTER_MAX
;
949 abstime
= mach_absolute_time();
950 if ( rtclock_tick_deadline
[mycpu
] <= abstime
) {
951 clock_deadline_for_periodic_event(rtclock_tick_interval
, abstime
,
952 &rtclock_tick_deadline
[mycpu
]);
953 hertz_tick(USER_MODE(ssp
->save_srr1
), ssp
->save_srr0
);
956 abstime
= mach_absolute_time();
957 if ( mytimer
->is_set
&&
958 mytimer
->deadline
<= abstime
) {
959 mytimer
->has_expired
= TRUE
; mytimer
->is_set
= FALSE
;
960 (*rtclock_timer_expire
)(abstime
);
961 mytimer
->has_expired
= FALSE
;
964 abstime
= mach_absolute_time();
965 decr1
= deadline_to_decrementer(rtclock_tick_deadline
[mycpu
], abstime
);
968 decr2
= deadline_to_decrementer(mytimer
->deadline
, abstime
);
973 if ( rtclock_decrementer_min
!= 0 &&
974 rtclock_decrementer_min
< (natural_t
)decr1
)
975 decr1
= rtclock_decrementer_min
;
979 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1)
980 | DBG_FUNC_NONE
, decr1
, 3, 0, 0, 0);
984 rtclock_alarm_expire(
985 timer_call_param_t p0
,
986 timer_call_param_t p1
)
988 mach_timespec_t timestamp
;
990 (void) sysclk_gettime(×tamp
);
992 clock_alarm_intr(SYSTEM_CLOCK
, ×tamp
);
996 deadline_to_decrementer(
1002 if (deadline
<= now
)
1003 return DECREMENTER_MIN
;
1005 delt
= deadline
- now
;
1006 return (delt
>= (DECREMENTER_MAX
+ 1))? DECREMENTER_MAX
:
1007 ((delt
>= (DECREMENTER_MIN
+ 1))? (delt
- 1): DECREMENTER_MIN
);
1012 timespec_to_absolutetime(
1013 mach_timespec_t
*ts
,
1018 *result
= ((uint64_t)ts
->tv_sec
* (divisor
= rtclock_sec_divisor
)) +
1019 ((uint64_t)ts
->tv_nsec
* divisor
) / NSEC_PER_SEC
;
/*
 * Compute an absolute-time deadline "interval * scale_factor"
 * nanoseconds from now: current uptime plus the converted interval.
 *
 * NOTE(review): the parameter list, "abstime" local and the final
 * "*result += abstime;" were missing from the damaged extraction and
 * have been reconstructed -- without the final add this function would
 * return only the uptime; verify against the original file.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t		abstime;

	clock_get_uptime(result);

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result += abstime;
}
1038 clock_interval_to_absolutetime_interval(
1040 uint32_t scale_factor
,
1043 uint64_t nanosecs
= (uint64_t)interval
* scale_factor
;
1047 *result
= (t64
= nanosecs
/ NSEC_PER_SEC
) *
1048 (divisor
= rtclock_sec_divisor
);
1049 nanosecs
-= (t64
* NSEC_PER_SEC
);
1050 *result
+= (nanosecs
* divisor
) / NSEC_PER_SEC
;
/*
 * Compute a deadline "abstime" timebase ticks from now: current uptime
 * plus the supplied absolute-time interval.
 *
 * NOTE(review): only the name and the clock_get_uptime() call survived
 * the damaged extraction; parameters and the final add reconstructed
 * by analogy with clock_interval_to_deadline -- verify against the
 * original file.
 */
void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	clock_get_uptime(result);

	*result += abstime;
}
1064 absolutetime_to_nanoseconds(
1071 *result
= (t64
= abstime
/ (divisor
= rtclock_sec_divisor
)) * NSEC_PER_SEC
;
1072 abstime
-= (t64
* divisor
);
1073 *result
+= (abstime
* NSEC_PER_SEC
) / divisor
;
1077 nanoseconds_to_absolutetime(
1084 *result
= (t64
= nanosecs
/ NSEC_PER_SEC
) *
1085 (divisor
= rtclock_sec_divisor
);
1086 nanosecs
-= (t64
* NSEC_PER_SEC
);
1087 *result
+= (nanosecs
* divisor
) / NSEC_PER_SEC
;
1091 * Spin-loop delay primitives.
1096 uint32_t scale_factor
)
1100 clock_interval_to_deadline(interval
, scale_factor
, &end
);
1103 now
= mach_absolute_time();
1104 } while (now
< end
);
1114 now
= mach_absolute_time();
1115 } while (now
< deadline
);
1122 delay_for_interval((usec
< 0)? -usec
: usec
, NSEC_PER_USEC
);