 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 *
 * @APPLE_FREE_COPYRIGHT@
 *
 * Purpose: Routines for handling the machine dependent
 *          real-time clock.
37 #include <mach/mach_types.h>
39 #include <kern/clock.h>
40 #include <kern/thread.h>
41 #include <kern/macro_help.h>
44 #include <kern/host_notify.h>
46 #include <machine/mach_param.h> /* HZ */
47 #include <machine/commpage.h>
48 #include <ppc/proc_reg.h>
50 #include <pexpert/pexpert.h>
52 #include <sys/kdebug.h>
/*
 * Forward declarations for the two Mach clock devices exported by this
 * file: the system (uptime) clock and the calendar (wall) clock, plus
 * the clock_ops dispatch tables that register them with the Mach clock
 * layer.
 *
 * NOTE(review): this extract is gap-sampled — the leading integer on
 * each line is an extraction artifact (the original file's line
 * number), statements are split across physical lines, and several
 * original lines (e.g. the sysclk_setalarm prototype head and the
 * tails of both clock_ops initializers) are missing. Do not treat this
 * text as compilable; verify against the original file.
 */
54 int sysclk_config(void);
56 int sysclk_init(void);
58 kern_return_t
sysclk_gettime(
59 mach_timespec_t
*cur_time
);
61 kern_return_t
sysclk_getattr(
62 clock_flavor_t flavor
,
64 mach_msg_type_number_t
*count
);
/* Orphaned parameter list below — presumably the tail of the
 * sysclk_setalarm prototype whose head is missing from this extract. */
67 mach_timespec_t
*deadline
);
/* Dispatch table for the system clock; initializer truncated here. */
69 struct clock_ops sysclk_ops
= {
70 sysclk_config
, sysclk_init
,
76 int calend_config(void);
78 int calend_init(void);
80 kern_return_t
calend_gettime(
81 mach_timespec_t
*cur_time
);
83 kern_return_t
calend_getattr(
84 clock_flavor_t flavor
,
86 mach_msg_type_number_t
*count
);
/* Dispatch table for the calendar clock; initializer truncated here. */
88 struct clock_ops calend_ops
= {
89 calend_config
, calend_init
,
95 /* local data declarations */
/* Calendar state: epoch/microepoch offsets plus adjtime bookkeeping
 * (adjdelta/adjtotal/epoch1 are referenced throughout the file).
 * The struct's field list is missing from this extract. */
97 static struct rtclock_calend
{
107 static boolean_t rtclock_initialized
;
/* Per-CPU absolute-time deadline for the next periodic tick. */
109 static uint64_t rtclock_tick_deadline
[NCPUS
];
/* Nanoseconds per scheduler tick (HZ from <machine/mach_param.h>). */
111 #define NSEC_PER_HZ (NSEC_PER_SEC / HZ)
112 static uint32_t rtclock_tick_interval
;
/* Timebase ticks per second; divisor for abs-time -> sec conversions. */
114 static uint32_t rtclock_sec_divisor
;
116 static mach_timebase_info_data_t rtclock_timebase_const
;
118 static boolean_t rtclock_timebase_initialized
;
/* Per-CPU one-shot timer; bitfield members (is_set, has_expired per
 * later uses) are partially missing from this extract. */
120 static struct rtclock_timer
{
123 /*boolean_t*/ is_set
:1,
126 } rtclock_timer
[NCPUS
];
/* Callback invoked from rtclock_intr() when the per-CPU timer fires. */
128 static clock_timer_func_t rtclock_timer_expire
;
130 static timer_call_data_t rtclock_alarm_timer
;
132 static void timespec_to_absolutetime(
136 static int deadline_to_decrementer(
140 static void rtclock_alarm_expire(
141 timer_call_param_t p0
,
142 timer_call_param_t p1
);
144 /* global data declarations */
/* PPC decrementer register bounds: max positive 32-bit value, and a
 * floor to guarantee the interrupt is not set in the past. */
146 #define DECREMENTER_MAX 0x7FFFFFFFUL
147 #define DECREMENTER_MIN 0xAUL
149 natural_t rtclock_decrementer_min
;
151 decl_simple_lock_data(static,rtclock_lock
)
154 * Macros to lock/unlock real-time clock device.
/* NOTE(review): the spl-raise/restore halves of these macros appear to
 * be among the missing lines — confirm against the original file. */
156 #define LOCK_RTC(s) \
159 simple_lock(&rtclock_lock); \
162 #define UNLOCK_RTC(s) \
164 simple_unlock(&rtclock_lock); \
/*
 * timebase_callback (fragment — the function head and several interior
 * lines are missing from this extract).
 *
 * Registered with the platform expert; receives the processor timebase
 * frequency as a num/den ratio, validates it, and derives
 * rtclock_timebase_const (abs-time -> ns conversion factors),
 * rtclock_sec_divisor, and rtclock_tick_interval. A late change of
 * timebase is only logged, not applied.
 */
170 struct timebase_freq_t
*freq
)
172 uint32_t numer
, denom
;
/* Sanity-check the ratio: 1 <= den <= 4 and num >= den. */
176 if ( freq
->timebase_den
< 1 || freq
->timebase_den
> 4 ||
177 freq
->timebase_num
< freq
->timebase_den
)
178 panic("rtclock timebase_callback: invalid constant %d / %d",
179 freq
->timebase_num
, freq
->timebase_den
);
/* Deliberate inversion: ns = abs * (den * 1e9) / num, so the
 * mach_timebase_info numerator is den*NSEC_PER_SEC and the
 * denominator is num. */
181 denom
= freq
->timebase_num
;
182 numer
= freq
->timebase_den
* NSEC_PER_SEC
;
185 if (!rtclock_timebase_initialized
) {
/* Invalidate the commpage timestamp while constants change. */
186 commpage_set_timestamp(0,0,0,0);
188 rtclock_timebase_const
.numer
= numer
;
189 rtclock_timebase_const
.denom
= denom
;
190 rtclock_sec_divisor
= freq
->timebase_num
/ freq
->timebase_den
;
192 nanoseconds_to_absolutetime(NSEC_PER_HZ
, &abstime
);
193 rtclock_tick_interval
= abstime
;
/* Timebase already published: report, don't re-derive. */
197 printf("rtclock timebase_callback: late old %d / %d new %d / %d",
198 rtclock_timebase_const
.numer
, rtclock_timebase_const
.denom
,
204 clock_timebase_init();
208 * Configure the real-time clock device.
/* sysclk_config (fragment): master CPU sets up the alarm timer call,
 * the RTC spinlock, and registers the timebase callback with the
 * platform expert; other CPUs fall through. The function head, return
 * statements and closing brace are missing from this extract. */
213 if (cpu_number() != master_cpu
)
216 timer_call_setup(&rtclock_alarm_timer
, rtclock_alarm_expire
, NULL
);
218 simple_lock_init(&rtclock_lock
, ETAP_MISC_RT_CLOCK
);
220 PE_register_timebase_callback(timebase_callback
);
226 * Initialize the system clock device.
/* sysclk_init (fragment): arms this CPU's first periodic-tick
 * decrementer. Non-master CPUs require the master to have initialized
 * the rtclock first. The two nearly identical arm sequences are
 * presumably the slave and master paths of a conditional whose braces
 * are missing here — confirm against the original. */
232 int decr
, mycpu
= cpu_number();
234 if (mycpu
!= master_cpu
) {
235 if (rtclock_initialized
== FALSE
) {
236 panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu
);
238 /* Set decrementer and hence our next tick due */
239 abstime
= mach_absolute_time();
240 rtclock_tick_deadline
[mycpu
] = abstime
;
241 rtclock_tick_deadline
[mycpu
] += rtclock_tick_interval
;
242 decr
= deadline_to_decrementer(rtclock_tick_deadline
[mycpu
], abstime
);
248 /* Set decrementer and our next tick due */
249 abstime
= mach_absolute_time();
250 rtclock_tick_deadline
[mycpu
] = abstime
;
251 rtclock_tick_deadline
[mycpu
] += rtclock_tick_interval
;
252 decr
= deadline_to_decrementer(rtclock_tick_deadline
[mycpu
], abstime
);
/* Master path publishes the "ready" flag read by the slaves above. */
255 rtclock_initialized
= TRUE
;
/*
 * sysclk_gettime (fragment — function head missing): return the system
 * (uptime) clock as a mach_timespec_t. Converts mach_absolute_time()
 * to seconds via rtclock_sec_divisor, then the sub-second remainder to
 * nanoseconds.
 */
262 mach_timespec_t
*time
) /* OUT */
267 now
= mach_absolute_time();
/* Split abs time into whole seconds (t64) and remainder (now). */
269 time
->tv_sec
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
270 now
-= (t64
* divisor
);
271 time
->tv_nsec
= (now
* NSEC_PER_SEC
) / divisor
;
273 return (KERN_SUCCESS
);
/*
 * clock_get_system_microtime (fragment): same decomposition as
 * sysclk_gettime but returns secs/microsecs through out-pointers.
 */
277 clock_get_system_microtime(
284 now
= mach_absolute_time();
286 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
287 now
-= (t64
* divisor
);
288 *microsecs
= (now
* USEC_PER_SEC
) / divisor
;
/*
 * clock_get_system_nanotime (fragment): as above with nanosecond
 * resolution for the fractional part.
 */
292 clock_get_system_nanotime(
299 now
= mach_absolute_time();
301 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
302 now
-= (t64
* divisor
);
303 *nanosecs
= (now
* NSEC_PER_SEC
) / divisor
;
307 * Get clock device attributes.
/* sysclk_getattr (fragment): every resolution flavor of the system
 * clock reports NSEC_PER_HZ. The count-validation body that precedes
 * the KERN_FAILURE return, the switch head, and the braces are missing
 * from this extract. */
311 clock_flavor_t flavor
,
312 clock_attr_t attr
, /* OUT */
313 mach_msg_type_number_t
*count
) /* IN/OUT */
318 return (KERN_FAILURE
);
322 case CLOCK_GET_TIME_RES
: /* >0 res */
323 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
324 case CLOCK_ALARM_MINRES
:
325 case CLOCK_ALARM_MAXRES
:
327 *(clock_res_t
*) attr
= NSEC_PER_HZ
;
/* default: unknown flavor. */
332 return (KERN_INVALID_VALUE
);
335 return (KERN_SUCCESS
);
339 * Set deadline for the next alarm on the clock device. This call
340 * always resets the time to deliver an alarm for the clock.
/* sysclk_setalarm (fragment): convert the timespec deadline to abs
 * time and (re)arm the shared alarm timer call. */
344 mach_timespec_t
*deadline
)
348 timespec_to_absolutetime(deadline
, &abstime
);
349 timer_call_enter(&rtclock_alarm_timer
, abstime
);
353 * Configure the calendar clock.
362 * Initialize the calendar clock.
/* calend_init (fragment): only the master CPU proceeds; the body after
 * this guard is missing from this extract. */
367 if (cpu_number() != master_cpu
)
374 * Get the current clock time.
/* calend_gettime (fragment): thin wrapper over
 * clock_get_calendar_nanotime() filling a mach_timespec_t. */
378 mach_timespec_t
*time
) /* OUT */
380 clock_get_calendar_nanotime(
381 &time
->tv_sec
, &time
->tv_nsec
);
383 return (KERN_SUCCESS
);
387 * Get clock device attributes.
/* calend_getattr (fragment): time resolution is NSEC_PER_HZ; all alarm
 * resolutions are 0 because the calendar clock has no alarms. Switch
 * head, count validation, and braces are missing from this extract. */
391 clock_flavor_t flavor
,
392 clock_attr_t attr
, /* OUT */
393 mach_msg_type_number_t
*count
) /* IN/OUT */
398 return (KERN_FAILURE
);
402 case CLOCK_GET_TIME_RES
: /* >0 res */
404 *(clock_res_t
*) attr
= NSEC_PER_HZ
;
408 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
409 case CLOCK_ALARM_MINRES
:
410 case CLOCK_ALARM_MAXRES
:
411 *(clock_res_t
*) attr
= 0;
415 return (KERN_INVALID_VALUE
);
418 return (KERN_SUCCESS
);
/*
 * clock_get_calendar_microtime (fragment): wall-clock time in
 * secs/microsecs. Two paths under the RTC lock:
 *   adjdelta >= 0: uptime + (epoch, microepoch) offset;
 *   adjdelta <  0: time frozen at epoch/microepoch plus elapsed time
 *                  since epoch1, shortened by the pending negative
 *                  adjustment (delta) so time never runs backward.
 * Several lines (declarations, else/closing braces, splx) are missing
 * from this extract.
 */
422 clock_get_calendar_microtime(
426 uint32_t epoch
, microepoch
;
428 spl_t s
= splclock();
430 simple_lock(&rtclock_lock
);
432 if (rtclock_calend
.adjdelta
>= 0) {
435 now
= mach_absolute_time();
/* Snapshot the calendar offset while still holding the lock. */
437 epoch
= rtclock_calend
.epoch
;
438 microepoch
= rtclock_calend
.microepoch
;
440 simple_unlock(&rtclock_lock
);
442 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
443 now
-= (t64
* divisor
);
444 *microsecs
= (now
* USEC_PER_SEC
) / divisor
;
/* Normalize the microsecond carry into seconds. */
446 if ((*microsecs
+= microepoch
) >= USEC_PER_SEC
) {
447 *microsecs
-= USEC_PER_SEC
;
/* Negative-adjustment path (adjdelta < 0). */
456 delta
= -rtclock_calend
.adjdelta
;
458 t64
= mach_absolute_time() - rtclock_calend
.epoch1
;
460 *secs
= rtclock_calend
.epoch
;
461 *microsecs
= rtclock_calend
.microepoch
;
463 simple_unlock(&rtclock_lock
);
465 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
/* NOTE(review): presumably guarded by t32 > delta in a missing line —
 * confirm against the original file. */
468 *microsecs
+= (t32
- delta
);
470 if (*microsecs
>= USEC_PER_SEC
) {
471 *microsecs
-= USEC_PER_SEC
;
479 /* This is only called from the gettimeofday() syscall. As a side
480 * effect, it updates the commpage timestamp. Otherwise it is
481 * identical to clock_get_calendar_microtime(). Because most
482 * gettimeofday() calls are handled by the commpage in user mode,
483 * this routine should be infrequently used except when slowing down
 * the clock. (Sentence truncated by extraction; several lines of this
 * function — head, secs_p parameter, braces, splx — are missing.)
 */
489 uint32_t *microsecs_p
)
491 uint32_t epoch
, microepoch
;
492 uint32_t secs
, microsecs
;
493 uint64_t now
, t64
, secs_64
, usec_64
;
494 spl_t s
= splclock();
496 simple_lock(&rtclock_lock
);
498 if (rtclock_calend
.adjdelta
>= 0) {
499 now
= mach_absolute_time();
501 epoch
= rtclock_calend
.epoch
;
502 microepoch
= rtclock_calend
.microepoch
;
/* Decompose abs time; keep 64-bit intermediates for the commpage
 * rounding below. */
504 secs
= secs_64
= now
/ rtclock_sec_divisor
;
505 t64
= now
- (secs_64
* rtclock_sec_divisor
);
506 microsecs
= usec_64
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
508 if ((microsecs
+= microepoch
) >= USEC_PER_SEC
) {
509 microsecs
-= USEC_PER_SEC
;
515 /* adjust "now" to be absolute time at _start_ of usecond */
516 now
-= t64
- ((usec_64
* rtclock_sec_divisor
) / USEC_PER_SEC
);
/* Publish the timestamp so user-mode gettimeofday() can use the
 * commpage fast path. */
518 commpage_set_timestamp(now
,secs
,microsecs
,rtclock_sec_divisor
);
/* Negative-adjustment path: same scheme as
 * clock_get_calendar_microtime(). */
523 delta
= -rtclock_calend
.adjdelta
;
525 now
= mach_absolute_time() - rtclock_calend
.epoch1
;
527 secs
= rtclock_calend
.epoch
;
528 microsecs
= rtclock_calend
.microepoch
;
530 t32
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
533 microsecs
+= (t32
- delta
);
535 if (microsecs
>= USEC_PER_SEC
) {
536 microsecs
-= USEC_PER_SEC
;
539 /* no need to disable timestamp, it is already off */
542 simple_unlock(&rtclock_lock
);
546 *microsecs_p
= microsecs
;
/*
 * clock_get_calendar_nanotime (fragment): wall-clock time in
 * secs/nanosecs. Mirrors clock_get_calendar_microtime(); note the
 * fractional part is computed at microsecond granularity and then
 * scaled by NSEC_PER_USEC, so resolution is 1 usec despite the name.
 * Declarations, else/closing braces, and splx are missing from this
 * extract.
 */
550 clock_get_calendar_nanotime(
554 uint32_t epoch
, nanoepoch
;
556 spl_t s
= splclock();
558 simple_lock(&rtclock_lock
);
560 if (rtclock_calend
.adjdelta
>= 0) {
563 now
= mach_absolute_time();
565 epoch
= rtclock_calend
.epoch
;
/* Calendar offset is stored in microseconds; widen to ns here. */
566 nanoepoch
= rtclock_calend
.microepoch
* NSEC_PER_USEC
;
568 simple_unlock(&rtclock_lock
);
570 *secs
= t64
= now
/ (divisor
= rtclock_sec_divisor
);
571 now
-= (t64
* divisor
);
572 *nanosecs
= ((now
* USEC_PER_SEC
) / divisor
) * NSEC_PER_USEC
;
574 if ((*nanosecs
+= nanoepoch
) >= NSEC_PER_SEC
) {
575 *nanosecs
-= NSEC_PER_SEC
;
/* Negative-adjustment path (adjdelta < 0). */
584 delta
= -rtclock_calend
.adjdelta
;
586 t64
= mach_absolute_time() - rtclock_calend
.epoch1
;
588 *secs
= rtclock_calend
.epoch
;
589 *nanosecs
= rtclock_calend
.microepoch
* NSEC_PER_USEC
;
591 simple_unlock(&rtclock_lock
);
593 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
596 *nanosecs
+= ((t32
- delta
) * NSEC_PER_USEC
);
598 if (*nanosecs
>= NSEC_PER_SEC
) {
599 *nanosecs
-= NSEC_PER_SEC
;
/*
 * clock_set_calendar_microtime (fragment): set the wall clock. Kills
 * the commpage timestamp, recomputes the calendar epoch as
 * (new time - current uptime), cancels any in-progress adjtime, saves
 * the (rounded) seconds to non-volatile storage via the platform
 * expert, and notifies interested parties. Head, locking, and several
 * interior lines are missing from this extract.
 */
608 clock_set_calendar_microtime(
612 uint32_t sys
, microsys
;
/* Round to nearest second for the battery-backed clock. */
616 newsecs
= (microsecs
< 500*USEC_PER_SEC
)?
620 commpage_set_timestamp(0,0,0,0);
/* NOTE(review): "µsys" is extraction-mangled "&microsys" (an HTML
 * entity was decoded) — verify against the original file. */
622 clock_get_system_microtime(&sys
, µsys
);
/* Borrow a second if the microsecond subtraction underflows. */
623 if ((int32_t)(microsecs
-= microsys
) < 0) {
624 microsecs
+= USEC_PER_SEC
;
630 rtclock_calend
.epoch
= secs
;
631 rtclock_calend
.microepoch
= microsecs
;
/* Setting the clock cancels any pending adjtime adjustment. */
632 rtclock_calend
.epoch1
= 0;
633 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
= 0;
636 PESetGMTTimeOfDay(newsecs
);
638 host_notify_calendar_change();
641 #define tickadj (40) /* "standard" skew, us / tick */
642 #define bigadj (USEC_PER_SEC) /* use 10x skew above bigadj us */
/*
 * clock_set_calendar_adjtime (fragment): BSD adjtime() back end.
 * Installs a new gradual adjustment (total usecs, applied tickadj
 * usecs per tick by clock_adjust_calendar()) and returns the
 * previously outstanding adjustment through secs/microsecs. If an old
 * negative adjustment was in flight, its partially elapsed effect is
 * first folded into epoch/microepoch. Many lines (head, locking,
 * sign-selection of delta, braces) are missing from this extract.
 */
645 clock_set_calendar_adjtime(
649 int64_t total
, ototal
;
650 uint32_t interval
= 0;
653 total
= (int64_t)*secs
* USEC_PER_SEC
+ *microsecs
;
656 commpage_set_timestamp(0,0,0,0);
658 ototal
= rtclock_calend
.adjtotal
;
/* Fold the in-flight negative adjustment into the epoch before
 * installing the new one. */
660 if (rtclock_calend
.adjdelta
< 0) {
663 uint32_t sys
, microsys
;
665 delta
= -rtclock_calend
.adjdelta
;
667 sys
= rtclock_calend
.epoch
;
668 microsys
= rtclock_calend
.microepoch
;
670 now
= mach_absolute_time();
672 t64
= now
- rtclock_calend
.epoch1
;
673 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
676 microsys
+= (t32
- delta
);
678 if (microsys
>= USEC_PER_SEC
) {
679 microsys
-= USEC_PER_SEC
;
683 rtclock_calend
.epoch
= sys
;
684 rtclock_calend
.microepoch
= microsys
;
/* Re-express the epoch as an offset from current uptime. */
686 sys
= t64
= now
/ rtclock_sec_divisor
;
687 now
-= (t64
* rtclock_sec_divisor
);
688 microsys
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
690 if ((int32_t)(rtclock_calend
.microepoch
-= microsys
) < 0) {
691 rtclock_calend
.microepoch
+= USEC_PER_SEC
;
695 rtclock_calend
.epoch
-= sys
;
/* Positive-adjustment setup: per-tick skew (10x above bigadj per the
 * defines above; the scaling line is missing from this extract). */
699 int32_t delta
= tickadj
;
707 rtclock_calend
.epoch1
= 0;
/* Negative-adjustment setup: record the start time in epoch1 and
 * freeze epoch/microepoch at the current wall time. */
711 uint32_t sys
, microsys
;
719 rtclock_calend
.epoch1
= now
= mach_absolute_time();
721 sys
= t64
= now
/ rtclock_sec_divisor
;
722 now
-= (t64
* rtclock_sec_divisor
);
723 microsys
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
725 if ((rtclock_calend
.microepoch
+= microsys
) >= USEC_PER_SEC
) {
726 rtclock_calend
.microepoch
-= USEC_PER_SEC
;
730 rtclock_calend
.epoch
+= sys
;
733 rtclock_calend
.adjtotal
= total
;
734 rtclock_calend
.adjdelta
= delta
;
/* Non-zero interval tells the caller to start the periodic adjust. */
736 interval
= rtclock_tick_interval
;
/* total == 0: cancel any adjustment. */
739 rtclock_calend
.epoch1
= 0;
740 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
= 0;
/* Return the previously outstanding adjustment. */
746 *secs
= *microsecs
= 0;
748 *secs
= ototal
/ USEC_PER_SEC
;
749 *microsecs
= ototal
% USEC_PER_SEC
;
/*
 * clock_adjust_calendar (fragment): called once per tick while an
 * adjtime() adjustment is outstanding; applies one adjdelta step to
 * the calendar and returns a non-zero interval while more remains.
 * Positive deltas bump microepoch directly; negative deltas advance
 * epoch/microepoch by (elapsed - |delta|) measured from epoch1. Head,
 * locking, braces, and the delta-sign dispatch are missing from this
 * extract.
 */
756 clock_adjust_calendar(void)
758 uint32_t micronew
, interval
= 0;
763 commpage_set_timestamp(0,0,0,0);
765 delta
= rtclock_calend
.adjdelta
;
/* Positive-delta path: add delta usecs, carrying into seconds. */
768 micronew
= rtclock_calend
.microepoch
+ delta
;
769 if (micronew
>= USEC_PER_SEC
) {
770 micronew
-= USEC_PER_SEC
;
771 rtclock_calend
.epoch
+= 1;
774 rtclock_calend
.microepoch
= micronew
;
/* Clamp the final step so we never overshoot adjtotal. */
776 rtclock_calend
.adjtotal
-= delta
;
777 if (delta
> rtclock_calend
.adjtotal
)
778 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
;
/* Negative-delta path: advance by real elapsed time since epoch1,
 * shortened by |delta| (delta is negative here). */
785 now
= mach_absolute_time();
787 t64
= now
- rtclock_calend
.epoch1
;
789 rtclock_calend
.epoch1
= now
;
791 t32
= (t64
* USEC_PER_SEC
) / rtclock_sec_divisor
;
793 micronew
= rtclock_calend
.microepoch
+ t32
+ delta
;
794 if (micronew
>= USEC_PER_SEC
) {
795 micronew
-= USEC_PER_SEC
;
796 rtclock_calend
.epoch
+= 1;
799 rtclock_calend
.microepoch
= micronew
;
801 rtclock_calend
.adjtotal
-= delta
;
802 if (delta
< rtclock_calend
.adjtotal
)
803 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
;
/* Adjustment complete: convert the frozen wall time back into an
 * offset-from-uptime representation. */
805 if (rtclock_calend
.adjdelta
== 0) {
806 uint32_t sys
, microsys
;
808 sys
= t64
= now
/ rtclock_sec_divisor
;
809 now
-= (t64
* rtclock_sec_divisor
);
810 microsys
= (now
* USEC_PER_SEC
) / rtclock_sec_divisor
;
812 if ((int32_t)(rtclock_calend
.microepoch
-= microsys
) < 0) {
813 rtclock_calend
.microepoch
+= USEC_PER_SEC
;
817 rtclock_calend
.epoch
-= sys
;
819 rtclock_calend
.epoch1
= 0;
/* Keep ticking while any adjustment remains. */
823 if (rtclock_calend
.adjdelta
!= 0)
824 interval
= rtclock_tick_interval
;
/*
 * clock_initialize_calendar (fragment): seed the calendar from the
 * platform's battery-backed GMT clock at boot — same epoch computation
 * as clock_set_calendar_microtime() but sourced from
 * PEGetGMTTimeOfDay(). Locking and closing lines are missing from
 * this extract.
 */
832 clock_initialize_calendar(void)
834 uint32_t sys
, microsys
;
835 uint32_t microsecs
= 0, secs
= PEGetGMTTimeOfDay();
839 commpage_set_timestamp(0,0,0,0);
/* NOTE(review): "µsys" is extraction-mangled "&microsys" — verify
 * against the original file. */
841 clock_get_system_microtime(&sys
, µsys
);
842 if ((int32_t)(microsecs
-= microsys
) < 0) {
843 microsecs
+= USEC_PER_SEC
;
849 rtclock_calend
.epoch
= secs
;
850 rtclock_calend
.microepoch
= microsecs
;
851 rtclock_calend
.epoch1
= 0;
852 rtclock_calend
.adjdelta
= rtclock_calend
.adjtotal
= 0;
855 host_notify_calendar_change();
/*
 * clock_timebase_info (fragment — head missing): publish the cached
 * abs-time -> ns conversion constants and latch
 * rtclock_timebase_initialized so late timebase changes are refused.
 */
860 mach_timebase_info_t info
)
865 rtclock_timebase_initialized
= TRUE
;
866 *info
= rtclock_timebase_const
;
/*
 * clock_set_timer_deadline (fragment): arm this CPU's one-shot timer.
 * If the new deadline precedes the next periodic tick and the expire
 * handler is not currently running, reprogram the decrementer early
 * (the mtdec() call itself appears to be among the missing lines).
 */
871 clock_set_timer_deadline(
876 struct rtclock_timer
*mytimer
;
880 mycpu
= cpu_number();
881 mytimer
= &rtclock_timer
[mycpu
];
882 mytimer
->deadline
= deadline
;
883 mytimer
->is_set
= TRUE
;
/* has_expired is only set while rtclock_intr() runs the expire
 * callback; skip reprogramming in that window. */
884 if (!mytimer
->has_expired
) {
885 abstime
= mach_absolute_time();
886 if ( mytimer
->deadline
< rtclock_tick_deadline
[mycpu
] ) {
887 decr
= deadline_to_decrementer(mytimer
->deadline
, abstime
);
/* Enforce the debug/test floor on decrementer programming. */
888 if ( rtclock_decrementer_min
!= 0 &&
889 rtclock_decrementer_min
< (natural_t
)decr
)
890 decr
= rtclock_decrementer_min
;
894 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1)
895 | DBG_FUNC_NONE
, decr
, 2, 0, 0, 0);
/*
 * clock_set_timer_func (fragment): register the timer-expiry callback
 * exactly once; later registrations are silently ignored.
 */
902 clock_set_timer_func(
903 clock_timer_func_t func
)
908 if (rtclock_timer_expire
== NULL
)
909 rtclock_timer_expire
= func
;
914 * Reset the clock device. This causes the realtime clock
915 * device to reload its mode and count value (frequency).
924 * Real-time clock device interrupt.
/*
 * rtclock_intr (fragment — head and closing lines missing): PPC
 * decrementer interrupt handler. Services the periodic hertz tick and
 * the per-CPU one-shot timer, then reprograms the decrementer with the
 * nearer of the two next deadlines (the min(decr1,decr2)/mtdec tail is
 * among the missing lines).
 */
929 struct savearea
*ssp
,
933 int decr1
, decr2
, mycpu
= cpu_number();
934 struct rtclock_timer
*mytimer
= &rtclock_timer
[mycpu
];
937 * We may receive interrupts too early, we must reject them.
939 if (rtclock_initialized
== FALSE
) {
940 mtdec(DECREMENTER_MAX
); /* Max the decrementer if not init */
944 decr1
= decr2
= DECREMENTER_MAX
;
/* Periodic tick: roll the deadline forward and account the tick
 * against the interrupted context (user vs kernel from SRR1). */
946 abstime
= mach_absolute_time();
947 if ( rtclock_tick_deadline
[mycpu
] <= abstime
) {
948 clock_deadline_for_periodic_event(rtclock_tick_interval
, abstime
,
949 &rtclock_tick_deadline
[mycpu
]);
950 hertz_tick(USER_MODE(ssp
->save_srr1
), ssp
->save_srr0
);
/* One-shot timer: clear is_set before the callback, and guard the
 * callback window with has_expired (see clock_set_timer_deadline). */
953 abstime
= mach_absolute_time();
954 if ( mytimer
->is_set
&&
955 mytimer
->deadline
<= abstime
) {
956 mytimer
->has_expired
= TRUE
; mytimer
->is_set
= FALSE
;
957 (*rtclock_timer_expire
)(abstime
);
958 mytimer
->has_expired
= FALSE
;
/* Compute decrementer values for both pending deadlines. */
961 abstime
= mach_absolute_time();
962 decr1
= deadline_to_decrementer(rtclock_tick_deadline
[mycpu
], abstime
);
965 decr2
= deadline_to_decrementer(mytimer
->deadline
, abstime
);
/* Debug/test floor, as in clock_set_timer_deadline(). */
970 if ( rtclock_decrementer_min
!= 0 &&
971 rtclock_decrementer_min
< (natural_t
)decr1
)
972 decr1
= rtclock_decrementer_min
;
976 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1)
977 | DBG_FUNC_NONE
, decr1
, 3, 0, 0, 0);
/*
 * rtclock_alarm_expire (fragment): timer_call callback for the system
 * clock's alarm — read the current system time and hand it to the Mach
 * alarm machinery. p0/p1 are unused.
 */
981 rtclock_alarm_expire(
982 timer_call_param_t p0
,
983 timer_call_param_t p1
)
985 mach_timespec_t timestamp
;
/* NOTE(review): "×tamp" on the next two calls is extraction-mangled
 * "&timestamp" (HTML entity decoded) — verify against the original. */
987 (void) sysclk_gettime(×tamp
);
989 clock_alarm_intr(SYSTEM_CLOCK
, ×tamp
);
/*
 * deadline_to_decrementer (fragment — parameters and the
 * deadline<=now guard are missing): convert an absolute deadline into
 * a decrementer count clamped to [DECREMENTER_MIN, DECREMENTER_MAX].
 */
993 deadline_to_decrementer(
1000 return DECREMENTER_MIN
;
1002 delt
= deadline
- now
;
1003 return (delt
>= (DECREMENTER_MAX
+ 1))? DECREMENTER_MAX
:
1004 ((delt
>= (DECREMENTER_MIN
+ 1))? (delt
- 1): DECREMENTER_MIN
);
/*
 * timespec_to_absolutetime (fragment): timespec -> abs-time units,
 * sec * divisor plus nsec scaled by divisor/NSEC_PER_SEC in 64-bit.
 */
1009 timespec_to_absolutetime(
1010 mach_timespec_t
*ts
,
1015 *result
= ((uint64_t)ts
->tv_sec
* (divisor
= rtclock_sec_divisor
)) +
1016 ((uint64_t)ts
->tv_nsec
* divisor
) / NSEC_PER_SEC
;
/*
 * clock_interval_to_deadline (fragment): interval (in scale_factor-ns
 * units) -> absolute deadline relative to current uptime; the final
 * addition into *result is among the missing lines.
 */
1020 clock_interval_to_deadline(
1022 uint32_t scale_factor
,
1027 clock_get_uptime(result
);
1029 clock_interval_to_absolutetime_interval(interval
, scale_factor
, &abstime
);
/*
 * clock_interval_to_absolutetime_interval (fragment): scale interval
 * to nanoseconds, then convert whole seconds and the remainder
 * separately to avoid 64-bit overflow in the multiply.
 */
1035 clock_interval_to_absolutetime_interval(
1037 uint32_t scale_factor
,
1040 uint64_t nanosecs
= (uint64_t)interval
* scale_factor
;
1044 *result
= (t64
= nanosecs
/ NSEC_PER_SEC
) *
1045 (divisor
= rtclock_sec_divisor
);
1046 nanosecs
-= (t64
* NSEC_PER_SEC
);
1047 *result
+= (nanosecs
* divisor
) / NSEC_PER_SEC
;
/*
 * clock_absolutetime_interval_to_deadline (fragment): uptime +
 * abstime interval; the addition line is missing from this extract.
 */
1051 clock_absolutetime_interval_to_deadline(
1055 clock_get_uptime(result
);
/*
 * absolutetime_to_nanoseconds (fragment): split-conversion (seconds
 * first, then remainder) — inverse of nanoseconds_to_absolutetime.
 */
1061 absolutetime_to_nanoseconds(
1068 *result
= (t64
= abstime
/ (divisor
= rtclock_sec_divisor
)) * NSEC_PER_SEC
;
1069 abstime
-= (t64
* divisor
);
1070 *result
+= (abstime
* NSEC_PER_SEC
) / divisor
;
/*
 * nanoseconds_to_absolutetime (fragment): ns -> abs-time units using
 * the same overflow-avoiding split conversion.
 */
1074 nanoseconds_to_absolutetime(
1081 *result
= (t64
= nanosecs
/ NSEC_PER_SEC
) *
1082 (divisor
= rtclock_sec_divisor
);
1083 nanosecs
-= (t64
* NSEC_PER_SEC
);
1084 *result
+= (nanosecs
* divisor
) / NSEC_PER_SEC
;
1088 * Spin-loop delay primitives.
/* delay_for_interval (fragment — head missing): busy-wait until
 * uptime reaches interval*scale_factor from now. */
1093 uint32_t scale_factor
)
1097 clock_interval_to_deadline(interval
, scale_factor
, &end
);
1100 now
= mach_absolute_time();
1101 } while (now
< end
);
/* clock_delay_until (fragment — head missing): busy-wait until the
 * given absolute deadline. */
1111 now
= mach_absolute_time();
1112 } while (now
< deadline
);
/* delay (fragment — head missing): BSD-style usec delay; negative
 * arguments are treated as their magnitude. NOTE(review): passing
 * NSEC_PER_USEC as the scale factor makes the units nanoseconds-per-
 * usec — consistent with delay_for_interval taking ns-unit scaling. */
1119 delay_for_interval((usec
< 0)? -usec
: usec
, NSEC_PER_USEC
);