/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * Purpose: Routines for handling the machine dependent
 *          real-time clock.
 */
#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

#include <kern/host_notify.h>

#include <machine/mach_param.h>     /* HZ */
#include <machine/commpage.h>
#include <machine/machine_routines.h>
#include <ppc/proc_reg.h>

#include <pexpert/pexpert.h>

#include <sys/kdebug.h>
int     sysclk_config(void);

int     sysclk_init(void);

kern_return_t   sysclk_gettime(
    mach_timespec_t         *cur_time);

kern_return_t   sysclk_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,
    mach_msg_type_number_t  *count);

void    sysclk_setalarm(
    mach_timespec_t         *deadline);

struct clock_ops sysclk_ops = {
    sysclk_config,          sysclk_init,
    sysclk_gettime,         0,
    sysclk_getattr,         0,
    sysclk_setalarm,
};
int     calend_config(void);

int     calend_init(void);

kern_return_t   calend_gettime(
    mach_timespec_t         *cur_time);

kern_return_t   calend_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,
    mach_msg_type_number_t  *count);

struct clock_ops calend_ops = {
    calend_config,          calend_init,
    calend_gettime,         0,
    calend_getattr,         0,
    0,
};
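
/*
 * The clock_ops tables above are the machine-dependent entry points
 * through which the generic Mach clock layer drives the system and
 * calendar clocks.  The zero slots correspond to operations this
 * machine does not implement (settime and setattr, plus setalarm for
 * the calendar clock), following the usual
 * config/init/gettime/settime/getattr/setattr/setalarm slot ordering
 * of struct clock_ops.
 */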
/* local data declarations */

static struct rtclock_calend {
    uint32_t            epoch;
    uint32_t            microepoch;

    uint64_t            epoch1;

    int64_t             adjtotal;
    int32_t             adjdelta;
}                       rtclock_calend;

static boolean_t        rtclock_initialized;

static uint64_t         rtclock_tick_deadline[NCPUS];

#define NSEC_PER_HZ     (NSEC_PER_SEC / HZ)
static uint32_t         rtclock_tick_interval;

static uint32_t         rtclock_sec_divisor;

static mach_timebase_info_data_t    rtclock_timebase_const;

static boolean_t        rtclock_timebase_initialized;

static struct rtclock_timer {
    uint64_t            deadline;
    uint32_t
    /*boolean_t*/       is_set:1,
                        has_expired:1,
                        :0;
}                       rtclock_timer[NCPUS];

static clock_timer_func_t   rtclock_timer_expire;

static timer_call_data_t    rtclock_alarm_timer;
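
/*
 * Note on the calendar state: rtclock_calend holds not the calendar
 * time itself but the offset of the calendar epoch from absolute
 * (boot-relative) time, so that in effect
 *
 *  calendar time = system uptime + { epoch, microepoch }
 *
 * While the clock is being slewed backwards (adjdelta < 0), epoch1
 * records the absolute time at which the current adjustment began,
 * and readers interpolate forward from the frozen epoch values.
 */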
static void     timespec_to_absolutetime(
                    mach_timespec_t     *ts,
                    uint64_t            *result);

static int      deadline_to_decrementer(
                    uint64_t            deadline,
                    uint64_t            now);

static void     rtclock_alarm_expire(
                    timer_call_param_t  p0,
                    timer_call_param_t  p1);
/* global data declarations */

#define DECREMENTER_MAX     0x7FFFFFFFUL
#define DECREMENTER_MIN     0xAUL

natural_t       rtclock_decrementer_min;

decl_simple_lock_data(static,rtclock_lock)
/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)                     \
MACRO_BEGIN                             \
    (s) = splclock();                   \
    simple_lock(&rtclock_lock);         \
MACRO_END

#define UNLOCK_RTC(s)                   \
MACRO_BEGIN                             \
    simple_unlock(&rtclock_lock);       \
    splx(s);                            \
MACRO_END
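
/*
 * Typical usage, as seen throughout this file:
 *
 *  spl_t   s;
 *
 *  LOCK_RTC(s);
 *  ... examine or update rtclock/calendar state ...
 *  UNLOCK_RTC(s);
 */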
static void
timebase_callback(
    struct timebase_freq_t  *freq)
{
    uint32_t    numer, denom;
    uint64_t    abstime;
    spl_t       s;

    if (    freq->timebase_den < 1 || freq->timebase_den > 4   ||
            freq->timebase_num < freq->timebase_den            )
        panic("rtclock timebase_callback: invalid constant %d / %d",
                    freq->timebase_num, freq->timebase_den);

    denom = freq->timebase_num;
    numer = freq->timebase_den * NSEC_PER_SEC;

    LOCK_RTC(s);
    if (!rtclock_timebase_initialized) {
        commpage_set_timestamp(0,0,0,0);

        rtclock_timebase_const.numer = numer;
        rtclock_timebase_const.denom = denom;
        rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;

        nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
        rtclock_tick_interval = abstime;

        ml_init_lock_timeout();
    }
    else {
        UNLOCK_RTC(s);
        printf("rtclock timebase_callback: late old %d / %d new %d / %d",
               rtclock_timebase_const.numer, rtclock_timebase_const.denom,
               numer, denom);
        return;
    }
    UNLOCK_RTC(s);

    clock_timebase_init();
}
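
/*
 * A concrete (hypothetical) example of the constants set up above: a
 * 25 MHz timebase registers freq->timebase_num = 25000000 and
 * freq->timebase_den = 1, giving
 *
 *  rtclock_timebase_const = { numer = NSEC_PER_SEC, denom = 25000000 }
 *  rtclock_sec_divisor    = 25000000
 *
 * so nanoseconds = ticks * numer / denom = ticks * 40, i.e. forty
 * nanoseconds per timebase tick.
 */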
/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
    if (cpu_number() != master_cpu)
        return (1);

    timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

    simple_lock_init(&rtclock_lock, ETAP_MISC_RT_CLOCK);

    PE_register_timebase_callback(timebase_callback);

    return (1);
}
/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
    uint64_t        abstime;
    int             decr, mycpu = cpu_number();

    if (mycpu != master_cpu) {
        if (rtclock_initialized == FALSE) {
            panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
        }
        /* Set decrementer and hence our next tick due */
        abstime = mach_absolute_time();
        rtclock_tick_deadline[mycpu] = abstime;
        rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
        decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
        mtdec(decr);

        return (1);
    }

    /* Set decrementer and our next tick due */
    abstime = mach_absolute_time();
    rtclock_tick_deadline[mycpu] = abstime;
    rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
    decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
    mtdec(decr);

    rtclock_initialized = TRUE;

    return (1);
}
kern_return_t
sysclk_gettime(
    mach_timespec_t     *time)  /* OUT */
{
    uint64_t    now, t64;
    uint32_t    divisor;

    now = mach_absolute_time();

    time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    time->tv_nsec = (now * NSEC_PER_SEC) / divisor;

    return (KERN_SUCCESS);
}
void
clock_get_system_microtime(
    uint32_t        *secs,
    uint32_t        *microsecs)
{
    uint64_t    now, t64;
    uint32_t    divisor;

    now = mach_absolute_time();

    *secs = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    *microsecs = (now * USEC_PER_SEC) / divisor;
}
void
clock_get_system_nanotime(
    uint32_t        *secs,
    uint32_t        *nanosecs)
{
    uint64_t    now, t64;
    uint32_t    divisor;

    now = mach_absolute_time();

    *secs = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    *nanosecs = (now * NSEC_PER_SEC) / divisor;
}
/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,       /* OUT */
    mach_msg_type_number_t  *count)     /* IN/OUT */
{
    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        *(clock_res_t *) attr = NSEC_PER_HZ;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }

    return (KERN_SUCCESS);
}
/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
    mach_timespec_t     *deadline)
{
    uint64_t    abstime;

    timespec_to_absolutetime(deadline, &abstime);
    timer_call_enter(&rtclock_alarm_timer, abstime);
}
/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
    return (1);
}
/*
 * Initialize the calendar clock.
 */
int
calend_init(void)
{
    if (cpu_number() != master_cpu)
        return (1);

    return (1);
}
/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
    mach_timespec_t     *time)  /* OUT */
{
    clock_get_calendar_nanotime(
                &time->tv_sec, &time->tv_nsec);

    return (KERN_SUCCESS);
}
/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,       /* OUT */
    mach_msg_type_number_t  *count)     /* IN/OUT */
{
    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
        *(clock_res_t *) attr = NSEC_PER_HZ;
        break;

    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        *(clock_res_t *) attr = 0;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }

    return (KERN_SUCCESS);
}
void
clock_get_calendar_microtime(
    uint32_t        *secs,
    uint32_t        *microsecs)
{
    uint32_t        epoch, microepoch;
    uint64_t        now, t64;
    spl_t           s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        uint32_t        divisor;

        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        microepoch = rtclock_calend.microepoch;

        simple_unlock(&rtclock_lock);

        *secs = t64 = now / (divisor = rtclock_sec_divisor);
        now -= (t64 * divisor);
        *microsecs = (now * USEC_PER_SEC) / divisor;

        if ((*microsecs += microepoch) >= USEC_PER_SEC) {
            *microsecs -= USEC_PER_SEC;
            epoch += 1;
        }

        *secs += epoch;
    }
    else {
        uint32_t    delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        *secs = rtclock_calend.epoch;
        *microsecs = rtclock_calend.microepoch;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                *microsecs += (t32 - delta);

            if (*microsecs >= USEC_PER_SEC) {
                *microsecs -= USEC_PER_SEC;
                *secs += 1;
            }
        }

        simple_unlock(&rtclock_lock);
    }

    splx(s);
}
/* This is only called from the gettimeofday() syscall.  As a side
 * effect, it updates the commpage timestamp.  Otherwise it is
 * identical to clock_get_calendar_microtime().  Because most
 * gettimeofday() calls are handled by the commpage in user mode,
 * this routine should be infrequently used except when slowing down
 * the clock.
 */
void
clock_gettimeofday(
    uint32_t        *secs_p,
    uint32_t        *microsecs_p)
{
    uint32_t        epoch, microepoch;
    uint32_t        secs, microsecs;
    uint64_t        now, t64, secs_64, usec_64;
    spl_t           s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        microepoch = rtclock_calend.microepoch;

        secs = secs_64 = now / rtclock_sec_divisor;
        t64 = now - (secs_64 * rtclock_sec_divisor);
        microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        if ((microsecs += microepoch) >= USEC_PER_SEC) {
            microsecs -= USEC_PER_SEC;
            epoch += 1;
        }

        secs += epoch;

        /* adjust "now" to be absolute time at _start_ of usecond */
        now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);

        commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
    }
    else {
        uint32_t    delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        secs = rtclock_calend.epoch;
        microsecs = rtclock_calend.microepoch;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                microsecs += (t32 - delta);

            if (microsecs >= USEC_PER_SEC) {
                microsecs -= USEC_PER_SEC;
                secs += 1;
            }
        }

        /* no need to disable timestamp, it is already off */
    }

    simple_unlock(&rtclock_lock);
    splx(s);

    *secs_p = secs;
    *microsecs_p = microsecs;
}
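
/*
 * For illustration only: given the four values published to the
 * commpage by the routine above, user mode can recover the current
 * time without entering the kernel.  The sketch below uses
 * hypothetical parameter names for the stored fields; the real
 * commpage layout and access path are defined elsewhere.
 */
#if 0   /* sketch, not compiled */
static void
commpage_gettimeofday_sketch(
    uint64_t    stamp_abs,      /* absolute time at start of usecond */
    uint32_t    stamp_secs,     /* calendar seconds at that instant */
    uint32_t    stamp_usecs,    /* calendar microseconds at that instant */
    uint32_t    divisor,        /* timebase ticks per second */
    uint32_t    *secs,
    uint32_t    *microsecs)
{
    uint64_t    elapsed = mach_absolute_time() - stamp_abs;
    uint64_t    t64;

    *secs = stamp_secs + (t64 = elapsed / divisor);
    elapsed -= (t64 * divisor);
    *microsecs = stamp_usecs + (elapsed * USEC_PER_SEC) / divisor;
    if (*microsecs >= USEC_PER_SEC) {
        *microsecs -= USEC_PER_SEC;
        *secs += 1;
    }
}
#endif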
void
clock_get_calendar_nanotime(
    uint32_t        *secs,
    uint32_t        *nanosecs)
{
    uint32_t        epoch, nanoepoch;
    uint64_t        now, t64;
    spl_t           s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        uint32_t        divisor;

        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;

        simple_unlock(&rtclock_lock);

        *secs = t64 = now / (divisor = rtclock_sec_divisor);
        now -= (t64 * divisor);
        *nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;

        if ((*nanosecs += nanoepoch) >= NSEC_PER_SEC) {
            *nanosecs -= NSEC_PER_SEC;
            epoch += 1;
        }

        *secs += epoch;
    }
    else {
        uint32_t    delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        *secs = rtclock_calend.epoch;
        *nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                *nanosecs += ((t32 - delta) * NSEC_PER_USEC);

            if (*nanosecs >= NSEC_PER_SEC) {
                *nanosecs -= NSEC_PER_SEC;
                *secs += 1;
            }
        }

        simple_unlock(&rtclock_lock);
    }

    splx(s);
}
void
clock_set_calendar_microtime(
    uint32_t        secs,
    uint32_t        microsecs)
{
    uint32_t        sys, microsys;
    uint32_t        newsecs;
    spl_t           s;

    newsecs = (microsecs < 500*USEC_PER_SEC)?
                        secs: secs + 1;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    clock_get_system_microtime(&sys, &microsys);
    if ((int32_t)(microsecs -= microsys) < 0) {
        microsecs += USEC_PER_SEC;
        secs -= 1;
    }

    secs -= sys;

    rtclock_calend.epoch = secs;
    rtclock_calend.microepoch = microsecs;
    rtclock_calend.epoch1 = 0;
    rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    UNLOCK_RTC(s);

    PESetGMTTimeOfDay(newsecs);

    host_notify_calendar_change();
}
#define tickadj     (40)                /* "standard" skew, us / tick */
#define bigadj      (USEC_PER_SEC)      /* use 10x skew above bigadj us */
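
/*
 * Worked example of the skew rates above (assuming HZ = 100): a
 * requested adjustment of one second exceeds bigadj, so each clock
 * tick slews the calendar by 10 * tickadj = 400 us.  At 100 ticks per
 * second that is 40 ms of adjustment per second of real time, so the
 * full second is absorbed in roughly 25 seconds.
 */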
uint32_t
clock_set_calendar_adjtime(
    int32_t         *secs,
    int32_t         *microsecs)
{
    int64_t         total, ototal;
    uint32_t        interval = 0;
    spl_t           s;

    total = (int64_t)*secs * USEC_PER_SEC + *microsecs;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    ototal = rtclock_calend.adjtotal;

    if (rtclock_calend.adjdelta < 0) {
        uint64_t        now, t64;
        uint32_t        delta, t32;
        uint32_t        sys, microsys;

        delta = -rtclock_calend.adjdelta;

        sys = rtclock_calend.epoch;
        microsys = rtclock_calend.microepoch;

        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        if (t32 > delta)
            microsys += (t32 - delta);

        if (microsys >= USEC_PER_SEC) {
            microsys -= USEC_PER_SEC;
            sys += 1;
        }

        rtclock_calend.epoch = sys;
        rtclock_calend.microepoch = microsys;

        sys = t64 = now / rtclock_sec_divisor;
        now -= (t64 * rtclock_sec_divisor);
        microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

        if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
            rtclock_calend.microepoch += USEC_PER_SEC;
            sys += 1;
        }

        rtclock_calend.epoch -= sys;
    }

    if (total != 0) {
        int32_t     delta = tickadj;

        if (total > 0) {
            if (total > bigadj)
                delta *= 10;
            if (delta > total)
                delta = total;

            rtclock_calend.epoch1 = 0;
        }
        else {
            uint64_t        now, t64;
            uint32_t        sys, microsys;

            if (total < -bigadj)
                delta *= 10;
            delta = -delta;
            if (delta < total)
                delta = total;

            rtclock_calend.epoch1 = now = mach_absolute_time();

            sys = t64 = now / rtclock_sec_divisor;
            now -= (t64 * rtclock_sec_divisor);
            microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

            if ((rtclock_calend.microepoch += microsys) >= USEC_PER_SEC) {
                rtclock_calend.microepoch -= USEC_PER_SEC;
                sys += 1;
            }

            rtclock_calend.epoch += sys;
        }

        rtclock_calend.adjtotal = total;
        rtclock_calend.adjdelta = delta;

        interval = rtclock_tick_interval;
    }
    else {
        rtclock_calend.epoch1 = 0;
        rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    }

    UNLOCK_RTC(s);

    if (ototal == 0)
        *secs = *microsecs = 0;
    else {
        *secs = ototal / USEC_PER_SEC;
        *microsecs = ototal % USEC_PER_SEC;
    }

    return (interval);
}
uint32_t
clock_adjust_calendar(void)
{
    uint32_t        micronew, interval = 0;
    uint64_t        now, t64;
    uint32_t        t32;
    int32_t         delta;
    spl_t           s;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    delta = rtclock_calend.adjdelta;

    if (delta > 0) {
        micronew = rtclock_calend.microepoch + delta;
        if (micronew >= USEC_PER_SEC) {
            micronew -= USEC_PER_SEC;
            rtclock_calend.epoch += 1;
        }

        rtclock_calend.microepoch = micronew;

        rtclock_calend.adjtotal -= delta;
        if (delta > rtclock_calend.adjtotal)
            rtclock_calend.adjdelta = rtclock_calend.adjtotal;
    }
    else
    if (delta < 0) {
        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        rtclock_calend.epoch1 = now;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        micronew = rtclock_calend.microepoch + t32 + delta;
        if (micronew >= USEC_PER_SEC) {
            micronew -= USEC_PER_SEC;
            rtclock_calend.epoch += 1;
        }

        rtclock_calend.microepoch = micronew;

        rtclock_calend.adjtotal -= delta;
        if (delta < rtclock_calend.adjtotal)
            rtclock_calend.adjdelta = rtclock_calend.adjtotal;

        if (rtclock_calend.adjdelta == 0) {
            uint32_t        sys, microsys;

            sys = t64 = now / rtclock_sec_divisor;
            now -= (t64 * rtclock_sec_divisor);
            microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

            if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
                rtclock_calend.microepoch += USEC_PER_SEC;
                sys += 1;
            }

            rtclock_calend.epoch -= sys;

            rtclock_calend.epoch1 = 0;
        }
    }

    if (rtclock_calend.adjdelta != 0)
        interval = rtclock_tick_interval;

    UNLOCK_RTC(s);

    return (interval);
}
void
clock_initialize_calendar(void)
{
    uint32_t        sys, microsys;
    uint32_t        microsecs = 0, secs = PEGetGMTTimeOfDay();
    spl_t           s;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    clock_get_system_microtime(&sys, &microsys);
    if ((int32_t)(microsecs -= microsys) < 0) {
        microsecs += USEC_PER_SEC;
        secs -= 1;
    }

    secs -= sys;

    rtclock_calend.epoch = secs;
    rtclock_calend.microepoch = microsecs;
    rtclock_calend.epoch1 = 0;
    rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    UNLOCK_RTC(s);

    host_notify_calendar_change();
}
void
clock_timebase_info(
    mach_timebase_info_t    info)
{
    spl_t   s;

    LOCK_RTC(s);
    rtclock_timebase_initialized = TRUE;
    *info = rtclock_timebase_const;
    UNLOCK_RTC(s);
}
void
clock_set_timer_deadline(
    uint64_t                deadline)
{
    uint64_t                abstime;
    int                     decr, mycpu;
    struct rtclock_timer    *mytimer;
    spl_t                   s;

    s = splclock();
    mycpu = cpu_number();
    mytimer = &rtclock_timer[mycpu];
    mytimer->deadline = deadline;
    mytimer->is_set = TRUE;
    if (!mytimer->has_expired) {
        abstime = mach_absolute_time();
        if (    mytimer->deadline < rtclock_tick_deadline[mycpu]    ) {
            decr = deadline_to_decrementer(mytimer->deadline, abstime);
            if (    rtclock_decrementer_min != 0            &&
                    rtclock_decrementer_min < (natural_t)decr   )
                decr = rtclock_decrementer_min;

            mtdec(decr);

            KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                                    | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
        }
    }
    splx(s);
}
void
clock_set_timer_func(
    clock_timer_func_t      func)
{
    spl_t       s;

    LOCK_RTC(s);
    if (rtclock_timer_expire == NULL)
        rtclock_timer_expire = func;
    UNLOCK_RTC(s);
}
/*
 * Reset the clock device. This causes the realtime clock
 * device to reload its mode and count value (frequency).
 */
void
rtclock_reset(void)
{
    return;
}
/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
    int                     device,
    struct savearea         *ssp,
    spl_t                   old_spl)
{
    uint64_t                abstime;
    int                     decr1, decr2, mycpu = cpu_number();
    struct rtclock_timer    *mytimer = &rtclock_timer[mycpu];

    /*
     * We may receive interrupts too early, we must reject them.
     */
    if (rtclock_initialized == FALSE) {
        mtdec(DECREMENTER_MAX);     /* Max the decrementer if not init */
        return;
    }

    decr1 = decr2 = DECREMENTER_MAX;

    abstime = mach_absolute_time();
    if (    rtclock_tick_deadline[mycpu] <= abstime     ) {
        clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
                                            &rtclock_tick_deadline[mycpu]);
        hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
    }

    abstime = mach_absolute_time();
    if (    mytimer->is_set                 &&
            mytimer->deadline <= abstime        ) {
        mytimer->has_expired = TRUE; mytimer->is_set = FALSE;
        (*rtclock_timer_expire)(abstime);
        mytimer->has_expired = FALSE;
    }

    abstime = mach_absolute_time();
    decr1 = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

    if (mytimer->is_set)
        decr2 = deadline_to_decrementer(mytimer->deadline, abstime);

    if (decr1 > decr2)
        decr1 = decr2;

    if (    rtclock_decrementer_min != 0            &&
            rtclock_decrementer_min < (natural_t)decr1  )
        decr1 = rtclock_decrementer_min;

    mtdec(decr1);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                            | DBG_FUNC_NONE, decr1, 3, 0, 0, 0);
}
static void
rtclock_alarm_expire(
    timer_call_param_t      p0,
    timer_call_param_t      p1)
{
    mach_timespec_t     timestamp;

    (void) sysclk_gettime(&timestamp);

    clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}
static int
deadline_to_decrementer(
    uint64_t            deadline,
    uint64_t            now)
{
    uint64_t            delt;

    if (deadline <= now)
        return DECREMENTER_MIN;
    else {
        delt = deadline - now;
        return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
                ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
    }
}
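
/*
 * In other words, the value programmed into the decrementer is
 * clamped to [DECREMENTER_MIN, DECREMENTER_MAX] timebase ticks: the
 * PowerPC decrementer is a signed 32-bit down-counter, so
 * DECREMENTER_MAX is the largest positive count, and DECREMENTER_MIN
 * keeps a due (or past) deadline from programming a zero or negative
 * count that would be taken immediately.
 */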
static void
timespec_to_absolutetime(
    mach_timespec_t         *ts,
    uint64_t                *result)
{
    uint32_t    divisor;

    *result = ((uint64_t)ts->tv_sec * (divisor = rtclock_sec_divisor)) +
                ((uint64_t)ts->tv_nsec * divisor) / NSEC_PER_SEC;
}
void
clock_interval_to_deadline(
    uint32_t            interval,
    uint32_t            scale_factor,
    uint64_t            *result)
{
    uint64_t    abstime;

    clock_get_uptime(result);

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    *result += abstime;
}
void
clock_interval_to_absolutetime_interval(
    uint32_t            interval,
    uint32_t            scale_factor,
    uint64_t            *result)
{
    uint64_t    nanosecs = (uint64_t)interval * scale_factor;
    uint64_t    t64;
    uint32_t    divisor;

    *result = (t64 = nanosecs / NSEC_PER_SEC) *
                            (divisor = rtclock_sec_divisor);
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * divisor) / NSEC_PER_SEC;
}
void
clock_absolutetime_interval_to_deadline(
    uint64_t            abstime,
    uint64_t            *result)
{
    clock_get_uptime(result);

    *result += abstime;
}
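
/*
 * The conversion routines below split a value into whole seconds and
 * a sub-second remainder before scaling.  A direct computation such
 * as (abstime * NSEC_PER_SEC) / divisor would overflow 64 bits once
 * abstime exceeds 2^64 / 10^9, about 1.8e10 ticks (mere minutes at
 * typical timebase rates); the split keeps each partial product small.
 */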
void
absolutetime_to_nanoseconds(
    uint64_t            abstime,
    uint64_t            *result)
{
    uint64_t    t64;
    uint32_t    divisor;

    *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
    abstime -= (t64 * divisor);
    *result += (abstime * NSEC_PER_SEC) / divisor;
}
void
nanoseconds_to_absolutetime(
    uint64_t            nanosecs,
    uint64_t            *result)
{
    uint64_t    t64;
    uint32_t    divisor;

    *result = (t64 = nanosecs / NSEC_PER_SEC) *
                            (divisor = rtclock_sec_divisor);
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * divisor) / NSEC_PER_SEC;
}
/*
 * Spin-loop delay primitives.
 */
void
delay_for_interval(
    uint32_t        interval,
    uint32_t        scale_factor)
{
    uint64_t        now, end;

    clock_interval_to_deadline(interval, scale_factor, &end);

    do {
        now = mach_absolute_time();
    } while (now < end);
}

void
clock_delay_until(
    uint64_t        deadline)
{
    uint64_t        now;

    do {
        now = mach_absolute_time();
    } while (now < deadline);
}

void
delay(
    int     usec)
{
    delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}