/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * Purpose:	Routines for handling the machine dependent
 *			real-time clock.
 */
#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

#include <machine/mach_param.h>	/* HZ */
#include <ppc/proc_reg.h>

#include <pexpert/pexpert.h>

/*XXX power management hacks XXX*/
#include <IOKit/IOReturn.h>
#include <IOKit/IOMessage.h>

extern void	*registerSleepWakeInterest(
			void		*callback,
			void		*target,
			void		*refCon);
/*XXX power management hacks XXX*/

#include <sys/kdebug.h>
int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	sysclk_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,		/* OUT */
	mach_msg_type_number_t	*count);

void		sysclk_setalarm(
	mach_timespec_t			*deadline);

struct clock_ops sysclk_ops = {
	sysclk_config,		sysclk_init,
	sysclk_gettime,		0,
	sysclk_getattr,		0,
	sysclk_setalarm,
};
int		calend_config(void);

int		calend_init(void);

kern_return_t	calend_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_settime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,		/* OUT */
	mach_msg_type_number_t	*count);

struct clock_ops calend_ops = {
	calend_config,		calend_init,
	calend_gettime,		calend_settime,
	calend_getattr,		0,
	0,
};
/* local data declarations */

static struct rtclock {
	mach_timespec_t			calend_offset;
	boolean_t				calend_is_set;

	mach_timebase_info_data_t	timebase_const;

	struct rtclock_timer {
		AbsoluteTime			deadline;
		boolean_t				is_set;
	}						timer[NCPUS];

	clock_timer_func_t		timer_expire;

	timer_call_data_t		alarm[NCPUS];

	/* debugging */
	AbsoluteTime			last_abstime[NCPUS];
	int						last_decr[NCPUS];

	decl_simple_lock_data(,lock)	/* real-time clock device lock */
} rtclock;

static boolean_t		rtclock_initialized;

static AbsoluteTime		rtclock_tick_deadline[NCPUS];
static AbsoluteTime		rtclock_tick_interval;
static void		timespec_to_absolutetime(
					mach_timespec_t		timespec,
					AbsoluteTime		*result);

static int		deadline_to_decrementer(
					AbsoluteTime		deadline,
					AbsoluteTime		now);

static void		rtclock_alarm_timer(
					timer_call_param_t	p0,
					timer_call_param_t	p1);
/* global data declarations */

#define RTC_TICKPERIOD		(NSEC_PER_SEC / HZ)

#define DECREMENTER_MAX		0x7FFFFFFFUL
#define DECREMENTER_MIN		0xAUL

natural_t		rtclock_decrementer_min;
/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)					\
MACRO_BEGIN							\
	(s) = splclock();				\
	simple_lock(&rtclock.lock);		\
MACRO_END

#define UNLOCK_RTC(s)				\
MACRO_BEGIN							\
	simple_unlock(&rtclock.lock);	\
	splx(s);						\
MACRO_END
static void
timebase_callback(
	struct timebase_freq_t	*freq)
{
	natural_t	numer, denom;
	int			n;
	spl_t		s;

	/*
	 * Reduce the reported timebase frequency (ticks per second,
	 * expressed as timebase_num / timebase_den) to a nanoseconds
	 * per tick ratio numer / denom, folding in the 10^9 scale
	 * factor while stripping common factors of 10.
	 */
	denom = freq->timebase_num;
	n = 9;
	while (!(denom % 10)) {
		if (n < 1)
			break;
		denom /= 10;
		n--;
	}

	numer = freq->timebase_den;
	while (n-- > 0)
		numer *= 10;

	LOCK_RTC(s);
	rtclock.timebase_const.numer = numer;
	rtclock.timebase_const.denom = denom;
	UNLOCK_RTC(s);
}
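/*
 * Worked example (illustrative, with hypothetical numbers): a 25 MHz
 * timebase reports timebase_num = 25000000, timebase_den = 1.
 * Stripping six factors of 10 leaves denom = 25 with n = 3, so
 * numer = 1 * 10^3 = 1000, and nanoseconds per tick works out to
 * numer / denom = 1000 / 25 = 40 ns, as expected for 25 MHz.
 */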
/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
	int		i;

	if (cpu_number() != master_cpu)
		return (1);

	for (i = 0; i < NCPUS; i++)
		timer_call_setup(&rtclock.alarm[i], rtclock_alarm_timer, NULL);

	simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK);

	PE_register_timebase_callback(timebase_callback);

	return (1);
}
/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
	AbsoluteTime		abstime;
	int					decr, mycpu = cpu_number();

	if (mycpu != master_cpu) {
		if (rtclock_initialized == FALSE) {
			panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
		}
		/* Set decrementer and hence our next tick due */
		clock_get_uptime(&abstime);
		rtclock_tick_deadline[mycpu] = abstime;
		ADD_ABSOLUTETIME(&rtclock_tick_deadline[mycpu],
							&rtclock_tick_interval);
		decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
		mtdec(decr);
		rtclock.last_decr[mycpu] = decr;

		return (1);
	}

	/*
	 * Initialize non-zero clock structure values.
	 */
	clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1,
							&rtclock_tick_interval);
	/* Set decrementer and our next tick due */
	clock_get_uptime(&abstime);
	rtclock_tick_deadline[mycpu] = abstime;
	ADD_ABSOLUTETIME(&rtclock_tick_deadline[mycpu], &rtclock_tick_interval);
	decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
	mtdec(decr);
	rtclock.last_decr[mycpu] = decr;

	rtclock_initialized = TRUE;

	return (1);
}
/*
 * Perform a full 64 bit by 32 bit unsigned multiply,
 * yielding a 96 bit product.  The most significant
 * portion of the product is returned as a 64 bit
 * quantity, with the lower portion as a 32 bit word.
 */
static void
umul_64by32(
	AbsoluteTime		now64,
	natural_t			mult32,
	AbsoluteTime		*result64,
	natural_t			*result32)
{
	natural_t	mid, mid2;

	asm volatile("	mullw	%0,%1,%2" :
						"=r" (*result32) :
							"r" (now64.lo), "r" (mult32));

	asm volatile("	mullw	%0,%1,%2" :
						"=r" (mid2) :
							"r" (now64.hi), "r" (mult32));
	asm volatile("	mulhwu	%0,%1,%2" :
						"=r" (mid) :
							"r" (now64.lo), "r" (mult32));

	asm volatile("	mulhwu	%0,%1,%2" :
						"=r" (result64->hi) :
							"r" (now64.hi), "r" (mult32));

	asm volatile("	addc	%0,%2,%3;	addze	%1,%4" :
						"=r" (result64->lo), "=r" (result64->hi) :
							"r" (mid), "r" (mid2), "1" (result64->hi));
}
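/*
 * Illustrative sketch: the same 64 x 32 -> 96 bit multiply expressed
 * in portable C, assuming the compiler provides a 64-bit unsigned
 * long long.  The product is split the same way as above: bits 95..32
 * in *result64, bits 31..0 in *result32.  Kept under #if 0 so it does
 * not affect the build.
 */
#if 0
static void
umul_64by32_c(
	AbsoluteTime		now64,
	natural_t			mult32,
	AbsoluteTime		*result64,
	natural_t			*result32)
{
	unsigned long long	lo = (unsigned long long)now64.lo * mult32;
	unsigned long long	hi = (unsigned long long)now64.hi * mult32;
	unsigned long long	mid = hi + (lo >> 32);	/* bits 95..32 of the product */

	*result32 = (natural_t)lo;					/* bits 31..0  */
	result64->lo = (natural_t)mid;				/* bits 63..32 */
	result64->hi = (natural_t)(mid >> 32);		/* bits 95..64 */
}
#endif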
/*
 * Perform a partial 64 bit by 32 bit unsigned multiply,
 * yielding a 64 bit product.  Only the least significant
 * 64 bits of the product are calculated and returned.
 */
static void
umul_64by32to64(
	AbsoluteTime		now64,
	natural_t			mult32,
	AbsoluteTime		*result64)
{
	natural_t	mid, mid2;

	asm volatile("	mullw	%0,%1,%2" :
						"=r" (result64->lo) :
							"r" (now64.lo), "r" (mult32));

	asm volatile("	mullw	%0,%1,%2" :
						"=r" (mid2) :
							"r" (now64.hi), "r" (mult32));
	asm volatile("	mulhwu	%0,%1,%2" :
						"=r" (mid) :
							"r" (now64.lo), "r" (mult32));

	asm volatile("	add	%0,%1,%2" :
						"=r" (result64->hi) :
							"r" (mid), "r" (mid2));
}
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 96 bit quotient.
 * The most significant portion of the quotient is
 * returned as a 64 bit quantity, with the lower
 * portion as a 32 bit word.
 */
static void
udiv_96by32(
	AbsoluteTime		now64,
	natural_t			now32,
	natural_t			div32,
	AbsoluteTime		*result64,
	natural_t			*result32)
{
	AbsoluteTime	t64;

	if (now64.hi > 0 || now64.lo >= div32) {
		AbsoluteTime_to_scalar(result64) =
			AbsoluteTime_to_scalar(&now64) / div32;

		umul_64by32to64(*result64, div32, &t64);

		AbsoluteTime_to_scalar(&t64) =
			AbsoluteTime_to_scalar(&now64) - AbsoluteTime_to_scalar(&t64);

		*result32 = (((unsigned long long)t64.lo << 32) | now32) / div32;
	}
	else {
		AbsoluteTime_to_scalar(result64) =
			(((unsigned long long)now64.lo << 32) | now32) / div32;

		*result32 = result64->lo;
		result64->lo = result64->hi;
		result64->hi = 0;
	}
}
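/*
 * Illustrative sketch: the same 96 / 32 division expressed as two
 * chained 64-bit divisions in portable C, assuming unsigned long long
 * and div32 != 0.  The quotient is split as above: bits 95..32 in
 * *result64, bits 31..0 in *result32.  Kept under #if 0 so it does
 * not affect the build.
 */
#if 0
static void
udiv_96by32_c(
	AbsoluteTime		now64,
	natural_t			now32,
	natural_t			div32,
	AbsoluteTime		*result64,
	natural_t			*result32)
{
	unsigned long long	hi = AbsoluteTime_to_scalar(&now64);
	unsigned long long	low = ((hi % div32) << 32) | now32;

	/* high 64 bits of the quotient, then the remaining low 32 bits */
	AbsoluteTime_to_scalar(result64) = hi / div32;
	*result32 = (natural_t)(low / div32);
}
#endif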
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 64 bit quotient.
 * Any higher order bits of the quotient are simply
 * discarded.
 */
static void
udiv_96by32to64(
	AbsoluteTime		now64,
	natural_t			now32,
	natural_t			div32,
	AbsoluteTime		*result64)
{
	AbsoluteTime	t64;

	if (now64.hi > 0 || now64.lo >= div32) {
		AbsoluteTime_to_scalar(result64) =
			AbsoluteTime_to_scalar(&now64) / div32;

		umul_64by32to64(*result64, div32, &t64);

		AbsoluteTime_to_scalar(&t64) =
			AbsoluteTime_to_scalar(&now64) - AbsoluteTime_to_scalar(&t64);

		result64->hi = result64->lo;
		result64->lo = (((unsigned long long)t64.lo << 32) | now32) / div32;
	}
	else {
		AbsoluteTime_to_scalar(result64) =
			(((unsigned long long)now64.lo << 32) | now32) / div32;
	}
}
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 32 bit quotient,
 * and a 32 bit remainder.  Any higher order bits
 * of the quotient are simply discarded.
 */
static void
udiv_96by32to32and32(
	AbsoluteTime		now64,
	natural_t			now32,
	natural_t			div32,
	natural_t			*result32,
	natural_t			*remain32)
{
	AbsoluteTime	t64, u64;

	if (now64.hi > 0 || now64.lo >= div32) {
		AbsoluteTime_to_scalar(&t64) =
			AbsoluteTime_to_scalar(&now64) / div32;

		umul_64by32to64(t64, div32, &t64);

		AbsoluteTime_to_scalar(&t64) =
			AbsoluteTime_to_scalar(&now64) - AbsoluteTime_to_scalar(&t64);

		AbsoluteTime_to_scalar(&t64) =
			((unsigned long long)t64.lo << 32) | now32;

		AbsoluteTime_to_scalar(&u64) =
			AbsoluteTime_to_scalar(&t64) / div32;

		*result32 = u64.lo;

		umul_64by32to64(u64, div32, &u64);

		*remain32 = AbsoluteTime_to_scalar(&t64) -
						AbsoluteTime_to_scalar(&u64);
	}
	else {
		AbsoluteTime_to_scalar(&t64) =
			((unsigned long long)now64.lo << 32) | now32;

		AbsoluteTime_to_scalar(&u64) =
			AbsoluteTime_to_scalar(&t64) / div32;

		*result32 = u64.lo;

		umul_64by32to64(u64, div32, &u64);

		*remain32 = AbsoluteTime_to_scalar(&t64) -
						AbsoluteTime_to_scalar(&u64);
	}
}
/*
 * Get the clock device time. This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 *
 * SMP configurations - *this currently assumes that the processor
 * clocks will be synchronised*
 */
kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*time)	/* OUT */
{
	AbsoluteTime		now;
	AbsoluteTime		t64;
	natural_t			t32;
	natural_t			numer, denom;

	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;

	clock_get_uptime(&now);

	umul_64by32(now, numer, &t64, &t32);

	udiv_96by32(t64, t32, denom, &t64, &t32);

	udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
					&time->tv_sec, &time->tv_nsec);

	return (KERN_SUCCESS);
}
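/*
 * Plain-C sketch of the conversion above (illustrative): nanoseconds =
 * timebase ticks * numer / denom, then split into whole seconds and
 * nanoseconds.  The real code keeps a 96-bit intermediate so the
 * multiply cannot overflow; this sketch assumes 64 bits is enough.
 * Kept under #if 0 so it does not affect the build.
 */
#if 0
static kern_return_t
sysclk_gettime_sketch(
	mach_timespec_t	*time)	/* OUT */
{
	AbsoluteTime		now;
	unsigned long long	nsec;

	clock_get_uptime(&now);
	nsec = AbsoluteTime_to_scalar(&now) * rtclock.timebase_const.numer /
				rtclock.timebase_const.denom;
	time->tv_sec = nsec / NSEC_PER_SEC;
	time->tv_nsec = nsec % NSEC_PER_SEC;

	return (KERN_SUCCESS);
}
#endif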
kern_return_t
sysclk_gettime(
	mach_timespec_t	*time)	/* OUT */
{
	AbsoluteTime		now;
	AbsoluteTime		t64;
	natural_t			t32;
	natural_t			numer, denom;
	spl_t				s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	clock_get_uptime(&now);

	umul_64by32(now, numer, &t64, &t32);

	udiv_96by32(t64, t32, denom, &t64, &t32);

	udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
					&time->tv_sec, &time->tv_nsec);

	return (KERN_SUCCESS);
}
/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		LOCK_RTC(s);
		*(clock_res_t *) attr = RTC_TICKPERIOD;
		UNLOCK_RTC(s);
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}
/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
	mach_timespec_t		*deadline)
{
	AbsoluteTime	abstime;

	timespec_to_absolutetime(*deadline, &abstime);
	timer_call_enter(&rtclock.alarm[cpu_number()], abstime);
}
/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
	return (1);
}

/*
 * Initialize the calendar clock.
 */
int
calend_init(void)
{
	if (cpu_number() != master_cpu)
		return (1);

	return (1);
}
/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*curr_time)	/* OUT */
{
	spl_t		s;

	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		UNLOCK_RTC(s);
		return (KERN_FAILURE);
	}

	(void) sysclk_gettime_internal(curr_time);
	ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset);
	UNLOCK_RTC(s);

	return (KERN_SUCCESS);
}
/*
 * Set the current clock time.
 */
kern_return_t
calend_settime(
	mach_timespec_t	*new_time)
{
	mach_timespec_t	curr_time;
	spl_t			s;

	LOCK_RTC(s);
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset = *new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	UNLOCK_RTC(s);

	PESetGMTTimeOfDay(new_time->tv_sec);

	return (KERN_SUCCESS);
}
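/*
 * Note (illustrative): the calendar clock is maintained purely as an
 * offset from the system clock, i.e.
 *
 *	calendar time = sysclk time + calend_offset
 *
 * so calend_settime() stores calend_offset = new_time - sysclk time,
 * and calend_gettime() adds the offset back on.
 */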
/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
		LOCK_RTC(s);
		*(clock_res_t *) attr = RTC_TICKPERIOD;
		UNLOCK_RTC(s);
		break;

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}
void
clock_adjust_calendar(
	clock_res_t	nsec)
{
	spl_t		s;

	LOCK_RTC(s);
	if (rtclock.calend_is_set)
		ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
	UNLOCK_RTC(s);
}
static void
calend_setup_internal(
	long		seconds)
{
	mach_timespec_t	curr_time;

	(void) sysclk_gettime_internal(&curr_time);
	/* PEGetGMTTimeOfDay() has whole-second resolution: anchor the
	   offset at the nearest second boundary of the system clock */
	if (curr_time.tv_nsec < 500*USEC_PER_SEC)
		rtclock.calend_offset.tv_sec = seconds;
	else
		rtclock.calend_offset.tv_sec = seconds + 1;
	rtclock.calend_offset.tv_nsec = 0;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
}
static thread_call_t		calend_wakeup_call;
static thread_call_data_t	calend_wakeup_call_data;

static void
calend_wakeup_resynch(
	thread_call_param_t		p0,
	thread_call_param_t		p1)
{
	long		seconds = PEGetGMTTimeOfDay();
	spl_t		s;

	LOCK_RTC(s);
	calend_setup_internal(seconds);
	UNLOCK_RTC(s);
}
/*
 * Sleep/wake interest handler; the parameter list follows the IOKit
 * interest-callback convention, but only messageType is used here.
 */
static IOReturn
calend_sleep_wake_notif(
	void		*target,
	void		*refCon,
	UInt32		messageType,
	void		*provider,
	void		*messageArgument,
	vm_size_t	argSize)
{
	if (messageType != kIOMessageSystemHasPoweredOn)
		return (kIOReturnUnsupported);

	if (calend_wakeup_call != NULL)
		thread_call_enter(calend_wakeup_call);

	return (kIOReturnSuccess);
}
void
clock_initialize_calendar(void)
{
	long	seconds;
	spl_t	s;

	thread_call_setup(&calend_wakeup_call_data, calend_wakeup_resynch, NULL);
	calend_wakeup_call = &calend_wakeup_call_data;

	registerSleepWakeInterest(calend_sleep_wake_notif, NULL, NULL);

	seconds = PEGetGMTTimeOfDay();

	LOCK_RTC(s);
	if (!rtclock.calend_is_set)
		calend_setup_internal(seconds);
	UNLOCK_RTC(s);
}
mach_timespec_t
clock_get_calendar_offset(void)
{
	mach_timespec_t	result = MACH_TIMESPEC_ZERO;
	spl_t			s;

	LOCK_RTC(s);
	if (rtclock.calend_is_set)
		result = rtclock.calend_offset;
	UNLOCK_RTC(s);

	return (result);
}
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	spl_t	s;

	LOCK_RTC(s);
	*info = rtclock.timebase_const;
	UNLOCK_RTC(s);
}
void
clock_set_timer_deadline(
	AbsoluteTime			deadline)
{
	AbsoluteTime			abstime;
	int						decr, mycpu;
	struct rtclock_timer	*mytimer;
	spl_t					s;

	s = splclock();
	mycpu = cpu_number();
	mytimer = &rtclock.timer[mycpu];
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if (	CMP_ABSOLUTETIME(&mytimer->deadline,
				&rtclock_tick_deadline[mycpu]) < 0) {
		decr = deadline_to_decrementer(mytimer->deadline, abstime);
		if (	rtclock_decrementer_min != 0			&&
				rtclock_decrementer_min < (natural_t)decr	)
			decr = rtclock_decrementer_min;

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
							| DBG_FUNC_NONE, decr, 2, 0, 0, 0);

		mtdec(decr);
		rtclock.last_decr[mycpu] = decr;
	}
	splx(s);
}
void
clock_set_timer_func(
	clock_timer_func_t		func)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock.timer_expire == NULL)
		rtclock.timer_expire = func;
	UNLOCK_RTC(s);
}
/*
 * Reset the clock device. This causes the realtime clock
 * device to reload its mode and count value (frequency).
 */
void
rtclock_reset(void)
{
	return;
}
/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
	int						device,
	struct ppc_saved_state	*ssp,
	spl_t					old_spl)
{
	AbsoluteTime			abstime;
	int						decr[3], mycpu = cpu_number();
	struct rtclock_timer	*mytimer = &rtclock.timer[mycpu];

	/*
	 * We may receive interrupts too early, we must reject them.
	 */
	if (rtclock_initialized == FALSE) {
		mtdec(DECREMENTER_MAX);		/* Max the decrementer if not init */
		return;
	}

	decr[1] = decr[2] = DECREMENTER_MAX;

	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	if (	CMP_ABSOLUTETIME(&rtclock_tick_deadline[mycpu],
				&abstime) <= 0) {
		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
							&rtclock_tick_deadline[mycpu]);
		hertz_tick(USER_MODE(ssp->srr1), ssp->srr0);
	}

	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	if (	mytimer->is_set &&
				CMP_ABSOLUTETIME(&mytimer->deadline, &abstime) <= 0) {
		mytimer->is_set = FALSE;
		(*rtclock.timer_expire)(abstime);
	}

	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

	if (mytimer->is_set)
		decr[2] = deadline_to_decrementer(mytimer->deadline, abstime);

	if (decr[1] > decr[2])
		decr[1] = decr[2];

	if (	rtclock_decrementer_min != 0				&&
			rtclock_decrementer_min < (natural_t)decr[1]	)
		decr[1] = rtclock_decrementer_min;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
						| DBG_FUNC_NONE, decr[1], 3, 0, 0, 0);

	mtdec(decr[1]);
	rtclock.last_decr[mycpu] = decr[1];
}
static void
rtclock_alarm_timer(
	timer_call_param_t		p0,
	timer_call_param_t		p1)
{
	mach_timespec_t		timestamp;

	(void) sysclk_gettime(&timestamp);

	clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}
void
clock_get_uptime(
	AbsoluteTime		*result)
{
	natural_t		hi, lo, hic;

	/*
	 * Read the 64-bit timebase without tearing: if the upper half
	 * changed while the lower half was being read, read again.
	 */
	do {
		asm volatile("	mftbu %0" : "=r" (hi));
		asm volatile("	mftb %0" : "=r" (lo));
		asm volatile("	mftbu %0" : "=r" (hic));
	} while (hic != hi);

	result->lo = lo;
	result->hi = hi;
}
static int
deadline_to_decrementer(
	AbsoluteTime		deadline,
	AbsoluteTime		now)
{
	unsigned long long	delt;

	if (CMP_ABSOLUTETIME(&deadline, &now) <= 0)
		return DECREMENTER_MIN;

	delt = AbsoluteTime_to_scalar(&deadline) -
				AbsoluteTime_to_scalar(&now);

	return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
				((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
}
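/*
 * Worked example (illustrative): with DECREMENTER_MIN == 0xA, a
 * deadline 1000 timebase ticks in the future programs a decrementer
 * count of 999, a deadline already in the past (or fewer than
 * DECREMENTER_MIN + 1 ticks away) is clamped up to DECREMENTER_MIN,
 * and anything beyond 0x7FFFFFFF ticks is clamped to DECREMENTER_MAX.
 */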
static void
timespec_to_absolutetime(
	mach_timespec_t		timespec,
	AbsoluteTime		*result)
{
	AbsoluteTime		t64;
	natural_t			t32;
	natural_t			numer, denom;
	spl_t				s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	asm volatile("	mullw	%0,%1,%2" :
						"=r" (t64.lo) :
							"r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

	asm volatile("	mulhwu	%0,%1,%2" :
						"=r" (t64.hi) :
							"r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

	AbsoluteTime_to_scalar(&t64) += timespec.tv_nsec;

	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32(t64, t32, numer, &t64, &t32);

	result->hi = t64.lo;
	result->lo = t32;
}
void
clock_interval_to_deadline(
	natural_t			interval,
	natural_t			scale_factor,
	AbsoluteTime		*result)
{
	AbsoluteTime	abstime;

	clock_get_uptime(result);

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	ADD_ABSOLUTETIME(result, &abstime);
}
void
clock_interval_to_absolutetime_interval(
	natural_t			interval,
	natural_t			scale_factor,
	AbsoluteTime		*result)
{
	AbsoluteTime		t64;
	natural_t			t32;
	natural_t			numer, denom;
	spl_t				s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	asm volatile("	mullw	%0,%1,%2" :
						"=r" (t64.lo) :
							"r" (interval), "r" (scale_factor));
	asm volatile("	mulhwu	%0,%1,%2" :
						"=r" (t64.hi) :
							"r" (interval), "r" (scale_factor));

	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32(t64, t32, numer, &t64, &t32);

	result->hi = t64.lo;
	result->lo = t32;
}
void
clock_absolutetime_interval_to_deadline(
	AbsoluteTime		abstime,
	AbsoluteTime		*result)
{
	clock_get_uptime(result);

	ADD_ABSOLUTETIME(result, &abstime);
}
void
absolutetime_to_nanoseconds(
	AbsoluteTime		abstime,
	UInt64				*result)
{
	AbsoluteTime		t64;
	natural_t			t32;
	natural_t			numer, denom;
	spl_t				s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	umul_64by32(abstime, numer, &t64, &t32);

	udiv_96by32to64(t64, t32, denom, (void *)result);
}
void
nanoseconds_to_absolutetime(
	UInt64				nanoseconds,
	AbsoluteTime		*result)
{
	AbsoluteTime		t64;
	natural_t			t32;
	natural_t			numer, denom;
	spl_t				s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	AbsoluteTime_to_scalar(&t64) = nanoseconds;

	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32to64(t64, t32, numer, result);
}
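/*
 * Usage sketch (illustrative, hypothetical helper): time an operation
 * with clock_get_uptime() and convert the elapsed timebase ticks to
 * nanoseconds with the conversion routine above.  Kept under #if 0 so
 * it does not affect the build.
 */
#if 0
static UInt64
measure_op_nsec(void (*op)(void))
{
	AbsoluteTime	start, end, elapsed;
	UInt64			nsec;

	clock_get_uptime(&start);
	(*op)();
	clock_get_uptime(&end);

	AbsoluteTime_to_scalar(&elapsed) =
		AbsoluteTime_to_scalar(&end) - AbsoluteTime_to_scalar(&start);
	absolutetime_to_nanoseconds(elapsed, &nsec);

	return (nsec);
}
#endif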
/*
 * Spin-loop delay primitives.
 */
void
delay_for_interval(
	natural_t		interval,
	natural_t		scale_factor)
{
	AbsoluteTime	now, end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	do {
		clock_get_uptime(&now);
	} while (CMP_ABSOLUTETIME(&now, &end) < 0);
}
void
clock_delay_until(
	AbsoluteTime	deadline)
{
	AbsoluteTime	now;

	do {
		clock_get_uptime(&now);
	} while (CMP_ABSOLUTETIME(&now, &deadline) < 0);
}
void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
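/*
 * Usage note (illustrative): delay(10) busy-waits for roughly ten
 * microseconds, and delay_for_interval(100, NSEC_PER_USEC) likewise
 * spins for about 100 microseconds.  These primitives burn CPU and
 * are only appropriate for short waits in contexts that cannot block.
 */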