2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
29 * @APPLE_FREE_COPYRIGHT@
33 * Purpose: Routines for handling the machine dependent
37 #include <libkern/OSTypes.h>
39 #include <mach/mach_types.h>
41 #include <kern/clock.h>
42 #include <kern/thread.h>
43 #include <kern/macro_help.h>
46 #include <machine/mach_param.h> /* HZ */
47 #include <machine/commpage.h>
48 #include <ppc/proc_reg.h>
50 #include <pexpert/pexpert.h>
52 #include <sys/kdebug.h>
54 int sysclk_config(void);
56 int sysclk_init(void);
58 kern_return_t
sysclk_gettime(
59 mach_timespec_t
*cur_time
);
61 kern_return_t
sysclk_getattr(
62 clock_flavor_t flavor
,
64 mach_msg_type_number_t
*count
);
67 mach_timespec_t
*deadline
);
69 struct clock_ops sysclk_ops
= {
70 sysclk_config
, sysclk_init
,
76 int calend_config(void);
78 int calend_init(void);
80 kern_return_t
calend_gettime(
81 mach_timespec_t
*cur_time
);
83 kern_return_t
calend_settime(
84 mach_timespec_t
*cur_time
);
86 kern_return_t
calend_getattr(
87 clock_flavor_t flavor
,
89 mach_msg_type_number_t
*count
);
91 struct clock_ops calend_ops
= {
92 calend_config
, calend_init
,
93 calend_gettime
, calend_settime
,
98 /* local data declarations */
100 static struct rtclock
{
101 mach_timespec_t calend_offset
;
102 boolean_t calend_is_set
;
104 mach_timebase_info_data_t timebase_const
;
106 struct rtclock_timer
{
111 clock_timer_func_t timer_expire
;
113 timer_call_data_t alarm_timer
;
116 uint64_t last_abstime
[NCPUS
];
117 int last_decr
[NCPUS
];
119 decl_simple_lock_data(,lock
) /* real-time clock device lock */
122 static boolean_t rtclock_initialized
;
124 static uint64_t rtclock_tick_deadline
[NCPUS
];
125 static uint64_t rtclock_tick_interval
;
127 static uint32_t rtclock_sec_divisor
;
128 static uint32_t rtclock_ns_per_tick
;
130 static void timespec_to_absolutetime(
131 mach_timespec_t timespec
,
134 static int deadline_to_decrementer(
138 static void rtclock_alarm_timer(
139 timer_call_param_t p0
,
140 timer_call_param_t p1
);
142 /* global data declarations */
144 #define RTC_TICKPERIOD (NSEC_PER_SEC / HZ)
146 #define DECREMENTER_MAX 0x7FFFFFFFUL
147 #define DECREMENTER_MIN 0xAUL
149 natural_t rtclock_decrementer_min
;
152 * Macros to lock/unlock real-time clock device.
154 #define LOCK_RTC(s) \
157 simple_lock(&rtclock.lock); \
160 #define UNLOCK_RTC(s) \
162 simple_unlock(&rtclock.lock); \
168 struct timebase_freq_t
*freq
)
170 natural_t numer
, denom
;
174 denom
= freq
->timebase_num
;
176 while (!(denom
% 10)) {
183 numer
= freq
->timebase_den
;
189 rtclock
.timebase_const
.numer
= numer
;
190 rtclock
.timebase_const
.denom
= denom
;
191 rtclock_sec_divisor
= freq
->timebase_num
/ freq
->timebase_den
;
192 rtclock_ns_per_tick
= NSEC_PER_SEC
/ rtclock_sec_divisor
;
193 commpage_set_timestamp(0,0,0,0);
198 * Configure the real-time clock device.
203 if (cpu_number() != master_cpu
)
206 timer_call_setup(&rtclock
.alarm_timer
, rtclock_alarm_timer
, NULL
);
208 simple_lock_init(&rtclock
.lock
, ETAP_MISC_RT_CLOCK
);
210 PE_register_timebase_callback(timebase_callback
);
216 * Initialize the system clock device.
222 int decr
, mycpu
= cpu_number();
224 if (mycpu
!= master_cpu
) {
225 if (rtclock_initialized
== FALSE
) {
226 panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu
);
228 /* Set decrementer and hence our next tick due */
229 clock_get_uptime(&abstime
);
230 rtclock_tick_deadline
[mycpu
] = abstime
;
231 rtclock_tick_deadline
[mycpu
] += rtclock_tick_interval
;
232 decr
= deadline_to_decrementer(rtclock_tick_deadline
[mycpu
], abstime
);
234 rtclock
.last_decr
[mycpu
] = decr
;
240 * Initialize non-zero clock structure values.
242 clock_interval_to_absolutetime_interval(RTC_TICKPERIOD
, 1,
243 &rtclock_tick_interval
);
244 /* Set decrementer and our next tick due */
245 clock_get_uptime(&abstime
);
246 rtclock_tick_deadline
[mycpu
] = abstime
;
247 rtclock_tick_deadline
[mycpu
] += rtclock_tick_interval
;
248 decr
= deadline_to_decrementer(rtclock_tick_deadline
[mycpu
], abstime
);
250 rtclock
.last_decr
[mycpu
] = decr
;
252 rtclock_initialized
= TRUE
;
/*
 * Reinterpret an UnsignedWide (hi/lo 32-bit pair) as a 64-bit scalar,
 * and vice versa.  Both forms are used as lvalues below.
 * NOTE(review): these type-pun through incompatible pointer casts,
 * which violates strict aliasing; presumably safe only under the
 * kernel's build flags -- confirm before reusing elsewhere.
 */
#define UnsignedWide_to_scalar(x)	(*(uint64_t *)(x))
#define scalar_to_UnsignedWide(x)	(*(UnsignedWide *)(x))
261 * Perform a full 64 bit by 32 bit unsigned multiply,
262 * yielding a 96 bit product. The most significant
263 * portion of the product is returned as a 64 bit
264 * quantity, with the lower portion as a 32 bit word.
270 UnsignedWide
*result64
,
275 asm volatile(" mullw %0,%1,%2" :
277 "r" (now64
.lo
), "r" (mult32
));
279 asm volatile(" mullw %0,%1,%2" :
281 "r" (now64
.hi
), "r" (mult32
));
282 asm volatile(" mulhwu %0,%1,%2" :
284 "r" (now64
.lo
), "r" (mult32
));
286 asm volatile(" mulhwu %0,%1,%2" :
287 "=r" (result64
->hi
) :
288 "r" (now64
.hi
), "r" (mult32
));
290 asm volatile(" addc %0,%2,%3;
292 "=r" (result64
->lo
), "=r" (result64
->hi
) :
293 "r" (mid
), "r" (mid2
), "1" (result64
->hi
));
297 * Perform a partial 64 bit by 32 bit unsigned multiply,
298 * yielding a 64 bit product. Only the least significant
299 * 64 bits of the product are calculated and returned.
305 UnsignedWide
*result64
)
309 asm volatile(" mullw %0,%1,%2" :
310 "=r" (result64
->lo
) :
311 "r" (now64
.lo
), "r" (mult32
));
313 asm volatile(" mullw %0,%1,%2" :
315 "r" (now64
.hi
), "r" (mult32
));
316 asm volatile(" mulhwu %0,%1,%2" :
318 "r" (now64
.lo
), "r" (mult32
));
320 asm volatile(" add %0,%1,%2" :
321 "=r" (result64
->hi
) :
322 "r" (mid
), "r" (mid2
));
326 * Perform an unsigned division of a 96 bit value
327 * by a 32 bit value, yielding a 96 bit quotient.
328 * The most significant portion of the product is
329 * returned as a 64 bit quantity, with the lower
330 * portion as a 32 bit word.
337 UnsignedWide
*result64
,
342 if (now64
.hi
> 0 || now64
.lo
>= div32
) {
343 UnsignedWide_to_scalar(result64
) =
344 UnsignedWide_to_scalar(&now64
) / div32
;
346 umul_64by32to64(*result64
, div32
, &t64
);
348 UnsignedWide_to_scalar(&t64
) =
349 UnsignedWide_to_scalar(&now64
) - UnsignedWide_to_scalar(&t64
);
351 *result32
= (((uint64_t)t64
.lo
<< 32) | now32
) / div32
;
354 UnsignedWide_to_scalar(result64
) =
355 (((uint64_t)now64
.lo
<< 32) | now32
) / div32
;
357 *result32
= result64
->lo
;
358 result64
->lo
= result64
->hi
;
364 * Perform an unsigned division of a 96 bit value
365 * by a 32 bit value, yielding a 64 bit quotient.
366 * Any higher order bits of the quotient are simply
374 UnsignedWide
*result64
)
378 if (now64
.hi
> 0 || now64
.lo
>= div32
) {
379 UnsignedWide_to_scalar(result64
) =
380 UnsignedWide_to_scalar(&now64
) / div32
;
382 umul_64by32to64(*result64
, div32
, &t64
);
384 UnsignedWide_to_scalar(&t64
) =
385 UnsignedWide_to_scalar(&now64
) - UnsignedWide_to_scalar(&t64
);
387 result64
->hi
= result64
->lo
;
388 result64
->lo
= (((uint64_t)t64
.lo
<< 32) | now32
) / div32
;
391 UnsignedWide_to_scalar(result64
) =
392 (((uint64_t)now64
.lo
<< 32) | now32
) / div32
;
397 * Perform an unsigned division of a 96 bit value
398 * by a 32 bit value, yielding a 32 bit quotient,
399 * and a 32 bit remainder. Any higher order bits
400 * of the quotient are simply discarded.
403 udiv_96by32to32and32(
410 UnsignedWide t64
, u64
;
412 if (now64
.hi
> 0 || now64
.lo
>= div32
) {
413 UnsignedWide_to_scalar(&t64
) =
414 UnsignedWide_to_scalar(&now64
) / div32
;
416 umul_64by32to64(t64
, div32
, &t64
);
418 UnsignedWide_to_scalar(&t64
) =
419 UnsignedWide_to_scalar(&now64
) - UnsignedWide_to_scalar(&t64
);
421 UnsignedWide_to_scalar(&t64
) = ((uint64_t)t64
.lo
<< 32) | now32
;
423 UnsignedWide_to_scalar(&u64
) =
424 UnsignedWide_to_scalar(&t64
) / div32
;
428 umul_64by32to64(u64
, div32
, &u64
);
430 *remain32
= UnsignedWide_to_scalar(&t64
) -
431 UnsignedWide_to_scalar(&u64
);
434 UnsignedWide_to_scalar(&t64
) = ((uint64_t)now64
.lo
<< 32) | now32
;
436 UnsignedWide_to_scalar(&u64
) =
437 UnsignedWide_to_scalar(&t64
) / div32
;
441 umul_64by32to64(u64
, div32
, &u64
);
443 *remain32
= UnsignedWide_to_scalar(&t64
) -
444 UnsignedWide_to_scalar(&u64
);
449 * Get the clock device time. This routine is responsible
450 * for converting the device's machine dependent time value
451 * into a canonical mach_timespec_t value.
453 * SMP configurations - *the processor clocks are synchronised*
456 sysclk_gettime_internal(
457 mach_timespec_t
*time
) /* OUT */
462 uint32_t numer
, denom
;
464 numer
= rtclock
.timebase_const
.numer
;
465 denom
= rtclock
.timebase_const
.denom
;
467 clock_get_uptime((uint64_t *)&now
);
469 umul_64by32(now
, numer
, &t64
, &t32
);
471 udiv_96by32(t64
, t32
, denom
, &t64
, &t32
);
473 udiv_96by32to32and32(t64
, t32
, NSEC_PER_SEC
,
474 &time
->tv_sec
, &time
->tv_nsec
);
476 return (KERN_SUCCESS
);
481 mach_timespec_t
*time
) /* OUT */
486 uint32_t numer
, denom
;
490 numer
= rtclock
.timebase_const
.numer
;
491 denom
= rtclock
.timebase_const
.denom
;
494 clock_get_uptime((uint64_t *)&now
);
496 umul_64by32(now
, numer
, &t64
, &t32
);
498 udiv_96by32(t64
, t32
, denom
, &t64
, &t32
);
500 udiv_96by32to32and32(t64
, t32
, NSEC_PER_SEC
,
501 &time
->tv_sec
, &time
->tv_nsec
);
503 return (KERN_SUCCESS
);
507 * Get clock device attributes.
511 clock_flavor_t flavor
,
512 clock_attr_t attr
, /* OUT */
513 mach_msg_type_number_t
*count
) /* IN/OUT */
518 return (KERN_FAILURE
);
521 case CLOCK_GET_TIME_RES
: /* >0 res */
522 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
523 case CLOCK_ALARM_MINRES
:
524 case CLOCK_ALARM_MAXRES
:
526 *(clock_res_t
*) attr
= RTC_TICKPERIOD
;
531 return (KERN_INVALID_VALUE
);
533 return (KERN_SUCCESS
);
537 * Set deadline for the next alarm on the clock device. This call
538 * always resets the time to deliver an alarm for the clock.
542 mach_timespec_t
*deadline
)
546 timespec_to_absolutetime(*deadline
, &abstime
);
547 timer_call_enter(&rtclock
.alarm_timer
, abstime
);
551 * Configure the calendar clock.
560 * Initialize the calendar clock.
565 if (cpu_number() != master_cpu
)
572 * Get the current clock microtime and sync the timestamp
573 * on the commpage. Only called from ppc_gettimeofday(),
574 * ie in response to a system call from user mode.
582 UnsignedWide wide_now
;
585 uint32_t numer
, denom
;
587 mach_timespec_t curr_time
;
591 if (!rtclock
.calend_is_set
) {
596 numer
= rtclock
.timebase_const
.numer
;
597 denom
= rtclock
.timebase_const
.denom
;
599 clock_get_uptime(&now
);
600 wide_now
= *((UnsignedWide
*) &now
);
602 umul_64by32(wide_now
, numer
, &t64
, &t32
);
604 udiv_96by32(t64
, t32
, denom
, &t64
, &t32
);
606 udiv_96by32to32and32(t64
, t32
, NSEC_PER_SEC
,
607 &curr_time
.tv_sec
, &curr_time
.tv_nsec
);
609 ADD_MACH_TIMESPEC(&curr_time
, &rtclock
.calend_offset
);
611 secs
= curr_time
.tv_sec
;
612 usecs
= curr_time
.tv_nsec
/ NSEC_PER_USEC
;
616 t32
= curr_time
.tv_nsec
- (usecs
* NSEC_PER_USEC
);
617 t32
= t32
/ rtclock_ns_per_tick
;
620 commpage_set_timestamp(now
,secs
,usecs
,rtclock_sec_divisor
);
628 * Get the current clock time.
632 mach_timespec_t
*curr_time
) /* OUT */
637 if (!rtclock
.calend_is_set
) {
639 return (KERN_FAILURE
);
642 (void) sysclk_gettime_internal(curr_time
);
643 ADD_MACH_TIMESPEC(curr_time
, &rtclock
.calend_offset
);
646 return (KERN_SUCCESS
);
650 * Set the current clock time.
654 mach_timespec_t
*new_time
)
656 mach_timespec_t curr_time
;
660 (void) sysclk_gettime_internal(&curr_time
);
661 rtclock
.calend_offset
= *new_time
;
662 SUB_MACH_TIMESPEC(&rtclock
.calend_offset
, &curr_time
);
663 rtclock
.calend_is_set
= TRUE
;
664 commpage_set_timestamp(0,0,0,0); /* disable timestamp */
667 PESetGMTTimeOfDay(new_time
->tv_sec
);
669 return (KERN_SUCCESS
);
673 * Get clock device attributes.
677 clock_flavor_t flavor
,
678 clock_attr_t attr
, /* OUT */
679 mach_msg_type_number_t
*count
) /* IN/OUT */
684 return (KERN_FAILURE
);
687 case CLOCK_GET_TIME_RES
: /* >0 res */
689 *(clock_res_t
*) attr
= RTC_TICKPERIOD
;
693 case CLOCK_ALARM_CURRES
: /* =0 no alarm */
694 case CLOCK_ALARM_MINRES
:
695 case CLOCK_ALARM_MAXRES
:
696 *(clock_res_t
*) attr
= 0;
700 return (KERN_INVALID_VALUE
);
702 return (KERN_SUCCESS
);
706 clock_adjust_calendar(
712 if (rtclock
.calend_is_set
) {
713 ADD_MACH_TIMESPEC_NSEC(&rtclock
.calend_offset
, nsec
);
714 commpage_set_timestamp(0,0,0,0); /* disable timestamp */
720 clock_initialize_calendar(void)
722 mach_timespec_t curr_time
;
723 long seconds
= PEGetGMTTimeOfDay();
727 (void) sysclk_gettime_internal(&curr_time
);
728 if (curr_time
.tv_nsec
< 500*USEC_PER_SEC
)
729 rtclock
.calend_offset
.tv_sec
= seconds
;
731 rtclock
.calend_offset
.tv_sec
= seconds
+ 1;
732 rtclock
.calend_offset
.tv_nsec
= 0;
733 SUB_MACH_TIMESPEC(&rtclock
.calend_offset
, &curr_time
);
734 rtclock
.calend_is_set
= TRUE
;
735 commpage_set_timestamp(0,0,0,0); /* disable timestamp */
740 clock_get_calendar_offset(void)
742 mach_timespec_t result
= MACH_TIMESPEC_ZERO
;
746 if (rtclock
.calend_is_set
)
747 result
= rtclock
.calend_offset
;
755 mach_timebase_info_t info
)
760 *info
= rtclock
.timebase_const
;
765 clock_set_timer_deadline(
770 struct rtclock_timer
*mytimer
;
774 mycpu
= cpu_number();
775 mytimer
= &rtclock
.timer
[mycpu
];
776 clock_get_uptime(&abstime
);
777 rtclock
.last_abstime
[mycpu
] = abstime
;
778 mytimer
->deadline
= deadline
;
779 mytimer
->is_set
= TRUE
;
780 if ( mytimer
->deadline
< rtclock_tick_deadline
[mycpu
] ) {
781 decr
= deadline_to_decrementer(mytimer
->deadline
, abstime
);
782 if ( rtclock_decrementer_min
!= 0 &&
783 rtclock_decrementer_min
< (natural_t
)decr
)
784 decr
= rtclock_decrementer_min
;
787 rtclock
.last_decr
[mycpu
] = decr
;
789 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1)
790 | DBG_FUNC_NONE
, decr
, 2, 0, 0, 0);
796 clock_set_timer_func(
797 clock_timer_func_t func
)
802 if (rtclock
.timer_expire
== NULL
)
803 rtclock
.timer_expire
= func
;
808 * Reset the clock device. This causes the realtime clock
809 * device to reload its mode and count value (frequency).
818 * Real-time clock device interrupt.
823 struct savearea
*ssp
,
827 int decr
[3], mycpu
= cpu_number();
828 struct rtclock_timer
*mytimer
= &rtclock
.timer
[mycpu
];
831 * We may receive interrupts too early, we must reject them.
833 if (rtclock_initialized
== FALSE
) {
834 mtdec(DECREMENTER_MAX
); /* Max the decrementer if not init */
838 decr
[1] = decr
[2] = DECREMENTER_MAX
;
840 clock_get_uptime(&abstime
);
841 rtclock
.last_abstime
[mycpu
] = abstime
;
842 if ( rtclock_tick_deadline
[mycpu
] <= abstime
) {
843 clock_deadline_for_periodic_event(rtclock_tick_interval
, abstime
,
844 &rtclock_tick_deadline
[mycpu
]);
845 hertz_tick(USER_MODE(ssp
->save_srr1
), ssp
->save_srr0
);
848 clock_get_uptime(&abstime
);
849 rtclock
.last_abstime
[mycpu
] = abstime
;
850 if ( mytimer
->is_set
&&
851 mytimer
->deadline
<= abstime
) {
852 mytimer
->is_set
= FALSE
;
853 (*rtclock
.timer_expire
)(abstime
);
856 clock_get_uptime(&abstime
);
857 rtclock
.last_abstime
[mycpu
] = abstime
;
858 decr
[1] = deadline_to_decrementer(rtclock_tick_deadline
[mycpu
], abstime
);
861 decr
[2] = deadline_to_decrementer(mytimer
->deadline
, abstime
);
863 if (decr
[1] > decr
[2])
866 if ( rtclock_decrementer_min
!= 0 &&
867 rtclock_decrementer_min
< (natural_t
)decr
[1] )
868 decr
[1] = rtclock_decrementer_min
;
871 rtclock
.last_decr
[mycpu
] = decr
[1];
873 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI
, 1)
874 | DBG_FUNC_NONE
, decr
[1], 3, 0, 0, 0);
879 timer_call_param_t p0
,
880 timer_call_param_t p1
)
882 mach_timespec_t timestamp
;
884 (void) sysclk_gettime(×tamp
);
886 clock_alarm_intr(SYSTEM_CLOCK
, ×tamp
);
893 UnsignedWide
*result
= (UnsignedWide
*)result0
;
894 uint32_t hi
, lo
, hic
;
897 asm volatile(" mftbu %0" : "=r" (hi
));
898 asm volatile(" mftb %0" : "=r" (lo
));
899 asm volatile(" mftbu %0" : "=r" (hic
));
907 deadline_to_decrementer(
914 return DECREMENTER_MIN
;
916 delt
= deadline
- now
;
917 return (delt
>= (DECREMENTER_MAX
+ 1))? DECREMENTER_MAX
:
918 ((delt
>= (DECREMENTER_MIN
+ 1))? (delt
- 1): DECREMENTER_MIN
);
923 timespec_to_absolutetime(
924 mach_timespec_t timespec
,
927 UnsignedWide
*result
= (UnsignedWide
*)result0
;
930 uint32_t numer
, denom
;
934 numer
= rtclock
.timebase_const
.numer
;
935 denom
= rtclock
.timebase_const
.denom
;
938 asm volatile(" mullw %0,%1,%2" :
940 "r" (timespec
.tv_sec
), "r" (NSEC_PER_SEC
));
942 asm volatile(" mulhwu %0,%1,%2" :
944 "r" (timespec
.tv_sec
), "r" (NSEC_PER_SEC
));
946 UnsignedWide_to_scalar(&t64
) += timespec
.tv_nsec
;
948 umul_64by32(t64
, denom
, &t64
, &t32
);
950 udiv_96by32(t64
, t32
, numer
, &t64
, &t32
);
957 clock_interval_to_deadline(
959 uint32_t scale_factor
,
964 clock_get_uptime(result
);
966 clock_interval_to_absolutetime_interval(interval
, scale_factor
, &abstime
);
972 clock_interval_to_absolutetime_interval(
974 uint32_t scale_factor
,
977 UnsignedWide
*result
= (UnsignedWide
*)result0
;
980 uint32_t numer
, denom
;
984 numer
= rtclock
.timebase_const
.numer
;
985 denom
= rtclock
.timebase_const
.denom
;
988 asm volatile(" mullw %0,%1,%2" :
990 "r" (interval
), "r" (scale_factor
));
991 asm volatile(" mulhwu %0,%1,%2" :
993 "r" (interval
), "r" (scale_factor
));
995 umul_64by32(t64
, denom
, &t64
, &t32
);
997 udiv_96by32(t64
, t32
, numer
, &t64
, &t32
);
1004 clock_absolutetime_interval_to_deadline(
1008 clock_get_uptime(result
);
1014 absolutetime_to_nanoseconds(
1020 uint32_t numer
, denom
;
1024 numer
= rtclock
.timebase_const
.numer
;
1025 denom
= rtclock
.timebase_const
.denom
;
1028 UnsignedWide_to_scalar(&t64
) = abstime
;
1030 umul_64by32(t64
, numer
, &t64
, &t32
);
1032 udiv_96by32to64(t64
, t32
, denom
, (void *)result
);
1036 nanoseconds_to_absolutetime(
1037 uint64_t nanoseconds
,
1042 uint32_t numer
, denom
;
1046 numer
= rtclock
.timebase_const
.numer
;
1047 denom
= rtclock
.timebase_const
.denom
;
1050 UnsignedWide_to_scalar(&t64
) = nanoseconds
;
1052 umul_64by32(t64
, denom
, &t64
, &t32
);
1054 udiv_96by32to64(t64
, t32
, numer
, (void *)result
);
1058 * Spin-loop delay primitives.
1063 uint32_t scale_factor
)
1067 clock_interval_to_deadline(interval
, scale_factor
, &end
);
1070 clock_get_uptime(&now
);
1071 } while (now
< end
);
1081 clock_get_uptime(&now
);
1082 } while (now
< deadline
);
1089 delay_for_interval((usec
< 0)? -usec
: usec
, NSEC_PER_USEC
);