/*
 * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *	File:		rtclock.c
 *	Purpose:	Routines for handling the machine dependent
 *				real-time clock.
 */
#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

#include <kern/host_notify.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <ppc/pms.h>
#include <ppc/rtclock.h>

#include <IOKit/IOPlatformExpert.h>

#include <sys/kdebug.h>
int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	sysclk_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,		/* OUT */
	mach_msg_type_number_t	*count);

void		sysclk_setalarm(
	mach_timespec_t			*deadline);

struct clock_ops sysclk_ops = {
	sysclk_config,			sysclk_init,
	sysclk_gettime,			0,
	sysclk_getattr,			0,
	sysclk_setalarm,
};
int		calend_config(void);

kern_return_t	calend_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,		/* OUT */
	mach_msg_type_number_t	*count);

struct clock_ops calend_ops = {
	calend_config,			0,
	calend_gettime,			0,
	calend_getattr,			0,
	0,
};
/* local data declarations */

static struct rtclock_calend {
	uint32_t		epoch;			/* calendar epoch, seconds */
	uint32_t		microepoch;		/* calendar epoch, microseconds */

	uint64_t		epoch1;			/* abstime at which a slowing adjustment began */

	int32_t			adjdelta;		/* adjustment applied per tick, us */
	int32_t			adjtotal;		/* remaining adjustment, us */
}					rtclock_calend;

static uint32_t		rtclock_boottime;
#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if (((rfrac) += (frac)) >= (unit)) {			\
		(rfrac) -= (unit);							\
		(rsecs) += 1;								\
	}												\
	(rsecs) += (secs);								\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if ((int32_t)((rfrac) -= (frac)) < 0) {			\
		(rfrac) += (unit);							\
		(rsecs) -= 1;								\
	}												\
	(rsecs) -= (secs);								\
MACRO_END
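/*
 * Illustrative usage sketch (editor's addition, not in the original
 * source): TIME_ADD folds a (seconds, fraction) pair into an accumulator
 * with at most one carry, TIME_SUB with at most one borrow.  Both assume
 * the fractional operands are already normalized to less than 'unit'.
 */
#if 0	/* example only */
static void
time_macros_example(void)
{
	uint32_t	secs = 10, usecs = 900000;

	TIME_ADD(secs, 2, usecs, 200000, USEC_PER_SEC);
	/* now secs == 13, usecs == 100000: the fraction overflowed once */

	TIME_SUB(secs, 1, usecs, 200000, USEC_PER_SEC);
	/* now secs == 11, usecs == 900000: the fraction borrowed once */
}
#endif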
#define NSEC_PER_HZ		(NSEC_PER_SEC / 100)
static uint32_t		rtclock_tick_interval;

static uint32_t		rtclock_sec_divisor;

static mach_timebase_info_data_t	rtclock_timebase_const;

static boolean_t	rtclock_timebase_initialized;

static clock_timer_func_t	rtclock_timer_expire;

static timer_call_data_t	rtclock_alarm_timer;
static void		nanotime_to_absolutetime(
					uint32_t		secs,
					uint32_t		nanosecs,
					uint64_t		*result);

static void		rtclock_alarm_expire(
					timer_call_param_t	p0,
					timer_call_param_t	p1);
/* global data declarations */

decl_simple_lock_data(static,rtclock_lock)

/*
 *	Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)					\
MACRO_BEGIN							\
	(s) = splclock();				\
	simple_lock(&rtclock_lock);		\
MACRO_END

#define UNLOCK_RTC(s)				\
MACRO_BEGIN							\
	simple_unlock(&rtclock_lock);	\
	splx(s);						\
MACRO_END
static void
timebase_callback(
	struct timebase_freq_t	*freq)
{
	uint32_t	numer, denom;
	uint64_t	abstime;
	spl_t		s;

	if (	freq->timebase_den < 1 || freq->timebase_den > 4	||
			freq->timebase_num < freq->timebase_den)
		panic("rtclock timebase_callback: invalid constant %d / %d",
					freq->timebase_num, freq->timebase_den);

	denom = freq->timebase_num;
	numer = freq->timebase_den * NSEC_PER_SEC;

	LOCK_RTC(s);
	if (!rtclock_timebase_initialized) {
		commpage_set_timestamp(0,0,0,0);

		rtclock_timebase_const.numer = numer;
		rtclock_timebase_const.denom = denom;
		rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;

		nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
		rtclock_tick_interval = abstime;

		ml_init_lock_timeout();
	}
	else {
		UNLOCK_RTC(s);
		printf("rtclock timebase_callback: late old %d / %d new %d / %d\n",
					rtclock_timebase_const.numer, rtclock_timebase_const.denom,
							numer, denom);
		return;
	}
	UNLOCK_RTC(s);

	clock_timebase_init();
}
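/*
 * Worked example (editor's addition, not in the original source): for a
 * hypothetical platform reporting timebase_num = 100000000 and
 * timebase_den = 4 (a 25 MHz timebase), the callback computes
 *
 *	rtclock_sec_divisor    = 100000000 / 4 = 25000000 ticks/sec
 *	rtclock_timebase_const = { numer = 4 * NSEC_PER_SEC, denom = 100000000 }
 *
 * so nanoseconds = abstime * numer / denom = abstime * 40, i.e. each
 * timebase tick is 40 ns, consistent with NSEC_PER_SEC / rtclock_sec_divisor.
 */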
/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
	timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

	simple_lock_init(&rtclock_lock, 0);

	PE_register_timebase_callback(timebase_callback);

	return (1);
}
/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
	uint64_t				abstime;
	struct per_proc_info	*pp;

	pp = getPerProc();

	abstime = mach_absolute_time();
	pp->rtclock_tick_deadline = abstime + rtclock_tick_interval;	/* Get the time we need to pop */
	pp->rtcPop = pp->rtclock_tick_deadline;		/* Set the rtc pop time the same for now */

	(void)setTimerReq();						/* Start the timers going */

	return (1);
}
kern_return_t
sysclk_gettime(
	mach_timespec_t		*time)	/* OUT */
{
	uint64_t	now, t64;
	uint32_t	divisor;

	now = mach_absolute_time();

	time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
	now -= (t64 * divisor);
	time->tv_nsec = (now * NSEC_PER_SEC) / divisor;

	return (KERN_SUCCESS);
}
void
clock_get_system_microtime(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint64_t	now, t64;
	uint32_t	divisor;

	now = mach_absolute_time();

	*secs = t64 = now / (divisor = rtclock_sec_divisor);
	now -= (t64 * divisor);
	*microsecs = (now * USEC_PER_SEC) / divisor;
}
void
clock_get_system_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	uint64_t	now, t64;
	uint32_t	divisor;

	now = mach_absolute_time();

	*secs = t64 = now / (divisor = rtclock_sec_divisor);
	now -= (t64 * divisor);
	*nanosecs = (now * NSEC_PER_SEC) / divisor;
}
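/*
 * Editor's note (not in the original source): the routines above share one
 * conversion pattern,
 *
 *	secs = abstime / rtclock_sec_divisor;
 *	frac = ((abstime % rtclock_sec_divisor) * UNITS_PER_SEC) / rtclock_sec_divisor;
 *
 * The remainder is scaled separately because it is strictly less than
 * rtclock_sec_divisor, so the multiply cannot overflow 64 bits, whereas a
 * direct (abstime * NSEC_PER_SEC) would overflow within minutes of boot at
 * typical timebase rates.
 */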
/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	if (*count != 1)
		return (KERN_FAILURE);

	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = NSEC_PER_HZ;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}

	return (KERN_SUCCESS);
}
/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
	mach_timespec_t		*deadline)
{
	uint64_t	abstime;

	nanotime_to_absolutetime(deadline->tv_sec, deadline->tv_nsec, &abstime);
	timer_call_enter(&rtclock_alarm_timer, abstime);
}
/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
	return (1);
}
/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
	mach_timespec_t		*time)	/* OUT */
{
	clock_get_calendar_nanotime(
				&time->tv_sec, &time->tv_nsec);

	return (KERN_SUCCESS);
}
/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	if (*count != 1)
		return (KERN_FAILURE);

	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
		*(clock_res_t *) attr = NSEC_PER_HZ;
		break;

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}

	return (KERN_SUCCESS);
}
void
clock_get_calendar_microtime(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint32_t	epoch, microepoch;
	uint64_t	now, t64;
	spl_t		s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t	divisor;

		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		simple_unlock(&rtclock_lock);

		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		*microsecs = (now * USEC_PER_SEC) / divisor;

		TIME_ADD(*secs, epoch, *microsecs, microepoch, USEC_PER_SEC);
	}
	else {
		uint32_t	delta, t32;

		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time();

		*secs = rtclock_calend.epoch;
		*microsecs = rtclock_calend.microepoch;

		if (now > rtclock_calend.epoch1) {
			t64 = now - rtclock_calend.epoch1;

			t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

			if (t32 > delta)
				TIME_ADD(*secs, 0, *microsecs, (t32 - delta), USEC_PER_SEC);
		}

		simple_unlock(&rtclock_lock);
	}

	splx(s);
}
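/*
 * Editor's note (not in the original source): while the calendar is being
 * slowed (adjdelta < 0), rtclock_calend.epoch is left at its value as of
 * the start of the adjustment and epoch1 records the absolute time at that
 * point.  t32 is the real microseconds elapsed since epoch1, and crediting
 * only (t32 - delta) makes the reported time advance slightly slower than
 * real time while never stepping backwards.
 */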
/* This is only called from the gettimeofday() syscall.  As a side
 * effect, it updates the commpage timestamp.  Otherwise it is
 * identical to clock_get_calendar_microtime().  Because most
 * gettimeofday() calls are handled by the commpage in user mode,
 * this routine should be used infrequently, except when slowing down
 * the clock.
 */
void
clock_gettimeofday(
	uint32_t			*secs_p,
	uint32_t			*microsecs_p)
{
	uint32_t	epoch, microepoch;
	uint32_t	secs, microsecs;
	uint64_t	now, t64, secs_64, usec_64;
	spl_t		s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		secs = secs_64 = now / rtclock_sec_divisor;
		t64 = now - (secs_64 * rtclock_sec_divisor);
		microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		TIME_ADD(secs, epoch, microsecs, microepoch, USEC_PER_SEC);

		/* adjust "now" to be absolute time at _start_ of usecond */
		now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);

		commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
	}
	else {
		uint32_t	delta, t32;

		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time();

		secs = rtclock_calend.epoch;
		microsecs = rtclock_calend.microepoch;

		if (now > rtclock_calend.epoch1) {
			t64 = now - rtclock_calend.epoch1;

			t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

			if (t32 > delta)
				TIME_ADD(secs, 0, microsecs, (t32 - delta), USEC_PER_SEC);
		}

		/* no need to disable timestamp, it is already off */
	}

	simple_unlock(&rtclock_lock);
	splx(s);

	*secs_p = secs;
	*microsecs_p = microsecs;
}
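/*
 * Editor's note (not in the original source): "now" is rounded back to the
 * absolute time at the start of the reported microsecond before being passed
 * to commpage_set_timestamp(), so user-mode gettimeofday(), which
 * interpolates forward from the commpage timestamp, starts from a consistent
 * (abstime, secs, microsecs) triple.
 */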
void
clock_get_calendar_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	uint32_t	epoch, nanoepoch;
	uint64_t	now, t64;
	spl_t		s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t	divisor;

		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;

		simple_unlock(&rtclock_lock);

		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		*nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;

		TIME_ADD(*secs, epoch, *nanosecs, nanoepoch, NSEC_PER_SEC);
	}
	else {
		uint32_t	delta, t32;

		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time();

		*secs = rtclock_calend.epoch;
		*nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;

		if (now > rtclock_calend.epoch1) {
			t64 = now - rtclock_calend.epoch1;

			t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

			if (t32 > delta)
				TIME_ADD(*secs, 0, *nanosecs, ((t32 - delta) * NSEC_PER_USEC), NSEC_PER_SEC);
		}

		simple_unlock(&rtclock_lock);
	}

	splx(s);
}
void
clock_set_calendar_microtime(
	uint32_t			secs,
	uint32_t			microsecs)
{
	uint32_t	sys, microsys;
	uint32_t	newsecs;
	spl_t		s;

	newsecs = (microsecs < 500*USEC_PER_SEC)?
						secs: secs + 1;

	s = splclock();
	simple_lock(&rtclock_lock);

	commpage_set_timestamp(0,0,0,0);

	/*
	 *	Cancel any adjustment in progress.
	 */
	if (rtclock_calend.adjdelta < 0) {
		uint64_t	now, t64;
		uint32_t	delta, t32;

		delta = -rtclock_calend.adjdelta;

		sys = rtclock_calend.epoch;
		microsys = rtclock_calend.microepoch;

		now = mach_absolute_time();

		if (now > rtclock_calend.epoch1)
			t64 = now - rtclock_calend.epoch1;
		else
			t64 = 0;

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC);

		rtclock_calend.epoch = sys;
		rtclock_calend.microepoch = microsys;

		sys = t64 = now / rtclock_sec_divisor;
		now -= (t64 * rtclock_sec_divisor);
		microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

		TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
	}

	rtclock_calend.epoch1 = 0;
	rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;

	/*
	 *	Calculate the new calendar epoch based on
	 *	the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 *	Adjust the boottime based on the delta.
	 */
	rtclock_boottime += secs - rtclock_calend.epoch;

	/*
	 *	Set the new calendar epoch.
	 */
	rtclock_calend.epoch = secs;
	rtclock_calend.microepoch = microsecs;

	simple_unlock(&rtclock_lock);

	/*
	 *	Set the new value for the platform clock.
	 */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
}
#define tickadj		(40)				/* "standard" skew, us / tick */
#define	bigadj		(USEC_PER_SEC)		/* use 10x skew above bigadj us */
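/*
 * Editor's worked example (not in the original source): the clock ticks
 * every NSEC_PER_HZ ns (10 ms), so a pending adjustment of 100000 us at
 * tickadj (40 us per tick) takes 2500 ticks, about 25 seconds, to slew out;
 * above bigadj the 10x rate (400 us per tick) slews 1 second of offset in
 * roughly the same 25 seconds.
 */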
uint32_t
clock_set_calendar_adjtime(
	int32_t				*secs,
	int32_t				*microsecs)
{
	int64_t		total, ototal;
	uint32_t	interval = 0;
	spl_t		s;

	total = (int64_t)*secs * USEC_PER_SEC + *microsecs;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);

	ototal = rtclock_calend.adjtotal;

	if (rtclock_calend.adjdelta < 0) {
		uint64_t	now, t64;
		uint32_t	delta, t32;
		uint32_t	sys, microsys;

		delta = -rtclock_calend.adjdelta;

		sys = rtclock_calend.epoch;
		microsys = rtclock_calend.microepoch;

		now = mach_absolute_time();

		if (now > rtclock_calend.epoch1)
			t64 = now - rtclock_calend.epoch1;
		else
			t64 = 0;

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC);

		rtclock_calend.epoch = sys;
		rtclock_calend.microepoch = microsys;

		sys = t64 = now / rtclock_sec_divisor;
		now -= (t64 * rtclock_sec_divisor);
		microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

		TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
	}

	if (total != 0) {
		int32_t		delta = tickadj;

		if (total > 0) {
			if (total > bigadj)
				delta *= 10;
			if (delta > total)
				delta = total;

			rtclock_calend.epoch1 = 0;
		}
		else {
			uint64_t	now, t64;
			uint32_t	sys, microsys;

			if (total < -bigadj)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;

			rtclock_calend.epoch1 = now = mach_absolute_time();

			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			TIME_ADD(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
		}

		rtclock_calend.adjtotal = total;
		rtclock_calend.adjdelta = delta;

		interval = rtclock_tick_interval;
	}
	else {
		rtclock_calend.epoch1 = 0;
		rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	}

	UNLOCK_RTC(s);

	if (ototal == 0)
		*secs = *microsecs = 0;
	else {
		*secs = ototal / USEC_PER_SEC;
		*microsecs = ototal % USEC_PER_SEC;
	}

	return (interval);
}
uint32_t
clock_adjust_calendar(void)
{
	uint32_t	interval = 0;
	int32_t		delta;
	spl_t		s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);

	delta = rtclock_calend.adjdelta;

	if (delta > 0) {
		TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, delta, USEC_PER_SEC);

		rtclock_calend.adjtotal -= delta;
		if (delta > rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;
	}
	else
	if (delta < 0) {
		uint64_t	now, t64;
		uint32_t	t32;

		now = mach_absolute_time();

		if (now > rtclock_calend.epoch1)
			t64 = now - rtclock_calend.epoch1;
		else
			t64 = 0;

		rtclock_calend.epoch1 = now;

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, (t32 + delta), USEC_PER_SEC);

		rtclock_calend.adjtotal -= delta;
		if (delta < rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;

		if (rtclock_calend.adjdelta == 0) {
			uint32_t	sys, microsys;

			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);

			rtclock_calend.epoch1 = 0;
		}
	}

	if (rtclock_calend.adjdelta != 0)
		interval = rtclock_tick_interval;

	UNLOCK_RTC(s);

	return (interval);
}
/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 */
void
clock_initialize_calendar(void)
{
	uint32_t	sys, microsys;
	uint32_t	microsecs = 0, secs = PEGetGMTTimeOfDay();
	spl_t		s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);

	if ((int32_t)secs >= (int32_t)rtclock_boottime) {
		/*
		 *	Initialize the boot time based on the platform clock.
		 */
		if (rtclock_boottime == 0)
			rtclock_boottime = secs;

		/*
		 *	Calculate the new calendar epoch based
		 *	on the platform clock and the system
		 *	clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 *	Set the new calendar epoch.
		 */
		rtclock_calend.epoch = secs;
		rtclock_calend.microepoch = microsecs;

		/*
		 *	Cancel any adjustment in progress.
		 */
		rtclock_calend.epoch1 = 0;
		rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	}

	UNLOCK_RTC(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
}
void
clock_get_boottime_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	*secs = rtclock_boottime;
	*nanosecs = 0;
}
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	spl_t	s;

	LOCK_RTC(s);
	rtclock_timebase_initialized = TRUE;
	*info = rtclock_timebase_const;
	UNLOCK_RTC(s);
}
void
clock_set_timer_deadline(
	uint64_t				deadline)
{
	int						decr;
	rtclock_timer_t			*mytimer;
	struct per_proc_info	*pp;
	spl_t					s;

	s = splclock();
	pp = getPerProc();
	mytimer = &pp->rtclock_timer;
	mytimer->deadline = deadline;

	if (!mytimer->has_expired && (deadline < pp->rtclock_tick_deadline)) {	/* Has the timer already expired, or is the deadline earlier than the one currently set? */
		pp->rtcPop = deadline;			/* Yes, set the new rtc pop time */
		decr = setTimerReq();			/* Start the timers going */

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
									| DBG_FUNC_NONE, decr, 2, 0, 0, 0);
	}

	splx(s);
}
void
clock_set_timer_func(
	clock_timer_func_t		func)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock_timer_expire == NULL)
		rtclock_timer_expire = func;
	UNLOCK_RTC(s);
}
/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(struct savearea *ssp) {

	uint64_t				abstime;
	int						decr;
	rtclock_timer_t			*mytimer;
	struct per_proc_info	*pp;

	pp = getPerProc();
	mytimer = &pp->rtclock_timer;

	abstime = mach_absolute_time();
	if (pp->rtclock_tick_deadline <= abstime) {		/* Have we passed the pop time? */
		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
			&pp->rtclock_tick_deadline);
		hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
		abstime = mach_absolute_time();				/* Refresh the current time since we went away */
	}

	if (mytimer->deadline <= abstime) {				/* Have we expired the deadline? */
		mytimer->has_expired = TRUE;				/* Remember that we popped */
		mytimer->deadline = EndOfAllTime;			/* Set timer request to the end of all time in case we have no more events */
		(*rtclock_timer_expire)(abstime);			/* Process pop */
		mytimer->has_expired = FALSE;
	}

	pp->rtcPop = (pp->rtclock_tick_deadline < mytimer->deadline) ?	/* Get shortest pop */
		pp->rtclock_tick_deadline :					/* It was the periodic timer */
		mytimer->deadline;							/* Actually, an event request */

	decr = setTimerReq();							/* Request the timer pop */

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
		| DBG_FUNC_NONE, decr, 3, 0, 0, 0);
}
/*
 * Request an interruption at a specific time.
 *
 * Sets the decrementer to pop at the right time based on the timebase.
 * The value is chosen by comparing the rtc request with the power management
 * request.  We may add other values at a future time.
 */
int setTimerReq(void) {

	struct per_proc_info	*pp;
	int						decr;
	uint64_t				nexttime;

	pp = getPerProc();							/* Get per_proc */

	nexttime = pp->rtcPop;						/* Assume main timer */

	decr = setPop((pp->pms.pmsPop < nexttime) ? pp->pms.pmsPop : nexttime);	/* Schedule timer pop */

	return decr;								/* Pass back what we actually set */
}
static void
rtclock_alarm_expire(
	timer_call_param_t		p0,
	timer_call_param_t		p1)
{
	mach_timespec_t		timestamp;

	(void) sysclk_gettime(&timestamp);

	clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}
static void
nanotime_to_absolutetime(
	uint32_t			secs,
	uint32_t			nanosecs,
	uint64_t			*result)
{
	uint32_t	divisor = rtclock_sec_divisor;

	*result = ((uint64_t)secs * divisor) +
				((uint64_t)nanosecs * divisor) / NSEC_PER_SEC;
}
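/*
 * Editor's worked example (not in the original source): with
 * rtclock_sec_divisor = 25000000 (a 25 MHz timebase), secs = 1 and
 * nanosecs = 500000000 convert as
 *
 *	*result = 1 * 25000000 + (500000000 * 25000000) / NSEC_PER_SEC
 *	        = 25000000 + 12500000 = 37500000 timebase ticks
 *
 * The multiply is done before the divide to preserve precision; with a
 * 32-bit divisor and nanosecs below 10^9 the 64-bit intermediate cannot
 * overflow.
 */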
void
absolutetime_to_microtime(
	uint64_t			abstime,
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint64_t	t64;
	uint32_t	divisor;

	*secs = t64 = abstime / (divisor = rtclock_sec_divisor);
	abstime -= (t64 * divisor);
	*microsecs = (abstime * USEC_PER_SEC) / divisor;
}
void
clock_interval_to_deadline(
	uint32_t			interval,
	uint32_t			scale_factor,
	uint64_t			*result)
{
	uint64_t	abstime;

	clock_get_uptime(result);

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result += abstime;
}
void
clock_interval_to_absolutetime_interval(
	uint32_t			interval,
	uint32_t			scale_factor,
	uint64_t			*result)
{
	uint64_t	nanosecs = (uint64_t)interval * scale_factor;
	uint64_t	t64;
	uint32_t	divisor;

	*result = (t64 = nanosecs / NSEC_PER_SEC) *
						(divisor = rtclock_sec_divisor);
	nanosecs -= (t64 * NSEC_PER_SEC);
	*result += (nanosecs * divisor) / NSEC_PER_SEC;
}
void
clock_absolutetime_interval_to_deadline(
	uint64_t			abstime,
	uint64_t			*result)
{
	clock_get_uptime(result);

	*result += abstime;
}
void
absolutetime_to_nanoseconds(
	uint64_t			abstime,
	uint64_t			*result)
{
	uint64_t	t64;
	uint32_t	divisor;

	*result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
	abstime -= (t64 * divisor);
	*result += (abstime * NSEC_PER_SEC) / divisor;
}
void
nanoseconds_to_absolutetime(
	uint64_t			nanosecs,
	uint64_t			*result)
{
	uint64_t	t64;
	uint32_t	divisor;

	*result = (t64 = nanosecs / NSEC_PER_SEC) *
						(divisor = rtclock_sec_divisor);
	nanosecs -= (t64 * NSEC_PER_SEC);
	*result += (nanosecs * divisor) / NSEC_PER_SEC;
}
void
machine_delay_until(
	uint64_t		deadline)
{
	uint64_t	now;

	do {
		now = mach_absolute_time();
	} while (now < deadline);
}