/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *	Purpose:	Routines for handling the machine dependent
 *			real-time clock.
 */
#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/host_notify.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>

#include <IOKit/IOPlatformExpert.h>

#include <sys/kdebug.h>
int sysclk_config(void);

int sysclk_init(void);

void treqs(uint32_t dec);

kern_return_t sysclk_gettime(
    mach_timespec_t         *cur_time);

kern_return_t sysclk_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,
    mach_msg_type_number_t  *count);

void sysclk_setalarm(
    mach_timespec_t         *deadline);

struct clock_ops sysclk_ops = {
    sysclk_config,      sysclk_init,
    sysclk_gettime,     0,
    sysclk_getattr,     0,
    sysclk_setalarm,
};
int calend_config(void);

kern_return_t calend_gettime(
    mach_timespec_t         *cur_time);

kern_return_t calend_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,
    mach_msg_type_number_t  *count);

struct clock_ops calend_ops = {
    calend_config,      0,
    calend_gettime,     0,
    calend_getattr,     0,
    0,
};
/* local data declarations */

static struct rtclock_calend {
    uint32_t        epoch;
    uint32_t        microepoch;

    uint64_t        epoch1;

    int64_t         adjtotal;
    int32_t         adjdelta;
}                   rtclock_calend;

static uint32_t     rtclock_boottime;
#define TIME_ADD(rsecs, secs, rfrac, frac, unit)                                \
MACRO_BEGIN                                                                     \
    if (((rfrac) += (frac)) >= (unit)) { (rfrac) -= (unit); (rsecs) += 1; }     \
    (rsecs) += (secs);                                                          \
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)                                \
MACRO_BEGIN                                                                     \
    if ((int32_t)((rfrac) -= (frac)) < 0) { (rfrac) += (unit); (rsecs) -= 1; }  \
    (rsecs) -= (secs);                                                          \
MACRO_END
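/*
 * Illustrative sketch, not part of the original source: how the
 * carry/borrow in TIME_ADD/TIME_SUB behaves.  The local variables
 * below are hypothetical.
 */
#if 0   /* example only, never compiled */
    uint32_t secs = 10, usecs = 900000;         /* 10.900000 s */

    /* Add 3.200000 s: the fraction overflows USEC_PER_SEC, so one
       second is carried; the result is 14.100000 s. */
    TIME_ADD(secs, 3, usecs, 200000, USEC_PER_SEC);

    /* Subtract 0.300000 s: the fraction underflows, so one second is
       borrowed; the result is 13.800000 s. */
    TIME_SUB(secs, 0, usecs, 300000, USEC_PER_SEC);
#endif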
#define NSEC_PER_HZ         (NSEC_PER_SEC / 100)
static uint32_t             rtclock_tick_interval;

static uint32_t             rtclock_sec_divisor;

static mach_timebase_info_data_t    rtclock_timebase_const;

static boolean_t            rtclock_timebase_initialized;

static clock_timer_func_t   rtclock_timer_expire;

static timer_call_data_t    rtclock_alarm_timer;
static void nanotime_to_absolutetime(
                uint32_t            secs,
                uint32_t            nanosecs,
                uint64_t            *result);

static int  deadline_to_decrementer(
                uint64_t            deadline,
                uint64_t            now);

static void rtclock_alarm_expire(
                timer_call_param_t  p0,
                timer_call_param_t  p1);
/* global data declarations */

#define DECREMENTER_MAX     0x7FFFFFFFUL
#define DECREMENTER_MIN     0xAUL

natural_t       rtclock_decrementer_min;

decl_simple_lock_data(static,rtclock_lock)
/*
 *	Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)                 \
MACRO_BEGIN                         \
    (s) = splclock();               \
    simple_lock(&rtclock_lock);     \
MACRO_END

#define UNLOCK_RTC(s)               \
MACRO_BEGIN                         \
    simple_unlock(&rtclock_lock);   \
    splx(s);                        \
MACRO_END
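/*
 * Illustrative sketch, not part of the original source: the usual
 * pattern for touching rtclock state under these macros.
 */
#if 0   /* example only, never compiled */
    spl_t       s;

    LOCK_RTC(s);                    /* raise to splclock() and take the lock */
    rtclock_decrementer_min = 10;   /* ... touch protected state ... */
    UNLOCK_RTC(s);                  /* drop the lock and restore the spl */
#endif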
void
timebase_callback(
    struct timebase_freq_t  *freq)
{
    uint32_t    numer, denom;
    uint64_t    abstime;
    spl_t       s;

    if (    freq->timebase_den < 1 || freq->timebase_den > 4   ||
            freq->timebase_num < freq->timebase_den             )
        panic("rtclock timebase_callback: invalid constant %d / %d",
                    freq->timebase_num, freq->timebase_den);

    denom = freq->timebase_num;
    numer = freq->timebase_den * NSEC_PER_SEC;

    LOCK_RTC(s);
    if (!rtclock_timebase_initialized) {
        commpage_set_timestamp(0,0,0,0);

        rtclock_timebase_const.numer = numer;
        rtclock_timebase_const.denom = denom;
        rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;

        nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
        rtclock_tick_interval = abstime;

        ml_init_lock_timeout();
    }
    else {
        UNLOCK_RTC(s);
        printf("rtclock timebase_callback: late old %d / %d new %d / %d\n",
                    rtclock_timebase_const.numer, rtclock_timebase_const.denom,
                            numer, denom);
        return;
    }
    UNLOCK_RTC(s);

    clock_timebase_init();
}
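/*
 * Illustrative sketch, not part of the original source: what the
 * callback computes for a hypothetical 33,333,333 Hz timebase
 * (timebase_num = 33333333, timebase_den = 1).
 */
#if 0   /* example only, never compiled */
    struct timebase_freq_t example;     /* hypothetical 33.33 MHz timebase */

    example.timebase_num = 33333333;
    example.timebase_den = 1;
    timebase_callback(&example);
    /* afterwards:
     *   rtclock_sec_divisor          == 33333333   ticks per second
     *   rtclock_timebase_const.numer == 1000000000 (1 * NSEC_PER_SEC)
     *   rtclock_timebase_const.denom == 33333333
     * so nanoseconds = ticks * numer / denom, roughly 30 ns per tick.
     */
#endif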
/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
    timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

    simple_lock_init(&rtclock_lock, 0);

    PE_register_timebase_callback(timebase_callback);

    return (1);
}
/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
    uint64_t                abstime, nexttick;
    int                     decr1, decr2;
    struct rtclock_timer    *mytimer;
    struct per_proc_info    *pp;

    decr1 = decr2 = DECREMENTER_MAX;

    pp = getPerProc();
    mytimer = &pp->rtclock_timer;

    abstime = mach_absolute_time();
    nexttick = abstime + rtclock_tick_interval;
    pp->rtclock_tick_deadline = nexttick;
    decr1 = deadline_to_decrementer(nexttick, abstime);

    if (mytimer->is_set)
        decr2 = deadline_to_decrementer(mytimer->deadline, abstime);

    if (decr1 > decr2)
        decr1 = decr2;

    treqs(decr1);

    return (1);
}
kern_return_t
sysclk_gettime(
    mach_timespec_t     *time)  /* OUT */
{
    uint64_t    now, t64;
    uint32_t    divisor;

    now = mach_absolute_time();

    time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    time->tv_nsec = (now * NSEC_PER_SEC) / divisor;

    return (KERN_SUCCESS);
}
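/*
 * Illustrative sketch, not part of the original source: the
 * divide-then-scale pattern used above, with hypothetical numbers.
 * Splitting out the whole seconds first keeps the multiply by
 * NSEC_PER_SEC from overflowing 64 bits for large uptimes.
 */
#if 0   /* example only, never compiled */
    uint64_t    now = 100000001;            /* timebase ticks */
    uint64_t    t64;
    uint32_t    divisor = 33333333;         /* hypothetical ticks per second */
    uint32_t    sec, nsec;

    sec  = t64 = now / divisor;             /* 3 whole seconds */
    now -= (t64 * divisor);                 /* 2 ticks left over */
    nsec = (now * NSEC_PER_SEC) / divisor;  /* 2 * 1e9 / 33333333 = 60 ns */
#endif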
void
clock_get_system_microtime(
    uint32_t        *secs,
    uint32_t        *microsecs)
{
    uint64_t    now, t64;
    uint32_t    divisor;

    now = mach_absolute_time();

    *secs = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    *microsecs = (now * USEC_PER_SEC) / divisor;
}
void
clock_get_system_nanotime(
    uint32_t        *secs,
    uint32_t        *nanosecs)
{
    uint64_t    now, t64;
    uint32_t    divisor;

    now = mach_absolute_time();

    *secs = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    *nanosecs = (now * NSEC_PER_SEC) / divisor;
}
/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,       /* OUT */
    mach_msg_type_number_t  *count)     /* IN/OUT */
{
    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        *(clock_res_t *) attr = NSEC_PER_HZ;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }

    return (KERN_SUCCESS);
}
/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
    mach_timespec_t     *deadline)
{
    uint64_t    abstime;

    nanotime_to_absolutetime(deadline->tv_sec, deadline->tv_nsec, &abstime);
    timer_call_enter(&rtclock_alarm_timer, abstime);
}
/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
    return (1);
}
/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
    mach_timespec_t     *time)  /* OUT */
{
    clock_get_calendar_nanotime(
                &time->tv_sec, &time->tv_nsec);

    return (KERN_SUCCESS);
}
/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,       /* OUT */
    mach_msg_type_number_t  *count)     /* IN/OUT */
{
    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
        *(clock_res_t *) attr = NSEC_PER_HZ;
        break;

    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        *(clock_res_t *) attr = 0;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }

    return (KERN_SUCCESS);
}
void
clock_get_calendar_microtime(
    uint32_t        *secs,
    uint32_t        *microsecs)
{
    uint32_t        epoch, microepoch;
    uint64_t        now, t64;
    spl_t           s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        uint32_t        divisor;

        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        microepoch = rtclock_calend.microepoch;

        simple_unlock(&rtclock_lock);

        *secs = t64 = now / (divisor = rtclock_sec_divisor);
        now -= (t64 * divisor);
        *microsecs = (now * USEC_PER_SEC) / divisor;

        TIME_ADD(*secs, epoch, *microsecs, microepoch, USEC_PER_SEC);
    }
    else {
        uint32_t        delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        *secs = rtclock_calend.epoch;
        *microsecs = rtclock_calend.microepoch;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                TIME_ADD(*secs, 0, *microsecs, (t32 - delta), USEC_PER_SEC);
        }

        simple_unlock(&rtclock_lock);
    }

    splx(s);
}
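/*
 * Illustrative note, not part of the original source: the slowed
 * (adjdelta < 0) path above with hypothetical numbers.  If 40 us is
 * being withheld per tick (delta = 40) and 100 us of real time have
 * elapsed since rtclock_calend.epoch1 (t32 = 100), the caller sees
 * the calendar advance by only t32 - delta = 60 us beyond the frozen
 * epoch.
 */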
/* This is only called from the gettimeofday() syscall.  As a side
 * effect, it updates the commpage timestamp.  Otherwise it is
 * identical to clock_get_calendar_microtime().  Because most
 * gettimeofday() calls are handled by the commpage in user mode,
 * this routine should be infrequently used except when slowing down
 * the commpage.
 */
void
clock_gettimeofday(
    uint32_t        *secs_p,
    uint32_t        *microsecs_p)
{
    uint32_t        epoch, microepoch;
    uint32_t        secs, microsecs;
    uint64_t        now, t64, secs_64, usec_64;
    spl_t           s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        microepoch = rtclock_calend.microepoch;

        secs = secs_64 = now / rtclock_sec_divisor;
        t64 = now - (secs_64 * rtclock_sec_divisor);
        microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        TIME_ADD(secs, epoch, microsecs, microepoch, USEC_PER_SEC);

        /* adjust "now" to be absolute time at _start_ of usecond */
        now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);

        commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
    }
    else {
        uint32_t        delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        secs = rtclock_calend.epoch;
        microsecs = rtclock_calend.microepoch;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                TIME_ADD(secs, 0, microsecs, (t32 - delta), USEC_PER_SEC);
        }

        /* no need to disable timestamp, it is already off */
    }

    simple_unlock(&rtclock_lock);
    splx(s);

    *secs_p = secs;
    *microsecs_p = microsecs;
}
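/*
 * Illustrative note, not part of the original source: the "start of
 * usecond" adjustment above keeps the commpage triple consistent.
 * With a hypothetical divisor of 33333333 ticks/s, a snapshot taken
 * 50 ticks into the current second reports a 1 us fraction; "now" is
 * pulled back to tick 33 of that second (1 * 33333333 / 1000000),
 * the boundary the reported microsecond actually begins on, before
 * being handed to commpage_set_timestamp().
 */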
void
clock_get_calendar_nanotime(
    uint32_t        *secs,
    uint32_t        *nanosecs)
{
    uint32_t        epoch, nanoepoch;
    uint64_t        now, t64;
    spl_t           s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        uint32_t        divisor;

        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;

        simple_unlock(&rtclock_lock);

        *secs = t64 = now / (divisor = rtclock_sec_divisor);
        now -= (t64 * divisor);
        *nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;

        TIME_ADD(*secs, epoch, *nanosecs, nanoepoch, NSEC_PER_SEC);
    }
    else {
        uint32_t        delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        *secs = rtclock_calend.epoch;
        *nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                TIME_ADD(*secs, 0, *nanosecs, ((t32 - delta) * NSEC_PER_USEC), NSEC_PER_SEC);
        }

        simple_unlock(&rtclock_lock);
    }

    splx(s);
}
void
clock_set_calendar_microtime(
    uint32_t        secs,
    uint32_t        microsecs)
{
    uint32_t        sys, microsys;
    uint32_t        newsecs;
    spl_t           s;

    newsecs = (microsecs < 500*USEC_PER_SEC)?
                    secs: secs + 1;

    s = splclock();
    simple_lock(&rtclock_lock);

    commpage_set_timestamp(0,0,0,0);

    /*
     *	Calculate the new calendar epoch based on
     *	the new value and the system clock.
     */
    clock_get_system_microtime(&sys, &microsys);
    TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

    /*
     *	Adjust the boottime based on the delta.
     */
    rtclock_boottime += secs - rtclock_calend.epoch;

    /*
     *	Set the new calendar epoch.
     */
    rtclock_calend.epoch = secs;
    rtclock_calend.microepoch = microsecs;

    /*
     *	Cancel any adjustment in progress.
     */
    rtclock_calend.epoch1 = 0;
    rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;

    simple_unlock(&rtclock_lock);

    /*
     *	Set the new value for the platform clock.
     */
    PESetGMTTimeOfDay(newsecs);

    splx(s);

    /*
     *	Send host notifications.
     */
    host_notify_calendar_change();
}
#define tickadj     (40)                /* "standard" skew, us / tick */
#define bigadj      (USEC_PER_SEC)      /* use 10x skew above bigadj us */

uint32_t
clock_set_calendar_adjtime(
    int32_t         *secs,
    int32_t         *microsecs)
{
    int64_t         total, ototal;
    uint32_t        interval = 0;
    spl_t           s;

    total = (int64_t)*secs * USEC_PER_SEC + *microsecs;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    ototal = rtclock_calend.adjtotal;

    if (rtclock_calend.adjdelta < 0) {
        uint64_t        now, t64;
        uint32_t        delta, t32;
        uint32_t        sys, microsys;

        delta = -rtclock_calend.adjdelta;

        sys = rtclock_calend.epoch;
        microsys = rtclock_calend.microepoch;

        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        if (t32 > delta)
            TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC);

        rtclock_calend.epoch = sys;
        rtclock_calend.microepoch = microsys;

        sys = t64 = now / rtclock_sec_divisor;
        now -= (t64 * rtclock_sec_divisor);
        microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

        TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
    }

    if (total != 0) {
        int32_t     delta = tickadj;

        if (total > 0) {
            if (total > bigadj)
                delta *= 10;
            if (delta > total)
                delta = total;

            rtclock_calend.epoch1 = 0;
        }
        else {
            uint64_t        now, t64;
            uint32_t        sys, microsys;

            if (total < -bigadj)
                delta *= 10;
            delta = -delta;
            if (delta < total)
                delta = total;

            rtclock_calend.epoch1 = now = mach_absolute_time();

            sys = t64 = now / rtclock_sec_divisor;
            now -= (t64 * rtclock_sec_divisor);
            microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

            TIME_ADD(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
        }

        rtclock_calend.adjtotal = total;
        rtclock_calend.adjdelta = delta;

        interval = rtclock_tick_interval;
    }
    else {
        rtclock_calend.epoch1 = 0;
        rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    }

    UNLOCK_RTC(s);

    if (ototal == 0)
        *secs = *microsecs = 0;
    else {
        *secs = ototal / USEC_PER_SEC;
        *microsecs = ototal % USEC_PER_SEC;
    }

    return (interval);
}
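/*
 * Illustrative sketch, not part of the original source: the slew
 * rate implied by the constants above.  The calendar is nudged by
 * adjdelta microseconds once per 10 ms tick (NSEC_PER_HZ), so with
 * the default tickadj of 40 us a requested adjustment of +1000 us
 * completes in 1000 / 40 = 25 ticks, roughly 250 ms of real time;
 * adjustments larger than bigadj (one second) slew 10x faster.
 */
#if 0   /* example only, never compiled */
    int32_t     adj_secs = 0, adj_usecs = 1000;     /* request +1 ms */

    clock_set_calendar_adjtime(&adj_secs, &adj_usecs);
    /* on return, adj_secs/adj_usecs hold whatever adjustment was
       still outstanding from a previous call */
#endif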
uint32_t
clock_adjust_calendar(void)
{
    uint32_t        interval = 0;
    int32_t         delta;
    spl_t           s;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    delta = rtclock_calend.adjdelta;

    if (delta > 0) {
        TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, delta, USEC_PER_SEC);

        rtclock_calend.adjtotal -= delta;
        if (delta > rtclock_calend.adjtotal)
            rtclock_calend.adjdelta = rtclock_calend.adjtotal;
    }
    else
    if (delta < 0) {
        uint64_t        now, t64;
        uint32_t        t32;

        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        rtclock_calend.epoch1 = now;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, (t32 + delta), USEC_PER_SEC);

        rtclock_calend.adjtotal -= delta;
        if (delta < rtclock_calend.adjtotal)
            rtclock_calend.adjdelta = rtclock_calend.adjtotal;

        if (rtclock_calend.adjdelta == 0) {
            uint32_t        sys, microsys;

            sys = t64 = now / rtclock_sec_divisor;
            now -= (t64 * rtclock_sec_divisor);
            microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

            TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);

            rtclock_calend.epoch1 = 0;
        }
    }

    if (rtclock_calend.adjdelta != 0)
        interval = rtclock_tick_interval;

    UNLOCK_RTC(s);

    return (interval);
}
/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 */
void
clock_initialize_calendar(void)
{
    uint32_t        sys, microsys;
    uint32_t        microsecs = 0, secs = PEGetGMTTimeOfDay();
    spl_t           s;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    if ((int32_t)secs >= (int32_t)rtclock_boottime) {
        /*
         *	Initialize the boot time based on the platform clock.
         */
        if (rtclock_boottime == 0)
            rtclock_boottime = secs;

        /*
         *	Calculate the new calendar epoch based
         *	on the platform clock and the system
         *	clock.
         */
        clock_get_system_microtime(&sys, &microsys);
        TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

        /*
         *	Set the new calendar epoch.
         */
        rtclock_calend.epoch = secs;
        rtclock_calend.microepoch = microsecs;

        /*
         *	Cancel any adjustment in progress.
         */
        rtclock_calend.epoch1 = 0;
        rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    }

    UNLOCK_RTC(s);

    /*
     *	Send host notifications.
     */
    host_notify_calendar_change();
}
void
clock_get_boottime_nanotime(
    uint32_t        *secs,
    uint32_t        *nanosecs)
{
    *secs = rtclock_boottime;
    *nanosecs = 0;
}
void
clock_timebase_info(
    mach_timebase_info_t    info)
{
    spl_t   s;

    LOCK_RTC(s);
    rtclock_timebase_initialized = TRUE;
    *info = rtclock_timebase_const;
    UNLOCK_RTC(s);
}
void
clock_set_timer_deadline(
    uint64_t                deadline)
{
    uint64_t                abstime;
    int                     decr;
    struct rtclock_timer    *mytimer;
    struct per_proc_info    *pp;
    spl_t                   s;

    s = splclock();
    pp = getPerProc();
    mytimer = &pp->rtclock_timer;
    mytimer->deadline = deadline;
    mytimer->is_set = TRUE;
    if (!mytimer->has_expired) {
        abstime = mach_absolute_time();
        if (    mytimer->deadline < pp->rtclock_tick_deadline      ) {
            decr = deadline_to_decrementer(mytimer->deadline, abstime);
            if (    rtclock_decrementer_min != 0                &&
                    rtclock_decrementer_min < (natural_t)decr       )
                decr = rtclock_decrementer_min;

            treqs(decr);

            KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                                        | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
        }
    }
    splx(s);
}
void
clock_set_timer_func(
    clock_timer_func_t      func)
{
    spl_t       s;

    LOCK_RTC(s);
    if (rtclock_timer_expire == NULL)
        rtclock_timer_expire = func;
    UNLOCK_RTC(s);
}
void    rtclock_intr(
            int                 device,
            struct savearea     *ssp,
            spl_t               old_spl);
/*
 *	Real-time clock device interrupt.
 */
void
rtclock_intr(
    __unused int        device,
    struct savearea     *ssp,
    __unused spl_t      old_spl)
{
    uint64_t                abstime;
    int                     decr1, decr2;
    struct rtclock_timer    *mytimer;
    struct per_proc_info    *pp;

    decr1 = decr2 = DECREMENTER_MAX;

    pp = getPerProc();

    abstime = mach_absolute_time();
    if (    pp->rtclock_tick_deadline <= abstime        ) {
        clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
                                            &pp->rtclock_tick_deadline);
        hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
    }

    mytimer = &pp->rtclock_timer;

    abstime = mach_absolute_time();
    if (    mytimer->is_set                 &&
            mytimer->deadline <= abstime        ) {
        mytimer->has_expired = TRUE; mytimer->is_set = FALSE;
        (*rtclock_timer_expire)(abstime);
        mytimer->has_expired = FALSE;
    }

    abstime = mach_absolute_time();
    decr1 = deadline_to_decrementer(pp->rtclock_tick_deadline, abstime);

    if (mytimer->is_set)
        decr2 = deadline_to_decrementer(mytimer->deadline, abstime);

    if (decr1 > decr2)
        decr1 = decr2;

    if (    rtclock_decrementer_min != 0                &&
            rtclock_decrementer_min < (natural_t)decr1      )
        decr1 = rtclock_decrementer_min;

    treqs(decr1);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                                | DBG_FUNC_NONE, decr1, 3, 0, 0, 0);
}
static void
rtclock_alarm_expire(
    __unused timer_call_param_t     p0,
    __unused timer_call_param_t     p1)
{
    mach_timespec_t     timestamp;

    (void) sysclk_gettime(&timestamp);

    clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}
static int
deadline_to_decrementer(
    uint64_t            deadline,
    uint64_t            now)
{
    uint64_t            delt;

    if (deadline <= now)
        return DECREMENTER_MIN;
    else {
        delt = deadline - now;
        return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
                ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
    }
}
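/*
 * Illustrative sketch, not part of the original source: how the
 * clamping above behaves.  A deadline already in the past yields
 * DECREMENTER_MIN; a deadline further out than DECREMENTER_MAX ticks
 * is clamped so the 32-bit decrementer never goes negative.
 */
#if 0   /* example only, never compiled */
    uint64_t    now = mach_absolute_time();

    deadline_to_decrementer(now - 1, now);              /* past: DECREMENTER_MIN (0xA) */
    deadline_to_decrementer(now + 1000, now);           /* near: 1000 - 1 = 999 */
    deadline_to_decrementer(now + (1ULL << 40), now);   /* far:  DECREMENTER_MAX */
#endif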
static void
nanotime_to_absolutetime(
    uint32_t            secs,
    uint32_t            nanosecs,
    uint64_t            *result)
{
    uint32_t    divisor = rtclock_sec_divisor;

    *result = ((uint64_t)secs * divisor) +
                ((uint64_t)nanosecs * divisor) / NSEC_PER_SEC;
}
void
absolutetime_to_microtime(
    uint64_t            abstime,
    uint32_t            *secs,
    uint32_t            *microsecs)
{
    uint64_t    t64;
    uint32_t    divisor;

    *secs = t64 = abstime / (divisor = rtclock_sec_divisor);
    abstime -= (t64 * divisor);
    *microsecs = (abstime * USEC_PER_SEC) / divisor;
}
void
clock_interval_to_deadline(
    uint32_t            interval,
    uint32_t            scale_factor,
    uint64_t            *result)
{
    uint64_t    abstime;

    clock_get_uptime(result);

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    *result += abstime;
}
void
clock_interval_to_absolutetime_interval(
    uint32_t            interval,
    uint32_t            scale_factor,
    uint64_t            *result)
{
    uint64_t        nanosecs = (uint64_t)interval * scale_factor;
    uint64_t        t64;
    uint32_t        divisor;

    *result = (t64 = nanosecs / NSEC_PER_SEC) *
                            (divisor = rtclock_sec_divisor);
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * divisor) / NSEC_PER_SEC;
}
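/*
 * Illustrative sketch, not part of the original source: converting a
 * 10 ms interval to timebase ticks with the routine above, assuming
 * a hypothetical 33,333,333 tick/s timebase.
 */
#if 0   /* example only, never compiled */
    uint64_t    abstime;

    /* 10000 us * 1000 ns/us = 10 ms -> about 333,333 timebase ticks */
    clock_interval_to_absolutetime_interval(10000, NSEC_PER_USEC, &abstime);
#endif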
void
clock_absolutetime_interval_to_deadline(
    uint64_t            abstime,
    uint64_t            *result)
{
    clock_get_uptime(result);

    *result += abstime;
}
void
absolutetime_to_nanoseconds(
    uint64_t            abstime,
    uint64_t            *result)
{
    uint64_t    t64;
    uint32_t    divisor;

    *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
    abstime -= (t64 * divisor);
    *result += (abstime * NSEC_PER_SEC) / divisor;
}
void
nanoseconds_to_absolutetime(
    uint64_t            nanosecs,
    uint64_t            *result)
{
    uint64_t    t64;
    uint32_t    divisor;

    *result = (t64 = nanosecs / NSEC_PER_SEC) *
                            (divisor = rtclock_sec_divisor);
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * divisor) / NSEC_PER_SEC;
}
void
machine_delay_until(
    uint64_t        deadline)
{
    uint64_t        now;

    do {
        now = mach_absolute_time();
    } while (now < deadline);
}
/*
 * Request a decrementer pop
 */
void treqs(uint32_t dec) {

    struct per_proc_info *pp;
    uint64_t nowtime, newtime;

    nowtime = mach_absolute_time();         /* What time is it? */
    pp = getPerProc();                      /* Get our processor block */
    newtime = nowtime + (uint64_t)dec;      /* Get requested pop time */
    pp->rtcPop = newtime;                   /* Copy it */

    mtdec((uint32_t)(newtime - nowtime));   /* Set decrementer */
    return;
}
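/*
 * Illustrative sketch, not part of the original source: requesting a
 * decrementer pop a fixed number of timebase ticks from now, the way
 * rtclock_intr() and clock_set_timer_deadline() drive treqs().
 */
#if 0   /* example only, never compiled */
    uint64_t    now = mach_absolute_time();
    int         decr = deadline_to_decrementer(now + 1000, now);

    treqs((uint32_t)decr);      /* pop roughly 1000 ticks from now */
#endif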