/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *  Purpose:    Routines for handling the machine dependent
 *              real-time clock.
 */
#include <libkern/OSTypes.h>

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

#include <machine/mach_param.h>     /* HZ */
#include <ppc/proc_reg.h>

#include <pexpert/pexpert.h>

#include <sys/kdebug.h>
int     sysclk_config(void);

int     sysclk_init(void);

kern_return_t   sysclk_gettime(
    mach_timespec_t         *cur_time);

kern_return_t   sysclk_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,
    mach_msg_type_number_t  *count);

void    sysclk_setalarm(
    mach_timespec_t         *deadline);

struct clock_ops sysclk_ops = {
    sysclk_config,          sysclk_init,
    sysclk_gettime,         0,
    sysclk_getattr,         0,
    sysclk_setalarm,
};
int     calend_config(void);

int     calend_init(void);

kern_return_t   calend_gettime(
    mach_timespec_t         *cur_time);

kern_return_t   calend_settime(
    mach_timespec_t         *cur_time);

kern_return_t   calend_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,
    mach_msg_type_number_t  *count);

struct clock_ops calend_ops = {
    calend_config,          calend_init,
    calend_gettime,         calend_settime,
    calend_getattr,         0,
    0,
};
/* local data declarations */

static struct rtclock {
    mach_timespec_t             calend_offset;
    boolean_t                   calend_is_set;

    mach_timebase_info_data_t   timebase_const;

    struct rtclock_timer {
        uint64_t                deadline;
        boolean_t               is_set;
    }                           timer[NCPUS];

    clock_timer_func_t          timer_expire;

    timer_call_data_t           alarm_timer;

    uint64_t                    last_abstime[NCPUS];
    int                         last_decr[NCPUS];

    decl_simple_lock_data(,lock)        /* real-time clock device lock */
} rtclock;

static boolean_t        rtclock_initialized;

static uint64_t         rtclock_tick_deadline[NCPUS];
static uint64_t         rtclock_tick_interval;
static void     timespec_to_absolutetime(
                    mach_timespec_t     timespec,
                    uint64_t            *result);

static int      deadline_to_decrementer(
                    uint64_t            deadline,
                    uint64_t            now);

static void     rtclock_alarm_timer(
                    timer_call_param_t  p0,
                    timer_call_param_t  p1);
/* global data declarations */

#define RTC_TICKPERIOD      (NSEC_PER_SEC / HZ)

#define DECREMENTER_MAX     0x7FFFFFFFUL
#define DECREMENTER_MIN     0xAUL

natural_t       rtclock_decrementer_min;
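
/*
 * Illustrative note (not part of the original source): assuming the usual
 * HZ = 100, RTC_TICKPERIOD is NSEC_PER_SEC / 100 = 10,000,000 ns, i.e. a
 * 10 ms scheduler tick.  DECREMENTER_MAX is 0x7FFFFFFF because the PowerPC
 * decrementer is a 32-bit down-counter that raises its interrupt when the
 * sign bit becomes set, so larger countdowns must be split up.
 */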
/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)                     \
MACRO_BEGIN                             \
    (s) = splclock();                   \
    simple_lock(&rtclock.lock);         \
MACRO_END

#define UNLOCK_RTC(s)                   \
MACRO_BEGIN                             \
    simple_unlock(&rtclock.lock);       \
    splx(s);                            \
MACRO_END
static void
timebase_callback(
    struct timebase_freq_t  *freq)
{
    natural_t   numer, denom;
    int         n;
    spl_t       s;

    denom = freq->timebase_num;
    n = 9;
    while (!(denom % 10)) {
        if (n < 1)
            break;
        denom /= 10;
        n--;
    }

    numer = freq->timebase_den;
    while (n-- > 0) {
        numer *= 10;
    }

    LOCK_RTC(s);
    rtclock.timebase_const.numer = numer;
    rtclock.timebase_const.denom = denom;
    UNLOCK_RTC(s);
}
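
/*
 * Illustrative note (not part of the original source): the callback folds
 * the 10^9 nanoseconds-per-second factor into the numer/denom ratio, so
 * that nanoseconds = ticks * numer / denom.  For example, assuming a
 * 25 MHz timebase (timebase_num = 25000000, timebase_den = 1): denom
 * reduces to 25 after six divisions by 10 (n drops from 9 to 3), and
 * numer becomes 1 * 10^3 = 1000, giving
 * nanoseconds = ticks * 1000 / 25 = ticks * 40, i.e. 40 ns per tick.
 */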
/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
    if (cpu_number() != master_cpu)
        return (1);

    timer_call_setup(&rtclock.alarm_timer, rtclock_alarm_timer, NULL);

    simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK);

    PE_register_timebase_callback(timebase_callback);

    return (1);
}
/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
    uint64_t    abstime;
    int         decr, mycpu = cpu_number();

    if (mycpu != master_cpu) {
        if (rtclock_initialized == FALSE) {
            panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
        }
        /* Set decrementer and hence our next tick due */
        clock_get_uptime(&abstime);
        rtclock_tick_deadline[mycpu] = abstime;
        rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
        decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
        mtdec(decr);
        rtclock.last_decr[mycpu] = decr;

        return (1);
    }

    /*
     * Initialize non-zero clock structure values.
     */
    clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1,
                                            &rtclock_tick_interval);

    /* Set decrementer and our next tick due */
    clock_get_uptime(&abstime);
    rtclock_tick_deadline[mycpu] = abstime;
    rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
    decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
    mtdec(decr);
    rtclock.last_decr[mycpu] = decr;

    rtclock_initialized = TRUE;

    return (1);
}
#define UnsignedWide_to_scalar(x)   (*(uint64_t *)(x))
#define scalar_to_UnsignedWide(x)   (*(UnsignedWide *)(x))
/*
 * Perform a full 64 bit by 32 bit unsigned multiply,
 * yielding a 96 bit product.  The most significant
 * portion of the product is returned as a 64 bit
 * quantity, with the lower portion as a 32 bit word.
 */
static void
umul_64by32(
    UnsignedWide    now64,
    uint32_t        mult32,
    UnsignedWide    *result64,
    uint32_t        *result32)
{
    uint32_t    mid, mid2;

    asm volatile("  mullw %0,%1,%2" :
                        "=r" (*result32) :
                            "r" (now64.lo), "r" (mult32));

    asm volatile("  mullw %0,%1,%2" :
                        "=r" (mid2) :
                            "r" (now64.hi), "r" (mult32));
    asm volatile("  mulhwu %0,%1,%2" :
                        "=r" (mid) :
                            "r" (now64.lo), "r" (mult32));

    asm volatile("  mulhwu %0,%1,%2" :
                        "=r" (result64->hi) :
                            "r" (now64.hi), "r" (mult32));

    asm volatile("  addc %0,%2,%3; addze %1,%4" :
                        "=r" (result64->lo), "=r" (result64->hi) :
                            "r" (mid), "r" (mid2), "1" (result64->hi));
}
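
/*
 * Illustrative note (not part of the original source): with
 * now64 = (hi << 32) | lo, the 96 bit product decomposes as
 *
 *      now64 * mult32 = (hi * mult32 << 32) + (lo * mult32)
 *
 * result32 receives the low word of lo * mult32; mid and mid2 are the
 * overlapping middle words, summed with carry into result64.  A portable
 * sketch of the same computation, assuming 64-bit arithmetic is available:
 *
 *      uint64_t lo_prod = (uint64_t)now64.lo * mult32;
 *      uint64_t hi_prod = (uint64_t)now64.hi * mult32;
 *      *result32 = (uint32_t)lo_prod;
 *      UnsignedWide_to_scalar(result64) = hi_prod + (lo_prod >> 32);
 */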
/*
 * Perform a partial 64 bit by 32 bit unsigned multiply,
 * yielding a 64 bit product.  Only the least significant
 * 64 bits of the product are calculated and returned.
 */
static void
umul_64by32to64(
    UnsignedWide    now64,
    uint32_t        mult32,
    UnsignedWide    *result64)
{
    uint32_t    mid, mid2;

    asm volatile("  mullw %0,%1,%2" :
                        "=r" (result64->lo) :
                            "r" (now64.lo), "r" (mult32));

    asm volatile("  mullw %0,%1,%2" :
                        "=r" (mid2) :
                            "r" (now64.hi), "r" (mult32));
    asm volatile("  mulhwu %0,%1,%2" :
                        "=r" (mid) :
                            "r" (now64.lo), "r" (mult32));

    asm volatile("  add %0,%1,%2" :
                        "=r" (result64->hi) :
                            "r" (mid), "r" (mid2));
}
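
/*
 * Illustrative note (not part of the original source): this is the
 * truncating variant; any carry out of bit 63 is lost.  Equivalent
 * portable C, assuming 64-bit arithmetic:
 *
 *      UnsignedWide_to_scalar(result64) =
 *          UnsignedWide_to_scalar(&now64) * (uint64_t)mult32;
 *
 * i.e. the product reduced mod 2^64.
 */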
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 96 bit quotient.
 * The most significant portion of the quotient is
 * returned as a 64 bit quantity, with the lower
 * portion as a 32 bit word.
 */
static void
udiv_96by32(
    UnsignedWide    now64,
    uint32_t        now32,
    uint32_t        div32,
    UnsignedWide    *result64,
    uint32_t        *result32)
{
    UnsignedWide    t64;

    if (now64.hi > 0 || now64.lo >= div32) {
        UnsignedWide_to_scalar(result64) =
                        UnsignedWide_to_scalar(&now64) / div32;

        umul_64by32to64(*result64, div32, &t64);

        UnsignedWide_to_scalar(&t64) =
            UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

        *result32 = (((uint64_t)t64.lo << 32) | now32) / div32;
    }
    else {
        UnsignedWide_to_scalar(result64) =
                    (((uint64_t)now64.lo << 32) | now32) / div32;

        *result32 = result64->lo;
        result64->lo = result64->hi;
        result64->hi = 0;
    }
}
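
/*
 * Illustrative note (not part of the original source): this is schoolbook
 * long division on 32-bit "digits".  The dividend is the 96 bit value
 * (now64 << 32) | now32.  The upper 64 bits are divided first; the
 * remainder (at most div32 - 1, so it fits in 32 bits) is prepended to
 * now32 and divided again for the low quotient word.  For example,
 * dividing the 96 bit value 0x00000001_00000000_00000005 by 5 yields
 * result64 = 0x33333333 and result32 = 0x33333334.
 */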
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 64 bit quotient.
 * Any higher order bits of the quotient are simply
 * discarded.
 */
static void
udiv_96by32to64(
    UnsignedWide    now64,
    uint32_t        now32,
    uint32_t        div32,
    UnsignedWide    *result64)
{
    UnsignedWide    t64;

    if (now64.hi > 0 || now64.lo >= div32) {
        UnsignedWide_to_scalar(result64) =
                        UnsignedWide_to_scalar(&now64) / div32;

        umul_64by32to64(*result64, div32, &t64);

        UnsignedWide_to_scalar(&t64) =
            UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

        result64->hi = result64->lo;
        result64->lo = (((uint64_t)t64.lo << 32) | now32) / div32;
    }
    else {
        UnsignedWide_to_scalar(result64) =
                    (((uint64_t)now64.lo << 32) | now32) / div32;
    }
}
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 32 bit quotient,
 * and a 32 bit remainder.  Any higher order bits
 * of the quotient are simply discarded.
 */
static void
udiv_96by32to32and32(
    UnsignedWide    now64,
    uint32_t        now32,
    uint32_t        div32,
    uint32_t        *result32,
    uint32_t        *remain32)
{
    UnsignedWide    t64, u64;

    if (now64.hi > 0 || now64.lo >= div32) {
        UnsignedWide_to_scalar(&t64) =
                        UnsignedWide_to_scalar(&now64) / div32;

        umul_64by32to64(t64, div32, &t64);

        UnsignedWide_to_scalar(&t64) =
            UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

        UnsignedWide_to_scalar(&t64) = ((uint64_t)t64.lo << 32) | now32;

        UnsignedWide_to_scalar(&u64) =
                        UnsignedWide_to_scalar(&t64) / div32;

        *result32 = u64.lo;

        umul_64by32to64(u64, div32, &u64);

        *remain32 = UnsignedWide_to_scalar(&t64) -
                        UnsignedWide_to_scalar(&u64);
    }
    else {
        UnsignedWide_to_scalar(&t64) = ((uint64_t)now64.lo << 32) | now32;

        UnsignedWide_to_scalar(&u64) =
                        UnsignedWide_to_scalar(&t64) / div32;

        *result32 = u64.lo;

        umul_64by32to64(u64, div32, &u64);

        *remain32 = UnsignedWide_to_scalar(&t64) -
                        UnsignedWide_to_scalar(&u64);
    }
}
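
/*
 * Illustrative note (not part of the original source): the callers below
 * use this variant to split a nanosecond total into whole seconds and a
 * nanosecond remainder, e.g. 2,340,000,007 ns divided by NSEC_PER_SEC
 * gives *result32 = 2 (tv_sec) and *remain32 = 340,000,007 (tv_nsec).
 */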
/*
 * Get the clock device time. This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 *
 * SMP configurations - *the processor clocks are synchronised*
 */
kern_return_t
sysclk_gettime_internal(
    mach_timespec_t *time)  /* OUT */
{
    UnsignedWide    now;
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;

    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;

    clock_get_uptime((uint64_t *)&now);

    umul_64by32(now, numer, &t64, &t32);

    udiv_96by32(t64, t32, denom, &t64, &t32);

    udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
                            &time->tv_sec, &time->tv_nsec);

    return (KERN_SUCCESS);
}
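
/*
 * Illustrative note (not part of the original source): the conversion is
 * nanoseconds = ticks * numer / denom, carried out at 96 bit precision so
 * the intermediate product cannot overflow.  With the 25 MHz example above
 * (numer = 1000, denom = 25), a reading of 250,000,000 ticks becomes
 * 250e6 * 1000 / 25 = 10,000,000,000 ns, i.e. tv_sec = 10, tv_nsec = 0.
 */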
kern_return_t
sysclk_gettime(
    mach_timespec_t *time)  /* OUT */
{
    UnsignedWide    now;
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;

    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;

    clock_get_uptime((uint64_t *)&now);

    umul_64by32(now, numer, &t64, &t32);

    udiv_96by32(t64, t32, denom, &t64, &t32);

    udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
                            &time->tv_sec, &time->tv_nsec);

    return (KERN_SUCCESS);
}
/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,       /* OUT */
    mach_msg_type_number_t  *count)     /* IN/OUT */
{
    spl_t   s;

    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        LOCK_RTC(s);
        *(clock_res_t *) attr = RTC_TICKPERIOD;
        UNLOCK_RTC(s);
        break;

    default:
        return (KERN_INVALID_VALUE);
    }
    return (KERN_SUCCESS);
}
/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
    mach_timespec_t     *deadline)
{
    uint64_t    abstime;

    timespec_to_absolutetime(*deadline, &abstime);
    timer_call_enter(&rtclock.alarm_timer, abstime);
}
/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
    return (1);
}

/*
 * Initialize the calendar clock.
 */
int
calend_init(void)
{
    if (cpu_number() != master_cpu)
        return (1);

    return (1);
}
/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
    mach_timespec_t *curr_time) /* OUT */
{
    spl_t   s;

    LOCK_RTC(s);
    if (!rtclock.calend_is_set) {
        UNLOCK_RTC(s);
        return (KERN_FAILURE);
    }

    (void) sysclk_gettime_internal(curr_time);
    ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset);
    UNLOCK_RTC(s);

    return (KERN_SUCCESS);
}
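
/*
 * Illustrative note (not part of the original source): calendar time is
 * maintained as uptime plus an offset rather than as its own counter.
 * If uptime reads 100.25 s and calend_offset holds 978307200.00 s
 * (2001-01-01 00:00:00 GMT minus boot time), the reported calendar time
 * is 978307300.25 s.
 */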
/*
 * Set the current clock time.
 */
kern_return_t
calend_settime(
    mach_timespec_t *new_time)
{
    mach_timespec_t curr_time;
    spl_t           s;

    LOCK_RTC(s);
    (void) sysclk_gettime_internal(&curr_time);
    rtclock.calend_offset = *new_time;
    SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
    rtclock.calend_is_set = TRUE;
    UNLOCK_RTC(s);

    PESetGMTTimeOfDay(new_time->tv_sec);

    return (KERN_SUCCESS);
}
/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,       /* OUT */
    mach_msg_type_number_t  *count)     /* IN/OUT */
{
    spl_t   s;

    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
        LOCK_RTC(s);
        *(clock_res_t *) attr = RTC_TICKPERIOD;
        UNLOCK_RTC(s);
        break;

    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        *(clock_res_t *) attr = 0;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }
    return (KERN_SUCCESS);
}
void
clock_adjust_calendar(
    clock_res_t nsec)
{
    spl_t   s;

    LOCK_RTC(s);
    if (rtclock.calend_is_set)
        ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
    UNLOCK_RTC(s);
}
void
clock_initialize_calendar(void)
{
    mach_timespec_t curr_time;
    long            seconds = PEGetGMTTimeOfDay();
    spl_t           s;

    LOCK_RTC(s);
    (void) sysclk_gettime_internal(&curr_time);
    if (curr_time.tv_nsec < 500*USEC_PER_SEC)
        rtclock.calend_offset.tv_sec = seconds;
    else
        rtclock.calend_offset.tv_sec = seconds + 1;
    rtclock.calend_offset.tv_nsec = 0;
    SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
    rtclock.calend_is_set = TRUE;
    UNLOCK_RTC(s);
}
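
/*
 * Illustrative note (not part of the original source): PEGetGMTTimeOfDay()
 * has only one-second resolution, so the fractional part of the current
 * uptime is rounded to the nearest second before the offset is computed;
 * 500*USEC_PER_SEC nanoseconds is exactly half a second.
 */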
mach_timespec_t
clock_get_calendar_offset(void)
{
    mach_timespec_t result = MACH_TIMESPEC_ZERO;
    spl_t           s;

    LOCK_RTC(s);
    if (rtclock.calend_is_set)
        result = rtclock.calend_offset;
    UNLOCK_RTC(s);

    return (result);
}
void
clock_timebase_info(
    mach_timebase_info_t    info)
{
    spl_t   s;

    LOCK_RTC(s);
    *info = rtclock.timebase_const;
    UNLOCK_RTC(s);
}
void
clock_set_timer_deadline(
    uint64_t    deadline)
{
    uint64_t                abstime;
    int                     decr, mycpu;
    struct rtclock_timer    *mytimer;
    spl_t                   s;

    s = splclock();
    mycpu = cpu_number();
    mytimer = &rtclock.timer[mycpu];
    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    mytimer->deadline = deadline;
    mytimer->is_set = TRUE;
    if ( mytimer->deadline < rtclock_tick_deadline[mycpu] ) {
        decr = deadline_to_decrementer(mytimer->deadline, abstime);
        if ( rtclock_decrementer_min != 0 &&
                rtclock_decrementer_min < (natural_t)decr )
            decr = rtclock_decrementer_min;

        mtdec(decr);
        rtclock.last_decr[mycpu] = decr;

        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                                    | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
    }
    splx(s);
}
void
clock_set_timer_func(
    clock_timer_func_t  func)
{
    spl_t   s;

    LOCK_RTC(s);
    if (rtclock.timer_expire == NULL)
        rtclock.timer_expire = func;
    UNLOCK_RTC(s);
}
/*
 * Reset the clock device. This causes the realtime clock
 * device to reload its mode and count value (frequency).
 */
void
rtclock_reset(void)
{
    return;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
    int             device,
    struct savearea *ssp,
    spl_t           old_spl)
{
    uint64_t                abstime;
    int                     decr[3], mycpu = cpu_number();
    struct rtclock_timer    *mytimer = &rtclock.timer[mycpu];

    /*
     * We may receive interrupts too early, we must reject them.
     */
    if (rtclock_initialized == FALSE) {
        mtdec(DECREMENTER_MAX);     /* Max the decrementer if not init */
        return;
    }

    decr[1] = decr[2] = DECREMENTER_MAX;

    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    if ( rtclock_tick_deadline[mycpu] <= abstime ) {
        clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
                                            &rtclock_tick_deadline[mycpu]);
        hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
    }

    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    if ( mytimer->is_set &&
            mytimer->deadline <= abstime ) {
        mytimer->is_set = FALSE;
        (*rtclock.timer_expire)(abstime);
    }

    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

    if (mytimer->is_set)
        decr[2] = deadline_to_decrementer(mytimer->deadline, abstime);

    if (decr[1] > decr[2])
        decr[1] = decr[2];

    if ( rtclock_decrementer_min != 0 &&
            rtclock_decrementer_min < (natural_t)decr[1] )
        decr[1] = rtclock_decrementer_min;

    mtdec(decr[1]);
    rtclock.last_decr[mycpu] = decr[1];

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                                | DBG_FUNC_NONE, decr[1], 3, 0, 0, 0);
}
static void
rtclock_alarm_timer(
    timer_call_param_t  p0,
    timer_call_param_t  p1)
{
    mach_timespec_t timestamp;

    (void) sysclk_gettime(&timestamp);

    clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}
void
clock_get_uptime(
    uint64_t    *result0)
{
    UnsignedWide    *result = (UnsignedWide *)result0;
    uint32_t        hi, lo, hic;

    do {
        asm volatile("  mftbu %0" : "=r" (hi));
        asm volatile("  mftb %0" : "=r" (lo));
        asm volatile("  mftbu %0" : "=r" (hic));
    } while (hic != hi);

    result->lo = lo;
    result->hi = hi;
}
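
/*
 * Illustrative note (not part of the original source): the timebase is two
 * 32-bit registers, so a 64-bit read can be torn if TBL carries into TBU
 * between the two reads.  Reading TBU, then TBL, then TBU again, and
 * retrying when the two TBU samples differ guarantees a consistent pair:
 * a carry at 0x00000001_FFFFFFFF -> 0x00000002_00000000 would otherwise
 * yield the bogus value 0x00000001_00000000.
 */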
static int
deadline_to_decrementer(
    uint64_t    deadline,
    uint64_t    now)
{
    uint64_t    delt;

    if (deadline <= now)
        return DECREMENTER_MIN;
    else {
        delt = deadline - now;

        return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
                ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
    }
}
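
/*
 * Illustrative note (not part of the original source): the result is
 * clamped to [DECREMENTER_MIN, DECREMENTER_MAX].  A deadline already in
 * the past yields DECREMENTER_MIN (0xA ticks) so the interrupt fires
 * almost immediately; a deadline more than 0x7FFFFFFF ticks away is
 * capped, and the periodic tick re-arms the remainder.  For example,
 * delt = 1000 arms the decrementer with 999.
 */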
static void
timespec_to_absolutetime(
    mach_timespec_t timespec,
    uint64_t        *result0)
{
    UnsignedWide    *result = (UnsignedWide *)result0;
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    asm volatile("  mullw %0,%1,%2" :
                        "=r" (t64.lo) :
                            "r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

    asm volatile("  mulhwu %0,%1,%2" :
                        "=r" (t64.hi) :
                            "r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

    UnsignedWide_to_scalar(&t64) += timespec.tv_nsec;

    umul_64by32(t64, denom, &t64, &t32);

    udiv_96by32(t64, t32, numer, &t64, &t32);

    result->hi = t64.lo;
    result->lo = t32;
}
void
clock_interval_to_deadline(
    uint32_t    interval,
    uint32_t    scale_factor,
    uint64_t    *result)
{
    uint64_t    abstime;

    clock_get_uptime(result);

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    *result += abstime;
}
void
clock_interval_to_absolutetime_interval(
    uint32_t    interval,
    uint32_t    scale_factor,
    uint64_t    *result0)
{
    UnsignedWide    *result = (UnsignedWide *)result0;
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    asm volatile("  mullw %0,%1,%2" :
                        "=r" (t64.lo) :
                            "r" (interval), "r" (scale_factor));
    asm volatile("  mulhwu %0,%1,%2" :
                        "=r" (t64.hi) :
                            "r" (interval), "r" (scale_factor));

    umul_64by32(t64, denom, &t64, &t32);

    udiv_96by32(t64, t32, numer, &t64, &t32);

    result->hi = t64.lo;
    result->lo = t32;
}
void
clock_absolutetime_interval_to_deadline(
    uint64_t    abstime,
    uint64_t    *result)
{
    clock_get_uptime(result);

    *result += abstime;
}
void
absolutetime_to_nanoseconds(
    uint64_t    abstime,
    uint64_t    *result)
{
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    UnsignedWide_to_scalar(&t64) = abstime;

    umul_64by32(t64, numer, &t64, &t32);

    udiv_96by32to64(t64, t32, denom, (void *)result);
}
void
nanoseconds_to_absolutetime(
    uint64_t    nanoseconds,
    uint64_t    *result)
{
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    UnsignedWide_to_scalar(&t64) = nanoseconds;

    umul_64by32(t64, denom, &t64, &t32);

    udiv_96by32to64(t64, t32, numer, (void *)result);
}
/*
 * Spin-loop delay primitives.
 */
void
delay_for_interval(
    uint32_t    interval,
    uint32_t    scale_factor)
{
    uint64_t    now, end;

    clock_interval_to_deadline(interval, scale_factor, &end);

    do {
        clock_get_uptime(&now);
    } while (now < end);
}
void
clock_delay_until(
    uint64_t    deadline)
{
    uint64_t    now;

    do {
        clock_get_uptime(&now);
    } while (now < deadline);
}
void
delay(
    int     usec)
{
    delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}