/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *	File:		rtclock.c
 *	Purpose:	Routines for handling the machine dependent
 *				real-time clock.
 */
#include <libkern/OSTypes.h>

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>			/* spl_t, splclock(), splx() */

#include <machine/mach_param.h>	/* HZ */
#include <ppc/proc_reg.h>

#include <pexpert/pexpert.h>

#include <sys/kdebug.h>
int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	sysclk_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

void	sysclk_setalarm(
	mach_timespec_t			*deadline);

struct clock_ops sysclk_ops = {
	sysclk_config,		sysclk_init,
	sysclk_gettime,		0,
	sysclk_getattr,		0,
	sysclk_setalarm,
};
int		calend_config(void);

int		calend_init(void);

kern_return_t	calend_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_settime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

struct clock_ops calend_ops = {
	calend_config,		calend_init,
	calend_gettime,		calend_settime,
	calend_getattr,		0,
	0,
};
/* local data declarations */

static struct rtclock {
	mach_timespec_t		calend_offset;
	boolean_t			calend_is_set;

	mach_timebase_info_data_t	timebase_const;

	struct rtclock_timer {
		uint64_t			deadline;
		boolean_t			is_set;
	}					timer[NCPUS];

	clock_timer_func_t	timer_expire;

	timer_call_data_t	alarm[NCPUS];

	/* debugging */
	uint64_t			last_abstime[NCPUS];
	int					last_decr[NCPUS];

	decl_simple_lock_data(,lock)	/* real-time clock device lock */
} rtclock;

static boolean_t	rtclock_initialized;

static uint64_t		rtclock_tick_deadline[NCPUS];
static uint64_t		rtclock_tick_interval;
static void		timespec_to_absolutetime(
					mach_timespec_t		timespec,
					uint64_t			*result);

static int		deadline_to_decrementer(
					uint64_t			deadline,
					uint64_t			now);

static void		rtclock_alarm_timer(
					timer_call_param_t	p0,
					timer_call_param_t	p1);
/* global data declarations */

#define RTC_TICKPERIOD	(NSEC_PER_SEC / HZ)

#define DECREMENTER_MAX		0x7FFFFFFFUL
#define DECREMENTER_MIN		0xAUL

natural_t		rtclock_decrementer_min;
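/*
 * Worked example (illustrative): with HZ at 100, RTC_TICKPERIOD is
 * 1,000,000,000 / 100 = 10,000,000 ns, i.e. a 10 ms periodic tick.
 * DECREMENTER_MAX is the largest count the 32 bit decrementer can be
 * loaded with without going negative (2^31 - 1 timebase ticks).
 */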
/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)					\
MACRO_BEGIN							\
	(s) = splclock();				\
	simple_lock(&rtclock.lock);		\
MACRO_END

#define UNLOCK_RTC(s)				\
MACRO_BEGIN							\
	simple_unlock(&rtclock.lock);	\
	splx(s);						\
MACRO_END
static void
timebase_callback(
	struct timebase_freq_t	*freq)
{
	natural_t	numer, denom;
	int			n;
	spl_t		s;

	/*
	 * Convert the reported timebase ratio (timebase_num ticks per
	 * timebase_den seconds) into a nanoseconds-per-tick constant,
	 * numer / denom.  Trailing decimal zeros of the tick rate are
	 * cancelled against the 10^9 ns-per-second factor to keep both
	 * terms small.
	 */
	denom = freq->timebase_num;
	n = 9;
	while (!(denom % 10)) {
		if (n < 1)
			break;
		denom /= 10;
		n--;
	}

	numer = freq->timebase_den;
	while (n-- > 0)
		numer *= 10;

	LOCK_RTC(s);
	rtclock.timebase_const.numer = numer;
	rtclock.timebase_const.denom = denom;
	UNLOCK_RTC(s);
}
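/*
 * Worked example (illustrative values): for a 25 MHz timebase reported
 * as timebase_num = 25000000, timebase_den = 1, the loop strips six
 * trailing zeros (denom becomes 25) and scales the numerator by the
 * remaining 10^3 (numer becomes 1000).  The resulting constant,
 * 1000 / 25 = 40, is exactly the number of nanoseconds per timebase
 * tick at 25 MHz.
 */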
/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
	int		i;

	if (cpu_number() != master_cpu)
		return (1);

	for (i = 0; i < NCPUS; i++)
		timer_call_setup(&rtclock.alarm[i], rtclock_alarm_timer, NULL);

	simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK);

	PE_register_timebase_callback(timebase_callback);

	return (1);
}
/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
	uint64_t	abstime;
	int			decr, mycpu = cpu_number();

	if (mycpu != master_cpu) {
		if (rtclock_initialized == FALSE) {
			panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
		}
		/* Set decrementer and hence our next tick due */
		clock_get_uptime(&abstime);
		rtclock_tick_deadline[mycpu] = abstime;
		rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
		decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
		mtdec(decr);
		rtclock.last_decr[mycpu] = decr;

		return (1);
	}

	/*
	 * Initialize non-zero clock structure values.
	 */
	clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1,
											&rtclock_tick_interval);
	/* Set decrementer and our next tick due */
	clock_get_uptime(&abstime);
	rtclock_tick_deadline[mycpu] = abstime;
	rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
	decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
	mtdec(decr);
	rtclock.last_decr[mycpu] = decr;

	rtclock_initialized = TRUE;

	return (1);
}
#define UnsignedWide_to_scalar(x)	(*(uint64_t *)(x))
#define scalar_to_UnsignedWide(x)	(*(UnsignedWide *)(x))

/*
 * Perform a full 64 bit by 32 bit unsigned multiply,
 * yielding a 96 bit product.  The most significant
 * portion of the product is returned as a 64 bit
 * quantity, with the lower portion as a 32 bit word.
 */
static void
umul_64by32(
	UnsignedWide	now64,
	uint32_t		mult32,
	UnsignedWide	*result64,
	uint32_t		*result32)
{
	uint32_t	mid, mid2;

	asm volatile("	mullw %0,%1,%2" :
			"=r" (*result32) :
			"r" (now64.lo), "r" (mult32));

	asm volatile("	mullw %0,%1,%2" :
			"=r" (mid2) :
			"r" (now64.hi), "r" (mult32));
	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (mid) :
			"r" (now64.lo), "r" (mult32));

	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (result64->hi) :
			"r" (now64.hi), "r" (mult32));

	asm volatile("	addc %0,%2,%3; addze %1,%4" :
			"=r" (result64->lo), "=r" (result64->hi) :
			"r" (mid), "r" (mid2), "1" (result64->hi));
}
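/*
 * Illustrative sketch (hypothetical, not part of the original driver):
 * the same 96 bit product expressed in portable C, useful for checking
 * the inline-assembly version above.  product = now64 * mult32, with
 * bits 32..95 returned in *result64 and bits 0..31 in *result32.
 */
static void
umul_64by32_portable(
	UnsignedWide	now64,
	uint32_t		mult32,
	UnsignedWide	*result64,
	uint32_t		*result32)
{
	uint64_t	lo = (uint64_t)now64.lo * mult32;	/* low partial product */
	uint64_t	hi = (uint64_t)now64.hi * mult32;	/* high partial product */

	*result32 = (uint32_t)lo;							/* bits 0..31 */
	UnsignedWide_to_scalar(result64) = hi + (lo >> 32);	/* bits 32..95 */
}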
/*
 * Perform a partial 64 bit by 32 bit unsigned multiply,
 * yielding a 64 bit product.  Only the least significant
 * 64 bits of the product are calculated and returned.
 */
static void
umul_64by32to64(
	UnsignedWide	now64,
	uint32_t		mult32,
	UnsignedWide	*result64)
{
	uint32_t	mid, mid2;

	asm volatile("	mullw %0,%1,%2" :
			"=r" (result64->lo) :
			"r" (now64.lo), "r" (mult32));

	asm volatile("	mullw %0,%1,%2" :
			"=r" (mid2) :
			"r" (now64.hi), "r" (mult32));
	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (mid) :
			"r" (now64.lo), "r" (mult32));

	asm volatile("	add %0,%1,%2" :
			"=r" (result64->hi) :
			"r" (mid), "r" (mid2));
}
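/*
 * Note (illustrative): in portable terms the routine above is just the
 * truncating 64 bit multiply
 *
 *	UnsignedWide_to_scalar(result64) =
 *		UnsignedWide_to_scalar(&now64) * mult32;
 */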
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 96 bit quotient.
 * The most significant portion of the quotient is
 * returned as a 64 bit quantity, with the lower
 * portion as a 32 bit word.
 */
static void
udiv_96by32(
	UnsignedWide	now64,
	uint32_t		now32,
	uint32_t		div32,
	UnsignedWide	*result64,
	uint32_t		*result32)
{
	UnsignedWide	t64;

	if (now64.hi > 0 || now64.lo >= div32) {
		UnsignedWide_to_scalar(result64) =
			UnsignedWide_to_scalar(&now64) / div32;

		umul_64by32to64(*result64, div32, &t64);

		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		*result32 = (((uint64_t)t64.lo << 32) | now32) / div32;
	}
	else {
		UnsignedWide_to_scalar(result64) =
			(((uint64_t)now64.lo << 32) | now32) / div32;

		*result32 = result64->lo;
		result64->lo = result64->hi;
		result64->hi = 0;
	}
}
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 64 bit quotient.
 * Any higher order bits of the quotient are simply
 * discarded.
 */
static void
udiv_96by32to64(
	UnsignedWide	now64,
	uint32_t		now32,
	uint32_t		div32,
	UnsignedWide	*result64)
{
	UnsignedWide	t64;

	if (now64.hi > 0 || now64.lo >= div32) {
		UnsignedWide_to_scalar(result64) =
			UnsignedWide_to_scalar(&now64) / div32;

		umul_64by32to64(*result64, div32, &t64);

		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		result64->hi = result64->lo;
		result64->lo = (((uint64_t)t64.lo << 32) | now32) / div32;
	}
	else {
		UnsignedWide_to_scalar(result64) =
			(((uint64_t)now64.lo << 32) | now32) / div32;
	}
}
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 32 bit quotient,
 * and a 32 bit remainder.  Any higher order bits
 * of the quotient are simply discarded.
 */
static void
udiv_96by32to32and32(
	UnsignedWide	now64,
	uint32_t		now32,
	uint32_t		div32,
	uint32_t		*result32,
	uint32_t		*remain32)
{
	UnsignedWide	t64, u64;

	if (now64.hi > 0 || now64.lo >= div32) {
		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) / div32;

		umul_64by32to64(t64, div32, &t64);

		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		UnsignedWide_to_scalar(&t64) = ((uint64_t)t64.lo << 32) | now32;

		UnsignedWide_to_scalar(&u64) =
			UnsignedWide_to_scalar(&t64) / div32;

		*result32 = u64.lo;

		umul_64by32to64(u64, div32, &u64);

		*remain32 = UnsignedWide_to_scalar(&t64) -
					UnsignedWide_to_scalar(&u64);
	}
	else {
		UnsignedWide_to_scalar(&t64) = ((uint64_t)now64.lo << 32) | now32;

		UnsignedWide_to_scalar(&u64) =
			UnsignedWide_to_scalar(&t64) / div32;

		*result32 = u64.lo;

		umul_64by32to64(u64, div32, &u64);

		*remain32 = UnsignedWide_to_scalar(&t64) -
					UnsignedWide_to_scalar(&u64);
	}
}
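/*
 * Illustrative sketch (hypothetical, not part of the original driver):
 * the long-division step shared by the udiv_96by32 family, in portable
 * C.  The 96 bit dividend is the concatenation (now64:now32); the
 * 96 bit quotient comes back as a 64 bit upper part and a 32 bit low
 * word.
 */
static void
udiv_96by32_portable(
	uint64_t	now64,
	uint32_t	now32,
	uint32_t	div32,
	uint64_t	*q64,
	uint32_t	*q32)
{
	uint64_t	rem = now64 % div32;	/* remainder is < div32 < 2^32 */

	*q64 = now64 / div32;				/* upper 64 quotient bits */
	*q32 = (uint32_t)(((rem << 32) | now32) / div32);	/* fold remainder in */
}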
/*
 * Get the clock device time. This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 *
 * SMP configurations - *the processor clocks are synchronised*
 */
kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*time)	/* OUT */
{
	UnsignedWide	now;
	UnsignedWide	t64;
	uint32_t		t32;
	uint32_t		numer, denom;

	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;

	clock_get_uptime((uint64_t *)&now);

	umul_64by32(now, numer, &t64, &t32);

	udiv_96by32(t64, t32, denom, &t64, &t32);

	udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
						 &time->tv_sec, &time->tv_nsec);

	return (KERN_SUCCESS);
}
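/*
 * Overflow note (illustrative): nanoseconds = ticks * numer / denom is
 * computed through a 96 bit intermediary because a 64 bit tick count
 * multiplied by numer (e.g. 1000 in the 25 MHz example above) can
 * exceed 64 bits; only after the division by denom does the value fit
 * again.  The final udiv_96by32to32and32() by NSEC_PER_SEC splits the
 * nanosecond total into the tv_sec / tv_nsec pair.
 */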
kern_return_t
sysclk_gettime(
	mach_timespec_t	*time)	/* OUT */
{
	UnsignedWide	now;
	UnsignedWide	t64;
	uint32_t		t32;
	uint32_t		numer, denom;
	spl_t			s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	clock_get_uptime((uint64_t *)&now);

	umul_64by32(now, numer, &t64, &t32);

	udiv_96by32(t64, t32, denom, &t64, &t32);

	udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
						 &time->tv_sec, &time->tv_nsec);

	return (KERN_SUCCESS);
}
/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,	/* OUT */
	mach_msg_type_number_t	*count)	/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);

	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		LOCK_RTC(s);
		*(clock_res_t *) attr = RTC_TICKPERIOD;
		UNLOCK_RTC(s);
		break;

	default:
		return (KERN_INVALID_VALUE);
	}

	return (KERN_SUCCESS);
}
/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
	mach_timespec_t	*deadline)
{
	uint64_t	abstime;

	timespec_to_absolutetime(*deadline, &abstime);
	timer_call_enter(&rtclock.alarm[cpu_number()], abstime);
}
/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
	return (1);
}

/*
 * Initialize the calendar clock.
 */
int
calend_init(void)
{
	if (cpu_number() != master_cpu)
		return (1);

	return (1);
}
/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*curr_time)	/* OUT */
{
	spl_t	s;

	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		UNLOCK_RTC(s);
		return (KERN_FAILURE);
	}

	(void) sysclk_gettime_internal(curr_time);
	ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset);
	UNLOCK_RTC(s);

	return (KERN_SUCCESS);
}
/*
 * Set the current clock time.
 */
kern_return_t
calend_settime(
	mach_timespec_t	*new_time)
{
	mach_timespec_t	curr_time;
	spl_t			s;

	LOCK_RTC(s);
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset = *new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	UNLOCK_RTC(s);

	PESetGMTTimeOfDay(new_time->tv_sec);

	return (KERN_SUCCESS);
}
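/*
 * Worked example (illustrative values): if the system clock reads an
 * uptime of 100 s when the calendar is set to 1,000,000 s, the offset
 * stored is 999,900 s.  A later calend_gettime() at uptime 250 s then
 * returns 250 + 999,900 = 1,000,150 s, i.e. the calendar advances in
 * lockstep with the free-running system clock.
 */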
/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,	/* OUT */
	mach_msg_type_number_t	*count)	/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);

	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
		LOCK_RTC(s);
		*(clock_res_t *) attr = RTC_TICKPERIOD;
		UNLOCK_RTC(s);
		break;

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}

	return (KERN_SUCCESS);
}
void
clock_adjust_calendar(
	clock_res_t	nsec)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock.calend_is_set)
		ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
	UNLOCK_RTC(s);
}
void
clock_initialize_calendar(void)
{
	mach_timespec_t	curr_time;
	long			seconds = PEGetGMTTimeOfDay();
	spl_t			s;

	LOCK_RTC(s);
	(void) sysclk_gettime_internal(&curr_time);

	/* round the uptime to the nearest second */
	if (curr_time.tv_nsec < 500*USEC_PER_SEC)
		rtclock.calend_offset.tv_sec = seconds;
	else
		rtclock.calend_offset.tv_sec = seconds + 1;
	rtclock.calend_offset.tv_nsec = 0;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	UNLOCK_RTC(s);
}
mach_timespec_t
clock_get_calendar_offset(void)
{
	mach_timespec_t	result = MACH_TIMESPEC_ZERO;
	spl_t			s;

	LOCK_RTC(s);
	if (rtclock.calend_is_set)
		result = rtclock.calend_offset;
	UNLOCK_RTC(s);

	return (result);
}
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	spl_t	s;

	LOCK_RTC(s);
	*info = rtclock.timebase_const;
	UNLOCK_RTC(s);
}
void
clock_set_timer_deadline(
	uint64_t				deadline)
{
	uint64_t				abstime;
	int						decr, mycpu;
	struct rtclock_timer	*mytimer;
	spl_t					s;

	s = splclock();
	mycpu = cpu_number();
	mytimer = &rtclock.timer[mycpu];
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;

	/*
	 * Reprogram the decrementer only if the new deadline arrives
	 * before the next periodic tick would.
	 */
	if ( mytimer->deadline < rtclock_tick_deadline[mycpu] ) {
		decr = deadline_to_decrementer(mytimer->deadline, abstime);
		if ( rtclock_decrementer_min != 0		&&
			 rtclock_decrementer_min < (natural_t)decr )
			decr = rtclock_decrementer_min;

		mtdec(decr);
		rtclock.last_decr[mycpu] = decr;

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
							  | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
	}
	splx(s);
}
void
clock_set_timer_func(
	clock_timer_func_t	func)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock.timer_expire == NULL)
		rtclock.timer_expire = func;
	UNLOCK_RTC(s);
}
/*
 * Reset the clock device. This causes the realtime clock
 * device to reload its mode and count value (frequency).
 */
void
rtclock_reset(void)
{
	return;
}
/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
	int						device,
	struct ppc_saved_state	*ssp,
	spl_t					old_spl)
{
	uint64_t				abstime;
	int						decr[3], mycpu = cpu_number();
	struct rtclock_timer	*mytimer = &rtclock.timer[mycpu];

	/*
	 * We may receive interrupts too early, we must reject them.
	 */
	if (rtclock_initialized == FALSE) {
		mtdec(DECREMENTER_MAX);		/* Max the decrementer if not init */
		return;
	}

	decr[1] = decr[2] = DECREMENTER_MAX;

	/* Service the periodic tick if its deadline has passed */
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	if ( rtclock_tick_deadline[mycpu] <= abstime ) {
		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
										  &rtclock_tick_deadline[mycpu]);
		hertz_tick(USER_MODE(ssp->srr1), ssp->srr0);
	}

	/* Expire the one-shot timer if its deadline has passed */
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	if ( mytimer->is_set &&
		 mytimer->deadline <= abstime ) {
		mytimer->is_set = FALSE;
		(*rtclock.timer_expire)(abstime);
	}

	/* Reprogram the decrementer for the nearer of the two deadlines */
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

	if (mytimer->is_set)
		decr[2] = deadline_to_decrementer(mytimer->deadline, abstime);

	if (decr[1] > decr[2])
		decr[1] = decr[2];

	if ( rtclock_decrementer_min != 0			&&
		 rtclock_decrementer_min < (natural_t)decr[1] )
		decr[1] = rtclock_decrementer_min;

	mtdec(decr[1]);
	rtclock.last_decr[mycpu] = decr[1];

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
						  | DBG_FUNC_NONE, decr[1], 3, 0, 0, 0);
}
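/*
 * Worked example (illustrative values): if the periodic tick is due in
 * 5000 timebase ticks and an armed one-shot timer in 1200, decr[1] is
 * reduced to 1200, so the next decrementer interrupt fires for the
 * timer; the periodic tick is picked up on a subsequent pass.
 */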
static void
rtclock_alarm_timer(
	timer_call_param_t	p0,
	timer_call_param_t	p1)
{
	mach_timespec_t		timestamp;

	(void) sysclk_gettime(&timestamp);

	clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}
void
clock_get_uptime(
	uint64_t		*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	uint32_t		hi, lo, hic;

	/*
	 * Read the 64 bit timebase with 32 bit operations: reread the
	 * upper half and retry if it changed while the lower half was
	 * being read.
	 */
	do {
		asm volatile("	mftbu %0" : "=r" (hi));
		asm volatile("	mftb %0" : "=r" (lo));
		asm volatile("	mftbu %0" : "=r" (hic));
	} while (hic != hi);

	result->lo = lo;
	result->hi = hi;
}
static int
deadline_to_decrementer(
	uint64_t	deadline,
	uint64_t	now)
{
	uint64_t	delt;

	if (deadline <= now)
		return DECREMENTER_MIN;
	else {
		delt = deadline - now;
		return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
				((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
	}
}
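/*
 * Worked example (illustrative values): a deadline 100 ticks in the
 * future yields 100 - 1 = 99; a deadline in the past, or closer than
 * DECREMENTER_MIN (0xA), yields 10; anything 2^31 ticks out or further
 * is clamped to DECREMENTER_MAX (0x7FFFFFFF).
 */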
static void
timespec_to_absolutetime(
	mach_timespec_t	timespec,
	uint64_t		*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	UnsignedWide	t64;
	uint32_t		t32;
	uint32_t		numer, denom;
	spl_t			s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	/* 64 bit nanosecond total: tv_sec * NSEC_PER_SEC + tv_nsec */
	asm volatile("	mullw %0,%1,%2" :
			"=r" (t64.lo) :
			"r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (t64.hi) :
			"r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

	UnsignedWide_to_scalar(&t64) += timespec.tv_nsec;

	/* convert nanoseconds to timebase ticks: ns * denom / numer */
	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32(t64, t32, numer, &t64, &t32);

	result->hi = t64.lo;
	result->lo = t32;
}
void
clock_interval_to_deadline(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t	abstime;

	clock_get_uptime(result);

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result += abstime;
}
void
clock_interval_to_absolutetime_interval(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	UnsignedWide	t64;
	uint32_t		t32;
	uint32_t		numer, denom;
	spl_t			s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	/* 64 bit nanosecond total: interval * scale_factor */
	asm volatile("	mullw %0,%1,%2" :
			"=r" (t64.lo) :
			"r" (interval), "r" (scale_factor));
	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (t64.hi) :
			"r" (interval), "r" (scale_factor));

	/* convert nanoseconds to timebase ticks: ns * denom / numer */
	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32(t64, t32, numer, &t64, &t32);

	result->hi = t64.lo;
	result->lo = t32;
}
void
clock_absolutetime_interval_to_deadline(
	uint64_t	abstime,
	uint64_t	*result)
{
	clock_get_uptime(result);

	*result += abstime;
}
void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	UnsignedWide	t64;
	uint32_t		t32;
	uint32_t		numer, denom;
	spl_t			s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	UnsignedWide_to_scalar(&t64) = abstime;

	umul_64by32(t64, numer, &t64, &t32);

	udiv_96by32to64(t64, t32, denom, (void *)result);
}
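/*
 * Usage sketch (hypothetical, not part of the original driver):
 * measuring an elapsed interval in nanoseconds with the primitives
 * above.
 */
static uint64_t
elapsed_nanoseconds_example(void)
{
	uint64_t	start, end, delta_ns;

	clock_get_uptime(&start);
	/* ... work being timed ... */
	clock_get_uptime(&end);

	absolutetime_to_nanoseconds(end - start, &delta_ns);

	return (delta_ns);
}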
void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	UnsignedWide	t64;
	uint32_t		t32;
	uint32_t		numer, denom;
	spl_t			s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	UnsignedWide_to_scalar(&t64) = nanoseconds;

	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32to64(t64, t32, numer, (void *)result);
}
/*
 * Spin-loop delay primitives.
 */
void
delay_for_interval(
	uint32_t	interval,
	uint32_t	scale_factor)
{
	uint64_t	now, end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	do {
		clock_get_uptime(&now);
	} while (now < end);
}

void
clock_delay_until(
	uint64_t	deadline)
{
	uint64_t	now;

	do {
		clock_get_uptime(&now);
	} while (now < deadline);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
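/*
 * Usage sketch (illustrative): a 100 microsecond busy-wait, and the
 * equivalent spelled through delay():
 *
 *	delay_for_interval(100, NSEC_PER_USEC);
 *	delay(100);
 */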