/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:    rtclock.c
 * Purpose: Routines for handling the machine dependent
 *          real-time clock.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

#include <machine/mach_param.h>    /* HZ */
#include <ppc/proc_reg.h>

#include <pexpert/pexpert.h>

/* XXX power management hacks XXX */
#include <IOKit/IOReturn.h>
#include <IOKit/IOMessage.h>

extern void *registerSleepWakeInterest(
    void    *callback,
    void    *target,
    void    *refCon);
/* XXX power management hacks XXX */

#include <sys/kdebug.h>

int sysclk_config(void);

int sysclk_init(void);

kern_return_t sysclk_gettime(
    mach_timespec_t *cur_time);

kern_return_t sysclk_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,
    mach_msg_type_number_t  *count);

void sysclk_setalarm(
    mach_timespec_t *deadline);

struct clock_ops sysclk_ops = {
    sysclk_config,  sysclk_init,
    sysclk_gettime, 0,
    sysclk_getattr, 0,
    sysclk_setalarm,
};

int calend_config(void);

int calend_init(void);

kern_return_t calend_gettime(
    mach_timespec_t *cur_time);

kern_return_t calend_settime(
    mach_timespec_t *cur_time);

kern_return_t calend_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,
    mach_msg_type_number_t  *count);

struct clock_ops calend_ops = {
    calend_config,  calend_init,
    calend_gettime, calend_settime,
    calend_getattr, 0,
    0,
};

/* local data declarations */

static struct rtclock {
    mach_timespec_t             calend_offset;
    boolean_t                   calend_is_set;

    mach_timebase_info_data_t   timebase_const;

    struct rtclock_timer {
        AbsoluteTime    deadline;
        boolean_t       is_set;
    }                           timer[NCPUS];

    clock_timer_func_t          timer_expire;

    timer_call_data_t           alarm[NCPUS];

    /* debugging */
    AbsoluteTime                last_abstime[NCPUS];
    int                         last_decr[NCPUS];

    decl_simple_lock_data(,lock)    /* real-time clock device lock */
} rtclock;

static boolean_t        rtclock_initialized;

static AbsoluteTime     rtclock_tick_deadline[NCPUS];
static AbsoluteTime     rtclock_tick_interval;

static void timespec_to_absolutetime(
    mach_timespec_t timespec,
    AbsoluteTime    *result);

static int deadline_to_decrementer(
    AbsoluteTime    deadline,
    AbsoluteTime    now);

static void rtclock_alarm_timer(
    timer_call_param_t  p0,
    timer_call_param_t  p1);

/* global data declarations */

#define RTC_TICKPERIOD  (NSEC_PER_SEC / HZ)

#define DECREMENTER_MAX 0x7FFFFFFFUL
#define DECREMENTER_MIN 0xAUL

natural_t rtclock_decrementer_min;

/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)                     \
MACRO_BEGIN                             \
    (s) = splclock();                   \
    simple_lock(&rtclock.lock);         \
MACRO_END

#define UNLOCK_RTC(s)                   \
MACRO_BEGIN                             \
    simple_unlock(&rtclock.lock);       \
    splx(s);                            \
MACRO_END

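/*
 * Convert the timebase frequency reported by the platform expert
 * (freq->timebase_num ticks per freq->timebase_den seconds) into a
 * numer/denom pair such that
 *
 *     nanoseconds = timebase_ticks * numer / denom
 *
 * i.e. numer/denom == (timebase_den * 10^9) / timebase_num.  Common
 * factors of ten are cancelled first so that numer is less likely
 * to overflow 32 bits.
 */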
static void
timebase_callback(
    struct timebase_freq_t  *freq)
{
    natural_t   numer, denom;
    int         n;
    spl_t       s;

    denom = freq->timebase_num;
    n = 9;
    while (!(denom % 10)) {
        if (n < 1)
            break;
        denom /= 10;
        n--;
    }

    numer = freq->timebase_den;
    while (n-- > 0) {
        numer *= 10;
    }

    LOCK_RTC(s);
    rtclock.timebase_const.numer = numer;
    rtclock.timebase_const.denom = denom;
    UNLOCK_RTC(s);
}

/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
    int i;

    if (cpu_number() != master_cpu)
        return (1);

    for (i = 0; i < NCPUS; i++)
        timer_call_setup(&rtclock.alarm[i], rtclock_alarm_timer, NULL);

    simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK);

    PE_register_timebase_callback(timebase_callback);

    return (1);
}

/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
    AbsoluteTime    abstime;
    int             decr, mycpu = cpu_number();

    if (mycpu != master_cpu) {
        if (rtclock_initialized == FALSE) {
            panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
        }
        /* Set decrementer and hence our next tick due */
        clock_get_uptime(&abstime);
        rtclock_tick_deadline[mycpu] = abstime;
        ADD_ABSOLUTETIME(&rtclock_tick_deadline[mycpu],
                         &rtclock_tick_interval);
        decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
        mtdec(decr);
        rtclock.last_decr[mycpu] = decr;

        return (1);
    }

    /*
     * Initialize non-zero clock structure values.
     */
    clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1,
                                            &rtclock_tick_interval);
    /* Set decrementer and our next tick due */
    clock_get_uptime(&abstime);
    rtclock_tick_deadline[mycpu] = abstime;
    ADD_ABSOLUTETIME(&rtclock_tick_deadline[mycpu], &rtclock_tick_interval);
    decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
    mtdec(decr);
    rtclock.last_decr[mycpu] = decr;

    rtclock_initialized = TRUE;

    return (1);
}

/*
 * Perform a full 64 bit by 32 bit unsigned multiply,
 * yielding a 96 bit product.  The most significant
 * portion of the product is returned as a 64 bit
 * quantity, with the lower portion as a 32 bit word.
 */
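/*
 * For a 64-bit value n = (n.hi << 32) | n.lo and a 32-bit m, this
 * computes the schoolbook expansion
 *
 *     n * m = (n.hi * m << 32) + (n.lo * m)
 *
 * from four 32x32 partial products (mullw for the low halves, mulhwu
 * for the high halves), summing the two middle terms with addc/addze
 * so the carry propagates into the top word.
 */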
static void
umul_64by32(
    AbsoluteTime    now64,
    natural_t       mult32,
    AbsoluteTime    *result64,
    natural_t       *result32)
{
    natural_t   mid, mid2;

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (*result32) :
                    "r" (now64.lo), "r" (mult32));

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (mid2) :
                    "r" (now64.hi), "r" (mult32));
    asm volatile("  mulhwu %0,%1,%2" :
                    "=r" (mid) :
                    "r" (now64.lo), "r" (mult32));

    asm volatile("  mulhwu %0,%1,%2" :
                    "=r" (result64->hi) :
                    "r" (now64.hi), "r" (mult32));

    asm volatile("  addc %0,%2,%3;"
                 "  addze %1,%4" :
                    "=r" (result64->lo), "=r" (result64->hi) :
                    "r" (mid), "r" (mid2), "1" (result64->hi));
}

/*
 * Perform a partial 64 bit by 32 bit unsigned multiply,
 * yielding a 64 bit product.  Only the least significant
 * 64 bits of the product are calculated and returned.
 */
static void
umul_64by32to64(
    AbsoluteTime    now64,
    natural_t       mult32,
    AbsoluteTime    *result64)
{
    natural_t   mid, mid2;

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (result64->lo) :
                    "r" (now64.lo), "r" (mult32));

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (mid2) :
                    "r" (now64.hi), "r" (mult32));
    asm volatile("  mulhwu %0,%1,%2" :
                    "=r" (mid) :
                    "r" (now64.lo), "r" (mult32));

    asm volatile("  add %0,%1,%2" :
                    "=r" (result64->hi) :
                    "r" (mid), "r" (mid2));
}

/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 96 bit quotient.
 * The most significant portion of the quotient is
 * returned as a 64 bit quantity, with the lower
 * portion as a 32 bit word.
 */
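/*
 * The dividend is (now64 << 32) | now32.  When now64 is at least
 * div32 the quotient needs more than 32 bits, so the division is
 * done in two steps: now64 / div32 supplies the upper quotient, and
 * the remainder, concatenated with now32, is divided again for the
 * low word.  Otherwise a single 64/32 division suffices.
 */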
static __inline__
void
udiv_96by32(
    AbsoluteTime    now64,
    natural_t       now32,
    natural_t       div32,
    AbsoluteTime    *result64,
    natural_t       *result32)
{
    AbsoluteTime    t64;

    if (now64.hi > 0 || now64.lo >= div32) {
        AbsoluteTime_to_scalar(result64) =
            AbsoluteTime_to_scalar(&now64) / div32;

        umul_64by32to64(*result64, div32, &t64);

        AbsoluteTime_to_scalar(&t64) =
            AbsoluteTime_to_scalar(&now64) - AbsoluteTime_to_scalar(&t64);

        *result32 = (((unsigned long long)t64.lo << 32) | now32) / div32;
    }
    else {
        AbsoluteTime_to_scalar(result64) =
            (((unsigned long long)now64.lo << 32) | now32) / div32;

        *result32 = result64->lo;
        result64->lo = result64->hi;
        result64->hi = 0;
    }
}

/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 64 bit quotient.
 * Any higher order bits of the quotient are simply
 * discarded.
 */
static __inline__
void
udiv_96by32to64(
    AbsoluteTime    now64,
    natural_t       now32,
    natural_t       div32,
    AbsoluteTime    *result64)
{
    AbsoluteTime    t64;

    if (now64.hi > 0 || now64.lo >= div32) {
        AbsoluteTime_to_scalar(result64) =
            AbsoluteTime_to_scalar(&now64) / div32;

        umul_64by32to64(*result64, div32, &t64);

        AbsoluteTime_to_scalar(&t64) =
            AbsoluteTime_to_scalar(&now64) - AbsoluteTime_to_scalar(&t64);

        result64->hi = result64->lo;
        result64->lo = (((unsigned long long)t64.lo << 32) | now32) / div32;
    }
    else {
        AbsoluteTime_to_scalar(result64) =
            (((unsigned long long)now64.lo << 32) | now32) / div32;
    }
}

/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 32 bit quotient,
 * and a 32 bit remainder.  Any higher order bits
 * of the quotient are simply discarded.
 */
static __inline__
void
udiv_96by32to32and32(
    AbsoluteTime    now64,
    natural_t       now32,
    natural_t       div32,
    natural_t       *result32,
    natural_t       *remain32)
{
    AbsoluteTime    t64, u64;

    if (now64.hi > 0 || now64.lo >= div32) {
        AbsoluteTime_to_scalar(&t64) =
            AbsoluteTime_to_scalar(&now64) / div32;

        umul_64by32to64(t64, div32, &t64);

        AbsoluteTime_to_scalar(&t64) =
            AbsoluteTime_to_scalar(&now64) - AbsoluteTime_to_scalar(&t64);

        AbsoluteTime_to_scalar(&t64) =
            ((unsigned long long)t64.lo << 32) | now32;

        AbsoluteTime_to_scalar(&u64) =
            AbsoluteTime_to_scalar(&t64) / div32;

        *result32 = u64.lo;

        umul_64by32to64(u64, div32, &u64);

        *remain32 = AbsoluteTime_to_scalar(&t64) -
                        AbsoluteTime_to_scalar(&u64);
    }
    else {
        AbsoluteTime_to_scalar(&t64) =
            ((unsigned long long)now64.lo << 32) | now32;

        AbsoluteTime_to_scalar(&u64) =
            AbsoluteTime_to_scalar(&t64) / div32;

        *result32 = u64.lo;

        umul_64by32to64(u64, div32, &u64);

        *remain32 = AbsoluteTime_to_scalar(&t64) -
                        AbsoluteTime_to_scalar(&u64);
    }
}

/*
 * Get the clock device time.  This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 *
 * SMP configurations - *this currently assumes that the processor
 * clocks will be synchronised*
 */
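/*
 * The conversion pipeline: the 64-bit timebase count is scaled by
 * numer (through a 96-bit intermediate so the multiply cannot
 * overflow), divided by denom to obtain total nanoseconds, and the
 * nanoseconds are then split into seconds plus a nanosecond
 * remainder.
 */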
kern_return_t
sysclk_gettime_internal(
    mach_timespec_t *time)  /* OUT */
{
    AbsoluteTime    now;
    AbsoluteTime    t64;
    natural_t       t32;
    natural_t       numer, denom;

    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;

    clock_get_uptime(&now);

    umul_64by32(now, numer, &t64, &t32);

    udiv_96by32(t64, t32, denom, &t64, &t32);

    udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
                         &time->tv_sec, &time->tv_nsec);

    return (KERN_SUCCESS);
}

kern_return_t
sysclk_gettime(
    mach_timespec_t *time)  /* OUT */
{
    AbsoluteTime    now;
    AbsoluteTime    t64;
    natural_t       t32;
    natural_t       numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    clock_get_uptime(&now);

    umul_64by32(now, numer, &t64, &t32);

    udiv_96by32(t64, t32, denom, &t64, &t32);

    udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
                         &time->tv_sec, &time->tv_nsec);

    return (KERN_SUCCESS);
}

/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,   /* OUT */
    mach_msg_type_number_t  *count) /* IN/OUT */
{
    spl_t   s;

    if (*count != 1)
        return (KERN_FAILURE);
    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        LOCK_RTC(s);
        *(clock_res_t *) attr = RTC_TICKPERIOD;
        UNLOCK_RTC(s);
        break;

    default:
        return (KERN_INVALID_VALUE);
    }
    return (KERN_SUCCESS);
}

/*
 * Set deadline for the next alarm on the clock device.  This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
    mach_timespec_t *deadline)
{
    AbsoluteTime    abstime;

    timespec_to_absolutetime(*deadline, &abstime);
    timer_call_enter(&rtclock.alarm[cpu_number()], abstime);
}

/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
    return (1);
}

/*
 * Initialize the calendar clock.
 */
int
calend_init(void)
{
    if (cpu_number() != master_cpu)
        return (1);

    return (1);
}

/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
    mach_timespec_t *curr_time) /* OUT */
{
    spl_t   s;

    LOCK_RTC(s);
    if (!rtclock.calend_is_set) {
        UNLOCK_RTC(s);
        return (KERN_FAILURE);
    }

    (void) sysclk_gettime_internal(curr_time);
    ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset);
    UNLOCK_RTC(s);

    return (KERN_SUCCESS);
}

/*
 * Set the current clock time.
 */
kern_return_t
calend_settime(
    mach_timespec_t *new_time)
{
    mach_timespec_t curr_time;
    spl_t           s;

    LOCK_RTC(s);
    (void) sysclk_gettime_internal(&curr_time);
    rtclock.calend_offset = *new_time;
    SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
    rtclock.calend_is_set = TRUE;
    UNLOCK_RTC(s);

    PESetGMTTimeOfDay(new_time->tv_sec);

    return (KERN_SUCCESS);
}

/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,   /* OUT */
    mach_msg_type_number_t  *count) /* IN/OUT */
{
    spl_t   s;

    if (*count != 1)
        return (KERN_FAILURE);
    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
        LOCK_RTC(s);
        *(clock_res_t *) attr = RTC_TICKPERIOD;
        UNLOCK_RTC(s);
        break;

    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        *(clock_res_t *) attr = 0;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }
    return (KERN_SUCCESS);
}

void
clock_adjust_calendar(
    clock_res_t nsec)
{
    spl_t   s;

    LOCK_RTC(s);
    if (rtclock.calend_is_set)
        ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
    UNLOCK_RTC(s);
}

static void
calend_setup_internal(
    long    seconds)
{
    mach_timespec_t curr_time;

    (void) sysclk_gettime_internal(&curr_time);
    if (curr_time.tv_nsec < 500*USEC_PER_SEC)
        rtclock.calend_offset.tv_sec = seconds;
    else
        rtclock.calend_offset.tv_sec = seconds + 1;
    rtclock.calend_offset.tv_nsec = 0;
    SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
    rtclock.calend_is_set = TRUE;
}

static thread_call_t        calend_wakeup_call;
static thread_call_data_t   calend_wakeup_call_data;

static void
calend_wakeup_resynch(
    thread_call_param_t p0,
    thread_call_param_t p1)
{
    long    seconds = PEGetGMTTimeOfDay();
    spl_t   s;

    LOCK_RTC(s);
    calend_setup_internal(seconds);
    UNLOCK_RTC(s);
}

static IOReturn
calend_sleep_wake_notif(
    void        *target,
    void        *refCon,
    UInt32      messageType,
    void        *provider,
    void        *messageArg,
    vm_size_t   argSize)
{
    if (messageType != kIOMessageSystemHasPoweredOn)
        return (kIOReturnUnsupported);

    if (calend_wakeup_call != NULL)
        thread_call_enter(calend_wakeup_call);

    return (kIOReturnSuccess);
}

void
clock_initialize_calendar(void)
{
    long    seconds;
    spl_t   s;

    thread_call_setup(&calend_wakeup_call_data, calend_wakeup_resynch, NULL);
    calend_wakeup_call = &calend_wakeup_call_data;

    registerSleepWakeInterest(calend_sleep_wake_notif, NULL, NULL);

    seconds = PEGetGMTTimeOfDay();

    LOCK_RTC(s);
    if (!rtclock.calend_is_set)
        calend_setup_internal(seconds);
    UNLOCK_RTC(s);
}

mach_timespec_t
clock_get_calendar_offset(void)
{
    mach_timespec_t result = MACH_TIMESPEC_ZERO;
    spl_t           s;

    LOCK_RTC(s);
    if (rtclock.calend_is_set)
        result = rtclock.calend_offset;
    UNLOCK_RTC(s);

    return (result);
}

void
clock_timebase_info(
    mach_timebase_info_t    info)
{
    spl_t   s;

    LOCK_RTC(s);
    *info = rtclock.timebase_const;
    UNLOCK_RTC(s);
}

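/*
 * Arm the per-processor one-shot timer.  If the new deadline falls
 * before the next periodic tick, the decrementer is reloaded
 * immediately; otherwise the tick interrupt path below picks the
 * deadline up when it reprograms the decrementer.
 */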
void
clock_set_timer_deadline(
    AbsoluteTime    deadline)
{
    AbsoluteTime            abstime;
    int                     decr, mycpu;
    struct rtclock_timer    *mytimer;
    spl_t                   s;

    s = splclock();
    mycpu = cpu_number();
    mytimer = &rtclock.timer[mycpu];
    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    mytimer->deadline = deadline;
    mytimer->is_set = TRUE;
    if (CMP_ABSOLUTETIME(&mytimer->deadline,
                         &rtclock_tick_deadline[mycpu]) < 0) {
        decr = deadline_to_decrementer(mytimer->deadline, abstime);
        if (rtclock_decrementer_min != 0 &&
            rtclock_decrementer_min < (natural_t)decr)
            decr = rtclock_decrementer_min;

        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                              | DBG_FUNC_NONE, decr, 2, 0, 0, 0);

        mtdec(decr);
        rtclock.last_decr[mycpu] = decr;
    }
    splx(s);
}

void
clock_set_timer_func(
    clock_timer_func_t  func)
{
    spl_t   s;

    LOCK_RTC(s);
    if (rtclock.timer_expire == NULL)
        rtclock.timer_expire = func;
    UNLOCK_RTC(s);
}

/*
 * Reset the clock device.  This causes the realtime clock
 * device to reload its mode and count value (frequency).
 */
void
rtclock_reset(void)
{
    return;
}

/*
 * Real-time clock device interrupt.
 */
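/*
 * Services both the periodic scheduler tick and the per-processor
 * one-shot timer: decr[1] is the count to the next tick deadline,
 * decr[2] the count to the next timer deadline, and the decrementer
 * is reloaded with the smaller of the two.
 */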
void
rtclock_intr(
    int                     device,
    struct ppc_saved_state  *ssp,
    spl_t                   old_spl)
{
    AbsoluteTime            abstime;
    int                     decr[3], mycpu = cpu_number();
    struct rtclock_timer    *mytimer = &rtclock.timer[mycpu];

    /*
     * We may receive interrupts too early; we must reject them.
     */
    if (rtclock_initialized == FALSE) {
        mtdec(DECREMENTER_MAX);     /* Max the decrementer if not init */
        return;
    }

    decr[1] = decr[2] = DECREMENTER_MAX;

    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    if (CMP_ABSOLUTETIME(&rtclock_tick_deadline[mycpu], &abstime) <= 0) {
        clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
                                          &rtclock_tick_deadline[mycpu]);
        hertz_tick(USER_MODE(ssp->srr1), ssp->srr0);
    }

    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    if (mytimer->is_set &&
        CMP_ABSOLUTETIME(&mytimer->deadline, &abstime) <= 0) {
        mytimer->is_set = FALSE;
        (*rtclock.timer_expire)(abstime);
    }

    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

    if (mytimer->is_set)
        decr[2] = deadline_to_decrementer(mytimer->deadline, abstime);

    if (decr[1] > decr[2])
        decr[1] = decr[2];

    if (rtclock_decrementer_min != 0 &&
        rtclock_decrementer_min < (natural_t)decr[1])
        decr[1] = rtclock_decrementer_min;

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                          | DBG_FUNC_NONE, decr[1], 3, 0, 0, 0);

    mtdec(decr[1]);
    rtclock.last_decr[mycpu] = decr[1];
}

static void
rtclock_alarm_timer(
    timer_call_param_t  p0,
    timer_call_param_t  p1)
{
    mach_timespec_t timestamp;

    (void) sysclk_gettime(&timestamp);

    clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}

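/*
 * Read the 64-bit processor timebase.  The upper half is sampled
 * before and after the lower half; if it changed, the lower half
 * wrapped between the two reads and the sequence is retried.
 */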
void
clock_get_uptime(
    AbsoluteTime    *result)
{
    natural_t   hi, lo, hic;

    do {
        asm volatile("  mftbu %0" : "=r" (hi));
        asm volatile("  mftb %0" : "=r" (lo));
        asm volatile("  mftbu %0" : "=r" (hic));
    } while (hic != hi);

    result->lo = lo;
    result->hi = hi;
}

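/*
 * Convert a deadline into a decrementer count, clamped to
 * [DECREMENTER_MIN, DECREMENTER_MAX].  Past or imminent deadlines
 * yield DECREMENTER_MIN so the interrupt fires almost immediately.
 */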
static int
deadline_to_decrementer(
    AbsoluteTime    deadline,
    AbsoluteTime    now)
{
    uint64_t    delt;

    if (CMP_ABSOLUTETIME(&deadline, &now) <= 0)
        return DECREMENTER_MIN;
    else {
        delt = AbsoluteTime_to_scalar(&deadline) -
                    AbsoluteTime_to_scalar(&now);
        return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
               ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
    }
}

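/*
 * Convert a mach_timespec_t into timebase ticks by applying the
 * timebase fraction in the inverse direction: scale the total
 * nanoseconds by denom, then divide by numer.
 */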
static void
timespec_to_absolutetime(
    mach_timespec_t timespec,
    AbsoluteTime    *result)
{
    AbsoluteTime    t64;
    natural_t       t32;
    natural_t       numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (t64.lo) :
                    "r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

    asm volatile("  mulhwu %0,%1,%2" :
                    "=r" (t64.hi) :
                    "r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

    AbsoluteTime_to_scalar(&t64) += timespec.tv_nsec;

    umul_64by32(t64, denom, &t64, &t32);

    udiv_96by32(t64, t32, numer, &t64, &t32);

    result->hi = t64.lo;
    result->lo = t32;
}

void
clock_interval_to_deadline(
    natural_t       interval,
    natural_t       scale_factor,
    AbsoluteTime    *result)
{
    AbsoluteTime    abstime;

    clock_get_uptime(result);

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    ADD_ABSOLUTETIME(result, &abstime);
}

void
clock_interval_to_absolutetime_interval(
    natural_t       interval,
    natural_t       scale_factor,
    AbsoluteTime    *result)
{
    AbsoluteTime    t64;
    natural_t       t32;
    natural_t       numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (t64.lo) :
                    "r" (interval), "r" (scale_factor));
    asm volatile("  mulhwu %0,%1,%2" :
                    "=r" (t64.hi) :
                    "r" (interval), "r" (scale_factor));

    umul_64by32(t64, denom, &t64, &t32);

    udiv_96by32(t64, t32, numer, &t64, &t32);

    result->hi = t64.lo;
    result->lo = t32;
}

void
clock_absolutetime_interval_to_deadline(
    AbsoluteTime    abstime,
    AbsoluteTime    *result)
{
    clock_get_uptime(result);

    ADD_ABSOLUTETIME(result, &abstime);
}

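/*
 * Conversions between timebase ticks and nanoseconds.  As above,
 * ticks-to-nanoseconds multiplies by numer and divides by denom
 * through a 96-bit intermediate; nanoseconds-to-ticks runs the same
 * fraction in the opposite direction.
 */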
void
absolutetime_to_nanoseconds(
    AbsoluteTime    abstime,
    UInt64          *result)
{
    AbsoluteTime    t64;
    natural_t       t32;
    natural_t       numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    umul_64by32(abstime, numer, &t64, &t32);

    udiv_96by32to64(t64, t32, denom, (void *)result);
}

void
nanoseconds_to_absolutetime(
    UInt64          nanoseconds,
    AbsoluteTime    *result)
{
    AbsoluteTime    t64;
    natural_t       t32;
    natural_t       numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    AbsoluteTime_to_scalar(&t64) = nanoseconds;

    umul_64by32(t64, denom, &t64, &t32);

    udiv_96by32to64(t64, t32, numer, result);
}

/*
 * Spin-loop delay primitives.
 */
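/*
 * Each of these polls clock_get_uptime() until the deadline passes
 * rather than blocking the calling thread, so they are intended for
 * short waits.
 */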
void
delay_for_interval(
    natural_t   interval,
    natural_t   scale_factor)
{
    AbsoluteTime    now, end;

    clock_interval_to_deadline(interval, scale_factor, &end);

    do {
        clock_get_uptime(&now);
    } while (CMP_ABSOLUTETIME(&now, &end) < 0);
}

void
clock_delay_until(
    AbsoluteTime    deadline)
{
    AbsoluteTime    now;

    do {
        clock_get_uptime(&now);
    } while (CMP_ABSOLUTETIME(&now, &deadline) < 0);
}

void
delay(
    int usec)
{
    delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}