/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:	rtclock.c
 * Purpose:	Routines for handling the machine dependent
 *		real-time clock.
 */

#include <libkern/OSTypes.h>

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

#include <machine/mach_param.h>	/* HZ */
#include <machine/commpage.h>
#include <ppc/proc_reg.h>

#include <pexpert/pexpert.h>

#include <sys/kdebug.h>

int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	sysclk_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

void		sysclk_setalarm(
	mach_timespec_t			*deadline);

struct clock_ops sysclk_ops = {
	sysclk_config,		sysclk_init,
	sysclk_gettime,		0,
	sysclk_getattr,		0,
	sysclk_setalarm,
};

int		calend_config(void);

int		calend_init(void);

kern_return_t	calend_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_settime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

struct clock_ops calend_ops = {
	calend_config,		calend_init,
	calend_gettime,		calend_settime,
	calend_getattr,		0,
	0,
};

/* local data declarations */

static struct rtclock {
	mach_timespec_t		calend_offset;
	boolean_t		calend_is_set;

	mach_timebase_info_data_t	timebase_const;

	struct rtclock_timer {
		uint64_t	deadline;
		boolean_t	is_set;
	}			timer[NCPUS];

	clock_timer_func_t	timer_expire;

	timer_call_data_t	alarm_timer;

	/* debugging */
	uint64_t		last_abstime[NCPUS];
	int			last_decr[NCPUS];

	decl_simple_lock_data(,lock)	/* real-time clock device lock */
} rtclock;

static boolean_t	rtclock_initialized;

static uint64_t		rtclock_tick_deadline[NCPUS];
static uint64_t		rtclock_tick_interval;

static uint32_t		rtclock_sec_divisor;
static uint32_t		rtclock_ns_per_tick;

static void	timespec_to_absolutetime(
			mach_timespec_t		timespec,
			uint64_t		*result);

static int	deadline_to_decrementer(
			uint64_t		deadline,
			uint64_t		now);

static void	rtclock_alarm_timer(
			timer_call_param_t	p0,
			timer_call_param_t	p1);

/* global data declarations */

#define RTC_TICKPERIOD		(NSEC_PER_SEC / HZ)

#define DECREMENTER_MAX		0x7FFFFFFFUL
#define DECREMENTER_MIN		0xAUL
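
/*
 * Note: the PowerPC decrementer is a signed 32-bit register that counts
 * down at the timebase rate and takes an exception when its sign bit
 * flips, so DECREMENTER_MAX is the largest count that can usefully be
 * programmed, and DECREMENTER_MIN keeps reloads from being so short
 * that the exception re-fires before the handler has finished.
 */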

natural_t	rtclock_decrementer_min;

/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)			\
MACRO_BEGIN				\
	(s) = splclock();		\
	simple_lock(&rtclock.lock);	\
MACRO_END

#define UNLOCK_RTC(s)			\
MACRO_BEGIN				\
	simple_unlock(&rtclock.lock);	\
	splx(s);			\
MACRO_END

static void
timebase_callback(
	struct timebase_freq_t	*freq)
{
	natural_t	numer, denom;
	int		n;
	spl_t		s;

	denom = freq->timebase_num;
	n = 9;
	while (!(denom % 10)) {
		if (n < 1)
			break;
		denom /= 10;
		n--;
	}

	numer = freq->timebase_den;
	while (n-- > 0) {
		numer *= 10;
	}

	LOCK_RTC(s);
	rtclock.timebase_const.numer = numer;
	rtclock.timebase_const.denom = denom;
	rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;
	rtclock_ns_per_tick = NSEC_PER_SEC / rtclock_sec_divisor;
	commpage_set_timestamp(0, 0, 0, 0);
	UNLOCK_RTC(s);
}
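
/*
 * timebase_callback() builds the ticks-to-nanoseconds ratio
 * NSEC_PER_SEC * timebase_den / timebase_num, shifting common factors
 * of ten out of the 10^9 constant and the denominator so both sides
 * stay well within 32 bits.  A worked example with illustrative
 * numbers, assuming a 25 MHz timebase (timebase_num = 25000000,
 * timebase_den = 1):
 *
 *	denom: 25000000 -> 25	(six factors of ten removed, n: 9 -> 3)
 *	numer: 1 * 10^3 = 1000
 *
 * giving timebase_const = 1000/25, i.e. 40 ns per timebase tick,
 * which matches 10^9 / (25 * 10^6).
 */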

/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
	if (cpu_number() != master_cpu)
		return(1);

	timer_call_setup(&rtclock.alarm_timer, rtclock_alarm_timer, NULL);

	simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK);

	PE_register_timebase_callback(timebase_callback);

	return (1);
}

/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
	uint64_t	abstime;
	int		decr, mycpu = cpu_number();

	if (mycpu != master_cpu) {
		if (rtclock_initialized == FALSE) {
			panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
		}
		/* Set decrementer and hence our next tick due */
		clock_get_uptime(&abstime);
		rtclock_tick_deadline[mycpu] = abstime;
		rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
		decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
		mtdec(decr);
		rtclock.last_decr[mycpu] = decr;

		return(1);
	}

	/*
	 * Initialize non-zero clock structure values.
	 */
	clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1,
						&rtclock_tick_interval);
	/* Set decrementer and our next tick due */
	clock_get_uptime(&abstime);
	rtclock_tick_deadline[mycpu] = abstime;
	rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
	decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
	mtdec(decr);
	rtclock.last_decr[mycpu] = decr;

	rtclock_initialized = TRUE;

	return (1);
}

#define UnsignedWide_to_scalar(x)	(*(uint64_t *)(x))
#define scalar_to_UnsignedWide(x)	(*(UnsignedWide *)(x))

/*
 * Perform a full 64 bit by 32 bit unsigned multiply,
 * yielding a 96 bit product.  The most significant
 * portion of the product is returned as a 64 bit
 * quantity, with the lower portion as a 32 bit word.
 */
static void
umul_64by32(
	UnsignedWide	now64,
	uint32_t	mult32,
	UnsignedWide	*result64,
	uint32_t	*result32)
{
	uint32_t	mid, mid2;

	asm volatile("	mullw %0,%1,%2" :
			"=r" (*result32) :
			"r" (now64.lo), "r" (mult32));

	asm volatile("	mullw %0,%1,%2" :
			"=r" (mid2) :
			"r" (now64.hi), "r" (mult32));
	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (mid) :
			"r" (now64.lo), "r" (mult32));

	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (result64->hi) :
			"r" (now64.hi), "r" (mult32));

	asm volatile("	addc %0,%2,%3; addze %1,%4" :
			"=r" (result64->lo), "=r" (result64->hi) :
			"r" (mid), "r" (mid2), "1" (result64->hi));
}
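
/*
 * The 96-bit product above is assembled from four 32-bit partial
 * products: (hi*2^32 + lo) * m = hi*m*2^32 + lo*m.  result32 is the
 * low word of lo*m; the two middle words (high word of lo*m, low word
 * of hi*m) are summed with carry into result64->lo, and the carry
 * propagates into result64->hi via addze.  For example, multiplying
 * 2^32 + 1 by 5 yields result64 = {0, 5}, result32 = 5, i.e.
 * 5*2^32 + 5.  A portable reference for the asm sequence, kept here
 * only as an illustrative sketch (not compiled):
 */
#if 0
static void
umul_64by32_ref(
	UnsignedWide	now64,
	uint32_t	mult32,
	UnsignedWide	*result64,
	uint32_t	*result32)
{
	uint64_t	lo = (uint64_t)now64.lo * mult32;	/* low partial product */
	uint64_t	hi = (uint64_t)now64.hi * mult32;	/* high partial product */
	uint64_t	upper = hi + (lo >> 32);		/* upper 64 of the 96 bits */

	*result32 = (uint32_t)lo;
	result64->lo = (uint32_t)upper;
	result64->hi = (uint32_t)(upper >> 32);
}
#endif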

/*
 * Perform a partial 64 bit by 32 bit unsigned multiply,
 * yielding a 64 bit product.  Only the least significant
 * 64 bits of the product are calculated and returned.
 */
static void
umul_64by32to64(
	UnsignedWide	now64,
	uint32_t	mult32,
	UnsignedWide	*result64)
{
	uint32_t	mid, mid2;

	asm volatile("	mullw %0,%1,%2" :
			"=r" (result64->lo) :
			"r" (now64.lo), "r" (mult32));

	asm volatile("	mullw %0,%1,%2" :
			"=r" (mid2) :
			"r" (now64.hi), "r" (mult32));
	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (mid) :
			"r" (now64.lo), "r" (mult32));

	asm volatile("	add %0,%1,%2" :
			"=r" (result64->hi) :
			"r" (mid), "r" (mid2));
}

/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 96 bit quotient.
 * The most significant portion of the quotient is
 * returned as a 64 bit quantity, with the lower
 * portion as a 32 bit word.
 */
static void
udiv_96by32(
	UnsignedWide	now64,
	uint32_t	now32,
	uint32_t	div32,
	UnsignedWide	*result64,
	uint32_t	*result32)
{
	UnsignedWide	t64;

	if (now64.hi > 0 || now64.lo >= div32) {
		UnsignedWide_to_scalar(result64) =
			UnsignedWide_to_scalar(&now64) / div32;

		umul_64by32to64(*result64, div32, &t64);

		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		*result32 = (((uint64_t)t64.lo << 32) | now32) / div32;
	}
	else {
		UnsignedWide_to_scalar(result64) =
			(((uint64_t)now64.lo << 32) | now32) / div32;

		*result32 = result64->lo;
		result64->lo = result64->hi;
		result64->hi = 0;
	}
}
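
/*
 * udiv_96by32() is schoolbook long division in base 2^32: divide the
 * upper 64 bits by div32, bring the remainder down in front of the low
 * word, and divide again.  The same steps in base 10: 987 / 4 gives
 * 98/4 = 24 remainder 2, then 27/4 = 6 remainder 3, so the quotient is
 * 246; here each "digit" is a 32-bit word.  A portable sketch of the
 * same computation, illustrative only (not compiled):
 */
#if 0
static void
udiv_96by32_ref(
	UnsignedWide	now64,
	uint32_t	now32,
	uint32_t	div32,
	UnsignedWide	*result64,
	uint32_t	*result32)
{
	uint64_t	high = UnsignedWide_to_scalar(&now64);
	uint64_t	q1 = high / div32;			/* upper quotient "digit" */
	uint64_t	r1 = high % div32;			/* remainder, < div32 */
	uint64_t	q0 = ((r1 << 32) | now32) / div32;	/* low digit, fits 32 bits */

	result64->hi = (uint32_t)(q1 >> 32);
	result64->lo = (uint32_t)q1;
	*result32 = (uint32_t)q0;
}
#endif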

/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 64 bit quotient.
 * Any higher order bits of the quotient are simply
 * discarded.
 */
static void
udiv_96by32to64(
	UnsignedWide	now64,
	uint32_t	now32,
	uint32_t	div32,
	UnsignedWide	*result64)
{
	UnsignedWide	t64;

	if (now64.hi > 0 || now64.lo >= div32) {
		UnsignedWide_to_scalar(result64) =
			UnsignedWide_to_scalar(&now64) / div32;

		umul_64by32to64(*result64, div32, &t64);

		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		result64->hi = result64->lo;
		result64->lo = (((uint64_t)t64.lo << 32) | now32) / div32;
	}
	else {
		UnsignedWide_to_scalar(result64) =
			(((uint64_t)now64.lo << 32) | now32) / div32;
	}
}

/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 32 bit quotient,
 * and a 32 bit remainder.  Any higher order bits
 * of the quotient are simply discarded.
 */
static void
udiv_96by32to32and32(
	UnsignedWide	now64,
	uint32_t	now32,
	uint32_t	div32,
	uint32_t	*result32,
	uint32_t	*remain32)
{
	UnsignedWide	t64, u64;

	if (now64.hi > 0 || now64.lo >= div32) {
		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) / div32;

		umul_64by32to64(t64, div32, &t64);

		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		UnsignedWide_to_scalar(&t64) = ((uint64_t)t64.lo << 32) | now32;

		UnsignedWide_to_scalar(&u64) =
			UnsignedWide_to_scalar(&t64) / div32;

		*result32 = u64.lo;

		umul_64by32to64(u64, div32, &u64);

		*remain32 = UnsignedWide_to_scalar(&t64) -
				UnsignedWide_to_scalar(&u64);
	}
	else {
		UnsignedWide_to_scalar(&t64) = ((uint64_t)now64.lo << 32) | now32;

		UnsignedWide_to_scalar(&u64) =
			UnsignedWide_to_scalar(&t64) / div32;

		*result32 = u64.lo;

		umul_64by32to64(u64, div32, &u64);

		*remain32 = UnsignedWide_to_scalar(&t64) -
				UnsignedWide_to_scalar(&u64);
	}
}

/*
 * Get the clock device time. This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 *
 * SMP configurations - *the processor clocks are synchronised*
 */
kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*time)	/* OUT */
{
	UnsignedWide	now;
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;

	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;

	clock_get_uptime((uint64_t *)&now);

	umul_64by32(now, numer, &t64, &t32);

	udiv_96by32(t64, t32, denom, &t64, &t32);

	udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
				&time->tv_sec, &time->tv_nsec);

	return (KERN_SUCCESS);
}
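
/*
 * The conversion pipeline: timebase ticks * numer gives a 96-bit
 * intermediate, dividing by denom yields nanoseconds, and dividing by
 * NSEC_PER_SEC splits that into seconds and leftover nanoseconds.
 * With the illustrative 40 ns/tick constant from the example above
 * (numer/denom = 1000/25), an uptime of 250,000,000 ticks becomes
 * 250e6 * 1000 / 25 = 1e10 ns, i.e. tv_sec = 10, tv_nsec = 0.
 */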

kern_return_t
sysclk_gettime(
	mach_timespec_t	*time)	/* OUT */
{
	UnsignedWide	now;
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	clock_get_uptime((uint64_t *)&now);

	umul_64by32(now, numer, &t64, &t32);

	udiv_96by32(t64, t32, denom, &t64, &t32);

	udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
				&time->tv_sec, &time->tv_nsec);

	return (KERN_SUCCESS);
}

/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		LOCK_RTC(s);
		*(clock_res_t *) attr = RTC_TICKPERIOD;
		UNLOCK_RTC(s);
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}

/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
	mach_timespec_t	*deadline)
{
	uint64_t	abstime;

	timespec_to_absolutetime(*deadline, &abstime);
	timer_call_enter(&rtclock.alarm_timer, abstime);
}

/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
	return (1);
}

/*
 * Initialize the calendar clock.
 */
int
calend_init(void)
{
	if (cpu_number() != master_cpu)
		return(1);

	return (1);
}

/*
 * Get the current clock microtime and sync the timestamp
 * on the commpage.  Only called from ppc_gettimeofday(),
 * i.e. in response to a system call from user mode.
 */
void
clock_gettimeofday(
	uint32_t	*secp,
	uint32_t	*usecp)
{
	uint64_t	now;
	UnsignedWide	wide_now;
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	uint32_t	secs, usecs;
	mach_timespec_t	curr_time;
	spl_t		s;

	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		UNLOCK_RTC(s);
		return;
	}

	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;

	clock_get_uptime(&now);
	wide_now = *((UnsignedWide *)&now);

	umul_64by32(wide_now, numer, &t64, &t32);

	udiv_96by32(t64, t32, denom, &t64, &t32);

	udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
				&curr_time.tv_sec, &curr_time.tv_nsec);

	ADD_MACH_TIMESPEC(&curr_time, &rtclock.calend_offset);

	secs = curr_time.tv_sec;
	usecs = curr_time.tv_nsec / NSEC_PER_USEC;
	*secp = secs;
	*usecp = usecs;

	t32 = curr_time.tv_nsec - (usecs * NSEC_PER_USEC);
	t32 = t32 / rtclock_ns_per_tick;
	now -= t32;

	commpage_set_timestamp(now, secs, usecs, rtclock_sec_divisor);

	UNLOCK_RTC(s);

	return;
}
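
/*
 * The three lines before commpage_set_timestamp() convert the
 * sub-microsecond residue of tv_nsec back into timebase ticks and
 * subtract them from `now`, so the published timestamp refers to the
 * exact timebase value at which the secs/usecs pair was true.
 * Illustrative arithmetic at 40 ns/tick: with tv_nsec = 1,234,567,
 * usecs = 1234 and the residue is 567 ns, i.e. 567 / 40 = 14 ticks
 * (integer divide), so `now` is backed up by 14 ticks.
 */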

/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*curr_time)	/* OUT */
{
	spl_t	s;

	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		UNLOCK_RTC(s);
		return (KERN_FAILURE);
	}

	(void) sysclk_gettime_internal(curr_time);
	ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset);
	UNLOCK_RTC(s);

	return (KERN_SUCCESS);
}

/*
 * Set the current clock time.
 */
kern_return_t
calend_settime(
	mach_timespec_t	*new_time)
{
	mach_timespec_t	curr_time;
	spl_t		s;

	LOCK_RTC(s);
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset = *new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	commpage_set_timestamp(0, 0, 0, 0);	/* disable timestamp */
	UNLOCK_RTC(s);

	PESetGMTTimeOfDay(new_time->tv_sec);

	return (KERN_SUCCESS);
}

/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
		LOCK_RTC(s);
		*(clock_res_t *) attr = RTC_TICKPERIOD;
		UNLOCK_RTC(s);
		break;

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}

void
clock_adjust_calendar(
	clock_res_t	nsec)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock.calend_is_set) {
		ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
		commpage_set_timestamp(0, 0, 0, 0);	/* disable timestamp */
	}
	UNLOCK_RTC(s);
}

void
clock_initialize_calendar(void)
{
	mach_timespec_t	curr_time;
	long		seconds = PEGetGMTTimeOfDay();
	spl_t		s;

	LOCK_RTC(s);
	(void) sysclk_gettime_internal(&curr_time);
	if (curr_time.tv_nsec < 500*USEC_PER_SEC)
		rtclock.calend_offset.tv_sec = seconds;
	else
		rtclock.calend_offset.tv_sec = seconds + 1;
	rtclock.calend_offset.tv_nsec = 0;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	commpage_set_timestamp(0, 0, 0, 0);	/* disable timestamp */
	UNLOCK_RTC(s);
}
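
/*
 * The platform clock (PEGetGMTTimeOfDay) has only one-second
 * resolution, so the calendar offset is rounded to whichever whole
 * second is nearer: 500*USEC_PER_SEC is 500,000,000, i.e. half a
 * second in the nanosecond units of tv_nsec.  For example, if uptime
 * reads 7.8 s when the platform clock reports 1,000,000,000 s, the
 * offset becomes 1,000,000,001 - 7.8 = 999,999,993.2 s rather than
 * being truncated a full second low.
 */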

mach_timespec_t
clock_get_calendar_offset(void)
{
	mach_timespec_t	result = MACH_TIMESPEC_ZERO;
	spl_t		s;

	LOCK_RTC(s);
	if (rtclock.calend_is_set)
		result = rtclock.calend_offset;
	UNLOCK_RTC(s);

	return (result);
}

void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	spl_t	s;

	LOCK_RTC(s);
	*info = rtclock.timebase_const;
	UNLOCK_RTC(s);
}

void
clock_set_timer_deadline(
	uint64_t		deadline)
{
	uint64_t		abstime;
	int			decr, mycpu;
	struct rtclock_timer	*mytimer;
	spl_t			s;

	s = splclock();
	mycpu = cpu_number();
	mytimer = &rtclock.timer[mycpu];
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if ( mytimer->deadline < rtclock_tick_deadline[mycpu] ) {
		decr = deadline_to_decrementer(mytimer->deadline, abstime);
		if ( rtclock_decrementer_min != 0 &&
				rtclock_decrementer_min < (natural_t)decr )
			decr = rtclock_decrementer_min;

		mtdec(decr);
		rtclock.last_decr[mycpu] = decr;

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
					| DBG_FUNC_NONE, decr, 2, 0, 0, 0);
	}
	splx(s);
}

void
clock_set_timer_func(
	clock_timer_func_t	func)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock.timer_expire == NULL)
		rtclock.timer_expire = func;
	UNLOCK_RTC(s);
}

/*
 * Reset the clock device. This causes the realtime clock
 * device to reload its mode and count value (frequency).
 */
void
rtclock_reset(void)
{
	return;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
	int		device,
	struct savearea	*ssp,
	spl_t		old_spl)
{
	uint64_t	abstime;
	int		decr[3], mycpu = cpu_number();
	struct rtclock_timer	*mytimer = &rtclock.timer[mycpu];

	/*
	 * We may receive interrupts too early; we must reject them.
	 */
	if (rtclock_initialized == FALSE) {
		mtdec(DECREMENTER_MAX);		/* Max the decrementer if not init */
		return;
	}

	decr[1] = decr[2] = DECREMENTER_MAX;

	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	if ( rtclock_tick_deadline[mycpu] <= abstime ) {
		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
						&rtclock_tick_deadline[mycpu]);
		hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
	}

	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	if ( mytimer->is_set &&
			mytimer->deadline <= abstime ) {
		mytimer->is_set = FALSE;
		(*rtclock.timer_expire)(abstime);
	}

	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

	if (mytimer->is_set)
		decr[2] = deadline_to_decrementer(mytimer->deadline, abstime);

	if (decr[1] > decr[2])
		decr[1] = decr[2];

	if ( rtclock_decrementer_min != 0 &&
			rtclock_decrementer_min < (natural_t)decr[1] )
		decr[1] = rtclock_decrementer_min;

	mtdec(decr[1]);
	rtclock.last_decr[mycpu] = decr[1];

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
				| DBG_FUNC_NONE, decr[1], 3, 0, 0, 0);
}

static void
rtclock_alarm_timer(
	timer_call_param_t	p0,
	timer_call_param_t	p1)
{
	mach_timespec_t		timestamp;

	(void) sysclk_gettime(&timestamp);

	clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}

void
clock_get_uptime(
	uint64_t	*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	uint32_t	hi, lo, hic;

	do {
		asm volatile("	mftbu %0" : "=r" (hi));
		asm volatile("	mftb %0" : "=r" (lo));
		asm volatile("	mftbu %0" : "=r" (hic));
	} while (hic != hi);

	result->lo = lo;
	result->hi = hi;
}
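
/*
 * The 64-bit timebase must be read as two 32-bit halves (mftbu for the
 * upper word, mftb for the lower), so the upper word is read again
 * after the lower and the sequence retried if it changed; otherwise a
 * carry between the two reads could return a torn value.  E.g. near
 * hi:lo = 1:0xFFFFFFFF, reading hi = 1 and then lo = 0 after the carry
 * would yield 1:0, almost 2^32 ticks in the past.
 */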

static int
deadline_to_decrementer(
	uint64_t	deadline,
	uint64_t	now)
{
	uint64_t	delt;

	if (deadline <= now)
		return DECREMENTER_MIN;
	else {
		delt = deadline - now;
		return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
			((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
	}
}
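
/*
 * The returned count is clamped to [DECREMENTER_MIN, DECREMENTER_MAX]:
 * past deadlines still program the minimum so an interrupt fires
 * promptly, and deadlines beyond what the decrementer can represent
 * are cut off at DECREMENTER_MAX and re-armed when that interrupt
 * arrives.  The `delt - 1` presumably accounts for the decrementer
 * exception being taken one tick after the count passes zero.  For
 * example, a deadline 100 ticks out yields 99, while one 5 ticks out
 * is raised to DECREMENTER_MIN (10).
 */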

static void
timespec_to_absolutetime(
	mach_timespec_t	timespec,
	uint64_t	*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	asm volatile("	mullw %0,%1,%2" :
			"=r" (t64.lo) :
			"r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (t64.hi) :
			"r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

	UnsignedWide_to_scalar(&t64) += timespec.tv_nsec;

	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32(t64, t32, numer, &t64, &t32);

	result->hi = t64.lo;
	result->lo = t32;
}

void
clock_interval_to_deadline(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t	abstime;

	clock_get_uptime(result);

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result += abstime;
}

void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	asm volatile("	mullw %0,%1,%2" :
			"=r" (t64.lo) :
			"r" (interval), "r" (scale_factor));
	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (t64.hi) :
			"r" (interval), "r" (scale_factor));

	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32(t64, t32, numer, &t64, &t32);

	result->hi = t64.lo;
	result->lo = t32;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t	abstime,
	uint64_t	*result)
{
	clock_get_uptime(result);

	*result += abstime;
}

void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	UnsignedWide_to_scalar(&t64) = abstime;

	umul_64by32(t64, numer, &t64, &t32);

	udiv_96by32to64(t64, t32, denom, (void *)result);
}

void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	UnsignedWide_to_scalar(&t64) = nanoseconds;

	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32to64(t64, t32, numer, (void *)result);
}

/*
 * Spin-loop delay primitives.
 */
void
delay_for_interval(
	uint32_t	interval,
	uint32_t	scale_factor)
{
	uint64_t	now, end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	do {
		clock_get_uptime(&now);
	} while (now < end);
}

void
clock_delay_until(
	uint64_t	deadline)
{
	uint64_t	now;

	do {
		clock_get_uptime(&now);
	} while (now < deadline);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
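
/*
 * delay() takes microseconds but scales by NSEC_PER_USEC, because
 * delay_for_interval() interprets (interval, scale_factor) as
 * interval * scale_factor nanoseconds; e.g. delay(10) spins for
 * 10 * 1000 ns = 10 us.  Negative arguments are treated as their
 * magnitude rather than as an error.
 */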