/* Source: osfmk/ppc/rtclock.c (apple/xnu), commit a7c215da6d384243debd045ec9ea30b35b7f9753 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * @APPLE_FREE_COPYRIGHT@
27 */
28 /*
29 * File: rtclock.c
30 * Purpose: Routines for handling the machine dependent
31 * real-time clock.
32 */
33
34 #include <libkern/OSTypes.h>
35
36 #include <mach/mach_types.h>
37
38 #include <kern/clock.h>
39 #include <kern/thread.h>
40 #include <kern/macro_help.h>
41 #include <kern/spl.h>
42
43 #include <machine/mach_param.h> /* HZ */
44 #include <ppc/proc_reg.h>
45
46 #include <pexpert/pexpert.h>
47
48 #include <sys/kdebug.h>
49
/* Forward declarations for the system (real-time) clock device operations. */
int sysclk_config(void);

int sysclk_init(void);

kern_return_t sysclk_gettime(
	mach_timespec_t *cur_time);

kern_return_t sysclk_getattr(
	clock_flavor_t flavor,
	clock_attr_t attr,
	mach_msg_type_number_t *count);

void sysclk_setalarm(
	mach_timespec_t *deadline);

/*
 * Operations vector for the system clock device.  The zero slots
 * are settime and setattr, which this clock does not support.
 */
struct clock_ops sysclk_ops = {
	sysclk_config, sysclk_init,
	sysclk_gettime, 0,
	sysclk_getattr, 0,
	sysclk_setalarm,
};
71
/* Forward declarations for the calendar (wall-clock) device operations. */
int calend_config(void);

int calend_init(void);

kern_return_t calend_gettime(
	mach_timespec_t *cur_time);

kern_return_t calend_settime(
	mach_timespec_t *cur_time);

kern_return_t calend_getattr(
	clock_flavor_t flavor,
	clock_attr_t attr,
	mach_msg_type_number_t *count);

/*
 * Operations vector for the calendar clock.  The zero slots are
 * setattr and setalarm, which the calendar clock does not support.
 */
struct clock_ops calend_ops = {
	calend_config, calend_init,
	calend_gettime, calend_settime,
	calend_getattr, 0,
	0,
};
93
/* local data declarations */

static struct rtclock {
	mach_timespec_t		calend_offset;	/* calendar time minus system clock time */
	boolean_t		calend_is_set;	/* TRUE once the calendar has been set */

	mach_timebase_info_data_t	timebase_const;	/* numer/denom: timebase ticks -> nanoseconds */

	struct rtclock_timer {
		uint64_t	deadline;	/* absolute-time deadline for this cpu's timer */
		boolean_t	is_set;		/* deadline is armed */
	}			timer[NCPUS];

	clock_timer_func_t	timer_expire;	/* callout invoked when a timer deadline fires */

	timer_call_data_t	alarm[NCPUS];	/* per-cpu alarm timer call */

	/* debugging */
	uint64_t		last_abstime[NCPUS];	/* last uptime sampled on each cpu */
	int			last_decr[NCPUS];	/* last value loaded into each cpu's decrementer */

	decl_simple_lock_data(,lock)		/* real-time clock device lock */
} rtclock;

/* TRUE once the master cpu has completed sysclk_init() */
static boolean_t		rtclock_initialized;

static uint64_t		rtclock_tick_deadline[NCPUS];	/* next periodic (HZ) tick per cpu */
static uint64_t		rtclock_tick_interval;		/* absolute-time length of one HZ tick */
122
/* Forward declarations for local helpers defined below. */
static void timespec_to_absolutetime(
	mach_timespec_t	timespec,
	uint64_t	*result);

static int deadline_to_decrementer(
	uint64_t	deadline,
	uint64_t	now);

static void rtclock_alarm_timer(
	timer_call_param_t	p0,
	timer_call_param_t	p1);
134
/* global data declarations */

/* nanoseconds per scheduler (HZ) tick */
#define RTC_TICKPERIOD	(NSEC_PER_SEC / HZ)

/* largest / smallest values ever programmed into the decrementer */
#define DECREMENTER_MAX	0x7FFFFFFFUL
#define DECREMENTER_MIN	0xAUL

/* debugging knob: when non-zero, caps any value loaded into the decrementer */
natural_t		rtclock_decrementer_min;

/*
 * Macros to lock/unlock real-time clock device.
 * Raise to clock spl and take the device simple lock (and inverse).
 */
#define LOCK_RTC(s)			\
MACRO_BEGIN				\
	(s) = splclock();		\
	simple_lock(&rtclock.lock);	\
MACRO_END

#define UNLOCK_RTC(s)			\
MACRO_BEGIN				\
	simple_unlock(&rtclock.lock);	\
	splx(s);			\
MACRO_END
158
159 static void
160 timebase_callback(
161 struct timebase_freq_t *freq)
162 {
163 natural_t numer, denom;
164 int n;
165 spl_t s;
166
167 denom = freq->timebase_num;
168 n = 9;
169 while (!(denom % 10)) {
170 if (n < 1)
171 break;
172 denom /= 10;
173 n--;
174 }
175
176 numer = freq->timebase_den;
177 while (n-- > 0) {
178 numer *= 10;
179 }
180
181 LOCK_RTC(s);
182 rtclock.timebase_const.numer = numer;
183 rtclock.timebase_const.denom = denom;
184 UNLOCK_RTC(s);
185 }
186
187 /*
188 * Configure the real-time clock device.
189 */
190 int
191 sysclk_config(void)
192 {
193 int i;
194
195 if (cpu_number() != master_cpu)
196 return(1);
197
198 for (i = 0; i < NCPUS; i++)
199 timer_call_setup(&rtclock.alarm[i], rtclock_alarm_timer, NULL);
200
201 simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK);
202
203 PE_register_timebase_callback(timebase_callback);
204
205 return (1);
206 }
207
208 /*
209 * Initialize the system clock device.
210 */
211 int
212 sysclk_init(void)
213 {
214 uint64_t abstime;
215 int decr, mycpu = cpu_number();
216
217 if (mycpu != master_cpu) {
218 if (rtclock_initialized == FALSE) {
219 panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
220 }
221 /* Set decrementer and hence our next tick due */
222 clock_get_uptime(&abstime);
223 rtclock_tick_deadline[mycpu] = abstime;
224 rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
225 decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
226 mtdec(decr);
227 rtclock.last_decr[mycpu] = decr;
228
229 return(1);
230 }
231
232 /*
233 * Initialize non-zero clock structure values.
234 */
235 clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1,
236 &rtclock_tick_interval);
237 /* Set decrementer and our next tick due */
238 clock_get_uptime(&abstime);
239 rtclock_tick_deadline[mycpu] = abstime;
240 rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
241 decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
242 mtdec(decr);
243 rtclock.last_decr[mycpu] = decr;
244
245 rtclock_initialized = TRUE;
246
247 return (1);
248 }
249
/*
 * View an UnsignedWide (hi/lo word pair) as a uint64_t scalar and back.
 * NOTE(review): type-puns through pointer casts; assumes the two layouts
 * coincide (big-endian high word first) -- true for this PPC target.
 */
#define UnsignedWide_to_scalar(x)	(*(uint64_t *)(x))
#define scalar_to_UnsignedWide(x)	(*(UnsignedWide *)(x))
252
253 /*
254 * Perform a full 64 bit by 32 bit unsigned multiply,
255 * yielding a 96 bit product. The most significant
256 * portion of the product is returned as a 64 bit
257 * quantity, with the lower portion as a 32 bit word.
258 */
259 static void
260 umul_64by32(
261 UnsignedWide now64,
262 uint32_t mult32,
263 UnsignedWide *result64,
264 uint32_t *result32)
265 {
266 uint32_t mid, mid2;
267
268 asm volatile(" mullw %0,%1,%2" :
269 "=r" (*result32) :
270 "r" (now64.lo), "r" (mult32));
271
272 asm volatile(" mullw %0,%1,%2" :
273 "=r" (mid2) :
274 "r" (now64.hi), "r" (mult32));
275 asm volatile(" mulhwu %0,%1,%2" :
276 "=r" (mid) :
277 "r" (now64.lo), "r" (mult32));
278
279 asm volatile(" mulhwu %0,%1,%2" :
280 "=r" (result64->hi) :
281 "r" (now64.hi), "r" (mult32));
282
283 asm volatile(" addc %0,%2,%3;
284 addze %1,%4" :
285 "=r" (result64->lo), "=r" (result64->hi) :
286 "r" (mid), "r" (mid2), "1" (result64->hi));
287 }
288
/*
 * Perform a partial 64 bit by 32 bit unsigned multiply,
 * yielding a 64 bit product. Only the least significant
 * 64 bits of the product are calculated and returned.
 */
static void
umul_64by32to64(
	UnsignedWide	now64,
	uint32_t	mult32,
	UnsignedWide	*result64)
{
	uint32_t	mid, mid2;

	/* low word: low 32 bits of now64.lo * mult32 */
	asm volatile("	mullw %0,%1,%2" :
			"=r" (result64->lo) :
				"r" (now64.lo), "r" (mult32));

	/* middle partial products; their sum becomes the high word */
	asm volatile("	mullw %0,%1,%2" :
			"=r" (mid2) :
				"r" (now64.hi), "r" (mult32));
	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (mid) :
				"r" (now64.lo), "r" (mult32));

	/* any carry out of the low 64 bits is deliberately discarded */
	asm volatile("	add %0,%1,%2" :
			"=r" (result64->hi) :
				"r" (mid), "r" (mid2));
}
317
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 96 bit quotient.
 * The most significant portion of the quotient is
 * returned as a 64 bit quantity, with the lower
 * portion as a 32 bit word.
 */
static void
udiv_96by32(
	UnsignedWide	now64,
	uint32_t	now32,
	uint32_t	div32,
	UnsignedWide	*result64,
	uint32_t	*result32)
{
	UnsignedWide	t64;

	if (now64.hi > 0 || now64.lo >= div32) {
		/* Upper 64 quotient bits: divide the 64-bit high part. */
		UnsignedWide_to_scalar(result64) =
			UnsignedWide_to_scalar(&now64) / div32;

		/* t64 = remainder of that division: now64 - quotient*div32. */
		umul_64by32to64(*result64, div32, &t64);

		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		/* Fold the remainder in with the low 32 dividend bits. */
		*result32 = (((uint64_t)t64.lo << 32) | now32) / div32;
	}
	else {
		/* Dividend fits in 64 bits: (now64.lo << 32) | now32. */
		UnsignedWide_to_scalar(result64) =
			(((uint64_t)now64.lo << 32) | now32) / div32;

		/* Shift the quotient down one word; top word is zero. */
		*result32 = result64->lo;
		result64->lo = result64->hi;
		result64->hi = 0;
	}
}
355
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 64 bit quotient.
 * Any higher order bits of the quotient are simply
 * discarded.
 */
static void
udiv_96by32to64(
	UnsignedWide	now64,
	uint32_t	now32,
	uint32_t	div32,
	UnsignedWide	*result64)
{
	UnsignedWide	t64;

	if (now64.hi > 0 || now64.lo >= div32) {
		/* Quotient of the 64-bit high part (upper bits, mostly dropped). */
		UnsignedWide_to_scalar(result64) =
			UnsignedWide_to_scalar(&now64) / div32;

		/* t64 = remainder: now64 - quotient * div32. */
		umul_64by32to64(*result64, div32, &t64);

		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		/* Keep only the low 64 quotient bits: shift down one word. */
		result64->hi = result64->lo;
		result64->lo = (((uint64_t)t64.lo << 32) | now32) / div32;
	}
	else {
		/* Dividend fits in 64 bits: single divide suffices. */
		UnsignedWide_to_scalar(result64) =
			(((uint64_t)now64.lo << 32) | now32) / div32;
	}
}
388
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 32 bit quotient,
 * and a 32 bit remainder. Any higher order bits
 * of the quotient are simply discarded.
 */
static void
udiv_96by32to32and32(
	UnsignedWide	now64,
	uint32_t	now32,
	uint32_t	div32,
	uint32_t	*result32,
	uint32_t	*remain32)
{
	UnsignedWide	t64, u64;

	if (now64.hi > 0 || now64.lo >= div32) {
		/* Reduce the 64-bit high part modulo div32 first. */
		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) / div32;

		umul_64by32to64(t64, div32, &t64);

		/* t64 = remainder of the high-part division. */
		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		/* Fold the remainder in with the low 32 dividend bits. */
		UnsignedWide_to_scalar(&t64) = ((uint64_t)t64.lo << 32) | now32;

		/* Quotient (low 32 bits kept) ... */
		UnsignedWide_to_scalar(&u64) =
			UnsignedWide_to_scalar(&t64) / div32;

		*result32 = u64.lo;

		/* ... and remainder = t64 - quotient * div32. */
		umul_64by32to64(u64, div32, &u64);

		*remain32 = UnsignedWide_to_scalar(&t64) -
						UnsignedWide_to_scalar(&u64);
	}
	else {
		/* Dividend effectively fits in 64 bits. */
		UnsignedWide_to_scalar(&t64) = ((uint64_t)now64.lo << 32) | now32;

		UnsignedWide_to_scalar(&u64) =
			UnsignedWide_to_scalar(&t64) / div32;

		*result32 = u64.lo;

		/* remainder = t64 - quotient * div32 */
		umul_64by32to64(u64, div32, &u64);

		*remain32 = UnsignedWide_to_scalar(&t64) -
						UnsignedWide_to_scalar(&u64);
	}
}
440
/*
 * Get the clock device time. This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 *
 * SMP configurations - *the processor clocks are synchronised*
 *
 * NOTE(review): reads rtclock.timebase_const without taking the RTC
 * lock; existing callers (calend_gettime, calend_settime,
 * clock_initialize_calendar) invoke it while holding LOCK_RTC --
 * confirm any new caller does the same.
 */
kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*time)	/* OUT */
{
	UnsignedWide	now;
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;

	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;

	/* current timebase reading */
	clock_get_uptime((uint64_t *)&now);

	/* nanoseconds = ticks * numer / denom, via 96-bit intermediate */
	umul_64by32(now, numer, &t64, &t32);

	udiv_96by32(t64, t32, denom, &t64, &t32);

	/* split nanoseconds into seconds and residual nanoseconds */
	udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
				&time->tv_sec, &time->tv_nsec);

	return (KERN_SUCCESS);
}
471
472 kern_return_t
473 sysclk_gettime(
474 mach_timespec_t *time) /* OUT */
475 {
476 UnsignedWide now;
477 UnsignedWide t64;
478 uint32_t t32;
479 uint32_t numer, denom;
480 spl_t s;
481
482 LOCK_RTC(s);
483 numer = rtclock.timebase_const.numer;
484 denom = rtclock.timebase_const.denom;
485 UNLOCK_RTC(s);
486
487 clock_get_uptime((uint64_t *)&now);
488
489 umul_64by32(now, numer, &t64, &t32);
490
491 udiv_96by32(t64, t32, denom, &t64, &t32);
492
493 udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
494 &time->tv_sec, &time->tv_nsec);
495
496 return (KERN_SUCCESS);
497 }
498
499 /*
500 * Get clock device attributes.
501 */
502 kern_return_t
503 sysclk_getattr(
504 clock_flavor_t flavor,
505 clock_attr_t attr, /* OUT */
506 mach_msg_type_number_t *count) /* IN/OUT */
507 {
508 spl_t s;
509
510 if (*count != 1)
511 return (KERN_FAILURE);
512 switch (flavor) {
513
514 case CLOCK_GET_TIME_RES: /* >0 res */
515 case CLOCK_ALARM_CURRES: /* =0 no alarm */
516 case CLOCK_ALARM_MINRES:
517 case CLOCK_ALARM_MAXRES:
518 LOCK_RTC(s);
519 *(clock_res_t *) attr = RTC_TICKPERIOD;
520 UNLOCK_RTC(s);
521 break;
522
523 default:
524 return (KERN_INVALID_VALUE);
525 }
526 return (KERN_SUCCESS);
527 }
528
529 /*
530 * Set deadline for the next alarm on the clock device. This call
531 * always resets the time to deliver an alarm for the clock.
532 */
533 void
534 sysclk_setalarm(
535 mach_timespec_t *deadline)
536 {
537 uint64_t abstime;
538
539 timespec_to_absolutetime(*deadline, &abstime);
540 timer_call_enter(&rtclock.alarm[cpu_number()], abstime);
541 }
542
/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
	return 1;	/* nothing to configure */
}
551
552 /*
553 * Initialize the calendar clock.
554 */
555 int
556 calend_init(void)
557 {
558 if (cpu_number() != master_cpu)
559 return(1);
560
561 return (1);
562 }
563
564 /*
565 * Get the current clock time.
566 */
567 kern_return_t
568 calend_gettime(
569 mach_timespec_t *curr_time) /* OUT */
570 {
571 spl_t s;
572
573 LOCK_RTC(s);
574 if (!rtclock.calend_is_set) {
575 UNLOCK_RTC(s);
576 return (KERN_FAILURE);
577 }
578
579 (void) sysclk_gettime_internal(curr_time);
580 ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset);
581 UNLOCK_RTC(s);
582
583 return (KERN_SUCCESS);
584 }
585
586 /*
587 * Set the current clock time.
588 */
589 kern_return_t
590 calend_settime(
591 mach_timespec_t *new_time)
592 {
593 mach_timespec_t curr_time;
594 spl_t s;
595
596 LOCK_RTC(s);
597 (void) sysclk_gettime_internal(&curr_time);
598 rtclock.calend_offset = *new_time;
599 SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
600 rtclock.calend_is_set = TRUE;
601 UNLOCK_RTC(s);
602
603 PESetGMTTimeOfDay(new_time->tv_sec);
604
605 return (KERN_SUCCESS);
606 }
607
608 /*
609 * Get clock device attributes.
610 */
611 kern_return_t
612 calend_getattr(
613 clock_flavor_t flavor,
614 clock_attr_t attr, /* OUT */
615 mach_msg_type_number_t *count) /* IN/OUT */
616 {
617 spl_t s;
618
619 if (*count != 1)
620 return (KERN_FAILURE);
621 switch (flavor) {
622
623 case CLOCK_GET_TIME_RES: /* >0 res */
624 LOCK_RTC(s);
625 *(clock_res_t *) attr = RTC_TICKPERIOD;
626 UNLOCK_RTC(s);
627 break;
628
629 case CLOCK_ALARM_CURRES: /* =0 no alarm */
630 case CLOCK_ALARM_MINRES:
631 case CLOCK_ALARM_MAXRES:
632 *(clock_res_t *) attr = 0;
633 break;
634
635 default:
636 return (KERN_INVALID_VALUE);
637 }
638 return (KERN_SUCCESS);
639 }
640
641 void
642 clock_adjust_calendar(
643 clock_res_t nsec)
644 {
645 spl_t s;
646
647 LOCK_RTC(s);
648 if (rtclock.calend_is_set)
649 ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
650 UNLOCK_RTC(s);
651 }
652
653 void
654 clock_initialize_calendar(void)
655 {
656 mach_timespec_t curr_time;
657 long seconds = PEGetGMTTimeOfDay();
658 spl_t s;
659
660 LOCK_RTC(s);
661 (void) sysclk_gettime_internal(&curr_time);
662 if (curr_time.tv_nsec < 500*USEC_PER_SEC)
663 rtclock.calend_offset.tv_sec = seconds;
664 else
665 rtclock.calend_offset.tv_sec = seconds + 1;
666 rtclock.calend_offset.tv_nsec = 0;
667 SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
668 rtclock.calend_is_set = TRUE;
669 UNLOCK_RTC(s);
670 }
671
672 mach_timespec_t
673 clock_get_calendar_offset(void)
674 {
675 mach_timespec_t result = MACH_TIMESPEC_ZERO;
676 spl_t s;
677
678 LOCK_RTC(s);
679 if (rtclock.calend_is_set)
680 result = rtclock.calend_offset;
681 UNLOCK_RTC(s);
682
683 return (result);
684 }
685
686 void
687 clock_timebase_info(
688 mach_timebase_info_t info)
689 {
690 spl_t s;
691
692 LOCK_RTC(s);
693 *info = rtclock.timebase_const;
694 UNLOCK_RTC(s);
695 }
696
/*
 * Arm the current processor's timer for `deadline' (absolute time).
 * If the new deadline falls before this cpu's next periodic tick,
 * the decrementer is reprogrammed immediately; otherwise the next
 * tick interrupt will service the deadline.
 */
void
clock_set_timer_deadline(
	uint64_t		deadline)
{
	uint64_t		abstime;
	int			decr, mycpu;
	struct rtclock_timer	*mytimer;
	spl_t			s;

	s = splclock();
	mycpu = cpu_number();
	mytimer = &rtclock.timer[mycpu];
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;		/* debugging trace */
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if ( mytimer->deadline < rtclock_tick_deadline[mycpu] ) {
		decr = deadline_to_decrementer(mytimer->deadline, abstime);
		/*
		 * Debugging knob: when rtclock_decrementer_min is non-zero
		 * it caps how far out the decrementer may be programmed.
		 */
		if ( rtclock_decrementer_min != 0 &&
				rtclock_decrementer_min < (natural_t)decr )
			decr = rtclock_decrementer_min;

		mtdec(decr);
		rtclock.last_decr[mycpu] = decr;

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
						| DBG_FUNC_NONE, decr, 2, 0, 0, 0);
	}
	splx(s);
}
727
728 void
729 clock_set_timer_func(
730 clock_timer_func_t func)
731 {
732 spl_t s;
733
734 LOCK_RTC(s);
735 if (rtclock.timer_expire == NULL)
736 rtclock.timer_expire = func;
737 UNLOCK_RTC(s);
738 }
739
/*
 * Reset the clock device. This causes the realtime clock
 * device to reload its mode and count value (frequency).
 */
void
rtclock_reset(void)
{
	/* Nothing to do on this platform. */
	return;
}
749
/*
 * Real-time clock device interrupt.
 *
 * Runs on each decrementer interrupt.  In order: services the
 * periodic hertz tick if due, expires the cpu's armed timer if its
 * deadline has passed, then reprograms the decrementer for the
 * earlier of the next tick and the timer deadline.
 */
void
rtclock_intr(
	int			device,
	struct ppc_saved_state	*ssp,
	spl_t			old_spl)
{
	uint64_t		abstime;
	int			decr[3], mycpu = cpu_number();
	struct rtclock_timer	*mytimer = &rtclock.timer[mycpu];

	/*
	 * We may receive interrupts too early, we must reject them.
	 */
	if (rtclock_initialized == FALSE) {
		mtdec(DECREMENTER_MAX);		/* Max the decrementer if not init */
		return;
	}

	decr[1] = decr[2] = DECREMENTER_MAX;	/* decr[0] is unused */

	/* Periodic tick: charge the interrupted context and rearm. */
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	if ( rtclock_tick_deadline[mycpu] <= abstime ) {
		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
							&rtclock_tick_deadline[mycpu]);
		hertz_tick(USER_MODE(ssp->srr1), ssp->srr0);
	}

	/* Expire the per-cpu timer if its deadline has passed. */
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	if ( mytimer->is_set &&
			mytimer->deadline <= abstime ) {
		mytimer->is_set = FALSE;
		(*rtclock.timer_expire)(abstime);
	}

	/* Reprogram for the earlier of the next tick and the timer. */
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

	if (mytimer->is_set)
		decr[2] = deadline_to_decrementer(mytimer->deadline, abstime);

	if (decr[1] > decr[2])
		decr[1] = decr[2];

	/* Debugging knob: cap the programmed decrement if set. */
	if ( rtclock_decrementer_min != 0 &&
			rtclock_decrementer_min < (natural_t)decr[1] )
		decr[1] = rtclock_decrementer_min;

	mtdec(decr[1]);
	rtclock.last_decr[mycpu] = decr[1];

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
					| DBG_FUNC_NONE, decr[1], 3, 0, 0, 0);
}
809
810 static void
811 rtclock_alarm_timer(
812 timer_call_param_t p0,
813 timer_call_param_t p1)
814 {
815 mach_timespec_t timestamp;
816
817 (void) sysclk_gettime(&timestamp);
818
819 clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
820 }
821
/*
 * Return the 64-bit processor timebase value (system "uptime"
 * in absolute-time units).
 *
 * The timebase is read as two 32-bit halves; the upper half is
 * read again afterwards and the whole sequence retried if it
 * changed, so a carry from low to high word between reads cannot
 * produce a torn value.
 */
void
clock_get_uptime(
	uint64_t	*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	uint32_t	hi, lo, hic;

	do {
		asm volatile("	mftbu %0" : "=r" (hi));		/* upper timebase word */
		asm volatile("	mftb %0" : "=r" (lo));		/* lower timebase word */
		asm volatile("	mftbu %0" : "=r" (hic));	/* re-read upper word */
	} while (hic != hi);

	result->lo = lo;
	result->hi = hi;
}
838
839 static int
840 deadline_to_decrementer(
841 uint64_t deadline,
842 uint64_t now)
843 {
844 uint64_t delt;
845
846 if (deadline <= now)
847 return DECREMENTER_MIN;
848 else {
849 delt = deadline - now;
850 return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
851 ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
852 }
853 }
854
/*
 * Convert a mach_timespec_t (seconds + nanoseconds) into an
 * absolute-time (timebase-tick) value:
 * ticks = (sec * NSEC_PER_SEC + nsec) * denom / numer.
 */
static void
timespec_to_absolutetime(
	mach_timespec_t	timespec,
	uint64_t	*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	/* Snapshot the conversion factors under the clock lock. */
	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	/* t64 = tv_sec * NSEC_PER_SEC, as a full 64-bit product. */
	asm volatile("	mullw %0,%1,%2" :
						"=r" (t64.lo) :
							"r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

	asm volatile("	mulhwu %0,%1,%2" :
						"=r" (t64.hi) :
							"r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

	/* add in the nanosecond part */
	UnsignedWide_to_scalar(&t64) += timespec.tv_nsec;

	/* ticks = nanoseconds * denom / numer (inverse of tick->ns) */
	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32(t64, t32, numer, &t64, &t32);

	/*
	 * NOTE(review): the middle and low words of the 96-bit quotient
	 * form the 64-bit result; the top word is assumed to be zero.
	 */
	result->hi = t64.lo;
	result->lo = t32;
}
888
889 void
890 clock_interval_to_deadline(
891 uint32_t interval,
892 uint32_t scale_factor,
893 uint64_t *result)
894 {
895 uint64_t abstime;
896
897 clock_get_uptime(result);
898
899 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
900
901 *result += abstime;
902 }
903
/*
 * Convert (interval * scale_factor) nanoseconds into an
 * absolute-time (timebase-tick) interval:
 * ticks = interval * scale_factor * denom / numer.
 */
void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	/* Snapshot the conversion factors under the clock lock. */
	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	/* t64 = interval * scale_factor, as a full 64-bit product. */
	asm volatile("	mullw %0,%1,%2" :
					"=r" (t64.lo) :
						"r" (interval), "r" (scale_factor));
	asm volatile("	mulhwu %0,%1,%2" :
					"=r" (t64.hi) :
						"r" (interval), "r" (scale_factor));

	/* ticks = nanoseconds * denom / numer */
	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32(t64, t32, numer, &t64, &t32);

	/*
	 * NOTE(review): the middle and low words of the 96-bit quotient
	 * form the 64-bit result; the top word is assumed to be zero.
	 */
	result->hi = t64.lo;
	result->lo = t32;
}
935
936 void
937 clock_absolutetime_interval_to_deadline(
938 uint64_t abstime,
939 uint64_t *result)
940 {
941 clock_get_uptime(result);
942
943 *result += abstime;
944 }
945
/*
 * Convert an absolute-time (timebase-tick) value to nanoseconds:
 * result = abstime * numer / denom.
 */
void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	/* Snapshot the conversion factors under the clock lock. */
	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	UnsignedWide_to_scalar(&t64) = abstime;

	/* 96-bit intermediate product avoids overflow before dividing. */
	umul_64by32(t64, numer, &t64, &t32);

	udiv_96by32to64(t64, t32, denom, (void *)result);
}
967
/*
 * Convert nanoseconds to absolute-time (timebase-tick) units:
 * result = nanoseconds * denom / numer.
 */
void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	/* Snapshot the conversion factors under the clock lock. */
	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	UnsignedWide_to_scalar(&t64) = nanoseconds;

	/* 96-bit intermediate product avoids overflow before dividing. */
	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32to64(t64, t32, numer, (void *)result);
}
989
990 /*
991 * Spin-loop delay primitives.
992 */
993 void
994 delay_for_interval(
995 uint32_t interval,
996 uint32_t scale_factor)
997 {
998 uint64_t now, end;
999
1000 clock_interval_to_deadline(interval, scale_factor, &end);
1001
1002 do {
1003 clock_get_uptime(&now);
1004 } while (now < end);
1005 }
1006
1007 void
1008 clock_delay_until(
1009 uint64_t deadline)
1010 {
1011 uint64_t now;
1012
1013 do {
1014 clock_get_uptime(&now);
1015 } while (now < deadline);
1016 }
1017
1018 void
1019 delay(
1020 int usec)
1021 {
1022 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
1023 }