/*
 * Source: Apple XNU (release xnu-344.12.2), osfmk/ppc/rtclock.c.
 */
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * @APPLE_FREE_COPYRIGHT@
27 */
28/*
29 * File: rtclock.c
30 * Purpose: Routines for handling the machine dependent
31 * real-time clock.
32 */
33
34#include <libkern/OSTypes.h>
35
36#include <mach/mach_types.h>
37
38#include <kern/clock.h>
39#include <kern/thread.h>
40#include <kern/macro_help.h>
41#include <kern/spl.h>
42
43#include <machine/mach_param.h> /* HZ */
44#include <ppc/proc_reg.h>
45
46#include <pexpert/pexpert.h>
47
48#include <sys/kdebug.h>
49
/* Forward declarations for the system (real-time) clock device. */
int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	sysclk_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

void		sysclk_setalarm(
	mach_timespec_t			*deadline);

/*
 * Operations vector for the system clock.  The zero slots
 * (settime, setattr) are operations this device does not support.
 */
struct clock_ops sysclk_ops = {
	sysclk_config,		sysclk_init,
	sysclk_gettime,		0,
	sysclk_getattr,		0,
	sysclk_setalarm,
};
71
/* Forward declarations for the calendar (wall-time) clock device. */
int		calend_config(void);

int		calend_init(void);

kern_return_t	calend_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_settime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

/*
 * Operations vector for the calendar clock.  The zero slots
 * (setattr, setalarm) are operations this device does not support.
 */
struct clock_ops calend_ops = {
	calend_config,		calend_init,
	calend_gettime,		calend_settime,
	calend_getattr,		0,
	0,
};
93
/* local data declarations */

/*
 * Consolidated state for the machine-dependent real-time clock.
 * Fields are protected by `lock' (taken via LOCK_RTC/UNLOCK_RTC)
 * except where an accessor documents otherwise.
 */
static struct rtclock {
	mach_timespec_t		calend_offset;	/* calendar time - system clock time */
	boolean_t		calend_is_set;	/* TRUE once the calendar is valid */

	/* numer/denom ratio converting time-base ticks to nanoseconds */
	mach_timebase_info_data_t	timebase_const;

	struct rtclock_timer {
		uint64_t		deadline;	/* next event-timer expiry (abs time) */
		boolean_t		is_set;		/* deadline is armed */
	}			timer[NCPUS];	/* one event timer per processor */

	clock_timer_func_t	timer_expire;	/* callout run on timer expiry */

	timer_call_data_t	alarm_timer;	/* backs sysclk_setalarm() */

	/* debugging */
	uint64_t		last_abstime[NCPUS];	/* last sampled uptime, per CPU */
	int			last_decr[NCPUS];	/* last value written to DEC, per CPU */

	decl_simple_lock_data(,lock)	/* real-time clock device lock */
} rtclock;

/* Set by the master processor in sysclk_init(); slaves panic if early. */
static boolean_t		rtclock_initialized;

/* Per-CPU deadline of the next periodic tick, and the tick period. */
static uint64_t		rtclock_tick_deadline[NCPUS];
static uint64_t		rtclock_tick_interval;

static void	timespec_to_absolutetime(
			mach_timespec_t		timespec,
			uint64_t		*result);

static int	deadline_to_decrementer(
			uint64_t		deadline,
			uint64_t		now);

static void	rtclock_alarm_timer(
			timer_call_param_t	p0,
			timer_call_param_t	p1);

/* global data declarations */

/* Nanoseconds between periodic clock ticks. */
#define RTC_TICKPERIOD	(NSEC_PER_SEC / HZ)

/* Bounds for values programmed into the decrementer register. */
#define DECREMENTER_MAX		0x7FFFFFFFUL
#define DECREMENTER_MIN		0xAUL

/* When non-zero, caps the programmed decrementer at this value (debug knob). */
natural_t		rtclock_decrementer_min;

/*
 * Macros to lock/unlock real-time clock device:  raise to clock
 * spl level, then take the device simple lock (and the reverse).
 */
#define LOCK_RTC(s)					\
MACRO_BEGIN						\
	(s) = splclock();				\
	simple_lock(&rtclock.lock);			\
MACRO_END

#define UNLOCK_RTC(s)					\
MACRO_BEGIN						\
	simple_unlock(&rtclock.lock);			\
	splx(s);					\
MACRO_END
158
159static void
160timebase_callback(
161 struct timebase_freq_t *freq)
162{
163 natural_t numer, denom;
164 int n;
165 spl_t s;
166
167 denom = freq->timebase_num;
168 n = 9;
169 while (!(denom % 10)) {
170 if (n < 1)
171 break;
172 denom /= 10;
173 n--;
174 }
175
176 numer = freq->timebase_den;
177 while (n-- > 0) {
178 numer *= 10;
179 }
180
181 LOCK_RTC(s);
182 rtclock.timebase_const.numer = numer;
183 rtclock.timebase_const.denom = denom;
184 UNLOCK_RTC(s);
185}
186
187/*
188 * Configure the real-time clock device.
189 */
190int
191sysclk_config(void)
192{
193 if (cpu_number() != master_cpu)
194 return(1);
195
196 timer_call_setup(&rtclock.alarm_timer, rtclock_alarm_timer, NULL);
197
198 simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK);
199
200 PE_register_timebase_callback(timebase_callback);
201
202 return (1);
203}
204
205/*
206 * Initialize the system clock device.
207 */
208int
209sysclk_init(void)
210{
211 uint64_t abstime;
212 int decr, mycpu = cpu_number();
213
214 if (mycpu != master_cpu) {
215 if (rtclock_initialized == FALSE) {
216 panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
217 }
218 /* Set decrementer and hence our next tick due */
219 clock_get_uptime(&abstime);
220 rtclock_tick_deadline[mycpu] = abstime;
221 rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
222 decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
223 mtdec(decr);
224 rtclock.last_decr[mycpu] = decr;
225
226 return(1);
227 }
228
229 /*
230 * Initialize non-zero clock structure values.
231 */
232 clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1,
233 &rtclock_tick_interval);
234 /* Set decrementer and our next tick due */
235 clock_get_uptime(&abstime);
236 rtclock_tick_deadline[mycpu] = abstime;
237 rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
238 decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
239 mtdec(decr);
240 rtclock.last_decr[mycpu] = decr;
241
242 rtclock_initialized = TRUE;
243
244 return (1);
245}
246
/*
 * Reinterpret a {hi,lo} UnsignedWide in place as a uint64_t scalar
 * (and the reverse).  NOTE(review): these casts type-pun through
 * incompatible pointers and assume big-endian word order; they
 * predate strict-aliasing enforcement — do not reuse elsewhere.
 */
#define UnsignedWide_to_scalar(x)	(*(uint64_t *)(x))
#define scalar_to_UnsignedWide(x)	(*(UnsignedWide *)(x))
249
250/*
251 * Perform a full 64 bit by 32 bit unsigned multiply,
252 * yielding a 96 bit product. The most significant
253 * portion of the product is returned as a 64 bit
254 * quantity, with the lower portion as a 32 bit word.
255 */
256static void
257umul_64by32(
258 UnsignedWide now64,
259 uint32_t mult32,
260 UnsignedWide *result64,
261 uint32_t *result32)
262{
263 uint32_t mid, mid2;
264
265 asm volatile(" mullw %0,%1,%2" :
266 "=r" (*result32) :
267 "r" (now64.lo), "r" (mult32));
268
269 asm volatile(" mullw %0,%1,%2" :
270 "=r" (mid2) :
271 "r" (now64.hi), "r" (mult32));
272 asm volatile(" mulhwu %0,%1,%2" :
273 "=r" (mid) :
274 "r" (now64.lo), "r" (mult32));
275
276 asm volatile(" mulhwu %0,%1,%2" :
277 "=r" (result64->hi) :
278 "r" (now64.hi), "r" (mult32));
279
280 asm volatile(" addc %0,%2,%3;
281 addze %1,%4" :
282 "=r" (result64->lo), "=r" (result64->hi) :
283 "r" (mid), "r" (mid2), "1" (result64->hi));
284}
285
/*
 * Perform a partial 64 bit by 32 bit unsigned multiply,
 * yielding a 64 bit product.  Only the least significant
 * 64 bits of the product are calculated and returned.
 */
static void
umul_64by32to64(
	UnsignedWide	now64,
	uint32_t	mult32,
	UnsignedWide	*result64)
{
	uint32_t	mid, mid2;

	/* low word of the product:  low 32 bits of now64.lo * mult32 */
	asm volatile("	mullw %0,%1,%2" :
			"=r" (result64->lo) :
				"r" (now64.lo), "r" (mult32));

	/* mid2 = low 32 bits of now64.hi * mult32 */
	asm volatile("	mullw %0,%1,%2" :
			"=r" (mid2) :
				"r" (now64.hi), "r" (mult32));
	/* mid = high 32 bits (carry-out) of now64.lo * mult32 */
	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (mid) :
				"r" (now64.lo), "r" (mult32));

	/* high word of the truncated product; overflow past bit 63
	   is discarded by design */
	asm volatile("	add %0,%1,%2" :
			"=r" (result64->hi) :
				"r" (mid), "r" (mid2));
}
314
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 96 bit quotient.
 * The most significant portion of the quotient is
 * returned as a 64 bit quantity, with the lower
 * portion as a 32 bit word.
 *
 * The dividend is ((now64 << 32) | now32).
 */
static void
udiv_96by32(
	UnsignedWide	now64,
	uint32_t	now32,
	uint32_t	div32,
	UnsignedWide	*result64,
	uint32_t	*result32)
{
	UnsignedWide	t64;

	if (now64.hi > 0 || now64.lo >= div32) {
		/* High 64 quotient bits:  upper dividend / divisor. */
		UnsignedWide_to_scalar(result64) =
			UnsignedWide_to_scalar(&now64) / div32;

		/* t64 = remainder of that division ... */
		umul_64by32to64(*result64, div32, &t64);

		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		/* ... which is < div32, so (t64.lo:now32)/div32 fits 32 bits. */
		*result32 = (((uint64_t)t64.lo << 32) | now32) / div32;
	}
	else {
		/* Upper quotient is zero:  one 64/32 divide suffices;
		   shift its 64-bit quotient into the hi/lo return form. */
		UnsignedWide_to_scalar(result64) =
			(((uint64_t)now64.lo << 32) | now32) / div32;

		*result32 = result64->lo;
		result64->lo = result64->hi;
		result64->hi = 0;
	}
}
352
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 64 bit quotient.
 * Any higher order bits of the quotient are simply
 * discarded.
 *
 * The dividend is ((now64 << 32) | now32).
 */
static void
udiv_96by32to64(
	UnsignedWide	now64,
	uint32_t	now32,
	uint32_t	div32,
	UnsignedWide	*result64)
{
	UnsignedWide	t64;

	if (now64.hi > 0 || now64.lo >= div32) {
		/* Quotient of the upper 64 dividend bits; only its low
		   32 bits survive into the returned high word. */
		UnsignedWide_to_scalar(result64) =
			UnsignedWide_to_scalar(&now64) / div32;

		/* t64 = remainder of that division */
		umul_64by32to64(*result64, div32, &t64);

		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		result64->hi = result64->lo;
		result64->lo = (((uint64_t)t64.lo << 32) | now32) / div32;
	}
	else {
		/* Upper quotient is zero:  one 64/32 divide suffices. */
		UnsignedWide_to_scalar(result64) =
			(((uint64_t)now64.lo << 32) | now32) / div32;
	}
}
385
/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 32 bit quotient,
 * and a 32 bit remainder.  Any higher order bits
 * of the quotient are simply discarded.
 *
 * The dividend is ((now64 << 32) | now32).
 */
static void
udiv_96by32to32and32(
	UnsignedWide	now64,
	uint32_t	now32,
	uint32_t	div32,
	uint32_t	*result32,
	uint32_t	*remain32)
{
	UnsignedWide	t64, u64;

	if (now64.hi > 0 || now64.lo >= div32) {
		/* First reduce the upper 64 dividend bits to their
		   remainder modulo div32 ... */
		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) / div32;

		umul_64by32to64(t64, div32, &t64);

		UnsignedWide_to_scalar(&t64) =
			UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

		/* ... then divide (remainder:now32) to get the low
		   quotient word and final remainder. */
		UnsignedWide_to_scalar(&t64) = ((uint64_t)t64.lo << 32) | now32;

		UnsignedWide_to_scalar(&u64) =
			UnsignedWide_to_scalar(&t64) / div32;

		*result32 = u64.lo;

		/* remainder = dividend - quotient * divisor */
		umul_64by32to64(u64, div32, &u64);

		*remain32 = UnsignedWide_to_scalar(&t64) -
				UnsignedWide_to_scalar(&u64);
	}
	else {
		/* Upper quotient is zero:  divide the low 64 bits only. */
		UnsignedWide_to_scalar(&t64) = ((uint64_t)now64.lo << 32) | now32;

		UnsignedWide_to_scalar(&u64) =
			UnsignedWide_to_scalar(&t64) / div32;

		*result32 = u64.lo;

		/* remainder = dividend - quotient * divisor */
		umul_64by32to64(u64, div32, &u64);

		*remain32 = UnsignedWide_to_scalar(&t64) -
				UnsignedWide_to_scalar(&u64);
	}
}
437
/*
 * Get the clock device time. This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 *
 * SMP configurations - *the processor clocks are synchronised*
 *
 * NOTE(review): reads rtclock.timebase_const without taking the
 * RTC lock; callers in this file (calend_gettime, calend_settime,
 * clock_initialize_calendar) invoke it with LOCK_RTC already held.
 */
kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*time)	/* OUT */
{
	UnsignedWide	now;
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;

	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;

	clock_get_uptime((uint64_t *)&now);

	/* nanoseconds = now * numer / denom, via 96-bit intermediate */
	umul_64by32(now, numer, &t64, &t32);

	udiv_96by32(t64, t32, denom, &t64, &t32);

	/* split total nanoseconds into seconds + residual nanoseconds */
	udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
				&time->tv_sec, &time->tv_nsec);

	return (KERN_SUCCESS);
}
468
469kern_return_t
470sysclk_gettime(
471 mach_timespec_t *time) /* OUT */
472{
473 UnsignedWide now;
474 UnsignedWide t64;
475 uint32_t t32;
476 uint32_t numer, denom;
477 spl_t s;
478
479 LOCK_RTC(s);
480 numer = rtclock.timebase_const.numer;
481 denom = rtclock.timebase_const.denom;
482 UNLOCK_RTC(s);
483
484 clock_get_uptime((uint64_t *)&now);
485
486 umul_64by32(now, numer, &t64, &t32);
487
488 udiv_96by32(t64, t32, denom, &t64, &t32);
489
490 udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
491 &time->tv_sec, &time->tv_nsec);
492
493 return (KERN_SUCCESS);
494}
495
496/*
497 * Get clock device attributes.
498 */
499kern_return_t
500sysclk_getattr(
501 clock_flavor_t flavor,
502 clock_attr_t attr, /* OUT */
503 mach_msg_type_number_t *count) /* IN/OUT */
504{
505 spl_t s;
506
507 if (*count != 1)
508 return (KERN_FAILURE);
509 switch (flavor) {
510
511 case CLOCK_GET_TIME_RES: /* >0 res */
512 case CLOCK_ALARM_CURRES: /* =0 no alarm */
513 case CLOCK_ALARM_MINRES:
514 case CLOCK_ALARM_MAXRES:
515 LOCK_RTC(s);
516 *(clock_res_t *) attr = RTC_TICKPERIOD;
517 UNLOCK_RTC(s);
518 break;
519
520 default:
521 return (KERN_INVALID_VALUE);
522 }
523 return (KERN_SUCCESS);
524}
525
526/*
527 * Set deadline for the next alarm on the clock device. This call
528 * always resets the time to deliver an alarm for the clock.
529 */
530void
531sysclk_setalarm(
532 mach_timespec_t *deadline)
533{
534 uint64_t abstime;
535
536 timespec_to_absolutetime(*deadline, &abstime);
537 timer_call_enter(&rtclock.alarm_timer, abstime);
538}
539
/*
 * Configure the calendar clock.  Nothing to do beyond reporting
 * success.
 */
int
calend_config(void)
{
	return 1;
}
548
549/*
550 * Initialize the calendar clock.
551 */
552int
553calend_init(void)
554{
555 if (cpu_number() != master_cpu)
556 return(1);
557
558 return (1);
559}
560
/*
 * Get the current calendar (wall) time:  the system clock time
 * plus the calendar offset.  Fails until the calendar has been
 * set (calend_settime or clock_initialize_calendar).
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*curr_time)	/* OUT */
{
	spl_t		s;

	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		UNLOCK_RTC(s);
		return (KERN_FAILURE);
	}

	/* lock is held, so use the lock-free internal variant */
	(void) sysclk_gettime_internal(curr_time);
	ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset);
	UNLOCK_RTC(s);

	return (KERN_SUCCESS);
}
582
/*
 * Set the current calendar time:  record the new time as an
 * offset from the free-running system clock, then publish the
 * seconds value to the platform expert.
 */
kern_return_t
calend_settime(
	mach_timespec_t	*new_time)
{
	mach_timespec_t	curr_time;
	spl_t		s;

	LOCK_RTC(s);
	/* offset = new wall time - current system clock time */
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset = *new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	UNLOCK_RTC(s);

	/* NOTE(review): intentionally outside the lock — presumably
	   updates the platform's persistent clock; confirm cost */
	PESetGMTTimeOfDay(new_time->tv_sec);

	return (KERN_SUCCESS);
}
604
605/*
606 * Get clock device attributes.
607 */
608kern_return_t
609calend_getattr(
610 clock_flavor_t flavor,
611 clock_attr_t attr, /* OUT */
612 mach_msg_type_number_t *count) /* IN/OUT */
613{
614 spl_t s;
615
616 if (*count != 1)
617 return (KERN_FAILURE);
618 switch (flavor) {
619
620 case CLOCK_GET_TIME_RES: /* >0 res */
621 LOCK_RTC(s);
622 *(clock_res_t *) attr = RTC_TICKPERIOD;
623 UNLOCK_RTC(s);
624 break;
625
626 case CLOCK_ALARM_CURRES: /* =0 no alarm */
627 case CLOCK_ALARM_MINRES:
628 case CLOCK_ALARM_MAXRES:
629 *(clock_res_t *) attr = 0;
630 break;
631
632 default:
633 return (KERN_INVALID_VALUE);
634 }
635 return (KERN_SUCCESS);
636}
637
638void
639clock_adjust_calendar(
640 clock_res_t nsec)
641{
642 spl_t s;
643
644 LOCK_RTC(s);
645 if (rtclock.calend_is_set)
646 ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
647 UNLOCK_RTC(s);
648}
649
/*
 * Initialize the calendar from the platform's time-of-day value,
 * rounding to the nearest second boundary:  if the system clock is
 * past the half-second mark, credit the seconds value to the next
 * second.
 */
void
clock_initialize_calendar(void)
{
	mach_timespec_t	curr_time;
	long		seconds = PEGetGMTTimeOfDay();
	spl_t		s;

	LOCK_RTC(s);
	(void) sysclk_gettime_internal(&curr_time);
	/* 500*USEC_PER_SEC nanoseconds == one half second */
	if (curr_time.tv_nsec < 500*USEC_PER_SEC)
		rtclock.calend_offset.tv_sec = seconds;
	else
		rtclock.calend_offset.tv_sec = seconds + 1;
	rtclock.calend_offset.tv_nsec = 0;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	UNLOCK_RTC(s);
}
668
669mach_timespec_t
670clock_get_calendar_offset(void)
671{
672 mach_timespec_t result = MACH_TIMESPEC_ZERO;
673 spl_t s;
674
675 LOCK_RTC(s);
676 if (rtclock.calend_is_set)
677 result = rtclock.calend_offset;
678 UNLOCK_RTC(s);
679
680 return (result);
681}
682
683void
684clock_timebase_info(
685 mach_timebase_info_t info)
686{
687 spl_t s;
688
689 LOCK_RTC(s);
690 *info = rtclock.timebase_const;
691 UNLOCK_RTC(s);
692}
693
/*
 * Arm this processor's event timer for `deadline'.  If the new
 * deadline precedes the next periodic tick, reprogram the
 * decrementer immediately; otherwise the interrupt path will pick
 * the timer up when it next reloads the decrementer.
 */
void
clock_set_timer_deadline(
	uint64_t		deadline)
{
	uint64_t		abstime;
	int			decr, mycpu;
	struct rtclock_timer	*mytimer;
	spl_t			s;

	s = splclock();		/* per-CPU state: block clock interrupts */
	mycpu = cpu_number();
	mytimer = &rtclock.timer[mycpu];
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if ( mytimer->deadline < rtclock_tick_deadline[mycpu] ) {
		decr = deadline_to_decrementer(mytimer->deadline, abstime);
		/* debug knob:  cap the programmed decrementer */
		if ( rtclock_decrementer_min != 0 &&
				rtclock_decrementer_min < (natural_t)decr )
			decr = rtclock_decrementer_min;

		mtdec(decr);
		rtclock.last_decr[mycpu] = decr;

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
						| DBG_FUNC_NONE, decr, 2, 0, 0, 0);
	}
	splx(s);
}
724
725void
726clock_set_timer_func(
727 clock_timer_func_t func)
728{
729 spl_t s;
730
731 LOCK_RTC(s);
732 if (rtclock.timer_expire == NULL)
733 rtclock.timer_expire = func;
734 UNLOCK_RTC(s);
735}
736
/*
 * Reset the clock device. This causes the realtime clock
 * device to reload its mode and count value (frequency).
 * Nothing to do for the decrementer-based PowerPC clock.
 */
void
rtclock_reset(void)
{
}
746
/*
 * Real-time clock device interrupt.  Runs the periodic hertz tick
 * and any expired per-CPU event timer, then reloads the
 * decrementer for whichever deadline comes first.
 */
void
rtclock_intr(
	int			device,
	struct savearea		*ssp,
	spl_t			old_spl)
{
	uint64_t		abstime;
	int			decr[3], mycpu = cpu_number();
	struct rtclock_timer	*mytimer = &rtclock.timer[mycpu];

	/*
	 * We may receive interrupts too early, we must reject them.
	 */
	if (rtclock_initialized == FALSE) {
		mtdec(DECREMENTER_MAX);		/* Max the decrementer if not init */
		return;
	}

	decr[1] = decr[2] = DECREMENTER_MAX;	/* decr[0] is unused */

	/* periodic tick:  advance the deadline and account the tick */
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	if ( rtclock_tick_deadline[mycpu] <= abstime ) {
		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
						&rtclock_tick_deadline[mycpu]);
		hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
	}

	/* event timer:  fire the registered callout if expired */
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	if ( mytimer->is_set &&
			mytimer->deadline <= abstime ) {
		mytimer->is_set = FALSE;
		(*rtclock.timer_expire)(abstime);
	}

	/* reload the decrementer for the earlier remaining deadline */
	clock_get_uptime(&abstime);
	rtclock.last_abstime[mycpu] = abstime;
	decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

	if (mytimer->is_set)
		decr[2] = deadline_to_decrementer(mytimer->deadline, abstime);

	if (decr[1] > decr[2])
		decr[1] = decr[2];

	/* debug knob:  cap the programmed decrementer */
	if ( rtclock_decrementer_min != 0 &&
			rtclock_decrementer_min < (natural_t)decr[1] )
		decr[1] = rtclock_decrementer_min;

	mtdec(decr[1]);
	rtclock.last_decr[mycpu] = decr[1];

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
					| DBG_FUNC_NONE, decr[1], 3, 0, 0, 0);
}
806
807static void
808rtclock_alarm_timer(
809 timer_call_param_t p0,
810 timer_call_param_t p1)
811{
812 mach_timespec_t timestamp;
813
814 (void) sysclk_gettime(&timestamp);
815
816 clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
817}
818
/*
 * Return the current 64-bit time base value.  The two 32-bit
 * halves cannot be read atomically, so re-read the upper half
 * until it is stable across the read of the lower half.
 */
void
clock_get_uptime(
	uint64_t	*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	uint32_t	hi, lo, hic;

	do {
		asm volatile("	mftbu %0" : "=r" (hi));		/* upper time base */
		asm volatile("	mftb %0" : "=r" (lo));		/* lower time base */
		asm volatile("	mftbu %0" : "=r" (hic));	/* upper again */
	} while (hic != hi);

	result->lo = lo;
	result->hi = hi;
}
835
836static int
837deadline_to_decrementer(
838 uint64_t deadline,
839 uint64_t now)
840{
841 uint64_t delt;
842
843 if (deadline <= now)
844 return DECREMENTER_MIN;
845 else {
846 delt = deadline - now;
847 return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
848 ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
849 }
850}
851
/*
 * Convert a mach_timespec_t into absolute (time base) units:
 * total nanoseconds scaled by denom/numer — the inverse of the
 * conversion in sysclk_gettime_internal().
 */
static void
timespec_to_absolutetime(
	mach_timespec_t		timespec,
	uint64_t		*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	/* t64 = tv_sec * NSEC_PER_SEC (full 64-bit product) */
	asm volatile("	mullw %0,%1,%2" :
			"=r" (t64.lo) :
				"r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (t64.hi) :
				"r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

	UnsignedWide_to_scalar(&t64) += timespec.tv_nsec;

	/* scale total nanoseconds by denom/numer via 96-bit intermediate */
	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32(t64, t32, numer, &t64, &t32);

	result->hi = t64.lo;
	result->lo = t32;
}
885
886void
887clock_interval_to_deadline(
888 uint32_t interval,
889 uint32_t scale_factor,
890 uint64_t *result)
891{
892 uint64_t abstime;
893
894 clock_get_uptime(result);
895
896 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
897
898 *result += abstime;
899}
900
/*
 * Convert `interval' (in units of `scale_factor' nanoseconds)
 * into absolute (time base) units:  nanoseconds scaled by
 * denom/numer.
 */
void
clock_interval_to_absolutetime_interval(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result0)
{
	UnsignedWide	*result = (UnsignedWide *)result0;
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	/* t64 = interval * scale_factor (full 64-bit product) */
	asm volatile("	mullw %0,%1,%2" :
			"=r" (t64.lo) :
				"r" (interval), "r" (scale_factor));
	asm volatile("	mulhwu %0,%1,%2" :
			"=r" (t64.hi) :
				"r" (interval), "r" (scale_factor));

	/* scale nanoseconds by denom/numer via 96-bit intermediate */
	umul_64by32(t64, denom, &t64, &t32);

	udiv_96by32(t64, t32, numer, &t64, &t32);

	result->hi = t64.lo;
	result->lo = t32;
}
932
933void
934clock_absolutetime_interval_to_deadline(
935 uint64_t abstime,
936 uint64_t *result)
937{
938 clock_get_uptime(result);
939
940 *result += abstime;
941}
942
/*
 * Convert an absolute (time base) value to nanoseconds:
 * abstime * numer / denom, with a 96-bit intermediate so the
 * multiply cannot overflow.
 */
void
absolutetime_to_nanoseconds(
	uint64_t		abstime,
	uint64_t		*result)
{
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	UnsignedWide_to_scalar(&t64) = abstime;

	umul_64by32(t64, numer, &t64, &t32);

	/* any quotient bits above 64 are discarded */
	udiv_96by32to64(t64, t32, denom, (void *)result);
}
964
/*
 * Convert nanoseconds to absolute (time base) units:
 * nanoseconds * denom / numer, with a 96-bit intermediate so the
 * multiply cannot overflow.
 */
void
nanoseconds_to_absolutetime(
	uint64_t		nanoseconds,
	uint64_t		*result)
{
	UnsignedWide	t64;
	uint32_t	t32;
	uint32_t	numer, denom;
	spl_t		s;

	LOCK_RTC(s);
	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	UNLOCK_RTC(s);

	UnsignedWide_to_scalar(&t64) = nanoseconds;

	umul_64by32(t64, denom, &t64, &t32);

	/* any quotient bits above 64 are discarded */
	udiv_96by32to64(t64, t32, numer, (void *)result);
}
986
987/*
988 * Spin-loop delay primitives.
989 */
990void
991delay_for_interval(
992 uint32_t interval,
993 uint32_t scale_factor)
994{
995 uint64_t now, end;
996
997 clock_interval_to_deadline(interval, scale_factor, &end);
998
999 do {
1000 clock_get_uptime(&now);
1001 } while (now < end);
1002}
1003
1004void
1005clock_delay_until(
1006 uint64_t deadline)
1007{
1008 uint64_t now;
1009
1010 do {
1011 clock_get_uptime(&now);
1012 } while (now < deadline);
1013}
1014
1015void
1016delay(
1017 int usec)
1018{
1019 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
1020}