]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/rtclock.c
xnu-517.tar.gz
[apple/xnu.git] / osfmk / ppc / rtclock.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * @APPLE_FREE_COPYRIGHT@
30 */
31 /*
32 * File: rtclock.c
33 * Purpose: Routines for handling the machine dependent
34 * real-time clock.
35 */
36
37 #include <mach/mach_types.h>
38
39 #include <kern/clock.h>
40 #include <kern/thread.h>
41 #include <kern/macro_help.h>
42 #include <kern/spl.h>
43
44 #include <kern/host_notify.h>
45
46 #include <machine/mach_param.h> /* HZ */
47 #include <machine/commpage.h>
48 #include <ppc/proc_reg.h>
49
50 #include <pexpert/pexpert.h>
51
52 #include <sys/kdebug.h>
53
/* Forward declarations for the system (real-time) clock device ops. */
int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	sysclk_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

void		sysclk_setalarm(
	mach_timespec_t			*deadline);

/*
 * Ops vector for the system clock.  The zero slots are the optional
 * settime and setattr operations, which this clock does not support.
 */
struct clock_ops sysclk_ops = {
	sysclk_config,			sysclk_init,
	sysclk_gettime,			0,
	sysclk_getattr,			0,
	sysclk_setalarm,
};
75
/* Forward declarations for the calendar (wall clock) device ops. */
int		calend_config(void);

int		calend_init(void);

kern_return_t	calend_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

/*
 * Ops vector for the calendar clock.  settime, setattr and setalarm
 * are unsupported (zero slots); the calendar is set via
 * clock_set_calendar_microtime() instead.
 */
struct clock_ops calend_ops = {
	calend_config,			calend_init,
	calend_gettime,			0,
	calend_getattr,			0,
	0,
};
94
/* local data declarations */

/*
 * Calendar (wall clock) state, protected by rtclock_lock.
 *
 * epoch/microepoch is the calendar time corresponding to an uptime of
 * zero, so uptime + epoch yields wall time on the normal path.
 * epoch1 is the absolute time at which the current backward (negative
 * adjdelta) slew began; 0 when no backward slew is active.  While a
 * backward slew is active, epoch/microepoch instead hold the frozen
 * wall time as of epoch1 (see clock_set_calendar_adjtime).
 * adjtotal is the signed microseconds of adjtime() adjustment still
 * outstanding, consumed adjdelta microseconds per clock tick.
 */
static struct rtclock_calend {
	uint32_t			epoch;
	uint32_t			microepoch;

	uint64_t			epoch1;

	int64_t				adjtotal;
	int32_t				adjdelta;
} rtclock_calend;

/* TRUE once the master cpu has armed its first tick (sysclk_init). */
static boolean_t		rtclock_initialized;

/* Per-cpu absolute time at which the next periodic tick is due. */
static uint64_t		rtclock_tick_deadline[NCPUS];

#define NSEC_PER_HZ		(NSEC_PER_SEC / HZ)	/* nsec per scheduler tick */
/* NSEC_PER_HZ converted to abstime units; set by timebase_callback. */
static uint32_t		rtclock_tick_interval;

/* Timebase ticks per second; set once from the PE timebase callback. */
static uint32_t		rtclock_sec_divisor;

/* numer/denom constants converting abstime ticks to nanoseconds. */
static mach_timebase_info_data_t	rtclock_timebase_const;

/* Set once the constants have been handed out; freezes further changes. */
static boolean_t		rtclock_timebase_initialized;

/* Per-cpu one-shot timer state driven from the decrementer interrupt. */
static struct rtclock_timer {
	uint64_t			deadline;
	uint32_t
	/*boolean_t*/		is_set:1,
						has_expired:1,
						:0;
} rtclock_timer[NCPUS];

/* Expiration callout installed once via clock_set_timer_func(). */
static clock_timer_func_t	rtclock_timer_expire;

/* Timer call backing the clock-alarm service (sysclk_setalarm). */
static timer_call_data_t	rtclock_alarm_timer;

static void		timespec_to_absolutetime(
					mach_timespec_t		*ts,
					uint64_t			*result);

static int		deadline_to_decrementer(
					uint64_t			deadline,
					uint64_t			now);

static void		rtclock_alarm_expire(
					timer_call_param_t	p0,
					timer_call_param_t	p1);

/* global data declarations */

#define DECREMENTER_MAX		0x7FFFFFFFUL	/* largest positive decrementer load */
#define DECREMENTER_MIN		0xAUL			/* floor avoids an immediate re-interrupt */

/* Debug knob: when non-zero, floor for every decrementer reload. */
natural_t		rtclock_decrementer_min;

decl_simple_lock_data(static,rtclock_lock)

/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)					\
MACRO_BEGIN							\
	(s) = splclock();				\
	simple_lock(&rtclock_lock);		\
MACRO_END

#define UNLOCK_RTC(s)				\
MACRO_BEGIN							\
	simple_unlock(&rtclock_lock);	\
	splx(s);						\
MACRO_END
167
/*
 * PE callback invoked when the platform reports the timebase
 * frequency (timebase_num / timebase_den ticks per second).
 *
 * On the first invocation this derives the abstime<->nanoseconds
 * conversion constants and the per-tick interval, then completes
 * clock_timebase_init().  A second invocation after the constants
 * have been published (rtclock_timebase_initialized) is refused with
 * a console message, since user code may already hold the old values.
 */
static void
timebase_callback(
	struct timebase_freq_t	*freq)
{
	uint32_t	numer, denom;
	uint64_t	abstime;
	spl_t		s;

	/* den in [1,4] and num >= den keep the arithmetic below in range */
	if (	freq->timebase_den < 1 || freq->timebase_den > 4	||
			freq->timebase_num < freq->timebase_den				)
		panic("rtclock timebase_callback: invalid constant %d / %d",
					freq->timebase_num, freq->timebase_den);

	/* nanoseconds = abstime * (den * NSEC_PER_SEC) / num */
	denom = freq->timebase_num;
	numer = freq->timebase_den * NSEC_PER_SEC;

	LOCK_RTC(s);
	if (!rtclock_timebase_initialized) {
		commpage_set_timestamp(0,0,0,0);	/* invalidate user-mode timestamp */

		rtclock_timebase_const.numer = numer;
		rtclock_timebase_const.denom = denom;
		rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;

		nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
		rtclock_tick_interval = abstime;
	}
	else {
		UNLOCK_RTC(s);
		printf("rtclock timebase_callback: late old %d / %d new %d / %d",
				rtclock_timebase_const.numer, rtclock_timebase_const.denom,
				numer, denom);
		return;
	}
	UNLOCK_RTC(s);

	clock_timebase_init();
}
206
/*
 * Configure the real-time clock device.
 *
 * Master cpu only: sets up the alarm timer call, initializes the rtc
 * lock, and registers for the platform timebase-frequency callback.
 * Other cpus do nothing.  Always returns 1 (configured).
 */
int
sysclk_config(void)
{
	if (cpu_number() != master_cpu)
		return(1);

	timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

	simple_lock_init(&rtclock_lock, ETAP_MISC_RT_CLOCK);

	PE_register_timebase_callback(timebase_callback);

	return (1);
}
224
225 /*
226 * Initialize the system clock device.
227 */
228 int
229 sysclk_init(void)
230 {
231 uint64_t abstime;
232 int decr, mycpu = cpu_number();
233
234 if (mycpu != master_cpu) {
235 if (rtclock_initialized == FALSE) {
236 panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
237 }
238 /* Set decrementer and hence our next tick due */
239 abstime = mach_absolute_time();
240 rtclock_tick_deadline[mycpu] = abstime;
241 rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
242 decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
243 mtdec(decr);
244
245 return(1);
246 }
247
248 /* Set decrementer and our next tick due */
249 abstime = mach_absolute_time();
250 rtclock_tick_deadline[mycpu] = abstime;
251 rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
252 decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
253 mtdec(decr);
254
255 rtclock_initialized = TRUE;
256
257 return (1);
258 }
259
260 kern_return_t
261 sysclk_gettime(
262 mach_timespec_t *time) /* OUT */
263 {
264 uint64_t now, t64;
265 uint32_t divisor;
266
267 now = mach_absolute_time();
268
269 time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
270 now -= (t64 * divisor);
271 time->tv_nsec = (now * NSEC_PER_SEC) / divisor;
272
273 return (KERN_SUCCESS);
274 }
275
276 void
277 clock_get_system_microtime(
278 uint32_t *secs,
279 uint32_t *microsecs)
280 {
281 uint64_t now, t64;
282 uint32_t divisor;
283
284 now = mach_absolute_time();
285
286 *secs = t64 = now / (divisor = rtclock_sec_divisor);
287 now -= (t64 * divisor);
288 *microsecs = (now * USEC_PER_SEC) / divisor;
289 }
290
291 void
292 clock_get_system_nanotime(
293 uint32_t *secs,
294 uint32_t *nanosecs)
295 {
296 uint64_t now, t64;
297 uint32_t divisor;
298
299 now = mach_absolute_time();
300
301 *secs = t64 = now / (divisor = rtclock_sec_divisor);
302 now -= (t64 * divisor);
303 *nanosecs = (now * NSEC_PER_SEC) / divisor;
304 }
305
/*
 * Get clock device attributes.
 *
 * Only single-word attributes are supported (*count must be 1).
 * Every resolution flavor reports the scheduler tick length,
 * NSEC_PER_HZ.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);

	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		LOCK_RTC(s);
		*(clock_res_t *) attr = NSEC_PER_HZ;
		UNLOCK_RTC(s);
		break;

	default:
		return (KERN_INVALID_VALUE);
	}

	return (KERN_SUCCESS);
}
337
/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 *
 * Converts the timespec deadline to absolute time and (re)arms the
 * shared alarm timer; expiry invokes rtclock_alarm_expire().
 */
void
sysclk_setalarm(
	mach_timespec_t		*deadline)
{
	uint64_t	abstime;

	timespec_to_absolutetime(deadline, &abstime);
	timer_call_enter(&rtclock_alarm_timer, abstime);
}
351
/*
 * Configure the calendar clock.  No hardware setup is required;
 * report success unconditionally.
 */
int
calend_config(void)
{
	int		configured = 1;		/* nothing to do for the calendar */

	return configured;
}
360
/*
 * Initialize the calendar clock.
 *
 * There is no per-cpu calendar state to set up; every cpu simply
 * reports success.
 */
int
calend_init(void)
{
	if (cpu_number() != master_cpu)
		return(1);

	return (1);
}
372
373 /*
374 * Get the current clock time.
375 */
376 kern_return_t
377 calend_gettime(
378 mach_timespec_t *time) /* OUT */
379 {
380 clock_get_calendar_nanotime(
381 &time->tv_sec, &time->tv_nsec);
382
383 return (KERN_SUCCESS);
384 }
385
/*
 * Get clock device attributes.
 *
 * Only single-word attributes are supported (*count must be 1).
 * Time resolution is the scheduler tick; alarm resolutions are 0
 * because the calendar clock supports no alarms.
 */
kern_return_t
calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);

	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
		LOCK_RTC(s);
		*(clock_res_t *) attr = NSEC_PER_HZ;
		UNLOCK_RTC(s);
		break;

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}

	return (KERN_SUCCESS);
}
420
/*
 * Return the current calendar (wall clock) time in seconds and
 * microseconds.
 *
 * Fast path (adjdelta >= 0): wall time is current uptime plus the
 * epoch offset.  Backward-slew path (adjdelta < 0): wall time is the
 * frozen epoch/microepoch advanced by the time elapsed since epoch1
 * less the pending backward step, so the clock slows but never runs
 * backward.  In both paths the lock is dropped before the divides.
 */
void
clock_get_calendar_microtime(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint32_t		epoch, microepoch;
	uint64_t		now, t64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t		divisor;

		now = mach_absolute_time();

		/* snapshot the epoch under the lock, then compute unlocked */
		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		simple_unlock(&rtclock_lock);

		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		*microsecs = (now * USEC_PER_SEC) / divisor;

		/* add the epoch offset, with carry into seconds */
		if ((*microsecs += microepoch) >= USEC_PER_SEC) {
			*microsecs -= USEC_PER_SEC;
			epoch += 1;
		}

		*secs += epoch;
	}
	else {
		uint32_t	delta, t32;

		delta = -rtclock_calend.adjdelta;

		t64 = mach_absolute_time() - rtclock_calend.epoch1;

		*secs = rtclock_calend.epoch;
		*microsecs = rtclock_calend.microepoch;

		simple_unlock(&rtclock_lock);

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		/* credit elapsed time only beyond the pending backward step */
		if (t32 > delta)
			*microsecs += (t32 - delta);

		if (*microsecs >= USEC_PER_SEC) {
			*microsecs -= USEC_PER_SEC;
			*secs += 1;
		}
	}

	splx(s);
}
478
/* This is only called from the gettimeofday() syscall.  As a side
 * effect, it updates the commpage timestamp.  Otherwise it is
 * identical to clock_get_calendar_microtime().  Because most
 * gettimeofday() calls are handled by the commpage in user mode,
 * this routine should be infrequently used except when slowing down
 * the clock.
 */
void
clock_gettimeofday(
	uint32_t			*secs_p,
	uint32_t			*microsecs_p)
{
	uint32_t		epoch, microepoch;
	uint32_t 		secs, microsecs;
	uint64_t		now, t64, secs_64, usec_64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		/* normal path: wall time = uptime + epoch offset */
		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		secs = secs_64 = now / rtclock_sec_divisor;
		t64 = now - (secs_64 * rtclock_sec_divisor);
		microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if ((microsecs += microepoch) >= USEC_PER_SEC) {
			microsecs -= USEC_PER_SEC;
			epoch += 1;
		}

		secs += epoch;

		/* adjust "now" to be absolute time at _start_ of usecond */
		now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);

		/* publish the timestamp so user-mode gettimeofday can run
		   entirely from the commpage */
		commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
	}
	else {
		/* backward slew: mirror clock_get_calendar_microtime(),
		   advancing the frozen epoch by elapsed-since-epoch1 time
		   less the pending backward step */
		uint32_t	delta, t32;

		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time() - rtclock_calend.epoch1;

		secs = rtclock_calend.epoch;
		microsecs = rtclock_calend.microepoch;

		t32 = (now * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			microsecs += (t32 - delta);

		if (microsecs >= USEC_PER_SEC) {
			microsecs -= USEC_PER_SEC;
			secs += 1;
		}
		/* no need to disable timestamp, it is already off */
	}

	simple_unlock(&rtclock_lock);
	splx(s);

	*secs_p = secs;
	*microsecs_p = microsecs;
}
548
/*
 * Return the current calendar (wall clock) time in seconds and
 * nanoseconds.
 *
 * Same two-path structure as clock_get_calendar_microtime(); note the
 * fractional part is computed in microseconds and scaled up, so the
 * result has only microsecond granularity.
 */
void
clock_get_calendar_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	uint32_t		epoch, nanoepoch;
	uint64_t		now, t64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t		divisor;

		now = mach_absolute_time();

		/* snapshot the epoch under the lock, then compute unlocked */
		epoch = rtclock_calend.epoch;
		nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;

		simple_unlock(&rtclock_lock);

		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		*nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;

		/* add the epoch offset, with carry into seconds */
		if ((*nanosecs += nanoepoch) >= NSEC_PER_SEC) {
			*nanosecs -= NSEC_PER_SEC;
			epoch += 1;
		}

		*secs += epoch;
	}
	else {
		uint32_t	delta, t32;

		delta = -rtclock_calend.adjdelta;

		t64 = mach_absolute_time() - rtclock_calend.epoch1;

		*secs = rtclock_calend.epoch;
		*nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;

		simple_unlock(&rtclock_lock);

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		/* credit elapsed time only beyond the pending backward step */
		if (t32 > delta)
			*nanosecs += ((t32 - delta) * NSEC_PER_USEC);

		if (*nanosecs >= NSEC_PER_SEC) {
			*nanosecs -= NSEC_PER_SEC;
			*secs += 1;
		}
	}

	splx(s);
}
606
607 void
608 clock_set_calendar_microtime(
609 uint32_t secs,
610 uint32_t microsecs)
611 {
612 uint32_t sys, microsys;
613 uint32_t newsecs;
614 spl_t s;
615
616 newsecs = (microsecs < 500*USEC_PER_SEC)?
617 secs: secs + 1;
618
619 LOCK_RTC(s);
620 commpage_set_timestamp(0,0,0,0);
621
622 clock_get_system_microtime(&sys, &microsys);
623 if ((int32_t)(microsecs -= microsys) < 0) {
624 microsecs += USEC_PER_SEC;
625 secs -= 1;
626 }
627
628 secs -= sys;
629
630 rtclock_calend.epoch = secs;
631 rtclock_calend.microepoch = microsecs;
632 rtclock_calend.epoch1 = 0;
633 rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
634 UNLOCK_RTC(s);
635
636 PESetGMTTimeOfDay(newsecs);
637
638 host_notify_calendar_change();
639 }
640
#define tickadj		(40)			/* "standard" skew, us / tick */
#define bigadj		(USEC_PER_SEC)	/* use 10x skew above bigadj us */

/*
 * Begin a gradual (adjtime-style) adjustment of the calendar clock.
 *
 * On entry *secs/*microsecs is the signed adjustment requested; on
 * return they hold whatever adjustment was still outstanding from a
 * previous call.  Returns the tick interval when a slew is now in
 * progress (the caller uses it to schedule clock_adjust_calendar),
 * or 0 when no slewing remains.
 */
uint32_t
clock_set_calendar_adjtime(
	int32_t				*secs,
	int32_t				*microsecs)
{
	int64_t			total, ototal;
	uint32_t		interval = 0;
	spl_t			s;

	total = (int64_t)*secs * USEC_PER_SEC + *microsecs;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);	/* timestamp stays off while slewing */

	ototal = rtclock_calend.adjtotal;

	if (rtclock_calend.adjdelta < 0) {
		/*
		 * A backward slew is already in progress: fold the
		 * elapsed, partially-adjusted time since epoch1 back into
		 * epoch/microepoch, then re-base them to be uptime-relative
		 * again, so the state is consistent before the new
		 * adjustment replaces it.
		 */
		uint64_t		now, t64;
		uint32_t		delta, t32;
		uint32_t		sys, microsys;

		delta = -rtclock_calend.adjdelta;

		sys = rtclock_calend.epoch;
		microsys = rtclock_calend.microepoch;

		now = mach_absolute_time();

		t64 = now - rtclock_calend.epoch1;
		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			microsys += (t32 - delta);

		if (microsys >= USEC_PER_SEC) {
			microsys -= USEC_PER_SEC;
			sys += 1;
		}

		rtclock_calend.epoch = sys;
		rtclock_calend.microepoch = microsys;

		/* subtract current uptime back out of the epoch */
		sys = t64 = now / rtclock_sec_divisor;
		now -= (t64 * rtclock_sec_divisor);
		microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

		if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
			rtclock_calend.microepoch += USEC_PER_SEC;
			sys += 1;
		}

		rtclock_calend.epoch -= sys;
	}

	if (total != 0) {
		int32_t		delta = tickadj;

		if (total > 0) {
			/* forward slew: skew up to 10x for large adjustments */
			if (total > bigadj)
				delta *= 10;
			if (delta > total)
				delta = total;

			rtclock_calend.epoch1 = 0;
		}
		else {
			uint64_t		now, t64;
			uint32_t		sys, microsys;

			if (total < -bigadj)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;

			/*
			 * Backward slew: record its start time in epoch1 and
			 * fold current uptime into the epoch, so that
			 * epoch/microepoch hold the frozen absolute wall time
			 * expected by the adjdelta < 0 read paths.
			 */
			rtclock_calend.epoch1 = now = mach_absolute_time();

			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			if ((rtclock_calend.microepoch += microsys) >= USEC_PER_SEC) {
				rtclock_calend.microepoch -= USEC_PER_SEC;
				sys += 1;
			}

			rtclock_calend.epoch += sys;
		}

		rtclock_calend.adjtotal = total;
		rtclock_calend.adjdelta = delta;

		interval = rtclock_tick_interval;
	}
	else {
		/* zero adjustment requested: cancel any slew in progress */
		rtclock_calend.epoch1 = 0;
		rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	}

	UNLOCK_RTC(s);

	if (ototal == 0)
		*secs = *microsecs = 0;
	else {
		*secs = ototal / USEC_PER_SEC;
		*microsecs = ototal % USEC_PER_SEC;
	}

	return (interval);
}
754
/*
 * Apply one tick's worth (adjdelta microseconds) of a pending
 * adjtime() slew.  Called periodically while an adjustment is
 * outstanding.  Returns the tick interval if more slewing remains,
 * else 0.
 */
uint32_t
clock_adjust_calendar(void)
{
	uint32_t		micronew, interval = 0;
	int32_t			delta;
	spl_t			s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);	/* timestamp off while epoch moves */

	delta = rtclock_calend.adjdelta;

	if (delta > 0) {
		/* forward slew: advance the epoch by delta microseconds */
		micronew = rtclock_calend.microepoch + delta;
		if (micronew >= USEC_PER_SEC) {
			micronew -= USEC_PER_SEC;
			rtclock_calend.epoch += 1;
		}

		rtclock_calend.microepoch = micronew;

		rtclock_calend.adjtotal -= delta;
		if (delta > rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;	/* final partial step */
	}
	else
	if (delta < 0) {
		uint64_t		now, t64;
		uint32_t		t32;

		/*
		 * Backward slew: credit the elapsed time since epoch1 less
		 * |delta| microseconds, so the calendar advances slower
		 * than real time without ever stepping backward.
		 */
		now = mach_absolute_time();

		t64 = now - rtclock_calend.epoch1;

		rtclock_calend.epoch1 = now;

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		micronew = rtclock_calend.microepoch + t32 + delta;
		if (micronew >= USEC_PER_SEC) {
			micronew -= USEC_PER_SEC;
			rtclock_calend.epoch += 1;
		}

		rtclock_calend.microepoch = micronew;

		rtclock_calend.adjtotal -= delta;
		if (delta < rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;	/* final partial step */

		if (rtclock_calend.adjdelta == 0) {
			/*
			 * Slew complete: convert the frozen absolute epoch back
			 * to an uptime-relative epoch (the form expected by the
			 * adjdelta >= 0 fast paths) and clear epoch1.
			 */
			uint32_t		sys, microsys;

			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
				rtclock_calend.microepoch += USEC_PER_SEC;
				sys += 1;
			}

			rtclock_calend.epoch -= sys;

			rtclock_calend.epoch1 = 0;
		}
	}

	if (rtclock_calend.adjdelta != 0)
		interval = rtclock_tick_interval;

	UNLOCK_RTC(s);

	return (interval);
}
830
/*
 * Initialize the calendar from the platform's battery-backed clock.
 *
 * Sets epoch = stored GMT seconds - current uptime, cancels any slew
 * in progress, invalidates the commpage timestamp while the epoch
 * changes, and notifies calendar-change listeners.
 */
void
clock_initialize_calendar(void)
{
	uint32_t		sys, microsys;
	uint32_t		microsecs = 0, secs = PEGetGMTTimeOfDay();
	spl_t			s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);

	/* subtract uptime, borrowing from the seconds column if needed */
	clock_get_system_microtime(&sys, &microsys);
	if ((int32_t)(microsecs -= microsys) < 0) {
		microsecs += USEC_PER_SEC;
		secs -= 1;
	}

	secs -= sys;

	rtclock_calend.epoch = secs;
	rtclock_calend.microepoch = microsecs;
	rtclock_calend.epoch1 = 0;
	rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;	/* no slew */
	UNLOCK_RTC(s);

	host_notify_calendar_change();
}
857
/*
 * Hand out the abstime-to-nanoseconds conversion constants.
 *
 * Marks the timebase as published: a later, conflicting timebase
 * callback will be rejected (see timebase_callback).
 */
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	spl_t	s;

	LOCK_RTC(s);
	rtclock_timebase_initialized = TRUE;
	*info = rtclock_timebase_const;
	UNLOCK_RTC(s);
}
869
/*
 * Arm this cpu's one-shot timer for the given absolute deadline.
 *
 * Reprograms the decrementer only when the new deadline is nearer
 * than the next periodic tick (otherwise the tick interrupt will pick
 * it up), and not while the expiry callout is running (has_expired),
 * which would re-arm underneath rtclock_intr.
 */
void
clock_set_timer_deadline(
	uint64_t				deadline)
{
	uint64_t				abstime;
	int						decr, mycpu;
	struct rtclock_timer	*mytimer;
	spl_t					s;

	s = splclock();
	mycpu = cpu_number();
	mytimer = &rtclock_timer[mycpu];
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if (!mytimer->has_expired) {
		abstime = mach_absolute_time();
		if (	mytimer->deadline < rtclock_tick_deadline[mycpu]	) {
			decr = deadline_to_decrementer(mytimer->deadline, abstime);
			/* optional debugging floor on the decrementer period */
			if (	rtclock_decrementer_min != 0			&&
					rtclock_decrementer_min < (natural_t)decr		)
				decr = rtclock_decrementer_min;

			mtdec(decr);

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
									| DBG_FUNC_NONE, decr, 2, 0, 0, 0);
		}
	}
	splx(s);
}
900
/*
 * Install the timer expiration callout invoked by rtclock_intr.
 * Write-once: only the first caller's function is retained.
 */
void
clock_set_timer_func(
	clock_timer_func_t		func)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock_timer_expire == NULL)
		rtclock_timer_expire = func;
	UNLOCK_RTC(s);
}
912
/*
 * Reset the clock device.  The PowerPC decrementer requires no mode
 * or count reload here, so this hook is intentionally a no-op.
 */
void
rtclock_reset(void)
{
}
922
/*
 * Real-time clock device interrupt.
 *
 * Decrementer interrupt handler: runs the periodic hertz tick if its
 * deadline has passed, fires the one-shot timer callout if armed and
 * due, then reloads the decrementer for the earlier of the next tick
 * or timer deadline (floored by rtclock_decrementer_min if set).
 */
void
rtclock_intr(
	int				device,
	struct savearea	*ssp,
	spl_t			old_spl)
{
	uint64_t		abstime;
	int				decr1, decr2, mycpu = cpu_number();
	struct rtclock_timer	*mytimer = &rtclock_timer[mycpu];

	/*
	 * We may receive interrupts too early, we must reject them.
	 */
	if (rtclock_initialized == FALSE) {
		mtdec(DECREMENTER_MAX);		/* Max the decrementer if not init */
		return;
	}

	decr1 = decr2 = DECREMENTER_MAX;

	/* periodic scheduler tick, if due */
	abstime = mach_absolute_time();
	if (	rtclock_tick_deadline[mycpu] <= abstime		) {
		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
											&rtclock_tick_deadline[mycpu]);
		hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
	}

	/* one-shot timer callout, if armed and due */
	abstime = mach_absolute_time();
	if (	mytimer->is_set				&&
			mytimer->deadline <= abstime		) {
		/* has_expired blocks clock_set_timer_deadline from reprogramming
		   the decrementer while the callout runs */
		mytimer->has_expired = TRUE; mytimer->is_set = FALSE;
		(*rtclock_timer_expire)(abstime);
		mytimer->has_expired = FALSE;
	}

	/* reload the decrementer for the nearer of the two deadlines */
	abstime = mach_absolute_time();
	decr1 = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

	if (mytimer->is_set)
		decr2 = deadline_to_decrementer(mytimer->deadline, abstime);

	if (decr1 > decr2)
		decr1 = decr2;

	/* optional debugging floor on the decrementer period */
	if (	rtclock_decrementer_min != 0			&&
			rtclock_decrementer_min < (natural_t)decr1		)
		decr1 = rtclock_decrementer_min;

	mtdec(decr1);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
							| DBG_FUNC_NONE, decr1, 3, 0, 0, 0);
}
979
980 static void
981 rtclock_alarm_expire(
982 timer_call_param_t p0,
983 timer_call_param_t p1)
984 {
985 mach_timespec_t timestamp;
986
987 (void) sysclk_gettime(&timestamp);
988
989 clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
990 }
991
992 static int
993 deadline_to_decrementer(
994 uint64_t deadline,
995 uint64_t now)
996 {
997 uint64_t delt;
998
999 if (deadline <= now)
1000 return DECREMENTER_MIN;
1001 else {
1002 delt = deadline - now;
1003 return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
1004 ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
1005 }
1006 }
1007
1008 static void
1009 timespec_to_absolutetime(
1010 mach_timespec_t *ts,
1011 uint64_t *result)
1012 {
1013 uint32_t divisor;
1014
1015 *result = ((uint64_t)ts->tv_sec * (divisor = rtclock_sec_divisor)) +
1016 ((uint64_t)ts->tv_nsec * divisor) / NSEC_PER_SEC;
1017 }
1018
/*
 * Compute an absolute deadline that is `interval * scale_factor`
 * nanoseconds from the current uptime.
 */
void
clock_interval_to_deadline(
	uint32_t			interval,
	uint32_t			scale_factor,
	uint64_t			*result)
{
	uint64_t	span;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &span);

	clock_get_uptime(result);
	*result += span;
}
1033
1034 void
1035 clock_interval_to_absolutetime_interval(
1036 uint32_t interval,
1037 uint32_t scale_factor,
1038 uint64_t *result)
1039 {
1040 uint64_t nanosecs = (uint64_t)interval * scale_factor;
1041 uint64_t t64;
1042 uint32_t divisor;
1043
1044 *result = (t64 = nanosecs / NSEC_PER_SEC) *
1045 (divisor = rtclock_sec_divisor);
1046 nanosecs -= (t64 * NSEC_PER_SEC);
1047 *result += (nanosecs * divisor) / NSEC_PER_SEC;
1048 }
1049
/*
 * Compute an absolute deadline `abstime` ticks from current uptime.
 */
void
clock_absolutetime_interval_to_deadline(
	uint64_t			abstime,
	uint64_t			*result)
{
	uint64_t	uptime;

	clock_get_uptime(&uptime);
	*result = uptime + abstime;
}
1059
1060 void
1061 absolutetime_to_nanoseconds(
1062 uint64_t abstime,
1063 uint64_t *result)
1064 {
1065 uint64_t t64;
1066 uint32_t divisor;
1067
1068 *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
1069 abstime -= (t64 * divisor);
1070 *result += (abstime * NSEC_PER_SEC) / divisor;
1071 }
1072
1073 void
1074 nanoseconds_to_absolutetime(
1075 uint64_t nanosecs,
1076 uint64_t *result)
1077 {
1078 uint64_t t64;
1079 uint32_t divisor;
1080
1081 *result = (t64 = nanosecs / NSEC_PER_SEC) *
1082 (divisor = rtclock_sec_divisor);
1083 nanosecs -= (t64 * NSEC_PER_SEC);
1084 *result += (nanosecs * divisor) / NSEC_PER_SEC;
1085 }
1086
/*
 * Spin-loop delay primitives.
 */

/* Busy-wait for `interval * scale_factor` nanoseconds. */
void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t	end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	while (mach_absolute_time() < end)
		continue;
}
1103
/* Busy-wait until the given absolute deadline has passed. */
void
clock_delay_until(
	uint64_t		deadline)
{
	while (mach_absolute_time() < deadline)
		continue;
}
1114
1115 void
1116 delay(
1117 int usec)
1118 {
1119 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
1120 }