/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:    rtclock.c
 * Purpose: Routines for handling the machine dependent
 *          real-time clock.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

#include <kern/host_notify.h>

#include <machine/mach_param.h>     /* HZ */
#include <machine/commpage.h>
#include <machine/machine_routines.h>
#include <ppc/proc_reg.h>

#include <pexpert/pexpert.h>

#include <sys/kdebug.h>

int sysclk_config(void);

int sysclk_init(void);

kern_return_t sysclk_gettime(
    mach_timespec_t         *cur_time);

kern_return_t sysclk_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,
    mach_msg_type_number_t  *count);

void sysclk_setalarm(
    mach_timespec_t         *deadline);

struct clock_ops sysclk_ops = {
    sysclk_config,      sysclk_init,
    sysclk_gettime,     0,
    sysclk_getattr,     0,
    sysclk_setalarm,
};

int calend_config(void);

int calend_init(void);

kern_return_t calend_gettime(
    mach_timespec_t         *cur_time);

kern_return_t calend_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,
    mach_msg_type_number_t  *count);

struct clock_ops calend_ops = {
    calend_config,      calend_init,
    calend_gettime,     0,
    calend_getattr,     0,
    0,
};

/* local data declarations */

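/*
 * Calendar state (field meanings inferred from the code below):
 *   epoch/microepoch  when no backward slew is in progress, the offset
 *                     (seconds/microseconds) added to system uptime to
 *                     yield calendar time; during a backward slew, the
 *                     frozen absolute calendar time as of epoch1;
 *   epoch1            absolute (timebase) time at which the current
 *                     backward-slew interval began, 0 if none;
 *   adjtotal          signed microseconds of adjustment still to apply;
 *   adjdelta          signed microseconds applied per clock tick.
 */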
static struct rtclock_calend {
    uint32_t    epoch;
    uint32_t    microepoch;

    uint64_t    epoch1;

    int64_t     adjtotal;
    int32_t     adjdelta;
} rtclock_calend;

static boolean_t        rtclock_initialized;

static uint64_t         rtclock_tick_deadline[NCPUS];

#define NSEC_PER_HZ     (NSEC_PER_SEC / HZ)
static uint32_t         rtclock_tick_interval;

static uint32_t         rtclock_sec_divisor;

static mach_timebase_info_data_t    rtclock_timebase_const;

static boolean_t        rtclock_timebase_initialized;

static struct rtclock_timer {
    uint64_t    deadline;
    uint32_t
    /*boolean_t*/   is_set:1,
                    has_expired:1,
                    :0;
} rtclock_timer[NCPUS];

static clock_timer_func_t   rtclock_timer_expire;

static timer_call_data_t    rtclock_alarm_timer;

static void timespec_to_absolutetime(
                mach_timespec_t     *ts,
                uint64_t            *result);

static int  deadline_to_decrementer(
                uint64_t            deadline,
                uint64_t            now);

static void rtclock_alarm_expire(
                timer_call_param_t  p0,
                timer_call_param_t  p1);

/* global data declarations */

#define DECREMENTER_MAX     0x7FFFFFFFUL
#define DECREMENTER_MIN     0xAUL

natural_t   rtclock_decrementer_min;

decl_simple_lock_data(static,rtclock_lock)

/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)                     \
MACRO_BEGIN                             \
    (s) = splclock();                   \
    simple_lock(&rtclock_lock);         \
MACRO_END

#define UNLOCK_RTC(s)                   \
MACRO_BEGIN                             \
    simple_unlock(&rtclock_lock);       \
    splx(s);                            \
MACRO_END

static void
timebase_callback(
    struct timebase_freq_t  *freq)
{
    uint32_t    numer, denom;
    uint64_t    abstime;
    spl_t       s;

    if (freq->timebase_den < 1 || freq->timebase_den > 4 ||
            freq->timebase_num < freq->timebase_den)
        panic("rtclock timebase_callback: invalid constant %d / %d",
                    freq->timebase_num, freq->timebase_den);

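    /*
     * The platform reports the timebase frequency as ticks per second,
     * timebase_num / timebase_den.  mach_timebase_info wants the factor
     * that converts ticks to nanoseconds, i.e. the reciprocal scaled by
     * NSEC_PER_SEC -- hence num and den deliberately swap roles here.
     * (timebase_den <= 4, so the multiply below fits in 32 bits.)
     */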
    denom = freq->timebase_num;
    numer = freq->timebase_den * NSEC_PER_SEC;

    LOCK_RTC(s);
    if (!rtclock_timebase_initialized) {
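        /* A zeroed timestamp disables the commpage gettimeofday()
           fast path, forcing user mode back into the kernel while
           the timebase constants change. */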
        commpage_set_timestamp(0,0,0,0);

        rtclock_timebase_const.numer = numer;
        rtclock_timebase_const.denom = denom;
        rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;

        nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
        rtclock_tick_interval = abstime;

        ml_init_lock_timeout();
    }
    else {
        UNLOCK_RTC(s);
        printf("rtclock timebase_callback: late old %d / %d new %d / %d\n",
                    rtclock_timebase_const.numer, rtclock_timebase_const.denom,
                    numer, denom);
        return;
    }
    UNLOCK_RTC(s);

    clock_timebase_init();
}

/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
    if (cpu_number() != master_cpu)
        return(1);

    timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

    simple_lock_init(&rtclock_lock, ETAP_MISC_RT_CLOCK);

    PE_register_timebase_callback(timebase_callback);

    return (1);
}

/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
    uint64_t    abstime;
    int         decr, mycpu = cpu_number();

    if (mycpu != master_cpu) {
        if (rtclock_initialized == FALSE) {
            panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
        }
        /* Set decrementer and hence our next tick due */
        abstime = mach_absolute_time();
        rtclock_tick_deadline[mycpu] = abstime;
        rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
        decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
        mtdec(decr);

        return(1);
    }

    /* Set decrementer and our next tick due */
    abstime = mach_absolute_time();
    rtclock_tick_deadline[mycpu] = abstime;
    rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
    decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
    mtdec(decr);

    rtclock_initialized = TRUE;

    return (1);
}

kern_return_t
sysclk_gettime(
    mach_timespec_t *time)  /* OUT */
{
    uint64_t    now, t64;
    uint32_t    divisor;

    now = mach_absolute_time();

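    /* rtclock_sec_divisor is the timebase frequency in ticks per second;
       split the tick count into whole seconds, then scale the remainder
       to nanoseconds. */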
    time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    time->tv_nsec = (now * NSEC_PER_SEC) / divisor;

    return (KERN_SUCCESS);
}

void
clock_get_system_microtime(
    uint32_t    *secs,
    uint32_t    *microsecs)
{
    uint64_t    now, t64;
    uint32_t    divisor;

    now = mach_absolute_time();

    *secs = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    *microsecs = (now * USEC_PER_SEC) / divisor;
}

void
clock_get_system_nanotime(
    uint32_t    *secs,
    uint32_t    *nanosecs)
{
    uint64_t    now, t64;
    uint32_t    divisor;

    now = mach_absolute_time();

    *secs = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    *nanosecs = (now * NSEC_PER_SEC) / divisor;
}

/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,       /* OUT */
    mach_msg_type_number_t  *count)     /* IN/OUT */
{
    spl_t   s;

    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        LOCK_RTC(s);
        *(clock_res_t *) attr = NSEC_PER_HZ;
        UNLOCK_RTC(s);
        break;

    default:
        return (KERN_INVALID_VALUE);
    }

    return (KERN_SUCCESS);
}

/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
    mach_timespec_t *deadline)
{
    uint64_t    abstime;

    timespec_to_absolutetime(deadline, &abstime);
    timer_call_enter(&rtclock_alarm_timer, abstime);
}

/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
    return (1);
}

/*
 * Initialize the calendar clock.
 */
int
calend_init(void)
{
    if (cpu_number() != master_cpu)
        return(1);

    return (1);
}

/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
    mach_timespec_t *time)  /* OUT */
{
    clock_get_calendar_nanotime(
                &time->tv_sec, &time->tv_nsec);

    return (KERN_SUCCESS);
}

/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,       /* OUT */
    mach_msg_type_number_t  *count)     /* IN/OUT */
{
    spl_t   s;

    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
        LOCK_RTC(s);
        *(clock_res_t *) attr = NSEC_PER_HZ;
        UNLOCK_RTC(s);
        break;

    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        *(clock_res_t *) attr = 0;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }

    return (KERN_SUCCESS);
}

void
clock_get_calendar_microtime(
    uint32_t    *secs,
    uint32_t    *microsecs)
{
    uint32_t    epoch, microepoch;
    uint64_t    now, t64;
    spl_t       s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        uint32_t    divisor;

        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        microepoch = rtclock_calend.microepoch;

        simple_unlock(&rtclock_lock);

        *secs = t64 = now / (divisor = rtclock_sec_divisor);
        now -= (t64 * divisor);
        *microsecs = (now * USEC_PER_SEC) / divisor;

        if ((*microsecs += microepoch) >= USEC_PER_SEC) {
            *microsecs -= USEC_PER_SEC;
            epoch += 1;
        }

        *secs += epoch;
    }
    else {
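        /*
         * A backward slew is in progress: epoch/microepoch holds the
         * absolute calendar time as of epoch1, the start of the current
         * slew interval.  Time read here advances only by the portion
         * of elapsed time that exceeds the per-tick delta being
         * absorbed, which keeps the calendar monotonic while it runs
         * slow.
         */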
        uint32_t    delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        *secs = rtclock_calend.epoch;
        *microsecs = rtclock_calend.microepoch;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                *microsecs += (t32 - delta);

            if (*microsecs >= USEC_PER_SEC) {
                *microsecs -= USEC_PER_SEC;
                *secs += 1;
            }
        }

        simple_unlock(&rtclock_lock);
    }

    splx(s);
}

/* This is called only from the gettimeofday() syscall.  As a side
 * effect it updates the commpage timestamp; otherwise it is
 * identical to clock_get_calendar_microtime().  Because most
 * gettimeofday() calls are satisfied by the commpage in user mode,
 * this routine should see little use, except while the clock is
 * being slewed backward and the commpage timestamp is disabled.
 */
void
clock_gettimeofday(
    uint32_t    *secs_p,
    uint32_t    *microsecs_p)
{
    uint32_t    epoch, microepoch;
    uint32_t    secs, microsecs;
    uint64_t    now, t64, secs_64, usec_64;
    spl_t       s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        microepoch = rtclock_calend.microepoch;

        secs = secs_64 = now / rtclock_sec_divisor;
        t64 = now - (secs_64 * rtclock_sec_divisor);
        microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        if ((microsecs += microepoch) >= USEC_PER_SEC) {
            microsecs -= USEC_PER_SEC;
            epoch += 1;
        }

        secs += epoch;

        /* adjust "now" to be absolute time at _start_ of usecond */
        now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);

        commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
    }
    else {
        uint32_t    delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        secs = rtclock_calend.epoch;
        microsecs = rtclock_calend.microepoch;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                microsecs += (t32 - delta);

            if (microsecs >= USEC_PER_SEC) {
                microsecs -= USEC_PER_SEC;
                secs += 1;
            }
        }

        /* no need to disable timestamp, it is already off */
    }

    simple_unlock(&rtclock_lock);
    splx(s);

    *secs_p = secs;
    *microsecs_p = microsecs;
}

void
clock_get_calendar_nanotime(
    uint32_t    *secs,
    uint32_t    *nanosecs)
{
    uint32_t    epoch, nanoepoch;
    uint64_t    now, t64;
    spl_t       s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        uint32_t    divisor;

        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;

        simple_unlock(&rtclock_lock);

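        /* The fraction is computed at microsecond granularity and then
           scaled to nanoseconds, so the result stays consistent with
           the calendar's microsecond-resolution epoch. */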
        *secs = t64 = now / (divisor = rtclock_sec_divisor);
        now -= (t64 * divisor);
        *nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;

        if ((*nanosecs += nanoepoch) >= NSEC_PER_SEC) {
            *nanosecs -= NSEC_PER_SEC;
            epoch += 1;
        }

        *secs += epoch;
    }
    else {
        uint32_t    delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        *secs = rtclock_calend.epoch;
        *nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                *nanosecs += ((t32 - delta) * NSEC_PER_USEC);

            if (*nanosecs >= NSEC_PER_SEC) {
                *nanosecs -= NSEC_PER_SEC;
                *secs += 1;
            }
        }

        simple_unlock(&rtclock_lock);
    }

    splx(s);
}

void
clock_set_calendar_microtime(
    uint32_t    secs,
    uint32_t    microsecs)
{
    uint32_t    sys, microsys;
    uint32_t    newsecs;
    spl_t       s;

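    /* Round to the nearest second for the battery-backed clock.  (The
       original test compared against 500*USEC_PER_SEC, which a valid
       microseconds value can never reach, so no rounding ever occurred;
       half a second appears to be the intended threshold.) */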
    newsecs = (microsecs < (USEC_PER_SEC / 2))?
                        secs: secs + 1;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    clock_get_system_microtime(&sys, &microsys);
    if ((int32_t)(microsecs -= microsys) < 0) {
        microsecs += USEC_PER_SEC;
        secs -= 1;
    }

    secs -= sys;

    rtclock_calend.epoch = secs;
    rtclock_calend.microepoch = microsecs;
    rtclock_calend.epoch1 = 0;
    rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    UNLOCK_RTC(s);

    PESetGMTTimeOfDay(newsecs);

    host_notify_calendar_change();
}

#define tickadj     (40)            /* "standard" skew, us / tick */
#define bigadj      (USEC_PER_SEC)  /* use 10x skew above bigadj us */

uint32_t
clock_set_calendar_adjtime(
    int32_t     *secs,
    int32_t     *microsecs)
{
    int64_t     total, ototal;
    uint32_t    interval = 0;
    spl_t       s;

    total = (int64_t)*secs * USEC_PER_SEC + *microsecs;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    ototal = rtclock_calend.adjtotal;

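    /* If a backward slew is under way, bring the frozen absolute
       epoch/microepoch up to date, then convert it back to an offset
       from current system time before programming the new
       adjustment. */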
    if (rtclock_calend.adjdelta < 0) {
        uint64_t    now, t64;
        uint32_t    delta, t32;
        uint32_t    sys, microsys;

        delta = -rtclock_calend.adjdelta;

        sys = rtclock_calend.epoch;
        microsys = rtclock_calend.microepoch;

        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        if (t32 > delta)
            microsys += (t32 - delta);

        if (microsys >= USEC_PER_SEC) {
            microsys -= USEC_PER_SEC;
            sys += 1;
        }

        rtclock_calend.epoch = sys;
        rtclock_calend.microepoch = microsys;

        sys = t64 = now / rtclock_sec_divisor;
        now -= (t64 * rtclock_sec_divisor);
        microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

        if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
            rtclock_calend.microepoch += USEC_PER_SEC;
            sys += 1;
        }

        rtclock_calend.epoch -= sys;
    }

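    /* Program the new adjustment: positive slews simply bump the epoch
       each tick; negative slews switch epoch/microepoch to the absolute
       representation (offset plus current system time) and record the
       slew start in epoch1.  Skew is tickadj microseconds per tick,
       10x that while more than bigadj microseconds remain. */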
    if (total != 0) {
        int32_t     delta = tickadj;

        if (total > 0) {
            if (total > bigadj)
                delta *= 10;
            if (delta > total)
                delta = total;

            rtclock_calend.epoch1 = 0;
        }
        else {
            uint64_t    now, t64;
            uint32_t    sys, microsys;

            if (total < -bigadj)
                delta *= 10;
            delta = -delta;
            if (delta < total)
                delta = total;

            rtclock_calend.epoch1 = now = mach_absolute_time();

            sys = t64 = now / rtclock_sec_divisor;
            now -= (t64 * rtclock_sec_divisor);
            microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

            if ((rtclock_calend.microepoch += microsys) >= USEC_PER_SEC) {
                rtclock_calend.microepoch -= USEC_PER_SEC;
                sys += 1;
            }

            rtclock_calend.epoch += sys;
        }

        rtclock_calend.adjtotal = total;
        rtclock_calend.adjdelta = delta;

        interval = rtclock_tick_interval;
    }
    else {
        rtclock_calend.epoch1 = 0;
        rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    }

    UNLOCK_RTC(s);

    if (ototal == 0)
        *secs = *microsecs = 0;
    else {
        *secs = ototal / USEC_PER_SEC;
        *microsecs = ototal % USEC_PER_SEC;
    }

    return (interval);
}

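/*
 * Apply one tick's worth of calendar adjustment.  Judging by the
 * returned rtclock_tick_interval, this is expected to be called
 * periodically from the clock tick path while an adjtime() adjustment
 * is outstanding; it returns the interval to the next call, or 0 when
 * the adjustment is complete.
 */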
uint32_t
clock_adjust_calendar(void)
{
    uint32_t    micronew, interval = 0;
    int32_t     delta;
    spl_t       s;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    delta = rtclock_calend.adjdelta;

    if (delta > 0) {
        micronew = rtclock_calend.microepoch + delta;
        if (micronew >= USEC_PER_SEC) {
            micronew -= USEC_PER_SEC;
            rtclock_calend.epoch += 1;
        }

        rtclock_calend.microepoch = micronew;

        rtclock_calend.adjtotal -= delta;
        if (delta > rtclock_calend.adjtotal)
            rtclock_calend.adjdelta = rtclock_calend.adjtotal;
    }
    else
    if (delta < 0) {
        uint64_t    now, t64;
        uint32_t    t32;

        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        rtclock_calend.epoch1 = now;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        micronew = rtclock_calend.microepoch + t32 + delta;
        if (micronew >= USEC_PER_SEC) {
            micronew -= USEC_PER_SEC;
            rtclock_calend.epoch += 1;
        }

        rtclock_calend.microepoch = micronew;

        rtclock_calend.adjtotal -= delta;
        if (delta < rtclock_calend.adjtotal)
            rtclock_calend.adjdelta = rtclock_calend.adjtotal;

        if (rtclock_calend.adjdelta == 0) {
            uint32_t    sys, microsys;

            sys = t64 = now / rtclock_sec_divisor;
            now -= (t64 * rtclock_sec_divisor);
            microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

            if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
                rtclock_calend.microepoch += USEC_PER_SEC;
                sys += 1;
            }

            rtclock_calend.epoch -= sys;

            rtclock_calend.epoch1 = 0;
        }
    }

    if (rtclock_calend.adjdelta != 0)
        interval = rtclock_tick_interval;

    UNLOCK_RTC(s);

    return (interval);
}

void
clock_initialize_calendar(void)
{
    uint32_t    sys, microsys;
    uint32_t    microsecs = 0, secs = PEGetGMTTimeOfDay();
    spl_t       s;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    clock_get_system_microtime(&sys, &microsys);
    if ((int32_t)(microsecs -= microsys) < 0) {
        microsecs += USEC_PER_SEC;
        secs -= 1;
    }

    secs -= sys;

    rtclock_calend.epoch = secs;
    rtclock_calend.microepoch = microsecs;
    rtclock_calend.epoch1 = 0;
    rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    UNLOCK_RTC(s);

    host_notify_calendar_change();
}

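/*
 * Hand out the timebase constants.  The first query latches them:
 * once rtclock_timebase_initialized is set, a later timebase_callback
 * will refuse to change the conversion factors (see above).
 */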
void
clock_timebase_info(
    mach_timebase_info_t    info)
{
    spl_t   s;

    LOCK_RTC(s);
    rtclock_timebase_initialized = TRUE;
    *info = rtclock_timebase_const;
    UNLOCK_RTC(s);
}

void
clock_set_timer_deadline(
    uint64_t    deadline)
{
    uint64_t    abstime;
    int         decr, mycpu;
    struct rtclock_timer    *mytimer;
    spl_t       s;

    s = splclock();
    mycpu = cpu_number();
    mytimer = &rtclock_timer[mycpu];
    mytimer->deadline = deadline;
    mytimer->is_set = TRUE;
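    /* Reprogram the decrementer only if the new deadline precedes the
       next periodic tick; otherwise the tick interrupt will pick the
       deadline up when it reloads the decrementer. */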
    if (!mytimer->has_expired) {
        abstime = mach_absolute_time();
        if (mytimer->deadline < rtclock_tick_deadline[mycpu]) {
            decr = deadline_to_decrementer(mytimer->deadline, abstime);
            if (rtclock_decrementer_min != 0 &&
                    rtclock_decrementer_min < (natural_t)decr)
                decr = rtclock_decrementer_min;

            mtdec(decr);

            KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                                    | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
        }
    }
    splx(s);
}

void
clock_set_timer_func(
    clock_timer_func_t  func)
{
    spl_t   s;

    LOCK_RTC(s);
    if (rtclock_timer_expire == NULL)
        rtclock_timer_expire = func;
    UNLOCK_RTC(s);
}

/*
 * Reset the clock device. This causes the realtime clock
 * device to reload its mode and count value (frequency).
 */
void
rtclock_reset(void)
{
    return;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
    int             device,
    struct savearea *ssp,
    spl_t           old_spl)
{
    uint64_t    abstime;
    int         decr1, decr2, mycpu = cpu_number();
    struct rtclock_timer    *mytimer = &rtclock_timer[mycpu];

    /*
     * We may receive interrupts too early, we must reject them.
     */
    if (rtclock_initialized == FALSE) {
        mtdec(DECREMENTER_MAX);     /* Max the decrementer if not init */
        return;
    }

    decr1 = decr2 = DECREMENTER_MAX;

    abstime = mach_absolute_time();
    if (rtclock_tick_deadline[mycpu] <= abstime) {
        clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
                                            &rtclock_tick_deadline[mycpu]);
        hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
    }

    abstime = mach_absolute_time();
    if (mytimer->is_set &&
            mytimer->deadline <= abstime) {
        mytimer->has_expired = TRUE; mytimer->is_set = FALSE;
        (*rtclock_timer_expire)(abstime);
        mytimer->has_expired = FALSE;
    }

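    /* Reload the decrementer with the earlier of the next periodic tick
       and the next one-shot timer deadline, clamped below by
       rtclock_decrementer_min when that is configured. */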
    abstime = mach_absolute_time();
    decr1 = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

    if (mytimer->is_set)
        decr2 = deadline_to_decrementer(mytimer->deadline, abstime);

    if (decr1 > decr2)
        decr1 = decr2;

    if (rtclock_decrementer_min != 0 &&
            rtclock_decrementer_min < (natural_t)decr1)
        decr1 = rtclock_decrementer_min;

    mtdec(decr1);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                            | DBG_FUNC_NONE, decr1, 3, 0, 0, 0);
}

static void
rtclock_alarm_expire(
    timer_call_param_t  p0,
    timer_call_param_t  p1)
{
    mach_timespec_t     timestamp;

    (void) sysclk_gettime(&timestamp);

    clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}

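/*
 * Convert a deadline into a decrementer count.  The PowerPC decrementer
 * is a signed 32-bit count-down register, so the result is clamped to
 * [DECREMENTER_MIN, DECREMENTER_MAX]; the floor also keeps a very near
 * deadline from programming a count that could be missed.
 */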
static int
deadline_to_decrementer(
    uint64_t    deadline,
    uint64_t    now)
{
    uint64_t    delt;

    if (deadline <= now)
        return DECREMENTER_MIN;
    else {
        delt = deadline - now;
        return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
                ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
    }
}

static void
timespec_to_absolutetime(
    mach_timespec_t     *ts,
    uint64_t            *result)
{
    uint32_t    divisor;

    *result = ((uint64_t)ts->tv_sec * (divisor = rtclock_sec_divisor)) +
                ((uint64_t)ts->tv_nsec * divisor) / NSEC_PER_SEC;
}

void
clock_interval_to_deadline(
    uint32_t    interval,
    uint32_t    scale_factor,
    uint64_t    *result)
{
    uint64_t    abstime;

    clock_get_uptime(result);

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    *result += abstime;
}

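/*
 * The conversions below split the value into whole seconds and a
 * sub-second remainder before scaling, so the intermediate multiply
 * stays within 64 bits for any realistic timebase frequency.
 */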
void
clock_interval_to_absolutetime_interval(
    uint32_t    interval,
    uint32_t    scale_factor,
    uint64_t    *result)
{
    uint64_t    nanosecs = (uint64_t)interval * scale_factor;
    uint64_t    t64;
    uint32_t    divisor;

    *result = (t64 = nanosecs / NSEC_PER_SEC) *
                        (divisor = rtclock_sec_divisor);
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * divisor) / NSEC_PER_SEC;
}

void
clock_absolutetime_interval_to_deadline(
    uint64_t    abstime,
    uint64_t    *result)
{
    clock_get_uptime(result);

    *result += abstime;
}

void
absolutetime_to_nanoseconds(
    uint64_t    abstime,
    uint64_t    *result)
{
    uint64_t    t64;
    uint32_t    divisor;

    *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
    abstime -= (t64 * divisor);
    *result += (abstime * NSEC_PER_SEC) / divisor;
}

void
nanoseconds_to_absolutetime(
    uint64_t    nanosecs,
    uint64_t    *result)
{
    uint64_t    t64;
    uint32_t    divisor;

    *result = (t64 = nanosecs / NSEC_PER_SEC) *
                        (divisor = rtclock_sec_divisor);
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * divisor) / NSEC_PER_SEC;
}

/*
 * Spin-loop delay primitives.
 */
void
delay_for_interval(
    uint32_t    interval,
    uint32_t    scale_factor)
{
    uint64_t    now, end;

    clock_interval_to_deadline(interval, scale_factor, &end);

    do {
        now = mach_absolute_time();
    } while (now < end);
}

void
clock_delay_until(
    uint64_t    deadline)
{
    uint64_t    now;

    do {
        now = mach_absolute_time();
    } while (now < deadline);
}

void
delay(
    int     usec)
{
    delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}