]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/rtclock.c
xnu-517.3.7.tar.gz
[apple/xnu.git] / osfmk / ppc / rtclock.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * @APPLE_FREE_COPYRIGHT@
30 */
31 /*
32 * File: rtclock.c
33 * Purpose: Routines for handling the machine dependent
34 * real-time clock.
35 */
36
37 #include <mach/mach_types.h>
38
39 #include <kern/clock.h>
40 #include <kern/thread.h>
41 #include <kern/macro_help.h>
42 #include <kern/spl.h>
43
44 #include <kern/host_notify.h>
45
46 #include <machine/mach_param.h> /* HZ */
47 #include <machine/commpage.h>
48 #include <machine/machine_routines.h>
49 #include <ppc/proc_reg.h>
50
51 #include <pexpert/pexpert.h>
52
53 #include <sys/kdebug.h>
54
/* Forward declarations for the system (real-time) clock device ops. */
int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	sysclk_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

void		sysclk_setalarm(
	mach_timespec_t			*deadline);

/*
 * Operations vector for the system (uptime) clock; the zero slots
 * are unimplemented entry points.
 */
struct clock_ops sysclk_ops = {
	sysclk_config,			sysclk_init,
	sysclk_gettime,			0,
	sysclk_getattr,			0,
	sysclk_setalarm,
};

/* Forward declarations for the calendar (wall) clock device ops. */
int		calend_config(void);

int		calend_init(void);

kern_return_t	calend_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

/* The calendar clock has no settable alarm (last slot is 0). */
struct clock_ops calend_ops = {
	calend_config,			calend_init,
	calend_gettime,			0,
	calend_getattr,			0,
	0,
};
95
/* local data declarations */

/*
 * Calendar state.  Calendar time is normally computed as
 * uptime + epoch/microepoch.  adjtotal/adjdelta carry the pending
 * adjtime() skew; while the clock is being slowed (adjdelta < 0),
 * epoch1 holds the absolute-time base the readers slew from.
 */
static struct rtclock_calend {
	uint32_t			epoch;			/* calendar - uptime offset, whole seconds */
	uint32_t			microepoch;		/* fractional offset, microseconds */

	uint64_t			epoch1;			/* abs. time slowdown began; 0 when none */

	int64_t				adjtotal;		/* remaining adjustment, signed microseconds */
	int32_t				adjdelta;		/* per-tick adjustment, signed microseconds */
} rtclock_calend;

/* TRUE once the master cpu has armed its decrementer */
static boolean_t		rtclock_initialized;

/* per-cpu absolute time of the next periodic (hertz) tick */
static uint64_t			rtclock_tick_deadline[NCPUS];

#define NSEC_PER_HZ		(NSEC_PER_SEC / HZ)		/* nsec per scheduler tick */
static uint32_t			rtclock_tick_interval;	/* tick period, abs. time units */

/* timebase ticks per second; set once from the PE timebase callback */
static uint32_t			rtclock_sec_divisor;

/* numer/denom ratio converting abs. time units to nanoseconds */
static mach_timebase_info_data_t	rtclock_timebase_const;

/* once TRUE, the timebase constants may no longer be changed */
static boolean_t		rtclock_timebase_initialized;

/* per-cpu one-shot timer state driven from the decrementer interrupt */
static struct rtclock_timer {
	uint64_t			deadline;		/* abs. time the timer fires */
	uint32_t
	/*boolean_t*/		is_set:1,		/* deadline is armed */
						has_expired:1,	/* expiry callout in progress */
						:0;
} rtclock_timer[NCPUS];

/* registered callout invoked when a per-cpu timer deadline expires */
static clock_timer_func_t	rtclock_timer_expire;

/* timer_call used to implement sysclk_setalarm() */
static timer_call_data_t	rtclock_alarm_timer;
132
/* internal helpers (defined below) */
static void		timespec_to_absolutetime(
					mach_timespec_t		*ts,
					uint64_t			*result);

static int		deadline_to_decrementer(
					uint64_t			deadline,
					uint64_t			now);

static void		rtclock_alarm_expire(
					timer_call_param_t	p0,
					timer_call_param_t	p1);

/* global data declarations */

/* the decrementer is a signed 32-bit count-down register */
#define DECREMENTER_MAX		0x7FFFFFFFUL
#define DECREMENTER_MIN		0xAUL	/* floor; avoids an immediate re-interrupt */

/* optional lower bound on programmed decrementer values (0 = disabled) */
natural_t		rtclock_decrementer_min;

/* protects calendar/timebase state; always taken at splclock() */
decl_simple_lock_data(static,rtclock_lock)
153
/*
 * Macros to lock/unlock real-time clock device.
 * Interrupts are raised to splclock() before taking the lock so the
 * decrementer interrupt path cannot spin against a holder on the
 * same cpu.
 */
#define LOCK_RTC(s)					\
MACRO_BEGIN							\
	(s) = splclock();				\
	simple_lock(&rtclock_lock);		\
MACRO_END

#define UNLOCK_RTC(s)				\
MACRO_BEGIN							\
	simple_unlock(&rtclock_lock);	\
	splx(s);						\
MACRO_END
168
/*
 * Record the hardware timebase frequency reported by the platform
 * expert and derive the conversion constants used by this file.
 * Only the first invocation may set the constants; a later call can
 * only be reported, since clients may already hold the old values.
 */
static void
timebase_callback(
	struct timebase_freq_t	*freq)
{
	uint32_t	numer, denom;
	uint64_t	abstime;
	spl_t		s;

	/* timebase_num / timebase_den is timebase ticks per second */
	if (	freq->timebase_den < 1 || freq->timebase_den > 4			||
			freq->timebase_num < freq->timebase_den						)
		panic("rtclock timebase_callback: invalid constant %d / %d",
					freq->timebase_num, freq->timebase_den);

	/* abs->nanoseconds ratio is the inverse of the tick rate */
	denom = freq->timebase_num;
	numer = freq->timebase_den * NSEC_PER_SEC;

	LOCK_RTC(s);
	if (!rtclock_timebase_initialized) {
		/* invalidate the commpage timestamp while constants change */
		commpage_set_timestamp(0,0,0,0);

		rtclock_timebase_const.numer = numer;
		rtclock_timebase_const.denom = denom;
		rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;

		nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
		rtclock_tick_interval = abstime;

		ml_init_lock_timeout();
	}
	else {
		/* too late: clock_timebase_info() already exported constants */
		UNLOCK_RTC(s);
		printf("rtclock timebase_callback: late old %d / %d new %d / %d",
					rtclock_timebase_const.numer, rtclock_timebase_const.denom,
					numer, denom);
		return;
	}
	UNLOCK_RTC(s);

	clock_timebase_init();
}
209
210 /*
211 * Configure the real-time clock device.
212 */
213 int
214 sysclk_config(void)
215 {
216 if (cpu_number() != master_cpu)
217 return(1);
218
219 timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);
220
221 simple_lock_init(&rtclock_lock, ETAP_MISC_RT_CLOCK);
222
223 PE_register_timebase_callback(timebase_callback);
224
225 return (1);
226 }
227
228 /*
229 * Initialize the system clock device.
230 */
231 int
232 sysclk_init(void)
233 {
234 uint64_t abstime;
235 int decr, mycpu = cpu_number();
236
237 if (mycpu != master_cpu) {
238 if (rtclock_initialized == FALSE) {
239 panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
240 }
241 /* Set decrementer and hence our next tick due */
242 abstime = mach_absolute_time();
243 rtclock_tick_deadline[mycpu] = abstime;
244 rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
245 decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
246 mtdec(decr);
247
248 return(1);
249 }
250
251 /* Set decrementer and our next tick due */
252 abstime = mach_absolute_time();
253 rtclock_tick_deadline[mycpu] = abstime;
254 rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
255 decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
256 mtdec(decr);
257
258 rtclock_initialized = TRUE;
259
260 return (1);
261 }
262
263 kern_return_t
264 sysclk_gettime(
265 mach_timespec_t *time) /* OUT */
266 {
267 uint64_t now, t64;
268 uint32_t divisor;
269
270 now = mach_absolute_time();
271
272 time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
273 now -= (t64 * divisor);
274 time->tv_nsec = (now * NSEC_PER_SEC) / divisor;
275
276 return (KERN_SUCCESS);
277 }
278
279 void
280 clock_get_system_microtime(
281 uint32_t *secs,
282 uint32_t *microsecs)
283 {
284 uint64_t now, t64;
285 uint32_t divisor;
286
287 now = mach_absolute_time();
288
289 *secs = t64 = now / (divisor = rtclock_sec_divisor);
290 now -= (t64 * divisor);
291 *microsecs = (now * USEC_PER_SEC) / divisor;
292 }
293
294 void
295 clock_get_system_nanotime(
296 uint32_t *secs,
297 uint32_t *nanosecs)
298 {
299 uint64_t now, t64;
300 uint32_t divisor;
301
302 now = mach_absolute_time();
303
304 *secs = t64 = now / (divisor = rtclock_sec_divisor);
305 now -= (t64 * divisor);
306 *nanosecs = (now * NSEC_PER_SEC) / divisor;
307 }
308
309 /*
310 * Get clock device attributes.
311 */
312 kern_return_t
313 sysclk_getattr(
314 clock_flavor_t flavor,
315 clock_attr_t attr, /* OUT */
316 mach_msg_type_number_t *count) /* IN/OUT */
317 {
318 spl_t s;
319
320 if (*count != 1)
321 return (KERN_FAILURE);
322
323 switch (flavor) {
324
325 case CLOCK_GET_TIME_RES: /* >0 res */
326 case CLOCK_ALARM_CURRES: /* =0 no alarm */
327 case CLOCK_ALARM_MINRES:
328 case CLOCK_ALARM_MAXRES:
329 LOCK_RTC(s);
330 *(clock_res_t *) attr = NSEC_PER_HZ;
331 UNLOCK_RTC(s);
332 break;
333
334 default:
335 return (KERN_INVALID_VALUE);
336 }
337
338 return (KERN_SUCCESS);
339 }
340
341 /*
342 * Set deadline for the next alarm on the clock device. This call
343 * always resets the time to deliver an alarm for the clock.
344 */
345 void
346 sysclk_setalarm(
347 mach_timespec_t *deadline)
348 {
349 uint64_t abstime;
350
351 timespec_to_absolutetime(deadline, &abstime);
352 timer_call_enter(&rtclock_alarm_timer, abstime);
353 }
354
/*
 * Configure the calendar clock.  Nothing to set up; report success.
 */
int
calend_config(void)
{
	return (1);
}
363
/*
 * Initialize the calendar clock.  There is no per-cpu state to set
 * up; the master-cpu check only mirrors the other init routines.
 */
int
calend_init(void)
{
	if (cpu_number() != master_cpu)
		return(1);

	return (1);
}
375
/*
 * Get the current clock time: the calendar time, reported at
 * nanosecond granularity (resolution limited by the source).
 */
kern_return_t
calend_gettime(
	mach_timespec_t		*time)	/* OUT */
{
	clock_get_calendar_nanotime(
				&time->tv_sec, &time->tv_nsec);

	return (KERN_SUCCESS);
}
388
389 /*
390 * Get clock device attributes.
391 */
392 kern_return_t
393 calend_getattr(
394 clock_flavor_t flavor,
395 clock_attr_t attr, /* OUT */
396 mach_msg_type_number_t *count) /* IN/OUT */
397 {
398 spl_t s;
399
400 if (*count != 1)
401 return (KERN_FAILURE);
402
403 switch (flavor) {
404
405 case CLOCK_GET_TIME_RES: /* >0 res */
406 LOCK_RTC(s);
407 *(clock_res_t *) attr = NSEC_PER_HZ;
408 UNLOCK_RTC(s);
409 break;
410
411 case CLOCK_ALARM_CURRES: /* =0 no alarm */
412 case CLOCK_ALARM_MINRES:
413 case CLOCK_ALARM_MAXRES:
414 *(clock_res_t *) attr = 0;
415 break;
416
417 default:
418 return (KERN_INVALID_VALUE);
419 }
420
421 return (KERN_SUCCESS);
422 }
423
/*
 * Return the calendar (wall) clock split into seconds/microseconds.
 *
 * Normal case (adjdelta >= 0): calendar = uptime + epoch/microepoch;
 * positive skew has already been folded into microepoch by
 * clock_adjust_calendar().
 *
 * Slowdown case (adjdelta < 0): epoch/microepoch hold the calendar
 * value as of absolute time epoch1; report that value advanced by
 * the elapsed time since epoch1 less the pending delta, which slews
 * the clock without ever letting it step backwards.
 */
void
clock_get_calendar_microtime(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint32_t		epoch, microepoch;
	uint64_t		now, t64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t		divisor;

		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		simple_unlock(&rtclock_lock);

		/* split uptime into whole seconds and microseconds */
		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		*microsecs = (now * USEC_PER_SEC) / divisor;

		/* add the calendar offset, carrying into seconds */
		if ((*microsecs += microepoch) >= USEC_PER_SEC) {
			*microsecs -= USEC_PER_SEC;
			epoch += 1;
		}

		*secs += epoch;
	}
	else {
		uint32_t		delta, t32;

		delta = -rtclock_calend.adjdelta;

		/* elapsed absolute time since the slowdown base point */
		t64 = mach_absolute_time() - rtclock_calend.epoch1;

		*secs = rtclock_calend.epoch;
		*microsecs = rtclock_calend.microepoch;

		simple_unlock(&rtclock_lock);

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		/* advance only by the portion beyond the pending delta */
		if (t32 > delta)
			*microsecs += (t32 - delta);

		if (*microsecs >= USEC_PER_SEC) {
			*microsecs -= USEC_PER_SEC;
			*secs += 1;
		}
	}

	splx(s);
}
481
/* This is only called from the gettimeofday() syscall.  As a side
 * effect, it updates the commpage timestamp.  Otherwise it is
 * identical to clock_get_calendar_microtime().  Because most
 * gettimeofday() calls are handled by the commpage in user mode,
 * this routine should be infrequently used except when slowing down
 * the clock.
 */
void
clock_gettimeofday(
	uint32_t			*secs_p,
	uint32_t			*microsecs_p)
{
	uint32_t		epoch, microepoch;
	uint32_t		secs, microsecs;
	uint64_t		now, t64, secs_64, usec_64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		/* split uptime into whole seconds and microseconds */
		secs = secs_64 = now / rtclock_sec_divisor;
		t64 = now - (secs_64 * rtclock_sec_divisor);
		microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		/* fold in the calendar offset, carrying into seconds */
		if ((microsecs += microepoch) >= USEC_PER_SEC) {
			microsecs -= USEC_PER_SEC;
			epoch += 1;
		}

		secs += epoch;

		/* adjust "now" to be absolute time at _start_ of usecond */
		now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);

		commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
	}
	else {
		uint32_t	delta, t32;

		/* clock is being slowed; see clock_get_calendar_microtime() */
		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time() - rtclock_calend.epoch1;

		secs = rtclock_calend.epoch;
		microsecs = rtclock_calend.microepoch;

		t32 = (now * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			microsecs += (t32 - delta);

		if (microsecs >= USEC_PER_SEC) {
			microsecs -= USEC_PER_SEC;
			secs += 1;
		}
		/* no need to disable timestamp, it is already off */
	}

	simple_unlock(&rtclock_lock);
	splx(s);

	*secs_p = secs;
	*microsecs_p = microsecs;
}
551
/*
 * Return the calendar clock in seconds/nanoseconds.  Effective
 * resolution is microseconds: the microsecond value is computed
 * first and scaled up, mirroring clock_get_calendar_microtime()
 * (see that routine for the adjdelta fast/slow path logic).
 */
void
clock_get_calendar_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	uint32_t		epoch, nanoepoch;
	uint64_t		now, t64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t		divisor;

		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;

		simple_unlock(&rtclock_lock);

		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		/* microsecond resolution, scaled up to nanoseconds */
		*nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;

		if ((*nanosecs += nanoepoch) >= NSEC_PER_SEC) {
			*nanosecs -= NSEC_PER_SEC;
			epoch += 1;
		}

		*secs += epoch;
	}
	else {
		uint32_t		delta, t32;

		delta = -rtclock_calend.adjdelta;

		t64 = mach_absolute_time() - rtclock_calend.epoch1;

		*secs = rtclock_calend.epoch;
		*nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;

		simple_unlock(&rtclock_lock);

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			*nanosecs += ((t32 - delta) * NSEC_PER_USEC);

		if (*nanosecs >= NSEC_PER_SEC) {
			*nanosecs -= NSEC_PER_SEC;
			*secs += 1;
		}
	}

	splx(s);
}
609
610 void
611 clock_set_calendar_microtime(
612 uint32_t secs,
613 uint32_t microsecs)
614 {
615 uint32_t sys, microsys;
616 uint32_t newsecs;
617 spl_t s;
618
619 newsecs = (microsecs < 500*USEC_PER_SEC)?
620 secs: secs + 1;
621
622 LOCK_RTC(s);
623 commpage_set_timestamp(0,0,0,0);
624
625 clock_get_system_microtime(&sys, &microsys);
626 if ((int32_t)(microsecs -= microsys) < 0) {
627 microsecs += USEC_PER_SEC;
628 secs -= 1;
629 }
630
631 secs -= sys;
632
633 rtclock_calend.epoch = secs;
634 rtclock_calend.microepoch = microsecs;
635 rtclock_calend.epoch1 = 0;
636 rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
637 UNLOCK_RTC(s);
638
639 PESetGMTTimeOfDay(newsecs);
640
641 host_notify_calendar_change();
642 }
643
#define tickadj		(40)		/* "standard" skew, us / tick */
#define	bigadj		(USEC_PER_SEC)	/* use 10x skew above bigadj us */

/*
 * adjtime() support: establish a new total calendar adjustment to be
 * applied gradually, tickadj (or 10 * tickadj for large requests)
 * microseconds per clock tick via clock_adjust_calendar().
 *
 * Returns the tick interval if an adjustment is now in progress
 * (zero otherwise), and reports the previously outstanding
 * adjustment back through secs/microsecs.
 */
uint32_t
clock_set_calendar_adjtime(
	int32_t				*secs,
	int32_t				*microsecs)
{
	int64_t			total, ototal;
	uint32_t		interval = 0;
	spl_t			s;

	total = (int64_t)*secs * USEC_PER_SEC + *microsecs;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);

	ototal = rtclock_calend.adjtotal;

	if (rtclock_calend.adjdelta < 0) {
		/*
		 * A slowdown is in progress: fold the (slewed) time elapsed
		 * since epoch1 into epoch/microepoch, then rebase them
		 * against current uptime, restoring offset form before the
		 * state is changed below.
		 */
		uint64_t		now, t64;
		uint32_t		delta, t32;
		uint32_t		sys, microsys;

		delta = -rtclock_calend.adjdelta;

		sys = rtclock_calend.epoch;
		microsys = rtclock_calend.microepoch;

		now = mach_absolute_time();

		t64 = now - rtclock_calend.epoch1;
		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			microsys += (t32 - delta);

		if (microsys >= USEC_PER_SEC) {
			microsys -= USEC_PER_SEC;
			sys += 1;
		}

		rtclock_calend.epoch = sys;
		rtclock_calend.microepoch = microsys;

		/* subtract current uptime to return to offset form */
		sys = t64 = now / rtclock_sec_divisor;
		now -= (t64 * rtclock_sec_divisor);
		microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

		if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
			rtclock_calend.microepoch += USEC_PER_SEC;
			sys += 1;
		}

		rtclock_calend.epoch -= sys;
	}

	if (total != 0) {
		int32_t		delta = tickadj;

		if (total > 0) {
			/* speed-up: clamp the per-tick delta to the total */
			if (total > bigadj)
				delta *= 10;
			if (delta > total)
				delta = total;

			rtclock_calend.epoch1 = 0;
		}
		else {
			uint64_t		now, t64;
			uint32_t		sys, microsys;

			if (total < -bigadj)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;

			/*
			 * Slowdown: switch epoch/microepoch to absolute form
			 * based at epoch1 = now, so readers can slew without
			 * the clock ever running backwards.
			 */
			rtclock_calend.epoch1 = now = mach_absolute_time();

			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			if ((rtclock_calend.microepoch += microsys) >= USEC_PER_SEC) {
				rtclock_calend.microepoch -= USEC_PER_SEC;
				sys += 1;
			}

			rtclock_calend.epoch += sys;
		}

		rtclock_calend.adjtotal = total;
		rtclock_calend.adjdelta = delta;

		interval = rtclock_tick_interval;
	}
	else {
		rtclock_calend.epoch1 = 0;
		rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	}

	UNLOCK_RTC(s);

	/* report the adjustment that was previously outstanding */
	if (ototal == 0)
		*secs = *microsecs = 0;
	else {
		*secs = ototal / USEC_PER_SEC;
		*microsecs = ototal % USEC_PER_SEC;
	}

	return (interval);
}
757
/*
 * Apply one tick's worth of pending adjtime() skew to the calendar.
 * Called periodically while an adjustment is outstanding.  Returns
 * the tick interval while more adjustment remains, else zero.
 */
uint32_t
clock_adjust_calendar(void)
{
	uint32_t		micronew, interval = 0;
	int32_t			delta;
	spl_t			s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);

	delta = rtclock_calend.adjdelta;

	if (delta > 0) {
		/* speed-up: push delta microseconds into the epoch offset */
		micronew = rtclock_calend.microepoch + delta;
		if (micronew >= USEC_PER_SEC) {
			micronew -= USEC_PER_SEC;
			rtclock_calend.epoch += 1;
		}

		rtclock_calend.microepoch = micronew;

		rtclock_calend.adjtotal -= delta;
		/* last partial step: shrink delta to what remains */
		if (delta > rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;
	}
	else
	if (delta < 0) {
		uint64_t		now, t64;
		uint32_t		t32;

		/* slowdown: advance epoch by elapsed time less |delta| */
		now = mach_absolute_time();

		t64 = now - rtclock_calend.epoch1;

		rtclock_calend.epoch1 = now;

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		micronew = rtclock_calend.microepoch + t32 + delta;
		if (micronew >= USEC_PER_SEC) {
			micronew -= USEC_PER_SEC;
			rtclock_calend.epoch += 1;
		}

		rtclock_calend.microepoch = micronew;

		rtclock_calend.adjtotal -= delta;
		if (delta < rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;

		if (rtclock_calend.adjdelta == 0) {
			uint32_t		sys, microsys;

			/* finished: convert epoch back to offset-from-uptime */
			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
				rtclock_calend.microepoch += USEC_PER_SEC;
				sys += 1;
			}

			rtclock_calend.epoch -= sys;

			rtclock_calend.epoch1 = 0;
		}
	}

	if (rtclock_calend.adjdelta != 0)
		interval = rtclock_tick_interval;

	UNLOCK_RTC(s);

	return (interval);
}
833
/*
 * Initialize the calendar from the battery-backed time-of-day
 * clock: compute epoch offsets from the NVRAM seconds value and
 * current uptime, cancelling any pending adjustment.
 */
void
clock_initialize_calendar(void)
{
	uint32_t		sys, microsys;
	uint32_t		microsecs = 0, secs = PEGetGMTTimeOfDay();
	spl_t			s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);

	/* epoch = NVRAM time - current uptime (with borrow) */
	clock_get_system_microtime(&sys, &microsys);
	if ((int32_t)(microsecs -= microsys) < 0) {
		microsecs += USEC_PER_SEC;
		secs -= 1;
	}

	secs -= sys;

	rtclock_calend.epoch = secs;
	rtclock_calend.microepoch = microsecs;
	rtclock_calend.epoch1 = 0;
	rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	UNLOCK_RTC(s);

	host_notify_calendar_change();
}
860
861 void
862 clock_timebase_info(
863 mach_timebase_info_t info)
864 {
865 spl_t s;
866
867 LOCK_RTC(s);
868 rtclock_timebase_initialized = TRUE;
869 *info = rtclock_timebase_const;
870 UNLOCK_RTC(s);
871 }
872
/*
 * Arm this cpu's one-shot timer for the given absolute deadline.
 * If the deadline precedes the next periodic tick -- and we are not
 * currently inside the expiry callout -- reprogram the decrementer
 * immediately.
 */
void
clock_set_timer_deadline(
	uint64_t				deadline)
{
	uint64_t				abstime;
	int						decr, mycpu;
	struct rtclock_timer	*mytimer;
	spl_t					s;

	s = splclock();
	mycpu = cpu_number();
	mytimer = &rtclock_timer[mycpu];
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if (!mytimer->has_expired) {
		abstime = mach_absolute_time();
		if (	mytimer->deadline < rtclock_tick_deadline[mycpu] ) {
			decr = deadline_to_decrementer(mytimer->deadline, abstime);
			/* honor the optional debug floor on decrementer values */
			if (	rtclock_decrementer_min != 0			&&
					rtclock_decrementer_min < (natural_t)decr		)
				decr = rtclock_decrementer_min;

			mtdec(decr);

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
								  | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
		}
	}
	splx(s);
}
903
904 void
905 clock_set_timer_func(
906 clock_timer_func_t func)
907 {
908 spl_t s;
909
910 LOCK_RTC(s);
911 if (rtclock_timer_expire == NULL)
912 rtclock_timer_expire = func;
913 UNLOCK_RTC(s);
914 }
915
/*
 * Reset the clock device. This causes the realtime clock
 * device to reload its mode and count value (frequency).
 * Nothing to do on PPC: the decrementer needs no reset.
 */
void
rtclock_reset(void)
{
	return;
}
925
/*
 * Real-time clock device interrupt (decrementer).  Runs the
 * periodic hertz tick if due, expires this cpu's one-shot timer if
 * due, then reprograms the decrementer for the nearer of the two
 * upcoming deadlines.
 */
void
rtclock_intr(
	int						device,
	struct savearea			*ssp,
	spl_t					old_spl)
{
	uint64_t		abstime;
	int				decr1, decr2, mycpu = cpu_number();
	struct rtclock_timer	*mytimer = &rtclock_timer[mycpu];

	/*
	 * We may receive interrupts too early, we must reject them.
	 */
	if (rtclock_initialized == FALSE) {
		mtdec(DECREMENTER_MAX);		/* Max the decrementer if not init */
		return;
	}

	decr1 = decr2 = DECREMENTER_MAX;

	/* run the periodic (hertz) tick if its deadline has passed */
	abstime = mach_absolute_time();
	if (	rtclock_tick_deadline[mycpu] <= abstime ) {
		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
										&rtclock_tick_deadline[mycpu]);
		hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
	}

	/* expire the one-shot timer; has_expired defers re-arming */
	abstime = mach_absolute_time();
	if (	mytimer->is_set				&&
			mytimer->deadline <= abstime		) {
		mytimer->has_expired = TRUE; mytimer->is_set = FALSE;
		(*rtclock_timer_expire)(abstime);
		mytimer->has_expired = FALSE;
	}

	/* reprogram the decrementer for the nearest deadline */
	abstime = mach_absolute_time();
	decr1 = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

	if (mytimer->is_set)
		decr2 = deadline_to_decrementer(mytimer->deadline, abstime);

	if (decr1 > decr2)
		decr1 = decr2;

	/* honor the optional debug floor on decrementer values */
	if (	rtclock_decrementer_min != 0			&&
			rtclock_decrementer_min < (natural_t)decr1		)
		decr1 = rtclock_decrementer_min;

	mtdec(decr1);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
						  | DBG_FUNC_NONE, decr1, 3, 0, 0, 0);
}
982
983 static void
984 rtclock_alarm_expire(
985 timer_call_param_t p0,
986 timer_call_param_t p1)
987 {
988 mach_timespec_t timestamp;
989
990 (void) sysclk_gettime(&timestamp);
991
992 clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
993 }
994
995 static int
996 deadline_to_decrementer(
997 uint64_t deadline,
998 uint64_t now)
999 {
1000 uint64_t delt;
1001
1002 if (deadline <= now)
1003 return DECREMENTER_MIN;
1004 else {
1005 delt = deadline - now;
1006 return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
1007 ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
1008 }
1009 }
1010
1011 static void
1012 timespec_to_absolutetime(
1013 mach_timespec_t *ts,
1014 uint64_t *result)
1015 {
1016 uint32_t divisor;
1017
1018 *result = ((uint64_t)ts->tv_sec * (divisor = rtclock_sec_divisor)) +
1019 ((uint64_t)ts->tv_nsec * divisor) / NSEC_PER_SEC;
1020 }
1021
/*
 * Compute a deadline: current uptime plus the given interval
 * (interval * scale_factor nanoseconds).
 */
void
clock_interval_to_deadline(
	uint32_t			interval,
	uint32_t			scale_factor,
	uint64_t			*result)
{
	uint64_t	span;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &span);

	clock_get_uptime(result);
	*result += span;
}
1036
1037 void
1038 clock_interval_to_absolutetime_interval(
1039 uint32_t interval,
1040 uint32_t scale_factor,
1041 uint64_t *result)
1042 {
1043 uint64_t nanosecs = (uint64_t)interval * scale_factor;
1044 uint64_t t64;
1045 uint32_t divisor;
1046
1047 *result = (t64 = nanosecs / NSEC_PER_SEC) *
1048 (divisor = rtclock_sec_divisor);
1049 nanosecs -= (t64 * NSEC_PER_SEC);
1050 *result += (nanosecs * divisor) / NSEC_PER_SEC;
1051 }
1052
/*
 * Compute a deadline: current uptime plus an interval already
 * expressed in absolute (timebase) time units.
 */
void
clock_absolutetime_interval_to_deadline(
	uint64_t			abstime,
	uint64_t			*result)
{
	uint64_t	now;

	clock_get_uptime(&now);
	*result = now + abstime;
}
1062
1063 void
1064 absolutetime_to_nanoseconds(
1065 uint64_t abstime,
1066 uint64_t *result)
1067 {
1068 uint64_t t64;
1069 uint32_t divisor;
1070
1071 *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
1072 abstime -= (t64 * divisor);
1073 *result += (abstime * NSEC_PER_SEC) / divisor;
1074 }
1075
1076 void
1077 nanoseconds_to_absolutetime(
1078 uint64_t nanosecs,
1079 uint64_t *result)
1080 {
1081 uint64_t t64;
1082 uint32_t divisor;
1083
1084 *result = (t64 = nanosecs / NSEC_PER_SEC) *
1085 (divisor = rtclock_sec_divisor);
1086 nanosecs -= (t64 * NSEC_PER_SEC);
1087 *result += (nanosecs * divisor) / NSEC_PER_SEC;
1088 }
1089
1090 /*
1091 * Spin-loop delay primitives.
1092 */
/*
 * Busy-wait for the given interval (interval * scale_factor ns).
 */
void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		deadline;

	clock_interval_to_deadline(interval, scale_factor, &deadline);

	while (mach_absolute_time() < deadline)
		continue;
}
1106
/*
 * Busy-wait until the given absolute-time deadline has passed.
 */
void
clock_delay_until(
	uint64_t		deadline)
{
	while (mach_absolute_time() < deadline)
		continue;
}
1117
1118 void
1119 delay(
1120 int usec)
1121 {
1122 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
1123 }