/*
 * Source: git.saurik.com Git mirror - apple/xnu.git - osfmk/ppc/rtclock.c
 * blob bd97881bd024dff5bebe97bfaa05175988099b41
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * @APPLE_FREE_COPYRIGHT@
27 */
28 /*
29 * File: rtclock.c
30 * Purpose: Routines for handling the machine dependent
31 * real-time clock.
32 */
33
34 #include <mach/mach_types.h>
35
36 #include <kern/clock.h>
37 #include <kern/thread.h>
38 #include <kern/macro_help.h>
39 #include <kern/spl.h>
40
41 #include <kern/host_notify.h>
42
43 #include <machine/commpage.h>
44 #include <machine/machine_routines.h>
45 #include <ppc/exception.h>
46 #include <ppc/proc_reg.h>
47
48 #include <IOKit/IOPlatformExpert.h>
49
50 #include <sys/kdebug.h>
51
52 int sysclk_config(void);
53
54 int sysclk_init(void);
55
56 void treqs(uint32_t dec);
57
58 kern_return_t sysclk_gettime(
59 mach_timespec_t *cur_time);
60
61 kern_return_t sysclk_getattr(
62 clock_flavor_t flavor,
63 clock_attr_t attr,
64 mach_msg_type_number_t *count);
65
66 void sysclk_setalarm(
67 mach_timespec_t *deadline);
68
/*
 * Operations vector for the system (uptime) clock device.
 * Unimplemented entries are 0.
 */
struct clock_ops  sysclk_ops = {
	sysclk_config,			sysclk_init,
	sysclk_gettime,			0,
	sysclk_getattr,			0,
	sysclk_setalarm,
};
75
76 int calend_config(void);
77
78 kern_return_t calend_gettime(
79 mach_timespec_t *cur_time);
80
81 kern_return_t calend_getattr(
82 clock_flavor_t flavor,
83 clock_attr_t attr,
84 mach_msg_type_number_t *count);
85
/*
 * Operations vector for the calendar (wall) clock device.
 * The calendar clock has no init, set-time, set-attribute, or
 * alarm operations; those entries are 0.
 */
struct clock_ops  calend_ops = {
	calend_config,			0,
	calend_gettime,			0,
	calend_getattr,			0,
	0,
};
92
93 /* local data declarations */
94
/*
 * Calendar clock state.  The calendar time is computed as the system
 * (uptime) clock plus the (epoch, microepoch) offset, possibly slewed
 * by an adjtime-style adjustment in progress.
 */
static struct rtclock_calend {
	uint32_t			epoch;		/* seconds offset added to the system clock */
	uint32_t			microepoch;	/* microseconds part of the offset */

	uint64_t			epoch1;		/* absolute (timebase) time at which the
						 * current negative (slow-down) adjustment
						 * began; 0 when no such slew is active */

	int64_t				adjtotal;	/* microseconds of adjustment remaining */
	int32_t				adjdelta;	/* microseconds applied per clock tick;
						 * negative while the clock is being slowed */
}					rtclock_calend;
104
105 static uint32_t rtclock_boottime;
106
/*
 * TIME_ADD: add a (secs, frac) pair into the running (rsecs, rfrac)
 * pair, where "frac" counts fractional units that carry into one
 * second at "unit".  Only a single carry is performed, so callers
 * are expected to pass frac values that produce at most one carry.
 */
#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if (((rfrac) += (frac)) >= (unit)) {			\
		(rfrac) -= (unit);							\
		(rsecs) += 1;								\
	}												\
	(rsecs) += (secs);								\
MACRO_END

/*
 * TIME_SUB: subtract a (secs, frac) pair from the running
 * (rsecs, rfrac) pair, borrowing one second if the fractional part
 * underflows (detected via a signed compare).  Only a single borrow
 * is performed.
 */
#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if ((int32_t)((rfrac) -= (frac)) < 0) {			\
		(rfrac) += (unit);							\
		(rsecs) -= 1;								\
	}												\
	(rsecs) -= (secs);								\
MACRO_END
124
125 #define NSEC_PER_HZ (NSEC_PER_SEC / 100)
126 static uint32_t rtclock_tick_interval;
127
128 static uint32_t rtclock_sec_divisor;
129
130 static mach_timebase_info_data_t rtclock_timebase_const;
131
132 static boolean_t rtclock_timebase_initialized;
133
134 static clock_timer_func_t rtclock_timer_expire;
135
136 static timer_call_data_t rtclock_alarm_timer;
137
138 static void nanotime_to_absolutetime(
139 uint32_t secs,
140 uint32_t nanosecs,
141 uint64_t *result);
142
143 static int deadline_to_decrementer(
144 uint64_t deadline,
145 uint64_t now);
146
147 static void rtclock_alarm_expire(
148 timer_call_param_t p0,
149 timer_call_param_t p1);
150
151 /* global data declarations */
152
153 #define DECREMENTER_MAX 0x7FFFFFFFUL
154 #define DECREMENTER_MIN 0xAUL
155
156 natural_t rtclock_decrementer_min;
157
158 decl_simple_lock_data(static,rtclock_lock)
159
160 /*
161 * Macros to lock/unlock real-time clock device.
162 */
163 #define LOCK_RTC(s) \
164 MACRO_BEGIN \
165 (s) = splclock(); \
166 simple_lock(&rtclock_lock); \
167 MACRO_END
168
169 #define UNLOCK_RTC(s) \
170 MACRO_BEGIN \
171 simple_unlock(&rtclock_lock); \
172 splx(s); \
173 MACRO_END
174
/*
 * Record the platform-supplied timebase frequency.
 *
 * Invoked (via PE_register_timebase_callback) with the timebase
 * frequency expressed as the ratio timebase_num / timebase_den ticks
 * per second.  The first call captures the absolutetime-to-nanoseconds
 * conversion constants and the periodic tick interval; later calls are
 * rejected, since the constants may already have been handed out via
 * clock_timebase_info().
 */
static void
timebase_callback(
	struct timebase_freq_t	*freq)
{
	uint32_t	numer, denom;
	uint64_t	abstime;
	spl_t		s;

	/* Sanity check: denominator expected small (1..4) and the ratio
	 * must be at least one tick per second. */
	if (	freq->timebase_den < 1 || freq->timebase_den > 4	||
			freq->timebase_num < freq->timebase_den )
		panic("rtclock timebase_callback: invalid constant %d / %d",
					freq->timebase_num, freq->timebase_den);

	/* mach_timebase_info is nanoseconds per tick, i.e. the inverse of
	 * the tick rate, so numerator and denominator are swapped here. */
	denom = freq->timebase_num;
	numer = freq->timebase_den * NSEC_PER_SEC;

	LOCK_RTC(s);
	if (!rtclock_timebase_initialized) {
		commpage_set_timestamp(0,0,0,0);	/* invalidate commpage timestamp */

		rtclock_timebase_const.numer = numer;
		rtclock_timebase_const.denom = denom;
		rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;	/* ticks per second */

		/* NSEC_PER_HZ is one 100 Hz tick (10 ms), converted to timebase ticks. */
		nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
		rtclock_tick_interval = abstime;

		ml_init_lock_timeout();
	}
	else {
		/* Too late: constants may already be in use; refuse the update. */
		UNLOCK_RTC(s);
		printf("rtclock timebase_callback: late old %d / %d new %d / %d\n",
			   rtclock_timebase_const.numer, rtclock_timebase_const.denom,
			   numer, denom);
		return;
	}
	UNLOCK_RTC(s);

	clock_timebase_init();
}
215
216 /*
217 * Configure the real-time clock device.
218 */
219 int
220 sysclk_config(void)
221 {
222 timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);
223
224 simple_lock_init(&rtclock_lock, 0);
225
226 PE_register_timebase_callback(timebase_callback);
227
228 return (1);
229 }
230
231 /*
232 * Initialize the system clock device.
233 */
234 int
235 sysclk_init(void)
236 {
237 uint64_t abstime, nexttick;
238 int decr1, decr2;
239 struct rtclock_timer *mytimer;
240 struct per_proc_info *pp;
241
242 decr1 = decr2 = DECREMENTER_MAX;
243
244 pp = getPerProc();
245 mytimer = &pp->rtclock_timer;
246
247 abstime = mach_absolute_time();
248 nexttick = abstime + rtclock_tick_interval;
249 pp->rtclock_tick_deadline = nexttick;
250 decr1 = deadline_to_decrementer(nexttick, abstime);
251
252 if (mytimer->is_set)
253 decr2 = deadline_to_decrementer(mytimer->deadline, abstime);
254
255 if (decr1 > decr2)
256 decr1 = decr2;
257
258 treqs(decr1);
259
260 return (1);
261 }
262
263 kern_return_t
264 sysclk_gettime(
265 mach_timespec_t *time) /* OUT */
266 {
267 uint64_t now, t64;
268 uint32_t divisor;
269
270 now = mach_absolute_time();
271
272 time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
273 now -= (t64 * divisor);
274 time->tv_nsec = (now * NSEC_PER_SEC) / divisor;
275
276 return (KERN_SUCCESS);
277 }
278
279 void
280 clock_get_system_microtime(
281 uint32_t *secs,
282 uint32_t *microsecs)
283 {
284 uint64_t now, t64;
285 uint32_t divisor;
286
287 now = mach_absolute_time();
288
289 *secs = t64 = now / (divisor = rtclock_sec_divisor);
290 now -= (t64 * divisor);
291 *microsecs = (now * USEC_PER_SEC) / divisor;
292 }
293
294 void
295 clock_get_system_nanotime(
296 uint32_t *secs,
297 uint32_t *nanosecs)
298 {
299 uint64_t now, t64;
300 uint32_t divisor;
301
302 now = mach_absolute_time();
303
304 *secs = t64 = now / (divisor = rtclock_sec_divisor);
305 now -= (t64 * divisor);
306 *nanosecs = (now * NSEC_PER_SEC) / divisor;
307 }
308
309 /*
310 * Get clock device attributes.
311 */
312 kern_return_t
313 sysclk_getattr(
314 clock_flavor_t flavor,
315 clock_attr_t attr, /* OUT */
316 mach_msg_type_number_t *count) /* IN/OUT */
317 {
318 spl_t s;
319
320 if (*count != 1)
321 return (KERN_FAILURE);
322
323 switch (flavor) {
324
325 case CLOCK_GET_TIME_RES: /* >0 res */
326 case CLOCK_ALARM_CURRES: /* =0 no alarm */
327 case CLOCK_ALARM_MINRES:
328 case CLOCK_ALARM_MAXRES:
329 LOCK_RTC(s);
330 *(clock_res_t *) attr = NSEC_PER_HZ;
331 UNLOCK_RTC(s);
332 break;
333
334 default:
335 return (KERN_INVALID_VALUE);
336 }
337
338 return (KERN_SUCCESS);
339 }
340
341 /*
342 * Set deadline for the next alarm on the clock device. This call
343 * always resets the time to deliver an alarm for the clock.
344 */
345 void
346 sysclk_setalarm(
347 mach_timespec_t *deadline)
348 {
349 uint64_t abstime;
350
351 nanotime_to_absolutetime(deadline->tv_sec, deadline->tv_nsec, &abstime);
352 timer_call_enter(&rtclock_alarm_timer, abstime);
353 }
354
355 /*
356 * Configure the calendar clock.
357 */
/*
 * Configure the calendar clock.  Nothing to do; always succeeds.
 */
int
calend_config(void)
{
	return 1;
}
363
364 /*
365 * Get the current clock time.
366 */
367 kern_return_t
368 calend_gettime(
369 mach_timespec_t *time) /* OUT */
370 {
371 clock_get_calendar_nanotime(
372 &time->tv_sec, &time->tv_nsec);
373
374 return (KERN_SUCCESS);
375 }
376
377 /*
378 * Get clock device attributes.
379 */
380 kern_return_t
381 calend_getattr(
382 clock_flavor_t flavor,
383 clock_attr_t attr, /* OUT */
384 mach_msg_type_number_t *count) /* IN/OUT */
385 {
386 spl_t s;
387
388 if (*count != 1)
389 return (KERN_FAILURE);
390
391 switch (flavor) {
392
393 case CLOCK_GET_TIME_RES: /* >0 res */
394 LOCK_RTC(s);
395 *(clock_res_t *) attr = NSEC_PER_HZ;
396 UNLOCK_RTC(s);
397 break;
398
399 case CLOCK_ALARM_CURRES: /* =0 no alarm */
400 case CLOCK_ALARM_MINRES:
401 case CLOCK_ALARM_MAXRES:
402 *(clock_res_t *) attr = 0;
403 break;
404
405 default:
406 return (KERN_INVALID_VALUE);
407 }
408
409 return (KERN_SUCCESS);
410 }
411
/*
 * Return the current calendar (wall clock) time in seconds and
 * microseconds.
 *
 * Normal case (clock running at rate, or being sped up): calendar
 * time is the system clock plus the (epoch, microepoch) offset.
 * While a negative (slow-down) adjustment is in progress, time is
 * instead derived from the epoch plus the elapsed time since the
 * slew began (epoch1), reduced by the pending per-tick delta, so the
 * clock never runs ahead of its slewed rate.
 */
void
clock_get_calendar_microtime(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint32_t		epoch, microepoch;
	uint64_t		now, t64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t		divisor;

		now = mach_absolute_time();

		/* Snapshot the epoch under the lock... */
		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		simple_unlock(&rtclock_lock);

		/* ...then do the conversion with the lock dropped. */
		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		*microsecs = (now * USEC_PER_SEC) / divisor;

		TIME_ADD(*secs, epoch, *microsecs, microepoch, USEC_PER_SEC);
	}
	else {
		uint32_t		delta, t32;

		/* Slowing down: adjdelta < 0, so delta is the positive magnitude. */
		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time();

		*secs = rtclock_calend.epoch;
		*microsecs = rtclock_calend.microepoch;

		if (now > rtclock_calend.epoch1) {
			/* Microseconds elapsed since the slew point... */
			t64 = now - rtclock_calend.epoch1;

			t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

			/* ...credited only beyond the pending per-tick delta. */
			if (t32 > delta)
				TIME_ADD(*secs, 0, *microsecs, (t32 - delta), USEC_PER_SEC);
		}

		simple_unlock(&rtclock_lock);
	}

	splx(s);
}
463
464 /* This is only called from the gettimeofday() syscall. As a side
465 * effect, it updates the commpage timestamp. Otherwise it is
466 * identical to clock_get_calendar_microtime(). Because most
467 * gettimeofday() calls are handled by the commpage in user mode,
468 * this routine should be infrequently used except when slowing down
469 * the clock.
470 */
void
clock_gettimeofday(
	uint32_t			*secs_p,
	uint32_t			*microsecs_p)
{
	uint32_t		epoch, microepoch;
	uint32_t		secs, microsecs;
	uint64_t		now, t64, secs_64, usec_64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		/* Normal path: calendar time = system clock + epoch offset. */
		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		secs = secs_64 = now / rtclock_sec_divisor;
		t64 = now - (secs_64 * rtclock_sec_divisor);
		microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		TIME_ADD(secs, epoch, microsecs, microepoch, USEC_PER_SEC);

		/* adjust "now" to be absolute time at _start_ of usecond */
		now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);

		/* Refresh the commpage timestamp so user-mode gettimeofday()
		 * can run without trapping into the kernel. */
		commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
	}
	else {
		/* Slow-down adjustment in progress: derive time from the
		 * epoch plus the elapsed time since the slew began, less the
		 * pending delta (same scheme as clock_get_calendar_microtime). */
		uint32_t		delta, t32;

		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time();

		secs = rtclock_calend.epoch;
		microsecs = rtclock_calend.microepoch;

		if (now > rtclock_calend.epoch1) {
			t64 = now - rtclock_calend.epoch1;

			t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

			if (t32 > delta)
				TIME_ADD(secs, 0, microsecs, (t32 - delta), USEC_PER_SEC);
		}

		/* no need to disable timestamp, it is already off */
	}

	simple_unlock(&rtclock_lock);
	splx(s);

	*secs_p = secs;
	*microsecs_p = microsecs;
}
528
/*
 * Return the current calendar (wall clock) time in seconds and
 * nanoseconds.  Same algorithm as clock_get_calendar_microtime(),
 * but note the fractional part is computed to microsecond precision
 * and then scaled to nanoseconds.
 */
void
clock_get_calendar_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	uint32_t		epoch, nanoepoch;
	uint64_t		now, t64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t		divisor;

		now = mach_absolute_time();

		/* Snapshot the epoch under the lock; convert after dropping it. */
		epoch = rtclock_calend.epoch;
		nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;

		simple_unlock(&rtclock_lock);

		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		/* microsecond resolution, expressed in nanoseconds */
		*nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;

		TIME_ADD(*secs, epoch, *nanosecs, nanoepoch, NSEC_PER_SEC);
	}
	else {
		uint32_t		delta, t32;

		/* Slow-down adjustment in progress (see clock_get_calendar_microtime). */
		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time();

		*secs = rtclock_calend.epoch;
		*nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;

		if (now > rtclock_calend.epoch1) {
			t64 = now - rtclock_calend.epoch1;

			t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

			if (t32 > delta)
				TIME_ADD(*secs, 0, *nanosecs, ((t32 - delta) * NSEC_PER_USEC), NSEC_PER_SEC);
		}

		simple_unlock(&rtclock_lock);
	}

	splx(s);
}
580
581 void
582 clock_set_calendar_microtime(
583 uint32_t secs,
584 uint32_t microsecs)
585 {
586 uint32_t sys, microsys;
587 uint32_t newsecs;
588 spl_t s;
589
590 newsecs = (microsecs < 500*USEC_PER_SEC)?
591 secs: secs + 1;
592
593 s = splclock();
594 simple_lock(&rtclock_lock);
595
596 commpage_set_timestamp(0,0,0,0);
597
598 /*
599 * Calculate the new calendar epoch based on
600 * the new value and the system clock.
601 */
602 clock_get_system_microtime(&sys, &microsys);
603 TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
604
605 /*
606 * Adjust the boottime based on the delta.
607 */
608 rtclock_boottime += secs - rtclock_calend.epoch;
609
610 /*
611 * Set the new calendar epoch.
612 */
613 rtclock_calend.epoch = secs;
614 rtclock_calend.microepoch = microsecs;
615
616 /*
617 * Cancel any adjustment in progress.
618 */
619 rtclock_calend.epoch1 = 0;
620 rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
621
622 simple_unlock(&rtclock_lock);
623
624 /*
625 * Set the new value for the platform clock.
626 */
627 PESetGMTTimeOfDay(newsecs);
628
629 splx(s);
630
631 /*
632 * Send host notifications.
633 */
634 host_notify_calendar_change();
635 }
636
637 #define tickadj (40) /* "standard" skew, us / tick */
638 #define bigadj (USEC_PER_SEC) /* use 10x skew above bigadj us */
639
/*
 * Start (or cancel) a gradual adjtime-style adjustment of the
 * calendar clock.
 *
 * The requested adjustment (*secs, *microsecs) replaces any pending
 * one; the previously outstanding adjustment is returned through the
 * same pointers.  Positive adjustments speed the clock up by "delta"
 * microseconds per tick; negative adjustments slow it down by holding
 * time back relative to the slew point (epoch1).  Returns the tick
 * interval if a periodic adjustment callback is needed, else 0.
 */
uint32_t
clock_set_calendar_adjtime(
	int32_t				*secs,
	int32_t				*microsecs)
{
	int64_t			total, ototal;
	uint32_t		interval = 0;
	spl_t			s;

	/* Requested total adjustment, in microseconds. */
	total = (int64_t)*secs * USEC_PER_SEC + *microsecs;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);	/* invalidate commpage timestamp */

	ototal = rtclock_calend.adjtotal;

	if (rtclock_calend.adjdelta < 0) {
		/*
		 * A slow-down is already in progress: fold the time accrued
		 * since epoch1 (less the pending delta) into the epoch, then
		 * rebase the epoch against the current system clock so the
		 * new adjustment starts from a clean state.
		 */
		uint64_t		now, t64;
		uint32_t		delta, t32;
		uint32_t		sys, microsys;

		delta = -rtclock_calend.adjdelta;

		sys = rtclock_calend.epoch;
		microsys = rtclock_calend.microepoch;

		now = mach_absolute_time();

		if (now > rtclock_calend.epoch1)
			t64 = now - rtclock_calend.epoch1;
		else
			t64 = 0;

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC);

		rtclock_calend.epoch = sys;
		rtclock_calend.microepoch = microsys;

		/* Subtract the current system time so epoch is again an offset. */
		sys = t64 = now / rtclock_sec_divisor;
		now -= (t64 * rtclock_sec_divisor);
		microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

		TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
	}

	if (total != 0) {
		int32_t		delta = tickadj;

		if (total > 0) {
			/* Speeding up: apply +delta per tick (10x skew if large). */
			if (total > bigadj)
				delta *= 10;
			if (delta > total)
				delta = total;

			rtclock_calend.epoch1 = 0;
		}
		else {
			/*
			 * Slowing down: record the slew point (epoch1 = now) and
			 * convert the epoch from an offset to an absolute calendar
			 * time by adding the current system time back in.
			 */
			uint64_t		now, t64;
			uint32_t		sys, microsys;

			if (total < -bigadj)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;

			rtclock_calend.epoch1 = now = mach_absolute_time();

			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			TIME_ADD(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
		}

		rtclock_calend.adjtotal = total;
		rtclock_calend.adjdelta = delta;

		/* Caller must arrange periodic clock_adjust_calendar() calls. */
		interval = rtclock_tick_interval;
	}
	else {
		/* Zero request: cancel any adjustment. */
		rtclock_calend.epoch1 = 0;
		rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	}

	UNLOCK_RTC(s);

	/* Report the previously outstanding adjustment. */
	if (ototal == 0)
		*secs = *microsecs = 0;
	else {
		*secs = ototal / USEC_PER_SEC;
		*microsecs = ototal % USEC_PER_SEC;
	}

	return (interval);
}
739
/*
 * Apply one tick's worth of the pending adjtime adjustment to the
 * calendar clock.  Called periodically while an adjustment is in
 * progress.  Returns the tick interval if more adjustment remains,
 * else 0 (the caller may then stop calling).
 */
uint32_t
clock_adjust_calendar(void)
{
	uint32_t		interval = 0;
	int32_t			delta;
	spl_t			s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);	/* invalidate commpage timestamp */

	delta = rtclock_calend.adjdelta;

	if (delta > 0) {
		/* Speeding up: push the epoch forward by delta microseconds. */
		TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, delta, USEC_PER_SEC);

		rtclock_calend.adjtotal -= delta;
		/* Final partial step when less than delta remains. */
		if (delta > rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;
	}
	else
	if (delta < 0) {
		/*
		 * Slowing down: credit the epoch with the real time elapsed
		 * since the last slew point, minus |delta| (t32 + delta, with
		 * delta negative), then advance the slew point to now.
		 */
		uint64_t		now, t64;
		uint32_t		t32;

		now = mach_absolute_time();

		if (now > rtclock_calend.epoch1)
			t64 = now - rtclock_calend.epoch1;
		else
			t64 = 0;

		rtclock_calend.epoch1 = now;

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, (t32 + delta), USEC_PER_SEC);

		rtclock_calend.adjtotal -= delta;
		/* Final partial step when less than |delta| remains. */
		if (delta < rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;

		if (rtclock_calend.adjdelta == 0) {
			/*
			 * Slew complete: convert the epoch back from an absolute
			 * calendar time to an offset from the system clock.
			 */
			uint32_t		sys, microsys;

			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);

			rtclock_calend.epoch1 = 0;
		}
	}

	if (rtclock_calend.adjdelta != 0)
		interval = rtclock_tick_interval;

	UNLOCK_RTC(s);

	return (interval);
}
801
802 /*
803 * clock_initialize_calendar:
804 *
805 * Set the calendar and related clocks
806 * from the platform clock at boot or
807 * wake event.
808 */
void
clock_initialize_calendar(void)
{
	uint32_t		sys, microsys;
	uint32_t		microsecs = 0, secs = PEGetGMTTimeOfDay();
	spl_t			s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);	/* invalidate commpage timestamp */

	/* Ignore a platform clock that has gone backwards relative to
	 * the recorded boot time (signed compare). */
	if ((int32_t)secs >= (int32_t)rtclock_boottime) {
		/*
		 * Initialize the boot time based on the platform clock.
		 */
		if (rtclock_boottime == 0)
			rtclock_boottime = secs;

		/*
		 * Calculate the new calendar epoch based
		 * on the platform clock and the system
		 * clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 * Set the new calendar epoch.
		 */
		rtclock_calend.epoch = secs;
		rtclock_calend.microepoch = microsecs;

		/*
		 * Cancel any adjustment in progress.
		 */
		rtclock_calend.epoch1 = 0;
		rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	}

	UNLOCK_RTC(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();
}
854
855 void
856 clock_get_boottime_nanotime(
857 uint32_t *secs,
858 uint32_t *nanosecs)
859 {
860 *secs = rtclock_boottime;
861 *nanosecs = 0;
862 }
863
864 void
865 clock_timebase_info(
866 mach_timebase_info_t info)
867 {
868 spl_t s;
869
870 LOCK_RTC(s);
871 rtclock_timebase_initialized = TRUE;
872 *info = rtclock_timebase_const;
873 UNLOCK_RTC(s);
874 }
875
/*
 * Set the expiration deadline for this processor's one-shot timer
 * and, if it is earlier than the next periodic tick, reprogram the
 * decrementer to pop at that deadline.  While the expiration handler
 * is running (has_expired), the decrementer is left alone; the
 * interrupt path re-arms it afterwards.
 */
void
clock_set_timer_deadline(
	uint64_t			deadline)
{
	uint64_t			abstime;
	int					decr;
	struct rtclock_timer	*mytimer;
	struct per_proc_info	*pp;
	spl_t				s;

	s = splclock();
	pp = getPerProc();
	mytimer = &pp->rtclock_timer;
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if (!mytimer->has_expired) {
		abstime = mach_absolute_time();
		if (	mytimer->deadline < pp->rtclock_tick_deadline ) {
			decr = deadline_to_decrementer(mytimer->deadline, abstime);
			/* Debug knob: cap the decrementer at rtclock_decrementer_min
			 * (forces earlier pops) when it is set. */
			if (	rtclock_decrementer_min != 0		&&
					rtclock_decrementer_min < (natural_t)decr		)
				decr = rtclock_decrementer_min;

			treqs(decr);

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
									  | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
		}
	}
	splx(s);
}
907
908 void
909 clock_set_timer_func(
910 clock_timer_func_t func)
911 {
912 spl_t s;
913
914 LOCK_RTC(s);
915 if (rtclock_timer_expire == NULL)
916 rtclock_timer_expire = func;
917 UNLOCK_RTC(s);
918 }
919
920 void
921 rtclock_intr(
922 int device,
923 struct savearea *ssp,
924 spl_t old);
925
/*
 * Real-time clock device interrupt (decrementer pop).
 *
 * Services the periodic hertz tick if its deadline has passed, fires
 * the per-processor one-shot timer if due, then re-arms the
 * decrementer for the earlier of the next tick deadline and any
 * pending timer deadline.
 */
void
rtclock_intr(
	__unused int		device,
	struct savearea		*ssp,
	__unused spl_t		old_spl)
{
	uint64_t		abstime;
	int				decr1, decr2;
	struct rtclock_timer	*mytimer;
	struct per_proc_info	*pp;

	decr1 = decr2 = DECREMENTER_MAX;

	pp = getPerProc();

	/* Periodic tick: advance the deadline and account the tick. */
	abstime = mach_absolute_time();
	if (	pp->rtclock_tick_deadline <= abstime ) {
		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
							&pp->rtclock_tick_deadline);
		hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
	}

	mytimer = &pp->rtclock_timer;

	/* One-shot timer: has_expired is set around the callout so that
	 * clock_set_timer_deadline() won't touch the decrementer meanwhile. */
	abstime = mach_absolute_time();
	if (	mytimer->is_set &&
			mytimer->deadline <= abstime ) {
		mytimer->has_expired = TRUE; mytimer->is_set = FALSE;
		(*rtclock_timer_expire)(abstime);
		mytimer->has_expired = FALSE;
	}

	/* Re-arm for the earlier of the tick and timer deadlines. */
	abstime = mach_absolute_time();
	decr1 = deadline_to_decrementer(pp->rtclock_tick_deadline, abstime);

	if (mytimer->is_set)
		decr2 = deadline_to_decrementer(mytimer->deadline, abstime);

	if (decr1 > decr2)
		decr1 = decr2;

	/* Debug knob: cap at rtclock_decrementer_min when set. */
	if (	rtclock_decrementer_min != 0		&&
			rtclock_decrementer_min < (natural_t)decr1		)
		decr1 = rtclock_decrementer_min;

	treqs(decr1);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
						  | DBG_FUNC_NONE, decr1, 3, 0, 0, 0);
}
979
980 static void
981 rtclock_alarm_expire(
982 __unused void *p0,
983 __unused void *p1)
984 {
985 mach_timespec_t timestamp;
986
987 (void) sysclk_gettime(&timestamp);
988
989 clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
990 }
991
992 static int
993 deadline_to_decrementer(
994 uint64_t deadline,
995 uint64_t now)
996 {
997 uint64_t delt;
998
999 if (deadline <= now)
1000 return DECREMENTER_MIN;
1001 else {
1002 delt = deadline - now;
1003 return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
1004 ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
1005 }
1006 }
1007
1008 static void
1009 nanotime_to_absolutetime(
1010 uint32_t secs,
1011 uint32_t nanosecs,
1012 uint64_t *result)
1013 {
1014 uint32_t divisor = rtclock_sec_divisor;
1015
1016 *result = ((uint64_t)secs * divisor) +
1017 ((uint64_t)nanosecs * divisor) / NSEC_PER_SEC;
1018 }
1019
1020 void
1021 absolutetime_to_microtime(
1022 uint64_t abstime,
1023 uint32_t *secs,
1024 uint32_t *microsecs)
1025 {
1026 uint64_t t64;
1027 uint32_t divisor;
1028
1029 *secs = t64 = abstime / (divisor = rtclock_sec_divisor);
1030 abstime -= (t64 * divisor);
1031 *microsecs = (abstime * USEC_PER_SEC) / divisor;
1032 }
1033
/*
 * Compute an absolute deadline that lies "interval * scale_factor"
 * nanoseconds past the current uptime.
 */
void
clock_interval_to_deadline(
	uint32_t			interval,
	uint32_t			scale_factor,
	uint64_t			*result)
{
	uint64_t	span;

	clock_get_uptime(result);

	clock_interval_to_absolutetime_interval(interval, scale_factor, &span);

	*result += span;
}
1048
1049 void
1050 clock_interval_to_absolutetime_interval(
1051 uint32_t interval,
1052 uint32_t scale_factor,
1053 uint64_t *result)
1054 {
1055 uint64_t nanosecs = (uint64_t)interval * scale_factor;
1056 uint64_t t64;
1057 uint32_t divisor;
1058
1059 *result = (t64 = nanosecs / NSEC_PER_SEC) *
1060 (divisor = rtclock_sec_divisor);
1061 nanosecs -= (t64 * NSEC_PER_SEC);
1062 *result += (nanosecs * divisor) / NSEC_PER_SEC;
1063 }
1064
/*
 * Compute an absolute deadline "abstime" timebase ticks from now.
 */
void
clock_absolutetime_interval_to_deadline(
	uint64_t			abstime,
	uint64_t			*result)
{
	uint64_t	now;

	clock_get_uptime(&now);

	*result = now + abstime;
}
1074
1075 void
1076 absolutetime_to_nanoseconds(
1077 uint64_t abstime,
1078 uint64_t *result)
1079 {
1080 uint64_t t64;
1081 uint32_t divisor;
1082
1083 *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
1084 abstime -= (t64 * divisor);
1085 *result += (abstime * NSEC_PER_SEC) / divisor;
1086 }
1087
1088 void
1089 nanoseconds_to_absolutetime(
1090 uint64_t nanosecs,
1091 uint64_t *result)
1092 {
1093 uint64_t t64;
1094 uint32_t divisor;
1095
1096 *result = (t64 = nanosecs / NSEC_PER_SEC) *
1097 (divisor = rtclock_sec_divisor);
1098 nanosecs -= (t64 * NSEC_PER_SEC);
1099 *result += (nanosecs * divisor) / NSEC_PER_SEC;
1100 }
1101
/*
 * Spin until the timebase reaches the given deadline.
 */
void
machine_delay_until(
	uint64_t		deadline)
{
	while (mach_absolute_time() < deadline)
		continue;
}
1112
1113 /*
1114 * Request a decrementer pop
1115 *
1116 */
1117
1118 void treqs(uint32_t dec) {
1119
1120
1121 struct per_proc_info *pp;
1122 uint64_t nowtime, newtime;
1123
1124 nowtime = mach_absolute_time(); /* What time is it? */
1125 pp = getPerProc(); /* Get our processor block */
1126 newtime = nowtime + (uint64_t)dec; /* Get requested pop time */
1127 pp->rtcPop = newtime; /* Copy it */
1128
1129 mtdec((uint32_t)(newtime - nowtime)); /* Set decrementer */
1130 return;
1131
1132 }