/*
 * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *  File:    rtclock.c
 *  Purpose: Routines for handling the machine dependent
 *           real-time clock.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

#include <kern/host_notify.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <ppc/pms.h>
#include <ppc/rtclock.h>

#include <IOKit/IOPlatformExpert.h>

#include <sys/kdebug.h>

int sysclk_config(void);

int sysclk_init(void);

kern_return_t sysclk_gettime(
    mach_timespec_t *cur_time);

kern_return_t sysclk_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,
    mach_msg_type_number_t *count);

void sysclk_setalarm(
    mach_timespec_t *deadline);

struct clock_ops sysclk_ops = {
    sysclk_config, sysclk_init,
    sysclk_gettime, 0,
    sysclk_getattr, 0,
    sysclk_setalarm,
};

int calend_config(void);

kern_return_t calend_gettime(
    mach_timespec_t *cur_time);

kern_return_t calend_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,
    mach_msg_type_number_t *count);

struct clock_ops calend_ops = {
    calend_config, 0,
    calend_gettime, 0,
    calend_getattr, 0,
    0,
};

/* local data declarations */

static struct rtclock_calend {
    uint32_t  epoch;       /* calendar epoch, seconds */
    uint32_t  microepoch;  /* calendar epoch, microseconds */

    uint64_t  epoch1;      /* absolute time the current slew interval began;
                            * non-zero only while the clock is being slowed */

    int64_t   adjtotal;    /* remaining calendar adjustment, microseconds */
    int32_t   adjdelta;    /* adjustment applied per clock tick, microseconds */
} rtclock_calend;

static uint32_t rtclock_boottime;  /* boot time expressed in (uint32_t) seconds */

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)    \
MACRO_BEGIN                                         \
    if (((rfrac) += (frac)) >= (unit)) {            \
        (rfrac) -= (unit);                          \
        (rsecs) += 1;                               \
    }                                               \
    (rsecs) += (secs);                              \
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)    \
MACRO_BEGIN                                         \
    if ((int32_t)((rfrac) -= (frac)) < 0) {         \
        (rfrac) += (unit);                          \
        (rsecs) -= 1;                               \
    }                                               \
    (rsecs) -= (secs);                              \
MACRO_END
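
/*
 * Example (unit = USEC_PER_SEC): TIME_ADD of 1 s 700000 us to a
 * running total of 3 s 600000 us first overflows the fraction
 * (1300000 >= 1000000), so it carries: the result is 5 s 300000 us.
 * Note the single conditional carry assumes (frac) < (unit).
 */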

#define NSEC_PER_HZ (NSEC_PER_SEC / 100)
static uint32_t rtclock_tick_interval;

static uint32_t rtclock_sec_divisor;

static mach_timebase_info_data_t rtclock_timebase_const;

static boolean_t rtclock_timebase_initialized;

static clock_timer_func_t rtclock_timer_expire;

static timer_call_data_t rtclock_alarm_timer;

static void nanotime_to_absolutetime(
    uint32_t secs,
    uint32_t nanosecs,
    uint64_t *result);

static void rtclock_alarm_expire(
    timer_call_param_t p0,
    timer_call_param_t p1);

/* global data declarations */

decl_simple_lock_data(static,rtclock_lock)

/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)                 \
MACRO_BEGIN                         \
    (s) = splclock();               \
    simple_lock(&rtclock_lock);     \
MACRO_END

#define UNLOCK_RTC(s)               \
MACRO_BEGIN                         \
    simple_unlock(&rtclock_lock);   \
    splx(s);                        \
MACRO_END

static void
timebase_callback(
    struct timebase_freq_t *freq)
{
    uint32_t numer, denom;
    uint64_t abstime;
    spl_t s;

    if (freq->timebase_den < 1 || freq->timebase_den > 4 ||
        freq->timebase_num < freq->timebase_den)
        panic("rtclock timebase_callback: invalid constant %d / %d",
              freq->timebase_num, freq->timebase_den);

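    /*
     * The timebase ticks (timebase_num / timebase_den) times per
     * second, so ticks convert to nanoseconds by multiplying with
     * (NSEC_PER_SEC * timebase_den) / timebase_num -- hence the
     * deliberate num/den swap below.  For example, a 33,333,333 Hz
     * timebase (num = 100000000, den = 3) gives numer/denom =
     * 3000000000 / 100000000 = 30 ns per tick.
     */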
    denom = freq->timebase_num;
    numer = freq->timebase_den * NSEC_PER_SEC;

    LOCK_RTC(s);
    if (!rtclock_timebase_initialized) {
        commpage_set_timestamp(0,0,0,0);

        rtclock_timebase_const.numer = numer;
        rtclock_timebase_const.denom = denom;
        rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;

        nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
        rtclock_tick_interval = abstime;

        ml_init_lock_timeout();
    }
    else {
        UNLOCK_RTC(s);
        printf("rtclock timebase_callback: late old %d / %d new %d / %d\n",
               rtclock_timebase_const.numer, rtclock_timebase_const.denom,
               numer, denom);
        return;
    }
    UNLOCK_RTC(s);

    clock_timebase_init();
}

/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
    timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

    simple_lock_init(&rtclock_lock, 0);

    PE_register_timebase_callback(timebase_callback);

    return (1);
}

/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
    uint64_t abstime;
    struct per_proc_info *pp;

    pp = getPerProc();

    abstime = mach_absolute_time();
    pp->rtclock_tick_deadline = abstime + rtclock_tick_interval;  /* Get the time we need to pop */
    pp->rtcPop = pp->rtclock_tick_deadline;                       /* Set the rtc pop time the same for now */

    (void)setTimerReq();                                          /* Start the timers going */

    return (1);
}

kern_return_t
sysclk_gettime(
    mach_timespec_t *time)  /* OUT */
{
    uint64_t now, t64;
    uint32_t divisor;

    now = mach_absolute_time();

    time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    time->tv_nsec = (now * NSEC_PER_SEC) / divisor;

    return (KERN_SUCCESS);
}

void
clock_get_system_microtime(
    uint32_t *secs,
    uint32_t *microsecs)
{
    uint64_t now, t64;
    uint32_t divisor;

    now = mach_absolute_time();

    *secs = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    *microsecs = (now * USEC_PER_SEC) / divisor;
}

void
clock_get_system_nanotime(
    uint32_t *secs,
    uint32_t *nanosecs)
{
    uint64_t now, t64;
    uint32_t divisor;

    now = mach_absolute_time();

    *secs = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    *nanosecs = (now * NSEC_PER_SEC) / divisor;
}

/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,              /* OUT */
    mach_msg_type_number_t *count)  /* IN/OUT */
{
    spl_t s;

    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:  /* >0 res */
    case CLOCK_ALARM_CURRES:  /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        LOCK_RTC(s);
        *(clock_res_t *) attr = NSEC_PER_HZ;
        UNLOCK_RTC(s);
        break;

    default:
        return (KERN_INVALID_VALUE);
    }

    return (KERN_SUCCESS);
}

/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
    mach_timespec_t *deadline)
{
    uint64_t abstime;

    nanotime_to_absolutetime(deadline->tv_sec, deadline->tv_nsec, &abstime);
    timer_call_enter(&rtclock_alarm_timer, abstime);
}

/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
    return (1);
}

/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
    mach_timespec_t *time)  /* OUT */
{
    clock_get_calendar_nanotime(
        &time->tv_sec, &time->tv_nsec);

    return (KERN_SUCCESS);
}

/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,              /* OUT */
    mach_msg_type_number_t *count)  /* IN/OUT */
{
    spl_t s;

    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:  /* >0 res */
        LOCK_RTC(s);
        *(clock_res_t *) attr = NSEC_PER_HZ;
        UNLOCK_RTC(s);
        break;

    case CLOCK_ALARM_CURRES:  /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        *(clock_res_t *) attr = 0;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }

    return (KERN_SUCCESS);
}

void
clock_get_calendar_microtime(
    uint32_t *secs,
    uint32_t *microsecs)
{
    uint32_t epoch, microepoch;
    uint64_t now, t64;
    spl_t s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        uint32_t divisor;

        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        microepoch = rtclock_calend.microepoch;

        simple_unlock(&rtclock_lock);

        *secs = t64 = now / (divisor = rtclock_sec_divisor);
        now -= (t64 * divisor);
        *microsecs = (now * USEC_PER_SEC) / divisor;

        TIME_ADD(*secs, epoch, *microsecs, microepoch, USEC_PER_SEC);
    }
    else {
        uint32_t delta, t32;

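        /*
         * The calendar is being slowed: epoch/microepoch hold the
         * calendar time as of epoch1, and elapsed time is credited
         * only beyond the delta microseconds that the next tick
         * will drop, which holds the clock still without letting
         * it run backwards.
         */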
        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        *secs = rtclock_calend.epoch;
        *microsecs = rtclock_calend.microepoch;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                TIME_ADD(*secs, 0, *microsecs, (t32 - delta), USEC_PER_SEC);
        }

        simple_unlock(&rtclock_lock);
    }

    splx(s);
}

/* This is only called from the gettimeofday() syscall.  As a side
 * effect, it updates the commpage timestamp.  Otherwise it is
 * identical to clock_get_calendar_microtime().  Because most
 * gettimeofday() calls are handled by the commpage in user mode,
 * this routine should be infrequently used except when slowing down
 * the clock.
 */
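/* While an adjustment is slowing the clock, the commpage timestamp
 * is kept zeroed (see the commpage_set_timestamp(0,0,0,0) calls in
 * the adjustment paths below), so user-mode gettimeofday() falls
 * through to this syscall until the slew completes.
 */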
void
clock_gettimeofday(
    uint32_t *secs_p,
    uint32_t *microsecs_p)
{
    uint32_t epoch, microepoch;
    uint32_t secs, microsecs;
    uint64_t now, t64, secs_64, usec_64;
    spl_t s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        microepoch = rtclock_calend.microepoch;

        secs = secs_64 = now / rtclock_sec_divisor;
        t64 = now - (secs_64 * rtclock_sec_divisor);
        microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        TIME_ADD(secs, epoch, microsecs, microepoch, USEC_PER_SEC);

        /* adjust "now" to be absolute time at _start_ of usecond */
        now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);

        commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
    }
    else {
        uint32_t delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        secs = rtclock_calend.epoch;
        microsecs = rtclock_calend.microepoch;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                TIME_ADD(secs, 0, microsecs, (t32 - delta), USEC_PER_SEC);
        }

        /* no need to disable timestamp, it is already off */
    }

    simple_unlock(&rtclock_lock);
    splx(s);

    *secs_p = secs;
    *microsecs_p = microsecs;
}

void
clock_get_calendar_nanotime(
    uint32_t *secs,
    uint32_t *nanosecs)
{
    uint32_t epoch, nanoepoch;
    uint64_t now, t64;
    spl_t s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        uint32_t divisor;

        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;

        simple_unlock(&rtclock_lock);

        *secs = t64 = now / (divisor = rtclock_sec_divisor);
        now -= (t64 * divisor);
        *nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;  /* microsecond granularity, to match microepoch */

        TIME_ADD(*secs, epoch, *nanosecs, nanoepoch, NSEC_PER_SEC);
    }
    else {
        uint32_t delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        *secs = rtclock_calend.epoch;
        *nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                TIME_ADD(*secs, 0, *nanosecs, ((t32 - delta) * NSEC_PER_USEC), NSEC_PER_SEC);
        }

        simple_unlock(&rtclock_lock);
    }

    splx(s);
}

void
clock_set_calendar_microtime(
    uint32_t secs,
    uint32_t microsecs)
{
    uint32_t sys, microsys;
    uint32_t newsecs;
    spl_t s;

    /* round to the nearest second for the platform clock */
    newsecs = (microsecs < USEC_PER_SEC / 2)?
                    secs: secs + 1;

    s = splclock();
    simple_lock(&rtclock_lock);

    commpage_set_timestamp(0,0,0,0);

    /*
     * Cancel any adjustment in progress.
     */
    if (rtclock_calend.adjdelta < 0) {
        uint64_t now, t64;
        uint32_t delta, t32;

        delta = -rtclock_calend.adjdelta;

        sys = rtclock_calend.epoch;
        microsys = rtclock_calend.microepoch;

        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        if (t32 > delta)
            TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC);

        rtclock_calend.epoch = sys;
        rtclock_calend.microepoch = microsys;

        sys = t64 = now / rtclock_sec_divisor;
        now -= (t64 * rtclock_sec_divisor);
        microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

        TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
    }

    rtclock_calend.epoch1 = 0;
    rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;

    /*
     * Calculate the new calendar epoch based on
     * the new value and the system clock.
     */
    clock_get_system_microtime(&sys, &microsys);
    TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

    /*
     * Adjust the boottime based on the delta.
     */
    rtclock_boottime += secs - rtclock_calend.epoch;

    /*
     * Set the new calendar epoch.
     */
    rtclock_calend.epoch = secs;
    rtclock_calend.microepoch = microsecs;

    simple_unlock(&rtclock_lock);

    /*
     * Set the new value for the platform clock.
     */
    PESetGMTTimeOfDay(newsecs);

    splx(s);

    /*
     * Send host notifications.
     */
    host_notify_calendar_change();
}

#define tickadj  (40)            /* "standard" skew, us / tick */
#define bigadj   (USEC_PER_SEC)  /* use 10x skew above bigadj us */
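
/*
 * For example, an adjtime() request of +2 seconds exceeds bigadj,
 * so each clock tick advances the calendar an extra 400 us
 * (tickadj * 10).  At 100 ticks per second that slews the clock by
 * 40 ms per second of real time, consuming the 2 second adjustment
 * in roughly 50 seconds.
 */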

uint32_t
clock_set_calendar_adjtime(
    int32_t *secs,
    int32_t *microsecs)
{
    int64_t total, ototal;
    uint32_t interval = 0;
    spl_t s;

    total = (int64_t)*secs * USEC_PER_SEC + *microsecs;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    ototal = rtclock_calend.adjtotal;

    if (rtclock_calend.adjdelta < 0) {
        uint64_t now, t64;
        uint32_t delta, t32;
        uint32_t sys, microsys;

        delta = -rtclock_calend.adjdelta;

        sys = rtclock_calend.epoch;
        microsys = rtclock_calend.microepoch;

        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        if (t32 > delta)
            TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC);

        rtclock_calend.epoch = sys;
        rtclock_calend.microepoch = microsys;

        sys = t64 = now / rtclock_sec_divisor;
        now -= (t64 * rtclock_sec_divisor);
        microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

        TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
    }

    if (total != 0) {
        int32_t delta = tickadj;

        if (total > 0) {
            if (total > bigadj)
                delta *= 10;
            if (delta > total)
                delta = total;

            rtclock_calend.epoch1 = 0;
        }
        else {
            uint64_t now, t64;
            uint32_t sys, microsys;

            if (total < -bigadj)
                delta *= 10;
            delta = -delta;
            if (delta < total)
                delta = total;

            rtclock_calend.epoch1 = now = mach_absolute_time();

            sys = t64 = now / rtclock_sec_divisor;
            now -= (t64 * rtclock_sec_divisor);
            microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

            TIME_ADD(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
        }

        rtclock_calend.adjtotal = total;
        rtclock_calend.adjdelta = delta;

        interval = rtclock_tick_interval;
    }
    else {
        rtclock_calend.epoch1 = 0;
        rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    }

    UNLOCK_RTC(s);

    if (ototal == 0)
        *secs = *microsecs = 0;
    else {
        *secs = ototal / USEC_PER_SEC;
        *microsecs = ototal % USEC_PER_SEC;
    }

    return (interval);
}

uint32_t
clock_adjust_calendar(void)
{
    uint32_t interval = 0;
    int32_t delta;
    spl_t s;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    delta = rtclock_calend.adjdelta;

    if (delta > 0) {
        TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, delta, USEC_PER_SEC);

        rtclock_calend.adjtotal -= delta;
        if (delta > rtclock_calend.adjtotal)
            rtclock_calend.adjdelta = rtclock_calend.adjtotal;
    }
    else
    if (delta < 0) {
        uint64_t now, t64;
        uint32_t t32;

        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        rtclock_calend.epoch1 = now;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, (t32 + delta), USEC_PER_SEC);

        rtclock_calend.adjtotal -= delta;
        if (delta < rtclock_calend.adjtotal)
            rtclock_calend.adjdelta = rtclock_calend.adjtotal;

        if (rtclock_calend.adjdelta == 0) {
            uint32_t sys, microsys;

            sys = t64 = now / rtclock_sec_divisor;
            now -= (t64 * rtclock_sec_divisor);
            microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

            TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);

            rtclock_calend.epoch1 = 0;
        }
    }

    if (rtclock_calend.adjdelta != 0)
        interval = rtclock_tick_interval;

    UNLOCK_RTC(s);

    return (interval);
}

/*
 * clock_initialize_calendar:
 *
 * Set the calendar and related clocks
 * from the platform clock at boot or
 * wake event.
 */
void
clock_initialize_calendar(void)
{
    uint32_t sys, microsys;
    uint32_t microsecs = 0, secs = PEGetGMTTimeOfDay();
    spl_t s;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

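    /*
     * Update only when the platform clock does not read earlier
     * than the recorded boot time; a stale platform value is
     * ignored so the calendar is never stepped back before boot.
     */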
    if ((int32_t)secs >= (int32_t)rtclock_boottime) {
        /*
         * Initialize the boot time based on the platform clock.
         */
        if (rtclock_boottime == 0)
            rtclock_boottime = secs;

        /*
         * Calculate the new calendar epoch based
         * on the platform clock and the system
         * clock.
         */
        clock_get_system_microtime(&sys, &microsys);
        TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

        /*
         * Set the new calendar epoch.
         */
        rtclock_calend.epoch = secs;
        rtclock_calend.microepoch = microsecs;

        /*
         * Cancel any adjustment in progress.
         */
        rtclock_calend.epoch1 = 0;
        rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    }

    UNLOCK_RTC(s);

    /*
     * Send host notifications.
     */
    host_notify_calendar_change();
}

void
clock_get_boottime_nanotime(
    uint32_t *secs,
    uint32_t *nanosecs)
{
    *secs = rtclock_boottime;
    *nanosecs = 0;
}

void
clock_timebase_info(
    mach_timebase_info_t info)
{
    spl_t s;

    LOCK_RTC(s);
    rtclock_timebase_initialized = TRUE;
    *info = rtclock_timebase_const;
    UNLOCK_RTC(s);
}

void
clock_set_timer_deadline(
    uint64_t deadline)
{
    int decr;
    rtclock_timer_t *mytimer;
    struct per_proc_info *pp;
    spl_t s;

    s = splclock();
    pp = getPerProc();
    mytimer = &pp->rtclock_timer;
    mytimer->deadline = deadline;

    if (!mytimer->has_expired && (deadline < pp->rtclock_tick_deadline)) {  /* Not already expired, and due before the next tick pop? */
        pp->rtcPop = deadline;     /* Yes, set the new rtc pop time */
        decr = setTimerReq();      /* Start the timers going */

        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                              | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
    }

    splx(s);
}

void
clock_set_timer_func(
    clock_timer_func_t func)
{
    spl_t s;

    LOCK_RTC(s);
    if (rtclock_timer_expire == NULL)
        rtclock_timer_expire = func;
    UNLOCK_RTC(s);
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
    struct savearea *ssp)
{
    uint64_t abstime;
    int decr;
    rtclock_timer_t *mytimer;
    struct per_proc_info *pp;

    pp = getPerProc();
    mytimer = &pp->rtclock_timer;

    abstime = mach_absolute_time();
    if (pp->rtclock_tick_deadline <= abstime) {  /* Have we passed the pop time? */
        clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
                                          &pp->rtclock_tick_deadline);
        hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
        abstime = mach_absolute_time();          /* Refresh the current time since we went away */
    }

    if (mytimer->deadline <= abstime) {          /* Have we expired the deadline? */
        mytimer->has_expired = TRUE;             /* Remember that we popped */
        mytimer->deadline = EndOfAllTime;        /* Set timer request to the end of all time in case we have no more events */
        (*rtclock_timer_expire)(abstime);        /* Process pop */
        mytimer->has_expired = FALSE;
    }

    pp->rtcPop = (pp->rtclock_tick_deadline < mytimer->deadline) ?  /* Get shortest pop */
        pp->rtclock_tick_deadline :              /* It was the periodic timer */
        mytimer->deadline;                       /* Actually, an event request */

    decr = setTimerReq();                        /* Request the timer pop */

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                          | DBG_FUNC_NONE, decr, 3, 0, 0, 0);
}

/*
 * Request an interruption at a specific time.
 *
 * Sets the decrementer to pop at the right time based on the timebase.
 * The value is chosen by comparing the rtc request with the power
 * management request.  We may add other values at a future time.
 */

int
setTimerReq(void)
{
    struct per_proc_info *pp;
    int decr;
    uint64_t nexttime;

    pp = getPerProc();     /* Get per_proc */

    nexttime = pp->rtcPop; /* Assume main timer */

    decr = setPop((pp->pms.pmsPop < nexttime) ? pp->pms.pmsPop : nexttime);  /* Schedule timer pop */

    return decr;           /* Pass back what we actually set */
}

static void
rtclock_alarm_expire(
    __unused void *p0,
    __unused void *p1)
{
    mach_timespec_t timestamp;

    (void) sysclk_gettime(&timestamp);

    clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}

static void
nanotime_to_absolutetime(
    uint32_t secs,
    uint32_t nanosecs,
    uint64_t *result)
{
    uint32_t divisor = rtclock_sec_divisor;

    *result = ((uint64_t)secs * divisor) +
                ((uint64_t)nanosecs * divisor) / NSEC_PER_SEC;
}

void
absolutetime_to_microtime(
    uint64_t abstime,
    uint32_t *secs,
    uint32_t *microsecs)
{
    uint64_t t64;
    uint32_t divisor;

    *secs = t64 = abstime / (divisor = rtclock_sec_divisor);
    abstime -= (t64 * divisor);
    *microsecs = (abstime * USEC_PER_SEC) / divisor;
}

void
clock_interval_to_deadline(
    uint32_t interval,
    uint32_t scale_factor,
    uint64_t *result)
{
    uint64_t abstime;

    clock_get_uptime(result);

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    *result += abstime;
}

void
clock_interval_to_absolutetime_interval(
    uint32_t interval,
    uint32_t scale_factor,
    uint64_t *result)
{
    uint64_t nanosecs = (uint64_t)interval * scale_factor;
    uint64_t t64;
    uint32_t divisor;

    *result = (t64 = nanosecs / NSEC_PER_SEC) *
                (divisor = rtclock_sec_divisor);
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * divisor) / NSEC_PER_SEC;
}

void
clock_absolutetime_interval_to_deadline(
    uint64_t abstime,
    uint64_t *result)
{
    clock_get_uptime(result);

    *result += abstime;
}

void
absolutetime_to_nanoseconds(
    uint64_t abstime,
    uint64_t *result)
{
    uint64_t t64;
    uint32_t divisor;

    *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
    abstime -= (t64 * divisor);
    *result += (abstime * NSEC_PER_SEC) / divisor;
}

void
nanoseconds_to_absolutetime(
    uint64_t nanosecs,
    uint64_t *result)
{
    uint64_t t64;
    uint32_t divisor;

    *result = (t64 = nanosecs / NSEC_PER_SEC) *
                (divisor = rtclock_sec_divisor);
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * divisor) / NSEC_PER_SEC;
}
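
/*
 * The conversions above split their argument at the one-second
 * boundary so the scaled remainder stays within 64 bits.  For
 * example, with a 33,333,333 Hz timebase, 1000 seconds of uptime is
 * 33,333,333,000 ticks; multiplying that directly by NSEC_PER_SEC
 * would overflow a uint64_t, while the sub-second remainder
 * (always < 33,333,333 ticks) scales safely.
 */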

void
machine_delay_until(
    uint64_t deadline)
{
    uint64_t now;

    do {
        now = mach_absolute_time();
    } while (now < deadline);
}