/*
 * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:    rtclock.c
 * Purpose: Routines for handling the machine dependent
 *          real-time clock.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

#include <kern/host_notify.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <ppc/pms.h>
#include <ppc/rtclock.h>

#include <IOKit/IOPlatformExpert.h>

#include <sys/kdebug.h>

int sysclk_config(void);

int sysclk_init(void);

kern_return_t sysclk_gettime(
    mach_timespec_t *cur_time);

kern_return_t sysclk_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,
    mach_msg_type_number_t *count);

void sysclk_setalarm(
    mach_timespec_t *deadline);

struct clock_ops sysclk_ops = {
    sysclk_config, sysclk_init,
    sysclk_gettime, 0,
    sysclk_getattr, 0,
    sysclk_setalarm,
};

int calend_config(void);

kern_return_t calend_gettime(
    mach_timespec_t *cur_time);

kern_return_t calend_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,
    mach_msg_type_number_t *count);

struct clock_ops calend_ops = {
    calend_config, 0,
    calend_gettime, 0,
    calend_getattr, 0,
    0,
};

/* local data declarations */

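/*
 * Calendar state.  epoch/microepoch hold the offset of the calendar
 * (wall) clock from the system clock, so calendar time is normally
 * computed as uptime + epoch.  epoch1 is the absolute time at which
 * the current slow-down (negative) adjustment interval began,
 * adjtotal is the amount of adjustment still outstanding in
 * microseconds, and adjdelta is the portion applied per clock tick.
 */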
static struct rtclock_calend {
    uint32_t    epoch;
    uint32_t    microepoch;

    uint64_t    epoch1;

    int64_t     adjtotal;
    int32_t     adjdelta;
} rtclock_calend;

static uint32_t rtclock_boottime;

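/*
 * TIME_ADD and TIME_SUB operate on a time kept as a (seconds, fraction)
 * pair, where the fraction is expressed in 'unit' parts per second
 * (USEC_PER_SEC or NSEC_PER_SEC here).  They carry or borrow at most
 * one whole second, so the fraction arguments are expected to already
 * be less than 'unit'.  For example, adding 0 s / 700,000 us to
 * 1 s / 600,000 us with unit USEC_PER_SEC yields 2 s / 300,000 us.
 */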
#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
MACRO_BEGIN \
    if (((rfrac) += (frac)) >= (unit)) { \
        (rfrac) -= (unit); \
        (rsecs) += 1; \
    } \
    (rsecs) += (secs); \
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
MACRO_BEGIN \
    if ((int32_t)((rfrac) -= (frac)) < 0) { \
        (rfrac) += (unit); \
        (rsecs) -= 1; \
    } \
    (rsecs) -= (secs); \
MACRO_END

#define NSEC_PER_HZ (NSEC_PER_SEC / 100)
static uint32_t rtclock_tick_interval;

static uint32_t rtclock_sec_divisor;

static mach_timebase_info_data_t rtclock_timebase_const;

static boolean_t rtclock_timebase_initialized;

static clock_timer_func_t rtclock_timer_expire;

static timer_call_data_t rtclock_alarm_timer;

static void nanotime_to_absolutetime(
    uint32_t secs,
    uint32_t nanosecs,
    uint64_t *result);

static void rtclock_alarm_expire(
    timer_call_param_t p0,
    timer_call_param_t p1);

/* global data declarations */

decl_simple_lock_data(static,rtclock_lock)

/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s) \
MACRO_BEGIN \
    (s) = splclock(); \
    simple_lock(&rtclock_lock); \
MACRO_END

#define UNLOCK_RTC(s) \
MACRO_BEGIN \
    simple_unlock(&rtclock_lock); \
    splx(s); \
MACRO_END

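/*
 * The platform reports the timebase frequency as the ratio
 * timebase_num / timebase_den ticks per second.  The mach_timebase_info
 * constant published below is the reciprocal scaled by NSEC_PER_SEC
 * (numer = den * NSEC_PER_SEC, denom = num), so that
 * nanoseconds = ticks * numer / denom.  For example, a hypothetical
 * 25,000,000 / 1 timebase would yield numer / denom =
 * 1,000,000,000 / 25,000,000, i.e. 40 ns per tick, with
 * rtclock_sec_divisor = 25,000,000 ticks per second.
 */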
static void
timebase_callback(
    struct timebase_freq_t *freq)
{
    uint32_t numer, denom;
    uint64_t abstime;
    spl_t s;

    if (freq->timebase_den < 1 || freq->timebase_den > 4 ||
        freq->timebase_num < freq->timebase_den)
        panic("rtclock timebase_callback: invalid constant %d / %d",
              freq->timebase_num, freq->timebase_den);

    denom = freq->timebase_num;
    numer = freq->timebase_den * NSEC_PER_SEC;

    LOCK_RTC(s);
    if (!rtclock_timebase_initialized) {
        commpage_set_timestamp(0,0,0,0);

        rtclock_timebase_const.numer = numer;
        rtclock_timebase_const.denom = denom;
        rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;

        nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
        rtclock_tick_interval = abstime;

        ml_init_lock_timeout();
    }
    else {
        UNLOCK_RTC(s);
        printf("rtclock timebase_callback: late old %d / %d new %d / %d\n",
               rtclock_timebase_const.numer, rtclock_timebase_const.denom,
               numer, denom);
        return;
    }
    UNLOCK_RTC(s);

    clock_timebase_init();
}

/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
    timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

    simple_lock_init(&rtclock_lock, 0);

    PE_register_timebase_callback(timebase_callback);

    return (1);
}

/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
    uint64_t abstime;
    struct per_proc_info *pp;

    pp = getPerProc();

    abstime = mach_absolute_time();
    pp->rtclock_tick_deadline = abstime + rtclock_tick_interval;   /* Get the time we need to pop */
    pp->rtcPop = pp->rtclock_tick_deadline;     /* Set the rtc pop time the same for now */

    (void)setTimerReq();    /* Start the timers going */

    return (1);
}

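/*
 * The uptime routines below all convert an absolute (timebase tick)
 * count to seconds plus a fraction by dividing by rtclock_sec_divisor
 * and then scaling only the sub-second remainder.  Scaling just the
 * remainder (which is always less than one second's worth of ticks)
 * keeps the intermediate multiplication by NSEC_PER_SEC or
 * USEC_PER_SEC within 64 bits, which would not be true if the full
 * tick count were multiplied first.
 */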
kern_return_t
sysclk_gettime(
    mach_timespec_t *time)  /* OUT */
{
    uint64_t now, t64;
    uint32_t divisor;

    now = mach_absolute_time();

    time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    time->tv_nsec = (now * NSEC_PER_SEC) / divisor;

    return (KERN_SUCCESS);
}

void
clock_get_system_microtime(
    uint32_t *secs,
    uint32_t *microsecs)
{
    uint64_t now, t64;
    uint32_t divisor;

    now = mach_absolute_time();

    *secs = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    *microsecs = (now * USEC_PER_SEC) / divisor;
}

void
clock_get_system_nanotime(
    uint32_t *secs,
    uint32_t *nanosecs)
{
    uint64_t now, t64;
    uint32_t divisor;

    now = mach_absolute_time();

    *secs = t64 = now / (divisor = rtclock_sec_divisor);
    now -= (t64 * divisor);
    *nanosecs = (now * NSEC_PER_SEC) / divisor;
}

/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,      /* OUT */
    mach_msg_type_number_t *count)  /* IN/OUT */
{
    spl_t s;

    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        LOCK_RTC(s);
        *(clock_res_t *) attr = NSEC_PER_HZ;
        UNLOCK_RTC(s);
        break;

    default:
        return (KERN_INVALID_VALUE);
    }

    return (KERN_SUCCESS);
}

/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
    mach_timespec_t *deadline)
{
    uint64_t abstime;

    nanotime_to_absolutetime(deadline->tv_sec, deadline->tv_nsec, &abstime);
    timer_call_enter(&rtclock_alarm_timer, abstime);
}

/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
    return (1);
}

/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
    mach_timespec_t *time)  /* OUT */
{
    clock_get_calendar_nanotime(
        &time->tv_sec, &time->tv_nsec);

    return (KERN_SUCCESS);
}

/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,      /* OUT */
    mach_msg_type_number_t *count)  /* IN/OUT */
{
    spl_t s;

    if (*count != 1)
        return (KERN_FAILURE);

    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
        LOCK_RTC(s);
        *(clock_res_t *) attr = NSEC_PER_HZ;
        UNLOCK_RTC(s);
        break;

    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        *(clock_res_t *) attr = 0;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }

    return (KERN_SUCCESS);
}

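/*
 * Calendar (wall clock) time is derived from the system clock.  When
 * no slow-down is pending (adjdelta >= 0) it is simply uptime + epoch.
 * While the clock is being slowed (adjdelta < 0), the epoch
 * temporarily holds the full calendar value as of epoch1 rather than
 * an offset from boot, so the reported time holds at that value until
 * the time elapsed since epoch1 exceeds the microseconds being
 * withheld, and then advances by the difference.
 */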
void
clock_get_calendar_microtime(
    uint32_t *secs,
    uint32_t *microsecs)
{
    uint32_t epoch, microepoch;
    uint64_t now, t64;
    spl_t s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        uint32_t divisor;

        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        microepoch = rtclock_calend.microepoch;

        simple_unlock(&rtclock_lock);

        *secs = t64 = now / (divisor = rtclock_sec_divisor);
        now -= (t64 * divisor);
        *microsecs = (now * USEC_PER_SEC) / divisor;

        TIME_ADD(*secs, epoch, *microsecs, microepoch, USEC_PER_SEC);
    }
    else {
        uint32_t delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        *secs = rtclock_calend.epoch;
        *microsecs = rtclock_calend.microepoch;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                TIME_ADD(*secs, 0, *microsecs, (t32 - delta), USEC_PER_SEC);
        }

        simple_unlock(&rtclock_lock);
    }

    splx(s);
}

/* This is only called from the gettimeofday() syscall. As a side
 * effect, it updates the commpage timestamp. Otherwise it is
 * identical to clock_get_calendar_microtime(). Because most
 * gettimeofday() calls are handled by the commpage in user mode,
 * this routine should be infrequently used except when slowing down
 * the clock.
 */
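/*
 * The timestamp published here gives user mode an anchor pair of
 * absolute time and calendar time from which the commpage code can
 * extrapolate the current time of day without entering the kernel;
 * the absolute time is backed up to the start of the current
 * microsecond so the extrapolation starts on an exact boundary.
 * Whenever the calendar is set or an adjustment is in progress the
 * timestamp is zeroed (commpage_set_timestamp(0,0,0,0)), which sends
 * user-mode gettimeofday() back through this syscall path.
 */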
void
clock_gettimeofday(
    uint32_t *secs_p,
    uint32_t *microsecs_p)
{
    uint32_t epoch, microepoch;
    uint32_t secs, microsecs;
    uint64_t now, t64, secs_64, usec_64;
    spl_t s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        microepoch = rtclock_calend.microepoch;

        secs = secs_64 = now / rtclock_sec_divisor;
        t64 = now - (secs_64 * rtclock_sec_divisor);
        microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        TIME_ADD(secs, epoch, microsecs, microepoch, USEC_PER_SEC);

        /* adjust "now" to be absolute time at _start_ of usecond */
        now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);

        commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
    }
    else {
        uint32_t delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        secs = rtclock_calend.epoch;
        microsecs = rtclock_calend.microepoch;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                TIME_ADD(secs, 0, microsecs, (t32 - delta), USEC_PER_SEC);
        }

        /* no need to disable timestamp, it is already off */
    }

    simple_unlock(&rtclock_lock);
    splx(s);

    *secs_p = secs;
    *microsecs_p = microsecs;
}

void
clock_get_calendar_nanotime(
    uint32_t *secs,
    uint32_t *nanosecs)
{
    uint32_t epoch, nanoepoch;
    uint64_t now, t64;
    spl_t s = splclock();

    simple_lock(&rtclock_lock);

    if (rtclock_calend.adjdelta >= 0) {
        uint32_t divisor;

        now = mach_absolute_time();

        epoch = rtclock_calend.epoch;
        nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;

        simple_unlock(&rtclock_lock);

        *secs = t64 = now / (divisor = rtclock_sec_divisor);
        now -= (t64 * divisor);
        *nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;

        TIME_ADD(*secs, epoch, *nanosecs, nanoepoch, NSEC_PER_SEC);
    }
    else {
        uint32_t delta, t32;

        delta = -rtclock_calend.adjdelta;

        now = mach_absolute_time();

        *secs = rtclock_calend.epoch;
        *nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;

        if (now > rtclock_calend.epoch1) {
            t64 = now - rtclock_calend.epoch1;

            t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

            if (t32 > delta)
                TIME_ADD(*secs, 0, *nanosecs, ((t32 - delta) * NSEC_PER_USEC), NSEC_PER_SEC);
        }

        simple_unlock(&rtclock_lock);
    }

    splx(s);
}

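/*
 * Setting the calendar first folds any slow-down still in progress
 * back into the epoch, then recomputes the epoch as the requested
 * wall clock time minus the current uptime, so subsequent reads again
 * see uptime + epoch.  The recorded boot time is shifted by the same
 * amount the epoch moved, the commpage timestamp is invalidated while
 * the change is made, and the platform clock is set (via
 * PESetGMTTimeOfDay) to the requested time rounded to the nearest
 * second.
 */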
void
clock_set_calendar_microtime(
    uint32_t secs,
    uint32_t microsecs)
{
    uint32_t sys, microsys;
    uint32_t newsecs;
    spl_t s;

    /* Round to the nearest second for the platform clock. */
    newsecs = (microsecs < (USEC_PER_SEC / 2))?
                secs: secs + 1;

    s = splclock();
    simple_lock(&rtclock_lock);

    commpage_set_timestamp(0,0,0,0);

    /*
     * Cancel any adjustment in progress.
     */
    if (rtclock_calend.adjdelta < 0) {
        uint64_t now, t64;
        uint32_t delta, t32;

        delta = -rtclock_calend.adjdelta;

        sys = rtclock_calend.epoch;
        microsys = rtclock_calend.microepoch;

        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        if (t32 > delta)
            TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC);

        rtclock_calend.epoch = sys;
        rtclock_calend.microepoch = microsys;

        sys = t64 = now / rtclock_sec_divisor;
        now -= (t64 * rtclock_sec_divisor);
        microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

        TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
    }

    rtclock_calend.epoch1 = 0;
    rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;

    /*
     * Calculate the new calendar epoch based on
     * the new value and the system clock.
     */
    clock_get_system_microtime(&sys, &microsys);
    TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

    /*
     * Adjust the boottime based on the delta.
     */
    rtclock_boottime += secs - rtclock_calend.epoch;

    /*
     * Set the new calendar epoch.
     */
    rtclock_calend.epoch = secs;
    rtclock_calend.microepoch = microsecs;

    simple_unlock(&rtclock_lock);

    /*
     * Set the new value for the platform clock.
     */
    PESetGMTTimeOfDay(newsecs);

    splx(s);

    /*
     * Send host notifications.
     */
    host_notify_calendar_change();
}

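/*
 * Calendar adjustments (adjtime) are applied gradually: each clock
 * tick moves the calendar by at most tickadj microseconds, or ten
 * times that when the requested total exceeds bigadj microseconds.
 * clock_set_calendar_adjtime() records the requested total and
 * returns the interval (one clock tick, in absolute-time units) after
 * which clock_adjust_calendar() is to be called to apply the per-tick
 * delta, or zero if there is nothing to do; the previously
 * outstanding adjustment, if any, is returned through secs/microsecs.
 */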
#define tickadj     (40)            /* "standard" skew, us / tick */
#define bigadj      (USEC_PER_SEC)  /* use 10x skew above bigadj us */

uint32_t
clock_set_calendar_adjtime(
    int32_t *secs,
    int32_t *microsecs)
{
    int64_t total, ototal;
    uint32_t interval = 0;
    spl_t s;

    total = (int64_t)*secs * USEC_PER_SEC + *microsecs;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    ototal = rtclock_calend.adjtotal;

    if (rtclock_calend.adjdelta < 0) {
        uint64_t now, t64;
        uint32_t delta, t32;
        uint32_t sys, microsys;

        delta = -rtclock_calend.adjdelta;

        sys = rtclock_calend.epoch;
        microsys = rtclock_calend.microepoch;

        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        if (t32 > delta)
            TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC);

        rtclock_calend.epoch = sys;
        rtclock_calend.microepoch = microsys;

        sys = t64 = now / rtclock_sec_divisor;
        now -= (t64 * rtclock_sec_divisor);
        microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

        TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
    }

    if (total != 0) {
        int32_t delta = tickadj;

        if (total > 0) {
            if (total > bigadj)
                delta *= 10;
            if (delta > total)
                delta = total;

            rtclock_calend.epoch1 = 0;
        }
        else {
            uint64_t now, t64;
            uint32_t sys, microsys;

            if (total < -bigadj)
                delta *= 10;
            delta = -delta;
            if (delta < total)
                delta = total;

            rtclock_calend.epoch1 = now = mach_absolute_time();

            sys = t64 = now / rtclock_sec_divisor;
            now -= (t64 * rtclock_sec_divisor);
            microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

            TIME_ADD(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
        }

        rtclock_calend.adjtotal = total;
        rtclock_calend.adjdelta = delta;

        interval = rtclock_tick_interval;
    }
    else {
        rtclock_calend.epoch1 = 0;
        rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    }

    UNLOCK_RTC(s);

    if (ototal == 0)
        *secs = *microsecs = 0;
    else {
        *secs = ototal / USEC_PER_SEC;
        *microsecs = ototal % USEC_PER_SEC;
    }

    return (interval);
}

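/*
 * Called once per clock tick while an adjustment is outstanding.
 * A positive delta is simply added to the epoch each tick.  For a
 * negative delta the epoch is instead credited with the time elapsed
 * since the previous tick minus the withheld amount, and epoch1 is
 * advanced to the current tick, so the calendar runs slow rather than
 * stepping backwards.  Returns the tick interval while more
 * adjustment remains, or zero when it has completed.
 */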
uint32_t
clock_adjust_calendar(void)
{
    uint32_t interval = 0;
    int32_t delta;
    spl_t s;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    delta = rtclock_calend.adjdelta;

    if (delta > 0) {
        TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, delta, USEC_PER_SEC);

        rtclock_calend.adjtotal -= delta;
        if (delta > rtclock_calend.adjtotal)
            rtclock_calend.adjdelta = rtclock_calend.adjtotal;
    }
    else if (delta < 0) {
        uint64_t now, t64;
        uint32_t t32;

        now = mach_absolute_time();

        if (now > rtclock_calend.epoch1)
            t64 = now - rtclock_calend.epoch1;
        else
            t64 = 0;

        rtclock_calend.epoch1 = now;

        t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

        TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, (t32 + delta), USEC_PER_SEC);

        rtclock_calend.adjtotal -= delta;
        if (delta < rtclock_calend.adjtotal)
            rtclock_calend.adjdelta = rtclock_calend.adjtotal;

        if (rtclock_calend.adjdelta == 0) {
            uint32_t sys, microsys;

            sys = t64 = now / rtclock_sec_divisor;
            now -= (t64 * rtclock_sec_divisor);
            microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

            TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);

            rtclock_calend.epoch1 = 0;
        }
    }

    if (rtclock_calend.adjdelta != 0)
        interval = rtclock_tick_interval;

    UNLOCK_RTC(s);

    return (interval);
}

/*
 * clock_initialize_calendar:
 *
 * Set the calendar and related clocks
 * from the platform clock at boot or
 * wake event.
 */
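/*
 * Note that the platform value is only used when it is not earlier
 * than the recorded boot time; a platform clock that has fallen
 * behind rtclock_boottime is ignored rather than being allowed to
 * reinitialize the calendar at a wake event.
 */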
void
clock_initialize_calendar(void)
{
    uint32_t sys, microsys;
    uint32_t microsecs = 0, secs = PEGetGMTTimeOfDay();
    spl_t s;

    LOCK_RTC(s);
    commpage_set_timestamp(0,0,0,0);

    if ((int32_t)secs >= (int32_t)rtclock_boottime) {
        /*
         * Initialize the boot time based on the platform clock.
         */
        if (rtclock_boottime == 0)
            rtclock_boottime = secs;

        /*
         * Calculate the new calendar epoch based
         * on the platform clock and the system
         * clock.
         */
        clock_get_system_microtime(&sys, &microsys);
        TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

        /*
         * Set the new calendar epoch.
         */
        rtclock_calend.epoch = secs;
        rtclock_calend.microepoch = microsecs;

        /*
         * Cancel any adjustment in progress.
         */
        rtclock_calend.epoch1 = 0;
        rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
    }

    UNLOCK_RTC(s);

    /*
     * Send host notifications.
     */
    host_notify_calendar_change();
}

void
clock_get_boottime_nanotime(
    uint32_t *secs,
    uint32_t *nanosecs)
{
    *secs = rtclock_boottime;
    *nanosecs = 0;
}

void
clock_timebase_info(
    mach_timebase_info_t info)
{
    spl_t s;

    LOCK_RTC(s);
    rtclock_timebase_initialized = TRUE;
    *info = rtclock_timebase_const;
    UNLOCK_RTC(s);
}

void
clock_set_timer_deadline(
    uint64_t deadline)
{
    int decr;
    uint64_t abstime;
    rtclock_timer_t *mytimer;
    struct per_proc_info *pp;
    spl_t s;

    s = splclock();
    pp = getPerProc();
    mytimer = &pp->rtclock_timer;
    mytimer->deadline = deadline;

    if (!mytimer->has_expired && (deadline < pp->rtclock_tick_deadline)) {  /* Timer not yet expired and deadline earlier than the next periodic tick? */
        pp->rtcPop = deadline;  /* Yes, set the new rtc pop time */
        decr = setTimerReq();   /* Start the timers going */

        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
            | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
    }

    splx(s);
}

void
clock_set_timer_func(
    clock_timer_func_t func)
{
    spl_t s;

    LOCK_RTC(s);
    if (rtclock_timer_expire == NULL)
        rtclock_timer_expire = func;
    UNLOCK_RTC(s);
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(struct savearea *ssp)
{
    uint64_t abstime;
    int decr;
    rtclock_timer_t *mytimer;
    struct per_proc_info *pp;

    pp = getPerProc();
    mytimer = &pp->rtclock_timer;

    abstime = mach_absolute_time();
    if (pp->rtclock_tick_deadline <= abstime) { /* Have we passed the pop time? */
        clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
            &pp->rtclock_tick_deadline);
        hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
        abstime = mach_absolute_time();     /* Refresh the current time since we went away */
    }

    if (mytimer->deadline <= abstime) {     /* Have we expired the deadline? */
        mytimer->has_expired = TRUE;        /* Remember that we popped */
        mytimer->deadline = EndOfAllTime;   /* Set timer request to the end of all time in case we have no more events */
        (*rtclock_timer_expire)(abstime);   /* Process pop */
        mytimer->has_expired = FALSE;
    }

    pp->rtcPop = (pp->rtclock_tick_deadline < mytimer->deadline) ? /* Get shortest pop */
        pp->rtclock_tick_deadline :         /* It was the periodic timer */
        mytimer->deadline;                  /* Actually, an event request */

    decr = setTimerReq();                   /* Request the timer pop */

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
        | DBG_FUNC_NONE, decr, 3, 0, 0, 0);
}

/*
 * Request an interruption at a specific time
 *
 * Sets the decrementer to pop at the right time based on the timebase.
 * The value is chosen by comparing the rtc request with the power
 * management request.  We may add other values at a future time.
 *
 */

int setTimerReq(void) {

    struct per_proc_info *pp;
    int decr;
    uint64_t nexttime;

    pp = getPerProc();          /* Get per_proc */

    nexttime = pp->rtcPop;      /* Assume main timer */

    decr = setPop((pp->pms.pmsPop < nexttime) ? pp->pms.pmsPop : nexttime); /* Schedule timer pop */

    return decr;                /* Pass back what we actually set */
}

static void
rtclock_alarm_expire(
    __unused void *p0,
    __unused void *p1)
{
    mach_timespec_t timestamp;

    (void) sysclk_gettime(&timestamp);

    clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}

static void
nanotime_to_absolutetime(
    uint32_t secs,
    uint32_t nanosecs,
    uint64_t *result)
{
    uint32_t divisor = rtclock_sec_divisor;

    *result = ((uint64_t)secs * divisor) +
              ((uint64_t)nanosecs * divisor) / NSEC_PER_SEC;
}

void
absolutetime_to_microtime(
    uint64_t abstime,
    uint32_t *secs,
    uint32_t *microsecs)
{
    uint64_t t64;
    uint32_t divisor;

    *secs = t64 = abstime / (divisor = rtclock_sec_divisor);
    abstime -= (t64 * divisor);
    *microsecs = (abstime * USEC_PER_SEC) / divisor;
}

void
clock_interval_to_deadline(
    uint32_t interval,
    uint32_t scale_factor,
    uint64_t *result)
{
    uint64_t abstime;

    clock_get_uptime(result);

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    *result += abstime;
}

void
clock_interval_to_absolutetime_interval(
    uint32_t interval,
    uint32_t scale_factor,
    uint64_t *result)
{
    uint64_t nanosecs = (uint64_t)interval * scale_factor;
    uint64_t t64;
    uint32_t divisor;

    *result = (t64 = nanosecs / NSEC_PER_SEC) *
              (divisor = rtclock_sec_divisor);
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * divisor) / NSEC_PER_SEC;
}

void
clock_absolutetime_interval_to_deadline(
    uint64_t abstime,
    uint64_t *result)
{
    clock_get_uptime(result);

    *result += abstime;
}

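/*
 * As with the conversions above, the two routines below work in two
 * steps, whole seconds first and then the sub-second remainder, so
 * that the intermediate products stay within 64 bits.  For example,
 * with a hypothetical 25 MHz timebase (rtclock_sec_divisor of
 * 25,000,000), an abstime of 25,000,010 ticks converts to
 * 1,000,000,400 ns: 1 s from the division, plus
 * (10 * NSEC_PER_SEC) / 25,000,000 = 400 ns from the remainder.
 */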
void
absolutetime_to_nanoseconds(
    uint64_t abstime,
    uint64_t *result)
{
    uint64_t t64;
    uint32_t divisor;

    *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
    abstime -= (t64 * divisor);
    *result += (abstime * NSEC_PER_SEC) / divisor;
}

void
nanoseconds_to_absolutetime(
    uint64_t nanosecs,
    uint64_t *result)
{
    uint64_t t64;
    uint32_t divisor;

    *result = (t64 = nanosecs / NSEC_PER_SEC) *
              (divisor = rtclock_sec_divisor);
    nanosecs -= (t64 * NSEC_PER_SEC);
    *result += (nanosecs * divisor) / NSEC_PER_SEC;
}

void
machine_delay_until(
    uint64_t deadline)
{
    uint64_t now;

    do {
        now = mach_absolute_time();
    } while (now < deadline);
}