]> git.saurik.com Git - apple/xnu.git/blame - osfmk/ppc/rtclock.c
xnu-792.17.14.tar.gz
[apple/xnu.git] / osfmk / ppc / rtclock.c
CommitLineData
1c79356b 1/*
8f6c56a5 2 * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
8f6c56a5 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
8f6c56a5
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
8ad349bb 24 * limitations under the License.
8f6c56a5
A
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * @APPLE_FREE_COPYRIGHT@
33 */
34/*
35 * File: rtclock.c
36 * Purpose: Routines for handling the machine dependent
37 * real-time clock.
38 */
39
40#include <mach/mach_types.h>
41
42#include <kern/clock.h>
43#include <kern/thread.h>
44#include <kern/macro_help.h>
45#include <kern/spl.h>
46
8f6c56a5
A
47#include <kern/host_notify.h>
48
55e303ae 49#include <machine/commpage.h>
ab86ba33 50#include <machine/machine_routines.h>
a3d08fcd 51#include <ppc/exception.h>
1c79356b 52#include <ppc/proc_reg.h>
3a60a9f5
A
53#include <ppc/pms.h>
54#include <ppc/rtclock.h>
1c79356b 55
8f6c56a5
A
56#include <IOKit/IOPlatformExpert.h>
57
1c79356b
A
58#include <sys/kdebug.h>
59
8f6c56a5
A
/* Forward declarations for the system (uptime) clock device. */
int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	sysclk_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

void		sysclk_setalarm(
	mach_timespec_t			*deadline);

/* Operations vector for the system clock; unsupported entries are 0. */
struct clock_ops sysclk_ops = {
	sysclk_config,	sysclk_init,
	sysclk_gettime,	0,
	sysclk_getattr,	0,
	sysclk_setalarm,
};

/* Forward declarations for the calendar (wall-clock) device. */
int		calend_config(void);

kern_return_t	calend_gettime(
	mach_timespec_t			*cur_time);

kern_return_t	calend_getattr(
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t	*count);

/* Calendar clock: read-only through this vector; no init/set/alarm. */
struct clock_ops calend_ops = {
	calend_config,	0,
	calend_gettime,	0,
	calend_getattr,	0,
	0,
};
98
/* local data declarations */

/*
 * Calendar state.  epoch/microepoch hold the wall-clock time that
 * corresponded to system uptime zero; adding them to current uptime
 * yields calendar time.  While the clock is being slowed
 * (adjdelta < 0), epoch1 records the absolute time at which the
 * current slew segment began.  adjtotal/adjdelta drive the
 * adjtime()-style gradual adjustment.
 */
static struct rtclock_calend {
	uint32_t			epoch;
	uint32_t			microepoch;

	uint64_t			epoch1;

	int64_t				adjtotal;
	int32_t				adjdelta;
} rtclock_calend;

/* Calendar time (whole seconds) at which the system booted. */
static uint32_t		rtclock_boottime;

/* Add secs + frac to rsecs + rfrac, carrying when the fractional
   part reaches `unit` (fractional parts per second). */
#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if (((rfrac) += (frac)) >= (unit)) {			\
		(rfrac) -= (unit);							\
		(rsecs) += 1;								\
	}												\
	(rsecs) += (secs);								\
MACRO_END

/* Subtract secs + frac from rsecs + rfrac, borrowing from the
   seconds when the fractional part underflows. */
#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if ((int32_t)((rfrac) -= (frac)) < 0) {			\
		(rfrac) += (unit);							\
		(rsecs) -= 1;								\
	}												\
	(rsecs) -= (secs);								\
MACRO_END
1c79356b 130
/* Periodic tick period: 100 Hz. */
#define NSEC_PER_HZ		(NSEC_PER_SEC / 100)
static uint32_t		rtclock_tick_interval;		/* tick period in timebase units */

static uint32_t		rtclock_sec_divisor;		/* timebase ticks per second */

static mach_timebase_info_data_t	rtclock_timebase_const;

/* Set once the timebase ratio has been handed out; after that it is frozen. */
static boolean_t	rtclock_timebase_initialized;

static clock_timer_func_t	rtclock_timer_expire;

static timer_call_data_t	rtclock_alarm_timer;

static void	nanotime_to_absolutetime(
				uint32_t		secs,
				uint32_t		nanosecs,
				uint64_t		*result);

static void	rtclock_alarm_expire(
				timer_call_param_t	p0,
				timer_call_param_t	p1);

/* global data declarations */

decl_simple_lock_data(static,rtclock_lock)

/*
 * Macros to lock/unlock real-time clock device.
 * Interrupt level is raised first so the lock is never held
 * while a clock interrupt could try to take it.
 */
#define LOCK_RTC(s)					\
MACRO_BEGIN						\
	(s) = splclock();				\
	simple_lock(&rtclock_lock);			\
MACRO_END

#define UNLOCK_RTC(s)					\
MACRO_BEGIN						\
	simple_unlock(&rtclock_lock);			\
	splx(s);					\
MACRO_END
171
/*
 * Platform-expert callback delivering the timebase frequency as the
 * ratio freq->timebase_num / freq->timebase_den (ticks per second).
 * On the first call it derives and caches the ns<->ticks conversion
 * constants and the periodic tick interval; later calls (after the
 * ratio has been handed out via clock_timebase_info) only log.
 */
static void
timebase_callback(
	struct timebase_freq_t	*freq)
{
	uint32_t	numer, denom;
	uint64_t	abstime;
	spl_t		s;

	if ( freq->timebase_den < 1 || freq->timebase_den > 4 ||
		  freq->timebase_num < freq->timebase_den )
		panic("rtclock timebase_callback: invalid constant %d / %d",
					freq->timebase_num, freq->timebase_den);

	/* ticks-to-nanoseconds ratio: ns = ticks * (den * 1e9) / num */
	denom = freq->timebase_num;
	numer = freq->timebase_den * NSEC_PER_SEC;

	LOCK_RTC(s);
	if (!rtclock_timebase_initialized) {
		commpage_set_timestamp(0,0,0,0);	/* invalidate user-mode timestamp */

		rtclock_timebase_const.numer = numer;
		rtclock_timebase_const.denom = denom;
		rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;

		nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
		rtclock_tick_interval = abstime;

		ml_init_lock_timeout();
	}
	else {
		/* Ratio already published; too late to change it. */
		UNLOCK_RTC(s);
		printf("rtclock timebase_callback: late old %d / %d new %d / %d\n",
			   rtclock_timebase_const.numer, rtclock_timebase_const.denom,
			   numer, denom);
		return;
	}
	UNLOCK_RTC(s);

	clock_timebase_init();
}
212
/*
 * Configure the real-time clock device: set up the alarm timer call,
 * initialize the clock lock, and register for the platform's
 * timebase-frequency callback.  Returns 1 (success).
 */
int
sysclk_config(void)
{
	timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

	simple_lock_init(&rtclock_lock, 0);

	PE_register_timebase_callback(timebase_callback);

	return (1);
}
227
/*
 * Initialize the system clock device for the calling processor:
 * prime the periodic tick deadline from the current timebase and
 * start the decrementer running.  Returns 1 (success).
 */
int
sysclk_init(void)
{
	uint64_t				abstime;
	struct per_proc_info	*pp;

	pp = getPerProc();

	abstime = mach_absolute_time();
	pp->rtclock_tick_deadline = abstime + rtclock_tick_interval;	/* Get the time we need to pop */
	pp->rtcPop = pp->rtclock_tick_deadline;		/* Set the rtc pop time the same for now */

	(void)setTimerReq();						/* Start the timers going */

	return (1);
}
247
8f6c56a5
A
248kern_return_t
249sysclk_gettime(
250 mach_timespec_t *time) /* OUT */
251{
252 uint64_t now, t64;
253 uint32_t divisor;
254
255 now = mach_absolute_time();
256
257 time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
258 now -= (t64 * divisor);
259 time->tv_nsec = (now * NSEC_PER_SEC) / divisor;
260
261 return (KERN_SUCCESS);
262}
263
55e303ae
A
264void
265clock_get_system_microtime(
266 uint32_t *secs,
267 uint32_t *microsecs)
1c79356b 268{
55e303ae
A
269 uint64_t now, t64;
270 uint32_t divisor;
1c79356b 271
55e303ae 272 now = mach_absolute_time();
1c79356b 273
55e303ae
A
274 *secs = t64 = now / (divisor = rtclock_sec_divisor);
275 now -= (t64 * divisor);
276 *microsecs = (now * USEC_PER_SEC) / divisor;
277}
1c79356b 278
55e303ae
A
279void
280clock_get_system_nanotime(
281 uint32_t *secs,
282 uint32_t *nanosecs)
283{
284 uint64_t now, t64;
285 uint32_t divisor;
1c79356b 286
55e303ae 287 now = mach_absolute_time();
1c79356b 288
55e303ae
A
289 *secs = t64 = now / (divisor = rtclock_sec_divisor);
290 now -= (t64 * divisor);
291 *nanosecs = (now * NSEC_PER_SEC) / divisor;
1c79356b
A
292}
293
8f6c56a5
A
294/*
295 * Get clock device attributes.
296 */
297kern_return_t
298sysclk_getattr(
299 clock_flavor_t flavor,
300 clock_attr_t attr, /* OUT */
301 mach_msg_type_number_t *count) /* IN/OUT */
302{
303 spl_t s;
304
305 if (*count != 1)
306 return (KERN_FAILURE);
307
308 switch (flavor) {
309
310 case CLOCK_GET_TIME_RES: /* >0 res */
311 case CLOCK_ALARM_CURRES: /* =0 no alarm */
312 case CLOCK_ALARM_MINRES:
313 case CLOCK_ALARM_MAXRES:
314 LOCK_RTC(s);
315 *(clock_res_t *) attr = NSEC_PER_HZ;
316 UNLOCK_RTC(s);
317 break;
318
319 default:
320 return (KERN_INVALID_VALUE);
321 }
322
323 return (KERN_SUCCESS);
324}
325
326/*
327 * Set deadline for the next alarm on the clock device. This call
328 * always resets the time to deliver an alarm for the clock.
329 */
330void
331sysclk_setalarm(
332 mach_timespec_t *deadline)
333{
334 uint64_t abstime;
335
336 nanotime_to_absolutetime(deadline->tv_sec, deadline->tv_nsec, &abstime);
337 timer_call_enter(&rtclock_alarm_timer, abstime);
338}
339
/*
 * Configure the calendar clock.  Nothing to set up; always succeeds.
 */
int
calend_config(void)
{
	int	success = 1;

	return (success);
}
348
349/*
350 * Get the current clock time.
351 */
352kern_return_t
353calend_gettime(
354 mach_timespec_t *time) /* OUT */
355{
356 clock_get_calendar_nanotime(
357 &time->tv_sec, &time->tv_nsec);
358
359 return (KERN_SUCCESS);
360}
361
362/*
363 * Get clock device attributes.
364 */
365kern_return_t
366calend_getattr(
367 clock_flavor_t flavor,
368 clock_attr_t attr, /* OUT */
369 mach_msg_type_number_t *count) /* IN/OUT */
370{
371 spl_t s;
372
373 if (*count != 1)
374 return (KERN_FAILURE);
375
376 switch (flavor) {
377
378 case CLOCK_GET_TIME_RES: /* >0 res */
379 LOCK_RTC(s);
380 *(clock_res_t *) attr = NSEC_PER_HZ;
381 UNLOCK_RTC(s);
382 break;
383
384 case CLOCK_ALARM_CURRES: /* =0 no alarm */
385 case CLOCK_ALARM_MINRES:
386 case CLOCK_ALARM_MAXRES:
387 *(clock_res_t *) attr = 0;
388 break;
389
390 default:
391 return (KERN_INVALID_VALUE);
392 }
393
394 return (KERN_SUCCESS);
395}
396
/*
 * Return the calendar (wall) time in seconds and microseconds.
 *
 * Fast path (adjdelta >= 0): wall time is current uptime plus the
 * calendar epoch.  Slew-down path (adjdelta < 0): time is held at the
 * epoch and advances only by the elapsed time since epoch1 in excess
 * of the pending negative delta, so the clock runs slow instead of
 * stepping backwards.
 */
void
clock_get_calendar_microtime(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint32_t		epoch, microepoch;
	uint64_t		now, t64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t		divisor;

		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		/* epoch snapshot taken; the arithmetic below needs no lock */
		simple_unlock(&rtclock_lock);

		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		*microsecs = (now * USEC_PER_SEC) / divisor;

		TIME_ADD(*secs, epoch, *microsecs, microepoch, USEC_PER_SEC);
	}
	else {
		uint32_t		delta, t32;

		/* magnitude of the remaining negative adjustment */
		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time();

		*secs = rtclock_calend.epoch;
		*microsecs = rtclock_calend.microepoch;

		if (now > rtclock_calend.epoch1) {
			t64 = now - rtclock_calend.epoch1;

			t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

			/* advance only by the part not absorbed by the slew */
			if (t32 > delta)
				TIME_ADD(*secs, 0, *microsecs, (t32 - delta), USEC_PER_SEC);
		}

		simple_unlock(&rtclock_lock);
	}

	splx(s);
}
448
/* This is only called from the gettimeofday() syscall.  As a side
 * effect, it updates the commpage timestamp.  Otherwise it is
 * identical to clock_get_calendar_microtime().  Because most
 * gettimeofday() calls are handled by the commpage in user mode,
 * this routine should be infrequently used except when slowing down
 * the clock.
 */
void
clock_gettimeofday(
	uint32_t			*secs_p,
	uint32_t			*microsecs_p)
{
	uint32_t		epoch, microepoch;
	uint32_t		secs, microsecs;
	uint64_t		now, t64, secs_64, usec_64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		/* clock running normally (or speeding up): epoch + uptime */
		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		secs = secs_64 = now / rtclock_sec_divisor;
		t64 = now - (secs_64 * rtclock_sec_divisor);
		microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		TIME_ADD(secs, epoch, microsecs, microepoch, USEC_PER_SEC);

		/* adjust "now" to be absolute time at _start_ of usecond */
		now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);

		commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
	}
	else {
		/* clock being slowed: same logic as clock_get_calendar_microtime */
		uint32_t	delta, t32;

		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time();

		secs = rtclock_calend.epoch;
		microsecs = rtclock_calend.microepoch;

		if (now > rtclock_calend.epoch1) {
			t64 = now - rtclock_calend.epoch1;

			t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

			if (t32 > delta)
				TIME_ADD(secs, 0, microsecs, (t32 - delta), USEC_PER_SEC);
		}

		/* no need to disable timestamp, it is already off */
	}

	simple_unlock(&rtclock_lock);
	splx(s);

	*secs_p = secs;
	*microsecs_p = microsecs;
}
513
/*
 * Return the calendar (wall) time in seconds and nanoseconds.
 * Mirrors clock_get_calendar_microtime(); the fractional part is
 * computed at microsecond granularity and scaled up, so resolution
 * is still one microsecond.
 */
void
clock_get_calendar_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	uint32_t		epoch, nanoepoch;
	uint64_t		now, t64;
	spl_t			s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t		divisor;

		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;

		/* epoch snapshot taken; the arithmetic below needs no lock */
		simple_unlock(&rtclock_lock);

		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		*nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;

		TIME_ADD(*secs, epoch, *nanosecs, nanoepoch, NSEC_PER_SEC);
	}
	else {
		uint32_t		delta, t32;

		/* magnitude of the remaining negative adjustment (usec) */
		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time();

		*secs = rtclock_calend.epoch;
		*nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;

		if (now > rtclock_calend.epoch1) {
			t64 = now - rtclock_calend.epoch1;

			t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

			if (t32 > delta)
				TIME_ADD(*secs, 0, *nanosecs, ((t32 - delta) * NSEC_PER_USEC), NSEC_PER_SEC);
		}

		simple_unlock(&rtclock_lock);
	}

	splx(s);
}
565
/*
 * Step the calendar clock to an absolute time.  Cancels any slew in
 * progress (folding its already-elapsed portion into the epoch first),
 * recomputes the epoch against the system clock, updates the recorded
 * boot time by the step delta, pushes the rounded value to the
 * platform clock, and notifies interested hosts.
 */
void
clock_set_calendar_microtime(
	uint32_t			secs,
	uint32_t			microsecs)
{
	uint32_t		sys, microsys;
	uint32_t		newsecs;
	spl_t			s;

	/* platform clock holds whole seconds only: round to nearest */
	newsecs = (microsecs < 500*USEC_PER_SEC)?
					secs: secs + 1;

	s = splclock();
	simple_lock(&rtclock_lock);

	commpage_set_timestamp(0,0,0,0);	/* invalidate user-mode timestamp */

	/*
	 * Cancel any adjustment in progress.
	 */
	if (rtclock_calend.adjdelta < 0) {
		uint64_t		now, t64;
		uint32_t		delta, t32;

		delta = -rtclock_calend.adjdelta;

		sys = rtclock_calend.epoch;
		microsys = rtclock_calend.microepoch;

		now = mach_absolute_time();

		if (now > rtclock_calend.epoch1)
			t64 = now - rtclock_calend.epoch1;
		else
			t64 = 0;

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		/* credit the slewed-but-elapsed time back into the epoch */
		if (t32 > delta)
			TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC);

		rtclock_calend.epoch = sys;
		rtclock_calend.microepoch = microsys;

		/* re-express the epoch relative to uptime zero */
		sys = t64 = now / rtclock_sec_divisor;
		now -= (t64 * rtclock_sec_divisor);
		microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

		TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
	}

	rtclock_calend.epoch1 = 0;
	rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;

	/*
	 * Calculate the new calendar epoch based on
	 * the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 * Adjust the boottime based on the delta.
	 */
	rtclock_boottime += secs - rtclock_calend.epoch;

	/*
	 * Set the new calendar epoch.
	 */
	rtclock_calend.epoch = secs;
	rtclock_calend.microepoch = microsecs;

	simple_unlock(&rtclock_lock);

	/*
	 * Set the new value for the platform clock.
	 */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();
}
652
#define tickadj		(40)		/* "standard" skew, us / tick */
#define bigadj		(USEC_PER_SEC)	/* use 10x skew above bigadj us */

/*
 * Begin (or cancel) an adjtime()-style gradual clock adjustment of
 * *secs/*microsecs.  Any prior slew's elapsed portion is folded into
 * the epoch first.  Returns the tick interval at which
 * clock_adjust_calendar() should be called (0 when nothing to do),
 * and writes the previously outstanding adjustment back through the
 * pointers.
 */
uint32_t
clock_set_calendar_adjtime(
	int32_t				*secs,
	int32_t				*microsecs)
{
	int64_t			total, ototal;
	uint32_t		interval = 0;
	spl_t			s;

	total = (int64_t)*secs * USEC_PER_SEC + *microsecs;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);	/* invalidate user-mode timestamp */

	ototal = rtclock_calend.adjtotal;

	/* Fold any in-progress slow-down into the epoch before restarting. */
	if (rtclock_calend.adjdelta < 0) {
		uint64_t		now, t64;
		uint32_t		delta, t32;
		uint32_t		sys, microsys;

		delta = -rtclock_calend.adjdelta;

		sys = rtclock_calend.epoch;
		microsys = rtclock_calend.microepoch;

		now = mach_absolute_time();

		if (now > rtclock_calend.epoch1)
			t64 = now - rtclock_calend.epoch1;
		else
			t64 = 0;

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC);

		rtclock_calend.epoch = sys;
		rtclock_calend.microepoch = microsys;

		/* re-express the epoch relative to uptime zero */
		sys = t64 = now / rtclock_sec_divisor;
		now -= (t64 * rtclock_sec_divisor);
		microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

		TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
	}

	if (total != 0) {
		int32_t		delta = tickadj;

		if (total > 0) {
			if (total > bigadj)
				delta *= 10;
			if (delta > total)
				delta = total;

			rtclock_calend.epoch1 = 0;
		}
		else {
			uint64_t		now, t64;
			uint32_t		sys, microsys;

			if (total < -bigadj)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;

			/* mark the start of the slow-down segment ... */
			rtclock_calend.epoch1 = now = mach_absolute_time();

			/* ... and pre-bias the epoch so time appears frozen at "now" */
			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			TIME_ADD(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
		}

		rtclock_calend.adjtotal = total;
		rtclock_calend.adjdelta = delta;

		interval = rtclock_tick_interval;
	}
	else {
		rtclock_calend.epoch1 = 0;
		rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	}

	UNLOCK_RTC(s);

	/* report the adjustment that was still outstanding */
	if (ototal == 0)
		*secs = *microsecs = 0;
	else {
		*secs = ototal / USEC_PER_SEC;
		*microsecs = ototal % USEC_PER_SEC;
	}

	return (interval);
}
755
/*
 * Apply one step of the adjustment started by
 * clock_set_calendar_adjtime(); called once per tick while a slew is
 * active.  Returns the interval until the next call, or 0 when the
 * adjustment is complete.
 */
uint32_t
clock_adjust_calendar(void)
{
	uint32_t		interval = 0;
	int32_t			delta;
	spl_t			s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);	/* invalidate user-mode timestamp */

	delta = rtclock_calend.adjdelta;

	if (delta > 0) {
		/* speeding up: push the epoch forward by one step */
		TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, delta, USEC_PER_SEC);

		rtclock_calend.adjtotal -= delta;
		if (delta > rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;	/* final partial step */
	}
	else
	if (delta < 0) {
		uint64_t		now, t64;
		uint32_t		t32;

		now = mach_absolute_time();

		if (now > rtclock_calend.epoch1)
			t64 = now - rtclock_calend.epoch1;
		else
			t64 = 0;

		rtclock_calend.epoch1 = now;

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		/* advance by elapsed time minus the (negative) step */
		TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, (t32 + delta), USEC_PER_SEC);

		rtclock_calend.adjtotal -= delta;
		if (delta < rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;	/* final partial step */

		if (rtclock_calend.adjdelta == 0) {
			uint32_t		sys, microsys;

			/* slew finished: re-express the epoch relative to uptime zero */
			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);

			rtclock_calend.epoch1 = 0;
		}
	}

	if (rtclock_calend.adjdelta != 0)
		interval = rtclock_tick_interval;

	UNLOCK_RTC(s);

	return (interval);
}
817
/*
 * clock_initialize_calendar:
 *
 * Set the calendar and related clocks
 * from the platform clock at boot or
 * wake event.
 */
void
clock_initialize_calendar(void)
{
	uint32_t		sys, microsys;
	uint32_t		microsecs = 0, secs = PEGetGMTTimeOfDay();
	spl_t			s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);	/* invalidate user-mode timestamp */

	/* ignore a platform clock that has gone backwards past boot time */
	if ((int32_t)secs >= (int32_t)rtclock_boottime) {
		/*
		 * Initialize the boot time based on the platform clock.
		 */
		if (rtclock_boottime == 0)
			rtclock_boottime = secs;

		/*
		 * Calculate the new calendar epoch based
		 * on the platform clock and the system
		 * clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 * Set the new calendar epoch.
		 */
		rtclock_calend.epoch = secs;
		rtclock_calend.microepoch = microsecs;

		/*
		 * Cancel any adjustment in progress.
		 */
		rtclock_calend.epoch1 = 0;
		rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	}

	UNLOCK_RTC(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();
}
870
871void
872clock_get_boottime_nanotime(
873 uint32_t *secs,
874 uint32_t *nanosecs)
875{
876 *secs = rtclock_boottime;
877 *nanosecs = 0;
91447636
A
878}
879
1c79356b
A
880void
881clock_timebase_info(
882 mach_timebase_info_t info)
883{
55e303ae 884 spl_t s;
1c79356b
A
885
886 LOCK_RTC(s);
5d5c5d0d 887 rtclock_timebase_initialized = TRUE;
8f6c56a5 888 *info = rtclock_timebase_const;
1c79356b
A
889 UNLOCK_RTC(s);
890}
891
8f6c56a5
A
/*
 * Set the expiration deadline for this processor's event timer.
 * If the new deadline is earlier than the pending periodic-tick pop
 * (and the timer is not mid-expiration), reprogram the decrementer
 * to fire at the new deadline.
 */
void
clock_set_timer_deadline(
	uint64_t				deadline)
{
	int						decr;
	uint64_t				abstime;
	rtclock_timer_t			*mytimer;
	struct per_proc_info	*pp;
	spl_t					s;

	s = splclock();
	pp = getPerProc();
	mytimer = &pp->rtclock_timer;
	mytimer->deadline = deadline;

	if (!mytimer->has_expired && (deadline < pp->rtclock_tick_deadline)) {	/* Has the timer already expired or is less that set? */
		pp->rtcPop = deadline;			/* Yes, set the new rtc pop time */
		decr = setTimerReq();			/* Start the timers going */

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
							  | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
	}

	splx(s);
}
917
1c79356b
A
918void
919clock_set_timer_func(
920 clock_timer_func_t func)
921{
922 spl_t s;
923
924 LOCK_RTC(s);
55e303ae
A
925 if (rtclock_timer_expire == NULL)
926 rtclock_timer_expire = func;
1c79356b
A
927 UNLOCK_RTC(s);
928}
929
8f6c56a5
A
/*
 * Real-time clock device interrupt.
 *
 * Handles the periodic hertz tick and any expired event timer on the
 * current processor, then reprograms the decrementer for the earlier
 * of the two next deadlines.
 */
void
rtclock_intr(struct savearea *ssp) {

	uint64_t				abstime;
	int						decr;
	rtclock_timer_t			*mytimer;
	struct per_proc_info	*pp;

	pp = getPerProc();
	mytimer = &pp->rtclock_timer;

	abstime = mach_absolute_time();
	if (pp->rtclock_tick_deadline <= abstime) {		/* Have we passed the pop time? */
		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
										  &pp->rtclock_tick_deadline);
		hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
		abstime = mach_absolute_time();				/* Refresh the current time since we went away */
	}

	if (mytimer->deadline <= abstime) {				/* Have we expired the deadline? */
		mytimer->has_expired = TRUE;				/* Remember that we popped */
		mytimer->deadline = EndOfAllTime;			/* Set timer request to the end of all time in case we have no more events */
		(*rtclock_timer_expire)(abstime);			/* Process pop */
		mytimer->has_expired = FALSE;
	}

	pp->rtcPop = (pp->rtclock_tick_deadline < mytimer->deadline) ?	/* Get shortest pop */
		pp->rtclock_tick_deadline :					/* It was the periodic timer */
		mytimer->deadline;							/* Actually, an event request */

	decr = setTimerReq();							/* Request the timer pop */

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
						  | DBG_FUNC_NONE, decr, 3, 0, 0, 0);
}
968
969/*
970 * Request an interruption at a specific time
971 *
972 * Sets the decrementer to pop at the right time based on the timebase.
973 * The value is chosen by comparing the rtc request with the power management.
974 * request. We may add other values at a future time.
975 *
976 */
977
978int setTimerReq(void) {
979
980 struct per_proc_info *pp;
981 int decr;
982 uint64_t nexttime;
983
984 pp = getPerProc(); /* Get per_proc */
985
986 nexttime = pp->rtcPop; /* Assume main timer */
987
988 decr = setPop((pp->pms.pmsPop < nexttime) ? pp->pms.pmsPop : nexttime); /* Schedule timer pop */
989
990 return decr; /* Pass back what we actually set */
991}
992
993static void
994rtclock_alarm_expire(
995 __unused void *p0,
996 __unused void *p1)
997{
998 mach_timespec_t timestamp;
999
1000 (void) sysclk_gettime(&timestamp);
1001
1002 clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
1003}
1004
1005static void
1006nanotime_to_absolutetime(
1007 uint32_t secs,
1008 uint32_t nanosecs,
55e303ae 1009 uint64_t *result)
1c79356b 1010{
8f6c56a5 1011 uint32_t divisor = rtclock_sec_divisor;
91447636 1012
8f6c56a5
A
1013 *result = ((uint64_t)secs * divisor) +
1014 ((uint64_t)nanosecs * divisor) / NSEC_PER_SEC;
91447636
A
1015}
1016
1017void
1018absolutetime_to_microtime(
1019 uint64_t abstime,
1020 uint32_t *secs,
1021 uint32_t *microsecs)
1022{
1023 uint64_t t64;
55e303ae 1024 uint32_t divisor;
1c79356b 1025
91447636
A
1026 *secs = t64 = abstime / (divisor = rtclock_sec_divisor);
1027 abstime -= (t64 * divisor);
1028 *microsecs = (abstime * USEC_PER_SEC) / divisor;
1c79356b
A
1029}
1030
1031void
8f6c56a5
A
1032clock_interval_to_deadline(
1033 uint32_t interval,
1034 uint32_t scale_factor,
1035 uint64_t *result)
1c79356b 1036{
8f6c56a5 1037 uint64_t abstime;
1c79356b 1038
8f6c56a5
A
1039 clock_get_uptime(result);
1040
1041 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1042
1043 *result += abstime;
8ad349bb
A
1044}
1045
1046void
8f6c56a5
A
1047clock_interval_to_absolutetime_interval(
1048 uint32_t interval,
1049 uint32_t scale_factor,
8ad349bb
A
1050 uint64_t *result)
1051{
8f6c56a5
A
1052 uint64_t nanosecs = (uint64_t)interval * scale_factor;
1053 uint64_t t64;
1054 uint32_t divisor;
8ad349bb 1055
8f6c56a5
A
1056 *result = (t64 = nanosecs / NSEC_PER_SEC) *
1057 (divisor = rtclock_sec_divisor);
1058 nanosecs -= (t64 * NSEC_PER_SEC);
1059 *result += (nanosecs * divisor) / NSEC_PER_SEC;
1060}
1061
1062void
1063clock_absolutetime_interval_to_deadline(
1064 uint64_t abstime,
1065 uint64_t *result)
1066{
1067 clock_get_uptime(result);
1068
1069 *result += abstime;
1c79356b
A
1070}
1071
1072void
1073absolutetime_to_nanoseconds(
0b4e3aa0
A
1074 uint64_t abstime,
1075 uint64_t *result)
1c79356b 1076{
55e303ae
A
1077 uint64_t t64;
1078 uint32_t divisor;
1c79356b 1079
55e303ae
A
1080 *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
1081 abstime -= (t64 * divisor);
1082 *result += (abstime * NSEC_PER_SEC) / divisor;
1c79356b
A
1083}
1084
1085void
1086nanoseconds_to_absolutetime(
55e303ae 1087 uint64_t nanosecs,
0b4e3aa0 1088 uint64_t *result)
1c79356b 1089{
55e303ae
A
1090 uint64_t t64;
1091 uint32_t divisor;
1c79356b 1092
55e303ae
A
1093 *result = (t64 = nanosecs / NSEC_PER_SEC) *
1094 (divisor = rtclock_sec_divisor);
1095 nanosecs -= (t64 * NSEC_PER_SEC);
1096 *result += (nanosecs * divisor) / NSEC_PER_SEC;
1c79356b
A
1097}
1098
1c79356b 1099void
91447636 1100machine_delay_until(
0b4e3aa0 1101 uint64_t deadline)
1c79356b 1102{
0b4e3aa0 1103 uint64_t now;
1c79356b
A
1104
1105 do {
55e303ae 1106 now = mach_absolute_time();
0b4e3aa0 1107 } while (now < deadline);
1c79356b 1108}
8f6c56a5 1109