/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:	rtclock.c
 * Purpose:	Routines for handling the machine dependent
 *		real-time clock.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

#include <kern/host_notify.h>

#include <machine/mach_param.h>	/* HZ */
#include <machine/commpage.h>
#include <ppc/proc_reg.h>

#include <pexpert/pexpert.h>

#include <sys/kdebug.h>

int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
			mach_timespec_t		*cur_time);

kern_return_t	sysclk_getattr(
			clock_flavor_t		flavor,
			clock_attr_t		attr,
			mach_msg_type_number_t	*count);

void		sysclk_setalarm(
			mach_timespec_t		*deadline);

struct clock_ops sysclk_ops = {
	sysclk_config,		sysclk_init,
	sysclk_gettime,		0,
	sysclk_getattr,		0,
	sysclk_setalarm,
};

int		calend_config(void);

int		calend_init(void);

kern_return_t	calend_gettime(
			mach_timespec_t		*cur_time);

kern_return_t	calend_getattr(
			clock_flavor_t		flavor,
			clock_attr_t		attr,
			mach_msg_type_number_t	*count);

struct clock_ops calend_ops = {
	calend_config,		calend_init,
	calend_gettime,		0,
	calend_getattr,		0,
	0,
};

/* local data declarations */

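/*
 * Calendar (wall clock) state.  epoch/microepoch normally record the
 * calendar time at boot, i.e. the offset added to the uptime to produce
 * calendar time.  While an adjtime() slow-down is in progress
 * (adjdelta < 0), epoch/microepoch instead hold the calendar value as of
 * the absolute time saved in epoch1, and adjtotal/adjdelta track the
 * remaining adjustment and the per-tick slew (see
 * clock_set_calendar_adjtime() and clock_adjust_calendar() below).
 */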
static struct rtclock_calend {
	uint32_t		epoch;
	uint32_t		microepoch;

	uint64_t		epoch1;

	int64_t			adjtotal;
	int32_t			adjdelta;
} rtclock_calend;

static boolean_t		rtclock_initialized;

static uint64_t			rtclock_tick_deadline[NCPUS];

#define NSEC_PER_HZ		(NSEC_PER_SEC / HZ)
static uint32_t			rtclock_tick_interval;

static uint32_t			rtclock_sec_divisor;

static mach_timebase_info_data_t	rtclock_timebase_const;

static boolean_t		rtclock_timebase_initialized;

static struct rtclock_timer {
	uint64_t		deadline;
	uint32_t
	/*boolean_t*/		is_set:1,
				has_expired:1,
				:0;
} rtclock_timer[NCPUS];

static clock_timer_func_t	rtclock_timer_expire;

static timer_call_data_t	rtclock_alarm_timer;

static void	timespec_to_absolutetime(
			mach_timespec_t		*ts,
			uint64_t		*result);

static int	deadline_to_decrementer(
			uint64_t		deadline,
			uint64_t		now);

static void	rtclock_alarm_expire(
			timer_call_param_t	p0,
			timer_call_param_t	p1);

/* global data declarations */

#define DECREMENTER_MAX		0x7FFFFFFFUL
#define DECREMENTER_MIN		0xAUL

natural_t	rtclock_decrementer_min;

decl_simple_lock_data(static,rtclock_lock)

/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)				\
MACRO_BEGIN					\
	(s) = splclock();			\
	simple_lock(&rtclock_lock);		\
MACRO_END

#define UNLOCK_RTC(s)				\
MACRO_BEGIN					\
	simple_unlock(&rtclock_lock);		\
	splx(s);				\
MACRO_END

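/*
 * timebase_callback() is registered with the platform expert (see
 * sysclk_config() below) and is invoked once the timebase frequency is
 * known.  The reported ratio (timebase_num ticks per timebase_den
 * seconds) is converted into the mach_timebase_info constants
 * (nanoseconds = ticks * numer / denom) and into rtclock_sec_divisor,
 * the number of timebase ticks per second used throughout this file.
 */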
static void
timebase_callback(
	struct timebase_freq_t	*freq)
{
	uint32_t	numer, denom;
	uint64_t	abstime;
	spl_t		s;

	if (	freq->timebase_den < 1 || freq->timebase_den > 4	||
		freq->timebase_num < freq->timebase_den			)
		panic("rtclock timebase_callback: invalid constant %d / %d",
					freq->timebase_num, freq->timebase_den);

	denom = freq->timebase_num;
	numer = freq->timebase_den * NSEC_PER_SEC;

	LOCK_RTC(s);
	if (!rtclock_timebase_initialized) {
		commpage_set_timestamp(0,0,0,0);

		rtclock_timebase_const.numer = numer;
		rtclock_timebase_const.denom = denom;
		rtclock_sec_divisor = freq->timebase_num / freq->timebase_den;

		nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
		rtclock_tick_interval = abstime;
	}
	else {
		UNLOCK_RTC(s);
		printf("rtclock timebase_callback: late old %d / %d new %d / %d",
				rtclock_timebase_const.numer, rtclock_timebase_const.denom,
						numer, denom);
		return;
	}
	UNLOCK_RTC(s);

	clock_timebase_init();
}

/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
	if (cpu_number() != master_cpu)
		return(1);

	timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);

	simple_lock_init(&rtclock_lock, ETAP_MISC_RT_CLOCK);

	PE_register_timebase_callback(timebase_callback);

	return (1);
}

/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
	uint64_t	abstime;
	int		decr, mycpu = cpu_number();

	if (mycpu != master_cpu) {
		if (rtclock_initialized == FALSE) {
			panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
		}
		/* Set decrementer and hence our next tick due */
		abstime = mach_absolute_time();
		rtclock_tick_deadline[mycpu] = abstime;
		rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
		decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
		mtdec(decr);

		return(1);
	}

	/* Set decrementer and our next tick due */
	abstime = mach_absolute_time();
	rtclock_tick_deadline[mycpu] = abstime;
	rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
	decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
	mtdec(decr);

	rtclock_initialized = TRUE;

	return (1);
}

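/*
 * The uptime conversions below all follow the same pattern: divide the
 * absolute (timebase) time by rtclock_sec_divisor to obtain whole
 * seconds, then scale only the sub-second remainder to nanoseconds or
 * microseconds.  Splitting the value this way keeps the intermediate
 * 64-bit multiplication from overflowing for large uptimes.
 */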
kern_return_t
sysclk_gettime(
	mach_timespec_t		*time)	/* OUT */
{
	uint64_t	now, t64;
	uint32_t	divisor;

	now = mach_absolute_time();

	time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor);
	now -= (t64 * divisor);
	time->tv_nsec = (now * NSEC_PER_SEC) / divisor;

	return (KERN_SUCCESS);
}

void
clock_get_system_microtime(
	uint32_t		*secs,
	uint32_t		*microsecs)
{
	uint64_t	now, t64;
	uint32_t	divisor;

	now = mach_absolute_time();

	*secs = t64 = now / (divisor = rtclock_sec_divisor);
	now -= (t64 * divisor);
	*microsecs = (now * USEC_PER_SEC) / divisor;
}

void
clock_get_system_nanotime(
	uint32_t		*secs,
	uint32_t		*nanosecs)
{
	uint64_t	now, t64;
	uint32_t	divisor;

	now = mach_absolute_time();

	*secs = t64 = now / (divisor = rtclock_sec_divisor);
	now -= (t64 * divisor);
	*nanosecs = (now * NSEC_PER_SEC) / divisor;
}

/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);

	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		LOCK_RTC(s);
		*(clock_res_t *) attr = NSEC_PER_HZ;
		UNLOCK_RTC(s);
		break;

	default:
		return (KERN_INVALID_VALUE);
	}

	return (KERN_SUCCESS);
}

/*
 * Set deadline for the next alarm on the clock device. This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
	mach_timespec_t		*deadline)
{
	uint64_t	abstime;

	timespec_to_absolutetime(deadline, &abstime);
	timer_call_enter(&rtclock_alarm_timer, abstime);
}

/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
	return (1);
}

/*
 * Initialize the calendar clock.
 */
int
calend_init(void)
{
	if (cpu_number() != master_cpu)
		return(1);

	return (1);
}

/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
	mach_timespec_t		*time)	/* OUT */
{
	clock_get_calendar_nanotime(
				&time->tv_sec, &time->tv_nsec);

	return (KERN_SUCCESS);
}

/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);

	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
		LOCK_RTC(s);
		*(clock_res_t *) attr = NSEC_PER_HZ;
		UNLOCK_RTC(s);
		break;

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}

	return (KERN_SUCCESS);
}

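/*
 * Calendar time is computed as the current uptime plus the epoch kept in
 * rtclock_calend.  While the calendar is being slowed by adjtime()
 * (adjdelta < 0), the epoch fields hold the calendar value as of absolute
 * time epoch1, and elapsed time since epoch1 is credited only beyond the
 * withheld delta; clock_adjust_calendar() releases that time gradually.
 */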
void
clock_get_calendar_microtime(
	uint32_t		*secs,
	uint32_t		*microsecs)
{
	uint32_t	epoch, microepoch;
	uint64_t	now, t64;
	spl_t		s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t	divisor;

		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		simple_unlock(&rtclock_lock);

		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		*microsecs = (now * USEC_PER_SEC) / divisor;

		if ((*microsecs += microepoch) >= USEC_PER_SEC) {
			*microsecs -= USEC_PER_SEC;
			epoch += 1;
		}

		*secs += epoch;
	}
	else {
		uint32_t	delta, t32;

		delta = -rtclock_calend.adjdelta;

		t64 = mach_absolute_time() - rtclock_calend.epoch1;

		*secs = rtclock_calend.epoch;
		*microsecs = rtclock_calend.microepoch;

		simple_unlock(&rtclock_lock);

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			*microsecs += (t32 - delta);

		if (*microsecs >= USEC_PER_SEC) {
			*microsecs -= USEC_PER_SEC;
			*secs += 1;
		}
	}

	splx(s);
}

/* This is only called from the gettimeofday() syscall.  As a side
 * effect, it updates the commpage timestamp.  Otherwise it is
 * identical to clock_get_calendar_microtime().  Because most
 * gettimeofday() calls are handled by the commpage in user mode,
 * this routine should be infrequently used except when slowing down
 * the clock.
 */
void
clock_gettimeofday(
	uint32_t		*secs_p,
	uint32_t		*microsecs_p)
{
	uint32_t	epoch, microepoch;
	uint32_t	secs, microsecs;
	uint64_t	now, t64, secs_64, usec_64;
	spl_t		s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		microepoch = rtclock_calend.microepoch;

		secs = secs_64 = now / rtclock_sec_divisor;
		t64 = now - (secs_64 * rtclock_sec_divisor);
		microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if ((microsecs += microepoch) >= USEC_PER_SEC) {
			microsecs -= USEC_PER_SEC;
			epoch += 1;
		}

		secs += epoch;

		/* adjust "now" to be absolute time at _start_ of usecond */
		now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC);

		commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor);
	}
	else {
		uint32_t	delta, t32;

		delta = -rtclock_calend.adjdelta;

		now = mach_absolute_time() - rtclock_calend.epoch1;

		secs = rtclock_calend.epoch;
		microsecs = rtclock_calend.microepoch;

		t32 = (now * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			microsecs += (t32 - delta);

		if (microsecs >= USEC_PER_SEC) {
			microsecs -= USEC_PER_SEC;
			secs += 1;
		}
		/* no need to disable timestamp, it is already off */
	}

	simple_unlock(&rtclock_lock);
	splx(s);

	*secs_p = secs;
	*microsecs_p = microsecs;
}

void
clock_get_calendar_nanotime(
	uint32_t		*secs,
	uint32_t		*nanosecs)
{
	uint32_t	epoch, nanoepoch;
	uint64_t	now, t64;
	spl_t		s = splclock();

	simple_lock(&rtclock_lock);

	if (rtclock_calend.adjdelta >= 0) {
		uint32_t	divisor;

		now = mach_absolute_time();

		epoch = rtclock_calend.epoch;
		nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC;

		simple_unlock(&rtclock_lock);

		*secs = t64 = now / (divisor = rtclock_sec_divisor);
		now -= (t64 * divisor);
		*nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC;

		if ((*nanosecs += nanoepoch) >= NSEC_PER_SEC) {
			*nanosecs -= NSEC_PER_SEC;
			epoch += 1;
		}

		*secs += epoch;
	}
	else {
		uint32_t	delta, t32;

		delta = -rtclock_calend.adjdelta;

		t64 = mach_absolute_time() - rtclock_calend.epoch1;

		*secs = rtclock_calend.epoch;
		*nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC;

		simple_unlock(&rtclock_lock);

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			*nanosecs += ((t32 - delta) * NSEC_PER_USEC);

		if (*nanosecs >= NSEC_PER_SEC) {
			*nanosecs -= NSEC_PER_SEC;
			*secs += 1;
		}
	}

	splx(s);
}

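/*
 * Setting the calendar records the difference between the requested time
 * and the current uptime as the new epoch, cancels any adjtime() slew in
 * progress, invalidates the commpage timestamp, pushes the new seconds
 * value out to the platform clock, and notifies interested parties of
 * the calendar change.
 */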
void
clock_set_calendar_microtime(
	uint32_t		secs,
	uint32_t		microsecs)
{
	uint32_t	sys, microsys;
	uint32_t	newsecs;
	spl_t		s;

	newsecs = (microsecs < 500*USEC_PER_SEC)?
					secs: secs + 1;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);

	clock_get_system_microtime(&sys, &microsys);
	if ((int32_t)(microsecs -= microsys) < 0) {
		microsecs += USEC_PER_SEC;
		secs -= 1;
	}

	secs -= sys;

	rtclock_calend.epoch = secs;
	rtclock_calend.microepoch = microsecs;
	rtclock_calend.epoch1 = 0;
	rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	UNLOCK_RTC(s);

	PESetGMTTimeOfDay(newsecs);

	host_notify_calendar_change();
}

#define tickadj		(40)			/* "standard" skew, us / tick */
#define	bigadj		(USEC_PER_SEC)		/* use 10x skew above bigadj us */

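/*
 * clock_set_calendar_adjtime() installs a signed calendar adjustment of
 * *secs/*microsecs and returns the previously outstanding adjustment
 * through the same pointers.  The return value is the absolute-time
 * interval at which clock_adjust_calendar() should be called to apply
 * the adjustment in tickadj-microsecond steps (ten times that for
 * corrections larger than bigadj), or zero when there is nothing to do.
 */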
uint32_t
clock_set_calendar_adjtime(
	int32_t			*secs,
	int32_t			*microsecs)
{
	int64_t		total, ototal;
	uint32_t	interval = 0;
	spl_t		s;

	total = (int64_t)*secs * USEC_PER_SEC + *microsecs;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);

	ototal = rtclock_calend.adjtotal;

	if (rtclock_calend.adjdelta < 0) {
		uint64_t	now, t64;
		uint32_t	delta, t32;
		uint32_t	sys, microsys;

		delta = -rtclock_calend.adjdelta;

		sys = rtclock_calend.epoch;
		microsys = rtclock_calend.microepoch;

		now = mach_absolute_time();

		t64 = now - rtclock_calend.epoch1;
		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		if (t32 > delta)
			microsys += (t32 - delta);

		if (microsys >= USEC_PER_SEC) {
			microsys -= USEC_PER_SEC;
			sys += 1;
		}

		rtclock_calend.epoch = sys;
		rtclock_calend.microepoch = microsys;

		sys = t64 = now / rtclock_sec_divisor;
		now -= (t64 * rtclock_sec_divisor);
		microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

		if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
			rtclock_calend.microepoch += USEC_PER_SEC;
			sys += 1;
		}

		rtclock_calend.epoch -= sys;
	}

	if (total != 0) {
		int32_t		delta = tickadj;

		if (total > 0) {
			if (total > bigadj)
				delta *= 10;
			if (delta > total)
				delta = total;

			rtclock_calend.epoch1 = 0;
		}
		else {
			uint64_t	now, t64;
			uint32_t	sys, microsys;

			if (total < -bigadj)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;

			rtclock_calend.epoch1 = now = mach_absolute_time();

			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			if ((rtclock_calend.microepoch += microsys) >= USEC_PER_SEC) {
				rtclock_calend.microepoch -= USEC_PER_SEC;
				sys += 1;
			}

			rtclock_calend.epoch += sys;
		}

		rtclock_calend.adjtotal = total;
		rtclock_calend.adjdelta = delta;

		interval = rtclock_tick_interval;
	}
	else {
		rtclock_calend.epoch1 = 0;
		rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	}

	UNLOCK_RTC(s);

	if (ototal == 0)
		*secs = *microsecs = 0;
	else {
		*secs = ototal / USEC_PER_SEC;
		*microsecs = ototal % USEC_PER_SEC;
	}

	return (interval);
}

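/*
 * clock_adjust_calendar() is called periodically (at the interval
 * returned above) while an adjustment is pending.  It applies one
 * adjdelta step to the calendar epoch, shrinking adjtotal, and returns
 * the tick interval again if further adjustment remains, or zero once
 * the adjustment is complete.
 */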
uint32_t
clock_adjust_calendar(void)
{
	uint32_t	micronew, interval = 0;
	int32_t		delta;
	spl_t		s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);

	delta = rtclock_calend.adjdelta;

	if (delta > 0) {
		micronew = rtclock_calend.microepoch + delta;
		if (micronew >= USEC_PER_SEC) {
			micronew -= USEC_PER_SEC;
			rtclock_calend.epoch += 1;
		}

		rtclock_calend.microepoch = micronew;

		rtclock_calend.adjtotal -= delta;
		if (delta > rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;
	}
	else
	if (delta < 0) {
		uint64_t	now, t64;
		uint32_t	t32;

		now = mach_absolute_time();

		t64 = now - rtclock_calend.epoch1;

		rtclock_calend.epoch1 = now;

		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;

		micronew = rtclock_calend.microepoch + t32 + delta;
		if (micronew >= USEC_PER_SEC) {
			micronew -= USEC_PER_SEC;
			rtclock_calend.epoch += 1;
		}

		rtclock_calend.microepoch = micronew;

		rtclock_calend.adjtotal -= delta;
		if (delta < rtclock_calend.adjtotal)
			rtclock_calend.adjdelta = rtclock_calend.adjtotal;

		if (rtclock_calend.adjdelta == 0) {
			uint32_t	sys, microsys;

			sys = t64 = now / rtclock_sec_divisor;
			now -= (t64 * rtclock_sec_divisor);
			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;

			if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) {
				rtclock_calend.microepoch += USEC_PER_SEC;
				sys += 1;
			}

			rtclock_calend.epoch -= sys;

			rtclock_calend.epoch1 = 0;
		}
	}

	if (rtclock_calend.adjdelta != 0)
		interval = rtclock_tick_interval;

	UNLOCK_RTC(s);

	return (interval);
}

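/*
 * At boot, the calendar epoch is seeded from the platform's GMT
 * time-of-day clock: the seconds reported by PEGetGMTTimeOfDay() minus
 * the uptime accumulated so far become the initial epoch.
 */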
void
clock_initialize_calendar(void)
{
	uint32_t	sys, microsys;
	uint32_t	microsecs = 0, secs = PEGetGMTTimeOfDay();
	spl_t		s;

	LOCK_RTC(s);
	commpage_set_timestamp(0,0,0,0);

	clock_get_system_microtime(&sys, &microsys);
	if ((int32_t)(microsecs -= microsys) < 0) {
		microsecs += USEC_PER_SEC;
		secs -= 1;
	}

	secs -= sys;

	rtclock_calend.epoch = secs;
	rtclock_calend.microepoch = microsecs;
	rtclock_calend.epoch1 = 0;
	rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
	UNLOCK_RTC(s);

	host_notify_calendar_change();
}

void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	spl_t	s;

	LOCK_RTC(s);
	rtclock_timebase_initialized = TRUE;
	*info = rtclock_timebase_const;
	UNLOCK_RTC(s);
}

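/*
 * clock_set_timer_deadline() records the per-processor timer deadline
 * and, if that deadline falls before the next periodic tick, reprograms
 * the decrementer so the timer callout fires on time.  When
 * rtclock_decrementer_min is non-zero, the programmed decrement is
 * capped at that value.
 */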
void
clock_set_timer_deadline(
	uint64_t			deadline)
{
	uint64_t			abstime;
	int				decr, mycpu;
	struct rtclock_timer		*mytimer;
	spl_t				s;

	s = splclock();
	mycpu = cpu_number();
	mytimer = &rtclock_timer[mycpu];
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if (!mytimer->has_expired) {
		abstime = mach_absolute_time();
		if (	mytimer->deadline < rtclock_tick_deadline[mycpu]	) {
			decr = deadline_to_decrementer(mytimer->deadline, abstime);
			if (	rtclock_decrementer_min != 0		&&
				rtclock_decrementer_min < (natural_t)decr	)
				decr = rtclock_decrementer_min;

			mtdec(decr);

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
									| DBG_FUNC_NONE, decr, 2, 0, 0, 0);
		}
	}
	splx(s);
}

void
clock_set_timer_func(
	clock_timer_func_t		func)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock_timer_expire == NULL)
		rtclock_timer_expire = func;
	UNLOCK_RTC(s);
}

/*
 * Reset the clock device. This causes the realtime clock
 * device to reload its mode and count value (frequency).
 */
void
rtclock_reset(void)
{
	return;
}

/*
 * Real-time clock device interrupt.
 */
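/*
 * On each decrementer interrupt this handler drives the periodic
 * scheduler tick (hertz_tick) when the per-processor tick deadline has
 * passed, expires the per-processor timer callout if its deadline has
 * passed, and finally reloads the decrementer with the nearer of the
 * two outstanding deadlines (capped by rtclock_decrementer_min when
 * that override is non-zero).
 */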
void
rtclock_intr(
	int				device,
	struct savearea			*ssp,
	spl_t				old_spl)
{
	uint64_t			abstime;
	int				decr1, decr2, mycpu = cpu_number();
	struct rtclock_timer		*mytimer = &rtclock_timer[mycpu];

	/*
	 * We may receive interrupts too early, we must reject them.
	 */
	if (rtclock_initialized == FALSE) {
		mtdec(DECREMENTER_MAX);		/* Max the decrementer if not init */
		return;
	}

	decr1 = decr2 = DECREMENTER_MAX;

	abstime = mach_absolute_time();
	if (	rtclock_tick_deadline[mycpu] <= abstime		) {
		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
										&rtclock_tick_deadline[mycpu]);
		hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
	}

	abstime = mach_absolute_time();
	if (	mytimer->is_set				&&
		mytimer->deadline <= abstime		) {
		mytimer->has_expired = TRUE; mytimer->is_set = FALSE;
		(*rtclock_timer_expire)(abstime);
		mytimer->has_expired = FALSE;
	}

	abstime = mach_absolute_time();
	decr1 = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

	if (mytimer->is_set)
		decr2 = deadline_to_decrementer(mytimer->deadline, abstime);

	if (decr1 > decr2)
		decr1 = decr2;

	if (	rtclock_decrementer_min != 0		&&
		rtclock_decrementer_min < (natural_t)decr1	)
		decr1 = rtclock_decrementer_min;

	mtdec(decr1);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
							| DBG_FUNC_NONE, decr1, 3, 0, 0, 0);
}

static void
rtclock_alarm_expire(
	timer_call_param_t		p0,
	timer_call_param_t		p1)
{
	mach_timespec_t		timestamp;

	(void) sysclk_gettime(&timestamp);

	clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}

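/*
 * deadline_to_decrementer() converts the distance from 'now' to
 * 'deadline' into a decrementer count, clamped to the range
 * [DECREMENTER_MIN, DECREMENTER_MAX].  A deadline that has already
 * passed yields DECREMENTER_MIN so the interrupt is taken almost
 * immediately.
 */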
static int
deadline_to_decrementer(
	uint64_t		deadline,
	uint64_t		now)
{
	uint64_t		delt;

	if (deadline <= now)
		return DECREMENTER_MIN;
	else {
		delt = deadline - now;
		return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
				((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
	}
}

static void
timespec_to_absolutetime(
	mach_timespec_t		*ts,
	uint64_t		*result)
{
	uint32_t	divisor;

	*result = ((uint64_t)ts->tv_sec * (divisor = rtclock_sec_divisor)) +
				((uint64_t)ts->tv_nsec * divisor) / NSEC_PER_SEC;
}

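/*
 * The interval and deadline conversion helpers below use the same
 * split-the-whole-seconds technique as the uptime routines above, so the
 * intermediate products of the tick-to-nanosecond (and reverse) scaling
 * stay within 64 bits.
 */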
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	abstime;

	clock_get_uptime(result);

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result += abstime;
}

void
clock_interval_to_absolutetime_interval(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	nanosecs = (uint64_t)interval * scale_factor;
	uint64_t	t64;
	uint32_t	divisor;

	*result = (t64 = nanosecs / NSEC_PER_SEC) *
							(divisor = rtclock_sec_divisor);
	nanosecs -= (t64 * NSEC_PER_SEC);
	*result += (nanosecs * divisor) / NSEC_PER_SEC;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	clock_get_uptime(result);

	*result += abstime;
}

void
absolutetime_to_nanoseconds(
	uint64_t		abstime,
	uint64_t		*result)
{
	uint64_t	t64;
	uint32_t	divisor;

	*result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC;
	abstime -= (t64 * divisor);
	*result += (abstime * NSEC_PER_SEC) / divisor;
}

void
nanoseconds_to_absolutetime(
	uint64_t		nanosecs,
	uint64_t		*result)
{
	uint64_t	t64;
	uint32_t	divisor;

	*result = (t64 = nanosecs / NSEC_PER_SEC) *
							(divisor = rtclock_sec_divisor);
	nanosecs -= (t64 * NSEC_PER_SEC);
	*result += (nanosecs * divisor) / NSEC_PER_SEC;
}

/*
 * Spin-loop delay primitives.
 */
void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t	now, end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	do {
		now = mach_absolute_time();
	} while (now < end);
}

void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t	now;

	do {
		now = mach_absolute_time();
	} while (now < deadline);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}