/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:	rtclock.c
 * Purpose:	Routines for handling the machine dependent
 *		real-time clock.
 */
#include <libkern/OSTypes.h>

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

#include <machine/mach_param.h>	/* HZ */
#include <ppc/proc_reg.h>

#include <pexpert/pexpert.h>

#include <sys/kdebug.h>

int sysclk_config(void);

int sysclk_init(void);

kern_return_t sysclk_gettime(
    mach_timespec_t *cur_time);

kern_return_t sysclk_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,
    mach_msg_type_number_t *count);

void sysclk_setalarm(
    mach_timespec_t *deadline);

struct clock_ops sysclk_ops = {
    sysclk_config, sysclk_init,
    sysclk_gettime, 0,
    sysclk_getattr, 0,
    sysclk_setalarm,
};

int calend_config(void);

int calend_init(void);

kern_return_t calend_gettime(
    mach_timespec_t *cur_time);

kern_return_t calend_settime(
    mach_timespec_t *cur_time);

kern_return_t calend_getattr(
    clock_flavor_t flavor,
    clock_attr_t attr,
    mach_msg_type_number_t *count);

struct clock_ops calend_ops = {
    calend_config, calend_init,
    calend_gettime, calend_settime,
    calend_getattr, 0,
    0,
};

/* local data declarations */

static struct rtclock {
    mach_timespec_t     calend_offset;
    boolean_t           calend_is_set;

    mach_timebase_info_data_t   timebase_const;

    struct rtclock_timer {
        uint64_t        deadline;
        boolean_t       is_set;
    }                   timer[NCPUS];

    clock_timer_func_t  timer_expire;

    timer_call_data_t   alarm[NCPUS];

    /* debugging */
    uint64_t            last_abstime[NCPUS];
    int                 last_decr[NCPUS];

    decl_simple_lock_data(,lock)    /* real-time clock device lock */
} rtclock;

static boolean_t    rtclock_initialized;

static uint64_t     rtclock_tick_deadline[NCPUS];
static uint64_t     rtclock_tick_interval;

static void timespec_to_absolutetime(
    mach_timespec_t timespec,
    uint64_t        *result);

static int deadline_to_decrementer(
    uint64_t        deadline,
    uint64_t        now);

static void rtclock_alarm_timer(
    timer_call_param_t  p0,
    timer_call_param_t  p1);

/* global data declarations */

#define RTC_TICKPERIOD  (NSEC_PER_SEC / HZ)

#define DECREMENTER_MAX 0x7FFFFFFFUL
#define DECREMENTER_MIN 0xAUL

natural_t   rtclock_decrementer_min;

/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)                     \
MACRO_BEGIN                             \
    (s) = splclock();                   \
    simple_lock(&rtclock.lock);         \
MACRO_END

#define UNLOCK_RTC(s)                   \
MACRO_BEGIN                             \
    simple_unlock(&rtclock.lock);       \
    splx(s);                            \
MACRO_END

static void
timebase_callback(
    struct timebase_freq_t  *freq)
{
    natural_t   numer, denom;
    int         n;
    spl_t       s;

    /*
     * The timebase constant converts timebase ticks to nanoseconds:
     * ns = ticks * numer / denom.  The reported frequency is
     * timebase_num / timebase_den ticks per second, so the frequency
     * numerator becomes our denominator (and vice versa), scaled by
     * NSEC_PER_SEC; common factors of 10 are stripped first to keep
     * the intermediate products small.
     */
    denom = freq->timebase_num;
    n = 9;
    while (!(denom % 10)) {
        if (n < 1)
            break;
        denom /= 10;
        n--;
    }

    numer = freq->timebase_den;
    while (n-- > 0) {
        numer *= 10;
    }

    LOCK_RTC(s);
    rtclock.timebase_const.numer = numer;
    rtclock.timebase_const.denom = denom;
    UNLOCK_RTC(s);
}
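
/*
 * Illustrative example (added commentary, not original source text):
 * for a hypothetical 25 MHz timebase reported as timebase_num =
 * 25000000, timebase_den = 1, the loop strips six factors of 10,
 * leaving denom = 25 and n = 3, so numer = 1 * 10^3 = 1000.  The
 * conversion then reads ns = ticks * 1000 / 25 = ticks * 40, i.e.
 * 40ns per tick, as expected for 25 MHz.
 */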

/*
 * Configure the real-time clock device.
 */
int
sysclk_config(void)
{
    int i;

    if (cpu_number() != master_cpu)
        return (1);

    for (i = 0; i < NCPUS; i++)
        timer_call_setup(&rtclock.alarm[i], rtclock_alarm_timer, NULL);

    simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK);

    PE_register_timebase_callback(timebase_callback);

    return (1);
}

/*
 * Initialize the system clock device.
 */
int
sysclk_init(void)
{
    uint64_t    abstime;
    int         decr, mycpu = cpu_number();

    if (mycpu != master_cpu) {
        if (rtclock_initialized == FALSE) {
            panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu);
        }
        /* Set decrementer and hence our next tick due */
        clock_get_uptime(&abstime);
        rtclock_tick_deadline[mycpu] = abstime;
        rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
        decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
        mtdec(decr);
        rtclock.last_decr[mycpu] = decr;

        return (1);
    }

    /*
     * Initialize non-zero clock structure values.
     */
    clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1,
                                            &rtclock_tick_interval);
    /* Set decrementer and our next tick due */
    clock_get_uptime(&abstime);
    rtclock_tick_deadline[mycpu] = abstime;
    rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
    decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
    mtdec(decr);
    rtclock.last_decr[mycpu] = decr;

    rtclock_initialized = TRUE;

    return (1);
}
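
/*
 * Sketch of the per-CPU tick programming above (added commentary, not
 * original source text): each processor keeps its own next-tick
 * deadline in timebase units and arms the PowerPC decrementer (mtdec)
 * with the number of timebase ticks remaining until that deadline.
 * As an illustration only: with HZ = 100, RTC_TICKPERIOD is 10ms, so
 * on a 25 MHz timebase rtclock_tick_interval would come out to
 * 250000 ticks (example figures, not values from this source).
 */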

#define UnsignedWide_to_scalar(x)   (*(uint64_t *)(x))
#define scalar_to_UnsignedWide(x)   (*(UnsignedWide *)(x))

/*
 * Perform a full 64 bit by 32 bit unsigned multiply,
 * yielding a 96 bit product.  The most significant
 * portion of the product is returned as a 64 bit
 * quantity, with the lower portion as a 32 bit word.
 */
static void
umul_64by32(
    UnsignedWide    now64,
    uint32_t        mult32,
    UnsignedWide    *result64,
    uint32_t        *result32)
{
    uint32_t        mid, mid2;

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (*result32) :
                    "r" (now64.lo), "r" (mult32));

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (mid2) :
                    "r" (now64.hi), "r" (mult32));
    asm volatile("  mulhwu %0,%1,%2" :
                    "=r" (mid) :
                    "r" (now64.lo), "r" (mult32));

    asm volatile("  mulhwu %0,%1,%2" :
                    "=r" (result64->hi) :
                    "r" (now64.hi), "r" (mult32));

    /* The two instructions must live in one string literal; the
       original split the literal across source lines, which is not
       valid C. */
    asm volatile("  addc %0,%2,%3; addze %1,%4" :
                    "=r" (result64->lo), "=r" (result64->hi) :
                    "r" (mid), "r" (mid2), "1" (result64->hi));
}
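
/*
 * How the 96 bit product is assembled (added commentary, not original
 * source text): writing now64 = hi*2^32 + lo, the product is
 *
 *      now64 * mult32 = (hi*mult32)<<32 + lo*mult32
 *
 * mullw/mulhwu produce the low and high 32 bits of each 32x32 partial
 * product.  Bits 0..31 of the result are the low word of lo*mult32;
 * the two "middle" words (mid, mid2) are summed with addc into bits
 * 32..63, and addze folds the carry into the top word.  The
 * umul_64by32to64 variant below is the same computation with the top
 * word, and hence the carry, simply discarded.
 */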

/*
 * Perform a partial 64 bit by 32 bit unsigned multiply,
 * yielding a 64 bit product.  Only the least significant
 * 64 bits of the product are calculated and returned.
 */
static void
umul_64by32to64(
    UnsignedWide    now64,
    uint32_t        mult32,
    UnsignedWide    *result64)
{
    uint32_t        mid, mid2;

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (result64->lo) :
                    "r" (now64.lo), "r" (mult32));

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (mid2) :
                    "r" (now64.hi), "r" (mult32));
    asm volatile("  mulhwu %0,%1,%2" :
                    "=r" (mid) :
                    "r" (now64.lo), "r" (mult32));

    asm volatile("  add %0,%1,%2" :
                    "=r" (result64->hi) :
                    "r" (mid), "r" (mid2));
}

/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 96 bit quotient.
 * The most significant portion of the quotient is
 * returned as a 64 bit quantity, with the lower
 * portion as a 32 bit word.
 */
static void
udiv_96by32(
    UnsignedWide    now64,
    uint32_t        now32,
    uint32_t        div32,
    UnsignedWide    *result64,
    uint32_t        *result32)
{
    UnsignedWide    t64;

    if (now64.hi > 0 || now64.lo >= div32) {
        UnsignedWide_to_scalar(result64) =
            UnsignedWide_to_scalar(&now64) / div32;

        umul_64by32to64(*result64, div32, &t64);

        UnsignedWide_to_scalar(&t64) =
            UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

        *result32 = (((uint64_t)t64.lo << 32) | now32) / div32;
    }
    else {
        UnsignedWide_to_scalar(result64) =
            (((uint64_t)now64.lo << 32) | now32) / div32;

        *result32 = result64->lo;
        result64->lo = result64->hi;
        result64->hi = 0;
    }
}
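
/*
 * Explanatory note (added commentary, not original source text): this
 * is one step of schoolbook long division in base 2^32.  When the
 * upper 64 bits (now64) are at least div32, the code first divides
 * them to obtain the top of the quotient, multiplies back and
 * subtracts to get a remainder r < div32, then divides (r<<32 | now32)
 * for the final digit; because r < div32 < 2^32, that last quotient is
 * guaranteed to fit in 32 bits.  Otherwise the whole dividend fits in
 * 64 bits and a single divide suffices.
 */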

/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 64 bit quotient.
 * Any higher order bits of the quotient are simply
 * discarded.
 */
static void
udiv_96by32to64(
    UnsignedWide    now64,
    uint32_t        now32,
    uint32_t        div32,
    UnsignedWide    *result64)
{
    UnsignedWide    t64;

    if (now64.hi > 0 || now64.lo >= div32) {
        UnsignedWide_to_scalar(result64) =
            UnsignedWide_to_scalar(&now64) / div32;

        umul_64by32to64(*result64, div32, &t64);

        UnsignedWide_to_scalar(&t64) =
            UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

        result64->hi = result64->lo;
        result64->lo = (((uint64_t)t64.lo << 32) | now32) / div32;
    }
    else {
        UnsignedWide_to_scalar(result64) =
            (((uint64_t)now64.lo << 32) | now32) / div32;
    }
}

/*
 * Perform an unsigned division of a 96 bit value
 * by a 32 bit value, yielding a 32 bit quotient,
 * and a 32 bit remainder.  Any higher order bits
 * of the quotient are simply discarded.
 */
static void
udiv_96by32to32and32(
    UnsignedWide    now64,
    uint32_t        now32,
    uint32_t        div32,
    uint32_t        *result32,
    uint32_t        *remain32)
{
    UnsignedWide    t64, u64;

    if (now64.hi > 0 || now64.lo >= div32) {
        UnsignedWide_to_scalar(&t64) =
            UnsignedWide_to_scalar(&now64) / div32;

        umul_64by32to64(t64, div32, &t64);

        UnsignedWide_to_scalar(&t64) =
            UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64);

        UnsignedWide_to_scalar(&t64) = ((uint64_t)t64.lo << 32) | now32;

        UnsignedWide_to_scalar(&u64) =
            UnsignedWide_to_scalar(&t64) / div32;

        *result32 = u64.lo;

        umul_64by32to64(u64, div32, &u64);

        *remain32 = UnsignedWide_to_scalar(&t64) -
                        UnsignedWide_to_scalar(&u64);
    }
    else {
        UnsignedWide_to_scalar(&t64) = ((uint64_t)now64.lo << 32) | now32;

        UnsignedWide_to_scalar(&u64) =
            UnsignedWide_to_scalar(&t64) / div32;

        *result32 = u64.lo;

        umul_64by32to64(u64, div32, &u64);

        *remain32 = UnsignedWide_to_scalar(&t64) -
                        UnsignedWide_to_scalar(&u64);
    }
}

/*
 * Get the clock device time.  This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 *
 * SMP configurations - *the processor clocks are synchronised*
 */
kern_return_t
sysclk_gettime_internal(
    mach_timespec_t *time)  /* OUT */
{
    UnsignedWide    now;
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;

    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;

    clock_get_uptime((uint64_t *)&now);

    umul_64by32(now, numer, &t64, &t32);

    udiv_96by32(t64, t32, denom, &t64, &t32);

    udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
                            &time->tv_sec, &time->tv_nsec);

    return (KERN_SUCCESS);
}
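
/*
 * Conversion pipeline above (added commentary, not original source
 * text): the 64 bit timebase value is scaled to nanoseconds through a
 * 96 bit intermediate, abstime * numer / denom, so no precision is
 * lost to overflow; the final udiv_96by32to32and32 by NSEC_PER_SEC
 * splits the nanosecond total into the mach_timespec_t seconds
 * (quotient) and nanoseconds (remainder) fields.
 */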

kern_return_t
sysclk_gettime(
    mach_timespec_t *time)  /* OUT */
{
    UnsignedWide    now;
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    clock_get_uptime((uint64_t *)&now);

    umul_64by32(now, numer, &t64, &t32);

    udiv_96by32(t64, t32, denom, &t64, &t32);

    udiv_96by32to32and32(t64, t32, NSEC_PER_SEC,
                            &time->tv_sec, &time->tv_nsec);

    return (KERN_SUCCESS);
}

/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,       /* OUT */
    mach_msg_type_number_t  *count)     /* IN/OUT */
{
    spl_t   s;

    if (*count != 1)
        return (KERN_FAILURE);
    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        LOCK_RTC(s);
        *(clock_res_t *) attr = RTC_TICKPERIOD;
        UNLOCK_RTC(s);
        break;

    default:
        return (KERN_INVALID_VALUE);
    }
    return (KERN_SUCCESS);
}

/*
 * Set deadline for the next alarm on the clock device.  This call
 * always resets the time to deliver an alarm for the clock.
 */
void
sysclk_setalarm(
    mach_timespec_t *deadline)
{
    uint64_t    abstime;

    timespec_to_absolutetime(*deadline, &abstime);
    timer_call_enter(&rtclock.alarm[cpu_number()], abstime);
}

/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
    return (1);
}

/*
 * Initialize the calendar clock.
 */
int
calend_init(void)
{
    if (cpu_number() != master_cpu)
        return (1);

    return (1);
}

/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
    mach_timespec_t *curr_time) /* OUT */
{
    spl_t   s;

    LOCK_RTC(s);
    if (!rtclock.calend_is_set) {
        UNLOCK_RTC(s);
        return (KERN_FAILURE);
    }

    (void) sysclk_gettime_internal(curr_time);
    ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset);
    UNLOCK_RTC(s);

    return (KERN_SUCCESS);
}

/*
 * Set the current clock time.
 */
kern_return_t
calend_settime(
    mach_timespec_t *new_time)
{
    mach_timespec_t curr_time;
    spl_t           s;

    LOCK_RTC(s);
    (void) sysclk_gettime_internal(&curr_time);
    rtclock.calend_offset = *new_time;
    SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
    rtclock.calend_is_set = TRUE;
    UNLOCK_RTC(s);

    PESetGMTTimeOfDay(new_time->tv_sec);

    return (KERN_SUCCESS);
}

/*
 * Get calendar clock attributes.
 */
kern_return_t
calend_getattr(
    clock_flavor_t          flavor,
    clock_attr_t            attr,       /* OUT */
    mach_msg_type_number_t  *count)     /* IN/OUT */
{
    spl_t   s;

    if (*count != 1)
        return (KERN_FAILURE);
    switch (flavor) {

    case CLOCK_GET_TIME_RES:    /* >0 res */
        LOCK_RTC(s);
        *(clock_res_t *) attr = RTC_TICKPERIOD;
        UNLOCK_RTC(s);
        break;

    case CLOCK_ALARM_CURRES:    /* =0 no alarm */
    case CLOCK_ALARM_MINRES:
    case CLOCK_ALARM_MAXRES:
        *(clock_res_t *) attr = 0;
        break;

    default:
        return (KERN_INVALID_VALUE);
    }
    return (KERN_SUCCESS);
}

void
clock_adjust_calendar(
    clock_res_t nsec)
{
    spl_t   s;

    LOCK_RTC(s);
    if (rtclock.calend_is_set)
        ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
    UNLOCK_RTC(s);
}

void
clock_initialize_calendar(void)
{
    mach_timespec_t curr_time;
    long            seconds = PEGetGMTTimeOfDay();
    spl_t           s;

    LOCK_RTC(s);
    (void) sysclk_gettime_internal(&curr_time);
    /*
     * The platform expert reports whole seconds only, so round the
     * offset to the nearest second (500*USEC_PER_SEC nanoseconds is
     * half a second) before subtracting the current uptime.
     */
    if (curr_time.tv_nsec < 500*USEC_PER_SEC)
        rtclock.calend_offset.tv_sec = seconds;
    else
        rtclock.calend_offset.tv_sec = seconds + 1;
    rtclock.calend_offset.tv_nsec = 0;
    SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
    rtclock.calend_is_set = TRUE;
    UNLOCK_RTC(s);
}

mach_timespec_t
clock_get_calendar_offset(void)
{
    mach_timespec_t result = MACH_TIMESPEC_ZERO;
    spl_t           s;

    LOCK_RTC(s);
    if (rtclock.calend_is_set)
        result = rtclock.calend_offset;
    UNLOCK_RTC(s);

    return (result);
}

void
clock_timebase_info(
    mach_timebase_info_t    info)
{
    spl_t   s;

    LOCK_RTC(s);
    *info = rtclock.timebase_const;
    UNLOCK_RTC(s);
}

void
clock_set_timer_deadline(
    uint64_t    deadline)
{
    uint64_t    abstime;
    int         decr, mycpu;
    struct rtclock_timer    *mytimer;
    spl_t       s;

    s = splclock();
    mycpu = cpu_number();
    mytimer = &rtclock.timer[mycpu];
    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    mytimer->deadline = deadline;
    mytimer->is_set = TRUE;
    if ( mytimer->deadline < rtclock_tick_deadline[mycpu] ) {
        decr = deadline_to_decrementer(mytimer->deadline, abstime);
        if ( rtclock_decrementer_min != 0 &&
                rtclock_decrementer_min < (natural_t)decr )
            decr = rtclock_decrementer_min;

        mtdec(decr);
        rtclock.last_decr[mycpu] = decr;

        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                                | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
    }
    splx(s);
}

void
clock_set_timer_func(
    clock_timer_func_t  func)
{
    spl_t   s;

    LOCK_RTC(s);
    if (rtclock.timer_expire == NULL)
        rtclock.timer_expire = func;
    UNLOCK_RTC(s);
}

/*
 * Reset the clock device.  This causes the realtime clock
 * device to reload its mode and count value (frequency).
 */
void
rtclock_reset(void)
{
    return;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
    int                     device,
    struct ppc_saved_state  *ssp,
    spl_t                   old_spl)
{
    uint64_t    abstime;
    int         decr[3], mycpu = cpu_number();
    struct rtclock_timer    *mytimer = &rtclock.timer[mycpu];

    /*
     * We may receive interrupts too early, we must reject them.
     */
    if (rtclock_initialized == FALSE) {
        mtdec(DECREMENTER_MAX);     /* Max the decrementer if not init */
        return;
    }

    decr[1] = decr[2] = DECREMENTER_MAX;

    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    if ( rtclock_tick_deadline[mycpu] <= abstime ) {
        clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
                                            &rtclock_tick_deadline[mycpu]);
        hertz_tick(USER_MODE(ssp->srr1), ssp->srr0);
    }

    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    if ( mytimer->is_set &&
            mytimer->deadline <= abstime ) {
        mytimer->is_set = FALSE;
        (*rtclock.timer_expire)(abstime);
    }

    clock_get_uptime(&abstime);
    rtclock.last_abstime[mycpu] = abstime;
    decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);

    if (mytimer->is_set)
        decr[2] = deadline_to_decrementer(mytimer->deadline, abstime);

    if (decr[1] > decr[2])
        decr[1] = decr[2];

    if ( rtclock_decrementer_min != 0 &&
            rtclock_decrementer_min < (natural_t)decr[1] )
        decr[1] = rtclock_decrementer_min;

    mtdec(decr[1]);
    rtclock.last_decr[mycpu] = decr[1];

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
                            | DBG_FUNC_NONE, decr[1], 3, 0, 0, 0);
}
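
/*
 * Decrementer rearm logic above (added commentary, not original source
 * text): the handler services whichever events are due -- the periodic
 * hertz tick and the one-shot expiration timer -- then reprograms the
 * decrementer with the smaller of the two remaining intervals (decr[1]
 * for the next tick, decr[2] for the timer), so a single hardware
 * countdown serves both.  When nonzero, rtclock_decrementer_min is a
 * debugging knob that caps the programmed count, forcing an interrupt
 * at least that often.
 */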

static void
rtclock_alarm_timer(
    timer_call_param_t  p0,
    timer_call_param_t  p1)
{
    mach_timespec_t timestamp;

    (void) sysclk_gettime(&timestamp);

    clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
}

void
clock_get_uptime(
    uint64_t    *result0)
{
    UnsignedWide    *result = (UnsignedWide *)result0;
    uint32_t        hi, lo, hic;

    do {
        asm volatile("  mftbu %0" : "=r" (hi));
        asm volatile("  mftb %0" : "=r" (lo));
        asm volatile("  mftbu %0" : "=r" (hic));
    } while (hic != hi);

    result->lo = lo;
    result->hi = hi;
}
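
/*
 * Explanatory note (added commentary, not original source text): the
 * 64 bit timebase must be read as two 32 bit halves (mftbu/mftb), so
 * the low word can carry into the upper word between the two reads.
 * Re-reading TBU and retrying whenever it has changed guarantees a
 * consistent hi:lo snapshot without any locking.
 */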

static int
deadline_to_decrementer(
    uint64_t    deadline,
    uint64_t    now)
{
    uint64_t    delt;

    if (deadline <= now)
        return DECREMENTER_MIN;
    else {
        delt = deadline - now;
        return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
                ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN);
    }
}
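
/*
 * Illustrative behaviour (added commentary, not original source text):
 * a deadline 1000 timebase ticks away yields a decrementer count of
 * 999, since the interrupt fires as the register decrements through
 * zero.  Results are clamped to [DECREMENTER_MIN, DECREMENTER_MAX]:
 * past or imminent deadlines still program at least 0xA ticks, and
 * far-off deadlines saturate at 0x7FFFFFFF (the decrementer is a
 * signed 32 bit counter, so larger values would fire immediately).
 */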

static void
timespec_to_absolutetime(
    mach_timespec_t timespec,
    uint64_t        *result0)
{
    UnsignedWide    *result = (UnsignedWide *)result0;
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (t64.lo) :
                    "r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

    asm volatile("  mulhwu %0,%1,%2" :
                    "=r" (t64.hi) :
                    "r" (timespec.tv_sec), "r" (NSEC_PER_SEC));

    UnsignedWide_to_scalar(&t64) += timespec.tv_nsec;

    umul_64by32(t64, denom, &t64, &t32);

    udiv_96by32(t64, t32, numer, &t64, &t32);

    result->hi = t64.lo;
    result->lo = t32;
}
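
/*
 * Explanatory note (added commentary, not original source text): this
 * is the inverse of the uptime-to-timespec conversion.  The timespec
 * is first flattened to 64 bit nanoseconds (sec * NSEC_PER_SEC + nsec),
 * then scaled by denom/numer -- the reciprocal of the ticks-to-ns
 * ratio -- through the same 96 bit multiply/divide helpers to recover
 * a value in timebase ticks.
 */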

void
clock_interval_to_deadline(
    uint32_t    interval,
    uint32_t    scale_factor,
    uint64_t    *result)
{
    uint64_t    abstime;

    clock_get_uptime(result);

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    *result += abstime;
}

void
clock_interval_to_absolutetime_interval(
    uint32_t    interval,
    uint32_t    scale_factor,
    uint64_t    *result0)
{
    UnsignedWide    *result = (UnsignedWide *)result0;
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    asm volatile("  mullw %0,%1,%2" :
                    "=r" (t64.lo) :
                    "r" (interval), "r" (scale_factor));
    asm volatile("  mulhwu %0,%1,%2" :
                    "=r" (t64.hi) :
                    "r" (interval), "r" (scale_factor));

    umul_64by32(t64, denom, &t64, &t32);

    udiv_96by32(t64, t32, numer, &t64, &t32);

    result->hi = t64.lo;
    result->lo = t32;
}

void
clock_absolutetime_interval_to_deadline(
    uint64_t    abstime,
    uint64_t    *result)
{
    clock_get_uptime(result);

    *result += abstime;
}

void
absolutetime_to_nanoseconds(
    uint64_t    abstime,
    uint64_t    *result)
{
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    UnsignedWide_to_scalar(&t64) = abstime;

    umul_64by32(t64, numer, &t64, &t32);

    udiv_96by32to64(t64, t32, denom, (void *)result);
}

void
nanoseconds_to_absolutetime(
    uint64_t    nanoseconds,
    uint64_t    *result)
{
    UnsignedWide    t64;
    uint32_t        t32;
    uint32_t        numer, denom;
    spl_t           s;

    LOCK_RTC(s);
    numer = rtclock.timebase_const.numer;
    denom = rtclock.timebase_const.denom;
    UNLOCK_RTC(s);

    UnsignedWide_to_scalar(&t64) = nanoseconds;

    umul_64by32(t64, denom, &t64, &t32);

    udiv_96by32to64(t64, t32, numer, (void *)result);
}

/*
 * Spin-loop delay primitives.
 */
void
delay_for_interval(
    uint32_t    interval,
    uint32_t    scale_factor)
{
    uint64_t    now, end;

    clock_interval_to_deadline(interval, scale_factor, &end);

    do {
        clock_get_uptime(&now);
    } while (now < end);
}

void
clock_delay_until(
    uint64_t    deadline)
{
    uint64_t    now;

    do {
        clock_get_uptime(&now);
    } while (now < deadline);
}

void
delay(
    int usec)
{
    delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}