]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/rtclock.c
43ab1f3be79d6f54858c4cc65e902132a903415e
[apple/xnu.git] / osfmk / i386 / rtclock.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28
29 /*
30 * File: i386/rtclock.c
31 * Purpose: Routines for handling the machine dependent
32 * real-time clock. This clock is generated by
33 * the Intel 8254 Programmable Interval Timer.
34 */
35
36 #include <cpus.h>
37 #include <platforms.h>
38 #include <mp_v1_1.h>
39 #include <mach_kdb.h>
40 #include <kern/cpu_number.h>
41 #include <kern/cpu_data.h>
42 #include <kern/clock.h>
43 #include <kern/macro_help.h>
44 #include <kern/misc_protos.h>
45 #include <kern/spl.h>
46 #include <machine/mach_param.h> /* HZ */
47 #include <mach/vm_prot.h>
48 #include <vm/pmap.h>
49 #include <vm/vm_kern.h> /* for kernel_map */
50 #include <i386/ipl.h>
51 #include <i386/pit.h>
52 #include <i386/pio.h>
53 #include <i386/misc_protos.h>
54 #include <i386/rtclock_entries.h>
55 #include <i386/hardclock_entries.h>
56
/*
 * Forward declarations: the system (real-time) clock device interface
 * exported through sysclk_ops below.
 */
int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
	mach_timespec_t	*cur_time);

kern_return_t	sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);

kern_return_t	sysclk_setattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	count);

void		sysclk_setalarm(
	mach_timespec_t	*alarm_time);

/* NOTE(review): declared but never referenced in this file — presumably
 * used by IOKit interrupt-registration glue; confirm against callers. */
extern void (*IOKitRegisterInterruptHook)(void *, int irq, int isclock);
78
79 /*
80 * Inlines to get timestamp counter value.
81 */
82
/* Read the CPU timestamp counter: high 32 bits into *hi (EDX),
 * low 32 bits into *lo (EAX). */
static inline void rdtsc_hilo(uint32_t *hi, uint32_t *lo) {
	asm volatile("rdtsc": "=a" (*lo), "=d" (*hi));
}
86
/*
 * Read the full 64-bit timestamp counter.
 * NOTE(review): the "=A" constraint means the EDX:EAX pair only on
 * 32-bit x86; it is correct for i386 but would be wrong if this code
 * were ever compiled for x86-64 — confirm before porting.
 */
static inline uint64_t rdtsc_64(void) {
	uint64_t result;
	asm volatile("rdtsc": "=A" (result));
	return result;
}
92
/*
 * Lists of clock routines.
 * Slot order (by the functions assigned): config, init, gettime,
 * settime, getattr, setattr, setalarm.  The system clock cannot be
 * set directly, so the settime slot is 0.
 */
struct clock_ops sysclk_ops = {
	sysclk_config,		sysclk_init,
	sysclk_gettime,		0,
	sysclk_getattr,		sysclk_setattr,
	sysclk_setalarm,
};
102
/* Forward declarations: calendar clock interface (backed by the bbc_*
 * routines), exported through calend_ops below. */
int		calend_config(void);

int		calend_init(void);

kern_return_t	calend_gettime(
	mach_timespec_t	*cur_time);

kern_return_t	calend_settime(
	mach_timespec_t	*cur_time);

kern_return_t	calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);

/* No setattr or setalarm support for the calendar clock (zero slots). */
struct clock_ops calend_ops = {
	calend_config,		calend_init,
	calend_gettime,		calend_settime,
	calend_getattr,		0,
	0,
};
124
/* local data declarations */
mach_timespec_t	*RtcTime = (mach_timespec_t *)0;	/* &rtclock.time once sysclk_init() runs; NULL => uninitialized */
mach_timespec_t	*RtcAlrm;		/* &rtclock.alarm_time when an alarm is pending, else 0 */
clock_res_t	RtcDelt;		/* half of one tick (rtclock.intr_nsec / 2), in nsec */

/* global data declarations */
struct {
	uint64_t	abstime;	/* running absolute time, advanced each interrupt */

	mach_timespec_t	time;		/* current clock time */
	mach_timespec_t	alarm_time;	/* time of next alarm */

	mach_timespec_t	calend_offset;	/* calendar time minus system clock time */
	boolean_t	calend_is_set;	/* TRUE once the calendar has been set */

	uint64_t	timer_deadline;	/* abstime at which timer_expire is called */
	boolean_t	timer_is_set;
	clock_timer_func_t	timer_expire;	/* upcall made from rtclock_intr() */

	clock_res_t	new_ires;	/* pending new resolution (nano ) */
	clock_res_t	intr_nsec;	/* interrupt resolution (nano) */

	decl_simple_lock_data(,lock)	/* real-time clock device lock */
} rtclock;

unsigned int	clknum;			/* clks per second */
unsigned int	new_clknum;		/* pending clknum */
unsigned int	time_per_clk;		/* time per clk in ZHZ */
unsigned int	clks_per_int;		/* clks per interrupt */
unsigned int	clks_per_int_99;	/* 99% of clks_per_int; threshold for the "interrupt pending" probe */
int		rtc_intr_count;		/* interrupt counter */
int		rtc_intr_hertz;		/* interrupts per HZ */
int		rtc_intr_freq;		/* interrupt frequency */
int		rtc_print_lost_tick;	/* print lost tick */

uint32_t	rtc_cyc_per_sec;	/* processor cycles per second */
uint32_t	rtc_last_int_tsc_lo;	/* tsc values saved per interrupt */
uint32_t	rtc_last_int_tsc_hi;
163
/*
 * Macros to lock/unlock real-time clock device.
 * LOCK_RTC raises spl to splclock (saving the old level in s) and takes
 * rtclock.lock; UNLOCK_RTC releases the lock and restores the level.
 */
#define LOCK_RTC(s)				\
MACRO_BEGIN					\
	(s) = splclock();			\
	simple_lock(&rtclock.lock);		\
MACRO_END

#define UNLOCK_RTC(s)				\
MACRO_BEGIN					\
	simple_unlock(&rtclock.lock);		\
	splx(s);				\
MACRO_END
178
179 /*
180 * i8254 control. ** MONUMENT **
181 *
182 * The i8254 is a traditional PC device with some arbitrary characteristics.
183 * Basically, it is a register that counts at a fixed rate and can be
184 * programmed to generate an interrupt every N counts. The count rate is
185 * clknum counts per second (see pit.h), historically 1193167 we believe.
186 * Various constants are computed based on this value, and we calculate
187 * them at init time for execution efficiency. To obtain sufficient
188 * accuracy, some of the calculation are most easily done in floating
189 * point and then converted to int.
190 *
191 * We want an interrupt every 10 milliseconds, approximately. The count
192 * which will do that is clks_per_int. However, that many counts is not
193 * *exactly* 10 milliseconds; it is a bit more or less depending on
194 * roundoff. The actual time per tick is calculated and saved in
195 * rtclock.intr_nsec, and it is that value which is added to the time
196 * register on each tick.
197 *
198 * The i8254 counter can be read between interrupts in order to determine
199 * the time more accurately. The counter counts down from the preset value
200 * toward 0, and we have to handle the case where the counter has been
201 * reset just before being read and before the interrupt has been serviced.
202 * Given a count since the last interrupt, the time since then is given
203 * by (count * time_per_clk). In order to minimize integer truncation,
204 * we perform this calculation in an arbitrary unit of time which maintains
205 * the maximum precision, i.e. such that one tick is 1.0e9 of these units,
206 * or close to the precision of a 32-bit int. We then divide by this unit
207 * (which doesn't lose precision) to get nanoseconds. For notation
208 * purposes, this unit is defined as ZHZ = zanoseconds per nanosecond.
209 *
210 * This sequence to do all this is in sysclk_gettime. For efficiency, this
211 * sequence also needs the value that the counter will have if it has just
212 * overflowed, so we precompute that also. ALSO, certain platforms
213 * (specifically the DEC XL5100) have been observed to have problem
214 * with latching the counter, and they occasionally (say, one out of
215 * 100,000 times) return a bogus value. Hence, the present code reads
216 * the counter twice and checks for a consistent pair of values.
217 *
218 * Some attributes of the rt clock can be changed, including the
219 * interrupt resolution. We default to the minimum resolution (10 ms),
220 * but allow a finer resolution to be requested. The assumed frequency
221 * of the clock can also be set since it appears that the actual
222 * frequency of real-world hardware can vary from the nominal by
223 * 200 ppm or more. When the frequency is set, the values above are
224 * recomputed and we continue without resetting or changing anything else.
225 */
#define RTC_MINRES	(NSEC_PER_SEC / HZ)	/* nsec per tick (coarsest resolution) */
#define RTC_MAXRES	(RTC_MINRES / 20)	/* nsec per tick (finest resolution) */
#define ZANO		(1000000000)		/* zanoseconds per tick; see block comment above */
#define ZHZ		(ZANO / (NSEC_PER_SEC / HZ))	/* zanoseconds per nanosecond */
/* Latch channel 0 of the i8254, then read the 16-bit count low byte first. */
#define READ_8254(val) { \
	outb(PITCTL_PORT, PIT_C0); \
	(val) = inb(PITCTR0_PORT); \
	(val) |= inb(PITCTR0_PORT) << 8 ; }
234
/*
 * Calibration delay counts.
 */
unsigned int	delaycount = 100;	/* initial guess; refined by calibrate_delay() */
unsigned int	microdata = 50;		/* microseconds per measure_delay() probe */

/*
 * Forward decl.
 */

extern int measure_delay(int us);
void rtc_setvals( unsigned int, clock_res_t );

static void rtc_set_cyc_per_sec();
249
/*
 * Initialize non-zero clock structure values.
 *
 * new_clknum: i8254 counts per second (shadows the file-scope global
 *             of the same name, which it also stores into clknum).
 * new_ires:   desired interrupt resolution in nanoseconds.
 *
 * Recomputes all derived tick constants (clks_per_int, time_per_clk,
 * rtclock.intr_nsec, RtcDelt) and resets the per-HZ interrupt counter.
 * Panics if the scaled arithmetic would overflow.
 */
void
rtc_setvals(
	unsigned int	new_clknum,
	clock_res_t	new_ires
	)
{
	unsigned int	timeperclk;
	unsigned int	scale0;
	unsigned int	scale1;
	unsigned int	res;

	clknum = new_clknum;
	rtc_intr_freq = (NSEC_PER_SEC / new_ires);
	rtc_intr_hertz = rtc_intr_freq / HZ;
	/* counts per interrupt, rounded to nearest */
	clks_per_int = (clknum + (rtc_intr_freq / 2)) / rtc_intr_freq;
	clks_per_int_99 = clks_per_int - clks_per_int/100;

	/*
	 * The following calculations are done with scaling integer operations
	 * in order that the integer results are accurate to the lsb.
	 */
	timeperclk = div_scale(ZANO, clknum, &scale0);	/* 838.105647 nsec */

	time_per_clk = mul_scale(ZHZ, timeperclk, &scale1);	/* 83810 */
	if (scale0 > scale1)
		time_per_clk >>= (scale0 - scale1);
	else if (scale0 < scale1)
		panic("rtc_clock: time_per_clk overflow\n");

	/*
	 * Notice that rtclock.intr_nsec is signed ==> use unsigned int res
	 */
	res = mul_scale(clks_per_int, timeperclk, &scale1);	/* 10000276 */
	if (scale0 > scale1)
		rtclock.intr_nsec = res >> (scale0 - scale1);
	else
		panic("rtc_clock: rtclock.intr_nsec overflow\n");

	rtc_intr_count = 1;
	RtcDelt = rtclock.intr_nsec/2;
}
294
/*
 * Configure the real-time clock device. Return success (1)
 * or failure (0).  On SMP only the master CPU does the work;
 * other CPUs return success immediately.  Also initializes the
 * rtclock spinlock.
 */

int
sysclk_config(void)
{
	int	RtcFlag;
	int	pic;

#if	NCPUS > 1
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return(1);
	}
	mp_enable_preemption();
#endif
	/*
	 * Setup device.
	 */
#if	MP_V1_1
	{
		extern boolean_t mp_v1_1_initialized;
		if (mp_v1_1_initialized)
			pic = 2;
		else
			pic = 0;
	}
#else
	pic = 0;	/* FIXME .. interrupt registration moved to AppleIntelClock */
#endif


	/*
	 * We should attempt to test the real-time clock
	 * device here. If it were to fail, we should panic
	 * the system.
	 */
	RtcFlag = /* test device */1;
	printf("realtime clock configured\n");

	simple_lock_init(&rtclock.lock, ETAP_NO_TRACE);
	return (RtcFlag);
}
341
/*
 * Initialize the real-time clock device. Return success (1)
 * or failure (0). Since the real-time clock is required to
 * provide canonical mapped time, we allocate a page to keep
 * the clock time value. In addition, various variables used
 * to support the clock are initialized. Note: the clock is
 * not started until rtclock_reset is called.
 *
 * On SMP only the master CPU initializes; others return success.
 */
int
sysclk_init(void)
{
	vm_offset_t *vp;	/* NOTE(review): unused local — left in place */
#if	NCPUS > 1
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return(1);
	}
	mp_enable_preemption();
#endif

	RtcTime = &rtclock.time;
	rtc_setvals( CLKNUM, RTC_MINRES );	/* compute constants */
	rtc_set_cyc_per_sec();			/* compute number of tsc beats per second */
	return (1);
}
368
/* Incremental nsec value returned by the most recent gettime; zeroed on
 * each tick in rtclock_intr().  A new value smaller than the previous one
 * indicates a missed clock interrupt. */
static volatile unsigned int last_ival = 0;
370
371 /*
372 * Get the clock device time. This routine is responsible
373 * for converting the device's machine dependent time value
374 * into a canonical mach_timespec_t value.
375 */
376 kern_return_t
377 sysclk_gettime(
378 mach_timespec_t *cur_time) /* OUT */
379 {
380 mach_timespec_t itime = {0, 0};
381 unsigned int val, val2;
382 int s;
383
384 if (!RtcTime) {
385 /* Uninitialized */
386 cur_time->tv_nsec = 0;
387 cur_time->tv_sec = 0;
388 return (KERN_SUCCESS);
389 }
390
391 /*
392 * Inhibit interrupts. Determine the incremental
393 * time since the last interrupt. (This could be
394 * done in assembler for a bit more speed).
395 */
396 LOCK_RTC(s);
397 do {
398 READ_8254(val); /* read clock */
399 READ_8254(val2); /* read clock */
400 } while ( val2 > val || val2 < val - 10 );
401 if ( val > clks_per_int_99 ) {
402 outb( 0x0a, 0x20 ); /* see if interrupt pending */
403 if ( inb( 0x20 ) & 1 )
404 itime.tv_nsec = rtclock.intr_nsec; /* yes, add a tick */
405 }
406 itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ;
407 if ( itime.tv_nsec < last_ival ) {
408 if (rtc_print_lost_tick)
409 printf( "rtclock: missed clock interrupt.\n" );
410 }
411 last_ival = itime.tv_nsec;
412 cur_time->tv_sec = rtclock.time.tv_sec;
413 cur_time->tv_nsec = rtclock.time.tv_nsec;
414 UNLOCK_RTC(s);
415 ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime));
416 return (KERN_SUCCESS);
417 }
418
/*
 * Same conversion as sysclk_gettime() but without taking the RTC lock:
 * the caller must already hold rtclock.lock at splclock (see calend_gettime,
 * calend_settime, clock_initialize_calendar).
 */
kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*cur_time)	/* OUT */
{
	mach_timespec_t	itime = {0, 0};
	unsigned int	val, val2;

	if (!RtcTime) {
		/* Uninitialized */
		cur_time->tv_nsec = 0;
		cur_time->tv_sec = 0;
		return (KERN_SUCCESS);
	}

	/*
	 * Inhibit interrupts. Determine the incremental
	 * time since the last interrupt. (This could be
	 * done in assembler for a bit more speed).
	 */
	/* Read the counter twice until we get a consistent pair; some
	 * chipsets occasionally latch a bogus value (see block comment
	 * at the top of the file). */
	do {
		READ_8254(val);		/* read clock */
		READ_8254(val2);	/* read clock */
	} while ( val2 > val || val2 < val - 10 );
	/* Counter near its preset value: it may have wrapped with the tick
	 * interrupt not yet serviced, so probe the PIC and credit a tick. */
	if ( val > clks_per_int_99 ) {
		/* NOTE(review): outb() is used port-first elsewhere in this
		 * file; outb( 0x0a, 0x20 ) would write 0x20 to port 0x0a,
		 * whereas reading the PIC IRR needs OCW3 0x0a written to
		 * port 0x20 — the arguments look swapped; confirm. */
		outb( 0x0a, 0x20 );	/* see if interrupt pending */
		if ( inb( 0x20 ) & 1 )
			itime.tv_nsec = rtclock.intr_nsec;	/* yes, add a tick */
	}
	/* counts elapsed since the interrupt, converted via ZHZ to nsec */
	itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ;
	if ( itime.tv_nsec < last_ival ) {
		if (rtc_print_lost_tick)
			printf( "rtclock: missed clock interrupt.\n" );
	}
	last_ival = itime.tv_nsec;
	cur_time->tv_sec = rtclock.time.tv_sec;
	cur_time->tv_nsec = rtclock.time.tv_nsec;
	ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime));
	return (KERN_SUCCESS);
}
458
/*
 * Get the clock device time when ALL interrupts are already disabled.
 * Same as above except for turning interrupts off and on.
 * This routine is responsible for converting the device's machine dependent
 * time value into a canonical mach_timespec_t value.
 *
 * Because interrupts cannot occur, the i8254 is read only once (no
 * double-read consistency check) and only the simple lock is taken.
 */
void
sysclk_gettime_interrupts_disabled(
	mach_timespec_t	*cur_time)	/* OUT */
{
	mach_timespec_t	itime = {0, 0};
	unsigned int	val;

	if (!RtcTime) {
		/* Uninitialized */
		cur_time->tv_nsec = 0;
		cur_time->tv_sec = 0;
		return;
	}

	simple_lock(&rtclock.lock);

	/*
	 * Copy the current time knowing that we cant be interrupted
	 * between the two longwords and so dont need to use MTS_TO_TS
	 */
	READ_8254(val);		/* read clock */
	if ( val > clks_per_int_99 ) {
		/* NOTE(review): see sysclk_gettime_internal — the outb
		 * argument order here looks swapped for a PIC IRR probe. */
		outb( 0x0a, 0x20 );	/* see if interrupt pending */
		if ( inb( 0x20 ) & 1 )
			itime.tv_nsec = rtclock.intr_nsec;	/* yes, add a tick */
	}
	itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ;
	if ( itime.tv_nsec < last_ival ) {
		if (rtc_print_lost_tick)
			printf( "rtclock: missed clock interrupt.\n" );
	}
	last_ival = itime.tv_nsec;
	cur_time->tv_sec = rtclock.time.tv_sec;
	cur_time->tv_nsec = rtclock.time.tv_nsec;
	ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime));

	simple_unlock(&rtclock.lock);
}
503
504 // utility routine
505 // Code to calculate how many processor cycles are in a second...
506
507 static void
508 rtc_set_cyc_per_sec()
509 {
510
511 int x, y;
512 uint64_t cycles;
513 uint32_t c[15]; // array for holding sampled cycle counts
514 mach_timespec_t tst[15]; // array for holding time values. NOTE for some reason tv_sec not work
515
516 for (x=0; x<15; x++) { // quick sample 15 times
517 tst[x].tv_sec = 0;
518 tst[x].tv_nsec = 0;
519 sysclk_gettime_internal(&tst[x]);
520 rdtsc_hilo(&y, &c[x]);
521 }
522 y = 0;
523 cycles = 0;
524 for (x=0; x<14; x++) {
525 // simple formula really. calculate the numerator as the number of elapsed processor
526 // cycles * 1000 to adjust for the resolution we want. The denominator is the
527 // elapsed "real" time in nano-seconds. The result will be the processor speed in
528 // Mhz. any overflows will be discarded before they are added
529 if ((c[x+1] > c[x]) && (tst[x+1].tv_nsec > tst[x].tv_nsec)) {
530 cycles += ((uint64_t)(c[x+1]-c[x]) * NSEC_PER_SEC ) / (uint64_t)(tst[x+1].tv_nsec - tst[x].tv_nsec); // elapsed nsecs
531 y +=1;
532 }
533 }
534 if (y>0) { // we got more than 1 valid sample. This also takes care of the case of if the clock isn't running
535 cycles = cycles / y; // calc our average
536 }
537 rtc_cyc_per_sec = cycles;
538 rdtsc_hilo(&rtc_last_int_tsc_hi, &rtc_last_int_tsc_lo);
539 }
540
541 static
542 natural_t
543 get_uptime_cycles(void)
544 {
545 // get the time since the last interupt based on the processors TSC ignoring the
546 // RTC for speed
547
548 uint32_t a,d,intermediate_lo,intermediate_hi,result;
549 uint64_t newTime;
550
551 rdtsc_hilo(&d, &a);
552 if (d != rtc_last_int_tsc_hi) {
553 newTime = d-rtc_last_int_tsc_hi;
554 newTime = (newTime<<32) + (a-rtc_last_int_tsc_lo);
555 result = newTime;
556 } else {
557 result = a-rtc_last_int_tsc_lo;
558 }
559 __asm__ volatile ( " mul %3 ": "=eax" (intermediate_lo), "=edx" (intermediate_hi): "a"(result), "d"(NSEC_PER_SEC) );
560 __asm__ volatile ( " div %3": "=eax" (result): "eax"(intermediate_lo), "edx" (intermediate_hi), "ecx" (rtc_cyc_per_sec) );
561 return result;
562 }
563
564
/*
 * Get clock device attributes.
 *
 * flavor selects the attribute; *count must be 1.  Returns KERN_FAILURE
 * on a bad count, KERN_INVALID_VALUE on an unknown flavor.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
#if	(NCPUS == 1 || (MP_V1_1 && 0))
		LOCK_RTC(s);
		*(clock_res_t *) attr = 1000;
		UNLOCK_RTC(s);
		break;
#endif	/* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */
	/* fallthrough when the #if above is compiled out: GET_TIME_RES
	 * then reports the current interrupt resolution */
	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
		LOCK_RTC(s);
		*(clock_res_t *) attr = rtclock.intr_nsec;
		UNLOCK_RTC(s);
		break;

	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = RTC_MAXRES;
		break;

	case CLOCK_ALARM_MINRES:
		*(clock_res_t *) attr = RTC_MINRES;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}
606
/*
 * Set clock device attributes.
 *
 * Only CLOCK_ALARM_CURRES may be set: a new interrupt resolution in
 * [RTC_MAXRES, RTC_MINRES] that the i8254 can achieve to within 0.1%.
 * The change is recorded in rtclock.new_ires and applied by
 * rtclock_intr() at the next HZ-aligned tick.
 */
kern_return_t
sysclk_setattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* IN */
	mach_msg_type_number_t	count)		/* IN */
{
	spl_t	s;
	int	freq;
	int	adj;
	clock_res_t	new_ires;

	if (count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:
	case CLOCK_ALARM_MAXRES:
	case CLOCK_ALARM_MINRES:
		return (KERN_FAILURE);

	case CLOCK_ALARM_CURRES:
		new_ires = *(clock_res_t *) attr;

		/*
		 * The new resolution must be within the predetermined
		 * range.  If the desired resolution cannot be achieved
		 * to within 0.1%, an error is returned.
		 * (RTC_MAXRES is the smaller nsec value — finer res.)
		 */
		if (new_ires < RTC_MAXRES || new_ires > RTC_MINRES)
			return (KERN_INVALID_VALUE);
		freq = (NSEC_PER_SEC / new_ires);
		adj = (((clknum % freq) * new_ires) / clknum);
		if (adj > (new_ires / 1000))
			return (KERN_INVALID_VALUE);
		/*
		 * Record the new alarm resolution which will take effect
		 * on the next HZ aligned clock tick.
		 */
		LOCK_RTC(s);
		if ( freq != rtc_intr_freq ) {
			rtclock.new_ires = new_ires;
			new_clknum = clknum;
		}
		UNLOCK_RTC(s);
		return (KERN_SUCCESS);

	default:
		return (KERN_INVALID_VALUE);
	}
}
660
/*
 * Set next alarm time for the clock device. This call
 * always resets the time to deliver an alarm for the
 * clock.  The alarm itself is delivered by rtclock_intr()
 * once RtcTime passes alarm_time (within half a tick).
 */
void
sysclk_setalarm(
	mach_timespec_t	*alarm_time)
{
	spl_t	s;

	LOCK_RTC(s);
	rtclock.alarm_time = *alarm_time;
	RtcAlrm = &rtclock.alarm_time;
	UNLOCK_RTC(s);
}
677
/*
 * Configure the calendar clock.  Defers to the battery-backed
 * clock driver; returns its success/failure value.
 */
int
calend_config(void)
{
	return bbc_config();
}
686
/*
 * Initialize calendar clock.  Nothing to do; always succeeds.
 */
int
calend_init(void)
{
	return (1);
}
695
/*
 * Get the current clock time: system clock time plus the calendar
 * offset.  Fails with KERN_FAILURE until the calendar has been set
 * (calend_settime or clock_initialize_calendar).
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*cur_time)	/* OUT */
{
	spl_t	s;

	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		UNLOCK_RTC(s);
		return (KERN_FAILURE);
	}

	(void) sysclk_gettime_internal(cur_time);
	ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
	UNLOCK_RTC(s);

	return (KERN_SUCCESS);
}
717
/*
 * Set the current clock time.  Records new_time - sysclk time as the
 * calendar offset, marks the calendar set, and (outside the lock)
 * writes the new time through to the battery-backed clock.
 */
kern_return_t
calend_settime(
	mach_timespec_t	*new_time)
{
	mach_timespec_t	curr_time;
	spl_t		s;

	LOCK_RTC(s);
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset = *new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	UNLOCK_RTC(s);

	(void) bbc_settime(new_time);

	return (KERN_SUCCESS);
}
739
/*
 * Get clock device attributes.
 *
 * Time resolution mirrors sysclk's; the calendar clock supports no
 * alarms, so all alarm-resolution flavors report 0.
 */
kern_return_t
calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
#if	(NCPUS == 1 || (MP_V1_1 && 0))
		LOCK_RTC(s);
		*(clock_res_t *) attr = 1000;
		UNLOCK_RTC(s);
		break;
#else	/* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */
		LOCK_RTC(s);
		*(clock_res_t *) attr = rtclock.intr_nsec;
		UNLOCK_RTC(s);
		break;
#endif	/* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}
779
/*
 * Slew the calendar by nsec nanoseconds (positive or negative) by
 * adjusting the stored offset.  No-op until the calendar is set.
 */
void
clock_adjust_calendar(
	clock_res_t	nsec)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock.calend_is_set)
		ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
	UNLOCK_RTC(s);
}
791
/*
 * Seed the calendar from the battery-backed clock at boot.  Does
 * nothing if the bbc read fails or the calendar was already set
 * (e.g. by calend_settime).
 */
void
clock_initialize_calendar(void)
{
	mach_timespec_t	bbc_time, curr_time;
	spl_t		s;

	if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
		return;

	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		(void) sysclk_gettime_internal(&curr_time);
		rtclock.calend_offset = bbc_time;
		SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
		rtclock.calend_is_set = TRUE;
	}
	UNLOCK_RTC(s);
}
810
/*
 * Return the current calendar offset (calendar minus system clock),
 * or zero if the calendar has not been set.
 */
mach_timespec_t
clock_get_calendar_offset(void)
{
	mach_timespec_t	result = MACH_TIMESPEC_ZERO;
	spl_t		s;

	LOCK_RTC(s);
	if (rtclock.calend_is_set)
		result = rtclock.calend_offset;
	UNLOCK_RTC(s);

	return (result);
}
824
/*
 * Report the absolute-time to nanosecond conversion ratio.
 * 1/1 here: abstime is already kept in nanoseconds on this platform
 * (see absolutetime_to_nanoseconds below).
 */
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	spl_t	s;

	LOCK_RTC(s);
	info->numer = info->denom = 1;
	UNLOCK_RTC(s);
}
835
/*
 * Arm the one-shot timer: rtclock_intr() calls rtclock.timer_expire
 * once rtclock.abstime reaches deadline.
 */
void
clock_set_timer_deadline(
	uint64_t	deadline)
{
	spl_t	s;

	LOCK_RTC(s);
	rtclock.timer_deadline = deadline;
	rtclock.timer_is_set = TRUE;
	UNLOCK_RTC(s);
}
847
/*
 * Register the timer expiration upcall.  Write-once: a second call
 * is silently ignored if a function is already installed.
 */
void
clock_set_timer_func(
	clock_timer_func_t	func)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock.timer_expire == NULL)
		rtclock.timer_expire = func;
	UNLOCK_RTC(s);
}
859
860 \f
861
/*
 * Load the count register and start the clock: program i8254 channel 0
 * (mode set via PIT_NDIVMODE|PIT_READMODE) with clks_per_int, low byte
 * then high byte.
 */
#define RTCLOCK_RESET() { \
	outb(PITCTL_PORT, PIT_C0|PIT_NDIVMODE|PIT_READMODE); 	\
	outb(PITCTR0_PORT, (clks_per_int & 0xff)); 		\
	outb(PITCTR0_PORT, (clks_per_int >> 8)); 		\
}
870
/*
 * Reset the clock device. This causes the realtime clock
 * device to reload its mode and count value (frequency).
 * Note: the CPU should be calibrated
 * before starting the clock for the first time.
 * On SMP, only the master CPU programs the i8254.
 */

void
rtclock_reset(void)
{
	int	s;	/* NOTE(review): spl_t elsewhere in this file */

#if	NCPUS > 1 && !(MP_V1_1 && 0)
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return;
	}
	mp_enable_preemption();
#endif	/* NCPUS > 1 && AT386 && !MP_V1_1 */
	LOCK_RTC(s);
	RTCLOCK_RESET();
	UNLOCK_RTC(s);
}
895
/*
 * Real-time clock device interrupt. Called only on the
 * master processor. Updates the clock time and upcalls
 * into the higher level clock code to deliver alarms.
 *
 * Returns 0 on an HZ-tick boundary, non-zero otherwise (the value of
 * the decremented sub-HZ interrupt counter).
 */
int
rtclock_intr(void)
{
	uint64_t	abstime;
	mach_timespec_t	clock_time;
	int		i;
	spl_t		s;

	/*
	 * Update clock time. Do the update so that the macro
	 * MTS_TO_TS() for reading the mapped time works (e.g.
	 * update in order: mtv_csec, mtv_time.tv_nsec, mtv_time.tv_sec).
	 */
	LOCK_RTC(s);
	/* snapshot the TSC so get_uptime_cycles() measures from this tick */
	rdtsc_hilo(&rtc_last_int_tsc_hi, &rtc_last_int_tsc_lo);
	i = rtclock.time.tv_nsec + rtclock.intr_nsec;
	if (i < NSEC_PER_SEC)
		rtclock.time.tv_nsec = i;
	else {
		/* nanoseconds rolled over a second boundary */
		rtclock.time.tv_nsec = i - NSEC_PER_SEC;
		rtclock.time.tv_sec++;
	}
	/* note time now up to date */
	last_ival = 0;

	rtclock.abstime += rtclock.intr_nsec;
	abstime = rtclock.abstime;
	/* Fire the one-shot timer if its deadline has passed.  The RTC
	 * lock is dropped around the upcall and retaken afterward. */
	if (	rtclock.timer_is_set &&
			rtclock.timer_deadline <= abstime	) {
		rtclock.timer_is_set = FALSE;
		UNLOCK_RTC(s);

		(*rtclock.timer_expire)(abstime);

		LOCK_RTC(s);
	}

	/*
	 * Perform alarm clock processing if needed. The time
	 * passed up is incremented by a half-interrupt tick
	 * to trigger alarms closest to their desired times.
	 * The clock_alarm_intr() routine calls sysclk_setalrm()
	 * before returning if later alarms are pending.
	 */

	if (RtcAlrm && (RtcAlrm->tv_sec < RtcTime->tv_sec ||
			(RtcAlrm->tv_sec == RtcTime->tv_sec &&
			 RtcDelt >= RtcAlrm->tv_nsec - RtcTime->tv_nsec))) {
		clock_time.tv_sec = 0;
		clock_time.tv_nsec = RtcDelt;
		ADD_MACH_TIMESPEC (&clock_time, RtcTime);
		RtcAlrm = 0;
		UNLOCK_RTC(s);
		/*
		 * Call clock_alarm_intr() without RTC-lock.
		 * The lock ordering is always CLOCK-lock
		 * before RTC-lock.
		 */
		clock_alarm_intr(SYSTEM_CLOCK, &clock_time);
		LOCK_RTC(s);
	}

	/*
	 * On a HZ-tick boundary: return 0 and adjust the clock
	 * alarm resolution (if requested). Otherwise return a
	 * non-zero value.
	 */
	if ((i = --rtc_intr_count) == 0) {
		if (rtclock.new_ires) {
			/* apply the resolution requested via sysclk_setattr */
			rtc_setvals(new_clknum, rtclock.new_ires);
			RTCLOCK_RESET();	/* lock clock register */
			rtclock.new_ires = 0;
		}
		rtc_intr_count = rtc_intr_hertz;
	}
	UNLOCK_RTC(s);
	return (i);
}
979
/*
 * Return absolute time since boot (nanoseconds): the tick-granular
 * rtclock.abstime plus the TSC-derived nanoseconds since the last
 * interrupt.  The addition happens after the lock is dropped, on the
 * snapshot values read under the lock.
 */
void
clock_get_uptime(
	uint64_t	*result)
{
	uint32_t	ticks;
	spl_t		s;

	LOCK_RTC(s);
	ticks = get_uptime_cycles();
	*result = rtclock.abstime;
	UNLOCK_RTC(s);

	*result += ticks;
}
994
/*
 * Compute a deadline: current uptime plus (interval * scale_factor)
 * in absolute-time units.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	abstime;

	clock_get_uptime(result);

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result += abstime;
}
1009
/*
 * Convert an interval in scale_factor units to absolute-time units
 * (nanoseconds here): *result = interval * scale_factor.
 */
void
clock_interval_to_absolutetime_interval(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	/* widen before multiplying so the product cannot truncate */
	uint64_t	scaled = interval;

	scaled *= scale_factor;
	*result = scaled;
}
1018
/*
 * Compute a deadline: current uptime plus an absolute-time interval.
 */
void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	clock_get_uptime(result);

	*result += abstime;
}
1028
/*
 * Convert absolute time to nanoseconds.  Identity on this platform:
 * abstime is maintained in nanoseconds (see clock_timebase_info).
 */
void
absolutetime_to_nanoseconds(
	uint64_t		abstime,
	uint64_t		*result)
{
	uint64_t	nanosecs = abstime;	/* 1:1 conversion */

	*result = nanosecs;
}
1036
/*
 * Convert nanoseconds to absolute time.  Identity on this platform:
 * abstime is maintained in nanoseconds (see clock_timebase_info).
 */
void
nanoseconds_to_absolutetime(
	uint64_t		nanoseconds,
	uint64_t		*result)
{
	uint64_t	abstime = nanoseconds;	/* 1:1 conversion */

	*result = abstime;
}
1044
/*
 * measure_delay(microseconds)
 *
 * Measure elapsed time for delay calls
 * Returns microseconds.
 *
 * Microseconds must not be too large since the counter (short)
 * will roll over. Max is about 13 ms. Values smaller than 1 ms are ok.
 * This uses the assumed frequency of the rt clock which is emperically
 * accurate to only about 200 ppm.
 */

int
measure_delay(
	int us)
{
	unsigned int	lsb, val;

	/* program i8254 channel 0 to count down from 0xffff */
	outb(PITCTL_PORT, PIT_C0|PIT_NDIVMODE|PIT_READMODE);
	outb(PITCTR0_PORT, 0xff);	/* set counter to max value */
	outb(PITCTR0_PORT, 0xff);
	delay(us);
	/* latch and read the remaining count, low byte first */
	outb(PITCTL_PORT, PIT_C0);
	lsb = inb(PITCTR0_PORT);
	val = (inb(PITCTR0_PORT) << 8) | lsb;
	val = 0xffff - val;		/* counts consumed by the delay */
	val *= 1000000;			/* counts -> microseconds at CLKNUM Hz */
	val /= CLKNUM;
	return(val);
}
1075
1076 /*
1077 * calibrate_delay(void)
1078 *
1079 * Adjust delaycount. Called from startup before clock is started
1080 * for normal interrupt generation.
1081 */
1082
1083 void
1084 calibrate_delay(void)
1085 {
1086 unsigned val;
1087 int prev = 0;
1088 register int i;
1089
1090 printf("adjusting delay count: %d", delaycount);
1091 for (i=0; i<10; i++) {
1092 prev = delaycount;
1093 /*
1094 * microdata must not be too large since measure_timer
1095 * will not return accurate values if the counter (short)
1096 * rolls over
1097 */
1098 val = measure_delay(microdata);
1099 if (val == 0) {
1100 delaycount *= 2;
1101 } else {
1102 delaycount *= microdata;
1103 delaycount += val-1; /* round up to upper us */
1104 delaycount /= val;
1105 }
1106 if (delaycount <= 0)
1107 delaycount = 1;
1108 if (delaycount != prev)
1109 printf(" %d", delaycount);
1110 }
1111 printf("\n");
1112 }
1113
#if	MACH_KDB
void	test_delay(void);

/*
 * Debugger helper: print measured delays for 0..9 us and 10..100 us
 * (by tens) to sanity-check measure_delay().
 */
void
test_delay(void)
{
	int	i;	/* was "register i;" — implicit int, invalid since C99 */

	for (i = 0; i < 10; i++)
		printf("%d, %d\n", i, measure_delay(i));
	for (i = 10; i <= 100; i += 10)
		printf("%d, %d\n", i, measure_delay(i));
}
#endif	/* MACH_KDB */