/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * File:	i386/rtclock.c
 * Purpose:	Routines for handling the machine dependent
 *		real-time clock.  This clock is generated by
 *		the Intel 8254 Programmable Interval Timer.
 */

#include <cpus.h>
#include <platforms.h>
#include <mp_v1_1.h>
#include <mach_kdb.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/clock.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <machine/mach_param.h>	/* HZ */
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <i386/ipl.h>
#include <i386/pit.h>
#include <i386/pio.h>
#include <i386/misc_protos.h>
#include <i386/rtclock_entries.h>
#include <i386/hardclock_entries.h>

int		sysclk_config(void);

int		sysclk_init(void);

kern_return_t	sysclk_gettime(
			mach_timespec_t		*cur_time);

kern_return_t	sysclk_getattr(
			clock_flavor_t		flavor,
			clock_attr_t		attr,
			mach_msg_type_number_t	*count);

kern_return_t	sysclk_setattr(
			clock_flavor_t		flavor,
			clock_attr_t		attr,
			mach_msg_type_number_t	count);

void		sysclk_setalarm(
			mach_timespec_t		*alarm_time);

extern void (*IOKitRegisterInterruptHook)(void *, int irq, int isclock);

/*
 * Inlines to get timestamp counter value.
 */

static inline void rdtsc_hilo(uint32_t *hi, uint32_t *lo) {
	asm volatile("rdtsc" : "=a" (*lo), "=d" (*hi));
}

static inline uint64_t rdtsc_64(void) {
	uint64_t result;
	asm volatile("rdtsc" : "=A" (result));
	return result;
}
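
/*
 * Usage sketch (illustrative only): measuring an elapsed interval in
 * raw TSC ticks with the inlines above.
 *
 *	uint64_t start, elapsed;
 *
 *	start = rdtsc_64();
 *	...
 *	elapsed = rdtsc_64() - start;
 *
 * RDTSC returns the 64-bit timestamp in the EDX:EAX register pair; the
 * "=A" constraint binds a 64-bit C value to that pair, while
 * rdtsc_hilo() captures the two halves separately via "=a" and "=d".
 */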

/*
 * Lists of clock routines.
 */
struct clock_ops sysclk_ops = {
	sysclk_config,		sysclk_init,
	sysclk_gettime,		0,
	sysclk_getattr,		sysclk_setattr,
	sysclk_setalarm,
};

int		calend_config(void);

int		calend_init(void);

kern_return_t	calend_gettime(
			mach_timespec_t		*cur_time);

kern_return_t	calend_settime(
			mach_timespec_t		*cur_time);

kern_return_t	calend_getattr(
			clock_flavor_t		flavor,
			clock_attr_t		attr,
			mach_msg_type_number_t	*count);

struct clock_ops calend_ops = {
	calend_config,		calend_init,
	calend_gettime,		calend_settime,
	calend_getattr,		0,
	0,
};

/* local data declarations */
mach_timespec_t	*RtcTime = (mach_timespec_t *)0;
mach_timespec_t	*RtcAlrm;
clock_res_t	RtcDelt;

/* global data declarations */
struct {
	uint64_t		abstime;

	mach_timespec_t		time;
	mach_timespec_t		alarm_time;	/* time of next alarm */

	mach_timespec_t		calend_offset;
	boolean_t		calend_is_set;

	uint64_t		timer_deadline;
	boolean_t		timer_is_set;
	clock_timer_func_t	timer_expire;

	clock_res_t		new_ires;	/* pending new resolution (nsec) */
	clock_res_t		intr_nsec;	/* interrupt resolution (nsec) */

	decl_simple_lock_data(,lock)		/* real-time clock device lock */
} rtclock;

unsigned int	clknum;			/* clks per second */
unsigned int	new_clknum;		/* pending clknum */
unsigned int	time_per_clk;		/* time per clk in ZHZ */
unsigned int	clks_per_int;		/* clks per interrupt */
unsigned int	clks_per_int_99;
int		rtc_intr_count;		/* interrupt counter */
int		rtc_intr_hertz;		/* interrupts per HZ */
int		rtc_intr_freq;		/* interrupt frequency */
int		rtc_print_lost_tick;	/* print lost tick */

uint32_t	rtc_cyc_per_sec;	/* processor cycles per second */
uint32_t	rtc_last_int_tsc_lo;	/* TSC value saved at each interrupt */
uint32_t	rtc_last_int_tsc_hi;

/*
 * Macros to lock/unlock real-time clock device.
 */
#define LOCK_RTC(s)				\
MACRO_BEGIN					\
	(s) = splclock();			\
	simple_lock(&rtclock.lock);		\
MACRO_END

#define UNLOCK_RTC(s)				\
MACRO_BEGIN					\
	simple_unlock(&rtclock.lock);		\
	splx(s);				\
MACRO_END

/*
 * i8254 control.  ** MONUMENT **
 *
 * The i8254 is a traditional PC device with some arbitrary characteristics.
 * Basically, it is a register that counts at a fixed rate and can be
 * programmed to generate an interrupt every N counts.  The count rate is
 * clknum counts per second (see pit.h), historically 1193167 we believe.
 * Various constants are computed based on this value, and we calculate
 * them at init time for execution efficiency.  To obtain sufficient
 * accuracy, some of the calculations are most easily done in floating
 * point and then converted to int.
 *
 * We want an interrupt every 10 milliseconds, approximately.  The count
 * which will do that is clks_per_int.  However, that many counts is not
 * *exactly* 10 milliseconds; it is a bit more or less depending on
 * roundoff.  The actual time per tick is calculated and saved in
 * rtclock.intr_nsec, and it is that value which is added to the time
 * register on each tick.
 *
 * The i8254 counter can be read between interrupts in order to determine
 * the time more accurately.  The counter counts down from the preset value
 * toward 0, and we have to handle the case where the counter has been
 * reset just before being read and before the interrupt has been serviced.
 * Given a count since the last interrupt, the time since then is given
 * by (count * time_per_clk).  In order to minimize integer truncation,
 * we perform this calculation in an arbitrary unit of time which maintains
 * the maximum precision, i.e. such that one tick is 1.0e9 of these units,
 * or close to the precision of a 32-bit int.  We then divide by this unit
 * (which doesn't lose precision) to get nanoseconds.  For notation
 * purposes, this unit is defined as ZHZ = zanoseconds per nanosecond.
 *
 * The sequence that does all this is in sysclk_gettime.  For efficiency,
 * that sequence also needs the value that the counter will have if it has
 * just overflowed, so we precompute that as well.  ALSO, certain platforms
 * (specifically the DEC XL5100) have been observed to have problems
 * with latching the counter, and they occasionally (say, one out of
 * 100,000 times) return a bogus value.  Hence, the present code reads
 * the counter twice and checks for a consistent pair of values.
 *
 * Some attributes of the rt clock can be changed, including the
 * interrupt resolution.  We default to the minimum resolution (10 ms),
 * but allow a finer resolution to be requested.  The assumed frequency
 * of the clock can also be set since it appears that the actual
 * frequency of real-world hardware can vary from the nominal by
 * 200 ppm or more.  When the frequency is set, the values above are
 * recomputed and we continue without resetting or changing anything else.
 */
#define RTC_MINRES	(NSEC_PER_SEC / HZ)	/* nsec per tick */
#define RTC_MAXRES	(RTC_MINRES / 20)	/* nsec per tick */
#define	ZANO		(1000000000)
#define	ZHZ		(ZANO / (NSEC_PER_SEC / HZ))
#define READ_8254(val) { \
	outb(PITCTL_PORT, PIT_C0); \
	(val) = inb(PITCTR0_PORT); \
	(val) |= inb(PITCTR0_PORT) << 8 ; }

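/*
 * Worked example of the derived constants (illustrative; assumes the
 * historical CLKNUM of 1193167 and HZ == 100):
 *
 *	RTC_MINRES	= 1000000000 / 100	 = 10000000 nsec (10 ms)
 *	RTC_MAXRES	= 10000000 / 20		 = 500000 nsec (0.5 ms)
 *	ZHZ		= 1000000000 / 10000000	 = 100 zano-units per nsec
 *	rtc_intr_freq	= 1000000000 / 10000000	 = 100 interrupts/sec
 *	clks_per_int	= (1193167 + 50) / 100	 = 11932 counts/interrupt
 *	time_per_clk	~ 838.105647 nsec * 100	 = 83810 zano-units/count
 *	intr_nsec	~ 11932 * 838.105647	 = 10000276 nsec/tick
 *
 * These are the same figures quoted in the comments of rtc_setvals()
 * below; the actual values are recomputed there at init time.
 */
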
/*
 * Calibration delay counts.
 */
unsigned int	delaycount = 100;
unsigned int	microdata = 50;

/*
 * Forward decl.
 */

extern int measure_delay(int us);
void rtc_setvals( unsigned int, clock_res_t );

static void rtc_set_cyc_per_sec();

/*
 * Initialize non-zero clock structure values.
 */
void
rtc_setvals(
	unsigned int	new_clknum,
	clock_res_t	new_ires
	)
{
	unsigned int	timeperclk;
	unsigned int	scale0;
	unsigned int	scale1;
	unsigned int	res;

	clknum = new_clknum;
	rtc_intr_freq = (NSEC_PER_SEC / new_ires);
	rtc_intr_hertz = rtc_intr_freq / HZ;
	clks_per_int = (clknum + (rtc_intr_freq / 2)) / rtc_intr_freq;
	clks_per_int_99 = clks_per_int - clks_per_int/100;

	/*
	 * The following calculations are done with scaling integer operations
	 * in order that the integer results are accurate to the lsb.
	 */
	timeperclk = div_scale(ZANO, clknum, &scale0);		/* 838.105647 nsec */

	time_per_clk = mul_scale(ZHZ, timeperclk, &scale1);	/* 83810 */
	if (scale0 > scale1)
		time_per_clk >>= (scale0 - scale1);
	else if (scale0 < scale1)
		panic("rtc_clock: time_per_clk overflow\n");

	/*
	 * Notice that rtclock.intr_nsec is signed ==> use unsigned int res
	 */
	res = mul_scale(clks_per_int, timeperclk, &scale1);	/* 10000276 */
	if (scale0 > scale1)
		rtclock.intr_nsec = res >> (scale0 - scale1);
	else
		panic("rtc_clock: rtclock.intr_nsec overflow\n");

	rtc_intr_count = 1;
	RtcDelt = rtclock.intr_nsec/2;
}

/*
 * Configure the real-time clock device.  Return success (1)
 * or failure (0).
 */

int
sysclk_config(void)
{
	int	RtcFlag;
	int	pic;

#if	NCPUS > 1
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return(1);
	}
	mp_enable_preemption();
#endif
	/*
	 * Setup device.
	 */
#if	MP_V1_1
	{
		extern boolean_t	mp_v1_1_initialized;
		if (mp_v1_1_initialized)
			pic = 2;
		else
			pic = 0;
	}
#else
	pic = 0;	/* FIXME .. interrupt registration moved to AppleIntelClock */
#endif


	/*
	 * We should attempt to test the real-time clock
	 * device here.  If it were to fail, we should panic
	 * the system.
	 */
	RtcFlag = /* test device */1;
	printf("realtime clock configured\n");

	simple_lock_init(&rtclock.lock, ETAP_NO_TRACE);
	return (RtcFlag);
}

/*
 * Initialize the real-time clock device.  Return success (1)
 * or failure (0).  Since the real-time clock is required to
 * provide canonical mapped time, we allocate a page to keep
 * the clock time value.  In addition, various variables used
 * to support the clock are initialized.  Note: the clock is
 * not started until rtclock_reset is called.
 */
int
sysclk_init(void)
{
	vm_offset_t	*vp;
#if	NCPUS > 1
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return(1);
	}
	mp_enable_preemption();
#endif

	RtcTime = &rtclock.time;
	rtc_setvals( CLKNUM, RTC_MINRES );	/* compute constants */
	rtc_set_cyc_per_sec();			/* compute number of tsc beats per second */
	return (1);
}

static volatile unsigned int last_ival = 0;

/*
 * Get the clock device time.  This routine is responsible
 * for converting the device's machine dependent time value
 * into a canonical mach_timespec_t value.
 */
kern_return_t
sysclk_gettime(
	mach_timespec_t	*cur_time)	/* OUT */
{
	mach_timespec_t	itime = {0, 0};
	unsigned int	val, val2;
	int		s;

	if (!RtcTime) {
		/* Uninitialized */
		cur_time->tv_nsec = 0;
		cur_time->tv_sec = 0;
		return (KERN_SUCCESS);
	}

	/*
	 * Inhibit interrupts.  Determine the incremental
	 * time since the last interrupt.  (This could be
	 * done in assembler for a bit more speed).
	 */
	LOCK_RTC(s);
	do {
		READ_8254(val);		/* read clock */
		READ_8254(val2);	/* read clock */
	} while ( val2 > val || val2 < val - 10 );
	if ( val > clks_per_int_99 ) {
		outb( 0x0a, 0x20 );	/* see if interrupt pending */
		if ( inb( 0x20 ) & 1 )
			itime.tv_nsec = rtclock.intr_nsec;	/* yes, add a tick */
	}
	itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ;
	if ( itime.tv_nsec < last_ival ) {
		if (rtc_print_lost_tick)
			printf( "rtclock: missed clock interrupt.\n" );
	}
	last_ival = itime.tv_nsec;
	cur_time->tv_sec = rtclock.time.tv_sec;
	cur_time->tv_nsec = rtclock.time.tv_nsec;
	UNLOCK_RTC(s);
	ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime));
	return (KERN_SUCCESS);
}
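
/*
 * Worked example of the incremental-time calculation above (illustrative;
 * uses the CLKNUM == 1193167, HZ == 100 figures, so clks_per_int == 11932
 * and time_per_clk == 83810):
 *
 *	A raw counter reading of val == 6000 means 11932 - 6000 == 5932
 *	counts have elapsed since the last tick, i.e.
 *
 *		(5932 * 83810) / ZHZ = 497160920 / 100 ~ 4971609 nsec,
 *
 *	or roughly 4.97 ms into the current 10 ms tick.  The double
 *	READ_8254() loop guards against the occasional bogus latched
 *	value described in the i8254 comment block above.
 */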

kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*cur_time)	/* OUT */
{
	mach_timespec_t	itime = {0, 0};
	unsigned int	val, val2;

	if (!RtcTime) {
		/* Uninitialized */
		cur_time->tv_nsec = 0;
		cur_time->tv_sec = 0;
		return (KERN_SUCCESS);
	}

	/*
	 * Determine the incremental time since the last interrupt.
	 * Unlike sysclk_gettime(), this variant does no locking;
	 * callers are responsible for any needed synchronization.
	 * (This could be done in assembler for a bit more speed).
	 */
	do {
		READ_8254(val);		/* read clock */
		READ_8254(val2);	/* read clock */
	} while ( val2 > val || val2 < val - 10 );
	if ( val > clks_per_int_99 ) {
		outb( 0x0a, 0x20 );	/* see if interrupt pending */
		if ( inb( 0x20 ) & 1 )
			itime.tv_nsec = rtclock.intr_nsec;	/* yes, add a tick */
	}
	itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ;
	if ( itime.tv_nsec < last_ival ) {
		if (rtc_print_lost_tick)
			printf( "rtclock: missed clock interrupt.\n" );
	}
	last_ival = itime.tv_nsec;
	cur_time->tv_sec = rtclock.time.tv_sec;
	cur_time->tv_nsec = rtclock.time.tv_nsec;
	ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime));
	return (KERN_SUCCESS);
}

/*
 * Get the clock device time when ALL interrupts are already disabled.
 * Same as above except for turning interrupts off and on.
 * This routine is responsible for converting the device's machine dependent
 * time value into a canonical mach_timespec_t value.
 */
void
sysclk_gettime_interrupts_disabled(
	mach_timespec_t	*cur_time)	/* OUT */
{
	mach_timespec_t	itime = {0, 0};
	unsigned int	val;

	if (!RtcTime) {
		/* Uninitialized */
		cur_time->tv_nsec = 0;
		cur_time->tv_sec = 0;
		return;
	}

	simple_lock(&rtclock.lock);

	/*
	 * Copy the current time knowing that we can't be interrupted
	 * between the two longwords and so don't need to use MTS_TO_TS.
	 */
	READ_8254(val);		/* read clock */
	if ( val > clks_per_int_99 ) {
		outb( 0x0a, 0x20 );	/* see if interrupt pending */
		if ( inb( 0x20 ) & 1 )
			itime.tv_nsec = rtclock.intr_nsec;	/* yes, add a tick */
	}
	itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ;
	if ( itime.tv_nsec < last_ival ) {
		if (rtc_print_lost_tick)
			printf( "rtclock: missed clock interrupt.\n" );
	}
	last_ival = itime.tv_nsec;
	cur_time->tv_sec = rtclock.time.tv_sec;
	cur_time->tv_nsec = rtclock.time.tv_nsec;
	ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime));

	simple_unlock(&rtclock.lock);
}

// utility routine
// Code to calculate how many processor cycles are in a second...

static void
rtc_set_cyc_per_sec()
{

	int x, y;
	uint64_t cycles;
	uint32_t c[15];			// array for holding sampled cycle counts
	mach_timespec_t tst[15];	// array for holding time values.  NOTE: for some reason tv_sec does not work here

	for (x = 0; x < 15; x++) {	// quick sample 15 times
		tst[x].tv_sec = 0;
		tst[x].tv_nsec = 0;
		sysclk_gettime_internal(&tst[x]);
		rdtsc_hilo(&y, &c[x]);
	}
	y = 0;
	cycles = 0;
	for (x = 0; x < 14; x++) {
		// Simple formula, really: the numerator is the number of elapsed
		// processor cycles scaled by NSEC_PER_SEC, the denominator is the
		// elapsed "real" time in nanoseconds.  The result is the processor
		// frequency in cycles per second.  Any sample pair that would
		// overflow or run backwards is discarded before it is added in.
		if ((c[x+1] > c[x]) && (tst[x+1].tv_nsec > tst[x].tv_nsec)) {
			cycles += ((uint64_t)(c[x+1] - c[x]) * NSEC_PER_SEC) / (uint64_t)(tst[x+1].tv_nsec - tst[x].tv_nsec);	// elapsed nsecs
			y += 1;
		}
	}
	if (y > 0) {	// we got at least one valid sample; this also covers the case where the clock isn't running
		cycles = cycles / y;	// calc our average
	}
	rtc_cyc_per_sec = cycles;
	rdtsc_hilo(&rtc_last_int_tsc_hi, &rtc_last_int_tsc_lo);
}
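
/*
 * Example of the averaging above (illustrative figures only): if two
 * adjacent samples are 4000 TSC ticks and 10000 nsec apart, that pair
 * contributes
 *
 *	(4000 * NSEC_PER_SEC) / 10000 = 400000000
 *
 * i.e. a 400 MHz estimate; rtc_cyc_per_sec is the average over all of
 * the sample pairs that survive the sanity checks.
 */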

static
natural_t
get_uptime_cycles(void)
{
	// Get the time since the last interrupt based on the processor's TSC,
	// ignoring the RTC for speed.  The TSC delta is converted to
	// nanoseconds as (delta * NSEC_PER_SEC) / rtc_cyc_per_sec, using a
	// 32x32->64-bit multiply followed by a 64/32-bit divide.

	uint32_t	a, d, intermediate_lo, intermediate_hi, result;
	uint64_t	newTime;

	rdtsc_hilo(&d, &a);
	if (d != rtc_last_int_tsc_hi) {
		newTime = d - rtc_last_int_tsc_hi;
		newTime = (newTime << 32) + (a - rtc_last_int_tsc_lo);
		result = newTime;
	} else {
		result = a - rtc_last_int_tsc_lo;
	}
	__asm__ volatile ( " mul %3 ": "=eax" (intermediate_lo), "=edx" (intermediate_hi): "a"(result), "d"(NSEC_PER_SEC) );
	__asm__ volatile ( " div %3": "=eax" (result): "eax"(intermediate_lo), "edx" (intermediate_hi), "ecx" (rtc_cyc_per_sec) );
	return result;
}
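
/*
 * Equivalent plain-C arithmetic (sketch only; the inline assembly above
 * performs the same computation with a 32x32->64 multiply and a 64/32
 * divide):
 *
 *	elapsed_ns = (natural_t)(((uint64_t)tsc_delta * NSEC_PER_SEC)
 *						/ rtc_cyc_per_sec);
 *
 * where tsc_delta stands for the truncated TSC difference computed in
 * "result" just before the multiply.
 */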


/*
 * Get clock device attributes.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
#if	(NCPUS == 1 || (MP_V1_1 && 0))
		LOCK_RTC(s);
		*(clock_res_t *) attr = 1000;
		UNLOCK_RTC(s);
		break;
#endif	/* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */
	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
		LOCK_RTC(s);
		*(clock_res_t *) attr = rtclock.intr_nsec;
		UNLOCK_RTC(s);
		break;

	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = RTC_MAXRES;
		break;

	case CLOCK_ALARM_MINRES:
		*(clock_res_t *) attr = RTC_MINRES;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}

/*
 * Set clock device attributes.
 */
kern_return_t
sysclk_setattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* IN */
	mach_msg_type_number_t	count)		/* IN */
{
	spl_t		s;
	int		freq;
	int		adj;
	clock_res_t	new_ires;

	if (count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:
	case CLOCK_ALARM_MAXRES:
	case CLOCK_ALARM_MINRES:
		return (KERN_FAILURE);

	case CLOCK_ALARM_CURRES:
		new_ires = *(clock_res_t *) attr;

		/*
		 * The new resolution must be within the predetermined
		 * range.  If the desired resolution cannot be achieved
		 * to within 0.1%, an error is returned.
		 */
		if (new_ires < RTC_MAXRES || new_ires > RTC_MINRES)
			return (KERN_INVALID_VALUE);
		freq = (NSEC_PER_SEC / new_ires);
		adj = (((clknum % freq) * new_ires) / clknum);
		if (adj > (new_ires / 1000))
			return (KERN_INVALID_VALUE);
		/*
		 * Record the new alarm resolution which will take effect
		 * on the next HZ aligned clock tick.
		 */
		LOCK_RTC(s);
		if ( freq != rtc_intr_freq ) {
			rtclock.new_ires = new_ires;
			new_clknum = clknum;
		}
		UNLOCK_RTC(s);
		return (KERN_SUCCESS);

	default:
		return (KERN_INVALID_VALUE);
	}
}

/*
 * Set next alarm time for the clock device.  This call
 * always resets the time to deliver an alarm for the
 * clock.
 */
void
sysclk_setalarm(
	mach_timespec_t	*alarm_time)
{
	spl_t	s;

	LOCK_RTC(s);
	rtclock.alarm_time = *alarm_time;
	RtcAlrm = &rtclock.alarm_time;
	UNLOCK_RTC(s);
}

/*
 * Configure the calendar clock.
 */
int
calend_config(void)
{
	return bbc_config();
}

/*
 * Initialize calendar clock.
 */
int
calend_init(void)
{
	return (1);
}

/*
 * Get the current clock time.
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*cur_time)	/* OUT */
{
	spl_t	s;

	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		UNLOCK_RTC(s);
		return (KERN_FAILURE);
	}

	(void) sysclk_gettime_internal(cur_time);
	ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
	UNLOCK_RTC(s);

	return (KERN_SUCCESS);
}

/*
 * Set the current clock time.
 */
kern_return_t
calend_settime(
	mach_timespec_t	*new_time)
{
	mach_timespec_t	curr_time;
	spl_t		s;

	LOCK_RTC(s);
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset = *new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	UNLOCK_RTC(s);

	(void) bbc_settime(new_time);

	return (KERN_SUCCESS);
}

/*
 * Get clock device attributes.
 */
kern_return_t
calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:	/* >0 res */
#if	(NCPUS == 1 || (MP_V1_1 && 0))
		LOCK_RTC(s);
		*(clock_res_t *) attr = 1000;
		UNLOCK_RTC(s);
		break;
#else	/* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */
		LOCK_RTC(s);
		*(clock_res_t *) attr = rtclock.intr_nsec;
		UNLOCK_RTC(s);
		break;
#endif	/* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */

	case CLOCK_ALARM_CURRES:	/* =0 no alarm */
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}

void
clock_adjust_calendar(
	clock_res_t	nsec)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock.calend_is_set)
		ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
	UNLOCK_RTC(s);
}

void
clock_initialize_calendar(void)
{
	mach_timespec_t	bbc_time, curr_time;
	spl_t		s;

	if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
		return;

	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		(void) sysclk_gettime_internal(&curr_time);
		rtclock.calend_offset = bbc_time;
		SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
		rtclock.calend_is_set = TRUE;
	}
	UNLOCK_RTC(s);
}

mach_timespec_t
clock_get_calendar_offset(void)
{
	mach_timespec_t	result = MACH_TIMESPEC_ZERO;
	spl_t		s;

	LOCK_RTC(s);
	if (rtclock.calend_is_set)
		result = rtclock.calend_offset;
	UNLOCK_RTC(s);

	return (result);
}

void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	spl_t	s;

	LOCK_RTC(s);
	info->numer = info->denom = 1;
	UNLOCK_RTC(s);
}

void
clock_set_timer_deadline(
	uint64_t	deadline)
{
	spl_t	s;

	LOCK_RTC(s);
	rtclock.timer_deadline = deadline;
	rtclock.timer_is_set = TRUE;
	UNLOCK_RTC(s);
}

void
clock_set_timer_func(
	clock_timer_func_t	func)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock.timer_expire == NULL)
		rtclock.timer_expire = func;
	UNLOCK_RTC(s);
}


/*
 * Load the count register and start the clock.
 */
#define RTCLOCK_RESET() {					\
	outb(PITCTL_PORT, PIT_C0|PIT_NDIVMODE|PIT_READMODE);	\
	outb(PITCTR0_PORT, (clks_per_int & 0xff));		\
	outb(PITCTR0_PORT, (clks_per_int >> 8));		\
}
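
/*
 * The control word written above selects counter 0, the LSB-then-MSB
 * load sequence (PIT_READMODE), and the divide-by-N counting mode
 * (PIT_NDIVMODE); the two writes that follow load the 16-bit count,
 * low byte first.  With clks_per_int loaded, the i8254 raises an
 * interrupt every clks_per_int input clocks, i.e. roughly every 10 ms
 * at the default resolution.
 */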

/*
 * Reset the clock device.  This causes the realtime clock
 * device to reload its mode and count value (frequency).
 * Note: the CPU should be calibrated
 * before starting the clock for the first time.
 */

void
rtclock_reset(void)
{
	int	s;

#if	NCPUS > 1 && !(MP_V1_1 && 0)
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return;
	}
	mp_enable_preemption();
#endif	/* NCPUS > 1 && AT386 && !MP_V1_1 */
	LOCK_RTC(s);
	RTCLOCK_RESET();
	UNLOCK_RTC(s);
}

/*
 * Real-time clock device interrupt.  Called only on the
 * master processor.  Updates the clock time and upcalls
 * into the higher level clock code to deliver alarms.
 */
int
rtclock_intr(void)
{
	uint64_t	abstime;
	mach_timespec_t	clock_time;
	int		i;
	spl_t		s;

	/*
	 * Update clock time.  Do the update so that the macro
	 * MTS_TO_TS() for reading the mapped time works (e.g.
	 * update in order: mtv_csec, mtv_time.tv_nsec, mtv_time.tv_sec).
	 */
	LOCK_RTC(s);
	rdtsc_hilo(&rtc_last_int_tsc_hi, &rtc_last_int_tsc_lo);
	i = rtclock.time.tv_nsec + rtclock.intr_nsec;
	if (i < NSEC_PER_SEC)
		rtclock.time.tv_nsec = i;
	else {
		rtclock.time.tv_nsec = i - NSEC_PER_SEC;
		rtclock.time.tv_sec++;
	}
	/* note time now up to date */
	last_ival = 0;

	rtclock.abstime += rtclock.intr_nsec;
	abstime = rtclock.abstime;
	if (	rtclock.timer_is_set &&
			rtclock.timer_deadline <= abstime	) {
		rtclock.timer_is_set = FALSE;
		UNLOCK_RTC(s);

		(*rtclock.timer_expire)(abstime);

		LOCK_RTC(s);
	}

	/*
	 * Perform alarm clock processing if needed.  The time
	 * passed up is incremented by a half-interrupt tick
	 * to trigger alarms closest to their desired times.
	 * The clock_alarm_intr() routine calls sysclk_setalarm()
	 * before returning if later alarms are pending.
	 */

	if (RtcAlrm && (RtcAlrm->tv_sec < RtcTime->tv_sec ||
			(RtcAlrm->tv_sec == RtcTime->tv_sec &&
			 RtcDelt >= RtcAlrm->tv_nsec - RtcTime->tv_nsec))) {
		clock_time.tv_sec = 0;
		clock_time.tv_nsec = RtcDelt;
		ADD_MACH_TIMESPEC (&clock_time, RtcTime);
		RtcAlrm = 0;
		UNLOCK_RTC(s);
		/*
		 * Call clock_alarm_intr() without the RTC lock held.
		 * The lock ordering is always CLOCK lock
		 * before RTC lock.
		 */
		clock_alarm_intr(SYSTEM_CLOCK, &clock_time);
		LOCK_RTC(s);
	}

	/*
	 * On a HZ-tick boundary: return 0 and adjust the clock
	 * alarm resolution (if requested).  Otherwise return a
	 * non-zero value.
	 */
	if ((i = --rtc_intr_count) == 0) {
		if (rtclock.new_ires) {
			rtc_setvals(new_clknum, rtclock.new_ires);
			RTCLOCK_RESET();	/* load clock register */
			rtclock.new_ires = 0;
		}
		rtc_intr_count = rtc_intr_hertz;
	}
	UNLOCK_RTC(s);
	return (i);
}

void
clock_get_uptime(
	uint64_t	*result)
{
	uint32_t	ticks;
	spl_t		s;

	LOCK_RTC(s);
	ticks = get_uptime_cycles();
	*result = rtclock.abstime;
	UNLOCK_RTC(s);

	*result += ticks;
}

void
clock_interval_to_deadline(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t	abstime;

	clock_get_uptime(result);

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result += abstime;
}

void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	*result = (uint64_t)interval * scale_factor;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t	abstime,
	uint64_t	*result)
{
	clock_get_uptime(result);

	*result += abstime;
}

void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	*result = abstime;
}

void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	*result = nanoseconds;
}

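/*
 * Usage sketch (illustrative): in this implementation absolute time is
 * simply nanoseconds (see the identity conversions above), so the scale
 * factor passed to clock_interval_to_deadline() is a nanosecond
 * multiplier.  For example, arming the timer 100 ms from now:
 *
 *	uint64_t deadline;
 *
 *	clock_interval_to_deadline(100, 1000000, &deadline);
 *	clock_set_timer_deadline(deadline);
 *
 * The deadline is then checked against rtclock.abstime on each tick in
 * rtclock_intr(), which calls the registered timer_expire function once
 * the deadline has passed.
 */
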
/*
 * measure_delay(microseconds)
 *
 * Measure elapsed time for delay calls.
 * Returns microseconds.
 *
 * Microseconds must not be too large since the counter (short)
 * will roll over.  Max is about 13 ms.  Values smaller than 1 ms are ok.
 * This uses the assumed frequency of the rt clock, which is empirically
 * accurate to only about 200 ppm.
 */

int
measure_delay(
	int us)
{
	unsigned int	lsb, val;

	outb(PITCTL_PORT, PIT_C0|PIT_NDIVMODE|PIT_READMODE);
	outb(PITCTR0_PORT, 0xff);	/* set counter to max value */
	outb(PITCTR0_PORT, 0xff);
	delay(us);
	outb(PITCTL_PORT, PIT_C0);
	lsb = inb(PITCTR0_PORT);
	val = (inb(PITCTR0_PORT) << 8) | lsb;
	val = 0xffff - val;
	val *= 1000000;
	val /= CLKNUM;
	return(val);
}
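
/*
 * Worked example (illustrative; assumes CLKNUM == 1193167): if the
 * counter has decremented by 1193 counts while delay(us) ran, the
 * elapsed time reported is
 *
 *	(1193 * 1000000) / CLKNUM = 999 microseconds (roughly 1 ms).
 *
 * The counter is only 16 bits wide, hence the caveat above about
 * keeping the measured interval small enough that it cannot wrap.
 */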

/*
 * calibrate_delay(void)
 *
 * Adjust delaycount.  Called from startup before clock is started
 * for normal interrupt generation.
 */

void
calibrate_delay(void)
{
	unsigned	val;
	int		prev = 0;
	register int	i;

	printf("adjusting delay count: %d", delaycount);
	for (i = 0; i < 10; i++) {
		prev = delaycount;
		/*
		 * microdata must not be too large since measure_delay
		 * will not return accurate values if the counter (short)
		 * rolls over
		 */
		val = measure_delay(microdata);
		if (val == 0) {
			delaycount *= 2;
		} else {
			delaycount *= microdata;
			delaycount += val - 1;	/* round up to upper us */
			delaycount /= val;
		}
		if (delaycount <= 0)
			delaycount = 1;
		if (delaycount != prev)
			printf(" %d", delaycount);
	}
	printf("\n");
}
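
/*
 * Example of one adjustment step (illustrative figures): with
 * delaycount == 100 and microdata == 50, if measure_delay(50) reports
 * that the spin actually took 25 microseconds, the new count becomes
 *
 *	(100 * 50 + 25 - 1) / 25 = 200,
 *
 * i.e. the loop count is scaled so that a future delay(microdata)
 * should come out close to microdata microseconds, rounding up so the
 * delay errs on the long side.
 */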

#if	MACH_KDB
void
test_delay(void);

void
test_delay(void)
{
	register i;

	for (i = 0; i < 10; i++)
		printf("%d, %d\n", i, measure_delay(i));
	for (i = 10; i <= 100; i += 10)
		printf("%d, %d\n", i, measure_delay(i));
}
#endif	/* MACH_KDB */