]>
Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
91447636 | 2 | * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. |
1c79356b A |
3 | * |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
ff6e181a A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. Please obtain a copy of the License at | |
10 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
11 | * file. | |
1c79356b | 12 | * |
ff6e181a A |
13 | * The Original Code and all software distributed under the License are |
14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
1c79356b A |
15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
ff6e181a A |
17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
18 | * Please see the License for the specific language governing rights and | |
19 | * limitations under the License. | |
1c79356b A |
20 | * |
21 | * @APPLE_LICENSE_HEADER_END@ | |
22 | */ | |
23 | /* | |
24 | * @OSF_COPYRIGHT@ | |
25 | */ | |
26 | ||
27 | /* | |
28 | * File: i386/rtclock.c | |
29 | * Purpose: Routines for handling the machine dependent | |
91447636 A |
30 | * real-time clock. Historically, this clock is |
31 | * generated by the Intel 8254 Programmable Interval | |
32 | * Timer, but local apic timers are now used for | |
33 | * this purpose with the master time reference being | |
34 | * the cpu clock counted by the timestamp MSR. | |
1c79356b A |
35 | */ |
36 | ||
1c79356b | 37 | #include <platforms.h> |
1c79356b | 38 | #include <mach_kdb.h> |
55e303ae A |
39 | |
40 | #include <mach/mach_types.h> | |
41 | ||
1c79356b | 42 | #include <kern/cpu_data.h> |
91447636 | 43 | #include <kern/cpu_number.h> |
1c79356b | 44 | #include <kern/clock.h> |
55e303ae | 45 | #include <kern/host_notify.h> |
1c79356b A |
46 | #include <kern/macro_help.h> |
47 | #include <kern/misc_protos.h> | |
48 | #include <kern/spl.h> | |
91447636 | 49 | #include <kern/assert.h> |
1c79356b A |
50 | #include <mach/vm_prot.h> |
51 | #include <vm/pmap.h> | |
52 | #include <vm/vm_kern.h> /* for kernel_map */ | |
53 | #include <i386/ipl.h> | |
54 | #include <i386/pit.h> | |
55 | #include <i386/pio.h> | |
56 | #include <i386/misc_protos.h> | |
55e303ae A |
57 | #include <i386/proc_reg.h> |
58 | #include <i386/machine_cpu.h> | |
91447636 A |
59 | #include <i386/mp.h> |
60 | #include <i386/cpuid.h> | |
61 | #include <i386/cpu_data.h> | |
62 | #include <i386/cpu_threads.h> | |
63 | #include <i386/perfmon.h> | |
64 | #include <i386/machine_routines.h> | |
65 | #include <i386/AT386/bbclock_entries.h> | |
55e303ae | 66 | #include <pexpert/pexpert.h> |
91447636 A |
67 | #include <machine/limits.h> |
68 | #include <machine/commpage.h> | |
69 | #include <sys/kdebug.h> | |
70 | ||
71 | #define MAX(a,b) (((a)>(b))?(a):(b)) | |
72 | #define MIN(a,b) (((a)>(b))?(b):(a)) | |
55e303ae | 73 | |
91447636 A |
74 | #define NSEC_PER_HZ (NSEC_PER_SEC / 100) /* nsec per tick */ |
75 | ||
76 | #define UI_CPUFREQ_ROUNDING_FACTOR 10000000 | |
1c79356b A |
77 | |
78 | int sysclk_config(void); | |
79 | ||
80 | int sysclk_init(void); | |
81 | ||
82 | kern_return_t sysclk_gettime( | |
83 | mach_timespec_t *cur_time); | |
84 | ||
85 | kern_return_t sysclk_getattr( | |
86 | clock_flavor_t flavor, | |
87 | clock_attr_t attr, | |
88 | mach_msg_type_number_t *count); | |
89 | ||
1c79356b A |
90 | void sysclk_setalarm( |
91 | mach_timespec_t *alarm_time); | |
92 | ||
1c79356b A |
93 | /* |
94 | * Lists of clock routines. | |
95 | */ | |
96 | struct clock_ops sysclk_ops = { | |
97 | sysclk_config, sysclk_init, | |
98 | sysclk_gettime, 0, | |
91447636 | 99 | sysclk_getattr, 0, |
1c79356b A |
100 | sysclk_setalarm, |
101 | }; | |
102 | ||
103 | int calend_config(void); | |
104 | ||
105 | int calend_init(void); | |
106 | ||
107 | kern_return_t calend_gettime( | |
108 | mach_timespec_t *cur_time); | |
109 | ||
1c79356b A |
110 | kern_return_t calend_getattr( |
111 | clock_flavor_t flavor, | |
112 | clock_attr_t attr, | |
113 | mach_msg_type_number_t *count); | |
114 | ||
115 | struct clock_ops calend_ops = { | |
116 | calend_config, calend_init, | |
55e303ae | 117 | calend_gettime, 0, |
1c79356b A |
118 | calend_getattr, 0, |
119 | 0, | |
120 | }; | |
121 | ||
122 | /* local data declarations */ | |
1c79356b | 123 | |
91447636 A |
124 | static clock_timer_func_t rtclock_timer_expire; |
125 | ||
126 | static timer_call_data_t rtclock_alarm_timer; | |
1c79356b | 127 | |
91447636 A |
128 | static void rtclock_alarm_expire( |
129 | timer_call_param_t p0, | |
130 | timer_call_param_t p1); | |
1c79356b | 131 | |
91447636 A |
/*
 * Consolidated real-time clock state, guarded by rtclock.lock
 * (taken via the RTC_LOCK/RTC_UNLOCK macros below).
 */
struct {
	mach_timespec_t		calend_offset;		/* calendar (wall-clock) offset — presumably relative to uptime; confirm */
	boolean_t		calend_is_set;		/* TRUE once the calendar time has been set */

	int64_t			calend_adjtotal;	/* NOTE(review): looks like total outstanding adjtime adjustment — confirm units */
	int32_t			calend_adjdelta;	/* NOTE(review): per-interval adjustment step — confirm */

	uint32_t		boottime;		/* boot timestamp — presumably seconds; verify against setters */

	mach_timebase_info_data_t	timebase_const;	/* tsc->nanos ratio consumed by tsc_to_nanoseconds() */

	decl_simple_lock_data(,lock)	/* real-time clock device lock */
} rtclock;
145 | ||
91447636 A |
146 | boolean_t rtc_initialized = FALSE; |
147 | clock_res_t rtc_intr_nsec = NSEC_PER_HZ; /* interrupt res */ | |
148 | uint64_t rtc_cycle_count; /* clocks in 1/20th second */ | |
149 | uint64_t rtc_cyc_per_sec; /* processor cycles per sec */ | |
150 | uint32_t rtc_boot_frequency; /* provided by 1st speed-step */ | |
151 | uint32_t rtc_quant_scale; /* clock to nanos multiplier */ | |
152 | uint32_t rtc_quant_shift; /* clock to nanos right shift */ | |
153 | uint64_t rtc_decrementer_min; | |
1c79356b | 154 | |
91447636 | 155 | static mach_timebase_info_data_t rtc_lapic_scale; /* nsec to lapic count */ |
9bccf70c | 156 | |
1c79356b | 157 | /* |
91447636 | 158 | * Macros to lock/unlock real-time clock data. |
1c79356b | 159 | */ |
91447636 A |
160 | #define RTC_INTRS_OFF(s) \ |
161 | (s) = splclock() | |
162 | ||
163 | #define RTC_INTRS_ON(s) \ | |
164 | splx(s) | |
165 | ||
166 | #define RTC_LOCK(s) \ | |
167 | MACRO_BEGIN \ | |
168 | RTC_INTRS_OFF(s); \ | |
169 | simple_lock(&rtclock.lock); \ | |
1c79356b A |
170 | MACRO_END |
171 | ||
91447636 A |
172 | #define RTC_UNLOCK(s) \ |
173 | MACRO_BEGIN \ | |
1c79356b | 174 | simple_unlock(&rtclock.lock); \ |
91447636 | 175 | RTC_INTRS_ON(s); \ |
1c79356b A |
176 | MACRO_END |
177 | ||
178 | /* | |
179 | * i8254 control. | |
180 | * | |
181 | * The i8254 is a traditional PC device with some arbitrary characteristics. | |
182 | * Basically, it is a register that counts at a fixed rate and can be | |
183 | * programmed to generate an interrupt every N counts. The count rate is | |
91447636 A |
184 | * clknum counts per sec (see pit.h), historically 1193167=14.318MHz/12 |
185 | * but the more accurate value is 1193182=14.31818MHz/12. [14.31818 MHz being | |
186 | * the master crystal oscillator reference frequency since the very first PC.] | |
1c79356b A |
187 | * Various constants are computed based on this value, and we calculate |
188 | * them at init time for execution efficiency. To obtain sufficient | |
189 | * accuracy, some of the calculation are most easily done in floating | |
190 | * point and then converted to int. | |
191 | * | |
1c79356b | 192 | */ |
1c79356b A |
193 | |
194 | /* | |
195 | * Forward decl. | |
196 | */ | |
197 | ||
91447636 A |
198 | static uint64_t rtc_set_cyc_per_sec(uint64_t cycles); |
199 | uint64_t rtc_nanotime_read(void); | |
55e303ae A |
200 | |
201 | /* | |
91447636 A |
202 | * create_mul_quant_GHZ |
203 | * create a constant used to multiply the TSC by to convert to nanoseconds. | |
204 | * This is a 32 bit number and the TSC *MUST* have a frequency higher than | |
205 | * 1000Mhz for this routine to work. | |
206 | * | |
207 | * The theory here is that we know how many TSCs-per-sec the processor runs at. | |
208 | * Normally to convert this to nanoseconds you would multiply the current | |
209 | * timestamp by 1000000000 (a billion) then divide by TSCs-per-sec. | |
210 | * Unfortunately the TSC is 64 bits which would leave us with 96 bit intermediate |
211 | * results from the multiply that must then be divided. | |
212 | * Usually that's | |
213 | * uint96 = tsc * numer | |
214 | * nanos = uint96 / denom | |
215 | * Instead, we create this quant constant and it becomes the numerator, | |
216 | * the denominator can then be 0x100000000 which makes our division as simple as | |
217 | * forgetting the lower 32 bits of the result. We can also pass this number to | |
218 | * user space as the numer and pass 0xFFFFFFFF (RTC_FAST_DENOM) as the denom to | |
219 | * convert raw counts to nanos. The difference is so small as to be | |
220 | * undetectable by anything. | |
221 | * | |
222 | * Unfortunately we cannot do this for sub-GHZ processors. In this case, all |
223 | * we do is pass the CPU speed in raw as the denom and we pass in 1000000000 | |
224 | * as the numerator. No short cuts allowed | |
55e303ae | 225 | */ |
91447636 | 226 | #define RTC_FAST_DENOM 0xFFFFFFFF |
55e303ae | 227 | inline static uint32_t |
91447636 | 228 | create_mul_quant_GHZ(int shift, uint32_t quant) |
55e303ae | 229 | { |
91447636 | 230 | return (uint32_t)((((uint64_t)NSEC_PER_SEC/20) << shift) / quant); |
55e303ae | 231 | } |
91447636 A |
232 | /* |
233 | * This routine takes a value of raw TSC ticks and applies the passed mul_quant | |
234 | * generated by create_mul_quant() This is our internal routine for creating | |
235 | * nanoseconds. | |
236 | * Since we don't really have uint96_t this routine basically does this.... | |
237 | * uint96_t intermediate = (*value) * scale | |
238 | * return (intermediate >> 32) | |
239 | */ | |
55e303ae A |
inline static uint64_t
fast_get_nano_from_abs(uint64_t value, int scale)
{
	/*
	 * Compute (value * scale) >> 32, i.e. the high 64 bits of the
	 * 96-bit product, per the block comment above.
	 * "+A" binds value to the edx:eax pair; "c" binds scale to ecx.
	 */
	asm (" movl %%edx,%%esi	\n\t"	/* stash high word of value */
	     " mull %%ecx		\n\t"	/* low word * scale */
	     " movl %%edx,%%edi	\n\t"	/* save carry-out of low partial product */
	     " movl %%esi,%%eax	\n\t"
	     " mull %%ecx		\n\t"	/* high word * scale */
	     " xorl %%ecx,%%ecx	\n\t"
	     " addl %%edi,%%eax	\n\t"	/* combine partial products */
	     " adcl %%ecx,%%edx	"
	     : "+A" (value)
	     : "c" (scale)
	     : "%esi", "%edi");
	return value;
}
256 | ||
257 | /* | |
91447636 | 258 | * This routine basically does this... |
55e303ae A |
259 | * ts.tv_sec = nanos / 1000000000; create seconds |
260 | * ts.tv_nsec = nanos % 1000000000; create remainder nanos | |
261 | */ | |
inline static mach_timespec_t
nanos_to_timespec(uint64_t nanos)
{
	union {
		mach_timespec_t ts;
		uint64_t u64;
	} ret;
	ret.u64 = nanos;
	/*
	 * divl divides edx:eax (the 64-bit nanos, bound via "+A") by
	 * NSEC_PER_SEC, leaving quotient (seconds) in eax and remainder
	 * (nanoseconds) in edx; the union overlays that register pair
	 * onto the timespec fields.
	 * NOTE(review): divl faults if the quotient exceeds 32 bits —
	 * assumes seconds fit in 32 bits; confirm acceptable for uptime.
	 */
	asm volatile("divl %1" : "+A" (ret.u64) : "r" (NSEC_PER_SEC));
	return ret.ts;
}
273 | ||
91447636 A |
274 | /* |
275 | * The following two routines perform the 96 bit arithmetic we need to | |
276 | * convert generic absolute<->nanoseconds | |
277 | * The multiply routine takes a uint64_t and a uint32_t and returns the result | |
278 | * in a uint32_t[3] array. | |
279 | * The divide routine takes this uint32_t[3] array and divides it by a uint32_t | |
280 | * returning a uint64_t | |
281 | */ | |
55e303ae A |
inline static void
longmul(uint64_t *abstime, uint32_t multiplicand, uint32_t *result)
{
	/*
	 * 64x32 -> 96-bit multiply (see the block comment above):
	 * result[0..2] = *abstime * multiplicand, little-endian words.
	 * "a" holds the abstime pointer, "c" the multiplicand; ebx is
	 * saved/restored manually because it is not in the clobber list.
	 */
	asm volatile(
		" pushl %%ebx			\n\t"
		" movl %%eax,%%ebx		\n\t"	/* ebx = abstime pointer */
		" movl (%%eax),%%eax	\n\t"	/* low word of *abstime */
		" mull %%ecx			\n\t"	/* low partial product */
		" xchg %%eax,%%ebx		\n\t"
		" pushl %%edx			\n\t"	/* save carry-out of low product */
		" movl 4(%%eax),%%eax	\n\t"	/* high word of *abstime */
		" mull %%ecx			\n\t"	/* high partial product */
		" movl %2,%%ecx		\n\t"	/* ecx = result pointer */
		" movl %%ebx,(%%ecx)	\n\t"	/* result[0] = low word */
		" popl %%ebx			\n\t"
		" addl %%ebx,%%eax		\n\t"	/* fold in saved carry */
		" popl %%ebx			\n\t"
		" movl %%eax,4(%%ecx)	\n\t"	/* result[1] = middle word */
		" adcl $0,%%edx		\n\t"
		" movl %%edx,8(%%ecx)	// and save it"
		: : "a"(abstime), "c"(multiplicand), "m"(result));

}
305 | ||
inline static uint64_t
longdiv(uint32_t *numer, uint32_t denom)
{
	uint64_t	result;
	/*
	 * 96/32 -> 64-bit divide (see the block comment above):
	 * returns numer[0..2] / denom.  Two chained divl instructions:
	 * first divides the upper 64 bits, then its remainder is used as
	 * the high half for dividing the low word.  Assumes the quotient
	 * fits in 64 bits (divl faults on overflow).
	 */
	asm volatile(
		" pushl %%ebx			\n\t"
		" movl %%eax,%%ebx		\n\t"	/* ebx = numer pointer */
		" movl 8(%%eax),%%edx	\n\t"	/* top word */
		" movl 4(%%eax),%%eax	\n\t"	/* middle word */
		" divl %%ecx			\n\t"	/* high quotient word */
		" xchg %%ebx,%%eax		\n\t"
		" movl (%%eax),%%eax	\n\t"	/* low word, remainder in edx */
		" divl %%ecx			\n\t"	/* low quotient word */
		" xchg %%ebx,%%edx		\n\t"	/* edx:eax = 64-bit quotient */
		" popl %%ebx			\n\t"
		: "=A"(result) : "a"(numer),"c"(denom));
	return result;
}
324 | ||
91447636 A |
325 | /* |
326 | * Enable or disable timer 2. | |
327 | * Port 0x61 controls timer 2: | |
328 | * bit 0 gates the clock, | |
329 | * bit 1 gates output to speaker. | |
330 | */ | |
/* Gate the PIT timer 2 clock on (port 0x61 bit 0) with the speaker
 * output (bit 1) left off -- see the port description above. */
inline static void
enable_PIT2(void)
{
	asm volatile(
		" inb	$0x61,%%al	\n\t"
		" and	$0xFC,%%al	\n\t"	/* clear gate and speaker bits */
		" or	$1,%%al	\n\t"	/* set the clock gate */
		" outb	%%al,$0x61	\n\t"
		: : : "%al" );
}
341 | ||
/* Gate the PIT timer 2 clock off (clear port 0x61 bits 0 and 1). */
inline static void
disable_PIT2(void)
{
	asm volatile(
		" inb	$0x61,%%al	\n\t"
		" and	$0xFC,%%al	\n\t"	/* clear clock gate and speaker bits */
		" outb	%%al,$0x61	\n\t"
		: : : "%al" );
}
351 | ||
55e303ae A |
/*
 * Program PIT counter 2 with a 16-bit initial count and wait until the
 * hardware has latched it.  value is bound to dx ("d" constraint): dl
 * carries the low byte, dh the high byte.
 */
inline static void
set_PIT2(int value)
{
/*
 * First, tell the clock we are going to write 16 bits to the counter
 * and enable one-shot mode (command 0xB8 to port 0x43)
 * Then write the two bytes into the PIT2 clock register (port 0x42).
 * Loop until the value is "realized" in the clock,
 * this happens on the next tick.
 */
	asm volatile(
		" movb	$0xB8,%%al	\n\t"
		" outb	%%al,$0x43	\n\t"
		" movb	%%dl,%%al	\n\t"
		" outb	%%al,$0x42	\n\t"
		" movb	%%dh,%%al	\n\t"
		" outb	%%al,$0x42	\n"
		"1:	  inb	$0x42,%%al	\n\t"	/* poll: read back both bytes... */
		" inb	$0x42,%%al	\n\t"
		" cmp	%%al,%%dh	\n\t"	/* ...until the high byte matches */
		" jne	1b"
		: : "d"(value) : "%al");
}
375 | ||
/*
 * Latch and read PIT counter 2.
 * Stores the latched 16-bit count in *value and returns the TSC
 * (via rdtsc) sampled between the latch command and the port reads,
 * so the caller can account for how long the read itself takes.
 */
inline static uint64_t
get_PIT2(unsigned int *value)
{
	register uint64_t	result;
/*
 * This routine first latches the time (command 0x80 to port 0x43),
 * then gets the time stamp so we know how long the read will take later.
 * Read (from port 0x42) and return the current value of the timer.
 */
	asm volatile(
		" xorl	%%ecx,%%ecx	\n\t"
		" movb	$0x80,%%al	\n\t"	/* latch counter 2 */
		" outb	%%al,$0x43	\n\t"
		" rdtsc			\n\t"	/* timestamp -> edx:eax ("=A" result) */
		" pushl	%%eax		\n\t"
		" inb	$0x42,%%al	\n\t"	/* low byte of latched count */
		" movb	%%al,%%cl	\n\t"
		" inb	$0x42,%%al	\n\t"	/* high byte of latched count */
		" movb	%%al,%%ch	\n\t"
		" popl	%%eax	"
		: "=A"(result), "=c"(*value));
	return result;
}
399 | ||
91447636 A |
400 | /* |
401 | * timeRDTSC() | |
402 | * This routine sets up PIT counter 2 to count down 1/20 of a second. | |
403 | * It pauses until the value is latched in the counter | |
404 | * and then reads the time stamp counter to return to the caller. | |
405 | */ | |
406 | static uint64_t | |
55e303ae A |
407 | timeRDTSC(void) |
408 | { | |
91447636 | 409 | int attempts = 0; |
55e303ae A |
410 | uint64_t latchTime; |
411 | uint64_t saveTime,intermediate; | |
91447636 | 412 | unsigned int timerValue, lastValue; |
55e303ae | 413 | boolean_t int_enabled; |
91447636 A |
414 | /* |
415 | * Table of correction factors to account for | |
416 | * - timer counter quantization errors, and | |
417 | * - undercounts 0..5 | |
418 | */ | |
419 | #define SAMPLE_CLKS_EXACT (((double) CLKNUM) / 20.0) | |
420 | #define SAMPLE_CLKS_INT ((int) CLKNUM / 20) | |
421 | #define SAMPLE_NSECS (2000000000LL) | |
422 | #define SAMPLE_MULTIPLIER (((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT) | |
423 | #define ROUND64(x) ((uint64_t)((x) + 0.5)) | |
424 | uint64_t scale[6] = { | |
425 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)), | |
426 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)), | |
427 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)), | |
428 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)), | |
429 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)), | |
430 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5)) | |
431 | }; | |
55e303ae A |
432 | |
433 | int_enabled = ml_set_interrupts_enabled(FALSE); | |
434 | ||
91447636 A |
435 | restart: |
436 | if (attempts >= 2) | |
437 | panic("timeRDTSC() calibation failed with %d attempts\n", attempts); | |
438 | attempts++; | |
55e303ae A |
439 | enable_PIT2(); // turn on PIT2 |
440 | set_PIT2(0); // reset timer 2 to be zero | |
91447636 | 441 | latchTime = rdtsc64(); // get the time stamp to time |
55e303ae | 442 | latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes |
91447636 A |
443 | set_PIT2(SAMPLE_CLKS_INT); // set up the timer for (almost) 1/20th a second |
444 | saveTime = rdtsc64(); // now time how long a 20th a second is... | |
445 | get_PIT2(&lastValue); | |
446 | get_PIT2(&lastValue); // read twice, first value may be unreliable | |
55e303ae A |
447 | do { |
448 | intermediate = get_PIT2(&timerValue); | |
91447636 A |
449 | if (timerValue > lastValue) { |
450 | printf("Hey we are going backwards! %u -> %u, restarting timing\n", | |
451 | timerValue,lastValue); | |
452 | set_PIT2(0); | |
453 | disable_PIT2(); | |
454 | goto restart; | |
455 | } | |
456 | lastValue = timerValue; | |
457 | } while (timerValue > 5); | |
458 | kprintf("timerValue %d\n",timerValue); | |
459 | kprintf("intermediate 0x%016llx\n",intermediate); | |
460 | kprintf("saveTime 0x%016llx\n",saveTime); | |
55e303ae | 461 | |
91447636 A |
462 | intermediate -= saveTime; // raw count for about 1/20 second |
463 | intermediate *= scale[timerValue]; // rescale measured time spent | |
464 | intermediate /= SAMPLE_NSECS; // so its exactly 1/20 a second | |
465 | intermediate += latchTime; // add on our save fudge | |
466 | ||
467 | set_PIT2(0); // reset timer 2 to be zero | |
468 | disable_PIT2(); // turn off PIT 2 | |
469 | ||
55e303ae A |
470 | ml_set_interrupts_enabled(int_enabled); |
471 | return intermediate; | |
472 | } | |
473 | ||
474 | static uint64_t | |
91447636 | 475 | tsc_to_nanoseconds(uint64_t abstime) |
55e303ae A |
476 | { |
477 | uint32_t numer; | |
478 | uint32_t denom; | |
55e303ae A |
479 | uint32_t intermediate[3]; |
480 | ||
481 | numer = rtclock.timebase_const.numer; | |
482 | denom = rtclock.timebase_const.denom; | |
91447636 | 483 | if (denom == RTC_FAST_DENOM) { |
55e303ae A |
484 | abstime = fast_get_nano_from_abs(abstime, numer); |
485 | } else { | |
486 | longmul(&abstime, numer, intermediate); | |
487 | abstime = longdiv(intermediate, denom); | |
488 | } | |
489 | return abstime; | |
490 | } | |
491 | ||
492 | inline static mach_timespec_t | |
91447636 | 493 | tsc_to_timespec(void) |
55e303ae A |
494 | { |
495 | uint64_t currNanos; | |
91447636 | 496 | currNanos = rtc_nanotime_read(); |
55e303ae A |
497 | return nanos_to_timespec(currNanos); |
498 | } | |
499 | ||
91447636 A |
500 | #define DECREMENTER_MAX UINT_MAX |
501 | static uint32_t | |
502 | deadline_to_decrementer( | |
503 | uint64_t deadline, | |
504 | uint64_t now) | |
505 | { | |
506 | uint64_t delta; | |
507 | ||
508 | if (deadline <= now) | |
509 | return rtc_decrementer_min; | |
510 | else { | |
511 | delta = deadline - now; | |
512 | return MIN(MAX(rtc_decrementer_min,delta),DECREMENTER_MAX); | |
513 | } | |
514 | } | |
515 | ||
/*
 * Program the local apic timer to count down from initial_count (one-shot,
 * divide-by-1, no interrupt), busy-wait for it to reach zero, and return
 * the elapsed time in nanoseconds measured with the TSC.
 * Interrupts are disabled for the duration so the measurement isn't skewed.
 */
static inline uint64_t
lapic_time_countdown(uint32_t initial_count)
{
	boolean_t		state;
	uint64_t		start_time;
	uint64_t		stop_time;
	lapic_timer_count_t	count;

	state = ml_set_interrupts_enabled(FALSE);
	lapic_set_timer(FALSE, one_shot, divide_by_1, initial_count);
	start_time = rdtsc64();
	do {
		lapic_get_timer(NULL, NULL, NULL, &count);
	} while (count > 0);
	stop_time = rdtsc64();
	ml_set_interrupts_enabled(state);

	return tsc_to_nanoseconds(stop_time - start_time);
}
1c79356b | 535 | |
91447636 A |
/*
 * Calibrate the local apic timer against the TSC-derived nanotime,
 * producing the nsec->lapic-count ratio in rtc_lapic_scale used by
 * rtc_lapic_set_timer().  No-op if the cpu has no local apic.
 */
static void
rtc_lapic_timer_calibrate(void)
{
	uint32_t	nsecs;
	uint64_t	countdown;

	if (!(cpuid_features() & CPUID_FEATURE_APIC))
		return;

	/*
	 * Set the local apic timer counting down to zero without an interrupt.
	 * Use the timestamp to calculate how long this takes.
	 */
	nsecs = (uint32_t) lapic_time_countdown(rtc_intr_nsec);

	/*
	 * Compute a countdown ratio for a given time in nanoseconds.
	 * That is, countdown = time * numer / denom.
	 */
	countdown = (uint64_t)rtc_intr_nsec * (uint64_t)rtc_intr_nsec / nsecs;

	/* Second measurement, using the computed countdown, refines the scale. */
	nsecs = (uint32_t) lapic_time_countdown((uint32_t) countdown);

	rtc_lapic_scale.numer = countdown;
	rtc_lapic_scale.denom = nsecs;

	kprintf("rtc_lapic_timer_calibrate() scale: %d/%d\n",
		(uint32_t) countdown, nsecs);
}
565 | ||
566 | static void | |
567 | rtc_lapic_set_timer( | |
568 | uint32_t interval) | |
569 | { | |
570 | uint64_t count; | |
571 | ||
572 | assert(rtc_lapic_scale.denom); | |
573 | ||
574 | count = interval * (uint64_t) rtc_lapic_scale.numer; | |
575 | count /= rtc_lapic_scale.denom; | |
576 | ||
577 | lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count); | |
578 | } | |
579 | ||
580 | static void | |
581 | rtc_lapic_start_ticking(void) | |
582 | { | |
583 | uint64_t abstime; | |
584 | uint64_t first_tick; | |
585 | uint64_t decr; | |
586 | ||
587 | abstime = mach_absolute_time(); | |
588 | first_tick = abstime + NSEC_PER_HZ; | |
589 | current_cpu_datap()->cpu_rtc_tick_deadline = first_tick; | |
590 | decr = deadline_to_decrementer(first_tick, abstime); | |
591 | rtc_lapic_set_timer(decr); | |
1c79356b A |
592 | } |
593 | ||
594 | /* | |
595 | * Configure the real-time clock device. Return success (1) | |
596 | * or failure (0). | |
597 | */ | |
598 | ||
599 | int | |
600 | sysclk_config(void) | |
601 | { | |
1c79356b | 602 | |
1c79356b A |
603 | mp_disable_preemption(); |
604 | if (cpu_number() != master_cpu) { | |
605 | mp_enable_preemption(); | |
606 | return(1); | |
607 | } | |
608 | mp_enable_preemption(); | |
91447636 A |
609 | |
610 | timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL); | |
611 | ||
612 | simple_lock_init(&rtclock.lock, 0); | |
613 | ||
614 | return (1); | |
615 | } | |
616 | ||
617 | ||
618 | /* | |
619 | * Nanotime/mach_absolute_time |
620 | * ----------------------------- |
621 | * The timestamp counter (tsc) - which counts cpu clock cycles and can be read |
622 | * efficiently by the kernel and in userspace - is the reference for all timing. |
623 | * However, the cpu clock rate is not only platform-dependent but can change |
624 | * (speed-step) dynamically. Hence tsc is converted into nanoseconds which is |
625 | * identical to mach_absolute_time. The conversion of tsc to nanoseconds is |
626 | * encapsulated by nanotime. | |
627 | * | |
628 | * The kernel maintains nanotime information recording: | |
629 | * - the current ratio of tsc to nanoseconds | |
630 | * with this ratio expressed as a 32-bit scale and shift | |
631 | * (power of 2 divider); | |
632 | * - the tsc (step_tsc) and nanotime (step_ns) at which the current | |
633 | * ratio (clock speed) began. | |
634 | * So a tsc value can be converted to nanotime by: | |
635 | * | |
636 | * nanotime = (((tsc - step_tsc)*scale) >> shift) + step_ns | |
637 | * | |
638 | * In general, (tsc - step_tsc) is a 64-bit quantity with the scaling | |
639 | * involving a 96-bit intermediate value. However, by saving the converted | |
640 | * values at each tick (or at any intervening speed-step) - base_tsc and | |
641 | * base_ns - we can perform conversions relative to these and be assured that | |
642 | * (tsc - tick_tsc) is 32-bits. Hence: | |
643 | * | |
644 | * fast_nanotime = (((tsc - base_tsc)*scale) >> shift) + base_ns | |
645 | * | |
646 | * The tuple {base_tsc, base_ns, scale, shift} is exported in the commpage | |
647 | * for the userspace nanotime routine to read. A duplicate check_tsc is | |
648 | * appended so that the consistency of the read can be verified. Note that | |
649 | * this scheme is essential for MP systems in which the commpage is updated | |
650 | * by the master cpu but may be read concurrently by other cpus. | |
651 | * | |
652 | */ | |
653 | static inline void | |
654 | rtc_nanotime_set_commpage(rtc_nanotime_t *rntp) | |
655 | { | |
656 | commpage_nanotime_t cp_nanotime; | |
657 | ||
658 | /* Only the master cpu updates the commpage */ | |
659 | if (cpu_number() != master_cpu) | |
660 | return; | |
661 | ||
662 | cp_nanotime.nt_base_tsc = rntp->rnt_tsc; | |
663 | cp_nanotime.nt_base_ns = rntp->rnt_nanos; | |
664 | cp_nanotime.nt_scale = rntp->rnt_scale; | |
665 | cp_nanotime.nt_shift = rntp->rnt_shift; | |
666 | ||
667 | commpage_set_nanotime(&cp_nanotime); | |
668 | } | |
669 | ||
/*
 * Initialize this cpu's nanotime record.  The master cpu seeds its record
 * from the TSC and the current quantization scale; other cpus copy the
 * master's record, retrying until a consistent snapshot is obtained.
 */
static void
rtc_nanotime_init(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	rtc_nanotime_t	*master_rntp = &cpu_datap(master_cpu)->cpu_rtc_nanotime;

	if (cpu_number() == master_cpu) {
		rntp->rnt_tsc = rdtsc64();
		rntp->rnt_nanos = tsc_to_nanoseconds(rntp->rnt_tsc);
		rntp->rnt_scale = rtc_quant_scale;
		rntp->rnt_shift = rtc_quant_shift;
		/* No speed-step yet: the step epoch starts at zero. */
		rntp->rnt_step_tsc = 0ULL;
		rntp->rnt_step_nanos = 0ULL;
	} else {
		/*
		 * Copy master processor's nanotime info.
		 * Loop required in case this changes while copying.
		 * rnt_tsc doubles as the consistency marker, matching
		 * the re-check in rtc_nanotime_read().
		 */
		do {
			*rntp = *master_rntp;
		} while (rntp->rnt_tsc != master_rntp->rnt_tsc);
	}
}
693 | ||
/*
 * Advance the nanotime record to the given TSC value: recompute nanos
 * from the current step epoch and store the new base timestamp.
 * NOTE(review): rnt_tsc is written last and is used by the lock-free
 * reader (rtc_nanotime_read) as its consistency marker — preserve this
 * statement order.
 */
static inline void
_rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t tsc)
{
	uint64_t	tsc_delta;	/* cycles since the current step epoch */
	uint64_t	ns_delta;	/* those cycles, as nanoseconds */

	tsc_delta = tsc - rntp->rnt_step_tsc;
	ns_delta = tsc_to_nanoseconds(tsc_delta);
	rntp->rnt_nanos = rntp->rnt_step_nanos + ns_delta;
	rntp->rnt_tsc = tsc;
}
705 | ||
/*
 * Periodic nanotime refresh for this cpu: rebase the record on the
 * current TSC and export it to the commpage.  Must be called with
 * preemption disabled and interrupts off (asserted below).
 */
static void
rtc_nanotime_update(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	_rtc_nanotime_update(rntp, rdtsc64());
	rtc_nanotime_set_commpage(rntp);
}
717 | ||
/*
 * Re-time-stamp the nanotime record around a clock-speed change
 * (called from rtc_clock_stepping/rtc_clock_stepped with interrupts
 * disabled).  The accumulated time is first brought up to date under
 * the OLD scale, then the new scale and a fresh step epoch are recorded.
 */
static void
rtc_nanotime_scale_update(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	uint64_t	tsc = rdtsc64();

	assert(!ml_get_interrupts_enabled());

	/*
	 * Update time based on past scale.
	 */
	_rtc_nanotime_update(rntp, tsc);

	/*
	 * Update scale and timestamp this update.
	 */
	rntp->rnt_scale = rtc_quant_scale;
	rntp->rnt_shift = rtc_quant_shift;
	rntp->rnt_step_tsc = rntp->rnt_tsc;
	rntp->rnt_step_nanos = rntp->rnt_nanos;

	/* Export update to userland */
	rtc_nanotime_set_commpage(rntp);
}
742 | ||
/*
 * Core nanotime computation: nanos = base_ns + scaled(tsc - base_tsc).
 * Returns 0 before the record is initialized (scale still zero).
 * Deltas that don't fit in 32 bits take the slow 96-bit path; otherwise
 * a single 64-bit multiply plus shift suffices (see the block comment
 * on nanotime above).  Caller (rtc_nanotime_read) handles the race with
 * a concurrent update.
 */
static uint64_t
_rtc_nanotime_read(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	uint64_t	rnt_tsc;
	uint32_t	rnt_scale;
	uint32_t	rnt_shift;
	uint64_t	rnt_nanos;
	uint64_t	tsc;
	uint64_t	tsc_delta;

	rnt_scale = rntp->rnt_scale;
	if (rnt_scale == 0)
		return 0ULL;	/* not yet initialized */

	rnt_shift = rntp->rnt_shift;
	rnt_nanos = rntp->rnt_nanos;
	rnt_tsc = rntp->rnt_tsc;
	tsc = rdtsc64();

	tsc_delta = tsc - rnt_tsc;
	if ((tsc_delta >> 32) != 0)
		/* delta too wide for the fast multiply: full conversion */
		return rnt_nanos + tsc_to_nanoseconds(tsc_delta);

	/* Let the compiler optimize(?): */
	if (rnt_shift == 32)
		return rnt_nanos + ((tsc_delta * rnt_scale) >> 32);
	else
		return rnt_nanos + ((tsc_delta * rnt_scale) >> rnt_shift);
}
773 | ||
/*
 * Public nanotime read: lock-free, retrying if an update (detected via
 * a change of rnt_tsc) raced with the computation.
 */
uint64_t
rtc_nanotime_read(void)
{
	uint64_t	result;
	uint64_t	rnt_tsc;
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	/*
	 * Use timestamp to ensure the uptime record isn't changed.
	 * This avoids disabling interrupts.
	 * And note this is a per-cpu structure hence no locking.
	 */
	do {
		rnt_tsc = rntp->rnt_tsc;
		result = _rtc_nanotime_read();
	} while (rnt_tsc != rntp->rnt_tsc);

	return result;
}
793 | ||
794 | ||
795 | /* | |
796 | * This function is called by the speed-step driver when a | |
797 | * change of cpu clock frequency is about to occur. | |
798 | * The scale is not changed until rtc_clock_stepped() is called. | |
799 | * Between these times there is an uncertainty is exactly when | |
800 | * the change takes effect. FIXME: by using another timing source | |
801 | * we could eliminate this error. | |
802 | */ | |
803 | void | |
804 | rtc_clock_stepping(__unused uint32_t new_frequency, | |
805 | __unused uint32_t old_frequency) | |
806 | { | |
807 | boolean_t istate; | |
1c79356b | 808 | |
91447636 A |
809 | istate = ml_set_interrupts_enabled(FALSE); |
810 | rtc_nanotime_scale_update(); | |
811 | ml_set_interrupts_enabled(istate); | |
812 | } | |
813 | ||
814 | /* | |
815 | * This function is called by the speed-step driver when a | |
816 | * change of cpu clock frequency has just occured. This change | |
817 | * is expressed as a ratio relative to the boot clock rate. | |
818 | */ | |
819 | void | |
820 | rtc_clock_stepped(uint32_t new_frequency, uint32_t old_frequency) | |
821 | { | |
822 | boolean_t istate; | |
823 | ||
824 | istate = ml_set_interrupts_enabled(FALSE); | |
825 | if (rtc_boot_frequency == 0) { | |
826 | /* | |
827 | * At the first ever stepping, old frequency is the real | |
828 | * initial clock rate. This step and all others are based | |
829 | * relative to this initial frequency at which the tsc | |
830 | * calibration was made. Hence we must remember this base | |
831 | * frequency as reference. | |
832 | */ | |
833 | rtc_boot_frequency = old_frequency; | |
834 | } | |
835 | rtc_set_cyc_per_sec(rtc_cycle_count * new_frequency / | |
836 | rtc_boot_frequency); | |
837 | rtc_nanotime_scale_update(); | |
838 | ml_set_interrupts_enabled(istate); | |
1c79356b A |
839 | } |
840 | ||
/*
 * rtc_sleep_wakeup() is called from acpi on awakening from a S3 sleep
 */
void
rtc_sleep_wakeup(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);

	/*
	 * Reset nanotime.
	 * The timestamp counter will have been reset
	 * but nanotime (uptime) marches onward.
	 * We assume that we're still at the former cpu frequency.
	 */
	rntp->rnt_tsc = rdtsc64();		/* new (post-reset) base tsc */
	rntp->rnt_step_tsc = 0ULL;		/* step epoch restarts at the reset */
	rntp->rnt_step_nanos = rntp->rnt_nanos;	/* uptime carries over unchanged */
	rtc_nanotime_set_commpage(rntp);

	/* Restart tick interrupts from the LAPIC timer */
	rtc_lapic_start_ticking();

	ml_set_interrupts_enabled(istate);
}
869 | ||
/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 * Called on each cpu; full calibration is performed only on the master.
 * Returns 1 (non-zero) to report success to the clock subsystem.
 */
int
sysclk_init(void)
{
	uint64_t	cycles;

	mp_disable_preemption();
	if (cpu_number() == master_cpu) {
		/*
		 * Perform calibration.
		 * The PIT is used as the reference to compute how many
		 * TSC counts (cpu clock cycles) occur per second.
		 */
		rtc_cycle_count = timeRDTSC();
		cycles = rtc_set_cyc_per_sec(rtc_cycle_count);

		/*
		 * Set min/max to actual.
		 * ACPI may update these later if speed-stepping is detected.
		 */
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
		printf("[RTCLOCK] frequency %llu (%llu)\n",
		       cycles, rtc_cyc_per_sec);

		rtc_lapic_timer_calibrate();

		/* Minimum interval is 1usec */
		rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC,
							      0ULL);
		/* Point LAPIC interrupts to hardclock() */
		lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

		clock_timebase_init();
		rtc_initialized = TRUE;
	}

	/* Per-cpu work: seed nanotime state and start LAPIC ticking */
	rtc_nanotime_init();

	rtc_lapic_start_ticking();

	mp_enable_preemption();

	return (1);
}
918 | ||
1c79356b A |
919 | /* |
920 | * Get the clock device time. This routine is responsible | |
921 | * for converting the device's machine dependent time value | |
922 | * into a canonical mach_timespec_t value. | |
923 | */ | |
91447636 A |
924 | static kern_return_t |
925 | sysclk_gettime_internal( | |
1c79356b A |
926 | mach_timespec_t *cur_time) /* OUT */ |
927 | { | |
91447636 | 928 | *cur_time = tsc_to_timespec(); |
1c79356b A |
929 | return (KERN_SUCCESS); |
930 | } | |
931 | ||
932 | kern_return_t | |
91447636 | 933 | sysclk_gettime( |
1c79356b A |
934 | mach_timespec_t *cur_time) /* OUT */ |
935 | { | |
91447636 | 936 | return sysclk_gettime_internal(cur_time); |
1c79356b A |
937 | } |
938 | ||
1c79356b A |
939 | void |
940 | sysclk_gettime_interrupts_disabled( | |
941 | mach_timespec_t *cur_time) /* OUT */ | |
942 | { | |
91447636 | 943 | (void) sysclk_gettime_internal(cur_time); |
1c79356b A |
944 | } |
945 | ||
// utility routine
// Code to calculate how many processor cycles are in a second...

/*
 * Record the measured cycle count (counted over 1/20 second by
 * timeRDTSC()) and derive the cycles->nanoseconds conversion factors.
 * Also publishes the rounded frequency to gPEClockFrequencyInfo.
 * Returns the rounded cycles-per-second value.
 */
static uint64_t
rtc_set_cyc_per_sec(uint64_t cycles)
{

	if (cycles > (NSEC_PER_SEC/20)) {
		// we can use just a "fast" multiply to get nanos
		rtc_quant_shift = 32;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
		rtclock.timebase_const.numer = rtc_quant_scale;	// timeRDTSC is 1/20
		rtclock.timebase_const.denom = RTC_FAST_DENOM;
	} else {
		/* Slower clock: keep an exact numer/denom ratio instead */
		rtc_quant_shift = 26;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
		rtclock.timebase_const.numer = NSEC_PER_SEC/20;	// timeRDTSC is 1/20
		rtclock.timebase_const.denom = cycles;
	}
	rtc_cyc_per_sec = cycles*20;	// multiply it by 20 and we are done..
					// BUT we also want to calculate...

	/* Round to the nearest UI_CPUFREQ_ROUNDING_FACTOR for display */
	cycles = ((rtc_cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
			/ UI_CPUFREQ_ROUNDING_FACTOR)
				* UI_CPUFREQ_ROUNDING_FACTOR;

	/*
	 * Set current measured speed.
	 */
	if (cycles >= 0x100000000ULL) {
		/* Clamp: cpu_clock_rate_hz is only 32 bits wide */
		gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
	} else {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
	}
	gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

	kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, rtc_cyc_per_sec);
	return(cycles);
}
1c79356b | 985 | |
55e303ae A |
986 | void |
987 | clock_get_system_microtime( | |
988 | uint32_t *secs, | |
989 | uint32_t *microsecs) | |
9bccf70c | 990 | { |
55e303ae A |
991 | mach_timespec_t now; |
992 | ||
91447636 | 993 | (void) sysclk_gettime_internal(&now); |
55e303ae A |
994 | |
995 | *secs = now.tv_sec; | |
996 | *microsecs = now.tv_nsec / NSEC_PER_USEC; | |
1c79356b A |
997 | } |
998 | ||
55e303ae A |
999 | void |
1000 | clock_get_system_nanotime( | |
1001 | uint32_t *secs, | |
1002 | uint32_t *nanosecs) | |
1003 | { | |
1004 | mach_timespec_t now; | |
1005 | ||
91447636 | 1006 | (void) sysclk_gettime_internal(&now); |
55e303ae A |
1007 | |
1008 | *secs = now.tv_sec; | |
1009 | *nanosecs = now.tv_nsec; | |
1010 | } | |
9bccf70c | 1011 | |
1c79356b A |
1012 | /* |
1013 | * Get clock device attributes. | |
1014 | */ | |
1015 | kern_return_t | |
1016 | sysclk_getattr( | |
1017 | clock_flavor_t flavor, | |
1018 | clock_attr_t attr, /* OUT */ | |
1019 | mach_msg_type_number_t *count) /* IN/OUT */ | |
1020 | { | |
1c79356b A |
1021 | if (*count != 1) |
1022 | return (KERN_FAILURE); | |
1023 | switch (flavor) { | |
1024 | ||
1025 | case CLOCK_GET_TIME_RES: /* >0 res */ | |
91447636 | 1026 | *(clock_res_t *) attr = rtc_intr_nsec; |
1c79356b A |
1027 | break; |
1028 | ||
91447636 | 1029 | case CLOCK_ALARM_CURRES: /* =0 no alarm */ |
1c79356b | 1030 | case CLOCK_ALARM_MAXRES: |
1c79356b | 1031 | case CLOCK_ALARM_MINRES: |
91447636 | 1032 | *(clock_res_t *) attr = 0; |
1c79356b A |
1033 | break; |
1034 | ||
1035 | default: | |
1036 | return (KERN_INVALID_VALUE); | |
1037 | } | |
1038 | return (KERN_SUCCESS); | |
1039 | } | |
1040 | ||
1c79356b A |
1041 | /* |
1042 | * Set next alarm time for the clock device. This call | |
1043 | * always resets the time to deliver an alarm for the | |
1044 | * clock. | |
1045 | */ | |
1046 | void | |
1047 | sysclk_setalarm( | |
1048 | mach_timespec_t *alarm_time) | |
1049 | { | |
91447636 A |
1050 | timer_call_enter(&rtclock_alarm_timer, |
1051 | (uint64_t) alarm_time->tv_sec * NSEC_PER_SEC | |
1052 | + alarm_time->tv_nsec); | |
1c79356b A |
1053 | } |
1054 | ||
/*
 * Configure the calendar clock.
 * Delegates to the battery-backed clock (bbc) configuration.
 */
int
calend_config(void)
{
	return (bbc_config());
}
1063 | ||
/*
 * Initialize calendar clock.
 * No machine-dependent setup is required; always reports success.
 */
int
calend_init(void)
{
	return 1;
}
1072 | ||
/*
 * Get the current clock time.
 * Returns KERN_FAILURE if the calendar has not yet been set
 * (before clock_initialize_calendar() or a settime call).
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*cur_time)	/* OUT */
{
	spl_t		s;

	RTC_LOCK(s);
	if (!rtclock.calend_is_set) {
		RTC_UNLOCK(s);
		return (KERN_FAILURE);
	}

	/* Calendar time = system uptime + calendar offset (under lock) */
	(void) sysclk_gettime_internal(cur_time);
	ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
	RTC_UNLOCK(s);

	return (KERN_SUCCESS);
}
1094 | ||
55e303ae A |
1095 | void |
1096 | clock_get_calendar_microtime( | |
1097 | uint32_t *secs, | |
1098 | uint32_t *microsecs) | |
1099 | { | |
1100 | mach_timespec_t now; | |
1101 | ||
1102 | calend_gettime(&now); | |
1103 | ||
1104 | *secs = now.tv_sec; | |
1105 | *microsecs = now.tv_nsec / NSEC_PER_USEC; | |
1106 | } | |
1107 | ||
1108 | void | |
1109 | clock_get_calendar_nanotime( | |
1110 | uint32_t *secs, | |
1111 | uint32_t *nanosecs) | |
1c79356b | 1112 | { |
55e303ae A |
1113 | mach_timespec_t now; |
1114 | ||
1115 | calend_gettime(&now); | |
1116 | ||
1117 | *secs = now.tv_sec; | |
1118 | *nanosecs = now.tv_nsec; | |
1119 | } | |
1120 | ||
/*
 * Set the calendar (wall-clock) time.
 * Recomputes the calendar offset against current uptime, keeps the
 * recorded boottime consistent with the whole-second offset change,
 * writes the new time to the battery-backed clock, and notifies
 * listeners of the calendar change.
 */
void
clock_set_calendar_microtime(
	uint32_t	secs,
	uint32_t	microsecs)
{
	mach_timespec_t	new_time, curr_time;
	uint32_t	old_offset;
	spl_t		s;

	new_time.tv_sec = secs;
	new_time.tv_nsec = microsecs * NSEC_PER_USEC;

	RTC_LOCK(s);
	old_offset = rtclock.calend_offset.tv_sec;
	(void) sysclk_gettime_internal(&curr_time);
	/* offset = new wall time - current uptime */
	rtclock.calend_offset = new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	/* Shift boottime by the same amount the offset moved */
	rtclock.boottime += rtclock.calend_offset.tv_sec - old_offset;
	rtclock.calend_is_set = TRUE;
	RTC_UNLOCK(s);

	/* Hardware update and notification happen outside the lock */
	(void) bbc_settime(&new_time);

	host_notify_calendar_change();
}
1146 | ||
1147 | /* | |
1148 | * Get clock device attributes. | |
1149 | */ | |
1150 | kern_return_t | |
1151 | calend_getattr( | |
1152 | clock_flavor_t flavor, | |
1153 | clock_attr_t attr, /* OUT */ | |
1154 | mach_msg_type_number_t *count) /* IN/OUT */ | |
1155 | { | |
1c79356b A |
1156 | if (*count != 1) |
1157 | return (KERN_FAILURE); | |
1158 | switch (flavor) { | |
1159 | ||
1160 | case CLOCK_GET_TIME_RES: /* >0 res */ | |
91447636 | 1161 | *(clock_res_t *) attr = rtc_intr_nsec; |
1c79356b | 1162 | break; |
1c79356b A |
1163 | |
1164 | case CLOCK_ALARM_CURRES: /* =0 no alarm */ | |
1165 | case CLOCK_ALARM_MINRES: | |
1166 | case CLOCK_ALARM_MAXRES: | |
1167 | *(clock_res_t *) attr = 0; | |
1168 | break; | |
1169 | ||
1170 | default: | |
1171 | return (KERN_INVALID_VALUE); | |
1172 | } | |
1173 | return (KERN_SUCCESS); | |
1174 | } | |
1175 | ||
55e303ae A |
1176 | #define tickadj (40*NSEC_PER_USEC) /* "standard" skew, ns / tick */ |
1177 | #define bigadj (NSEC_PER_SEC) /* use 10x skew above bigadj ns */ | |
1178 | ||
1179 | uint32_t | |
1180 | clock_set_calendar_adjtime( | |
1181 | int32_t *secs, | |
1182 | int32_t *microsecs) | |
1c79356b | 1183 | { |
55e303ae A |
1184 | int64_t total, ototal; |
1185 | uint32_t interval = 0; | |
1186 | spl_t s; | |
1187 | ||
1188 | total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC; | |
1c79356b | 1189 | |
91447636 | 1190 | RTC_LOCK(s); |
55e303ae A |
1191 | ototal = rtclock.calend_adjtotal; |
1192 | ||
1193 | if (total != 0) { | |
1194 | int32_t delta = tickadj; | |
1195 | ||
1196 | if (total > 0) { | |
1197 | if (total > bigadj) | |
1198 | delta *= 10; | |
1199 | if (delta > total) | |
1200 | delta = total; | |
1201 | } | |
1202 | else { | |
1203 | if (total < -bigadj) | |
1204 | delta *= 10; | |
1205 | delta = -delta; | |
1206 | if (delta < total) | |
1207 | delta = total; | |
1208 | } | |
1209 | ||
1210 | rtclock.calend_adjtotal = total; | |
1211 | rtclock.calend_adjdelta = delta; | |
1212 | ||
91447636 | 1213 | interval = NSEC_PER_HZ; |
55e303ae A |
1214 | } |
1215 | else | |
1216 | rtclock.calend_adjdelta = rtclock.calend_adjtotal = 0; | |
1217 | ||
91447636 | 1218 | RTC_UNLOCK(s); |
55e303ae A |
1219 | |
1220 | if (ototal == 0) | |
1221 | *secs = *microsecs = 0; | |
1222 | else { | |
1223 | *secs = ototal / NSEC_PER_SEC; | |
1224 | *microsecs = ototal % NSEC_PER_SEC; | |
1225 | } | |
1226 | ||
1227 | return (interval); | |
1228 | } | |
1229 | ||
/*
 * Apply one step of the pending adjtime() skew to the calendar offset.
 * Called periodically (every NSEC_PER_HZ) while an adjustment is
 * outstanding.  Returns the interval until the next call, or 0 when
 * the adjustment is complete.
 */
uint32_t
clock_adjust_calendar(void)
{
	uint32_t	interval = 0;
	int32_t		delta;
	spl_t		s;

	RTC_LOCK(s);
	delta = rtclock.calend_adjdelta;
	/* Skew the calendar offset by this step */
	ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, delta);

	rtclock.calend_adjtotal -= delta;

	/* If less than a full step remains, clamp the final step to
	   exactly the remainder (sign-aware). */
	if (delta > 0) {
		if (delta > rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}
	else
	if (delta < 0) {
		if (delta < rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}

	if (rtclock.calend_adjdelta != 0)
		interval = NSEC_PER_HZ;

	RTC_UNLOCK(s);

	return (interval);
}
1260 | ||
/*
 * Initialize the calendar from the battery-backed clock.
 * Records boottime on the first call only and computes the calendar
 * offset relative to current uptime.  Silently returns if the
 * hardware clock cannot be read.
 */
void
clock_initialize_calendar(void)
{
	mach_timespec_t	bbc_time, curr_time;
	spl_t		s;

	if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
		return;

	RTC_LOCK(s);
	if (rtclock.boottime == 0)
		rtclock.boottime = bbc_time.tv_sec;	/* first call only */
	(void) sysclk_gettime_internal(&curr_time);
	/* offset = wall time - current uptime */
	rtclock.calend_offset = bbc_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	RTC_UNLOCK(s);

	host_notify_calendar_change();
}
1281 | ||
91447636 A |
1282 | void |
1283 | clock_get_boottime_nanotime( | |
1284 | uint32_t *secs, | |
1285 | uint32_t *nanosecs) | |
1286 | { | |
1287 | *secs = rtclock.boottime; | |
1288 | *nanosecs = 0; | |
1289 | } | |
1290 | ||
1c79356b A |
1291 | void |
1292 | clock_timebase_info( | |
1293 | mach_timebase_info_t info) | |
1294 | { | |
91447636 | 1295 | info->numer = info->denom = 1; |
1c79356b A |
1296 | } |
1297 | ||
/*
 * Program this cpu's one-shot timer deadline.
 * Must be called with preemption disabled (operates on per-cpu state)
 * and only after a timer expiry function has been registered.
 * The LAPIC is re-armed only if the new deadline precedes the next
 * periodic tick; otherwise the tick interrupt will service it.
 */
void
clock_set_timer_deadline(
	uint64_t		deadline)
{
	spl_t			s;
	cpu_data_t		*pp = current_cpu_datap();
	rtclock_timer_t		*mytimer = &pp->cpu_rtc_timer;
	uint64_t		abstime;
	uint64_t		decr;

	assert(get_preemption_level() > 0);
	assert(rtclock_timer_expire);

	RTC_INTRS_OFF(s);
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	/* has_expired set => we're inside the expiry callback; the
	   interrupt handler will re-arm when the callback returns. */
	if (!mytimer->has_expired) {
		abstime = mach_absolute_time();
		if (mytimer->deadline < pp->cpu_rtc_tick_deadline) {
			decr = deadline_to_decrementer(mytimer->deadline,
						       abstime);
			rtc_lapic_set_timer(decr);
			pp->cpu_rtc_intr_deadline = mytimer->deadline;
			/* Trace the new decrementer value */
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) |
					DBG_FUNC_NONE, decr, 2, 0, 0, 0);
		}
	}
	RTC_INTRS_ON(s);
}
1328 | ||
1329 | void | |
1330 | clock_set_timer_func( | |
1331 | clock_timer_func_t func) | |
1332 | { | |
91447636 A |
1333 | if (rtclock_timer_expire == NULL) |
1334 | rtclock_timer_expire = func; | |
1c79356b A |
1335 | } |
1336 | ||
/*
 * Real-time clock device interrupt.
 * Handles (1) the periodic hertz tick, (2) expiry of this cpu's
 * one-shot timer, and (3) re-arming the LAPIC decrementer for the
 * earlier of the next tick or the timer deadline.
 * Entered, and must remain, with interrupts disabled.
 */
void
rtclock_intr(struct i386_interrupt_state *regs)
{
	uint64_t	abstime;
	uint32_t	latency;
	uint64_t	decr;
	uint64_t	decr_tick;
	uint64_t	decr_timer;
	cpu_data_t	*pp = current_cpu_datap();
	rtclock_timer_t	*mytimer = &pp->cpu_rtc_timer;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	/* Service latency (mod 2^32 ns) relative to the armed deadline */
	abstime = _rtc_nanotime_read();
	latency = (uint32_t) abstime - pp->cpu_rtc_intr_deadline;
	if (pp->cpu_rtc_tick_deadline <= abstime) {
		/* Periodic tick due: refresh nanotime, schedule the
		   next tick, and account cpu time via hertz_tick(). */
		rtc_nanotime_update();
		clock_deadline_for_periodic_event(
			NSEC_PER_HZ, abstime, &pp->cpu_rtc_tick_deadline);
		hertz_tick(
#if STAT_TIME
			NSEC_PER_HZ,
#endif
			/* usermode if VM86 mode or CPL != 0 */
			(regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0),
			regs->eip);
	}

	/* One-shot timer expiry.  has_expired is raised around the
	   callback so clock_set_timer_deadline() defers re-arming. */
	abstime = _rtc_nanotime_read();
	if (mytimer->is_set && mytimer->deadline <= abstime) {
		mytimer->has_expired = TRUE;
		mytimer->is_set = FALSE;
		(*rtclock_timer_expire)(abstime);
		assert(!ml_get_interrupts_enabled());
		mytimer->has_expired = FALSE;
	}

	/* Log the interrupt service latency (-ve value expected by tool) */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		-latency, (uint32_t)regs->eip, 0, 0, 0);

	/* Re-arm for the earlier of the tick or the timer deadline */
	abstime = _rtc_nanotime_read();
	decr_tick = deadline_to_decrementer(pp->cpu_rtc_tick_deadline, abstime);
	decr_timer = (mytimer->is_set) ?
			deadline_to_decrementer(mytimer->deadline, abstime) :
			DECREMENTER_MAX;
	decr = MIN(decr_tick, decr_timer);
	pp->cpu_rtc_intr_deadline = abstime + decr;

	rtc_lapic_set_timer(decr);

	/* Log the new decrementer value */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE,
		decr, 3, 0, 0, 0);

}
1c79356b | 1398 | |
91447636 A |
1399 | static void |
1400 | rtclock_alarm_expire( | |
1401 | __unused timer_call_param_t p0, | |
1402 | __unused timer_call_param_t p1) | |
1403 | { | |
1404 | mach_timespec_t clock_time; | |
1405 | ||
1406 | (void) sysclk_gettime_internal(&clock_time); | |
1407 | ||
1408 | clock_alarm_intr(SYSTEM_CLOCK, &clock_time); | |
1c79356b A |
1409 | } |
1410 | ||
1411 | void | |
1412 | clock_get_uptime( | |
0b4e3aa0 | 1413 | uint64_t *result) |
1c79356b | 1414 | { |
91447636 | 1415 | *result = rtc_nanotime_read(); |
55e303ae | 1416 | } |
1c79356b | 1417 | |
55e303ae A |
1418 | uint64_t |
1419 | mach_absolute_time(void) | |
1420 | { | |
91447636 A |
1421 | return rtc_nanotime_read(); |
1422 | } | |
1423 | ||
/*
 * Split an absolute time (nanoseconds) into seconds and microseconds
 * using the 64/32-bit divl instruction directly.
 * NOTE(review): divl faults if the quotient exceeds 32 bits; this
 * presumes abstime / NSEC_PER_SEC fits in 32 bits (~136 years of
 * uptime) — confirm acceptable.
 */
void
absolutetime_to_microtime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint32_t	remain;

	/* *secs = abstime / NSEC_PER_SEC; remain = abstime % NSEC_PER_SEC */
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (abstime), "r" (NSEC_PER_SEC));
	/* *microsecs = remain / NSEC_PER_USEC (remainder discarded) */
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
1441 | ||
1442 | void | |
1443 | clock_interval_to_deadline( | |
0b4e3aa0 A |
1444 | uint32_t interval, |
1445 | uint32_t scale_factor, | |
1446 | uint64_t *result) | |
1c79356b | 1447 | { |
0b4e3aa0 | 1448 | uint64_t abstime; |
1c79356b A |
1449 | |
1450 | clock_get_uptime(result); | |
1451 | ||
1452 | clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime); | |
1453 | ||
0b4e3aa0 | 1454 | *result += abstime; |
1c79356b A |
1455 | } |
1456 | ||
1457 | void | |
1458 | clock_interval_to_absolutetime_interval( | |
0b4e3aa0 A |
1459 | uint32_t interval, |
1460 | uint32_t scale_factor, | |
1461 | uint64_t *result) | |
1c79356b | 1462 | { |
0b4e3aa0 | 1463 | *result = (uint64_t)interval * scale_factor; |
1c79356b A |
1464 | } |
1465 | ||
1466 | void | |
1467 | clock_absolutetime_interval_to_deadline( | |
0b4e3aa0 A |
1468 | uint64_t abstime, |
1469 | uint64_t *result) | |
1c79356b A |
1470 | { |
1471 | clock_get_uptime(result); | |
1472 | ||
0b4e3aa0 | 1473 | *result += abstime; |
1c79356b A |
1474 | } |
1475 | ||
1476 | void | |
1477 | absolutetime_to_nanoseconds( | |
0b4e3aa0 A |
1478 | uint64_t abstime, |
1479 | uint64_t *result) | |
1c79356b | 1480 | { |
0b4e3aa0 | 1481 | *result = abstime; |
1c79356b A |
1482 | } |
1483 | ||
1484 | void | |
1485 | nanoseconds_to_absolutetime( | |
0b4e3aa0 A |
1486 | uint64_t nanoseconds, |
1487 | uint64_t *result) | |
1c79356b | 1488 | { |
0b4e3aa0 | 1489 | *result = nanoseconds; |
1c79356b A |
1490 | } |
1491 | ||
55e303ae | 1492 | void |
91447636 | 1493 | machine_delay_until( |
55e303ae A |
1494 | uint64_t deadline) |
1495 | { | |
1496 | uint64_t now; | |
1497 | ||
1498 | do { | |
1499 | cpu_pause(); | |
1500 | now = mach_absolute_time(); | |
1501 | } while (now < deadline); | |
1502 | } |