]>
Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
91447636 | 2 | * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. |
1c79356b A |
3 | * |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
e5568f75 A |
6 | * The contents of this file constitute Original Code as defined in and |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
1c79356b | 11 | * |
e5568f75 A |
12 | * This Original Code and all software distributed under the License are |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
1c79356b A |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
e5568f75 A |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
1c79356b A |
19 | * |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | /* | |
23 | * @OSF_COPYRIGHT@ | |
24 | */ | |
25 | ||
26 | /* | |
27 | * File: i386/rtclock.c | |
28 | * Purpose: Routines for handling the machine dependent | |
91447636 A |
29 | * real-time clock. Historically, this clock is |
30 | * generated by the Intel 8254 Programmable Interval | |
31 | * Timer, but local apic timers are now used for | |
32 | * this purpose with the master time reference being | |
33 | * the cpu clock counted by the timestamp MSR. | |
1c79356b A |
34 | */ |
35 | ||
1c79356b | 36 | #include <platforms.h> |
1c79356b | 37 | #include <mach_kdb.h> |
55e303ae A |
38 | |
39 | #include <mach/mach_types.h> | |
40 | ||
1c79356b | 41 | #include <kern/cpu_data.h> |
91447636 | 42 | #include <kern/cpu_number.h> |
1c79356b | 43 | #include <kern/clock.h> |
55e303ae | 44 | #include <kern/host_notify.h> |
1c79356b A |
45 | #include <kern/macro_help.h> |
46 | #include <kern/misc_protos.h> | |
47 | #include <kern/spl.h> | |
91447636 | 48 | #include <kern/assert.h> |
1c79356b A |
49 | #include <mach/vm_prot.h> |
50 | #include <vm/pmap.h> | |
51 | #include <vm/vm_kern.h> /* for kernel_map */ | |
52 | #include <i386/ipl.h> | |
53 | #include <i386/pit.h> | |
54 | #include <i386/pio.h> | |
55 | #include <i386/misc_protos.h> | |
55e303ae A |
56 | #include <i386/proc_reg.h> |
57 | #include <i386/machine_cpu.h> | |
91447636 A |
58 | #include <i386/mp.h> |
59 | #include <i386/cpuid.h> | |
60 | #include <i386/cpu_data.h> | |
61 | #include <i386/cpu_threads.h> | |
62 | #include <i386/perfmon.h> | |
63 | #include <i386/machine_routines.h> | |
64 | #include <i386/AT386/bbclock_entries.h> | |
55e303ae | 65 | #include <pexpert/pexpert.h> |
91447636 A |
66 | #include <machine/limits.h> |
67 | #include <machine/commpage.h> | |
68 | #include <sys/kdebug.h> | |
69 | ||
/*
 * Local min/max helpers.
 * NOTE: function-like macros -- each argument may be evaluated twice;
 * do not pass expressions with side effects.
 */
#define MAX(a,b)	(((a)<(b))?(b):(a))
#define MIN(a,b)	(((a)<(b))?(a):(b))

/* Tick period: NSEC_PER_SEC/100, i.e. a 100Hz tick. */
#define NSEC_PER_HZ	(NSEC_PER_SEC / 100)	/* nsec per tick */

/* Reported UI cpu frequencies are rounded to a multiple of this (10MHz). */
#define UI_CPUFREQ_ROUNDING_FACTOR	10000000
1c79356b A |
76 | |
/* System clock (uptime) device routines, exported via sysclk_ops below. */
int sysclk_config(void);

int sysclk_init(void);

kern_return_t sysclk_gettime(
	mach_timespec_t	*cur_time);

kern_return_t sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);

void sysclk_setalarm(
	mach_timespec_t	*alarm_time);

/*
 * Lists of clock routines.
 * The 0 entries are unimplemented operations (struct clock_ops is
 * declared elsewhere; presumably the set-time/set-attr slots).
 */
struct clock_ops sysclk_ops = {
	sysclk_config,		sysclk_init,
	sysclk_gettime,		0,
	sysclk_getattr,		0,
	sysclk_setalarm,
};
101 | ||
/* Calendar (wall-clock) device routines, exported via calend_ops below. */
int calend_config(void);

int calend_init(void);

kern_return_t calend_gettime(
	mach_timespec_t	*cur_time);

kern_return_t calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);

/*
 * Clock routine vector for the calendar clock.
 * The 0 entries are unimplemented operations (struct clock_ops is
 * declared elsewhere); note there is no setalarm for the calendar.
 */
struct clock_ops calend_ops = {
	calend_config,		calend_init,
	calend_gettime,		0,
	calend_getattr,		0,
	0,
};
120 | ||
/* local data declarations */

/* Callback invoked to drive timer expirations (set elsewhere in this file). */
static clock_timer_func_t	rtclock_timer_expire;

/* Timer call backing the clock alarm facility (armed by sysclk_setalarm). */
static timer_call_data_t	rtclock_alarm_timer;

static void	rtclock_alarm_expire(
			timer_call_param_t	p0,
			timer_call_param_t	p1);

/* Global real-time clock state, guarded by rtclock.lock. */
struct {
	mach_timespec_t		calend_offset;	/* calendar vs uptime offset */
	boolean_t		calend_is_set;	/* calendar has been set */

	int64_t			calend_adjtotal; /* outstanding calendar adjustment */
	int32_t			calend_adjdelta; /* per-step calendar adjustment */

	uint32_t		boottime;	/* presumably boot time in seconds --
						   not written in this chunk */

	mach_timebase_info_data_t	timebase_const;	/* tsc -> nsec ratio
							   (see tsc_to_nanoseconds) */

	decl_simple_lock_data(,lock)	/* real-time clock device lock */
} rtclock;

boolean_t	rtc_initialized = FALSE;
clock_res_t	rtc_intr_nsec = NSEC_PER_HZ;	/* interrupt res */
uint64_t	rtc_cycle_count;	/* clocks in 1/20th second */
uint64_t	rtc_cyc_per_sec;	/* processor cycles per sec */
uint32_t	rtc_boot_frequency;	/* provided by 1st speed-step */
uint32_t	rtc_quant_scale;	/* clock to nanos multiplier */
uint32_t	rtc_quant_shift;	/* clock to nanos right shift */
uint64_t	rtc_decrementer_min;	/* lower bound on decrementer interval */

static mach_timebase_info_data_t	rtc_lapic_scale; /* nsec to lapic count */
9bccf70c | 155 | |
/*
 * Macros to lock/unlock real-time clock data.
 */
/* Raise to clock interrupt priority, saving the previous level in s. */
#define RTC_INTRS_OFF(s)		\
	(s) = splclock()

/* Restore the interrupt priority level saved in s. */
#define RTC_INTRS_ON(s)			\
	splx(s)

/* Block clock interrupts, then take the rtclock simple lock. */
#define RTC_LOCK(s)			\
MACRO_BEGIN				\
	RTC_INTRS_OFF(s);		\
	simple_lock(&rtclock.lock);	\
MACRO_END

/* Release the rtclock simple lock, then restore the interrupt level. */
#define RTC_UNLOCK(s)			\
MACRO_BEGIN				\
	simple_unlock(&rtclock.lock);	\
	RTC_INTRS_ON(s);		\
MACRO_END
176 | ||
/*
 * i8254 control.  ** MONUMENT **
 *
 * The i8254 is a traditional PC device with some arbitrary characteristics.
 * Basically, it is a register that counts at a fixed rate and can be
 * programmed to generate an interrupt every N counts.  The count rate is
 * clknum counts per sec (see pit.h), historically 1193167=14.318MHz/12
 * but the more accurate value is 1193182=14.31818MHz/12. [14.31818 MHz being
 * the master crystal oscillator reference frequency since the very first PC.]
 * Various constants are computed based on this value, and we calculate
 * them at init time for execution efficiency.  To obtain sufficient
 * accuracy, some of the calculation are most easily done in floating
 * point and then converted to int.
 *
 */

/*
 * Forward decl.
 */

/* Record a newly measured/derived cpu clock rate (defined later in file). */
static uint64_t rtc_set_cyc_per_sec(uint64_t cycles);
/* Current uptime in nanoseconds (mach_absolute_time backing routine). */
uint64_t	rtc_nanotime_read(void);
55e303ae A |
199 | |
/*
 * create_mul_quant_GHZ
 * create a constant used to multiply the TSC by to convert to nanoseconds.
 * This is a 32 bit number and the TSC *MUST* have a frequency higher than
 * 1000Mhz for this routine to work.
 *
 * The theory here is that we know how many TSCs-per-sec the processor runs at.
 * Normally to convert this to nanoseconds you would multiply the current
 * timestamp by 1000000000 (a billion) then divide by TSCs-per-sec.
 * Unfortunately the TSC is 64 bits which would leave us with 96 bit
 * intermediate results from the multiply that must be divided by.
 * Usually that is
 *	uint96 = tsc * numer
 *	nanos = uint96 / denom
 * Instead, we create this quant constant and it becomes the numerator,
 * the denominator can then be 0x100000000 which makes our division as simple
 * as forgetting the lower 32 bits of the result. We can also pass this number
 * to user space as the numer and pass 0xFFFFFFFF (RTC_FAST_DENOM) as the denom
 * to convert raw counts to nanos. The difference is so small as to be
 * undetectable by anything.
 *
 * Unfortunately we can not do this for sub GHZ processors. In this case, all
 * we do is pass the CPU speed in raw as the denom and we pass in 1000000000
 * as the numerator. No short cuts allowed
 */
#define RTC_FAST_DENOM	0xFFFFFFFF	/* sentinel denom selecting fast path */
inline static uint32_t
create_mul_quant_GHZ(int shift, uint32_t quant)
{
	/*
	 * quant is presumably the TSC count per 1/20 second (cf. timeRDTSC,
	 * which measures exactly that) -- hence the /20 pairing here.
	 * Result: nanos-per-tick as a <shift>-bit fixed-point fraction.
	 */
	return (uint32_t)((((uint64_t)NSEC_PER_SEC/20) << shift) / quant);
}
91447636 A |
/*
 * This routine takes a value of raw TSC ticks and applies the passed mul_quant
 * generated by create_mul_quant() This is our internal routine for creating
 * nanoseconds.
 * Since we don't really have uint96_t this routine basically does this....
 *	uint96_t intermediate = (*value) * scale
 *	return (intermediate >> 32)
 */
inline static uint64_t
fast_get_nano_from_abs(uint64_t value, int scale)
{
	/*
	 * i386: value arrives in edx:eax ("+A"), scale in ecx.
	 * Two 32x32->64 multiplies form the 96-bit product; its upper
	 * 64 bits are recombined into edx:eax and returned.
	 */
	asm (" movl %%edx,%%esi \n\t"	/* save value.hi */
	     " mull %%ecx \n\t"		/* value.lo * scale */
	     " movl %%edx,%%edi \n\t"	/* keep high half of low partial product */
	     " movl %%esi,%%eax \n\t"	/* recover value.hi */
	     " mull %%ecx \n\t"		/* value.hi * scale */
	     " xorl %%ecx,%%ecx \n\t"	/* zero for the carry add below */
	     " addl %%edi,%%eax \n\t"	/* combine partial products... */
	     " adcl %%ecx,%%edx "	/* ...propagating carry into top word */
	     : "+A" (value)
	     : "c" (scale)
	     : "%esi", "%edi");
	return value;
}
255 | ||
/*
 * This routine basically does this...
 *	ts.tv_sec = nanos / 1000000000;		create seconds
 *	ts.tv_nsec = nanos % 1000000000;	create remainder nanos
 */
inline static mach_timespec_t
nanos_to_timespec(uint64_t nanos)
{
	union {
		mach_timespec_t ts;
		uint64_t u64;
	} ret;
	ret.u64 = nanos;
	/*
	 * i386 divl: divides edx:eax (the 64-bit nanos) by NSEC_PER_SEC,
	 * leaving quotient (seconds) in eax and remainder (nanos) in edx;
	 * the union maps these onto ts (assumes tv_sec is the low word --
	 * presumably guaranteed by mach_timespec_t's layout).
	 * NOTE(review): divl raises #DE if the quotient exceeds 32 bits,
	 * i.e. for >= 2^32 seconds of uptime -- presumably unreachable.
	 */
	asm volatile("divl %1" : "+A" (ret.u64) : "r" (NSEC_PER_SEC));
	return ret.ts;
}
272 | ||
/*
 * The following two routines perform the 96 bit arithmetic we need to
 * convert generic absolute<->nanoseconds
 * The multiply routine takes a uint64_t and a uint32_t and returns the result
 * in a uint32_t[3] array.
 * The divide routine takes this uint32_t[3] array and divides it by a uint32_t
 * returning a uint64_t
 */
inline static void
longmul(uint64_t *abstime, uint32_t multiplicand, uint32_t *result)
{
	/*
	 * eax = abstime (pointer), ecx = multiplicand, %2 = result pointer.
	 * Two 32x32->64 mulls form the 96-bit product, which is stored
	 * little-endian into result[0..2].
	 */
	asm volatile(
        " pushl %%ebx \n\t"
        " movl %%eax,%%ebx \n\t"	/* stash the abstime pointer */
        " movl (%%eax),%%eax \n\t"	/* low word of *abstime */
        " mull %%ecx \n\t"
        " xchg %%eax,%%ebx \n\t"	/* low partial in ebx, ptr in eax */
        " pushl %%edx \n\t"		/* save high half of low partial */
        " movl 4(%%eax),%%eax \n\t"	/* high word of *abstime */
        " mull %%ecx \n\t"
        " movl %2,%%ecx \n\t"		/* result pointer */
        " movl %%ebx,(%%ecx) \n\t"	/* result[0] */
        " popl %%ebx \n\t"
        " addl %%ebx,%%eax \n\t"	/* combine middle words */
        " popl %%ebx \n\t"
        " movl %%eax,4(%%ecx) \n\t"	/* result[1] */
        " adcl $0,%%edx \n\t"		/* carry into top word */
        " movl %%edx,8(%%ecx) // and save it"	/* result[2] */
        : : "a"(abstime), "c"(multiplicand), "m"(result));

}
304 | ||
/*
 * Divide the 96-bit little-endian value in numer[0..2] by denom,
 * returning the 64-bit quotient (see longmul above).
 */
inline static uint64_t
longdiv(uint32_t *numer, uint32_t denom)
{
	uint64_t	result;
	/*
	 * eax = numer (pointer), ecx = denom.  Two chained divls: first
	 * divide the upper 64 bits, then bring in the low word; the 64-bit
	 * quotient lands in edx:eax ("=A").
	 * NOTE(review): divl faults if a partial quotient overflows 32
	 * bits -- callers presumably guarantee this cannot happen.
	 */
	asm volatile(
        " pushl %%ebx \n\t"
        " movl %%eax,%%ebx \n\t"	/* stash the numer pointer */
        " movl 8(%%eax),%%edx \n\t"	/* top word */
        " movl 4(%%eax),%%eax \n\t"	/* middle word */
        " divl %%ecx \n\t"		/* high part of quotient */
        " xchg %%ebx,%%eax \n\t"
        " movl (%%eax),%%eax \n\t"	/* low word */
        " divl %%ecx \n\t"		/* low part of quotient */
        " xchg %%ebx,%%edx \n\t"	/* assemble quotient in edx:eax */
        " popl %%ebx \n\t"
        : "=A"(result) : "a"(numer),"c"(denom));
	return result;
}
323 | ||
/*
 * Enable or disable timer 2.
 * Port 0x61 controls timer 2:
 *   bit 0 gates the clock,
 *   bit 1 gates output to speaker.
 */
inline static void
enable_PIT2(void)
{
	/* Read port 0x61, clear bits 0-1, set bit 0: clock on, speaker off. */
	asm volatile(
        " inb $0x61,%%al \n\t"
        " and $0xFC,%%al \n\t"
        " or  $1,%%al \n\t"
        " outb %%al,$0x61 \n\t"
        : : : "%al" );
}
340 | ||
/* Gate off timer 2's clock (clear bits 0-1 of port 0x61). */
inline static void
disable_PIT2(void)
{
	asm volatile(
        " inb $0x61,%%al \n\t"
        " and $0xFC,%%al \n\t"
        " outb %%al,$0x61 \n\t"
        : : : "%al" );
}
350 | ||
/*
 * Load a 16-bit count into PIT counter 2 (one-shot mode) and wait for
 * the hardware to latch it.  value is passed in %edx: %dl = low byte,
 * %dh = high byte.
 */
inline static void
set_PIT2(int value)
{
	/*
	 * First, tell the clock we are going to write 16 bits to the counter
	 * and enable one-shot mode (command 0xB8 to port 0x43)
	 * Then write the two bytes into the PIT2 clock register (port 0x42).
	 * Loop until the value is "realized" in the clock,
	 * this happens on the next tick.
	 */
	asm volatile(
        " movb $0xB8,%%al \n\t"
        " outb %%al,$0x43 \n\t"
        " movb %%dl,%%al \n\t"
        " outb %%al,$0x42 \n\t"
        " movb %%dh,%%al \n\t"
        " outb %%al,$0x42 \n"
        "1: inb $0x42,%%al \n\t"	/* re-read until high byte matches */
        " inb $0x42,%%al \n\t"
        " cmp %%al,%%dh \n\t"
        " jne 1b"
        : : "d"(value) : "%al");
}
374 | ||
/*
 * Latch and read PIT counter 2.
 * Returns the tsc sampled between the latch command and the read;
 * the latched 16-bit count is stored through *value.
 */
inline static uint64_t
get_PIT2(unsigned int *value)
{
	register uint64_t	result;
	/*
	 * This routine first latches the time (command 0x80 to port 0x43),
	 * then gets the time stamp so we know how long the read will take later.
	 * Read (from port 0x42) and return the current value of the timer.
	 */
	asm volatile(
        " xorl %%ecx,%%ecx \n\t"
        " movb $0x80,%%al \n\t"
        " outb %%al,$0x43 \n\t"		/* counter-latch command */
        " rdtsc \n\t"
        " pushl %%eax \n\t"		/* preserve tsc.lo across the inbs */
        " inb $0x42,%%al \n\t"		/* low byte of latched count */
        " movb %%al,%%cl \n\t"
        " inb $0x42,%%al \n\t"		/* high byte of latched count */
        " movb %%al,%%ch \n\t"
        " popl %%eax "
        : "=A"(result), "=c"(*value));
	return result;
}
398 | ||
91447636 A |
399 | /* |
400 | * timeRDTSC() | |
401 | * This routine sets up PIT counter 2 to count down 1/20 of a second. | |
402 | * It pauses until the value is latched in the counter | |
403 | * and then reads the time stamp counter to return to the caller. | |
404 | */ | |
405 | static uint64_t | |
55e303ae A |
406 | timeRDTSC(void) |
407 | { | |
91447636 | 408 | int attempts = 0; |
55e303ae A |
409 | uint64_t latchTime; |
410 | uint64_t saveTime,intermediate; | |
91447636 | 411 | unsigned int timerValue, lastValue; |
55e303ae | 412 | boolean_t int_enabled; |
91447636 A |
413 | /* |
414 | * Table of correction factors to account for | |
415 | * - timer counter quantization errors, and | |
416 | * - undercounts 0..5 | |
417 | */ | |
418 | #define SAMPLE_CLKS_EXACT (((double) CLKNUM) / 20.0) | |
419 | #define SAMPLE_CLKS_INT ((int) CLKNUM / 20) | |
420 | #define SAMPLE_NSECS (2000000000LL) | |
421 | #define SAMPLE_MULTIPLIER (((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT) | |
422 | #define ROUND64(x) ((uint64_t)((x) + 0.5)) | |
423 | uint64_t scale[6] = { | |
424 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)), | |
425 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)), | |
426 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)), | |
427 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)), | |
428 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)), | |
429 | ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5)) | |
430 | }; | |
55e303ae A |
431 | |
432 | int_enabled = ml_set_interrupts_enabled(FALSE); | |
433 | ||
91447636 A |
434 | restart: |
435 | if (attempts >= 2) | |
436 | panic("timeRDTSC() calibation failed with %d attempts\n", attempts); | |
437 | attempts++; | |
55e303ae A |
438 | enable_PIT2(); // turn on PIT2 |
439 | set_PIT2(0); // reset timer 2 to be zero | |
91447636 | 440 | latchTime = rdtsc64(); // get the time stamp to time |
55e303ae | 441 | latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes |
91447636 A |
442 | set_PIT2(SAMPLE_CLKS_INT); // set up the timer for (almost) 1/20th a second |
443 | saveTime = rdtsc64(); // now time how long a 20th a second is... | |
444 | get_PIT2(&lastValue); | |
445 | get_PIT2(&lastValue); // read twice, first value may be unreliable | |
55e303ae A |
446 | do { |
447 | intermediate = get_PIT2(&timerValue); | |
91447636 A |
448 | if (timerValue > lastValue) { |
449 | printf("Hey we are going backwards! %u -> %u, restarting timing\n", | |
450 | timerValue,lastValue); | |
451 | set_PIT2(0); | |
452 | disable_PIT2(); | |
453 | goto restart; | |
454 | } | |
455 | lastValue = timerValue; | |
456 | } while (timerValue > 5); | |
457 | kprintf("timerValue %d\n",timerValue); | |
458 | kprintf("intermediate 0x%016llx\n",intermediate); | |
459 | kprintf("saveTime 0x%016llx\n",saveTime); | |
55e303ae | 460 | |
91447636 A |
461 | intermediate -= saveTime; // raw count for about 1/20 second |
462 | intermediate *= scale[timerValue]; // rescale measured time spent | |
463 | intermediate /= SAMPLE_NSECS; // so its exactly 1/20 a second | |
464 | intermediate += latchTime; // add on our save fudge | |
465 | ||
466 | set_PIT2(0); // reset timer 2 to be zero | |
467 | disable_PIT2(); // turn off PIT 2 | |
468 | ||
55e303ae A |
469 | ml_set_interrupts_enabled(int_enabled); |
470 | return intermediate; | |
471 | } | |
472 | ||
473 | static uint64_t | |
91447636 | 474 | tsc_to_nanoseconds(uint64_t abstime) |
55e303ae A |
475 | { |
476 | uint32_t numer; | |
477 | uint32_t denom; | |
55e303ae A |
478 | uint32_t intermediate[3]; |
479 | ||
480 | numer = rtclock.timebase_const.numer; | |
481 | denom = rtclock.timebase_const.denom; | |
91447636 | 482 | if (denom == RTC_FAST_DENOM) { |
55e303ae A |
483 | abstime = fast_get_nano_from_abs(abstime, numer); |
484 | } else { | |
485 | longmul(&abstime, numer, intermediate); | |
486 | abstime = longdiv(intermediate, denom); | |
487 | } | |
488 | return abstime; | |
489 | } | |
490 | ||
491 | inline static mach_timespec_t | |
91447636 | 492 | tsc_to_timespec(void) |
55e303ae A |
493 | { |
494 | uint64_t currNanos; | |
91447636 | 495 | currNanos = rtc_nanotime_read(); |
55e303ae A |
496 | return nanos_to_timespec(currNanos); |
497 | } | |
498 | ||
91447636 A |
499 | #define DECREMENTER_MAX UINT_MAX |
500 | static uint32_t | |
501 | deadline_to_decrementer( | |
502 | uint64_t deadline, | |
503 | uint64_t now) | |
504 | { | |
505 | uint64_t delta; | |
506 | ||
507 | if (deadline <= now) | |
508 | return rtc_decrementer_min; | |
509 | else { | |
510 | delta = deadline - now; | |
511 | return MIN(MAX(rtc_decrementer_min,delta),DECREMENTER_MAX); | |
512 | } | |
513 | } | |
514 | ||
/*
 * Busy-wait the local apic one-shot timer down from initial_count to
 * zero with interrupts disabled (unmasked timer, so no interrupt is
 * taken), and return how long the countdown took in nanoseconds as
 * measured by the tsc.
 */
static inline uint64_t
lapic_time_countdown(uint32_t initial_count)
{
	boolean_t		state;
	uint64_t		start_time;
	uint64_t		stop_time;
	lapic_timer_count_t	count;

	state = ml_set_interrupts_enabled(FALSE);
	lapic_set_timer(FALSE, one_shot, divide_by_1, initial_count);
	start_time = rdtsc64();
	do {
		lapic_get_timer(NULL, NULL, NULL, &count);
	} while (count > 0);
	stop_time = rdtsc64();
	ml_set_interrupts_enabled(state);

	return tsc_to_nanoseconds(stop_time - start_time);
}
1c79356b | 534 | |
/*
 * Calibrate the local apic timer against nanotime (and hence the tsc),
 * producing rtc_lapic_scale: the numer/denom ratio that converts
 * nanoseconds into lapic timer counts.  No-op without a local apic.
 */
static void
rtc_lapic_timer_calibrate(void)
{
	uint32_t	nsecs;
	uint64_t	countdown;

	if (!(cpuid_features() & CPUID_FEATURE_APIC))
		return;

	/*
	 * Set the local apic timer counting down to zero without an interrupt.
	 * Use the timestamp to calculate how long this takes.
	 */
	nsecs = (uint32_t) lapic_time_countdown(rtc_intr_nsec);

	/*
	 * Compute a countdown ratio for a given time in nanoseconds.
	 * That is, countdown = time * numer / denom.
	 * (First pass: a count of rtc_intr_nsec took nsecs ns, so the count
	 * needed for rtc_intr_nsec ns is rtc_intr_nsec^2 / nsecs.)
	 */
	countdown = (uint64_t)rtc_intr_nsec * (uint64_t)rtc_intr_nsec / nsecs;

	/* Second countdown with the derived count refines the measurement. */
	nsecs = (uint32_t) lapic_time_countdown((uint32_t) countdown);

	rtc_lapic_scale.numer = countdown;
	rtc_lapic_scale.denom = nsecs;

	kprintf("rtc_lapic_timer_calibrate() scale: %d/%d\n",
		(uint32_t) countdown, nsecs);
}
564 | ||
565 | static void | |
566 | rtc_lapic_set_timer( | |
567 | uint32_t interval) | |
568 | { | |
569 | uint64_t count; | |
570 | ||
571 | assert(rtc_lapic_scale.denom); | |
572 | ||
573 | count = interval * (uint64_t) rtc_lapic_scale.numer; | |
574 | count /= rtc_lapic_scale.denom; | |
575 | ||
576 | lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count); | |
577 | } | |
578 | ||
579 | static void | |
580 | rtc_lapic_start_ticking(void) | |
581 | { | |
582 | uint64_t abstime; | |
583 | uint64_t first_tick; | |
584 | uint64_t decr; | |
585 | ||
586 | abstime = mach_absolute_time(); | |
587 | first_tick = abstime + NSEC_PER_HZ; | |
588 | current_cpu_datap()->cpu_rtc_tick_deadline = first_tick; | |
589 | decr = deadline_to_decrementer(first_tick, abstime); | |
590 | rtc_lapic_set_timer(decr); | |
1c79356b A |
591 | } |
592 | ||
593 | /* | |
594 | * Configure the real-time clock device. Return success (1) | |
595 | * or failure (0). | |
596 | */ | |
597 | ||
598 | int | |
599 | sysclk_config(void) | |
600 | { | |
1c79356b | 601 | |
1c79356b A |
602 | mp_disable_preemption(); |
603 | if (cpu_number() != master_cpu) { | |
604 | mp_enable_preemption(); | |
605 | return(1); | |
606 | } | |
607 | mp_enable_preemption(); | |
91447636 A |
608 | |
609 | timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL); | |
610 | ||
611 | simple_lock_init(&rtclock.lock, 0); | |
612 | ||
613 | return (1); | |
614 | } | |
615 | ||
616 | ||
/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (tsc) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * However, the cpu clock rate is not only platform-dependent but can change
 * (speed-step) dynamically.  Hence tsc is converted into nanoseconds which is
 * identical to mach_absolute_time.  The conversion of tsc to nanoseconds is
 * encapsulated by nanotime.
 *
 * The kernel maintains nanotime information recording:
 * 	- the current ratio of tsc to nanoseconds
 *	  with this ratio expressed as a 32-bit scale and shift
 *	  (power of 2 divider);
 *	- the tsc (step_tsc) and nanotime (step_ns) at which the current
 *	  ratio (clock speed) began.
 * So a tsc value can be converted to nanotime by:
 *
 *	nanotime = (((tsc - step_tsc)*scale) >> shift) + step_ns
 *
 * In general, (tsc - step_tsc) is a 64-bit quantity with the scaling
 * involving a 96-bit intermediate value.  However, by saving the converted
 * values at each tick (or at any intervening speed-step) - base_tsc and
 * base_ns - we can perform conversions relative to these and be assured that
 * (tsc - tick_tsc) is 32-bits.  Hence:
 *
 * 	fast_nanotime = (((tsc - base_tsc)*scale) >> shift) + base_ns
 *
 * The tuple {base_tsc, base_ns, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read.  A duplicate check_tsc is
 * appended so that the consistency of the read can be verified.  Note that
 * this scheme is essential for MP systems in which the commpage is updated
 * by the master cpu but may be read concurrently by other cpus.
 *
 */
/* Publish the given nanotime record to the commpage (master cpu only). */
static inline void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
	commpage_nanotime_t	cp_nanotime;

	/* Only the master cpu updates the commpage */
	if (cpu_number() != master_cpu)
		return;

	cp_nanotime.nt_base_tsc = rntp->rnt_tsc;
	cp_nanotime.nt_base_ns = rntp->rnt_nanos;
	cp_nanotime.nt_scale = rntp->rnt_scale;
	cp_nanotime.nt_shift = rntp->rnt_shift;

	commpage_set_nanotime(&cp_nanotime);
}
668 | ||
/*
 * Initialize this cpu's nanotime record.
 * The master cpu seeds the record from the tsc and the current
 * calibration (rtc_quant_scale/shift); other cpus copy the master's
 * record, looping until a consistent snapshot is obtained (i.e.
 * rnt_tsc unchanged across the copy).
 */
static void
rtc_nanotime_init(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	rtc_nanotime_t	*master_rntp = &cpu_datap(master_cpu)->cpu_rtc_nanotime;

	if (cpu_number() == master_cpu) {
		rntp->rnt_tsc = rdtsc64();
		rntp->rnt_nanos = tsc_to_nanoseconds(rntp->rnt_tsc);
		rntp->rnt_scale = rtc_quant_scale;
		rntp->rnt_shift = rtc_quant_shift;
		rntp->rnt_step_tsc = 0ULL;
		rntp->rnt_step_nanos = 0ULL;
	} else {
		/*
		 * Copy master processor's nanotime info.
		 * Loop required in case this changes while copying.
		 */
		do {
			*rntp = *master_rntp;
		} while (rntp->rnt_tsc != master_rntp->rnt_tsc);
	}
}
692 | ||
693 | static inline void | |
694 | _rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t tsc) | |
695 | { | |
696 | uint64_t tsc_delta; | |
697 | uint64_t ns_delta; | |
698 | ||
699 | tsc_delta = tsc - rntp->rnt_step_tsc; | |
700 | ns_delta = tsc_to_nanoseconds(tsc_delta); | |
701 | rntp->rnt_nanos = rntp->rnt_step_nanos + ns_delta; | |
702 | rntp->rnt_tsc = tsc; | |
703 | } | |
704 | ||
705 | static void | |
706 | rtc_nanotime_update(void) | |
707 | { | |
708 | rtc_nanotime_t *rntp = ¤t_cpu_datap()->cpu_rtc_nanotime; | |
709 | ||
710 | assert(get_preemption_level() > 0); | |
711 | assert(!ml_get_interrupts_enabled()); | |
712 | ||
713 | _rtc_nanotime_update(rntp, rdtsc64()); | |
714 | rtc_nanotime_set_commpage(rntp); | |
715 | } | |
716 | ||
/*
 * Begin a new nanotime "step" after a clock-rate change: fold elapsed
 * time in under the old scale, then record the new scale/shift and mark
 * this instant as the new step origin.  Interrupts must be off.
 */
static void
rtc_nanotime_scale_update(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	uint64_t	tsc = rdtsc64();

	assert(!ml_get_interrupts_enabled());

	/*
	 * Update time based on past scale.
	 */
	_rtc_nanotime_update(rntp, tsc);

	/*
	 * Update scale and timestamp this update.
	 */
	rntp->rnt_scale = rtc_quant_scale;
	rntp->rnt_shift = rtc_quant_shift;
	rntp->rnt_step_tsc = rntp->rnt_tsc;
	rntp->rnt_step_nanos = rntp->rnt_nanos;

	/* Export update to userland */
	rtc_nanotime_set_commpage(rntp);
}
741 | ||
/*
 * Convert the current tsc to nanoseconds using this cpu's nanotime
 * record.  Returns 0 before calibration (rnt_scale still zero).
 * Deltas that fit in 32 bits take the fixed-point scale/shift fast
 * path; larger deltas fall back to the full 96-bit conversion.
 * Fields are read without locking -- the caller (rtc_nanotime_read)
 * validates consistency by re-checking rnt_tsc afterwards.
 */
static uint64_t
_rtc_nanotime_read(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	uint64_t	rnt_tsc;
	uint32_t	rnt_scale;
	uint32_t	rnt_shift;
	uint64_t	rnt_nanos;
	uint64_t	tsc;
	uint64_t	tsc_delta;

	rnt_scale = rntp->rnt_scale;
	if (rnt_scale == 0)
		return 0ULL;

	rnt_shift = rntp->rnt_shift;
	rnt_nanos = rntp->rnt_nanos;
	rnt_tsc = rntp->rnt_tsc;
	tsc = rdtsc64();

	tsc_delta = tsc - rnt_tsc;
	/* Delta too big for the 32-bit fast path: do the full conversion. */
	if ((tsc_delta >> 32) != 0)
		return rnt_nanos + tsc_to_nanoseconds(tsc_delta);

	/* Let the compiler optimize(?): */
	if (rnt_shift == 32)
		return rnt_nanos + ((tsc_delta * rnt_scale) >> 32);
	else
		return rnt_nanos + ((tsc_delta * rnt_scale) >> rnt_shift);
}
772 | ||
/*
 * Return the current uptime in nanoseconds (the backing routine for
 * mach_absolute_time on this platform).
 */
uint64_t
rtc_nanotime_read(void)
{
	uint64_t	result;
	uint64_t	rnt_tsc;
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	/*
	 * Use timestamp to ensure the uptime record isn't changed.
	 * This avoids disabling interrupts.
	 * And note this is a per-cpu structure hence no locking.
	 */
	do {
		rnt_tsc = rntp->rnt_tsc;
		result = _rtc_nanotime_read();
	} while (rnt_tsc != rntp->rnt_tsc);

	return result;
}
792 | ||
793 | ||
/*
 * This function is called by the speed-step driver when a
 * change of cpu clock frequency is about to occur.
 * The scale is not changed until rtc_clock_stepped() is called.
 * Between these times there is an uncertainty in exactly when
 * the change takes effect.  FIXME: by using another timing source
 * we could eliminate this error.
 */
void
rtc_clock_stepping(__unused uint32_t new_frequency,
		   __unused uint32_t old_frequency)
{
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);
	/* Fold elapsed time in under the current (old) scale. */
	rtc_nanotime_scale_update();
	ml_set_interrupts_enabled(istate);
}
812 | ||
/*
 * This function is called by the speed-step driver when a
 * change of cpu clock frequency has just occurred.  This change
 * is expressed as a ratio relative to the boot clock rate.
 */
void
rtc_clock_stepped(uint32_t new_frequency, uint32_t old_frequency)
{
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);
	if (rtc_boot_frequency == 0) {
		/*
		 * At the first ever stepping, old frequency is the real
		 * initial clock rate. This step and all others are based
		 * relative to this initial frequency at which the tsc
		 * calibration was made. Hence we must remember this base
		 * frequency as reference.
		 */
		rtc_boot_frequency = old_frequency;
	}
	/* Rescale the calibrated cycle count to the new frequency. */
	rtc_set_cyc_per_sec(rtc_cycle_count * new_frequency /
				rtc_boot_frequency);
	rtc_nanotime_scale_update();
	ml_set_interrupts_enabled(istate);
}
839 | ||
/*
 * rtc_sleep_wakeup() is called from acpi on awakening from a S3 sleep
 */
void
rtc_sleep_wakeup(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);

	/*
	 * Reset nanotime.
	 * The timestamp counter will have been reset
	 * but nanotime (uptime) marches onward.
	 * We assume that we're still at the former cpu frequency
	 * (rnt_scale/rnt_shift are deliberately left unchanged).
	 */
	rntp->rnt_tsc = rdtsc64();
	rntp->rnt_step_tsc = 0ULL;
	rntp->rnt_step_nanos = rntp->rnt_nanos;
	rtc_nanotime_set_commpage(rntp);

	/* Restart tick interrupts from the LAPIC timer */
	rtc_lapic_start_ticking();

	ml_set_interrupts_enabled(istate);
}
868 | ||
/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 *
 * Called per-cpu with preemption disabled for the body; only the master
 * cpu performs the TSC calibration and one-time global setup.
 * Always returns 1.
 */
int
sysclk_init(void)
{
	uint64_t	cycles;

	mp_disable_preemption();
	if (cpu_number() == master_cpu) {
		/*
		 * Perform calibration.
		 * The PIT is used as the reference to compute how many
		 * TSC counts (cpu clock cycles) occur per second.
		 */
		rtc_cycle_count = timeRDTSC();
		cycles = rtc_set_cyc_per_sec(rtc_cycle_count);

		/*
		 * Set min/max to actual.
		 * ACPI may update these later if speed-stepping is detected.
		 */
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
		printf("[RTCLOCK] frequency %llu (%llu)\n",
			cycles, rtc_cyc_per_sec);

		rtc_lapic_timer_calibrate();

		/* Minimum interval is 1usec */
		rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC,
								0ULL);
		/* Point LAPIC interrupts to hardclock() */
		lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

		clock_timebase_init();
		rtc_initialized = TRUE;
	}

	/* Per-cpu: set up this cpu's nanotime state and start its ticks */
	rtc_nanotime_init();

	rtc_lapic_start_ticking();

	mp_enable_preemption();

	return (1);
}
917 | ||
1c79356b A |
918 | /* |
919 | * Get the clock device time. This routine is responsible | |
920 | * for converting the device's machine dependent time value | |
921 | * into a canonical mach_timespec_t value. | |
922 | */ | |
91447636 A |
923 | static kern_return_t |
924 | sysclk_gettime_internal( | |
1c79356b A |
925 | mach_timespec_t *cur_time) /* OUT */ |
926 | { | |
91447636 | 927 | *cur_time = tsc_to_timespec(); |
1c79356b A |
928 | return (KERN_SUCCESS); |
929 | } | |
930 | ||
931 | kern_return_t | |
91447636 | 932 | sysclk_gettime( |
1c79356b A |
933 | mach_timespec_t *cur_time) /* OUT */ |
934 | { | |
91447636 | 935 | return sysclk_gettime_internal(cur_time); |
1c79356b A |
936 | } |
937 | ||
1c79356b A |
938 | void |
939 | sysclk_gettime_interrupts_disabled( | |
940 | mach_timespec_t *cur_time) /* OUT */ | |
941 | { | |
91447636 | 942 | (void) sysclk_gettime_internal(cur_time); |
1c79356b A |
943 | } |
944 | ||
9bccf70c A |
// utility routine
// Code to calculate how many processor cycles are in a second...
//
// 'cycles' is the TSC count measured over the calibration interval
// (1/20 second, per timeRDTSC — hence the *20 below).  Sets the
// quantization scale/shift used for fast tsc->nanos conversion, the
// rtclock timebase constants, and the PE clock-frequency fields.
// Returns the cpu frequency in Hz rounded to UI_CPUFREQ_ROUNDING_FACTOR.

static uint64_t
rtc_set_cyc_per_sec(uint64_t cycles)
{

	if (cycles > (NSEC_PER_SEC/20)) {
		// we can use just a "fast" multiply to get nanos
		// (cpu faster than 1GHz: ratio fits a 32-bit shift)
		rtc_quant_shift = 32;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
		rtclock.timebase_const.numer = rtc_quant_scale; // timeRDTSC is 1/20
		rtclock.timebase_const.denom = RTC_FAST_DENOM;
	} else {
		// slower cpu: use a smaller shift to keep precision
		rtc_quant_shift = 26;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
		rtclock.timebase_const.numer = NSEC_PER_SEC/20; // timeRDTSC is 1/20
		rtclock.timebase_const.denom = cycles;
	}
	rtc_cyc_per_sec = cycles*20;	// multiply it by 20 and we are done..
	// BUT we also want to calculate...

	// round to a user-friendly frequency for display/reporting
	cycles = ((rtc_cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
			/ UI_CPUFREQ_ROUNDING_FACTOR)
				* UI_CPUFREQ_ROUNDING_FACTOR;

	/*
	 * Set current measured speed.
	 * cpu_clock_rate_hz is only 32 bits wide, so saturate it for
	 * frequencies of 4GHz and above.
	 */
	if (cycles >= 0x100000000ULL) {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
	} else {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
	}
	gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

	kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, rtc_cyc_per_sec);
	return(cycles);
}
1c79356b | 984 | |
55e303ae A |
985 | void |
986 | clock_get_system_microtime( | |
987 | uint32_t *secs, | |
988 | uint32_t *microsecs) | |
9bccf70c | 989 | { |
55e303ae A |
990 | mach_timespec_t now; |
991 | ||
91447636 | 992 | (void) sysclk_gettime_internal(&now); |
55e303ae A |
993 | |
994 | *secs = now.tv_sec; | |
995 | *microsecs = now.tv_nsec / NSEC_PER_USEC; | |
1c79356b A |
996 | } |
997 | ||
55e303ae A |
998 | void |
999 | clock_get_system_nanotime( | |
1000 | uint32_t *secs, | |
1001 | uint32_t *nanosecs) | |
1002 | { | |
1003 | mach_timespec_t now; | |
1004 | ||
91447636 | 1005 | (void) sysclk_gettime_internal(&now); |
55e303ae A |
1006 | |
1007 | *secs = now.tv_sec; | |
1008 | *nanosecs = now.tv_nsec; | |
1009 | } | |
9bccf70c | 1010 | |
1c79356b A |
1011 | /* |
1012 | * Get clock device attributes. | |
1013 | */ | |
1014 | kern_return_t | |
1015 | sysclk_getattr( | |
1016 | clock_flavor_t flavor, | |
1017 | clock_attr_t attr, /* OUT */ | |
1018 | mach_msg_type_number_t *count) /* IN/OUT */ | |
1019 | { | |
1c79356b A |
1020 | if (*count != 1) |
1021 | return (KERN_FAILURE); | |
1022 | switch (flavor) { | |
1023 | ||
1024 | case CLOCK_GET_TIME_RES: /* >0 res */ | |
91447636 | 1025 | *(clock_res_t *) attr = rtc_intr_nsec; |
1c79356b A |
1026 | break; |
1027 | ||
91447636 | 1028 | case CLOCK_ALARM_CURRES: /* =0 no alarm */ |
1c79356b | 1029 | case CLOCK_ALARM_MAXRES: |
1c79356b | 1030 | case CLOCK_ALARM_MINRES: |
91447636 | 1031 | *(clock_res_t *) attr = 0; |
1c79356b A |
1032 | break; |
1033 | ||
1034 | default: | |
1035 | return (KERN_INVALID_VALUE); | |
1036 | } | |
1037 | return (KERN_SUCCESS); | |
1038 | } | |
1039 | ||
1c79356b A |
1040 | /* |
1041 | * Set next alarm time for the clock device. This call | |
1042 | * always resets the time to deliver an alarm for the | |
1043 | * clock. | |
1044 | */ | |
1045 | void | |
1046 | sysclk_setalarm( | |
1047 | mach_timespec_t *alarm_time) | |
1048 | { | |
91447636 A |
1049 | timer_call_enter(&rtclock_alarm_timer, |
1050 | (uint64_t) alarm_time->tv_sec * NSEC_PER_SEC | |
1051 | + alarm_time->tv_nsec); | |
1c79356b A |
1052 | } |
1053 | ||
/*
 * Configure the calendar clock; defers to the battery-backed
 * clock driver's configuration.
 */
int
calend_config(void)
{
	return (bbc_config());
}
1062 | ||
/*
 * Initialize the calendar clock.  Nothing to do beyond reporting
 * success; no per-boot setup is required here.
 */
int
calend_init(void)
{
	return (1);
}
1071 | ||
/*
 * Get the current calendar (wall) clock time.
 * Returns KERN_FAILURE if the calendar has not yet been set;
 * otherwise the system clock plus the calendar offset, sampled
 * atomically under the rtclock lock.
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*cur_time)	/* OUT */
{
	spl_t		s;

	RTC_LOCK(s);
	if (!rtclock.calend_is_set) {
		/* Calendar not initialized yet: *cur_time is left untouched */
		RTC_UNLOCK(s);
		return (KERN_FAILURE);
	}

	/* Sample uptime and apply the offset while still holding the lock */
	(void) sysclk_gettime_internal(cur_time);
	ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
	RTC_UNLOCK(s);

	return (KERN_SUCCESS);
}
1093 | ||
55e303ae A |
1094 | void |
1095 | clock_get_calendar_microtime( | |
1096 | uint32_t *secs, | |
1097 | uint32_t *microsecs) | |
1098 | { | |
1099 | mach_timespec_t now; | |
1100 | ||
1101 | calend_gettime(&now); | |
1102 | ||
1103 | *secs = now.tv_sec; | |
1104 | *microsecs = now.tv_nsec / NSEC_PER_USEC; | |
1105 | } | |
1106 | ||
1107 | void | |
1108 | clock_get_calendar_nanotime( | |
1109 | uint32_t *secs, | |
1110 | uint32_t *nanosecs) | |
1c79356b | 1111 | { |
55e303ae A |
1112 | mach_timespec_t now; |
1113 | ||
1114 | calend_gettime(&now); | |
1115 | ||
1116 | *secs = now.tv_sec; | |
1117 | *nanosecs = now.tv_nsec; | |
1118 | } | |
1119 | ||
/*
 * Set the calendar (wall) clock to the given seconds/microseconds.
 * Recomputes the calendar offset relative to the current system clock,
 * adjusts the recorded boot time by the change in offset, persists the
 * new time to the battery-backed clock, and notifies listeners.
 */
void
clock_set_calendar_microtime(
	uint32_t	secs,
	uint32_t	microsecs)
{
	mach_timespec_t	new_time, curr_time;
	uint32_t	old_offset;
	spl_t		s;

	new_time.tv_sec = secs;
	new_time.tv_nsec = microsecs * NSEC_PER_USEC;

	RTC_LOCK(s);
	old_offset = rtclock.calend_offset.tv_sec;
	(void) sysclk_gettime_internal(&curr_time);
	/* offset = new wall time - current uptime */
	rtclock.calend_offset = new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	/* shift boottime by the same amount the offset moved */
	rtclock.boottime += rtclock.calend_offset.tv_sec - old_offset;
	rtclock.calend_is_set = TRUE;
	RTC_UNLOCK(s);

	/* Persist outside the lock; bbc_settime may be slow */
	(void) bbc_settime(&new_time);

	host_notify_calendar_change();
}
1145 | ||
1146 | /* | |
1147 | * Get clock device attributes. | |
1148 | */ | |
1149 | kern_return_t | |
1150 | calend_getattr( | |
1151 | clock_flavor_t flavor, | |
1152 | clock_attr_t attr, /* OUT */ | |
1153 | mach_msg_type_number_t *count) /* IN/OUT */ | |
1154 | { | |
1c79356b A |
1155 | if (*count != 1) |
1156 | return (KERN_FAILURE); | |
1157 | switch (flavor) { | |
1158 | ||
1159 | case CLOCK_GET_TIME_RES: /* >0 res */ | |
91447636 | 1160 | *(clock_res_t *) attr = rtc_intr_nsec; |
1c79356b | 1161 | break; |
1c79356b A |
1162 | |
1163 | case CLOCK_ALARM_CURRES: /* =0 no alarm */ | |
1164 | case CLOCK_ALARM_MINRES: | |
1165 | case CLOCK_ALARM_MAXRES: | |
1166 | *(clock_res_t *) attr = 0; | |
1167 | break; | |
1168 | ||
1169 | default: | |
1170 | return (KERN_INVALID_VALUE); | |
1171 | } | |
1172 | return (KERN_SUCCESS); | |
1173 | } | |
1174 | ||
55e303ae A |
#define tickadj		(40*NSEC_PER_USEC)	/* "standard" skew, ns / tick */
#define bigadj		(NSEC_PER_SEC)		/* use 10x skew above bigadj ns */

/*
 * Program a gradual (adjtime-style) calendar correction.
 * The requested secs/microsecs delta is converted to a total
 * nanosecond adjustment, applied tickadj ns per tick (10x when the
 * outstanding total exceeds bigadj).  On return, secs/microsecs hold
 * the previously-outstanding adjustment; the return value is the
 * tick interval (0 when no adjustment remains to be applied).
 */
uint32_t
clock_set_calendar_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	int64_t		total, ototal;
	uint32_t	interval = 0;
	spl_t		s;

	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	RTC_LOCK(s);
	ototal = rtclock.calend_adjtotal;

	if (total != 0) {
		int32_t delta = tickadj;

		/* Clamp the per-tick delta toward total, same sign as total */
		if (total > 0) {
			if (total > bigadj)
				delta *= 10;
			if (delta > total)
				delta = total;
		}
		else {
			if (total < -bigadj)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;
		}

		rtclock.calend_adjtotal = total;
		rtclock.calend_adjdelta = delta;

		interval = NSEC_PER_HZ;
	}
	else
		rtclock.calend_adjdelta = rtclock.calend_adjtotal = 0;

	RTC_UNLOCK(s);

	if (ototal == 0)
		*secs = *microsecs = 0;
	else {
		*secs = ototal / NSEC_PER_SEC;
		/*
		 * NOTE(review): this stores the NANOSECOND remainder in
		 * *microsecs (no /NSEC_PER_USEC) — looks inconsistent with
		 * the name; confirm against the adjtime() caller's units.
		 */
		*microsecs = ototal % NSEC_PER_SEC;
	}

	return (interval);
}
1228 | ||
/*
 * Apply one tick's worth of the outstanding adjtime correction to the
 * calendar offset.  Called periodically while an adjustment is active.
 * Returns the next tick interval, or 0 once the adjustment completes.
 */
uint32_t
clock_adjust_calendar(void)
{
	uint32_t	interval = 0;
	int32_t		delta;
	spl_t		s;

	RTC_LOCK(s);
	delta = rtclock.calend_adjdelta;
	/* Skew the calendar offset by this tick's delta */
	ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, delta);

	rtclock.calend_adjtotal -= delta;

	/* If less than one full delta remains, shrink it to the remainder */
	if (delta > 0) {
		if (delta > rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}
	else
	if (delta < 0) {
		if (delta < rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}

	if (rtclock.calend_adjdelta != 0)
		interval = NSEC_PER_HZ;

	RTC_UNLOCK(s);

	return (interval);
}
1259 | ||
/*
 * Initialize the calendar from the battery-backed clock.
 * Records boot time on the first call, computes the calendar offset
 * (wall time - uptime) under the rtclock lock, and notifies listeners.
 * Silently returns if the battery-backed clock cannot be read.
 */
void
clock_initialize_calendar(void)
{
	mach_timespec_t	bbc_time, curr_time;
	spl_t		s;

	if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
		return;

	RTC_LOCK(s);
	/* boottime is only captured once, on the first initialization */
	if (rtclock.boottime == 0)
		rtclock.boottime = bbc_time.tv_sec;
	(void) sysclk_gettime_internal(&curr_time);
	/* offset = wall time - current uptime */
	rtclock.calend_offset = bbc_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	RTC_UNLOCK(s);

	host_notify_calendar_change();
}
1280 | ||
91447636 A |
1281 | void |
1282 | clock_get_boottime_nanotime( | |
1283 | uint32_t *secs, | |
1284 | uint32_t *nanosecs) | |
1285 | { | |
1286 | *secs = rtclock.boottime; | |
1287 | *nanosecs = 0; | |
1288 | } | |
1289 | ||
1c79356b A |
1290 | void |
1291 | clock_timebase_info( | |
1292 | mach_timebase_info_t info) | |
1293 | { | |
91447636 | 1294 | info->numer = info->denom = 1; |
1c79356b A |
1295 | } |
1296 | ||
/*
 * Arm this cpu's one-shot timer for the given absolute deadline.
 * Must be called with preemption disabled and after a handler has
 * been registered via clock_set_timer_func().  Runs with rtclock
 * interrupts masked; the LAPIC is only reprogrammed when the new
 * deadline precedes the next periodic tick.
 */
void
clock_set_timer_deadline(
	uint64_t		deadline)
{
	spl_t			s;
	cpu_data_t		*pp = current_cpu_datap();
	rtclock_timer_t		*mytimer = &pp->cpu_rtc_timer;
	uint64_t		abstime;
	uint64_t		decr;

	assert(get_preemption_level() > 0);
	assert(rtclock_timer_expire);

	RTC_INTRS_OFF(s);
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	/*
	 * If the expiration callout is currently running (has_expired),
	 * the interrupt path will re-arm the hardware itself on return.
	 */
	if (!mytimer->has_expired) {
		abstime = mach_absolute_time();
		if (mytimer->deadline < pp->cpu_rtc_tick_deadline) {
			decr = deadline_to_decrementer(mytimer->deadline,
							abstime);
			rtc_lapic_set_timer(decr);
			pp->cpu_rtc_intr_deadline = mytimer->deadline;
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) |
					DBG_FUNC_NONE, decr, 2, 0, 0, 0);
		}
	}
	RTC_INTRS_ON(s);
}
1327 | ||
1328 | void | |
1329 | clock_set_timer_func( | |
1330 | clock_timer_func_t func) | |
1331 | { | |
91447636 A |
1332 | if (rtclock_timer_expire == NULL) |
1333 | rtclock_timer_expire = func; | |
1c79356b A |
1334 | } |
1335 | ||
1c79356b | 1336 | /* |
91447636 | 1337 | * Real-time clock device interrupt. |
1c79356b | 1338 | */ |
1c79356b | 1339 | void |
55e303ae | 1340 | rtclock_intr(struct i386_interrupt_state *regs) |
1c79356b | 1341 | { |
55e303ae | 1342 | uint64_t abstime; |
91447636 A |
1343 | uint32_t latency; |
1344 | uint64_t decr; | |
1345 | uint64_t decr_tick; | |
1346 | uint64_t decr_timer; | |
1347 | cpu_data_t *pp = current_cpu_datap(); | |
1348 | rtclock_timer_t *mytimer = &pp->cpu_rtc_timer; | |
1349 | ||
1350 | assert(get_preemption_level() > 0); | |
1351 | assert(!ml_get_interrupts_enabled()); | |
1352 | ||
1353 | abstime = _rtc_nanotime_read(); | |
1354 | latency = (uint32_t) abstime - pp->cpu_rtc_intr_deadline; | |
1355 | if (pp->cpu_rtc_tick_deadline <= abstime) { | |
1356 | rtc_nanotime_update(); | |
1357 | clock_deadline_for_periodic_event( | |
1358 | NSEC_PER_HZ, abstime, &pp->cpu_rtc_tick_deadline); | |
1359 | hertz_tick( | |
1360 | #if STAT_TIME | |
1361 | NSEC_PER_HZ, | |
1362 | #endif | |
1363 | (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0), | |
1364 | regs->eip); | |
1365 | } | |
1c79356b | 1366 | |
91447636 A |
1367 | abstime = _rtc_nanotime_read(); |
1368 | if (mytimer->is_set && mytimer->deadline <= abstime) { | |
1369 | mytimer->has_expired = TRUE; | |
1370 | mytimer->is_set = FALSE; | |
1371 | (*rtclock_timer_expire)(abstime); | |
1372 | assert(!ml_get_interrupts_enabled()); | |
1373 | mytimer->has_expired = FALSE; | |
55e303ae A |
1374 | } |
1375 | ||
91447636 A |
1376 | /* Log the interrupt service latency (-ve value expected by tool) */ |
1377 | KERNEL_DEBUG_CONSTANT( | |
1378 | MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE, | |
1379 | -latency, (uint32_t)regs->eip, 0, 0, 0); | |
1c79356b | 1380 | |
91447636 A |
1381 | abstime = _rtc_nanotime_read(); |
1382 | decr_tick = deadline_to_decrementer(pp->cpu_rtc_tick_deadline, abstime); | |
1383 | decr_timer = (mytimer->is_set) ? | |
1384 | deadline_to_decrementer(mytimer->deadline, abstime) : | |
1385 | DECREMENTER_MAX; | |
1386 | decr = MIN(decr_tick, decr_timer); | |
1387 | pp->cpu_rtc_intr_deadline = abstime + decr; | |
1c79356b | 1388 | |
91447636 | 1389 | rtc_lapic_set_timer(decr); |
1c79356b | 1390 | |
91447636 A |
1391 | /* Log the new decrementer value */ |
1392 | KERNEL_DEBUG_CONSTANT( | |
1393 | MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE, | |
1394 | decr, 3, 0, 0, 0); | |
1c79356b | 1395 | |
91447636 | 1396 | } |
1c79356b | 1397 | |
91447636 A |
1398 | static void |
1399 | rtclock_alarm_expire( | |
1400 | __unused timer_call_param_t p0, | |
1401 | __unused timer_call_param_t p1) | |
1402 | { | |
1403 | mach_timespec_t clock_time; | |
1404 | ||
1405 | (void) sysclk_gettime_internal(&clock_time); | |
1406 | ||
1407 | clock_alarm_intr(SYSTEM_CLOCK, &clock_time); | |
1c79356b A |
1408 | } |
1409 | ||
1410 | void | |
1411 | clock_get_uptime( | |
0b4e3aa0 | 1412 | uint64_t *result) |
1c79356b | 1413 | { |
91447636 | 1414 | *result = rtc_nanotime_read(); |
55e303ae | 1415 | } |
1c79356b | 1416 | |
55e303ae A |
1417 | uint64_t |
1418 | mach_absolute_time(void) | |
1419 | { | |
91447636 A |
1420 | return rtc_nanotime_read(); |
1421 | } | |
1422 | ||
/*
 * Split a 64-bit nanosecond absolute time into seconds and
 * microseconds using two 32-bit divides ("divl"): the first divides
 * the 64-bit value (edx:eax via the "A" constraint) by NSEC_PER_SEC
 * yielding seconds and a nanosecond remainder, the second converts
 * that remainder to microseconds.
 * NOTE(review): divl faults if the quotient overflows 32 bits —
 * assumes abstime < 2^32 seconds; confirm callers guarantee this.
 */
void
absolutetime_to_microtime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint32_t	remain;

	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (abstime), "r" (NSEC_PER_SEC));
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
1440 | ||
1441 | void | |
1442 | clock_interval_to_deadline( | |
0b4e3aa0 A |
1443 | uint32_t interval, |
1444 | uint32_t scale_factor, | |
1445 | uint64_t *result) | |
1c79356b | 1446 | { |
0b4e3aa0 | 1447 | uint64_t abstime; |
1c79356b A |
1448 | |
1449 | clock_get_uptime(result); | |
1450 | ||
1451 | clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime); | |
1452 | ||
0b4e3aa0 | 1453 | *result += abstime; |
1c79356b A |
1454 | } |
1455 | ||
1456 | void | |
1457 | clock_interval_to_absolutetime_interval( | |
0b4e3aa0 A |
1458 | uint32_t interval, |
1459 | uint32_t scale_factor, | |
1460 | uint64_t *result) | |
1c79356b | 1461 | { |
0b4e3aa0 | 1462 | *result = (uint64_t)interval * scale_factor; |
1c79356b A |
1463 | } |
1464 | ||
1465 | void | |
1466 | clock_absolutetime_interval_to_deadline( | |
0b4e3aa0 A |
1467 | uint64_t abstime, |
1468 | uint64_t *result) | |
1c79356b A |
1469 | { |
1470 | clock_get_uptime(result); | |
1471 | ||
0b4e3aa0 | 1472 | *result += abstime; |
1c79356b A |
1473 | } |
1474 | ||
1475 | void | |
1476 | absolutetime_to_nanoseconds( | |
0b4e3aa0 A |
1477 | uint64_t abstime, |
1478 | uint64_t *result) | |
1c79356b | 1479 | { |
0b4e3aa0 | 1480 | *result = abstime; |
1c79356b A |
1481 | } |
1482 | ||
1483 | void | |
1484 | nanoseconds_to_absolutetime( | |
0b4e3aa0 A |
1485 | uint64_t nanoseconds, |
1486 | uint64_t *result) | |
1c79356b | 1487 | { |
0b4e3aa0 | 1488 | *result = nanoseconds; |
1c79356b A |
1489 | } |
1490 | ||
55e303ae | 1491 | void |
91447636 | 1492 | machine_delay_until( |
55e303ae A |
1493 | uint64_t deadline) |
1494 | { | |
1495 | uint64_t now; | |
1496 | ||
1497 | do { | |
1498 | cpu_pause(); | |
1499 | now = mach_absolute_time(); | |
1500 | } while (now < deadline); | |
1501 | } |