/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * File:	i386/rtclock.c
 * Purpose:	Routines for handling the machine dependent
 *		real-time clock. Historically, this clock was
 *		generated by the Intel 8254 Programmable Interval
 *		Timer, but local APIC timers are now used for
 *		this purpose, with the master time reference being
 *		the CPU clock counted by the time-stamp counter (TSC).
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <kern/etimer.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <i386/ipl.h>
#include <architecture/i386/pio.h>
#include <i386/machine_cpu.h>
#include <i386/cpuid.h>
#include <i386/cpu_threads.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <i386/lapic.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
#include <i386/tsc.h>
#include <i386/rtclock.h>

#define NSEC_PER_HZ		(NSEC_PER_SEC / 100)	/* nsec per tick */

#define UI_CPUFREQ_ROUNDING_FACTOR	10000000

int		rtclock_config(void);

int		rtclock_init(void);

uint64_t	rtc_decrementer_min;

uint64_t	tsc_rebase_abs_time = 0;

void		rtclock_intr(x86_saved_state_t *regs);
static uint64_t	maxDec;		/* longest interval our hardware timer can handle (nsec) */

static void	rtc_set_timescale(uint64_t cycles);
static uint64_t	rtc_export_speed(uint64_t cycles);

rtc_nanotime_t	rtc_nanotime_info = {0, 0, 0, 0, 1, 0};

/*
 * tsc_to_nanoseconds:
 *
 * Basic routine to convert a raw 64-bit TSC value to a
 * 64-bit nanosecond value. The conversion is implemented
 * based on the scale factor and an implicit 32-bit shift.
 */
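/*
 * Illustrative example (the numbers are hypothetical, not from this
 * source): for a 2.4 GHz TSC, rtc_set_timescale() below computes
 *
 *	scale = (NSEC_PER_SEC << 32) / 2400000000 = 0x6AAAAAAA
 *
 * so one second's worth of cycles converts as
 *
 *	(2400000000 * 0x6AAAAAAA) >> 32 = 999999999 ns,
 *
 * the 1 ns shortfall reflecting truncation in the scale factor.
 */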
static inline uint64_t
_tsc_to_nanoseconds(uint64_t value)
{
#if defined(__i386__)
	/*
	 * 64x32-bit multiply of the two halves of the TSC value by the
	 * scale factor, keeping bits [95:32] of the 96-bit product:
	 * effectively (value * scale) >> 32.
	 */
	asm volatile("movl	%%edx,%%esi	;"
		     "mull	%%ecx		;"
		     "movl	%%edx,%%edi	;"
		     "movl	%%esi,%%eax	;"
		     "mull	%%ecx		;"
		     "addl	%%edi,%%eax	;"
		     "adcl	$0,%%edx	 "
		     : "+A" (value)
		     : "c" (current_cpu_datap()->cpu_nanotime->scale)
		     : "esi", "edi");
#elif defined(__x86_64__)
	/* Full 64x64->128-bit multiply, then take bits [95:32]. */
	asm volatile("mul %%rcx;"
		     "shrq $32, %%rax;"
		     "shlq $32, %%rdx;"
		     "orq %%rdx, %%rax;"
		     : "=a"(value)
		     : "a"(value), "c"(rtc_nanotime_info.scale)
		     : "rdx", "cc" );
#else
#error Unsupported architecture
#endif

	return (value);
}

static inline uint32_t
_absolutetime_to_microtime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *microsecs)
{
	uint32_t remain;
#if defined(__i386__)
	/*
	 * Divide the 64-bit abstime in %edx:%eax by NSEC_PER_SEC:
	 * quotient is seconds, remainder is leftover nanoseconds.
	 */
	asm volatile(
		"divl %3"
		: "=a" (*secs), "=d" (remain)
		: "A" (abstime), "r" (NSEC_PER_SEC));
	/* Then convert the nanosecond remainder to microseconds. */
	asm volatile(
		"divl %3"
		: "=a" (*microsecs)
		: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
#elif defined(__x86_64__)
	*secs = abstime / (uint64_t)NSEC_PER_SEC;
	remain = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC);
	*microsecs = remain / NSEC_PER_USEC;
#else
#error Unsupported architecture
#endif
	return remain;
}

static inline void
_absolutetime_to_nanotime(uint64_t abstime, clock_sec_t *secs, clock_nsec_t *nanosecs)
{
#if defined(__i386__)
	asm volatile(
		"divl %3"
		: "=a" (*secs), "=d" (*nanosecs)
		: "A" (abstime), "r" (NSEC_PER_SEC));
#elif defined(__x86_64__)
	*secs = abstime / (uint64_t)NSEC_PER_SEC;
	*nanosecs = (clock_nsec_t)(abstime % (uint64_t)NSEC_PER_SEC);
#else
#error Unsupported architecture
#endif
}

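/*
 * deadline_to_decrementer:
 *
 * Convert an absolute deadline to a countdown interval in nanoseconds
 * (the caller converts to bus ticks), clamped between rtc_decrementer_min
 * and maxDec.  A deadline already in the past yields the minimum interval
 * so the timer still pops promptly.
 */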
static uint32_t
deadline_to_decrementer(
	uint64_t	deadline,
	uint64_t	now)
{
	uint64_t	delta;

	if (deadline <= now)
		return (uint32_t)rtc_decrementer_min;
	else {
		delta = deadline - now;
		return (uint32_t)MIN(MAX(rtc_decrementer_min, delta), maxDec);
	}
}

void
rtc_lapic_start_ticking(void)
{
	x86_lcpu_t	*lcpu = x86_lcpu();

	/*
	 * Force a complete re-evaluation of timer deadlines.
	 */
	lcpu->rtcPop = EndOfAllTime;
	etimer_resync_deadlines();
}

/*
 * Configure the real-time clock device. Return success (1)
 * or failure (0).
 */

int
rtclock_config(void)
{
	/* nothing to do */
	return (1);
}


/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * The cpu clock rate is platform-dependent and may stop or be reset when the
 * processor is napped/slept. As a result, nanotime is the software abstraction
 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
 *
 * The kernel maintains nanotime information recording:
 * - the ratio of tsc to nanoseconds
 *   with this ratio expressed as a 32-bit scale and shift
 *   (power of 2 divider);
 * - { tsc_base, ns_base } pair of corresponding timestamps.
 *
 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read.
 *
 * All of the routines which update the nanotime data are non-reentrant. This must
 * be guaranteed by the caller.
 */
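/*
 * A minimal sketch of the conversion (assuming the fast-processor case,
 * where shift == 32):
 *
 *	nanotime = ns_base + (((tsc - tsc_base) * scale) >> 32)
 *
 * _rtc_nanotime_read() also loops on the structure's generation count so
 * that readers never see a partially updated tuple.
 */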
static inline void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
	commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}

/*
 * rtc_nanotime_init:
 *
 * Initialize the nanotime info from the base time.
 */
static inline void
_rtc_nanotime_init(rtc_nanotime_t *rntp, uint64_t base)
{
	uint64_t	tsc = rdtsc64();

	_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
}

static void
rtc_nanotime_init(uint64_t base)
{
	rtc_nanotime_t	*rntp = current_cpu_datap()->cpu_nanotime;

	_rtc_nanotime_init(rntp, base);
	rtc_nanotime_set_commpage(rntp);
}

/*
 * rtc_nanotime_init_commpage:
 *
 * Call back from the commpage initialization to
 * cause the commpage data to be filled in once the
 * commpages have been created.
 */
void
rtc_nanotime_init_commpage(void)
{
	spl_t	s = splclock();

	rtc_nanotime_set_commpage(current_cpu_datap()->cpu_nanotime);

	splx(s);
}

/*
 * rtc_nanotime_read:
 *
 * Returns the current nanotime value, accessible from any
 * context.
 */
static inline uint64_t
rtc_nanotime_read(void)
{

#if CONFIG_EMBEDDED
	if (gPEClockFrequencyInfo.timebase_frequency_hz > SLOW_TSC_THRESHOLD)
		return _rtc_nanotime_read(current_cpu_datap()->cpu_nanotime, 1);	/* slow processor */
	else
#endif
	return _rtc_nanotime_read(current_cpu_datap()->cpu_nanotime, 0);	/* assume fast processor */
}

/*
 * rtc_clock_napped:
 *
 * Invoked from power management when we exit from a low C-State (>= C4)
 * and the TSC has stopped counting. The nanotime data is updated according
 * to the provided value which represents the new value for nanotime.
 */
void
rtc_clock_napped(uint64_t base, uint64_t tsc_base)
{
	rtc_nanotime_t	*rntp = current_cpu_datap()->cpu_nanotime;
	uint64_t	oldnsecs;
	uint64_t	newnsecs;
	uint64_t	tsc;

	assert(!ml_get_interrupts_enabled());
	tsc = rdtsc64();
	oldnsecs = rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
	newnsecs = base + _tsc_to_nanoseconds(tsc - tsc_base);

	/*
	 * Only update the base values if time using the new base values
	 * is later than the time using the old base values.
	 */
	if (oldnsecs < newnsecs) {
		_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
		rtc_nanotime_set_commpage(rntp);
	}
}

void
rtc_clock_stepping(__unused uint32_t new_frequency,
		   __unused uint32_t old_frequency)
{
	panic("rtc_clock_stepping unsupported");
}

void
rtc_clock_stepped(__unused uint32_t new_frequency,
		  __unused uint32_t old_frequency)
{
	panic("rtc_clock_stepped unsupported");
}

/*
 * rtc_sleep_wakeup:
 *
 * Invoked from power management when we have awoken from a sleep (S3)
 * and the TSC has been reset. The nanotime data is updated based on
 * the passed-in value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_sleep_wakeup(
	uint64_t	base)
{
	/*
	 * Reset nanotime.
	 * The timestamp counter will have been reset
	 * but nanotime (uptime) marches onward.
	 */
	rtc_nanotime_init(base);
}

/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
rtclock_init(void)
{
	uint64_t	cycles;

	assert(!ml_get_interrupts_enabled());

	if (cpu_number() == master_cpu) {

		assert(tscFreq);
		rtc_set_timescale(tscFreq);

		/*
		 * Adjust and set the exported cpu speed.
		 */
		cycles = rtc_export_speed(tscFreq);

		/*
		 * Set min/max to actual.
		 * ACPI may update these later if speed-stepping is detected.
		 */
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;

		/*
		 * Compute the longest interval we can represent: the timer's
		 * maximum (31-bit) count in bus ticks, converted to nanoseconds.
		 */
		maxDec = tmrCvt(0x7fffffffULL, busFCvtt2n);
		kprintf("maxDec: %lld\n", maxDec);

		/* Minimum interval is 1usec */
		rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC, 0ULL);
		/* Point LAPIC interrupts to hardclock() */
		lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

		clock_timebase_init();
		ml_init_lock_timeout();
	}

	rtc_lapic_start_ticking();

	return (1);
}

/*
 * rtc_set_timescale:
 *
 * Compute the scale factor used by _tsc_to_nanoseconds() from the number
 * of TSC cycles per second: scale = (NSEC_PER_SEC << 32) / cycles.
 */
static void
rtc_set_timescale(uint64_t cycles)
{
	rtc_nanotime_t	*rntp = current_cpu_datap()->cpu_nanotime;
	rntp->scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles);

	/*
	 * Note: for slow TSCs the shift field holds the TSC frequency
	 * itself rather than a binary shift count.
	 */
	if (cycles <= SLOW_TSC_THRESHOLD)
		rntp->shift = (uint32_t)cycles;
	else
		rntp->shift = 32;

	if (tsc_rebase_abs_time == 0)
		tsc_rebase_abs_time = mach_absolute_time();

	rtc_nanotime_init(0);
}

static uint64_t
rtc_export_speed(uint64_t cyc_per_sec)
{
	uint64_t	cycles;

	/* Round to the nearest UI_CPUFREQ_ROUNDING_FACTOR (10 MHz): */
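	/*
	 * e.g. (hypothetical): a measured 2,394,200,000 Hz becomes
	 * (2394200000 + 5000000) / 10000000 * 10000000 = 2,390,000,000 Hz.
	 */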
	cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
		  / UI_CPUFREQ_ROUNDING_FACTOR)
		 * UI_CPUFREQ_ROUNDING_FACTOR;

	/*
	 * Set current measured speed.
	 */
	if (cycles >= 0x100000000ULL) {
		/* Saturate the 32-bit clock rate field */
		gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
	} else {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
	}
	gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

	kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
	return (cycles);
}

void
clock_get_system_microtime(
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	uint64_t	now = rtc_nanotime_read();

	_absolutetime_to_microtime(now, secs, microsecs);
}

void
clock_get_system_nanotime(
	clock_sec_t	*secs,
	clock_nsec_t	*nanosecs)
{
	uint64_t	now = rtc_nanotime_read();

	_absolutetime_to_nanotime(now, secs, nanosecs);
}

void
clock_gettimeofday_set_commpage(
	uint64_t	abstime,
	uint64_t	epoch,
	uint64_t	offset,
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	uint64_t	now = abstime + offset;
	uint32_t	remain;

	remain = _absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)epoch;

	/* Publish the absolute time at the start of the current second */
	commpage_set_timestamp(abstime - remain, *secs);
}

void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	/* mach_absolute_time() is in nanoseconds here, so the ratio is 1:1 */
	info->numer = info->denom = 1;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
	x86_saved_state_t	*tregs)
{
	uint64_t	rip;
	boolean_t	user_mode = FALSE;
	uint64_t	abstime;
	uint32_t	latency;
	x86_lcpu_t	*lcpu = x86_lcpu();

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	abstime = rtc_nanotime_read();
	latency = (uint32_t)(abstime - lcpu->rtcDeadline);
	if (abstime < lcpu->rtcDeadline)
		latency = 1;	/* fired early; report a nominal latency */

	if (is_saved_state64(tregs) == TRUE) {
		x86_saved_state64_t	*regs;

		regs = saved_state64(tregs);

		/* Low two bits of %cs hold the privilege level (3 == user) */
		if (regs->isf.cs & 0x03)
			user_mode = TRUE;
		rip = regs->isf.rip;
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(tregs);

		if (regs->cs & 0x03)
			user_mode = TRUE;
		rip = regs->eip;
	}

	/* Log the interrupt service latency (negative value expected by tool) */
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		-(int32_t)latency, (uint32_t)rip, user_mode, 0, 0);

	/* call the generic etimer */
	etimer_intr(user_mode, rip);
}

/*
 * Request timer pop from the hardware
 */
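/*
 * Illustrative flow (hypothetical numbers): for a deadline 100 usec out,
 * deadline_to_decrementer() returns ~100000 ns, tmrCvt() scales that by
 * busFCvtn2t into bus-clock ticks, and the local APIC one-shot timer is
 * armed with the resulting count.
 */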
int
setPop(
	uint64_t	time)
{
	uint64_t	now;
	uint32_t	decr;
	uint64_t	count;

	now = rtc_nanotime_read();	/* The time in nanoseconds */
	decr = deadline_to_decrementer(time, now);

	count = tmrCvt(decr, busFCvtn2t);
	lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);

	return decr;			/* Pass back what we set */
}
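/*
 * mach_absolute_time() is simply nanotime on this platform, hence the
 * 1:1 timebase reported by clock_timebase_info() above.
 */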
uint64_t
mach_absolute_time(void)
{
	return rtc_nanotime_read();
}

void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	*result = (uint64_t)interval * scale_factor;
}

void
absolutetime_to_microtime(
	uint64_t	abstime,
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	_absolutetime_to_microtime(abstime, secs, microsecs);
}

void
absolutetime_to_nanotime(
	uint64_t	abstime,
	clock_sec_t	*secs,
	clock_nsec_t	*nanosecs)
{
	_absolutetime_to_nanotime(abstime, secs, nanosecs);
}

void
nanotime_to_absolutetime(
	clock_sec_t	secs,
	clock_nsec_t	nanosecs,
	uint64_t	*result)
{
	*result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
}

void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	/* Identity: abstime is already in nanoseconds on this platform */
	*result = abstime;
}

void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	*result = nanoseconds;
}

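/*
 * machine_delay_until:
 *
 * Spin, issuing cpu_pause() (the PAUSE instruction, which eases pressure
 * on a hyperthread twin and saves a little power) until the deadline.
 */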
void
machine_delay_until(
	uint64_t	deadline)
{
	uint64_t	now;

	do {
		cpu_pause();
		now = mach_absolute_time();
	} while (now < deadline);
}