/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * File:        i386/rtclock.c
 * Purpose:     Routines for handling the machine dependent
 *              real-time clock.  Historically, this clock is
 *              generated by the Intel 8254 Programmable Interval
 *              Timer, but local apic timers are now used for
 *              this purpose with the master time reference being
 *              the cpu clock counted by the timestamp MSR.
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <kern/etimer.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>         /* for kernel_map */
#include <architecture/i386/pio.h>
#include <i386/machine_cpu.h>
#include <i386/cpuid.h>
#include <i386/cpu_threads.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#include <i386/pal_routines.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
#include <i386/tsc.h>
#include <i386/rtclock_protos.h>

#define UI_CPUFREQ_ROUNDING_FACTOR      10000000

int             rtclock_config(void);

int             rtclock_init(void);

uint64_t        tsc_rebase_abs_time = 0;

static void     rtc_set_timescale(uint64_t cycles);
static uint64_t rtc_export_speed(uint64_t cycles);

void
rtc_timer_start(void)
{
        /*
         * Force a complete re-evaluation of timer deadlines.
         */
        etimer_resync_deadlines();
}

/*
 * tsc_to_nanoseconds:
 *
 * Basic routine to convert a raw 64 bit TSC value to a
 * 64 bit nanosecond value.  The conversion is implemented
 * based on the scale factor and an implicit 32 bit shift.
 */
static inline uint64_t
_tsc_to_nanoseconds(uint64_t value)
{
#if defined(__i386__)
        asm volatile("movl  %%edx,%%esi ;"
                     "mull  %%ecx       ;"
                     "movl  %%edx,%%edi ;"
                     "movl  %%esi,%%eax ;"
                     "mull  %%ecx       ;"
                     "addl  %%edi,%%eax ;"
                     "adcl  $0,%%edx     "
                     : "+A" (value)
                     : "c" (pal_rtc_nanotime_info.scale)
                     : "esi", "edi");
#elif defined(__x86_64__)
        asm volatile("mul %%rcx;"
                     "shrq $32, %%rax;"
                     "shlq $32, %%rdx;"
                     "orq %%rdx, %%rax;"
                     : "=a"(value)
                     : "a"(value), "c"(pal_rtc_nanotime_info.scale)
                     : "rdx", "cc" );
#else
#error Unsupported architecture
#endif

        return (value);
}
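
/*
 * Illustrative sketch only: the scale factor holds nanoseconds per TSC
 * tick as a 32-bit binary fraction, so the assembly above is a 64x32-bit
 * multiply followed by dropping the low 32 fraction bits.  On x86_64,
 * where the compiler provides a 128-bit integer type, the same computation
 * can be written in plain C.  The name _tsc_to_nanoseconds_c is
 * hypothetical and exists only to avoid clashing with the real routine.
 */
#if defined(__x86_64__)
static inline uint64_t
_tsc_to_nanoseconds_c(uint64_t value)
{
        /* 64x32-bit multiply, then discard the 32 fraction bits */
        return (uint64_t)(((unsigned __int128)value *
                           pal_rtc_nanotime_info.scale) >> 32);
}
#endif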

static inline uint32_t
_absolutetime_to_microtime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *microsecs)
{
        uint32_t remain;
#if defined(__i386__)
        asm volatile(
                "divl %3"
                        : "=a" (*secs), "=d" (remain)
                        : "A" (abstime), "r" (NSEC_PER_SEC));
        asm volatile(
                "divl %3"
                        : "=a" (*microsecs)
                        : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
#elif defined(__x86_64__)
        *secs = abstime / (uint64_t)NSEC_PER_SEC;
        remain = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC);
        *microsecs = remain / NSEC_PER_USEC;
#else
#error Unsupported architecture
#endif
        return remain;
}

static inline void
_absolutetime_to_nanotime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *nanosecs)
{
#if defined(__i386__)
        asm volatile(
                "divl %3"
                : "=a" (*secs), "=d" (*nanosecs)
                : "A" (abstime), "r" (NSEC_PER_SEC));
#elif defined(__x86_64__)
        *secs = abstime / (uint64_t)NSEC_PER_SEC;
        *nanosecs = (clock_usec_t)(abstime % (uint64_t)NSEC_PER_SEC);
#else
#error Unsupported architecture
#endif
}

/*
 * Configure the real-time clock device.  Return success (1)
 * or failure (0).
 */

int
rtclock_config(void)
{
        /* nothing to do */
        return (1);
}


/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * The cpu clock rate is platform-dependent and may stop or be reset when the
 * processor is napped/slept.  As a result, nanotime is the software abstraction
 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
 *
 * The kernel maintains nanotime information recording:
 * - the ratio of tsc to nanoseconds
 *   with this ratio expressed as a 32-bit scale and shift
 *   (power of 2 divider);
 * - { tsc_base, ns_base } pair of corresponding timestamps.
 *
 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read.
 *
 * All of the routines which update the nanotime data are non-reentrant.  This must
 * be guaranteed by the caller.
 */
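
/*
 * Illustrative sketch only: given the tuple above, a reader reconstructs
 * the current nanotime as
 *
 *      ns = ns_base + (((rdtsc() - tsc_base) * scale) >> 32)
 *
 * The hypothetical helper below shows that arithmetic using this file's
 * own conversion routine; the real readers additionally guard against
 * observing a half-written tuple during a concurrent update, which is
 * omitted here.
 */
static inline uint64_t
rtc_nanotime_example(void)
{
        pal_rtc_nanotime_t      *rntp = &pal_rtc_nanotime_info;
        uint64_t                tsc = rdtsc64();

        return rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
}
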
static inline void
rtc_nanotime_set_commpage(pal_rtc_nanotime_t *rntp)
{
        commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}

/*
 * rtc_nanotime_init:
 *
 * Initialize the nanotime info from the base time.
 */
static inline void
_rtc_nanotime_init(pal_rtc_nanotime_t *rntp, uint64_t base)
{
        uint64_t        tsc = rdtsc64();

        _pal_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
}

static void
rtc_nanotime_init(uint64_t base)
{
        _rtc_nanotime_init(&pal_rtc_nanotime_info, base);
        rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
}

/*
 * rtc_nanotime_init_commpage:
 *
 * Call back from the commpage initialization to
 * cause the commpage data to be filled in once the
 * commpages have been created.
 */
void
rtc_nanotime_init_commpage(void)
{
        spl_t   s = splclock();

        rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
        splx(s);
}

/*
 * rtc_nanotime_read:
 *
 * Returns the current nanotime value, accessible from any
 * context.
 */
static inline uint64_t
rtc_nanotime_read(void)
{

#if CONFIG_EMBEDDED
        if (gPEClockFrequencyInfo.timebase_frequency_hz > SLOW_TSC_THRESHOLD)
                return  _rtc_nanotime_read(&rtc_nanotime_info, 1);      /* slow processor */
        else
#endif
        return  _rtc_nanotime_read(&pal_rtc_nanotime_info, 0);  /* assume fast processor */
}

/*
 * rtc_clock_napped:
 *
 * Invoked from power management when we exit from a low C-State (>= C4)
 * and the TSC has stopped counting.  The nanotime data is updated according
 * to the provided value which represents the new value for nanotime.
 */
void
rtc_clock_napped(uint64_t base, uint64_t tsc_base)
{
        pal_rtc_nanotime_t      *rntp = &pal_rtc_nanotime_info;
        uint64_t        oldnsecs;
        uint64_t        newnsecs;
        uint64_t        tsc;

        assert(!ml_get_interrupts_enabled());
        tsc = rdtsc64();
        oldnsecs = rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
        newnsecs = base + _tsc_to_nanoseconds(tsc - tsc_base);

        /*
         * Only update the base values if time using the new base values
         * is later than the time using the old base values.
         */
        if (oldnsecs < newnsecs) {
                _pal_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
                rtc_nanotime_set_commpage(rntp);
                trace_set_timebases(tsc_base, base);
        }
}

/*
 * Invoked from power management to correct the SFLM TSC entry drift problem:
 * a small delta is added to the tsc_base.  This is equivalent to nudging time
 * backwards.  We require this to be on the order of a TSC quantum which won't
 * cause callers of mach_absolute_time() to see time going backwards!
 */
void
rtc_clock_adjust(uint64_t tsc_base_delta)
{
        pal_rtc_nanotime_t      *rntp = &pal_rtc_nanotime_info;

        assert(!ml_get_interrupts_enabled());
        assert(tsc_base_delta < 100ULL);        /* i.e. it's small */
        _rtc_nanotime_adjust(tsc_base_delta, rntp);
        rtc_nanotime_set_commpage(rntp);
}

void
rtc_clock_stepping(__unused uint32_t new_frequency,
                   __unused uint32_t old_frequency)
{
        panic("rtc_clock_stepping unsupported");
}

void
rtc_clock_stepped(__unused uint32_t new_frequency,
                  __unused uint32_t old_frequency)
{
        panic("rtc_clock_stepped unsupported");
}

/*
 * rtc_sleep_wakeup:
 *
 * Invoked from power management when we have awoken from a sleep (S3)
 * and the TSC has been reset.  The nanotime data is updated based on
 * the passed in value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_sleep_wakeup(
        uint64_t base)
{
        /* Set fixed configuration for lapic timers */
        rtc_timer->config();

        /*
         * Reset nanotime.
         * The timestamp counter will have been reset
         * but nanotime (uptime) marches onward.
         */
        rtc_nanotime_init(base);
}

/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
rtclock_init(void)
{
        uint64_t        cycles;

        assert(!ml_get_interrupts_enabled());

        if (cpu_number() == master_cpu) {

                assert(tscFreq);
                rtc_set_timescale(tscFreq);

                /*
                 * Adjust and set the exported cpu speed.
                 */
                cycles = rtc_export_speed(tscFreq);

                /*
                 * Set min/max to actual.
                 * ACPI may update these later if speed-stepping is detected.
                 */
                gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
                gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;

                rtc_timer_init();
                clock_timebase_init();
                ml_init_lock_timeout();
        }

        /* Set fixed configuration for lapic timers */
        rtc_timer->config();
        rtc_timer_start();

        return (1);
}

// utility routine
// Set the conversion scale for turning processor cycles into nanoseconds,
// given the measured number of processor cycles in a second.

static void
rtc_set_timescale(uint64_t cycles)
{
        pal_rtc_nanotime_t      *rntp = &pal_rtc_nanotime_info;
        rntp->scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles);

#if CONFIG_EMBEDDED
        if (cycles <= SLOW_TSC_THRESHOLD)
                rntp->shift = (uint32_t)cycles;
        else
#endif
                rntp->shift = 32;

        if (tsc_rebase_abs_time == 0)
                tsc_rebase_abs_time = mach_absolute_time();

        rtc_nanotime_init(0);
}
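
/*
 * Worked example (illustrative): for a hypothetical 2.4 GHz TSC,
 *
 *      scale = (NSEC_PER_SEC << 32) / 2400000000
 *            = (10^9 * 2^32) / (2.4 * 10^9)
 *            = 0x6AAAAAAA
 *
 * i.e. about 0.4167 * 2^32 - the period of a 2.4 GHz clock in nanoseconds
 * expressed as a 32-bit binary fraction - so _tsc_to_nanoseconds() turns
 * elapsed cycles directly into elapsed nanoseconds.
 */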

static uint64_t
rtc_export_speed(uint64_t cyc_per_sec)
{
        uint64_t        cycles;

        /* Round: */
        cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
                  / UI_CPUFREQ_ROUNDING_FACTOR)
                 * UI_CPUFREQ_ROUNDING_FACTOR;

        /*
         * Set current measured speed.
         */
        if (cycles >= 0x100000000ULL) {
                gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
        } else {
                gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
        }
        gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

        kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
        return (cycles);
}
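
/*
 * Worked example (illustrative): a measured cyc_per_sec of 2,394,000,000
 * yields (2,394,000,000 + 5,000,000) / 10,000,000 = 239 after the integer
 * divide, times 10,000,000 = 2,390,000,000.  The UI-visible frequency is
 * thus rounded to the nearest 10 MHz, while tscFreq itself keeps the exact
 * measured value used for timekeeping.
 */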

void
clock_get_system_microtime(
        clock_sec_t             *secs,
        clock_usec_t            *microsecs)
{
        uint64_t        now = rtc_nanotime_read();

        _absolutetime_to_microtime(now, secs, microsecs);
}

void
clock_get_system_nanotime(
        clock_sec_t             *secs,
        clock_nsec_t            *nanosecs)
{
        uint64_t        now = rtc_nanotime_read();

        _absolutetime_to_nanotime(now, secs, nanosecs);
}

void
clock_gettimeofday_set_commpage(
        uint64_t                abstime,
        uint64_t                epoch,
        uint64_t                offset,
        clock_sec_t             *secs,
        clock_usec_t            *microsecs)
{
        uint64_t        now = abstime + offset;
        uint32_t        remain;

        remain = _absolutetime_to_microtime(now, secs, microsecs);

        *secs += (clock_sec_t)epoch;

        commpage_set_timestamp(abstime - remain, *secs);
}
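
/*
 * Illustrative sketch only: storing "abstime - remain" means the commpage
 * timestamp is the absolute time at the exact whole-second boundary for
 * *secs, so a hypothetical userspace reader could form a timeval with one
 * subtraction and one divide (commpage_abstime/commpage_secs stand in for
 * whatever the real commpage layout provides):
 *
 *      delta      = mach_absolute_time() - commpage_abstime;
 *      tv.tv_sec  = commpage_secs;
 *      tv.tv_usec = delta / NSEC_PER_USEC;    // valid while delta < 1 second
 */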

void
clock_timebase_info(
        mach_timebase_info_t    info)
{
        info->numer = info->denom = 1;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
        x86_saved_state_t       *tregs)
{
        uint64_t        rip;
        boolean_t       user_mode = FALSE;

        assert(get_preemption_level() > 0);
        assert(!ml_get_interrupts_enabled());

        if (is_saved_state64(tregs) == TRUE) {
                x86_saved_state64_t     *regs;

                regs = saved_state64(tregs);

                if (regs->isf.cs & 0x03)
                        user_mode = TRUE;
                rip = regs->isf.rip;
        } else {
                x86_saved_state32_t     *regs;

                regs = saved_state32(tregs);

                if (regs->cs & 0x03)
                        user_mode = TRUE;
                rip = regs->eip;
        }

        /* call the generic etimer */
        etimer_intr(user_mode, rip);
}


/*
 * Request timer pop from the hardware
 */

uint64_t
setPop(
        uint64_t time)
{
        uint64_t        now;
        uint64_t        pop;

        /* 0 and EndOfAllTime are special-cases for "clear the timer" */
        if (time == 0 || time == EndOfAllTime ) {
                time = EndOfAllTime;
                now = 0;
                pop = rtc_timer->set(0, 0);
        } else {
                now = rtc_nanotime_read();      /* The time in nanoseconds */
                pop = rtc_timer->set(time, now);
        }

        /* Record requested and actual deadlines set */
        x86_lcpu()->rtcDeadline = time;
        x86_lcpu()->rtcPop      = pop;

        return pop - now;
}
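
/*
 * Usage sketch (illustrative only):
 *
 *      // Arm the hardware timer for a deadline 1 ms out; the return value
 *      // is the delta from now to the pop actually programmed.
 *      uint64_t delta = setPop(mach_absolute_time() + 1000 * NSEC_PER_USEC);
 *
 *      // Cancel any pending pop; 0 and EndOfAllTime both mean "clear".
 *      (void) setPop(EndOfAllTime);
 */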

uint64_t
mach_absolute_time(void)
{
        return rtc_nanotime_read();
}

void
clock_interval_to_absolutetime_interval(
        uint32_t                interval,
        uint32_t                scale_factor,
        uint64_t                *result)
{
        *result = (uint64_t)interval * scale_factor;
}

void
absolutetime_to_microtime(
        uint64_t                abstime,
        clock_sec_t             *secs,
        clock_usec_t            *microsecs)
{
        _absolutetime_to_microtime(abstime, secs, microsecs);
}

void
absolutetime_to_nanotime(
        uint64_t                abstime,
        clock_sec_t             *secs,
        clock_nsec_t            *nanosecs)
{
        _absolutetime_to_nanotime(abstime, secs, nanosecs);
}

void
nanotime_to_absolutetime(
        clock_sec_t             secs,
        clock_nsec_t            nanosecs,
        uint64_t                *result)
{
        *result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
}

void
absolutetime_to_nanoseconds(
        uint64_t                abstime,
        uint64_t                *result)
{
        *result = abstime;
}

void
nanoseconds_to_absolutetime(
        uint64_t                nanoseconds,
        uint64_t                *result)
{
        *result = nanoseconds;
}

void
machine_delay_until(
        uint64_t                deadline)
{
        uint64_t now;

        do {
                cpu_pause();
                now = mach_absolute_time();
        } while (now < deadline);
}