]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/rtclock.c
xnu-2422.115.4.tar.gz
[apple/xnu.git] / osfmk / i386 / rtclock.c
CommitLineData
1c79356b 1/*
39236c6e 2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31
32/*
33 * File: i386/rtclock.c
34 * Purpose: Routines for handling the machine dependent
91447636
A
35 * real-time clock. Historically, this clock is
36 * generated by the Intel 8254 Programmable Interval
37 * Timer, but local apic timers are now used for
38 * this purpose with the master time reference being
39 * the cpu clock counted by the timestamp MSR.
1c79356b
A
40 */
41
1c79356b 42#include <platforms.h>
55e303ae
A
43
44#include <mach/mach_types.h>
45
1c79356b 46#include <kern/cpu_data.h>
91447636 47#include <kern/cpu_number.h>
1c79356b 48#include <kern/clock.h>
55e303ae 49#include <kern/host_notify.h>
1c79356b
A
50#include <kern/macro_help.h>
51#include <kern/misc_protos.h>
52#include <kern/spl.h>
91447636 53#include <kern/assert.h>
39236c6e 54#include <kern/timer_queue.h>
1c79356b
A
55#include <mach/vm_prot.h>
56#include <vm/pmap.h>
57#include <vm/vm_kern.h> /* for kernel_map */
0c530ab8 58#include <architecture/i386/pio.h>
55e303ae 59#include <i386/machine_cpu.h>
91447636 60#include <i386/cpuid.h>
91447636 61#include <i386/cpu_threads.h>
b0d623f7 62#include <i386/mp.h>
91447636 63#include <i386/machine_routines.h>
6d2010ae 64#include <i386/pal_routines.h>
b0d623f7
A
65#include <i386/proc_reg.h>
66#include <i386/misc_protos.h>
55e303ae 67#include <pexpert/pexpert.h>
91447636
A
68#include <machine/limits.h>
69#include <machine/commpage.h>
70#include <sys/kdebug.h>
0c530ab8 71#include <i386/tsc.h>
6d2010ae 72#include <i386/rtclock_protos.h>
91447636 73#define UI_CPUFREQ_ROUNDING_FACTOR 10000000
1c79356b 74
0c530ab8 75int rtclock_config(void);
6601e61a 76
0c530ab8 77int rtclock_init(void);
6601e61a 78
b0d623f7
A
79uint64_t tsc_rebase_abs_time = 0;
80
0c530ab8
A
81static void rtc_set_timescale(uint64_t cycles);
82static uint64_t rtc_export_speed(uint64_t cycles);
8f6c56a5 83
060df5ea
A
84void
85rtc_timer_start(void)
86{
87 /*
88 * Force a complete re-evaluation of timer deadlines.
89 */
39236c6e
A
90 x86_lcpu()->rtcDeadline = EndOfAllTime;
91 timer_resync_deadlines();
060df5ea
A
92}
93
b0d623f7
A
94static inline uint32_t
95_absolutetime_to_microtime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *microsecs)
96{
97 uint32_t remain;
b0d623f7
A
98 *secs = abstime / (uint64_t)NSEC_PER_SEC;
99 remain = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC);
100 *microsecs = remain / NSEC_PER_USEC;
b0d623f7
A
101 return remain;
102}
103
104static inline void
105_absolutetime_to_nanotime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *nanosecs)
106{
b0d623f7
A
107 *secs = abstime / (uint64_t)NSEC_PER_SEC;
108 *nanosecs = (clock_usec_t)(abstime % (uint64_t)NSEC_PER_SEC);
b0d623f7
A
109}
110
1c79356b
A
111/*
112 * Configure the real-time clock device. Return success (1)
113 * or failure (0).
114 */
115
int
rtclock_config(void)
{
	/* No hardware configuration is required; report success. */
	return (1);
}
122
123
124/*
 125 * Nanotime/mach_absolute_time
126 * -----------------------------
0c530ab8
A
127 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
128 * efficiently by the kernel and in userspace - is the reference for all timing.
129 * The cpu clock rate is platform-dependent and may stop or be reset when the
130 * processor is napped/slept. As a result, nanotime is the software abstraction
131 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
91447636
A
132 *
133 * The kernel maintains nanotime information recording:
0c530ab8 134 * - the ratio of tsc to nanoseconds
91447636
A
135 * with this ratio expressed as a 32-bit scale and shift
136 * (power of 2 divider);
0c530ab8 137 * - { tsc_base, ns_base } pair of corresponding timestamps.
6601e61a 138 *
0c530ab8
A
139 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
140 * for the userspace nanotime routine to read.
6601e61a 141 *
0c530ab8
A
142 * All of the routines which update the nanotime data are non-reentrant. This must
143 * be guaranteed by the caller.
91447636
A
144 */
145static inline void
6d2010ae 146rtc_nanotime_set_commpage(pal_rtc_nanotime_t *rntp)
91447636 147{
0c530ab8
A
148 commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
149}
6601e61a 150
0c530ab8
A
151/*
152 * rtc_nanotime_init:
153 *
 154 * Initialize the nanotime info from the base time.
155 */
156static inline void
6d2010ae 157_rtc_nanotime_init(pal_rtc_nanotime_t *rntp, uint64_t base)
0c530ab8
A
158{
159 uint64_t tsc = rdtsc64();
21362eb3 160
6d2010ae 161 _pal_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
91447636
A
162}
163
164static void
0c530ab8 165rtc_nanotime_init(uint64_t base)
91447636 166{
6d2010ae
A
167 _rtc_nanotime_init(&pal_rtc_nanotime_info, base);
168 rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
91447636
A
169}
170
0c530ab8
A
171/*
172 * rtc_nanotime_init_commpage:
173 *
174 * Call back from the commpage initialization to
175 * cause the commpage data to be filled in once the
176 * commpages have been created.
177 */
178void
179rtc_nanotime_init_commpage(void)
91447636 180{
0c530ab8
A
181 spl_t s = splclock();
182
6d2010ae 183 rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
0c530ab8 184 splx(s);
91447636
A
185}
186
0c530ab8
A
187/*
188 * rtc_nanotime_read:
189 *
 190 * Returns the current nanotime value, accessible from any
191 * context.
192 */
2d21ac55 193static inline uint64_t
91447636
A
194rtc_nanotime_read(void)
195{
bd504ef0 196 return _rtc_nanotime_read(&pal_rtc_nanotime_info);
91447636
A
197}
198
91447636 199/*
0c530ab8
A
200 * rtc_clock_napped:
201 *
4a3eedf9
A
202 * Invoked from power management when we exit from a low C-State (>= C4)
203 * and the TSC has stopped counting. The nanotime data is updated according
204 * to the provided value which represents the new value for nanotime.
91447636 205 */
0c530ab8 206void
4a3eedf9 207rtc_clock_napped(uint64_t base, uint64_t tsc_base)
0c530ab8 208{
6d2010ae 209 pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info;
4a3eedf9
A
210 uint64_t oldnsecs;
211 uint64_t newnsecs;
212 uint64_t tsc;
2d21ac55
A
213
214 assert(!ml_get_interrupts_enabled());
4a3eedf9 215 tsc = rdtsc64();
bd504ef0
A
216 oldnsecs = rntp->ns_base + _rtc_tsc_to_nanoseconds(tsc - rntp->tsc_base, rntp);
217 newnsecs = base + _rtc_tsc_to_nanoseconds(tsc - tsc_base, rntp);
4a3eedf9
A
218
219 /*
220 * Only update the base values if time using the new base values
221 * is later than the time using the old base values.
222 */
223 if (oldnsecs < newnsecs) {
6d2010ae 224 _pal_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
4a3eedf9
A
225 rtc_nanotime_set_commpage(rntp);
226 }
0c530ab8
A
227}
228
0b4c1975
A
229/*
230 * Invoked from power management to correct the SFLM TSC entry drift problem:
6d2010ae
A
 231 * a small delta is added to the tsc_base. This is equivalent to nudging time
232 * backwards. We require this to be on the order of a TSC quantum which won't
233 * cause callers of mach_absolute_time() to see time going backwards!
0b4c1975
A
234 */
235void
236rtc_clock_adjust(uint64_t tsc_base_delta)
237{
6d2010ae 238 pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info;
0b4c1975 239
6d2010ae
A
240 assert(!ml_get_interrupts_enabled());
241 assert(tsc_base_delta < 100ULL); /* i.e. it's small */
242 _rtc_nanotime_adjust(tsc_base_delta, rntp);
243 rtc_nanotime_set_commpage(rntp);
0b4c1975
A
244}
245
91447636
A
246void
247rtc_clock_stepping(__unused uint32_t new_frequency,
248 __unused uint32_t old_frequency)
249{
0c530ab8 250 panic("rtc_clock_stepping unsupported");
91447636
A
251}
252
91447636 253void
0c530ab8
A
254rtc_clock_stepped(__unused uint32_t new_frequency,
255 __unused uint32_t old_frequency)
91447636 256{
2d21ac55 257 panic("rtc_clock_stepped unsupported");
1c79356b
A
258}
259
260/*
0c530ab8
A
261 * rtc_sleep_wakeup:
262 *
6d2010ae 263 * Invoked from power management when we have awoken from a sleep (S3)
bd504ef0
A
264 * and the TSC has been reset, or from Deep Idle (S0) sleep when the TSC
265 * has progressed. The nanotime data is updated based on the passed-in value.
0c530ab8
A
266 *
267 * The caller must guarantee non-reentrancy.
91447636
A
268 */
269void
0c530ab8
A
270rtc_sleep_wakeup(
271 uint64_t base)
91447636 272{
060df5ea
A
273 /* Set fixed configuration for lapic timers */
274 rtc_timer->config();
275
91447636
A
276 /*
277 * Reset nanotime.
278 * The timestamp counter will have been reset
279 * but nanotime (uptime) marches onward.
91447636 280 */
0c530ab8 281 rtc_nanotime_init(base);
91447636
A
282}
283
284/*
285 * Initialize the real-time clock device.
286 * In addition, various variables used to support the clock are initialized.
1c79356b
A
287 */
288int
0c530ab8 289rtclock_init(void)
1c79356b 290{
91447636
A
291 uint64_t cycles;
292
0c530ab8
A
293 assert(!ml_get_interrupts_enabled());
294
91447636 295 if (cpu_number() == master_cpu) {
0c530ab8
A
296
297 assert(tscFreq);
298 rtc_set_timescale(tscFreq);
299
91447636 300 /*
0c530ab8 301 * Adjust and set the exported cpu speed.
91447636 302 */
0c530ab8 303 cycles = rtc_export_speed(tscFreq);
91447636
A
304
305 /*
306 * Set min/max to actual.
307 * ACPI may update these later if speed-stepping is detected.
308 */
0c530ab8
A
309 gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
310 gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
91447636 311
060df5ea 312 rtc_timer_init();
91447636 313 clock_timebase_init();
0c530ab8 314 ml_init_lock_timeout();
bd504ef0 315 ml_init_delay_spin_threshold(10);
1c79356b 316 }
91447636 317
6d2010ae 318 /* Set fixed configuration for lapic timers */
060df5ea 319 rtc_timer->config();
060df5ea 320 rtc_timer_start();
91447636 321
1c79356b
A
322 return (1);
323}
324
0c530ab8
A
325// utility routine
326// Code to calculate how many processor cycles are in a second...
1c79356b 327
0c530ab8
A
328static void
329rtc_set_timescale(uint64_t cycles)
1c79356b 330{
6d2010ae 331 pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info;
bd504ef0
A
332 uint32_t shift = 0;
333
334 /* the "scale" factor will overflow unless cycles>SLOW_TSC_THRESHOLD */
335
336 while ( cycles <= SLOW_TSC_THRESHOLD) {
337 shift++;
338 cycles <<= 1;
339 }
340
341 if ( shift != 0 )
342 printf("Slow TSC, rtc_nanotime.shift == %d\n", shift);
343
b0d623f7 344 rntp->scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles);
2d21ac55 345
bd504ef0 346 rntp->shift = shift;
1c79356b 347
15129b1c
A
348 /*
349 * On some platforms, the TSC is not reset at warm boot. But the
350 * rebase time must be relative to the current boot so we can't use
351 * mach_absolute_time(). Instead, we convert the TSC delta since boot
352 * to nanoseconds.
353 */
b0d623f7 354 if (tsc_rebase_abs_time == 0)
15129b1c
A
355 tsc_rebase_abs_time = _rtc_tsc_to_nanoseconds(
356 rdtsc64() - tsc_at_boot, rntp);
b0d623f7 357
0c530ab8 358 rtc_nanotime_init(0);
1c79356b
A
359}
360
91447636 361static uint64_t
0c530ab8 362rtc_export_speed(uint64_t cyc_per_sec)
9bccf70c 363{
0c530ab8 364 uint64_t cycles;
1c79356b 365
0c530ab8
A
366 /* Round: */
367 cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
91447636
A
368 / UI_CPUFREQ_ROUNDING_FACTOR)
369 * UI_CPUFREQ_ROUNDING_FACTOR;
9bccf70c 370
91447636
A
371 /*
372 * Set current measured speed.
373 */
374 if (cycles >= 0x100000000ULL) {
375 gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
55e303ae 376 } else {
91447636 377 gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
9bccf70c 378 }
91447636 379 gPEClockFrequencyInfo.cpu_frequency_hz = cycles;
55e303ae 380
0c530ab8 381 kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
91447636 382 return(cycles);
9bccf70c 383}
1c79356b 384
55e303ae
A
385void
386clock_get_system_microtime(
b0d623f7
A
387 clock_sec_t *secs,
388 clock_usec_t *microsecs)
9bccf70c 389{
0c530ab8 390 uint64_t now = rtc_nanotime_read();
6601e61a 391
b0d623f7 392 _absolutetime_to_microtime(now, secs, microsecs);
1c79356b
A
393}
394
55e303ae
A
395void
396clock_get_system_nanotime(
b0d623f7
A
397 clock_sec_t *secs,
398 clock_nsec_t *nanosecs)
55e303ae 399{
0c530ab8 400 uint64_t now = rtc_nanotime_read();
8f6c56a5 401
b0d623f7 402 _absolutetime_to_nanotime(now, secs, nanosecs);
6601e61a
A
403}
404
405void
0c530ab8
A
406clock_gettimeofday_set_commpage(
407 uint64_t abstime,
408 uint64_t epoch,
409 uint64_t offset,
b0d623f7
A
410 clock_sec_t *secs,
411 clock_usec_t *microsecs)
0c530ab8 412{
b0d623f7 413 uint64_t now = abstime + offset;
0c530ab8 414 uint32_t remain;
6601e61a 415
b0d623f7 416 remain = _absolutetime_to_microtime(now, secs, microsecs);
6601e61a 417
b0d623f7 418 *secs += (clock_sec_t)epoch;
6601e61a 419
2d21ac55 420 commpage_set_timestamp(abstime - remain, *secs);
91447636
A
421}
422
1c79356b
A
423void
424clock_timebase_info(
425 mach_timebase_info_t info)
426{
91447636 427 info->numer = info->denom = 1;
1c79356b
A
428}
429
1c79356b 430/*
91447636 431 * Real-time clock device interrupt.
1c79356b 432 */
1c79356b 433void
0c530ab8
A
434rtclock_intr(
435 x86_saved_state_t *tregs)
1c79356b 436{
0c530ab8
A
437 uint64_t rip;
438 boolean_t user_mode = FALSE;
91447636
A
439
440 assert(get_preemption_level() > 0);
441 assert(!ml_get_interrupts_enabled());
442
0c530ab8
A
443 if (is_saved_state64(tregs) == TRUE) {
444 x86_saved_state64_t *regs;
445
446 regs = saved_state64(tregs);
5d5c5d0d 447
b0d623f7
A
448 if (regs->isf.cs & 0x03)
449 user_mode = TRUE;
0c530ab8
A
450 rip = regs->isf.rip;
451 } else {
452 x86_saved_state32_t *regs;
8ad349bb 453
0c530ab8 454 regs = saved_state32(tregs);
4452a7af 455
0c530ab8
A
456 if (regs->cs & 0x03)
457 user_mode = TRUE;
458 rip = regs->eip;
459 }
89b3af67 460
0c530ab8 461 /* call the generic etimer */
39236c6e 462 timer_intr(user_mode, rip);
5d5c5d0d
A
463}
464
060df5ea 465
0c530ab8
A
466/*
467 * Request timer pop from the hardware
468 */
469
060df5ea 470uint64_t
0c530ab8
A
471setPop(
472 uint64_t time)
5d5c5d0d 473{
6d2010ae
A
474 uint64_t now;
475 uint64_t pop;
060df5ea
A
476
477 /* 0 and EndOfAllTime are special-cases for "clear the timer" */
6d2010ae 478 if (time == 0 || time == EndOfAllTime ) {
060df5ea
A
479 time = EndOfAllTime;
480 now = 0;
6d2010ae 481 pop = rtc_timer->set(0, 0);
060df5ea 482 } else {
6d2010ae
A
483 now = rtc_nanotime_read(); /* The time in nanoseconds */
484 pop = rtc_timer->set(time, now);
060df5ea 485 }
4452a7af 486
6d2010ae 487 /* Record requested and actual deadlines set */
060df5ea 488 x86_lcpu()->rtcDeadline = time;
6d2010ae 489 x86_lcpu()->rtcPop = pop;
4452a7af 490
060df5ea 491 return pop - now;
89b3af67
A
492}
493
6601e61a
A
uint64_t
mach_absolute_time(void)
{
	/* Absolute time on this platform is simply nanotime. */
	return rtc_nanotime_read();
}
498}
499
void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	/* Widen before multiplying so the product cannot truncate. */
	*result = (uint64_t)interval * scale_factor;
}
508
509void
510absolutetime_to_microtime(
511 uint64_t abstime,
b0d623f7
A
512 clock_sec_t *secs,
513 clock_usec_t *microsecs)
91447636 514{
b0d623f7 515 _absolutetime_to_microtime(abstime, secs, microsecs);
1c79356b
A
516}
517
518void
0c530ab8
A
519absolutetime_to_nanotime(
520 uint64_t abstime,
b0d623f7
A
521 clock_sec_t *secs,
522 clock_nsec_t *nanosecs)
6601e61a 523{
b0d623f7 524 _absolutetime_to_nanotime(abstime, secs, nanosecs);
6601e61a
A
525}
526
527void
0c530ab8 528nanotime_to_absolutetime(
b0d623f7
A
529 clock_sec_t secs,
530 clock_nsec_t nanosecs,
0c530ab8 531 uint64_t *result)
1c79356b 532{
0c530ab8 533 *result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
1c79356b
A
534}
535
void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	/* Absolute time is already expressed in nanoseconds: identity. */
	*result = abstime;
}
543
void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	/* Absolute time is already expressed in nanoseconds: identity. */
	*result = nanoseconds;
}
551
void
machine_delay_until(
	uint64_t	interval,
	uint64_t	deadline)
{
	(void)interval;

	/* Spin (with pause hints) until the deadline has passed. */
	for (;;) {
		if (mach_absolute_time() >= deadline)
			break;
		cpu_pause();
	}
}