/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * File:    i386/rtclock.c
 * Purpose: Routines for handling the machine-dependent
 *          real-time clock.  Historically, this clock was
 *          generated by the Intel 8254 Programmable Interval
 *          Timer, but local APIC timers are now used for
 *          this purpose, with the master time reference being
 *          the CPU clock counted by the timestamp MSR.
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>         /* for kernel_map */
#include <i386/ipl.h>
#include <architecture/i386/pio.h>
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/lapic.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/cpu_threads.h>
#include <i386/perfmon.h>
#include <i386/machine_routines.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
#include <i386/tsc.h>
#include <i386/rtclock.h>

#define NSEC_PER_HZ                     (NSEC_PER_SEC / 100)    /* nsec per tick */

#define UI_CPUFREQ_ROUNDING_FACTOR      10000000

int             rtclock_config(void);

int             rtclock_init(void);

uint64_t        rtc_decrementer_min;

void            rtclock_intr(x86_saved_state_t *regs);
static uint64_t maxDec;         /* longest interval our hardware timer can handle (nsec) */

/* XXX this should really be in a header somewhere */
extern clock_timer_func_t rtclock_timer_expire;

static void     rtc_set_timescale(uint64_t cycles);
static uint64_t rtc_export_speed(uint64_t cycles);

rtc_nanotime_t rtc_nanotime_info = {0, 0, 0, 0, 1, 0};

/*
 * tsc_to_nanoseconds:
 *
 * Basic routine to convert a raw 64 bit TSC value to a
 * 64 bit nanosecond value.  The conversion is implemented
 * based on the scale factor and an implicit 32 bit shift.
 */
static inline uint64_t
_tsc_to_nanoseconds(uint64_t value)
{
    asm volatile("movl %%edx,%%esi ;"
                 "mull %%ecx ;"
                 "movl %%edx,%%edi ;"
                 "movl %%esi,%%eax ;"
                 "mull %%ecx ;"
                 "addl %%edi,%%eax ;"
                 "adcl $0,%%edx "
                 : "+A" (value)
                 : "c" (current_cpu_datap()->cpu_nanotime->scale)
                 : "esi", "edi");

    return (value);
}

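/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * assembly above computes (value * scale) >> 32 using two 32x32->64-bit
 * multiplies, where 'scale' is the 32.32 fixed-point nanoseconds-per-TSC-tick
 * ratio set up in rtc_set_timescale().  A portable C equivalent would be
 * roughly:
 *
 *	static inline uint64_t
 *	tsc_to_ns_sketch(uint64_t value, uint32_t scale)
 *	{
 *		uint64_t lo = (uint64_t)(uint32_t)value * scale;  // low 32 bits * scale
 *		uint64_t hi = (value >> 32) * scale;              // high 32 bits * scale
 *
 *		return hi + (lo >> 32);   // == (value * scale) >> 32
 *	}
 *
 * i.e. the full 96-bit product is never materialized; only the bits that
 * survive the implicit 32-bit right shift are kept.
 */
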
static uint32_t
deadline_to_decrementer(
    uint64_t deadline,
    uint64_t now)
{
    uint64_t delta;

    if (deadline <= now)
        return rtc_decrementer_min;
    else {
        delta = deadline - now;
        return MIN(MAX(rtc_decrementer_min, delta), maxDec);
    }
}
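
/*
 * Editor's note (illustrative, not part of the original source): the clamping
 * above, with the values established in rtclock_init() below
 * (rtc_decrementer_min == 1 usec, maxDec == the longest interval the LAPIC
 * timer can represent), behaves for example as:
 *
 *	deadline already passed or < 1 usec away  ->  1 usec pop
 *	deadline hours away                       ->  clamped down to maxDec
 *
 * so the requested pop always fits the hardware's range and never comes
 * sooner than the minimum programmable interval.
 */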

void
rtc_lapic_start_ticking(void)
{
    x86_lcpu_t *lcpu = x86_lcpu();

    /*
     * Force a complete re-evaluation of timer deadlines.
     */
    lcpu->rtcPop = EndOfAllTime;
    etimer_resync_deadlines();
}

/*
 * Configure the real-time clock device. Return success (1)
 * or failure (0).
 */

int
rtclock_config(void)
{
    /* nothing to do */
    return (1);
}


/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * The cpu clock rate is platform-dependent and may stop or be reset when the
 * processor is napped/slept. As a result, nanotime is the software abstraction
 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
 *
 * The kernel maintains nanotime information recording:
 *	- the ratio of tsc to nanoseconds
 *	  with this ratio expressed as a 32-bit scale and shift
 *	  (power of 2 divider);
 *	- { tsc_base, ns_base } pair of corresponding timestamps.
 *
 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read.
 *
 * All of the routines which update the nanotime data are non-reentrant. This must
 * be guaranteed by the caller.
 */
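/*
 * Editor's note (sketch, not part of the original source): given that tuple,
 * a reader (kernel or commpage user code) recovers the current nanotime as
 * roughly
 *
 *	ns = ns_base + (((tsc_now - tsc_base) * scale) >> 32)
 *
 * i.e. the TSC ticks elapsed since the last rebase, converted through the
 * 32.32 fixed-point scale (see _tsc_to_nanoseconds() above); the shift field
 * only comes into play on slow-TSC parts (see rtc_set_timescale() below).
 */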
static inline void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
    commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}

/*
 * rtc_nanotime_init:
 *
 * Initialize the nanotime info from the base time.
 */
static inline void
_rtc_nanotime_init(rtc_nanotime_t *rntp, uint64_t base)
{
    uint64_t tsc = rdtsc64();

    _rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
}

static void
rtc_nanotime_init(uint64_t base)
{
    rtc_nanotime_t *rntp = current_cpu_datap()->cpu_nanotime;

    _rtc_nanotime_init(rntp, base);
    rtc_nanotime_set_commpage(rntp);
}

/*
 * rtc_nanotime_init_commpage:
 *
 * Call back from the commpage initialization to
 * cause the commpage data to be filled in once the
 * commpages have been created.
 */
void
rtc_nanotime_init_commpage(void)
{
    spl_t s = splclock();

    rtc_nanotime_set_commpage(current_cpu_datap()->cpu_nanotime);

    splx(s);
}

/*
 * rtc_nanotime_read:
 *
 * Returns the current nanotime value, accessible from any
 * context.
 */
static inline uint64_t
rtc_nanotime_read(void)
{

#if CONFIG_EMBEDDED
    if (gPEClockFrequencyInfo.timebase_frequency_hz > SLOW_TSC_THRESHOLD)
        return _rtc_nanotime_read(current_cpu_datap()->cpu_nanotime, 1);    /* slow processor */
    else
#endif
    return _rtc_nanotime_read(current_cpu_datap()->cpu_nanotime, 0);        /* assume fast processor */
}

/*
 * rtc_clock_napped:
 *
 * Invoked from power management when we exit from a low C-State (>= C4)
 * and the TSC has stopped counting.  The nanotime data is updated according
 * to the provided value which represents the new value for nanotime.
 */
void
rtc_clock_napped(uint64_t base, uint64_t tsc_base)
{
    rtc_nanotime_t *rntp = current_cpu_datap()->cpu_nanotime;
    uint64_t oldnsecs;
    uint64_t newnsecs;
    uint64_t tsc;

    assert(!ml_get_interrupts_enabled());
    tsc = rdtsc64();
    oldnsecs = rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
    newnsecs = base + _tsc_to_nanoseconds(tsc - tsc_base);

    /*
     * Only update the base values if time using the new base values
     * is later than the time using the old base values.
     */
    if (oldnsecs < newnsecs) {
        _rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
        rtc_nanotime_set_commpage(rntp);
    }
}
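
/*
 * Editor's note (illustrative, not part of the original source): the
 * monotonicity check above guards against a {base, tsc_base} pair that would
 * make time appear to step backwards.  For example, if the old bases yield
 * oldnsecs = 1,000,500 ns at the current TSC reading but the proposed bases
 * yield newnsecs = 1,000,200 ns, the update is skipped and
 * mach_absolute_time() keeps advancing from the old bases instead.
 */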

void
rtc_clock_stepping(__unused uint32_t new_frequency,
                   __unused uint32_t old_frequency)
{
    panic("rtc_clock_stepping unsupported");
}

void
rtc_clock_stepped(__unused uint32_t new_frequency,
                  __unused uint32_t old_frequency)
{
    panic("rtc_clock_stepped unsupported");
}

/*
 * rtc_sleep_wakeup:
 *
 * Invoked from power management when we have awoken from a sleep (S3)
 * and the TSC has been reset.  The nanotime data is updated based on
 * the passed-in value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_sleep_wakeup(
    uint64_t base)
{
    /*
     * Reset nanotime.
     * The timestamp counter will have been reset
     * but nanotime (uptime) marches onward.
     */
    rtc_nanotime_init(base);
}

/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
rtclock_init(void)
{
    uint64_t cycles;

    assert(!ml_get_interrupts_enabled());

    if (cpu_number() == master_cpu) {

        assert(tscFreq);
        rtc_set_timescale(tscFreq);

        /*
         * Adjust and set the exported cpu speed.
         */
        cycles = rtc_export_speed(tscFreq);

        /*
         * Set min/max to actual.
         * ACPI may update these later if speed-stepping is detected.
         */
        gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
        gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;

        /*
         * Compute the longest interval we can represent.
         */
        maxDec = tmrCvt(0x7fffffffULL, busFCvtt2n);
        kprintf("maxDec: %lld\n", maxDec);

        /* Minimum interval is 1usec */
        rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC, 0ULL);
        /* Point LAPIC interrupts to hardclock() */
        lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

        clock_timebase_init();
        ml_init_lock_timeout();
    }

    rtc_lapic_start_ticking();

    return (1);
}

/*
 * Utility routine: set the fixed-point timescale used to convert
 * processor cycles to nanoseconds, given the number of processor
 * cycles in a second.
 */
static void
rtc_set_timescale(uint64_t cycles)
{
    rtc_nanotime_t *rntp = current_cpu_datap()->cpu_nanotime;
    rntp->scale = ((uint64_t)NSEC_PER_SEC << 32) / cycles;

    if (cycles <= SLOW_TSC_THRESHOLD)
        rntp->shift = cycles;
    else
        rntp->shift = 32;

    rtc_nanotime_init(0);
}
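
/*
 * Editor's note (worked example, not part of the original source): the scale
 * value is a 32.32 fixed-point ratio of nanoseconds per TSC tick.  Assuming a
 * hypothetical 2.4 GHz TSC:
 *
 *	scale = (NSEC_PER_SEC << 32) / 2400000000
 *	      = (10^9 * 2^32) / (2.4 * 10^9)
 *	      = 1789569706  (0x6AAAAAAA, i.e. ~0.4167 ns per tick)
 *
 * so _tsc_to_nanoseconds() multiplies by this value and drops the low 32 bits.
 */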

static uint64_t
rtc_export_speed(uint64_t cyc_per_sec)
{
    uint64_t cycles;

    /* Round: */
    cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR / 2))
              / UI_CPUFREQ_ROUNDING_FACTOR)
             * UI_CPUFREQ_ROUNDING_FACTOR;

    /*
     * Set current measured speed.
     */
    if (cycles >= 0x100000000ULL) {
        gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
    } else {
        gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
    }
    gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

    kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
    return (cycles);
}
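
/*
 * Editor's note (worked example, not part of the original source): the
 * rounding above snaps the measured frequency to the nearest multiple of
 * UI_CPUFREQ_ROUNDING_FACTOR (10 MHz) for reporting.  For instance, a
 * hypothetical measured rate of 2,394,800,000 Hz becomes:
 *
 *	(2394800000 + 5000000) / 10000000 * 10000000 = 2390000000 Hz
 *
 * i.e. 2.39 GHz as the exported cpu frequency.
 */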

void
clock_get_system_microtime(
    uint32_t *secs,
    uint32_t *microsecs)
{
    uint64_t now = rtc_nanotime_read();
    uint32_t remain;

    asm volatile(
        "divl %3"
        : "=a" (*secs), "=d" (remain)
        : "A" (now), "r" (NSEC_PER_SEC));
    asm volatile(
        "divl %3"
        : "=a" (*microsecs)
        : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
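
/*
 * Editor's note (illustrative, not part of the original source): the "divl"
 * sequences above use the x86 64-by-32-bit divide: EDX:EAX holds the 64-bit
 * nanosecond count, the quotient (seconds) lands in EAX and the remainder
 * (leftover nanoseconds) in EDX; a second divide by NSEC_PER_USEC turns the
 * remainder into microseconds.  A portable C sketch of the same computation
 * would be roughly:
 *
 *	void ns_to_sec_usec_sketch(uint64_t now, uint32_t *secs, uint32_t *usecs)
 *	{
 *		*secs  = (uint32_t)(now / NSEC_PER_SEC);
 *		*usecs = (uint32_t)((now % NSEC_PER_SEC) / NSEC_PER_USEC);
 *	}
 *
 * The inline assembly avoids the 64-bit software divide the compiler would
 * otherwise have to use on 32-bit i386.
 */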

void
clock_get_system_nanotime(
    uint32_t *secs,
    uint32_t *nanosecs)
{
    uint64_t now = rtc_nanotime_read();

    asm volatile(
        "divl %3"
        : "=a" (*secs), "=d" (*nanosecs)
        : "A" (now), "r" (NSEC_PER_SEC));
}

void
clock_gettimeofday_set_commpage(
    uint64_t abstime,
    uint64_t epoch,
    uint64_t offset,
    uint32_t *secs,
    uint32_t *microsecs)
{
    uint64_t now = abstime;
    uint32_t remain;

    now += offset;

    asm volatile(
        "divl %3"
        : "=a" (*secs), "=d" (remain)
        : "A" (now), "r" (NSEC_PER_SEC));
    asm volatile(
        "divl %3"
        : "=a" (*microsecs)
        : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));

    *secs += epoch;

    commpage_set_timestamp(abstime - remain, *secs);
}

void
clock_timebase_info(
    mach_timebase_info_t info)
{
    /* Absolute time is already expressed in nanoseconds on this platform. */
    info->numer = info->denom = 1;
}

void
clock_set_timer_func(
    clock_timer_func_t func)
{
    /* Register the expiry callback once; subsequent calls are ignored. */
    if (rtclock_timer_expire == NULL)
        rtclock_timer_expire = func;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
    x86_saved_state_t *tregs)
{
    uint64_t rip;
    boolean_t user_mode = FALSE;
    uint64_t abstime;
    uint32_t latency;
    x86_lcpu_t *lcpu = x86_lcpu();

    assert(get_preemption_level() > 0);
    assert(!ml_get_interrupts_enabled());

    abstime = rtc_nanotime_read();
    latency = (uint32_t)(abstime - lcpu->rtcDeadline);
    if (abstime < lcpu->rtcDeadline)
        latency = 1;

    if (is_saved_state64(tregs) == TRUE) {
        x86_saved_state64_t *regs;

        regs = saved_state64(tregs);

        user_mode = TRUE;
        rip = regs->isf.rip;
    } else {
        x86_saved_state32_t *regs;

        regs = saved_state32(tregs);

        if (regs->cs & 0x03)
            user_mode = TRUE;
        rip = regs->eip;
    }

    /* Log the interrupt service latency (-ve value expected by tool) */
    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
        -latency, (uint32_t)rip, user_mode, 0, 0);

    /* call the generic etimer */
    etimer_intr(user_mode, rip);
}

/*
 * Request timer pop from the hardware
 */

int
setPop(
    uint64_t time)
{
    uint64_t now;
    uint32_t decr;
    uint64_t count;

    now = rtc_nanotime_read();          /* The time in nanoseconds */
    decr = deadline_to_decrementer(time, now);

    count = tmrCvt(decr, busFCvtn2t);
    lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);

    return decr;                        /* Pass back what we set */
}
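
/*
 * Editor's note (illustrative, not part of the original source): setPop()
 * converts the requested interval from nanoseconds to local APIC timer ticks
 * with tmrCvt() and the busFCvtn2t conversion factor, then arms the LAPIC in
 * one-shot mode with divide_by_1, so the timer counts the programmed value
 * down once and raises rtclock_intr() when it reaches zero.
 */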


uint64_t
mach_absolute_time(void)
{
    return rtc_nanotime_read();
}

void
clock_interval_to_absolutetime_interval(
    uint32_t interval,
    uint32_t scale_factor,
    uint64_t *result)
{
    *result = (uint64_t)interval * scale_factor;
}

void
absolutetime_to_microtime(
    uint64_t abstime,
    uint32_t *secs,
    uint32_t *microsecs)
{
    uint32_t remain;

    asm volatile(
        "divl %3"
        : "=a" (*secs), "=d" (remain)
        : "A" (abstime), "r" (NSEC_PER_SEC));
    asm volatile(
        "divl %3"
        : "=a" (*microsecs)
        : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}

void
absolutetime_to_nanotime(
    uint64_t abstime,
    uint32_t *secs,
    uint32_t *nanosecs)
{
    asm volatile(
        "divl %3"
        : "=a" (*secs), "=d" (*nanosecs)
        : "A" (abstime), "r" (NSEC_PER_SEC));
}

void
nanotime_to_absolutetime(
    uint32_t secs,
    uint32_t nanosecs,
    uint64_t *result)
{
    *result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
}

/*
 * Since absolute time is expressed in nanoseconds on this platform,
 * the following two conversions are identities.
 */
void
absolutetime_to_nanoseconds(
    uint64_t abstime,
    uint64_t *result)
{
    *result = abstime;
}

void
nanoseconds_to_absolutetime(
    uint64_t nanoseconds,
    uint64_t *result)
{
    *result = nanoseconds;
}

void
machine_delay_until(
    uint64_t deadline)
{
    uint64_t now;

    do {
        cpu_pause();
        now = mach_absolute_time();
    } while (now < deadline);
}