/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * File:        i386/rtclock.c
 * Purpose:     Routines for handling the machine-dependent
 *              real-time clock. Historically, this clock was
 *              generated by the Intel 8254 Programmable Interval
 *              Timer, but local APIC timers are now used for
 *              this purpose, with the master time reference being
 *              the CPU clock counted by the timestamp counter (TSC) MSR.
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>         /* for kernel_map */
#include <i386/ipl.h>
#include <i386/pit.h>
#include <architecture/i386/pio.h>
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/cpu_threads.h>
#include <i386/perfmon.h>
#include <i386/machine_routines.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
#include <i386/tsc.h>
#include <i386/hpet.h>
#include <i386/rtclock.h>

#define MAX(a,b)        (((a)>(b))?(a):(b))
#define MIN(a,b)        (((a)>(b))?(b):(a))

#define NSEC_PER_HZ     (NSEC_PER_SEC / 100)    /* nsec per tick */

#define UI_CPUFREQ_ROUNDING_FACTOR      10000000

int             rtclock_config(void);

int             rtclock_init(void);

uint64_t        rtc_decrementer_min;

void            rtclock_intr(x86_saved_state_t *regs);
static uint64_t maxDec;                 /* longest interval our hardware timer can handle (nsec) */

/* XXX this should really be in a header somewhere */
extern clock_timer_func_t       rtclock_timer_expire;

static void     rtc_set_timescale(uint64_t cycles);
static uint64_t rtc_export_speed(uint64_t cycles);

extern void     rtc_nanotime_store(
                        uint64_t        tsc,
                        uint64_t        nsec,
                        uint32_t        scale,
                        uint32_t        shift,
                        rtc_nanotime_t  *dst);

extern void     rtc_nanotime_load(
                        rtc_nanotime_t  *src,
                        rtc_nanotime_t  *dst);

rtc_nanotime_t  rtc_nanotime_info;

/*
 * tsc_to_nanoseconds:
 *
 * Basic routine to convert a raw 64 bit TSC value to a
 * 64 bit nanosecond value. The conversion is implemented
 * based on the scale factor and an implicit 32 bit shift.
 */
static inline uint64_t
_tsc_to_nanoseconds(uint64_t value)
{
        asm volatile("movl %%edx,%%esi  ;"
                     "mull %%ecx        ;"
                     "movl %%edx,%%edi  ;"
                     "movl %%esi,%%eax  ;"
                     "mull %%ecx        ;"
                     "addl %%edi,%%eax  ;"
                     "adcl $0,%%edx      "
                     : "+A" (value) : "c" (rtc_nanotime_info.scale) : "esi", "edi");

        return (value);
}
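
/*
 * Illustrative sketch only (the helper name below is hypothetical and is
 * not referenced elsewhere): the assembly above computes
 * (value * scale) >> 32 using two 32x32-bit multiplies.  The equivalent
 * in straight C:
 */
static inline uint64_t
_tsc_to_nanoseconds_c(uint64_t value)
{
        uint32_t scale = rtc_nanotime_info.scale;
        uint64_t lo = value & 0xFFFFFFFFULL;    /* low 32 bits of the TSC delta */
        uint64_t hi = value >> 32;              /* high 32 bits of the TSC delta */

        /* (value * scale) >> 32, truncated to 64 bits as in the assembly */
        return ((lo * scale) >> 32) + (hi * scale);
}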

uint64_t
tsc_to_nanoseconds(uint64_t value)
{
        return _tsc_to_nanoseconds(value);
}

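/*
 * deadline_to_decrementer:
 *
 * Convert an absolute deadline into a decrementer interval (nanoseconds),
 * clamped to [rtc_decrementer_min, maxDec] so that the result always fits
 * the hardware timer.  Deadlines already in the past map to the minimum.
 */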
static uint32_t
deadline_to_decrementer(
        uint64_t        deadline,
        uint64_t        now)
{
        uint64_t        delta;

        if (deadline <= now)
                return rtc_decrementer_min;
        else {
                delta = deadline - now;
                return MIN(MAX(rtc_decrementer_min, delta), maxDec);
        }
}

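/*
 * rtc_lapic_start_ticking:
 *
 * Record the first tick deadline for this cpu and force etimer to
 * recompute and program the hardware timer.
 */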
void
rtc_lapic_start_ticking(void)
{
        uint64_t        abstime;
        uint64_t        first_tick;
        cpu_data_t      *cdp = current_cpu_datap();

        abstime = mach_absolute_time();
        rtclock_tick_interval = NSEC_PER_HZ;

        first_tick = abstime + rtclock_tick_interval;
        cdp->rtclock_intr_deadline = first_tick;

        /*
         * Force a complete re-evaluation of timer deadlines.
         */
        cdp->rtcPop = EndOfAllTime;
        etimer_resync_deadlines();
}

/*
 * Configure the real-time clock device. Return success (1)
 * or failure (0).
 */

int
rtclock_config(void)
{
        /* nothing to do */
        return (1);
}


/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * The cpu clock rate is platform-dependent and may stop or be reset when the
 * processor is napped/slept.  As a result, nanotime is the software abstraction
 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
 *
 * The kernel maintains nanotime information recording:
 *      - the ratio of tsc to nanoseconds
 *        with this ratio expressed as a 32-bit scale and shift
 *        (power of 2 divider);
 *      - { tsc_base, ns_base } pair of corresponding timestamps.
 *
 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read.
 *
 * All of the routines which update the nanotime data are non-reentrant.  This must
 * be guaranteed by the caller.
 */
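
/*
 * Worked example (illustrative numbers): with a 2 GHz TSC the scale is
 * (NSEC_PER_SEC << 32) / 2000000000 = 0x80000000, and a reader computes
 *
 *      nanotime = ns_base + (((tsc - tsc_base) * scale) >> 32)
 *
 * i.e. every two TSC ticks advance nanotime by one nanosecond.  The shift
 * is 32 in this implementation (see rtc_set_timescale()).
 */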
static inline void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
        commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}

/*
 * rtc_nanotime_init:
 *
 * Initialize the nanotime info from the base time.
 */
static inline void
_rtc_nanotime_init(rtc_nanotime_t *rntp, uint64_t base)
{
        uint64_t        tsc = rdtsc64();

        rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
}

static void
rtc_nanotime_init(uint64_t base)
{
        rtc_nanotime_t  *rntp = &rtc_nanotime_info;

        _rtc_nanotime_init(rntp, base);
        rtc_nanotime_set_commpage(rntp);
}

/*
 * rtc_nanotime_init_commpage:
 *
 * Call back from the commpage initialization to
 * cause the commpage data to be filled in once the
 * commpages have been created.
 */
void
rtc_nanotime_init_commpage(void)
{
        spl_t   s = splclock();

        rtc_nanotime_set_commpage(&rtc_nanotime_info);

        splx(s);
}

/*
 * rtc_nanotime_update:
 *
 * Update the nanotime info from the base time. Since
 * the base value might be from a lower resolution clock,
 * we compare it to the TSC derived value, and use the
 * greater of the two values.
 *
 * N.B. In comparison to the above init routine, this assumes
 * that the TSC has remained monotonic compared to the tsc_base
 * value, which is not the case after S3 sleep.
 */
static inline void
_rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t base)
{
        uint64_t        nsecs, tsc = rdtsc64();

        nsecs = rntp->ns_base + _tsc_to_nanoseconds(tsc - rntp->tsc_base);
        rtc_nanotime_store(tsc, MAX(nsecs, base), rntp->scale, rntp->shift, rntp);
}

static void
rtc_nanotime_update(
        uint64_t                base)
{
        rtc_nanotime_t  *rntp = &rtc_nanotime_info;

        assert(!ml_get_interrupts_enabled());

        _rtc_nanotime_update(rntp, base);
        rtc_nanotime_set_commpage(rntp);
}

/*
 * rtc_nanotime_read:
 *
 * Returns the current nanotime value, accessible from any
 * context.  The value is computed lock-free from the shared
 * nanotime info; the read is retried if tsc_base changes
 * underneath us (i.e. the info was concurrently updated).
 */
static uint64_t
rtc_nanotime_read(void)
{
        rtc_nanotime_t  rnt, *rntp = &rtc_nanotime_info;
        uint64_t        result;

        do {
                rtc_nanotime_load(rntp, &rnt);
                result = rnt.ns_base + _tsc_to_nanoseconds(rdtsc64() - rnt.tsc_base);
        } while (rntp->tsc_base != rnt.tsc_base);

        return (result);
}

/*
 * rtc_clock_napped:
 *
 * Invoked from power management when we have awoken from a nap (C3/C4)
 * during which the TSC lost counts.  The nanotime data is updated according
 * to the provided nanosecond base value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_clock_napped(
        uint64_t                base)
{
        rtc_nanotime_update(base);
}

void
rtc_clock_stepping(__unused uint32_t new_frequency,
                   __unused uint32_t old_frequency)
{
        panic("rtc_clock_stepping unsupported");
}

void
rtc_clock_stepped(__unused uint32_t new_frequency,
                  __unused uint32_t old_frequency)
{
        panic("rtc_clock_stepping unsupported");
}

/*
 * rtc_sleep_wakeup:
 *
 * Invoked from power management when we have awoken from a sleep (S3)
 * and the TSC has been reset.  The nanotime data is updated based on
 * the passed-in value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_sleep_wakeup(
        uint64_t                base)
{
        /*
         * Reset nanotime.
         * The timestamp counter will have been reset
         * but nanotime (uptime) marches onward.
         */
        rtc_nanotime_init(base);
}

/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
rtclock_init(void)
{
        uint64_t        cycles;

        assert(!ml_get_interrupts_enabled());

        if (cpu_number() == master_cpu) {

                assert(tscFreq);
                rtc_set_timescale(tscFreq);

                /*
                 * Adjust and set the exported cpu speed.
                 */
                cycles = rtc_export_speed(tscFreq);

                /*
                 * Set min/max to actual.
                 * ACPI may update these later if speed-stepping is detected.
                 */
                gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
                gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;

                /*
                 * Compute the longest interval we can represent:
                 * the largest count we program into the timer
                 * (0x7fffffff bus ticks) converted to nanoseconds.
                 */
                maxDec = tmrCvt(0x7fffffffULL, busFCvtt2n);
                kprintf("maxDec: %lld\n", maxDec);

                /* Minimum interval is 1usec */
                rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC, 0ULL);
                /* Point LAPIC interrupts to hardclock() */
                lapic_set_timer_func((i386_intr_func_t) rtclock_intr);

                clock_timebase_init();
                ml_init_lock_timeout();
        }

        rtc_lapic_start_ticking();

        return (1);
}

/*
 * rtc_set_timescale:
 *
 * Utility routine to compute the TSC-to-nanosecond conversion factor
 * from the number of processor cycles in a second, and to reinitialize
 * nanotime with a zero base.
 */

static void
rtc_set_timescale(uint64_t cycles)
{
        rtc_nanotime_info.scale = ((uint64_t)NSEC_PER_SEC << 32) / cycles;
        rtc_nanotime_info.shift = 32;

        rtc_nanotime_init(0);
}

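/*
 * rtc_export_speed:
 *
 * Round the measured TSC frequency to the nearest multiple of
 * UI_CPUFREQ_ROUNDING_FACTOR (10 MHz) and publish it in
 * gPEClockFrequencyInfo as the user-visible cpu speed.
 * For example, 2394321000 Hz rounds to 2390000000 Hz.
 */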
static uint64_t
rtc_export_speed(uint64_t cyc_per_sec)
{
        uint64_t        cycles;

        /* Round: */
        cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
                        / UI_CPUFREQ_ROUNDING_FACTOR)
                                * UI_CPUFREQ_ROUNDING_FACTOR;

        /*
         * Set current measured speed.
         */
        if (cycles >= 0x100000000ULL) {
                gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
        } else {
                gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
        }
        gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

        kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
        return(cycles);
}

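/*
 * The 64-by-32-bit divisions below use an explicit divl so that a single
 * instruction yields both quotient and remainder.  Uptime in seconds fits
 * comfortably in 32 bits, so the first divide produces seconds plus a
 * nanosecond remainder, and a second divide splits that remainder into
 * microseconds.
 */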
void
clock_get_system_microtime(
        uint32_t                *secs,
        uint32_t                *microsecs)
{
        uint64_t        now = rtc_nanotime_read();
        uint32_t        remain;

        asm volatile(
                "divl %3"
                : "=a" (*secs), "=d" (remain)
                : "A" (now), "r" (NSEC_PER_SEC));
        asm volatile(
                "divl %3"
                : "=a" (*microsecs)
                : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}

void
clock_get_system_nanotime(
        uint32_t                *secs,
        uint32_t                *nanosecs)
{
        uint64_t        now = rtc_nanotime_read();

        asm volatile(
                "divl %3"
                : "=a" (*secs), "=d" (*nanosecs)
                : "A" (now), "r" (NSEC_PER_SEC));
}

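/*
 * clock_gettimeofday_set_commpage:
 *
 * Convert the supplied absolute time (plus offset) into seconds and
 * microseconds, add the boot epoch, and publish the result to the
 * commpage so the userspace gettimeofday() fast path can use it.
 */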
void
clock_gettimeofday_set_commpage(
        uint64_t                abstime,
        uint64_t                epoch,
        uint64_t                offset,
        uint32_t                *secs,
        uint32_t                *microsecs)
{
        uint64_t        now = abstime;
        uint32_t        remain;

        now += offset;

        asm volatile(
                "divl %3"
                : "=a" (*secs), "=d" (remain)
                : "A" (now), "r" (NSEC_PER_SEC));
        asm volatile(
                "divl %3"
                : "=a" (*microsecs)
                : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));

        *secs += epoch;

        commpage_set_timestamp(abstime - remain, *secs, NSEC_PER_SEC);
}

void
clock_timebase_info(
        mach_timebase_info_t    info)
{
        info->numer = info->denom = 1;
}

void
clock_set_timer_func(
        clock_timer_func_t      func)
{
        if (rtclock_timer_expire == NULL)
                rtclock_timer_expire = func;
}

/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
        x86_saved_state_t       *tregs)
{
        uint64_t        rip;
        boolean_t       user_mode = FALSE;
        uint64_t        abstime;
        uint32_t        latency;
        cpu_data_t      *pp = current_cpu_datap();

        assert(get_preemption_level() > 0);
        assert(!ml_get_interrupts_enabled());

        abstime = rtc_nanotime_read();
        latency = (uint32_t) abstime - pp->rtcPop;

        if (is_saved_state64(tregs) == TRUE) {
                x86_saved_state64_t     *regs;

                regs = saved_state64(tregs);

                user_mode = TRUE;
                rip = regs->isf.rip;
        } else {
                x86_saved_state32_t     *regs;

                regs = saved_state32(tregs);

                if (regs->cs & 0x03)
                        user_mode = TRUE;
                rip = regs->eip;
        }

        /* Log the interrupt service latency (-ve value expected by tool) */
        KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
                -latency, (uint32_t)rip, user_mode, 0, 0);

        /* call the generic etimer */
        etimer_intr(user_mode, rip);
}

/*
 * Request timer pop from the hardware
 */

int
setPop(
        uint64_t time)
{
        uint64_t        now;
        uint32_t        decr;
        uint64_t        count;

        now = rtc_nanotime_read();      /* The time in nanoseconds */
        decr = deadline_to_decrementer(time, now);

        count = tmrCvt(decr, busFCvtn2t);
        lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) count);

        return decr;                    /* Pass back what we set */
}


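/*
 * resetPop:
 *
 * Reprogram the local APIC timer from this cpu's recorded deadline
 * (rtcPop), re-arming the previously requested pop.
 */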
void
resetPop(void)
{
        uint64_t        now;
        uint32_t        decr;
        uint64_t        count;
        cpu_data_t      *cdp = current_cpu_datap();

        now = rtc_nanotime_read();

        decr = deadline_to_decrementer(cdp->rtcPop, now);

        count = tmrCvt(decr, busFCvtn2t);
        lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t)count);
}


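/*
 * On this platform the absolute-time unit is the nanosecond (see
 * clock_timebase_info() above, which reports a 1:1 timebase), so
 * mach_absolute_time() simply returns the nanotime value.
 */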
uint64_t
mach_absolute_time(void)
{
        return rtc_nanotime_read();
}

void
clock_interval_to_absolutetime_interval(
        uint32_t                interval,
        uint32_t                scale_factor,
        uint64_t                *result)
{
        *result = (uint64_t)interval * scale_factor;
}

void
absolutetime_to_microtime(
        uint64_t                abstime,
        uint32_t                *secs,
        uint32_t                *microsecs)
{
        uint32_t        remain;

        asm volatile(
                "divl %3"
                : "=a" (*secs), "=d" (remain)
                : "A" (abstime), "r" (NSEC_PER_SEC));
        asm volatile(
                "divl %3"
                : "=a" (*microsecs)
                : "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}

void
absolutetime_to_nanotime(
        uint64_t                abstime,
        uint32_t                *secs,
        uint32_t                *nanosecs)
{
        asm volatile(
                "divl %3"
                : "=a" (*secs), "=d" (*nanosecs)
                : "A" (abstime), "r" (NSEC_PER_SEC));
}

void
nanotime_to_absolutetime(
        uint32_t                secs,
        uint32_t                nanosecs,
        uint64_t                *result)
{
        *result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
}

void
absolutetime_to_nanoseconds(
        uint64_t                abstime,
        uint64_t                *result)
{
        *result = abstime;
}

void
nanoseconds_to_absolutetime(
        uint64_t                nanoseconds,
        uint64_t                *result)
{
        *result = nanoseconds;
}

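/*
 * machine_delay_until:
 *
 * Spin until the given absolute deadline has passed, using cpu_pause()
 * between polls.
 */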
void
machine_delay_until(
        uint64_t                deadline)
{
        uint64_t        now;

        do {
                cpu_pause();
                now = mach_absolute_time();
        } while (now < deadline);
}