]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/clock.c
4085c5883b7670b5d26f9cb39716a0a2f379c6a0
[apple/xnu.git] / osfmk / kern / clock.c
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 */
27
28 #include <mach/mach_types.h>
29
30 #include <kern/lock.h>
31 #include <kern/spl.h>
32 #include <kern/sched_prim.h>
33 #include <kern/thread.h>
34 #include <kern/clock.h>
35 #include <kern/host_notify.h>
36
37 #include <IOKit/IOPlatformExpert.h>
38
39 #include <machine/commpage.h>
40
41 #include <mach/mach_traps.h>
42 #include <mach/mach_time.h>
43
44 decl_simple_lock_data(static,clock_lock)
45
46 /*
47 * Time of day (calendar) variables.
48 *
49 * Algorithm:
50 *
51 * TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
52 *
53 * where CONV converts absolute time units into seconds and a fraction.
54 */
/*
 * Calendar state, protected by clock_lock.
 *
 * epoch:  calendar seconds to add after converting the
 *         offset-adjusted absolute time to seconds + fraction.
 * offset: absolute-time units added to the current absolute time
 *         before that conversion (sub-second remainder plus any
 *         slew applied so far).
 */
static struct clock_calend {
	uint64_t			epoch;
	uint64_t			offset;
} clock_calend;
59
60 /*
61 * Calendar adjustment variables and values.
62 */
63 #define calend_adjperiod (NSEC_PER_SEC / 100) /* adjustment period, ns */
64 #define calend_adjskew (40 * NSEC_PER_USEC) /* "standard" skew, ns / period */
65 #define calend_adjbig (NSEC_PER_SEC) /* use 10x skew above adjbig ns */
66
67 static uint64_t calend_adjstart; /* Absolute time value for start of this adjustment period */
68 static uint32_t calend_adjoffset; /* Absolute time offset for this adjustment period as absolute value */
69
70 static int32_t calend_adjdelta; /* Nanosecond time delta for this adjustment period */
71 static int64_t calend_adjtotal; /* Nanosecond remaining total adjustment */
72
73 static uint64_t calend_adjdeadline; /* Absolute time value for next adjustment period */
74 static uint32_t calend_adjinterval; /* Absolute time interval of adjustment period */
75
76 static timer_call_data_t calend_adjcall;
77 static uint32_t calend_adjactive;
78
79 static uint32_t calend_set_adjustment(
80 int32_t *secs,
81 int32_t *microsecs);
82
83 static void calend_adjust_call(void);
84 static uint32_t calend_adjust(void);
85
86 static thread_call_data_t calend_wakecall;
87
88 extern void IOKitResetTime(void);
89
90 static uint64_t clock_boottime; /* Seconds boottime epoch */
91
/*
 * TIME_ADD: add (secs, frac) into (rsecs, rfrac) in place, where the
 * fractional parts count 'unit' ticks per second.  Carries at most
 * one second, so 'frac' is assumed to be < 'unit'.
 */
#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if (((rfrac) += (frac)) >= (unit)) {			\
		(rfrac) -= (unit);							\
		(rsecs) += 1;								\
	}												\
	(rsecs) += (secs);								\
MACRO_END

/*
 * TIME_SUB: subtract (secs, frac) from (rsecs, rfrac) in place.
 * Borrows at most one second (fraction underflow detected via the
 * signed view of the unsigned fraction), so 'frac' is assumed to be
 * < 'unit'.
 */
#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if ((int32_t)((rfrac) -= (frac)) < 0) {			\
		(rfrac) += (unit);							\
		(rsecs) -= 1;								\
	}												\
	(rsecs) -= (secs);								\
MACRO_END
109
110 /*
111 * clock_config:
112 *
113 * Called once at boot to configure the clock subsystem.
114 */
void
clock_config(void)
{
	simple_lock_init(&clock_lock, 0);

	/*
	 * Calendar slew periods run from the timer-call (interrupt)
	 * path; wake-time calendar resets run from a thread call,
	 * since IOKitResetTime may block.
	 */
	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
	thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

	clock_oldconfig();

	/*
	 * Initialize the timer callouts.
	 */
	timer_call_initialize();
}
130
131 /*
132 * clock_init:
133 *
134 * Called on a processor each time started.
135 */
void
clock_init(void)
{
	/* Per-processor start-up is handled by the old clock layer. */
	clock_oldinit();
}
141
142 /*
143 * clock_timebase_init:
144 *
145 * Called by machine dependent code
146 * to initialize areas dependent on the
147 * timebase value. May be called multiple
148 * times during start up.
149 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	/*
	 * (Re)compute the slew period length in absolute-time units;
	 * idempotent, so safe across repeated timebase reports.
	 * NOTE(review): result truncated to 32 bits — assumes the
	 * period fits; confirm for this timebase.
	 */
	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = abstime;

	sched_timebase_init();
}
160
161 /*
162 * mach_timebase_info_trap:
163 *
164 * User trap returns timebase constant.
165 */
166 kern_return_t
167 mach_timebase_info_trap(
168 struct mach_timebase_info_trap_args *args)
169 {
170 mach_vm_address_t out_info_addr = args->info;
171 mach_timebase_info_data_t info;
172
173 clock_timebase_info(&info);
174
175 copyout((void *)&info, out_info_addr, sizeof (info));
176
177 return (KERN_SUCCESS);
178 }
179
180 /*
181 * Calendar routines.
182 */
183
184 /*
185 * clock_get_calendar_microtime:
186 *
187 * Returns the current calendar value,
188 * microseconds as the fraction.
189 */
void
clock_get_calendar_microtime(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

	if (calend_adjdelta < 0) {
		uint32_t	t32;

		/*
		 * A backward slew is in progress: subtract the portion of
		 * this period's offset already covered so reported time
		 * never runs backward.  Once the elapsed time exceeds the
		 * period's offset, subtract the whole offset; otherwise
		 * clamp to the period start.
		 * NOTE(review): elapsed delta truncated to 32 bits —
		 * assumes slew periods are short; confirm.
		 */
		if (now > calend_adjstart) {
			t32 = now - calend_adjstart;

			if (t32 > calend_adjoffset)
				now -= calend_adjoffset;
			else
				now = calend_adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += clock_calend.epoch;

	simple_unlock(&clock_lock);
	splx(s);
}
225
226 /*
227 * clock_get_calendar_nanotime:
228 *
229 * Returns the current calendar value,
230 * nanoseconds as the fraction.
231 *
232 * Since we do not have an interface to
233 * set the calendar with resolution greater
234 * than a microsecond, we honor that here.
235 */
void
clock_get_calendar_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

	if (calend_adjdelta < 0) {
		uint32_t	t32;

		/*
		 * Backward slew in progress: compensate as in
		 * clock_get_calendar_microtime() so time is monotonic.
		 */
		if (now > calend_adjstart) {
			t32 = now - calend_adjstart;

			if (t32 > calend_adjoffset)
				now -= calend_adjoffset;
			else
				now = calend_adjstart;
		}
	}

	now += clock_calend.offset;

	/*
	 * Convert via microseconds and scale up: the calendar can only
	 * be set with microsecond resolution, so that is honored here.
	 */
	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += clock_calend.epoch;

	simple_unlock(&clock_lock);
	splx(s);
}
272
273 /*
274 * clock_gettimeofday:
275 *
276 * Kernel interface for commpage implementation of
277 * gettimeofday() syscall.
278 *
279 * Returns the current calendar value, and updates the
280 * commpage info as appropriate. Because most calls to
281 * gettimeofday() are handled in user mode by the commpage,
282 * this routine should be used infrequently.
283 */
void
clock_gettimeofday(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

	if (calend_adjdelta >= 0) {
		/*
		 * No backward slew in progress: return the time and
		 * refresh the commpage so subsequent gettimeofday()
		 * calls resolve entirely in user mode.
		 */
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		/*
		 * Backward slew: compensate as in the other readers.  The
		 * commpage is left invalidated, forcing callers back into
		 * the kernel while time is being pulled back.
		 */
		if (now > calend_adjstart) {
			t32 = now - calend_adjstart;

			if (t32 > calend_adjoffset)
				now -= calend_adjoffset;
			else
				now = calend_adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += clock_calend.epoch;
	}

	simple_unlock(&clock_lock);
	splx(s);
}
322
323 /*
324 * clock_set_calendar_microtime:
325 *
326 * Sets the current calendar value by
327 * recalculating the epoch and offset
328 * from the system clock.
329 *
330 * Also adjusts the boottime to keep the
331 * value consistent, writes the new
332 * calendar value to the platform clock,
333 * and sends calendar change notifications.
334 */
335 void
336 clock_set_calendar_microtime(
337 uint32_t secs,
338 uint32_t microsecs)
339 {
340 uint32_t sys, microsys;
341 uint32_t newsecs;
342 spl_t s;
343
344 newsecs = (microsecs < 500*USEC_PER_SEC)?
345 secs: secs + 1;
346
347 s = splclock();
348 simple_lock(&clock_lock);
349
350 commpage_set_timestamp(0,0,0);
351
352 /*
353 * Calculate the new calendar epoch based on
354 * the new value and the system clock.
355 */
356 clock_get_system_microtime(&sys, &microsys);
357 TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
358
359 /*
360 * Adjust the boottime based on the delta.
361 */
362 clock_boottime += secs - clock_calend.epoch;
363
364 /*
365 * Set the new calendar epoch.
366 */
367 clock_calend.epoch = secs;
368 nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
369
370 /*
371 * Cancel any adjustment in progress.
372 */
373 calend_adjdelta = calend_adjtotal = 0;
374
375 simple_unlock(&clock_lock);
376
377 /*
378 * Set the new value for the platform clock.
379 */
380 PESetGMTTimeOfDay(newsecs);
381
382 splx(s);
383
384 /*
385 * Send host notifications.
386 */
387 host_notify_calendar_change();
388 }
389
390 /*
391 * clock_initialize_calendar:
392 *
393 * Set the calendar and related clocks
394 * from the platform clock at boot or
395 * wake event.
396 *
397 * Also sends host notifications.
398 */
void
clock_initialize_calendar(void)
{
	uint32_t		sys, microsys;
	uint32_t		microsecs = 0, secs = PEGetGMTTimeOfDay();
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	/* Invalidate the commpage timestamp while the calendar changes. */
	commpage_set_timestamp(0,0,0);

	/*
	 * Ignore a platform clock that appears to have moved backward
	 * past boottime (e.g. a reset or drained RTC across sleep); in
	 * that case the existing calendar is kept.
	 * NOTE(review): signed 32-bit comparison of second counts —
	 * assumed acceptable for this era; confirm 2038 handling.
	 */
	if ((int32_t)secs >= (int32_t)clock_boottime) {
		/*
		 * Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 * Calculate the new calendar epoch based on
		 * the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 * Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;
		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 * Cancel any adjustment in progress.
		 */
		calend_adjdelta = calend_adjtotal = 0;
	}

	simple_unlock(&clock_lock);
	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();
}
445
446 /*
447 * clock_get_boottime_nanotime:
448 *
449 * Return the boottime, used by sysctl.
450 */
void
clock_get_boottime_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	/*
	 * NOTE(review): truncates the 64-bit boottime epoch to 32 bits
	 * and reads it without the clock lock — presumably benign for a
	 * whole-seconds value; confirm.
	 */
	*secs = clock_boottime;
	*nanosecs = 0;
}
459
460 /*
461 * clock_adjtime:
462 *
463 * Interface to adjtime() syscall.
464 *
465 * Calculates adjustment variables and
466 * initiates adjustment.
467 */
void
clock_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&clock_lock);

	/*
	 * Install the new adjustment; on return *secs/*microsecs hold
	 * the previously outstanding adjustment.  A non-zero interval
	 * means periodic slewing is required.
	 */
	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		/* Arm the slew callout; count it only if not already pending. */
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	simple_unlock(&clock_lock);
	splx(s);
}
492
/*
 * calend_set_adjustment:
 *
 * Establish a new adjtime()-style calendar adjustment.  Computes
 * the per-period delta (standard skew, 10x for large totals) and
 * the matching absolute-time offset applied each period.
 *
 * On return, *secs / *microsecs hold the previously outstanding
 * adjustment.  Returns the absolute-time length of one adjustment
 * period, or 0 if no slewing is needed.
 *
 * Called with the clock lock held.
 */
static uint32_t
calend_set_adjustment(
	int32_t			*secs,
	int32_t			*microsecs)
{
	uint64_t		now, t64;
	int64_t			total, ototal;
	uint32_t		interval = 0;

	/* Requested total adjustment, in nanoseconds. */
	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	/* Invalidate the commpage timestamp while the calendar slews. */
	commpage_set_timestamp(0,0,0);

	now = mach_absolute_time();

	ototal = calend_adjtotal;

	if (total != 0) {
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/* Large adjustment: slew 10x faster. */
			if (total > calend_adjbig)
				delta *= 10;
			/* Final period may be partial. */
			if (delta > total)
				delta = total;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			calend_adjoffset = t64;
		}
		else {
			if (total < -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;

			/*
			 * Backward slew: readers compensate relative to
			 * calend_adjstart so time never runs backward.
			 */
			calend_adjstart = now;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			calend_adjoffset = t64;
		}

		calend_adjtotal = total;
		calend_adjdelta = delta;

		interval = calend_adjinterval;
	}
	else
		calend_adjdelta = calend_adjtotal = 0;

	/* Report the adjustment that was previously in progress. */
	if (ototal != 0) {
		*secs = ototal / NSEC_PER_SEC;
		*microsecs = (ototal % NSEC_PER_SEC) / NSEC_PER_USEC;
	}
	else
		*secs = *microsecs = 0;

	return (interval);
}
552
/*
 * calend_adjust_call:
 *
 * Timer callout run once per adjustment period: applies one
 * period's slew and re-arms itself while adjustment remains.
 */
static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&clock_lock);

	/*
	 * Only the final outstanding activation does the work;
	 * earlier ones were superseded by a newer clock_adjtime().
	 */
	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(),
																&calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
				calend_adjactive++;
		}
	}

	simple_unlock(&clock_lock);
	splx(s);
}
576
/*
 * calend_adjust:
 *
 * Apply one period's worth of slew to the calendar offset and
 * update the remaining total.  Returns the next period length,
 * or 0 when the adjustment is complete.
 *
 * Called with the clock lock held.
 */
static uint32_t
calend_adjust(void)
{
	uint64_t		now, t64;
	int32_t			delta;
	uint32_t		interval = 0;

	/* Keep the commpage timestamp invalid while slewing. */
	commpage_set_timestamp(0,0,0);

	now = mach_absolute_time();

	delta = calend_adjdelta;

	if (delta > 0) {
		/* Forward slew: push the calendar ahead by this period's offset. */
		clock_calend.offset += calend_adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			/* Less than a full period remains: shrink the final step. */
			calend_adjdelta = delta = calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			calend_adjoffset = t64;
		}
	}
	else
	if (delta < 0) {
		/* Backward slew: pull the calendar back by this period's offset. */
		clock_calend.offset -= calend_adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			/* Less than a full period remains: shrink the final step. */
			calend_adjdelta = delta = calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			calend_adjoffset = t64;
		}

		/*
		 * Restart the compensation window used by readers to keep
		 * reported time monotonic during the backward slew.
		 */
		if (calend_adjdelta != 0)
			calend_adjstart = now;
	}

	if (calend_adjdelta != 0)
		interval = calend_adjinterval;

	return (interval);
}
622
623 /*
624 * clock_wakeup_calendar:
625 *
626 * Interface to power management, used
627 * to initiate the reset of the calendar
628 * on wake from sleep event.
629 */
void
clock_wakeup_calendar(void)
{
	/* Run from a thread call: the IOKit reset path may block. */
	thread_call_enter(&calend_wakecall);
}
635
636 /*
637 * Wait / delay routines.
638 */
/*
 * mach_wait_until_continue:
 *
 * Continuation for mach_wait_until_trap(): maps the wakeup
 * disposition to a Mach return code and returns to user space.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}
647
/*
 * mach_wait_until_trap:
 *
 * User trap: sleep (abortably) until the given absolute-time
 * deadline.  Returns KERN_ABORTED if the wait was interrupted,
 * otherwise KERN_SUCCESS.
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args *args)
{
	uint64_t		deadline = args->deadline;
	wait_result_t	wresult;

	wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
661
/*
 * clock_delay_until:
 *
 * Delay the calling thread until the given absolute-time deadline.
 */
void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	/*
	 * Spin when the wait is shorter than ~8 context-switch times or
	 * when blocking is illegal (preemption disabled or interrupts
	 * off); otherwise block, waking one context-switch time early
	 * to absorb the wakeup latency.
	 */
	if (	(deadline - now) < (8 * sched_cswtime)	||
			get_preemption_level() != 0				||
			ml_get_interrupts_enabled() == FALSE	)
		machine_delay_until(deadline);
	else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

		thread_block(THREAD_CONTINUE_NULL);
	}
}
681
/*
 * delay_for_interval:
 *
 * Delay the calling thread for the given interval, expressed in
 * scale_factor nanoseconds-per-tick units.
 */
void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		deadline;

	/* Convert the scaled interval to an absolute deadline and wait. */
	clock_interval_to_deadline(interval, scale_factor, &deadline);
	clock_delay_until(deadline);
}
693
694 void
695 delay(
696 int usec)
697 {
698 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
699 }
700
701 /*
702 * Miscellaneous routines.
703 */
704 void
705 clock_interval_to_deadline(
706 uint32_t interval,
707 uint32_t scale_factor,
708 uint64_t *result)
709 {
710 uint64_t abstime;
711
712 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
713
714 *result = mach_absolute_time() + abstime;
715 }
716
/*
 * clock_absolutetime_interval_to_deadline:
 *
 * Anchor an absolute-time interval at the current time to form a
 * deadline.
 */
void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	uint64_t		now = mach_absolute_time();

	*result = now + abstime;
}
724
/*
 * clock_get_uptime:
 *
 * Return the current uptime, i.e. the raw absolute timebase.
 */
void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}
731
/*
 * clock_deadline_for_periodic_event:
 *
 * Advance *deadline by one interval.  If the result has already
 * passed the supplied time reference (the event is running late),
 * re-anchor it one interval past that reference, and — if it is
 * still in the past against a fresh reading of the clock — one
 * interval past the current time instead.
 */
void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;
	if (*deadline > abstime)
		return;

	/* Running late: re-anchor at the caller-supplied reference. */
	*deadline = abstime + interval;

	/* Still late against the live clock? Re-anchor once more. */
	abstime = mach_absolute_time();
	if (*deadline <= abstime)
		*deadline = abstime + interval;
}
749 }