]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/clock.c
a3266197e9fa07e505c729c71748388c55fab1f9
[apple/xnu.git] / osfmk / kern / clock.c
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 */
35
36 #include <mach/mach_types.h>
37
38 #include <kern/lock.h>
39 #include <kern/spl.h>
40 #include <kern/sched_prim.h>
41 #include <kern/thread.h>
42 #include <kern/clock.h>
43 #include <kern/host_notify.h>
44
45 #include <IOKit/IOPlatformExpert.h>
46
47 #include <machine/commpage.h>
48
49 #include <mach/mach_traps.h>
50 #include <mach/mach_time.h>
51
/* Protects the calendar state and adjustment variables below. */
decl_simple_lock_data(static,clock_lock)

/*
 * Time of day (calendar) variables.
 *
 * Algorithm:
 *
 * TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 * where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {
	uint64_t	epoch;		/* calendar seconds added after CONV (see algorithm above) */
	uint64_t	offset;		/* absolute-time offset added to the current time before CONV */
} clock_calend;

/*
 * Calendar adjustment (adjtime-style slew) variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define calend_adjbig		(NSEC_PER_SEC)			/* use 10x skew above adjbig ns */

static uint64_t		calend_adjstart;		/* Absolute time value for start of this adjustment period */
static uint32_t		calend_adjoffset;		/* Absolute time offset for this adjustment period as absolute value */

static int32_t		calend_adjdelta;		/* Nanosecond time delta for this adjustment period */
static int64_t		calend_adjtotal;		/* Nanosecond remaining total adjustment */

static uint64_t		calend_adjdeadline;		/* Absolute time value for next adjustment period */
static uint32_t		calend_adjinterval;		/* Absolute time interval of adjustment period */

static timer_call_data_t	calend_adjcall;		/* periodic callout that applies each slew step */
static uint32_t				calend_adjactive;	/* count of outstanding adjustment callouts */

static uint32_t		calend_set_adjustment(
						int32_t			*secs,
						int32_t			*microsecs);

static void			calend_adjust_call(void);
static uint32_t		calend_adjust(void);

static thread_call_data_t	calend_wakecall;	/* wake-from-sleep calendar reset (IOKitResetTime) */

extern	void		IOKitResetTime(void);

static uint64_t		clock_boottime;				/* Seconds boottime epoch */

/*
 * Add (secs, frac) into (rsecs, rfrac), carrying into the seconds when
 * the fraction reaches one 'unit'.  Carries at most once, so it assumes
 * frac < unit.
 */
#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if (((rfrac) += (frac)) >= (unit)) {			\
		(rfrac) -= (unit);							\
		(rsecs) += 1;								\
	}												\
	(rsecs) += (secs);								\
MACRO_END

/*
 * Subtract (secs, frac) from (rsecs, rfrac), borrowing from the seconds
 * when the fraction underflows.  Borrows at most once, so it assumes
 * frac < unit.
 */
#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if ((int32_t)((rfrac) -= (frac)) < 0) {			\
		(rfrac) += (unit);							\
		(rsecs) -= 1;								\
	}												\
	(rsecs) -= (secs);								\
MACRO_END
117
118 /*
119 * clock_config:
120 *
121 * Called once at boot to configure the clock subsystem.
122 */
void
clock_config(void)
{
	simple_lock_init(&clock_lock, 0);

	/*
	 * Set up (but do not arm) the periodic calendar-adjustment callout
	 * and the wake-from-sleep reset, which runs IOKitResetTime.
	 */
	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
	thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

	/* Legacy clock configuration. */
	clock_oldconfig();

	/*
	 * Initialize the timer callouts.
	 */
	timer_call_initialize();
}
138
139 /*
140 * clock_init:
141 *
142 * Called on a processor each time started.
143 */
void
clock_init(void)
{
	/* Per-processor startup defers to the legacy clock layer. */
	clock_oldinit();
}
149
150 /*
151 * clock_timebase_init:
152 *
153 * Called by machine dependent code
154 * to initialize areas dependent on the
155 * timebase value. May be called multiple
156 * times during start up.
157 */
158 void
159 clock_timebase_init(void)
160 {
161 uint64_t abstime;
162
163 nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
164 calend_adjinterval = abstime;
165
166 sched_timebase_init();
167 }
168
169 /*
170 * mach_timebase_info_trap:
171 *
172 * User trap returns timebase constant.
173 */
174 kern_return_t
175 mach_timebase_info_trap(
176 struct mach_timebase_info_trap_args *args)
177 {
178 mach_vm_address_t out_info_addr = args->info;
179 mach_timebase_info_data_t info;
180
181 clock_timebase_info(&info);
182
183 copyout((void *)&info, out_info_addr, sizeof (info));
184
185 return (KERN_SUCCESS);
186 }
187
188 /*
189 * Calendar routines.
190 */
191
192 /*
193 * clock_get_calendar_microtime:
194 *
195 * Returns the current calendar value,
196 * microseconds as the fraction.
197 */
void
clock_get_calendar_microtime(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

	/*
	 * While a backwards slew is in progress, interpolate within the
	 * current adjustment period so the reported time never runs ahead
	 * of the slewed calendar.
	 */
	if (calend_adjdelta < 0) {
		uint32_t	t32;

		if (now > calend_adjstart) {
			/* elapsed time in this period; a period is short, so this
			   is assumed to fit in 32 bits */
			t32 = now - calend_adjstart;

			if (t32 > calend_adjoffset)
				now -= calend_adjoffset;
			else
				now = calend_adjstart;	/* clamp to the period start */
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += clock_calend.epoch;

	simple_unlock(&clock_lock);
	splx(s);
}
233
234 /*
235 * clock_get_calendar_nanotime:
236 *
237 * Returns the current calendar value,
238 * nanoseconds as the fraction.
239 *
240 * Since we do not have an interface to
241 * set the calendar with resolution greater
242 * than a microsecond, we honor that here.
243 */
void
clock_get_calendar_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

	/*
	 * Interpolate during a backwards slew, exactly as in
	 * clock_get_calendar_microtime().
	 */
	if (calend_adjdelta < 0) {
		uint32_t	t32;

		if (now > calend_adjstart) {
			t32 = now - calend_adjstart;

			if (t32 > calend_adjoffset)
				now -= calend_adjoffset;
			else
				now = calend_adjstart;
		}
	}

	now += clock_calend.offset;

	/*
	 * Convert at microsecond granularity and scale up, honoring the
	 * microsecond resolution at which the calendar can be set.
	 */
	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += clock_calend.epoch;

	simple_unlock(&clock_lock);
	splx(s);
}
280
281 /*
282 * clock_gettimeofday:
283 *
284 * Kernel interface for commpage implementation of
285 * gettimeofday() syscall.
286 *
287 * Returns the current calendar value, and updates the
288 * commpage info as appropriate. Because most calls to
289 * gettimeofday() are handled in user mode by the commpage,
290 * this routine should be used infrequently.
291 */
void
clock_gettimeofday(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

	if (calend_adjdelta >= 0) {
		/*
		 * Normal case: compute the time and also publish a fresh
		 * commpage timestamp so user mode can answer gettimeofday()
		 * without trapping.
		 */
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		/*
		 * A backwards slew is in progress: interpolate as in
		 * clock_get_calendar_microtime() and do not publish to the
		 * commpage.
		 */
		if (now > calend_adjstart) {
			t32 = now - calend_adjstart;

			if (t32 > calend_adjoffset)
				now -= calend_adjoffset;
			else
				now = calend_adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += clock_calend.epoch;
	}

	simple_unlock(&clock_lock);
	splx(s);
}
330
331 /*
332 * clock_set_calendar_microtime:
333 *
334 * Sets the current calendar value by
335 * recalculating the epoch and offset
336 * from the system clock.
337 *
338 * Also adjusts the boottime to keep the
339 * value consistent, writes the new
340 * calendar value to the platform clock,
341 * and sends calendar change notifications.
342 */
343 void
344 clock_set_calendar_microtime(
345 uint32_t secs,
346 uint32_t microsecs)
347 {
348 uint32_t sys, microsys;
349 uint32_t newsecs;
350 spl_t s;
351
352 newsecs = (microsecs < 500*USEC_PER_SEC)?
353 secs: secs + 1;
354
355 s = splclock();
356 simple_lock(&clock_lock);
357
358 commpage_set_timestamp(0,0,0);
359
360 /*
361 * Calculate the new calendar epoch based on
362 * the new value and the system clock.
363 */
364 clock_get_system_microtime(&sys, &microsys);
365 TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
366
367 /*
368 * Adjust the boottime based on the delta.
369 */
370 clock_boottime += secs - clock_calend.epoch;
371
372 /*
373 * Set the new calendar epoch.
374 */
375 clock_calend.epoch = secs;
376 nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
377
378 /*
379 * Cancel any adjustment in progress.
380 */
381 calend_adjdelta = calend_adjtotal = 0;
382
383 simple_unlock(&clock_lock);
384
385 /*
386 * Set the new value for the platform clock.
387 */
388 PESetGMTTimeOfDay(newsecs);
389
390 splx(s);
391
392 /*
393 * Send host notifications.
394 */
395 host_notify_calendar_change();
396 }
397
398 /*
399 * clock_initialize_calendar:
400 *
401 * Set the calendar and related clocks
402 * from the platform clock at boot or
403 * wake event.
404 *
405 * Also sends host notifications.
406 */
void
clock_initialize_calendar(void)
{
	uint32_t	sys, microsys;
	uint32_t	microsecs = 0, secs = PEGetGMTTimeOfDay();
	spl_t		s;

	s = splclock();
	simple_lock(&clock_lock);

	/* Invalidate the commpage timestamp; the calendar is changing. */
	commpage_set_timestamp(0,0,0);

	/*
	 * Ignore a platform clock reading that has fallen behind the
	 * recorded boottime, so the calendar is never moved backwards here.
	 */
	if ((int32_t)secs >= (int32_t)clock_boottime) {
		/*
		 * Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 * Calculate the new calendar epoch based on
		 * the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 * Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;
		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 * Cancel any adjustment in progress.
		 */
		calend_adjdelta = calend_adjtotal = 0;
	}

	simple_unlock(&clock_lock);
	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();
}
453
454 /*
455 * clock_get_boottime_nanotime:
456 *
457 * Return the boottime, used by sysctl.
458 */
void
clock_get_boottime_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	/* Boottime is tracked in whole seconds only. */
	*secs = clock_boottime;
	*nanosecs = 0;
}
467
468 /*
469 * clock_adjtime:
470 *
471 * Interface to adjtime() syscall.
472 *
473 * Calculates adjustment variables and
474 * initiates adjustment.
475 */
void
clock_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&clock_lock);

	/*
	 * Compute the per-period slew; a non-zero interval means an
	 * adjustment is outstanding and the periodic callout must be
	 * armed, zero means any pending callout should be cancelled.
	 */
	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
			calend_adjactive++;		/* newly armed (was not already pending) */
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;			/* cancelled a pending callout */

	simple_unlock(&clock_lock);
	splx(s);
}
500
/*
 * calend_set_adjustment:
 *
 * Record a new total calendar adjustment (in *secs/*microsecs) and
 * compute the per-period slew step.  Returns the callout interval to
 * arm, or 0 if no adjustment remains.  On return *secs/*microsecs
 * hold the previously outstanding adjustment, adjtime(2) style.
 *
 * Called with clock_lock held.
 */
static uint32_t
calend_set_adjustment(
	int32_t				*secs,
	int32_t				*microsecs)
{
	uint64_t		now, t64;
	int64_t			total, ototal;
	uint32_t		interval = 0;

	/* Requested total adjustment, in nanoseconds. */
	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	/* Invalidate the commpage timestamp while the calendar slews. */
	commpage_set_timestamp(0,0,0);

	now = mach_absolute_time();

	/* Remember the previously outstanding adjustment for the caller. */
	ototal = calend_adjtotal;

	if (total != 0) {
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/* Forward slew: standard skew per period, 10x when large,
			   clamped to the remaining total. */
			if (total > calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = total;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			calend_adjoffset = t64;
		}
		else {
			/* Backward slew: mirror of the above; record the period
			   start so calendar readers can interpolate (see
			   clock_get_calendar_microtime()). */
			if (total < -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;

			calend_adjstart = now;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			calend_adjoffset = t64;
		}

		calend_adjtotal = total;
		calend_adjdelta = delta;

		/* Arm the periodic adjustment callout. */
		interval = calend_adjinterval;
	}
	else
		calend_adjdelta = calend_adjtotal = 0;

	/* Report the adjustment that was still pending, if any. */
	if (ototal != 0) {
		*secs = ototal / NSEC_PER_SEC;
		*microsecs = (ototal % NSEC_PER_SEC) / NSEC_PER_USEC;
	}
	else
		*secs = *microsecs = 0;

	return (interval);
}
560
/*
 * calend_adjust_call:
 *
 * Timer callout: apply one slew step and re-arm while any adjustment
 * remains outstanding.
 */
static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&clock_lock);

	/*
	 * Only act when this is the last outstanding callout; an
	 * intervening clock_adjtime() may have re-armed the timer.
	 */
	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			/* More slewing remains; schedule the next period. */
			clock_deadline_for_periodic_event(interval, mach_absolute_time(),
																&calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
				calend_adjactive++;
		}
	}

	simple_unlock(&clock_lock);
	splx(s);
}
584
/*
 * calend_adjust:
 *
 * Apply one period's slew step to the calendar offset and shrink the
 * final step to the remaining total.  Returns the interval for the
 * next period, or 0 when the adjustment is complete.
 *
 * Called with clock_lock held.
 */
static uint32_t
calend_adjust(void)
{
	uint64_t		now,  t64;
	int32_t			delta;
	uint32_t		interval = 0;

	/* Invalidate the commpage timestamp; the calendar is being slewed. */
	commpage_set_timestamp(0,0,0);

	now = mach_absolute_time();

	delta = calend_adjdelta;

	if (delta > 0) {
		/* Forward slew: fold this period's offset into the calendar. */
		clock_calend.offset += calend_adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			/* Final (partial) period: shrink the step to what remains. */
			calend_adjdelta = delta = calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			calend_adjoffset = t64;
		}
	}
	else
	if (delta < 0) {
		/* Backward slew: retire this period's offset from the calendar. */
		clock_calend.offset -= calend_adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			/* Final (partial) period: shrink the step to what remains. */
			calend_adjdelta = delta = calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			calend_adjoffset = t64;
		}

		/* Begin a new interpolation window for calendar readers. */
		if (calend_adjdelta != 0)
			calend_adjstart = now;
	}

	/* Re-arm while any adjustment remains. */
	if (calend_adjdelta != 0)
		interval = calend_adjinterval;

	return (interval);
}
630
631 /*
632 * clock_wakeup_calendar:
633 *
634 * Interface to power management, used
635 * to initiate the reset of the calendar
636 * on wake from sleep event.
637 */
void
clock_wakeup_calendar(void)
{
	/* Kick the wake callout; it runs IOKitResetTime (see clock_config). */
	thread_call_enter(&calend_wakecall);
}
643
644 /*
645 * Wait / delay routines.
646 */
647 static void
648 mach_wait_until_continue(
649 __unused void *parameter,
650 wait_result_t wresult)
651 {
652 thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
653 /*NOTREACHED*/
654 }
655
656 kern_return_t
657 mach_wait_until_trap(
658 struct mach_wait_until_trap_args *args)
659 {
660 uint64_t deadline = args->deadline;
661 wait_result_t wresult;
662
663 wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
664 if (wresult == THREAD_WAITING)
665 wresult = thread_block(mach_wait_until_continue);
666
667 return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
668 }
669
void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	/* Deadline already passed: nothing to do. */
	if (now >= deadline)
		return;

	/*
	 * Spin (machine_delay_until) when the wait is shorter than a few
	 * context-switch times, or when blocking is not permitted
	 * (preemption disabled or interrupts off).  Otherwise block,
	 * waking one context-switch time early to absorb the wakeup cost.
	 */
	if (	(deadline - now) < (8 * sched_cswtime)	||
			get_preemption_level() != 0				||
			ml_get_interrupts_enabled() == FALSE	)
		machine_delay_until(deadline);
	else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

		thread_block(THREAD_CONTINUE_NULL);
	}
}
689
690 void
691 delay_for_interval(
692 uint32_t interval,
693 uint32_t scale_factor)
694 {
695 uint64_t end;
696
697 clock_interval_to_deadline(interval, scale_factor, &end);
698
699 clock_delay_until(end);
700 }
701
702 void
703 delay(
704 int usec)
705 {
706 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
707 }
708
709 /*
710 * Miscellaneous routines.
711 */
712 void
713 clock_interval_to_deadline(
714 uint32_t interval,
715 uint32_t scale_factor,
716 uint64_t *result)
717 {
718 uint64_t abstime;
719
720 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
721
722 *result = mach_absolute_time() + abstime;
723 }
724
725 void
726 clock_absolutetime_interval_to_deadline(
727 uint64_t abstime,
728 uint64_t *result)
729 {
730 *result = mach_absolute_time() + abstime;
731 }
732
void
clock_get_uptime(
	uint64_t	*result)
{
	/* Uptime is simply the current absolute time. */
	*result = mach_absolute_time();
}
739
void
clock_deadline_for_periodic_event(
	uint64_t			interval,
	uint64_t			abstime,
	uint64_t			*deadline)
{
	assert(interval != 0);

	/* Advance one period from the previous deadline. */
	*deadline += interval;

	if (*deadline <= abstime) {
		/*
		 * We fell behind: restart one period after the supplied time,
		 * then re-check against a fresh clock reading in case time has
		 * advanced past that as well.
		 */
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
757 }