/*
 * osfmk/kern/clock.c
 * (scraped gitweb page header removed; blob b9d0a075baa9da9c1fd2ff7e1252e105533335b7,
 *  apple/xnu)
 */
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 */
33
34 #include <mach/mach_types.h>
35
36 #include <kern/lock.h>
37 #include <kern/spl.h>
38 #include <kern/sched_prim.h>
39 #include <kern/thread.h>
40 #include <kern/clock.h>
41 #include <kern/host_notify.h>
42
43 #include <IOKit/IOPlatformExpert.h>
44
45 #include <machine/commpage.h>
46
47 #include <mach/mach_traps.h>
48 #include <mach/mach_time.h>
49
50 decl_simple_lock_data(static,clock_lock)
51
52 /*
53 * Time of day (calendar) variables.
54 *
55 * Algorithm:
56 *
57 * TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
58 *
59 * where CONV converts absolute time units into seconds and a fraction.
60 */
static struct clock_calend {
	uint64_t	epoch;		/* calendar seconds added after conversion (see clock_get_calendar_microtime) */
	uint64_t	offset;		/* absolute-time units added to mach_absolute_time() before conversion */
} clock_calend;
65
66 /*
67 * Calendar adjustment variables and values.
68 */
69 #define calend_adjperiod (NSEC_PER_SEC / 100) /* adjustment period, ns */
70 #define calend_adjskew (40 * NSEC_PER_USEC) /* "standard" skew, ns / period */
71 #define calend_adjbig (NSEC_PER_SEC) /* use 10x skew above adjbig ns */
72
73 static uint64_t calend_adjstart; /* Absolute time value for start of this adjustment period */
74 static uint32_t calend_adjoffset; /* Absolute time offset for this adjustment period as absolute value */
75
76 static int32_t calend_adjdelta; /* Nanosecond time delta for this adjustment period */
77 static int64_t calend_adjtotal; /* Nanosecond remaining total adjustment */
78
79 static uint64_t calend_adjdeadline; /* Absolute time value for next adjustment period */
80 static uint32_t calend_adjinterval; /* Absolute time interval of adjustment period */
81
82 static timer_call_data_t calend_adjcall;
83 static uint32_t calend_adjactive;
84
85 static uint32_t calend_set_adjustment(
86 int32_t *secs,
87 int32_t *microsecs);
88
89 static void calend_adjust_call(void);
90 static uint32_t calend_adjust(void);
91
92 static thread_call_data_t calend_wakecall;
93
94 extern void IOKitResetTime(void);
95
96 static uint64_t clock_boottime; /* Seconds boottime epoch */
97
/*
 * TIME_ADD:
 *	Add (secs, frac) into (rsecs, rfrac) in place, where both
 *	fractions are expressed in 'unit' parts per second and are
 *	assumed already normalized (< unit); at most one second is
 *	carried out of the fraction.
 */
#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

/*
 * TIME_SUB:
 *	Subtract (secs, frac) from (rsecs, rfrac) in place, with a
 *	single borrow; the (int32_t) cast detects underflow of the
 *	unsigned fraction.
 */
#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int32_t)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END
115
/*
 * clock_config:
 *
 * Called once at boot to configure the clock subsystem.
 * Initialization order matters: the lock must exist before
 * the callouts that will take it are registered.
 */
void
clock_config(void)
{
	simple_lock_init(&clock_lock, 0);

	/* Callout used to apply adjtime() slew one period at a time. */
	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
	/* Thread call used to reset the calendar on wake from sleep. */
	thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

	clock_oldconfig();

	/*
	 * Initialize the timer callouts.
	 */
	timer_call_initialize();
}
136
/*
 * clock_init:
 *
 * Called on a processor each time started.
 * Defers to the legacy per-processor initialization path.
 */
void
clock_init(void)
{
	clock_oldinit();
}
147
148 /*
149 * clock_timebase_init:
150 *
151 * Called by machine dependent code
152 * to initialize areas dependent on the
153 * timebase value. May be called multiple
154 * times during start up.
155 */
156 void
157 clock_timebase_init(void)
158 {
159 uint64_t abstime;
160
161 nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
162 calend_adjinterval = abstime;
163
164 sched_timebase_init();
165 }
166
167 /*
168 * mach_timebase_info_trap:
169 *
170 * User trap returns timebase constant.
171 */
172 kern_return_t
173 mach_timebase_info_trap(
174 struct mach_timebase_info_trap_args *args)
175 {
176 mach_vm_address_t out_info_addr = args->info;
177 mach_timebase_info_data_t info;
178
179 clock_timebase_info(&info);
180
181 copyout((void *)&info, out_info_addr, sizeof (info));
182
183 return (KERN_SUCCESS);
184 }
185
186 /*
187 * Calendar routines.
188 */
189
/*
 * clock_get_calendar_microtime:
 *
 * Returns the current calendar value,
 * microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

	/*
	 * When a negative adjtime() slew is in progress, back the
	 * apparent time off by this period's offset -- but never
	 * before the start of the period -- so that time does not
	 * appear to run backwards.
	 */
	if (calend_adjdelta < 0) {
		uint32_t	t32;

		if (now > calend_adjstart) {
			/* elapsed abstime in this period; 32-bit truncation
			   assumes periods are short -- see calend_adjperiod */
			t32 = now - calend_adjstart;

			if (t32 > calend_adjoffset)
				now -= calend_adjoffset;
			else
				now = calend_adjstart;
		}
	}

	/* Shift onto the calendar basis, then convert and add the epoch. */
	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += clock_calend.epoch;

	simple_unlock(&clock_lock);
	splx(s);
}
231
/*
 * clock_get_calendar_nanotime:
 *
 * Returns the current calendar value,
 * nanoseconds as the fraction.
 *
 * Since we do not have an interface to
 * set the calendar with resolution greater
 * than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

	/*
	 * Clamp during a negative adjtime() slew, exactly as in
	 * clock_get_calendar_microtime() above.
	 */
	if (calend_adjdelta < 0) {
		uint32_t	t32;

		if (now > calend_adjstart) {
			t32 = now - calend_adjstart;

			if (t32 > calend_adjoffset)
				now -= calend_adjoffset;
			else
				now = calend_adjstart;
		}
	}

	now += clock_calend.offset;

	/* Convert at microsecond resolution, then scale up to ns. */
	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += clock_calend.epoch;

	simple_unlock(&clock_lock);
	splx(s);
}
278
/*
 * clock_gettimeofday:
 *
 * Kernel interface for commpage implementation of
 * gettimeofday() syscall.
 *
 * Returns the current calendar value, and updates the
 * commpage info as appropriate.  Because most calls to
 * gettimeofday() are handled in user mode by the commpage,
 * this routine should be used infrequently.
 */
void
clock_gettimeofday(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

	if (calend_adjdelta >= 0) {
		/* Fast path: convert and refresh the commpage in one step
		   so user mode can resolve later calls without trapping. */
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		/*
		 * Negative slew in progress: clamp as in
		 * clock_get_calendar_microtime() and skip the
		 * commpage update.
		 */
		if (now > calend_adjstart) {
			t32 = now - calend_adjstart;

			if (t32 > calend_adjoffset)
				now -= calend_adjoffset;
			else
				now = calend_adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += clock_calend.epoch;
	}

	simple_unlock(&clock_lock);
	splx(s);
}
328
329 /*
330 * clock_set_calendar_microtime:
331 *
332 * Sets the current calendar value by
333 * recalculating the epoch and offset
334 * from the system clock.
335 *
336 * Also adjusts the boottime to keep the
337 * value consistent, writes the new
338 * calendar value to the platform clock,
339 * and sends calendar change notifications.
340 */
341 void
342 clock_set_calendar_microtime(
343 uint32_t secs,
344 uint32_t microsecs)
345 {
346 uint32_t sys, microsys;
347 uint32_t newsecs;
348 spl_t s;
349
350 newsecs = (microsecs < 500*USEC_PER_SEC)?
351 secs: secs + 1;
352
353 s = splclock();
354 simple_lock(&clock_lock);
355
356 commpage_set_timestamp(0,0,0);
357
358 /*
359 * Calculate the new calendar epoch based on
360 * the new value and the system clock.
361 */
362 clock_get_system_microtime(&sys, &microsys);
363 TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
364
365 /*
366 * Adjust the boottime based on the delta.
367 */
368 clock_boottime += secs - clock_calend.epoch;
369
370 /*
371 * Set the new calendar epoch.
372 */
373 clock_calend.epoch = secs;
374 nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
375
376 /*
377 * Cancel any adjustment in progress.
378 */
379 calend_adjdelta = calend_adjtotal = 0;
380
381 simple_unlock(&clock_lock);
382
383 /*
384 * Set the new value for the platform clock.
385 */
386 PESetGMTTimeOfDay(newsecs);
387
388 splx(s);
389
390 /*
391 * Send host notifications.
392 */
393 host_notify_calendar_change();
394 }
395
/*
 * clock_initialize_calendar:
 *
 * Set the calendar and related clocks
 * from the platform clock at boot or
 * wake event.
 *
 * Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	uint32_t		sys, microsys;
	uint32_t		microsecs = 0, secs = PEGetGMTTimeOfDay();
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	/* Invalidate the commpage copy of the calendar. */
	commpage_set_timestamp(0,0,0);

	/*
	 * NOTE(review): the signed casts presumably reject a platform
	 * clock that reads earlier than the recorded boot time (e.g. a
	 * dead battery-backed clock) -- confirm against callers.
	 */
	if ((int32_t)secs >= (int32_t)clock_boottime) {
		/*
		 * Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 * Calculate the new calendar epoch based on
		 * the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 * Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;
		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 * Cancel any adjustment in progress.
		 */
		calend_adjdelta = calend_adjtotal = 0;
	}

	simple_unlock(&clock_lock);
	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();
}
451
/*
 * clock_get_boottime_nanotime:
 *
 * Return the boottime, used by sysctl.
 * Reads clock_boottime without the clock lock; the value only
 * changes on set-time and wake events.
 */
void
clock_get_boottime_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	/* NOTE(review): clock_boottime is 64-bit; this assignment
	   truncates to 32 bits of seconds. */
	*secs = clock_boottime;
	*nanosecs = 0;
}
465
/*
 * clock_adjtime:
 *
 * Interface to adjtime() syscall.
 *
 * Calculates adjustment variables and
 * initiates adjustment.  On return, *secs and
 * *microsecs hold the previously outstanding
 * adjustment (set by calend_set_adjustment).
 */
void
clock_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&clock_lock);

	/* interval is the slew-period length, or 0 to cancel any slew. */
	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		/* timer_call_enter() reports whether the callout was already
		   pending, so only a newly armed callout is counted. */
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	simple_unlock(&clock_lock);
	splx(s);
}
498
/*
 * calend_set_adjustment:
 *
 * Compute the per-period slew step for a new adjtime()
 * request.  Returns the absolute-time length of an
 * adjustment period, or 0 when no adjustment is needed.
 * On return, *secs and *microsecs hold the previously
 * outstanding adjustment.
 *
 * Called with the clock lock held at splclock().
 */
static uint32_t
calend_set_adjustment(
	int32_t				*secs,
	int32_t				*microsecs)
{
	uint64_t		now, t64;
	int64_t			total, ototal;
	uint32_t		interval = 0;

	/* Requested adjustment in signed nanoseconds. */
	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	/* Invalidate the commpage timestamp while a slew is pending. */
	commpage_set_timestamp(0,0,0);

	now = mach_absolute_time();

	/* Remember what was still outstanding, to report back. */
	ototal = calend_adjtotal;

	if (total != 0) {
		int32_t delta = calend_adjskew;

		if (total > 0) {
			/* Forward slew: standard skew per period, 10x for
			   large totals, clamped to the remaining amount. */
			if (total > calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = total;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			calend_adjoffset = t64;
		}
		else {
			/* Backward slew: same scaling with a negative step. */
			if (total < -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;

			/* Readers clamp against adjstart/adjoffset so time
			   never appears to run backwards in this period. */
			calend_adjstart = now;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			calend_adjoffset = t64;
		}

		calend_adjtotal = total;
		calend_adjdelta = delta;

		interval = calend_adjinterval;
	}
	else
		calend_adjdelta = calend_adjtotal = 0;

	/* Report the adjustment that was previously in progress. */
	if (ototal != 0) {
		*secs = ototal / NSEC_PER_SEC;
		*microsecs = (ototal % NSEC_PER_SEC) / NSEC_PER_USEC;
	}
	else
		*secs = *microsecs = 0;

	return (interval);
}
558
/*
 * calend_adjust_call:
 *
 * Timer callout: apply one adjustment period and re-arm
 * while adjustment remains outstanding.
 */
static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&clock_lock);

	/* Act only when this is the last pending callout;
	   clock_adjtime() may have re-armed us concurrently. */
	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(),
																&calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
				calend_adjactive++;
		}
	}

	simple_unlock(&clock_lock);
	splx(s);
}
582
/*
 * calend_adjust:
 *
 * Fold one period's worth of adjtime() slew into the
 * calendar offset and set up the next period.  Returns
 * the interval to the next period, or 0 when the
 * adjustment is complete.
 *
 * Called with the clock lock held at splclock().
 */
static uint32_t
calend_adjust(void)
{
	uint64_t		now, t64;
	int32_t			delta;
	uint32_t		interval = 0;

	/* Invalidate the commpage timestamp. */
	commpage_set_timestamp(0,0,0);

	now = mach_absolute_time();

	delta = calend_adjdelta;

	if (delta > 0) {
		/* Forward step: advance the calendar offset. */
		clock_calend.offset += calend_adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			/* Less than a full step remains: shrink the final step. */
			calend_adjdelta = delta = calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			calend_adjoffset = t64;
		}
	}
	else
	if (delta < 0) {
		/* Backward step: retard the calendar offset. */
		clock_calend.offset -= calend_adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			/* Shrink the final backward step likewise. */
			calend_adjdelta = delta = calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			calend_adjoffset = t64;
		}

		/* New period start for the readers' backward-slew clamp. */
		if (calend_adjdelta != 0)
			calend_adjstart = now;
	}

	if (calend_adjdelta != 0)
		interval = calend_adjinterval;

	return (interval);
}
628
/*
 * clock_wakeup_calendar:
 *
 * Interface to power management, used
 * to initiate the reset of the calendar
 * on wake from sleep event.
 *
 * Defers the actual reset (IOKitResetTime) to a
 * thread call set up in clock_config().
 */
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}
641
642 /*
643 * Wait / delay routines.
644 */
645 static void
646 mach_wait_until_continue(
647 __unused void *parameter,
648 wait_result_t wresult)
649 {
650 thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
651 /*NOTREACHED*/
652 }
653
654 kern_return_t
655 mach_wait_until_trap(
656 struct mach_wait_until_trap_args *args)
657 {
658 uint64_t deadline = args->deadline;
659 wait_result_t wresult;
660
661 wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
662 if (wresult == THREAD_WAITING)
663 wresult = thread_block(mach_wait_until_continue);
664
665 return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
666 }
667
/*
 * clock_delay_until:
 *
 * Delay until the given absolute-time deadline, either by
 * spinning or by blocking, whichever is appropriate.
 */
void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	/* Deadline already passed. */
	if (now >= deadline)
		return;

	/*
	 * Spin (machine_delay_until) when the remaining time is
	 * shorter than several context-switch times, or when
	 * blocking is impossible: preemption disabled or
	 * interrupts off.
	 */
	if (	(deadline - now) < (8 * sched_cswtime) ||
			get_preemption_level() != 0				||
			ml_get_interrupts_enabled() == FALSE	)
		machine_delay_until(deadline);
	else {
		/* Block, waking one context-switch time early to
		   absorb the switch-in latency. */
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

		thread_block(THREAD_CONTINUE_NULL);
	}
}
687
688 void
689 delay_for_interval(
690 uint32_t interval,
691 uint32_t scale_factor)
692 {
693 uint64_t end;
694
695 clock_interval_to_deadline(interval, scale_factor, &end);
696
697 clock_delay_until(end);
698 }
699
700 void
701 delay(
702 int usec)
703 {
704 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
705 }
706
707 /*
708 * Miscellaneous routines.
709 */
710 void
711 clock_interval_to_deadline(
712 uint32_t interval,
713 uint32_t scale_factor,
714 uint64_t *result)
715 {
716 uint64_t abstime;
717
718 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
719
720 *result = mach_absolute_time() + abstime;
721 }
722
/*
 * clock_absolutetime_interval_to_deadline:
 *
 * Convert an interval already in absolute-time units into
 * a deadline relative to the current time.
 */
void
clock_absolutetime_interval_to_deadline(
	uint64_t			abstime,
	uint64_t			*result)
{
	*result = mach_absolute_time() + abstime;
}
730
/*
 * clock_get_uptime:
 *
 * Return the current absolute (monotonic) time.
 */
void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}
737
/*
 * clock_deadline_for_periodic_event:
 *
 * Advance *deadline by interval for a periodic event.
 * If the deadline has fallen behind, resynchronize one
 * interval past 'now' -- missed periods are skipped
 * rather than delivered late in a burst.
 */
void
clock_deadline_for_periodic_event(
	uint64_t			interval,
	uint64_t			abstime,
	uint64_t			*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		/* Fell behind the caller's notion of now: restart one
		   interval past it... */
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		/* ...then re-check against the actual current time. */
		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
755 }