1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 */
33
34 #include <mach/mach_types.h>
35
36 #include <kern/spl.h>
37 #include <kern/sched_prim.h>
38 #include <kern/thread.h>
39 #include <kern/clock.h>
40 #include <kern/host_notify.h>
41
42 #include <IOKit/IOPlatformExpert.h>
43
44 #include <machine/commpage.h>
45
46 #include <mach/mach_traps.h>
47 #include <mach/mach_time.h>
48
49 uint32_t hz_tick_interval = 1;
50
51
52 decl_simple_lock_data(,clock_lock)
53
54 #define clock_lock() \
55 simple_lock(&clock_lock)
56
57 #define clock_unlock() \
58 simple_unlock(&clock_lock)
59
60 #define clock_lock_init() \
61 simple_lock_init(&clock_lock, 0)
62
63
64 /*
65 * Time of day (calendar) variables.
66 *
67 * Algorithm:
68 *
69 * TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
70 *
71 * where CONV converts absolute time units into seconds and a fraction.
72 */
73 static struct clock_calend {
74 uint64_t epoch;
75 uint64_t offset;
76
77 int32_t adjdelta; /* Nanosecond time delta for this adjustment period */
78 uint64_t adjstart; /* Absolute time value for start of this adjustment period */
79 uint32_t adjoffset; /* Absolute time offset for this adjustment period as absolute value */
80 } clock_calend;
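/*
 * Worked example of the conversion above (illustrative figures, quoted in
 * seconds rather than raw absolute-time units): if the calendar is set to
 * 1,000,000.250000 while the system clock reads 300, then
 * epoch = 1,000,000 - 300 = 999,700 and offset holds the absolute-time
 * equivalent of 0.250000 s.  A read taken when the system clock reads 400
 * computes
 *
 *	CONV(400 + 0.25) + epoch = 400.250000 + 999,700 = 1,000,100.250000
 *
 * i.e. the value originally set plus the 100 s that have since elapsed.
 */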
81
82 #if CONFIG_DTRACE
83
84 /*
85 * Unlocked calendar flipflop; this is used to track a clock_calend such
86 * that we can safely access a snapshot of a valid clock_calend structure
87 * without needing to take any locks to do it.
88 *
89 * The trick is to use a generation count whose low bit is set while an
90 * update is in progress (and masked off by readers); by using the
91 * hw_atomic functions, we guarantee that the generation is incremented
92 * when the bit is cleared atomically (by using a 1 bit add).
93 */
94 static struct unlocked_clock_calend {
95 struct clock_calend calend; /* copy of calendar */
96 uint32_t gen; /* generation count */
97 } flipflop[ 2];
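/*
 * Illustrative timeline of the protocol (hypothetical generation values):
 *
 *	writer: hw_atomic_or(&gen, 1)	6 -> 7	update in progress
 *	        copy clock_calend		calend may be torn
 *	        hw_atomic_add(&gen, 1)	7 -> 8	update complete
 *
 * A reader snapshots an element, masks the low bit off its copy of gen,
 * and compares against the live count: a match means no update started
 * or finished during the copy, so the snapshot is consistent; a mismatch
 * (off by one or two) means it must retry on the other element.
 */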
98
99 static void clock_track_calend_nowait(void);
100
101 #endif
102
103 /*
104 * Calendar adjustment variables and values.
105 */
106 #define calend_adjperiod (NSEC_PER_SEC / 100) /* adjustment period, ns */
107 #define calend_adjskew (40 * NSEC_PER_USEC) /* "standard" skew, ns / period */
108 #define calend_adjbig (NSEC_PER_SEC) /* use 10x skew above adjbig ns */
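/*
 * At the standard skew the calendar slews by 40 us every 10 ms period,
 * i.e. roughly 4 ms of correction per second of real time; adjustments
 * larger than calend_adjbig slew ten times faster.  As an illustration,
 * a +500 ms adjtime() request needs 500 ms / 40 us = 12,500 periods,
 * or about 125 s of real time, to complete.
 */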
109
110 static int64_t calend_adjtotal; /* Nanosecond remaining total adjustment */
111 static uint64_t calend_adjdeadline; /* Absolute time value for next adjustment period */
112 static uint32_t calend_adjinterval; /* Absolute time interval of adjustment period */
113
114 static timer_call_data_t calend_adjcall;
115 static uint32_t calend_adjactive;
116
117 static uint32_t calend_set_adjustment(
118 long *secs,
119 int *microsecs);
120
121 static void calend_adjust_call(void);
122 static uint32_t calend_adjust(void);
123
124 static thread_call_data_t calend_wakecall;
125
126 extern void IOKitResetTime(void);
127
128 void _clock_delay_until_deadline(uint64_t interval,
129 uint64_t deadline);
130
131 static uint64_t clock_boottime; /* Seconds boottime epoch */
132
133 #define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
134 MACRO_BEGIN \
135 if (((rfrac) += (frac)) >= (unit)) { \
136 (rfrac) -= (unit); \
137 (rsecs) += 1; \
138 } \
139 (rsecs) += (secs); \
140 MACRO_END
141
142 #define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
143 MACRO_BEGIN \
144 if ((int)((rfrac) -= (frac)) < 0) { \
145 (rfrac) += (unit); \
146 (rsecs) -= 1; \
147 } \
148 (rsecs) -= (secs); \
149 MACRO_END
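/*
 * Example of the borrow handling in TIME_SUB (illustrative values, with
 * unit == USEC_PER_SEC): subtracting 2 s 700,000 us from 10 s 250,000 us
 * drives the fraction negative, so USEC_PER_SEC is added back and one
 * second is borrowed, yielding 7 s 550,000 us.
 */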
150
151 /*
152 * clock_config:
153 *
154 * Called once at boot to configure the clock subsystem.
155 */
156 void
157 clock_config(void)
158 {
159 clock_lock_init();
160
161 timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
162 thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);
163
164 clock_oldconfig();
165 }
166
167 /*
168 * clock_init:
169 *
170 * Called on a processor each time it is started.
171 */
172 void
173 clock_init(void)
174 {
175 clock_oldinit();
176 }
177
178 /*
179 * clock_timebase_init:
180 *
181 * Called by machine dependent code
182 * to initialize areas dependent on the
183 * timebase value. May be called multiple
184 * times during start up.
185 */
186 void
187 clock_timebase_init(void)
188 {
189 uint64_t abstime;
190
191 nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
192 calend_adjinterval = (uint32_t)abstime;
193
194 nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
195 hz_tick_interval = (uint32_t)abstime;
196
197 sched_timebase_init();
198 }
199
200 /*
201 * mach_timebase_info_trap:
202 *
203 * User trap returns timebase constant.
204 */
205 kern_return_t
206 mach_timebase_info_trap(
207 struct mach_timebase_info_trap_args *args)
208 {
209 mach_vm_address_t out_info_addr = args->info;
210 mach_timebase_info_data_t info;
211
212 clock_timebase_info(&info);
213
214 copyout((void *)&info, out_info_addr, sizeof (info));
215
216 return (KERN_SUCCESS);
217 }
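/*
 * The numer/denom pair copied out here is what user code uses to scale
 * mach_absolute_time() into nanoseconds, e.g. (illustrative sketch):
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ns = mach_absolute_time() * tb.numer / tb.denom;
 */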
218
219 /*
220 * Calendar routines.
221 */
222
223 /*
224 * clock_get_calendar_microtime:
225 *
226 * Returns the current calendar value,
227 * microseconds as the fraction.
228 */
229 void
230 clock_get_calendar_microtime(
231 clock_sec_t *secs,
232 clock_usec_t *microsecs)
233 {
234 clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
235 }
236
237 /*
238 * clock_get_calendar_absolute_and_microtime:
239 *
240 * Returns the current calendar value,
241 * microseconds as the fraction. Also
242 * returns mach_absolute_time if abstime
243 * is not NULL.
244 */
245 void
246 clock_get_calendar_absolute_and_microtime(
247 clock_sec_t *secs,
248 clock_usec_t *microsecs,
249 uint64_t *abstime)
250 {
251 uint64_t now;
252 spl_t s;
253
254 s = splclock();
255 clock_lock();
256
257 now = mach_absolute_time();
258 if (abstime)
259 *abstime = now;
260
261 if (clock_calend.adjdelta < 0) {
262 uint32_t t32;
263
264 /*
265 * Since offset is decremented during a negative adjustment,
266 * ensure that time increases monotonically without going
267 * temporarily backwards.
268 * If the delta has not yet passed, now is set to the start
269 * of the current adjustment period; otherwise, we're between
270 * the expiry of the delta and the next call to calend_adjust(),
271 * and we offset accordingly.
272 */
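/*
 * As a numeric illustration (made-up absolute-time values): with
 * adjstart = 1000 and adjoffset = 40, a read at now = 1020 reports the
 * time as of 1000, since only part of the 40-unit slew has elapsed,
 * while a read at now = 1100 reports 1060, matching what calend_adjust()
 * will publish once it folds adjoffset into the calendar offset.
 */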
273 if (now > clock_calend.adjstart) {
274 t32 = (uint32_t)(now - clock_calend.adjstart);
275
276 if (t32 > clock_calend.adjoffset)
277 now -= clock_calend.adjoffset;
278 else
279 now = clock_calend.adjstart;
280 }
281 }
282
283 now += clock_calend.offset;
284
285 absolutetime_to_microtime(now, secs, microsecs);
286
287 *secs += (clock_sec_t)clock_calend.epoch;
288
289 clock_unlock();
290 splx(s);
291 }
292
293 /*
294 * clock_get_calendar_nanotime:
295 *
296 * Returns the current calendar value,
297 * nanoseconds as the fraction.
298 *
299 * Since we do not have an interface to
300 * set the calendar with resolution greater
301 * than a microsecond, we honor that here.
302 */
303 void
304 clock_get_calendar_nanotime(
305 clock_sec_t *secs,
306 clock_nsec_t *nanosecs)
307 {
308 uint64_t now;
309 spl_t s;
310
311 s = splclock();
312 clock_lock();
313
314 now = mach_absolute_time();
315
316 if (clock_calend.adjdelta < 0) {
317 uint32_t t32;
318
319 if (now > clock_calend.adjstart) {
320 t32 = (uint32_t)(now - clock_calend.adjstart);
321
322 if (t32 > clock_calend.adjoffset)
323 now -= clock_calend.adjoffset;
324 else
325 now = clock_calend.adjstart;
326 }
327 }
328
329 now += clock_calend.offset;
330
331 absolutetime_to_microtime(now, secs, nanosecs);
332
333 *nanosecs *= NSEC_PER_USEC;
334
335 *secs += (clock_sec_t)clock_calend.epoch;
336
337 clock_unlock();
338 splx(s);
339 }
340
341 /*
342 * clock_gettimeofday:
343 *
344 * Kernel interface for commpage implementation of
345 * gettimeofday() syscall.
346 *
347 * Returns the current calendar value, and updates the
348 * commpage info as appropriate. Because most calls to
349 * gettimeofday() are handled in user mode by the commpage,
350 * this routine should be used infrequently.
351 */
352 void
353 clock_gettimeofday(
354 clock_sec_t *secs,
355 clock_usec_t *microsecs)
356 {
357 uint64_t now;
358 spl_t s;
359
360 s = splclock();
361 clock_lock();
362
363 now = mach_absolute_time();
364
365 if (clock_calend.adjdelta >= 0) {
366 clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
367 }
368 else {
369 uint32_t t32;
370
371 if (now > clock_calend.adjstart) {
372 t32 = (uint32_t)(now - clock_calend.adjstart);
373
374 if (t32 > clock_calend.adjoffset)
375 now -= clock_calend.adjoffset;
376 else
377 now = clock_calend.adjstart;
378 }
379
380 now += clock_calend.offset;
381
382 absolutetime_to_microtime(now, secs, microsecs);
383
384 *secs += (clock_sec_t)clock_calend.epoch;
385 }
386
387 clock_unlock();
388 splx(s);
389 }
390
391 /*
392 * clock_set_calendar_microtime:
393 *
394 * Sets the current calendar value by
395 * recalculating the epoch and offset
396 * from the system clock.
397 *
398 * Also adjusts the boottime to keep the
399 * value consistent, writes the new
400 * calendar value to the platform clock,
401 * and sends calendar change notifications.
402 */
403 void
404 clock_set_calendar_microtime(
405 clock_sec_t secs,
406 clock_usec_t microsecs)
407 {
408 clock_sec_t sys;
409 clock_usec_t microsys;
410 clock_sec_t newsecs;
411 clock_usec_t newmicrosecs;
412 spl_t s;
413
414 newsecs = secs;
415 newmicrosecs = microsecs;
416
417 s = splclock();
418 clock_lock();
419
420 commpage_disable_timestamp();
421
422 /*
423 * Calculate the new calendar epoch based on
424 * the new value and the system clock.
425 */
426 clock_get_system_microtime(&sys, &microsys);
427 TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
428
429 /*
430 * Adjust the boottime based on the delta.
431 */
432 clock_boottime += secs - clock_calend.epoch;
433
434 /*
435 * Set the new calendar epoch.
436 */
437 clock_calend.epoch = secs;
438
439 nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
440
441 /*
442 * Cancel any adjustment in progress.
443 */
444 calend_adjtotal = clock_calend.adjdelta = 0;
445
446 clock_unlock();
447
448 /*
449 * Set the new value for the platform clock.
450 */
451 PESetUTCTimeOfDay(newsecs, newmicrosecs);
452
453 splx(s);
454
455 /*
456 * Send host notifications.
457 */
458 host_notify_calendar_change();
459
460 #if CONFIG_DTRACE
461 clock_track_calend_nowait();
462 #endif
463 }
464
465 /*
466 * clock_initialize_calendar:
467 *
468 * Set the calendar and related clocks
469 * from the platform clock at boot or
470 * wake event.
471 *
472 * Also sends host notifications.
473 */
474 void
475 clock_initialize_calendar(void)
476 {
477 clock_sec_t sys, secs;
478 clock_usec_t microsys, microsecs;
479 spl_t s;
480
481 PEGetUTCTimeOfDay(&secs, &microsecs);
482
483 s = splclock();
484 clock_lock();
485
486 commpage_disable_timestamp();
487
488 if ((long)secs >= (long)clock_boottime) {
489 /*
490 * Initialize the boot time based on the platform clock.
491 */
492 if (clock_boottime == 0)
493 clock_boottime = secs;
494
495 /*
496 * Calculate the new calendar epoch based on
497 * the platform clock and the system clock.
498 */
499 clock_get_system_microtime(&sys, &microsys);
500 TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
501
502 /*
503 * Set the new calendar epoch.
504 */
505 clock_calend.epoch = secs;
506
507 nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
508
509 /*
510 * Cancel any adjustment in progress.
511 */
512 calend_adjtotal = clock_calend.adjdelta = 0;
513 }
514
515 clock_unlock();
516 splx(s);
517
518 /*
519 * Send host notifications.
520 */
521 host_notify_calendar_change();
522
523 #if CONFIG_DTRACE
524 clock_track_calend_nowait();
525 #endif
526 }
527
528 /*
529 * clock_get_boottime_nanotime:
530 *
531 * Return the boottime, used by sysctl.
532 */
533 void
534 clock_get_boottime_nanotime(
535 clock_sec_t *secs,
536 clock_nsec_t *nanosecs)
537 {
538 spl_t s;
539
540 s = splclock();
541 clock_lock();
542
543 *secs = (clock_sec_t)clock_boottime;
544 *nanosecs = 0;
545
546 clock_unlock();
547 splx(s);
548 }
549
550 /*
551 * clock_adjtime:
552 *
553 * Interface to adjtime() syscall.
554 *
555 * Calculates adjustment variables and
556 * initiates adjustment.
557 */
558 void
559 clock_adjtime(
560 long *secs,
561 int *microsecs)
562 {
563 uint32_t interval;
564 spl_t s;
565
566 s = splclock();
567 clock_lock();
568
569 interval = calend_set_adjustment(secs, microsecs);
570 if (interval != 0) {
571 calend_adjdeadline = mach_absolute_time() + interval;
572 if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
573 calend_adjactive++;
574 }
575 else
576 if (timer_call_cancel(&calend_adjcall))
577 calend_adjactive--;
578
579 clock_unlock();
580 splx(s);
581 }
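/*
 * This is reached from the BSD adjtime(2) path.  For example
 * (illustrative), a user call such as
 *
 *	struct timeval delta = { 0, -30000 };
 *	adjtime(&delta, NULL);
 *
 * arrives here with *secs == 0 and *microsecs == -30000, and the 30 ms
 * correction is slewed gradually by the periodic adjustment timer rather
 * than by stepping the clock.
 */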
582
583 static uint32_t
584 calend_set_adjustment(
585 long *secs,
586 int *microsecs)
587 {
588 uint64_t now, t64;
589 int64_t total, ototal;
590 uint32_t interval = 0;
591
592 /*
593 * Compute the total adjustment time in nanoseconds.
594 */
595 total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);
596
597 /*
598 * Disable commpage gettimeofday().
599 */
600 commpage_disable_timestamp();
601
602 /*
603 * Get current absolute time.
604 */
605 now = mach_absolute_time();
606
607 /*
608 * Save the old adjustment total for later return.
609 */
610 ototal = calend_adjtotal;
611
612 /*
613 * Is a new correction specified?
614 */
615 if (total != 0) {
616 /*
617 * Set delta to the standard, small, adjustment skew.
618 */
619 int32_t delta = calend_adjskew;
620
621 if (total > 0) {
622 /*
623 * Positive adjustment. If greater than the preset 'big'
624 * threshold, slew at a faster rate, capping if necessary.
625 */
626 if (total > (int64_t) calend_adjbig)
627 delta *= 10;
628 if (delta > total)
629 delta = (int32_t)total;
630
631 /*
632 * Convert the delta back from ns to absolute time and store in adjoffset.
633 */
634 nanoseconds_to_absolutetime((uint64_t)delta, &t64);
635 clock_calend.adjoffset = (uint32_t)t64;
636 }
637 else {
638 /*
639 * Negative adjustment; therefore, negate the delta. If
640 * greater than the preset 'big' threshold, slew at a faster
641 * rate, capping if necessary.
642 */
643 if (total < (int64_t) -calend_adjbig)
644 delta *= 10;
645 delta = -delta;
646 if (delta < total)
647 delta = (int32_t)total;
648
649 /*
650 * Save the current absolute time. Subsequent time operations occurring
651 * during this negative correction can make use of this value to ensure
652 * that time increases monotonically.
653 */
654 clock_calend.adjstart = now;
655
656 /*
657 * Convert the delta back from ns to absolute time and store in adjoffset.
658 */
659 nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
660 clock_calend.adjoffset = (uint32_t)t64;
661 }
662
663 /*
664 * Store the total adjustment time in ns.
665 */
666 calend_adjtotal = total;
667
668 /*
669 * Store the delta for this adjustment period in ns.
670 */
671 clock_calend.adjdelta = delta;
672
673 /*
674 * Set the interval in absolute time for later return.
675 */
676 interval = calend_adjinterval;
677 }
678 else {
679 /*
680 * No change; clear any prior adjustment.
681 */
682 calend_adjtotal = clock_calend.adjdelta = 0;
683 }
684
685 /*
686 * If a prior correction was in progress, return the
687 * remaining uncorrected time from it.
688 */
689 if (ototal != 0) {
690 *secs = (long)(ototal / (long)NSEC_PER_SEC);
691 *microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
692 }
693 else
694 *secs = *microsecs = 0;
695
696 #if CONFIG_DTRACE
697 clock_track_calend_nowait();
698 #endif
699
700 return (interval);
701 }
702
703 static void
704 calend_adjust_call(void)
705 {
706 uint32_t interval;
707 spl_t s;
708
709 s = splclock();
710 clock_lock();
711
712 if (--calend_adjactive == 0) {
713 interval = calend_adjust();
714 if (interval != 0) {
715 clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);
716
717 if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
718 calend_adjactive++;
719 }
720 }
721
722 clock_unlock();
723 splx(s);
724 }
725
726 static uint32_t
727 calend_adjust(void)
728 {
729 uint64_t now, t64;
730 int32_t delta;
731 uint32_t interval = 0;
732
733 commpage_disable_timestamp();
734
735 now = mach_absolute_time();
736
737 delta = clock_calend.adjdelta;
738
739 if (delta > 0) {
740 clock_calend.offset += clock_calend.adjoffset;
741
742 calend_adjtotal -= delta;
743 if (delta > calend_adjtotal) {
744 clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;
745
746 nanoseconds_to_absolutetime((uint64_t)delta, &t64);
747 clock_calend.adjoffset = (uint32_t)t64;
748 }
749 }
750 else
751 if (delta < 0) {
752 clock_calend.offset -= clock_calend.adjoffset;
753
754 calend_adjtotal -= delta;
755 if (delta < calend_adjtotal) {
756 clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;
757
758 nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
759 clock_calend.adjoffset = (uint32_t)t64;
760 }
761
762 if (clock_calend.adjdelta != 0)
763 clock_calend.adjstart = now;
764 }
765
766 if (clock_calend.adjdelta != 0)
767 interval = calend_adjinterval;
768
769 #if CONFIG_DTRACE
770 clock_track_calend_nowait();
771 #endif
772
773 return (interval);
774 }
775
776 /*
777 * clock_wakeup_calendar:
778 *
779 * Interface to power management, used
780 * to initiate the reset of the calendar
781 * on wake from sleep event.
782 */
783 void
784 clock_wakeup_calendar(void)
785 {
786 thread_call_enter(&calend_wakecall);
787 }
788
789 /*
790 * Wait / delay routines.
791 */
792 static void
793 mach_wait_until_continue(
794 __unused void *parameter,
795 wait_result_t wresult)
796 {
797 thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
798 /*NOTREACHED*/
799 }
800
801 /*
802 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
803 *
804 * Parameters: args->deadline Absolute time until which to wait
805 *
806 * Returns: 0 Success
807 * !0 Not success
808 *
809 */
810 kern_return_t
811 mach_wait_until_trap(
812 struct mach_wait_until_trap_args *args)
813 {
814 uint64_t deadline = args->deadline;
815 wait_result_t wresult;
816
817 wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
818 TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
819 if (wresult == THREAD_WAITING)
820 wresult = thread_block(mach_wait_until_continue);
821
822 return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
823 }
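/*
 * User code typically reaches this through mach_wait_until(); a sketch of
 * a roughly 500 us absolute-time sleep (illustrative):
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t delta = 500000ULL * tb.denom / tb.numer;
 *	mach_wait_until(mach_absolute_time() + delta);
 */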
824
825 void
826 clock_delay_until(
827 uint64_t deadline)
828 {
829 uint64_t now = mach_absolute_time();
830
831 if (now >= deadline)
832 return;
833
834 _clock_delay_until_deadline(deadline - now, deadline);
835 }
836
837 /*
838 * Preserve the original precise interval that the client
839 * requested for comparison to the spin threshold.
840 */
841 void
842 _clock_delay_until_deadline(
843 uint64_t interval,
844 uint64_t deadline)
845 {
846
847 if (interval == 0)
848 return;
849
850 if ( ml_delay_should_spin(interval) ||
851 get_preemption_level() != 0 ||
852 ml_get_interrupts_enabled() == FALSE ) {
853 machine_delay_until(interval, deadline);
854 } else {
855 assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
856
857 thread_block(THREAD_CONTINUE_NULL);
858 }
859 }
860
861
862 void
863 delay_for_interval(
864 uint32_t interval,
865 uint32_t scale_factor)
866 {
867 uint64_t abstime;
868
869 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
870
871 _clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
872 }
873
874 void
875 delay(
876 int usec)
877 {
878 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
879 }
880
881 /*
882 * Miscellaneous routines.
883 */
884 void
885 clock_interval_to_deadline(
886 uint32_t interval,
887 uint32_t scale_factor,
888 uint64_t *result)
889 {
890 uint64_t abstime;
891
892 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
893
894 *result = mach_absolute_time() + abstime;
895 }
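/*
 * Typical usage (illustrative sketch; mytimer stands for an already
 * initialized timer_call): the scale factor is the number of nanoseconds
 * per unit of the interval, so arming a timer 10 ms out looks like
 *
 *	uint64_t deadline;
 *	clock_interval_to_deadline(10, NSEC_PER_MSEC, &deadline);
 *	timer_call_enter(&mytimer, deadline, 0);
 */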
896
897 void
898 clock_absolutetime_interval_to_deadline(
899 uint64_t abstime,
900 uint64_t *result)
901 {
902 *result = mach_absolute_time() + abstime;
903 }
904
905 void
906 clock_get_uptime(
907 uint64_t *result)
908 {
909 *result = mach_absolute_time();
910 }
911
912 void
913 clock_deadline_for_periodic_event(
914 uint64_t interval,
915 uint64_t abstime,
916 uint64_t *deadline)
917 {
918 assert(interval != 0);
919
920 *deadline += interval;
921
922 if (*deadline <= abstime) {
923 *deadline = abstime + interval;
924 abstime = mach_absolute_time();
925
926 if (*deadline <= abstime)
927 *deadline = abstime + interval;
928 }
929 }
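/*
 * Example of the catch-up behaviour (illustrative values): with
 * interval = 10 and a previous deadline of 100, a caller that does not
 * run until abstime = 135 first advances the deadline to 110, sees that
 * it is already in the past, and resets it to 145 so the next event lies
 * a full interval in the future rather than firing a burst of missed ones.
 */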
930
931 #if CONFIG_DTRACE
932
933 /*
934 * clock_get_calendar_nanotime_nowait
935 *
936 * Description: Non-blocking version of clock_get_calendar_nanotime()
937 *
938 * Notes: This function operates by separately tracking calendar time
939 * updates using a two element structure to copy the calendar
940 * state, which may be asynchronously modified. It utilizes
941 * barrier instructions in the tracking process and in the local
942 * stable snapshot process in order to ensure that a consistent
943 * snapshot is used to perform the calculation.
944 */
945 void
946 clock_get_calendar_nanotime_nowait(
947 clock_sec_t *secs,
948 clock_nsec_t *nanosecs)
949 {
950 int i = 0;
951 uint64_t now;
952 struct unlocked_clock_calend stable;
953
954 for (;;) {
955 stable = flipflop[i]; /* take snapshot */
956
957 /*
958 * Use barrier instructions to ensure atomicity. We AND
959 * off the "in progress" bit to get the current generation
960 * count.
961 */
962 (void)hw_atomic_and(&stable.gen, ~(uint32_t)1);
963
964 /*
965 * If an update _is_ in progress, the generation count will be
966 * off by one; if it _was_ in progress, it will be off by two;
967 * and if we caught it at a good time, it will be equal (and
968 * our snapshot is therefore stable).
969 */
970 if (flipflop[i].gen == stable.gen)
971 break;
972
973 /* Switch to the other element of the flipflop, and try again. */
974 i ^= 1;
975 }
976
977 now = mach_absolute_time();
978
979 if (stable.calend.adjdelta < 0) {
980 uint32_t t32;
981
982 if (now > stable.calend.adjstart) {
983 t32 = (uint32_t)(now - stable.calend.adjstart);
984
985 if (t32 > stable.calend.adjoffset)
986 now -= stable.calend.adjoffset;
987 else
988 now = stable.calend.adjstart;
989 }
990 }
991
992 now += stable.calend.offset;
993
994 absolutetime_to_microtime(now, secs, nanosecs);
995 *nanosecs *= NSEC_PER_USEC;
996
997 *secs += (clock_sec_t)stable.calend.epoch;
998 }
999
1000 static void
1001 clock_track_calend_nowait(void)
1002 {
1003 int i;
1004
1005 for (i = 0; i < 2; i++) {
1006 struct clock_calend tmp = clock_calend;
1007
1008 /*
1009 * Set the low bit of the generation count; since we use a
1010 * barrier instruction to do this, we are guaranteed that this
1011 * will flag an update in progress to an async caller trying
1012 * to examine the contents.
1013 */
1014 (void)hw_atomic_or(&flipflop[i].gen, 1);
1015
1016 flipflop[i].calend = tmp;
1017
1018 /*
1019 * Increment the generation count to clear the low bit to
1020 * signal completion. If a caller compares the generation
1021 * count after taking a copy while in progress, the count
1022 * will be off by two.
1023 */
1024 (void)hw_atomic_add(&flipflop[i].gen, 1);
1025 }
1026 }
1027
1028 #endif /* CONFIG_DTRACE */
1029