/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */

#include <mach/mach_types.h>

#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

#include <sys/kdebug.h>

uint32_t	hz_tick_interval = 1;


decl_simple_lock_data(,clock_lock)

#define clock_lock()	\
	simple_lock(&clock_lock)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)


/*
 * Time of day (calendar) variables.
 *
 * Algorithm:
 *
 * TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 * where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {
	uint64_t	epoch;
	uint64_t	offset;
	uint64_t	epoch_absolute;

	int32_t		adjdelta;	/* Nanosecond time delta for this adjustment period */
	uint64_t	adjstart;	/* Absolute time value for start of this adjustment period */
	uint32_t	adjoffset;	/* Absolute time offset for this adjustment period as absolute value */
} clock_calend;

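/*
 * Illustrative sketch: reading the calendar amounts to converting
 * "mach_absolute_time() + offset" into seconds and a fraction, then
 * adding the epoch.  The helper below is hypothetical and ignores
 * locking and adjustment compensation; it only shows the shape of the
 * computation performed by the readers further down.
 *
 *	static void
 *	example_read_calendar(clock_sec_t *secs, clock_usec_t *usecs)
 *	{
 *		uint64_t now = mach_absolute_time() + clock_calend.offset;
 *
 *		absolutetime_to_microtime(now, secs, usecs);
 *		*secs += (clock_sec_t)clock_calend.epoch;
 *	}
 */
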
#if CONFIG_DTRACE

/*
 * Unlocked calendar flipflop; this is used to track a clock_calend such
 * that we can safely access a snapshot of a valid clock_calend structure
 * without needing to take any locks to do it.
 *
 * The trick is to use a generation count and set the low bit when it is
 * being updated/read; by doing this, we guarantee, through use of the
 * hw_atomic functions, that the generation is incremented when the bit
 * is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count */
} flipflop[2];

static void clock_track_calend_nowait(void);

#endif

/*
 * Calendar adjustment variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define calend_adjbig		(NSEC_PER_SEC)		/* use 10x skew above adjbig ns */

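/*
 * Worked example: with calend_adjperiod = 10 ms and calend_adjskew = 40 us,
 * an ordinary adjtime() correction slews the calendar by 40 us every 10 ms,
 * i.e. 0.4% (4000 ppm).  Corrections larger than calend_adjbig (1 s) use
 * 10x that skew: 400 us per 10 ms period, i.e. 4%.
 */
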
static int64_t		calend_adjtotal;	/* Nanosecond remaining total adjustment */
static uint64_t		calend_adjdeadline;	/* Absolute time value for next adjustment period */
static uint32_t		calend_adjinterval;	/* Absolute time interval of adjustment period */

static timer_call_data_t	calend_adjcall;
static uint32_t			calend_adjactive;

static uint32_t	calend_set_adjustment(
			long		*secs,
			int		*microsecs);

static void	calend_adjust_call(void);
static uint32_t	calend_adjust(void);

void _clock_delay_until_deadline(uint64_t	interval,
				 uint64_t	deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t	interval,
					     uint64_t	deadline,
					     uint64_t	leeway);

static uint64_t		clock_boottime;		/* Seconds boottime epoch */

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END

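/*
 * Example of the carry/borrow behavior of the macros above (values are
 * hypothetical, for exposition only):
 *
 *	clock_sec_t  s = 10;
 *	clock_usec_t u = 900000;
 *
 *	TIME_ADD(s, 2, u, 200000, USEC_PER_SEC);	// s == 13, u == 100000
 *	TIME_SUB(s, 1, u, 300000, USEC_PER_SEC);	// s == 11, u == 800000
 */
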
/*
 * clock_config:
 *
 * Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);

	clock_oldconfig();
}

/*
 * clock_init:
 *
 * Called on a processor each time it is started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 * clock_timebase_init:
 *
 * Called by machine dependent code
 * to initialize areas dependent on the
 * timebase value.  May be called multiple
 * times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = (uint32_t)abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 * mach_timebase_info_trap:
 *
 * User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t		out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}

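/*
 * User-space sketch: the constant returned by this trap (normally obtained
 * through the mach_timebase_info() wrapper) converts mach_absolute_time()
 * units into nanoseconds.  Hypothetical usage:
 *
 *	mach_timebase_info_data_t tb;
 *
 *	mach_timebase_info(&tb);
 *	uint64_t ns = mach_absolute_time() * tb.numer / tb.denom;
 */
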
/*
 * Calendar routines.
 */

/*
 * clock_get_calendar_microtime:
 *
 * Returns the current calendar value,
 * microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}

/*
 * clock_get_calendar_absolute_and_microtime:
 *
 * Returns the current calendar value,
 * microseconds as the fraction.  Also
 * returns mach_absolute_time if abstime
 * is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();
	if (abstime)
		*abstime = now;

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		/*
		 * Since offset is decremented during a negative adjustment,
		 * ensure that time increases monotonically without going
		 * temporarily backwards.
		 * If the delta has not yet passed, now is set to the start
		 * of the current adjustment period; otherwise, we're between
		 * the expiry of the delta and the next call to calend_adjust(),
		 * and we offset accordingly.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 * clock_get_calendar_nanotime:
 *
 * Returns the current calendar value,
 * nanoseconds as the fraction.
 *
 * Since we do not have an interface to
 * set the calendar with resolution greater
 * than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);

	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 * clock_gettimeofday:
 *
 * Kernel interface for commpage implementation of
 * gettimeofday() syscall.
 *
 * Returns the current calendar value, and updates the
 * commpage info as appropriate.  Because most calls to
 * gettimeofday() are handled in user mode by the commpage,
 * this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}

	clock_unlock();
	splx(s);
}

/*
 * clock_set_calendar_microtime:
 *
 * Sets the current calendar value by
 * recalculating the epoch and offset
 * from the system clock.
 *
 * Also adjusts the boottime to keep the
 * value consistent, writes the new
 * calendar value to the platform clock,
 * and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	clock_sec_t		sys;
	clock_usec_t		microsys;
	clock_sec_t		newsecs;
	clock_usec_t		newmicrosecs;
	spl_t			s;

	newsecs = secs;
	newmicrosecs = microsecs;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 * Calculate the new calendar epoch based on
	 * the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 * Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 * Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	clock_interval_to_absolutetime_interval((uint32_t) secs, NSEC_PER_SEC, &clock_calend.epoch_absolute);
	clock_calend.epoch_absolute += clock_calend.offset;

	/*
	 * Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 * Set the new value for the platform clock.
	 */
	PESetUTCTimeOfDay(newsecs, newmicrosecs);

	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 * clock_initialize_calendar:
 *
 * Set the calendar and related clocks
 * from the platform clock at boot or
 * wake event.
 *
 * Also sends host notifications.
 */

uint64_t mach_absolutetime_asleep;
uint64_t mach_absolutetime_last_sleep;

void
clock_initialize_calendar(void)
{
	clock_sec_t		sys, secs;
	clock_usec_t		microsys, microsecs;
	uint64_t		new_epoch;
	spl_t			s;

	PEGetUTCTimeOfDay(&secs, &microsecs);

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 * Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 * Calculate the new calendar epoch based on
		 * the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 * Set the new calendar epoch.
		 */

		clock_calend.epoch = secs;

		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		clock_interval_to_absolutetime_interval((uint32_t) secs, NSEC_PER_SEC, &new_epoch);
		new_epoch += clock_calend.offset;

		if (clock_calend.epoch_absolute)
		{
			mach_absolutetime_last_sleep = new_epoch - clock_calend.epoch_absolute;
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_CLOCK,MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
				(uintptr_t) mach_absolutetime_last_sleep,
				(uintptr_t) mach_absolutetime_asleep,
				(uintptr_t) (mach_absolutetime_last_sleep >> 32),
				(uintptr_t) (mach_absolutetime_asleep >> 32),
				0);
		}
		clock_calend.epoch_absolute = new_epoch;

		/*
		 * Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 * clock_get_boottime_nanotime:
 *
 * Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = 0;

	clock_unlock();
	splx(s);
}

/*
 * clock_adjtime:
 *
 * Interface to adjtime() syscall.
 *
 * Calculates adjustment variables and
 * initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}

static uint32_t
calend_set_adjustment(
	long			*secs,
	int			*microsecs)
{
	uint64_t		now, t64;
	int64_t			total, ototal;
	uint32_t		interval = 0;

	/*
	 * Compute the total adjustment time in nanoseconds.
	 */
	total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);

	/*
	 * Disable commpage gettimeofday().
	 */
	commpage_disable_timestamp();

	/*
	 * Get current absolute time.
	 */
	now = mach_absolute_time();

	/*
	 * Save the old adjustment total for later return.
	 */
	ototal = calend_adjtotal;

	/*
	 * Is a new correction specified?
	 */
	if (total != 0) {
		/*
		 * Set delta to the standard, small, adjustment skew.
		 */
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/*
			 * Positive adjustment.  If greater than the preset 'big'
			 * threshold, slew at a faster rate, capping if necessary.
			 */
			if (total > (int64_t) calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			/*
			 * Negative adjustment; therefore, negate the delta.  If
			 * greater than the preset 'big' threshold, slew at a faster
			 * rate, capping if necessary.
			 */
			if (total < (int64_t) -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			/*
			 * Save the current absolute time.  Subsequent time operations occurring
			 * during this negative correction can make use of this value to ensure
			 * that time increases monotonically.
			 */
			clock_calend.adjstart = now;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/*
		 * Store the total adjustment time in ns.
		 */
		calend_adjtotal = total;

		/*
		 * Store the delta for this adjustment period in ns.
		 */
		clock_calend.adjdelta = delta;

		/*
		 * Set the interval in absolute time for later return.
		 */
		interval = calend_adjinterval;
	}
	else {
		/*
		 * No change; clear any prior adjustment.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/*
	 * If a prior correction was in progress, return the
	 * remaining uncorrected time from it.
	 */
	if (ototal != 0) {
		*secs = (long)(ototal / (long)NSEC_PER_SEC);
		*microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

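/*
 * Worked example: an adjtime() request of +1 ms yields total = 1,000,000 ns
 * above, which is below calend_adjbig, so delta stays at the standard
 * 40 us skew.  calend_adjust() then applies 40 us of offset per 10 ms
 * period, absorbing the full millisecond over roughly 25 periods
 * (about 250 ms), with the final period clamped to the remainder.
 */
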
static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}

static uint32_t
calend_adjust(void)
{
	uint64_t		now, t64;
	int32_t			delta;
	uint32_t		interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else
	if (delta < 0) {
		clock_calend.offset -= clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		if (clock_calend.adjdelta != 0)
			clock_calend.adjstart = now;
	}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

/*
 * Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:	args->deadline		Absolute time until which to wait
 *
 * Returns:	0			Success
 *		!0			Not success
 *
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args *args)
{
	uint64_t		deadline = args->deadline;
	wait_result_t		wresult;

	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
						   TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}

void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	_clock_delay_until_deadline(deadline - now, deadline);
}

/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t		interval,
	uint64_t		deadline)
{
	_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}

/*
 * Like _clock_delay_until_deadline, but it accepts a
 * leeway value.
 */
void
_clock_delay_until_deadline_with_leeway(
	uint64_t		interval,
	uint64_t		deadline,
	uint64_t		leeway)
{

	if (interval == 0)
		return;

	if (	ml_delay_should_spin(interval)	||
		get_preemption_level() != 0	||
		ml_get_interrupts_enabled() == FALSE	) {
		machine_delay_until(interval, deadline);
	} else {
		/*
		 * For now, assume a leeway request of 0 means the client does not want a leeway
		 * value.  We may want to change this interpretation in the future.
		 */

		if (leeway) {
			assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
		} else {
			assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
		}

		thread_block(THREAD_CONTINUE_NULL);
	}
}

void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}

void
delay_for_interval_with_leeway(
	uint32_t		interval,
	uint32_t		leeway,
	uint32_t		scale_factor)
{
	uint64_t		abstime_interval;
	uint64_t		abstime_leeway;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
	clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);

	_clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}

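/*
 * Usage sketch (hypothetical values): the delay interfaces above take an
 * interval together with a scale factor expressed in nanoseconds per unit:
 *
 *	delay_for_interval(10, NSEC_PER_MSEC);			// ~10 ms
 *	delay_for_interval_with_leeway(10, 5, NSEC_PER_MSEC);	// ~10 ms, 5 ms leeway
 *	delay(100);						// ~100 us
 */
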
/*
 * Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}

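/*
 * Usage sketch (hypothetical 10 ms period): maintaining a periodic deadline
 * with the routine above; if the caller fell behind, missed periods are
 * skipped rather than replayed back-to-back.
 *
 *	uint64_t period, deadline;
 *
 *	clock_interval_to_absolutetime_interval(10, NSEC_PER_MSEC, &period);
 *	clock_absolutetime_interval_to_deadline(period, &deadline);
 *	...
 *	clock_deadline_for_periodic_event(period, mach_absolute_time(), &deadline);
 */
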
#if CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t		now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use a barrier instruction to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)stable.calend.epoch;
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}

#endif	/* CONFIG_DTRACE */
