/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */

#include <mach/mach_types.h>

#include <kern/lock.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

uint32_t hz_tick_interval = 1;


decl_simple_lock_data(,clock_lock)

#define clock_lock() \
	simple_lock(&clock_lock)

#define clock_unlock() \
	simple_unlock(&clock_lock)

#define clock_lock_init() \
	simple_lock_init(&clock_lock, 0)


/*
 * Time of day (calendar) variables.
 *
 * Algorithm:
 *
 * TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 * where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {
	uint64_t epoch;
	uint64_t offset;

	int32_t adjdelta; /* Nanosecond time delta for this adjustment period */
	uint64_t adjstart; /* Absolute time value for start of this adjustment period */
	uint32_t adjoffset; /* Absolute time offset for this adjustment period as absolute value */
} clock_calend;
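
/*
 * Illustrative sketch: ignoring any in-progress negative adjustment, the
 * calendar read paths below effectively compute
 *
 *	now = mach_absolute_time() + clock_calend.offset;
 *	absolutetime_to_microtime(now, &secs, &microsecs);
 *	secs += (clock_sec_t)clock_calend.epoch;
 *
 * i.e. the epoch carries whole seconds of calendar time and the offset
 * carries the remainder in absolute-time units.
 */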

#if CONFIG_DTRACE

/*
 * Unlocked calendar flipflop; this is used to track a clock_calend such
 * that we can safely access a snapshot of a valid clock_calend structure
 * without needing to take any locks to do it.
 *
 * The trick is to use a generation count and set the low bit when it is
 * being updated/read; by doing this, we guarantee, through use of the
 * hw_atomic functions, that the generation is incremented when the bit
 * is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend calend; /* copy of calendar */
	uint32_t gen; /* generation count */
} flipflop[2];

static void clock_track_calend_nowait(void);

#endif

/*
 * Calendar adjustment variables and values.
 */
#define calend_adjperiod (NSEC_PER_SEC / 100) /* adjustment period, ns */
#define calend_adjskew (40 * NSEC_PER_USEC) /* "standard" skew, ns / period */
#define calend_adjbig (NSEC_PER_SEC) /* use 10x skew above adjbig ns */
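
/*
 * Worked example: an adjtime() total of +1 ms is slewed at
 * calend_adjskew = 40 us per 10 ms period, so it completes in roughly
 * 25 periods (about 250 ms). Totals larger than calend_adjbig (1 s)
 * are slewed ten times faster, at 400 us per period.
 */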

static int64_t calend_adjtotal; /* Nanosecond remaining total adjustment */
static uint64_t calend_adjdeadline; /* Absolute time value for next adjustment period */
static uint32_t calend_adjinterval; /* Absolute time interval of adjustment period */

static timer_call_data_t calend_adjcall;
static uint32_t calend_adjactive;

static uint32_t calend_set_adjustment(
	long *secs,
	int *microsecs);

static void calend_adjust_call(void);
static uint32_t calend_adjust(void);

static thread_call_data_t calend_wakecall;

extern void IOKitResetTime(void);

static uint64_t clock_boottime; /* Seconds boottime epoch */

#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
MACRO_BEGIN \
	if (((rfrac) += (frac)) >= (unit)) { \
		(rfrac) -= (unit); \
		(rsecs) += 1; \
	} \
	(rsecs) += (secs); \
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
MACRO_BEGIN \
	if ((int)((rfrac) -= (frac)) < 0) { \
		(rfrac) += (unit); \
		(rsecs) -= 1; \
	} \
	(rsecs) -= (secs); \
MACRO_END
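
/*
 * Worked example: with rsecs = 100 and rfrac = 200000 us,
 * TIME_SUB(rsecs, 1, rfrac, 500000, USEC_PER_SEC) borrows from the
 * seconds field, leaving rsecs = 98 and rfrac = 700000, i.e.
 * 100.200000 s - 1.500000 s = 98.700000 s.
 */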

/*
 * clock_config:
 *
 * Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
	thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

	clock_oldconfig();
}

/*
 * clock_init:
 *
 * Called on a processor each time it is started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 * clock_timebase_init:
 *
 * Called by machine dependent code
 * to initialize areas dependent on the
 * timebase value. May be called multiple
 * times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = (uint32_t)abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 * mach_timebase_info_trap:
 *
 * User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t out_info_addr = args->info;
	mach_timebase_info_data_t info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
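
/*
 * Usage sketch: from user space, the timebase constant returned here is
 * typically used to convert mach_absolute_time() ticks to nanoseconds,
 * e.g.
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ns = mach_absolute_time() * tb.numer / tb.denom;
 */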

/*
 * Calendar routines.
 */

/*
 * clock_get_calendar_microtime:
 *
 * Returns the current calendar value,
 * microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t *secs,
	clock_usec_t *microsecs)
{
	uint64_t now;
	spl_t s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t t32;

		/*
		 * Since offset is decremented during a negative adjustment,
		 * ensure that time increases monotonically without going
		 * temporarily backwards.
		 * If the delta has not yet passed, now is set to the start
		 * of the current adjustment period; otherwise, we're between
		 * the expiry of the delta and the next call to calend_adjust(),
		 * and we offset accordingly.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 * clock_get_calendar_nanotime:
 *
 * Returns the current calendar value,
 * nanoseconds as the fraction.
 *
 * Since we do not have an interface to
 * set the calendar with resolution greater
 * than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t *secs,
	clock_nsec_t *nanosecs)
{
	uint64_t now;
	spl_t s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);

	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 * clock_gettimeofday:
 *
 * Kernel interface for commpage implementation of
 * gettimeofday() syscall.
 *
 * Returns the current calendar value, and updates the
 * commpage info as appropriate. Because most calls to
 * gettimeofday() are handled in user mode by the commpage,
 * this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t *secs,
	clock_usec_t *microsecs)
{
	uint64_t now;
	spl_t s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}

	clock_unlock();
	splx(s);
}

/*
 * clock_set_calendar_microtime:
 *
 * Sets the current calendar value by
 * recalculating the epoch and offset
 * from the system clock.
 *
 * Also adjusts the boottime to keep the
 * value consistent, writes the new
 * calendar value to the platform clock,
 * and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t secs,
	clock_usec_t microsecs)
{
	clock_sec_t sys;
	clock_usec_t microsys;
	clock_sec_t newsecs;
	spl_t s;

	newsecs = (microsecs < USEC_PER_SEC / 2)? secs: secs + 1;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 * Calculate the new calendar epoch based on
	 * the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 * Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 * Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	/*
	 * Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 * Set the new value for the platform clock.
	 */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 * clock_initialize_calendar:
 *
 * Set the calendar and related clocks
 * from the platform clock at boot or
 * wake event.
 *
 * Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t sys, secs = PEGetGMTTimeOfDay();
	clock_usec_t microsys, microsecs = 0;
	spl_t s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 * Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 * Calculate the new calendar epoch based on
		 * the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 * Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;

		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 * Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 * clock_get_boottime_nanotime:
 *
 * Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t *secs,
	clock_nsec_t *nanosecs)
{
	spl_t s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = 0;

	clock_unlock();
	splx(s);
}

/*
 * clock_adjtime:
 *
 * Interface to adjtime() syscall.
 *
 * Calculates adjustment variables and
 * initiates adjustment.
 */
void
clock_adjtime(
	long *secs,
	int *microsecs)
{
	uint32_t interval;
	spl_t s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}

static uint32_t
calend_set_adjustment(
	long *secs,
	int *microsecs)
{
	uint64_t now, t64;
	int64_t total, ototal;
	uint32_t interval = 0;

	/*
	 * Compute the total adjustment time in nanoseconds.
	 */
	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	/*
	 * Disable commpage gettimeofday().
	 */
	commpage_disable_timestamp();

	/*
	 * Get current absolute time.
	 */
	now = mach_absolute_time();

	/*
	 * Save the old adjustment total for later return.
	 */
	ototal = calend_adjtotal;

	/*
	 * Is a new correction specified?
	 */
	if (total != 0) {
		/*
		 * Set delta to the standard, small, adjustment skew.
		 */
		int32_t delta = calend_adjskew;

		if (total > 0) {
			/*
			 * Positive adjustment. If greater than the preset 'big'
			 * threshold, slew at a faster rate, capping if necessary.
			 */
			if (total > calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			/*
			 * Negative adjustment; therefore, negate the delta. If
			 * greater than the preset 'big' threshold, slew at a faster
			 * rate, capping if necessary.
			 */
			if (total < -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			/*
			 * Save the current absolute time. Subsequent time operations occurring
			 * during this negative correction can make use of this value to ensure
			 * that time increases monotonically.
			 */
			clock_calend.adjstart = now;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/*
		 * Store the total adjustment time in ns.
		 */
		calend_adjtotal = total;

		/*
		 * Store the delta for this adjustment period in ns.
		 */
		clock_calend.adjdelta = delta;

		/*
		 * Set the interval in absolute time for later return.
		 */
		interval = calend_adjinterval;
	}
	else {
		/*
		 * No change; clear any prior adjustment.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/*
	 * If a prior correction was in progress, return the
	 * remaining uncorrected time from it.
	 */
	if (ototal != 0) {
		*secs = (long)(ototal / NSEC_PER_SEC);
		*microsecs = (int)((ototal % NSEC_PER_SEC) / NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

static void
calend_adjust_call(void)
{
	uint32_t interval;
	spl_t s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}

static uint32_t
calend_adjust(void)
{
	uint64_t now, t64;
	int32_t delta;
	uint32_t interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else
	if (delta < 0) {
		clock_calend.offset -= clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		if (clock_calend.adjdelta != 0)
			clock_calend.adjstart = now;
	}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}
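
/*
 * Worked example: for a positive adjustment with calend_adjtotal = 100 us
 * and adjdelta = 40 us, the first two periods each add 40 us (adjoffset)
 * to the calendar offset; the third period is clamped to the 20 us
 * remainder, after which adjdelta reaches zero and no further periods
 * are scheduled.
 */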

/*
 * clock_wakeup_calendar:
 *
 * Interface to power management, used
 * to initiate the reset of the calendar
 * on wake from sleep event.
 */
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}

/*
 * Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void *parameter,
	wait_result_t wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args *args)
{
	uint64_t deadline = args->deadline;
	wait_result_t wresult;

	wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}

void
clock_delay_until(
	uint64_t deadline)
{
	uint64_t now = mach_absolute_time();

	if (now >= deadline)
		return;

	if ( (deadline - now) < (8 * sched_cswtime) ||
			get_preemption_level() != 0 ||
			ml_get_interrupts_enabled() == FALSE )
		machine_delay_until(deadline);
	else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

		thread_block(THREAD_CONTINUE_NULL);
	}
}

void
delay_for_interval(
	uint32_t interval,
	uint32_t scale_factor)
{
	uint64_t end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	clock_delay_until(end);
}

void
delay(
	int usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}

/*
 * Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t interval,
	uint32_t scale_factor,
	uint64_t *result)
{
	uint64_t abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t abstime,
	uint64_t *result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_get_uptime(
	uint64_t *result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t interval,
	uint64_t abstime,
	uint64_t *deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
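
/*
 * Worked example: with interval = 10 and *deadline = 100, a caller
 * passing abstime = 125 first advances the deadline to 110; since that
 * is already in the past it is resynchronized to abstime + interval
 * = 135, and checked once more against a fresh mach_absolute_time().
 */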

#if CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description: Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes: This function operates by separately tracking calendar time
 * updates using a two element structure to copy the calendar
 * state, which may be asynchronously modified. It utilizes
 * barrier instructions in the tracking process and in the local
 * stable snapshot process in order to ensure that a consistent
 * snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t *secs,
	clock_nsec_t *nanosecs)
{
	int i = 0;
	uint64_t now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i]; /* take snapshot */

		/*
		 * Use a barrier instruction to ensure atomicity. We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)stable.calend.epoch;
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion. If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}

#endif /* CONFIG_DTRACE */