apple/xnu (xnu-1504.9.26): osfmk/kern/clock.c
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 */
33
34 #include <mach/mach_types.h>
35
36 #include <kern/lock.h>
37 #include <kern/spl.h>
38 #include <kern/sched_prim.h>
39 #include <kern/thread.h>
40 #include <kern/clock.h>
41 #include <kern/host_notify.h>
42
43 #include <IOKit/IOPlatformExpert.h>
44
45 #include <machine/commpage.h>
46
47 #include <mach/mach_traps.h>
48 #include <mach/mach_time.h>
49
50 uint32_t hz_tick_interval = 1;
51
52
53 decl_simple_lock_data(static,clock_lock)
54
55 #define clock_lock() \
56 simple_lock(&clock_lock)
57
58 #define clock_unlock() \
59 simple_unlock(&clock_lock)
60
61 #define clock_lock_init() \
62 simple_lock_init(&clock_lock, 0)
63
64
65 /*
66 * Time of day (calendar) variables.
67 *
68 * Algorithm:
69 *
70 * TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
71 *
72 * where CONV converts absolute time units into seconds and a fraction.
73 */
74 static struct clock_calend {
75
76 uint64_t epoch;
77 uint64_t offset;
78
79 int32_t adjdelta; /* Nanosecond time delta for this adjustment period */
80 uint64_t adjstart; /* Absolute time value for start of this adjustment period */
81 	uint32_t		adjoffset;	/* Per-period adjustment (magnitude of adjdelta) in absolute time units */
82 } clock_calend;
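
/*
 * In outline, the calendar value handed out by the routines below is
 *
 *	CONV(mach_absolute_time() + clock_calend.offset) + clock_calend.epoch
 *
 * with CONV as described above.  While a negative slew is in progress
 * (adjdelta < 0), up to adjoffset absolute-time units accumulated since
 * adjstart are held back first, so readers stay consistent with the
 * offset decrement applied at the end of each adjustment period.
 */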
83
84 #if CONFIG_DTRACE
85
86 /*
87 * Unlocked calendar flipflop; this is used to track a clock_calend such
88 * that we can safely access a snapshot of a valid clock_calend structure
89 * without needing to take any locks to do it.
90 *
91  * The trick is to use a generation count whose low bit is set while an
92  * update is in progress; by using the hw_atomic functions we guarantee
93  * that the generation is incremented when the bit is cleared atomically
94  * (by a 1 bit add).
95 */
96 static struct unlocked_clock_calend {
97 struct clock_calend calend; /* copy of calendar */
98 uint32_t gen; /* generation count */
99 } flipflop[ 2];
100
101 static void clock_track_calend_nowait(void);
102
103 #endif
104
105 /*
106 * Calendar adjustment variables and values.
107 */
108 #define calend_adjperiod (NSEC_PER_SEC / 100) /* adjustment period, ns */
109 #define calend_adjskew (40 * NSEC_PER_USEC) /* "standard" skew, ns / period */
110 #define calend_adjbig (NSEC_PER_SEC) /* use 10x skew above adjbig ns */
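
/*
 * With these values an adjustment slews at calend_adjskew per
 * calend_adjperiod: 40 us every 10 ms, i.e. roughly 4 ms of correction
 * per second of elapsed time.  When more than calend_adjbig (1 s)
 * remains outstanding, the skew is increased tenfold, giving about
 * 40 ms of correction per second.
 */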
111
112 static int64_t calend_adjtotal; /* Nanosecond remaining total adjustment */
113 static uint64_t calend_adjdeadline; /* Absolute time value for next adjustment period */
114 static uint32_t calend_adjinterval; /* Absolute time interval of adjustment period */
115
116 static timer_call_data_t calend_adjcall;
117 static uint32_t calend_adjactive;
118
119 static uint32_t calend_set_adjustment(
120 long *secs,
121 int *microsecs);
122
123 static void calend_adjust_call(void);
124 static uint32_t calend_adjust(void);
125
126 static thread_call_data_t calend_wakecall;
127
128 extern void IOKitResetTime(void);
129
130 static uint64_t clock_boottime; /* Seconds boottime epoch */
131
132 #define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
133 MACRO_BEGIN \
134 if (((rfrac) += (frac)) >= (unit)) { \
135 (rfrac) -= (unit); \
136 (rsecs) += 1; \
137 } \
138 (rsecs) += (secs); \
139 MACRO_END
140
141 #define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
142 MACRO_BEGIN \
143 if ((int)((rfrac) -= (frac)) < 0) { \
144 (rfrac) += (unit); \
145 (rsecs) -= 1; \
146 } \
147 (rsecs) -= (secs); \
148 MACRO_END
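
/*
 * Example: TIME_ADD(rsecs, 2, rfrac, 700000, USEC_PER_SEC) applied to
 * rsecs = 5, rfrac = 600000 carries the fractional overflow and leaves
 * rsecs = 8, rfrac = 300000.  TIME_SUB borrows in the symmetric way
 * when the fractional part underflows.
 */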
149
150 /*
151 * clock_config:
152 *
153 * Called once at boot to configure the clock subsystem.
154 */
155 void
156 clock_config(void)
157 {
158 clock_lock_init();
159
160 timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
161 thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);
162
163 clock_oldconfig();
164
165 /*
166 * Initialize the timer callouts.
167 */
168 timer_call_initialize();
169 }
170
171 /*
172 * clock_init:
173 *
174 * Called on a processor each time started.
175 */
176 void
177 clock_init(void)
178 {
179 clock_oldinit();
180 }
181
182 /*
183 * clock_timebase_init:
184 *
185 * Called by machine dependent code
186 * to initialize areas dependent on the
187 * timebase value. May be called multiple
188 * times during start up.
189 */
190 void
191 clock_timebase_init(void)
192 {
193 uint64_t abstime;
194
195 nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
196 calend_adjinterval = (uint32_t)abstime;
197
198 nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
199 hz_tick_interval = (uint32_t)abstime;
200
201 sched_timebase_init();
202 }
203
204 /*
205 * mach_timebase_info_trap:
206 *
207 * User trap returns timebase constant.
208 */
209 kern_return_t
210 mach_timebase_info_trap(
211 struct mach_timebase_info_trap_args *args)
212 {
213 mach_vm_address_t out_info_addr = args->info;
214 mach_timebase_info_data_t info;
215
216 clock_timebase_info(&info);
217
218 copyout((void *)&info, out_info_addr, sizeof (info));
219
220 return (KERN_SUCCESS);
221 }
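
/*
 * For reference, user code typically pairs this trap (via
 * mach_timebase_info()) with mach_absolute_time() to convert absolute
 * time to nanoseconds, along the lines of:
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ns = mach_absolute_time() * tb.numer / tb.denom;
 *
 * (Illustrative only; overflow handling is omitted.)
 */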
222
223 /*
224 * Calendar routines.
225 */
226
227 /*
228 * clock_get_calendar_microtime:
229 *
230 * Returns the current calendar value,
231 * microseconds as the fraction.
232 */
233 void
234 clock_get_calendar_microtime(
235 clock_sec_t *secs,
236 clock_usec_t *microsecs)
237 {
238 uint64_t now;
239 spl_t s;
240
241 s = splclock();
242 clock_lock();
243
244 now = mach_absolute_time();
245
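	/*
	 * If a negative slew is in progress (adjdelta < 0), hold back up
	 * to adjoffset absolute-time units accumulated since adjstart,
	 * keeping the value returned here consistent with the offset
	 * decrement calend_adjust() applies at the end of the period.
	 */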
246 if (clock_calend.adjdelta < 0) {
247 uint32_t t32;
248
249 if (now > clock_calend.adjstart) {
250 t32 = (uint32_t)(now - clock_calend.adjstart);
251
252 if (t32 > clock_calend.adjoffset)
253 now -= clock_calend.adjoffset;
254 else
255 now = clock_calend.adjstart;
256 }
257 }
258
259 now += clock_calend.offset;
260
261 absolutetime_to_microtime(now, secs, microsecs);
262
263 *secs += (clock_sec_t)clock_calend.epoch;
264
265 clock_unlock();
266 splx(s);
267 }
268
269 /*
270 * clock_get_calendar_nanotime:
271 *
272 * Returns the current calendar value,
273 * nanoseconds as the fraction.
274 *
275 * Since we do not have an interface to
276 * set the calendar with resolution greater
277 * than a microsecond, we honor that here.
278 */
279 void
280 clock_get_calendar_nanotime(
281 clock_sec_t *secs,
282 clock_nsec_t *nanosecs)
283 {
284 uint64_t now;
285 spl_t s;
286
287 s = splclock();
288 clock_lock();
289
290 now = mach_absolute_time();
291
292 if (clock_calend.adjdelta < 0) {
293 uint32_t t32;
294
295 if (now > clock_calend.adjstart) {
296 t32 = (uint32_t)(now - clock_calend.adjstart);
297
298 if (t32 > clock_calend.adjoffset)
299 now -= clock_calend.adjoffset;
300 else
301 now = clock_calend.adjstart;
302 }
303 }
304
305 now += clock_calend.offset;
306
307 absolutetime_to_microtime(now, secs, nanosecs);
308 *nanosecs *= NSEC_PER_USEC;
309
310 *secs += (clock_sec_t)clock_calend.epoch;
311
312 clock_unlock();
313 splx(s);
314 }
315
316 /*
317 * clock_gettimeofday:
318 *
319 * Kernel interface for commpage implementation of
320 * gettimeofday() syscall.
321 *
322 * Returns the current calendar value, and updates the
323 * commpage info as appropriate. Because most calls to
324 * gettimeofday() are handled in user mode by the commpage,
325 * this routine should be used infrequently.
326 */
327 void
328 clock_gettimeofday(
329 clock_sec_t *secs,
330 clock_usec_t *microsecs)
331 {
332 uint64_t now;
333 spl_t s;
334
335 s = splclock();
336 clock_lock();
337
338 now = mach_absolute_time();
339
340 if (clock_calend.adjdelta >= 0) {
341 clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
342 }
343 else {
344 uint32_t t32;
345
346 if (now > clock_calend.adjstart) {
347 t32 = (uint32_t)(now - clock_calend.adjstart);
348
349 if (t32 > clock_calend.adjoffset)
350 now -= clock_calend.adjoffset;
351 else
352 now = clock_calend.adjstart;
353 }
354
355 now += clock_calend.offset;
356
357 absolutetime_to_microtime(now, secs, microsecs);
358
359 *secs += (clock_sec_t)clock_calend.epoch;
360 }
361
362 clock_unlock();
363 splx(s);
364 }
365
366 /*
367 * clock_set_calendar_microtime:
368 *
369 * Sets the current calendar value by
370 * recalculating the epoch and offset
371 * from the system clock.
372 *
373 * Also adjusts the boottime to keep the
374 * value consistent, writes the new
375 * calendar value to the platform clock,
376 * and sends calendar change notifications.
377 */
378 void
379 clock_set_calendar_microtime(
380 clock_sec_t secs,
381 clock_usec_t microsecs)
382 {
383 clock_sec_t sys;
384 clock_usec_t microsys;
385 clock_sec_t newsecs;
386 spl_t s;
387
388 	newsecs = (microsecs < (USEC_PER_SEC / 2))? secs: secs + 1;	/* round to nearest second */
389
390 s = splclock();
391 clock_lock();
392
393 commpage_disable_timestamp();
394
395 /*
396 * Calculate the new calendar epoch based on
397 * the new value and the system clock.
398 */
399 clock_get_system_microtime(&sys, &microsys);
400 TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
401
402 /*
403 * Adjust the boottime based on the delta.
404 */
405 clock_boottime += secs - clock_calend.epoch;
406
407 /*
408 * Set the new calendar epoch.
409 */
410 clock_calend.epoch = secs;
411 nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
412
413 /*
414 * Cancel any adjustment in progress.
415 */
416 calend_adjtotal = clock_calend.adjdelta = 0;
417
418 clock_unlock();
419
420 /*
421 * Set the new value for the platform clock.
422 */
423 PESetGMTTimeOfDay(newsecs);
424
425 splx(s);
426
427 /*
428 * Send host notifications.
429 */
430 host_notify_calendar_change();
431
432 #if CONFIG_DTRACE
433 clock_track_calend_nowait();
434 #endif
435 }
436
437 /*
438 * clock_initialize_calendar:
439 *
440 * Set the calendar and related clocks
441 * from the platform clock at boot or
442 * wake event.
443 *
444 * Also sends host notifications.
445 */
446 void
447 clock_initialize_calendar(void)
448 {
449 clock_sec_t sys, secs = PEGetGMTTimeOfDay();
450 clock_usec_t microsys, microsecs = 0;
451 spl_t s;
452
453 s = splclock();
454 clock_lock();
455
456 commpage_disable_timestamp();
457
458 if ((long)secs >= (long)clock_boottime) {
459 /*
460 * Initialize the boot time based on the platform clock.
461 */
462 if (clock_boottime == 0)
463 clock_boottime = secs;
464
465 /*
466 * Calculate the new calendar epoch based on
467 * the platform clock and the system clock.
468 */
469 clock_get_system_microtime(&sys, &microsys);
470 TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
471
472 /*
473 * Set the new calendar epoch.
474 */
475 clock_calend.epoch = secs;
476 nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
477
478 /*
479 * Cancel any adjustment in progress.
480 */
481 calend_adjtotal = clock_calend.adjdelta = 0;
482 }
483
484 clock_unlock();
485 splx(s);
486
487 /*
488 * Send host notifications.
489 */
490 host_notify_calendar_change();
491
492 #if CONFIG_DTRACE
493 clock_track_calend_nowait();
494 #endif
495 }
496
497 /*
498 * clock_get_boottime_nanotime:
499 *
500 * Return the boottime, used by sysctl.
501 */
502 void
503 clock_get_boottime_nanotime(
504 clock_sec_t *secs,
505 clock_nsec_t *nanosecs)
506 {
507 spl_t s;
508
509 s = splclock();
510 clock_lock();
511
512 *secs = (clock_sec_t)clock_boottime;
513 *nanosecs = 0;
514
515 clock_unlock();
516 splx(s);
517 }
518
519 /*
520 * clock_adjtime:
521 *
522 * Interface to adjtime() syscall.
523 *
524 * Calculates adjustment variables and
525 * initiates adjustment.
526 */
527 void
528 clock_adjtime(
529 long *secs,
530 int *microsecs)
531 {
532 uint32_t interval;
533 spl_t s;
534
535 s = splclock();
536 clock_lock();
537
538 interval = calend_set_adjustment(secs, microsecs);
539 if (interval != 0) {
540 calend_adjdeadline = mach_absolute_time() + interval;
541 if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
542 calend_adjactive++;
543 }
544 else
545 if (timer_call_cancel(&calend_adjcall))
546 calend_adjactive--;
547
548 clock_unlock();
549 splx(s);
550 }
551
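/*
 * calend_set_adjustment:
 *
 *	Compute the per-period slew (clock_calend.adjdelta/adjoffset) and
 *	the total outstanding adjustment (calend_adjtotal) for a new
 *	adjtime() request, returning the timer interval to use for the
 *	adjustment callout (0 if no adjustment remains).  The previously
 *	outstanding adjustment is handed back through the in/out
 *	secs/microsecs arguments.
 *
 *	Called with the clock lock held.
 */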
552 static uint32_t
553 calend_set_adjustment(
554 long *secs,
555 int *microsecs)
556 {
557 uint64_t now, t64;
558 int64_t total, ototal;
559 uint32_t interval = 0;
560
561 total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;
562
563 commpage_disable_timestamp();
564
565 now = mach_absolute_time();
566
567 ototal = calend_adjtotal;
568
569 if (total != 0) {
570 int32_t delta = calend_adjskew;
571
572 if (total > 0) {
573 if (total > calend_adjbig)
574 delta *= 10;
575 if (delta > total)
576 delta = (int32_t)total;
577
578 nanoseconds_to_absolutetime((uint64_t)delta, &t64);
579 clock_calend.adjoffset = (uint32_t)t64;
580 }
581 else {
582 if (total < -calend_adjbig)
583 delta *= 10;
584 delta = -delta;
585 if (delta < total)
586 delta = (int32_t)total;
587
588 clock_calend.adjstart = now;
589
590 nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
591 clock_calend.adjoffset = (uint32_t)t64;
592 }
593
594 calend_adjtotal = total;
595 clock_calend.adjdelta = delta;
596
597 interval = calend_adjinterval;
598 }
599 else
600 calend_adjtotal = clock_calend.adjdelta = 0;
601
602 if (ototal != 0) {
603 *secs = (long)(ototal / NSEC_PER_SEC);
604 *microsecs = (int)((ototal % NSEC_PER_SEC) / NSEC_PER_USEC);
605 }
606 else
607 *secs = *microsecs = 0;
608
609 #if CONFIG_DTRACE
610 clock_track_calend_nowait();
611 #endif
612
613 return (interval);
614 }
615
616 static void
617 calend_adjust_call(void)
618 {
619 uint32_t interval;
620 spl_t s;
621
622 s = splclock();
623 clock_lock();
624
625 if (--calend_adjactive == 0) {
626 interval = calend_adjust();
627 if (interval != 0) {
628 clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);
629
630 if (!timer_call_enter(&calend_adjcall, calend_adjdeadline))
631 calend_adjactive++;
632 }
633 }
634
635 clock_unlock();
636 splx(s);
637 }
638
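/*
 * calend_adjust:
 *
 *	Apply one adjustment period's worth of slew to the calendar
 *	offset, shrinking calend_adjtotal toward zero and recomputing the
 *	per-period delta for the final, partial period.  Returns the
 *	interval for the next period, or 0 once the adjustment is complete.
 *
 *	Called with the clock lock held.
 */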
639 static uint32_t
640 calend_adjust(void)
641 {
642 uint64_t now, t64;
643 int32_t delta;
644 uint32_t interval = 0;
645
646 commpage_disable_timestamp();
647
648 now = mach_absolute_time();
649
650 delta = clock_calend.adjdelta;
651
652 if (delta > 0) {
653 clock_calend.offset += clock_calend.adjoffset;
654
655 calend_adjtotal -= delta;
656 if (delta > calend_adjtotal) {
657 clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;
658
659 nanoseconds_to_absolutetime((uint64_t)delta, &t64);
660 clock_calend.adjoffset = (uint32_t)t64;
661 }
662 }
663 else
664 if (delta < 0) {
665 clock_calend.offset -= clock_calend.adjoffset;
666
667 calend_adjtotal -= delta;
668 if (delta < calend_adjtotal) {
669 clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;
670
671 nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
672 clock_calend.adjoffset = (uint32_t)t64;
673 }
674
675 if (clock_calend.adjdelta != 0)
676 clock_calend.adjstart = now;
677 }
678
679 if (clock_calend.adjdelta != 0)
680 interval = calend_adjinterval;
681
682 #if CONFIG_DTRACE
683 clock_track_calend_nowait();
684 #endif
685
686 return (interval);
687 }
688
689 /*
690 * clock_wakeup_calendar:
691 *
692 * Interface to power management, used
693 * to initiate the reset of the calendar
694 * on wake from sleep event.
695 */
696 void
697 clock_wakeup_calendar(void)
698 {
699 thread_call_enter(&calend_wakecall);
700 }
701
702 /*
703 * Wait / delay routines.
704 */
705 static void
706 mach_wait_until_continue(
707 __unused void *parameter,
708 wait_result_t wresult)
709 {
710 thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
711 /*NOTREACHED*/
712 }
713
714 kern_return_t
715 mach_wait_until_trap(
716 struct mach_wait_until_trap_args *args)
717 {
718 uint64_t deadline = args->deadline;
719 wait_result_t wresult;
720
721 wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
722 if (wresult == THREAD_WAITING)
723 wresult = thread_block(mach_wait_until_continue);
724
725 return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
726 }
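
/*
 * The corresponding user-level call is mach_wait_until(), e.g. (sketch;
 * delta_abs stands for some interval in absolute-time units):
 *
 *	uint64_t deadline = mach_absolute_time() + delta_abs;
 *	kern_return_t kr = mach_wait_until(deadline);
 *
 * which blocks the calling thread until the absolute deadline passes,
 * returning KERN_ABORTED if the wait was interrupted.
 */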
727
728 void
729 clock_delay_until(
730 uint64_t deadline)
731 {
732 uint64_t now = mach_absolute_time();
733
734 if (now >= deadline)
735 return;
736
737 if ( (deadline - now) < (8 * sched_cswtime) ||
738 get_preemption_level() != 0 ||
739 ml_get_interrupts_enabled() == FALSE )
740 machine_delay_until(deadline);
741 else {
742 assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);
743
744 thread_block(THREAD_CONTINUE_NULL);
745 }
746 }
747
748 void
749 delay_for_interval(
750 uint32_t interval,
751 uint32_t scale_factor)
752 {
753 uint64_t end;
754
755 clock_interval_to_deadline(interval, scale_factor, &end);
756
757 clock_delay_until(end);
758 }
759
760 void
761 delay(
762 int usec)
763 {
764 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
765 }
766
767 /*
768 * Miscellaneous routines.
769 */
770 void
771 clock_interval_to_deadline(
772 uint32_t interval,
773 uint32_t scale_factor,
774 uint64_t *result)
775 {
776 uint64_t abstime;
777
778 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
779
780 *result = mach_absolute_time() + abstime;
781 }
782
783 void
784 clock_absolutetime_interval_to_deadline(
785 uint64_t abstime,
786 uint64_t *result)
787 {
788 *result = mach_absolute_time() + abstime;
789 }
790
791 void
792 clock_get_uptime(
793 uint64_t *result)
794 {
795 *result = mach_absolute_time();
796 }
797
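/*
 * clock_deadline_for_periodic_event:
 *
 *	Advance *deadline by one interval.  If that still lies in the past
 *	relative to the supplied (and then the current) absolute time, the
 *	deadline is re-based to one interval from now, so a periodic event
 *	that has fallen behind skips ahead rather than firing repeatedly.
 */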
798 void
799 clock_deadline_for_periodic_event(
800 uint64_t interval,
801 uint64_t abstime,
802 uint64_t *deadline)
803 {
804 assert(interval != 0);
805
806 *deadline += interval;
807
808 if (*deadline <= abstime) {
809 *deadline = abstime + interval;
810 abstime = mach_absolute_time();
811
812 if (*deadline <= abstime)
813 *deadline = abstime + interval;
814 }
815 }
816
817 #if CONFIG_DTRACE
818
819 /*
820 * clock_get_calendar_nanotime_nowait
821 *
822 * Description: Non-blocking version of clock_get_calendar_nanotime()
823 *
824 * Notes: This function operates by separately tracking calendar time
825 * updates using a two element structure to copy the calendar
826 * state, which may be asynchronously modified. It utilizes
827 * barrier instructions in the tracking process and in the local
828 * stable snapshot process in order to ensure that a consistent
829 * snapshot is used to perform the calculation.
830 */
831 void
832 clock_get_calendar_nanotime_nowait(
833 clock_sec_t *secs,
834 clock_nsec_t *nanosecs)
835 {
836 int i = 0;
837 uint64_t now;
838 struct unlocked_clock_calend stable;
839
840 for (;;) {
841 stable = flipflop[i]; /* take snapshot */
842
843 /*
844 		 * Use a barrier instruction to ensure atomicity.  We AND
845 * off the "in progress" bit to get the current generation
846 * count.
847 */
848 (void)hw_atomic_and(&stable.gen, ~(uint32_t)1);
849
850 /*
851 		 * If an update _is_ in progress, the generation count will be
852 		 * off by one; if it _was_ in progress, it will be off by two;
853 		 * and if we caught it at a good time, it will be equal (and
854 		 * our snapshot is therefore stable).
855 */
856 if (flipflop[i].gen == stable.gen)
857 break;
858
859 		/* Switch to the other element of the flipflop, and try again. */
860 i ^= 1;
861 }
862
863 now = mach_absolute_time();
864
865 if (stable.calend.adjdelta < 0) {
866 uint32_t t32;
867
868 if (now > stable.calend.adjstart) {
869 t32 = (uint32_t)(now - stable.calend.adjstart);
870
871 if (t32 > stable.calend.adjoffset)
872 now -= stable.calend.adjoffset;
873 else
874 now = stable.calend.adjstart;
875 }
876 }
877
878 now += stable.calend.offset;
879
880 absolutetime_to_microtime(now, secs, nanosecs);
881 *nanosecs *= NSEC_PER_USEC;
882
883 *secs += (clock_sec_t)stable.calend.epoch;
884 }
885
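/*
 * clock_track_calend_nowait:
 *
 *	Writer side of the unlocked flipflop: called whenever clock_calend
 *	is changed, it publishes a copy into both flipflop elements,
 *	setting the generation low bit before the copy and clearing it (by
 *	adding 1) afterwards, so that lock-free readers can detect an
 *	in-progress or intervening update.
 */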
886 static void
887 clock_track_calend_nowait(void)
888 {
889 int i;
890
891 for (i = 0; i < 2; i++) {
892 struct clock_calend tmp = clock_calend;
893
894 /*
895 		 * Set the low bit of the generation count; since we use a
896 * barrier instruction to do this, we are guaranteed that this
897 * will flag an update in progress to an async caller trying
898 * to examine the contents.
899 */
900 (void)hw_atomic_or(&flipflop[i].gen, 1);
901
902 flipflop[i].calend = tmp;
903
904 /*
905 * Increment the generation count to clear the low bit to
906 * signal completion. If a caller compares the generation
907 * count after taking a copy while in progress, the count
908 * will be off by two.
909 */
910 (void)hw_atomic_add(&flipflop[i].gen, 1);
911 }
912 }
913
914 #endif /* CONFIG_DTRACE */