/***********************************************************************
 *                                                                     *
 * Copyright (c) David L. Mills 1993-2001                              *
 *                                                                     *
 * Permission to use, copy, modify, and distribute this software and   *
 * its documentation for any purpose and without fee is hereby         *
 * granted, provided that the above copyright notice appears in all    *
 * copies and that both the copyright notice and this permission       *
 * notice appear in supporting documentation, and that the name        *
 * University of Delaware not be used in advertising or publicity      *
 * pertaining to distribution of the software without specific,        *
 * written prior permission. The University of Delaware makes no       *
 * representations about the suitability of this software for any      *
 * purpose. It is provided "as is" without express or implied          *
 * warranty.                                                           *
 *                                                                     *
 **********************************************************************/
/*
 * Adapted from the original sources for FreeBSD and timecounters by:
 *	Poul-Henning Kamp <phk@FreeBSD.org>.
 *
 * The 32bit version of the "LP" macros seems a bit past its "sell by"
 * date so I have retained only the 64bit version and included it directly
 * in this file.
 *
 * Only minor changes done to interface with the timecounters over in
 * sys/kern/kern_clock.c. Some of the comments below may be (even more)
 * confusing and/or plain wrong in that context.
 */
/*
 * Copyright (c) 2017 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/timex.h>
#include <kern/clock.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>
#include <kern/thread_call.h>
#include <kern/timer_call.h>
#include <machine/machine_routines.h>
#include <security/mac_framework.h>
#include <IOKit/IOBSD.h>
typedef int64_t l_fp;

#define L_ADD(v, u)     ((v) += (u))
#define L_SUB(v, u)     ((v) -= (u))
#define L_ADDHI(v, a)   ((v) += (int64_t)(a) << 32)
#define L_NEG(v)        ((v) = -(v))
#define L_RSHIFT(v, n) \
    do { \
        if ((v) < 0) \
            (v) = -(-(v) >> (n)); \
        else \
            (v) = (v) >> (n); \
    } while (0)
#define L_MPY(v, a)     ((v) *= (a))
#define L_CLR(v)        ((v) = 0)
#define L_ISNEG(v)      ((v) < 0)
#define L_LINT(v, a) \
    do { \
        if ((a) > 0) \
            ((v) = (int64_t)(a) << 32); \
        else \
            ((v) = -((int64_t)(-(a)) << 32)); \
    } while (0)
#define L_GINT(v)       ((v) < 0 ? -(-(v) >> 32) : (v) >> 32)
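/*
 * Editor's illustrative sketch (not part of the original source): the macros
 * above treat an l_fp as <32-bit signed integer part>.<32-bit fraction>.
 *
 *	l_fp off;
 *	L_LINT(off, 250000000);    // off = 250 ms, i.e. 250000000LL << 32
 *	L_RSHIFT(off, 1);          // halve it: off now represents 125 ms
 *	long ns = L_GINT(off);     // ns == 125000000, fraction discarded
 */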
/*
 * Generic NTP kernel interface
 *
 * These routines constitute the Network Time Protocol (NTP) interfaces
 * for user and daemon application programs. The ntp_gettime() routine
 * provides the time, maximum error (synch distance) and estimated error
 * (dispersion) to client user application programs. The ntp_adjtime()
 * routine is used by the NTP daemon to adjust the calendar clock to an
 * externally derived time. The time offset and related variables set by
 * this routine are used by other routines in this module to adjust the
 * phase and frequency of the clock discipline loop which controls the
 * system clock.
 *
 * When the kernel time is reckoned directly in nanoseconds (NTP_NANO
 * defined), the time at each tick interrupt is derived directly from
 * the kernel time variable. When the kernel time is reckoned in
 * microseconds (NTP_NANO undefined), the time is derived from the
 * kernel time variable together with a variable representing the
 * leftover nanoseconds at the last tick interrupt. In either case, the
 * current nanosecond time is reckoned from these values plus an
 * interpolated value derived by the clock routines in another
 * architecture-specific module. The interpolation can use either a
 * dedicated counter or a processor cycle counter (PCC) implemented in
 * some architectures.
 */
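/*
 * Editor's note: a minimal userspace sketch of how these interfaces are
 * typically driven (illustrative only; assumes the ntp_adjtime() libc wrapper
 * and <sys/timex.h> definitions, not part of this file):
 *
 *	struct timex tx = { .modes = 0 };   // modes == 0: query, change nothing
 *	int state = ntp_adjtime(&tx);       // returns TIME_OK, TIME_ERROR, ...
 *	// tx.offset, tx.freq, tx.maxerror, tx.esterror now reflect the clock
 *	// discipline state maintained by the routines below.
 */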
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The nanosecond clock discipline uses two variable types, time
 * variables and frequency variables. Both types are represented as 64-
 * bit fixed-point quantities with the decimal point between two 32-bit
 * halves. On a 32-bit machine, each half is represented as a single
 * word and mathematical operations are done using multiple-precision
 * arithmetic. On a 64-bit machine, ordinary computer arithmetic is
 * used.
 *
 * A time variable is a signed 64-bit fixed-point number in ns and
 * fraction. It represents the remaining time offset to be amortized
 * over succeeding tick interrupts. The maximum time offset is about
 * 0.5 s and the resolution is about 2.3e-10 ns.
 *
 *                          1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s|                            ns                            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           fraction                             |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * A frequency variable is a signed 64-bit fixed-point number in ns/s
 * and fraction. It represents the ns and fraction to be added to the
 * kernel time variable at each second. The maximum frequency offset is
 * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
 *
 *                          1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s s s s s s s s s s s|             ns/s                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           fraction                             |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
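/*
 * Editor's worked example (illustrative): a frequency correction of +100 ppm
 * is +100000 ns/s, stored in a frequency variable as 100000LL << 32; a phase
 * correction of -2.5 ms is stored in a time variable as -(2500000LL << 32).
 */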
static int time_state = TIME_OK;                /* clock state machine */
int time_status = STA_UNSYNC;                   /* clock status bits */
static long time_tai;                           /* TAI offset (s) */
static long time_constant;                      /* poll interval (shift) (s) */
static long time_precision = 1;                 /* clock precision (ns) */
static long time_maxerror = MAXPHASE / 1000;    /* maximum error (us) */
static unsigned long last_time_maxerror_update; /* last update of maxerror (s) */
long time_esterror = MAXPHASE / 1000;           /* estimated error (us) */
static long time_reftime;                       /* uptime at last adjustment (s) */
static l_fp time_offset;                        /* time offset (ns) */
static l_fp time_freq;                          /* frequency offset (ns/s) */
static int64_t time_adjtime;                    /* correction from adjtime(2) (usec) */
static int updated;                             /* set when the next-second adjustment may change */
static LCK_GRP_DECLARE(ntp_lock_grp, "ntp_lock");
static LCK_SPIN_DECLARE(ntp_lock, &ntp_lock_grp);

#define NTP_LOCK(enable) \
    enable = ml_set_interrupts_enabled(FALSE); \
    lck_spin_lock(&ntp_lock);

#define NTP_UNLOCK(enable) \
    lck_spin_unlock(&ntp_lock); \
    ml_set_interrupts_enabled(enable);

#define NTP_ASSERT_LOCKED()     LCK_SPIN_ASSERT(&ntp_lock, LCK_ASSERT_OWNED)
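/*
 * Editor's note: typical usage pattern for the locking macros above
 * (illustrative sketch):
 *
 *	boolean_t enable;
 *	NTP_LOCK(enable);    // disable interrupts and take the spinlock
 *	// ... read or update time_status, time_offset, time_freq ...
 *	NTP_UNLOCK(enable);  // drop the lock and restore the interrupt state
 */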
static timer_call_data_t ntp_loop_update;
static uint64_t ntp_loop_deadline;
static uint32_t ntp_loop_active;
static uint32_t ntp_loop_period;
#define NTP_LOOP_PERIOD_INTERVAL (NSEC_PER_SEC) /* 1 second interval */
static void hardupdate(long offset);
static void ntp_gettime1(struct ntptimeval *ntvp);
static bool ntp_is_time_error(int tsl);

static void ntp_loop_update_call(void);
static void refresh_ntp_loop(void);
static void start_ntp_loop(void);
#if DEVELOPMENT || DEBUG
uint32_t g_should_log_clock_adjustments = 0;
SYSCTL_INT(_kern, OID_AUTO, log_clock_adjustments, CTLFLAG_RW | CTLFLAG_LOCKED,
    &g_should_log_clock_adjustments, 0, "enable kernel clock adjustment logging");
#endif
static bool
ntp_is_time_error(int tsl)
{
    if (tsl & (STA_UNSYNC | STA_CLOCKERR)) {
        return true;
    }

    return false;
}
static void
ntp_gettime1(struct ntptimeval *ntvp)
{
    struct timespec atv;

    NTP_ASSERT_LOCKED();

    nanotime(&atv);
    ntvp->time.tv_sec = atv.tv_sec;
    ntvp->time.tv_nsec = atv.tv_nsec;
    if ((unsigned long)atv.tv_sec > last_time_maxerror_update) {
        time_maxerror += (MAXFREQ / 1000) * (atv.tv_sec - last_time_maxerror_update);
        last_time_maxerror_update = atv.tv_sec;
    }
    ntvp->maxerror = time_maxerror;
    ntvp->esterror = time_esterror;
    ntvp->tai = time_tai;
    ntvp->time_state = time_state;

    if (ntp_is_time_error(time_status)) {
        ntvp->time_state = TIME_ERROR;
    }
}
int
ntp_gettime(struct proc *p, struct ntp_gettime_args *uap, __unused int32_t *retval)
{
    struct ntptimeval ntv;
    int error;
    boolean_t enable;

    NTP_LOCK(enable);
    ntp_gettime1(&ntv);
    NTP_UNLOCK(enable);

    if (IS_64BIT_PROCESS(p)) {
        struct user64_ntptimeval user_ntv = {};
        user_ntv.time.tv_sec = ntv.time.tv_sec;
        user_ntv.time.tv_nsec = ntv.time.tv_nsec;
        user_ntv.maxerror = ntv.maxerror;
        user_ntv.esterror = ntv.esterror;
        user_ntv.tai = ntv.tai;
        user_ntv.time_state = ntv.time_state;
        error = copyout(&user_ntv, uap->ntvp, sizeof(user_ntv));
    } else {
        struct user32_ntptimeval user_ntv = {};
        user_ntv.time.tv_sec = (user32_long_t)ntv.time.tv_sec;
        user_ntv.time.tv_nsec = (user32_long_t)ntv.time.tv_nsec;
        user_ntv.maxerror = (user32_long_t)ntv.maxerror;
        user_ntv.esterror = (user32_long_t)ntv.esterror;
        user_ntv.tai = (user32_long_t)ntv.tai;
        user_ntv.time_state = ntv.time_state;
        error = copyout(&user_ntv, uap->ntvp, sizeof(user_ntv));
    }

    if (error) {
        return error;
    }

    return ntv.time_state;
}
int
ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval)
{
    struct timex ntv = {};
    long freq;
    unsigned int modes;
    int error, ret = 0;
    clock_sec_t sec;
    clock_usec_t microsecs;
    boolean_t enable;

    if (IS_64BIT_PROCESS(p)) {
        struct user64_timex user_ntv;
        error = copyin(uap->tp, &user_ntv, sizeof(user_ntv));
        ntv.modes = user_ntv.modes;
        ntv.offset = (long)user_ntv.offset;
        ntv.freq = (long)user_ntv.freq;
        ntv.maxerror = (long)user_ntv.maxerror;
        ntv.esterror = (long)user_ntv.esterror;
        ntv.status = user_ntv.status;
        ntv.constant = (long)user_ntv.constant;
        ntv.precision = (long)user_ntv.precision;
        ntv.tolerance = (long)user_ntv.tolerance;
    } else {
        struct user32_timex user_ntv;
        error = copyin(uap->tp, &user_ntv, sizeof(user_ntv));
        ntv.modes = user_ntv.modes;
        ntv.offset = user_ntv.offset;
        ntv.freq = user_ntv.freq;
        ntv.maxerror = user_ntv.maxerror;
        ntv.esterror = user_ntv.esterror;
        ntv.status = user_ntv.status;
        ntv.constant = user_ntv.constant;
        ntv.precision = user_ntv.precision;
        ntv.tolerance = user_ntv.tolerance;
    }
    if (error) {
        return error;
    }

#if DEVELOPMENT || DEBUG
    if (g_should_log_clock_adjustments) {
        os_log(OS_LOG_DEFAULT, "%s: BEFORE modes %u offset %ld freq %ld status %d constant %ld time_adjtime %lld\n",
            __func__, ntv.modes, ntv.offset, ntv.freq, ntv.status, ntv.constant, time_adjtime);
    }
#endif
    /*
     * Update selected clock variables - only the superuser can
     * change anything. Note that there is no error checking here on
     * the assumption the superuser should know what it is doing.
     * Note that either the time constant or TAI offset are loaded
     * from the ntv.constant member, depending on the mode bits. If
     * the STA_PLL bit in the status word is cleared, the state and
     * status words are reset to the initial values at boot.
     */
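    /*
     * Editor's example (illustrative): a daemon that wants to engage the PLL
     * and hand the kernel a 1.5 ms phase correction would typically populate
     * the request as:
     *
     *	ntv.modes  = MOD_STATUS | MOD_NANO | MOD_OFFSET;
     *	ntv.status = STA_PLL;
     *	ntv.offset = 1500000;    // in ns, because MOD_NANO sets STA_NANO
     */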
    /* Check that this task is entitled to set the time or it is root */
    if (!IOTaskHasEntitlement(current_task(), SETTIME_ENTITLEMENT)) {
        error = mac_system_check_settime(kauth_cred_get());
        if (error) {
            return error;
        }
        if ((error = priv_check_cred(kauth_cred_get(), PRIV_ADJTIME, 0))) {
            return error;
        }
    }

    NTP_LOCK(enable);

    modes = ntv.modes;
    if (modes & MOD_MAXERROR) {
        clock_gettimeofday(&sec, &microsecs);
        time_maxerror = ntv.maxerror;
        last_time_maxerror_update = sec;
    }
    if (modes & MOD_ESTERROR) {
        time_esterror = ntv.esterror;
    }
    if (modes & MOD_STATUS) {
        if (time_status & STA_PLL && !(ntv.status & STA_PLL)) {
            time_state = TIME_OK;
            time_status = STA_UNSYNC;
        }
        time_status &= STA_RONLY;
        time_status |= ntv.status & ~STA_RONLY;
        /*
         * Neither PPS nor leap seconds are supported.
         * Filter out unsupported bits.
         */
        time_status &= STA_SUPPORTED;
    }
    if (modes & MOD_TIMECONST) {
        if (ntv.constant < 0) {
            time_constant = 0;
        } else if (ntv.constant > MAXTC) {
            time_constant = MAXTC;
        } else {
            time_constant = ntv.constant;
        }
    }
    if (modes & MOD_TAI) {
        if (ntv.constant > 0) {
            time_tai = ntv.constant;
        }
    }
    if (modes & MOD_NANO) {
        time_status |= STA_NANO;
    }
    if (modes & MOD_MICRO) {
        time_status &= ~STA_NANO;
    }
    if (modes & MOD_CLKB) {
        time_status |= STA_CLK;
    }
    if (modes & MOD_CLKA) {
        time_status &= ~STA_CLK;
    }
    if (modes & MOD_FREQUENCY) {
        freq = (ntv.freq * 1000LL) >> 16;
        if (freq > MAXFREQ) {
            L_LINT(time_freq, MAXFREQ);
        } else if (freq < -MAXFREQ) {
            L_LINT(time_freq, -MAXFREQ);
        } else {
            /*
             * ntv.freq is [PPM * 2^16] = [us/s * 2^16]
             * time_freq is [ns/s * 2^32]
             */
            time_freq = ntv.freq * 1000LL * 65536LL;
        }
    }
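    /*
     * Editor's worked example for the MOD_FREQUENCY scaling above
     * (illustrative): ntv.freq == 6553600 is 6553600 / 65536 = 100 ppm
     * = 100000 ns/s, so time_freq becomes 6553600 * 1000 * 65536,
     * i.e. 100000LL << 32.
     */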
    if (modes & MOD_OFFSET) {
        if (time_status & STA_NANO) {
            hardupdate(ntv.offset);
        } else {
            hardupdate(ntv.offset * 1000);
        }
    }

    ret = ntp_is_time_error(time_status) ? TIME_ERROR : time_state;
#if DEVELOPMENT || DEBUG
    if (g_should_log_clock_adjustments) {
        os_log(OS_LOG_DEFAULT, "%s: AFTER modes %u offset %lld freq %lld status %d constant %ld time_adjtime %lld\n",
            __func__, modes, time_offset, time_freq, time_status, time_constant, time_adjtime);
    }
#endif
    /*
     * Retrieve all clock variables. Note that the TAI offset is
     * returned only by ntp_gettime();
     */
    if (IS_64BIT_PROCESS(p)) {
        struct user64_timex user_ntv = {};

        user_ntv.modes = modes;
        if (time_status & STA_NANO) {
            user_ntv.offset = L_GINT(time_offset);
        } else {
            user_ntv.offset = L_GINT(time_offset) / 1000;
        }
        if (time_freq > 0) {
            user_ntv.freq = L_GINT(((int64_t)(time_freq / 1000LL)) << 16);
        } else {
            user_ntv.freq = -L_GINT(((int64_t)(-(time_freq) / 1000LL)) << 16);
        }
        user_ntv.maxerror = time_maxerror;
        user_ntv.esterror = time_esterror;
        user_ntv.status = time_status;
        user_ntv.constant = time_constant;
        if (time_status & STA_NANO) {
            user_ntv.precision = time_precision;
        } else {
            user_ntv.precision = time_precision / 1000;
        }
        user_ntv.tolerance = MAXFREQ * SCALE_PPM;

        /* unlock before copyout */
        NTP_UNLOCK(enable);

        error = copyout(&user_ntv, uap->tp, sizeof(user_ntv));
    } else {
        struct user32_timex user_ntv = {};

        user_ntv.modes = modes;
        if (time_status & STA_NANO) {
            user_ntv.offset = L_GINT(time_offset);
        } else {
            user_ntv.offset = L_GINT(time_offset) / 1000;
        }
        if (time_freq > 0) {
            user_ntv.freq = L_GINT((time_freq / 1000LL) << 16);
        } else {
            user_ntv.freq = -L_GINT((-(time_freq) / 1000LL) << 16);
        }
        user_ntv.maxerror = (user32_long_t)time_maxerror;
        user_ntv.esterror = (user32_long_t)time_esterror;
        user_ntv.status = time_status;
        user_ntv.constant = (user32_long_t)time_constant;
        if (time_status & STA_NANO) {
            user_ntv.precision = (user32_long_t)time_precision;
        } else {
            user_ntv.precision = (user32_long_t)(time_precision / 1000);
        }
        user_ntv.tolerance = MAXFREQ * SCALE_PPM;

        /* unlock before copyout */
        NTP_UNLOCK(enable);

        error = copyout(&user_ntv, uap->tp, sizeof(user_ntv));
    }

    if (error == 0) {
        *retval = ret;
    }

    return error;
}
/*
 * Compute the adjustment to add to the next second.
 */
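/*
 * Editor's note (illustrative): the value returned through *adjustment is an
 * l_fp in ns/s, so e.g. *adjustment == (500000LL << 32) asks the caller to
 * slew the calendar clock by 500 us over the next second.
 */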
void
ntp_update_second(int64_t *adjustment, clock_sec_t secs)
{
    int tickrate;
    l_fp time_adj;
    l_fp ftemp, old_time_adjtime, old_offset;

    if (secs > last_time_maxerror_update) {
        time_maxerror += (MAXFREQ / 1000) * (secs - last_time_maxerror_update);
        last_time_maxerror_update = secs;
    }

    old_offset = time_offset;
    old_time_adjtime = time_adjtime;

    ftemp = time_offset;
    L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
    time_adj = ftemp;
    L_SUB(time_offset, ftemp);
    L_ADD(time_adj, time_freq);
    /*
     * Apply any correction from adjtime. If more than one second
     * off we slew at a rate of 5ms/s (5000 PPM) else 500us/s (500PPM)
     * until the last second is slewed the final < 500 usecs.
     */
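    /*
     * Editor's worked example (illustrative): with time_adjtime == 2500000 us
     * outstanding, the clamp below picks tickrate = 5000, so 5 ms is folded
     * into this second's adjustment and 2495000 us remain for later seconds.
     */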
    if (time_adjtime != 0) {
        if (time_adjtime > 1000000) {
            tickrate = 5000;
        } else if (time_adjtime < -1000000) {
            tickrate = -5000;
        } else if (time_adjtime > 500) {
            tickrate = 500;
        } else if (time_adjtime < -500) {
            tickrate = -500;
        } else {
            tickrate = (int)time_adjtime;
        }
        time_adjtime -= tickrate;
        L_LINT(ftemp, tickrate * 1000);
        L_ADD(time_adj, ftemp);
    }

    if (old_time_adjtime || ((time_offset || old_offset) && (time_offset != old_offset))) {
        updated = 1;
    } else {
        updated = 0;
    }
#if DEVELOPMENT || DEBUG
    if (g_should_log_clock_adjustments) {
        int64_t nano = (time_adj > 0) ? time_adj >> 32 : -((-time_adj) >> 32);
        int64_t frac = (time_adj > 0) ? ((uint32_t) time_adj) : -((uint32_t) (-time_adj));

        os_log(OS_LOG_DEFAULT, "%s:AFTER offset %lld (%lld) freq %lld status %d "
            "constant %ld time_adjtime %lld nano %lld frac %lld adj %lld\n",
            __func__, time_offset, (time_offset > 0) ? time_offset >> 32 : -((-time_offset) >> 32),
            time_freq, time_status, time_constant, time_adjtime, nano, frac, time_adj);
    }
#endif

    *adjustment = time_adj;
}
/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() when an offset is provided
 * to update the local clock phase and frequency.
 * The implementation is of an adaptive-parameter, hybrid
 * phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP).
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 256 s, operation should be in phase-lock mode,
 * where the loop is disciplined to phase. For update intervals greater
 * than 1024 s, operation should be in frequency-lock mode, where the
 * loop is disciplined to frequency. Between 256 s and 1024 s, the mode
 * is selected by the STA_MODE status bit.
 */
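/*
 * Editor's worked example (illustrative, assuming SHIFT_PLL == 4 as in the
 * reference NTP kernel model): with time_constant == 0 the PLL path below
 * right-shifts the clamped offset by (SHIFT_PLL + 2 + time_constant) << 1
 * == 12 bits, so one second after the previous update a 100 ms (100000000 ns)
 * offset nudges time_freq by roughly 100000000 >> 12 ~= 24414 ns/s (~24 ppm).
 */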
static void
hardupdate(long offset)
{
    long time_monitor;
    long mtemp;
    l_fp ftemp;
    clock_sec_t time_uptime;

    if (!(time_status & STA_PLL)) {
        return;
    }

    if (offset > MAXPHASE) {
        time_monitor = MAXPHASE;
    } else if (offset < -MAXPHASE) {
        time_monitor = -MAXPHASE;
    } else {
        time_monitor = offset;
    }
    L_LINT(time_offset, time_monitor);

    clock_get_calendar_uptime(&time_uptime);

    if (time_status & STA_FREQHOLD || time_reftime == 0) {
        time_reftime = time_uptime;
    }

    mtemp = time_uptime - time_reftime;
    L_LINT(ftemp, time_monitor);
    L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
    L_MPY(ftemp, mtemp);
    L_ADD(time_freq, ftemp);
    time_status &= ~STA_MODE;
    if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
        MAXSEC)) {
        L_LINT(ftemp, (time_monitor << 4) / mtemp);
        L_RSHIFT(ftemp, SHIFT_FLL + 4);
        L_ADD(time_freq, ftemp);
        time_status |= STA_MODE;
    }
    time_reftime = time_uptime;

    if (L_GINT(time_freq) > MAXFREQ) {
        L_LINT(time_freq, MAXFREQ);
    } else if (L_GINT(time_freq) < -MAXFREQ) {
        L_LINT(time_freq, -MAXFREQ);
    }
}
int
kern_adjtime(struct timeval *delta)
{
    struct timeval atv;
    int64_t ltr, ltw;
    boolean_t enable;

    if (delta == NULL) {
        return EINVAL;
    }

    ltw = (int64_t)delta->tv_sec * (int64_t)USEC_PER_SEC + delta->tv_usec;

    NTP_LOCK(enable);
    ltr = time_adjtime;
    time_adjtime = ltw;
#if DEVELOPMENT || DEBUG
    if (g_should_log_clock_adjustments) {
        os_log(OS_LOG_DEFAULT, "%s:AFTER offset %lld freq %lld status %d constant %ld time_adjtime %lld\n",
            __func__, time_offset, time_freq, time_status, time_constant, time_adjtime);
    }
#endif
    NTP_UNLOCK(enable);

    atv.tv_sec = (__darwin_time_t)(ltr / (int64_t)USEC_PER_SEC);
    atv.tv_usec = ltr % (int64_t)USEC_PER_SEC;
    if (atv.tv_usec < 0) {
        atv.tv_usec += (suseconds_t)USEC_PER_SEC;
        atv.tv_sec--;
    }

    *delta = atv;

    start_ntp_loop();

    return 0;
}
int
adjtime(struct proc *p, struct adjtime_args *uap, __unused int32_t *retval)
{
    struct timeval atv;
    int error;

    /* Check that this task is entitled to set the time or it is root */
    if (!IOTaskHasEntitlement(current_task(), SETTIME_ENTITLEMENT)) {
        error = mac_system_check_settime(kauth_cred_get());
        if (error) {
            return error;
        }
        if ((error = priv_check_cred(kauth_cred_get(), PRIV_ADJTIME, 0))) {
            return error;
        }
    }

    if (IS_64BIT_PROCESS(p)) {
        struct user64_timeval user_atv;
        error = copyin(uap->delta, &user_atv, sizeof(user_atv));
        atv.tv_sec = (__darwin_time_t)user_atv.tv_sec;
        atv.tv_usec = user_atv.tv_usec;
    } else {
        struct user32_timeval user_atv;
        error = copyin(uap->delta, &user_atv, sizeof(user_atv));
        atv.tv_sec = user_atv.tv_sec;
        atv.tv_usec = user_atv.tv_usec;
    }
    if (error) {
        return error;
    }

    kern_adjtime(&atv);

    if (uap->olddelta) {
        if (IS_64BIT_PROCESS(p)) {
            struct user64_timeval user_atv = {};
            user_atv.tv_sec = atv.tv_sec;
            user_atv.tv_usec = atv.tv_usec;
            error = copyout(&user_atv, uap->olddelta, sizeof(user_atv));
        } else {
            struct user32_timeval user_atv = {};
            user_atv.tv_sec = (user32_time_t)atv.tv_sec;
            user_atv.tv_usec = atv.tv_usec;
            error = copyout(&user_atv, uap->olddelta, sizeof(user_atv));
        }
    }

    return error;
}
static void
ntp_loop_update_call(void)
{
    boolean_t enable;

    NTP_LOCK(enable);
    /*
     * Update the scale factor used by clock_calend.
     * NOTE: clock_update_calendar will call ntp_update_second to compute the next adjustment.
     */
    clock_update_calendar();
    refresh_ntp_loop();
    NTP_UNLOCK(enable);
}

static void
refresh_ntp_loop(void)
{
    NTP_ASSERT_LOCKED();
    if (--ntp_loop_active == 0) {
        /*
         * Activate the timer only if the next second adjustment might change.
         * ntp_update_second checks it and sets updated accordingly.
         */
        if (updated) {
            clock_deadline_for_periodic_event(ntp_loop_period, mach_absolute_time(), &ntp_loop_deadline);
            if (!timer_call_enter(&ntp_loop_update, ntp_loop_deadline, TIMER_CALL_SYS_CRITICAL)) {
                ntp_loop_active++;
            }
        }
    }
}
/*
 * This function arms a timer that, each second, calculates the adjustment to
 * provide to clock_calend to scale the time (used by the gettimeofday family
 * of syscalls). The periodic timer stops when the adjustment reaches a
 * stable value.
 */
static void
start_ntp_loop(void)
{
    boolean_t enable;

    NTP_LOCK(enable);

    ntp_loop_deadline = mach_absolute_time() + ntp_loop_period;

    if (!timer_call_enter(&ntp_loop_update, ntp_loop_deadline, TIMER_CALL_SYS_CRITICAL)) {
        ntp_loop_active++;
    }

    NTP_UNLOCK(enable);
}
void
ntp_init(void)
{
    uint64_t abstime;

    nanoseconds_to_absolutetime(NTP_LOOP_PERIOD_INTERVAL, &abstime);
    ntp_loop_period = (uint32_t)abstime;
    timer_call_setup(&ntp_loop_update, (timer_call_func_t)ntp_loop_update_call, NULL);
}