/***********************************************************************
 * Copyright (c) David L. Mills 1993-2001
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that the above copyright notice appears in all
 * copies and that both the copyright notice and this permission
 * notice appear in supporting documentation, and that the name
 * University of Delaware not be used in advertising or publicity
 * pertaining to distribution of the software without specific,
 * written prior permission. The University of Delaware makes no
 * representations about the suitability of this software for any
 * purpose. It is provided "as is" without express or implied
 * warranty.
 **********************************************************************/
/*
 * Adapted from the original sources for FreeBSD and timecounters by:
 *      Poul-Henning Kamp <phk@FreeBSD.org>.
 *
 * The 32-bit version of the "LP" macros seems a bit past its "sell by"
 * date, so I have retained only the 64-bit version and included it
 * directly in this file.
 *
 * Only minor changes were done to interface with the timecounters over in
 * sys/kern/kern_clock.c.  Some of the comments below may be (even more)
 * confusing and/or plain wrong in that context.
 */
/*
 * Copyright (c) 2017 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/timex.h>
#include <kern/clock.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>
#include <kern/thread_call.h>
#include <kern/timer_call.h>
#include <machine/machine_routines.h>
#include <security/mac_framework.h>
#include <IOKit/IOBSD.h>
typedef int64_t l_fp;

#define L_ADD(v, u)     ((v) += (u))
#define L_SUB(v, u)     ((v) -= (u))
#define L_ADDHI(v, a)   ((v) += (int64_t)(a) << 32)
#define L_NEG(v)        ((v) = -(v))
#define L_RSHIFT(v, n) \
        do { \
                if ((v) < 0) { \
                        (v) = -(-(v) >> (n)); \
                } else { \
                        (v) = (v) >> (n); \
                } \
        } while (0)
#define L_MPY(v, a)     ((v) *= (a))
#define L_CLR(v)        ((v) = 0)
#define L_ISNEG(v)      ((v) < 0)
#define L_LINT(v, a) \
        do { \
                if ((a) > 0) { \
                        ((v) = (int64_t)(a) << 32); \
                } else { \
                        ((v) = -((int64_t)(-(a)) << 32)); \
                } \
        } while (0)
#define L_GINT(v)       ((v) < 0 ? -(-(v) >> 32) : (v) >> 32)
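/*
 * A minimal usage sketch of the l_fp macros above, treating l_fp as a plain
 * signed 64-bit integer with 32 fraction bits (the assert() is illustrative
 * only):
 */
#if 0   /* example, not compiled */
static void
l_fp_example(void)
{
        int64_t v;

        L_LINT(v, 250);         /* load 250 (ns)  -> v = 250 << 32   */
        L_RSHIFT(v, 1);         /* sign-safe halve -> v = 125 << 32  */
        assert(L_GINT(v) == 125);       /* read back the integer part */
}
#endif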
/*
 * Generic NTP kernel interface
 *
 * These routines constitute the Network Time Protocol (NTP) interfaces
 * for user and daemon application programs. The ntp_gettime() routine
 * provides the time, maximum error (synch distance) and estimated error
 * (dispersion) to client user application programs. The ntp_adjtime()
 * routine is used by the NTP daemon to adjust the calendar clock to an
 * externally derived time. The time offset and related variables set by
 * this routine are used by other routines in this module to adjust the
 * phase and frequency of the clock discipline loop which controls the
 * system clock.
 *
 * When the kernel time is reckoned directly in nanoseconds (NTP_NANO
 * defined), the time at each tick interrupt is derived directly from
 * the kernel time variable. When the kernel time is reckoned in
 * microseconds (NTP_NANO undefined), the time is derived from the
 * kernel time variable together with a variable representing the
 * leftover nanoseconds at the last tick interrupt. In either case, the
 * current nanosecond time is reckoned from these values plus an
 * interpolated value derived by the clock routines in another
 * architecture-specific module. The interpolation can use either a
 * dedicated counter or a processor cycle counter (PCC) implemented in
 * some architectures.
 */
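/*
 * A minimal userspace sketch of how a time daemon might drive these two
 * interfaces (assumes the ntp_adjtime(2)/ntp_gettime(2) wrappers declared in
 * <sys/timex.h>; the 100 us offset is an arbitrary example value):
 */
#if 0   /* example, not compiled */
#include <sys/timex.h>
#include <stdio.h>

int
main(void)
{
        struct timex tx = { 0 };
        struct ntptimeval ntv;

        tx.modes = MOD_OFFSET | MOD_STATUS;     /* feed one phase sample */
        tx.status = STA_PLL;                    /* keep the PLL enabled  */
        tx.offset = 100;                        /* 100 us (STA_NANO off) */
        if (ntp_adjtime(&tx) == TIME_ERROR) {
                fprintf(stderr, "clock is unsynchronized\n");
        }

        if (ntp_gettime(&ntv) != TIME_ERROR) {
                printf("maxerror %ld us, esterror %ld us\n",
                    ntv.maxerror, ntv.esterror);
        }
        return 0;
}
#endif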
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The nanosecond clock discipline uses two variable types, time
 * variables and frequency variables. Both types are represented as 64-
 * bit fixed-point quantities with the decimal point between two 32-bit
 * halves. On a 32-bit machine, each half is represented as a single
 * word and mathematical operations are done using multiple-precision
 * arithmetic. On a 64-bit machine, ordinary computer arithmetic is
 * used.
 *
 * A time variable is a signed 64-bit fixed-point number in ns and
 * fraction. It represents the remaining time offset to be amortized
 * over succeeding tick interrupts. The maximum time offset is about
 * 0.5 s and the resolution is about 2.3e-10 ns.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s|                          ns                             |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                            fraction                           |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * A frequency variable is a signed 64-bit fixed-point number in ns/s
 * and fraction. It represents the ns and fraction to be added to the
 * kernel time variable at each second. The maximum frequency offset is
 * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s s s s s s s s s s s|              ns/s                   |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                            fraction                           |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
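/*
 * A worked example of the scaling above, assuming MAXFREQ is 500000 ns/s as
 * in <sys/timex.h>: with 32 fraction bits the resolution is 1 / 2^32 ns
 * ~= 2.3e-10 ns, and the largest frequency offset of +500000 ns/s is stored
 * as 500000 * 2^32 = 0x0007A12000000000.
 */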
static int time_state = TIME_OK;        /* clock state */
int time_status = STA_UNSYNC;           /* clock status bits */
static long time_tai;                   /* TAI offset (s) */
static long time_constant;              /* poll interval (shift) (s) */
static long time_precision = 1;         /* clock precision (ns) */
static long time_maxerror = MAXPHASE / 1000;    /* maximum error (us) */
static unsigned long last_time_maxerror_update; /* last maxerror update (s) */
long time_esterror = MAXPHASE / 1000;           /* estimated error (us) */
static long time_reftime;               /* uptime at last adjustment (s) */
static l_fp time_offset;                /* time offset (ns) */
static l_fp time_freq;                  /* frequency offset (ns/s) */
static int64_t time_adjtime;            /* correction from adjtime(2) (usec) */

static lck_spin_t               *ntp_lock;
static lck_grp_t                *ntp_lock_grp;
static lck_attr_t               *ntp_lock_attr;
static lck_grp_attr_t           *ntp_lock_grp_attr;
#define NTP_LOCK(enable) \
        enable = ml_set_interrupts_enabled(FALSE); \
        lck_spin_lock(ntp_lock);

#define NTP_UNLOCK(enable) \
        lck_spin_unlock(ntp_lock); \
        ml_set_interrupts_enabled(enable);

#define NTP_ASSERT_LOCKED()     LCK_SPIN_ASSERT(ntp_lock, LCK_ASSERT_OWNED)
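/*
 * Sketch of how the locking macros above are intended to bracket access to
 * the shared time_* state (interrupts disabled around the spinlock):
 */
#if 0   /* example, not compiled */
        boolean_t enable;

        NTP_LOCK(enable);
        NTP_ASSERT_LOCKED();
        /* ... read or update time_offset, time_freq, time_status ... */
        NTP_UNLOCK(enable);
#endif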
static timer_call_data_t ntp_loop_update;
static uint64_t ntp_loop_deadline;
static uint32_t ntp_loop_active;
static uint32_t ntp_loop_period;
static int updated;     /* set by ntp_update_second() when the next adjustment may change */
#define NTP_LOOP_PERIOD_INTERVAL (NSEC_PER_SEC) /* 1 second interval */

static void hardupdate(long offset);
static void ntp_gettime1(struct ntptimeval *ntvp);
static bool ntp_is_time_error(int tsl);

static void ntp_loop_update_call(void);
static void refresh_ntp_loop(void);
static void start_ntp_loop(void);
#if DEVELOPMENT || DEBUG
uint32_t g_should_log_clock_adjustments = 0;
SYSCTL_INT(_kern, OID_AUTO, log_clock_adjustments, CTLFLAG_RW | CTLFLAG_LOCKED,
    &g_should_log_clock_adjustments, 0, "enable kernel clock adjustment logging");
#endif
static bool
ntp_is_time_error(int tsl)
{
        if (tsl & (STA_UNSYNC | STA_CLOCKERR)) {
                return true;
        }

        return false;
}
static void
ntp_gettime1(struct ntptimeval *ntvp)
{
        struct timespec atv;

        NTP_ASSERT_LOCKED();

        nanotime(&atv);
        ntvp->time.tv_sec = atv.tv_sec;
        ntvp->time.tv_nsec = atv.tv_nsec;
        if ((unsigned long)atv.tv_sec > last_time_maxerror_update) {
                time_maxerror += (MAXFREQ / 1000) * (atv.tv_sec - last_time_maxerror_update);
                last_time_maxerror_update = atv.tv_sec;
        }
        ntvp->maxerror = time_maxerror;
        ntvp->esterror = time_esterror;
        ntvp->tai = time_tai;
        ntvp->time_state = time_state;

        if (ntp_is_time_error(time_status)) {
                ntvp->time_state = TIME_ERROR;
        }
}
int
ntp_gettime(struct proc *p, struct ntp_gettime_args *uap, __unused int32_t *retval)
{
        struct ntptimeval ntv;
        int error;
        boolean_t enable;

        NTP_LOCK(enable);
        ntp_gettime1(&ntv);
        NTP_UNLOCK(enable);

        if (IS_64BIT_PROCESS(p)) {
                struct user64_ntptimeval user_ntv = {};
                user_ntv.time.tv_sec = ntv.time.tv_sec;
                user_ntv.time.tv_nsec = ntv.time.tv_nsec;
                user_ntv.maxerror = ntv.maxerror;
                user_ntv.esterror = ntv.esterror;
                user_ntv.tai = ntv.tai;
                user_ntv.time_state = ntv.time_state;
                error = copyout(&user_ntv, uap->ntvp, sizeof(user_ntv));
        } else {
                struct user32_ntptimeval user_ntv = {};
                user_ntv.time.tv_sec = ntv.time.tv_sec;
                user_ntv.time.tv_nsec = ntv.time.tv_nsec;
                user_ntv.maxerror = ntv.maxerror;
                user_ntv.esterror = ntv.esterror;
                user_ntv.tai = ntv.tai;
                user_ntv.time_state = ntv.time_state;
                error = copyout(&user_ntv, uap->ntvp, sizeof(user_ntv));
        }

        if (error) {
                return error;
        }

        return ntv.time_state;
}
int
ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval)
{
        struct timex ntv = {};
        long freq;
        unsigned int modes;
        int error, ret = 0;
        clock_sec_t sec;
        clock_usec_t microsecs;
        boolean_t enable;

        if (IS_64BIT_PROCESS(p)) {
                struct user64_timex user_ntv;
                error = copyin(uap->tp, &user_ntv, sizeof(user_ntv));
                ntv.modes = user_ntv.modes;
                ntv.offset = user_ntv.offset;
                ntv.freq = user_ntv.freq;
                ntv.maxerror = user_ntv.maxerror;
                ntv.esterror = user_ntv.esterror;
                ntv.status = user_ntv.status;
                ntv.constant = user_ntv.constant;
                ntv.precision = user_ntv.precision;
                ntv.tolerance = user_ntv.tolerance;
        } else {
                struct user32_timex user_ntv;
                error = copyin(uap->tp, &user_ntv, sizeof(user_ntv));
                ntv.modes = user_ntv.modes;
                ntv.offset = user_ntv.offset;
                ntv.freq = user_ntv.freq;
                ntv.maxerror = user_ntv.maxerror;
                ntv.esterror = user_ntv.esterror;
                ntv.status = user_ntv.status;
                ntv.constant = user_ntv.constant;
                ntv.precision = user_ntv.precision;
                ntv.tolerance = user_ntv.tolerance;
        }

        if (error) {
                return error;
        }
#if DEVELOPMENT || DEBUG
        if (g_should_log_clock_adjustments) {
                os_log(OS_LOG_DEFAULT, "%s: BEFORE modes %u offset %ld freq %ld status %d constant %ld time_adjtime %lld\n",
                    __func__, ntv.modes, ntv.offset, ntv.freq, ntv.status, ntv.constant, time_adjtime);
        }
#endif
        /*
         * Update selected clock variables - only the superuser can
         * change anything. Note that there is no error checking here on
         * the assumption the superuser should know what it is doing.
         * Note that either the time constant or TAI offset is loaded
         * from the ntv.constant member, depending on the mode bits. If
         * the STA_PLL bit in the status word is cleared, the state and
         * status words are reset to the initial values at boot.
         */
        modes = ntv.modes;
        if (modes) {
                /* Check that this task is entitled to set the time or it is root */
                if (!IOTaskHasEntitlement(current_task(), SETTIME_ENTITLEMENT)) {
                        error = mac_system_check_settime(kauth_cred_get());
                        if (error) {
                                return error;
                        }
                        if ((error = priv_check_cred(kauth_cred_get(), PRIV_ADJTIME, 0))) {
                                return error;
                        }
                }
        }

        NTP_LOCK(enable);
        if (modes & MOD_MAXERROR) {
                clock_gettimeofday(&sec, &microsecs);
                time_maxerror = ntv.maxerror;
                last_time_maxerror_update = sec;
        }
        if (modes & MOD_ESTERROR) {
                time_esterror = ntv.esterror;
        }
        if (modes & MOD_STATUS) {
                if (time_status & STA_PLL && !(ntv.status & STA_PLL)) {
                        time_state = TIME_OK;
                        time_status = STA_UNSYNC;
                }
                time_status &= STA_RONLY;
                time_status |= ntv.status & ~STA_RONLY;
                /*
                 * Neither PPS nor leap seconds are supported.
                 * Filter out unsupported bits.
                 */
                time_status &= STA_SUPPORTED;
        }
        if (modes & MOD_TIMECONST) {
                if (ntv.constant < 0) {
                        time_constant = 0;
                } else if (ntv.constant > MAXTC) {
                        time_constant = MAXTC;
                } else {
                        time_constant = ntv.constant;
                }
        }
        if (modes & MOD_TAI) {
                if (ntv.constant > 0) {
                        time_tai = ntv.constant;
                }
        }
        if (modes & MOD_NANO) {
                time_status |= STA_NANO;
        }
        if (modes & MOD_MICRO) {
                time_status &= ~STA_NANO;
        }
        if (modes & MOD_CLKB) {
                time_status |= STA_CLK;
        }
        if (modes & MOD_CLKA) {
                time_status &= ~STA_CLK;
        }
        if (modes & MOD_FREQUENCY) {
                freq = (ntv.freq * 1000LL) >> 16;
                if (freq > MAXFREQ) {
                        L_LINT(time_freq, MAXFREQ);
                } else if (freq < -MAXFREQ) {
                        L_LINT(time_freq, -MAXFREQ);
                } else {
                        /*
                         * ntv.freq is [PPM * 2^16] = [us/s * 2^16]
                         * time_freq is [ns/s * 2^32]
                         */
                        time_freq = ntv.freq * 1000LL * 65536LL;
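                        /*
                         * e.g. (illustrative numbers): a request of +100 ppm
                         * arrives as ntv.freq = 100 << 16 = 6553600, and the
                         * line above stores 6553600 * 1000 * 65536 =
                         * 100000 * 2^32, i.e. +100000 ns/s in l_fp form.
                         */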
                }
        }
        if (modes & MOD_OFFSET) {
                if (time_status & STA_NANO) {
                        hardupdate(ntv.offset);
                } else {
                        hardupdate(ntv.offset * 1000);
                }
        }

        ret = ntp_is_time_error(time_status) ? TIME_ERROR : time_state;
#if DEVELOPMENT || DEBUG
        if (g_should_log_clock_adjustments) {
                os_log(OS_LOG_DEFAULT, "%s: AFTER modes %u offset %lld freq %lld status %d constant %ld time_adjtime %lld\n",
                    __func__, modes, time_offset, time_freq, time_status, time_constant, time_adjtime);
        }
#endif
        /*
         * Retrieve all clock variables. Note that the TAI offset is
         * returned only by ntp_gettime();
         */
        if (IS_64BIT_PROCESS(p)) {
                struct user64_timex user_ntv = {};

                user_ntv.modes = modes;
                if (time_status & STA_NANO) {
                        user_ntv.offset = L_GINT(time_offset);
                } else {
                        user_ntv.offset = L_GINT(time_offset) / 1000;
                }
                user_ntv.freq = L_GINT((time_freq / 1000LL) << 16);
                user_ntv.maxerror = time_maxerror;
                user_ntv.esterror = time_esterror;
                user_ntv.status = time_status;
                user_ntv.constant = time_constant;
                if (time_status & STA_NANO) {
                        user_ntv.precision = time_precision;
                } else {
                        user_ntv.precision = time_precision / 1000;
                }
                user_ntv.tolerance = MAXFREQ * SCALE_PPM;

                /* unlock before copyout */
                NTP_UNLOCK(enable);

                error = copyout(&user_ntv, uap->tp, sizeof(user_ntv));
        } else {
                struct user32_timex user_ntv = {};

                user_ntv.modes = modes;
                if (time_status & STA_NANO) {
                        user_ntv.offset = L_GINT(time_offset);
                } else {
                        user_ntv.offset = L_GINT(time_offset) / 1000;
                }
                user_ntv.freq = L_GINT((time_freq / 1000LL) << 16);
                user_ntv.maxerror = time_maxerror;
                user_ntv.esterror = time_esterror;
                user_ntv.status = time_status;
                user_ntv.constant = time_constant;
                if (time_status & STA_NANO) {
                        user_ntv.precision = time_precision;
                } else {
                        user_ntv.precision = time_precision / 1000;
                }
                user_ntv.tolerance = MAXFREQ * SCALE_PPM;

                /* unlock before copyout */
                NTP_UNLOCK(enable);

                error = copyout(&user_ntv, uap->tp, sizeof(user_ntv));
        }

        if (error == 0) {
                *retval = ret;
        }

        return error;
}
/*
 * Compute the adjustment to add to the next second.
 */
void
ntp_update_second(int64_t *adjustment, clock_sec_t secs)
{
        int tickrate;
        l_fp time_adj;
        l_fp ftemp, old_time_adjtime, old_offset;

        NTP_ASSERT_LOCKED();

        if (secs > last_time_maxerror_update) {
                time_maxerror += (MAXFREQ / 1000) * (secs - last_time_maxerror_update);
                last_time_maxerror_update = secs;
        }

        old_offset = time_offset;
        old_time_adjtime = time_adjtime;

        ftemp = time_offset;
        L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
        time_adj = ftemp;
        L_SUB(time_offset, ftemp);
        L_ADD(time_adj, time_freq);

        /*
         * Apply any correction from adjtime. If more than one second
         * off we slew at a rate of 5ms/s (5000 PPM) else 500us/s (500 PPM)
         * until the last second is slewed the final < 500 usecs.
         */
        if (time_adjtime != 0) {
                if (time_adjtime > 1000000) {
                        tickrate = 5000;
                } else if (time_adjtime < -1000000) {
                        tickrate = -5000;
                } else if (time_adjtime > 500) {
                        tickrate = 500;
                } else if (time_adjtime < -500) {
                        tickrate = -500;
                } else {
                        tickrate = time_adjtime;
                }
                time_adjtime -= tickrate;
                L_LINT(ftemp, tickrate * 1000);
                L_ADD(time_adj, ftemp);
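                /*
                 * e.g. with time_adjtime = 2500000 us outstanding, tickrate is
                 * 5000, so 5 ms of the correction is folded into this second's
                 * adjustment and 2495000 us remain for later seconds.
                 */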
        }

        if (old_time_adjtime || ((time_offset || old_offset) && (time_offset != old_offset))) {
                updated = 1;
        } else {
                updated = 0;
        }

#if DEVELOPMENT || DEBUG
        if (g_should_log_clock_adjustments) {
                int64_t nano = (time_adj > 0)? time_adj >> 32 : -((-time_adj) >> 32);
                int64_t frac = (time_adj > 0)? ((uint32_t) time_adj) : -((uint32_t) (-time_adj));

                os_log(OS_LOG_DEFAULT, "%s:AFTER offset %lld (%lld) freq %lld status %d "
                    "constant %ld time_adjtime %lld nano %lld frac %lld adj %lld\n",
                    __func__, time_offset, (time_offset > 0)? time_offset >> 32 : -((-time_offset) >> 32),
                    time_freq, time_status, time_constant, time_adjtime, nano, frac, time_adj);
        }
#endif

        *adjustment = time_adj;
}
/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() when an offset is provided
 * to update the local clock phase and frequency.
 * The implementation is of an adaptive-parameter, hybrid
 * phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP).
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 256 s, operation should be in phase-lock mode,
 * where the loop is disciplined to phase. For update intervals greater
 * than 1024 s, operation should be in frequency-lock mode, where the
 * loop is disciplined to frequency. Between 256 s and 1024 s, the mode
 * is selected by the STA_MODE status bit.
 */
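/*
 * A rough sense of the loop gains below, assuming the usual SHIFT_PLL = 4
 * and SHIFT_FLL = 2 from <sys/timex.h>: with time_constant = 0 the PLL path
 * weights the clamped phase sample by mtemp / 2^12 (mtemp being the seconds
 * since the previous update), while the FLL path, when enabled, adds roughly
 * offset / (mtemp * 2^2) ns/s to time_freq.
 */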
static void
hardupdate(long offset)
{
        long mtemp;
        long time_monitor;
        clock_sec_t time_uptime;
        l_fp ftemp;

        NTP_ASSERT_LOCKED();

        if (!(time_status & STA_PLL)) {
                return;
        }

        if (offset > MAXPHASE) {
                time_monitor = MAXPHASE;
        } else if (offset < -MAXPHASE) {
                time_monitor = -MAXPHASE;
        } else {
                time_monitor = offset;
        }
        L_LINT(time_offset, time_monitor);

        clock_get_calendar_uptime(&time_uptime);

        if (time_status & STA_FREQHOLD || time_reftime == 0) {
                time_reftime = time_uptime;
        }

        mtemp = time_uptime - time_reftime;
        L_LINT(ftemp, time_monitor);
        L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
        L_MPY(ftemp, mtemp);
        L_ADD(time_freq, ftemp);
        time_status &= ~STA_MODE;
        if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
            MAXSEC)) {
                L_LINT(ftemp, (time_monitor << 4) / mtemp);
                L_RSHIFT(ftemp, SHIFT_FLL + 4);
                L_ADD(time_freq, ftemp);
                time_status |= STA_MODE;
        }
        time_reftime = time_uptime;

        if (L_GINT(time_freq) > MAXFREQ) {
                L_LINT(time_freq, MAXFREQ);
        } else if (L_GINT(time_freq) < -MAXFREQ) {
                L_LINT(time_freq, -MAXFREQ);
        }
}
int
kern_adjtime(struct timeval *delta)
{
        struct timeval atv;
        int64_t ltr, ltw;
        boolean_t enable;

        NTP_LOCK(enable);

        ltr = time_adjtime;
        ltw = (int64_t)delta->tv_sec * (int64_t)USEC_PER_SEC + delta->tv_usec;
        time_adjtime = ltw;

#if DEVELOPMENT || DEBUG
        if (g_should_log_clock_adjustments) {
                os_log(OS_LOG_DEFAULT, "%s:AFTER offset %lld freq %lld status %d constant %ld time_adjtime %lld\n",
                    __func__, time_offset, time_freq, time_status, time_constant, time_adjtime);
        }
#endif

        NTP_UNLOCK(enable);
        atv.tv_sec = ltr / (int64_t)USEC_PER_SEC;
        atv.tv_usec = ltr % (int64_t)USEC_PER_SEC;
        if (atv.tv_usec < 0) {
                atv.tv_usec += (suseconds_t)USEC_PER_SEC;
                atv.tv_sec--;
        }

        *delta = atv;

        return 0;
}
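/*
 * Note on the normalization in kern_adjtime() above: an outstanding
 * correction of, say, -1500000 us divides into (-1 s, -500000 us); the
 * fixup brings that to (-2 s, +500000 us) so tv_usec always lands in
 * [0, USEC_PER_SEC).
 */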
int
adjtime(struct proc *p, struct adjtime_args *uap, __unused int32_t *retval)
{
        struct timeval atv;
        int error;

        /* Check that this task is entitled to set the time or it is root */
        if (!IOTaskHasEntitlement(current_task(), SETTIME_ENTITLEMENT)) {
                error = mac_system_check_settime(kauth_cred_get());
                if (error) {
                        return error;
                }
                if ((error = priv_check_cred(kauth_cred_get(), PRIV_ADJTIME, 0))) {
                        return error;
                }
        }

        if (IS_64BIT_PROCESS(p)) {
                struct user64_timeval user_atv;
                error = copyin(uap->delta, &user_atv, sizeof(user_atv));
                atv.tv_sec = user_atv.tv_sec;
                atv.tv_usec = user_atv.tv_usec;
        } else {
                struct user32_timeval user_atv;
                error = copyin(uap->delta, &user_atv, sizeof(user_atv));
                atv.tv_sec = user_atv.tv_sec;
                atv.tv_usec = user_atv.tv_usec;
        }
        if (error) {
                return error;
        }

        kern_adjtime(&atv);

        if (IS_64BIT_PROCESS(p)) {
                struct user64_timeval user_atv = {};
                user_atv.tv_sec = atv.tv_sec;
                user_atv.tv_usec = atv.tv_usec;
                error = copyout(&user_atv, uap->olddelta, sizeof(user_atv));
        } else {
                struct user32_timeval user_atv = {};
                user_atv.tv_sec = atv.tv_sec;
                user_atv.tv_usec = atv.tv_usec;
                error = copyout(&user_atv, uap->olddelta, sizeof(user_atv));
        }

        return error;
}
static void
ntp_loop_update_call(void)
{
        boolean_t enable;

        NTP_LOCK(enable);

        /*
         * Update the scale factor used by clock_calend.
         * NOTE: clock_update_calendar will call ntp_update_second to compute the next adjustment.
         */
        clock_update_calendar();

        refresh_ntp_loop();

        NTP_UNLOCK(enable);
}
static void
refresh_ntp_loop(void)
{
        NTP_ASSERT_LOCKED();

        if (--ntp_loop_active == 0) {
                /*
                 * Activate the timer only if the next second adjustment might change.
                 * ntp_update_second checks it and sets updated accordingly.
                 */
                if (updated) {
                        clock_deadline_for_periodic_event(ntp_loop_period, mach_absolute_time(), &ntp_loop_deadline);

                        if (!timer_call_enter(&ntp_loop_update, ntp_loop_deadline, TIMER_CALL_SYS_CRITICAL)) {
                                ntp_loop_active++;
                        }
                }
        }
}
/*
 * This function triggers a timer that each second will calculate the adjustment to
 * provide to clock_calendar to scale the time (used by gettimeofday-family syscalls).
 * The periodic timer will stop when the adjustment reaches a stable value.
 */
static void
start_ntp_loop(void)
{
        NTP_ASSERT_LOCKED();

        ntp_loop_deadline = mach_absolute_time() + ntp_loop_period;

        if (!timer_call_enter(&ntp_loop_update, ntp_loop_deadline, TIMER_CALL_SYS_CRITICAL)) {
                ntp_loop_active++;
        }
}
void
ntp_init(void)
{
        uint64_t abstime;

        nanoseconds_to_absolutetime(NTP_LOOP_PERIOD_INTERVAL, &abstime);
        ntp_loop_period = (uint32_t)abstime;
        timer_call_setup(&ntp_loop_update, (timer_call_func_t)ntp_loop_update_call, NULL);

        ntp_lock_grp_attr = lck_grp_attr_alloc_init();
        ntp_lock_grp = lck_grp_alloc_init("ntp_lock", ntp_lock_grp_attr);
        ntp_lock_attr = lck_attr_alloc_init();
        ntp_lock = lck_spin_alloc_init(ntp_lock_grp, ntp_lock_attr);
}
SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, ntp_init, NULL);