]>
git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_time.c
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
24 * Copyright (c) 1982, 1986, 1989, 1993
25 * The Regents of the University of California. All rights reserved.
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * @(#)kern_time.c 8.4 (Berkeley) 5/26/95
58 #include <sys/param.h>
59 #include <sys/resourcevar.h>
60 #include <sys/kernel.h>
61 #include <sys/systm.h>
62 #include <sys/proc_internal.h>
63 #include <sys/kauth.h>
64 #include <sys/vnode.h>
66 #include <sys/mount_internal.h>
67 #include <sys/sysproto.h>
68 #include <sys/signalvar.h>
70 #include <kern/clock.h>
71 #include <kern/thread_call.h>
73 #define HZ 100 /* XXX */
75 /* simple lock used to access timezone, tz structure */
76 lck_spin_t
* tz_slock
;
77 lck_grp_t
* tz_slock_grp
;
78 lck_attr_t
* tz_slock_attr
;
79 lck_grp_attr_t
*tz_slock_grp_attr
;
81 static void setthetime(
84 void time_zone_slock_init(void);
87 * Time of day and interval timer support.
89 * These routines provide the kernel entry points to get and set
90 * the time-of-day and per-process interval timers. Subroutines
91 * here provide support for adding and subtracting timeval structures
92 * and decrementing interval timers, optionally reloading the interval
93 * timers when they expire.
98 __unused
struct proc
*p
,
99 struct gettimeofday_args
*uap
,
103 struct timezone ltz
; /* local copy */
106 clock_gettimeofday(&retval
[0], &retval
[1]);
109 lck_spin_lock(tz_slock
);
111 lck_spin_unlock(tz_slock
);
113 error
= copyout((caddr_t
)<z
, CAST_USER_ADDR_T(uap
->tzp
), sizeof (tz
));
120 * XXX Y2038 bug because of setthetime() argument
124 settimeofday(struct proc
*p
, struct settimeofday_args
*uap
, __unused register_t
*retval
)
130 if ((error
= suser(kauth_cred_get(), &p
->p_acflag
)))
132 /* Verify all parameters before changing time */
134 if (IS_64BIT_PROCESS(p
)) {
135 struct user_timeval user_atv
;
136 error
= copyin(uap
->tv
, &user_atv
, sizeof(struct user_timeval
));
137 atv
.tv_sec
= user_atv
.tv_sec
;
138 atv
.tv_usec
= user_atv
.tv_usec
;
140 error
= copyin(uap
->tv
, &atv
, sizeof(struct timeval
));
145 if (uap
->tzp
&& (error
= copyin(uap
->tzp
, (caddr_t
)&atz
, sizeof(atz
))))
149 if (atv
.tv_sec
< 0 || (atv
.tv_sec
== 0 && atv
.tv_usec
< 0))
154 lck_spin_lock(tz_slock
);
156 lck_spin_unlock(tz_slock
);
165 clock_set_calendar_microtime(tv
->tv_sec
, tv
->tv_usec
);
169 * XXX Y2038 bug because of clock_adjtime() first argument
173 adjtime(struct proc
*p
, register struct adjtime_args
*uap
, __unused register_t
*retval
)
178 if ((error
= suser(kauth_cred_get(), &p
->p_acflag
)))
180 if (IS_64BIT_PROCESS(p
)) {
181 struct user_timeval user_atv
;
182 error
= copyin(uap
->delta
, &user_atv
, sizeof(struct user_timeval
));
183 atv
.tv_sec
= user_atv
.tv_sec
;
184 atv
.tv_usec
= user_atv
.tv_usec
;
186 error
= copyin(uap
->delta
, &atv
, sizeof(struct timeval
));
192 * Compute the total correction and the rate at which to apply it.
194 clock_adjtime((int32_t *)&atv
.tv_sec
, &atv
.tv_usec
);
197 if (IS_64BIT_PROCESS(p
)) {
198 struct user_timeval user_atv
;
199 user_atv
.tv_sec
= atv
.tv_sec
;
200 user_atv
.tv_usec
= atv
.tv_usec
;
201 error
= copyout(&user_atv
, uap
->olddelta
, sizeof(struct user_timeval
));
203 error
= copyout(&atv
, uap
->olddelta
, sizeof(struct timeval
));
211 * Verify the calendar value. If negative,
212 * reset to zero (the epoch).
216 __unused
time_t base
)
222 * The calendar has already been
223 * set up from the platform clock.
225 * The value returned by microtime()
226 * is gotten from the calendar.
230 if (tv
.tv_sec
< 0 || tv
.tv_usec
< 0) {
231 printf ("WARNING: preposterous time in Real Time Clock");
232 tv
.tv_sec
= 0; /* the UNIX epoch */
235 printf(" -- CHECK AND RESET THE DATE!\n");
242 uint32_t sec
, nanosec
;
243 clock_get_boottime_nanotime(&sec
, &nanosec
);
247 uint64_t tvtoabstime(struct timeval
*tvp
);
250 * Get value of an interval timer. The process virtual and
251 * profiling virtual time timers are kept internally in the
252 * way they are specified externally: in time until they expire.
254 * The real time interval timer expiration time (p_rtime)
255 * is kept as an absolute time rather than as a delta, so that
256 * it is easy to keep periodic real-time signals from drifting.
258 * Virtual time timers are processed in the hardclock() routine of
259 * kern_clock.c. The real time timer is processed by a callout
260 * routine. Since a callout may be delayed in real time due to
261 * other processing in the system, it is possible for the real
262 * time callout routine (realitexpire, given below), to be delayed
263 * in real time past when it is supposed to occur. It does not
264 * suffice, therefore, to reload the real time .it_value from the
265 * real time .it_interval. Rather, we compute the next time in
266 * absolute time when the timer should go off.
271 getitimer(struct proc
*p
, register struct getitimer_args
*uap
, __unused register_t
*retval
)
273 struct itimerval aitv
;
275 if (uap
->which
> ITIMER_PROF
)
277 if (uap
->which
== ITIMER_REAL
) {
279 * If time for real time timer has passed return 0,
280 * else return difference between current time and
281 * time for the timer to go off.
283 aitv
= p
->p_realtimer
;
284 if (timerisset(&p
->p_rtime
)) {
288 if (timercmp(&p
->p_rtime
, &now
, <))
289 timerclear(&aitv
.it_value
);
291 aitv
.it_value
= p
->p_rtime
;
292 timevalsub(&aitv
.it_value
, &now
);
296 timerclear(&aitv
.it_value
);
299 aitv
= p
->p_stats
->p_timer
[uap
->which
];
301 if (IS_64BIT_PROCESS(p
)) {
302 struct user_itimerval user_itv
;
303 user_itv
.it_interval
.tv_sec
= aitv
.it_interval
.tv_sec
;
304 user_itv
.it_interval
.tv_usec
= aitv
.it_interval
.tv_usec
;
305 user_itv
.it_value
.tv_sec
= aitv
.it_value
.tv_sec
;
306 user_itv
.it_value
.tv_usec
= aitv
.it_value
.tv_usec
;
307 return (copyout((caddr_t
)&user_itv
, uap
->itv
, sizeof (struct user_itimerval
)));
309 return (copyout((caddr_t
)&aitv
, uap
->itv
, sizeof (struct itimerval
)));
315 setitimer(p
, uap
, retval
)
317 register struct setitimer_args
*uap
;
320 struct itimerval aitv
;
324 if (uap
->which
> ITIMER_PROF
)
326 if ((itvp
= uap
->itv
)) {
327 if (IS_64BIT_PROCESS(p
)) {
328 struct user_itimerval user_itv
;
329 if ((error
= copyin(itvp
, (caddr_t
)&user_itv
, sizeof (struct user_itimerval
))))
331 aitv
.it_interval
.tv_sec
= user_itv
.it_interval
.tv_sec
;
332 aitv
.it_interval
.tv_usec
= user_itv
.it_interval
.tv_usec
;
333 aitv
.it_value
.tv_sec
= user_itv
.it_value
.tv_sec
;
334 aitv
.it_value
.tv_usec
= user_itv
.it_value
.tv_usec
;
336 if ((error
= copyin(itvp
, (caddr_t
)&aitv
, sizeof (struct itimerval
))))
340 if ((uap
->itv
= uap
->oitv
) && (error
= getitimer(p
, (struct getitimer_args
*)uap
, retval
)))
344 if (itimerfix(&aitv
.it_value
) || itimerfix(&aitv
.it_interval
))
346 if (uap
->which
== ITIMER_REAL
) {
347 thread_call_func_cancel((thread_call_func_t
)realitexpire
, (void *)p
->p_pid
, FALSE
);
348 if (timerisset(&aitv
.it_value
)) {
349 microuptime(&p
->p_rtime
);
350 timevaladd(&p
->p_rtime
, &aitv
.it_value
);
351 thread_call_func_delayed(
352 (thread_call_func_t
)realitexpire
, (void *)p
->p_pid
,
353 tvtoabstime(&p
->p_rtime
));
356 timerclear(&p
->p_rtime
);
358 p
->p_realtimer
= aitv
;
361 p
->p_stats
->p_timer
[uap
->which
] = aitv
;
367 * Real interval timer expired:
368 * send process whose timer expired an alarm signal.
369 * If time is not set up to reload, then just return.
370 * Else compute next time timer should go off which is > current time.
371 * This is where delay in processing this timeout causes multiple
372 * SIGALRM calls to be compressed into one.
378 register struct proc
*p
;
380 boolean_t funnel_state
;
382 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
383 p
= pfind((pid_t
)pid
);
385 (void) thread_funnel_set(kernel_flock
, FALSE
);
389 if (!timerisset(&p
->p_realtimer
.it_interval
)) {
390 timerclear(&p
->p_rtime
);
393 (void) thread_funnel_set(kernel_flock
, FALSE
);
398 timevaladd(&p
->p_rtime
, &p
->p_realtimer
.it_interval
);
399 if (timercmp(&p
->p_rtime
, &now
, <=)) {
400 if ((p
->p_rtime
.tv_sec
+ 2) >= now
.tv_sec
) {
402 timevaladd(&p
->p_rtime
, &p
->p_realtimer
.it_interval
);
403 if (timercmp(&p
->p_rtime
, &now
, >))
408 p
->p_rtime
= p
->p_realtimer
.it_interval
;
409 timevaladd(&p
->p_rtime
, &now
);
415 thread_call_func_delayed((thread_call_func_t
)realitexpire
, pid
, tvtoabstime(&p
->p_rtime
));
417 (void) thread_funnel_set(kernel_flock
, FALSE
);
421 * Check that a proposed value to load into the .it_value or
422 * .it_interval part of an interval timer is acceptable, and
423 * fix it to have at least minimal value (i.e. if it is less
424 * than the resolution of the clock, round it up.)
431 if (tv
->tv_sec
< 0 || tv
->tv_sec
> 100000000 ||
432 tv
->tv_usec
< 0 || tv
->tv_usec
>= 1000000)
434 if (tv
->tv_sec
== 0 && tv
->tv_usec
!= 0 && tv
->tv_usec
< tick
)
440 * Decrement an interval timer by a specified number
441 * of microseconds, which must be less than a second,
442 * i.e. < 1000000. If the timer expires, then reload
443 * it. In this case, carry over (usec - old value) to
444 * reducint the value reloaded into the timer so that
445 * the timer does not drift. This routine assumes
446 * that it is called in a context where the timers
447 * on which it is operating cannot change in value.
450 itimerdecr(itp
, usec
)
451 register struct itimerval
*itp
;
455 if (itp
->it_value
.tv_usec
< usec
) {
456 if (itp
->it_value
.tv_sec
== 0) {
457 /* expired, and already in next interval */
458 usec
-= itp
->it_value
.tv_usec
;
461 itp
->it_value
.tv_usec
+= 1000000;
462 itp
->it_value
.tv_sec
--;
464 itp
->it_value
.tv_usec
-= usec
;
466 if (timerisset(&itp
->it_value
))
468 /* expired, exactly at end of interval */
470 if (timerisset(&itp
->it_interval
)) {
471 itp
->it_value
= itp
->it_interval
;
472 itp
->it_value
.tv_usec
-= usec
;
473 if (itp
->it_value
.tv_usec
< 0) {
474 itp
->it_value
.tv_usec
+= 1000000;
475 itp
->it_value
.tv_sec
--;
478 itp
->it_value
.tv_usec
= 0; /* sec is already 0 */
/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 */
void
timevaladd(
	struct timeval *t1,
	struct timeval *t2)
{
	/* Componentwise sum; timevalfix renormalizes tv_usec into [0, 1s). */
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}
/*
 * Subtract t2 from t1 in place. Results "before the beginning"
 * (negative totals) are not handled meaningfully — see note above
 * timevaladd in the original sources.
 */
void
timevalsub(
	struct timeval *t1,
	struct timeval *t2)
{
	/* Componentwise difference; timevalfix renormalizes tv_usec. */
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}
/*
 * Renormalize a timeval whose tv_usec has drifted out of [0, 1000000)
 * after an add or subtract. Performs a single borrow or carry, which
 * is sufficient because callers only ever add/subtract one timeval.
 */
void
timevalfix(
	struct timeval *t1)
{
	/* Borrow one second when usec went negative. */
	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	/* Carry one second when usec overflowed. */
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}
/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points. Reads the calendar (wall) clock.
 * NOTE(review): signature reconstructed from a lossy scrape.
 */
void
microtime(
	struct timeval	*tvp)
{
	clock_get_calendar_microtime((uint32_t *)&tvp->tv_sec, &tvp->tv_usec);
}
/*
 * Like microtime(), but reads the monotonic system (uptime) clock
 * rather than the calendar clock.
 */
void
microuptime(
	struct timeval	*tvp)
{
	clock_get_system_microtime((uint32_t *)&tvp->tv_sec, &tvp->tv_usec);
}
/*
 * Ditto for timespec: calendar (wall) time at nanosecond resolution.
 */
void
nanotime(
	struct timespec *tsp)
{
	clock_get_calendar_nanotime((uint32_t *)&tsp->tv_sec, (uint32_t *)&tsp->tv_nsec);
}
/*
 * Monotonic system (uptime) clock at nanosecond resolution.
 * NOTE(review): function-name line reconstructed from a lossy scrape.
 */
void
nanouptime(
	struct timespec *tsp)
{
	clock_get_system_nanotime((uint32_t *)&tsp->tv_sec, (uint32_t *)&tsp->tv_nsec);
}
563 uint64_t result
, usresult
;
565 clock_interval_to_absolutetime_interval(
566 tvp
->tv_sec
, NSEC_PER_SEC
, &result
);
567 clock_interval_to_absolutetime_interval(
568 tvp
->tv_usec
, NSEC_PER_USEC
, &usresult
);
570 return (result
+ usresult
);
573 time_zone_slock_init(void)
575 /* allocate lock group attribute and group */
576 tz_slock_grp_attr
= lck_grp_attr_alloc_init();
578 tz_slock_grp
= lck_grp_alloc_init("tzlock", tz_slock_grp_attr
);
580 /* Allocate lock attribute */
581 tz_slock_attr
= lck_attr_alloc_init();
583 /* Allocate the spin lock */
584 tz_slock
= lck_spin_alloc_init(tz_slock_grp
, tz_slock_attr
);