]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_time.c
xnu-792.13.8.tar.gz
[apple/xnu.git] / bsd / kern / kern_time.c
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
31 /*
32 * Copyright (c) 1982, 1986, 1989, 1993
33 * The Regents of the University of California. All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 * must display the following acknowledgement:
45 * This product includes software developed by the University of
46 * California, Berkeley and its contributors.
47 * 4. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * @(#)kern_time.c 8.4 (Berkeley) 5/26/95
64 */
65
66 #include <sys/param.h>
67 #include <sys/resourcevar.h>
68 #include <sys/kernel.h>
69 #include <sys/systm.h>
70 #include <sys/proc_internal.h>
71 #include <sys/kauth.h>
72 #include <sys/vnode.h>
73
74 #include <sys/mount_internal.h>
75 #include <sys/sysproto.h>
76 #include <sys/signalvar.h>
77
78 #include <kern/clock.h>
79 #include <kern/thread_call.h>
80
81 #define HZ 100 /* XXX */
82
83 /* simple lock used to access timezone, tz structure */
84 lck_spin_t * tz_slock;
85 lck_grp_t * tz_slock_grp;
86 lck_attr_t * tz_slock_attr;
87 lck_grp_attr_t *tz_slock_grp_attr;
88
89 static void setthetime(
90 struct timeval *tv);
91
92 void time_zone_slock_init(void);
93
94 /*
95 * Time of day and interval timer support.
96 *
97 * These routines provide the kernel entry points to get and set
98 * the time-of-day and per-process interval timers. Subroutines
99 * here provide support for adding and subtracting timeval structures
100 * and decrementing interval timers, optionally reloading the interval
101 * timers when they expire.
102 */
/*
 * gettimeofday() system call.
 *
 * Returns the current calendar time directly in the two register_t
 * return slots (retval[0] = seconds, retval[1] = microseconds) when
 * uap->tp is non-NULL -- NOTE(review): assumes clock_gettimeofday()
 * fills them in that order; confirm against the osfmk clock code.
 * When uap->tzp is non-NULL, copies out a snapshot of the global
 * timezone.  Returns 0 or a copyout error.
 */
/* ARGSUSED */
int
gettimeofday(
	__unused struct proc	*p,
	struct gettimeofday_args *uap,
	register_t *retval)
{
	int error = 0;
	struct timezone ltz; /* local copy */

	if (uap->tp)
		clock_gettimeofday(&retval[0], &retval[1]);

	if (uap->tzp) {
		/* snapshot the global tz under its spin lock, copy out unlocked */
		lck_spin_lock(tz_slock);
		ltz = tz;
		lck_spin_unlock(tz_slock);

		error = copyout((caddr_t)&ltz, CAST_USER_ADDR_T(uap->tzp), sizeof (tz));
	}

	return (error);
}
126
/*
 * settimeofday() system call: set the calendar time and/or the
 * global timezone.  Requires superuser privilege.
 *
 * XXX Y2038 bug because of setthetime() argument
 */
/* ARGSUSED */
int
settimeofday(struct proc *p, struct settimeofday_args *uap, __unused register_t *retval)
{
	struct timeval atv;
	struct timezone atz;
	int error;

	/* setting the clock is a privileged operation */
	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);
	/* Verify all parameters before changing time */
	if (uap->tv) {
		/* 64-bit callers pass a user_timeval with wider fields */
		if (IS_64BIT_PROCESS(p)) {
			struct user_timeval user_atv;
			error = copyin(uap->tv, &user_atv, sizeof(struct user_timeval));
			atv.tv_sec = user_atv.tv_sec;
			atv.tv_usec = user_atv.tv_usec;
		} else {
			error = copyin(uap->tv, &atv, sizeof(struct timeval));
		}
		if (error)
			return (error);
	}
	if (uap->tzp && (error = copyin(uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);
	if (uap->tv) {
		/* normalize tv_usec into range, then reject pre-epoch times */
		timevalfix(&atv);
		if (atv.tv_sec < 0 || (atv.tv_sec == 0 && atv.tv_usec < 0))
			return (EPERM);
		setthetime(&atv);
	}
	if (uap->tzp) {
		/* publish the new timezone under the spin lock */
		lck_spin_lock(tz_slock);
		tz = atz;
		lck_spin_unlock(tz_slock);
	}
	return (0);
}
168
/*
 * Set the calendar clock from an already-validated timeval.
 * Callers (settimeofday, inittodr) have checked privilege and
 * sanity.  XXX Y2038: tv_sec is passed as a 32-bit quantity.
 */
static void
setthetime(
	struct timeval *tv)
{
	clock_set_calendar_microtime(tv->tv_sec, tv->tv_usec);
}
175
176 /*
177 * XXX Y2038 bug because of clock_adjtime() first argument
178 */
179 /* ARGSUSED */
180 int
181 adjtime(struct proc *p, register struct adjtime_args *uap, __unused register_t *retval)
182 {
183 struct timeval atv;
184 int error;
185
186 if ((error = suser(kauth_cred_get(), &p->p_acflag)))
187 return (error);
188 if (IS_64BIT_PROCESS(p)) {
189 struct user_timeval user_atv;
190 error = copyin(uap->delta, &user_atv, sizeof(struct user_timeval));
191 atv.tv_sec = user_atv.tv_sec;
192 atv.tv_usec = user_atv.tv_usec;
193 } else {
194 error = copyin(uap->delta, &atv, sizeof(struct timeval));
195 }
196 if (error)
197 return (error);
198
199 /*
200 * Compute the total correction and the rate at which to apply it.
201 */
202 clock_adjtime((int32_t *)&atv.tv_sec, &atv.tv_usec);
203
204 if (uap->olddelta) {
205 if (IS_64BIT_PROCESS(p)) {
206 struct user_timeval user_atv;
207 user_atv.tv_sec = atv.tv_sec;
208 user_atv.tv_usec = atv.tv_usec;
209 error = copyout(&user_atv, uap->olddelta, sizeof(struct user_timeval));
210 } else {
211 error = copyout(&atv, uap->olddelta, sizeof(struct timeval));
212 }
213 }
214
215 return (0);
216 }
217
218 /*
219 * Verify the calendar value. If negative,
220 * reset to zero (the epoch).
221 */
222 void
223 inittodr(
224 __unused time_t base)
225 {
226 struct timeval tv;
227
228 /*
229 * Assertion:
230 * The calendar has already been
231 * set up from the platform clock.
232 *
233 * The value returned by microtime()
234 * is gotten from the calendar.
235 */
236 microtime(&tv);
237
238 if (tv.tv_sec < 0 || tv.tv_usec < 0) {
239 printf ("WARNING: preposterous time in Real Time Clock");
240 tv.tv_sec = 0; /* the UNIX epoch */
241 tv.tv_usec = 0;
242 setthetime(&tv);
243 printf(" -- CHECK AND RESET THE DATE!\n");
244 }
245 }
246
/*
 * Return the calendar time, in whole seconds, at which the
 * system booted (nanoseconds discarded).
 */
time_t
boottime_sec(void)
{
	uint32_t secs;
	uint32_t nanosecs;

	clock_get_boottime_nanotime(&secs, &nanosecs);
	return ((time_t)secs);
}
254
255 uint64_t tvtoabstime(struct timeval *tvp);
256
257 /*
258 * Get value of an interval timer. The process virtual and
259 * profiling virtual time timers are kept internally in the
260 * way they are specified externally: in time until they expire.
261 *
262 * The real time interval timer expiration time (p_rtime)
263 * is kept as an absolute time rather than as a delta, so that
264 * it is easy to keep periodic real-time signals from drifting.
265 *
266 * Virtual time timers are processed in the hardclock() routine of
267 * kern_clock.c. The real time timer is processed by a callout
268 * routine. Since a callout may be delayed in real time due to
269 * other processing in the system, it is possible for the real
270 * time callout routine (realitexpire, given below), to be delayed
271 * in real time past when it is supposed to occur. It does not
272 * suffice, therefore, to reload the real time .it_value from the
273 * real time .it_interval. Rather, we compute the next time in
274 * absolute time when the timer should go off.
275 */
276
/*
 * getitimer() system call: copy out the current setting of one of
 * the process interval timers (ITIMER_REAL/VIRTUAL/PROF).
 *
 * The virtual and profiling timers are stored as deltas and can be
 * copied out directly; the real timer's expiration (p_rtime) is an
 * absolute uptime and must be converted back to time-remaining.
 */
/* ARGSUSED */
int
getitimer(struct proc *p, register struct getitimer_args *uap, __unused register_t *retval)
{
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return(EINVAL);
	if (uap->which == ITIMER_REAL) {
		/*
		 * If time for real time timer has passed return 0,
		 * else return difference between current time and
		 * time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&p->p_rtime)) {
			struct timeval now;

			microuptime(&now);
			if (timercmp(&p->p_rtime, &now, <))
				timerclear(&aitv.it_value);
			else {
				/* time remaining = absolute expiry - now */
				aitv.it_value = p->p_rtime;
				timevalsub(&aitv.it_value, &now);
			}
		}
		else
			timerclear(&aitv.it_value);
	}
	else
		aitv = p->p_stats->p_timer[uap->which];

	/* copy out in the caller's natural timeval width */
	if (IS_64BIT_PROCESS(p)) {
		struct user_itimerval user_itv;
		user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
		user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
		user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
		user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
		return (copyout((caddr_t)&user_itv, uap->itv, sizeof (struct user_itimerval)));
	} else {
		return (copyout((caddr_t)&aitv, uap->itv, sizeof (struct itimerval)));
	}
}
320
/*
 * setitimer() system call: install a new interval-timer setting,
 * optionally returning the previous one through uap->oitv.
 */
/* ARGSUSED */
int
setitimer(p, uap, retval)
	struct proc *p;
	register struct setitimer_args *uap;
	register_t *retval;
{
	struct itimerval aitv;
	user_addr_t itvp;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	if ((itvp = uap->itv)) {
		/* copy in the new setting in the caller's timeval width */
		if (IS_64BIT_PROCESS(p)) {
			struct user_itimerval user_itv;
			if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof (struct user_itimerval))))
				return (error);
			aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec;
			aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
			aitv.it_value.tv_sec = user_itv.it_value.tv_sec;
			aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
		} else {
			if ((error = copyin(itvp, (caddr_t)&aitv, sizeof (struct itimerval))))
				return (error);
		}
	}
	/*
	 * Fetch the old setting by aliasing uap as a getitimer_args:
	 * uap->itv is overwritten with the old-value pointer (the new
	 * setting was already saved in itvp/aitv above).
	 */
	if ((uap->itv = uap->oitv) && (error = getitimer(p, (struct getitimer_args *)uap, retval)))
		return (error);
	if (itvp == 0)
		return (0);	/* caller only wanted the old value */
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	if (uap->which == ITIMER_REAL) {
		/*
		 * Cancel any pending expiry callout, then (if the timer
		 * is being armed) record the absolute uptime at which it
		 * should fire and schedule realitexpire() for then.  The
		 * callout parameter is the pid, not the proc pointer.
		 */
		thread_call_func_cancel((thread_call_func_t)realitexpire, (void *)p->p_pid, FALSE);
		if (timerisset(&aitv.it_value)) {
			microuptime(&p->p_rtime);
			timevaladd(&p->p_rtime, &aitv.it_value);
			thread_call_func_delayed(
				(thread_call_func_t)realitexpire, (void *)p->p_pid,
				tvtoabstime(&p->p_rtime));
		}
		else
			timerclear(&p->p_rtime);

		p->p_realtimer = aitv;
	}
	else
		p->p_stats->p_timer[uap->which] = aitv;

	return (0);
}
373
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(
	void *pid)
{
	register struct proc *p;
	struct timeval now;
	boolean_t funnel_state;

	/* take the kernel funnel for the proc lookup and psignal() */
	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	/*
	 * The callout carries a pid rather than a proc pointer; the
	 * process may have exited since the timer was armed.
	 */
	p = pfind((pid_t)pid);
	if (p == NULL) {
		(void) thread_funnel_set(kernel_flock, FALSE);
		return;
	}

	/* one-shot timer: deliver the signal, clear, and do not re-arm */
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_rtime);
		psignal(p, SIGALRM);

		(void) thread_funnel_set(kernel_flock, FALSE);
		return;
	}

	/*
	 * Periodic timer: advance the absolute expiry by one interval.
	 * If the callout ran late and the new expiry is still in the
	 * past, catch up: step interval-by-interval when we are within
	 * about 2 seconds (compressing the missed SIGALRMs into this
	 * one), otherwise jump straight to now + interval.
	 */
	microuptime(&now);
	timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
	if (timercmp(&p->p_rtime, &now, <=)) {
		if ((p->p_rtime.tv_sec + 2) >= now.tv_sec) {
			for (;;) {
				timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
				if (timercmp(&p->p_rtime, &now, >))
					break;
			}
		}
		else {
			p->p_rtime = p->p_realtimer.it_interval;
			timevaladd(&p->p_rtime, &now);
		}
	}

	psignal(p, SIGALRM);

	/* re-arm the callout for the next period */
	thread_call_func_delayed((thread_call_func_t)realitexpire, pid, tvtoabstime(&p->p_rtime));

	(void) thread_funnel_set(kernel_flock, FALSE);
}
427
428 /*
429 * Check that a proposed value to load into the .it_value or
430 * .it_interval part of an interval timer is acceptable, and
431 * fix it to have at least minimal value (i.e. if it is less
432 * than the resolution of the clock, round it up.)
433 */
434 int
435 itimerfix(tv)
436 struct timeval *tv;
437 {
438
439 if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
440 tv->tv_usec < 0 || tv->tv_usec >= 1000000)
441 return (EINVAL);
442 if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
443 tv->tv_usec = tick;
444 return (0);
445 }
446
447 /*
448 * Decrement an interval timer by a specified number
449 * of microseconds, which must be less than a second,
450 * i.e. < 1000000. If the timer expires, then reload
451 * it. In this case, carry over (usec - old value) to
452 * reducint the value reloaded into the timer so that
453 * the timer does not drift. This routine assumes
454 * that it is called in a context where the timers
455 * on which it is operating cannot change in value.
456 */
457 int
458 itimerdecr(itp, usec)
459 register struct itimerval *itp;
460 int usec;
461 {
462
463 if (itp->it_value.tv_usec < usec) {
464 if (itp->it_value.tv_sec == 0) {
465 /* expired, and already in next interval */
466 usec -= itp->it_value.tv_usec;
467 goto expire;
468 }
469 itp->it_value.tv_usec += 1000000;
470 itp->it_value.tv_sec--;
471 }
472 itp->it_value.tv_usec -= usec;
473 usec = 0;
474 if (timerisset(&itp->it_value))
475 return (1);
476 /* expired, exactly at end of interval */
477 expire:
478 if (timerisset(&itp->it_interval)) {
479 itp->it_value = itp->it_interval;
480 itp->it_value.tv_usec -= usec;
481 if (itp->it_value.tv_usec < 0) {
482 itp->it_value.tv_usec += 1000000;
483 itp->it_value.tv_sec--;
484 }
485 } else
486 itp->it_value.tv_usec = 0; /* sec is already 0 */
487 return (0);
488 }
489
/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(
	struct timeval *t1,
	struct timeval *t2)
{
	/* component-wise sum, then pull tv_usec back into [0, 1000000) */
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	if (t1->tv_usec < 0) {
		t1->tv_usec += 1000000;
		t1->tv_sec -= 1;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_usec -= 1000000;
		t1->tv_sec += 1;
	}
}
/*
 * Subtract t2 from t1 in place.  Results that go before the
 * beginning of time are not handled (see note above timevaladd).
 */
void
timevalsub(
	struct timeval *t1,
	struct timeval *t2)
{
	/* component-wise difference, then renormalize tv_usec */
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	if (t1->tv_usec < 0) {
		t1->tv_usec += 1000000;
		t1->tv_sec -= 1;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_usec -= 1000000;
		t1->tv_sec += 1;
	}
}
/*
 * Single-step normalization of a timeval: pull tv_usec back into
 * [0, 1000000), assuming it is off by at most one second in
 * either direction.
 */
void
timevalfix(
	struct timeval *t1)
{
	if (t1->tv_usec < 0) {
		t1->tv_usec += 1000000;
		t1->tv_sec -= 1;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_usec -= 1000000;
		t1->tv_sec += 1;
	}
}
531
/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points.
 */
void
microtime(
	struct timeval *tvp)
{
	/* calendar (wall clock) time, seconds + microseconds */
	uint32_t *secp = (uint32_t *)&tvp->tv_sec;

	clock_get_calendar_microtime(secp, &tvp->tv_usec);
}
542
/*
 * Like microtime(), but for the monotonic system uptime clock.
 */
void
microuptime(
	struct timeval *tvp)
{
	uint32_t *secp = (uint32_t *)&tvp->tv_sec;

	clock_get_system_microtime(secp, &tvp->tv_usec);
}
549
/*
 * Ditto for timespec.
 */
void
nanotime(
	struct timespec *tsp)
{
	/* calendar (wall clock) time at nanosecond granularity */
	uint32_t *secp = (uint32_t *)&tsp->tv_sec;
	uint32_t *nsecp = (uint32_t *)&tsp->tv_nsec;

	clock_get_calendar_nanotime(secp, nsecp);
}
559
/*
 * Like nanotime(), but for the monotonic system uptime clock.
 */
void
nanouptime(
	struct timespec *tsp)
{
	uint32_t *secp = (uint32_t *)&tsp->tv_sec;
	uint32_t *nsecp = (uint32_t *)&tsp->tv_nsec;

	clock_get_system_nanotime(secp, nsecp);
}
566
567 uint64_t
568 tvtoabstime(
569 struct timeval *tvp)
570 {
571 uint64_t result, usresult;
572
573 clock_interval_to_absolutetime_interval(
574 tvp->tv_sec, NSEC_PER_SEC, &result);
575 clock_interval_to_absolutetime_interval(
576 tvp->tv_usec, NSEC_PER_USEC, &usresult);
577
578 return (result + usresult);
579 }
/*
 * Allocate and initialize the spin lock that protects the global
 * timezone (tz).  Must run before any gettimeofday/settimeofday
 * path touches tz_slock.  Ordering matters: each lck_* allocation
 * below consumes the object created by the previous step.
 */
void
time_zone_slock_init(void)
{
	/* allocate lock group attribute and group */
	tz_slock_grp_attr = lck_grp_attr_alloc_init();

	tz_slock_grp = lck_grp_alloc_init("tzlock", tz_slock_grp_attr);

	/* Allocate lock attribute */
	tz_slock_attr = lck_attr_alloc_init();

	/* Allocate the spin lock */
	tz_slock = lck_spin_alloc_init(tz_slock_grp, tz_slock_attr);
}
594