/* bsd/kern/kern_time.c — apple/xnu, revision af3e1a9ca18f37ef5c086dff34a585bd3b8ab01e */
1 /*
2 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
31 /*
32 * Copyright (c) 1982, 1986, 1989, 1993
33 * The Regents of the University of California. All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 * must display the following acknowledgement:
45 * This product includes software developed by the University of
46 * California, Berkeley and its contributors.
47 * 4. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * @(#)kern_time.c 8.4 (Berkeley) 5/26/95
64 */
65
66 #include <sys/param.h>
67 #include <sys/resourcevar.h>
68 #include <sys/kernel.h>
69 #include <sys/systm.h>
70 #include <sys/proc_internal.h>
71 #include <sys/kauth.h>
72 #include <sys/vnode.h>
73
74 #include <sys/mount_internal.h>
75 #include <sys/sysproto.h>
76 #include <sys/signalvar.h>
77
78 #include <kern/clock.h>
79 #include <kern/thread_call.h>
80
81 #define HZ 100 /* XXX */
82
83 /* simple lock used to access timezone, tz structure */
84 lck_spin_t * tz_slock;
85 lck_grp_t * tz_slock_grp;
86 lck_attr_t * tz_slock_attr;
87 lck_grp_attr_t *tz_slock_grp_attr;
88
89 static void setthetime(
90 struct timeval *tv);
91
92 void time_zone_slock_init(void);
93
94 int gettimeofday(struct proc *p,
95 #ifdef __ppc__
96 struct ppc_gettimeofday_args *uap,
97 #else
98 struct gettimeofday_args *uap,
99 #endif
100 register_t *retval);
101
102 /*
103 * Time of day and interval timer support.
104 *
105 * These routines provide the kernel entry points to get and set
106 * the time-of-day and per-process interval timers. Subroutines
107 * here provide support for adding and subtracting timeval structures
108 * and decrementing interval timers, optionally reloading the interval
109 * timers when they expire.
110 *
111 * XXX Y2038 bug because of clock_get_calendar_microtime() first argument
112 */
113 /* ARGSUSED */
114 int
115 gettimeofday(__unused struct proc *p,
116 #ifdef __ppc__
117 register struct ppc_gettimeofday_args *uap,
118 #else
119 register struct gettimeofday_args *uap,
120 #endif
121 __unused register_t *retval)
122 {
123 struct timeval atv;
124 int error = 0;
125 struct timezone ltz; /* local copy */
126
127 /* NOTE THIS implementation is for non ppc architectures only */
128
129 if (uap->tp) {
130 clock_get_calendar_microtime((uint32_t *)&atv.tv_sec, &atv.tv_usec);
131 if (IS_64BIT_PROCESS(p)) {
132 struct user_timeval user_atv;
133 user_atv.tv_sec = atv.tv_sec;
134 user_atv.tv_usec = atv.tv_usec;
135 /*
136 * This cast is not necessary for PPC, but is
137 * mostly harmless.
138 */
139 error = copyout(&user_atv, CAST_USER_ADDR_T(uap->tp), sizeof(struct user_timeval));
140 } else {
141 error = copyout(&atv, CAST_USER_ADDR_T(uap->tp), sizeof(struct timeval));
142 }
143 if (error)
144 return(error);
145 }
146
147 if (uap->tzp) {
148 lck_spin_lock(tz_slock);
149 ltz = tz;
150 lck_spin_unlock(tz_slock);
151 error = copyout((caddr_t)&ltz, CAST_USER_ADDR_T(uap->tzp),
152 sizeof (tz));
153 }
154
155 return(error);
156 }
157
158 /*
159 * XXX Y2038 bug because of setthetime() argument
160 */
161 /* ARGSUSED */
162 int
163 settimeofday(struct proc *p, struct settimeofday_args *uap, __unused register_t *retval)
164 {
165 struct timeval atv;
166 struct timezone atz;
167 int error;
168
169 if ((error = suser(kauth_cred_get(), &p->p_acflag)))
170 return (error);
171 /* Verify all parameters before changing time */
172 if (uap->tv) {
173 if (IS_64BIT_PROCESS(p)) {
174 struct user_timeval user_atv;
175 error = copyin(uap->tv, &user_atv, sizeof(struct user_timeval));
176 atv.tv_sec = user_atv.tv_sec;
177 atv.tv_usec = user_atv.tv_usec;
178 } else {
179 error = copyin(uap->tv, &atv, sizeof(struct timeval));
180 }
181 if (error)
182 return (error);
183 }
184 if (uap->tzp && (error = copyin(uap->tzp, (caddr_t)&atz, sizeof(atz))))
185 return (error);
186 if (uap->tv) {
187 timevalfix(&atv);
188 if (atv.tv_sec < 0 || (atv.tv_sec == 0 && atv.tv_usec < 0))
189 return (EPERM);
190 setthetime(&atv);
191 }
192 if (uap->tzp) {
193 lck_spin_lock(tz_slock);
194 tz = atz;
195 lck_spin_unlock(tz_slock);
196 }
197 return (0);
198 }
199
/*
 * setthetime
 *
 * Install an already-validated time of day into the kernel
 * calendar clock.  Range checking and privilege checks are the
 * caller's (settimeofday()'s) responsibility.
 */
static void
setthetime(
	struct timeval *tv)
{
	clock_set_calendar_microtime(tv->tv_sec, tv->tv_usec);
}
206
207 /*
208 * XXX Y2038 bug because of clock_adjtime() first argument
209 */
210 /* ARGSUSED */
211 int
212 adjtime(struct proc *p, register struct adjtime_args *uap, __unused register_t *retval)
213 {
214 struct timeval atv;
215 int error;
216
217 if ((error = suser(kauth_cred_get(), &p->p_acflag)))
218 return (error);
219 if (IS_64BIT_PROCESS(p)) {
220 struct user_timeval user_atv;
221 error = copyin(uap->delta, &user_atv, sizeof(struct user_timeval));
222 atv.tv_sec = user_atv.tv_sec;
223 atv.tv_usec = user_atv.tv_usec;
224 } else {
225 error = copyin(uap->delta, &atv, sizeof(struct timeval));
226 }
227 if (error)
228 return (error);
229
230 /*
231 * Compute the total correction and the rate at which to apply it.
232 */
233 clock_adjtime((int32_t *)&atv.tv_sec, &atv.tv_usec);
234
235 if (uap->olddelta) {
236 if (IS_64BIT_PROCESS(p)) {
237 struct user_timeval user_atv;
238 user_atv.tv_sec = atv.tv_sec;
239 user_atv.tv_usec = atv.tv_usec;
240 error = copyout(&user_atv, uap->olddelta, sizeof(struct user_timeval));
241 } else {
242 error = copyout(&atv, uap->olddelta, sizeof(struct timeval));
243 }
244 }
245
246 return (0);
247 }
248
/*
 * Verify the calendar value.  If negative,
 * reset to zero (the epoch).
 *
 * The `base` argument (a best-guess time from the filesystem in
 * the classic BSD design) is unused here; the calendar is assumed
 * to have been initialized from the platform clock already.
 */
void
inittodr(
	__unused time_t base)
{
	struct timeval tv;

	/*
	 * Assertion:
	 * The calendar has already been
	 * set up from the platform clock.
	 *
	 * The value returned by microtime()
	 * is gotten from the calendar.
	 */
	microtime(&tv);

	if (tv.tv_sec < 0 || tv.tv_usec < 0) {
		/* implausible (pre-epoch) clock value: clamp to the epoch */
		printf ("WARNING: preposterous time in Real Time Clock");
		tv.tv_sec = 0;		/* the UNIX epoch */
		tv.tv_usec = 0;
		setthetime(&tv);
		printf(" -- CHECK AND RESET THE DATE!\n");
	}
}
277
278 time_t
279 boottime_sec(void)
280 {
281 uint32_t sec, nanosec;
282 clock_get_boottime_nanotime(&sec, &nanosec);
283 return (sec);
284 }
285
286 uint64_t tvtoabstime(struct timeval *tvp);
287
288 /*
289 * Get value of an interval timer. The process virtual and
290 * profiling virtual time timers are kept internally in the
291 * way they are specified externally: in time until they expire.
292 *
293 * The real time interval timer expiration time (p_rtime)
294 * is kept as an absolute time rather than as a delta, so that
295 * it is easy to keep periodic real-time signals from drifting.
296 *
297 * Virtual time timers are processed in the hardclock() routine of
298 * kern_clock.c. The real time timer is processed by a callout
299 * routine. Since a callout may be delayed in real time due to
300 * other processing in the system, it is possible for the real
301 * time callout routine (realitexpire, given below), to be delayed
302 * in real time past when it is supposed to occur. It does not
303 * suffice, therefore, to reload the real time .it_value from the
304 * real time .it_interval. Rather, we compute the next time in
305 * absolute time when the timer should go off.
306 */
307
/* ARGSUSED */
/*
 * getitimer
 *
 * Return the current value of the interval timer selected by
 * uap->which, converting the internally-stored absolute real-time
 * deadline (p_rtime) back into a time-until-expiry delta.
 */
int
getitimer(struct proc *p, register struct getitimer_args *uap, __unused register_t *retval)
{
	struct itimerval aitv;

	/* only ITIMER_REAL / ITIMER_VIRTUAL / ITIMER_PROF are valid */
	if (uap->which > ITIMER_PROF)
		return(EINVAL);
	if (uap->which == ITIMER_REAL) {
		/*
		 * If time for real time timer has passed return 0,
		 * else return difference between current time and
		 * time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&p->p_rtime)) {
			struct timeval now;

			/* p_rtime is an absolute uptime; see setitimer() */
			microuptime(&now);
			if (timercmp(&p->p_rtime, &now, <))
				timerclear(&aitv.it_value);
			else {
				aitv.it_value = p->p_rtime;
				timevalsub(&aitv.it_value, &now);
			}
		}
		else
			timerclear(&aitv.it_value);
	}
	else
		aitv = p->p_stats->p_timer[uap->which];

	/* copy out in the caller's native itimerval layout */
	if (IS_64BIT_PROCESS(p)) {
		struct user_itimerval user_itv;
		user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
		user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
		user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
		user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
		return (copyout((caddr_t)&user_itv, uap->itv, sizeof (struct user_itimerval)));
	} else {
		return (copyout((caddr_t)&aitv, uap->itv, sizeof (struct itimerval)));
	}
}
351
352 /* ARGSUSED */
353 int
354 setitimer(p, uap, retval)
355 struct proc *p;
356 register struct setitimer_args *uap;
357 register_t *retval;
358 {
359 struct itimerval aitv;
360 user_addr_t itvp;
361 int error;
362
363 if (uap->which > ITIMER_PROF)
364 return (EINVAL);
365 if ((itvp = uap->itv)) {
366 if (IS_64BIT_PROCESS(p)) {
367 struct user_itimerval user_itv;
368 if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof (struct user_itimerval))))
369 return (error);
370 aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec;
371 aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
372 aitv.it_value.tv_sec = user_itv.it_value.tv_sec;
373 aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
374 } else {
375 if ((error = copyin(itvp, (caddr_t)&aitv, sizeof (struct itimerval))))
376 return (error);
377 }
378 }
379 if ((uap->itv = uap->oitv) && (error = getitimer(p, (struct getitimer_args *)uap, retval)))
380 return (error);
381 if (itvp == 0)
382 return (0);
383 if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
384 return (EINVAL);
385 if (uap->which == ITIMER_REAL) {
386 thread_call_func_cancel((thread_call_func_t)realitexpire, (void *)p->p_pid, FALSE);
387 if (timerisset(&aitv.it_value)) {
388 microuptime(&p->p_rtime);
389 timevaladd(&p->p_rtime, &aitv.it_value);
390 thread_call_func_delayed(
391 (thread_call_func_t)realitexpire, (void *)p->p_pid,
392 tvtoabstime(&p->p_rtime));
393 }
394 else
395 timerclear(&p->p_rtime);
396
397 p->p_realtimer = aitv;
398 }
399 else
400 p->p_stats->p_timer[uap->which] = aitv;
401
402 return (0);
403 }
404
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 *
 * Runs as a thread_call callout; `pid` is the target process id
 * smuggled through the void * argument (armed in setitimer()).
 */
void
realitexpire(
	void *pid)
{
	register struct proc *p;
	struct timeval now;
	boolean_t funnel_state;

	/* enter the kernel funnel for the duration of the callout */
	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	p = pfind((pid_t)pid);
	if (p == NULL) {
		/* process exited before the callout fired; nothing to do */
		(void) thread_funnel_set(kernel_flock, FALSE);
		return;
	}

	if (!timerisset(&p->p_realtimer.it_interval)) {
		/* one-shot timer: signal and disarm */
		timerclear(&p->p_rtime);
		psignal(p, SIGALRM);

		(void) thread_funnel_set(kernel_flock, FALSE);
		return;
	}

	microuptime(&now);
	timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
	if (timercmp(&p->p_rtime, &now, <=)) {
		/*
		 * The callout fired late by one or more whole periods.
		 * If only slightly behind (within ~2s), step forward one
		 * interval at a time, compressing the missed expirations
		 * into this single SIGALRM; otherwise restart the timer
		 * relative to now.
		 */
		if ((p->p_rtime.tv_sec + 2) >= now.tv_sec) {
			for (;;) {
				timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
				if (timercmp(&p->p_rtime, &now, >))
					break;
			}
		}
		else {
			p->p_rtime = p->p_realtimer.it_interval;
			timevaladd(&p->p_rtime, &now);
		}
	}

	psignal(p, SIGALRM);

	/* rearm the callout for the next absolute deadline */
	thread_call_func_delayed((thread_call_func_t)realitexpire, pid, tvtoabstime(&p->p_rtime));

	(void) thread_funnel_set(kernel_flock, FALSE);
}
458
459 /*
460 * Check that a proposed value to load into the .it_value or
461 * .it_interval part of an interval timer is acceptable, and
462 * fix it to have at least minimal value (i.e. if it is less
463 * than the resolution of the clock, round it up.)
464 */
465 int
466 itimerfix(tv)
467 struct timeval *tv;
468 {
469
470 if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
471 tv->tv_usec < 0 || tv->tv_usec >= 1000000)
472 return (EINVAL);
473 if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
474 tv->tv_usec = tick;
475 return (0);
476 }
477
/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 *
 * Fix: converted the old K&R-style definition to a prototype
 * definition, consistent with the rest of this file (and fixed
 * the "reducint" typo in the original comment).
 *
 * Returns 1 if the timer is still running, 0 if it expired.
 */
int
itimerdecr(register struct itimerval *itp, int usec)
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		/* borrow one second into the microseconds field */
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		/* periodic: reload, carrying the overshoot to avoid drift */
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	return (0);
}
520
521 /*
522 * Add and subtract routines for timevals.
523 * N.B.: subtract routine doesn't deal with
524 * results which are before the beginning,
525 * it just gets very confused in this case.
526 * Caveat emptor.
527 */
528 void
529 timevaladd(
530 struct timeval *t1,
531 struct timeval *t2)
532 {
533
534 t1->tv_sec += t2->tv_sec;
535 t1->tv_usec += t2->tv_usec;
536 timevalfix(t1);
537 }
538 void
539 timevalsub(
540 struct timeval *t1,
541 struct timeval *t2)
542 {
543
544 t1->tv_sec -= t2->tv_sec;
545 t1->tv_usec -= t2->tv_usec;
546 timevalfix(t1);
547 }
/*
 * timevalfix
 *
 * Renormalize a timeval whose tv_usec has drifted out of
 * [0, 1000000) by at most one second in either direction.
 * The two cases are mutually exclusive: a borrow leaves
 * tv_usec below 1000000, so at most one branch ever fires.
 */
void
timevalfix(
	struct timeval *t1)
{
	if (t1->tv_usec < 0) {
		/* borrow one second */
		t1->tv_sec -= 1;
		t1->tv_usec += 1000000;
	} else if (t1->tv_usec >= 1000000) {
		/* carry one second */
		t1->tv_sec += 1;
		t1->tv_usec -= 1000000;
	}
}
562
/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points.
 *
 * Reads the calendar (wall-clock) time.
 * XXX tv_sec is narrowed through a uint32_t pointer (Y2038 limit).
 */
void
microtime(
	struct timeval *tvp)
{
	clock_get_calendar_microtime((uint32_t *)&tvp->tv_sec, &tvp->tv_usec);
}
573
/*
 * microuptime
 *
 * Like microtime(), but reads the system (uptime) clock
 * rather than the calendar clock.
 */
void
microuptime(
	struct timeval *tvp)
{
	clock_get_system_microtime((uint32_t *)&tvp->tv_sec, &tvp->tv_usec);
}
580
/*
 * Ditto for timespec: calendar (wall-clock) time at nanosecond
 * resolution.
 */
void
nanotime(
	struct timespec *tsp)
{
	clock_get_calendar_nanotime((uint32_t *)&tsp->tv_sec, (uint32_t *)&tsp->tv_nsec);
}
590
/*
 * nanouptime
 *
 * System (uptime) clock at nanosecond resolution.
 */
void
nanouptime(
	struct timespec *tsp)
{
	clock_get_system_nanotime((uint32_t *)&tsp->tv_sec, (uint32_t *)&tsp->tv_nsec);
}
597
598 uint64_t
599 tvtoabstime(
600 struct timeval *tvp)
601 {
602 uint64_t result, usresult;
603
604 clock_interval_to_absolutetime_interval(
605 tvp->tv_sec, NSEC_PER_SEC, &result);
606 clock_interval_to_absolutetime_interval(
607 tvp->tv_usec, NSEC_PER_USEC, &usresult);
608
609 return (result + usresult);
610 }
/*
 * time_zone_slock_init
 *
 * Allocate and initialize the spinlock (plus its lock group and
 * attribute objects) that serializes access to the global
 * timezone `tz`.  NOTE(review): appears to be one-time startup
 * initialization — confirm against the caller.
 */
void
time_zone_slock_init(void)
{
	/* allocate lock group attribute and group */
	tz_slock_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(tz_slock_grp_attr);

	tz_slock_grp = lck_grp_alloc_init("tzlock", tz_slock_grp_attr);

	/* Allocate lock attribute */
	tz_slock_attr = lck_attr_alloc_init();
	//lck_attr_setdebug(tz_slock_attr);

	/* Allocate the spin lock */
	tz_slock = lck_spin_alloc_init(tz_slock_grp, tz_slock_attr);
}
627