/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <sys/mount.h>

#include <kern/clock.h>

#define HZ	100	/* XXX */

volatile struct timeval time;
/* simple lock used to access timezone, tz structure */
decl_simple_lock_data(, tz_slock);
/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
struct gettimeofday_args {
	struct timeval *tp;
	struct timezone *tzp;
};
/* ARGSUSED */
int
gettimeofday(p, uap, retval)
	struct proc *p;
	register struct gettimeofday_args *uap;
	register_t *retval;
{
	struct timeval atv;
	int error = 0;
	extern simple_lock_data_t tz_slock;
	struct timezone ltz;	/* local copy */

	/* NOTE: this implementation is for non-PPC architectures only */

	if (uap->tp) {
		clock_get_calendar_microtime(&atv.tv_sec, &atv.tv_usec);
		if (error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
				sizeof (atv)))
			return(error);
	}

	if (uap->tzp) {
		usimple_lock(&tz_slock);
		ltz = tz;
		usimple_unlock(&tz_slock);
		error = copyout((caddr_t)&ltz, (caddr_t)uap->tzp,
				sizeof (tz));
	}

	return(error);
}

struct settimeofday_args {
	struct timeval *tv;
	struct timezone *tzp;
};
/* ARGSUSED */
int
settimeofday(p, uap, retval)
	struct proc *p;
	struct settimeofday_args *uap;
	register_t *retval;
{
	struct timeval atv;
	struct timezone atz;
	int error, s;
	extern simple_lock_data_t tz_slock;

	if (error = suser(p->p_ucred, &p->p_acflag))
		return (error);
	/* Verify all parameters before changing time. */
	if (uap->tv && (error = copyin((caddr_t)uap->tv,
			(caddr_t)&atv, sizeof(atv))))
		return (error);
	if (uap->tzp && (error = copyin((caddr_t)uap->tzp,
			(caddr_t)&atz, sizeof(atz))))
		return (error);
	if (uap->tv)
		setthetime(&atv);
	if (uap->tzp) {
		usimple_lock(&tz_slock);
		tz = atz;
		usimple_unlock(&tz_slock);
	}
	return (0);
}

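/*
 * Set the calendar clock and keep boottime consistent with the jump,
 * so that (time - boottime) still reflects the time elapsed since boot.
 * The NFS lease code is told about the shift when it is configured in.
 */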
setthetime(tv)
	struct timeval *tv;
{
	long delta = tv->tv_sec - time.tv_sec;

	clock_set_calendar_microtime(tv->tv_sec, tv->tv_usec);
	boottime.tv_sec += delta;
#if NFSCLIENT || NFSSERVER
	lease_updatetime(delta);
#endif
}

struct adjtime_args {
	struct timeval *delta;
	struct timeval *olddelta;
};
/* ARGSUSED */
int
adjtime(p, uap, retval)
	struct proc *p;
	register struct adjtime_args *uap;
	register_t *retval;
{
	struct timeval atv;
	int error;

	if (error = suser(p->p_ucred, &p->p_acflag))
		return (error);
	if (error = copyin((caddr_t)uap->delta,
			(caddr_t)&atv, sizeof (struct timeval)))
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 */
	clock_adjtime(&atv.tv_sec, &atv.tv_usec);

	if (uap->olddelta) {
		(void) copyout((caddr_t)&atv,
			(caddr_t)uap->olddelta, sizeof (struct timeval));
	}

	return (0);
}

/*
 * Initialize the time of day register.
 * Trust the RTC except for the case where it is set before
 * the UNIX epoch.  In that case use the UNIX epoch.
 * The argument passed in is ignored.
 */
void
inittodr(base)
	time_t base;
{
	struct timeval tv;

	/*
	 * Assertion:
	 * The calendar has already been
	 * set up from the battery clock.
	 *
	 * The value returned by microtime()
	 * is gotten from the calendar.
	 */
	microtime(&tv);

	time = tv;
	boottime.tv_sec = tv.tv_sec;
	boottime.tv_usec = 0;

	/*
	 * If the RTC does not have an acceptable value, i.e. a time before
	 * the UNIX epoch, set it to the UNIX epoch
	 */
	if (tv.tv_sec < 0) {
		printf ("WARNING: preposterous time in Real Time Clock");
		time.tv_sec = 0;	/* the UNIX epoch */
		time.tv_usec = 0;
		setthetime(&time);
		boottime = time;
		printf(" -- CHECK AND RESET THE DATE!\n");
	}

	return;
}

void	timevaladd(
			struct timeval	*t1,
			struct timeval	*t2);
void	timevalsub(
			struct timeval	*t1,
			struct timeval	*t2);
void	timevalfix(
			struct timeval	*t1);

uint64_t
		tvtoabstime(
			struct timeval	*tvp);

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer expiration time (p_rtime)
 * is kept as an absolute time rather than as a delta, so that
 * it is easy to keep periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a callout
 * routine.  Since a callout may be delayed in real time due to
 * other processing in the system, it is possible for the real
 * time callout routine (realitexpire, given below) to be delayed
 * in real time past when it is supposed to occur.  It does not
 * suffice, therefore, to reload the real time .it_value from the
 * real time .it_interval.  Rather, we compute the next time in
 * absolute time when the timer should go off.
 */

struct getitimer_args {
	u_int which;
	struct itimerval *itv;
};
/* ARGSUSED */
int
getitimer(p, uap, retval)
	struct proc *p;
	register struct getitimer_args *uap;
	register_t *retval;
{
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return(EINVAL);
	if (uap->which == ITIMER_REAL) {
		/*
		 * If the time for the real time timer has passed,
		 * return 0; else return the difference between the
		 * current time and the time the timer is set to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&p->p_rtime)) {
			struct timeval now;

			microuptime(&now);
			if (timercmp(&p->p_rtime, &now, <))
				timerclear(&aitv.it_value);
			else {
				aitv.it_value = p->p_rtime;
				timevalsub(&aitv.it_value, &now);
			}
		}
		else
			timerclear(&aitv.it_value);
	}
	else
		aitv = p->p_stats->p_timer[uap->which];

	return (copyout((caddr_t)&aitv,
			(caddr_t)uap->itv, sizeof (struct itimerval)));
}

struct setitimer_args {
	u_int which;
	struct itimerval *itv;
	struct itimerval *oitv;
};
/* ARGSUSED */
int
setitimer(p, uap, retval)
	struct proc *p;
	register struct setitimer_args *uap;
	register_t *retval;
{
	struct itimerval aitv;
	register struct itimerval *itvp;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	if ((itvp = uap->itv) &&
		(error = copyin((caddr_t)itvp,
			(caddr_t)&aitv, sizeof (struct itimerval))))
		return (error);
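	/*
	 * Return the previous setting, if requested, by pointing the
	 * getitimer() copyout at the caller's oitv buffer.
	 */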
	if ((uap->itv = uap->oitv) && (error = getitimer(p, uap, retval)))
		return (error);
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	if (uap->which == ITIMER_REAL) {
		thread_call_func_cancel(realitexpire, (void *)p->p_pid, FALSE);
		if (timerisset(&aitv.it_value)) {
			microuptime(&p->p_rtime);
			timevaladd(&p->p_rtime, &aitv.it_value);
			thread_call_func_delayed(
				realitexpire, (void *)p->p_pid,
				tvtoabstime(&p->p_rtime));
		}
		else
			timerclear(&p->p_rtime);

		p->p_realtimer = aitv;
	}
	else
		p->p_stats->p_timer[uap->which] = aitv;

	return (0);
}

/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If the timer is not set up to reload, then just return.
 * Else compute the next time the timer should go off, which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(
	void		*pid)
{
	register struct proc *p;
	struct timeval	now;
	boolean_t	funnel_state = thread_funnel_set(kernel_flock, TRUE);

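	/*
	 * This runs from a thread_call callout outside the kernel funnel;
	 * the funnel was taken above, before we look up the proc and touch
	 * its timer and signal state.
	 */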
	p = pfind((pid_t)pid);
	if (p == NULL) {
		(void) thread_funnel_set(kernel_flock, FALSE);
		return;
	}

	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_rtime);
		psignal(p, SIGALRM);

		(void) thread_funnel_set(kernel_flock, FALSE);
		return;
	}

	microuptime(&now);
	timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
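	/*
	 * If the callout was delayed and the new expiration is already in
	 * the past, catch up: when we are within about two seconds, advance
	 * the expiration interval by interval until it is in the future;
	 * when we are further behind, simply rebase it off the current time.
	 */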
	if (timercmp(&p->p_rtime, &now, <=)) {
		if ((p->p_rtime.tv_sec + 2) >= now.tv_sec) {
			for (;;) {
				timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
				if (timercmp(&p->p_rtime, &now, >))
					break;
			}
		}
		else {
			p->p_rtime = p->p_realtimer.it_interval;
			timevaladd(&p->p_rtime, &now);
		}
	}

	psignal(p, SIGALRM);

	thread_call_func_delayed(realitexpire, pid, tvtoabstime(&p->p_rtime));

	(void) thread_funnel_set(kernel_flock, FALSE);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least a minimal value (i.e., if it is less
 * than the resolution of the clock, round it up).
 */
int
itimerfix(tv)
	struct timeval *tv;
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
			tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
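/*
 * Illustrative example (values hypothetical): with it_interval = 10000us,
 * it_value = 3000us and usec = 4000us, the timer expires with 1000us of
 * the next period already consumed, so it is reloaded with
 * 10000us - 1000us = 9000us and 0 is returned to signal expiry.
 */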
int
itimerdecr(itp, usec)
	register struct itimerval *itp;
	int usec;
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: the subtract routine doesn't deal with
 * results which are before the beginning;
 * it just gets very confused in that case.
 * Caveat emptor.
 */
void
timevaladd(
	struct timeval	*t1,
	struct timeval	*t2)
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}
void
timevalsub(
	struct timeval	*t1,
	struct timeval	*t2)
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}
void
timevalfix(
	struct timeval	*t1)
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points.
 */
void
microtime(
	struct timeval	*tvp)
{
	clock_get_calendar_microtime(&tvp->tv_sec, &tvp->tv_usec);
}

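/*
 * Same, but for time since boot (the system clock) rather than
 * the calendar clock.
 */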
void
microuptime(
	struct timeval	*tvp)
{
	clock_get_system_microtime(&tvp->tv_sec, &tvp->tv_usec);
}

/*
 * Ditto for timespec.
 */
void
nanotime(
	struct timespec *tsp)
{
	clock_get_calendar_nanotime((uint32_t *)&tsp->tv_sec, &tsp->tv_nsec);
}

void
nanouptime(
	struct timespec *tsp)
{
	clock_get_system_nanotime((uint32_t *)&tsp->tv_sec, &tsp->tv_nsec);
}

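/*
 * Convert a timeval interval into Mach absolute time units by
 * converting the seconds and microseconds parts separately and
 * summing the results.
 */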
uint64_t
tvtoabstime(
	struct timeval	*tvp)
{
	uint64_t	result, usresult;

	clock_interval_to_absolutetime_interval(
			tvp->tv_sec, NSEC_PER_SEC, &result);
	clock_interval_to_absolutetime_interval(
			tvp->tv_usec, NSEC_PER_USEC, &usresult);

	return (result + usresult);
}
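
/*
 * Initialize the simple lock that protects the timezone (tz) structure.
 */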
void
time_zone_slock_init(void)
{
	extern simple_lock_data_t tz_slock;

	simple_lock_init(&tz_slock);
}