/* apple/xnu (xnu-4570.41.2) — bsd/kern/kern_time.c */
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)kern_time.c 8.4 (Berkeley) 5/26/95
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70 #include <sys/param.h>
71 #include <sys/resourcevar.h>
72 #include <sys/kernel.h>
73 #include <sys/systm.h>
74 #include <sys/proc_internal.h>
75 #include <sys/kauth.h>
76 #include <sys/vnode.h>
77 #include <sys/time.h>
78 #include <sys/priv.h>
79
80 #include <sys/mount_internal.h>
81 #include <sys/sysproto.h>
82 #include <sys/signalvar.h>
83 #include <sys/protosw.h> /* for net_uptime2timeval() */
84
85 #include <kern/clock.h>
86 #include <kern/task.h>
87 #include <kern/thread_call.h>
88 #if CONFIG_MACF
89 #include <security/mac_framework.h>
90 #endif
91 #include <IOKit/IOBSD.h>
92 #include <sys/time.h>
93
#define HZ 100 /* XXX legacy clock-tick rate constant; not derived from hw */

/* simple lock used to access timezone, tz structure */
lck_spin_t * tz_slock;
lck_grp_t * tz_slock_grp;
lck_attr_t * tz_slock_attr;
lck_grp_attr_t *tz_slock_grp_attr;

/* Forward declaration: commit a validated timeval to the calendar clock. */
static void setthetime(
 struct timeval *tv);

void time_zone_slock_init(void);
106
107 /*
108 * Time of day and interval timer support.
109 *
110 * These routines provide the kernel entry points to get and set
111 * the time-of-day and per-process interval timers. Subroutines
112 * here provide support for adding and subtracting timeval structures
113 * and decrementing interval timers, optionally reloading the interval
114 * timers when they expire.
115 */
116 /* ARGSUSED */
/*
 * gettimeofday() system call.
 *
 * Copies out the current calendar time (uap->tp), the cached timezone
 * (uap->tzp), and/or a matching mach absolute time snapshot
 * (uap->mach_absolute_time) — whichever user pointers are non-NULL.
 *
 * Returns: 0 on success, or an error from copyout (e.g. EFAULT).
 */
int
gettimeofday(
	struct proc *p,
	struct gettimeofday_args *uap,
	__unused int32_t *retval)
{
	int error = 0;
	struct timezone ltz; /* local copy */
	clock_sec_t secs;
	clock_usec_t usecs;
	uint64_t mach_time;

	/* Take one coherent snapshot so tp and mach_absolute_time agree. */
	if (uap->tp || uap->mach_absolute_time) {
		clock_gettimeofday_and_absolute_time(&secs, &usecs, &mach_time);
	}

	if (uap->tp) {
		/* Casting secs through a uint32_t to match arm64 commpage */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timeval user_atv = {};
			user_atv.tv_sec = (uint32_t)secs;
			user_atv.tv_usec = usecs;
			error = copyout(&user_atv, uap->tp, sizeof(user_atv));
		} else {
			struct user32_timeval user_atv = {};
			user_atv.tv_sec = (uint32_t)secs;
			user_atv.tv_usec = usecs;
			error = copyout(&user_atv, uap->tp, sizeof(user_atv));
		}
		if (error) {
			return error;
		}
	}

	if (uap->tzp) {
		/* Snapshot the global tz under its spin lock; copyout may
		 * fault, so it must happen after the lock is dropped. */
		lck_spin_lock(tz_slock);
		ltz = tz;
		lck_spin_unlock(tz_slock);

		error = copyout((caddr_t)&ltz, CAST_USER_ADDR_T(uap->tzp), sizeof(tz));
	}

	if (error == 0 && uap->mach_absolute_time) {
		error = copyout(&mach_time, uap->mach_absolute_time, sizeof(mach_time));
	}

	return error;
}
165
166 /*
167 * XXX Y2038 bug because of setthetime() argument
168 */
169 /* ARGSUSED */
/*
 * settimeofday() system call.
 *
 * Validates privilege (entitlement, MAC policy, or superuser), copies in
 * and validates the new time/timezone, then commits them.
 *
 * Returns: 0, EPERM (pre-epoch time or suser failure), or copyin error.
 */
int
settimeofday(__unused struct proc *p, struct settimeofday_args *uap, __unused int32_t *retval)
{
	struct timeval atv;
	struct timezone atz;
	int error;

	bzero(&atv, sizeof(atv));

	/* Check that this task is entitled to set the time or it is root */
	if (!IOTaskHasEntitlement(current_task(), SETTIME_ENTITLEMENT)) {

#if CONFIG_MACF
		/* MAC policy may veto the operation even for root. */
		error = mac_system_check_settime(kauth_cred_get());
		if (error)
			return (error);
#endif
#ifndef CONFIG_EMBEDDED
		/* Non-embedded platforms fall back to the superuser check. */
		if ((error = suser(kauth_cred_get(), &p->p_acflag)))
			return (error);
#endif
	}

	/* Verify all parameters before changing time */
	if (uap->tv) {
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timeval user_atv;
			error = copyin(uap->tv, &user_atv, sizeof(user_atv));
			/* XXX Y2038: tv_sec may be narrowed here (see the
			 * comment above this function). */
			atv.tv_sec = user_atv.tv_sec;
			atv.tv_usec = user_atv.tv_usec;
		} else {
			struct user32_timeval user_atv;
			error = copyin(uap->tv, &user_atv, sizeof(user_atv));
			atv.tv_sec = user_atv.tv_sec;
			atv.tv_usec = user_atv.tv_usec;
		}
		if (error)
			return (error);
	}
	if (uap->tzp && (error = copyin(uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);
	if (uap->tv) {
		/* Normalize tv_usec, then refuse to set a pre-epoch time. */
		timevalfix(&atv);
		if (atv.tv_sec < 0 || (atv.tv_sec == 0 && atv.tv_usec < 0))
			return (EPERM);
		setthetime(&atv);
	}
	if (uap->tzp) {
		/* Publish the new timezone under the tz spin lock. */
		lck_spin_lock(tz_slock);
		tz = atz;
		lck_spin_unlock(tz_slock);
	}
	return (0);
}
224
/*
 * Commit a validated, normalized timeval to the calendar clock.
 * Callers are responsible for privilege and range checks.
 */
static void
setthetime(
	struct timeval *tv)
{
	clock_set_calendar_microtime(tv->tv_sec, tv->tv_usec);
}
231
232 /*
233 * Verify the calendar value. If negative,
234 * reset to zero (the epoch).
235 */
void
inittodr(
	__unused time_t base)
{
	struct timeval tv;

	/*
	 * Assertion:
	 * The calendar has already been
	 * set up from the platform clock.
	 *
	 * The value returned by microtime()
	 * is gotten from the calendar.
	 */
	microtime(&tv);

	if (tv.tv_sec < 0 || tv.tv_usec < 0) {
		/* Negative calendar time is nonsense: reset to the epoch.
		 * The two printfs deliberately form one message line. */
		printf ("WARNING: preposterous time in Real Time Clock");
		tv.tv_sec = 0; /* the UNIX epoch */
		tv.tv_usec = 0;
		setthetime(&tv);
		printf(" -- CHECK AND RESET THE DATE!\n");
	}
}
260
261 time_t
262 boottime_sec(void)
263 {
264 clock_sec_t secs;
265 clock_nsec_t nanosecs;
266
267 clock_get_boottime_nanotime(&secs, &nanosecs);
268 return (secs);
269 }
270
271 void
272 boottime_timeval(struct timeval *tv)
273 {
274 clock_sec_t secs;
275 clock_usec_t microsecs;
276
277 clock_get_boottime_microtime(&secs, &microsecs);
278
279 tv->tv_sec = secs;
280 tv->tv_usec = microsecs;
281 }
282
283 /*
284 * Get value of an interval timer. The process virtual and
285 * profiling virtual time timers are kept internally in the
286 * way they are specified externally: in time until they expire.
287 *
288 * The real time interval timer expiration time (p_rtime)
289 * is kept as an absolute time rather than as a delta, so that
290 * it is easy to keep periodic real-time signals from drifting.
291 *
292 * The real time timer is processed by a callout routine.
293 * Since a callout may be delayed in real time due to
294 * other processing in the system, it is possible for the real
295 * time callout routine (realitexpire, given below), to be delayed
296 * in real time past when it is supposed to occur. It does not
297 * suffice, therefore, to reload the real time .it_value from the
298 * real time .it_interval. Rather, we compute the next time in
299 * absolute time when the timer should go off.
300 *
301 * Returns: 0 Success
302 * EINVAL Invalid argument
303 * copyout:EFAULT Bad address
304 */
305 /* ARGSUSED */
/* ARGSUSED */
int
getitimer(struct proc *p, struct getitimer_args *uap, __unused int32_t *retval)
{
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return(EINVAL);

	bzero(&aitv, sizeof(aitv));

	/* Snapshot the requested timer under the proc spin lock. */
	proc_spinlock(p);
	switch (uap->which) {

	case ITIMER_REAL:
		/*
		 * If time for real time timer has passed return 0,
		 * else return difference between current time and
		 * time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&p->p_rtime)) {
			struct timeval now;

			microuptime(&now);
			if (timercmp(&p->p_rtime, &now, <))
				timerclear(&aitv.it_value);
			else {
				/* p_rtime is absolute uptime; convert back
				 * to the remaining delta for the caller. */
				aitv.it_value = p->p_rtime;
				timevalsub(&aitv.it_value, &now);
			}
		}
		else
			timerclear(&aitv.it_value);
		break;

	case ITIMER_VIRTUAL:
		aitv = p->p_vtimer_user;
		break;

	case ITIMER_PROF:
		aitv = p->p_vtimer_prof;
		break;
	}

	proc_spinunlock(p);

	/* Copy out in the caller's ABI layout (64- vs 32-bit timevals).
	 * copyout happens after the lock is dropped since it may fault. */
	if (IS_64BIT_PROCESS(p)) {
		struct user64_itimerval user_itv;
		bzero(&user_itv, sizeof (user_itv));
		user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
		user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
		user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
		user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
		return (copyout((caddr_t)&user_itv, uap->itv, sizeof (user_itv)));
	} else {
		struct user32_itimerval user_itv;
		bzero(&user_itv, sizeof (user_itv));
		user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
		user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
		user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
		user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
		return (copyout((caddr_t)&user_itv, uap->itv, sizeof (user_itv)));
	}
}
370
371 /*
372 * Returns: 0 Success
373 * EINVAL Invalid argument
374 * copyin:EFAULT Bad address
375 * getitimer:EINVAL Invalid argument
376 * getitimer:EFAULT Bad address
377 */
378 /* ARGSUSED */
/* ARGSUSED */
int
setitimer(struct proc *p, struct setitimer_args *uap, int32_t *retval)
{
	struct itimerval aitv;
	user_addr_t itvp;
	int error;

	bzero(&aitv, sizeof(aitv));

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	if ((itvp = uap->itv)) {
		/* Copy in the new value in the caller's ABI layout. */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_itimerval user_itv;
			if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof (user_itv))))
				return (error);
			aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec;
			aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
			aitv.it_value.tv_sec = user_itv.it_value.tv_sec;
			aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
		} else {
			struct user32_itimerval user_itv;
			if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof (user_itv))))
				return (error);
			aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec;
			aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
			aitv.it_value.tv_sec = user_itv.it_value.tv_sec;
			aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
		}
	}
	/*
	 * If the caller wants the old value back (oitv), alias uap->itv to
	 * oitv and reuse uap as a getitimer_args so getitimer() performs
	 * the copyout. itvp still holds the original itv pointer.
	 */
	if ((uap->itv = uap->oitv) && (error = getitimer(p, (struct getitimer_args *)uap, retval)))
		return (error);
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);

	switch (uap->which) {

	case ITIMER_REAL:
		proc_spinlock(p);
		if (timerisset(&aitv.it_value)) {
			/* Arm: p_rtime becomes the absolute uptime of the
			 * next expiry; bump p_ractive if a new thread call
			 * activation was created. */
			microuptime(&p->p_rtime);
			timevaladd(&p->p_rtime, &aitv.it_value);
			p->p_realtimer = aitv;
			if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL,
			    tvtoabstime(&p->p_rtime), 0, THREAD_CALL_DELAY_USER_NORMAL))
				p->p_ractive++;
		} else {
			/* Disarm: clear state and cancel any pending call. */
			timerclear(&p->p_rtime);
			p->p_realtimer = aitv;
			if (thread_call_cancel(p->p_rcall))
				p->p_ractive--;
		}
		proc_spinunlock(p);

		break;


	case ITIMER_VIRTUAL:
		/* Tell the task layer whether user-mode vtimer ticks are
		 * needed; done outside the proc spin lock. */
		if (timerisset(&aitv.it_value))
			task_vtimer_set(p->task, TASK_VTIMER_USER);
		else
			task_vtimer_clear(p->task, TASK_VTIMER_USER);

		proc_spinlock(p);
		p->p_vtimer_user = aitv;
		proc_spinunlock(p);
		break;

	case ITIMER_PROF:
		if (timerisset(&aitv.it_value))
			task_vtimer_set(p->task, TASK_VTIMER_PROF);
		else
			task_vtimer_clear(p->task, TASK_VTIMER_PROF);

		proc_spinlock(p);
		p->p_vtimer_prof = aitv;
		proc_spinunlock(p);
		break;
	}

	return (0);
}
463
464 /*
465 * Real interval timer expired:
466 * send process whose timer expired an alarm signal.
467 * If time is not set up to reload, then just return.
468 * Else compute next time timer should go off which is > current time.
469 * This is where delay in processing this timeout causes multiple
470 * SIGALRM calls to be compressed into one.
471 */
/*
 * Thread-call callout for the ITIMER_REAL timer.
 * Delivers SIGALRM and, for periodic timers, re-arms the next expiry
 * as an absolute time so periodic signals do not drift.
 */
void
realitexpire(
	struct proc *p)
{
	struct proc *r;
	struct timeval t;

	/* Take a proc ref; returns a different/NULL proc if p is exiting. */
	r = proc_find(p->p_pid);

	proc_spinlock(p);

	assert(p->p_ractive > 0);

	if (--p->p_ractive > 0 || r != p) {
		/*
		 * bail, because either proc is exiting
		 * or there's another active thread call
		 */
		proc_spinunlock(p);

		if (r != NULL)
			proc_rele(r);
		return;
	}

	if (!timerisset(&p->p_realtimer.it_interval)) {
		/*
		 * p_realtimer was cleared while this call was pending,
		 * send one last SIGALRM, but don't re-arm
		 */
		timerclear(&p->p_rtime);
		proc_spinunlock(p);

		psignal(p, SIGALRM);
		proc_rele(p);
		return;
	}

	proc_spinunlock(p);

	/*
	 * Send the signal before re-arming the next thread call,
	 * so in case psignal blocks, we won't create yet another thread call.
	 */

	psignal(p, SIGALRM);

	proc_spinlock(p);

	/* Should we still re-arm the next thread call? */
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_rtime);
		proc_spinunlock(p);

		proc_rele(p);
		return;
	}

	microuptime(&t);
	/* Advance the absolute expiry by one interval. */
	timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);

	if (timercmp(&p->p_rtime, &t, <=)) {
		if ((p->p_rtime.tv_sec + 2) >= t.tv_sec) {
			/*
			 * Slightly behind (< ~2s): step forward one interval
			 * at a time until the expiry is in the future. This
			 * keeps the timer phase-locked, compressing missed
			 * expirations into a single SIGALRM.
			 */
			for (;;) {
				timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
				if (timercmp(&p->p_rtime, &t, >))
					break;
			}
		} else {
			/* Far behind: abandon the phase, rebase from now. */
			p->p_rtime = p->p_realtimer.it_interval;
			timevaladd(&p->p_rtime, &t);
		}
	}

	assert(p->p_rcall != NULL);

	/* Re-arm; count a new activation unless one was already pending. */
	if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL, tvtoabstime(&p->p_rtime), 0,
	    THREAD_CALL_DELAY_USER_NORMAL)) {
		p->p_ractive++;
	}

	proc_spinunlock(p);

	proc_rele(p);
}
557
558 /*
559 * Called once in proc_exit to clean up after an armed or pending realitexpire
560 *
561 * This will only be called after the proc refcount is drained,
562 * so realitexpire cannot be currently holding a proc ref.
563 * i.e. it will/has gotten PROC_NULL from proc_find.
564 */
void
proc_free_realitimer(proc_t p)
{
	proc_spinlock(p);

	assert(p->p_rcall != NULL);
	assert(p->p_refcount == 0);

	/* Clearing the interval tells any concurrent realitexpire
	 * not to re-arm the thread call. */
	timerclear(&p->p_realtimer.it_interval);

	if (thread_call_cancel(p->p_rcall)) {
		/* Canceled a pending activation: drop its count. */
		assert(p->p_ractive > 0);
		p->p_ractive--;
	}

	/* Spin-wait for any in-flight realitexpire activations to drain,
	 * dropping the lock so they can make progress. */
	while (p->p_ractive > 0) {
		proc_spinunlock(p);

		delay(1);

		proc_spinlock(p);
	}

	thread_call_t call = p->p_rcall;
	p->p_rcall = NULL;

	proc_spinunlock(p);

	/* Safe to free outside the lock: no activations remain. */
	thread_call_free(call);
}
595
596 /*
597 * Check that a proposed value to load into the .it_value or
598 * .it_interval part of an interval timer is acceptable.
599 */
/*
 * Validate a proposed .it_value / .it_interval timeval:
 * seconds must lie in [0, 100000000] and microseconds in [0, 1000000).
 *
 * Returns 0 if acceptable, EINVAL otherwise.
 */
int
itimerfix(
	struct timeval *tv)
{
	if (tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
		return (EINVAL);
	}
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000) {
		return (EINVAL);
	}
	return (0);
}
610
611 int
612 timespec_is_valid(const struct timespec *ts)
613 {
614 /* The INT32_MAX limit ensures the timespec is safe for clock_*() functions
615 * which accept 32-bit ints. */
616 if (ts->tv_sec < 0 || ts->tv_sec > INT32_MAX ||
617 ts->tv_nsec < 0 || (unsigned long long)ts->tv_nsec > NSEC_PER_SEC) {
618 return 0;
619 }
620 return 1;
621 }
622
623 /*
624 * Decrement an interval timer by a specified number
625 * of microseconds, which must be less than a second,
626 * i.e. < 1000000. If the timer expires, then reload
627 * it. In this case, carry over (usec - old value) to
628 * reduce the value reloaded into the timer so that
629 * the timer does not drift. This routine assumes
630 * that it is called in a context where the timers
631 * on which it is operating cannot change in value.
632 */
/*
 * Decrement *itp by usec microseconds under the proc spin lock.
 * Returns 1 if the timer has not yet expired, 0 if it expired
 * (in which case a periodic timer is reloaded from it_interval,
 * carrying over the overshoot so the timer does not drift).
 */
int
itimerdecr(proc_t p,
	struct itimerval *itp, int usec)
{

	proc_spinlock(p);

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		/* Borrow one second so the subtraction below works. */
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		/* Time remains: report "not expired". */
		proc_spinunlock(p);
		return (1);
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		/* Periodic: reload, reduced by the overshoot (usec). */
		itp->it_value = itp->it_interval;
		if (itp->it_value.tv_sec > 0) {
			itp->it_value.tv_usec -= usec;
			if (itp->it_value.tv_usec < 0) {
				itp->it_value.tv_usec += 1000000;
				itp->it_value.tv_sec--;
			}
		}
	} else
		itp->it_value.tv_usec = 0; /* sec is already 0 */
	proc_spinunlock(p);
	return (0);
}
671
672 /*
673 * Add and subtract routines for timevals.
674 * N.B.: subtract routine doesn't deal with
675 * results which are before the beginning,
676 * it just gets very confused in this case.
677 * Caveat emptor.
678 */
/*
 * Accumulate *t2 into *t1, then renormalize tv_usec into [0, 1000000).
 */
void
timevaladd(
	struct timeval *t1,
	struct timeval *t2)
{
	t1->tv_usec += t2->tv_usec;
	t1->tv_sec += t2->tv_sec;
	timevalfix(t1);
}
/*
 * Subtract *t2 from *t1 in place, then renormalize tv_usec.
 * Results before the beginning of time are not handled (see the
 * caveat in the comment block above).
 */
void
timevalsub(
	struct timeval *t1,
	struct timeval *t2)
{
	t1->tv_usec -= t2->tv_usec;
	t1->tv_sec -= t2->tv_sec;
	timevalfix(t1);
}
/*
 * Fold an out-of-range tv_usec back into [0, 1000000), adjusting
 * tv_sec by one. Assumes at most one second of drift either way
 * (which is all timevaladd/timevalsub can produce).
 */
void
timevalfix(
	struct timeval *t1)
{
	if (t1->tv_usec >= 1000000) {
		t1->tv_usec -= 1000000;
		t1->tv_sec++;
	} else if (t1->tv_usec < 0) {
		t1->tv_usec += 1000000;
		t1->tv_sec--;
	}
}
713
714 /*
715 * Return the best possible estimate of the time in the timeval
716 * to which tvp points.
717 */
718 void
719 microtime(
720 struct timeval *tvp)
721 {
722 clock_sec_t tv_sec;
723 clock_usec_t tv_usec;
724
725 clock_get_calendar_microtime(&tv_sec, &tv_usec);
726
727 tvp->tv_sec = tv_sec;
728 tvp->tv_usec = tv_usec;
729 }
730
731 void
732 microtime_with_abstime(
733 struct timeval *tvp, uint64_t *abstime)
734 {
735 clock_sec_t tv_sec;
736 clock_usec_t tv_usec;
737
738 clock_get_calendar_absolute_and_microtime(&tv_sec, &tv_usec, abstime);
739
740 tvp->tv_sec = tv_sec;
741 tvp->tv_usec = tv_usec;
742 }
743
744 void
745 microuptime(
746 struct timeval *tvp)
747 {
748 clock_sec_t tv_sec;
749 clock_usec_t tv_usec;
750
751 clock_get_system_microtime(&tv_sec, &tv_usec);
752
753 tvp->tv_sec = tv_sec;
754 tvp->tv_usec = tv_usec;
755 }
756
757 /*
758 * Ditto for timespec.
759 */
760 void
761 nanotime(
762 struct timespec *tsp)
763 {
764 clock_sec_t tv_sec;
765 clock_nsec_t tv_nsec;
766
767 clock_get_calendar_nanotime(&tv_sec, &tv_nsec);
768
769 tsp->tv_sec = tv_sec;
770 tsp->tv_nsec = tv_nsec;
771 }
772
773 void
774 nanouptime(
775 struct timespec *tsp)
776 {
777 clock_sec_t tv_sec;
778 clock_nsec_t tv_nsec;
779
780 clock_get_system_nanotime(&tv_sec, &tv_nsec);
781
782 tsp->tv_sec = tv_sec;
783 tsp->tv_nsec = tv_nsec;
784 }
785
786 uint64_t
787 tvtoabstime(
788 struct timeval *tvp)
789 {
790 uint64_t result, usresult;
791
792 clock_interval_to_absolutetime_interval(
793 tvp->tv_sec, NSEC_PER_SEC, &result);
794 clock_interval_to_absolutetime_interval(
795 tvp->tv_usec, NSEC_PER_USEC, &usresult);
796
797 return (result + usresult);
798 }
799
800 uint64_t
801 tstoabstime(struct timespec *ts)
802 {
803 uint64_t abstime_s, abstime_ns;
804 clock_interval_to_absolutetime_interval(ts->tv_sec, NSEC_PER_SEC, &abstime_s);
805 clock_interval_to_absolutetime_interval(ts->tv_nsec, 1, &abstime_ns);
806 return abstime_s + abstime_ns;
807 }
808
809 #if NETWORKING
810 /*
811 * ratecheck(): simple time-based rate-limit checking.
812 */
813 int
814 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
815 {
816 struct timeval tv, delta;
817 int rv = 0;
818
819 net_uptime2timeval(&tv);
820 delta = tv;
821 timevalsub(&delta, lasttime);
822
823 /*
824 * check for 0,0 is so that the message will be seen at least once,
825 * even if interval is huge.
826 */
827 if (timevalcmp(&delta, mininterval, >=) ||
828 (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
829 *lasttime = tv;
830 rv = 1;
831 }
832
833 return (rv);
834 }
835
836 /*
837 * ppsratecheck(): packets (or events) per second limitation.
838 */
/*
 * ppsratecheck(): packets (or events) per second limitation.
 * Returns 1 when the event should be allowed, 0 when rate-limited.
 * *curpps is always incremented (with wrap-around protection) so it
 * can double as a statistics counter.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	net_uptime2timeval(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * Check for 0,0 so that the message will be seen at least once.
	 * If more than one second has passed since the last update of
	 * lasttime, reset the counter.
	 *
	 * we do increment *curpps even in *curpps < maxpps case, as some may
	 * try to use *curpps for stat purposes as well.
	 */
	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;		/* negative maxpps means "unlimited" */
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /* DIAGNOSTIC? */
	/* be careful about wrap-around */
	if (*curpps + 1 > 0)
		*curpps = *curpps + 1;
#else
	/*
	 * assume that there's not too many calls to this function.
	 * not sure if the assumption holds, as it depends on *caller's*
	 * behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumption on the caller's behavior,
	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
886 #endif /* NETWORKING */
887
888 void
889 time_zone_slock_init(void)
890 {
891 /* allocate lock group attribute and group */
892 tz_slock_grp_attr = lck_grp_attr_alloc_init();
893
894 tz_slock_grp = lck_grp_alloc_init("tzlock", tz_slock_grp_attr);
895
896 /* Allocate lock attribute */
897 tz_slock_attr = lck_attr_alloc_init();
898
899 /* Allocate the spin lock */
900 tz_slock = lck_spin_alloc_init(tz_slock_grp, tz_slock_attr);
901 }