/* osfmk/kern/clock.c — apple/xnu (scrape header removed) */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * File: kern/clock.c
30 * Purpose: Routines for the creation and use of kernel
31 * alarm clock services. This file and the ipc
32 * routines in kern/ipc_clock.c constitute the
33 * machine-independent clock service layer.
34 */
35
36 #include <cpus.h>
37 #include <mach_host.h>
38
39 #include <mach/boolean.h>
40 #include <mach/processor_info.h>
41 #include <mach/vm_param.h>
42 #include <machine/mach_param.h>
43 #include <kern/cpu_number.h>
44 #include <kern/misc_protos.h>
45 #include <kern/lock.h>
46 #include <kern/host.h>
47 #include <kern/spl.h>
48 #include <kern/thread.h>
49 #include <kern/thread_swap.h>
50 #include <kern/ipc_host.h>
51 #include <kern/clock.h>
52 #include <kern/zalloc.h>
53 #include <ipc/ipc_port.h>
54
55 #include <mach/mach_syscalls.h>
56 #include <mach/clock_reply.h>
57 #include <mach/mach_time.h>
58
59 #include <kern/mk_timer.h>
60
61 /*
62 * Exported interface
63 */
64
65 #include <mach/clock_server.h>
66 #include <mach/mach_host_server.h>
67
/* local data declarations */
decl_simple_lock_data(static,ClockLock)		/* clock system synchronization */
static	struct	zone		*alarm_zone;	/* zone for user alarms */
static	struct	alarm		*alrmfree;	/* alarm free list pointer */
static	struct	alarm		*alrmdone;	/* alarm done list pointer */
static	long			alrm_seqno;	/* uniquely identifies alarms */
static	thread_call_data_t	alarm_deliver;	/* thread call draining alrmdone */

/* calendar (adjtime-style) adjustment state, protected by calend_adjlock */
decl_simple_lock_data(static,calend_adjlock)
static	int64_t			calend_adjtotal;	/* remaining total adjustment */
static	uint32_t		calend_adjdelta;	/* adjustment applied per period */

static	timer_call_data_t	calend_adjcall;		/* periodic adjustment callout */
static	uint64_t		calend_adjinterval, calend_adjdeadline;

/* backwards compatibility */
int		hz = HZ;		/* GET RID OF THIS !!! */
int		tick = (1000000 / HZ);	/* GET RID OF THIS !!! */

/* external declarations */
extern	struct clock	clock_list[];
extern	int	clock_count;

/* local clock subroutines */
static
void		flush_alarms(
			clock_t			clock);

static
void		post_alarm(
			clock_t			clock,
			alarm_t			alarm);

static
int		check_time(
			alarm_type_t	alarm_type,
			mach_timespec_t	*alarm_time,
			mach_timespec_t	*clock_time);

static
void		clock_alarm_deliver(
			thread_call_param_t		p0,
			thread_call_param_t		p1);

static
void		clock_calend_adjust(
			timer_call_param_t	p0,
			timer_call_param_t	p1);
116
/*
 * Macros to lock/unlock clock system.
 *
 * Raise to splclock() while holding the ClockLock simple lock.
 * Wrapped in do { } while (0) so each macro expands to exactly
 * one statement and is safe in unbraced if/else bodies.
 */
#define LOCK_CLOCK(s)			\
do {					\
	s = splclock();			\
	simple_lock(&ClockLock);	\
} while (0)

#define UNLOCK_CLOCK(s)			\
do {					\
	simple_unlock(&ClockLock);	\
	splx(s);			\
} while (0)
127
128 /*
129 * Configure the clock system. (Not sure if we need this,
130 * as separate from clock_init()).
131 */
132 void
133 clock_config(void)
134 {
135 clock_t clock;
136 register int i;
137
138 if (cpu_number() != master_cpu)
139 panic("clock_config");
140
141 /*
142 * Configure clock devices.
143 */
144 simple_lock_init(&calend_adjlock, ETAP_MISC_CLOCK);
145 simple_lock_init(&ClockLock, ETAP_MISC_CLOCK);
146 for (i = 0; i < clock_count; i++) {
147 clock = &clock_list[i];
148 if (clock->cl_ops) {
149 if ((*clock->cl_ops->c_config)() == 0)
150 clock->cl_ops = 0;
151 }
152 }
153
154 /* start alarm sequence numbers at 0 */
155 alrm_seqno = 0;
156 }
157
158 /*
159 * Initialize the clock system.
160 */
161 void
162 clock_init(void)
163 {
164 clock_t clock;
165 register int i;
166
167 /*
168 * Initialize basic clock structures.
169 */
170 for (i = 0; i < clock_count; i++) {
171 clock = &clock_list[i];
172 if (clock->cl_ops)
173 (*clock->cl_ops->c_init)();
174 }
175 }
176
177 /*
178 * Initialize the clock ipc service facility.
179 */
180 void
181 clock_service_create(void)
182 {
183 clock_t clock;
184 register int i;
185
186 mk_timer_initialize();
187
188 /*
189 * Initialize ipc clock services.
190 */
191 for (i = 0; i < clock_count; i++) {
192 clock = &clock_list[i];
193 if (clock->cl_ops) {
194 ipc_clock_init(clock);
195 ipc_clock_enable(clock);
196 }
197 }
198
199 timer_call_setup(&calend_adjcall, clock_calend_adjust, NULL);
200
201 /*
202 * Initialize clock service alarms.
203 */
204 i = sizeof(struct alarm);
205 alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");
206
207 thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);
208 }
209
210 /*
211 * Get the service port on a clock.
212 */
213 kern_return_t
214 host_get_clock_service(
215 host_t host,
216 clock_id_t clock_id,
217 clock_t *clock) /* OUT */
218 {
219 if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
220 *clock = CLOCK_NULL;
221 return (KERN_INVALID_ARGUMENT);
222 }
223
224 *clock = &clock_list[clock_id];
225 if ((*clock)->cl_ops == 0)
226 return (KERN_FAILURE);
227 return (KERN_SUCCESS);
228 }
229
230 /*
231 * Get the control port on a clock.
232 */
233 kern_return_t
234 host_get_clock_control(
235 host_priv_t host_priv,
236 clock_id_t clock_id,
237 clock_t *clock) /* OUT */
238 {
239 if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
240 *clock = CLOCK_NULL;
241 return (KERN_INVALID_ARGUMENT);
242 }
243
244 *clock = &clock_list[clock_id];
245 if ((*clock)->cl_ops == 0)
246 return (KERN_FAILURE);
247 return (KERN_SUCCESS);
248 }
249
250 /*
251 * Get the current clock time.
252 */
253 kern_return_t
254 clock_get_time(
255 clock_t clock,
256 mach_timespec_t *cur_time) /* OUT */
257 {
258 if (clock == CLOCK_NULL)
259 return (KERN_INVALID_ARGUMENT);
260 return ((*clock->cl_ops->c_gettime)(cur_time));
261 }
262
263 /*
264 * Get clock attributes.
265 */
266 kern_return_t
267 clock_get_attributes(
268 clock_t clock,
269 clock_flavor_t flavor,
270 clock_attr_t attr, /* OUT */
271 mach_msg_type_number_t *count) /* IN/OUT */
272 {
273 kern_return_t (*getattr)(
274 clock_flavor_t flavor,
275 clock_attr_t attr,
276 mach_msg_type_number_t *count);
277
278 if (clock == CLOCK_NULL)
279 return (KERN_INVALID_ARGUMENT);
280 if (getattr = clock->cl_ops->c_getattr)
281 return((*getattr)(flavor, attr, count));
282 else
283 return (KERN_FAILURE);
284 }
285
286 /*
287 * Set the current clock time.
288 */
289 kern_return_t
290 clock_set_time(
291 clock_t clock,
292 mach_timespec_t new_time)
293 {
294 mach_timespec_t *clock_time;
295 kern_return_t (*settime)(
296 mach_timespec_t *clock_time);
297 extern kern_return_t
298 calend_settime(
299 mach_timespec_t *clock_time);
300
301 if (clock == CLOCK_NULL)
302 return (KERN_INVALID_ARGUMENT);
303 if ((settime = clock->cl_ops->c_settime) == 0)
304 return (KERN_FAILURE);
305 if (settime == calend_settime)
306 return (KERN_FAILURE);
307 clock_time = &new_time;
308 if (BAD_MACH_TIMESPEC(clock_time))
309 return (KERN_INVALID_VALUE);
310
311 /*
312 * Flush all outstanding alarms.
313 */
314 flush_alarms(clock);
315
316 /*
317 * Set the new time.
318 */
319 return ((*settime)(clock_time));
320 }
321
322 /*
323 * Set the clock alarm resolution.
324 */
325 kern_return_t
326 clock_set_attributes(
327 clock_t clock,
328 clock_flavor_t flavor,
329 clock_attr_t attr,
330 mach_msg_type_number_t count)
331 {
332 kern_return_t (*setattr)(
333 clock_flavor_t flavor,
334 clock_attr_t attr,
335 mach_msg_type_number_t count);
336
337 if (clock == CLOCK_NULL)
338 return (KERN_INVALID_ARGUMENT);
339 if (setattr = clock->cl_ops->c_setattr)
340 return ((*setattr)(flavor, attr, count));
341 else
342 return (KERN_FAILURE);
343 }
344
/*
 * Setup a clock alarm.
 *
 * Validates the request, then takes an alarm structure from the
 * free list (or allocates one from alarm_zone) and posts it on
 * the clock's time-ordered alarm list.  A request whose time has
 * already passed is answered immediately with a reply message,
 * not an error return.
 */
kern_return_t
clock_alarm(
	clock_t			clock,
	alarm_type_t		alarm_type,
	mach_timespec_t		alarm_time,
	ipc_port_t		alarm_port,
	mach_msg_type_name_t	alarm_port_type)
{
	alarm_t			alarm;
	mach_timespec_t		clock_time;
	int			chkstat;
	kern_return_t		reply_code;
	spl_t			s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);
	if (IP_VALID(alarm_port) == 0)
		return (KERN_INVALID_CAPABILITY);

	/*
	 * Check alarm parameters. If parameters are invalid,
	 * send alarm message immediately.  check_time() converts
	 * a relative alarm_time to absolute in place.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);
	chkstat = check_time(alarm_type, &alarm_time, &clock_time);
	if (chkstat <= 0) {
		/* < 0: invalid request; == 0: time already passed */
		reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
		clock_alarm_reply(alarm_port, alarm_port_type,
				  reply_code, alarm_type, clock_time);
		return (KERN_SUCCESS);
	}

	/*
	 * Get alarm and add to clock alarm list.
	 */

	LOCK_CLOCK(s);
	if ((alarm = alrmfree) == 0) {
		/* free list empty: drop the lock to allocate from the zone */
		UNLOCK_CLOCK(s);
		alarm = (alarm_t) zalloc(alarm_zone);
		if (alarm == 0)
			return (KERN_RESOURCE_SHORTAGE);
		LOCK_CLOCK(s);
	}
	else
		alrmfree = alarm->al_next;

	alarm->al_status = ALARM_CLOCK;
	alarm->al_time = alarm_time;
	alarm->al_type = alarm_type;
	alarm->al_port = alarm_port;
	alarm->al_port_type = alarm_port_type;
	alarm->al_clock = clock;
	alarm->al_seqno = alrm_seqno++;
	post_alarm(clock, alarm);
	UNLOCK_CLOCK(s);

	return (KERN_SUCCESS);
}
409
/*
 * Sleep on a clock. System trap. User-level libmach clock_sleep
 * interface call takes a mach_timespec_t sleep_time argument which it
 * converts to sleep_sec and sleep_nsec arguments which are then
 * passed to clock_sleep_trap.
 */
kern_return_t
clock_sleep_trap(
	mach_port_name_t	clock_name,
	sleep_type_t		sleep_type,
	int			sleep_sec,
	int			sleep_nsec,
	mach_timespec_t		*wakeup_time)
{
	clock_t			clock;
	mach_timespec_t		swtime;
	kern_return_t		rvalue;

	/*
	 * Convert the trap parameters.  MACH_PORT_NULL selects
	 * the default system clock.
	 */
	if (clock_name != MACH_PORT_NULL)
		clock = port_name_to_clock(clock_name);
	else
		clock = &clock_list[SYSTEM_CLOCK];

	swtime.tv_sec = sleep_sec;
	swtime.tv_nsec = sleep_nsec;

	/*
	 * Call the actual clock_sleep routine.
	 */
	rvalue = clock_sleep_internal(clock, sleep_type, &swtime);

	/*
	 * Return current time as wakeup time.
	 * NOTE(review): the copyout result is ignored, so a bad
	 * wakeup_time pointer is silently tolerated — confirm this
	 * is the intended trap semantics.
	 */
	if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
		copyout((char *)&swtime, (char *)wakeup_time,
			sizeof(mach_timespec_t));
	}
	return (rvalue);
}
453
/*
 * Kernel internally callable clock sleep routine. The calling
 * thread is suspended until the requested sleep time is reached.
 *
 * On return *sleep_time holds the actual wakeup time (or the
 * current time when the requested time had already passed).
 * Returns KERN_ABORTED if the wait was interrupted.
 */
kern_return_t
clock_sleep_internal(
	clock_t			clock,
	sleep_type_t		sleep_type,
	mach_timespec_t		*sleep_time)
{
	alarm_t			alarm;
	mach_timespec_t		clock_time;
	kern_return_t		rvalue;
	int			chkstat;
	spl_t			s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);

	/*
	 * Check sleep parameters. If parameters are invalid
	 * return an error, otherwise post alarm request.
	 * check_time() converts a relative time to absolute
	 * in place.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);

	chkstat = check_time(sleep_type, sleep_time, &clock_time);
	if (chkstat < 0)
		return (KERN_INVALID_VALUE);
	rvalue = KERN_SUCCESS;
	if (chkstat > 0) {
		wait_result_t wait_result;

		/*
		 * Get alarm and add to clock alarm list.
		 */

		LOCK_CLOCK(s);
		if ((alarm = alrmfree) == 0) {
			/* free list empty: drop the lock to allocate from the zone */
			UNLOCK_CLOCK(s);
			alarm = (alarm_t) zalloc(alarm_zone);
			if (alarm == 0)
				return (KERN_RESOURCE_SHORTAGE);
			LOCK_CLOCK(s);
		}
		else
			alrmfree = alarm->al_next;

		/*
		 * Wait for alarm to occur.  The wait is abort-safe so
		 * thread termination can interrupt the sleep.
		 */
		wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
		if (wait_result == THREAD_WAITING) {
			alarm->al_time = *sleep_time;
			alarm->al_status = ALARM_SLEEP;
			post_alarm(clock, alarm);
			UNLOCK_CLOCK(s);

			wait_result = thread_block(THREAD_CONTINUE_NULL);

			/*
			 * Note if alarm expired normally or whether it
			 * was aborted. If aborted, delete alarm from
			 * clock alarm list. Return alarm to free list.
			 */
			LOCK_CLOCK(s);
			if (alarm->al_status != ALARM_DONE) {
				assert(wait_result != THREAD_AWAKENED);
				/* still pending: unlink from the clock's alarm list */
				if ((alarm->al_prev)->al_next = alarm->al_next)
					(alarm->al_next)->al_prev = alarm->al_prev;
				rvalue = KERN_ABORTED;
			}
			/* report the time stamped into the alarm at wakeup */
			*sleep_time = alarm->al_time;
			alarm->al_status = ALARM_FREE;
		} else {
			/* the wait was aborted before we could block */
			assert(wait_result == THREAD_INTERRUPTED);
			assert(alarm->al_status == ALARM_FREE);
			rvalue = KERN_ABORTED;
		}
		/* in all cases, return the alarm to the free list */
		alarm->al_next = alrmfree;
		alrmfree = alarm;
		UNLOCK_CLOCK(s);
	}
	else
		/* requested time has already passed: report current time */
		*sleep_time = clock_time;

	return (rvalue);
}
543
544 /*
545 * CLOCK INTERRUPT SERVICE ROUTINES.
546 */
547
/*
 * Service clock alarm interrupts. Called from machine dependent
 * layer at splclock(). The clock_id argument specifies the clock,
 * and the clock_time argument gives that clock's current time.
 */
void
clock_alarm_intr(
	clock_id_t		clock_id,
	mach_timespec_t		*clock_time)
{
	clock_t			clock;
	register alarm_t	alrm1;
	register alarm_t	alrm2;
	mach_timespec_t		*alarm_time;
	spl_t			s;

	clock = &clock_list[clock_id];

	/*
	 * Update clock alarm list. All alarms that are due are moved
	 * to the alarmdone list to be serviced by the alarm_thread.
	 * The list is kept in time order, so the scan stops at the
	 * first alarm that has not yet expired.
	 */

	LOCK_CLOCK(s);
	alrm1 = (alarm_t) &clock->cl_alarm;
	while (alrm2 = alrm1->al_next) {
		alarm_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
			break;

		/*
		 * Alarm has expired, so remove it from the
		 * clock alarm list.
		 */
		if (alrm1->al_next = alrm2->al_next)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_next = 0;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;	/* actual wakeup time */
			thread_wakeup((event_t)alrm2);
		}

		/*
		 * If a clock_alarm() alarm, place the alarm on
		 * the alarm done list and schedule the alarm
		 * delivery mechanism.  The thread call is only
		 * entered when the done list was previously empty.
		 */
		else {
			assert(alrm2->al_status == ALARM_CLOCK);
			if (alrm2->al_next = alrmdone)
				alrmdone->al_prev = alrm2;
			else
				thread_call_enter(&alarm_deliver);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
		}
	}

	/*
	 * Setup the clock dependent layer to deliver another
	 * interrupt for the next pending alarm.
	 */
	if (alrm2)
		(*clock->cl_ops->c_setalrm)(alarm_time);
	UNLOCK_CLOCK(s);
}
622
623 /*
624 * ALARM DELIVERY ROUTINES.
625 */
626
/*
 * clock_alarm_deliver:
 *
 * Thread call target: drain the alarm done list, sending a
 * clock_alarm_reply message for each completed clock_alarm()
 * request, then return each alarm structure to the free list.
 * The clock lock is dropped around the message send.
 */
static void
clock_alarm_deliver(
	thread_call_param_t		p0,
	thread_call_param_t		p1)
{
	register alarm_t	alrm;
	kern_return_t		code;
	spl_t			s;

	LOCK_CLOCK(s);
	while (alrm = alrmdone) {
		/* unlink the head of the done list */
		if (alrmdone = alrm->al_next)
			alrmdone->al_prev = (alarm_t) &alrmdone;
		UNLOCK_CLOCK(s);

		/* ALARM_DONE means normal expiry; anything else was aborted */
		code = (alrm->al_status == ALARM_DONE? KERN_SUCCESS: KERN_ABORTED);
		if (alrm->al_port != IP_NULL) {
			/* Deliver message to designated port */
			if (IP_VALID(alrm->al_port)) {
				clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
					alrm->al_type, alrm->al_time);
			}

			LOCK_CLOCK(s);
			alrm->al_status = ALARM_FREE;
			alrm->al_next = alrmfree;
			alrmfree = alrm;
		}
		else
			/* a queued alarm must always carry a reply port */
			panic("clock_alarm_deliver");
	}

	UNLOCK_CLOCK(s);
}
661
662 /*
663 * CLOCK PRIVATE SERVICING SUBROUTINES.
664 */
665
666 /*
667 * Flush all pending alarms on a clock. All alarms
668 * are activated and timestamped correctly, so any
669 * programs waiting on alarms/threads will proceed
670 * with accurate information.
671 */
672 static
673 void
674 flush_alarms(
675 clock_t clock)
676 {
677 register alarm_t alrm1, alrm2;
678 spl_t s;
679
680 /*
681 * Flush all outstanding alarms.
682 */
683 LOCK_CLOCK(s);
684 alrm1 = (alarm_t) &clock->cl_alarm;
685 while (alrm2 = alrm1->al_next) {
686 /*
687 * Remove alarm from the clock alarm list.
688 */
689 if (alrm1->al_next = alrm2->al_next)
690 (alrm1->al_next)->al_prev = alrm1;
691
692 /*
693 * If a clock_sleep() alarm, wakeup the thread
694 * which issued the clock_sleep() call.
695 */
696 if (alrm2->al_status == ALARM_SLEEP) {
697 alrm2->al_next = 0;
698 thread_wakeup((event_t)alrm2);
699 }
700 else {
701 /*
702 * If a clock_alarm() alarm, place the alarm on
703 * the alarm done list and wakeup the dedicated
704 * kernel alarm_thread to service the alarm.
705 */
706 assert(alrm2->al_status == ALARM_CLOCK);
707 if (alrm2->al_next = alrmdone)
708 alrmdone->al_prev = alrm2;
709 else
710 thread_wakeup((event_t)&alrmdone);
711 alrm2->al_prev = (alarm_t) &alrmdone;
712 alrmdone = alrm2;
713 }
714 }
715 UNLOCK_CLOCK(s);
716 }
717
718 /*
719 * Post an alarm on a clock's active alarm list. The alarm is
720 * inserted in time-order into the clock's active alarm list.
721 * Always called from within a LOCK_CLOCK() code section.
722 */
723 static
724 void
725 post_alarm(
726 clock_t clock,
727 alarm_t alarm)
728 {
729 register alarm_t alrm1, alrm2;
730 mach_timespec_t *alarm_time;
731 mach_timespec_t *queue_time;
732
733 /*
734 * Traverse alarm list until queue time is greater
735 * than alarm time, then insert alarm.
736 */
737 alarm_time = &alarm->al_time;
738 alrm1 = (alarm_t) &clock->cl_alarm;
739 while (alrm2 = alrm1->al_next) {
740 queue_time = &alrm2->al_time;
741 if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
742 break;
743 alrm1 = alrm2;
744 }
745 alrm1->al_next = alarm;
746 alarm->al_next = alrm2;
747 alarm->al_prev = alrm1;
748 if (alrm2)
749 alrm2->al_prev = alarm;
750
751 /*
752 * If the inserted alarm is the 'earliest' alarm,
753 * reset the device layer alarm time accordingly.
754 */
755 if (clock->cl_alarm.al_next == alarm)
756 (*clock->cl_ops->c_setalrm)(alarm_time);
757 }
758
759 /*
760 * Check the validity of 'alarm_time' and 'alarm_type'. If either
761 * argument is invalid, return a negative value. If the 'alarm_time'
762 * is now, return a 0 value. If the 'alarm_time' is in the future,
763 * return a positive value.
764 */
765 static
766 int
767 check_time(
768 alarm_type_t alarm_type,
769 mach_timespec_t *alarm_time,
770 mach_timespec_t *clock_time)
771 {
772 int result;
773
774 if (BAD_ALRMTYPE(alarm_type))
775 return (-1);
776 if (BAD_MACH_TIMESPEC(alarm_time))
777 return (-1);
778 if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
779 ADD_MACH_TIMESPEC(alarm_time, clock_time);
780
781 result = CMP_MACH_TIMESPEC(alarm_time, clock_time);
782
783 return ((result >= 0)? result: 0);
784 }
785
786 mach_timespec_t
787 clock_get_system_value(void)
788 {
789 clock_t clock = &clock_list[SYSTEM_CLOCK];
790 mach_timespec_t value;
791
792 (void) (*clock->cl_ops->c_gettime)(&value);
793
794 return value;
795 }
796
797 mach_timespec_t
798 clock_get_calendar_value(void)
799 {
800 clock_t clock = &clock_list[CALENDAR_CLOCK];
801 mach_timespec_t value = MACH_TIMESPEC_ZERO;
802
803 (void) (*clock->cl_ops->c_gettime)(&value);
804
805 return value;
806 }
807
808 void
809 clock_set_calendar_value(
810 mach_timespec_t value)
811 {
812 clock_t clock = &clock_list[CALENDAR_CLOCK];
813
814 (void) (*clock->cl_ops->c_settime)(&value);
815 }
816
817 void
818 clock_deadline_for_periodic_event(
819 uint64_t interval,
820 uint64_t abstime,
821 uint64_t *deadline)
822 {
823 assert(interval != 0);
824
825 *deadline += interval;
826
827 if (*deadline <= abstime) {
828 *deadline = abstime;
829 clock_get_uptime(&abstime);
830 *deadline += interval;
831
832 if (*deadline <= abstime) {
833 *deadline = abstime;
834 *deadline += interval;
835 }
836 }
837 }
838
/*
 * mk_timebase_info: obsolete Microkernel trap.
 *
 * Copies the absolute-time to nanoseconds conversion fraction
 * out to user space.  'delta' and the processor-to-absolute
 * ratio are always reported as 1.
 * NOTE(review): copyout failures are ignored.
 */
void
mk_timebase_info(
	uint32_t	*delta,
	uint32_t	*abs_to_ns_numer,
	uint32_t	*abs_to_ns_denom,
	uint32_t	*proc_to_abs_numer,
	uint32_t	*proc_to_abs_denom)
{
	mach_timebase_info_data_t	info;
	uint32_t			one = 1;

	clock_timebase_info(&info);

	/* historical interface: delta is fixed at 1 */
	copyout((void *)&one, (void *)delta, sizeof (uint32_t));

	copyout((void *)&info.numer, (void *)abs_to_ns_numer, sizeof (uint32_t));
	copyout((void *)&info.denom, (void *)abs_to_ns_denom, sizeof (uint32_t));

	/* processor-to-absolute ratio is fixed at 1/1 */
	copyout((void *)&one, (void *)proc_to_abs_numer, sizeof (uint32_t));
	copyout((void *)&one, (void *)proc_to_abs_denom, sizeof (uint32_t));
}
860
/*
 * mach_timebase_info: system trap.
 *
 * Copies the absolute-time to nanoseconds conversion fraction
 * (numer/denom) out to the caller.
 * NOTE(review): the copyout result is ignored; the trap always
 * reports KERN_SUCCESS.
 */
kern_return_t
mach_timebase_info(
	mach_timebase_info_t	out_info)
{
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, (void *)out_info, sizeof (info));

	return (KERN_SUCCESS);
}
873
/*
 * mach_wait_until: system trap.
 *
 * Block the calling thread until the absolute time 'deadline'
 * passes.  Returns KERN_ABORTED when the wait is interrupted,
 * KERN_SUCCESS otherwise.
 */
kern_return_t
mach_wait_until(
	uint64_t		deadline)
{
	int			wait_result;

	wait_result = assert_wait((event_t)&mach_wait_until, THREAD_ABORTSAFE);
	if (wait_result == THREAD_WAITING) {
		thread_set_timer_deadline(deadline);
		wait_result = thread_block(THREAD_CONTINUE_NULL);
		/* if the timer did not fire it is still armed: cancel it */
		if (wait_result != THREAD_TIMED_OUT)
			thread_cancel_timer();
	}

	return ((wait_result == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
890
/*
 * clock_set_calendar_adjtime:
 *
 * Establish (or, when total == 0, cancel) a gradual calendar
 * clock adjustment: at most 'delta' of 'total' is applied per
 * period by the calend_adjcall timer.  Returns the previously
 * outstanding adjustment total (adjtime() semantics).
 */
int64_t
clock_set_calendar_adjtime(
	int64_t				total,
	uint32_t			delta)
{
	int64_t				ototal;
	spl_t				s;

	s = splclock();
	simple_lock(&calend_adjlock);

	/* lazily compute the fixed 10 ms adjustment period */
	if (calend_adjinterval == 0)
		clock_interval_to_absolutetime_interval(10000, NSEC_PER_USEC,
								&calend_adjinterval);

	ototal = calend_adjtotal;

	if (total != 0) {
		uint64_t		abstime;

		/* clamp the per-period delta to the total magnitude */
		if (total > 0) {
			if (delta > total)
				delta = total;
		}
		else {
			if (delta > -total)
				delta = -total;
		}

		calend_adjtotal = total;
		calend_adjdelta = delta;

		/* re-arm the periodic callout relative to now */
		if (calend_adjdeadline >= calend_adjinterval)
			calend_adjdeadline -= calend_adjinterval;
		clock_get_uptime(&abstime);
		clock_deadline_for_periodic_event(calend_adjinterval, abstime,
								&calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}
	else {
		/* total == 0 cancels any adjustment in progress */
		calend_adjtotal = 0;

		timer_call_cancel(&calend_adjcall);
	}

	simple_unlock(&calend_adjlock);
	splx(s);

	return (ototal);
}
942
/*
 * clock_calend_adjust:
 *
 * Timer call target: apply one calend_adjdelta step of the
 * outstanding calendar adjustment and re-arm the timer while
 * any adjustment remains.
 */
static void
clock_calend_adjust(
	timer_call_param_t		p0,
	timer_call_param_t		p1)
{
	spl_t		s;

	s = splclock();
	simple_lock(&calend_adjlock);

	if (calend_adjtotal > 0) {
		/* advancing: apply +delta; shrink delta near the end */
		clock_adjust_calendar((clock_res_t)calend_adjdelta);
		calend_adjtotal -= calend_adjdelta;

		if (calend_adjdelta > calend_adjtotal)
			calend_adjdelta = calend_adjtotal;
	}
	else
	if (calend_adjtotal < 0) {
		/* retarding: apply -delta; shrink delta near the end */
		clock_adjust_calendar(-(clock_res_t)calend_adjdelta);
		calend_adjtotal += calend_adjdelta;

		if (calend_adjdelta > -calend_adjtotal)
			calend_adjdelta = -calend_adjtotal;
	}

	if (calend_adjtotal != 0) {
		uint64_t	abstime;

		/* more to do: schedule the next adjustment period */
		clock_get_uptime(&abstime);
		clock_deadline_for_periodic_event(calend_adjinterval, abstime,
								&calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}

	simple_unlock(&calend_adjlock);
	splx(s);
}