/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * File:    kern/clock.c
 * Purpose: Routines for the creation and use of kernel
 *          alarm clock services. This file and the ipc
 *          routines in kern/ipc_clock.c constitute the
 *          machine-independent clock service layer.
 */

#include <cpus.h>
#include <mach_host.h>

#include <mach/boolean.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <machine/mach_param.h>
#include <kern/cpu_number.h>
#include <kern/misc_protos.h>
#include <kern/lock.h>
#include <kern/host.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/ipc_host.h>
#include <kern/clock.h>
#include <kern/zalloc.h>
#include <ipc/ipc_port.h>

#include <mach/mach_syscalls.h>
#include <mach/clock_reply.h>
#include <mach/mach_time.h>

/*
 * Exported interface
 */

#include <mach/clock_server.h>
#include <mach/mach_host_server.h>

/* local data declarations */
decl_simple_lock_data(static,ClockLock)         /* clock system synchronization */
static struct zone              *alarm_zone;    /* zone for user alarms */
static struct alarm             *alrmfree;      /* alarm free list pointer */
static struct alarm             *alrmdone;      /* alarm done list pointer */
static long                     alrm_seqno;     /* uniquely identifies alarms */
static thread_call_data_t       alarm_deliver;

decl_simple_lock_data(static,calend_adjlock)

static timer_call_data_t        calend_adjcall;
static uint64_t                 calend_adjinterval, calend_adjdeadline;

static thread_call_data_t       calend_wakecall;

/* backwards compatibility */
int     hz = HZ;                /* GET RID OF THIS !!! */
int     tick = (1000000 / HZ);  /* GET RID OF THIS !!! */

/* external declarations */
extern struct clock     clock_list[];
extern int              clock_count;

/* local clock subroutines */
static
void            flush_alarms(
                        clock_t                 clock);

static
void            post_alarm(
                        clock_t                 clock,
                        alarm_t                 alarm);

static
int             check_time(
                        alarm_type_t            alarm_type,
                        mach_timespec_t         *alarm_time,
                        mach_timespec_t         *clock_time);

static
void            clock_alarm_deliver(
                        thread_call_param_t     p0,
                        thread_call_param_t     p1);

static
void            calend_adjust_call(
                        timer_call_param_t      p0,
                        timer_call_param_t      p1);

static
void            calend_dowakeup(
                        thread_call_param_t     p0,
                        thread_call_param_t     p1);

/*
 * Macros to lock/unlock clock system.
 */
#define LOCK_CLOCK(s)                   \
        s = splclock();                 \
        simple_lock(&ClockLock);

#define UNLOCK_CLOCK(s)                 \
        simple_unlock(&ClockLock);      \
        splx(s);

/*
 * Configure the clock system. (Not sure if we need this,
 * as separate from clock_init()).
 */
void
clock_config(void)
{
        clock_t                 clock;
        register int            i;

        if (cpu_number() != master_cpu)
                panic("clock_config");

        simple_lock_init(&ClockLock, ETAP_MISC_CLOCK);
        thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);

        simple_lock_init(&calend_adjlock, ETAP_MISC_CLOCK);
        timer_call_setup(&calend_adjcall, calend_adjust_call, NULL);

        thread_call_setup(&calend_wakecall, calend_dowakeup, NULL);

        /*
         * Configure clock devices.
         */
        for (i = 0; i < clock_count; i++) {
                clock = &clock_list[i];
                if (clock->cl_ops) {
                        if ((*clock->cl_ops->c_config)() == 0)
                                clock->cl_ops = 0;
                }
        }

        /* start alarm sequence numbers at 0 */
        alrm_seqno = 0;
}

/*
 * Initialize the clock system.
 */
void
clock_init(void)
{
        clock_t                 clock;
        register int            i;

        /*
         * Initialize basic clock structures.
         */
        for (i = 0; i < clock_count; i++) {
                clock = &clock_list[i];
                if (clock->cl_ops)
                        (*clock->cl_ops->c_init)();
        }
}

/*
 * Called by machine dependent code
 * to initialize areas dependent on the
 * timebase value. May be called multiple
 * times during start up.
 */
void
clock_timebase_init(void)
{
        sched_timebase_init();
}

/*
 * Initialize the clock ipc service facility.
 */
void
clock_service_create(void)
{
        clock_t                 clock;
        register int            i;

        /*
         * Initialize ipc clock services.
         */
        for (i = 0; i < clock_count; i++) {
                clock = &clock_list[i];
                if (clock->cl_ops) {
                        ipc_clock_init(clock);
                        ipc_clock_enable(clock);
                }
        }

        /*
         * Perform miscellaneous late
         * initialization.
         */
        i = sizeof(struct alarm);
        alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");
}

/*
 * Get the service port on a clock.
 */
kern_return_t
host_get_clock_service(
        host_t                  host,
        clock_id_t              clock_id,
        clock_t                 *clock)         /* OUT */
{
        if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
                *clock = CLOCK_NULL;
                return (KERN_INVALID_ARGUMENT);
        }

        *clock = &clock_list[clock_id];
        if ((*clock)->cl_ops == 0)
                return (KERN_FAILURE);
        return (KERN_SUCCESS);
}

/*
 * Get the control port on a clock.
 */
kern_return_t
host_get_clock_control(
        host_priv_t             host_priv,
        clock_id_t              clock_id,
        clock_t                 *clock)         /* OUT */
{
        if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
                *clock = CLOCK_NULL;
                return (KERN_INVALID_ARGUMENT);
        }

        *clock = &clock_list[clock_id];
        if ((*clock)->cl_ops == 0)
                return (KERN_FAILURE);
        return (KERN_SUCCESS);
}

/*
 * Get the current clock time.
 */
kern_return_t
clock_get_time(
        clock_t                 clock,
        mach_timespec_t         *cur_time)      /* OUT */
{
        if (clock == CLOCK_NULL)
                return (KERN_INVALID_ARGUMENT);
        return ((*clock->cl_ops->c_gettime)(cur_time));
}

/*
 * Get clock attributes.
 */
kern_return_t
clock_get_attributes(
        clock_t                 clock,
        clock_flavor_t          flavor,
        clock_attr_t            attr,           /* OUT */
        mach_msg_type_number_t  *count)         /* IN/OUT */
{
        kern_return_t   (*getattr)(
                                clock_flavor_t          flavor,
                                clock_attr_t            attr,
                                mach_msg_type_number_t  *count);

        if (clock == CLOCK_NULL)
                return (KERN_INVALID_ARGUMENT);
        if (getattr = clock->cl_ops->c_getattr)
                return((*getattr)(flavor, attr, count));
        else
                return (KERN_FAILURE);
}

/*
 * Set the current clock time.
 */
kern_return_t
clock_set_time(
        clock_t                 clock,
        mach_timespec_t         new_time)
{
        mach_timespec_t         *clock_time;
        kern_return_t   (*settime)(
                                mach_timespec_t         *clock_time);

        if (clock == CLOCK_NULL)
                return (KERN_INVALID_ARGUMENT);
        if ((settime = clock->cl_ops->c_settime) == 0)
                return (KERN_FAILURE);
        clock_time = &new_time;
        if (BAD_MACH_TIMESPEC(clock_time))
                return (KERN_INVALID_VALUE);

        /*
         * Flush all outstanding alarms.
         */
        flush_alarms(clock);

        /*
         * Set the new time.
         */
        return ((*settime)(clock_time));
}

/*
 * Set the clock alarm resolution.
 */
kern_return_t
clock_set_attributes(
        clock_t                 clock,
        clock_flavor_t          flavor,
        clock_attr_t            attr,
        mach_msg_type_number_t  count)
{
        kern_return_t   (*setattr)(
                                clock_flavor_t          flavor,
                                clock_attr_t            attr,
                                mach_msg_type_number_t  count);

        if (clock == CLOCK_NULL)
                return (KERN_INVALID_ARGUMENT);
        if (setattr = clock->cl_ops->c_setattr)
                return ((*setattr)(flavor, attr, count));
        else
                return (KERN_FAILURE);
}

/*
 * Setup a clock alarm.
 */
kern_return_t
clock_alarm(
        clock_t                 clock,
        alarm_type_t            alarm_type,
        mach_timespec_t         alarm_time,
        ipc_port_t              alarm_port,
        mach_msg_type_name_t    alarm_port_type)
{
        alarm_t                 alarm;
        mach_timespec_t         clock_time;
        int                     chkstat;
        kern_return_t           reply_code;
        spl_t                   s;

        if (clock == CLOCK_NULL)
                return (KERN_INVALID_ARGUMENT);
        if (clock->cl_ops->c_setalrm == 0)
                return (KERN_FAILURE);
        if (IP_VALID(alarm_port) == 0)
                return (KERN_INVALID_CAPABILITY);

        /*
         * Check alarm parameters. If parameters are invalid,
         * send alarm message immediately.
         */
        (*clock->cl_ops->c_gettime)(&clock_time);
        chkstat = check_time(alarm_type, &alarm_time, &clock_time);
        if (chkstat <= 0) {
                reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
                clock_alarm_reply(alarm_port, alarm_port_type,
                                  reply_code, alarm_type, clock_time);
                return (KERN_SUCCESS);
        }

        /*
         * Get alarm and add to clock alarm list.
         */

        LOCK_CLOCK(s);
        if ((alarm = alrmfree) == 0) {
                UNLOCK_CLOCK(s);
                alarm = (alarm_t) zalloc(alarm_zone);
                if (alarm == 0)
                        return (KERN_RESOURCE_SHORTAGE);
                LOCK_CLOCK(s);
        }
        else
                alrmfree = alarm->al_next;

        alarm->al_status = ALARM_CLOCK;
        alarm->al_time = alarm_time;
        alarm->al_type = alarm_type;
        alarm->al_port = alarm_port;
        alarm->al_port_type = alarm_port_type;
        alarm->al_clock = clock;
        alarm->al_seqno = alrm_seqno++;
        post_alarm(clock, alarm);
        UNLOCK_CLOCK(s);

        return (KERN_SUCCESS);
}
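
/*
 * Illustrative sketch (not part of the original file): how a user task
 * would typically arm an alarm that ends up in clock_alarm() above, via
 * the MIG clock interface. The function name example_arm_alarm() and the
 * 5-second relative time are assumptions for illustration only; the block
 * is guarded out so it is never compiled.
 */
#if 0   /* user-space sketch, never compiled here */
#include <mach/mach.h>
#include <mach/clock.h>

static kern_return_t
example_arm_alarm(void)
{
        clock_serv_t            clock_port;
        mach_port_t             reply_port;
        mach_timespec_t         alarm_time = { 5, 0 };  /* 5 seconds, relative */
        kern_return_t           kr;

        kr = host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clock_port);
        if (kr != KERN_SUCCESS)
                return (kr);

        kr = mach_port_allocate(mach_task_self(),
                                MACH_PORT_RIGHT_RECEIVE, &reply_port);
        if (kr != KERN_SUCCESS)
                return (kr);

        /*
         * A TIME_RELATIVE alarm_time is converted against the clock's
         * current time by check_time(); the completion arrives on
         * reply_port as a clock_alarm_reply message.
         */
        return (clock_alarm(clock_port, TIME_RELATIVE, alarm_time, reply_port));
}
#endif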

/*
 * Sleep on a clock. System trap. User-level libmach clock_sleep
 * interface call takes a mach_timespec_t sleep_time argument which it
 * converts to sleep_sec and sleep_nsec arguments which are then
 * passed to clock_sleep_trap.
 */
kern_return_t
clock_sleep_trap(
        mach_port_name_t        clock_name,
        sleep_type_t            sleep_type,
        int                     sleep_sec,
        int                     sleep_nsec,
        mach_timespec_t         *wakeup_time)
{
        clock_t                 clock;
        mach_timespec_t         swtime;
        kern_return_t           rvalue;

        /*
         * Convert the trap parameters.
         */
        if (clock_name != MACH_PORT_NULL)
                clock = port_name_to_clock(clock_name);
        else
                clock = &clock_list[SYSTEM_CLOCK];

        swtime.tv_sec = sleep_sec;
        swtime.tv_nsec = sleep_nsec;

        /*
         * Call the actual clock_sleep routine.
         */
        rvalue = clock_sleep_internal(clock, sleep_type, &swtime);

        /*
         * Return current time as wakeup time.
         */
        if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
                copyout((char *)&swtime, (char *)wakeup_time,
                        sizeof(mach_timespec_t));
        }
        return (rvalue);
}
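
/*
 * Illustrative sketch (not part of the original file): the user-level
 * libmach clock_sleep() wrapper described in the comment above, which
 * splits the mach_timespec_t into the two integer arguments expected by
 * clock_sleep_trap(). Shown only as an approximation of that wrapper and
 * guarded out so it is never compiled.
 */
#if 0   /* user-space sketch, never compiled here */
kern_return_t
clock_sleep(
        mach_port_t             clock_name,
        sleep_type_t            sleep_type,
        mach_timespec_t         sleep_time,
        mach_timespec_t         *wakeup_time)
{
        /* decompose the timespec into the sleep_sec/sleep_nsec trap arguments */
        return (clock_sleep_trap(clock_name, sleep_type,
                        sleep_time.tv_sec, sleep_time.tv_nsec, wakeup_time));
}
#endif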

/*
 * Kernel internally callable clock sleep routine. The calling
 * thread is suspended until the requested sleep time is reached.
 */
kern_return_t
clock_sleep_internal(
        clock_t                 clock,
        sleep_type_t            sleep_type,
        mach_timespec_t         *sleep_time)
{
        alarm_t                 alarm;
        mach_timespec_t         clock_time;
        kern_return_t           rvalue;
        int                     chkstat;
        spl_t                   s;

        if (clock == CLOCK_NULL)
                return (KERN_INVALID_ARGUMENT);
        if (clock->cl_ops->c_setalrm == 0)
                return (KERN_FAILURE);

        /*
         * Check sleep parameters. If parameters are invalid
         * return an error, otherwise post alarm request.
         */
        (*clock->cl_ops->c_gettime)(&clock_time);

        chkstat = check_time(sleep_type, sleep_time, &clock_time);
        if (chkstat < 0)
                return (KERN_INVALID_VALUE);
        rvalue = KERN_SUCCESS;
        if (chkstat > 0) {
                wait_result_t wait_result;

                /*
                 * Get alarm and add to clock alarm list.
                 */

                LOCK_CLOCK(s);
                if ((alarm = alrmfree) == 0) {
                        UNLOCK_CLOCK(s);
                        alarm = (alarm_t) zalloc(alarm_zone);
                        if (alarm == 0)
                                return (KERN_RESOURCE_SHORTAGE);
                        LOCK_CLOCK(s);
                }
                else
                        alrmfree = alarm->al_next;

                /*
                 * Wait for alarm to occur.
                 */
                wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
                if (wait_result == THREAD_WAITING) {
                        alarm->al_time = *sleep_time;
                        alarm->al_status = ALARM_SLEEP;
                        post_alarm(clock, alarm);
                        UNLOCK_CLOCK(s);

                        wait_result = thread_block(THREAD_CONTINUE_NULL);

                        /*
                         * Note if alarm expired normally or whether it
                         * was aborted. If aborted, delete alarm from
                         * clock alarm list. Return alarm to free list.
                         */
                        LOCK_CLOCK(s);
                        if (alarm->al_status != ALARM_DONE) {
                                assert(wait_result != THREAD_AWAKENED);
                                if ((alarm->al_prev)->al_next = alarm->al_next)
                                        (alarm->al_next)->al_prev = alarm->al_prev;
                                rvalue = KERN_ABORTED;
                        }
                        *sleep_time = alarm->al_time;
                        alarm->al_status = ALARM_FREE;
                } else {
                        assert(wait_result == THREAD_INTERRUPTED);
                        assert(alarm->al_status == ALARM_FREE);
                        rvalue = KERN_ABORTED;
                }
                alarm->al_next = alrmfree;
                alrmfree = alarm;
                UNLOCK_CLOCK(s);
        }
        else
                *sleep_time = clock_time;

        return (rvalue);
}

/*
 * CLOCK INTERRUPT SERVICE ROUTINES.
 */

/*
 * Service clock alarm interrupts. Called from machine dependent
 * layer at splclock(). The clock_id argument specifies the clock,
 * and the clock_time argument gives that clock's current time.
 */
void
clock_alarm_intr(
        clock_id_t              clock_id,
        mach_timespec_t         *clock_time)
{
        clock_t                 clock;
        register alarm_t        alrm1;
        register alarm_t        alrm2;
        mach_timespec_t         *alarm_time;
        spl_t                   s;

        clock = &clock_list[clock_id];

        /*
         * Update clock alarm list. All alarms that are due are moved
         * to the alarmdone list to be serviced by the alarm_thread.
         */

        LOCK_CLOCK(s);
        alrm1 = (alarm_t) &clock->cl_alarm;
        while (alrm2 = alrm1->al_next) {
                alarm_time = &alrm2->al_time;
                if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
                        break;

                /*
                 * Alarm has expired, so remove it from the
                 * clock alarm list.
                 */
                if (alrm1->al_next = alrm2->al_next)
                        (alrm1->al_next)->al_prev = alrm1;

                /*
                 * If a clock_sleep() alarm, wakeup the thread
                 * which issued the clock_sleep() call.
                 */
                if (alrm2->al_status == ALARM_SLEEP) {
                        alrm2->al_next = 0;
                        alrm2->al_status = ALARM_DONE;
                        alrm2->al_time = *clock_time;
                        thread_wakeup((event_t)alrm2);
                }

                /*
                 * If a clock_alarm() alarm, place the alarm on
                 * the alarm done list and schedule the alarm
                 * delivery mechanism.
                 */
                else {
                        assert(alrm2->al_status == ALARM_CLOCK);
                        if (alrm2->al_next = alrmdone)
                                alrmdone->al_prev = alrm2;
                        else
                                thread_call_enter(&alarm_deliver);
                        alrm2->al_prev = (alarm_t) &alrmdone;
                        alrmdone = alrm2;
                        alrm2->al_status = ALARM_DONE;
                        alrm2->al_time = *clock_time;
                }
        }

        /*
         * Setup the clock dependent layer to deliver another
         * interrupt for the next pending alarm.
         */
        if (alrm2)
                (*clock->cl_ops->c_setalrm)(alarm_time);
        UNLOCK_CLOCK(s);
}

/*
 * ALARM DELIVERY ROUTINES.
 */

static void
clock_alarm_deliver(
        thread_call_param_t     p0,
        thread_call_param_t     p1)
{
        register alarm_t        alrm;
        kern_return_t           code;
        spl_t                   s;

        LOCK_CLOCK(s);
        while (alrm = alrmdone) {
                if (alrmdone = alrm->al_next)
                        alrmdone->al_prev = (alarm_t) &alrmdone;
                UNLOCK_CLOCK(s);

                code = (alrm->al_status == ALARM_DONE? KERN_SUCCESS: KERN_ABORTED);
                if (alrm->al_port != IP_NULL) {
                        /* Deliver message to designated port */
                        if (IP_VALID(alrm->al_port)) {
                                clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
                                                  alrm->al_type, alrm->al_time);
                        }

                        LOCK_CLOCK(s);
                        alrm->al_status = ALARM_FREE;
                        alrm->al_next = alrmfree;
                        alrmfree = alrm;
                }
                else
                        panic("clock_alarm_deliver");
        }

        UNLOCK_CLOCK(s);
}

/*
 * CLOCK PRIVATE SERVICING SUBROUTINES.
 */

/*
 * Flush all pending alarms on a clock. All alarms
 * are activated and timestamped correctly, so any
 * programs waiting on alarms/threads will proceed
 * with accurate information.
 */
static
void
flush_alarms(
        clock_t                 clock)
{
        register alarm_t        alrm1, alrm2;
        spl_t                   s;

        /*
         * Flush all outstanding alarms.
         */
        LOCK_CLOCK(s);
        alrm1 = (alarm_t) &clock->cl_alarm;
        while (alrm2 = alrm1->al_next) {
                /*
                 * Remove alarm from the clock alarm list.
                 */
                if (alrm1->al_next = alrm2->al_next)
                        (alrm1->al_next)->al_prev = alrm1;

                /*
                 * If a clock_sleep() alarm, wakeup the thread
                 * which issued the clock_sleep() call.
                 */
                if (alrm2->al_status == ALARM_SLEEP) {
                        alrm2->al_next = 0;
                        thread_wakeup((event_t)alrm2);
                }
                else {
                        /*
                         * If a clock_alarm() alarm, place the alarm on
                         * the alarm done list and wakeup the dedicated
                         * kernel alarm_thread to service the alarm.
                         */
                        assert(alrm2->al_status == ALARM_CLOCK);
                        if (alrm2->al_next = alrmdone)
                                alrmdone->al_prev = alrm2;
                        else
                                thread_wakeup((event_t)&alrmdone);
                        alrm2->al_prev = (alarm_t) &alrmdone;
                        alrmdone = alrm2;
                }
        }
        UNLOCK_CLOCK(s);
}

/*
 * Post an alarm on a clock's active alarm list. The alarm is
 * inserted in time-order into the clock's active alarm list.
 * Always called from within a LOCK_CLOCK() code section.
 */
static
void
post_alarm(
        clock_t                 clock,
        alarm_t                 alarm)
{
        register alarm_t        alrm1, alrm2;
        mach_timespec_t         *alarm_time;
        mach_timespec_t         *queue_time;

        /*
         * Traverse alarm list until queue time is greater
         * than alarm time, then insert alarm.
         */
        alarm_time = &alarm->al_time;
        alrm1 = (alarm_t) &clock->cl_alarm;
        while (alrm2 = alrm1->al_next) {
                queue_time = &alrm2->al_time;
                if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
                        break;
                alrm1 = alrm2;
        }
        alrm1->al_next = alarm;
        alarm->al_next = alrm2;
        alarm->al_prev = alrm1;
        if (alrm2)
                alrm2->al_prev = alarm;

        /*
         * If the inserted alarm is the 'earliest' alarm,
         * reset the device layer alarm time accordingly.
         */
        if (clock->cl_alarm.al_next == alarm)
                (*clock->cl_ops->c_setalrm)(alarm_time);
}

/*
 * Check the validity of 'alarm_time' and 'alarm_type'. If either
 * argument is invalid, return a negative value. If the 'alarm_time'
 * is now, return a 0 value. If the 'alarm_time' is in the future,
 * return a positive value.
 */
static
int
check_time(
        alarm_type_t            alarm_type,
        mach_timespec_t         *alarm_time,
        mach_timespec_t         *clock_time)
{
        int                     result;

        if (BAD_ALRMTYPE(alarm_type))
                return (-1);
        if (BAD_MACH_TIMESPEC(alarm_time))
                return (-1);
        if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
                ADD_MACH_TIMESPEC(alarm_time, clock_time);

        result = CMP_MACH_TIMESPEC(alarm_time, clock_time);

        return ((result >= 0)? result: 0);
}

mach_timespec_t
clock_get_system_value(void)
{
        clock_t                 clock = &clock_list[SYSTEM_CLOCK];
        mach_timespec_t         value;

        (void) (*clock->cl_ops->c_gettime)(&value);

        return value;
}

mach_timespec_t
clock_get_calendar_value(void)
{
        clock_t                 clock = &clock_list[CALENDAR_CLOCK];
        mach_timespec_t         value = MACH_TIMESPEC_ZERO;

        (void) (*clock->cl_ops->c_gettime)(&value);

        return value;
}

void
clock_deadline_for_periodic_event(
        uint64_t                interval,
        uint64_t                abstime,
        uint64_t                *deadline)
{
        assert(interval != 0);

        *deadline += interval;

        if (*deadline <= abstime) {
                *deadline = abstime + interval;
                abstime = mach_absolute_time();

                if (*deadline <= abstime)
                        *deadline = abstime + interval;
        }
}
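
/*
 * Illustrative sketch (not part of the original file): the call pattern
 * clock_deadline_for_periodic_event() supports, and which
 * calend_adjust_call() below follows. A periodic timer callback advances
 * its deadline by a fixed interval without accumulating drift. All of the
 * example_* names are hypothetical; the block is guarded out so it is
 * never compiled.
 */
#if 0   /* hypothetical periodic callback, never compiled here */
static timer_call_data_t        example_call;
static uint64_t                 example_deadline;
static uint64_t                 example_interval;       /* absolute-time units */

static void
example_periodic(
        timer_call_param_t      p0,
        timer_call_param_t      p1)
{
        /* advance the deadline one interval, skipping ahead if we fell behind */
        clock_deadline_for_periodic_event(example_interval,
                        mach_absolute_time(), &example_deadline);

        timer_call_enter(&example_call, example_deadline);
}
#endif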

void
mk_timebase_info(
        uint32_t                *delta,
        uint32_t                *abs_to_ns_numer,
        uint32_t                *abs_to_ns_denom,
        uint32_t                *proc_to_abs_numer,
        uint32_t                *proc_to_abs_denom)
{
        mach_timebase_info_data_t       info;
        uint32_t                        one = 1;

        clock_timebase_info(&info);

        copyout((void *)&one, (void *)delta, sizeof (uint32_t));

        copyout((void *)&info.numer, (void *)abs_to_ns_numer, sizeof (uint32_t));
        copyout((void *)&info.denom, (void *)abs_to_ns_denom, sizeof (uint32_t));

        copyout((void *)&one, (void *)proc_to_abs_numer, sizeof (uint32_t));
        copyout((void *)&one, (void *)proc_to_abs_denom, sizeof (uint32_t));
}

kern_return_t
mach_timebase_info(
        mach_timebase_info_t    out_info)
{
        mach_timebase_info_data_t       info;

        clock_timebase_info(&info);

        copyout((void *)&info, (void *)out_info, sizeof (info));

        return (KERN_SUCCESS);
}
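
/*
 * Illustrative sketch (not part of the original file): the usual
 * user-space consumption of mach_timebase_info(), scaling a
 * mach_absolute_time() delta to nanoseconds with the numer/denom
 * fraction returned above. The function name is hypothetical; the block
 * is guarded out so it is never compiled.
 */
#if 0   /* user-space sketch, never compiled here */
#include <mach/mach_time.h>

static uint64_t
example_elapsed_ns(uint64_t start_abs, uint64_t end_abs)
{
        mach_timebase_info_data_t       info;

        mach_timebase_info(&info);

        /* absolute-time units * numer / denom == nanoseconds */
        return ((end_abs - start_abs) * info.numer / info.denom);
}
#endif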

kern_return_t
mach_wait_until(
        uint64_t                deadline)
{
        int                     wait_result;

        wait_result = assert_wait((event_t)&mach_wait_until, THREAD_ABORTSAFE);
        if (wait_result == THREAD_WAITING) {
                thread_set_timer_deadline(deadline);
                wait_result = thread_block(THREAD_CONTINUE_NULL);
                if (wait_result != THREAD_TIMED_OUT)
                        thread_cancel_timer();
        }

        return ((wait_result == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
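
/*
 * Illustrative sketch (not part of the original file): a user-space
 * caller converting a nanosecond delay into absolute-time units and
 * blocking on it with mach_wait_until(). The function name is
 * hypothetical; the block is guarded out so it is never compiled.
 */
#if 0   /* user-space sketch, never compiled here */
#include <mach/mach_time.h>

static kern_return_t
example_sleep_ns(uint64_t ns)
{
        mach_timebase_info_data_t       info;
        uint64_t                        deadline;

        mach_timebase_info(&info);

        /* nanoseconds * denom / numer == absolute-time units */
        deadline = mach_absolute_time() + ns * info.denom / info.numer;

        /* KERN_ABORTED is returned if the wait is interrupted */
        return (mach_wait_until(deadline));
}
#endif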

void
clock_adjtime(
        int32_t                 *secs,
        int32_t                 *microsecs)
{
        uint32_t                interval;
        spl_t                   s;

        s = splclock();
        simple_lock(&calend_adjlock);

        interval = clock_set_calendar_adjtime(secs, microsecs);
        if (interval != 0) {
                if (calend_adjdeadline >= interval)
                        calend_adjdeadline -= interval;
                clock_deadline_for_periodic_event(interval, mach_absolute_time(),
                                                  &calend_adjdeadline);

                timer_call_enter(&calend_adjcall, calend_adjdeadline);
        }
        else
                timer_call_cancel(&calend_adjcall);

        simple_unlock(&calend_adjlock);
        splx(s);
}

static void
calend_adjust_call(
        timer_call_param_t      p0,
        timer_call_param_t      p1)
{
        uint32_t                interval;
        spl_t                   s;

        s = splclock();
        simple_lock(&calend_adjlock);

        interval = clock_adjust_calendar();
        if (interval != 0) {
                clock_deadline_for_periodic_event(interval, mach_absolute_time(),
                                                  &calend_adjdeadline);

                timer_call_enter(&calend_adjcall, calend_adjdeadline);
        }

        simple_unlock(&calend_adjlock);
        splx(s);
}

void
clock_wakeup_calendar(void)
{
        thread_call_enter(&calend_wakecall);
}

static void
calend_dowakeup(
        thread_call_param_t     p0,
        thread_call_param_t     p1)
{
        void            IOKitResetTime(void);

        IOKitResetTime();
}