2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
28 * Purpose: Routines for the creation and use of kernel
29 * alarm clock services. This file and the ipc
30 * routines in kern/ipc_clock.c constitute the
31 * machine-independent clock service layer.
34 #include <mach_host.h>
36 #include <mach/mach_types.h>
37 #include <mach/boolean.h>
38 #include <mach/processor_info.h>
39 #include <mach/vm_param.h>
41 #include <kern/cpu_number.h>
42 #include <kern/misc_protos.h>
43 #include <kern/lock.h>
44 #include <kern/host.h>
46 #include <kern/sched_prim.h>
47 #include <kern/thread.h>
48 #include <kern/ipc_host.h>
49 #include <kern/clock.h>
50 #include <kern/zalloc.h>
52 #include <ipc/ipc_types.h>
53 #include <ipc/ipc_port.h>
55 #include <mach/mach_traps.h>
56 #include <mach/clock_reply.h>
57 #include <mach/mach_time.h>
59 #include <mach/clock_server.h>
60 #include <mach/clock_priv_server.h>
61 #include <mach/host_priv_server.h>
67 #include <mach/clock_server.h>
68 #include <mach/mach_host_server.h>
70 /* local data declarations */
71 decl_simple_lock_data(static,ClockLock
) /* clock system synchronization */
72 static struct zone
*alarm_zone
; /* zone for user alarms */
73 static struct alarm
*alrmfree
; /* alarm free list pointer */
74 static struct alarm
*alrmdone
; /* alarm done list pointer */
75 static long alrm_seqno
; /* uniquely identifies alarms */
76 static thread_call_data_t alarm_deliver
;
78 decl_simple_lock_data(static,calend_adjlock
)
80 static timer_call_data_t calend_adjcall
;
81 static uint64_t calend_adjdeadline
;
83 static thread_call_data_t calend_wakecall
;
85 /* external declarations */
86 extern struct clock clock_list
[];
87 extern int clock_count
;
89 /* local clock subroutines */
101 alarm_type_t alarm_type
,
102 mach_timespec_t
*alarm_time
,
103 mach_timespec_t
*clock_time
);
106 void clock_alarm_deliver(
107 thread_call_param_t p0
,
108 thread_call_param_t p1
);
111 void calend_adjust_call(
112 timer_call_param_t p0
,
113 timer_call_param_t p1
);
116 void calend_dowakeup(
117 thread_call_param_t p0
,
118 thread_call_param_t p1
);
/*
 * Macros to lock/unlock clock system.
 *
 * LOCK_CLOCK raises to splclock and takes ClockLock; UNLOCK_CLOCK
 * releases and restores the saved spl level 's'.
 * NOTE(review): the splclock()/splx(s) lines were dropped in extraction
 * and restored here — verify against original.
 */
#define LOCK_CLOCK(s)			\
	(s) = splclock();			\
	simple_lock(&ClockLock);

#define UNLOCK_CLOCK(s)			\
	simple_unlock(&ClockLock);	\
	splx(s);
134 * Called once at boot to configure the clock subsystem.
142 assert(cpu_number() == master_cpu
);
144 simple_lock_init(&ClockLock
, 0);
145 thread_call_setup(&alarm_deliver
, clock_alarm_deliver
, NULL
);
147 simple_lock_init(&calend_adjlock
, 0);
148 timer_call_setup(&calend_adjcall
, calend_adjust_call
, NULL
);
150 thread_call_setup(&calend_wakecall
, calend_dowakeup
, NULL
);
153 * Configure clock devices.
155 for (i
= 0; i
< clock_count
; i
++) {
156 clock
= &clock_list
[i
];
158 if ((*clock
->cl_ops
->c_config
)() == 0)
164 * Initialize the timer callouts.
166 timer_call_initialize();
168 /* start alarm sequence numbers at 0 */
175 * Called on a processor each time started.
184 * Initialize basic clock structures.
186 for (i
= 0; i
< clock_count
; i
++) {
187 clock
= &clock_list
[i
];
188 if (clock
->cl_ops
&& clock
->cl_ops
->c_init
)
189 (*clock
->cl_ops
->c_init
)();
/*
 * Called by machine dependent code
 * to initialize areas dependent on the
 * timebase value.  May be called multiple
 * times during start up.
 */
void
clock_timebase_init(void)
{
	sched_timebase_init();
}
206 * Initialize the clock ipc service facility.
209 clock_service_create(void)
215 * Initialize ipc clock services.
217 for (i
= 0; i
< clock_count
; i
++) {
218 clock
= &clock_list
[i
];
220 ipc_clock_init(clock
);
221 ipc_clock_enable(clock
);
226 * Perform miscellaneous late
229 i
= sizeof(struct alarm
);
230 alarm_zone
= zinit(i
, (4096/i
)*i
, 10*i
, "alarms");
234 * Get the service port on a clock.
237 host_get_clock_service(
240 clock_t *clock
) /* OUT */
242 if (host
== HOST_NULL
|| clock_id
< 0 || clock_id
>= clock_count
) {
244 return (KERN_INVALID_ARGUMENT
);
247 *clock
= &clock_list
[clock_id
];
248 if ((*clock
)->cl_ops
== 0)
249 return (KERN_FAILURE
);
250 return (KERN_SUCCESS
);
254 * Get the control port on a clock.
257 host_get_clock_control(
258 host_priv_t host_priv
,
260 clock_t *clock
) /* OUT */
262 if (host_priv
== HOST_PRIV_NULL
|| clock_id
< 0 || clock_id
>= clock_count
) {
264 return (KERN_INVALID_ARGUMENT
);
267 *clock
= &clock_list
[clock_id
];
268 if ((*clock
)->cl_ops
== 0)
269 return (KERN_FAILURE
);
270 return (KERN_SUCCESS
);
274 * Get the current clock time.
279 mach_timespec_t
*cur_time
) /* OUT */
281 if (clock
== CLOCK_NULL
)
282 return (KERN_INVALID_ARGUMENT
);
283 return ((*clock
->cl_ops
->c_gettime
)(cur_time
));
287 * Get clock attributes.
290 clock_get_attributes(
292 clock_flavor_t flavor
,
293 clock_attr_t attr
, /* OUT */
294 mach_msg_type_number_t
*count
) /* IN/OUT */
296 if (clock
== CLOCK_NULL
)
297 return (KERN_INVALID_ARGUMENT
);
298 if (clock
->cl_ops
->c_getattr
)
299 return(clock
->cl_ops
->c_getattr(flavor
, attr
, count
));
301 return (KERN_FAILURE
);
305 * Set the current clock time.
310 mach_timespec_t new_time
)
312 mach_timespec_t
*clock_time
;
314 if (clock
== CLOCK_NULL
)
315 return (KERN_INVALID_ARGUMENT
);
316 if (clock
->cl_ops
->c_settime
== NULL
)
317 return (KERN_FAILURE
);
318 clock_time
= &new_time
;
319 if (BAD_MACH_TIMESPEC(clock_time
))
320 return (KERN_INVALID_VALUE
);
323 * Flush all outstanding alarms.
330 return (clock
->cl_ops
->c_settime(clock_time
));
334 * Set the clock alarm resolution.
337 clock_set_attributes(
339 clock_flavor_t flavor
,
341 mach_msg_type_number_t count
)
343 if (clock
== CLOCK_NULL
)
344 return (KERN_INVALID_ARGUMENT
);
345 if (clock
->cl_ops
->c_setattr
)
346 return (clock
->cl_ops
->c_setattr(flavor
, attr
, count
));
348 return (KERN_FAILURE
);
352 * Setup a clock alarm.
357 alarm_type_t alarm_type
,
358 mach_timespec_t alarm_time
,
359 ipc_port_t alarm_port
,
360 mach_msg_type_name_t alarm_port_type
)
363 mach_timespec_t clock_time
;
365 kern_return_t reply_code
;
368 if (clock
== CLOCK_NULL
)
369 return (KERN_INVALID_ARGUMENT
);
370 if (clock
->cl_ops
->c_setalrm
== 0)
371 return (KERN_FAILURE
);
372 if (IP_VALID(alarm_port
) == 0)
373 return (KERN_INVALID_CAPABILITY
);
376 * Check alarm parameters. If parameters are invalid,
377 * send alarm message immediately.
379 (*clock
->cl_ops
->c_gettime
)(&clock_time
);
380 chkstat
= check_time(alarm_type
, &alarm_time
, &clock_time
);
382 reply_code
= (chkstat
< 0 ? KERN_INVALID_VALUE
: KERN_SUCCESS
);
383 clock_alarm_reply(alarm_port
, alarm_port_type
,
384 reply_code
, alarm_type
, clock_time
);
385 return (KERN_SUCCESS
);
389 * Get alarm and add to clock alarm list.
393 if ((alarm
= alrmfree
) == 0) {
395 alarm
= (alarm_t
) zalloc(alarm_zone
);
397 return (KERN_RESOURCE_SHORTAGE
);
401 alrmfree
= alarm
->al_next
;
403 alarm
->al_status
= ALARM_CLOCK
;
404 alarm
->al_time
= alarm_time
;
405 alarm
->al_type
= alarm_type
;
406 alarm
->al_port
= alarm_port
;
407 alarm
->al_port_type
= alarm_port_type
;
408 alarm
->al_clock
= clock
;
409 alarm
->al_seqno
= alrm_seqno
++;
410 post_alarm(clock
, alarm
);
413 return (KERN_SUCCESS
);
417 * Sleep on a clock. System trap. User-level libmach clock_sleep
418 * interface call takes a mach_timespec_t sleep_time argument which it
419 * converts to sleep_sec and sleep_nsec arguments which are then
420 * passed to clock_sleep_trap.
424 struct clock_sleep_trap_args
*args
)
426 mach_port_name_t clock_name
= args
->clock_name
;
427 sleep_type_t sleep_type
= args
->sleep_type
;
428 int sleep_sec
= args
->sleep_sec
;
429 int sleep_nsec
= args
->sleep_nsec
;
430 mach_vm_address_t wakeup_time_addr
= args
->wakeup_time
;
432 mach_timespec_t swtime
;
433 kern_return_t rvalue
;
436 * Convert the trap parameters.
438 if (clock_name
!= MACH_PORT_NULL
)
439 clock
= port_name_to_clock(clock_name
);
441 clock
= &clock_list
[SYSTEM_CLOCK
];
443 swtime
.tv_sec
= sleep_sec
;
444 swtime
.tv_nsec
= sleep_nsec
;
447 * Call the actual clock_sleep routine.
449 rvalue
= clock_sleep_internal(clock
, sleep_type
, &swtime
);
452 * Return current time as wakeup time.
454 if (rvalue
!= KERN_INVALID_ARGUMENT
&& rvalue
!= KERN_FAILURE
) {
455 copyout((char *)&swtime
, wakeup_time_addr
, sizeof(mach_timespec_t
));
461 * Kernel internally callable clock sleep routine. The calling
462 * thread is suspended until the requested sleep time is reached.
465 clock_sleep_internal(
467 sleep_type_t sleep_type
,
468 mach_timespec_t
*sleep_time
)
471 mach_timespec_t clock_time
;
472 kern_return_t rvalue
;
476 if (clock
== CLOCK_NULL
)
477 return (KERN_INVALID_ARGUMENT
);
478 if (clock
->cl_ops
->c_setalrm
== 0)
479 return (KERN_FAILURE
);
482 * Check sleep parameters. If parameters are invalid
483 * return an error, otherwise post alarm request.
485 (*clock
->cl_ops
->c_gettime
)(&clock_time
);
487 chkstat
= check_time(sleep_type
, sleep_time
, &clock_time
);
489 return (KERN_INVALID_VALUE
);
490 rvalue
= KERN_SUCCESS
;
492 wait_result_t wait_result
;
495 * Get alarm and add to clock alarm list.
499 if ((alarm
= alrmfree
) == 0) {
501 alarm
= (alarm_t
) zalloc(alarm_zone
);
503 return (KERN_RESOURCE_SHORTAGE
);
507 alrmfree
= alarm
->al_next
;
510 * Wait for alarm to occur.
512 wait_result
= assert_wait((event_t
)alarm
, THREAD_ABORTSAFE
);
513 if (wait_result
== THREAD_WAITING
) {
514 alarm
->al_time
= *sleep_time
;
515 alarm
->al_status
= ALARM_SLEEP
;
516 post_alarm(clock
, alarm
);
519 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
522 * Note if alarm expired normally or whether it
523 * was aborted. If aborted, delete alarm from
524 * clock alarm list. Return alarm to free list.
527 if (alarm
->al_status
!= ALARM_DONE
) {
528 assert(wait_result
!= THREAD_AWAKENED
);
529 if (((alarm
->al_prev
)->al_next
= alarm
->al_next
) != NULL
)
530 (alarm
->al_next
)->al_prev
= alarm
->al_prev
;
531 rvalue
= KERN_ABORTED
;
533 *sleep_time
= alarm
->al_time
;
534 alarm
->al_status
= ALARM_FREE
;
536 assert(wait_result
== THREAD_INTERRUPTED
);
537 assert(alarm
->al_status
== ALARM_FREE
);
538 rvalue
= KERN_ABORTED
;
540 alarm
->al_next
= alrmfree
;
545 *sleep_time
= clock_time
;
551 * CLOCK INTERRUPT SERVICE ROUTINES.
555 * Service clock alarm interrupts. Called from machine dependent
556 * layer at splclock(). The clock_id argument specifies the clock,
557 * and the clock_time argument gives that clock's current time.
562 mach_timespec_t
*clock_time
)
565 register alarm_t alrm1
;
566 register alarm_t alrm2
;
567 mach_timespec_t
*alarm_time
;
570 clock
= &clock_list
[clock_id
];
573 * Update clock alarm list. All alarms that are due are moved
574 * to the alarmdone list to be serviced by the alarm_thread.
578 alrm1
= (alarm_t
) &clock
->cl_alarm
;
579 while ((alrm2
= alrm1
->al_next
) != NULL
) {
580 alarm_time
= &alrm2
->al_time
;
581 if (CMP_MACH_TIMESPEC(alarm_time
, clock_time
) > 0)
585 * Alarm has expired, so remove it from the
588 if ((alrm1
->al_next
= alrm2
->al_next
) != NULL
)
589 (alrm1
->al_next
)->al_prev
= alrm1
;
592 * If a clock_sleep() alarm, wakeup the thread
593 * which issued the clock_sleep() call.
595 if (alrm2
->al_status
== ALARM_SLEEP
) {
597 alrm2
->al_status
= ALARM_DONE
;
598 alrm2
->al_time
= *clock_time
;
599 thread_wakeup((event_t
)alrm2
);
603 * If a clock_alarm() alarm, place the alarm on
604 * the alarm done list and schedule the alarm
605 * delivery mechanism.
608 assert(alrm2
->al_status
== ALARM_CLOCK
);
609 if ((alrm2
->al_next
= alrmdone
) != NULL
)
610 alrmdone
->al_prev
= alrm2
;
612 thread_call_enter(&alarm_deliver
);
613 alrm2
->al_prev
= (alarm_t
) &alrmdone
;
615 alrm2
->al_status
= ALARM_DONE
;
616 alrm2
->al_time
= *clock_time
;
621 * Setup the clock dependent layer to deliver another
622 * interrupt for the next pending alarm.
625 (*clock
->cl_ops
->c_setalrm
)(alarm_time
);
630 * ALARM DELIVERY ROUTINES.
635 __unused thread_call_param_t p0
,
636 __unused thread_call_param_t p1
)
638 register alarm_t alrm
;
643 while ((alrm
= alrmdone
) != NULL
) {
644 if ((alrmdone
= alrm
->al_next
) != NULL
)
645 alrmdone
->al_prev
= (alarm_t
) &alrmdone
;
648 code
= (alrm
->al_status
== ALARM_DONE
? KERN_SUCCESS
: KERN_ABORTED
);
649 if (alrm
->al_port
!= IP_NULL
) {
650 /* Deliver message to designated port */
651 if (IP_VALID(alrm
->al_port
)) {
652 clock_alarm_reply(alrm
->al_port
, alrm
->al_port_type
, code
,
653 alrm
->al_type
, alrm
->al_time
);
657 alrm
->al_status
= ALARM_FREE
;
658 alrm
->al_next
= alrmfree
;
662 panic("clock_alarm_deliver");
669 * CLOCK PRIVATE SERVICING SUBROUTINES.
673 * Flush all pending alarms on a clock. All alarms
674 * are activated and timestamped correctly, so any
675 * programs waiting on alarms/threads will proceed
676 * with accurate information.
683 register alarm_t alrm1
, alrm2
;
687 * Flush all outstanding alarms.
690 alrm1
= (alarm_t
) &clock
->cl_alarm
;
691 while ((alrm2
= alrm1
->al_next
) != NULL
) {
693 * Remove alarm from the clock alarm list.
695 if ((alrm1
->al_next
= alrm2
->al_next
) != NULL
)
696 (alrm1
->al_next
)->al_prev
= alrm1
;
699 * If a clock_sleep() alarm, wakeup the thread
700 * which issued the clock_sleep() call.
702 if (alrm2
->al_status
== ALARM_SLEEP
) {
704 thread_wakeup((event_t
)alrm2
);
708 * If a clock_alarm() alarm, place the alarm on
709 * the alarm done list and wakeup the dedicated
710 * kernel alarm_thread to service the alarm.
712 assert(alrm2
->al_status
== ALARM_CLOCK
);
713 if ((alrm2
->al_next
= alrmdone
) != NULL
)
714 alrmdone
->al_prev
= alrm2
;
716 thread_wakeup((event_t
)&alrmdone
);
717 alrm2
->al_prev
= (alarm_t
) &alrmdone
;
725 * Post an alarm on a clock's active alarm list. The alarm is
726 * inserted in time-order into the clock's active alarm list.
727 * Always called from within a LOCK_CLOCK() code section.
735 register alarm_t alrm1
, alrm2
;
736 mach_timespec_t
*alarm_time
;
737 mach_timespec_t
*queue_time
;
740 * Traverse alarm list until queue time is greater
741 * than alarm time, then insert alarm.
743 alarm_time
= &alarm
->al_time
;
744 alrm1
= (alarm_t
) &clock
->cl_alarm
;
745 while ((alrm2
= alrm1
->al_next
) != NULL
) {
746 queue_time
= &alrm2
->al_time
;
747 if (CMP_MACH_TIMESPEC(queue_time
, alarm_time
) > 0)
751 alrm1
->al_next
= alarm
;
752 alarm
->al_next
= alrm2
;
753 alarm
->al_prev
= alrm1
;
755 alrm2
->al_prev
= alarm
;
758 * If the inserted alarm is the 'earliest' alarm,
759 * reset the device layer alarm time accordingly.
761 if (clock
->cl_alarm
.al_next
== alarm
)
762 (*clock
->cl_ops
->c_setalrm
)(alarm_time
);
766 * Check the validity of 'alarm_time' and 'alarm_type'. If either
767 * argument is invalid, return a negative value. If the 'alarm_time'
768 * is now, return a 0 value. If the 'alarm_time' is in the future,
769 * return a positive value.
774 alarm_type_t alarm_type
,
775 mach_timespec_t
*alarm_time
,
776 mach_timespec_t
*clock_time
)
780 if (BAD_ALRMTYPE(alarm_type
))
782 if (BAD_MACH_TIMESPEC(alarm_time
))
784 if ((alarm_type
& ALRMTYPE
) == TIME_RELATIVE
)
785 ADD_MACH_TIMESPEC(alarm_time
, clock_time
);
787 result
= CMP_MACH_TIMESPEC(alarm_time
, clock_time
);
789 return ((result
>= 0)? result
: 0);
793 clock_get_system_value(void)
795 clock_t clock
= &clock_list
[SYSTEM_CLOCK
];
796 mach_timespec_t value
;
798 (void) (*clock
->cl_ops
->c_gettime
)(&value
);
804 clock_get_calendar_value(void)
806 clock_t clock
= &clock_list
[CALENDAR_CLOCK
];
807 mach_timespec_t value
= MACH_TIMESPEC_ZERO
;
809 (void) (*clock
->cl_ops
->c_gettime
)(&value
);
/*
 * Advance *deadline by interval for a periodic event.  If the new
 * deadline has already passed relative to abstime, re-anchor it to
 * the current time instead so the event does not fire in a burst to
 * catch up.
 *
 * NOTE(review): the parameter list was lost in extraction; restored
 * from the body's usage — verify against original.
 */
void
clock_deadline_for_periodic_event(
	uint64_t			interval,
	uint64_t			abstime,
	uint64_t			*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		/* re-check against a fresh timestamp in case time advanced */
		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
834 mk_timebase_info_trap(
835 struct mk_timebase_info_trap_args
*args
)
837 uint32_t *delta
= args
->delta
;
838 uint32_t *abs_to_ns_numer
= args
->abs_to_ns_numer
;
839 uint32_t *abs_to_ns_denom
= args
->abs_to_ns_denom
;
840 uint32_t *proc_to_abs_numer
= args
->proc_to_abs_numer
;
841 uint32_t *proc_to_abs_denom
= args
->proc_to_abs_denom
;
842 mach_timebase_info_data_t info
;
845 clock_timebase_info(&info
);
847 copyout((void *)&one
, CAST_USER_ADDR_T(delta
), sizeof (uint32_t));
849 copyout((void *)&info
.numer
, CAST_USER_ADDR_T(abs_to_ns_numer
), sizeof (uint32_t));
850 copyout((void *)&info
.denom
, CAST_USER_ADDR_T(abs_to_ns_denom
), sizeof (uint32_t));
852 copyout((void *)&one
, CAST_USER_ADDR_T(proc_to_abs_numer
), sizeof (uint32_t));
853 copyout((void *)&one
, CAST_USER_ADDR_T(proc_to_abs_denom
), sizeof (uint32_t));
857 mach_timebase_info_trap(
858 struct mach_timebase_info_trap_args
*args
)
860 mach_vm_address_t out_info_addr
= args
->info
;
861 mach_timebase_info_data_t info
;
863 clock_timebase_info(&info
);
865 copyout((void *)&info
, out_info_addr
, sizeof (info
));
867 return (KERN_SUCCESS
);
871 mach_wait_until_continue(
872 __unused
void *parameter
,
873 wait_result_t wresult
)
875 thread_syscall_return((wresult
== THREAD_INTERRUPTED
)? KERN_ABORTED
: KERN_SUCCESS
);
880 mach_wait_until_trap(
881 struct mach_wait_until_trap_args
*args
)
883 uint64_t deadline
= args
->deadline
;
884 wait_result_t wresult
;
886 wresult
= assert_wait_deadline((event_t
)mach_wait_until_trap
, THREAD_ABORTSAFE
, deadline
);
887 if (wresult
== THREAD_WAITING
)
888 wresult
= thread_block(mach_wait_until_continue
);
890 return ((wresult
== THREAD_INTERRUPTED
)? KERN_ABORTED
: KERN_SUCCESS
);
900 uint64_t now
= mach_absolute_time();
905 if ( (deadline
- now
) < (8 * sched_cswtime
) ||
906 get_preemption_level() != 0 ||
907 ml_get_interrupts_enabled() == FALSE
)
908 machine_delay_until(deadline
);
910 assert_wait_deadline((event_t
)clock_delay_until
, THREAD_UNINT
, deadline
- sched_cswtime
);
912 thread_block(THREAD_CONTINUE_NULL
);
/*
 * Delay for a relative interval expressed in units of scale_factor
 * (e.g. NSEC_PER_USEC): convert to an absolute deadline and delay
 * until it.
 *
 * NOTE(review): the function name and first parameter were lost in
 * extraction; restored from the call in delay() below — verify
 * against original.
 */
void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	clock_delay_until(end);
}
932 delay_for_interval((usec
< 0)? -usec
: usec
, NSEC_PER_USEC
);
944 simple_lock(&calend_adjlock
);
946 interval
= clock_set_calendar_adjtime(secs
, microsecs
);
948 if (calend_adjdeadline
>= interval
)
949 calend_adjdeadline
-= interval
;
950 clock_deadline_for_periodic_event(interval
, mach_absolute_time(),
951 &calend_adjdeadline
);
953 timer_call_enter(&calend_adjcall
, calend_adjdeadline
);
956 timer_call_cancel(&calend_adjcall
);
958 simple_unlock(&calend_adjlock
);
964 __unused timer_call_param_t p0
,
965 __unused timer_call_param_t p1
)
971 simple_lock(&calend_adjlock
);
973 interval
= clock_adjust_calendar();
975 clock_deadline_for_periodic_event(interval
, mach_absolute_time(),
976 &calend_adjdeadline
);
978 timer_call_enter(&calend_adjcall
, calend_adjdeadline
);
981 simple_unlock(&calend_adjlock
);
986 clock_wakeup_calendar(void)
988 thread_call_enter(&calend_wakecall
);
991 extern void IOKitResetTime(void); /* XXX */
995 __unused thread_call_param_t p0
,
996 __unused thread_call_param_t p1
)