/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Purpose:	Routines for the creation and use of kernel
 *			alarm clock services. This file and the ipc
 *			routines in kern/ipc_clock.c constitute the
 *			machine-independent clock service layer.
 */
#include <mach_host.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>

#include <kern/cpu_number.h>
#include <kern/misc_protos.h>
#include <kern/lock.h>
#include <kern/host.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/clock.h>
#include <kern/zalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <mach/mach_traps.h>
#include <mach/clock_reply.h>
#include <mach/mach_time.h>

#include <mach/clock_server.h>
#include <mach/clock_priv_server.h>
#include <mach/host_priv_server.h>
#include <mach/mach_host_server.h>
/* local data declarations */
decl_simple_lock_data(static,ClockLock)		/* clock system synchronization */
static struct	zone		*alarm_zone;	/* zone for user alarms */
static struct	alarm		*alrmfree;	/* alarm free list pointer */
static struct	alarm		*alrmdone;	/* alarm done list pointer */
static long			alrm_seqno;	/* uniquely identifies alarms */
static thread_call_data_t	alarm_deliver;

decl_simple_lock_data(static,calend_adjlock)

static timer_call_data_t	calend_adjcall;
static uint64_t			calend_adjdeadline;

static thread_call_data_t	calend_wakecall;
/* external declarations */
extern	struct clock	clock_list[];
extern	int		clock_count;
/* local clock subroutines */
void		flush_alarms(
			clock_t			clock);

void		post_alarm(
			clock_t			clock,
			alarm_t			alarm);

int		check_time(
			alarm_type_t		alarm_type,
			mach_timespec_t		*alarm_time,
			mach_timespec_t		*clock_time);
void		clock_alarm_deliver(
			thread_call_param_t	p0,
			thread_call_param_t	p1);

void		calend_adjust_call(
			timer_call_param_t	p0,
			timer_call_param_t	p1);

void		calend_dowakeup(
			thread_call_param_t	p0,
			thread_call_param_t	p1);
/*
 * Macros to lock/unlock clock system.
 */
#define LOCK_CLOCK(s)			\
	s = splclock();			\
	simple_lock(&ClockLock);

#define UNLOCK_CLOCK(s)			\
	simple_unlock(&ClockLock);	\
	splx(s);
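/*
 * Usage sketch (illustrative only): callers bracket alarm-list updates
 * with these macros so the lists are protected both against other
 * processors (simple_lock) and against the clock interrupt on this
 * processor (splclock/splx):
 *
 *	spl_t	s;
 *
 *	LOCK_CLOCK(s);
 *	...examine or modify alrmfree, alrmdone, or cl_alarm...
 *	UNLOCK_CLOCK(s);
 */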
/*
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_t		clock;
	int		i;

	assert(cpu_number() == master_cpu);

	simple_lock_init(&ClockLock, 0);
	thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);

	simple_lock_init(&calend_adjlock, 0);
	timer_call_setup(&calend_adjcall, calend_adjust_call, NULL);

	thread_call_setup(&calend_wakecall, calend_dowakeup, NULL);

	/*
	 * Configure clock devices.
	 */
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];
		if (clock->cl_ops && clock->cl_ops->c_config)
			if ((*clock->cl_ops->c_config)() == 0)
				clock->cl_ops = 0;
	}

	/*
	 * Initialize the timer callouts.
	 */
	timer_call_initialize();

	/* start alarm sequence numbers at 0 */
	alrm_seqno = 0;
}
/*
 *	Called on a processor each time started.
 */
void
clock_init(void)
{
	clock_t		clock;
	int		i;

	/*
	 * Initialize basic clock structures.
	 */
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];
		if (clock->cl_ops && clock->cl_ops->c_init)
			(*clock->cl_ops->c_init)();
	}
}
/*
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value. May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	sched_timebase_init();
}
/*
 *	Initialize the clock ipc service facility.
 */
void
clock_service_create(void)
{
	clock_t		clock;
	int		i;

	/*
	 * Initialize ipc clock services.
	 */
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];
		if (clock->cl_ops) {
			ipc_clock_init(clock);
			ipc_clock_enable(clock);
		}
	}

	/*
	 * Perform miscellaneous late
	 * initialization.
	 */
	i = sizeof(struct alarm);
	alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");
}
/*
 *	Get the service port on a clock.
 */
kern_return_t
host_get_clock_service(
	host_t			host,
	clock_id_t		clock_id,
	clock_t			*clock)		/* OUT */
{
	if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
		*clock = CLOCK_NULL;
		return (KERN_INVALID_ARGUMENT);
	}

	*clock = &clock_list[clock_id];
	if ((*clock)->cl_ops == 0)
		return (KERN_FAILURE);
	return (KERN_SUCCESS);
}
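/*
 * User-space usage sketch (illustrative; assumes the standard libmach
 * clock interface from <mach/clock.h>, not part of this file):
 *
 *	#include <mach/mach.h>
 *	#include <mach/clock.h>
 *
 *	clock_serv_t	clk;
 *	mach_timespec_t	ts;
 *
 *	host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clk);
 *	clock_get_time(clk, &ts);	// routed to clock_get_time() below
 */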
/*
 *	Get the control port on a clock.
 */
kern_return_t
host_get_clock_control(
	host_priv_t		host_priv,
	clock_id_t		clock_id,
	clock_t			*clock)		/* OUT */
{
	if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
		*clock = CLOCK_NULL;
		return (KERN_INVALID_ARGUMENT);
	}

	*clock = &clock_list[clock_id];
	if ((*clock)->cl_ops == 0)
		return (KERN_FAILURE);
	return (KERN_SUCCESS);
}
/*
 *	Get the current clock time.
 */
kern_return_t
clock_get_time(
	clock_t			clock,
	mach_timespec_t		*cur_time)	/* OUT */
{
	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	return ((*clock->cl_ops->c_gettime)(cur_time));
}
/*
 *	Get clock attributes.
 */
kern_return_t
clock_get_attributes(
	clock_t			clock,
	clock_flavor_t		flavor,
	clock_attr_t		attr,		/* OUT */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_getattr)
		return (clock->cl_ops->c_getattr(flavor, attr, count));
	return (KERN_FAILURE);
}
/*
 *	Set the current clock time.
 */
kern_return_t
clock_set_time(
	clock_t			clock,
	mach_timespec_t		new_time)
{
	mach_timespec_t		*clock_time;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_settime == NULL)
		return (KERN_FAILURE);
	clock_time = &new_time;
	if (BAD_MACH_TIMESPEC(clock_time))
		return (KERN_INVALID_VALUE);

	/*
	 * Flush all outstanding alarms.
	 */
	flush_alarms(clock);

	return (clock->cl_ops->c_settime(clock_time));
}
/*
 *	Set the clock alarm resolution.
 */
kern_return_t
clock_set_attributes(
	clock_t			clock,
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	count)
{
	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setattr)
		return (clock->cl_ops->c_setattr(flavor, attr, count));
	return (KERN_FAILURE);
}
/*
 *	Setup a clock alarm.
 */
kern_return_t
clock_alarm(
	clock_t			clock,
	alarm_type_t		alarm_type,
	mach_timespec_t		alarm_time,
	ipc_port_t		alarm_port,
	mach_msg_type_name_t	alarm_port_type)
{
	alarm_t			alarm;
	mach_timespec_t		clock_time;
	int			chkstat;
	kern_return_t		reply_code;
	spl_t			s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);
	if (IP_VALID(alarm_port) == 0)
		return (KERN_INVALID_CAPABILITY);

	/*
	 * Check alarm parameters. If parameters are invalid,
	 * send alarm message immediately.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);
	chkstat = check_time(alarm_type, &alarm_time, &clock_time);
	if (chkstat <= 0) {
		reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
		clock_alarm_reply(alarm_port, alarm_port_type,
				  reply_code, alarm_type, clock_time);
		return (KERN_SUCCESS);
	}

	/*
	 * Get alarm and add to clock alarm list.
	 */
	LOCK_CLOCK(s);
	if ((alarm = alrmfree) == 0) {
		UNLOCK_CLOCK(s);
		alarm = (alarm_t) zalloc(alarm_zone);
		if (alarm == 0)
			return (KERN_RESOURCE_SHORTAGE);
		LOCK_CLOCK(s);
	}
	else
		alrmfree = alarm->al_next;

	alarm->al_status = ALARM_CLOCK;
	alarm->al_time = alarm_time;
	alarm->al_type = alarm_type;
	alarm->al_port = alarm_port;
	alarm->al_port_type = alarm_port_type;
	alarm->al_clock = clock;
	alarm->al_seqno = alrm_seqno++;
	post_alarm(clock, alarm);
	UNLOCK_CLOCK(s);

	return (KERN_SUCCESS);
}
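/*
 * User-space usage sketch (illustrative, not part of this file): a task
 * typically allocates a reply port, posts the alarm, and then receives
 * the clock_alarm_reply message that clock_alarm_deliver() generates
 * when the alarm fires.  'clk' is obtained via host_get_clock_service()
 * as sketched earlier:
 *
 *	mach_port_t	reply;
 *	mach_timespec_t	in = { 1, 0 };		// one second from now
 *
 *	mach_port_allocate(mach_task_self(),
 *			MACH_PORT_RIGHT_RECEIVE, &reply);
 *	clock_alarm(clk, TIME_RELATIVE, in, reply);
 *	...wait for the reply message with mach_msg()...
 */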
/*
 *	Sleep on a clock. System trap. User-level libmach clock_sleep
 *	interface call takes a mach_timespec_t sleep_time argument which it
 *	converts to sleep_sec and sleep_nsec arguments which are then
 *	passed to clock_sleep_trap.
 */
kern_return_t
clock_sleep_trap(
	struct clock_sleep_trap_args *args)
{
	mach_port_name_t	clock_name = args->clock_name;
	sleep_type_t		sleep_type = args->sleep_type;
	int			sleep_sec = args->sleep_sec;
	int			sleep_nsec = args->sleep_nsec;
	mach_vm_address_t	wakeup_time_addr = args->wakeup_time;
	clock_t			clock;
	mach_timespec_t		swtime;
	kern_return_t		rvalue;

	/*
	 * Convert the trap parameters.
	 */
	if (clock_name != MACH_PORT_NULL)
		clock = port_name_to_clock(clock_name);
	else
		clock = &clock_list[SYSTEM_CLOCK];

	swtime.tv_sec  = sleep_sec;
	swtime.tv_nsec = sleep_nsec;

	/*
	 * Call the actual clock_sleep routine.
	 */
	rvalue = clock_sleep_internal(clock, sleep_type, &swtime);

	/*
	 * Return current time as wakeup time.
	 */
	if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
		copyout((char *)&swtime, wakeup_time_addr, sizeof(mach_timespec_t));
	}
	return (rvalue);
}
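/*
 * User-space usage sketch (illustrative, not part of this file): the
 * libmach clock_sleep() wrapper mentioned above splits its sleep_time
 * argument into the sleep_sec / sleep_nsec trap arguments:
 *
 *	mach_timespec_t	in  = { 0, 500000000 };		// 500 ms, relative
 *	mach_timespec_t	out;
 *
 *	clock_sleep(clk, TIME_RELATIVE, in, &out);	// out = wakeup time
 */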
/*
 *	Kernel internally callable clock sleep routine. The calling
 *	thread is suspended until the requested sleep time is reached.
 */
kern_return_t
clock_sleep_internal(
	clock_t			clock,
	sleep_type_t		sleep_type,
	mach_timespec_t		*sleep_time)
{
	alarm_t			alarm;
	mach_timespec_t		clock_time;
	kern_return_t		rvalue;
	int			chkstat;
	spl_t			s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);

	/*
	 * Check sleep parameters. If parameters are invalid
	 * return an error, otherwise post alarm request.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);

	chkstat = check_time(sleep_type, sleep_time, &clock_time);
	if (chkstat < 0)
		return (KERN_INVALID_VALUE);
	rvalue = KERN_SUCCESS;
	if (chkstat > 0) {
		wait_result_t	wait_result;

		/*
		 * Get alarm and add to clock alarm list.
		 */
		LOCK_CLOCK(s);
		if ((alarm = alrmfree) == 0) {
			UNLOCK_CLOCK(s);
			alarm = (alarm_t) zalloc(alarm_zone);
			if (alarm == 0)
				return (KERN_RESOURCE_SHORTAGE);
			LOCK_CLOCK(s);
		}
		else
			alrmfree = alarm->al_next;

		/*
		 * Wait for alarm to occur.
		 */
		wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
		if (wait_result == THREAD_WAITING) {
			alarm->al_time = *sleep_time;
			alarm->al_status = ALARM_SLEEP;
			post_alarm(clock, alarm);
			UNLOCK_CLOCK(s);

			wait_result = thread_block(THREAD_CONTINUE_NULL);

			/*
			 * Note if alarm expired normally or whether it
			 * was aborted. If aborted, delete alarm from
			 * clock alarm list. Return alarm to free list.
			 */
			LOCK_CLOCK(s);
			if (alarm->al_status != ALARM_DONE) {
				assert(wait_result != THREAD_AWAKENED);
				if (((alarm->al_prev)->al_next = alarm->al_next) != NULL)
					(alarm->al_next)->al_prev = alarm->al_prev;
				rvalue = KERN_ABORTED;
			}
			*sleep_time = alarm->al_time;
			alarm->al_status = ALARM_FREE;
		}
		else {
			assert(wait_result == THREAD_INTERRUPTED);
			assert(alarm->al_status == ALARM_FREE);
			rvalue = KERN_ABORTED;
		}
		alarm->al_next = alrmfree;
		alrmfree = alarm;
		UNLOCK_CLOCK(s);
	}
	else
		*sleep_time = clock_time;

	return (rvalue);
}
/*
 * CLOCK INTERRUPT SERVICE ROUTINES.
 */

/*
 *	Service clock alarm interrupts. Called from machine dependent
 *	layer at splclock(). The clock_id argument specifies the clock,
 *	and the clock_time argument gives that clock's current time.
 */
void
clock_alarm_intr(
	clock_id_t		clock_id,
	mach_timespec_t		*clock_time)
{
	clock_t			clock;
	register alarm_t	alrm1;
	register alarm_t	alrm2;
	mach_timespec_t		*alarm_time;
	spl_t			s;

	clock = &clock_list[clock_id];

	/*
	 * Update clock alarm list. All alarms that are due are moved
	 * to the alarmdone list to be serviced by the alarm_thread.
	 */
	LOCK_CLOCK(s);
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		alarm_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
			break;

		/*
		 * Alarm has expired, so remove it from the
		 * clock alarm list.
		 */
		if ((alrm1->al_next = alrm2->al_next) != NULL)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
			thread_wakeup((event_t)alrm2);
		}
		else {
			/*
			 * If a clock_alarm() alarm, place the alarm on
			 * the alarm done list and schedule the alarm
			 * delivery mechanism.
			 */
			assert(alrm2->al_status == ALARM_CLOCK);
			if ((alrm2->al_next = alrmdone) != NULL)
				alrmdone->al_prev = alrm2;
			else
				thread_call_enter(&alarm_deliver);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
		}
	}

	/*
	 * Setup the clock dependent layer to deliver another
	 * interrupt for the next pending alarm.
	 */
	if (alrm2)
		(*clock->cl_ops->c_setalrm)(alarm_time);
	UNLOCK_CLOCK(s);
}
/*
 * ALARM DELIVERY ROUTINES.
 */

void
clock_alarm_deliver(
	__unused thread_call_param_t	p0,
	__unused thread_call_param_t	p1)
{
	register alarm_t	alrm;
	kern_return_t		code;
	spl_t			s;

	LOCK_CLOCK(s);
	while ((alrm = alrmdone) != NULL) {
		if ((alrmdone = alrm->al_next) != NULL)
			alrmdone->al_prev = (alarm_t) &alrmdone;
		UNLOCK_CLOCK(s);

		code = (alrm->al_status == ALARM_DONE ? KERN_SUCCESS : KERN_ABORTED);
		if (alrm->al_port != IP_NULL) {
			/* Deliver message to designated port */
			if (IP_VALID(alrm->al_port)) {
				clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
						  alrm->al_type, alrm->al_time);
			}

			LOCK_CLOCK(s);
			alrm->al_status = ALARM_FREE;
			alrm->al_next = alrmfree;
			alrmfree = alrm;
		}
		else
			panic("clock_alarm_deliver");
	}

	UNLOCK_CLOCK(s);
}
/*
 * CLOCK PRIVATE SERVICING SUBROUTINES.
 */

/*
 *	Flush all pending alarms on a clock. All alarms
 *	are activated and timestamped correctly, so any
 *	programs waiting on alarms/threads will proceed
 *	with accurate information.
 */
void
flush_alarms(
	clock_t			clock)
{
	register alarm_t	alrm1, alrm2;
	spl_t			s;

	/*
	 * Flush all outstanding alarms.
	 */
	LOCK_CLOCK(s);
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		/*
		 * Remove alarm from the clock alarm list.
		 */
		if ((alrm1->al_next = alrm2->al_next) != NULL)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			thread_wakeup((event_t)alrm2);
		}
		else {
			/*
			 * If a clock_alarm() alarm, place the alarm on
			 * the alarm done list and wakeup the dedicated
			 * kernel alarm_thread to service the alarm.
			 */
			assert(alrm2->al_status == ALARM_CLOCK);
			if ((alrm2->al_next = alrmdone) != NULL)
				alrmdone->al_prev = alrm2;
			else
				thread_wakeup((event_t)&alrmdone);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
		}
	}
	UNLOCK_CLOCK(s);
}
/*
 *	Post an alarm on a clock's active alarm list. The alarm is
 *	inserted in time-order into the clock's active alarm list.
 *	Always called from within a LOCK_CLOCK() code section.
 */
void
post_alarm(
	clock_t			clock,
	alarm_t			alarm)
{
	register alarm_t	alrm1, alrm2;
	mach_timespec_t		*alarm_time;
	mach_timespec_t		*queue_time;

	/*
	 * Traverse alarm list until queue time is greater
	 * than alarm time, then insert alarm.
	 */
	alarm_time = &alarm->al_time;
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		queue_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
			break;
		alrm1 = alrm2;
	}
	alrm1->al_next = alarm;
	alarm->al_next = alrm2;
	alarm->al_prev = alrm1;
	if (alrm2)
		alrm2->al_prev = alarm;

	/*
	 * If the inserted alarm is the 'earliest' alarm,
	 * reset the device layer alarm time accordingly.
	 */
	if (clock->cl_alarm.al_next == alarm)
		(*clock->cl_ops->c_setalrm)(alarm_time);
}
/*
 *	Check the validity of 'alarm_time' and 'alarm_type'. If either
 *	argument is invalid, return a negative value. If the 'alarm_time'
 *	is now, return a 0 value. If the 'alarm_time' is in the future,
 *	return a positive value.
 */
int
check_time(
	alarm_type_t		alarm_type,
	mach_timespec_t		*alarm_time,
	mach_timespec_t		*clock_time)
{
	int			result;

	if (BAD_ALRMTYPE(alarm_type))
		return (-1);
	if (BAD_MACH_TIMESPEC(alarm_time))
		return (-1);
	if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
		ADD_MACH_TIMESPEC(alarm_time, clock_time);

	result = CMP_MACH_TIMESPEC(alarm_time, clock_time);

	return ((result >= 0)? result : 0);
}
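/*
 * Worked example (illustrative): for a TIME_RELATIVE request of
 * { 2, 0 } with a clock_time of { 100, 0 }, ADD_MACH_TIMESPEC()
 * rewrites alarm_time in place to the absolute value { 102, 0 };
 * CMP_MACH_TIMESPEC() is then positive, so the caller posts the alarm
 * instead of replying or returning immediately.
 */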
mach_timespec_t
clock_get_system_value(void)
{
	clock_t			clock = &clock_list[SYSTEM_CLOCK];
	mach_timespec_t		value;

	(void) (*clock->cl_ops->c_gettime)(&value);

	return value;
}
mach_timespec_t
clock_get_calendar_value(void)
{
	clock_t			clock = &clock_list[CALENDAR_CLOCK];
	mach_timespec_t		value = MACH_TIMESPEC_ZERO;

	(void) (*clock->cl_ops->c_gettime)(&value);

	return value;
}
void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
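/*
 * Worked example (illustrative): with interval = 10 absolute-time units,
 * *deadline = 95 and abstime = 120, the simple advance to 105 is already
 * in the past, so the deadline is re-anchored to abstime + interval = 130;
 * the second check catches the case where mach_absolute_time() has moved
 * past even that value while this routine was running.
 */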
void
mk_timebase_info_trap(
	struct mk_timebase_info_trap_args *args)
{
	uint32_t			*delta = args->delta;
	uint32_t			*abs_to_ns_numer = args->abs_to_ns_numer;
	uint32_t			*abs_to_ns_denom = args->abs_to_ns_denom;
	uint32_t			*proc_to_abs_numer = args->proc_to_abs_numer;
	uint32_t			*proc_to_abs_denom = args->proc_to_abs_denom;
	mach_timebase_info_data_t	info;
	uint32_t			one = 1;

	clock_timebase_info(&info);

	copyout((void *)&one, CAST_USER_ADDR_T(delta), sizeof (uint32_t));

	copyout((void *)&info.numer, CAST_USER_ADDR_T(abs_to_ns_numer), sizeof (uint32_t));
	copyout((void *)&info.denom, CAST_USER_ADDR_T(abs_to_ns_denom), sizeof (uint32_t));

	copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_numer), sizeof (uint32_t));
	copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_denom), sizeof (uint32_t));
}
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t		out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
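/*
 * User-space usage sketch (illustrative, not part of this file):
 * mach_timebase_info() is the libmach wrapper for this trap; the
 * numer/denom fraction it returns converts mach_absolute_time() units
 * to nanoseconds:
 *
 *	mach_timebase_info_data_t	tb;
 *	uint64_t			t0, elapsed_ns;
 *
 *	mach_timebase_info(&tb);
 *	t0 = mach_absolute_time();
 *	...do some work...
 *	elapsed_ns = (mach_absolute_time() - t0) * tb.numer / tb.denom;
 */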
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args *args)
{
	uint64_t		deadline = args->deadline;
	wait_result_t		wresult;

	wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
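/*
 * User-space usage sketch (illustrative, not part of this file):
 * mach_wait_until() is the libmach wrapper for this trap and takes an
 * absolute deadline expressed in mach_absolute_time() units:
 *
 *	mach_timebase_info_data_t	tb;
 *	uint64_t			deadline;
 *
 *	mach_timebase_info(&tb);
 *	deadline = mach_absolute_time() +
 *			(100000000ULL * tb.denom) / tb.numer;	// ~100 ms
 *	mach_wait_until(deadline);
 */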
void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	if (	(deadline - now) < (8 * sched_cswtime)	||
			get_preemption_level() != 0	||
			ml_get_interrupts_enabled() == FALSE	)
		machine_delay_until(deadline);
	else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

		thread_block(THREAD_CONTINUE_NULL);
	}
}
void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	clock_delay_until(end);
}
void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec : usec, NSEC_PER_USEC);
}
void
clock_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&calend_adjlock);

	interval = clock_set_calendar_adjtime(secs, microsecs);
	if (interval != 0) {
		if (calend_adjdeadline >= interval)
			calend_adjdeadline -= interval;
		clock_deadline_for_periodic_event(interval, mach_absolute_time(),
							&calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}
	else
		timer_call_cancel(&calend_adjcall);

	simple_unlock(&calend_adjlock);
	splx(s);
}
void
calend_adjust_call(
	__unused timer_call_param_t	p0,
	__unused timer_call_param_t	p1)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&calend_adjlock);

	interval = clock_adjust_calendar();
	if (interval != 0) {
		clock_deadline_for_periodic_event(interval, mach_absolute_time(),
							&calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}

	simple_unlock(&calend_adjlock);
	splx(s);
}
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}
extern	void	IOKitResetTime(void); /* XXX */
void
calend_dowakeup(
	__unused thread_call_param_t	p0,
	__unused thread_call_param_t	p1)
{
	IOKitResetTime();
}