/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 *	Purpose:	Routines for the creation and use of kernel
 *			alarm clock services. This file and the ipc
 *			routines in kern/ipc_clock.c constitute the
 *			machine-independent clock service layer.
 */
#include <mach_host.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>

#include <kern/cpu_number.h>
#include <kern/misc_protos.h>
#include <kern/lock.h>
#include <kern/host.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/clock.h>
#include <kern/zalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <mach/mach_traps.h>
#include <mach/clock_reply.h>
#include <mach/mach_time.h>

#include <mach/clock_server.h>
#include <mach/clock_priv_server.h>
#include <mach/host_priv_server.h>
#include <mach/mach_host_server.h>
/* local data declarations */
decl_simple_lock_data(static,ClockLock)		/* clock system synchronization */
static struct zone		*alarm_zone;	/* zone for user alarms */
static struct alarm		*alrmfree;	/* alarm free list pointer */
static struct alarm		*alrmdone;	/* alarm done list pointer */
static long			alrm_seqno;	/* uniquely identifies alarms */
static thread_call_data_t	alarm_deliver;

decl_simple_lock_data(static,calend_adjlock)

static timer_call_data_t	calend_adjcall;
static uint64_t			calend_adjdeadline;

static thread_call_data_t	calend_wakecall;
/* external declarations */
extern	struct clock	clock_list[];
extern	int		clock_count;
/* local clock subroutines */
static void	flush_alarms(
			clock_t			clock);

static void	post_alarm(
			clock_t			clock,
			alarm_t			alarm);

static int	check_time(
			alarm_type_t		alarm_type,
			mach_timespec_t		*alarm_time,
			mach_timespec_t		*clock_time);

void		clock_alarm_deliver(
			thread_call_param_t	p0,
			thread_call_param_t	p1);

void		calend_adjust_call(
			timer_call_param_t	p0,
			timer_call_param_t	p1);

void		calend_dowakeup(
			thread_call_param_t	p0,
			thread_call_param_t	p1);
/*
 * Macros to lock/unlock clock system.
 */
#define LOCK_CLOCK(s)			\
	s = splclock();			\
	simple_lock(&ClockLock);

#define UNLOCK_CLOCK(s)			\
	simple_unlock(&ClockLock);	\
	splx(s);
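/*
 * Usage sketch (illustrative only): callers save the previous interrupt
 * priority level in a local spl_t and hold the lock across the critical
 * section, e.g.:
 *
 *	spl_t	s;
 *
 *	LOCK_CLOCK(s);
 *	... examine or modify the clock alarm lists ...
 *	UNLOCK_CLOCK(s);
 */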
/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_t			clock;
	register int		i;

	assert(cpu_number() == master_cpu);

	simple_lock_init(&ClockLock, 0);
	thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);

	simple_lock_init(&calend_adjlock, 0);
	timer_call_setup(&calend_adjcall, calend_adjust_call, NULL);

	thread_call_setup(&calend_wakecall, calend_dowakeup, NULL);

	/*
	 * Configure clock devices.
	 */
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];

		if ((*clock->cl_ops->c_config)() == 0)
			clock->cl_ops = 0;	/* disable clocks that fail to configure */
	}

	/*
	 * Initialize the timer callouts.
	 */
	timer_call_initialize();

	/* start alarm sequence numbers at 0 */
	alrm_seqno = 0;
}
/*
 *	clock_init:
 *
 *	Called on a processor each time started.
 */
void
clock_init(void)
{
	clock_t			clock;
	register int		i;

	/*
	 * Initialize basic clock structures.
	 */
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];

		if (clock->cl_ops && clock->cl_ops->c_init)
			(*clock->cl_ops->c_init)();
	}
}
/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value. May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	sched_timebase_init();
}
/*
 * Initialize the clock ipc service facility.
 */
void
clock_service_create(void)
{
	clock_t			clock;
	register int		i;

	/*
	 * Initialize ipc clock services.
	 */
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];
		if (clock->cl_ops) {
			ipc_clock_init(clock);
			ipc_clock_enable(clock);
		}
	}

	/*
	 * Perform miscellaneous late
	 * initialization.
	 */
	i = sizeof(struct alarm);
	alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");
}
/*
 * Get the service port on a clock.
 */
kern_return_t
host_get_clock_service(
	host_t			host,
	clock_id_t		clock_id,
	clock_t			*clock)		/* OUT */
{
	if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
		*clock = CLOCK_NULL;
		return (KERN_INVALID_ARGUMENT);
	}

	*clock = &clock_list[clock_id];
	if ((*clock)->cl_ops == 0)
		return (KERN_FAILURE);
	return (KERN_SUCCESS);
}
/*
 * Get the control port on a clock.
 */
kern_return_t
host_get_clock_control(
	host_priv_t		host_priv,
	clock_id_t		clock_id,
	clock_t			*clock)		/* OUT */
{
	if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
		*clock = CLOCK_NULL;
		return (KERN_INVALID_ARGUMENT);
	}

	*clock = &clock_list[clock_id];
	if ((*clock)->cl_ops == 0)
		return (KERN_FAILURE);
	return (KERN_SUCCESS);
}
/*
 * Get the current clock time.
 */
kern_return_t
clock_get_time(
	clock_t			clock,
	mach_timespec_t		*cur_time)	/* OUT */
{
	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	return ((*clock->cl_ops->c_gettime)(cur_time));
}
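/*
 * Example (user-space sketch): the MIG interfaces above are what a client
 * exercises when it asks the host for a clock service port and then reads
 * that clock.  Error handling is elided:
 *
 *	clock_serv_t	clk;
 *	mach_timespec_t	ts;
 *
 *	host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clk);
 *	clock_get_time(clk, &ts);	// serviced by clock_get_time() above
 *	mach_port_deallocate(mach_task_self(), clk);
 */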
/*
 * Get clock attributes.
 */
kern_return_t
clock_get_attributes(
	clock_t				clock,
	clock_flavor_t			flavor,
	clock_attr_t			attr,		/* OUT */
	mach_msg_type_number_t		*count)		/* IN/OUT */
{
	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_getattr)
		return (clock->cl_ops->c_getattr(flavor, attr, count));
	return (KERN_FAILURE);
}
/*
 * Set the current clock time.
 */
kern_return_t
clock_set_time(
	clock_t			clock,
	mach_timespec_t		new_time)
{
	mach_timespec_t		*clock_time;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_settime == NULL)
		return (KERN_FAILURE);
	clock_time = &new_time;
	if (BAD_MACH_TIMESPEC(clock_time))
		return (KERN_INVALID_VALUE);

	/*
	 * Flush all outstanding alarms.
	 */
	flush_alarms(clock);

	/*
	 * Set the new time.
	 */
	return (clock->cl_ops->c_settime(clock_time));
}
/*
 * Set the clock alarm resolution.
 */
kern_return_t
clock_set_attributes(
	clock_t				clock,
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t		count)
{
	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setattr)
		return (clock->cl_ops->c_setattr(flavor, attr, count));
	return (KERN_FAILURE);
}
/*
 * Setup a clock alarm.
 */
kern_return_t
clock_alarm(
	clock_t				clock,
	alarm_type_t			alarm_type,
	mach_timespec_t			alarm_time,
	ipc_port_t			alarm_port,
	mach_msg_type_name_t		alarm_port_type)
{
	alarm_t				alarm;
	mach_timespec_t			clock_time;
	int				chkstat;
	kern_return_t			reply_code;
	spl_t				s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);
	if (IP_VALID(alarm_port) == 0)
		return (KERN_INVALID_CAPABILITY);

	/*
	 * Check alarm parameters. If parameters are invalid,
	 * send alarm message immediately.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);
	chkstat = check_time(alarm_type, &alarm_time, &clock_time);
	if (chkstat <= 0) {
		reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
		clock_alarm_reply(alarm_port, alarm_port_type,
				  reply_code, alarm_type, clock_time);
		return (KERN_SUCCESS);
	}

	/*
	 * Get alarm and add to clock alarm list.
	 */
	LOCK_CLOCK(s);
	if ((alarm = alrmfree) == 0) {
		UNLOCK_CLOCK(s);
		alarm = (alarm_t) zalloc(alarm_zone);
		if (alarm == 0)
			return (KERN_RESOURCE_SHORTAGE);
		LOCK_CLOCK(s);
	}
	else
		alrmfree = alarm->al_next;

	alarm->al_status = ALARM_CLOCK;
	alarm->al_time = alarm_time;
	alarm->al_type = alarm_type;
	alarm->al_port = alarm_port;
	alarm->al_port_type = alarm_port_type;
	alarm->al_clock = clock;
	alarm->al_seqno = alrm_seqno++;
	post_alarm(clock, alarm);
	UNLOCK_CLOCK(s);

	return (KERN_SUCCESS);
}
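/*
 * Example (user-space sketch): a client arms an alarm on the system clock
 * by passing a reply port; the kernel side above queues the alarm, and a
 * clock_alarm_reply() message is sent to that port when it fires.  Port
 * and error handling are elided:
 *
 *	clock_serv_t	clk;
 *	mach_port_t	reply;
 *	mach_timespec_t	in_one_sec = { 1, 0 };
 *
 *	host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clk);
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &reply);
 *	clock_alarm(clk, TIME_RELATIVE, in_one_sec, reply);
 *	... wait for the alarm message with mach_msg() on 'reply' ...
 */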
/*
 * Sleep on a clock. System trap. User-level libmach clock_sleep
 * interface call takes a mach_timespec_t sleep_time argument which it
 * converts to sleep_sec and sleep_nsec arguments which are then
 * passed to clock_sleep_trap.
 */
kern_return_t
clock_sleep_trap(
	struct clock_sleep_trap_args *args)
{
	mach_port_name_t	clock_name = args->clock_name;
	sleep_type_t		sleep_type = args->sleep_type;
	int			sleep_sec = args->sleep_sec;
	int			sleep_nsec = args->sleep_nsec;
	mach_vm_address_t	wakeup_time_addr = args->wakeup_time;
	clock_t			clock;
	mach_timespec_t		swtime;
	kern_return_t		rvalue;

	/*
	 * Convert the trap parameters.
	 */
	if (clock_name != MACH_PORT_NULL)
		clock = port_name_to_clock(clock_name);
	else
		clock = &clock_list[SYSTEM_CLOCK];

	swtime.tv_sec = sleep_sec;
	swtime.tv_nsec = sleep_nsec;

	/*
	 * Call the actual clock_sleep routine.
	 */
	rvalue = clock_sleep_internal(clock, sleep_type, &swtime);

	/*
	 * Return current time as wakeup time.
	 */
	if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
		copyout((char *)&swtime, wakeup_time_addr, sizeof(mach_timespec_t));
	}
	return (rvalue);
}
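/*
 * Sketch of the user-level side described above (libmach's clock_sleep
 * wrapper, shown schematically): the mach_timespec_t is split into the
 * two integer trap arguments, and the wakeup time is returned by the
 * copyout performed in this trap:
 *
 *	kern_return_t
 *	clock_sleep(mach_port_t clock, sleep_type_t type,
 *			mach_timespec_t sleep_time, mach_timespec_t *wakeup_time)
 *	{
 *		return (clock_sleep_trap(clock, type,
 *				sleep_time.tv_sec, sleep_time.tv_nsec, wakeup_time));
 *	}
 */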
/*
 * Kernel internally callable clock sleep routine. The calling
 * thread is suspended until the requested sleep time is reached.
 */
kern_return_t
clock_sleep_internal(
	clock_t			clock,
	sleep_type_t		sleep_type,
	mach_timespec_t		*sleep_time)
{
	alarm_t			alarm;
	mach_timespec_t		clock_time;
	kern_return_t		rvalue;
	int			chkstat;
	spl_t			s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);

	/*
	 * Check sleep parameters. If parameters are invalid
	 * return an error, otherwise post alarm request.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);

	chkstat = check_time(sleep_type, sleep_time, &clock_time);
	if (chkstat < 0)
		return (KERN_INVALID_VALUE);
	rvalue = KERN_SUCCESS;
	if (chkstat > 0) {
		wait_result_t	wait_result;

		/*
		 * Get alarm and add to clock alarm list.
		 */
		LOCK_CLOCK(s);
		if ((alarm = alrmfree) == 0) {
			UNLOCK_CLOCK(s);
			alarm = (alarm_t) zalloc(alarm_zone);
			if (alarm == 0)
				return (KERN_RESOURCE_SHORTAGE);
			LOCK_CLOCK(s);
		}
		else
			alrmfree = alarm->al_next;

		/*
		 * Wait for alarm to occur.
		 */
		wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
		if (wait_result == THREAD_WAITING) {
			alarm->al_time = *sleep_time;
			alarm->al_status = ALARM_SLEEP;
			post_alarm(clock, alarm);
			UNLOCK_CLOCK(s);

			wait_result = thread_block(THREAD_CONTINUE_NULL);

			/*
			 * Note if alarm expired normally or whether it
			 * was aborted. If aborted, delete alarm from
			 * clock alarm list. Return alarm to free list.
			 */
			LOCK_CLOCK(s);
			if (alarm->al_status != ALARM_DONE) {
				assert(wait_result != THREAD_AWAKENED);
				if (((alarm->al_prev)->al_next = alarm->al_next) != NULL)
					(alarm->al_next)->al_prev = alarm->al_prev;
				rvalue = KERN_ABORTED;
			}
			*sleep_time = alarm->al_time;
			alarm->al_status = ALARM_FREE;
		}
		else {
			assert(wait_result == THREAD_INTERRUPTED);
			assert(alarm->al_status == ALARM_FREE);
			rvalue = KERN_ABORTED;
		}

		alarm->al_next = alrmfree;
		alrmfree = alarm;
		UNLOCK_CLOCK(s);
	}
	else
		*sleep_time = clock_time;

	return (rvalue);
}
/*
 * CLOCK INTERRUPT SERVICE ROUTINES.
 */

/*
 * Service clock alarm interrupts. Called from machine dependent
 * layer at splclock(). The clock_id argument specifies the clock,
 * and the clock_time argument gives that clock's current time.
 */
void
clock_alarm_intr(
	clock_id_t		clock_id,
	mach_timespec_t		*clock_time)
{
	clock_t			clock;
	register alarm_t	alrm1;
	register alarm_t	alrm2;
	mach_timespec_t		*alarm_time;
	spl_t			s;

	clock = &clock_list[clock_id];

	/*
	 * Update clock alarm list. All alarms that are due are moved
	 * to the alarmdone list to be serviced by the alarm_thread.
	 */
	LOCK_CLOCK(s);
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		alarm_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
			break;

		/*
		 * Alarm has expired, so remove it from the
		 * clock alarm list.
		 */
		if ((alrm1->al_next = alrm2->al_next) != NULL)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
			thread_wakeup((event_t)alrm2);
		}
		/*
		 * If a clock_alarm() alarm, place the alarm on
		 * the alarm done list and schedule the alarm
		 * delivery mechanism.
		 */
		else {
			assert(alrm2->al_status == ALARM_CLOCK);
			if ((alrm2->al_next = alrmdone) != NULL)
				alrmdone->al_prev = alrm2;
			else
				thread_call_enter(&alarm_deliver);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
		}
	}

	/*
	 * Setup the clock dependent layer to deliver another
	 * interrupt for the next pending alarm.
	 */
	if (alrm2)
		(*clock->cl_ops->c_setalrm)(alarm_time);
	UNLOCK_CLOCK(s);
}
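/*
 * Sketch (illustrative only): a machine-dependent clock interrupt handler,
 * running at splclock(), reports the clock's current time and lets this
 * routine expire whatever is due, along the lines of:
 *
 *	mach_timespec_t	now;
 *
 *	(*clock->cl_ops->c_gettime)(&now);	// or read the hardware directly
 *	clock_alarm_intr(SYSTEM_CLOCK, &now);
 */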
/*
 * ALARM DELIVERY ROUTINES.
 */

void
clock_alarm_deliver(
	__unused thread_call_param_t		p0,
	__unused thread_call_param_t		p1)
{
	register alarm_t	alrm;
	kern_return_t		code;
	spl_t			s;

	LOCK_CLOCK(s);
	while ((alrm = alrmdone) != NULL) {
		if ((alrmdone = alrm->al_next) != NULL)
			alrmdone->al_prev = (alarm_t) &alrmdone;
		UNLOCK_CLOCK(s);

		code = (alrm->al_status == ALARM_DONE? KERN_SUCCESS: KERN_ABORTED);
		if (alrm->al_port != IP_NULL) {
			/* Deliver message to designated port */
			if (IP_VALID(alrm->al_port)) {
				clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
						  alrm->al_type, alrm->al_time);
			}

			LOCK_CLOCK(s);
			alrm->al_status = ALARM_FREE;
			alrm->al_next = alrmfree;
			alrmfree = alrm;
		}
		else
			panic("clock_alarm_deliver");
	}

	UNLOCK_CLOCK(s);
}
/*
 * CLOCK PRIVATE SERVICING SUBROUTINES.
 */

/*
 * Flush all pending alarms on a clock. All alarms
 * are activated and timestamped correctly, so any
 * programs waiting on alarms/threads will proceed
 * with accurate information.
 */
static void
flush_alarms(
	clock_t			clock)
{
	register alarm_t	alrm1, alrm2;
	spl_t			s;

	/*
	 * Flush all outstanding alarms.
	 */
	LOCK_CLOCK(s);
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		/*
		 * Remove alarm from the clock alarm list.
		 */
		if ((alrm1->al_next = alrm2->al_next) != NULL)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			thread_wakeup((event_t)alrm2);
		}
		else {
			/*
			 * If a clock_alarm() alarm, place the alarm on
			 * the alarm done list and wakeup the dedicated
			 * kernel alarm_thread to service the alarm.
			 */
			assert(alrm2->al_status == ALARM_CLOCK);
			if ((alrm2->al_next = alrmdone) != NULL)
				alrmdone->al_prev = alrm2;
			else
				thread_wakeup((event_t)&alrmdone);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
		}
	}
	UNLOCK_CLOCK(s);
}
/*
 * Post an alarm on a clock's active alarm list. The alarm is
 * inserted in time-order into the clock's active alarm list.
 * Always called from within a LOCK_CLOCK() code section.
 */
static void
post_alarm(
	clock_t			clock,
	alarm_t			alarm)
{
	register alarm_t	alrm1, alrm2;
	mach_timespec_t		*alarm_time;
	mach_timespec_t		*queue_time;

	/*
	 * Traverse alarm list until queue time is greater
	 * than alarm time, then insert alarm.
	 */
	alarm_time = &alarm->al_time;
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		queue_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
			break;
		alrm1 = alrm2;
	}
	alrm1->al_next = alarm;
	alarm->al_next = alrm2;
	alarm->al_prev = alrm1;
	if (alrm2)
		alrm2->al_prev = alarm;

	/*
	 * If the inserted alarm is the 'earliest' alarm,
	 * reset the device layer alarm time accordingly.
	 */
	if (clock->cl_alarm.al_next == alarm)
		(*clock->cl_ops->c_setalrm)(alarm_time);
}
/*
 * Check the validity of 'alarm_time' and 'alarm_type'. If either
 * argument is invalid, return a negative value. If the 'alarm_time'
 * is now, return a 0 value. If the 'alarm_time' is in the future,
 * return a positive value.
 */
static int
check_time(
	alarm_type_t		alarm_type,
	mach_timespec_t		*alarm_time,
	mach_timespec_t		*clock_time)
{
	int			result;

	if (BAD_ALRMTYPE(alarm_type))
		return (-1);
	if (BAD_MACH_TIMESPEC(alarm_time))
		return (-1);
	if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
		ADD_MACH_TIMESPEC(alarm_time, clock_time);

	result = CMP_MACH_TIMESPEC(alarm_time, clock_time);

	return ((result >= 0)? result : 0);
}
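/*
 * Example (sketch): callers branch on the sign of the result, e.g.
 *
 *	chkstat = check_time(alarm_type, &alarm_time, &clock_time);
 *	if (chkstat < 0)
 *		... reject the request (invalid type or timespec) ...
 *	else if (chkstat == 0)
 *		... the time has already passed; complete immediately ...
 *	else
 *		... queue the alarm for the future ...
 *
 * Note that for TIME_RELATIVE requests alarm_time is converted to an
 * absolute time in place.
 */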
mach_timespec_t
clock_get_system_value(void)
{
	clock_t			clock = &clock_list[SYSTEM_CLOCK];
	mach_timespec_t		value;

	(void) (*clock->cl_ops->c_gettime)(&value);

	return value;
}
mach_timespec_t
clock_get_calendar_value(void)
{
	clock_t			clock = &clock_list[CALENDAR_CLOCK];
	mach_timespec_t		value = MACH_TIMESPEC_ZERO;

	(void) (*clock->cl_ops->c_gettime)(&value);

	return value;
}
void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();
		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
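/*
 * Usage sketch: a periodic timer keeps one running deadline and advances
 * it by a fixed interval each time it fires, letting this routine skip
 * ahead if processing fell behind, e.g.:
 *
 *	clock_deadline_for_periodic_event(interval, mach_absolute_time(),
 *						&next_deadline);
 *	timer_call_enter(&my_timer, next_deadline);
 *
 * ('my_timer' and 'next_deadline' are illustrative names.)
 */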
void
mk_timebase_info_trap(
	struct mk_timebase_info_trap_args *args)
{
	uint32_t			*delta = args->delta;
	uint32_t			*abs_to_ns_numer = args->abs_to_ns_numer;
	uint32_t			*abs_to_ns_denom = args->abs_to_ns_denom;
	uint32_t			*proc_to_abs_numer = args->proc_to_abs_numer;
	uint32_t			*proc_to_abs_denom = args->proc_to_abs_denom;
	mach_timebase_info_data_t	info;
	uint32_t			one = 1;

	clock_timebase_info(&info);

	copyout((void *)&one, CAST_USER_ADDR_T(delta), sizeof (uint32_t));

	copyout((void *)&info.numer, CAST_USER_ADDR_T(abs_to_ns_numer), sizeof (uint32_t));
	copyout((void *)&info.denom, CAST_USER_ADDR_T(abs_to_ns_denom), sizeof (uint32_t));

	copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_numer), sizeof (uint32_t));
	copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_denom), sizeof (uint32_t));
}
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t		out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
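/*
 * Example (user-space sketch): the numer/denom pair copied out above is
 * what mach_timebase_info() returns; a client converts mach_absolute_time()
 * units to nanoseconds with it, e.g.:
 *
 *	mach_timebase_info_data_t	tb;
 *	uint64_t			abs, ns;
 *
 *	mach_timebase_info(&tb);
 *	abs = mach_absolute_time();
 *	ns = abs * tb.numer / tb.denom;	// may overflow for very large values
 */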
void
mach_wait_until_continue(
	__unused void		*parameter,
	wait_result_t		wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args *args)
{
	uint64_t		deadline = args->deadline;
	wait_result_t		wresult;

	wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
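/*
 * Example (user-space sketch): mach_wait_until() takes an absolute
 * deadline in mach_absolute_time() units; to sleep for roughly 100ms a
 * client might do:
 *
 *	mach_timebase_info_data_t	tb;
 *	uint64_t			deadline;
 *
 *	mach_timebase_info(&tb);
 *	deadline = mach_absolute_time() +
 *			(100 * NSEC_PER_MSEC) * tb.denom / tb.numer;
 *	mach_wait_until(deadline);	// returns KERN_ABORTED if interrupted
 */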
void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	if (	(deadline - now) < (8 * sched_cswtime)	||
			get_preemption_level() != 0		||
			ml_get_interrupts_enabled() == FALSE	)
		machine_delay_until(deadline);
	else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

		thread_block(THREAD_CONTINUE_NULL);
	}
}
void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	clock_delay_until(end);
}
void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec : usec, NSEC_PER_USEC);
}
void
clock_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&calend_adjlock);

	interval = clock_set_calendar_adjtime(secs, microsecs);
	if (interval != 0) {
		if (calend_adjdeadline >= interval)
			calend_adjdeadline -= interval;
		clock_deadline_for_periodic_event(interval, mach_absolute_time(),
							&calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}
	else
		timer_call_cancel(&calend_adjcall);

	simple_unlock(&calend_adjlock);
	splx(s);
}
void
calend_adjust_call(
	__unused timer_call_param_t		p0,
	__unused timer_call_param_t		p1)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&calend_adjlock);

	interval = clock_adjust_calendar();
	if (interval != 0) {
		clock_deadline_for_periodic_event(interval, mach_absolute_time(),
							&calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}

	simple_unlock(&calend_adjlock);
	splx(s);
}
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}
extern	void		IOKitResetTime(void); /* XXX */

void
calend_dowakeup(
	__unused thread_call_param_t		p0,
	__unused thread_call_param_t		p1)
{
	IOKitResetTime();
}