/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Purpose:	Routines for the creation and use of kernel
 *			alarm clock services. This file and the ipc
 *			routines in kern/ipc_clock.c constitute the
 *			machine-independent clock service layer.
 */
#include <mach_host.h>

#include <mach/boolean.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <machine/mach_param.h>
#include <kern/cpu_number.h>
#include <kern/misc_protos.h>
#include <kern/lock.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/ipc_host.h>
#include <kern/clock.h>
#include <kern/zalloc.h>

#include <ipc/ipc_port.h>

#include <mach/mach_syscalls.h>
#include <mach/clock_reply.h>
#include <mach/mach_time.h>

#include <kern/mk_timer.h>

#include <mach/clock_server.h>
#include <mach/mach_host_server.h>
/* local data declarations */
decl_simple_lock_data(static,ClockLock)		/* clock system synchronization */
static struct zone		*alarm_zone;	/* zone for user alarms */
static struct alarm		*alrmfree;	/* alarm free list pointer */
static struct alarm		*alrmdone;	/* alarm done list pointer */
static long			alrm_seqno;	/* uniquely identifies alarms */
static thread_call_data_t	alarm_deliver;

decl_simple_lock_data(static,calend_adjlock)
static int64_t			calend_adjtotal;
static uint32_t			calend_adjdelta;

static timer_call_data_t	calend_adjcall;
static uint64_t			calend_adjinterval, calend_adjdeadline;
/* backwards compatibility */
int		hz = HZ;		/* GET RID OF THIS !!! */
int		tick = (1000000 / HZ);	/* GET RID OF THIS !!! */
/* external declarations */
extern struct clock	clock_list[];
extern int		clock_count;
/* local clock subroutines */
static int	check_time(
			alarm_type_t		alarm_type,
			mach_timespec_t		*alarm_time,
			mach_timespec_t		*clock_time);

void		clock_alarm_deliver(
			thread_call_param_t	p0,
			thread_call_param_t	p1);

void		clock_calend_adjust(
			timer_call_param_t	p0,
			timer_call_param_t	p1);
/*
 *	Macros to lock/unlock clock system.
 */
#define LOCK_CLOCK(s)			\
	s = splclock();			\
	simple_lock(&ClockLock);

#define UNLOCK_CLOCK(s)			\
	simple_unlock(&ClockLock);	\
	splx(s);
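
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file): callers keep a local spl_t and bracket any access to the clock
 * alarm lists with the macros above, for example:
 *
 *	spl_t	s;
 *
 *	LOCK_CLOCK(s);
 *	...inspect or modify clock_list[i].cl_alarm...
 *	UNLOCK_CLOCK(s);
 *
 * The interrupt level is raised to splclock() while ClockLock is held,
 * so the alarm interrupt path cannot preempt the holder on this processor.
 */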
/*
 *	Configure the clock system. (Not sure if we need this,
 *	as separate from clock_init()).
 */
void
clock_config(void)
{
	clock_t		clock;
	register int	i;

	if (cpu_number() != master_cpu)
		panic("clock_config");

	/*
	 *	Configure clock devices.
	 */
	simple_lock_init(&calend_adjlock, ETAP_MISC_CLOCK);
	simple_lock_init(&ClockLock, ETAP_MISC_CLOCK);
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];
		if (clock->cl_ops)
			if ((*clock->cl_ops->c_config)() == 0)
				clock->cl_ops = 0;
	}

	/* start alarm sequence numbers at 0 */
	alrm_seqno = 0;
}
/*
 *	Initialize the clock system.
 */
void
clock_init(void)
{
	clock_t		clock;
	register int	i;

	/*
	 *	Initialize basic clock structures.
	 */
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];
		if (clock->cl_ops)
			(*clock->cl_ops->c_init)();
	}
}
/*
 *	Initialize the clock ipc service facility.
 */
void
clock_service_create(void)
{
	clock_t		clock;
	register int	i;

	mk_timer_initialize();

	/*
	 *	Initialize ipc clock services.
	 */
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];
		if (clock->cl_ops) {
			ipc_clock_init(clock);
			ipc_clock_enable(clock);
		}
	}

	timer_call_setup(&calend_adjcall, clock_calend_adjust, NULL);

	/*
	 *	Initialize clock service alarms.
	 */
	i = sizeof(struct alarm);
	alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");

	thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);
}
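
/*
 * Sizing note (added commentary): with i == sizeof(struct alarm), the
 * alarm_zone created above is capped at (4096/i)*i bytes -- as many
 * whole alarms as fit in 4096 bytes -- and grows 10*i bytes (ten alarms)
 * at a time. For a hypothetical 72-byte alarm that works out to a cap of
 * 56 alarms (4032 bytes) and 720-byte growth increments.
 */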
/*
 *	Get the service port on a clock.
 */
kern_return_t
host_get_clock_service(
	host_t			host,
	clock_id_t		clock_id,
	clock_t			*clock)		/* OUT */
{
	if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
		*clock = CLOCK_NULL;
		return (KERN_INVALID_ARGUMENT);
	}

	*clock = &clock_list[clock_id];
	if ((*clock)->cl_ops == 0)
		return (KERN_FAILURE);
	return (KERN_SUCCESS);
}
/*
 *	Get the control port on a clock.
 */
kern_return_t
host_get_clock_control(
	host_priv_t		host_priv,
	clock_id_t		clock_id,
	clock_t			*clock)		/* OUT */
{
	if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
		*clock = CLOCK_NULL;
		return (KERN_INVALID_ARGUMENT);
	}

	*clock = &clock_list[clock_id];
	if ((*clock)->cl_ops == 0)
		return (KERN_FAILURE);
	return (KERN_SUCCESS);
}
/*
 *	Get the current clock time.
 */
kern_return_t
clock_get_time(
	clock_t			clock,
	mach_timespec_t		*cur_time)	/* OUT */
{
	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	return ((*clock->cl_ops->c_gettime)(cur_time));
}
/*
 *	Get clock attributes.
 */
kern_return_t
clock_get_attributes(
	clock_t				clock,
	clock_flavor_t			flavor,
	clock_attr_t			attr,		/* OUT */
	mach_msg_type_number_t		*count)		/* IN/OUT */
{
	kern_return_t	(*getattr)(
				clock_flavor_t		flavor,
				clock_attr_t		attr,
				mach_msg_type_number_t	*count);

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (getattr = clock->cl_ops->c_getattr)
		return ((*getattr)(flavor, attr, count));
	else
		return (KERN_FAILURE);
}
/*
 *	Set the current clock time.
 */
kern_return_t
clock_set_time(
	clock_t			clock,
	mach_timespec_t		new_time)
{
	mach_timespec_t		*clock_time;
	kern_return_t		(*settime)(
					mach_timespec_t		*clock_time);
	extern kern_return_t	calend_settime(
					mach_timespec_t		*clock_time);

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if ((settime = clock->cl_ops->c_settime) == 0)
		return (KERN_FAILURE);
	if (settime == calend_settime)
		return (KERN_FAILURE);
	clock_time = &new_time;
	if (BAD_MACH_TIMESPEC(clock_time))
		return (KERN_INVALID_VALUE);

	/*
	 *	Flush all outstanding alarms.
	 */
	flush_alarms(clock);

	/*
	 *	Set the new time.
	 */
	return ((*settime)(clock_time));
}
/*
 *	Set the clock alarm resolution.
 */
kern_return_t
clock_set_attributes(
	clock_t				clock,
	clock_flavor_t			flavor,
	clock_attr_t			attr,
	mach_msg_type_number_t		count)
{
	kern_return_t	(*setattr)(
				clock_flavor_t		flavor,
				clock_attr_t		attr,
				mach_msg_type_number_t	count);

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (setattr = clock->cl_ops->c_setattr)
		return ((*setattr)(flavor, attr, count));
	else
		return (KERN_FAILURE);
}
/*
 *	Setup a clock alarm.
 */
kern_return_t
clock_alarm(
	clock_t				clock,
	alarm_type_t			alarm_type,
	mach_timespec_t			alarm_time,
	ipc_port_t			alarm_port,
	mach_msg_type_name_t		alarm_port_type)
{
	alarm_t			alarm;
	mach_timespec_t		clock_time;
	int			chkstat;
	kern_return_t		reply_code;
	spl_t			s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);
	if (IP_VALID(alarm_port) == 0)
		return (KERN_INVALID_CAPABILITY);

	/*
	 *	Check alarm parameters. If parameters are invalid,
	 *	send alarm message immediately.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);
	chkstat = check_time(alarm_type, &alarm_time, &clock_time);
	if (chkstat <= 0) {
		reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
		clock_alarm_reply(alarm_port, alarm_port_type,
				  reply_code, alarm_type, clock_time);
		return (KERN_SUCCESS);
	}

	/*
	 *	Get alarm and add to clock alarm list.
	 */

	LOCK_CLOCK(s);
	if ((alarm = alrmfree) == 0) {
		UNLOCK_CLOCK(s);
		alarm = (alarm_t) zalloc(alarm_zone);
		if (alarm == 0)
			return (KERN_RESOURCE_SHORTAGE);
		LOCK_CLOCK(s);
	}
	else
		alrmfree = alarm->al_next;

	alarm->al_status = ALARM_CLOCK;
	alarm->al_time = alarm_time;
	alarm->al_type = alarm_type;
	alarm->al_port = alarm_port;
	alarm->al_port_type = alarm_port_type;
	alarm->al_clock = clock;
	alarm->al_seqno = alrm_seqno++;
	post_alarm(clock, alarm);
	UNLOCK_CLOCK(s);

	return (KERN_SUCCESS);
}
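
/*
 * Illustrative user-level sketch (added commentary, not kernel code): a
 * client typically obtains the system clock service port, allocates a
 * reply port, and requests a relative alarm; the reply arrives as a
 * clock_alarm_reply message on that port. The names below are the
 * standard Mach user-level interfaces:
 *
 *	clock_serv_t		clk;
 *	mach_port_t		reply;
 *	mach_timespec_t		in = { 2, 0 };	// two seconds from now
 *
 *	host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clk);
 *	mach_port_allocate(mach_task_self(),
 *			   MACH_PORT_RIGHT_RECEIVE, &reply);
 *	clock_alarm(clk, TIME_RELATIVE, in, reply);
 *	// ...wait for the alarm message with mach_msg() on 'reply'...
 */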
/*
 *	Sleep on a clock. System trap. User-level libmach clock_sleep
 *	interface call takes a mach_timespec_t sleep_time argument which it
 *	converts to sleep_sec and sleep_nsec arguments which are then
 *	passed to clock_sleep_trap.
 */
kern_return_t
clock_sleep_trap(
	mach_port_name_t	clock_name,
	sleep_type_t		sleep_type,
	int			sleep_sec,
	int			sleep_nsec,
	mach_timespec_t		*wakeup_time)
{
	clock_t			clock;
	mach_timespec_t		swtime;
	kern_return_t		rvalue;

	/*
	 *	Convert the trap parameters.
	 */
	if (clock_name != MACH_PORT_NULL)
		clock = port_name_to_clock(clock_name);
	else
		clock = &clock_list[SYSTEM_CLOCK];

	swtime.tv_sec  = sleep_sec;
	swtime.tv_nsec = sleep_nsec;

	/*
	 *	Call the actual clock_sleep routine.
	 */
	rvalue = clock_sleep_internal(clock, sleep_type, &swtime);

	/*
	 *	Return current time as wakeup time.
	 */
	if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
		copyout((char *)&swtime, (char *)wakeup_time,
			sizeof(mach_timespec_t));
	}
	return (rvalue);
}
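
/*
 * Illustrative sketch of the user-level wrapper described above (added
 * commentary): libmach's clock_sleep() splits the mach_timespec_t into
 * the separate second/nanosecond trap arguments, roughly:
 *
 *	kern_return_t
 *	clock_sleep(mach_port_t clock, sleep_type_t type,
 *		    mach_timespec_t sleep_time, mach_timespec_t *wakeup)
 *	{
 *		return (clock_sleep_trap(clock, type,
 *				sleep_time.tv_sec, sleep_time.tv_nsec,
 *				wakeup));
 *	}
 */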
/*
 *	Kernel internally callable clock sleep routine. The calling
 *	thread is suspended until the requested sleep time is reached.
 */
kern_return_t
clock_sleep_internal(
	clock_t			clock,
	sleep_type_t		sleep_type,
	mach_timespec_t		*sleep_time)
{
	alarm_t			alarm;
	mach_timespec_t		clock_time;
	kern_return_t		rvalue;
	int			chkstat;
	spl_t			s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);

	/*
	 *	Check sleep parameters. If parameters are invalid
	 *	return an error, otherwise post alarm request.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);

	chkstat = check_time(sleep_type, sleep_time, &clock_time);
	if (chkstat < 0)
		return (KERN_INVALID_VALUE);
	rvalue = KERN_SUCCESS;
	if (chkstat > 0) {
		wait_result_t wait_result;

		/*
		 *	Get alarm and add to clock alarm list.
		 */

		LOCK_CLOCK(s);
		if ((alarm = alrmfree) == 0) {
			UNLOCK_CLOCK(s);
			alarm = (alarm_t) zalloc(alarm_zone);
			if (alarm == 0)
				return (KERN_RESOURCE_SHORTAGE);
			LOCK_CLOCK(s);
		}
		else
			alrmfree = alarm->al_next;

		/*
		 *	Wait for alarm to occur.
		 */
		wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
		if (wait_result == THREAD_WAITING) {
			alarm->al_time = *sleep_time;
			alarm->al_status = ALARM_SLEEP;
			post_alarm(clock, alarm);
			UNLOCK_CLOCK(s);

			wait_result = thread_block(THREAD_CONTINUE_NULL);

			/*
			 *	Note if alarm expired normally or whether it
			 *	was aborted. If aborted, delete alarm from
			 *	clock alarm list. Return alarm to free list.
			 */
			LOCK_CLOCK(s);
			if (alarm->al_status != ALARM_DONE) {
				assert(wait_result != THREAD_AWAKENED);
				if ((alarm->al_prev)->al_next = alarm->al_next)
					(alarm->al_next)->al_prev = alarm->al_prev;
				rvalue = KERN_ABORTED;
			}
			*sleep_time = alarm->al_time;
			alarm->al_status = ALARM_FREE;
		}
		else {
			assert(wait_result == THREAD_INTERRUPTED);
			assert(alarm->al_status == ALARM_FREE);
			rvalue = KERN_ABORTED;
		}
		alarm->al_next = alrmfree;
		alrmfree = alarm;
		UNLOCK_CLOCK(s);
	}
	else
		*sleep_time = clock_time;

	return (rvalue);
}
/*
 *	CLOCK INTERRUPT SERVICE ROUTINES.
 */

/*
 *	Service clock alarm interrupts. Called from machine dependent
 *	layer at splclock(). The clock_id argument specifies the clock,
 *	and the clock_time argument gives that clock's current time.
 */
void
clock_alarm_intr(
	clock_id_t		clock_id,
	mach_timespec_t		*clock_time)
{
	clock_t			clock;
	register alarm_t	alrm1;
	register alarm_t	alrm2;
	mach_timespec_t		*alarm_time;
	spl_t			s;

	clock = &clock_list[clock_id];

	/*
	 *	Update clock alarm list. All alarms that are due are moved
	 *	to the alarmdone list to be serviced by the alarm_thread.
	 */

	LOCK_CLOCK(s);
	alrm1 = (alarm_t) &clock->cl_alarm;
	while (alrm2 = alrm1->al_next) {
		alarm_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
			break;

		/*
		 *	Alarm has expired, so remove it from the
		 *	clock alarm list.
		 */
		if (alrm1->al_next = alrm2->al_next)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 *	If a clock_sleep() alarm, wakeup the thread
		 *	which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_next = 0;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
			thread_wakeup((event_t)alrm2);
		}

		/*
		 *	If a clock_alarm() alarm, place the alarm on
		 *	the alarm done list and schedule the alarm
		 *	delivery mechanism.
		 */
		else {
			assert(alrm2->al_status == ALARM_CLOCK);
			if (alrm2->al_next = alrmdone)
				alrmdone->al_prev = alrm2;
			else
				thread_call_enter(&alarm_deliver);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
		}
	}

	/*
	 *	Setup the clock dependent layer to deliver another
	 *	interrupt for the next pending alarm.
	 */
	if (alrm2)
		(*clock->cl_ops->c_setalrm)(alarm_time);
	UNLOCK_CLOCK(s);
}
/*
 *	ALARM DELIVERY ROUTINES.
 */

void
clock_alarm_deliver(
	thread_call_param_t	p0,
	thread_call_param_t	p1)
{
	register alarm_t	alrm;
	kern_return_t		code;
	spl_t			s;

	LOCK_CLOCK(s);
	while (alrm = alrmdone) {
		if (alrmdone = alrm->al_next)
			alrmdone->al_prev = (alarm_t) &alrmdone;
		UNLOCK_CLOCK(s);

		code = (alrm->al_status == ALARM_DONE ? KERN_SUCCESS : KERN_ABORTED);
		if (alrm->al_port != IP_NULL) {
			/* Deliver message to designated port */
			if (IP_VALID(alrm->al_port)) {
				clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
						  alrm->al_type, alrm->al_time);
			}

			LOCK_CLOCK(s);
			alrm->al_status = ALARM_FREE;
			alrm->al_next = alrmfree;
			alrmfree = alrm;
		}
		else
			panic("clock_alarm_deliver");
	}

	UNLOCK_CLOCK(s);
}
/*
 *	CLOCK PRIVATE SERVICING SUBROUTINES.
 */

/*
 *	Flush all pending alarms on a clock. All alarms
 *	are activated and timestamped correctly, so any
 *	programs waiting on alarms/threads will proceed
 *	with accurate information.
 */
static void
flush_alarms(
	clock_t			clock)
{
	register alarm_t	alrm1, alrm2;
	spl_t			s;

	/*
	 *	Flush all outstanding alarms.
	 */
	LOCK_CLOCK(s);
	alrm1 = (alarm_t) &clock->cl_alarm;
	while (alrm2 = alrm1->al_next) {
		/*
		 *	Remove alarm from the clock alarm list.
		 */
		if (alrm1->al_next = alrm2->al_next)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 *	If a clock_sleep() alarm, wakeup the thread
		 *	which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_next = 0;
			thread_wakeup((event_t)alrm2);
		}
		else {
			/*
			 *	If a clock_alarm() alarm, place the alarm on
			 *	the alarm done list and wakeup the dedicated
			 *	kernel alarm_thread to service the alarm.
			 */
			assert(alrm2->al_status == ALARM_CLOCK);
			if (alrm2->al_next = alrmdone)
				alrmdone->al_prev = alrm2;
			else
				thread_wakeup((event_t)&alrmdone);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
		}
	}
	UNLOCK_CLOCK(s);
}
/*
 *	Post an alarm on a clock's active alarm list. The alarm is
 *	inserted in time-order into the clock's active alarm list.
 *	Always called from within a LOCK_CLOCK() code section.
 */
static void
post_alarm(
	clock_t			clock,
	alarm_t			alarm)
{
	register alarm_t	alrm1, alrm2;
	mach_timespec_t		*alarm_time;
	mach_timespec_t		*queue_time;

	/*
	 *	Traverse alarm list until queue time is greater
	 *	than alarm time, then insert alarm.
	 */
	alarm_time = &alarm->al_time;
	alrm1 = (alarm_t) &clock->cl_alarm;
	while (alrm2 = alrm1->al_next) {
		queue_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
			break;
		alrm1 = alrm2;
	}
	alrm1->al_next = alarm;
	alarm->al_next = alrm2;
	alarm->al_prev = alrm1;
	if (alrm2)
		alrm2->al_prev = alarm;

	/*
	 *	If the inserted alarm is the 'earliest' alarm,
	 *	reset the device layer alarm time accordingly.
	 */
	if (clock->cl_alarm.al_next == alarm)
		(*clock->cl_ops->c_setalrm)(alarm_time);
}
/*
 *	Check the validity of 'alarm_time' and 'alarm_type'. If either
 *	argument is invalid, return a negative value. If the 'alarm_time'
 *	is now, return a 0 value. If the 'alarm_time' is in the future,
 *	return a positive value.
 */
static int
check_time(
	alarm_type_t		alarm_type,
	mach_timespec_t		*alarm_time,
	mach_timespec_t		*clock_time)
{
	int			result;

	if (BAD_ALRMTYPE(alarm_type))
		return (-1);
	if (BAD_MACH_TIMESPEC(alarm_time))
		return (-1);
	if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
		ADD_MACH_TIMESPEC(alarm_time, clock_time);

	result = CMP_MACH_TIMESPEC(alarm_time, clock_time);

	return ((result >= 0)? result: 0);
}
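
/*
 * Worked example (added commentary): with clock_time = {100, 0} and a
 * TIME_RELATIVE request of {2, 0}, ADD_MACH_TIMESPEC() rewrites
 * alarm_time in place to the absolute value {102, 0}; the comparison is
 * then positive, so the caller queues the alarm. A TIME_ABSOLUTE request
 * of {99, 0} compares negative and is clamped to 0, meaning "due now",
 * which makes clock_alarm() and clock_sleep_internal() reply or return
 * immediately instead of posting an alarm.
 */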
mach_timespec_t
clock_get_system_value(void)
{
	clock_t			clock = &clock_list[SYSTEM_CLOCK];
	mach_timespec_t		value;

	(void) (*clock->cl_ops->c_gettime)(&value);

	return value;
}
mach_timespec_t
clock_get_calendar_value(void)
{
	clock_t			clock = &clock_list[CALENDAR_CLOCK];
	mach_timespec_t		value = MACH_TIMESPEC_ZERO;

	(void) (*clock->cl_ops->c_gettime)(&value);

	return value;
}
void
clock_set_calendar_value(
	mach_timespec_t		value)
{
	clock_t			clock = &clock_list[CALENDAR_CLOCK];

	(void) (*clock->cl_ops->c_settime)(&value);
}
void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime;
		clock_get_uptime(&abstime);
		*deadline += interval;

		if (*deadline <= abstime) {
			*deadline = abstime;
			*deadline += interval;
		}
	}
}
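
/*
 * Illustrative usage sketch (added commentary): a periodic timer
 * typically advances its deadline by a fixed interval on each firing and
 * relies on this routine to resynchronize if the deadline has fallen
 * behind the current uptime, e.g.:
 *
 *	clock_get_uptime(&abstime);
 *	clock_deadline_for_periodic_event(interval, abstime, &deadline);
 *	timer_call_enter(&my_timer, deadline);
 *
 * Here 'my_timer' is a hypothetical timer_call_data_t; the same pattern
 * is used below by clock_set_calendar_adjtime() and clock_calend_adjust().
 */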
void
mk_timebase_info(
	uint32_t	*delta,
	uint32_t	*abs_to_ns_numer,
	uint32_t	*abs_to_ns_denom,
	uint32_t	*proc_to_abs_numer,
	uint32_t	*proc_to_abs_denom)
{
	mach_timebase_info_data_t	info;
	uint32_t			one = 1;

	clock_timebase_info(&info);

	copyout((void *)&one, (void *)delta, sizeof (uint32_t));

	copyout((void *)&info.numer, (void *)abs_to_ns_numer, sizeof (uint32_t));
	copyout((void *)&info.denom, (void *)abs_to_ns_denom, sizeof (uint32_t));

	copyout((void *)&one, (void *)proc_to_abs_numer, sizeof (uint32_t));
	copyout((void *)&one, (void *)proc_to_abs_denom, sizeof (uint32_t));
}
kern_return_t
mach_timebase_info(
	mach_timebase_info_t	out_info)
{
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);
	copyout((void *)&info, (void *)out_info, sizeof (info));
	return (KERN_SUCCESS);
}
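
/*
 * Worked example (added commentary): user code converts absolute-time
 * units to nanoseconds with the returned ratio, e.g.
 *
 *	mach_timebase_info_data_t	tb;
 *	uint64_t			t0, elapsed_ns;
 *
 *	mach_timebase_info(&tb);
 *	t0 = mach_absolute_time();
 *	// ...work...
 *	elapsed_ns = (mach_absolute_time() - t0) * tb.numer / tb.denom;
 *
 * On a machine whose timebase already ticks in nanoseconds the kernel
 * reports numer == denom == 1, and the scaling is a no-op.
 */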
kern_return_t
mach_wait_until(
	uint64_t		deadline)
{
	int			wait_result;

	wait_result = assert_wait((event_t)&mach_wait_until, THREAD_ABORTSAFE);
	if (wait_result == THREAD_WAITING) {
		thread_set_timer_deadline(deadline);
		wait_result = thread_block(THREAD_CONTINUE_NULL);
		if (wait_result != THREAD_TIMED_OUT)
			thread_cancel_timer();
	}

	return ((wait_result == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
/*
 *	Schedule a gradual ('adjtime' style) adjustment of the calendar
 *	clock: 'delta' is applied every adjustment period until 'total'
 *	has been consumed. Returns the previously outstanding total.
 */
int64_t
clock_set_calendar_adjtime(
	int64_t			total,
	uint32_t		delta)
{
	int64_t			ototal;
	spl_t			s;

	s = splclock();
	simple_lock(&calend_adjlock);

	if (calend_adjinterval == 0)
		clock_interval_to_absolutetime_interval(10000, NSEC_PER_USEC,
							&calend_adjinterval);

	ototal = calend_adjtotal;

	if (total != 0) {
		uint64_t	abstime;

		calend_adjtotal = total;
		calend_adjdelta = delta;

		if (calend_adjdeadline >= calend_adjinterval)
			calend_adjdeadline -= calend_adjinterval;
		clock_get_uptime(&abstime);
		clock_deadline_for_periodic_event(calend_adjinterval, abstime,
						  &calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}
	else {
		calend_adjtotal = 0;

		timer_call_cancel(&calend_adjcall);
	}

	simple_unlock(&calend_adjlock);
	splx(s);

	return (ototal);
}
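
/*
 * Worked example (added commentary): with the 10000-microsecond (10 ms)
 * adjustment period set up above, a request of total = 1,000,000 ns and
 * delta = 5,000 ns nudges the calendar clock by 5 microseconds on each
 * firing of calend_adjcall, so the full millisecond of skew is absorbed
 * over roughly 200 periods (about two seconds) rather than as a single
 * visible step.
 */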
/*
 *	Periodic timer callout: apply the next installment of a pending
 *	calendar adjustment and re-arm the timer while any remains.
 */
void
clock_calend_adjust(
	timer_call_param_t	p0,
	timer_call_param_t	p1)
{
	spl_t		s;

	s = splclock();
	simple_lock(&calend_adjlock);

	if (calend_adjtotal > 0) {
		clock_adjust_calendar((clock_res_t)calend_adjdelta);
		calend_adjtotal -= calend_adjdelta;

		if (calend_adjdelta > calend_adjtotal)
			calend_adjdelta = calend_adjtotal;
	}
	else
	if (calend_adjtotal < 0) {
		clock_adjust_calendar(-(clock_res_t)calend_adjdelta);
		calend_adjtotal += calend_adjdelta;

		if (calend_adjdelta > -calend_adjtotal)
			calend_adjdelta = -calend_adjtotal;
	}

	if (calend_adjtotal != 0) {
		uint64_t	abstime;

		clock_get_uptime(&abstime);
		clock_deadline_for_periodic_event(calend_adjinterval, abstime,
						  &calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}

	simple_unlock(&calend_adjlock);
	splx(s);
}