/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 *  File:       kern/clock.c
 *  Purpose:    Routines for the creation and use of kernel
 *              alarm clock services. This file and the ipc
 *              routines in kern/ipc_clock.c constitute the
 *              machine-independent clock service layer.
 */

#include <cpus.h>
#include <mach_host.h>

#include <mach/boolean.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <machine/mach_param.h>
#include <kern/cpu_number.h>
#include <kern/misc_protos.h>
#include <kern/lock.h>
#include <kern/host.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/ipc_host.h>
#include <kern/clock.h>
#include <kern/zalloc.h>
#include <ipc/ipc_port.h>

#include <mach/mach_syscalls.h>
#include <mach/clock_reply.h>
#include <mach/mach_time.h>

/*
 * Exported interface
 */

#include <mach/clock_server.h>
#include <mach/mach_host_server.h>

/* local data declarations */
decl_simple_lock_data(static,ClockLock)     /* clock system synchronization */
static struct zone      *alarm_zone;        /* zone for user alarms */
static struct alarm     *alrmfree;          /* alarm free list pointer */
static struct alarm     *alrmdone;          /* alarm done list pointer */
static long             alrm_seqno;         /* uniquely identifies alarms */
static thread_call_data_t   alarm_deliver;

decl_simple_lock_data(static,calend_adjlock)

static timer_call_data_t    calend_adjcall;
static uint64_t             calend_adjinterval, calend_adjdeadline;

static thread_call_data_t   calend_wakecall;

/* backwards compatibility */
int     hz = HZ;                /* GET RID OF THIS !!! */
int     tick = (1000000 / HZ);  /* GET RID OF THIS !!! */

/* external declarations */
extern struct clock     clock_list[];
extern int              clock_count;

/* local clock subroutines */
static
void    flush_alarms(
            clock_t             clock);

static
void    post_alarm(
            clock_t             clock,
            alarm_t             alarm);

static
int     check_time(
            alarm_type_t        alarm_type,
            mach_timespec_t     *alarm_time,
            mach_timespec_t     *clock_time);

static
void    clock_alarm_deliver(
            thread_call_param_t p0,
            thread_call_param_t p1);

static
void    calend_adjust_call(
            timer_call_param_t  p0,
            timer_call_param_t  p1);

static
void    calend_dowakeup(
            thread_call_param_t p0,
            thread_call_param_t p1);

/*
 * Macros to lock/unlock clock system.
 */
#define LOCK_CLOCK(s)           \
    s = splclock();             \
    simple_lock(&ClockLock);

#define UNLOCK_CLOCK(s)         \
    simple_unlock(&ClockLock);  \
    splx(s);

/*
 * Configure the clock system. (Not sure if we need this,
 * as separate from clock_init()).
 */
void
clock_config(void)
{
    clock_t         clock;
    register int    i;

    if (cpu_number() != master_cpu)
        panic("clock_config");

    simple_lock_init(&ClockLock, ETAP_MISC_CLOCK);
    thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);

    simple_lock_init(&calend_adjlock, ETAP_MISC_CLOCK);
    timer_call_setup(&calend_adjcall, calend_adjust_call, NULL);

    thread_call_setup(&calend_wakecall, calend_dowakeup, NULL);

    /*
     * Configure clock devices.
     */
    for (i = 0; i < clock_count; i++) {
        clock = &clock_list[i];
        if (clock->cl_ops) {
            if ((*clock->cl_ops->c_config)() == 0)
                clock->cl_ops = 0;
        }
    }

    /* start alarm sequence numbers at 0 */
    alrm_seqno = 0;
}

/*
 * Initialize the clock system.
 */
void
clock_init(void)
{
    clock_t         clock;
    register int    i;

    /*
     * Initialize basic clock structures.
     */
    for (i = 0; i < clock_count; i++) {
        clock = &clock_list[i];
        if (clock->cl_ops)
            (*clock->cl_ops->c_init)();
    }
}

/*
 * Called by machine dependent code
 * to initialize areas dependent on the
 * timebase value. May be called multiple
 * times during start up.
 */
void
clock_timebase_init(void)
{
    sched_timebase_init();
}

/*
 * Initialize the clock ipc service facility.
 */
void
clock_service_create(void)
{
    clock_t         clock;
    register int    i;

    /*
     * Initialize ipc clock services.
     */
    for (i = 0; i < clock_count; i++) {
        clock = &clock_list[i];
        if (clock->cl_ops) {
            ipc_clock_init(clock);
            ipc_clock_enable(clock);
        }
    }

    /*
     * Perform miscellaneous late
     * initialization.
     */
    i = sizeof(struct alarm);
    alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");
}

/*
 * Get the service port on a clock.
 */
kern_return_t
host_get_clock_service(
    host_t          host,
    clock_id_t      clock_id,
    clock_t         *clock)     /* OUT */
{
    if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
        *clock = CLOCK_NULL;
        return (KERN_INVALID_ARGUMENT);
    }

    *clock = &clock_list[clock_id];
    if ((*clock)->cl_ops == 0)
        return (KERN_FAILURE);
    return (KERN_SUCCESS);
}

/*
 * Get the control port on a clock.
 */
kern_return_t
host_get_clock_control(
    host_priv_t     host_priv,
    clock_id_t      clock_id,
    clock_t         *clock)     /* OUT */
{
    if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
        *clock = CLOCK_NULL;
        return (KERN_INVALID_ARGUMENT);
    }

    *clock = &clock_list[clock_id];
    if ((*clock)->cl_ops == 0)
        return (KERN_FAILURE);
    return (KERN_SUCCESS);
}

/*
 * Get the current clock time.
 */
kern_return_t
clock_get_time(
    clock_t         clock,
    mach_timespec_t *cur_time)  /* OUT */
{
    if (clock == CLOCK_NULL)
        return (KERN_INVALID_ARGUMENT);
    return ((*clock->cl_ops->c_gettime)(cur_time));
}
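
/*
 * Illustrative sketch (not part of this file): how a user-level task
 * would typically reach the two routines above through the standard
 * MIG-generated stubs in libmach. The clock_serv_t handle and the
 * mach_host_self()/mach_port_deallocate() calls are ordinary user-level
 * Mach interfaces; error handling is omitted for brevity.
 *
 *  #include <mach/mach.h>
 *  #include <mach/clock.h>
 *
 *  clock_serv_t    clk;
 *  mach_timespec_t now;
 *
 *  host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clk);
 *  clock_get_time(clk, &now);      // fills now.tv_sec / now.tv_nsec
 *  mach_port_deallocate(mach_task_self(), clk);
 */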

/*
 * Get clock attributes.
 */
kern_return_t
clock_get_attributes(
    clock_t                 clock,
    clock_flavor_t          flavor,
    clock_attr_t            attr,       /* OUT */
    mach_msg_type_number_t  *count)     /* IN/OUT */
{
    kern_return_t   (*getattr)(
                        clock_flavor_t          flavor,
                        clock_attr_t            attr,
                        mach_msg_type_number_t  *count);

    if (clock == CLOCK_NULL)
        return (KERN_INVALID_ARGUMENT);
    if (getattr = clock->cl_ops->c_getattr)
        return((*getattr)(flavor, attr, count));
    else
        return (KERN_FAILURE);
}

/*
 * Set the current clock time.
 */
kern_return_t
clock_set_time(
    clock_t         clock,
    mach_timespec_t new_time)
{
    mach_timespec_t *clock_time;
    kern_return_t   (*settime)(
                        mach_timespec_t *clock_time);

    if (clock == CLOCK_NULL)
        return (KERN_INVALID_ARGUMENT);
    if ((settime = clock->cl_ops->c_settime) == 0)
        return (KERN_FAILURE);
    clock_time = &new_time;
    if (BAD_MACH_TIMESPEC(clock_time))
        return (KERN_INVALID_VALUE);

    /*
     * Flush all outstanding alarms.
     */
    flush_alarms(clock);

    /*
     * Set the new time.
     */
    return ((*settime)(clock_time));
}

/*
 * Set the clock alarm resolution.
 */
kern_return_t
clock_set_attributes(
    clock_t                 clock,
    clock_flavor_t          flavor,
    clock_attr_t            attr,
    mach_msg_type_number_t  count)
{
    kern_return_t   (*setattr)(
                        clock_flavor_t          flavor,
                        clock_attr_t            attr,
                        mach_msg_type_number_t  count);

    if (clock == CLOCK_NULL)
        return (KERN_INVALID_ARGUMENT);
    if (setattr = clock->cl_ops->c_setattr)
        return ((*setattr)(flavor, attr, count));
    else
        return (KERN_FAILURE);
}

/*
 * Setup a clock alarm.
 */
kern_return_t
clock_alarm(
    clock_t                 clock,
    alarm_type_t            alarm_type,
    mach_timespec_t         alarm_time,
    ipc_port_t              alarm_port,
    mach_msg_type_name_t    alarm_port_type)
{
    alarm_t         alarm;
    mach_timespec_t clock_time;
    int             chkstat;
    kern_return_t   reply_code;
    spl_t           s;

    if (clock == CLOCK_NULL)
        return (KERN_INVALID_ARGUMENT);
    if (clock->cl_ops->c_setalrm == 0)
        return (KERN_FAILURE);
    if (IP_VALID(alarm_port) == 0)
        return (KERN_INVALID_CAPABILITY);

    /*
     * Check alarm parameters. If parameters are invalid,
     * send alarm message immediately.
     */
    (*clock->cl_ops->c_gettime)(&clock_time);
    chkstat = check_time(alarm_type, &alarm_time, &clock_time);
    if (chkstat <= 0) {
        reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
        clock_alarm_reply(alarm_port, alarm_port_type,
                          reply_code, alarm_type, clock_time);
        return (KERN_SUCCESS);
    }

    /*
     * Get alarm and add to clock alarm list.
     */

    LOCK_CLOCK(s);
    if ((alarm = alrmfree) == 0) {
        UNLOCK_CLOCK(s);
        alarm = (alarm_t) zalloc(alarm_zone);
        if (alarm == 0)
            return (KERN_RESOURCE_SHORTAGE);
        LOCK_CLOCK(s);
    }
    else
        alrmfree = alarm->al_next;

    alarm->al_status = ALARM_CLOCK;
    alarm->al_time = alarm_time;
    alarm->al_type = alarm_type;
    alarm->al_port = alarm_port;
    alarm->al_port_type = alarm_port_type;
    alarm->al_clock = clock;
    alarm->al_seqno = alrm_seqno++;
    post_alarm(clock, alarm);
    UNLOCK_CLOCK(s);

    return (KERN_SUCCESS);
}

/*
 * Sleep on a clock. System trap. User-level libmach clock_sleep
 * interface call takes a mach_timespec_t sleep_time argument which it
 * converts to sleep_sec and sleep_nsec arguments which are then
 * passed to clock_sleep_trap.
 */
kern_return_t
clock_sleep_trap(
    mach_port_name_t    clock_name,
    sleep_type_t        sleep_type,
    int                 sleep_sec,
    int                 sleep_nsec,
    mach_timespec_t     *wakeup_time)
{
    clock_t         clock;
    mach_timespec_t swtime;
    kern_return_t   rvalue;

    /*
     * Convert the trap parameters.
     */
    if (clock_name != MACH_PORT_NULL)
        clock = port_name_to_clock(clock_name);
    else
        clock = &clock_list[SYSTEM_CLOCK];

    swtime.tv_sec  = sleep_sec;
    swtime.tv_nsec = sleep_nsec;

    /*
     * Call the actual clock_sleep routine.
     */
    rvalue = clock_sleep_internal(clock, sleep_type, &swtime);

    /*
     * Return current time as wakeup time.
     */
    if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
        copyout((char *)&swtime, (char *)wakeup_time,
                sizeof(mach_timespec_t));
    }
    return (rvalue);
}
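
/*
 * Illustrative sketch (not part of this file): the user-level
 * clock_sleep() wrapper described in the comment above would split the
 * mach_timespec_t into the two scalar trap arguments roughly as shown
 * below. The wrapper name and exact libmach plumbing are assumptions;
 * only the argument conversion is taken from the comment above
 * clock_sleep_trap().
 *
 *  kern_return_t
 *  clock_sleep(
 *      mach_port_t     clock_name,
 *      sleep_type_t    sleep_type,
 *      mach_timespec_t sleep_time,
 *      mach_timespec_t *wakeup_time)
 *  {
 *      return (clock_sleep_trap(clock_name, sleep_type,
 *                  sleep_time.tv_sec, sleep_time.tv_nsec,
 *                  wakeup_time));
 *  }
 */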

/*
 * Kernel internally callable clock sleep routine. The calling
 * thread is suspended until the requested sleep time is reached.
 */
kern_return_t
clock_sleep_internal(
    clock_t             clock,
    sleep_type_t        sleep_type,
    mach_timespec_t     *sleep_time)
{
    alarm_t         alarm;
    mach_timespec_t clock_time;
    kern_return_t   rvalue;
    int             chkstat;
    spl_t           s;

    if (clock == CLOCK_NULL)
        return (KERN_INVALID_ARGUMENT);
    if (clock->cl_ops->c_setalrm == 0)
        return (KERN_FAILURE);

    /*
     * Check sleep parameters. If parameters are invalid
     * return an error, otherwise post alarm request.
     */
    (*clock->cl_ops->c_gettime)(&clock_time);

    chkstat = check_time(sleep_type, sleep_time, &clock_time);
    if (chkstat < 0)
        return (KERN_INVALID_VALUE);
    rvalue = KERN_SUCCESS;
    if (chkstat > 0) {
        wait_result_t wait_result;

        /*
         * Get alarm and add to clock alarm list.
         */

        LOCK_CLOCK(s);
        if ((alarm = alrmfree) == 0) {
            UNLOCK_CLOCK(s);
            alarm = (alarm_t) zalloc(alarm_zone);
            if (alarm == 0)
                return (KERN_RESOURCE_SHORTAGE);
            LOCK_CLOCK(s);
        }
        else
            alrmfree = alarm->al_next;

        /*
         * Wait for alarm to occur.
         */
        wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
        if (wait_result == THREAD_WAITING) {
            alarm->al_time = *sleep_time;
            alarm->al_status = ALARM_SLEEP;
            post_alarm(clock, alarm);
            UNLOCK_CLOCK(s);

            wait_result = thread_block(THREAD_CONTINUE_NULL);

            /*
             * Note if alarm expired normally or whether it
             * was aborted. If aborted, delete alarm from
             * clock alarm list. Return alarm to free list.
             */
            LOCK_CLOCK(s);
            if (alarm->al_status != ALARM_DONE) {
                assert(wait_result != THREAD_AWAKENED);
                if ((alarm->al_prev)->al_next = alarm->al_next)
                    (alarm->al_next)->al_prev = alarm->al_prev;
                rvalue = KERN_ABORTED;
            }
            *sleep_time = alarm->al_time;
            alarm->al_status = ALARM_FREE;
        } else {
            assert(wait_result == THREAD_INTERRUPTED);
            assert(alarm->al_status == ALARM_FREE);
            rvalue = KERN_ABORTED;
        }
        alarm->al_next = alrmfree;
        alrmfree = alarm;
        UNLOCK_CLOCK(s);
    }
    else
        *sleep_time = clock_time;

    return (rvalue);
}

/*
 * CLOCK INTERRUPT SERVICE ROUTINES.
 */

/*
 * Service clock alarm interrupts. Called from machine dependent
 * layer at splclock(). The clock_id argument specifies the clock,
 * and the clock_time argument gives that clock's current time.
 */
void
clock_alarm_intr(
    clock_id_t          clock_id,
    mach_timespec_t     *clock_time)
{
    clock_t             clock;
    register alarm_t    alrm1;
    register alarm_t    alrm2;
    mach_timespec_t     *alarm_time;
    spl_t               s;

    clock = &clock_list[clock_id];

    /*
     * Update clock alarm list. All alarms that are due are moved
     * to the alarmdone list to be serviced by the alarm_thread.
     */

    LOCK_CLOCK(s);
    alrm1 = (alarm_t) &clock->cl_alarm;
    while (alrm2 = alrm1->al_next) {
        alarm_time = &alrm2->al_time;
        if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
            break;

        /*
         * Alarm has expired, so remove it from the
         * clock alarm list.
         */
        if (alrm1->al_next = alrm2->al_next)
            (alrm1->al_next)->al_prev = alrm1;

        /*
         * If a clock_sleep() alarm, wakeup the thread
         * which issued the clock_sleep() call.
         */
        if (alrm2->al_status == ALARM_SLEEP) {
            alrm2->al_next = 0;
            alrm2->al_status = ALARM_DONE;
            alrm2->al_time = *clock_time;
            thread_wakeup((event_t)alrm2);
        }

        /*
         * If a clock_alarm() alarm, place the alarm on
         * the alarm done list and schedule the alarm
         * delivery mechanism.
         */
        else {
            assert(alrm2->al_status == ALARM_CLOCK);
            if (alrm2->al_next = alrmdone)
                alrmdone->al_prev = alrm2;
            else
                thread_call_enter(&alarm_deliver);
            alrm2->al_prev = (alarm_t) &alrmdone;
            alrmdone = alrm2;
            alrm2->al_status = ALARM_DONE;
            alrm2->al_time = *clock_time;
        }
    }

    /*
     * Setup the clock dependent layer to deliver another
     * interrupt for the next pending alarm.
     */
    if (alrm2)
        (*clock->cl_ops->c_setalrm)(alarm_time);
    UNLOCK_CLOCK(s);
}

/*
 * ALARM DELIVERY ROUTINES.
 */

static void
clock_alarm_deliver(
    thread_call_param_t     p0,
    thread_call_param_t     p1)
{
    register alarm_t    alrm;
    kern_return_t       code;
    spl_t               s;

    LOCK_CLOCK(s);
    while (alrm = alrmdone) {
        if (alrmdone = alrm->al_next)
            alrmdone->al_prev = (alarm_t) &alrmdone;
        UNLOCK_CLOCK(s);

        code = (alrm->al_status == ALARM_DONE? KERN_SUCCESS: KERN_ABORTED);
        if (alrm->al_port != IP_NULL) {
            /* Deliver message to designated port */
            if (IP_VALID(alrm->al_port)) {
                clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
                                  alrm->al_type, alrm->al_time);
            }

            LOCK_CLOCK(s);
            alrm->al_status = ALARM_FREE;
            alrm->al_next = alrmfree;
            alrmfree = alrm;
        }
        else
            panic("clock_alarm_deliver");
    }

    UNLOCK_CLOCK(s);
}

/*
 * CLOCK PRIVATE SERVICING SUBROUTINES.
 */

/*
 * Flush all pending alarms on a clock. All alarms
 * are activated and timestamped correctly, so any
 * programs waiting on alarms/threads will proceed
 * with accurate information.
 */
static
void
flush_alarms(
    clock_t         clock)
{
    register alarm_t    alrm1, alrm2;
    spl_t               s;

    /*
     * Flush all outstanding alarms.
     */
    LOCK_CLOCK(s);
    alrm1 = (alarm_t) &clock->cl_alarm;
    while (alrm2 = alrm1->al_next) {
        /*
         * Remove alarm from the clock alarm list.
         */
        if (alrm1->al_next = alrm2->al_next)
            (alrm1->al_next)->al_prev = alrm1;

        /*
         * If a clock_sleep() alarm, wakeup the thread
         * which issued the clock_sleep() call.
         */
        if (alrm2->al_status == ALARM_SLEEP) {
            alrm2->al_next = 0;
            thread_wakeup((event_t)alrm2);
        }
        else {
            /*
             * If a clock_alarm() alarm, place the alarm on
             * the alarm done list and wakeup the dedicated
             * kernel alarm_thread to service the alarm.
             */
            assert(alrm2->al_status == ALARM_CLOCK);
            if (alrm2->al_next = alrmdone)
                alrmdone->al_prev = alrm2;
            else
                thread_wakeup((event_t)&alrmdone);
            alrm2->al_prev = (alarm_t) &alrmdone;
            alrmdone = alrm2;
        }
    }
    UNLOCK_CLOCK(s);
}

/*
 * Post an alarm on a clock's active alarm list. The alarm is
 * inserted in time-order into the clock's active alarm list.
 * Always called from within a LOCK_CLOCK() code section.
 */
static
void
post_alarm(
    clock_t         clock,
    alarm_t         alarm)
{
    register alarm_t    alrm1, alrm2;
    mach_timespec_t     *alarm_time;
    mach_timespec_t     *queue_time;

    /*
     * Traverse alarm list until queue time is greater
     * than alarm time, then insert alarm.
     */
    alarm_time = &alarm->al_time;
    alrm1 = (alarm_t) &clock->cl_alarm;
    while (alrm2 = alrm1->al_next) {
        queue_time = &alrm2->al_time;
        if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
            break;
        alrm1 = alrm2;
    }
    alrm1->al_next = alarm;
    alarm->al_next = alrm2;
    alarm->al_prev = alrm1;
    if (alrm2)
        alrm2->al_prev = alarm;

    /*
     * If the inserted alarm is the 'earliest' alarm,
     * reset the device layer alarm time accordingly.
     */
    if (clock->cl_alarm.al_next == alarm)
        (*clock->cl_ops->c_setalrm)(alarm_time);
}

/*
 * Check the validity of 'alarm_time' and 'alarm_type'. If either
 * argument is invalid, return a negative value. If the 'alarm_time'
 * is now, return a 0 value. If the 'alarm_time' is in the future,
 * return a positive value.
 */
static
int
check_time(
    alarm_type_t        alarm_type,
    mach_timespec_t     *alarm_time,
    mach_timespec_t     *clock_time)
{
    int     result;

    if (BAD_ALRMTYPE(alarm_type))
        return (-1);
    if (BAD_MACH_TIMESPEC(alarm_time))
        return (-1);
    if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
        ADD_MACH_TIMESPEC(alarm_time, clock_time);

    result = CMP_MACH_TIMESPEC(alarm_time, clock_time);

    return ((result >= 0)? result: 0);
}
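
/*
 * Worked example of the check_time() conventions above, assuming a
 * current clock_time of { 100, 0 }:
 *
 *  TIME_ABSOLUTE, alarm_time = { 100, 0 }  ->  0  (due now)
 *  TIME_ABSOLUTE, alarm_time = {  90, 0 }  ->  0  (already past, also due)
 *  TIME_RELATIVE, alarm_time = {   5, 0 }  ->  converted in place to
 *                                              { 105, 0 }, returns > 0
 *  alarm_time with tv_nsec out of range    -> -1  (invalid)
 */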

mach_timespec_t
clock_get_system_value(void)
{
    clock_t             clock = &clock_list[SYSTEM_CLOCK];
    mach_timespec_t     value;

    (void) (*clock->cl_ops->c_gettime)(&value);

    return value;
}

mach_timespec_t
clock_get_calendar_value(void)
{
    clock_t             clock = &clock_list[CALENDAR_CLOCK];
    mach_timespec_t     value = MACH_TIMESPEC_ZERO;

    (void) (*clock->cl_ops->c_gettime)(&value);

    return value;
}

void
clock_deadline_for_periodic_event(
    uint64_t        interval,
    uint64_t        abstime,
    uint64_t        *deadline)
{
    assert(interval != 0);

    *deadline += interval;

    if (*deadline <= abstime) {
        *deadline = abstime + interval;
        abstime = mach_absolute_time();

        if (*deadline <= abstime)
            *deadline = abstime + interval;
    }
}
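
/*
 * Worked example for clock_deadline_for_periodic_event(), using made-up
 * absolute-time values: with interval = 10 and *deadline = 100,
 *
 *  abstime =  95:  *deadline advances to 110, which is still in the
 *                  future, so it is left alone.
 *  abstime = 125:  110 has already passed, so the deadline is pulled
 *                  forward to abstime + interval (135) and re-checked
 *                  against a fresh mach_absolute_time() reading; the next
 *                  deadline therefore always lands in the future, at the
 *                  cost of skipping any missed periods.
 */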

void
mk_timebase_info(
    uint32_t    *delta,
    uint32_t    *abs_to_ns_numer,
    uint32_t    *abs_to_ns_denom,
    uint32_t    *proc_to_abs_numer,
    uint32_t    *proc_to_abs_denom)
{
    mach_timebase_info_data_t   info;
    uint32_t                    one = 1;

    clock_timebase_info(&info);

    copyout((void *)&one, (void *)delta, sizeof (uint32_t));

    copyout((void *)&info.numer, (void *)abs_to_ns_numer, sizeof (uint32_t));
    copyout((void *)&info.denom, (void *)abs_to_ns_denom, sizeof (uint32_t));

    copyout((void *)&one, (void *)proc_to_abs_numer, sizeof (uint32_t));
    copyout((void *)&one, (void *)proc_to_abs_denom, sizeof (uint32_t));
}

kern_return_t
mach_timebase_info(
    mach_timebase_info_t    out_info)
{
    mach_timebase_info_data_t   info;

    clock_timebase_info(&info);

    copyout((void *)&info, (void *)out_info, sizeof (info));

    return (KERN_SUCCESS);
}
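
/*
 * Illustrative sketch (not part of this file): the usual user-level use
 * of mach_timebase_info() is to convert mach_absolute_time() deltas to
 * nanoseconds. This is standard user-level usage; the 64-bit arithmetic
 * below ignores overflow for brevity.
 *
 *  #include <mach/mach_time.h>
 *
 *  mach_timebase_info_data_t   info;
 *  uint64_t                    start, elapsed_ns;
 *
 *  mach_timebase_info(&info);
 *  start = mach_absolute_time();
 *  // ... work being timed ...
 *  elapsed_ns = (mach_absolute_time() - start) * info.numer / info.denom;
 */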

kern_return_t
mach_wait_until(
    uint64_t        deadline)
{
    int             wait_result;

    wait_result = assert_wait((event_t)&mach_wait_until, THREAD_ABORTSAFE);
    if (wait_result == THREAD_WAITING) {
        thread_set_timer_deadline(deadline);
        wait_result = thread_block(THREAD_CONTINUE_NULL);
        if (wait_result != THREAD_TIMED_OUT)
            thread_cancel_timer();
    }

    return ((wait_result == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
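
/*
 * Illustrative sketch (not part of this file): a user-level caller would
 * normally derive the absolute-time deadline from a nanosecond delay via
 * the timebase and then trap into mach_wait_until(). The sleep_for_ns()
 * helper name is hypothetical, and it is assumed that a user-level stub
 * for the mach_wait_until() trap is visible to the caller; overflow and
 * KERN_ABORTED handling are omitted.
 *
 *  #include <mach/mach_time.h>
 *
 *  kern_return_t
 *  sleep_for_ns(uint64_t ns)       // hypothetical helper
 *  {
 *      mach_timebase_info_data_t   info;
 *      uint64_t                    deadline;
 *
 *      mach_timebase_info(&info);
 *      deadline = mach_absolute_time() + ns * info.denom / info.numer;
 *      return (mach_wait_until(deadline));
 *  }
 */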

void
clock_adjtime(
    int32_t     *secs,
    int32_t     *microsecs)
{
    uint32_t    interval;
    spl_t       s;

    s = splclock();
    simple_lock(&calend_adjlock);

    interval = clock_set_calendar_adjtime(secs, microsecs);
    if (interval != 0) {
        if (calend_adjdeadline >= interval)
            calend_adjdeadline -= interval;
        clock_deadline_for_periodic_event(interval, mach_absolute_time(),
                                          &calend_adjdeadline);

        timer_call_enter(&calend_adjcall, calend_adjdeadline);
    }
    else
        timer_call_cancel(&calend_adjcall);

    simple_unlock(&calend_adjlock);
    splx(s);
}

static void
calend_adjust_call(
    timer_call_param_t      p0,
    timer_call_param_t      p1)
{
    uint32_t    interval;
    spl_t       s;

    s = splclock();
    simple_lock(&calend_adjlock);

    interval = clock_adjust_calendar();
    if (interval != 0) {
        clock_deadline_for_periodic_event(interval, mach_absolute_time(),
                                          &calend_adjdeadline);

        timer_call_enter(&calend_adjcall, calend_adjdeadline);
    }

    simple_unlock(&calend_adjlock);
    splx(s);
}

void
clock_wakeup_calendar(void)
{
    thread_call_enter(&calend_wakecall);
}

static void
calend_dowakeup(
    thread_call_param_t     p0,
    thread_call_param_t     p1)
{
    void    IOKitResetTime(void);

    IOKitResetTime();
}