/* osfmk/kern/clock.c — Apple XNU kernel source (gitweb scrape header removed) */
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * File: kern/clock.c
35 * Purpose: Routines for the creation and use of kernel
36 * alarm clock services. This file and the ipc
37 * routines in kern/ipc_clock.c constitute the
38 * machine-independent clock service layer.
39 */
40
41 #include <mach_host.h>
42
43 #include <mach/mach_types.h>
44 #include <mach/boolean.h>
45 #include <mach/processor_info.h>
46 #include <mach/vm_param.h>
47
48 #include <kern/cpu_number.h>
49 #include <kern/misc_protos.h>
50 #include <kern/lock.h>
51 #include <kern/host.h>
52 #include <kern/spl.h>
53 #include <kern/sched_prim.h>
54 #include <kern/thread.h>
55 #include <kern/ipc_host.h>
56 #include <kern/clock.h>
57 #include <kern/zalloc.h>
58
59 #include <ipc/ipc_types.h>
60 #include <ipc/ipc_port.h>
61
62 #include <mach/mach_traps.h>
63 #include <mach/clock_reply.h>
64 #include <mach/mach_time.h>
65
66 #include <mach/clock_server.h>
67 #include <mach/clock_priv_server.h>
68 #include <mach/host_priv_server.h>
69
70 /*
71 * Exported interface
72 */
73
74 #include <mach/clock_server.h>
75 #include <mach/mach_host_server.h>
76
/* local data declarations */
decl_simple_lock_data(static,ClockLock)		/* clock system synchronization */
static struct	zone		*alarm_zone;	/* zone for user alarms */
static struct	alarm		*alrmfree;	/* alarm free list pointer */
static struct	alarm		*alrmdone;	/* alarm done list pointer */
static long			alrm_seqno;	/* uniquely identifies alarms */
static thread_call_data_t	alarm_deliver;	/* drains alrmdone via clock_alarm_deliver() */

/* serializes the calendar-adjustment state below */
decl_simple_lock_data(static,calend_adjlock)

static timer_call_data_t	calend_adjcall;		/* periodic adjtime step callout */
static uint64_t			calend_adjdeadline;	/* deadline of the next adjtime step */

static thread_call_data_t	calend_wakecall;	/* post-wake time resync (calend_dowakeup) */

/* external declarations */
extern	struct clock	clock_list[];		/* table of configured clocks */
extern	int		clock_count;		/* number of entries in clock_list */

/* local clock subroutines */
static
void		flush_alarms(
			clock_t			clock);

static
void		post_alarm(
			clock_t			clock,
			alarm_t			alarm);

static
int		check_time(
			alarm_type_t		alarm_type,
			mach_timespec_t		*alarm_time,
			mach_timespec_t		*clock_time);

static
void		clock_alarm_deliver(
			thread_call_param_t	p0,
			thread_call_param_t	p1);

static
void		calend_adjust_call(
			timer_call_param_t	p0,
			timer_call_param_t	p1);

static
void		calend_dowakeup(
			thread_call_param_t	p0,
			thread_call_param_t	p1);
126
/*
 * Macros to lock/unlock clock system.
 *
 * LOCK_CLOCK raises to splclock() before taking the lock so the
 * critical section cannot be preempted by the clock interrupt;
 * UNLOCK_CLOCK releases in the reverse order.  Each is wrapped in
 * do { } while (0) so it expands to exactly one statement and is
 * safe inside unbraced if/else bodies (the previous form carried a
 * trailing semicolon and two bare statements).
 */
#define LOCK_CLOCK(s)			\
do {					\
	(s) = splclock();		\
	simple_lock(&ClockLock);	\
} while (0)

#define UNLOCK_CLOCK(s)			\
do {					\
	simple_unlock(&ClockLock);	\
	splx(s);			\
} while (0)
137
138 /*
139 * clock_config:
140 *
141 * Called once at boot to configure the clock subsystem.
142 */
143 void
144 clock_config(void)
145 {
146 clock_t clock;
147 register int i;
148
149 assert(cpu_number() == master_cpu);
150
151 simple_lock_init(&ClockLock, 0);
152 thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);
153
154 simple_lock_init(&calend_adjlock, 0);
155 timer_call_setup(&calend_adjcall, calend_adjust_call, NULL);
156
157 thread_call_setup(&calend_wakecall, calend_dowakeup, NULL);
158
159 /*
160 * Configure clock devices.
161 */
162 for (i = 0; i < clock_count; i++) {
163 clock = &clock_list[i];
164 if (clock->cl_ops) {
165 if ((*clock->cl_ops->c_config)() == 0)
166 clock->cl_ops = 0;
167 }
168 }
169
170 /*
171 * Initialize the timer callouts.
172 */
173 timer_call_initialize();
174
175 /* start alarm sequence numbers at 0 */
176 alrm_seqno = 0;
177 }
178
179 /*
180 * clock_init:
181 *
182 * Called on a processor each time started.
183 */
184 void
185 clock_init(void)
186 {
187 clock_t clock;
188 register int i;
189
190 /*
191 * Initialize basic clock structures.
192 */
193 for (i = 0; i < clock_count; i++) {
194 clock = &clock_list[i];
195 if (clock->cl_ops && clock->cl_ops->c_init)
196 (*clock->cl_ops->c_init)();
197 }
198 }
199
/*
 * Called by machine dependent code
 * to initialize areas dependent on the
 * timebase value. May be called multiple
 * times during start up.
 */
void
clock_timebase_init(void)
{
	/* Currently only the scheduler keeps timebase-derived state. */
	sched_timebase_init();
}
211
212 /*
213 * Initialize the clock ipc service facility.
214 */
215 void
216 clock_service_create(void)
217 {
218 clock_t clock;
219 register int i;
220
221 /*
222 * Initialize ipc clock services.
223 */
224 for (i = 0; i < clock_count; i++) {
225 clock = &clock_list[i];
226 if (clock->cl_ops) {
227 ipc_clock_init(clock);
228 ipc_clock_enable(clock);
229 }
230 }
231
232 /*
233 * Perform miscellaneous late
234 * initialization.
235 */
236 i = sizeof(struct alarm);
237 alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");
238 }
239
240 /*
241 * Get the service port on a clock.
242 */
243 kern_return_t
244 host_get_clock_service(
245 host_t host,
246 clock_id_t clock_id,
247 clock_t *clock) /* OUT */
248 {
249 if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
250 *clock = CLOCK_NULL;
251 return (KERN_INVALID_ARGUMENT);
252 }
253
254 *clock = &clock_list[clock_id];
255 if ((*clock)->cl_ops == 0)
256 return (KERN_FAILURE);
257 return (KERN_SUCCESS);
258 }
259
260 /*
261 * Get the control port on a clock.
262 */
263 kern_return_t
264 host_get_clock_control(
265 host_priv_t host_priv,
266 clock_id_t clock_id,
267 clock_t *clock) /* OUT */
268 {
269 if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
270 *clock = CLOCK_NULL;
271 return (KERN_INVALID_ARGUMENT);
272 }
273
274 *clock = &clock_list[clock_id];
275 if ((*clock)->cl_ops == 0)
276 return (KERN_FAILURE);
277 return (KERN_SUCCESS);
278 }
279
/*
 * Get the current clock time.
 *
 * NOTE(review): unlike c_getattr/c_settime, c_gettime is not checked
 * for NULL here — presumably mandatory for configured clocks; verify
 * against the device-layer ops tables.
 */
kern_return_t
clock_get_time(
	clock_t			clock,
	mach_timespec_t		*cur_time)	/* OUT */
{
	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	return ((*clock->cl_ops->c_gettime)(cur_time));
}
292
293 /*
294 * Get clock attributes.
295 */
296 kern_return_t
297 clock_get_attributes(
298 clock_t clock,
299 clock_flavor_t flavor,
300 clock_attr_t attr, /* OUT */
301 mach_msg_type_number_t *count) /* IN/OUT */
302 {
303 if (clock == CLOCK_NULL)
304 return (KERN_INVALID_ARGUMENT);
305 if (clock->cl_ops->c_getattr)
306 return(clock->cl_ops->c_getattr(flavor, attr, count));
307 else
308 return (KERN_FAILURE);
309 }
310
311 /*
312 * Set the current clock time.
313 */
314 kern_return_t
315 clock_set_time(
316 clock_t clock,
317 mach_timespec_t new_time)
318 {
319 mach_timespec_t *clock_time;
320
321 if (clock == CLOCK_NULL)
322 return (KERN_INVALID_ARGUMENT);
323 if (clock->cl_ops->c_settime == NULL)
324 return (KERN_FAILURE);
325 clock_time = &new_time;
326 if (BAD_MACH_TIMESPEC(clock_time))
327 return (KERN_INVALID_VALUE);
328
329 /*
330 * Flush all outstanding alarms.
331 */
332 flush_alarms(clock);
333
334 /*
335 * Set the new time.
336 */
337 return (clock->cl_ops->c_settime(clock_time));
338 }
339
340 /*
341 * Set the clock alarm resolution.
342 */
343 kern_return_t
344 clock_set_attributes(
345 clock_t clock,
346 clock_flavor_t flavor,
347 clock_attr_t attr,
348 mach_msg_type_number_t count)
349 {
350 if (clock == CLOCK_NULL)
351 return (KERN_INVALID_ARGUMENT);
352 if (clock->cl_ops->c_setattr)
353 return (clock->cl_ops->c_setattr(flavor, attr, count));
354 else
355 return (KERN_FAILURE);
356 }
357
/*
 * Setup a clock alarm: when 'alarm_time' arrives, a
 * clock_alarm_reply message is sent to 'alarm_port'.  Invalid or
 * already-past times are answered immediately rather than queued.
 */
kern_return_t
clock_alarm(
	clock_t			clock,
	alarm_type_t		alarm_type,
	mach_timespec_t		alarm_time,
	ipc_port_t		alarm_port,
	mach_msg_type_name_t	alarm_port_type)
{
	alarm_t			alarm;
	mach_timespec_t		clock_time;
	int			chkstat;
	kern_return_t		reply_code;
	spl_t			s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);
	if (IP_VALID(alarm_port) == 0)
		return (KERN_INVALID_CAPABILITY);

	/*
	 * Check alarm parameters. If parameters are invalid,
	 * send alarm message immediately.  check_time() converts a
	 * TIME_RELATIVE alarm_time to absolute in place.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);
	chkstat = check_time(alarm_type, &alarm_time, &clock_time);
	if (chkstat <= 0) {
		/* < 0: time invalid; == 0: already passed */
		reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
		clock_alarm_reply(alarm_port, alarm_port_type,
				  reply_code, alarm_type, clock_time);
		return (KERN_SUCCESS);
	}

	/*
	 * Get alarm and add to clock alarm list.  The free list is
	 * consulted first; only if it is empty do we drop the clock
	 * lock to allocate from the zone (zalloc may block), retaking
	 * the lock afterwards.
	 */

	LOCK_CLOCK(s);
	if ((alarm = alrmfree) == 0) {
		UNLOCK_CLOCK(s);
		alarm = (alarm_t) zalloc(alarm_zone);
		if (alarm == 0)
			return (KERN_RESOURCE_SHORTAGE);
		LOCK_CLOCK(s);
	}
	else
		alrmfree = alarm->al_next;

	alarm->al_status = ALARM_CLOCK;
	alarm->al_time = alarm_time;
	alarm->al_type = alarm_type;
	alarm->al_port = alarm_port;
	alarm->al_port_type = alarm_port_type;
	alarm->al_clock = clock;
	alarm->al_seqno = alrm_seqno++;
	post_alarm(clock, alarm);
	UNLOCK_CLOCK(s);

	return (KERN_SUCCESS);
}
422
423 /*
424 * Sleep on a clock. System trap. User-level libmach clock_sleep
425 * interface call takes a mach_timespec_t sleep_time argument which it
426 * converts to sleep_sec and sleep_nsec arguments which are then
427 * passed to clock_sleep_trap.
428 */
429 kern_return_t
430 clock_sleep_trap(
431 struct clock_sleep_trap_args *args)
432 {
433 mach_port_name_t clock_name = args->clock_name;
434 sleep_type_t sleep_type = args->sleep_type;
435 int sleep_sec = args->sleep_sec;
436 int sleep_nsec = args->sleep_nsec;
437 mach_vm_address_t wakeup_time_addr = args->wakeup_time;
438 clock_t clock;
439 mach_timespec_t swtime;
440 kern_return_t rvalue;
441
442 /*
443 * Convert the trap parameters.
444 */
445 if (clock_name != MACH_PORT_NULL)
446 clock = port_name_to_clock(clock_name);
447 else
448 clock = &clock_list[SYSTEM_CLOCK];
449
450 swtime.tv_sec = sleep_sec;
451 swtime.tv_nsec = sleep_nsec;
452
453 /*
454 * Call the actual clock_sleep routine.
455 */
456 rvalue = clock_sleep_internal(clock, sleep_type, &swtime);
457
458 /*
459 * Return current time as wakeup time.
460 */
461 if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
462 copyout((char *)&swtime, wakeup_time_addr, sizeof(mach_timespec_t));
463 }
464 return (rvalue);
465 }
466
/*
 * Kernel internally callable clock sleep routine. The calling
 * thread is suspended until the requested sleep time is reached.
 * On return, *sleep_time holds the actual wakeup time (for a
 * TIME_RELATIVE request it is first converted to absolute time).
 */
kern_return_t
clock_sleep_internal(
	clock_t			clock,
	sleep_type_t		sleep_type,
	mach_timespec_t		*sleep_time)
{
	alarm_t			alarm;
	mach_timespec_t		clock_time;
	kern_return_t		rvalue;
	int			chkstat;
	spl_t			s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);

	/*
	 * Check sleep parameters. If parameters are invalid
	 * return an error, otherwise post alarm request.
	 * check_time() converts relative times to absolute in place.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);

	chkstat = check_time(sleep_type, sleep_time, &clock_time);
	if (chkstat < 0)
		return (KERN_INVALID_VALUE);
	rvalue = KERN_SUCCESS;
	if (chkstat > 0) {
		wait_result_t		wait_result;

		/*
		 * Get alarm and add to clock alarm list.  The clock
		 * lock is dropped around zalloc() since it may block.
		 */

		LOCK_CLOCK(s);
		if ((alarm = alrmfree) == 0) {
			UNLOCK_CLOCK(s);
			alarm = (alarm_t) zalloc(alarm_zone);
			if (alarm == 0)
				return (KERN_RESOURCE_SHORTAGE);
			LOCK_CLOCK(s);
		}
		else
			alrmfree = alarm->al_next;

		/*
		 * Wait for alarm to occur.  assert_wait() must precede
		 * posting the alarm so the interrupt-side wakeup
		 * cannot be lost.
		 */
		wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
		if (wait_result == THREAD_WAITING) {
			alarm->al_time = *sleep_time;
			alarm->al_status = ALARM_SLEEP;
			post_alarm(clock, alarm);
			UNLOCK_CLOCK(s);

			wait_result = thread_block(THREAD_CONTINUE_NULL);

			/*
			 * Note if alarm expired normally or whether it
			 * was aborted. If aborted, delete alarm from
			 * clock alarm list. Return alarm to free list.
			 */
			LOCK_CLOCK(s);
			if (alarm->al_status != ALARM_DONE) {
				assert(wait_result != THREAD_AWAKENED);
				/* still on the clock list: unlink it */
				if (((alarm->al_prev)->al_next = alarm->al_next) != NULL)
					(alarm->al_next)->al_prev = alarm->al_prev;
				rvalue = KERN_ABORTED;
			}
			*sleep_time = alarm->al_time;
			alarm->al_status = ALARM_FREE;
		} else {
			/* the wait was aborted before we could block */
			assert(wait_result == THREAD_INTERRUPTED);
			assert(alarm->al_status == ALARM_FREE);
			rvalue = KERN_ABORTED;
		}
		/* return the alarm structure to the free list */
		alarm->al_next = alrmfree;
		alrmfree = alarm;
		UNLOCK_CLOCK(s);
	}
	else
		/* requested time has already passed */
		*sleep_time = clock_time;

	return (rvalue);
}
556
/*
 * CLOCK INTERRUPT SERVICE ROUTINES.
 */

/*
 * Service clock alarm interrupts. Called from machine dependent
 * layer at splclock(). The clock_id argument specifies the clock,
 * and the clock_time argument gives that clock's current time.
 */
void
clock_alarm_intr(
	clock_id_t		clock_id,
	mach_timespec_t		*clock_time)
{
	clock_t			clock;
	register alarm_t	alrm1;
	register alarm_t	alrm2;
	mach_timespec_t		*alarm_time;
	spl_t			s;

	clock = &clock_list[clock_id];

	/*
	 * Update clock alarm list. All alarms that are due are moved
	 * to the alarmdone list to be serviced by the alarm delivery
	 * thread call.  The list is time-ordered (see post_alarm()),
	 * so scanning stops at the first alarm still in the future.
	 */

	LOCK_CLOCK(s);
	alrm1 = (alarm_t) &clock->cl_alarm;	/* list-head sentinel */
	while ((alrm2 = alrm1->al_next) != NULL) {
		alarm_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
			break;		/* first not-yet-due alarm */

		/*
		 * Alarm has expired, so remove it from the
		 * clock alarm list.
		 */
		if ((alrm1->al_next = alrm2->al_next) != NULL)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_next = 0;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
			thread_wakeup((event_t)alrm2);
		}

		/*
		 * If a clock_alarm() alarm, place the alarm on
		 * the alarm done list and schedule the alarm
		 * delivery mechanism.  The thread call only needs
		 * a kick when the done list was empty; the deliver
		 * routine drains the whole list.
		 */
		else {
			assert(alrm2->al_status == ALARM_CLOCK);
			if ((alrm2->al_next = alrmdone) != NULL)
				alrmdone->al_prev = alrm2;
			else
				thread_call_enter(&alarm_deliver);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
		}
	}

	/*
	 * Setup the clock dependent layer to deliver another
	 * interrupt for the next pending alarm; alarm_time still
	 * points at the first future alarm found by the loop.
	 */
	if (alrm2)
		(*clock->cl_ops->c_setalrm)(alarm_time);
	UNLOCK_CLOCK(s);
}
635
/*
 * ALARM DELIVERY ROUTINES.
 */

/*
 * clock_alarm_deliver:
 *
 * Thread-call work routine: drain the alarm done list, sending a
 * clock_alarm_reply message for each alarm, then return the alarm
 * structures to the free list.  Scheduled whenever the done list
 * transitions from empty to non-empty (see clock_alarm_intr()).
 */
static void
clock_alarm_deliver(
	__unused thread_call_param_t		p0,
	__unused thread_call_param_t		p1)
{
	register alarm_t	alrm;
	kern_return_t		code;
	spl_t			s;

	LOCK_CLOCK(s);
	while ((alrm = alrmdone) != NULL) {
		/* unlink the head; drop the lock for the IPC send */
		if ((alrmdone = alrm->al_next) != NULL)
			alrmdone->al_prev = (alarm_t) &alrmdone;
		UNLOCK_CLOCK(s);

		/* ALARM_DONE means it fired normally; otherwise it was flushed */
		code = (alrm->al_status == ALARM_DONE? KERN_SUCCESS: KERN_ABORTED);
		if (alrm->al_port != IP_NULL) {
			/* Deliver message to designated port */
			if (IP_VALID(alrm->al_port)) {
				clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
						  alrm->al_type, alrm->al_time);
			}

			LOCK_CLOCK(s);
			alrm->al_status = ALARM_FREE;
			alrm->al_next = alrmfree;
			alrmfree = alrm;
		}
		else
			/* a queued clock_alarm() must always carry a reply port */
			panic("clock_alarm_deliver");
	}

	UNLOCK_CLOCK(s);
}
674
675 /*
676 * CLOCK PRIVATE SERVICING SUBROUTINES.
677 */
678
679 /*
680 * Flush all pending alarms on a clock. All alarms
681 * are activated and timestamped correctly, so any
682 * programs waiting on alarms/threads will proceed
683 * with accurate information.
684 */
685 static
686 void
687 flush_alarms(
688 clock_t clock)
689 {
690 register alarm_t alrm1, alrm2;
691 spl_t s;
692
693 /*
694 * Flush all outstanding alarms.
695 */
696 LOCK_CLOCK(s);
697 alrm1 = (alarm_t) &clock->cl_alarm;
698 while ((alrm2 = alrm1->al_next) != NULL) {
699 /*
700 * Remove alarm from the clock alarm list.
701 */
702 if ((alrm1->al_next = alrm2->al_next) != NULL)
703 (alrm1->al_next)->al_prev = alrm1;
704
705 /*
706 * If a clock_sleep() alarm, wakeup the thread
707 * which issued the clock_sleep() call.
708 */
709 if (alrm2->al_status == ALARM_SLEEP) {
710 alrm2->al_next = 0;
711 thread_wakeup((event_t)alrm2);
712 }
713 else {
714 /*
715 * If a clock_alarm() alarm, place the alarm on
716 * the alarm done list and wakeup the dedicated
717 * kernel alarm_thread to service the alarm.
718 */
719 assert(alrm2->al_status == ALARM_CLOCK);
720 if ((alrm2->al_next = alrmdone) != NULL)
721 alrmdone->al_prev = alrm2;
722 else
723 thread_wakeup((event_t)&alrmdone);
724 alrm2->al_prev = (alarm_t) &alrmdone;
725 alrmdone = alrm2;
726 }
727 }
728 UNLOCK_CLOCK(s);
729 }
730
731 /*
732 * Post an alarm on a clock's active alarm list. The alarm is
733 * inserted in time-order into the clock's active alarm list.
734 * Always called from within a LOCK_CLOCK() code section.
735 */
736 static
737 void
738 post_alarm(
739 clock_t clock,
740 alarm_t alarm)
741 {
742 register alarm_t alrm1, alrm2;
743 mach_timespec_t *alarm_time;
744 mach_timespec_t *queue_time;
745
746 /*
747 * Traverse alarm list until queue time is greater
748 * than alarm time, then insert alarm.
749 */
750 alarm_time = &alarm->al_time;
751 alrm1 = (alarm_t) &clock->cl_alarm;
752 while ((alrm2 = alrm1->al_next) != NULL) {
753 queue_time = &alrm2->al_time;
754 if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
755 break;
756 alrm1 = alrm2;
757 }
758 alrm1->al_next = alarm;
759 alarm->al_next = alrm2;
760 alarm->al_prev = alrm1;
761 if (alrm2)
762 alrm2->al_prev = alarm;
763
764 /*
765 * If the inserted alarm is the 'earliest' alarm,
766 * reset the device layer alarm time accordingly.
767 */
768 if (clock->cl_alarm.al_next == alarm)
769 (*clock->cl_ops->c_setalrm)(alarm_time);
770 }
771
772 /*
773 * Check the validity of 'alarm_time' and 'alarm_type'. If either
774 * argument is invalid, return a negative value. If the 'alarm_time'
775 * is now, return a 0 value. If the 'alarm_time' is in the future,
776 * return a positive value.
777 */
778 static
779 int
780 check_time(
781 alarm_type_t alarm_type,
782 mach_timespec_t *alarm_time,
783 mach_timespec_t *clock_time)
784 {
785 int result;
786
787 if (BAD_ALRMTYPE(alarm_type))
788 return (-1);
789 if (BAD_MACH_TIMESPEC(alarm_time))
790 return (-1);
791 if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
792 ADD_MACH_TIMESPEC(alarm_time, clock_time);
793
794 result = CMP_MACH_TIMESPEC(alarm_time, clock_time);
795
796 return ((result >= 0)? result: 0);
797 }
798
799 mach_timespec_t
800 clock_get_system_value(void)
801 {
802 clock_t clock = &clock_list[SYSTEM_CLOCK];
803 mach_timespec_t value;
804
805 (void) (*clock->cl_ops->c_gettime)(&value);
806
807 return value;
808 }
809
810 mach_timespec_t
811 clock_get_calendar_value(void)
812 {
813 clock_t clock = &clock_list[CALENDAR_CLOCK];
814 mach_timespec_t value = MACH_TIMESPEC_ZERO;
815
816 (void) (*clock->cl_ops->c_gettime)(&value);
817
818 return value;
819 }
820
/*
 * clock_deadline_for_periodic_event:
 *
 * Advance *deadline by one period ('interval' absolute-time units).
 * If the result is already at or before 'abstime' (one or more
 * periods were missed), re-anchor the deadline one period past the
 * present instead of trying to catch up tick by tick.
 */
void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		/* missed the tick: anchor against the caller's time... */
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		/*
		 * ...then re-check against a fresh timestamp, since the
		 * caller-supplied abstime may itself be stale.
		 */
		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
839
/*
 * mk_timebase_info_trap:
 *
 * Legacy trap: report the absolute-time to nanosecond conversion
 * factors to user space.  The delta and processor-to-absolute
 * ratios are hard-wired to 1 here.
 * NOTE(review): all copyout results are ignored — a bad user
 * address fails silently.
 */
void
mk_timebase_info_trap(
	struct mk_timebase_info_trap_args *args)
{
	uint32_t			*delta = args->delta;
	uint32_t			*abs_to_ns_numer = args->abs_to_ns_numer;
	uint32_t			*abs_to_ns_denom = args->abs_to_ns_denom;
	uint32_t			*proc_to_abs_numer = args->proc_to_abs_numer;
	uint32_t			*proc_to_abs_denom = args->proc_to_abs_denom;
	mach_timebase_info_data_t	info;
	uint32_t			one = 1;

	clock_timebase_info(&info);

	copyout((void *)&one, CAST_USER_ADDR_T(delta), sizeof (uint32_t));

	copyout((void *)&info.numer, CAST_USER_ADDR_T(abs_to_ns_numer), sizeof (uint32_t));
	copyout((void *)&info.denom, CAST_USER_ADDR_T(abs_to_ns_denom), sizeof (uint32_t));

	copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_numer), sizeof (uint32_t));
	copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_denom), sizeof (uint32_t));
}
862
/*
 * mach_timebase_info_trap:
 *
 * Trap form of mach_timebase_info(): copy the numer/denom pair used
 * to convert absolute time to nanoseconds out to user space.
 * NOTE(review): the copyout result is ignored; a bad address still
 * yields KERN_SUCCESS.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t	out_info_addr = args->info;
	mach_timebase_info_data_t info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
876
877 static void
878 mach_wait_until_continue(
879 __unused void *parameter,
880 wait_result_t wresult)
881 {
882 thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
883 /*NOTREACHED*/
884 }
885
886 kern_return_t
887 mach_wait_until_trap(
888 struct mach_wait_until_trap_args *args)
889 {
890 uint64_t deadline = args->deadline;
891 wait_result_t wresult;
892
893 wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
894 if (wresult == THREAD_WAITING)
895 wresult = thread_block(mach_wait_until_continue);
896
897 return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
898 }
899
900 /*
901 * Delay primitives.
902 */
903 void
904 clock_delay_until(
905 uint64_t deadline)
906 {
907 uint64_t now = mach_absolute_time();
908
909 if (now >= deadline)
910 return;
911
912 if ( (deadline - now) < (8 * sched_cswtime) ||
913 get_preemption_level() != 0 ||
914 ml_get_interrupts_enabled() == FALSE )
915 machine_delay_until(deadline);
916 else {
917 assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);
918
919 thread_block(THREAD_CONTINUE_NULL);
920 }
921 }
922
923 void
924 delay_for_interval(
925 uint32_t interval,
926 uint32_t scale_factor)
927 {
928 uint64_t end;
929
930 clock_interval_to_deadline(interval, scale_factor, &end);
931
932 clock_delay_until(end);
933 }
934
935 void
936 delay(
937 int usec)
938 {
939 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
940 }
941
/*
 * clock_adjtime:
 *
 * Begin (or cancel) gradual adjustment of the calendar clock by
 * *secs/*microsecs (adjtime(2) style).  The calendar layer returns
 * the period, in absolute-time units, at which calend_adjust_call()
 * must run to apply the adjustment incrementally; zero means no
 * further periodic work is needed and any pending callout is
 * cancelled.
 */
void
clock_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&calend_adjlock);

	interval = clock_set_calendar_adjtime(secs, microsecs);
	if (interval != 0) {
		/*
		 * Rewind the previous deadline by one period (when
		 * possible) so clock_deadline_for_periodic_event()
		 * re-derives the next tick from the current time.
		 */
		if (calend_adjdeadline >= interval)
			calend_adjdeadline -= interval;
		clock_deadline_for_periodic_event(interval, mach_absolute_time(),
						  &calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}
	else
		timer_call_cancel(&calend_adjcall);

	simple_unlock(&calend_adjlock);
	splx(s);
}
968
/*
 * calend_adjust_call:
 *
 * Periodic timer callout that applies one step of a gradual
 * calendar adjustment started by clock_adjtime().  Re-arms itself
 * while the calendar layer reports a non-zero period remaining.
 */
static void
calend_adjust_call(
	__unused timer_call_param_t		p0,
	__unused timer_call_param_t		p1)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&calend_adjlock);

	interval = clock_adjust_calendar();
	if (interval != 0) {
		/* more adjustment remains: schedule the next step */
		clock_deadline_for_periodic_event(interval, mach_absolute_time(),
						  &calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}

	simple_unlock(&calend_adjlock);
	splx(s);
}
991
/*
 * clock_wakeup_calendar:
 *
 * Kick the wake thread call; the actual work (IOKitResetTime)
 * happens in calend_dowakeup() on a thread-call thread.
 */
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}
997
extern void		IOKitResetTime(void); /* XXX */

/*
 * calend_dowakeup:
 *
 * Thread-call target for clock_wakeup_calendar(); asks IOKit to
 * resynchronize kernel time after a system wake.
 */
static void
calend_dowakeup(
	__unused thread_call_param_t		p0,
	__unused thread_call_param_t		p1)
{

	IOKitResetTime();
}