]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/clock.c
xnu-792.21.3.tar.gz
[apple/xnu.git] / osfmk / kern / clock.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * File: kern/clock.c
33 * Purpose: Routines for the creation and use of kernel
34 * alarm clock services. This file and the ipc
35 * routines in kern/ipc_clock.c constitute the
36 * machine-independent clock service layer.
37 */
38
39 #include <mach_host.h>
40
41 #include <mach/mach_types.h>
42 #include <mach/boolean.h>
43 #include <mach/processor_info.h>
44 #include <mach/vm_param.h>
45
46 #include <kern/cpu_number.h>
47 #include <kern/misc_protos.h>
48 #include <kern/lock.h>
49 #include <kern/host.h>
50 #include <kern/spl.h>
51 #include <kern/sched_prim.h>
52 #include <kern/thread.h>
53 #include <kern/ipc_host.h>
54 #include <kern/clock.h>
55 #include <kern/zalloc.h>
56
57 #include <ipc/ipc_types.h>
58 #include <ipc/ipc_port.h>
59
60 #include <mach/mach_traps.h>
61 #include <mach/clock_reply.h>
62 #include <mach/mach_time.h>
63
64 #include <mach/clock_server.h>
65 #include <mach/clock_priv_server.h>
66 #include <mach/host_priv_server.h>
67
68 /*
69 * Exported interface
70 */
71
72 #include <mach/clock_server.h>
73 #include <mach/mach_host_server.h>
74
75 /* local data declarations */
76 decl_simple_lock_data(static,ClockLock) /* clock system synchronization */
77 static struct zone *alarm_zone; /* zone for user alarms */
78 static struct alarm *alrmfree; /* alarm free list pointer */
79 static struct alarm *alrmdone; /* alarm done list pointer */
80 static long alrm_seqno; /* uniquely identifies alarms */
81 static thread_call_data_t alarm_deliver;
82
83 decl_simple_lock_data(static,calend_adjlock)
84
85 static timer_call_data_t calend_adjcall;
86 static uint64_t calend_adjdeadline;
87
88 static thread_call_data_t calend_wakecall;
89
90 /* external declarations */
91 extern struct clock clock_list[];
92 extern int clock_count;
93
94 /* local clock subroutines */
95 static
96 void flush_alarms(
97 clock_t clock);
98
99 static
100 void post_alarm(
101 clock_t clock,
102 alarm_t alarm);
103
104 static
105 int check_time(
106 alarm_type_t alarm_type,
107 mach_timespec_t *alarm_time,
108 mach_timespec_t *clock_time);
109
110 static
111 void clock_alarm_deliver(
112 thread_call_param_t p0,
113 thread_call_param_t p1);
114
115 static
116 void calend_adjust_call(
117 timer_call_param_t p0,
118 timer_call_param_t p1);
119
120 static
121 void calend_dowakeup(
122 thread_call_param_t p0,
123 thread_call_param_t p1);
124
125 /*
126 * Macros to lock/unlock clock system.
127 */
128 #define LOCK_CLOCK(s) \
129 s = splclock(); \
130 simple_lock(&ClockLock);
131
132 #define UNLOCK_CLOCK(s) \
133 simple_unlock(&ClockLock); \
134 splx(s);
135
136 /*
137 * clock_config:
138 *
139 * Called once at boot to configure the clock subsystem.
140 */
141 void
142 clock_config(void)
143 {
144 clock_t clock;
145 register int i;
146
147 assert(cpu_number() == master_cpu);
148
149 simple_lock_init(&ClockLock, 0);
150 thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);
151
152 simple_lock_init(&calend_adjlock, 0);
153 timer_call_setup(&calend_adjcall, calend_adjust_call, NULL);
154
155 thread_call_setup(&calend_wakecall, calend_dowakeup, NULL);
156
157 /*
158 * Configure clock devices.
159 */
160 for (i = 0; i < clock_count; i++) {
161 clock = &clock_list[i];
162 if (clock->cl_ops) {
163 if ((*clock->cl_ops->c_config)() == 0)
164 clock->cl_ops = 0;
165 }
166 }
167
168 /*
169 * Initialize the timer callouts.
170 */
171 timer_call_initialize();
172
173 /* start alarm sequence numbers at 0 */
174 alrm_seqno = 0;
175 }
176
177 /*
178 * clock_init:
179 *
180 * Called on a processor each time started.
181 */
182 void
183 clock_init(void)
184 {
185 clock_t clock;
186 register int i;
187
188 /*
189 * Initialize basic clock structures.
190 */
191 for (i = 0; i < clock_count; i++) {
192 clock = &clock_list[i];
193 if (clock->cl_ops && clock->cl_ops->c_init)
194 (*clock->cl_ops->c_init)();
195 }
196 }
197
/*
 * clock_timebase_init:
 *
 * Called by machine dependent code to initialize areas
 * dependent on the timebase value; may be called multiple
 * times during start up.  Simply forwards to the scheduler
 * so it can recompute its timebase-derived quantities.
 */
void
clock_timebase_init(void)
{
	sched_timebase_init();
}
209
210 /*
211 * Initialize the clock ipc service facility.
212 */
213 void
214 clock_service_create(void)
215 {
216 clock_t clock;
217 register int i;
218
219 /*
220 * Initialize ipc clock services.
221 */
222 for (i = 0; i < clock_count; i++) {
223 clock = &clock_list[i];
224 if (clock->cl_ops) {
225 ipc_clock_init(clock);
226 ipc_clock_enable(clock);
227 }
228 }
229
230 /*
231 * Perform miscellaneous late
232 * initialization.
233 */
234 i = sizeof(struct alarm);
235 alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");
236 }
237
238 /*
239 * Get the service port on a clock.
240 */
241 kern_return_t
242 host_get_clock_service(
243 host_t host,
244 clock_id_t clock_id,
245 clock_t *clock) /* OUT */
246 {
247 if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
248 *clock = CLOCK_NULL;
249 return (KERN_INVALID_ARGUMENT);
250 }
251
252 *clock = &clock_list[clock_id];
253 if ((*clock)->cl_ops == 0)
254 return (KERN_FAILURE);
255 return (KERN_SUCCESS);
256 }
257
258 /*
259 * Get the control port on a clock.
260 */
261 kern_return_t
262 host_get_clock_control(
263 host_priv_t host_priv,
264 clock_id_t clock_id,
265 clock_t *clock) /* OUT */
266 {
267 if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
268 *clock = CLOCK_NULL;
269 return (KERN_INVALID_ARGUMENT);
270 }
271
272 *clock = &clock_list[clock_id];
273 if ((*clock)->cl_ops == 0)
274 return (KERN_FAILURE);
275 return (KERN_SUCCESS);
276 }
277
278 /*
279 * Get the current clock time.
280 */
281 kern_return_t
282 clock_get_time(
283 clock_t clock,
284 mach_timespec_t *cur_time) /* OUT */
285 {
286 if (clock == CLOCK_NULL)
287 return (KERN_INVALID_ARGUMENT);
288 return ((*clock->cl_ops->c_gettime)(cur_time));
289 }
290
291 /*
292 * Get clock attributes.
293 */
294 kern_return_t
295 clock_get_attributes(
296 clock_t clock,
297 clock_flavor_t flavor,
298 clock_attr_t attr, /* OUT */
299 mach_msg_type_number_t *count) /* IN/OUT */
300 {
301 if (clock == CLOCK_NULL)
302 return (KERN_INVALID_ARGUMENT);
303 if (clock->cl_ops->c_getattr)
304 return(clock->cl_ops->c_getattr(flavor, attr, count));
305 else
306 return (KERN_FAILURE);
307 }
308
309 /*
310 * Set the current clock time.
311 */
312 kern_return_t
313 clock_set_time(
314 clock_t clock,
315 mach_timespec_t new_time)
316 {
317 mach_timespec_t *clock_time;
318
319 if (clock == CLOCK_NULL)
320 return (KERN_INVALID_ARGUMENT);
321 if (clock->cl_ops->c_settime == NULL)
322 return (KERN_FAILURE);
323 clock_time = &new_time;
324 if (BAD_MACH_TIMESPEC(clock_time))
325 return (KERN_INVALID_VALUE);
326
327 /*
328 * Flush all outstanding alarms.
329 */
330 flush_alarms(clock);
331
332 /*
333 * Set the new time.
334 */
335 return (clock->cl_ops->c_settime(clock_time));
336 }
337
338 /*
339 * Set the clock alarm resolution.
340 */
341 kern_return_t
342 clock_set_attributes(
343 clock_t clock,
344 clock_flavor_t flavor,
345 clock_attr_t attr,
346 mach_msg_type_number_t count)
347 {
348 if (clock == CLOCK_NULL)
349 return (KERN_INVALID_ARGUMENT);
350 if (clock->cl_ops->c_setattr)
351 return (clock->cl_ops->c_setattr(flavor, attr, count));
352 else
353 return (KERN_FAILURE);
354 }
355
/*
 * clock_alarm:
 *
 * Set up a clock alarm: when 'alarm_time' arrives on 'clock',
 * a clock_alarm_reply message is sent to 'alarm_port'.
 * Invalid or already-passed times cause an immediate reply
 * instead of a posted alarm; the call itself still returns
 * KERN_SUCCESS then (the error travels in the reply message).
 */
kern_return_t
clock_alarm(
	clock_t					clock,
	alarm_type_t			alarm_type,
	mach_timespec_t			alarm_time,
	ipc_port_t				alarm_port,
	mach_msg_type_name_t	alarm_port_type)
{
	alarm_t					alarm;
	mach_timespec_t			clock_time;
	int						chkstat;
	kern_return_t			reply_code;
	spl_t					s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);
	if (IP_VALID(alarm_port) == 0)
		return (KERN_INVALID_CAPABILITY);

	/*
	 * Check alarm parameters. If parameters are invalid,
	 * send alarm message immediately.  (check_time converts
	 * a relative alarm_time to absolute, in place.)
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);
	chkstat = check_time(alarm_type, &alarm_time, &clock_time);
	if (chkstat <= 0) {
		/* <0: invalid time/type; 0: time already passed. */
		reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
		clock_alarm_reply(alarm_port, alarm_port_type,
			reply_code, alarm_type, clock_time);
		return (KERN_SUCCESS);
	}

	/*
	 * Get alarm and add to clock alarm list.
	 */

	LOCK_CLOCK(s);
	if ((alarm = alrmfree) == 0) {
		/*
		 * Free list empty: drop the lock to allocate from
		 * the zone (zalloc may block), then re-take it
		 * before queueing.
		 */
		UNLOCK_CLOCK(s);
		alarm = (alarm_t) zalloc(alarm_zone);
		if (alarm == 0)
			return (KERN_RESOURCE_SHORTAGE);
		LOCK_CLOCK(s);
	}
	else
		alrmfree = alarm->al_next;

	/* Fill in the alarm and insert it in time order. */
	alarm->al_status = ALARM_CLOCK;
	alarm->al_time = alarm_time;
	alarm->al_type = alarm_type;
	alarm->al_port = alarm_port;
	alarm->al_port_type = alarm_port_type;
	alarm->al_clock = clock;
	alarm->al_seqno = alrm_seqno++;
	post_alarm(clock, alarm);
	UNLOCK_CLOCK(s);

	return (KERN_SUCCESS);
}
420
421 /*
422 * Sleep on a clock. System trap. User-level libmach clock_sleep
423 * interface call takes a mach_timespec_t sleep_time argument which it
424 * converts to sleep_sec and sleep_nsec arguments which are then
425 * passed to clock_sleep_trap.
426 */
427 kern_return_t
428 clock_sleep_trap(
429 struct clock_sleep_trap_args *args)
430 {
431 mach_port_name_t clock_name = args->clock_name;
432 sleep_type_t sleep_type = args->sleep_type;
433 int sleep_sec = args->sleep_sec;
434 int sleep_nsec = args->sleep_nsec;
435 mach_vm_address_t wakeup_time_addr = args->wakeup_time;
436 clock_t clock;
437 mach_timespec_t swtime;
438 kern_return_t rvalue;
439
440 /*
441 * Convert the trap parameters.
442 */
443 if (clock_name != MACH_PORT_NULL)
444 clock = port_name_to_clock(clock_name);
445 else
446 clock = &clock_list[SYSTEM_CLOCK];
447
448 swtime.tv_sec = sleep_sec;
449 swtime.tv_nsec = sleep_nsec;
450
451 /*
452 * Call the actual clock_sleep routine.
453 */
454 rvalue = clock_sleep_internal(clock, sleep_type, &swtime);
455
456 /*
457 * Return current time as wakeup time.
458 */
459 if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
460 copyout((char *)&swtime, wakeup_time_addr, sizeof(mach_timespec_t));
461 }
462 return (rvalue);
463 }
464
/*
 * clock_sleep_internal:
 *
 * Kernel internally callable clock sleep routine. The calling
 * thread is suspended until the requested sleep time is reached.
 * On return *sleep_time holds the (absolute) wakeup time;
 * KERN_ABORTED indicates the wait was interrupted.
 */
kern_return_t
clock_sleep_internal(
	clock_t				clock,
	sleep_type_t		sleep_type,
	mach_timespec_t		*sleep_time)
{
	alarm_t				alarm;
	mach_timespec_t		clock_time;
	kern_return_t		rvalue;
	int					chkstat;
	spl_t				s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);

	/*
	 * Check sleep parameters. If parameters are invalid
	 * return an error, otherwise post alarm request.
	 * (check_time converts a relative sleep_time to
	 * absolute, in place.)
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);

	chkstat = check_time(sleep_type, sleep_time, &clock_time);
	if (chkstat < 0)
		return (KERN_INVALID_VALUE);
	rvalue = KERN_SUCCESS;
	if (chkstat > 0) {
		wait_result_t wait_result;

		/*
		 * Get alarm and add to clock alarm list.
		 */

		LOCK_CLOCK(s);
		if ((alarm = alrmfree) == 0) {
			/* Free list empty: allocate unlocked from the zone. */
			UNLOCK_CLOCK(s);
			alarm = (alarm_t) zalloc(alarm_zone);
			if (alarm == 0)
				return (KERN_RESOURCE_SHORTAGE);
			LOCK_CLOCK(s);
		}
		else
			alrmfree = alarm->al_next;

		/*
		 * Wait for alarm to occur.
		 */
		wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
		if (wait_result == THREAD_WAITING) {
			alarm->al_time = *sleep_time;
			alarm->al_status = ALARM_SLEEP;
			post_alarm(clock, alarm);
			UNLOCK_CLOCK(s);

			wait_result = thread_block(THREAD_CONTINUE_NULL);

			/*
			 * Note if alarm expired normally or whether it
			 * was aborted. If aborted, delete alarm from
			 * clock alarm list. Return alarm to free list.
			 */
			LOCK_CLOCK(s);
			if (alarm->al_status != ALARM_DONE) {
				assert(wait_result != THREAD_AWAKENED);
				/* Still queued: unlink it from the clock's list. */
				if (((alarm->al_prev)->al_next = alarm->al_next) != NULL)
					(alarm->al_next)->al_prev = alarm->al_prev;
				rvalue = KERN_ABORTED;
			}
			/* Report the time the alarm fired (or was set for). */
			*sleep_time = alarm->al_time;
			alarm->al_status = ALARM_FREE;
		} else {
			/* Wait was aborted before we ever blocked. */
			assert(wait_result == THREAD_INTERRUPTED);
			assert(alarm->al_status == ALARM_FREE);
			rvalue = KERN_ABORTED;
		}
		/* Return the alarm structure to the free list. */
		alarm->al_next = alrmfree;
		alrmfree = alarm;
		UNLOCK_CLOCK(s);
	}
	else
		/* Requested time has already passed: report "now". */
		*sleep_time = clock_time;

	return (rvalue);
}
554
555 /*
556 * CLOCK INTERRUPT SERVICE ROUTINES.
557 */
558
/*
 * clock_alarm_intr:
 *
 * Service clock alarm interrupts. Called from machine dependent
 * layer at splclock(). The clock_id argument specifies the clock,
 * and the clock_time argument gives that clock's current time.
 */
void
clock_alarm_intr(
	clock_id_t			clock_id,
	mach_timespec_t		*clock_time)
{
	clock_t				clock;
	register alarm_t	alrm1;
	register alarm_t	alrm2;
	mach_timespec_t		*alarm_time;
	spl_t				s;

	clock = &clock_list[clock_id];

	/*
	 * Update clock alarm list. All alarms that are due are moved
	 * to the alarmdone list to be serviced by the alarm_thread.
	 */

	LOCK_CLOCK(s);
	/* alrm1 trails alrm2; the list head itself acts as a node. */
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		alarm_time = &alrm2->al_time;
		/* List is time-ordered: stop at the first future alarm. */
		if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
			break;

		/*
		 * Alarm has expired, so remove it from the
		 * clock alarm list.
		 */
		if ((alrm1->al_next = alrm2->al_next) != NULL)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_next = 0;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
			thread_wakeup((event_t)alrm2);
		}

		/*
		 * If a clock_alarm() alarm, place the alarm on
		 * the alarm done list and schedule the alarm
		 * delivery mechanism.
		 */
		else {
			assert(alrm2->al_status == ALARM_CLOCK);
			/* Only kick the thread call when the done list was empty. */
			if ((alrm2->al_next = alrmdone) != NULL)
				alrmdone->al_prev = alrm2;
			else
				thread_call_enter(&alarm_deliver);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
		}
	}

	/*
	 * Setup the clock dependent layer to deliver another
	 * interrupt for the next pending alarm.  (On loop break,
	 * alarm_time points at that alarm's expiry time.)
	 */
	if (alrm2)
		(*clock->cl_ops->c_setalrm)(alarm_time);
	UNLOCK_CLOCK(s);
}
633
634 /*
635 * ALARM DELIVERY ROUTINES.
636 */
637
/*
 * clock_alarm_deliver:
 *
 * Thread-call handler that drains the alarm done list,
 * sending a clock_alarm_reply message for each alarm and
 * returning the structure to the free list.  The clock lock
 * is dropped around the (potentially blocking) message send.
 */
static void
clock_alarm_deliver(
	__unused thread_call_param_t		p0,
	__unused thread_call_param_t		p1)
{
	register alarm_t	alrm;
	kern_return_t		code;
	spl_t				s;

	LOCK_CLOCK(s);
	while ((alrm = alrmdone) != NULL) {
		/* Unlink the head of the done list. */
		if ((alrmdone = alrm->al_next) != NULL)
			alrmdone->al_prev = (alarm_t) &alrmdone;
		UNLOCK_CLOCK(s);

		/* ALARM_DONE means it fired; anything else was flushed. */
		code = (alrm->al_status == ALARM_DONE? KERN_SUCCESS: KERN_ABORTED);
		if (alrm->al_port != IP_NULL) {
			/* Deliver message to designated port */
			if (IP_VALID(alrm->al_port)) {
				clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
					alrm->al_type, alrm->al_time);
			}

			LOCK_CLOCK(s);
			alrm->al_status = ALARM_FREE;
			alrm->al_next = alrmfree;
			alrmfree = alrm;
		}
		else
			/* A queued alarm must carry a reply port. */
			panic("clock_alarm_deliver");
	}

	UNLOCK_CLOCK(s);
}
672
673 /*
674 * CLOCK PRIVATE SERVICING SUBROUTINES.
675 */
676
/*
 * Flush all pending alarms on a clock. All alarms
 * are activated and timestamped correctly, so any
 * programs waiting on alarms/threads will proceed
 * with accurate information.  Called before a clock's
 * time is reset (see clock_set_time).
 */
static
void
flush_alarms(
	clock_t				clock)
{
	register alarm_t	alrm1, alrm2;
	spl_t				s;

	/*
	 * Flush all outstanding alarms.
	 */
	LOCK_CLOCK(s);
	/* The list head itself serves as the trailing node. */
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		/*
		 * Remove alarm from the clock alarm list.
		 */
		if ((alrm1->al_next = alrm2->al_next) != NULL)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_next = 0;
			thread_wakeup((event_t)alrm2);
		}
		else {
			/*
			 * If a clock_alarm() alarm, place the alarm on
			 * the alarm done list and wakeup the dedicated
			 * kernel alarm_thread to service the alarm.
			 */
			assert(alrm2->al_status == ALARM_CLOCK);
			if ((alrm2->al_next = alrmdone) != NULL)
				alrmdone->al_prev = alrm2;
			else
				thread_wakeup((event_t)&alrmdone);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
		}
	}
	UNLOCK_CLOCK(s);
}
728
/*
 * Post an alarm on a clock's active alarm list. The alarm is
 * inserted in time-order into the clock's active alarm list.
 * Always called from within a LOCK_CLOCK() code section.
 */
static
void
post_alarm(
	clock_t				clock,
	alarm_t				alarm)
{
	register alarm_t	alrm1, alrm2;
	mach_timespec_t		*alarm_time;
	mach_timespec_t		*queue_time;

	/*
	 * Traverse alarm list until queue time is greater
	 * than alarm time, then insert alarm.
	 */
	alarm_time = &alarm->al_time;
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		queue_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
			break;
		alrm1 = alrm2;
	}
	/* Splice 'alarm' between alrm1 and alrm2 (alrm2 may be NULL). */
	alrm1->al_next = alarm;
	alarm->al_next = alrm2;
	alarm->al_prev = alrm1;
	if (alrm2)
		alrm2->al_prev = alarm;

	/*
	 * If the inserted alarm is the 'earliest' alarm,
	 * reset the device layer alarm time accordingly.
	 */
	if (clock->cl_alarm.al_next == alarm)
		(*clock->cl_ops->c_setalrm)(alarm_time);
}
769
770 /*
771 * Check the validity of 'alarm_time' and 'alarm_type'. If either
772 * argument is invalid, return a negative value. If the 'alarm_time'
773 * is now, return a 0 value. If the 'alarm_time' is in the future,
774 * return a positive value.
775 */
776 static
777 int
778 check_time(
779 alarm_type_t alarm_type,
780 mach_timespec_t *alarm_time,
781 mach_timespec_t *clock_time)
782 {
783 int result;
784
785 if (BAD_ALRMTYPE(alarm_type))
786 return (-1);
787 if (BAD_MACH_TIMESPEC(alarm_time))
788 return (-1);
789 if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
790 ADD_MACH_TIMESPEC(alarm_time, clock_time);
791
792 result = CMP_MACH_TIMESPEC(alarm_time, clock_time);
793
794 return ((result >= 0)? result: 0);
795 }
796
797 mach_timespec_t
798 clock_get_system_value(void)
799 {
800 clock_t clock = &clock_list[SYSTEM_CLOCK];
801 mach_timespec_t value;
802
803 (void) (*clock->cl_ops->c_gettime)(&value);
804
805 return value;
806 }
807
808 mach_timespec_t
809 clock_get_calendar_value(void)
810 {
811 clock_t clock = &clock_list[CALENDAR_CLOCK];
812 mach_timespec_t value = MACH_TIMESPEC_ZERO;
813
814 (void) (*clock->cl_ops->c_gettime)(&value);
815
816 return value;
817 }
818
/*
 * clock_deadline_for_periodic_event:
 *
 * Advance *deadline by one period.  If the result has already
 * passed 'abstime' (the caller's notion of now), re-anchor it
 * one period past now, double-checking against a fresh
 * timestamp so the returned deadline always lies ahead.
 */
void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	uint64_t		next;

	assert(interval != 0);

	next = *deadline + interval;
	if (next <= abstime) {
		/* Missed one or more periods: re-anchor to "now". */
		next = abstime + interval;

		/* The caller's "now" may be stale; take a fresh reading. */
		abstime = mach_absolute_time();
		if (next <= abstime)
			next = abstime + interval;
	}
	*deadline = next;
}
837
838 void
839 mk_timebase_info_trap(
840 struct mk_timebase_info_trap_args *args)
841 {
842 uint32_t *delta = args->delta;
843 uint32_t *abs_to_ns_numer = args->abs_to_ns_numer;
844 uint32_t *abs_to_ns_denom = args->abs_to_ns_denom;
845 uint32_t *proc_to_abs_numer = args->proc_to_abs_numer;
846 uint32_t *proc_to_abs_denom = args->proc_to_abs_denom;
847 mach_timebase_info_data_t info;
848 uint32_t one = 1;
849
850 clock_timebase_info(&info);
851
852 copyout((void *)&one, CAST_USER_ADDR_T(delta), sizeof (uint32_t));
853
854 copyout((void *)&info.numer, CAST_USER_ADDR_T(abs_to_ns_numer), sizeof (uint32_t));
855 copyout((void *)&info.denom, CAST_USER_ADDR_T(abs_to_ns_denom), sizeof (uint32_t));
856
857 copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_numer), sizeof (uint32_t));
858 copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_denom), sizeof (uint32_t));
859 }
860
861 kern_return_t
862 mach_timebase_info_trap(
863 struct mach_timebase_info_trap_args *args)
864 {
865 mach_vm_address_t out_info_addr = args->info;
866 mach_timebase_info_data_t info;
867
868 clock_timebase_info(&info);
869
870 copyout((void *)&info, out_info_addr, sizeof (info));
871
872 return (KERN_SUCCESS);
873 }
874
875 static void
876 mach_wait_until_continue(
877 __unused void *parameter,
878 wait_result_t wresult)
879 {
880 thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
881 /*NOTREACHED*/
882 }
883
884 kern_return_t
885 mach_wait_until_trap(
886 struct mach_wait_until_trap_args *args)
887 {
888 uint64_t deadline = args->deadline;
889 wait_result_t wresult;
890
891 wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
892 if (wresult == THREAD_WAITING)
893 wresult = thread_block(mach_wait_until_continue);
894
895 return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
896 }
897
898 /*
899 * Delay primitives.
900 */
901 void
902 clock_delay_until(
903 uint64_t deadline)
904 {
905 uint64_t now = mach_absolute_time();
906
907 if (now >= deadline)
908 return;
909
910 if ( (deadline - now) < (8 * sched_cswtime) ||
911 get_preemption_level() != 0 ||
912 ml_get_interrupts_enabled() == FALSE )
913 machine_delay_until(deadline);
914 else {
915 assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);
916
917 thread_block(THREAD_CONTINUE_NULL);
918 }
919 }
920
/*
 * delay_for_interval:
 *
 * Delay for 'interval' units, where 'scale_factor' gives the
 * unit size in nanoseconds (e.g. NSEC_PER_USEC).
 */
void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		deadline;

	clock_interval_to_deadline(interval, scale_factor, &deadline);

	clock_delay_until(deadline);
}
932
933 void
934 delay(
935 int usec)
936 {
937 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
938 }
939
940 void
941 clock_adjtime(
942 int32_t *secs,
943 int32_t *microsecs)
944 {
945 uint32_t interval;
946 spl_t s;
947
948 s = splclock();
949 simple_lock(&calend_adjlock);
950
951 interval = clock_set_calendar_adjtime(secs, microsecs);
952 if (interval != 0) {
953 if (calend_adjdeadline >= interval)
954 calend_adjdeadline -= interval;
955 clock_deadline_for_periodic_event(interval, mach_absolute_time(),
956 &calend_adjdeadline);
957
958 timer_call_enter(&calend_adjcall, calend_adjdeadline);
959 }
960 else
961 timer_call_cancel(&calend_adjcall);
962
963 simple_unlock(&calend_adjlock);
964 splx(s);
965 }
966
967 static void
968 calend_adjust_call(
969 __unused timer_call_param_t p0,
970 __unused timer_call_param_t p1)
971 {
972 uint32_t interval;
973 spl_t s;
974
975 s = splclock();
976 simple_lock(&calend_adjlock);
977
978 interval = clock_adjust_calendar();
979 if (interval != 0) {
980 clock_deadline_for_periodic_event(interval, mach_absolute_time(),
981 &calend_adjdeadline);
982
983 timer_call_enter(&calend_adjcall, calend_adjdeadline);
984 }
985
986 simple_unlock(&calend_adjlock);
987 splx(s);
988 }
989
990 void
991 clock_wakeup_calendar(void)
992 {
993 thread_call_enter(&calend_wakecall);
994 }
995
996 extern void IOKitResetTime(void); /* XXX */
997
998 static void
999 calend_dowakeup(
1000 __unused thread_call_param_t p0,
1001 __unused thread_call_param_t p1)
1002 {
1003
1004 IOKitResetTime();
1005 }