]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/clock.c
dcd09bdd05e58ddc6f994753cab8eaa6f040a077
[apple/xnu.git] / osfmk / kern / clock.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * @OSF_COPYRIGHT@
25 */
26 /*
27 * File: kern/clock.c
28 * Purpose: Routines for the creation and use of kernel
29 * alarm clock services. This file and the ipc
30 * routines in kern/ipc_clock.c constitute the
31 * machine-independent clock service layer.
32 */
33
34 #include <mach_host.h>
35
36 #include <mach/mach_types.h>
37 #include <mach/boolean.h>
38 #include <mach/processor_info.h>
39 #include <mach/vm_param.h>
40
41 #include <kern/cpu_number.h>
42 #include <kern/misc_protos.h>
43 #include <kern/lock.h>
44 #include <kern/host.h>
45 #include <kern/spl.h>
46 #include <kern/sched_prim.h>
47 #include <kern/thread.h>
48 #include <kern/ipc_host.h>
49 #include <kern/clock.h>
50 #include <kern/zalloc.h>
51
52 #include <ipc/ipc_types.h>
53 #include <ipc/ipc_port.h>
54
55 #include <mach/mach_traps.h>
56 #include <mach/clock_reply.h>
57 #include <mach/mach_time.h>
58
59 #include <mach/clock_server.h>
60 #include <mach/clock_priv_server.h>
61 #include <mach/host_priv_server.h>
62
63 /*
64 * Exported interface
65 */
66
67 #include <mach/clock_server.h>
68 #include <mach/mach_host_server.h>
69
70 /* local data declarations */
71 decl_simple_lock_data(static,ClockLock) /* clock system synchronization */
72 static struct zone *alarm_zone; /* zone for user alarms */
73 static struct alarm *alrmfree; /* alarm free list pointer */
74 static struct alarm *alrmdone; /* alarm done list pointer */
75 static long alrm_seqno; /* uniquely identifies alarms */
76 static thread_call_data_t alarm_deliver;
77
78 decl_simple_lock_data(static,calend_adjlock)
79
80 static timer_call_data_t calend_adjcall;
81 static uint64_t calend_adjdeadline;
82
83 static thread_call_data_t calend_wakecall;
84
85 /* external declarations */
86 extern struct clock clock_list[];
87 extern int clock_count;
88
89 /* local clock subroutines */
90 static
91 void flush_alarms(
92 clock_t clock);
93
94 static
95 void post_alarm(
96 clock_t clock,
97 alarm_t alarm);
98
99 static
100 int check_time(
101 alarm_type_t alarm_type,
102 mach_timespec_t *alarm_time,
103 mach_timespec_t *clock_time);
104
105 static
106 void clock_alarm_deliver(
107 thread_call_param_t p0,
108 thread_call_param_t p1);
109
110 static
111 void calend_adjust_call(
112 timer_call_param_t p0,
113 timer_call_param_t p1);
114
115 static
116 void calend_dowakeup(
117 thread_call_param_t p0,
118 thread_call_param_t p1);
119
120 /*
121 * Macros to lock/unlock clock system.
122 */
123 #define LOCK_CLOCK(s) \
124 s = splclock(); \
125 simple_lock(&ClockLock);
126
127 #define UNLOCK_CLOCK(s) \
128 simple_unlock(&ClockLock); \
129 splx(s);
130
131 /*
132 * clock_config:
133 *
134 * Called once at boot to configure the clock subsystem.
135 */
136 void
137 clock_config(void)
138 {
139 clock_t clock;
140 register int i;
141
142 assert(cpu_number() == master_cpu);
143
144 simple_lock_init(&ClockLock, 0);
145 thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);
146
147 simple_lock_init(&calend_adjlock, 0);
148 timer_call_setup(&calend_adjcall, calend_adjust_call, NULL);
149
150 thread_call_setup(&calend_wakecall, calend_dowakeup, NULL);
151
152 /*
153 * Configure clock devices.
154 */
155 for (i = 0; i < clock_count; i++) {
156 clock = &clock_list[i];
157 if (clock->cl_ops) {
158 if ((*clock->cl_ops->c_config)() == 0)
159 clock->cl_ops = 0;
160 }
161 }
162
163 /*
164 * Initialize the timer callouts.
165 */
166 timer_call_initialize();
167
168 /* start alarm sequence numbers at 0 */
169 alrm_seqno = 0;
170 }
171
172 /*
173 * clock_init:
174 *
175 * Called on a processor each time started.
176 */
177 void
178 clock_init(void)
179 {
180 clock_t clock;
181 register int i;
182
183 /*
184 * Initialize basic clock structures.
185 */
186 for (i = 0; i < clock_count; i++) {
187 clock = &clock_list[i];
188 if (clock->cl_ops && clock->cl_ops->c_init)
189 (*clock->cl_ops->c_init)();
190 }
191 }
192
193 /*
194 * Called by machine dependent code
195 * to initialize areas dependent on the
196 * timebase value. May be called multiple
197 * times during start up.
198 */
199 void
200 clock_timebase_init(void)
201 {
202 sched_timebase_init();
203 }
204
205 /*
206 * Initialize the clock ipc service facility.
207 */
208 void
209 clock_service_create(void)
210 {
211 clock_t clock;
212 register int i;
213
214 /*
215 * Initialize ipc clock services.
216 */
217 for (i = 0; i < clock_count; i++) {
218 clock = &clock_list[i];
219 if (clock->cl_ops) {
220 ipc_clock_init(clock);
221 ipc_clock_enable(clock);
222 }
223 }
224
225 /*
226 * Perform miscellaneous late
227 * initialization.
228 */
229 i = sizeof(struct alarm);
230 alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");
231 }
232
233 /*
234 * Get the service port on a clock.
235 */
236 kern_return_t
237 host_get_clock_service(
238 host_t host,
239 clock_id_t clock_id,
240 clock_t *clock) /* OUT */
241 {
242 if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
243 *clock = CLOCK_NULL;
244 return (KERN_INVALID_ARGUMENT);
245 }
246
247 *clock = &clock_list[clock_id];
248 if ((*clock)->cl_ops == 0)
249 return (KERN_FAILURE);
250 return (KERN_SUCCESS);
251 }
252
253 /*
254 * Get the control port on a clock.
255 */
256 kern_return_t
257 host_get_clock_control(
258 host_priv_t host_priv,
259 clock_id_t clock_id,
260 clock_t *clock) /* OUT */
261 {
262 if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
263 *clock = CLOCK_NULL;
264 return (KERN_INVALID_ARGUMENT);
265 }
266
267 *clock = &clock_list[clock_id];
268 if ((*clock)->cl_ops == 0)
269 return (KERN_FAILURE);
270 return (KERN_SUCCESS);
271 }
272
273 /*
274 * Get the current clock time.
275 */
276 kern_return_t
277 clock_get_time(
278 clock_t clock,
279 mach_timespec_t *cur_time) /* OUT */
280 {
281 if (clock == CLOCK_NULL)
282 return (KERN_INVALID_ARGUMENT);
283 return ((*clock->cl_ops->c_gettime)(cur_time));
284 }
285
286 /*
287 * Get clock attributes.
288 */
289 kern_return_t
290 clock_get_attributes(
291 clock_t clock,
292 clock_flavor_t flavor,
293 clock_attr_t attr, /* OUT */
294 mach_msg_type_number_t *count) /* IN/OUT */
295 {
296 if (clock == CLOCK_NULL)
297 return (KERN_INVALID_ARGUMENT);
298 if (clock->cl_ops->c_getattr)
299 return(clock->cl_ops->c_getattr(flavor, attr, count));
300 else
301 return (KERN_FAILURE);
302 }
303
304 /*
305 * Set the current clock time.
306 */
307 kern_return_t
308 clock_set_time(
309 clock_t clock,
310 mach_timespec_t new_time)
311 {
312 mach_timespec_t *clock_time;
313
314 if (clock == CLOCK_NULL)
315 return (KERN_INVALID_ARGUMENT);
316 if (clock->cl_ops->c_settime == NULL)
317 return (KERN_FAILURE);
318 clock_time = &new_time;
319 if (BAD_MACH_TIMESPEC(clock_time))
320 return (KERN_INVALID_VALUE);
321
322 /*
323 * Flush all outstanding alarms.
324 */
325 flush_alarms(clock);
326
327 /*
328 * Set the new time.
329 */
330 return (clock->cl_ops->c_settime(clock_time));
331 }
332
333 /*
334 * Set the clock alarm resolution.
335 */
336 kern_return_t
337 clock_set_attributes(
338 clock_t clock,
339 clock_flavor_t flavor,
340 clock_attr_t attr,
341 mach_msg_type_number_t count)
342 {
343 if (clock == CLOCK_NULL)
344 return (KERN_INVALID_ARGUMENT);
345 if (clock->cl_ops->c_setattr)
346 return (clock->cl_ops->c_setattr(flavor, attr, count));
347 else
348 return (KERN_FAILURE);
349 }
350
351 /*
352 * Setup a clock alarm.
353 */
354 kern_return_t
355 clock_alarm(
356 clock_t clock,
357 alarm_type_t alarm_type,
358 mach_timespec_t alarm_time,
359 ipc_port_t alarm_port,
360 mach_msg_type_name_t alarm_port_type)
361 {
362 alarm_t alarm;
363 mach_timespec_t clock_time;
364 int chkstat;
365 kern_return_t reply_code;
366 spl_t s;
367
368 if (clock == CLOCK_NULL)
369 return (KERN_INVALID_ARGUMENT);
370 if (clock->cl_ops->c_setalrm == 0)
371 return (KERN_FAILURE);
372 if (IP_VALID(alarm_port) == 0)
373 return (KERN_INVALID_CAPABILITY);
374
375 /*
376 * Check alarm parameters. If parameters are invalid,
377 * send alarm message immediately.
378 */
379 (*clock->cl_ops->c_gettime)(&clock_time);
380 chkstat = check_time(alarm_type, &alarm_time, &clock_time);
381 if (chkstat <= 0) {
382 reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
383 clock_alarm_reply(alarm_port, alarm_port_type,
384 reply_code, alarm_type, clock_time);
385 return (KERN_SUCCESS);
386 }
387
388 /*
389 * Get alarm and add to clock alarm list.
390 */
391
392 LOCK_CLOCK(s);
393 if ((alarm = alrmfree) == 0) {
394 UNLOCK_CLOCK(s);
395 alarm = (alarm_t) zalloc(alarm_zone);
396 if (alarm == 0)
397 return (KERN_RESOURCE_SHORTAGE);
398 LOCK_CLOCK(s);
399 }
400 else
401 alrmfree = alarm->al_next;
402
403 alarm->al_status = ALARM_CLOCK;
404 alarm->al_time = alarm_time;
405 alarm->al_type = alarm_type;
406 alarm->al_port = alarm_port;
407 alarm->al_port_type = alarm_port_type;
408 alarm->al_clock = clock;
409 alarm->al_seqno = alrm_seqno++;
410 post_alarm(clock, alarm);
411 UNLOCK_CLOCK(s);
412
413 return (KERN_SUCCESS);
414 }
415
416 /*
417 * Sleep on a clock. System trap. User-level libmach clock_sleep
418 * interface call takes a mach_timespec_t sleep_time argument which it
419 * converts to sleep_sec and sleep_nsec arguments which are then
420 * passed to clock_sleep_trap.
421 */
422 kern_return_t
423 clock_sleep_trap(
424 struct clock_sleep_trap_args *args)
425 {
426 mach_port_name_t clock_name = args->clock_name;
427 sleep_type_t sleep_type = args->sleep_type;
428 int sleep_sec = args->sleep_sec;
429 int sleep_nsec = args->sleep_nsec;
430 mach_vm_address_t wakeup_time_addr = args->wakeup_time;
431 clock_t clock;
432 mach_timespec_t swtime;
433 kern_return_t rvalue;
434
435 /*
436 * Convert the trap parameters.
437 */
438 if (clock_name != MACH_PORT_NULL)
439 clock = port_name_to_clock(clock_name);
440 else
441 clock = &clock_list[SYSTEM_CLOCK];
442
443 swtime.tv_sec = sleep_sec;
444 swtime.tv_nsec = sleep_nsec;
445
446 /*
447 * Call the actual clock_sleep routine.
448 */
449 rvalue = clock_sleep_internal(clock, sleep_type, &swtime);
450
451 /*
452 * Return current time as wakeup time.
453 */
454 if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
455 copyout((char *)&swtime, wakeup_time_addr, sizeof(mach_timespec_t));
456 }
457 return (rvalue);
458 }
459
460 /*
461 * Kernel internally callable clock sleep routine. The calling
462 * thread is suspended until the requested sleep time is reached.
463 */
464 kern_return_t
465 clock_sleep_internal(
466 clock_t clock,
467 sleep_type_t sleep_type,
468 mach_timespec_t *sleep_time)
469 {
470 alarm_t alarm;
471 mach_timespec_t clock_time;
472 kern_return_t rvalue;
473 int chkstat;
474 spl_t s;
475
476 if (clock == CLOCK_NULL)
477 return (KERN_INVALID_ARGUMENT);
478 if (clock->cl_ops->c_setalrm == 0)
479 return (KERN_FAILURE);
480
481 /*
482 * Check sleep parameters. If parameters are invalid
483 * return an error, otherwise post alarm request.
484 */
485 (*clock->cl_ops->c_gettime)(&clock_time);
486
487 chkstat = check_time(sleep_type, sleep_time, &clock_time);
488 if (chkstat < 0)
489 return (KERN_INVALID_VALUE);
490 rvalue = KERN_SUCCESS;
491 if (chkstat > 0) {
492 wait_result_t wait_result;
493
494 /*
495 * Get alarm and add to clock alarm list.
496 */
497
498 LOCK_CLOCK(s);
499 if ((alarm = alrmfree) == 0) {
500 UNLOCK_CLOCK(s);
501 alarm = (alarm_t) zalloc(alarm_zone);
502 if (alarm == 0)
503 return (KERN_RESOURCE_SHORTAGE);
504 LOCK_CLOCK(s);
505 }
506 else
507 alrmfree = alarm->al_next;
508
509 /*
510 * Wait for alarm to occur.
511 */
512 wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
513 if (wait_result == THREAD_WAITING) {
514 alarm->al_time = *sleep_time;
515 alarm->al_status = ALARM_SLEEP;
516 post_alarm(clock, alarm);
517 UNLOCK_CLOCK(s);
518
519 wait_result = thread_block(THREAD_CONTINUE_NULL);
520
521 /*
522 * Note if alarm expired normally or whether it
523 * was aborted. If aborted, delete alarm from
524 * clock alarm list. Return alarm to free list.
525 */
526 LOCK_CLOCK(s);
527 if (alarm->al_status != ALARM_DONE) {
528 assert(wait_result != THREAD_AWAKENED);
529 if (((alarm->al_prev)->al_next = alarm->al_next) != NULL)
530 (alarm->al_next)->al_prev = alarm->al_prev;
531 rvalue = KERN_ABORTED;
532 }
533 *sleep_time = alarm->al_time;
534 alarm->al_status = ALARM_FREE;
535 } else {
536 assert(wait_result == THREAD_INTERRUPTED);
537 assert(alarm->al_status == ALARM_FREE);
538 rvalue = KERN_ABORTED;
539 }
540 alarm->al_next = alrmfree;
541 alrmfree = alarm;
542 UNLOCK_CLOCK(s);
543 }
544 else
545 *sleep_time = clock_time;
546
547 return (rvalue);
548 }
549
550 /*
551 * CLOCK INTERRUPT SERVICE ROUTINES.
552 */
553
554 /*
555 * Service clock alarm interrupts. Called from machine dependent
556 * layer at splclock(). The clock_id argument specifies the clock,
557 * and the clock_time argument gives that clock's current time.
558 */
559 void
560 clock_alarm_intr(
561 clock_id_t clock_id,
562 mach_timespec_t *clock_time)
563 {
564 clock_t clock;
565 register alarm_t alrm1;
566 register alarm_t alrm2;
567 mach_timespec_t *alarm_time;
568 spl_t s;
569
570 clock = &clock_list[clock_id];
571
572 /*
573 * Update clock alarm list. All alarms that are due are moved
574 * to the alarmdone list to be serviced by the alarm_thread.
575 */
576
577 LOCK_CLOCK(s);
578 alrm1 = (alarm_t) &clock->cl_alarm;
579 while ((alrm2 = alrm1->al_next) != NULL) {
580 alarm_time = &alrm2->al_time;
581 if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
582 break;
583
584 /*
585 * Alarm has expired, so remove it from the
586 * clock alarm list.
587 */
588 if ((alrm1->al_next = alrm2->al_next) != NULL)
589 (alrm1->al_next)->al_prev = alrm1;
590
591 /*
592 * If a clock_sleep() alarm, wakeup the thread
593 * which issued the clock_sleep() call.
594 */
595 if (alrm2->al_status == ALARM_SLEEP) {
596 alrm2->al_next = 0;
597 alrm2->al_status = ALARM_DONE;
598 alrm2->al_time = *clock_time;
599 thread_wakeup((event_t)alrm2);
600 }
601
602 /*
603 * If a clock_alarm() alarm, place the alarm on
604 * the alarm done list and schedule the alarm
605 * delivery mechanism.
606 */
607 else {
608 assert(alrm2->al_status == ALARM_CLOCK);
609 if ((alrm2->al_next = alrmdone) != NULL)
610 alrmdone->al_prev = alrm2;
611 else
612 thread_call_enter(&alarm_deliver);
613 alrm2->al_prev = (alarm_t) &alrmdone;
614 alrmdone = alrm2;
615 alrm2->al_status = ALARM_DONE;
616 alrm2->al_time = *clock_time;
617 }
618 }
619
620 /*
621 * Setup the clock dependent layer to deliver another
622 * interrupt for the next pending alarm.
623 */
624 if (alrm2)
625 (*clock->cl_ops->c_setalrm)(alarm_time);
626 UNLOCK_CLOCK(s);
627 }
628
629 /*
630 * ALARM DELIVERY ROUTINES.
631 */
632
/*
 * clock_alarm_deliver:
 *
 * Thread call target that drains the alarm done list, sending a
 * clock_alarm_reply message for each completed clock_alarm()
 * request and returning the structure to the free list.
 * Scheduled by clock_alarm_intr() when the done list transitions
 * from empty to non-empty.
 */
static void
clock_alarm_deliver(
	__unused thread_call_param_t		p0,
	__unused thread_call_param_t		p1)
{
	register alarm_t	alrm;
	kern_return_t		code;
	spl_t				s;

	LOCK_CLOCK(s);
	while ((alrm = alrmdone) != NULL) {
		/* Unlink the head; drop the lock to send the reply. */
		if ((alrmdone = alrm->al_next) != NULL)
			alrmdone->al_prev = (alarm_t) &alrmdone;
		UNLOCK_CLOCK(s);

		/* ALARM_DONE means normal expiry; anything else aborted. */
		code = (alrm->al_status == ALARM_DONE? KERN_SUCCESS: KERN_ABORTED);
		if (alrm->al_port != IP_NULL) {
			/* Deliver message to designated port */
			if (IP_VALID(alrm->al_port)) {
				clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
								  alrm->al_type, alrm->al_time);
			}

			/* Reacquire the lock and recycle the structure. */
			LOCK_CLOCK(s);
			alrm->al_status = ALARM_FREE;
			alrm->al_next = alrmfree;
			alrmfree = alrm;
		}
		else
			panic("clock_alarm_deliver");
	}

	UNLOCK_CLOCK(s);
}
667
668 /*
669 * CLOCK PRIVATE SERVICING SUBROUTINES.
670 */
671
672 /*
673 * Flush all pending alarms on a clock. All alarms
674 * are activated and timestamped correctly, so any
675 * programs waiting on alarms/threads will proceed
676 * with accurate information.
677 */
678 static
679 void
680 flush_alarms(
681 clock_t clock)
682 {
683 register alarm_t alrm1, alrm2;
684 spl_t s;
685
686 /*
687 * Flush all outstanding alarms.
688 */
689 LOCK_CLOCK(s);
690 alrm1 = (alarm_t) &clock->cl_alarm;
691 while ((alrm2 = alrm1->al_next) != NULL) {
692 /*
693 * Remove alarm from the clock alarm list.
694 */
695 if ((alrm1->al_next = alrm2->al_next) != NULL)
696 (alrm1->al_next)->al_prev = alrm1;
697
698 /*
699 * If a clock_sleep() alarm, wakeup the thread
700 * which issued the clock_sleep() call.
701 */
702 if (alrm2->al_status == ALARM_SLEEP) {
703 alrm2->al_next = 0;
704 thread_wakeup((event_t)alrm2);
705 }
706 else {
707 /*
708 * If a clock_alarm() alarm, place the alarm on
709 * the alarm done list and wakeup the dedicated
710 * kernel alarm_thread to service the alarm.
711 */
712 assert(alrm2->al_status == ALARM_CLOCK);
713 if ((alrm2->al_next = alrmdone) != NULL)
714 alrmdone->al_prev = alrm2;
715 else
716 thread_wakeup((event_t)&alrmdone);
717 alrm2->al_prev = (alarm_t) &alrmdone;
718 alrmdone = alrm2;
719 }
720 }
721 UNLOCK_CLOCK(s);
722 }
723
724 /*
725 * Post an alarm on a clock's active alarm list. The alarm is
726 * inserted in time-order into the clock's active alarm list.
727 * Always called from within a LOCK_CLOCK() code section.
728 */
729 static
730 void
731 post_alarm(
732 clock_t clock,
733 alarm_t alarm)
734 {
735 register alarm_t alrm1, alrm2;
736 mach_timespec_t *alarm_time;
737 mach_timespec_t *queue_time;
738
739 /*
740 * Traverse alarm list until queue time is greater
741 * than alarm time, then insert alarm.
742 */
743 alarm_time = &alarm->al_time;
744 alrm1 = (alarm_t) &clock->cl_alarm;
745 while ((alrm2 = alrm1->al_next) != NULL) {
746 queue_time = &alrm2->al_time;
747 if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
748 break;
749 alrm1 = alrm2;
750 }
751 alrm1->al_next = alarm;
752 alarm->al_next = alrm2;
753 alarm->al_prev = alrm1;
754 if (alrm2)
755 alrm2->al_prev = alarm;
756
757 /*
758 * If the inserted alarm is the 'earliest' alarm,
759 * reset the device layer alarm time accordingly.
760 */
761 if (clock->cl_alarm.al_next == alarm)
762 (*clock->cl_ops->c_setalrm)(alarm_time);
763 }
764
765 /*
766 * Check the validity of 'alarm_time' and 'alarm_type'. If either
767 * argument is invalid, return a negative value. If the 'alarm_time'
768 * is now, return a 0 value. If the 'alarm_time' is in the future,
769 * return a positive value.
770 */
771 static
772 int
773 check_time(
774 alarm_type_t alarm_type,
775 mach_timespec_t *alarm_time,
776 mach_timespec_t *clock_time)
777 {
778 int result;
779
780 if (BAD_ALRMTYPE(alarm_type))
781 return (-1);
782 if (BAD_MACH_TIMESPEC(alarm_time))
783 return (-1);
784 if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
785 ADD_MACH_TIMESPEC(alarm_time, clock_time);
786
787 result = CMP_MACH_TIMESPEC(alarm_time, clock_time);
788
789 return ((result >= 0)? result: 0);
790 }
791
792 mach_timespec_t
793 clock_get_system_value(void)
794 {
795 clock_t clock = &clock_list[SYSTEM_CLOCK];
796 mach_timespec_t value;
797
798 (void) (*clock->cl_ops->c_gettime)(&value);
799
800 return value;
801 }
802
803 mach_timespec_t
804 clock_get_calendar_value(void)
805 {
806 clock_t clock = &clock_list[CALENDAR_CLOCK];
807 mach_timespec_t value = MACH_TIMESPEC_ZERO;
808
809 (void) (*clock->cl_ops->c_gettime)(&value);
810
811 return value;
812 }
813
814 void
815 clock_deadline_for_periodic_event(
816 uint64_t interval,
817 uint64_t abstime,
818 uint64_t *deadline)
819 {
820 assert(interval != 0);
821
822 *deadline += interval;
823
824 if (*deadline <= abstime) {
825 *deadline = abstime + interval;
826 abstime = mach_absolute_time();
827
828 if (*deadline <= abstime)
829 *deadline = abstime + interval;
830 }
831 }
832
/*
 * mk_timebase_info_trap:
 *
 * Trap reporting timebase conversion factors to user space.
 * Copies out the absolute-to-nanosecond numerator/denominator
 * obtained from clock_timebase_info(); the delta and the
 * processor-to-absolute ratio are hardwired to 1.
 *
 * NOTE(review): copyout() results are ignored; bad user addresses
 * fail silently.
 */
void
mk_timebase_info_trap(
	struct mk_timebase_info_trap_args *args)
{
	uint32_t					*delta = args->delta;
	uint32_t					*abs_to_ns_numer = args->abs_to_ns_numer;
	uint32_t					*abs_to_ns_denom = args->abs_to_ns_denom;
	uint32_t					*proc_to_abs_numer = args->proc_to_abs_numer;
	uint32_t					*proc_to_abs_denom = args->proc_to_abs_denom;
	mach_timebase_info_data_t	info;
	uint32_t					one = 1;

	clock_timebase_info(&info);

	copyout((void *)&one, CAST_USER_ADDR_T(delta), sizeof (uint32_t));

	copyout((void *)&info.numer, CAST_USER_ADDR_T(abs_to_ns_numer), sizeof (uint32_t));
	copyout((void *)&info.denom, CAST_USER_ADDR_T(abs_to_ns_denom), sizeof (uint32_t));

	copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_numer), sizeof (uint32_t));
	copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_denom), sizeof (uint32_t));
}
855
856 kern_return_t
857 mach_timebase_info_trap(
858 struct mach_timebase_info_trap_args *args)
859 {
860 mach_vm_address_t out_info_addr = args->info;
861 mach_timebase_info_data_t info;
862
863 clock_timebase_info(&info);
864
865 copyout((void *)&info, out_info_addr, sizeof (info));
866
867 return (KERN_SUCCESS);
868 }
869
870 static void
871 mach_wait_until_continue(
872 __unused void *parameter,
873 wait_result_t wresult)
874 {
875 thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
876 /*NOTREACHED*/
877 }
878
879 kern_return_t
880 mach_wait_until_trap(
881 struct mach_wait_until_trap_args *args)
882 {
883 uint64_t deadline = args->deadline;
884 wait_result_t wresult;
885
886 wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
887 if (wresult == THREAD_WAITING)
888 wresult = thread_block(mach_wait_until_continue);
889
890 return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
891 }
892
893 /*
894 * Delay primitives.
895 */
896 void
897 clock_delay_until(
898 uint64_t deadline)
899 {
900 uint64_t now = mach_absolute_time();
901
902 if (now >= deadline)
903 return;
904
905 if ( (deadline - now) < (8 * sched_cswtime) ||
906 get_preemption_level() != 0 ||
907 ml_get_interrupts_enabled() == FALSE )
908 machine_delay_until(deadline);
909 else {
910 assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);
911
912 thread_block(THREAD_CONTINUE_NULL);
913 }
914 }
915
916 void
917 delay_for_interval(
918 uint32_t interval,
919 uint32_t scale_factor)
920 {
921 uint64_t end;
922
923 clock_interval_to_deadline(interval, scale_factor, &end);
924
925 clock_delay_until(end);
926 }
927
928 void
929 delay(
930 int usec)
931 {
932 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
933 }
934
/*
 * clock_adjtime:
 *
 * Begin (or cancel) a gradual calendar adjustment.  A nonzero
 * interval from clock_set_calendar_adjtime() arms the periodic
 * adjustment timer (serviced by calend_adjust_call()); a zero
 * interval cancels it.
 * NOTE(review): presumably *secs/*microsecs are in/out adjtime
 * values — verify against clock_set_calendar_adjtime().
 */
void
clock_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&calend_adjlock);

	interval = clock_set_calendar_adjtime(secs, microsecs);
	if (interval != 0) {
		/* Re-anchor the periodic deadline to the new interval. */
		if (calend_adjdeadline >= interval)
			calend_adjdeadline -= interval;
		clock_deadline_for_periodic_event(interval, mach_absolute_time(),
												&calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}
	else
		timer_call_cancel(&calend_adjcall);

	simple_unlock(&calend_adjlock);
	splx(s);
}
961
/*
 * calend_adjust_call:
 *
 * Timer call target for the periodic calendar adjustment armed
 * by clock_adjtime().  Applies one adjustment step and re-arms
 * the timer while clock_adjust_calendar() reports a nonzero
 * interval; a zero interval lets the timer lapse.
 */
static void
calend_adjust_call(
	__unused timer_call_param_t		p0,
	__unused timer_call_param_t		p1)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&calend_adjlock);

	interval = clock_adjust_calendar();
	if (interval != 0) {
		/* More adjustment pending: schedule the next step. */
		clock_deadline_for_periodic_event(interval, mach_absolute_time(),
												&calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}

	simple_unlock(&calend_adjlock);
	splx(s);
}
984
985 void
986 clock_wakeup_calendar(void)
987 {
988 thread_call_enter(&calend_wakecall);
989 }
990
991 extern void IOKitResetTime(void); /* XXX */
992
993 static void
994 calend_dowakeup(
995 __unused thread_call_param_t p0,
996 __unused thread_call_param_t p1)
997 {
998
999 IOKitResetTime();
1000 }