]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/clock.c
25fecf46f2306c39298749974cf05d78a0249e4b
[apple/xnu.git] / osfmk / kern / clock.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * File: kern/clock.c
27 * Purpose: Routines for the creation and use of kernel
28 * alarm clock services. This file and the ipc
29 * routines in kern/ipc_clock.c constitute the
30 * machine-independent clock service layer.
31 */
32
33 #include <mach_host.h>
34
35 #include <mach/mach_types.h>
36 #include <mach/boolean.h>
37 #include <mach/processor_info.h>
38 #include <mach/vm_param.h>
39
40 #include <kern/cpu_number.h>
41 #include <kern/misc_protos.h>
42 #include <kern/lock.h>
43 #include <kern/host.h>
44 #include <kern/spl.h>
45 #include <kern/sched_prim.h>
46 #include <kern/thread.h>
47 #include <kern/ipc_host.h>
48 #include <kern/clock.h>
49 #include <kern/zalloc.h>
50
51 #include <ipc/ipc_types.h>
52 #include <ipc/ipc_port.h>
53
54 #include <mach/mach_traps.h>
55 #include <mach/clock_reply.h>
56 #include <mach/mach_time.h>
57
58 #include <mach/clock_server.h>
59 #include <mach/clock_priv_server.h>
60 #include <mach/host_priv_server.h>
61
62 /*
63 * Exported interface
64 */
65
66 #include <mach/clock_server.h>
67 #include <mach/mach_host_server.h>
68
/* local data declarations */
decl_simple_lock_data(static,ClockLock)		/* clock system synchronization */
static struct	zone		*alarm_zone;	/* zone for user alarms */
static struct	alarm		*alrmfree;	/* alarm free list pointer */
static struct	alarm		*alrmdone;	/* alarm done list pointer */
static long			alrm_seqno;	/* uniquely identifies alarms */
static thread_call_data_t	alarm_deliver;	/* thread call delivering done alarms */

/* protects the calendar adjustment state below */
decl_simple_lock_data(static,calend_adjlock)

static timer_call_data_t	calend_adjcall;		/* periodic calendar-adjust callout */
static uint64_t			calend_adjdeadline;	/* next adjust deadline (abs time) */

static thread_call_data_t	calend_wakecall;	/* wakeup-time reset thread call */

/* external declarations */
extern	struct clock	clock_list[];	/* machdep table of clock devices */
extern	int	clock_count;		/* number of entries in clock_list */
87
/* local clock subroutines */

/* release all pending alarms on a clock (used when its time is reset) */
static
void		flush_alarms(
			clock_t			clock);

/* insert an alarm, in time order, on a clock's active alarm list */
static
void		post_alarm(
			clock_t			clock,
			alarm_t			alarm);

/* validate alarm type/time; converts relative times to absolute in place */
static
int		check_time(
			alarm_type_t	alarm_type,
			mach_timespec_t	*alarm_time,
			mach_timespec_t	*clock_time);

/* thread call target: deliver IPC replies for alarms on the done list */
static
void		clock_alarm_deliver(
			thread_call_param_t	p0,
			thread_call_param_t	p1);

/* timer call target: apply one step of a gradual calendar adjustment */
static
void		calend_adjust_call(
			timer_call_param_t	p0,
			timer_call_param_t	p1);

/* thread call target: resynchronize time after a system wakeup */
static
void		calend_dowakeup(
			thread_call_param_t	p0,
			thread_call_param_t	p1);
118
/*
 * Macros to lock/unlock clock system.
 *
 * Wrapped in do { } while (0) so that each invocation expands to a
 * single statement and composes safely with unbraced if/else; the
 * bare two-statement form could silently split under a conditional.
 */
#define LOCK_CLOCK(s)			\
do {					\
	s = splclock();			\
	simple_lock(&ClockLock);	\
} while (0)

#define UNLOCK_CLOCK(s)			\
do {					\
	simple_unlock(&ClockLock);	\
	splx(s);			\
} while (0)
129
/*
 * clock_config:
 *
 * Called once at boot to configure the clock subsystem.
 * Must run on the master CPU (asserted below) before any
 * other clock services are used.
 */
void
clock_config(void)
{
	clock_t			clock;
	register int	i;

	assert(cpu_number() == master_cpu);

	/* set up the clock lock and the deferred-work callouts */
	simple_lock_init(&ClockLock, 0);
	thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);

	simple_lock_init(&calend_adjlock, 0);
	timer_call_setup(&calend_adjcall, calend_adjust_call, NULL);

	thread_call_setup(&calend_wakecall, calend_dowakeup, NULL);

	/*
	 * Configure clock devices.
	 */
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];
		if (clock->cl_ops) {
			/* a failed configure disables the clock entirely */
			if ((*clock->cl_ops->c_config)() == 0)
				clock->cl_ops = 0;
		}
	}

	/*
	 * Initialize the timer callouts.
	 */
	timer_call_initialize();

	/* start alarm sequence numbers at 0 */
	alrm_seqno = 0;
}
170
171 /*
172 * clock_init:
173 *
174 * Called on a processor each time started.
175 */
176 void
177 clock_init(void)
178 {
179 clock_t clock;
180 register int i;
181
182 /*
183 * Initialize basic clock structures.
184 */
185 for (i = 0; i < clock_count; i++) {
186 clock = &clock_list[i];
187 if (clock->cl_ops && clock->cl_ops->c_init)
188 (*clock->cl_ops->c_init)();
189 }
190 }
191
/*
 * Called by machine dependent code
 * to initialize areas dependent on the
 * timebase value. May be called multiple
 * times during start up.
 *
 * Currently just forwards to the scheduler so it can
 * recompute its timebase-derived quantities.
 */
void
clock_timebase_init(void)
{
	sched_timebase_init();
}
203
204 /*
205 * Initialize the clock ipc service facility.
206 */
207 void
208 clock_service_create(void)
209 {
210 clock_t clock;
211 register int i;
212
213 /*
214 * Initialize ipc clock services.
215 */
216 for (i = 0; i < clock_count; i++) {
217 clock = &clock_list[i];
218 if (clock->cl_ops) {
219 ipc_clock_init(clock);
220 ipc_clock_enable(clock);
221 }
222 }
223
224 /*
225 * Perform miscellaneous late
226 * initialization.
227 */
228 i = sizeof(struct alarm);
229 alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");
230 }
231
232 /*
233 * Get the service port on a clock.
234 */
235 kern_return_t
236 host_get_clock_service(
237 host_t host,
238 clock_id_t clock_id,
239 clock_t *clock) /* OUT */
240 {
241 if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
242 *clock = CLOCK_NULL;
243 return (KERN_INVALID_ARGUMENT);
244 }
245
246 *clock = &clock_list[clock_id];
247 if ((*clock)->cl_ops == 0)
248 return (KERN_FAILURE);
249 return (KERN_SUCCESS);
250 }
251
252 /*
253 * Get the control port on a clock.
254 */
255 kern_return_t
256 host_get_clock_control(
257 host_priv_t host_priv,
258 clock_id_t clock_id,
259 clock_t *clock) /* OUT */
260 {
261 if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
262 *clock = CLOCK_NULL;
263 return (KERN_INVALID_ARGUMENT);
264 }
265
266 *clock = &clock_list[clock_id];
267 if ((*clock)->cl_ops == 0)
268 return (KERN_FAILURE);
269 return (KERN_SUCCESS);
270 }
271
272 /*
273 * Get the current clock time.
274 */
275 kern_return_t
276 clock_get_time(
277 clock_t clock,
278 mach_timespec_t *cur_time) /* OUT */
279 {
280 if (clock == CLOCK_NULL)
281 return (KERN_INVALID_ARGUMENT);
282 return ((*clock->cl_ops->c_gettime)(cur_time));
283 }
284
285 /*
286 * Get clock attributes.
287 */
288 kern_return_t
289 clock_get_attributes(
290 clock_t clock,
291 clock_flavor_t flavor,
292 clock_attr_t attr, /* OUT */
293 mach_msg_type_number_t *count) /* IN/OUT */
294 {
295 if (clock == CLOCK_NULL)
296 return (KERN_INVALID_ARGUMENT);
297 if (clock->cl_ops->c_getattr)
298 return(clock->cl_ops->c_getattr(flavor, attr, count));
299 else
300 return (KERN_FAILURE);
301 }
302
303 /*
304 * Set the current clock time.
305 */
306 kern_return_t
307 clock_set_time(
308 clock_t clock,
309 mach_timespec_t new_time)
310 {
311 mach_timespec_t *clock_time;
312
313 if (clock == CLOCK_NULL)
314 return (KERN_INVALID_ARGUMENT);
315 if (clock->cl_ops->c_settime == NULL)
316 return (KERN_FAILURE);
317 clock_time = &new_time;
318 if (BAD_MACH_TIMESPEC(clock_time))
319 return (KERN_INVALID_VALUE);
320
321 /*
322 * Flush all outstanding alarms.
323 */
324 flush_alarms(clock);
325
326 /*
327 * Set the new time.
328 */
329 return (clock->cl_ops->c_settime(clock_time));
330 }
331
332 /*
333 * Set the clock alarm resolution.
334 */
335 kern_return_t
336 clock_set_attributes(
337 clock_t clock,
338 clock_flavor_t flavor,
339 clock_attr_t attr,
340 mach_msg_type_number_t count)
341 {
342 if (clock == CLOCK_NULL)
343 return (KERN_INVALID_ARGUMENT);
344 if (clock->cl_ops->c_setattr)
345 return (clock->cl_ops->c_setattr(flavor, attr, count));
346 else
347 return (KERN_FAILURE);
348 }
349
/*
 * Setup a clock alarm.
 *
 * Validates the request, and either replies immediately (invalid
 * or already-due time) or queues an alarm structure on the clock's
 * active list for later delivery by clock_alarm_intr().
 */
kern_return_t
clock_alarm(
	clock_t					clock,
	alarm_type_t			alarm_type,
	mach_timespec_t			alarm_time,
	ipc_port_t				alarm_port,
	mach_msg_type_name_t	alarm_port_type)
{
	alarm_t					alarm;
	mach_timespec_t			clock_time;
	int						chkstat;
	kern_return_t			reply_code;
	spl_t					s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);
	if (IP_VALID(alarm_port) == 0)
		return (KERN_INVALID_CAPABILITY);

	/*
	 * Check alarm parameters. If parameters are invalid,
	 * send alarm message immediately.
	 * Note: check_time() rewrites a TIME_RELATIVE alarm_time
	 * into absolute time in place.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);
	chkstat = check_time(alarm_type, &alarm_time, &clock_time);
	if (chkstat <= 0) {
		/* <0: invalid request; ==0: already due -- reply now, queue nothing */
		reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
		clock_alarm_reply(alarm_port, alarm_port_type,
				  reply_code, alarm_type, clock_time);
		return (KERN_SUCCESS);
	}

	/*
	 * Get alarm and add to clock alarm list.
	 * The free list is consulted under the clock lock; if it is
	 * empty the lock must be dropped around zalloc() (which may
	 * block) and retaken before touching the lists again.
	 */

	LOCK_CLOCK(s);
	if ((alarm = alrmfree) == 0) {
		UNLOCK_CLOCK(s);
		alarm = (alarm_t) zalloc(alarm_zone);
		if (alarm == 0)
			return (KERN_RESOURCE_SHORTAGE);
		LOCK_CLOCK(s);
	}
	else
		alrmfree = alarm->al_next;

	/* fill in the alarm and insert it in time order (may re-arm hardware) */
	alarm->al_status = ALARM_CLOCK;
	alarm->al_time = alarm_time;
	alarm->al_type = alarm_type;
	alarm->al_port = alarm_port;
	alarm->al_port_type = alarm_port_type;
	alarm->al_clock = clock;
	alarm->al_seqno = alrm_seqno++;
	post_alarm(clock, alarm);
	UNLOCK_CLOCK(s);

	return (KERN_SUCCESS);
}
414
/*
 * Sleep on a clock. System trap. User-level libmach clock_sleep
 * interface call takes a mach_timespec_t sleep_time argument which it
 * converts to sleep_sec and sleep_nsec arguments which are then
 * passed to clock_sleep_trap.
 */
kern_return_t
clock_sleep_trap(
	struct clock_sleep_trap_args *args)
{
	mach_port_name_t	clock_name = args->clock_name;
	sleep_type_t		sleep_type = args->sleep_type;
	int					sleep_sec = args->sleep_sec;
	int					sleep_nsec = args->sleep_nsec;
	mach_vm_address_t	wakeup_time_addr = args->wakeup_time;  
	clock_t				clock;
	mach_timespec_t		swtime;
	kern_return_t		rvalue;

	/*
	 * Convert the trap parameters.  MACH_PORT_NULL selects the
	 * system clock; otherwise the port name is translated (which
	 * may yield CLOCK_NULL -- clock_sleep_internal rejects that).
	 */
	if (clock_name != MACH_PORT_NULL)
		clock = port_name_to_clock(clock_name);
	else
		clock = &clock_list[SYSTEM_CLOCK];

	swtime.tv_sec = sleep_sec;
	swtime.tv_nsec = sleep_nsec;

	/*
	 * Call the actual clock_sleep routine.
	 */
	rvalue = clock_sleep_internal(clock, sleep_type, &swtime);

	/*
	 * Return current time as wakeup time.
	 * NOTE(review): the copyout result is ignored, so a bad user
	 * address silently drops the wakeup time -- confirm intended.
	 */
	if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
		copyout((char *)&swtime, wakeup_time_addr, sizeof(mach_timespec_t));
	}
	return (rvalue);
}
458
/*
 * Kernel internally callable clock sleep routine. The calling
 * thread is suspended until the requested sleep time is reached.
 *
 * Returns KERN_SUCCESS on normal expiry, KERN_ABORTED if the wait
 * was interrupted, or an error for bad arguments.  On success
 * *sleep_time is updated to the (absolute) wakeup time.
 */
kern_return_t
clock_sleep_internal(
	clock_t				clock,
	sleep_type_t		sleep_type,
	mach_timespec_t		*sleep_time)
{
	alarm_t				alarm;
	mach_timespec_t		clock_time;
	kern_return_t		rvalue;
	int					chkstat;
	spl_t				s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);

	/*
	 * Check sleep parameters. If parameters are invalid
	 * return an error, otherwise post alarm request.
	 * check_time() converts a relative sleep_time to absolute
	 * in place; chkstat == 0 means the time is already due.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);

	chkstat = check_time(sleep_type, sleep_time, &clock_time);
	if (chkstat < 0)
		return (KERN_INVALID_VALUE);
	rvalue = KERN_SUCCESS;
	if (chkstat > 0) {
		wait_result_t wait_result;

		/*
		 * Get alarm and add to clock alarm list.
		 * As in clock_alarm(): the free list is checked under the
		 * lock; zalloc() may block, so the lock is dropped around it.
		 */

		LOCK_CLOCK(s);
		if ((alarm = alrmfree) == 0) {
			UNLOCK_CLOCK(s);
			alarm = (alarm_t) zalloc(alarm_zone);
			if (alarm == 0)
				return (KERN_RESOURCE_SHORTAGE);
			LOCK_CLOCK(s);
		}
		else
			alrmfree = alarm->al_next;

		/*
		 * Wait for alarm to occur.
		 * assert_wait must precede posting the alarm so a wakeup
		 * from clock_alarm_intr() cannot be lost.
		 */
		wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
		if (wait_result == THREAD_WAITING) {
			alarm->al_time = *sleep_time;
			alarm->al_status = ALARM_SLEEP;
			post_alarm(clock, alarm);
			UNLOCK_CLOCK(s);

			wait_result = thread_block(THREAD_CONTINUE_NULL);

			/*
			 * Note if alarm expired normally or whether it
			 * was aborted. If aborted, delete alarm from
			 * clock alarm list. Return alarm to free list.
			 */
			LOCK_CLOCK(s);
			if (alarm->al_status != ALARM_DONE) {
				assert(wait_result != THREAD_AWAKENED);
				/* still on the clock list: unlink it here */
				if (((alarm->al_prev)->al_next = alarm->al_next) != NULL)
					(alarm->al_next)->al_prev = alarm->al_prev;
				rvalue = KERN_ABORTED;
			}
			/* al_time holds the actual wakeup time when ALARM_DONE */
			*sleep_time = alarm->al_time;
			alarm->al_status = ALARM_FREE;
		} else {
			/* wait was aborted before we ever blocked */
			assert(wait_result == THREAD_INTERRUPTED);
			assert(alarm->al_status == ALARM_FREE);
			rvalue = KERN_ABORTED;
		}
		/* return the alarm structure to the free list */
		alarm->al_next = alrmfree;
		alrmfree = alarm;
		UNLOCK_CLOCK(s);
	}
	else
		*sleep_time = clock_time;

	return (rvalue);
}
548
549 /*
550 * CLOCK INTERRUPT SERVICE ROUTINES.
551 */
552
/*
 * Service clock alarm interrupts. Called from machine dependent
 * layer at splclock(). The clock_id argument specifies the clock,
 * and the clock_time argument gives that clock's current time.
 */
void
clock_alarm_intr(
	clock_id_t		clock_id,
	mach_timespec_t	*clock_time)
{
	clock_t			clock;
	register alarm_t	alrm1;
	register alarm_t	alrm2;
	mach_timespec_t		*alarm_time;
	spl_t			s;

	clock = &clock_list[clock_id];

	/*
	 * Update clock alarm list. All alarms that are due are moved
	 * to the alarmdone list to be serviced by the alarm_thread.
	 * The list is sorted by time (see post_alarm), so scanning
	 * stops at the first alarm still in the future.
	 */

	LOCK_CLOCK(s);
	/* the list head is overlaid as a pseudo-alarm so unlinking is uniform */
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		alarm_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
			break;

		/*
		 * Alarm has expired, so remove it from the
		 * clock alarm list.
		 */
		if ((alrm1->al_next = alrm2->al_next) != NULL)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_next = 0;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
			thread_wakeup((event_t)alrm2);
		}

		/*
		 * If a clock_alarm() alarm, place the alarm on
		 * the alarm done list and schedule the alarm
		 * delivery mechanism.  The thread call only needs
		 * to be kicked when the done list was empty.
		 */
		else {
			assert(alrm2->al_status == ALARM_CLOCK);
			if ((alrm2->al_next = alrmdone) != NULL)
				alrmdone->al_prev = alrm2;
			else
				thread_call_enter(&alarm_deliver);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
		}
	}

	/*
	 * Setup the clock dependent layer to deliver another
	 * interrupt for the next pending alarm.
	 * (alarm_time still points at the first not-yet-due alarm.)
	 */
	if (alrm2)
		(*clock->cl_ops->c_setalrm)(alarm_time);
	UNLOCK_CLOCK(s);
}
627
628 /*
629 * ALARM DELIVERY ROUTINES.
630 */
631
/*
 * Thread call target: drain the done list, sending the IPC reply
 * for each alarm and recycling the structure onto the free list.
 * The clock lock is dropped around each reply since message send
 * may block.
 */
static void
clock_alarm_deliver(
	__unused thread_call_param_t		p0,
	__unused thread_call_param_t		p1)
{
	register alarm_t	alrm;
	kern_return_t		code;
	spl_t			s;

	LOCK_CLOCK(s);
	while ((alrm = alrmdone) != NULL) {
		/* pop the head of the done list before unlocking */
		if ((alrmdone = alrm->al_next) != NULL)
			alrmdone->al_prev = (alarm_t) &alrmdone;
		UNLOCK_CLOCK(s);

		code = (alrm->al_status == ALARM_DONE? KERN_SUCCESS: KERN_ABORTED);
		if (alrm->al_port != IP_NULL) {
			/* Deliver message to designated port */
			if (IP_VALID(alrm->al_port)) {
				clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
								  alrm->al_type, alrm->al_time);
			}

			/* retake the lock to recycle the alarm */
			LOCK_CLOCK(s);
			alrm->al_status = ALARM_FREE;
			alrm->al_next = alrmfree;
			alrmfree = alrm;
		}
		else
			/* a done-list alarm must have a port (set in clock_alarm) */
			panic("clock_alarm_deliver");
	}

	UNLOCK_CLOCK(s);
}
666
667 /*
668 * CLOCK PRIVATE SERVICING SUBROUTINES.
669 */
670
/*
 * Flush all pending alarms on a clock. All alarms
 * are activated and timestamped correctly, so any
 * programs waiting on alarms/threads will proceed
 * with accurate information.
 */
static
void
flush_alarms(
	clock_t				clock)
{
	register alarm_t	alrm1, alrm2;
	spl_t				s;

	/*
	 * Flush all outstanding alarms.
	 */
	LOCK_CLOCK(s);
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		/*
		 * Remove alarm from the clock alarm list.
		 */
		if ((alrm1->al_next = alrm2->al_next) != NULL)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 * NOTE(review): the status is NOT set to ALARM_DONE here,
		 * so the woken sleeper in clock_sleep_internal() takes its
		 * unlink path using an al_prev that is stale after this
		 * removal -- confirm this cannot corrupt the list.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_next = 0;
			thread_wakeup((event_t)alrm2);
		}
		else {
			/*
			 * If a clock_alarm() alarm, place the alarm on
			 * the alarm done list and wakeup the dedicated
			 * kernel alarm_thread to service the alarm.
			 * NOTE(review): delivery elsewhere uses
			 * thread_call_enter(&alarm_deliver) (see
			 * clock_alarm_intr); this thread_wakeup on &alrmdone
			 * looks vestigial -- verify something still listens.
			 */
			assert(alrm2->al_status == ALARM_CLOCK);
			if ((alrm2->al_next = alrmdone) != NULL)
				alrmdone->al_prev = alrm2;
			else
				thread_wakeup((event_t)&alrmdone);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
		}
	}
	UNLOCK_CLOCK(s);
}
722
/*
 * Post an alarm on a clock's active alarm list. The alarm is
 * inserted in time-order into the clock's active alarm list.
 * Always called from within a LOCK_CLOCK() code section.
 */
static
void
post_alarm(
	clock_t				clock,
	alarm_t				alarm)
{
	register alarm_t	alrm1, alrm2;
	mach_timespec_t		*alarm_time;
	mach_timespec_t		*queue_time;

	/*
	 * Traverse alarm list until queue time is greater
	 * than alarm time, then insert alarm.
	 */
	alarm_time = &alarm->al_time;
	/* list head is overlaid as a pseudo-alarm; alrm1 trails alrm2 */
	alrm1 = (alarm_t) &clock->cl_alarm;
	while ((alrm2 = alrm1->al_next) != NULL) {
		queue_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
			break;
		alrm1 = alrm2;
	}
	/* doubly-link the new alarm between alrm1 and alrm2 (alrm2 may be NULL) */
	alrm1->al_next = alarm;
	alarm->al_next = alrm2;
	alarm->al_prev = alrm1;
	if (alrm2)
		alrm2->al_prev = alarm;

	/*
	 * If the inserted alarm is the 'earliest' alarm,
	 * reset the device layer alarm time accordingly.
	 */
	if (clock->cl_alarm.al_next == alarm)
		(*clock->cl_ops->c_setalrm)(alarm_time);
}
763
/*
 * Check the validity of 'alarm_time' and 'alarm_type'. If either
 * argument is invalid, return a negative value. If the 'alarm_time'
 * is now, return a 0 value. If the 'alarm_time' is in the future,
 * return a positive value.
 *
 * Side effect: a TIME_RELATIVE alarm_time is converted to absolute
 * time in place -- callers depend on this.
 */
static
int
check_time(
	alarm_type_t		alarm_type,
	mach_timespec_t		*alarm_time,
	mach_timespec_t		*clock_time)
{
	int					result;

	if (BAD_ALRMTYPE(alarm_type))
		return (-1);
	if (BAD_MACH_TIMESPEC(alarm_time))
		return (-1);
	if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
		ADD_MACH_TIMESPEC(alarm_time, clock_time);

	result = CMP_MACH_TIMESPEC(alarm_time, clock_time);

	/* clamp "already past" to 0 ("now"); validity errors returned above */
	return ((result >= 0)? result: 0);
}
790
791 mach_timespec_t
792 clock_get_system_value(void)
793 {
794 clock_t clock = &clock_list[SYSTEM_CLOCK];
795 mach_timespec_t value;
796
797 (void) (*clock->cl_ops->c_gettime)(&value);
798
799 return value;
800 }
801
802 mach_timespec_t
803 clock_get_calendar_value(void)
804 {
805 clock_t clock = &clock_list[CALENDAR_CLOCK];
806 mach_timespec_t value = MACH_TIMESPEC_ZERO;
807
808 (void) (*clock->cl_ops->c_gettime)(&value);
809
810 return value;
811 }
812
813 void
814 clock_deadline_for_periodic_event(
815 uint64_t interval,
816 uint64_t abstime,
817 uint64_t *deadline)
818 {
819 assert(interval != 0);
820
821 *deadline += interval;
822
823 if (*deadline <= abstime) {
824 *deadline = abstime + interval;
825 abstime = mach_absolute_time();
826
827 if (*deadline <= abstime)
828 *deadline = abstime + interval;
829 }
830 }
831
/*
 * Legacy trap returning the timebase conversion factors to user
 * space through five separate user pointers.  delta and the
 * processor-to-absolute ratio are reported as the constant 1
 * (i.e. 1/1); only abs-to-ns numer/denom carry real data.
 * NOTE(review): copyout results are ignored throughout -- a bad
 * user address fails silently; confirm intended for this trap.
 */
void
mk_timebase_info_trap(
	struct mk_timebase_info_trap_args *args)
{
	uint32_t				*delta = args->delta;
	uint32_t				*abs_to_ns_numer = args->abs_to_ns_numer;
	uint32_t				*abs_to_ns_denom = args->abs_to_ns_denom;
	uint32_t				*proc_to_abs_numer = args->proc_to_abs_numer;
	uint32_t				*proc_to_abs_denom = args->proc_to_abs_denom;
	mach_timebase_info_data_t	info;
	uint32_t				one = 1;

	clock_timebase_info(&info);

	copyout((void *)&one, CAST_USER_ADDR_T(delta), sizeof (uint32_t));

	copyout((void *)&info.numer, CAST_USER_ADDR_T(abs_to_ns_numer), sizeof (uint32_t));
	copyout((void *)&info.denom, CAST_USER_ADDR_T(abs_to_ns_denom), sizeof (uint32_t));

	copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_numer), sizeof (uint32_t));
	copyout((void *)&one, CAST_USER_ADDR_T(proc_to_abs_denom), sizeof (uint32_t));
}
854
/*
 * Trap implementing mach_timebase_info(): copy the timebase
 * numer/denom pair out to the caller-supplied address.
 * NOTE(review): the copyout result is ignored and KERN_SUCCESS is
 * returned unconditionally -- confirm intended.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t 			out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
868
869 static void
870 mach_wait_until_continue(
871 __unused void *parameter,
872 wait_result_t wresult)
873 {
874 thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
875 /*NOTREACHED*/
876 }
877
878 kern_return_t
879 mach_wait_until_trap(
880 struct mach_wait_until_trap_args *args)
881 {
882 uint64_t deadline = args->deadline;
883 wait_result_t wresult;
884
885 wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
886 if (wresult == THREAD_WAITING)
887 wresult = thread_block(mach_wait_until_continue);
888
889 return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
890 }
891
/*
 * Delay primitives.
 */

/*
 * Delay until an absolute deadline.  Short delays (less than about
 * eight context-switch times), or contexts that cannot block
 * (preemption disabled, interrupts off), spin in the machine layer;
 * otherwise the thread blocks with an uninterruptible wait.
 */
void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	if (	(deadline - now) < (8 * sched_cswtime)	||
			get_preemption_level() != 0				||
			ml_get_interrupts_enabled() == FALSE	)
		machine_delay_until(deadline);
	else {
		/* wake one switch-time early; the residue is absorbed by dispatch */
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

		thread_block(THREAD_CONTINUE_NULL);
	}
}
914
915 void
916 delay_for_interval(
917 uint32_t interval,
918 uint32_t scale_factor)
919 {
920 uint64_t end;
921
922 clock_interval_to_deadline(interval, scale_factor, &end);
923
924 clock_delay_until(end);
925 }
926
927 void
928 delay(
929 int usec)
930 {
931 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
932 }
933
/*
 * Begin (or cancel) a gradual calendar adjustment.  The machdep
 * layer returns the step interval; a non-zero interval schedules
 * the periodic calend_adjust_call() callout, zero cancels it.
 * secs/microsecs are IN/OUT per clock_set_calendar_adjtime().
 */
void
clock_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&calend_adjlock);

	interval = clock_set_calendar_adjtime(secs, microsecs);
	if (interval != 0) {
		/* rewind the deadline one interval before re-deriving it */
		if (calend_adjdeadline >= interval)
			calend_adjdeadline -= interval;
		clock_deadline_for_periodic_event(interval, mach_absolute_time(),
														&calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}
	else
		timer_call_cancel(&calend_adjcall);

	simple_unlock(&calend_adjlock);
	splx(s);
}
960
/*
 * Timer call target: apply one step of the in-progress calendar
 * adjustment and reschedule itself until clock_adjust_calendar()
 * reports (by returning 0) that the adjustment is complete.
 */
static void
calend_adjust_call(
	__unused timer_call_param_t		p0,
	__unused timer_call_param_t		p1)
{
	uint32_t		interval;
	spl_t			s;

	s = splclock();
	simple_lock(&calend_adjlock);

	interval = clock_adjust_calendar();
	if (interval != 0) {
		/* more adjusting to do: arm the next periodic step */
		clock_deadline_for_periodic_event(interval, mach_absolute_time(),
														&calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}

	simple_unlock(&calend_adjlock);
	splx(s);
}
983
/*
 * Called on system wakeup: defer the calendar resynchronization
 * to a thread call, since it cannot run at interrupt level.
 */
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}
989
extern void		IOKitResetTime(void); /* XXX */

/*
 * Thread call target for clock_wakeup_calendar(): ask IOKit to
 * reset kernel time from the platform RTC after a wakeup.
 */
static void
calend_dowakeup(
	__unused thread_call_param_t		p0,
	__unused thread_call_param_t		p1)
{

	IOKitResetTime();
}