/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 *	File:		kern/clock.c
 *	Purpose:	Routines for the creation and use of kernel
 *			alarm clock services. This file and the ipc
 *			routines in kern/ipc_clock.c constitute the
 *			machine-independent clock service layer.
 */

#include <cpus.h>
#include <mach_host.h>

#include <mach/boolean.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <machine/mach_param.h>
#include <kern/cpu_number.h>
#include <kern/misc_protos.h>
#include <kern/lock.h>
#include <kern/host.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/ipc_host.h>
#include <kern/clock.h>
#include <kern/zalloc.h>
#include <ipc/ipc_port.h>

#include <mach/mach_syscalls.h>
#include <mach/clock_reply.h>
#include <mach/mach_time.h>

#include <kern/mk_timer.h>

/*
 * Exported interface
 */

#include <mach/clock_server.h>
#include <mach/mach_host_server.h>

/* local data declarations */
decl_simple_lock_data(static,ClockLock)		/* clock system synchronization */
static struct zone *alarm_zone;			/* zone for user alarms */
static struct alarm *alrmfree;			/* alarm free list pointer */
static struct alarm *alrmdone;			/* alarm done list pointer */
static long alrm_seqno;				/* uniquely identifies alarms */
static thread_call_data_t alarm_deliver;

decl_simple_lock_data(static,calend_adjlock)
static int64_t calend_adjtotal;
static uint32_t calend_adjdelta;

static timer_call_data_t calend_adjcall;
static uint64_t calend_adjinterval, calend_adjdeadline;

/* backwards compatibility */
int hz = HZ;				/* GET RID OF THIS !!! */
int tick = (1000000 / HZ);		/* GET RID OF THIS !!! */

/* external declarations */
extern struct clock clock_list[];
extern int clock_count;

/* local clock subroutines */
static
void flush_alarms(
	clock_t clock);

static
void post_alarm(
	clock_t clock,
	alarm_t alarm);

static
int check_time(
	alarm_type_t alarm_type,
	mach_timespec_t *alarm_time,
	mach_timespec_t *clock_time);

static
void clock_alarm_deliver(
	thread_call_param_t p0,
	thread_call_param_t p1);

static
void clock_calend_adjust(
	timer_call_param_t p0,
	timer_call_param_t p1);

/*
 * Macros to lock/unlock clock system.
 */
#define LOCK_CLOCK(s)			\
	s = splclock();			\
	simple_lock(&ClockLock);

#define UNLOCK_CLOCK(s)			\
	simple_unlock(&ClockLock);	\
	splx(s);

/*
 * Configure the clock system. (Not sure if we need this,
 * as separate from clock_init()).
 */
void
clock_config(void)
{
	clock_t clock;
	register int i;

	if (cpu_number() != master_cpu)
		panic("clock_config");

	/*
	 * Configure clock devices.
	 */
	simple_lock_init(&calend_adjlock, ETAP_MISC_CLOCK);
	simple_lock_init(&ClockLock, ETAP_MISC_CLOCK);
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];
		if (clock->cl_ops) {
			if ((*clock->cl_ops->c_config)() == 0)
				clock->cl_ops = 0;
		}
	}

	/* start alarm sequence numbers at 0 */
	alrm_seqno = 0;
}

/*
 * Initialize the clock system.
 */
void
clock_init(void)
{
	clock_t clock;
	register int i;

	/*
	 * Initialize basic clock structures.
	 */
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];
		if (clock->cl_ops)
			(*clock->cl_ops->c_init)();
	}
}

/*
 * Initialize the clock ipc service facility.
 */
void
clock_service_create(void)
{
	clock_t clock;
	register int i;

	mk_timer_initialize();

	/*
	 * Initialize ipc clock services.
	 */
	for (i = 0; i < clock_count; i++) {
		clock = &clock_list[i];
		if (clock->cl_ops) {
			ipc_clock_init(clock);
			ipc_clock_enable(clock);
		}
	}

	timer_call_setup(&calend_adjcall, clock_calend_adjust, NULL);

	/*
	 * Initialize clock service alarms.
	 */
	i = sizeof(struct alarm);
	alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");

	thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);
}

/*
 * Get the service port on a clock.
 */
kern_return_t
host_get_clock_service(
	host_t host,
	clock_id_t clock_id,
	clock_t *clock)		/* OUT */
{
	if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
		*clock = CLOCK_NULL;
		return (KERN_INVALID_ARGUMENT);
	}

	*clock = &clock_list[clock_id];
	if ((*clock)->cl_ops == 0)
		return (KERN_FAILURE);
	return (KERN_SUCCESS);
}
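
/*
 * Illustrative sketch (not part of the kernel source): how a user-level
 * client typically reaches the routine above through the Mach clock
 * interface. Error checking is omitted and exact headers may vary by
 * release.
 *
 *	#include <mach/mach.h>
 *	#include <mach/clock.h>
 *
 *	clock_serv_t clk;
 *	mach_timespec_t now;
 *
 *	host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clk);
 *	clock_get_time(clk, &now);	(seconds/nanoseconds from the system clock)
 *	mach_port_deallocate(mach_task_self(), clk);
 */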

/*
 * Get the control port on a clock.
 */
kern_return_t
host_get_clock_control(
	host_priv_t host_priv,
	clock_id_t clock_id,
	clock_t *clock)		/* OUT */
{
	if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
		*clock = CLOCK_NULL;
		return (KERN_INVALID_ARGUMENT);
	}

	*clock = &clock_list[clock_id];
	if ((*clock)->cl_ops == 0)
		return (KERN_FAILURE);
	return (KERN_SUCCESS);
}

/*
 * Get the current clock time.
 */
kern_return_t
clock_get_time(
	clock_t clock,
	mach_timespec_t *cur_time)	/* OUT */
{
	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	return ((*clock->cl_ops->c_gettime)(cur_time));
}

/*
 * Get clock attributes.
 */
kern_return_t
clock_get_attributes(
	clock_t clock,
	clock_flavor_t flavor,
	clock_attr_t attr,		/* OUT */
	mach_msg_type_number_t *count)	/* IN/OUT */
{
	kern_return_t (*getattr)(
		clock_flavor_t flavor,
		clock_attr_t attr,
		mach_msg_type_number_t *count);

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (getattr = clock->cl_ops->c_getattr)
		return ((*getattr)(flavor, attr, count));
	else
		return (KERN_FAILURE);
}

/*
 * Set the current clock time.
 */
kern_return_t
clock_set_time(
	clock_t clock,
	mach_timespec_t new_time)
{
	mach_timespec_t *clock_time;
	kern_return_t (*settime)(
		mach_timespec_t *clock_time);
	extern kern_return_t
		calend_settime(
			mach_timespec_t *clock_time);

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if ((settime = clock->cl_ops->c_settime) == 0)
		return (KERN_FAILURE);
	if (settime == calend_settime)
		return (KERN_FAILURE);
	clock_time = &new_time;
	if (BAD_MACH_TIMESPEC(clock_time))
		return (KERN_INVALID_VALUE);

	/*
	 * Flush all outstanding alarms.
	 */
	flush_alarms(clock);

	/*
	 * Set the new time.
	 */
	return ((*settime)(clock_time));
}

/*
 * Set the clock alarm resolution.
 */
kern_return_t
clock_set_attributes(
	clock_t clock,
	clock_flavor_t flavor,
	clock_attr_t attr,
	mach_msg_type_number_t count)
{
	kern_return_t (*setattr)(
		clock_flavor_t flavor,
		clock_attr_t attr,
		mach_msg_type_number_t count);

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (setattr = clock->cl_ops->c_setattr)
		return ((*setattr)(flavor, attr, count));
	else
		return (KERN_FAILURE);
}

/*
 * Setup a clock alarm.
 */
kern_return_t
clock_alarm(
	clock_t clock,
	alarm_type_t alarm_type,
	mach_timespec_t alarm_time,
	ipc_port_t alarm_port,
	mach_msg_type_name_t alarm_port_type)
{
	alarm_t alarm;
	mach_timespec_t clock_time;
	int chkstat;
	kern_return_t reply_code;
	spl_t s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);
	if (IP_VALID(alarm_port) == 0)
		return (KERN_INVALID_CAPABILITY);

	/*
	 * Check alarm parameters. If parameters are invalid,
	 * send alarm message immediately.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);
	chkstat = check_time(alarm_type, &alarm_time, &clock_time);
	if (chkstat <= 0) {
		reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
		clock_alarm_reply(alarm_port, alarm_port_type,
				  reply_code, alarm_type, clock_time);
		return (KERN_SUCCESS);
	}

	/*
	 * Get alarm and add to clock alarm list.
	 */

	LOCK_CLOCK(s);
	if ((alarm = alrmfree) == 0) {
		UNLOCK_CLOCK(s);
		alarm = (alarm_t) zalloc(alarm_zone);
		if (alarm == 0)
			return (KERN_RESOURCE_SHORTAGE);
		LOCK_CLOCK(s);
	}
	else
		alrmfree = alarm->al_next;

	alarm->al_status = ALARM_CLOCK;
	alarm->al_time = alarm_time;
	alarm->al_type = alarm_type;
	alarm->al_port = alarm_port;
	alarm->al_port_type = alarm_port_type;
	alarm->al_clock = clock;
	alarm->al_seqno = alrm_seqno++;
	post_alarm(clock, alarm);
	UNLOCK_CLOCK(s);

	return (KERN_SUCCESS);
}

/*
 * Sleep on a clock. System trap. User-level libmach clock_sleep
 * interface call takes a mach_timespec_t sleep_time argument which it
 * converts to sleep_sec and sleep_nsec arguments which are then
 * passed to clock_sleep_trap.
 */
kern_return_t
clock_sleep_trap(
	mach_port_name_t clock_name,
	sleep_type_t sleep_type,
	int sleep_sec,
	int sleep_nsec,
	mach_timespec_t *wakeup_time)
{
	clock_t clock;
	mach_timespec_t swtime;
	kern_return_t rvalue;

	/*
	 * Convert the trap parameters.
	 */
	if (clock_name != MACH_PORT_NULL)
		clock = port_name_to_clock(clock_name);
	else
		clock = &clock_list[SYSTEM_CLOCK];

	swtime.tv_sec = sleep_sec;
	swtime.tv_nsec = sleep_nsec;

	/*
	 * Call the actual clock_sleep routine.
	 */
	rvalue = clock_sleep_internal(clock, sleep_type, &swtime);

	/*
	 * Return current time as wakeup time.
	 */
	if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
		copyout((char *)&swtime, (char *)wakeup_time,
			sizeof(mach_timespec_t));
	}
	return (rvalue);
}
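
/*
 * Illustrative sketch (assumption: the user-level libmach clock_sleep
 * wrapper described in the comment above, commonly declared in
 * <mach/clock.h>; the exact prototype may differ by release). A relative
 * 500 ms sleep on the system clock, with the wakeup time copied back as
 * this trap does:
 *
 *	clock_serv_t clk;
 *	mach_timespec_t delay = { 0, 500000000 };	(500 ms)
 *	mach_timespec_t wake;
 *
 *	host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clk);
 *	clock_sleep(clk, TIME_RELATIVE, delay, &wake);
 */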

/*
 * Kernel internally callable clock sleep routine. The calling
 * thread is suspended until the requested sleep time is reached.
 */
kern_return_t
clock_sleep_internal(
	clock_t clock,
	sleep_type_t sleep_type,
	mach_timespec_t *sleep_time)
{
	alarm_t alarm;
	mach_timespec_t clock_time;
	kern_return_t rvalue;
	int chkstat;
	spl_t s;

	if (clock == CLOCK_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (clock->cl_ops->c_setalrm == 0)
		return (KERN_FAILURE);

	/*
	 * Check sleep parameters. If parameters are invalid
	 * return an error, otherwise post alarm request.
	 */
	(*clock->cl_ops->c_gettime)(&clock_time);

	chkstat = check_time(sleep_type, sleep_time, &clock_time);
	if (chkstat < 0)
		return (KERN_INVALID_VALUE);
	rvalue = KERN_SUCCESS;
	if (chkstat > 0) {
		wait_result_t wait_result;

		/*
		 * Get alarm and add to clock alarm list.
		 */

		LOCK_CLOCK(s);
		if ((alarm = alrmfree) == 0) {
			UNLOCK_CLOCK(s);
			alarm = (alarm_t) zalloc(alarm_zone);
			if (alarm == 0)
				return (KERN_RESOURCE_SHORTAGE);
			LOCK_CLOCK(s);
		}
		else
			alrmfree = alarm->al_next;

		/*
		 * Wait for alarm to occur.
		 */
		wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
		if (wait_result == THREAD_WAITING) {
			alarm->al_time = *sleep_time;
			alarm->al_status = ALARM_SLEEP;
			post_alarm(clock, alarm);
			UNLOCK_CLOCK(s);

			wait_result = thread_block(THREAD_CONTINUE_NULL);

			/*
			 * Note if alarm expired normally or whether it
			 * was aborted. If aborted, delete alarm from
			 * clock alarm list. Return alarm to free list.
			 */
			LOCK_CLOCK(s);
			if (alarm->al_status != ALARM_DONE) {
				assert(wait_result != THREAD_AWAKENED);
				if ((alarm->al_prev)->al_next = alarm->al_next)
					(alarm->al_next)->al_prev = alarm->al_prev;
				rvalue = KERN_ABORTED;
			}
			*sleep_time = alarm->al_time;
			alarm->al_status = ALARM_FREE;
		} else {
			assert(wait_result == THREAD_INTERRUPTED);
			assert(alarm->al_status == ALARM_FREE);
			rvalue = KERN_ABORTED;
		}
		alarm->al_next = alrmfree;
		alrmfree = alarm;
		UNLOCK_CLOCK(s);
	}
	else
		*sleep_time = clock_time;

	return (rvalue);
}

/*
 * CLOCK INTERRUPT SERVICE ROUTINES.
 */

/*
 * Service clock alarm interrupts. Called from machine dependent
 * layer at splclock(). The clock_id argument specifies the clock,
 * and the clock_time argument gives that clock's current time.
 */
void
clock_alarm_intr(
	clock_id_t clock_id,
	mach_timespec_t *clock_time)
{
	clock_t clock;
	register alarm_t alrm1;
	register alarm_t alrm2;
	mach_timespec_t *alarm_time;
	spl_t s;

	clock = &clock_list[clock_id];

	/*
	 * Update clock alarm list. All alarms that are due are moved
	 * to the alarmdone list to be serviced by the alarm_thread.
	 */

	LOCK_CLOCK(s);
	alrm1 = (alarm_t) &clock->cl_alarm;
	while (alrm2 = alrm1->al_next) {
		alarm_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
			break;

		/*
		 * Alarm has expired, so remove it from the
		 * clock alarm list.
		 */
		if (alrm1->al_next = alrm2->al_next)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_next = 0;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
			thread_wakeup((event_t)alrm2);
		}

		/*
		 * If a clock_alarm() alarm, place the alarm on
		 * the alarm done list and schedule the alarm
		 * delivery mechanism.
		 */
		else {
			assert(alrm2->al_status == ALARM_CLOCK);
			if (alrm2->al_next = alrmdone)
				alrmdone->al_prev = alrm2;
			else
				thread_call_enter(&alarm_deliver);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
			alrm2->al_status = ALARM_DONE;
			alrm2->al_time = *clock_time;
		}
	}

	/*
	 * Setup the clock dependent layer to deliver another
	 * interrupt for the next pending alarm.
	 */
	if (alrm2)
		(*clock->cl_ops->c_setalrm)(alarm_time);
	UNLOCK_CLOCK(s);
}

/*
 * ALARM DELIVERY ROUTINES.
 */

static void
clock_alarm_deliver(
	thread_call_param_t p0,
	thread_call_param_t p1)
{
	register alarm_t alrm;
	kern_return_t code;
	spl_t s;

	LOCK_CLOCK(s);
	while (alrm = alrmdone) {
		if (alrmdone = alrm->al_next)
			alrmdone->al_prev = (alarm_t) &alrmdone;
		UNLOCK_CLOCK(s);

		code = (alrm->al_status == ALARM_DONE? KERN_SUCCESS: KERN_ABORTED);
		if (alrm->al_port != IP_NULL) {
			/* Deliver message to designated port */
			if (IP_VALID(alrm->al_port)) {
				clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
						  alrm->al_type, alrm->al_time);
			}

			LOCK_CLOCK(s);
			alrm->al_status = ALARM_FREE;
			alrm->al_next = alrmfree;
			alrmfree = alrm;
		}
		else
			panic("clock_alarm_deliver");
	}

	UNLOCK_CLOCK(s);
}

/*
 * CLOCK PRIVATE SERVICING SUBROUTINES.
 */

/*
 * Flush all pending alarms on a clock. All alarms
 * are activated and timestamped correctly, so any
 * programs waiting on alarms/threads will proceed
 * with accurate information.
 */
static
void
flush_alarms(
	clock_t clock)
{
	register alarm_t alrm1, alrm2;
	spl_t s;

	/*
	 * Flush all outstanding alarms.
	 */
	LOCK_CLOCK(s);
	alrm1 = (alarm_t) &clock->cl_alarm;
	while (alrm2 = alrm1->al_next) {
		/*
		 * Remove alarm from the clock alarm list.
		 */
		if (alrm1->al_next = alrm2->al_next)
			(alrm1->al_next)->al_prev = alrm1;

		/*
		 * If a clock_sleep() alarm, wakeup the thread
		 * which issued the clock_sleep() call.
		 */
		if (alrm2->al_status == ALARM_SLEEP) {
			alrm2->al_next = 0;
			thread_wakeup((event_t)alrm2);
		}
		else {
			/*
			 * If a clock_alarm() alarm, place the alarm on
			 * the alarm done list and wakeup the dedicated
			 * kernel alarm_thread to service the alarm.
			 */
			assert(alrm2->al_status == ALARM_CLOCK);
			if (alrm2->al_next = alrmdone)
				alrmdone->al_prev = alrm2;
			else
				thread_wakeup((event_t)&alrmdone);
			alrm2->al_prev = (alarm_t) &alrmdone;
			alrmdone = alrm2;
		}
	}
	UNLOCK_CLOCK(s);
}

/*
 * Post an alarm on a clock's active alarm list. The alarm is
 * inserted in time-order into the clock's active alarm list.
 * Always called from within a LOCK_CLOCK() code section.
 */
static
void
post_alarm(
	clock_t clock,
	alarm_t alarm)
{
	register alarm_t alrm1, alrm2;
	mach_timespec_t *alarm_time;
	mach_timespec_t *queue_time;

	/*
	 * Traverse alarm list until queue time is greater
	 * than alarm time, then insert alarm.
	 */
	alarm_time = &alarm->al_time;
	alrm1 = (alarm_t) &clock->cl_alarm;
	while (alrm2 = alrm1->al_next) {
		queue_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
			break;
		alrm1 = alrm2;
	}
	alrm1->al_next = alarm;
	alarm->al_next = alrm2;
	alarm->al_prev = alrm1;
	if (alrm2)
		alrm2->al_prev = alarm;

	/*
	 * If the inserted alarm is the 'earliest' alarm,
	 * reset the device layer alarm time accordingly.
	 */
	if (clock->cl_alarm.al_next == alarm)
		(*clock->cl_ops->c_setalrm)(alarm_time);
}

/*
 * Check the validity of 'alarm_time' and 'alarm_type'. If either
 * argument is invalid, return a negative value. If the 'alarm_time'
 * is now, return a 0 value. If the 'alarm_time' is in the future,
 * return a positive value.
 */
static
int
check_time(
	alarm_type_t alarm_type,
	mach_timespec_t *alarm_time,
	mach_timespec_t *clock_time)
{
	int result;

	if (BAD_ALRMTYPE(alarm_type))
		return (-1);
	if (BAD_MACH_TIMESPEC(alarm_time))
		return (-1);
	if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
		ADD_MACH_TIMESPEC(alarm_time, clock_time);

	result = CMP_MACH_TIMESPEC(alarm_time, clock_time);

	return ((result >= 0)? result: 0);
}
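
/*
 * Worked example of the conversion above: with clock_time = { 100, 0 } and
 * a TIME_RELATIVE alarm_time of { 2, 500000000 }, ADD_MACH_TIMESPEC() turns
 * alarm_time into the absolute value { 102, 500000000 }. The comparison
 * against clock_time is then positive, so the caller posts the alarm rather
 * than replying immediately.
 */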

mach_timespec_t
clock_get_system_value(void)
{
	clock_t clock = &clock_list[SYSTEM_CLOCK];
	mach_timespec_t value;

	(void) (*clock->cl_ops->c_gettime)(&value);

	return value;
}

mach_timespec_t
clock_get_calendar_value(void)
{
	clock_t clock = &clock_list[CALENDAR_CLOCK];
	mach_timespec_t value = MACH_TIMESPEC_ZERO;

	(void) (*clock->cl_ops->c_gettime)(&value);

	return value;
}

void
clock_set_calendar_value(
	mach_timespec_t value)
{
	clock_t clock = &clock_list[CALENDAR_CLOCK];

	(void) (*clock->cl_ops->c_settime)(&value);
}

void
clock_deadline_for_periodic_event(
	uint64_t interval,
	uint64_t abstime,
	uint64_t *deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime;
		clock_get_uptime(&abstime);
		*deadline += interval;

		if (*deadline <= abstime) {
			*deadline = abstime;
			*deadline += interval;
		}
	}
}
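
/*
 * Illustrative sketch (hypothetical names my_call, my_func, my_interval,
 * my_deadline): the rearm pattern this routine supports, mirroring the
 * calendar adjustment code below. The routine pushes the deadline forward
 * by one interval and, if that still lies in the past, resynchronizes to
 * the current uptime so missed periods are skipped rather than replayed.
 * my_func has the (timer_call_param_t, timer_call_param_t) signature used
 * by clock_calend_adjust() below.
 *
 *	static timer_call_data_t my_call;
 *	static uint64_t my_interval, my_deadline;
 *
 *	(once, at initialization: a 10 ms period in absolute-time units)
 *	timer_call_setup(&my_call, my_func, NULL);
 *	clock_interval_to_absolutetime_interval(10000, NSEC_PER_USEC, &my_interval);
 *
 *	(when arming, and again from my_func() after each expiration)
 *	uint64_t abstime;
 *	clock_get_uptime(&abstime);
 *	clock_deadline_for_periodic_event(my_interval, abstime, &my_deadline);
 *	timer_call_enter(&my_call, my_deadline);
 */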

void
mk_timebase_info(
	uint32_t *delta,
	uint32_t *abs_to_ns_numer,
	uint32_t *abs_to_ns_denom,
	uint32_t *proc_to_abs_numer,
	uint32_t *proc_to_abs_denom)
{
	mach_timebase_info_data_t info;
	uint32_t one = 1;

	clock_timebase_info(&info);

	copyout((void *)&one, (void *)delta, sizeof (uint32_t));

	copyout((void *)&info.numer, (void *)abs_to_ns_numer, sizeof (uint32_t));
	copyout((void *)&info.denom, (void *)abs_to_ns_denom, sizeof (uint32_t));

	copyout((void *)&one, (void *)proc_to_abs_numer, sizeof (uint32_t));
	copyout((void *)&one, (void *)proc_to_abs_denom, sizeof (uint32_t));
}

kern_return_t
mach_timebase_info(
	mach_timebase_info_t out_info)
{
	mach_timebase_info_data_t info;

	clock_timebase_info(&info);

	copyout((void *)&info, (void *)out_info, sizeof (info));

	return (KERN_SUCCESS);
}
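
/*
 * Illustrative sketch (user level): converting mach_absolute_time() units
 * to nanoseconds with the numer/denom ratio reported above. The minimal
 * form below can overflow the 64-bit multiply for very long intervals.
 *
 *	#include <mach/mach_time.h>
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *
 *	uint64_t start = mach_absolute_time();
 *	(... work ...)
 *	uint64_t elapsed_ns = (mach_absolute_time() - start) * tb.numer / tb.denom;
 */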

kern_return_t
mach_wait_until(
	uint64_t deadline)
{
	int wait_result;

	wait_result = assert_wait((event_t)&mach_wait_until, THREAD_ABORTSAFE);
	if (wait_result == THREAD_WAITING) {
		thread_set_timer_deadline(deadline);
		wait_result = thread_block(THREAD_CONTINUE_NULL);
		if (wait_result != THREAD_TIMED_OUT)
			thread_cancel_timer();
	}

	return ((wait_result == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
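
/*
 * Illustrative sketch (user level): waiting until an absolute deadline,
 * using the inverse of the timebase conversion shown above to turn
 * nanoseconds into mach_absolute_time() units.
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *
 *	uint64_t deadline = mach_absolute_time() +
 *			(250000000ULL * tb.denom) / tb.numer;	(250 ms from now)
 *	mach_wait_until(deadline);	(KERN_ABORTED if the wait is interrupted)
 */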

int64_t
clock_set_calendar_adjtime(
	int64_t total,
	uint32_t delta)
{
	int64_t ototal;
	spl_t s;

	s = splclock();
	simple_lock(&calend_adjlock);

	if (calend_adjinterval == 0)
		clock_interval_to_absolutetime_interval(10000, NSEC_PER_USEC,
							&calend_adjinterval);

	ototal = calend_adjtotal;

	if (total != 0) {
		uint64_t abstime;

		if (total > 0) {
			if (delta > total)
				delta = total;
		}
		else {
			if (delta > -total)
				delta = -total;
		}

		calend_adjtotal = total;
		calend_adjdelta = delta;

		if (calend_adjdeadline >= calend_adjinterval)
			calend_adjdeadline -= calend_adjinterval;
		clock_get_uptime(&abstime);
		clock_deadline_for_periodic_event(calend_adjinterval, abstime,
						  &calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}
	else {
		calend_adjtotal = 0;

		timer_call_cancel(&calend_adjcall);
	}

	simple_unlock(&calend_adjlock);
	splx(s);

	return (ototal);
}
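
/*
 * Worked example of the slew set up above (illustrative values): a request
 * of total = +3,000,000 ns with delta = 5,000 ns arms calend_adjcall on the
 * 10 ms period computed above. Each firing of clock_calend_adjust() below
 * then advances the calendar clock by 5,000 ns and reduces the remaining
 * total, so the 3 ms correction is spread over roughly 600 periods (about
 * six seconds) instead of being applied as a single step.
 */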

static void
clock_calend_adjust(
	timer_call_param_t p0,
	timer_call_param_t p1)
{
	spl_t s;

	s = splclock();
	simple_lock(&calend_adjlock);

	if (calend_adjtotal > 0) {
		clock_adjust_calendar((clock_res_t)calend_adjdelta);
		calend_adjtotal -= calend_adjdelta;

		if (calend_adjdelta > calend_adjtotal)
			calend_adjdelta = calend_adjtotal;
	}
	else
	if (calend_adjtotal < 0) {
		clock_adjust_calendar(-(clock_res_t)calend_adjdelta);
		calend_adjtotal += calend_adjdelta;

		if (calend_adjdelta > -calend_adjtotal)
			calend_adjdelta = -calend_adjtotal;
	}

	if (calend_adjtotal != 0) {
		uint64_t abstime;

		clock_get_uptime(&abstime);
		clock_deadline_for_periodic_event(calend_adjinterval, abstime,
						  &calend_adjdeadline);

		timer_call_enter(&calend_adjcall, calend_adjdeadline);
	}

	simple_unlock(&calend_adjlock);
	splx(s);
}