/* osfmk/kern/clock.c (xnu-1228) */
/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */

#include <mach/mach_types.h>

#include <kern/lock.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

uint32_t    hz_tick_interval = 1;

#if CONFIG_DTRACE
static void clock_track_calend_nowait(void);
#endif

decl_simple_lock_data(static,clock_lock)

/*
 *  Time of day (calendar) variables.
 *
 *  Algorithm:
 *
 *  TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 *  where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {

    uint64_t            epoch;
    uint64_t            offset;
    int64_t             adjtotal;       /* Nanosecond remaining total adjustment */
    uint64_t            adjdeadline;    /* Absolute time value for next adjustment period */
    uint32_t            adjinterval;    /* Absolute time interval of adjustment period */
    int32_t             adjdelta;       /* Nanosecond time delta for this adjustment period */
    uint64_t            adjstart;       /* Absolute time value for start of this adjustment period */
    uint32_t            adjoffset;      /* Absolute time offset for this adjustment period as absolute value */
    uint32_t            adjactive;
    timer_call_data_t   adjcall;
} clock_calend;

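/*
 * Editorial sketch (not part of the original file): the TOD algorithm
 * above with the slew handling and locking stripped out, to make the
 * epoch/offset formula concrete.  It assumes the same helpers the real
 * readers use; see clock_get_calendar_microtime() below for the
 * authoritative version.
 */
#if 0
static void
example_tod_read(uint32_t *secs, uint32_t *microsecs)
{
    uint64_t now = mach_absolute_time();                /* current absolute time */

    now += clock_calend.offset;                         /* + offset */
    absolutetime_to_microtime(now, secs, microsecs);    /* CONV() */
    *secs += clock_calend.epoch;                        /* + epoch seconds */
}
#endif
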
#if CONFIG_DTRACE
/*
 *  Unlocked calendar flipflop; this is used to track a clock_calend such
 *  that we can safely access a snapshot of a valid clock_calend structure
 *  without needing to take any locks to do it.
 *
 *  The trick is to use a generation count and set the low bit when it is
 *  being updated/read; by doing this, we guarantee, through use of the
 *  hw_atomic functions, that the generation is incremented when the bit
 *  is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
    struct clock_calend calend;     /* copy of calendar */
    uint32_t            gen;        /* generation count */
} flipflop[2];
#endif
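
/*
 * Editorial note: a worked timeline of the generation protocol.  A
 * writer moves gen from 2 to 3 (low bit set, update in progress),
 * copies the calendar, then from 3 to 4 (complete).  A reader that
 * snapshots at gen 2 and still observes 2 afterward has a stable copy;
 * observing 3 (in progress) or 4 (updated underneath it) forces a
 * retry on the other flipflop element.  See
 * clock_get_calendar_nanotime_nowait() below.
 */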

/*
 *  Calendar adjustment variables and values.
 */
#define calend_adjperiod    (NSEC_PER_SEC / 100)    /* adjustment period, ns */
#define calend_adjskew      (40 * NSEC_PER_USEC)    /* "standard" skew, ns / period */
#define calend_adjbig       (NSEC_PER_SEC)          /* use 10x skew above adjbig ns */
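
/*
 * Editorial note: with these values, adjtime() slews at 40 us per
 * 10 ms period (0.4%), so a 1 second total adjustment takes roughly
 * 1 s / 40 us = 25000 periods, about 250 seconds, to apply.  Totals
 * larger than calend_adjbig are slewed at 10x that rate (400 us per
 * period).
 */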

static uint32_t     calend_set_adjustment(
                        int32_t         *secs,
                        int32_t         *microsecs);

static void         calend_adjust_call(void);
static uint32_t     calend_adjust(void);

static thread_call_data_t   calend_wakecall;

extern void         IOKitResetTime(void);

static uint64_t     clock_boottime;     /* Seconds boottime epoch */

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)    \
MACRO_BEGIN                                         \
    if (((rfrac) += (frac)) >= (unit)) {            \
        (rfrac) -= (unit);                          \
        (rsecs) += 1;                               \
    }                                               \
    (rsecs) += (secs);                              \
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)    \
MACRO_BEGIN                                         \
    if ((int32_t)((rfrac) -= (frac)) < 0) {         \
        (rfrac) += (unit);                          \
        (rsecs) -= 1;                               \
    }                                               \
    (rsecs) -= (secs);                              \
MACRO_END
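
/*
 * Editorial note: a worked example of the carry logic.  With
 * rsecs = 8, rfrac = 900000 and TIME_ADD(rsecs, 2, rfrac, 300000,
 * USEC_PER_SEC), the fraction sums to 1200000, overflowing the unit,
 * so rfrac becomes 200000 and rsecs becomes 8 + 1 + 2 = 11.  TIME_SUB
 * borrows from the seconds in the same way when the fraction
 * underflows.
 */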

/*
 *  clock_config:
 *
 *  Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
    simple_lock_init(&clock_lock, 0);

    timer_call_setup(&clock_calend.adjcall, (timer_call_func_t)calend_adjust_call, NULL);
    thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

    clock_oldconfig();

    /*
     * Initialize the timer callouts.
     */
    timer_call_initialize();
}

/*
 *  clock_init:
 *
 *  Called on a processor each time it is started.
 */
void
clock_init(void)
{
    clock_oldinit();
}

/*
 *  clock_timebase_init:
 *
 *  Called by machine dependent code
 *  to initialize areas dependent on the
 *  timebase value.  May be called multiple
 *  times during start up.
 */
void
clock_timebase_init(void)
{
    uint64_t    abstime;

    nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
    clock_calend.adjinterval = abstime;

    nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
    hz_tick_interval = abstime;

    sched_timebase_init();
}

/*
 *  mach_timebase_info_trap:
 *
 *  User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
    struct mach_timebase_info_trap_args *args)
{
    mach_vm_address_t           out_info_addr = args->info;
    mach_timebase_info_data_t   info;

    clock_timebase_info(&info);

    copyout((void *)&info, out_info_addr, sizeof (info));

    return (KERN_SUCCESS);
}
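
/*
 * Editorial sketch (user-space side, not part of the original file):
 * the constant returned by this trap converts mach_absolute_time()
 * units to nanoseconds as elapsed * numer / denom.
 */
#if 0
#include <mach/mach_time.h>

uint64_t
elapsed_nanoseconds(uint64_t start, uint64_t end)
{
    mach_timebase_info_data_t info;

    mach_timebase_info(&info);          /* ends up in the trap above */

    /* may overflow for very long intervals; fine for a sketch */
    return ((end - start) * info.numer / info.denom);
}
#endif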

/*
 *  Calendar routines.
 */

/*
 *  clock_get_calendar_microtime:
 *
 *  Returns the current calendar value,
 *  microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
    uint32_t        *secs,
    uint32_t        *microsecs)
{
    uint64_t        now;
    spl_t           s;

    s = splclock();
    simple_lock(&clock_lock);

    now = mach_absolute_time();

    if (clock_calend.adjdelta < 0) {
        uint32_t    t32;

        if (now > clock_calend.adjstart) {
            t32 = now - clock_calend.adjstart;

            if (t32 > clock_calend.adjoffset)
                now -= clock_calend.adjoffset;
            else
                now = clock_calend.adjstart;
        }
    }

    now += clock_calend.offset;

    absolutetime_to_microtime(now, secs, microsecs);

    *secs += clock_calend.epoch;

    simple_unlock(&clock_lock);
    splx(s);
}

/*
 *  clock_get_calendar_nanotime:
 *
 *  Returns the current calendar value,
 *  nanoseconds as the fraction.
 *
 *  Since we do not have an interface to
 *  set the calendar with resolution greater
 *  than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
    uint32_t        *secs,
    uint32_t        *nanosecs)
{
    uint64_t        now;
    spl_t           s;

    s = splclock();
    simple_lock(&clock_lock);

    now = mach_absolute_time();

    if (clock_calend.adjdelta < 0) {
        uint32_t    t32;

        if (now > clock_calend.adjstart) {
            t32 = now - clock_calend.adjstart;

            if (t32 > clock_calend.adjoffset)
                now -= clock_calend.adjoffset;
            else
                now = clock_calend.adjstart;
        }
    }

    now += clock_calend.offset;

    absolutetime_to_microtime(now, secs, nanosecs);
    *nanosecs *= NSEC_PER_USEC;

    *secs += clock_calend.epoch;

    simple_unlock(&clock_lock);
    splx(s);
}

/*
 *  clock_gettimeofday:
 *
 *  Kernel interface for commpage implementation of
 *  gettimeofday() syscall.
 *
 *  Returns the current calendar value, and updates the
 *  commpage info as appropriate.  Because most calls to
 *  gettimeofday() are handled in user mode by the commpage,
 *  this routine should be used infrequently.
 */
void
clock_gettimeofday(
    uint32_t        *secs,
    uint32_t        *microsecs)
{
    uint64_t        now;
    spl_t           s;

    s = splclock();
    simple_lock(&clock_lock);

    now = mach_absolute_time();

    if (clock_calend.adjdelta >= 0) {
        clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
    }
    else {
        uint32_t    t32;

        if (now > clock_calend.adjstart) {
            t32 = now - clock_calend.adjstart;

            if (t32 > clock_calend.adjoffset)
                now -= clock_calend.adjoffset;
            else
                now = clock_calend.adjstart;
        }

        now += clock_calend.offset;

        absolutetime_to_microtime(now, secs, microsecs);

        *secs += clock_calend.epoch;
    }

    simple_unlock(&clock_lock);
    splx(s);
}

/*
 *  clock_set_calendar_microtime:
 *
 *  Sets the current calendar value by
 *  recalculating the epoch and offset
 *  from the system clock.
 *
 *  Also adjusts the boottime to keep the
 *  value consistent, writes the new
 *  calendar value to the platform clock,
 *  and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
    uint32_t        secs,
    uint32_t        microsecs)
{
    uint32_t        sys, microsys;
    uint32_t        newsecs;
    spl_t           s;

    /* Round to the nearest second (microsecs is always below USEC_PER_SEC). */
    newsecs = (microsecs < (USEC_PER_SEC / 2))?
                        secs: secs + 1;

    s = splclock();
    simple_lock(&clock_lock);

    commpage_disable_timestamp();

    /*
     *  Calculate the new calendar epoch based on
     *  the new value and the system clock.
     */
    clock_get_system_microtime(&sys, &microsys);
    TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

    /*
     *  Adjust the boottime based on the delta.
     */
    clock_boottime += secs - clock_calend.epoch;

    /*
     *  Set the new calendar epoch.
     */
    clock_calend.epoch = secs;
    nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

    /*
     *  Cancel any adjustment in progress.
     */
    clock_calend.adjdelta = clock_calend.adjtotal = 0;

    simple_unlock(&clock_lock);

    /*
     *  Set the new value for the platform clock.
     */
    PESetGMTTimeOfDay(newsecs);

    splx(s);

    /*
     *  Send host notifications.
     */
    host_notify_calendar_change();

#if CONFIG_DTRACE
    clock_track_calend_nowait();
#endif
}

/*
 *  clock_initialize_calendar:
 *
 *  Set the calendar and related clocks
 *  from the platform clock at boot or
 *  wake event.
 *
 *  Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
    uint32_t        sys, microsys;
    uint32_t        microsecs = 0, secs = PEGetGMTTimeOfDay();
    spl_t           s;

    s = splclock();
    simple_lock(&clock_lock);

    commpage_disable_timestamp();

    if ((int32_t)secs >= (int32_t)clock_boottime) {
        /*
         *  Initialize the boot time based on the platform clock.
         */
        if (clock_boottime == 0)
            clock_boottime = secs;

        /*
         *  Calculate the new calendar epoch based on
         *  the platform clock and the system clock.
         */
        clock_get_system_microtime(&sys, &microsys);
        TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

        /*
         *  Set the new calendar epoch.
         */
        clock_calend.epoch = secs;
        nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

        /*
         *  Cancel any adjustment in progress.
         */
        clock_calend.adjdelta = clock_calend.adjtotal = 0;
    }

    simple_unlock(&clock_lock);
    splx(s);

    /*
     *  Send host notifications.
     */
    host_notify_calendar_change();

#if CONFIG_DTRACE
    clock_track_calend_nowait();
#endif
}

/*
 *  clock_get_boottime_nanotime:
 *
 *  Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
    uint32_t        *secs,
    uint32_t        *nanosecs)
{
    *secs = clock_boottime;
    *nanosecs = 0;
}

/*
 *  clock_adjtime:
 *
 *  Interface to adjtime() syscall.
 *
 *  Calculates adjustment variables and
 *  initiates adjustment.
 */
void
clock_adjtime(
    int32_t     *secs,
    int32_t     *microsecs)
{
    uint32_t    interval;
    spl_t       s;

    s = splclock();
    simple_lock(&clock_lock);

    interval = calend_set_adjustment(secs, microsecs);
    if (interval != 0) {
        clock_calend.adjdeadline = mach_absolute_time() + interval;
        if (!timer_call_enter(&clock_calend.adjcall, clock_calend.adjdeadline))
            clock_calend.adjactive++;
    }
    else
    if (timer_call_cancel(&clock_calend.adjcall))
        clock_calend.adjactive--;

    simple_unlock(&clock_lock);
    splx(s);
}

static uint32_t
calend_set_adjustment(
    int32_t     *secs,
    int32_t     *microsecs)
{
    uint64_t    now, t64;
    int64_t     total, ototal;
    uint32_t    interval = 0;

    total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

    commpage_disable_timestamp();

    now = mach_absolute_time();

    ototal = clock_calend.adjtotal;

    if (total != 0) {
        int32_t     delta = calend_adjskew;

        if (total > 0) {
            if (total > calend_adjbig)
                delta *= 10;
            if (delta > total)
                delta = total;

            nanoseconds_to_absolutetime((uint64_t)delta, &t64);
            clock_calend.adjoffset = t64;
        }
        else {
            if (total < -calend_adjbig)
                delta *= 10;
            delta = -delta;
            if (delta < total)
                delta = total;

            clock_calend.adjstart = now;

            nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
            clock_calend.adjoffset = t64;
        }

        clock_calend.adjtotal = total;
        clock_calend.adjdelta = delta;

        interval = clock_calend.adjinterval;
    }
    else
        clock_calend.adjdelta = clock_calend.adjtotal = 0;

    if (ototal != 0) {
        *secs = ototal / NSEC_PER_SEC;
        *microsecs = (ototal % NSEC_PER_SEC) / NSEC_PER_USEC;
    }
    else
        *secs = *microsecs = 0;

#if CONFIG_DTRACE
    clock_track_calend_nowait();
#endif

    return (interval);
}

static void
calend_adjust_call(void)
{
    uint32_t    interval;
    spl_t       s;

    s = splclock();
    simple_lock(&clock_lock);

    if (--clock_calend.adjactive == 0) {
        interval = calend_adjust();
        if (interval != 0) {
            clock_deadline_for_periodic_event(interval, mach_absolute_time(),
                                                        &clock_calend.adjdeadline);

            if (!timer_call_enter(&clock_calend.adjcall, clock_calend.adjdeadline))
                clock_calend.adjactive++;
        }
    }

    simple_unlock(&clock_lock);
    splx(s);
}

static uint32_t
calend_adjust(void)
{
    uint64_t    now, t64;
    int32_t     delta;
    uint32_t    interval = 0;

    commpage_disable_timestamp();

    now = mach_absolute_time();

    delta = clock_calend.adjdelta;

    if (delta > 0) {
        clock_calend.offset += clock_calend.adjoffset;

        clock_calend.adjtotal -= delta;
        if (delta > clock_calend.adjtotal) {
            clock_calend.adjdelta = delta = clock_calend.adjtotal;

            nanoseconds_to_absolutetime((uint64_t)delta, &t64);
            clock_calend.adjoffset = t64;
        }
    }
    else
    if (delta < 0) {
        clock_calend.offset -= clock_calend.adjoffset;

        clock_calend.adjtotal -= delta;
        if (delta < clock_calend.adjtotal) {
            clock_calend.adjdelta = delta = clock_calend.adjtotal;

            nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
            clock_calend.adjoffset = t64;
        }

        if (clock_calend.adjdelta != 0)
            clock_calend.adjstart = now;
    }

    if (clock_calend.adjdelta != 0)
        interval = clock_calend.adjinterval;

#if CONFIG_DTRACE
    clock_track_calend_nowait();
#endif

    return (interval);
}

/*
 *  clock_wakeup_calendar:
 *
 *  Interface to power management, used
 *  to initiate the reset of the calendar
 *  on wake from sleep event.
 */
void
clock_wakeup_calendar(void)
{
    thread_call_enter(&calend_wakecall);
}

/*
 *  Wait / delay routines.
 */
static void
mach_wait_until_continue(
    __unused void   *parameter,
    wait_result_t   wresult)
{
    thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
    /*NOTREACHED*/
}

kern_return_t
mach_wait_until_trap(
    struct mach_wait_until_trap_args    *args)
{
    uint64_t        deadline = args->deadline;
    wait_result_t   wresult;

    wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
    if (wresult == THREAD_WAITING)
        wresult = thread_block(mach_wait_until_continue);

    return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
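
/*
 * Editorial sketch (user-space side, not part of the original file):
 * mach_wait_until() takes a deadline in absolute time units, so a
 * relative nanosecond delay is scaled through the timebase first.
 */
#if 0
#include <mach/mach_time.h>

void
sleep_for_ns(uint64_t ns)
{
    mach_timebase_info_data_t info;

    mach_timebase_info(&info);
    /* ns -> abstime units is the inverse of the numer/denom ratio */
    mach_wait_until(mach_absolute_time() + ns * info.denom / info.numer);
}
#endif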

void
clock_delay_until(
    uint64_t        deadline)
{
    uint64_t        now = mach_absolute_time();

    if (now >= deadline)
        return;

    if ( (deadline - now) < (8 * sched_cswtime)     ||
            get_preemption_level() != 0             ||
            ml_get_interrupts_enabled() == FALSE    )
        machine_delay_until(deadline);
    else {
        assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

        thread_block(THREAD_CONTINUE_NULL);
    }
}

void
delay_for_interval(
    uint32_t        interval,
    uint32_t        scale_factor)
{
    uint64_t        end;

    clock_interval_to_deadline(interval, scale_factor, &end);

    clock_delay_until(end);
}

void
delay(
    int     usec)
{
    delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
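
/*
 * Editorial note: delay(usec) above is shorthand for
 * delay_for_interval(usec, NSEC_PER_USEC); for example,
 * delay_for_interval(10, NSEC_PER_MSEC) delays for 10 ms, spinning or
 * blocking according to the tests in clock_delay_until().
 */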

/*
 *  Miscellaneous routines.
 */
void
clock_interval_to_deadline(
    uint32_t        interval,
    uint32_t        scale_factor,
    uint64_t        *result)
{
    uint64_t    abstime;

    clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

    *result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
    uint64_t        abstime,
    uint64_t        *result)
{
    *result = mach_absolute_time() + abstime;
}

void
clock_get_uptime(
    uint64_t    *result)
{
    *result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
    uint64_t        interval,
    uint64_t        abstime,
    uint64_t        *deadline)
{
    assert(interval != 0);

    *deadline += interval;

    if (*deadline <= abstime) {
        *deadline = abstime + interval;
        abstime = mach_absolute_time();

        if (*deadline <= abstime)
            *deadline = abstime + interval;
    }
}
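
/*
 * Editorial note: a worked example of the catch-up logic above.  With
 * interval = 10, a stale deadline of 50 and abstime = 87, the first
 * bump yields 60, still in the past, so the deadline resynchronizes to
 * abstime + interval = 97 instead of firing repeatedly to catch up.
 */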

#if CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description: Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:   This function operates by separately tracking calendar time
 *          updates using a two element structure to copy the calendar
 *          state, which may be asynchronously modified.  It utilizes
 *          barrier instructions in the tracking process and in the local
 *          stable snapshot process in order to ensure that a consistent
 *          snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
    uint32_t        *secs,
    uint32_t        *nanosecs)
{
    int i = 0;
    uint64_t        now;
    struct unlocked_clock_calend stable;

    for (;;) {
        stable = flipflop[i];       /* take snapshot */

        /*
         * Use a barrier instruction to ensure atomicity.  We AND
         * off the "in progress" bit to get the current generation
         * count.
         */
        (void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

        /*
         * If an update _is_ in progress, the generation count will be
         * off by one, if it _was_ in progress, it will be off by two,
         * and if we caught it at a good time, it will be equal (and
         * our snapshot is therefore stable).
         */
        if (flipflop[i].gen == stable.gen)
            break;

        /* Switch to the other element of the flipflop, and try again. */
        i ^= 1;
    }

    now = mach_absolute_time();

    if (stable.calend.adjdelta < 0) {
        uint32_t    t32;

        if (now > stable.calend.adjstart) {
            t32 = now - stable.calend.adjstart;

            if (t32 > stable.calend.adjoffset)
                now -= stable.calend.adjoffset;
            else
                now = stable.calend.adjstart;
        }
    }

    now += stable.calend.offset;

    absolutetime_to_microtime(now, secs, nanosecs);
    *nanosecs *= NSEC_PER_USEC;

    *secs += stable.calend.epoch;
}

static void
clock_track_calend_nowait(void)
{
    int i;

    for (i = 0; i < 2; i++) {
        struct clock_calend tmp = clock_calend;

        /*
         * Set the low bit of the generation count; since we use a
         * barrier instruction to do this, we are guaranteed that this
         * will flag an update in progress to an async caller trying
         * to examine the contents.
         */
        (void)hw_atomic_or(&flipflop[i].gen, 1);

        flipflop[i].calend = tmp;

        /*
         * Increment the generation count to clear the low bit to
         * signal completion.  If a caller compares the generation
         * count after taking a copy while in progress, the count
         * will be off by two.
         */
        (void)hw_atomic_add(&flipflop[i].gen, 1);
    }
}

#endif  /* CONFIG_DTRACE */