/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */
33
91447636 34#include <mach/mach_types.h>
91447636 35
1c79356b 36#include <kern/spl.h>
55e303ae 37#include <kern/sched_prim.h>
1c79356b 38#include <kern/thread.h>
1c79356b 39#include <kern/clock.h>
0c530ab8 40#include <kern/host_notify.h>
39037602
A
41#include <kern/thread_call.h>
42#include <libkern/OSAtomic.h>
0c530ab8
A
43
44#include <IOKit/IOPlatformExpert.h>
c0fea474 45
0c530ab8 46#include <machine/commpage.h>
1c79356b 47
91447636 48#include <mach/mach_traps.h>
1c79356b
A
49#include <mach/mach_time.h>
50
3e170ce0
A
51#include <sys/kdebug.h>
52
2d21ac55
A
53uint32_t hz_tick_interval = 1;
54
2d21ac55 55
6d2010ae 56decl_simple_lock_data(,clock_lock)
91447636 57
b0d623f7
A
58#define clock_lock() \
59 simple_lock(&clock_lock)
60
61#define clock_unlock() \
62 simple_unlock(&clock_lock)
63
64#define clock_lock_init() \
65 simple_lock_init(&clock_lock, 0)
66
39037602
A
67#ifdef kdp_simple_lock_is_acquired
68boolean_t kdp_clock_is_locked()
69{
70 return kdp_simple_lock_is_acquired(&clock_lock);
71}
72#endif
b0d623f7 73
/*
 *	Time of day (calendar) variables.
 *
 *	Algorithm:
 *
 *	TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 *	where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {
	uint64_t	epoch;
	uint64_t	offset;
	uint64_t	epoch_absolute;

	int32_t		adjdelta;	/* Nanosecond time delta for this adjustment period */
	uint64_t	adjstart;	/* Absolute time value for start of this adjustment period */
	uint32_t	adjoffset;	/* Absolute time offset for this adjustment period as absolute value */
} clock_calend;
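
/*
 * Illustrative sketch (not in the original source): how the algorithm
 * above is applied when reading the calendar. The names are from this
 * file; the locals are hypothetical.
 *
 *	now  = mach_absolute_time();			// ticks since boot
 *	now += clock_calend.offset;			// sub-second UTC fraction at boot
 *	absolutetime_to_microtime(now, &secs, &microsecs);
 *	secs += clock_calend.epoch;			// whole seconds of UTC epoch at boot
 *
 * See clock_get_calendar_absolute_and_microtime_locked() below for the
 * real version, which also compensates for in-progress adjustments.
 */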

#if	CONFIG_DTRACE

/*
 *	Unlocked calendar flipflop; this is used to track a clock_calend such
 *	that we can safely access a snapshot of a valid clock_calend structure
 *	without needing to take any locks to do it.
 *
 *	The trick is to use a generation count and set the low bit when it is
 *	being updated/read; by doing this, we guarantee, through use of the
 *	hw_atomic functions, that the generation is incremented when the bit
 *	is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count */
} flipflop[2];

static void clock_track_calend_nowait(void);

#endif

/*
 *	Calendar adjustment variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define	calend_adjbig		(NSEC_PER_SEC)		/* use 10x skew above adjbig ns */

static int64_t		calend_adjtotal;	/* Nanosecond remaining total adjustment */
static uint64_t		calend_adjdeadline;	/* Absolute time value for next adjustment period */
static uint32_t		calend_adjinterval;	/* Absolute time interval of adjustment period */

static timer_call_data_t	calend_adjcall;
static uint32_t			calend_adjactive;

static uint32_t		calend_set_adjustment(
				long		*secs,
				int		*microsecs);

static void		calend_adjust_call(void);
static uint32_t		calend_adjust(void);

void _clock_delay_until_deadline(uint64_t	interval,
				 uint64_t	deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t	interval,
					     uint64_t	deadline,
					     uint64_t	leeway);

/* Seconds boottime epoch */
static uint64_t clock_boottime;
static uint32_t clock_boottime_usec;

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END
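
/*
 * Illustrative sketch (not in the original source): TIME_ADD/TIME_SUB do
 * seconds-plus-fraction arithmetic with a single carry/borrow. For
 * example, with unit == USEC_PER_SEC and hypothetical locals:
 *
 *	clock_sec_t rs = 10; clock_usec_t rf = 900000;
 *	TIME_ADD(rs, 1, rf, 200000, USEC_PER_SEC);
 *	// rf overflowed the unit: rs == 12, rf == 100000
 *
 * Note the single carry: each macro assumes frac < unit on entry.
 */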

/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);

	clock_oldconfig();
}

/*
 *	clock_init:
 *
 *	Called on a processor each time it is started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = (uint32_t)abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t		out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
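
/*
 * Illustrative sketch (not in the original source): user code normally
 * reaches this trap through the mach_timebase_info() wrapper and applies
 * the returned numer/denom ratio to convert ticks to nanoseconds:
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t t0 = mach_absolute_time();
 *	// ... work ...
 *	uint64_t elapsed_ns = (mach_absolute_time() - t0) * tb.numer / tb.denom;
 *
 * The multiply can overflow for very large intervals; careful callers
 * split the computation.
 */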

/*
 *	Calendar routines.
 */

/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}

static void
clock_get_calendar_absolute_and_microtime_locked(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	uint64_t now = mach_absolute_time();
	if (abstime)
		*abstime = now;

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		/*
		 * Since offset is decremented during a negative adjustment,
		 * ensure that time increases monotonically without going
		 * temporarily backwards.
		 * If the delta has not yet passed, now is set to the start
		 * of the current adjustment period; otherwise, we're between
		 * the expiry of the delta and the next call to calend_adjust(),
		 * and we offset accordingly.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;
}
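
/*
 * Illustrative numbers (not in the original source) for the clamp above,
 * pretending one absolute-time tick equals one nanosecond:
 *
 *	adjstart = 1000, adjoffset = 40 (this period's slew, in ticks)
 *	now = 1030  ->  t32 = 30, not past the slew: clamp now back to 1000
 *	now = 1100  ->  t32 = 100, past the slew: now -= 40, giving 1060
 *
 * Either way, the value reported never runs ahead of what readers will
 * see after calend_adjust() folds this period's slew into
 * clock_calend.offset.
 */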

/*
 *	clock_get_calendar_absolute_and_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction. Also
 *	returns mach_absolute_time if abstime
 *	is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	spl_t			s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t			s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_microtime_locked(secs, nanosecs, NULL);

	*nanosecs *= NSEC_PER_USEC;

	clock_unlock();
	splx(s);
}

/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}

void
clock_gettimeofday_and_absolute_time(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*mach_time)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		/*
		 * The commpage timestamp is not updated while a negative
		 * adjustment is in progress; apply the same monotonicity
		 * clamp as clock_get_calendar_absolute_and_microtime_locked().
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}

	clock_unlock();
	splx(s);

	if (mach_time) {
		*mach_time = now;
	}
}

/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	clock_sec_t		sys;
	clock_usec_t		microsys;
	uint64_t		absolutesys;
	clock_sec_t		newsecs;
	clock_sec_t		oldsecs;
	clock_usec_t		newmicrosecs;
	clock_usec_t		oldmicrosecs;
	uint64_t		commpage_value;
	spl_t			s;

	newsecs = secs;
	newmicrosecs = microsecs;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);
	if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
		// moving forwards
		long deltasecs = secs, deltamicrosecs = microsecs;
		TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);
		TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
	} else {
		// moving backwards
		long deltasecs = oldsecs, deltamicrosecs = oldmicrosecs;
		TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);
		TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
	}
	commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;

	/*
	 *	Calculate the new calendar epoch based on
	 *	the new value and the system clock.
	 */
	absolutetime_to_microtime(absolutesys, &sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 *	Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	clock_interval_to_absolutetime_interval((uint32_t) secs, NSEC_PER_SEC, &clock_calend.epoch_absolute);
	clock_calend.epoch_absolute += clock_calend.offset;

	/*
	 *	Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 *	Set the new value for the platform clock.
	 */
	PESetUTCTimeOfDay(newsecs, newmicrosecs);

	splx(s);

	commpage_update_boottime(commpage_value);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
	host_notify_calendar_set();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
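
/*
 * Illustrative numbers (not in the original source) for the boottime
 * adjustment above. Suppose the calendar currently reads 17:00:00 and
 * clock_boottime is 09:00:00 (the machine has been up eight hours).
 * Setting the calendar forward to 18:00:00 yields deltasecs == 3600, so
 * clock_boottime advances to 10:00:00: "calendar time minus boottime"
 * still reports eight hours of uptime after the jump.
 */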

/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */

uint64_t mach_absolutetime_asleep;
uint64_t mach_absolutetime_last_sleep;

void
clock_initialize_calendar(void)
{
	clock_sec_t		sys;			// sleepless time since boot in seconds
	clock_sec_t		secs;			// current UTC time
	clock_sec_t		utc_offset_secs;	// difference between current UTC time and sleepless time since boot
	clock_usec_t		microsys;
	clock_usec_t		microsecs;
	clock_usec_t		utc_offset_microsecs;
	uint64_t		new_epoch;		// utc_offset_secs in mach absolute time units
	spl_t			s;

	PEGetUTCTimeOfDay(&secs, &microsecs);

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 *	Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0) {
			clock_boottime = secs;
			clock_boottime_usec = microsecs;
			commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);
		}

		/*
		 *	Calculate the new calendar epoch based on
		 *	the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		utc_offset_secs = secs;
		utc_offset_microsecs = microsecs;

		// This macro mutates utc_offset_secs and utc_offset_microsecs
		TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);

		/*
		 *	Set the new calendar epoch.
		 */

		clock_calend.epoch = utc_offset_secs;

		nanoseconds_to_absolutetime((uint64_t)utc_offset_microsecs * NSEC_PER_USEC, &clock_calend.offset);

		clock_interval_to_absolutetime_interval((uint32_t) utc_offset_secs, NSEC_PER_SEC, &new_epoch);
		new_epoch += clock_calend.offset;

		if (clock_calend.epoch_absolute)
		{
			/* new_epoch is the offset between UTC time (expressed in
			 * absolute-time units) and mach_absolute_time(). It stays
			 * constant until the system sleeps, since the absolute
			 * clock does not advance during sleep; each sleep grows
			 * the offset by the sleep duration. epoch_absolute holds
			 * the offset as of the previous update, so the change
			 * between the two values is the duration of the most
			 * recent sleep.
			 */

			if (new_epoch > clock_calend.epoch_absolute) {
				mach_absolutetime_last_sleep = new_epoch - clock_calend.epoch_absolute;
			}
			else {
				mach_absolutetime_last_sleep = 0;
			}
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_CLOCK,MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
				(uintptr_t) mach_absolutetime_last_sleep,
				(uintptr_t) mach_absolutetime_asleep,
				(uintptr_t) (mach_absolutetime_last_sleep >> 32),
				(uintptr_t) (mach_absolutetime_asleep >> 32),
				0);
		}
		clock_calend.epoch_absolute = new_epoch;

		/*
		 *	Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
	adjust_cont_time_thread_calls();

	clock_unlock();
	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
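
/*
 * Illustrative numbers (not in the original source) for the sleep
 * accounting above, pretending one tick equals one second. At boot:
 * UTC == 1000 and mach_absolute_time() == 0, so epoch_absolute == 1000.
 * The machine runs for 50s, sleeps for 20s, then wakes: UTC == 1070 but
 * mach_absolute_time() == 50 (it does not tick during sleep), so
 * new_epoch == 1070 - 50 == 1020 and
 *
 *	mach_absolutetime_last_sleep = 1020 - 1000 = 20
 *
 * which is exactly the sleep duration accumulated into
 * mach_absolutetime_asleep.
 */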

/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_boottime_microtime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*microsecs = (clock_usec_t)clock_boottime_usec;

	clock_unlock();
	splx(s);
}

/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}

static uint32_t
calend_set_adjustment(
	long		*secs,
	int		*microsecs)
{
	uint64_t	now, t64;
	int64_t		total, ototal;
	uint32_t	interval = 0;

	/*
	 * Compute the total adjustment time in nanoseconds.
	 */
	total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);

	/*
	 * Disable commpage gettimeofday().
	 */
	commpage_disable_timestamp();

	/*
	 * Get current absolute time.
	 */
	now = mach_absolute_time();

	/*
	 * Save the old adjustment total for later return.
	 */
	ototal = calend_adjtotal;

	/*
	 * Is a new correction specified?
	 */
	if (total != 0) {
		/*
		 * Set delta to the standard, small, adjustment skew.
		 */
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/*
			 * Positive adjustment. If greater than the preset 'big'
			 * threshold, slew at a faster rate, capping if necessary.
			 */
			if (total > (int64_t) calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			/*
			 * Negative adjustment; therefore, negate the delta. If
			 * greater than the preset 'big' threshold, slew at a faster
			 * rate, capping if necessary.
			 */
			if (total < (int64_t) -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			/*
			 * Save the current absolute time.  Subsequent time operations occurring
			 * during this negative correction can make use of this value to ensure
			 * that time increases monotonically.
			 */
			clock_calend.adjstart = now;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/*
		 * Store the total adjustment time in ns.
		 */
		calend_adjtotal = total;

		/*
		 * Store the delta for this adjustment period in ns.
		 */
		clock_calend.adjdelta = delta;

		/*
		 * Set the interval in absolute time for later return.
		 */
		interval = calend_adjinterval;
	}
	else {
		/*
		 * No change; clear any prior adjustment.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/*
	 * If a prior correction was in progress, return the
	 * remaining uncorrected time from it.
	 */
	if (ototal != 0) {
		*secs = (long)(ototal / (long)NSEC_PER_SEC);
		*microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}
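
/*
 * Illustrative arithmetic (not in the original source): with
 * calend_adjperiod == 10ms and calend_adjskew == 40us, a 0.5s correction
 * slews at 40us per 10ms period (about 0.4%), taking roughly 125 seconds
 * to complete. A correction larger than calend_adjbig (1s) uses
 * delta * 10, i.e. 400us per period, so a 2s correction finishes in
 * roughly 50 seconds.
 */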

static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}

static uint32_t
calend_adjust(void)
{
	uint64_t	now, t64;
	int32_t		delta;
	uint32_t	interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else
	if (delta < 0) {
		clock_calend.offset -= clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		if (clock_calend.adjdelta != 0)
			clock_calend.adjstart = now;
	}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:	args->deadline		Mach absolute time until which to wait
 *
 * Returns:	0			Success
 *		!0			Not success
 *
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args	*args)
{
	uint64_t		deadline = args->deadline;
	wait_result_t		wresult;

	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
						   TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
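
/*
 * Illustrative sketch (not in the original source): user code reaches
 * this trap via mach_wait_until() from <mach/mach_time.h>, e.g. to sleep
 * until 100ms from now:
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ticks = 100000000ULL * tb.denom / tb.numer;	// 100ms in ticks
 *	mach_wait_until(mach_absolute_time() + ticks);
 *
 * A return of KERN_ABORTED indicates the wait was interrupted rather
 * than expiring normally.
 */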

void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	_clock_delay_until_deadline(deadline - now, deadline);
}

/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t		interval,
	uint64_t		deadline)
{
	_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}

/*
 * Like _clock_delay_until_deadline, but it accepts a
 * leeway value.
 */
void
_clock_delay_until_deadline_with_leeway(
	uint64_t		interval,
	uint64_t		deadline,
	uint64_t		leeway)
{

	if (interval == 0)
		return;

	if (	ml_delay_should_spin(interval)	||
		get_preemption_level() != 0	||
		ml_get_interrupts_enabled() == FALSE	) {
		machine_delay_until(interval, deadline);
	} else {
		/*
		 * For now, assume a leeway request of 0 means the client does not want a leeway
		 * value. We may want to change this interpretation in the future.
		 */

		if (leeway) {
			assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
		} else {
			assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
		}

		thread_block(THREAD_CONTINUE_NULL);
	}
}

void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}

void
delay_for_interval_with_leeway(
	uint32_t		interval,
	uint32_t		leeway,
	uint32_t		scale_factor)
{
	uint64_t		abstime_interval;
	uint64_t		abstime_leeway;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
	clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);

	_clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
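
/*
 * Illustrative usage (not in the original source): scale_factor is the
 * number of nanoseconds per unit of interval, so these are equivalent
 * ways for kernel code to pause for one millisecond:
 *
 *	delay(1000);				// 1000 us
 *	delay_for_interval(1000, NSEC_PER_USEC);	// 1000 us
 *	delay_for_interval(1000000, 1);		// 1000000 ns
 *
 * Whether the wait spins or blocks is decided in
 * _clock_delay_until_deadline_with_leeway() above.
 */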

/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_continuoustime_interval_to_deadline(
	uint64_t		conttime,
	uint64_t		*result)
{
	*result = mach_continuous_time() + conttime;
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
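
/*
 * Illustrative usage (not in the original source): a periodic timer
 * keeps its cadence by advancing the previous deadline rather than
 * re-anchoring on "now", which would accumulate drift; if processing
 * overran one or more full periods, the deadline is pulled forward past
 * the current time instead. `my_timer` and `period` are hypothetical:
 *
 *	static uint64_t deadline;
 *	// on each firing, with period in absolute-time units:
 *	clock_deadline_for_periodic_event(period, mach_absolute_time(), &deadline);
 *	timer_call_enter(&my_timer, deadline, TIMER_CALL_SYS_NORMAL);
 *
 * See calend_adjust_call() above for a real caller.
 */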

uint64_t
mach_continuous_time(void)
{
	while (1) {
		/*
		 * Snapshot the accumulated sleep time, read the clock, then
		 * re-check the sleep time; retry if a sleep update raced
		 * with the read, so the sum is always consistent.
		 */
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_absolute_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		}
	}
}

uint64_t
mach_continuous_approximate_time(void)
{
	while (1) {
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_approximate_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		}
	}
}

/*
 * continuoustime_to_absolutetime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
continuoustime_to_absolutetime(uint64_t conttime) {
	if (conttime <= mach_absolutetime_asleep)
		return 0;
	else
		return conttime - mach_absolutetime_asleep;
}

/*
 * absolutetime_to_continuoustime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
absolutetime_to_continuoustime(uint64_t abstime) {
	return abstime + mach_absolutetime_asleep;
}
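
/*
 * Illustrative sketch (not in the original source): continuous time is
 * absolute time plus accumulated sleep, so round-tripping a timestamp
 * (with interrupts disabled, per the comments above) looks like:
 *
 *	uint64_t abs  = mach_absolute_time();
 *	uint64_t cont = absolutetime_to_continuoustime(abs);
 *	assert(continuoustime_to_absolutetime(cont) == abs);
 *
 * The assertion holds only while mach_absolutetime_asleep is unchanged
 * between the two conversions.
 */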

#if	CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t		now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use a barrier instruction to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)stable.calend.epoch;
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit and
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}

#endif	/* CONFIG_DTRACE */