* Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
#include <kern/arithmetic_128.h>
#include <os/log.h>
-uint32_t hz_tick_interval = 1;
+uint32_t hz_tick_interval = 1;
static uint64_t has_monotonic_clock = 0;
-decl_simple_lock_data(,clock_lock)
+decl_simple_lock_data(, clock_lock)
lck_grp_attr_t * settime_lock_grp_attr;
lck_grp_t * settime_lock_grp;
lck_attr_t * settime_lock_attr;
lck_mtx_t settime_lock;
-#define clock_lock() \
- simple_lock(&clock_lock)
+#define clock_lock() \
+ simple_lock(&clock_lock, LCK_GRP_NULL)
-#define clock_unlock() \
+#define clock_unlock() \
simple_unlock(&clock_lock)
-#define clock_lock_init() \
+#define clock_lock_init() \
simple_lock_init(&clock_lock, 0)
#ifdef kdp_simple_lock_is_acquired
-boolean_t kdp_clock_is_locked()
+boolean_t
+kdp_clock_is_locked()
{
return kdp_simple_lock_is_acquired(&clock_lock);
}
#endif
struct bintime {
- time_t sec;
+ time_t sec;
uint64_t frac;
};
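/*
 * A bintime is a fixed-point timestamp: `sec` whole seconds plus `frac` in
 * units of 2^-64 s. A minimal conversion sketch (illustrative helper, not
 * part of this file):
 *
 *	static __inline uint64_t
 *	ns_to_bintime_frac(uint64_t ns)
 *	{
 *		// 18446744073 = int(2^64 / NSEC_PER_SEC), i.e. ns * 2^64 / 10^9
 *		return ns * (uint64_t)18446744073LL;
 *	}
 *
 * e.g. ns = 500000000 (half a second) yields frac ~= 1ULL << 63.
 */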
_u = _bt->frac;
_bt->frac += _x;
- if (_u > _bt->frac)
+ if (_u > _bt->frac) {
_bt->sec++;
+ }
}
static __inline void
_u = _bt->frac;
_bt->frac -= _x;
- if (_u < _bt->frac)
+ if (_u < _bt->frac) {
_bt->sec--;
+ }
}
static __inline void
bintime_addns(struct bintime *bt, uint64_t ns)
{
- bt->sec += ns/ (uint64_t)NSEC_PER_SEC;
+ bt->sec += ns / (uint64_t)NSEC_PER_SEC;
ns = ns % (uint64_t)NSEC_PER_SEC;
if (ns) {
/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
static __inline void
bintime_subns(struct bintime *bt, uint64_t ns)
{
- bt->sec -= ns/ (uint64_t)NSEC_PER_SEC;
+ bt->sec -= ns / (uint64_t)NSEC_PER_SEC;
ns = ns % (uint64_t)NSEC_PER_SEC;
if (ns) {
/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
static __inline void
bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
{
- uint64_t uxns = (xns > 0)?(uint64_t )xns:(uint64_t)-xns;
+ uint64_t uxns = (xns > 0)?(uint64_t)xns:(uint64_t)-xns;
uint64_t ns = multi_overflow(a, uxns);
if (xns > 0) {
- if (ns)
+ if (ns) {
bintime_addns(bt, ns);
+ }
ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
bintime_addx(bt, ns);
- }
- else{
- if (ns)
+ } else {
+ if (ns) {
bintime_subns(bt, ns);
+ }
ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
- bintime_subx(bt,ns);
+ bintime_subx(bt, ns);
}
}
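/*
 * Note on bintime_addxns() above: xns is a signed adjustment in 64-bit
 * fractions of a nanosecond, applied a times. The high 64 bits of the
 * 128-bit product a * |xns| (from multi_overflow) are whole nanoseconds;
 * the low 64 bits, divided by NSEC_PER_SEC, become 64-bit fractions of a
 * second, so the two halves are added (or subtracted) separately.
 */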
_u = _bt->frac;
_bt->frac += _bt2->frac;
- if (_u > _bt->frac)
+ if (_u > _bt->frac) {
_bt->sec++;
+ }
_bt->sec += _bt2->sec;
}
_u = _bt->frac;
_bt->frac -= _bt2->frac;
- if (_u < _bt->frac)
+ if (_u < _bt->frac) {
_bt->sec--;
+ }
_bt->sec -= _bt2->sec;
}
static __inline void
clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
{
-
_bt->sec = *secs;
/* 18446744073709 = int(2^64 / 1000000) */
_bt->frac = *microsecs * (uint64_t)18446744073709LL;
static __inline void
bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
{
-
*secs = _bt->sec;
*microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}
static __inline void
bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
{
-
*secs = _bt->sec;
*nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}
}
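/*
 * The conversions above sidestep 128-bit math by keeping only the top 32
 * fraction bits: nanosecs = frac * 10^9 / 2^64 is computed as
 * (NSEC_PER_SEC * (frac >> 32)) >> 32, which fits in 64 bits. Worked
 * example: frac = 1ULL << 63 (half a second) gives
 * (10^9 * 2^31) >> 32 = 500000000 ns; dropping the low 32 bits costs at
 * most ~0.23 ns per conversion.
 */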
struct latched_time {
- uint64_t monotonic_time_usec;
- uint64_t mach_time;
+ uint64_t monotonic_time_usec;
+ uint64_t mach_time;
};
extern int
* TOD <- bintime + delta*scale
*
* where :
- * bintime is a cumulative offset that includes bootime and scaled time elapsed betweed bootime and last scale update.
+ * bintime is a cumulative offset that includes boottime and the scaled time elapsed between boottime and the last scale update.
* delta is ticks elapsed since last scale update.
* scale is computed according to an adjustment provided by ntp_kern.
*/
static struct clock_calend {
- uint64_t s_scale_ns; /* scale to apply for each second elapsed, it converts in ns */
- int64_t s_adj_nsx; /* additional adj to apply for each second elapsed, it is expressed in 64 bit frac of ns */
- uint64_t tick_scale_x; /* scale to apply for each tick elapsed, it converts in 64 bit frac of s */
- uint64_t offset_count; /* abs time from which apply current scales */
- struct bintime offset; /* cumulative offset expressed in (sec, 64 bits frac of a second) */
- struct bintime bintime; /* cumulative offset (it includes bootime) expressed in (sec, 64 bits frac of a second) */
- struct bintime boottime; /* boot time expressed in (sec, 64 bits frac of a second) */
- struct bintime basesleep;
+	uint64_t	s_scale_ns;   /* scale applied to each elapsed second; converts to ns */
+	int64_t		s_adj_nsx;    /* additional per-second adjustment, expressed in 64-bit fractions of a ns */
+	uint64_t	tick_scale_x; /* scale applied to each elapsed tick; converts to 64-bit fractions of a second */
+	uint64_t	offset_count; /* abs time from which the current scales apply */
+	struct bintime	offset;       /* cumulative offset, expressed in (sec, 64-bit fraction of a second) */
+	struct bintime	bintime;      /* cumulative offset including boottime, expressed in (sec, 64-bit fraction of a second) */
+	struct bintime	boottime;     /* boot time, expressed in (sec, 64-bit fraction of a second) */
+	struct bintime	basesleep;
} clock_calend;
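/*
 * Putting the formula above together, a sketch of the read path (assumes
 * delta * tick_scale_x does not overflow 64 bits; the real code handles
 * the 128-bit product via multi_overflow()):
 *
 *	uint64_t now = mach_absolute_time();
 *	uint64_t delta = now - clock_calend.offset_count;
 *	struct bintime tod = clock_calend.bintime;  // boottime + scaled elapsed
 *	bintime_addx(&tod, delta * clock_calend.tick_scale_x);
 *	// tod now holds TOD = bintime + delta * scale
 */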
static uint64_t ticks_per_sec; /* ticks in a second (expressed in abs time) */
#define print_all_clock_variables_internal(...) do { } while (0)
#endif
-#if CONFIG_DTRACE
+#if CONFIG_DTRACE
/*
* is cleared atomically (by using a 1 bit add).
*/
static struct unlocked_clock_calend {
- struct clock_calend calend; /* copy of calendar */
- uint32_t gen; /* generation count */
-} flipflop[ 2];
+ struct clock_calend calend; /* copy of calendar */
+ uint32_t gen; /* generation count */
+} flipflop[2];
static void clock_track_calend_nowait(void);
static uint64_t clock_boottime;
static uint32_t clock_boottime_usec;
-#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
-MACRO_BEGIN \
- if (((rfrac) += (frac)) >= (unit)) { \
- (rfrac) -= (unit); \
- (rsecs) += 1; \
- } \
- (rsecs) += (secs); \
+#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
+MACRO_BEGIN \
+ if (((rfrac) += (frac)) >= (unit)) { \
+ (rfrac) -= (unit); \
+ (rsecs) += 1; \
+ } \
+ (rsecs) += (secs); \
MACRO_END
-#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
-MACRO_BEGIN \
- if ((int)((rfrac) -= (frac)) < 0) { \
- (rfrac) += (unit); \
- (rsecs) -= 1; \
- } \
- (rsecs) -= (secs); \
+#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
+MACRO_BEGIN \
+ if ((int)((rfrac) -= (frac)) < 0) { \
+ (rfrac) += (unit); \
+ (rsecs) -= 1; \
+ } \
+ (rsecs) -= (secs); \
MACRO_END
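/*
 * Example: with microsecond fractions, TIME_ADD carries across the unit
 * boundary. Starting from 1 s 500000 us:
 *
 *	clock_sec_t rsecs = 1; clock_usec_t rfrac = 500000;
 *	TIME_ADD(rsecs, 0, rfrac, 800000, USEC_PER_SEC);
 *	// rfrac wrapped past USEC_PER_SEC: rsecs == 2, rfrac == 300000
 */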
/*
void
clock_config(void)
{
-
clock_lock_init();
settime_lock_grp_attr = lck_grp_attr_alloc_init();
void
clock_timebase_init(void)
{
- uint64_t abstime;
+ uint64_t abstime;
nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
hz_tick_interval = (uint32_t)abstime;
mach_timebase_info_trap(
struct mach_timebase_info_trap_args *args)
{
- mach_vm_address_t out_info_addr = args->info;
- mach_timebase_info_data_t info = {};
+ mach_vm_address_t out_info_addr = args->info;
+ mach_timebase_info_data_t info = {};
clock_timebase_info(&info);
- copyout((void *)&info, out_info_addr, sizeof (info));
+ copyout((void *)&info, out_info_addr, sizeof(info));
- return (KERN_SUCCESS);
+ return KERN_SUCCESS;
}
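/*
 * User-space counterpart, for reference (a sketch; the multiply is
 * overflow-naive for very large tick counts):
 *
 *	#include <mach/mach_time.h>
 *
 *	uint64_t
 *	abs_to_ns(uint64_t abs)
 *	{
 *		static mach_timebase_info_data_t tb;
 *		if (tb.denom == 0) {
 *			(void)mach_timebase_info(&tb);  // serviced by the trap above
 *		}
 *		return abs * tb.numer / tb.denom;
 *	}
 */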
/*
*/
void
clock_get_calendar_microtime(
- clock_sec_t *secs,
- clock_usec_t *microsecs)
+ clock_sec_t *secs,
+ clock_usec_t *microsecs)
{
clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}
* Keep it as additional adjustment for the next sec.
*/
frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
- *s_adj_nsx = (frac>0)? frac << 32 : -( (-frac) << 32);
+ *s_adj_nsx = (frac > 0)? frac << 32 : -((-frac) << 32);
return;
}
* s_adj_nsx -> additional adj expressed in 64 bit frac of ns to apply to each sec.
*/
if (delta > ticks_per_sec) {
- sec = (delta/ticks_per_sec);
+ sec = (delta / ticks_per_sec);
new_ns = sec * s_scale_ns;
bintime_addns(&bt, new_ns);
if (s_adj_nsx) {
if (sec == 1) {
/* shortcut, no overflow can occur */
- if (s_adj_nsx > 0)
- bintime_addx(&bt, (uint64_t)s_adj_nsx/ (uint64_t)NSEC_PER_SEC);
- else
- bintime_subx(&bt, (uint64_t)-s_adj_nsx/ (uint64_t)NSEC_PER_SEC);
- }
- else{
+ if (s_adj_nsx > 0) {
+ bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC);
+ } else {
+ bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC);
+ }
+ } else {
/*
* s_adj_nsx is 64 bit frac of ns.
* sec*s_adj_nsx might overflow in int64_t.
}
}
delta = (delta % ticks_per_sec);
- }
+ }
over = multi_overflow(tick_scale_x, delta);
- if(over){
+ if (over) {
bt.sec += over;
}
static void
clock_get_calendar_absolute_and_microtime_locked(
- clock_sec_t *secs,
- clock_usec_t *microsecs,
- uint64_t *abstime)
+ clock_sec_t *secs,
+ clock_usec_t *microsecs,
+ uint64_t *abstime)
{
uint64_t now;
struct bintime bt;
now = mach_absolute_time();
- if (abstime)
+ if (abstime) {
*abstime = now;
+ }
bt = get_scaled_time(now);
bintime_add(&bt, &clock_calend.bintime);
static void
clock_get_calendar_absolute_and_nanotime_locked(
- clock_sec_t *secs,
- clock_usec_t *nanosecs,
- uint64_t *abstime)
+ clock_sec_t *secs,
+ clock_usec_t *nanosecs,
+ uint64_t *abstime)
{
uint64_t now;
struct bintime bt;
now = mach_absolute_time();
- if (abstime)
+ if (abstime) {
*abstime = now;
+ }
bt = get_scaled_time(now);
bintime_add(&bt, &clock_calend.bintime);
*/
void
clock_get_calendar_absolute_and_microtime(
- clock_sec_t *secs,
- clock_usec_t *microsecs,
- uint64_t *abstime)
+ clock_sec_t *secs,
+ clock_usec_t *microsecs,
+ uint64_t *abstime)
{
- spl_t s;
+ spl_t s;
s = splclock();
clock_lock();
*/
void
clock_get_calendar_nanotime(
- clock_sec_t *secs,
- clock_nsec_t *nanosecs)
+ clock_sec_t *secs,
+ clock_nsec_t *nanosecs)
{
- spl_t s;
+ spl_t s;
s = splclock();
clock_lock();
*/
void
clock_gettimeofday(
- clock_sec_t *secs,
- clock_usec_t *microsecs)
+ clock_sec_t *secs,
+ clock_usec_t *microsecs)
{
clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}
void
clock_gettimeofday_and_absolute_time(
- clock_sec_t *secs,
- clock_usec_t *microsecs,
- uint64_t *mach_time)
+ clock_sec_t *secs,
+ clock_usec_t *microsecs,
+ uint64_t *mach_time)
{
- uint64_t now;
- spl_t s;
- struct bintime bt;
+ uint64_t now;
+ spl_t s;
+ struct bintime bt;
s = splclock();
clock_lock();
*/
void
clock_set_calendar_microtime(
- clock_sec_t secs,
- clock_usec_t microsecs)
+ clock_sec_t secs,
+ clock_usec_t microsecs)
{
- uint64_t absolutesys;
- clock_sec_t newsecs;
- clock_sec_t oldsecs;
- clock_usec_t newmicrosecs;
- clock_usec_t oldmicrosecs;
- uint64_t commpage_value;
- spl_t s;
- struct bintime bt;
- clock_sec_t deltasecs;
- clock_usec_t deltamicrosecs;
+ uint64_t absolutesys;
+ clock_sec_t newsecs;
+ clock_sec_t oldsecs;
+ clock_usec_t newmicrosecs;
+ clock_usec_t oldmicrosecs;
+ uint64_t commpage_value;
+ spl_t s;
+ struct bintime bt;
+ clock_sec_t deltasecs;
+ clock_usec_t deltamicrosecs;
newsecs = secs;
newmicrosecs = microsecs;
#if DEVELOPMENT || DEBUG
if (g_should_log_clock_adjustments) {
os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
- __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
+ __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n",
- __func__, (unsigned long)secs, microsecs );
+ __func__, (unsigned long)secs, microsecs);
}
#endif
void
clock_update_calendar(void)
{
-
uint64_t now, delta;
struct bintime bt;
spl_t s;
os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment);
}
#endif
-
+
/*
* recomputing scale factors.
*/
clock_unlock();
splx(s);
- print_all_clock_variables(__func__, NULL,NULL,NULL,NULL, &calend_cp);
+ print_all_clock_variables(__func__, NULL, NULL, NULL, NULL, &calend_cp);
}
#if DEVELOPMENT || DEBUG
-void print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
+void
+print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
{
clock_sec_t offset_secs;
clock_usec_t offset_microsecs;
clock_usec_t bintime_microsecs;
clock_sec_t bootime_secs;
clock_usec_t bootime_microsecs;
-
- if (!g_should_log_clock_adjustments)
- return;
+
+ if (!g_should_log_clock_adjustments) {
+ return;
+ }
bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs);
os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
- func , clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
- clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
+ func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
+ clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
- func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
- (unsigned long)offset_secs, offset_microsecs);
+ func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
+ (unsigned long)offset_secs, offset_microsecs);
os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
- func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
- (unsigned long)bintime_secs, bintime_microsecs);
+ func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
+ (unsigned long)bintime_secs, bintime_microsecs);
os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n",
- func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
- (unsigned long)bootime_secs, bootime_microsecs);
+ func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
+ (unsigned long)bootime_secs, bootime_microsecs);
clock_sec_t basesleep_secs;
- clock_usec_t basesleep_microsecs;
-
+ clock_usec_t basesleep_microsecs;
+
bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
- func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
- (unsigned long)basesleep_secs, basesleep_microsecs);
-
+ func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
+ (unsigned long)basesleep_secs, basesleep_microsecs);
}
-void print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
+void
+print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
{
- if (!g_should_log_clock_adjustments)
+ if (!g_should_log_clock_adjustments) {
return;
+ }
struct bintime bt;
clock_sec_t wall_secs;
uint64_t delta;
if (pmu_secs) {
- os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
+ os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
}
if (sys_secs) {
os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
print_all_clock_variables_internal(func, clock_calend_cp);
now = mach_absolute_time();
- delta = now - clock_calend_cp->offset_count;
+ delta = now - clock_calend_cp->offset_count;
- bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
+ bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
bintime_add(&bt, &clock_calend_cp->bintime);
bintime2usclock(&bt, &wall_secs, &wall_microsecs);
os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
- func, (unsigned long)wall_secs, wall_microsecs, now);
+ func, (unsigned long)wall_secs, wall_microsecs, now);
}
void
clock_initialize_calendar(void)
{
- clock_sec_t sys; // sleepless time since boot in seconds
- clock_sec_t secs; // Current UTC time
- clock_sec_t utc_offset_secs; // Difference in current UTC time and sleepless time since boot
- clock_usec_t microsys;
- clock_usec_t microsecs;
- clock_usec_t utc_offset_microsecs;
- spl_t s;
- struct bintime bt;
- struct bintime monotonic_bt;
- struct latched_time monotonic_time;
- uint64_t monotonic_usec_total;
+ clock_sec_t sys; // sleepless time since boot in seconds
+ clock_sec_t secs; // Current UTC time
+ clock_sec_t utc_offset_secs; // Difference between current UTC time and sleepless time since boot
+ clock_usec_t microsys;
+ clock_usec_t microsecs;
+ clock_usec_t utc_offset_microsecs;
+ spl_t s;
+ struct bintime bt;
+ struct bintime monotonic_bt;
+ struct latched_time monotonic_time;
+ uint64_t monotonic_usec_total;
clock_sec_t sys2, monotonic_sec;
- clock_usec_t microsys2, monotonic_usec;
- size_t size;
+ clock_usec_t microsys2, monotonic_usec;
+ size_t size;
// Get the UTC time and corresponding sys time
PEGetUTCTimeOfDay(&secs, &microsecs);
*/
if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
os_log(OS_LOG_DEFAULT, "%s WARNING: UTC time is less then sys time, (%lu s %d u) UTC (%lu s %d u) sys\n",
- __func__, (unsigned long) secs, microsecs, (unsigned long)sys, microsys);
+ __func__, (unsigned long) secs, microsecs, (unsigned long)sys, microsys);
secs = utc_offset_secs = sys;
microsecs = utc_offset_microsecs = microsys;
}
clock_calend.s_adj_nsx = 0;
if (has_monotonic_clock) {
-
monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
clock_unlock();
splx(s);
- print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);
+ print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);
/*
* Send host notifications.
*/
host_notify_calendar_change();
-
+
#if CONFIG_DTRACE
clock_track_calend_nowait();
#endif
void
clock_wakeup_calendar(void)
{
- clock_sec_t wake_sys_sec;
+ clock_sec_t wake_sys_sec;
clock_usec_t wake_sys_usec;
- clock_sec_t wake_sec;
- clock_usec_t wake_usec;
+ clock_sec_t wake_sec;
+ clock_usec_t wake_usec;
clock_sec_t wall_time_sec;
clock_usec_t wall_time_usec;
- clock_sec_t diff_sec;
- clock_usec_t diff_usec;
+ clock_sec_t diff_sec;
+ clock_usec_t diff_usec;
clock_sec_t var_s;
clock_usec_t var_us;
- spl_t s;
- struct bintime bt, last_sleep_bt;
+ spl_t s;
+ struct bintime bt, last_sleep_bt;
struct latched_time monotonic_time;
- uint64_t monotonic_usec_total;
- uint64_t wake_abs;
- size_t size;
+ uint64_t monotonic_usec_total;
+ uint64_t wake_abs;
+ size_t size;
/*
* If the platform has the monotonic clock use that to
* it does so only through the settimeofday interface.
*/
if (has_monotonic_clock) {
-
#if DEVELOPMENT || DEBUG
/*
* Just for debugging, get the wake UTC time.
}
#if DEVELOPMENT || DEBUG
- os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs);
- if (has_monotonic_clock) {
- os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us);
- }
+ os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs);
+ if (has_monotonic_clock) {
+ os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us);
+ }
#endif /* DEVELOPMENT || DEBUG */
s = splclock();
clock_lock();
-
+
commpage_disable_timestamp();
#if DEVELOPMENT || DEBUG
*/
if ((bt.sec > clock_calend.basesleep.sec) ||
((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {
-
// last_sleep is the difference between (current monotonic - abs) and (last wake monotonic - abs)
last_sleep_bt = bt;
bintime_sub(&last_sleep_bt, &clock_calend.basesleep);
bintime2usclock(&last_sleep_bt, &var_s, &var_us);
os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long) var_s, var_us);
-
} else {
bintime2usclock(&clock_calend.basesleep, &var_s, &var_us);
os_log_error(OS_LOG_DEFAULT, "WARNING: last wake monotonic-sys time (%lu s %d u) is greater than current monotonic-sys time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) var_s, var_us, (unsigned long) diff_sec, diff_usec);
bintime_add(&bt, &clock_calend.bintime);
bintime2usclock(&bt, &wall_time_sec, &wall_time_usec);
- if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec) ) {
+ if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec)) {
os_log(OS_LOG_DEFAULT, "WARNING: wall time (%lu s %d u) is greater than current UTC time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) wall_time_sec, wall_time_usec, (unsigned long) wake_sec, wake_usec);
mach_absolutetime_last_sleep = 0;
}
done:
KERNEL_DEBUG_CONSTANT(
- MACHDBG_CODE(DBG_MACH_CLOCK,MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
- (uintptr_t) mach_absolutetime_last_sleep,
- (uintptr_t) mach_absolutetime_asleep,
- (uintptr_t) (mach_absolutetime_last_sleep >> 32),
- (uintptr_t) (mach_absolutetime_asleep >> 32),
- 0);
+ MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
+ (uintptr_t) mach_absolutetime_last_sleep,
+ (uintptr_t) mach_absolutetime_asleep,
+ (uintptr_t) (mach_absolutetime_last_sleep >> 32),
+ (uintptr_t) (mach_absolutetime_asleep >> 32),
+ 0);
commpage_update_mach_continuous_time(mach_absolutetime_asleep);
adjust_cont_time_thread_calls();
*/
void
clock_get_boottime_nanotime(
- clock_sec_t *secs,
- clock_nsec_t *nanosecs)
+ clock_sec_t *secs,
+ clock_nsec_t *nanosecs)
{
- spl_t s;
+ spl_t s;
s = splclock();
clock_lock();
*/
void
clock_get_boottime_microtime(
- clock_sec_t *secs,
- clock_usec_t *microsecs)
+ clock_sec_t *secs,
+ clock_usec_t *microsecs)
{
- spl_t s;
+ spl_t s;
s = splclock();
clock_lock();
*/
static void
mach_wait_until_continue(
- __unused void *parameter,
- wait_result_t wresult)
+ __unused void *parameter,
+ wait_result_t wresult)
{
thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
/*NOTREACHED*/
* Parameters: args->deadline Amount of time to wait
*
* Returns: 0 Success
- * !0 Not success
+ * !0 Not success
*
*/
kern_return_t
mach_wait_until_trap(
- struct mach_wait_until_trap_args *args)
+ struct mach_wait_until_trap_args *args)
{
- uint64_t deadline = args->deadline;
- wait_result_t wresult;
+ uint64_t deadline = args->deadline;
+ wait_result_t wresult;
wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
- TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
- if (wresult == THREAD_WAITING)
+ TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
+ if (wresult == THREAD_WAITING) {
wresult = thread_block(mach_wait_until_continue);
+ }
- return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
+ return (wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS;
}
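/*
 * From user space this trap is reached via mach_wait_until(). A sketch
 * that sleeps roughly 10 ms of absolute time:
 *
 *	#include <mach/mach_time.h>
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ticks = (10ULL * 1000 * 1000) * tb.denom / tb.numer;  // 10 ms in ticks
 *	kern_return_t kr = mach_wait_until(mach_absolute_time() + ticks);
 *	// kr == KERN_ABORTED if the wait was interrupted
 */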
void
clock_delay_until(
- uint64_t deadline)
+ uint64_t deadline)
{
- uint64_t now = mach_absolute_time();
+ uint64_t now = mach_absolute_time();
- if (now >= deadline)
+ if (now >= deadline) {
return;
+ }
_clock_delay_until_deadline(deadline - now, deadline);
}
*/
void
_clock_delay_until_deadline(
- uint64_t interval,
- uint64_t deadline)
+ uint64_t interval,
+ uint64_t deadline)
{
_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}
*/
void
_clock_delay_until_deadline_with_leeway(
- uint64_t interval,
- uint64_t deadline,
- uint64_t leeway)
+ uint64_t interval,
+ uint64_t deadline,
+ uint64_t leeway)
{
-
- if (interval == 0)
+ if (interval == 0) {
return;
+ }
- if ( ml_delay_should_spin(interval) ||
- get_preemption_level() != 0 ||
- ml_get_interrupts_enabled() == FALSE ) {
+ if (ml_delay_should_spin(interval) ||
+ get_preemption_level() != 0 ||
+ ml_get_interrupts_enabled() == FALSE) {
machine_delay_until(interval, deadline);
} else {
/*
void
delay_for_interval(
- uint32_t interval,
- uint32_t scale_factor)
+ uint32_t interval,
+ uint32_t scale_factor)
{
- uint64_t abstime;
+ uint64_t abstime;
clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
void
delay_for_interval_with_leeway(
- uint32_t interval,
- uint32_t leeway,
- uint32_t scale_factor)
+ uint32_t interval,
+ uint32_t leeway,
+ uint32_t scale_factor)
{
- uint64_t abstime_interval;
- uint64_t abstime_leeway;
+ uint64_t abstime_interval;
+ uint64_t abstime_leeway;
clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);
void
delay(
- int usec)
+ int usec)
{
delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
*/
void
clock_interval_to_deadline(
- uint32_t interval,
- uint32_t scale_factor,
- uint64_t *result)
+ uint32_t interval,
+ uint32_t scale_factor,
+ uint64_t *result)
{
- uint64_t abstime;
+ uint64_t abstime;
clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
void
clock_absolutetime_interval_to_deadline(
- uint64_t abstime,
- uint64_t *result)
+ uint64_t abstime,
+ uint64_t *result)
{
*result = mach_absolute_time() + abstime;
}
void
clock_continuoustime_interval_to_deadline(
- uint64_t conttime,
- uint64_t *result)
+ uint64_t conttime,
+ uint64_t *result)
{
*result = mach_continuous_time() + conttime;
}
void
clock_get_uptime(
- uint64_t *result)
+ uint64_t *result)
{
*result = mach_absolute_time();
}
void
clock_deadline_for_periodic_event(
- uint64_t interval,
- uint64_t abstime,
- uint64_t *deadline)
+ uint64_t interval,
+ uint64_t abstime,
+ uint64_t *deadline)
{
assert(interval != 0);
*deadline = abstime + interval;
abstime = mach_absolute_time();
- if (*deadline <= abstime)
+ if (*deadline <= abstime) {
*deadline = abstime + interval;
+ }
}
}
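/*
 * Typical use of clock_deadline_for_periodic_event(): a drift-free
 * periodic timer. Each pass advances the deadline by one interval, and
 * the resync above only kicks in if we fell behind by more than a period
 * (timer programming elided):
 *
 *	uint64_t period, deadline = mach_absolute_time();
 *	clock_interval_to_absolutetime_interval(10, NSEC_PER_MSEC, &period);
 *	for (;;) {
 *		clock_deadline_for_periodic_event(period, mach_absolute_time(), &deadline);
 *		// arm a timer for `deadline` and block until it fires
 *	}
 */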
uint64_t
mach_continuous_time(void)
{
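	/*
	 * Lock-free read: if mach_absolutetime_asleep is unchanged across the
	 * two reads that bracket the clock sample, the sum is consistent;
	 * otherwise a wakeup updated it concurrently and we retry.
	 */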
- while(1) {
+ while (1) {
uint64_t read1 = mach_absolutetime_asleep;
uint64_t absolute = mach_absolute_time();
OSMemoryBarrier();
uint64_t read2 = mach_absolutetime_asleep;
- if(__builtin_expect(read1 == read2, 1)) {
+ if (__builtin_expect(read1 == read2, 1)) {
return absolute + read1;
}
}
uint64_t
mach_continuous_approximate_time(void)
{
- while(1) {
+ while (1) {
uint64_t read1 = mach_absolutetime_asleep;
uint64_t absolute = mach_approximate_time();
OSMemoryBarrier();
uint64_t read2 = mach_absolutetime_asleep;
- if(__builtin_expect(read1 == read2, 1)) {
+ if (__builtin_expect(read1 == read2, 1)) {
return absolute + read1;
}
}
* continuoustime_to_absolutetime
* Must be called with interrupts disabled
* Returned value is only valid until the next update to
- * mach_continuous_time
+ * mach_continuous_time
*/
uint64_t
-continuoustime_to_absolutetime(uint64_t conttime) {
- if (conttime <= mach_absolutetime_asleep)
+continuoustime_to_absolutetime(uint64_t conttime)
+{
+ if (conttime <= mach_absolutetime_asleep) {
return 0;
- else
+ } else {
return conttime - mach_absolutetime_asleep;
+ }
}
/*
* absolutetime_to_continuoustime
* Must be called with interrupts disabled
* Returned value is only valid until the next update to
- * mach_continuous_time
+ * mach_continuous_time
*/
uint64_t
-absolutetime_to_continuoustime(uint64_t abstime) {
+absolutetime_to_continuoustime(uint64_t abstime)
+{
return abstime + mach_absolutetime_asleep;
}
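/*
 * The two conversions above are inverses while interrupts stay disabled,
 * except that continuoustime_to_absolutetime() clamps to 0 for continuous
 * times that predate the accumulated sleep:
 *
 *	uint64_t abs  = mach_absolute_time();
 *	uint64_t cont = absolutetime_to_continuoustime(abs);
 *	assert(continuoustime_to_absolutetime(cont) == abs);
 */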
-#if CONFIG_DTRACE
+#if CONFIG_DTRACE
/*
* clock_get_calendar_nanotime_nowait
*/
void
clock_get_calendar_nanotime_nowait(
- clock_sec_t *secs,
- clock_nsec_t *nanosecs)
+ clock_sec_t *secs,
+ clock_nsec_t *nanosecs)
{
int i = 0;
- uint64_t now;
+ uint64_t now;
struct unlocked_clock_calend stable;
struct bintime bt;
for (;;) {
- stable = flipflop[i]; /* take snapshot */
+ stable = flipflop[i]; /* take snapshot */
/*
* Use a barrier instruction to ensure atomicity. We AND
* and if we caught it at a good time, it will be equal (and
* our snapshot is therefore stable).
*/
- if (flipflop[i].gen == stable.gen)
+ if (flipflop[i].gen == stable.gen) {
break;
+ }
/* Switch to the other element of the flipflop, and try again. */
i ^= 1;
bintime2nsclock(&bt, secs, nanosecs);
}
-static void
+static void
clock_track_calend_nowait(void)
{
int i;
}
}
-#endif /* CONFIG_DTRACE */
-
+#endif /* CONFIG_DTRACE */