/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
/*
- * File: kern/clock.c
- * Purpose: Routines for the creation and use of kernel
- * alarm clock services. This file and the ipc
- * routines in kern/ipc_clock.c constitute the
- * machine-independent clock service layer.
*/
-#include <cpus.h>
-#include <mach_host.h>
-
-#include <mach/boolean.h>
-#include <mach/processor_info.h>
-#include <mach/vm_param.h>
-#include <machine/mach_param.h>
-#include <kern/cpu_number.h>
-#include <kern/misc_protos.h>
-#include <kern/lock.h>
-#include <kern/host.h>
+#include <mach/mach_types.h>
+
#include <kern/spl.h>
+#include <kern/sched_prim.h>
#include <kern/thread.h>
-#include <kern/thread_swap.h>
-#include <kern/ipc_host.h>
#include <kern/clock.h>
-#include <kern/zalloc.h>
-#include <ipc/ipc_port.h>
+#include <kern/host_notify.h>
+
+#include <IOKit/IOPlatformExpert.h>
-#include <mach/mach_syscalls.h>
-#include <mach/clock_reply.h>
+#include <machine/commpage.h>
+
+#include <mach/mach_traps.h>
#include <mach/mach_time.h>
-#include <kern/mk_timer.h>
+uint32_t hz_tick_interval = 1;
+
+
+decl_simple_lock_data(,clock_lock)
+
+#define clock_lock() \
+ simple_lock(&clock_lock)
+
+#define clock_unlock() \
+ simple_unlock(&clock_lock)
+
+#define clock_lock_init() \
+ simple_lock_init(&clock_lock, 0)
+
/*
- * Exported interface
+ * Time of day (calendar) variables.
+ *
+ * Algorithm:
+ *
+ * TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
+ *
+ * where CONV converts absolute time units into seconds and a fraction.
*/
+static struct clock_calend {
+ uint64_t epoch;
+ uint64_t offset;
+
+ int32_t adjdelta; /* Nanosecond time delta for this adjustment period */
+ uint64_t adjstart; /* Absolute time value for start of this adjustment period */
+ uint32_t adjoffset; /* Absolute time offset for this adjustment period as absolute value */
+} clock_calend;
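+
+/*
+ * Worked reading of the algorithm above (illustrative): a caller of
+ * clock_get_calendar_microtime() below effectively computes
+ *
+ *	now = mach_absolute_time() + clock_calend.offset;
+ *	absolutetime_to_microtime(now, &secs, &microsecs);
+ *	secs += clock_calend.epoch;
+ *
+ * so epoch supplies the calendar seconds component and offset the
+ * fractional part, both established when the calendar was last set.
+ */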
-#include <mach/clock_server.h>
-#include <mach/mach_host_server.h>
+#if CONFIG_DTRACE
-/* local data declarations */
-decl_simple_lock_data(static,ClockLock) /* clock system synchronization */
-static struct zone *alarm_zone; /* zone for user alarms */
-static struct alarm *alrmfree; /* alarm free list pointer */
-static struct alarm *alrmdone; /* alarm done list pointer */
-static long alrm_seqno; /* uniquely identifies alarms */
-static thread_call_data_t alarm_deliver;
+/*
+ * Unlocked calendar flipflop; this is used to track a clock_calend such
+ * that we can safely access a snapshot of a valid clock_calend structure
+ * without needing to take any locks to do it.
+ *
+ * The trick is to use a generation count, setting its low bit while the
+ * structure is being updated; readers mask the bit off, and through use
+ * of the hw_atomic functions we guarantee that the generation is
+ * incremented when the bit is cleared atomically (by using a 1 bit add).
+ */
+static struct unlocked_clock_calend {
+ struct clock_calend calend; /* copy of calendar */
+ uint32_t gen; /* generation count */
+} flipflop[2];
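+
+/*
+ * Generation count walkthrough (illustrative): with flipflop[i].gen at 4,
+ * the writer ORs in the low bit (gen == 5), copies the calendar, then does
+ * a 1 bit add (gen == 6). A reader that snapshotted gen == 5 masks it back
+ * to 4, compares against the live value (5 or 6), detects the mismatch,
+ * and retries on the other element.
+ */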
-decl_simple_lock_data(static,calend_adjlock)
-static int64_t calend_adjtotal;
-static uint32_t calend_adjdelta;
+static void clock_track_calend_nowait(void);
-static timer_call_data_t calend_adjcall;
-static uint64_t calend_adjinterval, calend_adjdeadline;
-
-/* backwards compatibility */
-int hz = HZ; /* GET RID OF THIS !!! */
-int tick = (1000000 / HZ); /* GET RID OF THIS !!! */
-
-/* external declarations */
-extern struct clock clock_list[];
-extern int clock_count;
-
-/* local clock subroutines */
-static
-void flush_alarms(
- clock_t clock);
-
-static
-void post_alarm(
- clock_t clock,
- alarm_t alarm);
-
-static
-int check_time(
- alarm_type_t alarm_type,
- mach_timespec_t *alarm_time,
- mach_timespec_t *clock_time);
-
-static
-void clock_alarm_deliver(
- thread_call_param_t p0,
- thread_call_param_t p1);
-
-static
-void clock_calend_adjust(
- timer_call_param_t p0,
- timer_call_param_t p1);
+#endif
/*
- * Macros to lock/unlock clock system.
+ * Calendar adjustment variables and values.
*/
-#define LOCK_CLOCK(s) \
- s = splclock(); \
- simple_lock(&ClockLock);
+#define calend_adjperiod (NSEC_PER_SEC / 100) /* adjustment period, ns */
+#define calend_adjskew (40 * NSEC_PER_USEC) /* "standard" skew, ns / period */
+#define calend_adjbig (NSEC_PER_SEC) /* use 10x skew above adjbig ns */
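+
+/*
+ * For a sense of scale: 40 us of skew per 10 ms period corrects about
+ * 4 ms of error per second of real time; adjustments larger than
+ * calend_adjbig slew at 10x that rate.
+ */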
-#define UNLOCK_CLOCK(s) \
- simple_unlock(&ClockLock); \
- splx(s);
+static int64_t calend_adjtotal; /* Nanosecond remaining total adjustment */
+static uint64_t calend_adjdeadline; /* Absolute time value for next adjustment period */
+static uint32_t calend_adjinterval; /* Absolute time interval of adjustment period */
+
+static timer_call_data_t calend_adjcall;
+static uint32_t calend_adjactive;
+
+static uint32_t calend_set_adjustment(
+ long *secs,
+ int *microsecs);
+
+static void calend_adjust_call(void);
+static uint32_t calend_adjust(void);
+
+static thread_call_data_t calend_wakecall;
+
+extern void IOKitResetTime(void);
+
+void _clock_delay_until_deadline(uint64_t interval,
+ uint64_t deadline);
+
+static uint64_t clock_boottime; /* Seconds boottime epoch */
+
+#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
+MACRO_BEGIN \
+ if (((rfrac) += (frac)) >= (unit)) { \
+ (rfrac) -= (unit); \
+ (rsecs) += 1; \
+ } \
+ (rsecs) += (secs); \
+MACRO_END
+
+#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
+MACRO_BEGIN \
+ if ((int)((rfrac) -= (frac)) < 0) { \
+ (rfrac) += (unit); \
+ (rsecs) -= 1; \
+ } \
+ (rsecs) -= (secs); \
+MACRO_END
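+
+/*
+ * Usage example: clock_set_calendar_microtime() below does
+ *
+ *	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
+ *
+ * subtracting the system uptime (sys, microsys) from the new calendar
+ * value (secs, microsecs), borrowing a second when the microsecond
+ * field underflows.
+ */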
/*
- * Configure the clock system. (Not sure if we need this,
- * as separate from clock_init()).
+ * clock_config:
+ *
+ * Called once at boot to configure the clock subsystem.
*/
void
clock_config(void)
{
- clock_t clock;
- register int i;
+ clock_lock_init();
- if (cpu_number() != master_cpu)
- panic("clock_config");
+ timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
+ thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);
- /*
- * Configure clock devices.
- */
- simple_lock_init(&calend_adjlock, ETAP_MISC_CLOCK);
- simple_lock_init(&ClockLock, ETAP_MISC_CLOCK);
- for (i = 0; i < clock_count; i++) {
- clock = &clock_list[i];
- if (clock->cl_ops) {
- if ((*clock->cl_ops->c_config)() == 0)
- clock->cl_ops = 0;
- }
- }
-
- /* start alarm sequence numbers at 0 */
- alrm_seqno = 0;
+ clock_oldconfig();
}
/*
- * Initialize the clock system.
+ * clock_init:
+ *
+ * Called on a processor each time started.
*/
void
clock_init(void)
{
- clock_t clock;
- register int i;
-
- /*
- * Initialize basic clock structures.
- */
- for (i = 0; i < clock_count; i++) {
- clock = &clock_list[i];
- if (clock->cl_ops)
- (*clock->cl_ops->c_init)();
- }
+ clock_oldinit();
}
/*
- * Initialize the clock ipc service facility.
+ * clock_timebase_init:
+ *
+ * Called by machine dependent code
+ * to initialize areas dependent on the
+ * timebase value. May be called multiple
+ * times during start up.
*/
void
-clock_service_create(void)
+clock_timebase_init(void)
{
- clock_t clock;
- register int i;
-
- mk_timer_initialize();
+ uint64_t abstime;
- /*
- * Initialize ipc clock services.
- */
- for (i = 0; i < clock_count; i++) {
- clock = &clock_list[i];
- if (clock->cl_ops) {
- ipc_clock_init(clock);
- ipc_clock_enable(clock);
- }
- }
+ nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
+ calend_adjinterval = (uint32_t)abstime;
- timer_call_setup(&calend_adjcall, clock_calend_adjust, NULL);
+ nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
+ hz_tick_interval = (uint32_t)abstime;
- /*
- * Initialize clock service alarms.
- */
- i = sizeof(struct alarm);
- alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms");
-
- thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL);
+ sched_timebase_init();
}
/*
- * Get the service port on a clock.
+ * mach_timebase_info_trap:
+ *
+ * User trap returns timebase constant.
*/
kern_return_t
-host_get_clock_service(
- host_t host,
- clock_id_t clock_id,
- clock_t *clock) /* OUT */
+mach_timebase_info_trap(
+ struct mach_timebase_info_trap_args *args)
{
- if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) {
- *clock = CLOCK_NULL;
- return (KERN_INVALID_ARGUMENT);
- }
+ mach_vm_address_t out_info_addr = args->info;
+ mach_timebase_info_data_t info;
- *clock = &clock_list[clock_id];
- if ((*clock)->cl_ops == 0)
- return (KERN_FAILURE);
- return (KERN_SUCCESS);
-}
+ clock_timebase_info(&info);
-/*
- * Get the control port on a clock.
- */
-kern_return_t
-host_get_clock_control(
- host_priv_t host_priv,
- clock_id_t clock_id,
- clock_t *clock) /* OUT */
-{
- if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) {
- *clock = CLOCK_NULL;
- return (KERN_INVALID_ARGUMENT);
- }
+ copyout((void *)&info, out_info_addr, sizeof (info));
- *clock = &clock_list[clock_id];
- if ((*clock)->cl_ops == 0)
- return (KERN_FAILURE);
return (KERN_SUCCESS);
}
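+
+/*
+ * Example (user-level sketch): the returned constants convert absolute
+ * time units to nanoseconds as ns = abstime * numer / denom, e.g.
+ *
+ *	mach_timebase_info_data_t tb;
+ *	mach_timebase_info(&tb);
+ *	uint64_t ns = mach_absolute_time() * tb.numer / tb.denom;
+ */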
/*
- * Get the current clock time.
+ * Calendar routines.
*/
-kern_return_t
-clock_get_time(
- clock_t clock,
- mach_timespec_t *cur_time) /* OUT */
-{
- if (clock == CLOCK_NULL)
- return (KERN_INVALID_ARGUMENT);
- return ((*clock->cl_ops->c_gettime)(cur_time));
-}
/*
- * Get clock attributes.
+ * clock_get_calendar_microtime:
+ *
+ * Returns the current calendar value,
+ * microseconds as the fraction.
*/
-kern_return_t
-clock_get_attributes(
- clock_t clock,
- clock_flavor_t flavor,
- clock_attr_t attr, /* OUT */
- mach_msg_type_number_t *count) /* IN/OUT */
+void
+clock_get_calendar_microtime(
+ clock_sec_t *secs,
+ clock_usec_t *microsecs)
{
- kern_return_t (*getattr)(
- clock_flavor_t flavor,
- clock_attr_t attr,
- mach_msg_type_number_t *count);
-
- if (clock == CLOCK_NULL)
- return (KERN_INVALID_ARGUMENT);
- if (getattr = clock->cl_ops->c_getattr)
- return((*getattr)(flavor, attr, count));
- else
- return (KERN_FAILURE);
+ clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}
/*
- * Set the current clock time.
+ * clock_get_calendar_absolute_and_microtime:
+ *
+ * Returns the current calendar value,
+ * microseconds as the fraction. Also
+ * returns mach_absolute_time if abstime
+ * is not NULL.
*/
-kern_return_t
-clock_set_time(
- clock_t clock,
- mach_timespec_t new_time)
+void
+clock_get_calendar_absolute_and_microtime(
+ clock_sec_t *secs,
+ clock_usec_t *microsecs,
+ uint64_t *abstime)
{
- mach_timespec_t *clock_time;
- kern_return_t (*settime)(
- mach_timespec_t *clock_time);
- extern kern_return_t
- calend_settime(
- mach_timespec_t *clock_time);
-
- if (clock == CLOCK_NULL)
- return (KERN_INVALID_ARGUMENT);
- if ((settime = clock->cl_ops->c_settime) == 0)
- return (KERN_FAILURE);
- if (settime == calend_settime)
- return (KERN_FAILURE);
- clock_time = &new_time;
- if (BAD_MACH_TIMESPEC(clock_time))
- return (KERN_INVALID_VALUE);
+ uint64_t now;
+ spl_t s;
- /*
- * Flush all outstanding alarms.
- */
- flush_alarms(clock);
+ s = splclock();
+ clock_lock();
+
+ now = mach_absolute_time();
+ if (abstime)
+ *abstime = now;
+
+ if (clock_calend.adjdelta < 0) {
+ uint32_t t32;
+
+ /*
+ * Since offset is decremented during a negative adjustment,
+ * ensure that time increases monotonically without going
+ * temporarily backwards.
+ * If the delta has not yet passed, now is set to the start
+ * of the current adjustment period; otherwise, we're between
+ * the expiry of the delta and the next call to calend_adjust(),
+ * and we offset accordingly.
+ */
+ if (now > clock_calend.adjstart) {
+ t32 = (uint32_t)(now - clock_calend.adjstart);
- /*
- * Set the new time.
- */
- return ((*settime)(clock_time));
+ if (t32 > clock_calend.adjoffset)
+ now -= clock_calend.adjoffset;
+ else
+ now = clock_calend.adjstart;
+ }
+ }
+
+ now += clock_calend.offset;
+
+ absolutetime_to_microtime(now, secs, microsecs);
+
+ *secs += (clock_sec_t)clock_calend.epoch;
+
+ clock_unlock();
+ splx(s);
}
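+
+/*
+ * Clamp example: just after calend_adjust() applies a negative delta,
+ * offset has already been reduced by adjoffset and adjstart marks that
+ * instant. Until adjoffset's worth of absolute time has elapsed, a
+ * reader is pinned to adjstart; afterwards adjoffset is subtracted,
+ * keeping the calendar monotonic across the adjustment period.
+ */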
/*
- * Set the clock alarm resolution.
+ * clock_get_calendar_nanotime:
+ *
+ * Returns the current calendar value,
+ * nanoseconds as the fraction.
+ *
+ * Since we do not have an interface to
+ * set the calendar with resolution greater
+ * than a microsecond, we honor that here.
*/
-kern_return_t
-clock_set_attributes(
- clock_t clock,
- clock_flavor_t flavor,
- clock_attr_t attr,
- mach_msg_type_number_t count)
+void
+clock_get_calendar_nanotime(
+ clock_sec_t *secs,
+ clock_nsec_t *nanosecs)
{
- kern_return_t (*setattr)(
- clock_flavor_t flavor,
- clock_attr_t attr,
- mach_msg_type_number_t count);
-
- if (clock == CLOCK_NULL)
- return (KERN_INVALID_ARGUMENT);
- if (setattr = clock->cl_ops->c_setattr)
- return ((*setattr)(flavor, attr, count));
- else
- return (KERN_FAILURE);
+ uint64_t now;
+ spl_t s;
+
+ s = splclock();
+ clock_lock();
+
+ now = mach_absolute_time();
+
+ if (clock_calend.adjdelta < 0) {
+ uint32_t t32;
+
+ if (now > clock_calend.adjstart) {
+ t32 = (uint32_t)(now - clock_calend.adjstart);
+
+ if (t32 > clock_calend.adjoffset)
+ now -= clock_calend.adjoffset;
+ else
+ now = clock_calend.adjstart;
+ }
+ }
+
+ now += clock_calend.offset;
+
+ absolutetime_to_microtime(now, secs, nanosecs);
+
+ *nanosecs *= NSEC_PER_USEC;
+
+ *secs += (clock_sec_t)clock_calend.epoch;
+
+ clock_unlock();
+ splx(s);
}
/*
- * Setup a clock alarm.
+ * clock_gettimeofday:
+ *
+ * Kernel interface for commpage implementation of
+ * gettimeofday() syscall.
+ *
+ * Returns the current calendar value, and updates the
+ * commpage info as appropriate. Because most calls to
+ * gettimeofday() are handled in user mode by the commpage,
+ * this routine should be used infrequently.
*/
-kern_return_t
-clock_alarm(
- clock_t clock,
- alarm_type_t alarm_type,
- mach_timespec_t alarm_time,
- ipc_port_t alarm_port,
- mach_msg_type_name_t alarm_port_type)
+void
+clock_gettimeofday(
+ clock_sec_t *secs,
+ clock_usec_t *microsecs)
{
- alarm_t alarm;
- mach_timespec_t clock_time;
- int chkstat;
- kern_return_t reply_code;
- spl_t s;
-
- if (clock == CLOCK_NULL)
- return (KERN_INVALID_ARGUMENT);
- if (clock->cl_ops->c_setalrm == 0)
- return (KERN_FAILURE);
- if (IP_VALID(alarm_port) == 0)
- return (KERN_INVALID_CAPABILITY);
+ uint64_t now;
+ spl_t s;
- /*
- * Check alarm parameters. If parameters are invalid,
- * send alarm message immediately.
- */
- (*clock->cl_ops->c_gettime)(&clock_time);
- chkstat = check_time(alarm_type, &alarm_time, &clock_time);
- if (chkstat <= 0) {
- reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS);
- clock_alarm_reply(alarm_port, alarm_port_type,
- reply_code, alarm_type, clock_time);
- return (KERN_SUCCESS);
+ s = splclock();
+ clock_lock();
+
+ now = mach_absolute_time();
+
+ if (clock_calend.adjdelta >= 0) {
+ clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
}
+ else {
+ uint32_t t32;
- /*
- * Get alarm and add to clock alarm list.
- */
+ if (now > clock_calend.adjstart) {
+ t32 = (uint32_t)(now - clock_calend.adjstart);
- LOCK_CLOCK(s);
- if ((alarm = alrmfree) == 0) {
- UNLOCK_CLOCK(s);
- alarm = (alarm_t) zalloc(alarm_zone);
- if (alarm == 0)
- return (KERN_RESOURCE_SHORTAGE);
- LOCK_CLOCK(s);
+ if (t32 > clock_calend.adjoffset)
+ now -= clock_calend.adjoffset;
+ else
+ now = clock_calend.adjstart;
+ }
+
+ now += clock_calend.offset;
+
+ absolutetime_to_microtime(now, secs, microsecs);
+
+ *secs += (clock_sec_t)clock_calend.epoch;
}
- else
- alrmfree = alarm->al_next;
-
- alarm->al_status = ALARM_CLOCK;
- alarm->al_time = alarm_time;
- alarm->al_type = alarm_type;
- alarm->al_port = alarm_port;
- alarm->al_port_type = alarm_port_type;
- alarm->al_clock = clock;
- alarm->al_seqno = alrm_seqno++;
- post_alarm(clock, alarm);
- UNLOCK_CLOCK(s);
- return (KERN_SUCCESS);
+ clock_unlock();
+ splx(s);
}
/*
- * Sleep on a clock. System trap. User-level libmach clock_sleep
- * interface call takes a mach_timespec_t sleep_time argument which it
- * converts to sleep_sec and sleep_nsec arguments which are then
- * passed to clock_sleep_trap.
+ * clock_set_calendar_microtime:
+ *
+ * Sets the current calendar value by
+ * recalculating the epoch and offset
+ * from the system clock.
+ *
+ * Also adjusts the boottime to keep the
+ * value consistent, writes the new
+ * calendar value to the platform clock,
+ * and sends calendar change notifications.
*/
-kern_return_t
-clock_sleep_trap(
- mach_port_name_t clock_name,
- sleep_type_t sleep_type,
- int sleep_sec,
- int sleep_nsec,
- mach_timespec_t *wakeup_time)
+void
+clock_set_calendar_microtime(
+ clock_sec_t secs,
+ clock_usec_t microsecs)
{
- clock_t clock;
- mach_timespec_t swtime;
- kern_return_t rvalue;
+ clock_sec_t sys;
+ clock_usec_t microsys;
+ clock_sec_t newsecs;
+ clock_usec_t newmicrosecs;
+ spl_t s;
+
+ newsecs = secs;
+ newmicrosecs = microsecs;
+
+ s = splclock();
+ clock_lock();
+
+ commpage_disable_timestamp();
/*
- * Convert the trap parameters.
+ * Calculate the new calendar epoch based on
+ * the new value and the system clock.
*/
- if (clock_name != MACH_PORT_NULL)
- clock = port_name_to_clock(clock_name);
- else
- clock = &clock_list[SYSTEM_CLOCK];
+ clock_get_system_microtime(&sys, &microsys);
+ TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
- swtime.tv_sec = sleep_sec;
- swtime.tv_nsec = sleep_nsec;
+ /*
+ * Adjust the boottime based on the delta.
+ */
+ clock_boottime += secs - clock_calend.epoch;
/*
- * Call the actual clock_sleep routine.
+ * Set the new calendar epoch.
*/
- rvalue = clock_sleep_internal(clock, sleep_type, &swtime);
+ clock_calend.epoch = secs;
+
+ nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
/*
- * Return current time as wakeup time.
+ * Cancel any adjustment in progress.
*/
- if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) {
- copyout((char *)&swtime, (char *)wakeup_time,
- sizeof(mach_timespec_t));
- }
- return (rvalue);
-}
+ calend_adjtotal = clock_calend.adjdelta = 0;
+
+ clock_unlock();
+
+ /*
+ * Set the new value for the platform clock.
+ */
+ PESetUTCTimeOfDay(newsecs, newmicrosecs);
+
+ splx(s);
+
+ /*
+ * Send host notifications.
+ */
+ host_notify_calendar_change();
+
+#if CONFIG_DTRACE
+ clock_track_calend_nowait();
+#endif
+}
/*
- * Kernel internally callable clock sleep routine. The calling
- * thread is suspended until the requested sleep time is reached.
+ * clock_initialize_calendar:
+ *
+ * Set the calendar and related clocks
+ * from the platform clock at boot or
+ * wake event.
+ *
+ * Also sends host notifications.
*/
-kern_return_t
-clock_sleep_internal(
- clock_t clock,
- sleep_type_t sleep_type,
- mach_timespec_t *sleep_time)
+void
+clock_initialize_calendar(void)
{
- alarm_t alarm;
- mach_timespec_t clock_time;
- kern_return_t rvalue;
- int chkstat;
+ clock_sec_t sys, secs;
+ clock_usec_t microsys, microsecs;
spl_t s;
- if (clock == CLOCK_NULL)
- return (KERN_INVALID_ARGUMENT);
- if (clock->cl_ops->c_setalrm == 0)
- return (KERN_FAILURE);
+ PEGetUTCTimeOfDay(&secs, &microsecs);
- /*
- * Check sleep parameters. If parameters are invalid
- * return an error, otherwise post alarm request.
- */
- (*clock->cl_ops->c_gettime)(&clock_time);
+ s = splclock();
+ clock_lock();
- chkstat = check_time(sleep_type, sleep_time, &clock_time);
- if (chkstat < 0)
- return (KERN_INVALID_VALUE);
- rvalue = KERN_SUCCESS;
- if (chkstat > 0) {
- wait_result_t wait_result;
+ commpage_disable_timestamp();
+ if ((long)secs >= (long)clock_boottime) {
/*
- * Get alarm and add to clock alarm list.
+ * Initialize the boot time based on the platform clock.
*/
+ if (clock_boottime == 0)
+ clock_boottime = secs;
- LOCK_CLOCK(s);
- if ((alarm = alrmfree) == 0) {
- UNLOCK_CLOCK(s);
- alarm = (alarm_t) zalloc(alarm_zone);
- if (alarm == 0)
- return (KERN_RESOURCE_SHORTAGE);
- LOCK_CLOCK(s);
- }
- else
- alrmfree = alarm->al_next;
+ /*
+ * Calculate the new calendar epoch based on
+ * the platform clock and the system clock.
+ */
+ clock_get_system_microtime(&sys, &microsys);
+ TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
/*
- * Wait for alarm to occur.
+ * Set the new calendar epoch.
*/
- wait_result = assert_wait((event_t)alarm, THREAD_ABORTSAFE);
- if (wait_result == THREAD_WAITING) {
- alarm->al_time = *sleep_time;
- alarm->al_status = ALARM_SLEEP;
- post_alarm(clock, alarm);
- UNLOCK_CLOCK(s);
+ clock_calend.epoch = secs;
- wait_result = thread_block(THREAD_CONTINUE_NULL);
+ nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
- /*
- * Note if alarm expired normally or whether it
- * was aborted. If aborted, delete alarm from
- * clock alarm list. Return alarm to free list.
- */
- LOCK_CLOCK(s);
- if (alarm->al_status != ALARM_DONE) {
- assert(wait_result != THREAD_AWAKENED);
- if ((alarm->al_prev)->al_next = alarm->al_next)
- (alarm->al_next)->al_prev = alarm->al_prev;
- rvalue = KERN_ABORTED;
- }
- *sleep_time = alarm->al_time;
- alarm->al_status = ALARM_FREE;
- } else {
- assert(wait_result == THREAD_INTERRUPTED);
- assert(alarm->al_status == ALARM_FREE);
- rvalue = KERN_ABORTED;
- }
- alarm->al_next = alrmfree;
- alrmfree = alarm;
- UNLOCK_CLOCK(s);
+ /*
+ * Cancel any adjustment in progress.
+ */
+ calend_adjtotal = clock_calend.adjdelta = 0;
}
- else
- *sleep_time = clock_time;
- return (rvalue);
+ clock_unlock();
+ splx(s);
+
+ /*
+ * Send host notifications.
+ */
+ host_notify_calendar_change();
+
+#if CONFIG_DTRACE
+ clock_track_calend_nowait();
+#endif
}
/*
- * CLOCK INTERRUPT SERVICE ROUTINES.
+ * clock_get_boottime_nanotime:
+ *
+ * Return the boottime, used by sysctl.
*/
+void
+clock_get_boottime_nanotime(
+ clock_sec_t *secs,
+ clock_nsec_t *nanosecs)
+{
+ spl_t s;
+
+ s = splclock();
+ clock_lock();
+
+ *secs = (clock_sec_t)clock_boottime;
+ *nanosecs = 0;
+
+ clock_unlock();
+ splx(s);
+}
/*
- * Service clock alarm interrupts. Called from machine dependent
- * layer at splclock(). The clock_id argument specifies the clock,
- * and the clock_time argument gives that clock's current time.
+ * clock_adjtime:
+ *
+ * Interface to adjtime() syscall.
+ *
+ * Calculates adjustment variables and
+ * initiates adjustment.
*/
void
-clock_alarm_intr(
- clock_id_t clock_id,
- mach_timespec_t *clock_time)
+clock_adjtime(
+ long *secs,
+ int *microsecs)
{
- clock_t clock;
- register alarm_t alrm1;
- register alarm_t alrm2;
- mach_timespec_t *alarm_time;
- spl_t s;
+ uint32_t interval;
+ spl_t s;
- clock = &clock_list[clock_id];
+ s = splclock();
+ clock_lock();
- /*
- * Update clock alarm list. All alarms that are due are moved
- * to the alarmdone list to be serviced by the alarm_thread.
+ interval = calend_set_adjustment(secs, microsecs);
+ if (interval != 0) {
+ calend_adjdeadline = mach_absolute_time() + interval;
+ if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
+ calend_adjactive++;
+ }
+ else
+ if (timer_call_cancel(&calend_adjcall))
+ calend_adjactive--;
+
+ clock_unlock();
+ splx(s);
+}
+
+static uint32_t
+calend_set_adjustment(
+ long *secs,
+ int *microsecs)
+{
+ uint64_t now, t64;
+ int64_t total, ototal;
+ uint32_t interval = 0;
+
+ /*
+ * Compute the total adjustment time in nanoseconds.
*/
+ total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);
- LOCK_CLOCK(s);
- alrm1 = (alarm_t) &clock->cl_alarm;
- while (alrm2 = alrm1->al_next) {
- alarm_time = &alrm2->al_time;
- if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0)
- break;
+ /*
+ * Disable commpage gettimeofday().
+ */
+ commpage_disable_timestamp();
- /*
- * Alarm has expired, so remove it from the
- * clock alarm list.
- */
- if (alrm1->al_next = alrm2->al_next)
- (alrm1->al_next)->al_prev = alrm1;
+ /*
+ * Get current absolute time.
+ */
+ now = mach_absolute_time();
+
+ /*
+ * Save the old adjustment total for later return.
+ */
+ ototal = calend_adjtotal;
+ /*
+ * Is a new correction specified?
+ */
+ if (total != 0) {
/*
- * If a clock_sleep() alarm, wakeup the thread
- * which issued the clock_sleep() call.
+ * Set delta to the standard, small, adjustment skew.
*/
- if (alrm2->al_status == ALARM_SLEEP) {
- alrm2->al_next = 0;
- alrm2->al_status = ALARM_DONE;
- alrm2->al_time = *clock_time;
- thread_wakeup((event_t)alrm2);
- }
+ int32_t delta = calend_adjskew;
- /*
- * If a clock_alarm() alarm, place the alarm on
- * the alarm done list and schedule the alarm
- * delivery mechanism.
- */
+ if (total > 0) {
+ /*
+ * Positive adjustment. If greater than the preset 'big'
+ * threshold, slew at a faster rate, capping if necessary.
+ */
+ if (total > (int64_t) calend_adjbig)
+ delta *= 10;
+ if (delta > total)
+ delta = (int32_t)total;
+
+ /*
+ * Convert the delta back from ns to absolute time and store in adjoffset.
+ */
+ nanoseconds_to_absolutetime((uint64_t)delta, &t64);
+ clock_calend.adjoffset = (uint32_t)t64;
+ }
else {
- assert(alrm2->al_status == ALARM_CLOCK);
- if (alrm2->al_next = alrmdone)
- alrmdone->al_prev = alrm2;
- else
- thread_call_enter(&alarm_deliver);
- alrm2->al_prev = (alarm_t) &alrmdone;
- alrmdone = alrm2;
- alrm2->al_status = ALARM_DONE;
- alrm2->al_time = *clock_time;
+ /*
+ * Negative adjustment; therefore, negate the delta. If
+ * greater than the preset 'big' threshold, slew at a faster
+ * rate, capping if necessary.
+ */
+ if (total < (int64_t) -calend_adjbig)
+ delta *= 10;
+ delta = -delta;
+ if (delta < total)
+ delta = (int32_t)total;
+
+ /*
+ * Save the current absolute time. Subsequent time operations occurring
+ * during this negative correction can make use of this value to ensure
+ * that time increases monotonically.
+ */
+ clock_calend.adjstart = now;
+
+ /*
+ * Convert the delta back from ns to absolute time and store in adjoffset.
+ */
+ nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
+ clock_calend.adjoffset = (uint32_t)t64;
}
+
+ /*
+ * Store the total adjustment time in ns.
+ */
+ calend_adjtotal = total;
+
+ /*
+ * Store the delta for this adjustment period in ns.
+ */
+ clock_calend.adjdelta = delta;
+
+ /*
+ * Set the interval in absolute time for later return.
+ */
+ interval = calend_adjinterval;
+ }
+ else {
+ /*
+ * No change; clear any prior adjustment.
+ */
+ calend_adjtotal = clock_calend.adjdelta = 0;
}
- /*
- * Setup the clock dependent layer to deliver another
- * interrupt for the next pending alarm.
+ /*
+ * If a prior correction was in progress, return the
+ * remaining uncorrected time from it.
*/
- if (alrm2)
- (*clock->cl_ops->c_setalrm)(alarm_time);
- UNLOCK_CLOCK(s);
-}
+ if (ototal != 0) {
+ *secs = (long)(ototal / (long)NSEC_PER_SEC);
+ *microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
+ }
+ else
+ *secs = *microsecs = 0;
-/*
- * ALARM DELIVERY ROUTINES.
- */
+#if CONFIG_DTRACE
+ clock_track_calend_nowait();
+#endif
+
+ return (interval);
+}
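+
+/*
+ * Worked example: adjtime() passing +20 ms gives total == 20,000,000 ns.
+ * That is below calend_adjbig, so delta remains calend_adjskew (40 us);
+ * the correction is spread over 500 periods of 10 ms, about 5 seconds.
+ */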
static void
-clock_alarm_deliver(
- thread_call_param_t p0,
- thread_call_param_t p1)
+calend_adjust_call(void)
{
- register alarm_t alrm;
- kern_return_t code;
- spl_t s;
+ uint32_t interval;
+ spl_t s;
- LOCK_CLOCK(s);
- while (alrm = alrmdone) {
- if (alrmdone = alrm->al_next)
- alrmdone->al_prev = (alarm_t) &alrmdone;
- UNLOCK_CLOCK(s);
-
- code = (alrm->al_status == ALARM_DONE? KERN_SUCCESS: KERN_ABORTED);
- if (alrm->al_port != IP_NULL) {
- /* Deliver message to designated port */
- if (IP_VALID(alrm->al_port)) {
- clock_alarm_reply(alrm->al_port, alrm->al_port_type, code,
- alrm->al_type, alrm->al_time);
- }
+ s = splclock();
+ clock_lock();
+
+ if (--calend_adjactive == 0) {
+ interval = calend_adjust();
+ if (interval != 0) {
+ clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);
- LOCK_CLOCK(s);
- alrm->al_status = ALARM_FREE;
- alrm->al_next = alrmfree;
- alrmfree = alrm;
+ if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
+ calend_adjactive++;
}
- else
- panic("clock_alarm_deliver");
}
- UNLOCK_CLOCK(s);
+ clock_unlock();
+ splx(s);
}
-/*
- * CLOCK PRIVATE SERVICING SUBROUTINES.
- */
+static uint32_t
+calend_adjust(void)
+{
+ uint64_t now, t64;
+ int32_t delta;
+ uint32_t interval = 0;
+
+ commpage_disable_timestamp();
+
+ now = mach_absolute_time();
+
+ delta = clock_calend.adjdelta;
+
+ if (delta > 0) {
+ clock_calend.offset += clock_calend.adjoffset;
+
+ calend_adjtotal -= delta;
+ if (delta > calend_adjtotal) {
+ clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;
+
+ nanoseconds_to_absolutetime((uint64_t)delta, &t64);
+ clock_calend.adjoffset = (uint32_t)t64;
+ }
+ }
+ else
+ if (delta < 0) {
+ clock_calend.offset -= clock_calend.adjoffset;
+
+ calend_adjtotal -= delta;
+ if (delta < calend_adjtotal) {
+ clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;
+
+ nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
+ clock_calend.adjoffset = (uint32_t)t64;
+ }
+
+ if (clock_calend.adjdelta != 0)
+ clock_calend.adjstart = now;
+ }
+
+ if (clock_calend.adjdelta != 0)
+ interval = calend_adjinterval;
+
+#if CONFIG_DTRACE
+ clock_track_calend_nowait();
+#endif
+
+ return (interval);
+}
/*
- * Flush all pending alarms on a clock. All alarms
- * are activated and timestamped correctly, so any
- * programs waiting on alarms/threads will proceed
- * with accurate information.
+ * clock_wakeup_calendar:
+ *
+ * Interface to power management, used
+ * to initiate the reset of the calendar
+ * on wake from sleep event.
*/
-static
void
-flush_alarms(
- clock_t clock)
+clock_wakeup_calendar(void)
{
- register alarm_t alrm1, alrm2;
- spl_t s;
-
- /*
- * Flush all outstanding alarms.
- */
- LOCK_CLOCK(s);
- alrm1 = (alarm_t) &clock->cl_alarm;
- while (alrm2 = alrm1->al_next) {
- /*
- * Remove alarm from the clock alarm list.
- */
- if (alrm1->al_next = alrm2->al_next)
- (alrm1->al_next)->al_prev = alrm1;
+ thread_call_enter(&calend_wakecall);
+}
- /*
- * If a clock_sleep() alarm, wakeup the thread
- * which issued the clock_sleep() call.
- */
- if (alrm2->al_status == ALARM_SLEEP) {
- alrm2->al_next = 0;
- thread_wakeup((event_t)alrm2);
- }
- else {
- /*
- * If a clock_alarm() alarm, place the alarm on
- * the alarm done list and wakeup the dedicated
- * kernel alarm_thread to service the alarm.
- */
- assert(alrm2->al_status == ALARM_CLOCK);
- if (alrm2->al_next = alrmdone)
- alrmdone->al_prev = alrm2;
- else
- thread_wakeup((event_t)&alrmdone);
- alrm2->al_prev = (alarm_t) &alrmdone;
- alrmdone = alrm2;
- }
- }
- UNLOCK_CLOCK(s);
+/*
+ * Wait / delay routines.
+ */
+static void
+mach_wait_until_continue(
+ __unused void *parameter,
+ wait_result_t wresult)
+{
+ thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
+ /*NOTREACHED*/
}
/*
- * Post an alarm on a clock's active alarm list. The alarm is
- * inserted in time-order into the clock's active alarm list.
- * Always called from within a LOCK_CLOCK() code section.
+ * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
+ *
+ * Parameters: args->deadline Absolute time to wait until
+ *
+ * Returns: 0 Success
+ * !0 Not success
+ *
*/
-static
+kern_return_t
+mach_wait_until_trap(
+ struct mach_wait_until_trap_args *args)
+{
+ uint64_t deadline = args->deadline;
+ wait_result_t wresult;
+
+ wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
+ TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
+ if (wresult == THREAD_WAITING)
+ wresult = thread_block(mach_wait_until_continue);
+
+ return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
+}
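+
+/*
+ * Typical usage (user-level sketch): the mach_wait_until() wrapper enters
+ * this trap with an absolute deadline, e.g.
+ *
+ *	mach_wait_until(mach_absolute_time() + interval_abs);
+ *
+ * where interval_abs is assumed to be a delay already converted to
+ * absolute time units using the timebase.
+ */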
+
void
-post_alarm(
- clock_t clock,
- alarm_t alarm)
+clock_delay_until(
+ uint64_t deadline)
{
- register alarm_t alrm1, alrm2;
- mach_timespec_t *alarm_time;
- mach_timespec_t *queue_time;
+ uint64_t now = mach_absolute_time();
- /*
- * Traverse alarm list until queue time is greater
- * than alarm time, then insert alarm.
- */
- alarm_time = &alarm->al_time;
- alrm1 = (alarm_t) &clock->cl_alarm;
- while (alrm2 = alrm1->al_next) {
- queue_time = &alrm2->al_time;
- if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
- break;
- alrm1 = alrm2;
- }
- alrm1->al_next = alarm;
- alarm->al_next = alrm2;
- alarm->al_prev = alrm1;
- if (alrm2)
- alrm2->al_prev = alarm;
+ if (now >= deadline)
+ return;
- /*
- * If the inserted alarm is the 'earliest' alarm,
- * reset the device layer alarm time accordingly.
- */
- if (clock->cl_alarm.al_next == alarm)
- (*clock->cl_ops->c_setalrm)(alarm_time);
+ _clock_delay_until_deadline(deadline - now, deadline);
}
/*
- * Check the validity of 'alarm_time' and 'alarm_type'. If either
- * argument is invalid, return a negative value. If the 'alarm_time'
- * is now, return a 0 value. If the 'alarm_time' is in the future,
- * return a positive value.
+ * Preserve the original precise interval that the client
+ * requested for comparison to the spin threshold.
*/
-static
-int
-check_time(
- alarm_type_t alarm_type,
- mach_timespec_t *alarm_time,
- mach_timespec_t *clock_time)
+void
+_clock_delay_until_deadline(
+ uint64_t interval,
+ uint64_t deadline)
{
- int result;
- if (BAD_ALRMTYPE(alarm_type))
- return (-1);
- if (BAD_MACH_TIMESPEC(alarm_time))
- return (-1);
- if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
- ADD_MACH_TIMESPEC(alarm_time, clock_time);
+ if (interval == 0)
+ return;
- result = CMP_MACH_TIMESPEC(alarm_time, clock_time);
+ if ( ml_delay_should_spin(interval) ||
+ get_preemption_level() != 0 ||
+ ml_get_interrupts_enabled() == FALSE ) {
+ machine_delay_until(interval, deadline);
+ } else {
+ assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
- return ((result >= 0)? result: 0);
+ thread_block(THREAD_CONTINUE_NULL);
+ }
}
-mach_timespec_t
-clock_get_system_value(void)
+
+void
+delay_for_interval(
+ uint32_t interval,
+ uint32_t scale_factor)
{
- clock_t clock = &clock_list[SYSTEM_CLOCK];
- mach_timespec_t value;
+ uint64_t abstime;
- (void) (*clock->cl_ops->c_gettime)(&value);
+ clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
- return value;
+ _clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}
-mach_timespec_t
-clock_get_calendar_value(void)
+void
+delay(
+ int usec)
{
- clock_t clock = &clock_list[CALENDAR_CLOCK];
- mach_timespec_t value = MACH_TIMESPEC_ZERO;
+ delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
+}
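+
+/*
+ * Note: the scale factor above is NSEC_PER_USEC, so delay(100) waits
+ * roughly 100 microseconds, spinning or blocking as decided by
+ * _clock_delay_until_deadline().
+ */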
- (void) (*clock->cl_ops->c_gettime)(&value);
+/*
+ * Miscellaneous routines.
+ */
+void
+clock_interval_to_deadline(
+ uint32_t interval,
+ uint32_t scale_factor,
+ uint64_t *result)
+{
+ uint64_t abstime;
+
+ clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
- return value;
+ *result = mach_absolute_time() + abstime;
}
void
-clock_set_calendar_value(
- mach_timespec_t value)
+clock_absolutetime_interval_to_deadline(
+ uint64_t abstime,
+ uint64_t *result)
{
- clock_t clock = &clock_list[CALENDAR_CLOCK];
+ *result = mach_absolute_time() + abstime;
+}
- (void) (*clock->cl_ops->c_settime)(&value);
+void
+clock_get_uptime(
+ uint64_t *result)
+{
+ *result = mach_absolute_time();
}
void
*deadline += interval;
if (*deadline <= abstime) {
- *deadline = abstime;
- clock_get_uptime(&abstime);
- *deadline += interval;
+ *deadline = abstime + interval;
+ abstime = mach_absolute_time();
- if (*deadline <= abstime) {
- *deadline = abstime;
- *deadline += interval;
- }
+ if (*deadline <= abstime)
+ *deadline = abstime + interval;
}
}
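+
+/*
+ * Example: calend_adjust_call() above rearms its timer with
+ *
+ *	clock_deadline_for_periodic_event(interval, mach_absolute_time(),
+ *		&calend_adjdeadline);
+ *
+ * advancing the deadline by one interval and, when one or more periods
+ * have been missed, resynchronizing it to just past the current time.
+ */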
-void
-mk_timebase_info(
- uint32_t *delta,
- uint32_t *abs_to_ns_numer,
- uint32_t *abs_to_ns_denom,
- uint32_t *proc_to_abs_numer,
- uint32_t *proc_to_abs_denom)
-{
- mach_timebase_info_data_t info;
- uint32_t one = 1;
-
- clock_timebase_info(&info);
+#if CONFIG_DTRACE
- copyout((void *)&one, (void *)delta, sizeof (uint32_t));
-
- copyout((void *)&info.numer, (void *)abs_to_ns_numer, sizeof (uint32_t));
- copyout((void *)&info.denom, (void *)abs_to_ns_denom, sizeof (uint32_t));
-
- copyout((void *)&one, (void *)proc_to_abs_numer, sizeof (uint32_t));
- copyout((void *)&one, (void *)proc_to_abs_denom, sizeof (uint32_t));
-}
-
-kern_return_t
-mach_timebase_info(
- mach_timebase_info_t out_info)
+/*
+ * clock_get_calendar_nanotime_nowait
+ *
+ * Description: Non-blocking version of clock_get_calendar_nanotime()
+ *
+ * Notes: This function operates by separately tracking calendar time
+ * updates using a two element structure to copy the calendar
+ * state, which may be asynchronously modified. It utilizes
+ * barrier instructions in the tracking process and in the local
+ * stable snapshot process in order to ensure that a consistent
+ * snapshot is used to perform the calculation.
+ */
+void
+clock_get_calendar_nanotime_nowait(
+ clock_sec_t *secs,
+ clock_nsec_t *nanosecs)
{
- mach_timebase_info_data_t info;
+ int i = 0;
+ uint64_t now;
+ struct unlocked_clock_calend stable;
- clock_timebase_info(&info);
+ for (;;) {
+ stable = flipflop[i]; /* take snapshot */
- copyout((void *)&info, (void *)out_info, sizeof (info));
+ /*
+ * Use a barrier instruction to ensure atomicity. We AND
+ * off the "in progress" bit to get the current generation
+ * count.
+ */
+ (void)hw_atomic_and(&stable.gen, ~(uint32_t)1);
- return (KERN_SUCCESS);
-}
+ /*
+ * If an update _is_ in progress, the generation count will be
+ * off by one; if it _was_ in progress, it will be off by two;
+ * and if we caught it at a good time, it will be equal (and
+ * our snapshot is therefore stable).
+ */
+ if (flipflop[i].gen == stable.gen)
+ break;
-kern_return_t
-mach_wait_until(
- uint64_t deadline)
-{
- int wait_result;
-
- wait_result = assert_wait((event_t)&mach_wait_until, THREAD_ABORTSAFE);
- if (wait_result == THREAD_WAITING) {
- thread_set_timer_deadline(deadline);
- wait_result = thread_block(THREAD_CONTINUE_NULL);
- if (wait_result != THREAD_TIMED_OUT)
- thread_cancel_timer();
+ /* Switch to the other element of the flipflop, and try again. */
+ i ^= 1;
}
- return ((wait_result == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
-}
-
-int64_t
-clock_set_calendar_adjtime(
- int64_t total,
- uint32_t delta)
-{
- int64_t ototal;
- spl_t s;
-
- s = splclock();
- simple_lock(&calend_adjlock);
-
- if (calend_adjinterval == 0)
- clock_interval_to_absolutetime_interval(10000, NSEC_PER_USEC,
- &calend_adjinterval);
+ now = mach_absolute_time();
- ototal = calend_adjtotal;
+ if (stable.calend.adjdelta < 0) {
+ uint32_t t32;
- if (total != 0) {
- uint64_t abstime;
+ if (now > stable.calend.adjstart) {
+ t32 = (uint32_t)(now - stable.calend.adjstart);
- if (total > 0) {
- if (delta > total)
- delta = total;
- }
- else {
- if (delta > -total)
- delta = -total;
+ if (t32 > stable.calend.adjoffset)
+ now -= stable.calend.adjoffset;
+ else
+ now = stable.calend.adjstart;
}
-
- calend_adjtotal = total;
- calend_adjdelta = delta;
-
- if (calend_adjdeadline >= calend_adjinterval)
- calend_adjdeadline -= calend_adjinterval;
- clock_get_uptime(&abstime);
- clock_deadline_for_periodic_event(calend_adjinterval, abstime,
- &calend_adjdeadline);
-
- timer_call_enter(&calend_adjcall, calend_adjdeadline);
}
- else {
- calend_adjtotal = 0;
- timer_call_cancel(&calend_adjcall);
- }
+ now += stable.calend.offset;
- simple_unlock(&calend_adjlock);
- splx(s);
+ absolutetime_to_microtime(now, secs, nanosecs);
+ *nanosecs *= NSEC_PER_USEC;
- return (ototal);
+ *secs += (clock_sec_t)stable.calend.epoch;
}
-static void
-clock_calend_adjust(
- timer_call_param_t p0,
- timer_call_param_t p1)
+static void
+clock_track_calend_nowait(void)
{
- spl_t s;
+ int i;
- s = splclock();
- simple_lock(&calend_adjlock);
+ for (i = 0; i < 2; i++) {
+ struct clock_calend tmp = clock_calend;
- if (calend_adjtotal > 0) {
- clock_adjust_calendar((clock_res_t)calend_adjdelta);
- calend_adjtotal -= calend_adjdelta;
+ /*
+ * Set the low bit of the generation count; since we use a
+ * barrier instruction to do this, we are guaranteed that this
+ * will flag an update in progress to an async caller trying
+ * to examine the contents.
+ */
+ (void)hw_atomic_or(&flipflop[i].gen, 1);
- if (calend_adjdelta > calend_adjtotal)
- calend_adjdelta = calend_adjtotal;
- }
- else
- if (calend_adjtotal < 0) {
- clock_adjust_calendar(-(clock_res_t)calend_adjdelta);
- calend_adjtotal += calend_adjdelta;
+ flipflop[i].calend = tmp;
- if (calend_adjdelta > -calend_adjtotal)
- calend_adjdelta = -calend_adjtotal;
+ /*
+ * Increment the generation count, clearing the low bit to
+ * signal completion. If a caller compares the generation
+ * count after taking a copy while in progress, the count
+ * will be off by two.
+ */
+ (void)hw_atomic_add(&flipflop[i].gen, 1);
}
+}
- if (calend_adjtotal != 0) {
- uint64_t abstime;
-
- clock_get_uptime(&abstime);
- clock_deadline_for_periodic_event(calend_adjinterval, abstime,
- &calend_adjdeadline);
-
- timer_call_enter(&calend_adjcall, calend_adjdeadline);
- }
+#endif /* CONFIG_DTRACE */
- simple_unlock(&calend_adjlock);
- splx(s);
-}