X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/43866e378188c25dd1e2208016ab3cbeb086ae6c..ff6e181ae92fc6f1e89841290f461d1f2f9badd9:/osfmk/ppc/rtclock.c diff --git a/osfmk/ppc/rtclock.c b/osfmk/ppc/rtclock.c index a44d3ef73..62f16b616 100644 --- a/osfmk/ppc/rtclock.c +++ b/osfmk/ppc/rtclock.c @@ -1,10 +1,8 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -34,8 +32,6 @@ * real-time clock. */ -#include - #include #include @@ -43,10 +39,16 @@ #include #include -#include /* HZ */ +#include + +#include +#include +#include #include +#include +#include -#include +#include #include @@ -74,75 +76,78 @@ struct clock_ops sysclk_ops = { int calend_config(void); -int calend_init(void); - kern_return_t calend_gettime( mach_timespec_t *cur_time); -kern_return_t calend_settime( - mach_timespec_t *cur_time); - kern_return_t calend_getattr( clock_flavor_t flavor, clock_attr_t attr, mach_msg_type_number_t *count); struct clock_ops calend_ops = { - calend_config, calend_init, - calend_gettime, calend_settime, + calend_config, 0, + calend_gettime, 0, calend_getattr, 0, 0, }; /* local data declarations */ -static struct rtclock { - mach_timespec_t calend_offset; - boolean_t calend_is_set; +static struct rtclock_calend { + uint32_t epoch; + uint32_t microepoch; - mach_timebase_info_data_t timebase_const; + uint64_t epoch1; - struct rtclock_timer { - uint64_t deadline; - boolean_t is_set; - } timer[NCPUS]; + int64_t adjtotal; + int32_t adjdelta; +} rtclock_calend; - clock_timer_func_t timer_expire; +static uint32_t rtclock_boottime; - timer_call_data_t alarm_timer; +#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \ +MACRO_BEGIN \ + if (((rfrac) += (frac)) >= (unit)) { \ + (rfrac) -= (unit); \ + (rsecs) += 1; \ + } \ + (rsecs) += (secs); \ +MACRO_END - /* debugging */ - uint64_t last_abstime[NCPUS]; - int last_decr[NCPUS]; +#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \ +MACRO_BEGIN \ + if ((int32_t)((rfrac) -= (frac)) < 0) { \ + (rfrac) += (unit); \ + (rsecs) -= 1; \ + } \ + (rsecs) -= (secs); \ +MACRO_END + +#define NSEC_PER_HZ (NSEC_PER_SEC / 100) +static uint32_t rtclock_tick_interval; + +static uint32_t rtclock_sec_divisor; - decl_simple_lock_data(,lock) /* real-time clock device lock */ -} rtclock; +static mach_timebase_info_data_t rtclock_timebase_const; -static boolean_t rtclock_initialized; +static boolean_t rtclock_timebase_initialized; -static uint64_t rtclock_tick_deadline[NCPUS]; -static uint64_t rtclock_tick_interval; +static clock_timer_func_t rtclock_timer_expire; -static void timespec_to_absolutetime( - mach_timespec_t timespec, - uint64_t *result); +static timer_call_data_t rtclock_alarm_timer; -static int deadline_to_decrementer( - uint64_t deadline, - uint64_t now); +static void nanotime_to_absolutetime( + uint32_t secs, + uint32_t nanosecs, + uint64_t *result); -static void rtclock_alarm_timer( +static void rtclock_alarm_expire( timer_call_param_t p0, timer_call_param_t p1); /* global data declarations */ -#define RTC_TICKPERIOD (NSEC_PER_SEC / HZ) - -#define DECREMENTER_MAX 0x7FFFFFFFUL -#define DECREMENTER_MIN 0xAUL - -natural_t rtclock_decrementer_min; 
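The TIME_ADD and TIME_SUB macros introduced above keep calendar time as a (seconds, fraction) pair and propagate at most one carry or borrow per operation, which assumes the fractional operand stays below one unit. A minimal user-space sketch of the same carry logic (standalone, with made-up values; do/while stands in for the kernel's MACRO_BEGIN/MACRO_END):

    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_SEC 1000000

    /* Same shape as the kernel's TIME_ADD: accumulate (secs, frac) into
     * (rsecs, rfrac), carrying one whole second when the fraction overflows. */
    #define TIME_ADD(rsecs, secs, rfrac, frac, unit)    \
    do {                                                \
        if (((rfrac) += (frac)) >= (unit)) {            \
            (rfrac) -= (unit);                          \
            (rsecs) += 1;                               \
        }                                               \
        (rsecs) += (secs);                              \
    } while (0)

    int main(void)
    {
        uint32_t secs = 10, usecs = 900000;

        /* 10.900000 s + 0.200000 s overflows the fraction,
         * so one second carries: the result is 11.100000 s. */
        TIME_ADD(secs, 0, usecs, 200000, USEC_PER_SEC);
        printf("%u.%06u\n", secs, usecs);   /* prints 11.100000 */
        return 0;
    }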
+decl_simple_lock_data(static,rtclock_lock) /* * Macros to lock/unlock real-time clock device. @@ -150,12 +155,12 @@ natural_t rtclock_decrementer_min; #define LOCK_RTC(s) \ MACRO_BEGIN \ (s) = splclock(); \ - simple_lock(&rtclock.lock); \ + simple_lock(&rtclock_lock); \ MACRO_END #define UNLOCK_RTC(s) \ MACRO_BEGIN \ - simple_unlock(&rtclock.lock); \ + simple_unlock(&rtclock_lock); \ splx(s); \ MACRO_END @@ -163,28 +168,41 @@ static void timebase_callback( struct timebase_freq_t *freq) { - natural_t numer, denom; - int n; + uint32_t numer, denom; + uint64_t abstime; spl_t s; - denom = freq->timebase_num; - n = 9; - while (!(denom % 10)) { - if (n < 1) - break; - denom /= 10; - n--; - } + if ( freq->timebase_den < 1 || freq->timebase_den > 4 || + freq->timebase_num < freq->timebase_den ) + panic("rtclock timebase_callback: invalid constant %d / %d", + freq->timebase_num, freq->timebase_den); - numer = freq->timebase_den; - while (n-- > 0) { - numer *= 10; - } + denom = freq->timebase_num; + numer = freq->timebase_den * NSEC_PER_SEC; LOCK_RTC(s); - rtclock.timebase_const.numer = numer; - rtclock.timebase_const.denom = denom; + if (!rtclock_timebase_initialized) { + commpage_set_timestamp(0,0,0,0); + + rtclock_timebase_const.numer = numer; + rtclock_timebase_const.denom = denom; + rtclock_sec_divisor = freq->timebase_num / freq->timebase_den; + + nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime); + rtclock_tick_interval = abstime; + + ml_init_lock_timeout(); + } + else { + UNLOCK_RTC(s); + printf("rtclock timebase_callback: late old %d / %d new %d / %d\n", + rtclock_timebase_const.numer, rtclock_timebase_const.denom, + numer, denom); + return; + } UNLOCK_RTC(s); + + clock_timebase_init(); } /* @@ -193,12 +211,9 @@ timebase_callback( int sysclk_config(void) { - if (cpu_number() != master_cpu) - return(1); - - timer_call_setup(&rtclock.alarm_timer, rtclock_alarm_timer, NULL); + timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL); - simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK); + simple_lock_init(&rtclock_lock, 0); PE_register_timebase_callback(timebase_callback); @@ -211,289 +226,64 @@ sysclk_config(void) int sysclk_init(void) { - uint64_t abstime; - int decr, mycpu = cpu_number(); + uint64_t abstime; + struct per_proc_info *pp; - if (mycpu != master_cpu) { - if (rtclock_initialized == FALSE) { - panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu); - } - /* Set decrementer and hence our next tick due */ - clock_get_uptime(&abstime); - rtclock_tick_deadline[mycpu] = abstime; - rtclock_tick_deadline[mycpu] += rtclock_tick_interval; - decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime); - mtdec(decr); - rtclock.last_decr[mycpu] = decr; - - return(1); - } + pp = getPerProc(); - /* - * Initialize non-zero clock structure values. 
- */ - clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1, - &rtclock_tick_interval); - /* Set decrementer and our next tick due */ - clock_get_uptime(&abstime); - rtclock_tick_deadline[mycpu] = abstime; - rtclock_tick_deadline[mycpu] += rtclock_tick_interval; - decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime); - mtdec(decr); - rtclock.last_decr[mycpu] = decr; - - rtclock_initialized = TRUE; + abstime = mach_absolute_time(); + pp->rtclock_tick_deadline = abstime + rtclock_tick_interval; /* Get the time we need to pop */ + pp->rtcPop = pp->rtclock_tick_deadline; /* Set the rtc pop time the same for now */ + + (void)setTimerReq(); /* Start the timers going */ return (1); } -#define UnsignedWide_to_scalar(x) (*(uint64_t *)(x)) -#define scalar_to_UnsignedWide(x) (*(UnsignedWide *)(x)) - -/* - * Perform a full 64 bit by 32 bit unsigned multiply, - * yielding a 96 bit product. The most significant - * portion of the product is returned as a 64 bit - * quantity, with the lower portion as a 32 bit word. - */ -static void -umul_64by32( - UnsignedWide now64, - uint32_t mult32, - UnsignedWide *result64, - uint32_t *result32) -{ - uint32_t mid, mid2; - - asm volatile(" mullw %0,%1,%2" : - "=r" (*result32) : - "r" (now64.lo), "r" (mult32)); - - asm volatile(" mullw %0,%1,%2" : - "=r" (mid2) : - "r" (now64.hi), "r" (mult32)); - asm volatile(" mulhwu %0,%1,%2" : - "=r" (mid) : - "r" (now64.lo), "r" (mult32)); - - asm volatile(" mulhwu %0,%1,%2" : - "=r" (result64->hi) : - "r" (now64.hi), "r" (mult32)); - - asm volatile(" addc %0,%2,%3; - addze %1,%4" : - "=r" (result64->lo), "=r" (result64->hi) : - "r" (mid), "r" (mid2), "1" (result64->hi)); -} - -/* - * Perform a partial 64 bit by 32 bit unsigned multiply, - * yielding a 64 bit product. Only the least significant - * 64 bits of the product are calculated and returned. - */ -static void -umul_64by32to64( - UnsignedWide now64, - uint32_t mult32, - UnsignedWide *result64) -{ - uint32_t mid, mid2; - - asm volatile(" mullw %0,%1,%2" : - "=r" (result64->lo) : - "r" (now64.lo), "r" (mult32)); - - asm volatile(" mullw %0,%1,%2" : - "=r" (mid2) : - "r" (now64.hi), "r" (mult32)); - asm volatile(" mulhwu %0,%1,%2" : - "=r" (mid) : - "r" (now64.lo), "r" (mult32)); - - asm volatile(" add %0,%1,%2" : - "=r" (result64->hi) : - "r" (mid), "r" (mid2)); -} - -/* - * Perform an unsigned division of a 96 bit value - * by a 32 bit value, yielding a 96 bit quotient. - * The most significant portion of the product is - * returned as a 64 bit quantity, with the lower - * portion as a 32 bit word. - */ -static void -udiv_96by32( - UnsignedWide now64, - uint32_t now32, - uint32_t div32, - UnsignedWide *result64, - uint32_t *result32) -{ - UnsignedWide t64; - - if (now64.hi > 0 || now64.lo >= div32) { - UnsignedWide_to_scalar(result64) = - UnsignedWide_to_scalar(&now64) / div32; - - umul_64by32to64(*result64, div32, &t64); - - UnsignedWide_to_scalar(&t64) = - UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64); - - *result32 = (((uint64_t)t64.lo << 32) | now32) / div32; - } - else { - UnsignedWide_to_scalar(result64) = - (((uint64_t)now64.lo << 32) | now32) / div32; - - *result32 = result64->lo; - result64->lo = result64->hi; - result64->hi = 0; - } -} - -/* - * Perform an unsigned division of a 96 bit value - * by a 32 bit value, yielding a 64 bit quotient. - * Any higher order bits of the quotient are simply - * discarded. 
- */ -static void -udiv_96by32to64( - UnsignedWide now64, - uint32_t now32, - uint32_t div32, - UnsignedWide *result64) -{ - UnsignedWide t64; - - if (now64.hi > 0 || now64.lo >= div32) { - UnsignedWide_to_scalar(result64) = - UnsignedWide_to_scalar(&now64) / div32; - - umul_64by32to64(*result64, div32, &t64); - - UnsignedWide_to_scalar(&t64) = - UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64); - - result64->hi = result64->lo; - result64->lo = (((uint64_t)t64.lo << 32) | now32) / div32; - } - else { - UnsignedWide_to_scalar(result64) = - (((uint64_t)now64.lo << 32) | now32) / div32; - } -} - -/* - * Perform an unsigned division of a 96 bit value - * by a 32 bit value, yielding a 32 bit quotient, - * and a 32 bit remainder. Any higher order bits - * of the quotient are simply discarded. - */ -static void -udiv_96by32to32and32( - UnsignedWide now64, - uint32_t now32, - uint32_t div32, - uint32_t *result32, - uint32_t *remain32) -{ - UnsignedWide t64, u64; - - if (now64.hi > 0 || now64.lo >= div32) { - UnsignedWide_to_scalar(&t64) = - UnsignedWide_to_scalar(&now64) / div32; - - umul_64by32to64(t64, div32, &t64); - - UnsignedWide_to_scalar(&t64) = - UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64); - - UnsignedWide_to_scalar(&t64) = ((uint64_t)t64.lo << 32) | now32; - - UnsignedWide_to_scalar(&u64) = - UnsignedWide_to_scalar(&t64) / div32; - - *result32 = u64.lo; - - umul_64by32to64(u64, div32, &u64); - - *remain32 = UnsignedWide_to_scalar(&t64) - - UnsignedWide_to_scalar(&u64); - } - else { - UnsignedWide_to_scalar(&t64) = ((uint64_t)now64.lo << 32) | now32; - - UnsignedWide_to_scalar(&u64) = - UnsignedWide_to_scalar(&t64) / div32; - - *result32 = u64.lo; - - umul_64by32to64(u64, div32, &u64); - - *remain32 = UnsignedWide_to_scalar(&t64) - - UnsignedWide_to_scalar(&u64); - } -} - -/* - * Get the clock device time. This routine is responsible - * for converting the device's machine dependent time value - * into a canonical mach_timespec_t value. 
- * - * SMP configurations - *the processor clocks are synchronised* - */ kern_return_t -sysclk_gettime_internal( - mach_timespec_t *time) /* OUT */ +sysclk_gettime( + mach_timespec_t *time) /* OUT */ { - UnsignedWide now; - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - - clock_get_uptime((uint64_t *)&now); - - umul_64by32(now, numer, &t64, &t32); + uint64_t now, t64; + uint32_t divisor; - udiv_96by32(t64, t32, denom, &t64, &t32); + now = mach_absolute_time(); - udiv_96by32to32and32(t64, t32, NSEC_PER_SEC, - &time->tv_sec, &time->tv_nsec); + time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor); + now -= (t64 * divisor); + time->tv_nsec = (now * NSEC_PER_SEC) / divisor; return (KERN_SUCCESS); } -kern_return_t -sysclk_gettime( - mach_timespec_t *time) /* OUT */ +void +clock_get_system_microtime( + uint32_t *secs, + uint32_t *microsecs) { - UnsignedWide now; - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - spl_t s; + uint64_t now, t64; + uint32_t divisor; - LOCK_RTC(s); - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - UNLOCK_RTC(s); - - clock_get_uptime((uint64_t *)&now); + now = mach_absolute_time(); - umul_64by32(now, numer, &t64, &t32); + *secs = t64 = now / (divisor = rtclock_sec_divisor); + now -= (t64 * divisor); + *microsecs = (now * USEC_PER_SEC) / divisor; +} - udiv_96by32(t64, t32, denom, &t64, &t32); +void +clock_get_system_nanotime( + uint32_t *secs, + uint32_t *nanosecs) +{ + uint64_t now, t64; + uint32_t divisor; - udiv_96by32to32and32(t64, t32, NSEC_PER_SEC, - &time->tv_sec, &time->tv_nsec); + now = mach_absolute_time(); - return (KERN_SUCCESS); + *secs = t64 = now / (divisor = rtclock_sec_divisor); + now -= (t64 * divisor); + *nanosecs = (now * NSEC_PER_SEC) / divisor; } /* @@ -501,14 +291,15 @@ sysclk_gettime( */ kern_return_t sysclk_getattr( - clock_flavor_t flavor, - clock_attr_t attr, /* OUT */ + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ mach_msg_type_number_t *count) /* IN/OUT */ { - spl_t s; + spl_t s; if (*count != 1) return (KERN_FAILURE); + switch (flavor) { case CLOCK_GET_TIME_RES: /* >0 res */ @@ -516,13 +307,14 @@ sysclk_getattr( case CLOCK_ALARM_MINRES: case CLOCK_ALARM_MAXRES: LOCK_RTC(s); - *(clock_res_t *) attr = RTC_TICKPERIOD; + *(clock_res_t *) attr = NSEC_PER_HZ; UNLOCK_RTC(s); break; default: return (KERN_INVALID_VALUE); } + return (KERN_SUCCESS); } @@ -534,10 +326,10 @@ void sysclk_setalarm( mach_timespec_t *deadline) { - uint64_t abstime; + uint64_t abstime; - timespec_to_absolutetime(*deadline, &abstime); - timer_call_enter(&rtclock.alarm_timer, abstime); + nanotime_to_absolutetime(deadline->tv_sec, deadline->tv_nsec, &abstime); + timer_call_enter(&rtclock_alarm_timer, abstime); } /* @@ -549,58 +341,15 @@ calend_config(void) return (1); } -/* - * Initialize the calendar clock. - */ -int -calend_init(void) -{ - if (cpu_number() != master_cpu) - return(1); - - return (1); -} - /* * Get the current clock time. */ kern_return_t calend_gettime( - mach_timespec_t *curr_time) /* OUT */ + mach_timespec_t *time) /* OUT */ { - spl_t s; - - LOCK_RTC(s); - if (!rtclock.calend_is_set) { - UNLOCK_RTC(s); - return (KERN_FAILURE); - } - - (void) sysclk_gettime_internal(curr_time); - ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset); - UNLOCK_RTC(s); - - return (KERN_SUCCESS); -} - -/* - * Set the current clock time. 
- */ -kern_return_t -calend_settime( - mach_timespec_t *new_time) -{ - mach_timespec_t curr_time; - spl_t s; - - LOCK_RTC(s); - (void) sysclk_gettime_internal(&curr_time); - rtclock.calend_offset = *new_time; - SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time); - rtclock.calend_is_set = TRUE; - UNLOCK_RTC(s); - - PESetGMTTimeOfDay(new_time->tv_sec); + clock_get_calendar_nanotime( + &time->tv_sec, &time->tv_nsec); return (KERN_SUCCESS); } @@ -610,19 +359,20 @@ calend_settime( */ kern_return_t calend_getattr( - clock_flavor_t flavor, - clock_attr_t attr, /* OUT */ + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ mach_msg_type_number_t *count) /* IN/OUT */ { - spl_t s; + spl_t s; if (*count != 1) return (KERN_FAILURE); + switch (flavor) { case CLOCK_GET_TIME_RES: /* >0 res */ LOCK_RTC(s); - *(clock_res_t *) attr = RTC_TICKPERIOD; + *(clock_res_t *) attr = NSEC_PER_HZ; UNLOCK_RTC(s); break; @@ -635,62 +385,502 @@ calend_getattr( default: return (KERN_INVALID_VALUE); } + return (KERN_SUCCESS); } void -clock_adjust_calendar( - clock_res_t nsec) +clock_get_calendar_microtime( + uint32_t *secs, + uint32_t *microsecs) { - spl_t s; + uint32_t epoch, microepoch; + uint64_t now, t64; + spl_t s = splclock(); + + simple_lock(&rtclock_lock); + + if (rtclock_calend.adjdelta >= 0) { + uint32_t divisor; + + now = mach_absolute_time(); + + epoch = rtclock_calend.epoch; + microepoch = rtclock_calend.microepoch; + + simple_unlock(&rtclock_lock); + + *secs = t64 = now / (divisor = rtclock_sec_divisor); + now -= (t64 * divisor); + *microsecs = (now * USEC_PER_SEC) / divisor; + + TIME_ADD(*secs, epoch, *microsecs, microepoch, USEC_PER_SEC); + } + else { + uint32_t delta, t32; + + delta = -rtclock_calend.adjdelta; + + now = mach_absolute_time(); + + *secs = rtclock_calend.epoch; + *microsecs = rtclock_calend.microepoch; + + if (now > rtclock_calend.epoch1) { + t64 = now - rtclock_calend.epoch1; + + t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor; + + if (t32 > delta) + TIME_ADD(*secs, 0, *microsecs, (t32 - delta), USEC_PER_SEC); + } + + simple_unlock(&rtclock_lock); + } + + splx(s); +} + +/* This is only called from the gettimeofday() syscall. As a side + * effect, it updates the commpage timestamp. Otherwise it is + * identical to clock_get_calendar_microtime(). Because most + * gettimeofday() calls are handled by the commpage in user mode, + * this routine should be infrequently used except when slowing down + * the clock. 
+ */ +void +clock_gettimeofday( + uint32_t *secs_p, + uint32_t *microsecs_p) +{ + uint32_t epoch, microepoch; + uint32_t secs, microsecs; + uint64_t now, t64, secs_64, usec_64; + spl_t s = splclock(); + + simple_lock(&rtclock_lock); + + if (rtclock_calend.adjdelta >= 0) { + now = mach_absolute_time(); + + epoch = rtclock_calend.epoch; + microepoch = rtclock_calend.microepoch; + + secs = secs_64 = now / rtclock_sec_divisor; + t64 = now - (secs_64 * rtclock_sec_divisor); + microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor; + + TIME_ADD(secs, epoch, microsecs, microepoch, USEC_PER_SEC); + + /* adjust "now" to be absolute time at _start_ of usecond */ + now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC); + + commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor); + } + else { + uint32_t delta, t32; + + delta = -rtclock_calend.adjdelta; + + now = mach_absolute_time(); + + secs = rtclock_calend.epoch; + microsecs = rtclock_calend.microepoch; + + if (now > rtclock_calend.epoch1) { + t64 = now - rtclock_calend.epoch1; + + t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor; + + if (t32 > delta) + TIME_ADD(secs, 0, microsecs, (t32 - delta), USEC_PER_SEC); + } + + /* no need to disable timestamp, it is already off */ + } + + simple_unlock(&rtclock_lock); + splx(s); + + *secs_p = secs; + *microsecs_p = microsecs; +} + +void +clock_get_calendar_nanotime( + uint32_t *secs, + uint32_t *nanosecs) +{ + uint32_t epoch, nanoepoch; + uint64_t now, t64; + spl_t s = splclock(); + + simple_lock(&rtclock_lock); + + if (rtclock_calend.adjdelta >= 0) { + uint32_t divisor; + + now = mach_absolute_time(); + + epoch = rtclock_calend.epoch; + nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC; + + simple_unlock(&rtclock_lock); + + *secs = t64 = now / (divisor = rtclock_sec_divisor); + now -= (t64 * divisor); + *nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC; + + TIME_ADD(*secs, epoch, *nanosecs, nanoepoch, NSEC_PER_SEC); + } + else { + uint32_t delta, t32; + + delta = -rtclock_calend.adjdelta; + + now = mach_absolute_time(); + + *secs = rtclock_calend.epoch; + *nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC; + + if (now > rtclock_calend.epoch1) { + t64 = now - rtclock_calend.epoch1; + + t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor; + + if (t32 > delta) + TIME_ADD(*secs, 0, *nanosecs, ((t32 - delta) * NSEC_PER_USEC), NSEC_PER_SEC); + } + + simple_unlock(&rtclock_lock); + } + + splx(s); +} + +void +clock_set_calendar_microtime( + uint32_t secs, + uint32_t microsecs) +{ + uint32_t sys, microsys; + uint32_t newsecs; + spl_t s; + + newsecs = (microsecs < 500*USEC_PER_SEC)? + secs: secs + 1; + + s = splclock(); + simple_lock(&rtclock_lock); + + commpage_set_timestamp(0,0,0,0); + + /* + * Cancel any adjustment in progress. 
+ */ + if (rtclock_calend.adjdelta < 0) { + uint64_t now, t64; + uint32_t delta, t32; + + delta = -rtclock_calend.adjdelta; + + sys = rtclock_calend.epoch; + microsys = rtclock_calend.microepoch; + + now = mach_absolute_time(); + + if (now > rtclock_calend.epoch1) + t64 = now - rtclock_calend.epoch1; + else + t64 = 0; + + t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor; + + if (t32 > delta) + TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC); + + rtclock_calend.epoch = sys; + rtclock_calend.microepoch = microsys; + + sys = t64 = now / rtclock_sec_divisor; + now -= (t64 * rtclock_sec_divisor); + microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor; + + TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC); + } + + rtclock_calend.epoch1 = 0; + rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0; + + /* + * Calculate the new calendar epoch based on + * the new value and the system clock. + */ + clock_get_system_microtime(&sys, µsys); + TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC); + + /* + * Adjust the boottime based on the delta. + */ + rtclock_boottime += secs - rtclock_calend.epoch; + + /* + * Set the new calendar epoch. + */ + rtclock_calend.epoch = secs; + rtclock_calend.microepoch = microsecs; + + simple_unlock(&rtclock_lock); + + /* + * Set the new value for the platform clock. + */ + PESetGMTTimeOfDay(newsecs); + + splx(s); + + /* + * Send host notifications. + */ + host_notify_calendar_change(); +} + +#define tickadj (40) /* "standard" skew, us / tick */ +#define bigadj (USEC_PER_SEC) /* use 10x skew above bigadj us */ + +uint32_t +clock_set_calendar_adjtime( + int32_t *secs, + int32_t *microsecs) +{ + int64_t total, ototal; + uint32_t interval = 0; + spl_t s; + + total = (int64_t)*secs * USEC_PER_SEC + *microsecs; LOCK_RTC(s); - if (rtclock.calend_is_set) - ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec); + commpage_set_timestamp(0,0,0,0); + + ototal = rtclock_calend.adjtotal; + + if (rtclock_calend.adjdelta < 0) { + uint64_t now, t64; + uint32_t delta, t32; + uint32_t sys, microsys; + + delta = -rtclock_calend.adjdelta; + + sys = rtclock_calend.epoch; + microsys = rtclock_calend.microepoch; + + now = mach_absolute_time(); + + if (now > rtclock_calend.epoch1) + t64 = now - rtclock_calend.epoch1; + else + t64 = 0; + + t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor; + + if (t32 > delta) + TIME_ADD(sys, 0, microsys, (t32 - delta), USEC_PER_SEC); + + rtclock_calend.epoch = sys; + rtclock_calend.microepoch = microsys; + + sys = t64 = now / rtclock_sec_divisor; + now -= (t64 * rtclock_sec_divisor); + microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor; + + TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC); + } + + if (total != 0) { + int32_t delta = tickadj; + + if (total > 0) { + if (total > bigadj) + delta *= 10; + if (delta > total) + delta = total; + + rtclock_calend.epoch1 = 0; + } + else { + uint64_t now, t64; + uint32_t sys, microsys; + + if (total < -bigadj) + delta *= 10; + delta = -delta; + if (delta < total) + delta = total; + + rtclock_calend.epoch1 = now = mach_absolute_time(); + + sys = t64 = now / rtclock_sec_divisor; + now -= (t64 * rtclock_sec_divisor); + microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor; + + TIME_ADD(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC); + } + + rtclock_calend.adjtotal = total; + rtclock_calend.adjdelta = delta; + + interval = rtclock_tick_interval; + } + else { + rtclock_calend.epoch1 = 0; + 
-void
-clock_initialize_calendar(void)
+uint32_t
+clock_adjust_calendar(void)
 {
-	mach_timespec_t curr_time;
-	long seconds = PEGetGMTTimeOfDay();
-	spl_t s;
+	uint32_t		interval = 0;
+	int32_t			delta;
+	spl_t			s;
 
 	LOCK_RTC(s);
-	(void) sysclk_gettime_internal(&curr_time);
-	if (curr_time.tv_nsec < 500*USEC_PER_SEC)
-		rtclock.calend_offset.tv_sec = seconds;
+	commpage_set_timestamp(0,0,0,0);
+
+	delta = rtclock_calend.adjdelta;
+
+	if (delta > 0) {
+		TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, delta, USEC_PER_SEC);
+
+		rtclock_calend.adjtotal -= delta;
+		if (delta > rtclock_calend.adjtotal)
+			rtclock_calend.adjdelta = rtclock_calend.adjtotal;
+	}
 	else
-		rtclock.calend_offset.tv_sec = seconds + 1;
-	rtclock.calend_offset.tv_nsec = 0;
-	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
-	rtclock.calend_is_set = TRUE;
+	if (delta < 0) {
+		uint64_t	now, t64;
+		uint32_t	t32;
+
+		now = mach_absolute_time();
+
+		if (now > rtclock_calend.epoch1)
+			t64 = now - rtclock_calend.epoch1;
+		else
+			t64 = 0;
+
+		rtclock_calend.epoch1 = now;
+
+		t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor;
+
+		TIME_ADD(rtclock_calend.epoch, 0, rtclock_calend.microepoch, (t32 + delta), USEC_PER_SEC);
+
+		rtclock_calend.adjtotal -= delta;
+		if (delta < rtclock_calend.adjtotal)
+			rtclock_calend.adjdelta = rtclock_calend.adjtotal;
+
+		if (rtclock_calend.adjdelta == 0) {
+			uint32_t	sys, microsys;
+
+			sys = t64 = now / rtclock_sec_divisor;
+			now -= (t64 * rtclock_sec_divisor);
+			microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor;
+
+			TIME_SUB(rtclock_calend.epoch, sys, rtclock_calend.microepoch, microsys, USEC_PER_SEC);
+
+			rtclock_calend.epoch1 = 0;
+		}
+	}
+
+	if (rtclock_calend.adjdelta != 0)
+		interval = rtclock_tick_interval;
+
 	UNLOCK_RTC(s);
+
+	return (interval);
 }
 
-mach_timespec_t
-clock_get_calendar_offset(void)
+/*
+ * clock_initialize_calendar:
+ *
+ * Set the calendar and related clocks
+ * from the platform clock at boot or
+ * wake event.
+ */
+void
+clock_initialize_calendar(void)
 {
-	mach_timespec_t result = MACH_TIMESPEC_ZERO;
-	spl_t s;
+	uint32_t		sys, microsys;
+	uint32_t		microsecs = 0, secs = PEGetGMTTimeOfDay();
+	spl_t			s;
 
 	LOCK_RTC(s);
-	if (rtclock.calend_is_set)
-		result = rtclock.calend_offset;
+	commpage_set_timestamp(0,0,0,0);
+
+	if ((int32_t)secs >= (int32_t)rtclock_boottime) {
+		/*
+		 * Initialize the boot time based on the platform clock.
+		 */
+		if (rtclock_boottime == 0)
+			rtclock_boottime = secs;
+
+		/*
+		 * Calculate the new calendar epoch based
+		 * on the platform clock and the system
+		 * clock.
+		 */
+		clock_get_system_microtime(&sys, &microsys);
+		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
+
+		/*
+		 * Set the new calendar epoch.
+		 */
+		rtclock_calend.epoch = secs;
+		rtclock_calend.microepoch = microsecs;
+
+		/*
+		 * Cancel any adjustment in progress.
+		 */
+		rtclock_calend.epoch1 = 0;
+		rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0;
+	}
+
 	UNLOCK_RTC(s);
-	return (result);
+
+	/*
+	 * Send host notifications.
+	 */
+	host_notify_calendar_change();
+}
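The calendar is thus kept only as an epoch, the offset between the platform clock and system uptime; reading the calendar later simply adds the current uptime back, so the platform clock is never touched again until a set or wake event. A worked example of the TIME_SUB step in clock_initialize_calendar() above, with hypothetical readings:

    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_SEC 1000000

    /* Borrowing subtraction, same shape as the kernel's TIME_SUB macro. */
    #define TIME_SUB(rsecs, secs, rfrac, frac, unit)    \
    do {                                                \
        if ((int32_t)((rfrac) -= (frac)) < 0) {         \
            (rfrac) += (unit);                          \
            (rsecs) -= 1;                               \
        }                                               \
        (rsecs) -= (secs);                              \
    } while (0)

    int main(void)
    {
        /* Hypothetical readings: the platform clock says 1100000010 s
         * and uptime is 9.500000 s, so the epoch becomes
         * 1100000000.500000; calendar time = epoch + uptime. */
        uint32_t secs = 1100000010, microsecs = 0;  /* platform clock */
        uint32_t sys = 9, microsys = 500000;        /* system microtime */

        TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
        printf("%u.%06u\n", secs, microsecs);   /* prints 1100000000.500000 */
        return 0;
    }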
+ */ + host_notify_calendar_change(); +} + +void +clock_get_boottime_nanotime( + uint32_t *secs, + uint32_t *nanosecs) +{ + *secs = rtclock_boottime; + *nanosecs = 0; } void clock_timebase_info( mach_timebase_info_t info) { - spl_t s; + spl_t s; LOCK_RTC(s); - *info = rtclock.timebase_const; + rtclock_timebase_initialized = TRUE; + *info = rtclock_timebase_const; UNLOCK_RTC(s); } @@ -698,30 +888,25 @@ void clock_set_timer_deadline( uint64_t deadline) { + int decr; uint64_t abstime; - int decr, mycpu; - struct rtclock_timer *mytimer; + rtclock_timer_t *mytimer; + struct per_proc_info *pp; spl_t s; s = splclock(); - mycpu = cpu_number(); - mytimer = &rtclock.timer[mycpu]; - clock_get_uptime(&abstime); - rtclock.last_abstime[mycpu] = abstime; + pp = getPerProc(); + mytimer = &pp->rtclock_timer; mytimer->deadline = deadline; - mytimer->is_set = TRUE; - if ( mytimer->deadline < rtclock_tick_deadline[mycpu] ) { - decr = deadline_to_decrementer(mytimer->deadline, abstime); - if ( rtclock_decrementer_min != 0 && - rtclock_decrementer_min < (natural_t)decr ) - decr = rtclock_decrementer_min; - mtdec(decr); - rtclock.last_decr[mycpu] = decr; + if (!mytimer->has_expired && (deadline < pp->rtclock_tick_deadline)) { /* Has the timer already expired or is less that set? */ + pp->rtcPop = deadline; /* Yes, set the new rtc pop time */ + decr = setTimerReq(); /* Start the timers going */ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) - | DBG_FUNC_NONE, decr, 2, 0, 0, 0); + | DBG_FUNC_NONE, decr, 2, 0, 0, 0); } + splx(s); } @@ -732,85 +917,78 @@ clock_set_timer_func( spl_t s; LOCK_RTC(s); - if (rtclock.timer_expire == NULL) - rtclock.timer_expire = func; + if (rtclock_timer_expire == NULL) + rtclock_timer_expire = func; UNLOCK_RTC(s); } -/* - * Reset the clock device. This causes the realtime clock - * device to reload its mode and count value (frequency). - */ -void -rtclock_reset(void) -{ - return; -} - /* * Real-time clock device interrupt. */ void -rtclock_intr( - int device, - struct savearea *ssp, - spl_t old_spl) -{ +rtclock_intr(struct savearea *ssp) { + uint64_t abstime; - int decr[3], mycpu = cpu_number(); - struct rtclock_timer *mytimer = &rtclock.timer[mycpu]; + int decr; + rtclock_timer_t *mytimer; + struct per_proc_info *pp; - /* - * We may receive interrupts too early, we must reject them. - */ - if (rtclock_initialized == FALSE) { - mtdec(DECREMENTER_MAX); /* Max the decrementer if not init */ - return; - } + pp = getPerProc(); + mytimer = &pp->rtclock_timer; - decr[1] = decr[2] = DECREMENTER_MAX; - - clock_get_uptime(&abstime); - rtclock.last_abstime[mycpu] = abstime; - if ( rtclock_tick_deadline[mycpu] <= abstime ) { + abstime = mach_absolute_time(); + if (pp->rtclock_tick_deadline <= abstime) { /* Have we passed the pop time? */ clock_deadline_for_periodic_event(rtclock_tick_interval, abstime, - &rtclock_tick_deadline[mycpu]); + &pp->rtclock_tick_deadline); hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0); + abstime = mach_absolute_time(); /* Refresh the current time since we went away */ } - clock_get_uptime(&abstime); - rtclock.last_abstime[mycpu] = abstime; - if ( mytimer->is_set && - mytimer->deadline <= abstime ) { - mytimer->is_set = FALSE; - (*rtclock.timer_expire)(abstime); + if (mytimer->deadline <= abstime) { /* Have we expired the deadline? 
 /*
  * Real-time clock device interrupt.
  */
 void
-rtclock_intr(
-	int		device,
-	struct savearea	*ssp,
-	spl_t		old_spl)
-{
+rtclock_intr(struct savearea *ssp) {
+
 	uint64_t	abstime;
-	int		decr[3], mycpu = cpu_number();
-	struct rtclock_timer	*mytimer = &rtclock.timer[mycpu];
+	int		decr;
+	rtclock_timer_t	*mytimer;
+	struct per_proc_info	*pp;
 
-	/*
-	 * We may receive interrupts too early, we must reject them.
-	 */
-	if (rtclock_initialized == FALSE) {
-		mtdec(DECREMENTER_MAX);		/* Max the decrementer if not init */
-		return;
-	}
+	pp = getPerProc();
+	mytimer = &pp->rtclock_timer;
 
-	decr[1] = decr[2] = DECREMENTER_MAX;
-
-	clock_get_uptime(&abstime);
-	rtclock.last_abstime[mycpu] = abstime;
-	if ( rtclock_tick_deadline[mycpu] <= abstime ) {
+	abstime = mach_absolute_time();
+	if (pp->rtclock_tick_deadline <= abstime) {		/* Have we passed the pop time? */
 		clock_deadline_for_periodic_event(rtclock_tick_interval, abstime,
-			&rtclock_tick_deadline[mycpu]);
+			&pp->rtclock_tick_deadline);
 		hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0);
+		abstime = mach_absolute_time();			/* Refresh the current time since we went away */
 	}
 
-	clock_get_uptime(&abstime);
-	rtclock.last_abstime[mycpu] = abstime;
-	if ( mytimer->is_set &&
-	     mytimer->deadline <= abstime ) {
-		mytimer->is_set = FALSE;
-		(*rtclock.timer_expire)(abstime);
+	if (mytimer->deadline <= abstime) {			/* Have we expired the deadline? */
+		mytimer->has_expired = TRUE;			/* Remember that we popped */
+		mytimer->deadline = EndOfAllTime;		/* Set timer request to the end of all time in case we have no more events */
+		(*rtclock_timer_expire)(abstime);		/* Process pop */
+		mytimer->has_expired = FALSE;
 	}
 
-	clock_get_uptime(&abstime);
-	rtclock.last_abstime[mycpu] = abstime;
-	decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
+	pp->rtcPop = (pp->rtclock_tick_deadline < mytimer->deadline) ?	/* Get shortest pop */
+		pp->rtclock_tick_deadline :			/* It was the periodic timer */
+		mytimer->deadline;				/* Actually, an event request */
+
+	decr = setTimerReq();					/* Request the timer pop */
+
+	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
+		| DBG_FUNC_NONE, decr, 3, 0, 0, 0);
+}
 
-	if (mytimer->is_set)
-		decr[2] = deadline_to_decrementer(mytimer->deadline, abstime);
+/*
+ * Request an interruption at a specific time
+ *
+ * Sets the decrementer to pop at the right time based on the timebase.
+ * The value is chosen by comparing the rtc request with the power
+ * management request. We may add other values at a future time.
+ *
+ */
+
+int setTimerReq(void) {
 
-	if (decr[1] > decr[2])
-		decr[1] = decr[2];
+	struct per_proc_info *pp;
+	int decr;
+	uint64_t nexttime;
+
+	pp = getPerProc();					/* Get per_proc */
 
-	if ( rtclock_decrementer_min != 0		&&
-	     rtclock_decrementer_min < (natural_t)decr[1]		)
-		decr[1] = rtclock_decrementer_min;
+	nexttime = pp->rtcPop;					/* Assume main timer */
 
-	mtdec(decr[1]);
-	rtclock.last_decr[mycpu] = decr[1];
+	decr = setPop((pp->pms.pmsPop < nexttime) ? pp->pms.pmsPop : nexttime);	/* Schedule timer pop */
 
-	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
-		| DBG_FUNC_NONE, decr[1], 3, 0, 0, 0);
+	return decr;						/* Pass back what we actually set */
 }
 
 static void
-rtclock_alarm_timer(
-	timer_call_param_t	p0,
-	timer_call_param_t	p1)
+rtclock_alarm_expire(
+	__unused void	*p0,
+	__unused void	*p1)
 {
 	mach_timespec_t	timestamp;
 
@@ -819,71 +997,30 @@
 	clock_alarm_intr(SYSTEM_CLOCK, &timestamp);
 }
 
-void
-clock_get_uptime(
-	uint64_t	*result0)
-{
-	UnsignedWide	*result = (UnsignedWide *)result0;
-	uint32_t	hi, lo, hic;
-
-	do {
-		asm volatile("	mftbu %0" : "=r" (hi));
-		asm volatile("	mftb %0" : "=r" (lo));
-		asm volatile("	mftbu %0" : "=r" (hic));
-	} while (hic != hi);
-
-	result->lo = lo;
-	result->hi = hi;
-}
-
-static int
-deadline_to_decrementer(
-	uint64_t	deadline,
-	uint64_t	now)
+static void
+nanotime_to_absolutetime(
+	uint32_t	secs,
+	uint32_t	nanosecs,
+	uint64_t	*result)
 {
-	uint64_t	delt;
+	uint32_t	divisor = rtclock_sec_divisor;
 
-	if (deadline <= now)
-		return DECREMENTER_MIN;
-	else {
-		delt = deadline - now;
-		return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX:
-			((delt >= (DECREMENTER_MIN + 1))?
(delt - 1): DECREMENTER_MIN); - } + *result = ((uint64_t)secs * divisor) + + ((uint64_t)nanosecs * divisor) / NSEC_PER_SEC; } -static void -timespec_to_absolutetime( - mach_timespec_t timespec, - uint64_t *result0) +void +absolutetime_to_microtime( + uint64_t abstime, + uint32_t *secs, + uint32_t *microsecs) { - UnsignedWide *result = (UnsignedWide *)result0; - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - spl_t s; + uint64_t t64; + uint32_t divisor; - LOCK_RTC(s); - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - UNLOCK_RTC(s); - - asm volatile(" mullw %0,%1,%2" : - "=r" (t64.lo) : - "r" (timespec.tv_sec), "r" (NSEC_PER_SEC)); - - asm volatile(" mulhwu %0,%1,%2" : - "=r" (t64.hi) : - "r" (timespec.tv_sec), "r" (NSEC_PER_SEC)); - - UnsignedWide_to_scalar(&t64) += timespec.tv_nsec; - - umul_64by32(t64, denom, &t64, &t32); - - udiv_96by32(t64, t32, numer, &t64, &t32); - - result->hi = t64.lo; - result->lo = t32; + *secs = t64 = abstime / (divisor = rtclock_sec_divisor); + abstime -= (t64 * divisor); + *microsecs = (abstime * USEC_PER_SEC) / divisor; } void @@ -892,7 +1029,7 @@ clock_interval_to_deadline( uint32_t scale_factor, uint64_t *result) { - uint64_t abstime; + uint64_t abstime; clock_get_uptime(result); @@ -905,32 +1042,16 @@ void clock_interval_to_absolutetime_interval( uint32_t interval, uint32_t scale_factor, - uint64_t *result0) + uint64_t *result) { - UnsignedWide *result = (UnsignedWide *)result0; - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - spl_t s; - - LOCK_RTC(s); - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - UNLOCK_RTC(s); - - asm volatile(" mullw %0,%1,%2" : - "=r" (t64.lo) : - "r" (interval), "r" (scale_factor)); - asm volatile(" mulhwu %0,%1,%2" : - "=r" (t64.hi) : - "r" (interval), "r" (scale_factor)); - - umul_64by32(t64, denom, &t64, &t32); - - udiv_96by32(t64, t32, numer, &t64, &t32); - - result->hi = t64.lo; - result->lo = t32; + uint64_t nanosecs = (uint64_t)interval * scale_factor; + uint64_t t64; + uint32_t divisor; + + *result = (t64 = nanosecs / NSEC_PER_SEC) * + (divisor = rtclock_sec_divisor); + nanosecs -= (t64 * NSEC_PER_SEC); + *result += (nanosecs * divisor) / NSEC_PER_SEC; } void @@ -948,76 +1069,36 @@ absolutetime_to_nanoseconds( uint64_t abstime, uint64_t *result) { - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - spl_t s; - - LOCK_RTC(s); - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - UNLOCK_RTC(s); - - UnsignedWide_to_scalar(&t64) = abstime; + uint64_t t64; + uint32_t divisor; - umul_64by32(t64, numer, &t64, &t32); - - udiv_96by32to64(t64, t32, denom, (void *)result); + *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC; + abstime -= (t64 * divisor); + *result += (abstime * NSEC_PER_SEC) / divisor; } void nanoseconds_to_absolutetime( - uint64_t nanoseconds, + uint64_t nanosecs, uint64_t *result) { - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - spl_t s; - - LOCK_RTC(s); - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - UNLOCK_RTC(s); - - UnsignedWide_to_scalar(&t64) = nanoseconds; - - umul_64by32(t64, denom, &t64, &t32); + uint64_t t64; + uint32_t divisor; - udiv_96by32to64(t64, t32, numer, (void *)result); -} - -/* - * Spin-loop delay primitives. 
- */ -void -delay_for_interval( - uint32_t interval, - uint32_t scale_factor) -{ - uint64_t now, end; - - clock_interval_to_deadline(interval, scale_factor, &end); - - do { - clock_get_uptime(&now); - } while (now < end); + *result = (t64 = nanosecs / NSEC_PER_SEC) * + (divisor = rtclock_sec_divisor); + nanosecs -= (t64 * NSEC_PER_SEC); + *result += (nanosecs * divisor) / NSEC_PER_SEC; } void -clock_delay_until( +machine_delay_until( uint64_t deadline) { uint64_t now; do { - clock_get_uptime(&now); + now = mach_absolute_time(); } while (now < deadline); } -void -delay( - int usec) -{ - delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC); -}