/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
uint32_t hz_tick_interval = 1;
static uint64_t has_monotonic_clock = 0;
-decl_simple_lock_data(, clock_lock)
+decl_simple_lock_data(, clock_lock);
lck_grp_attr_t * settime_lock_grp_attr;
lck_grp_t * settime_lock_grp;
lck_attr_t * settime_lock_attr;
*
* The trick is to use a generation count and set the low bit when it is
* being updated/read; by doing this, we guarantee, through use of the
- * hw_atomic functions, that the generation is incremented when the bit
+ * os_atomic functions, that the generation is incremented when the bit
* is cleared atomically (by using a 1-bit add).
*/
static struct unlocked_clock_calend {
* off the "in progress" bit to get the current generation
* count.
*/
- (void)hw_atomic_and(&stable.gen, ~(uint32_t)1);
+ os_atomic_andnot(&stable.gen, 1, relaxed);
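/*
 * A minimal user-space sketch of the generation-count read path shown
 * above, with C11 <stdatomic.h> standing in for the kernel's os_atomic
 * wrappers.  The names sample_t, slot, and read_sample_nowait are
 * illustrative, not xnu API; the real code also alternates between the
 * two flipflop[] slots rather than spinning on a single one.
 */
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint64_t value; /* payload guarded by the generation count */
	_Atomic uint32_t gen;   /* low bit set => update in progress */
} sample_t;

static sample_t slot;

static uint64_t
read_sample_nowait(void)
{
	for (;;) {
		/*
		 * Snapshot the generation and mask off the low
		 * "in progress" bit, mirroring os_atomic_andnot() above.
		 */
		uint32_t gen = atomic_load_explicit(&slot.gen,
		    memory_order_acquire) & ~(uint32_t)1;

		uint64_t value = atomic_load_explicit(&slot.value,
		    memory_order_relaxed);

		/* Keep the payload read ordered before the re-check. */
		atomic_thread_fence(memory_order_acquire);

		/*
		 * If the count is unchanged (and was even), no update ran
		 * while we copied the payload.  Otherwise a writer raced
		 * us (the count was odd, or has since moved on), so retry.
		 */
		if (atomic_load_explicit(&slot.gen,
		    memory_order_relaxed) == gen) {
			return value;
		}
	}
}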
/*
* If an update _is_ in progress, the generation count will be
* will flag an update in progress to an async caller trying
* to examine the contents.
*/
- (void)hw_atomic_or(&flipflop[i].gen, 1);
+ os_atomic_or(&flipflop[i].gen, 1, relaxed);
flipflop[i].calend = tmp;
* count after taking a copy while in progress, the count
* will be off by two.
*/
- (void)hw_atomic_add(&flipflop[i].gen, 1);
+ os_atomic_inc(&flipflop[i].gen, relaxed);
}
}
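/*
 * The matching writer for the sketch above: the OR and the increment
 * mirror os_atomic_or(&flipflop[i].gen, 1, relaxed) and
 * os_atomic_inc(&flipflop[i].gen, relaxed) in the patch.  Starting
 * from an even count, the OR sets the low bit (odd: update in
 * progress) and the final +1 clears it again, a net +2 per update,
 * which is why a copy taken while an update was in progress compares
 * off by two.  This is again an illustration in C11 atomics, not the
 * kernel implementation; the explicit fences are part of this
 * sketch's portability, not lifted from the patch.
 */
static void
write_sample(uint64_t v)
{
	/* Flag the update: even -> odd. */
	atomic_fetch_or_explicit(&slot.gen, 1, memory_order_relaxed);

	/* Publish the flag before the payload store below. */
	atomic_thread_fence(memory_order_release);

	atomic_store_explicit(&slot.value, v, memory_order_relaxed);

	/* Publish the payload before clearing the flag. */
	atomic_thread_fence(memory_order_release);

	/* Finish: odd -> even, two greater than before the update. */
	atomic_fetch_add_explicit(&slot.gen, 1, memory_order_relaxed);
}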