/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #ifndef _I386_LOCKS_I386_INLINES_H_
30 #define _I386_LOCKS_I386_INLINES_H_
32 #include <kern/locks.h>
34 * We need only enough declarations from the BSD-side to be able to
35 * test if our probe is active, and to call __dtrace_probe(). Setting
36 * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in.
39 #define NEED_DTRACE_DEFS
40 #include <../bsd/sys/lockstat.h>
// Enforce program order of loads and stores.
// _Generic dispatches on the pointer type so the same macro serves both
// 32-bit state words and pointer-sized owner fields; the relaxed load is
// only ordered with respect to program order, not other CPUs.
#define ordered_load(target) _Generic( (target),\
	uint32_t* : __c11_atomic_load((_Atomic uint32_t* )(target), memory_order_relaxed), \
	uintptr_t*: __c11_atomic_load((_Atomic uintptr_t*)(target), memory_order_relaxed) )

// Release store: makes all prior stores visible before the new value
// (memory_order_release_smp is a kernel-defined ordering token).
#define ordered_store_release(target, value) _Generic( (target),\
	uint32_t* : __c11_atomic_store((_Atomic uint32_t* )(target), (value), memory_order_release_smp), \
	uintptr_t*: __c11_atomic_store((_Atomic uintptr_t*)(target), (value), memory_order_release_smp) )

// Relaxed store through a volatile-qualified pointer (e.g. the owner field).
#define ordered_store_volatile(target, value) _Generic( (target),\
	volatile uint32_t* : __c11_atomic_store((_Atomic volatile uint32_t* )(target), (value), memory_order_relaxed), \
	volatile uintptr_t*: __c11_atomic_store((_Atomic volatile uintptr_t*)(target), (value), memory_order_relaxed) )
/* Enforce program order of loads and stores. */
/* Convenience wrappers binding the generic ordered accessors to the
 * lck_mtx_t state and owner fields. */
#define ordered_load_mtx_state(lock)                  ordered_load(&(lock)->lck_mtx_state)
#define ordered_store_mtx_state_release(lock, value)  ordered_store_release(&(lock)->lck_mtx_state, (value))
#define ordered_store_mtx_owner(lock, value)          ordered_store_volatile(&(lock)->lck_mtx_owner, (value))
#if DEVELOPMENT | DEBUG
/* Debug-only owner sanity check; declared here, defined in the lock
 * implementation. NOTE(review): presumed to panic on an owner mismatch —
 * confirm against the definition. */
void lck_mtx_owner_check_panic(lck_mtx_t *mutex);
#endif
63 __attribute__((always_inline
))
65 lck_mtx_ilk_unlock_inline(
69 state
&= ~LCK_MTX_ILOCKED_MSK
;
70 ordered_store_mtx_state_release(mutex
, state
);
75 __attribute__((always_inline
))
77 lck_mtx_lock_finish_inline(
82 assert(state
& LCK_MTX_ILOCKED_MSK
);
84 /* release the interlock and re-enable preemption */
85 lck_mtx_ilk_unlock_inline(mutex
, state
);
89 LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE
, mutex
, 0);
91 LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE
, mutex
, 0);
96 __attribute__((always_inline
))
98 lck_mtx_try_lock_finish_inline(
102 /* release the interlock and re-enable preemption */
103 lck_mtx_ilk_unlock_inline(mutex
, state
);
106 LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE
, mutex
, 0);
110 __attribute__((always_inline
))
112 lck_mtx_convert_spin_finish_inline(
116 /* release the interlock and acquire it as mutex */
117 state
&= ~(LCK_MTX_ILOCKED_MSK
| LCK_MTX_SPIN_MSK
);
118 state
|= LCK_MTX_MLOCKED_MSK
;
120 ordered_store_mtx_state_release(mutex
, state
);
124 __attribute__((always_inline
))
126 lck_mtx_unlock_finish_inline(
134 LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE
, mutex
, 0);
136 LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE
, mutex
, 0);
138 #endif // CONFIG_DTRACE
141 #endif /* _I386_LOCKS_I386_INLINES_H_ */