/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _I386_LOCKS_I386_INLINES_H_
#define _I386_LOCKS_I386_INLINES_H_

#include <kern/locks.h>
#include <kern/lock_stat.h>

// Enforce program order of loads and stores.
#define ordered_load(target) _Generic( (target),\
	uint32_t* : __c11_atomic_load((_Atomic uint32_t* )(target), memory_order_relaxed), \
	uintptr_t*: __c11_atomic_load((_Atomic uintptr_t*)(target), memory_order_relaxed) )
#define ordered_store_release(target, value) _Generic( (target),\
	uint32_t* : __c11_atomic_store((_Atomic uint32_t* )(target), (value), memory_order_release_smp), \
	uintptr_t*: __c11_atomic_store((_Atomic uintptr_t*)(target), (value), memory_order_release_smp) )
#define ordered_store_volatile(target, value) _Generic( (target),\
	volatile uint32_t* : __c11_atomic_store((_Atomic volatile uint32_t* )(target), (value), memory_order_relaxed), \
	volatile uintptr_t*: __c11_atomic_store((_Atomic volatile uintptr_t*)(target), (value), memory_order_relaxed) )
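
/*
 * Illustrative sketch (editorial, not part of the original header): the
 * _Generic selections above dispatch on the static type of `target`, so
 * the matching atomic operation is chosen at compile time. For example,
 * with a hypothetical 32-bit state word:
 *
 *	uint32_t word = 0;
 *	ordered_store_release(&word, 1u);	// uint32_t* branch, release store
 *	uint32_t v = ordered_load(&word);	// uint32_t* branch, relaxed load
 *
 * A pointer of any other type has no matching association and fails to
 * compile, which keeps these macros from being applied to fields they
 * were not written for.
 */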
/* Enforce program order of loads and stores. */
#define ordered_load_mtx_state(lock)                    ordered_load(&(lock)->lck_mtx_state)
#define ordered_store_mtx_state_release(lock, value)    ordered_store_release(&(lock)->lck_mtx_state, (value))
#define ordered_store_mtx_owner(lock, value)            ordered_store_volatile(&(lock)->lck_mtx_owner, (value))

#if DEVELOPMENT | DEBUG
void lck_mtx_owner_check_panic(lck_mtx_t *mutex);
#endif

__attribute__((always_inline))
static inline void
lck_mtx_ilk_unlock_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	state &= ~LCK_MTX_ILOCKED_MSK;
	ordered_store_mtx_state_release(mutex, state);

	enable_preemption();
}

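/*
 * Usage sketch (editorial; `thread` below is a hypothetical local, not
 * from the original source): a caller that holds the interlock updates
 * the fields it owns and then drops the interlock, which also re-enables
 * preemption:
 *
 *	uint32_t state = ordered_load_mtx_state(mutex);
 *	ordered_store_mtx_owner(mutex, (uintptr_t)thread);
 *	lck_mtx_ilk_unlock_inline(mutex, state);
 *
 * The release store in lck_mtx_ilk_unlock_inline guarantees the owner
 * update is visible before the interlock bit is observed as clear.
 */
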
__attribute__((always_inline))
static inline void
lck_mtx_lock_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state,
	boolean_t       indirect)
{
	assert(state & LCK_MTX_ILOCKED_MSK);

	/* release the interlock and re-enable preemption */
	lck_mtx_ilk_unlock_inline(mutex, state);

#if CONFIG_DTRACE
	if (indirect) {
		LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, mutex, 0);
	} else {
		LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, mutex, 0);
	}
#endif
}

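/*
 * Editorial note: LOCKSTAT_RECORD expands to a DTrace lockstat probe
 * when CONFIG_DTRACE is configured. The `indirect` flag selects the
 * *_EXT_* probe for mutexes backed by an external lck_mtx_ext_t rather
 * than an ordinary inline mutex.
 */
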
__attribute__((always_inline))
static inline void
lck_mtx_try_lock_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	/* release the interlock and re-enable preemption */
	lck_mtx_ilk_unlock_inline(mutex, state);

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, mutex, 0);
#endif
}

__attribute__((always_inline))
static inline void
lck_mtx_convert_spin_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	/* release the interlock and acquire the lock as a mutex */
	state &= ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK);
	state |= LCK_MTX_MLOCKED_MSK;

	ordered_store_mtx_state_release(mutex, state);
	enable_preemption();
}

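/*
 * Editorial note: the conversion above never drops ownership. A single
 * release store swaps the SPIN bit for the MLOCKED bit while clearing
 * the interlock, so other CPUs see the lock change representation
 * without ever observing it free.
 */
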
__attribute__((always_inline))
static inline void
lck_mtx_unlock_finish_inline(
	lck_mtx_t       *mutex,
	boolean_t       indirect)
{
	enable_preemption();

#if CONFIG_DTRACE
	if (indirect) {
		LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, mutex, 0);
	} else {
		LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, mutex, 0);
	}
#endif // CONFIG_DTRACE
}

#endif /* _I386_LOCKS_I386_INLINES_H_ */