diff --git a/osfmk/i386/locks_i386_inlines.h b/osfmk/i386/locks_i386_inlines.h
index 7e4aa5995b6b531d4f5b43a73ef011518d1a59bc..b10b70febabe617c349abad93307c3c2f2b0a241 100644
--- a/osfmk/i386/locks_i386_inlines.h
+++ b/osfmk/i386/locks_i386_inlines.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2017 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 #define _I386_LOCKS_I386_INLINES_H_
 
 #include <kern/locks.h>
-/*
- * We need only enough declarations from the BSD-side to be able to
- * test if our probe is active, and to call __dtrace_probe().  Setting
- * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in.
- */
-#if    CONFIG_DTRACE
-#define NEED_DTRACE_DEFS
-#include <../bsd/sys/lockstat.h>
-#endif
+#include <kern/lock_stat.h>
+#include <kern/turnstile.h>
 
 // Enforce program order of loads and stores.
-#define ordered_load(target) _Generic( (target),\
-               uint32_t* : __c11_atomic_load((_Atomic uint32_t* )(target), memory_order_relaxed), \
-               uintptr_t*: __c11_atomic_load((_Atomic uintptr_t*)(target), memory_order_relaxed) )
-#define ordered_store_release(target, value) _Generic( (target),\
-               uint32_t* : __c11_atomic_store((_Atomic uint32_t* )(target), (value), memory_order_release_smp), \
-               uintptr_t*: __c11_atomic_store((_Atomic uintptr_t*)(target), (value), memory_order_release_smp) )
-#define ordered_store_volatile(target, value) _Generic( (target),\
-               volatile uint32_t* : __c11_atomic_store((_Atomic volatile uint32_t* )(target), (value), memory_order_relaxed), \
-               volatile uintptr_t*: __c11_atomic_store((_Atomic volatile uintptr_t*)(target), (value), memory_order_relaxed) )
+#define ordered_load(target) os_atomic_load(target, compiler_acq_rel)
+#define ordered_store_release(target, value) ({ \
+               os_atomic_store(target, value, release); \
+               os_compiler_barrier(); \
+})
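/*
 * Illustration only, not part of the change: a minimal usage sketch of
 * the wrappers above, assuming a hypothetical uint32_t lock word `word`
 * and bit mask `BIT`:
 *
 *     uint32_t v = ordered_load(&word);        relaxed load; compiler_acq_rel
 *                                              pins only compiler ordering
 *     ordered_store_release(&word, v | BIT);   release store, followed by an
 *                                              extra compiler barrier
 *
 * The release ordering on the store is what publishes writes made while
 * the lock was held to the next CPU that acquires the lock word.
 */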
 
 /* Enforce program order of loads and stores. */
-#define ordered_load_mtx_state(lock)                   ordered_load(&(lock)->lck_mtx_state)
-#define ordered_store_mtx_state_release(lock, value)           ordered_store_release(&(lock)->lck_mtx_state, (value))
-#define ordered_store_mtx_owner(lock, value)   ordered_store_volatile(&(lock)->lck_mtx_owner, (value))
+#define ordered_load_mtx_state(lock)                    ordered_load(&(lock)->lck_mtx_state)
+#define ordered_store_mtx_state_release(lock, value)            ordered_store_release(&(lock)->lck_mtx_state, (value))
+#define ordered_store_mtx_owner(lock, value)    os_atomic_store(&(lock)->lck_mtx_owner, (value), compiler_acq_rel)
 
 #if DEVELOPMENT | DEBUG
-void lck_mtx_owner_check_panic(lck_mtx_t       *mutex);
+void lck_mtx_owner_check_panic(lck_mtx_t       *mutex) __abortlike;
 #endif
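/*
 * __abortlike marks the panic helper as never returning; it is an
 * attribute macro from xnu's sys/cdefs.h whose exact expansion is an
 * assumption here, but whose effect matches __attribute__((noreturn)).
 * That lets the compiler treat the owner-check failure path as cold and
 * discard unreachable code after the call site.
 */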
 
 __attribute__((always_inline))
 static inline void
 lck_mtx_ilk_unlock_inline(
        lck_mtx_t       *mutex,
-       uint32_t        state)
+       uint32_t        state)
 {
        state &= ~LCK_MTX_ILOCKED_MSK;
        ordered_store_mtx_state_release(mutex, state);
@@ -76,15 +65,38 @@ __attribute__((always_inline))
 static inline void
 lck_mtx_lock_finish_inline(
        lck_mtx_t       *mutex,
-       uint32_t        state,
-       boolean_t       indirect)
+       uint32_t        state,
+       boolean_t       indirect)
+{
+       assert(state & LCK_MTX_ILOCKED_MSK);
+
+       /* release the interlock and re-enable preemption */
+       lck_mtx_ilk_unlock_inline(mutex, state);
+
+#if     CONFIG_DTRACE
+       if (indirect) {
+               LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, mutex, 0);
+       } else {
+               LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, mutex, 0);
+       }
+#endif
+}
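/*
 * Call-site sketch (the caller is hypothetical, and the mask name
 * LCK_MTX_MLOCKED_MSK is an assumption; only LCK_MTX_ILOCKED_MSK and
 * LCK_MTX_SPIN_MSK appear in this diff): a slow-path acquire that has
 * set the owner and mutex bit while holding the interlock would end with
 *
 *     state |= LCK_MTX_MLOCKED_MSK;
 *     lck_mtx_lock_finish_inline(mutex, state, FALSE);
 *
 * which drops the interlock, re-enables preemption, and, when
 * CONFIG_DTRACE is built in, fires the lockstat acquire probe.
 */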
+
+__attribute__((always_inline))
+static inline void
+lck_mtx_lock_finish_inline_with_cleanup(
+       lck_mtx_t       *mutex,
+       uint32_t        state,
+       boolean_t       indirect)
 {
        assert(state & LCK_MTX_ILOCKED_MSK);
 
        /* release the interlock and re-enable preemption */
        lck_mtx_ilk_unlock_inline(mutex, state);
 
-#if    CONFIG_DTRACE
+       turnstile_cleanup();
+
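/*
 * Ordering note, inferred from the code rather than stated in it:
 * turnstile_cleanup() runs only after the interlock is released and
 * preemption is back on, since finishing a deferred turnstile operation
 * may itself need to block, which is not permitted while preemption is
 * disabled.
 */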
+#if     CONFIG_DTRACE
        if (indirect) {
                LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, mutex, 0);
        } else {
@@ -97,12 +109,12 @@ __attribute__((always_inline))
 static inline void
 lck_mtx_try_lock_finish_inline(
        lck_mtx_t       *mutex,
-       uint32_t        state)
+       uint32_t        state)
 {
        /* release the interlock and re-enable preemption */
        lck_mtx_ilk_unlock_inline(mutex, state);
 
-#if    CONFIG_DTRACE
+#if     CONFIG_DTRACE
        LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, mutex, 0);
 #endif
 }
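/*
 * Contract sketch for the probe above (hypothetical caller; only a
 * successful try reaches this finish path and records
 * LS_LCK_MTX_TRY_LOCK_ACQUIRE, a failed attempt records nothing):
 *
 *     if (lck_mtx_try_lock(mutex)) {
 *             do_protected_work();
 *             lck_mtx_unlock(mutex);
 *     }
 */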
@@ -111,7 +123,7 @@ __attribute__((always_inline))
 static inline void
 lck_mtx_convert_spin_finish_inline(
        lck_mtx_t       *mutex,
-       uint32_t        state)
+       uint32_t        state)
 {
        /* release the interlock and acquire it as mutex */
        state &= ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK);
@@ -129,14 +141,13 @@ lck_mtx_unlock_finish_inline(
 {
        enable_preemption();
 
-#if    CONFIG_DTRACE
+#if     CONFIG_DTRACE
        if (indirect) {
                LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, mutex, 0);
        } else {
                LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, mutex, 0);
        }
-#endif // CONFIG_DTRACE
+#endif  // CONFIG_DTRACE
 }
 
 #endif /* _I386_LOCKS_I386_INLINES_H_ */
-
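Taken together, a minimal sketch of how the public KPI reaches these
inlines (the example function is hypothetical; lck_mtx_lock and
lck_mtx_unlock are the kern/locks.h entry points whose i386 slow paths
call lck_mtx_lock_finish_inline and lck_mtx_unlock_finish_inline):

	#include <kern/locks.h>

	static void
	example_critical_section(lck_mtx_t *mtx)
	{
		lck_mtx_lock(mtx);      /* acquire; may spin or block, then "finish" */
		/* ... state protected by mtx ... */
		lck_mtx_unlock(mtx);    /* release; "finish" re-enables preemption */
	}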