/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <mach_ldebug.h>
#include <i386/asm.h>		/* Entry, EXT, String, FRAME */
#include <i386/eflags.h>
#include <i386/trap.h>
#include <config_dtrace.h>
#include <i386/mp.h>

#include "assym.s"		/* CPU_PREEMPTION_LEVEL etc. offsets */

#define	PAUSE		rep; nop

#include <i386/pal_lock_asm.h>
#define	LEAF_ENTRY(name)	\
	Entry(name)

#define	LEAF_RET		\
	ret
/* For x86_64, the varargs ABI requires that %al indicate
 * how many SSE registers contain arguments. In our case, 0. */
#define ALIGN_STACK()		and	$0xFFFFFFFFFFFFFFF0, %rsp ;
#define LOAD_STRING_ARG0(label)	leaq	label(%rip), %rdi ;
#define LOAD_ARG1(x)		mov	x, %esi ;
#define LOAD_PTR_ARG1(x)	mov	x, %rsi ;
#define CALL_PANIC()		xorb	%al,%al ; call EXT(panic) ;
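
/*
 * For illustration (the call site below is hypothetical; panic() is
 * the real routine): a C compiler applies the same %al rule at every
 * varargs call,
 *
 *	extern void panic(const char *str, ...);
 *	panic("lock error");	// no SSE args, so %eax/%al is zeroed
 *
 * emitting "xorl %eax, %eax; call _panic", the sequence that
 * CALL_PANIC() writes out by hand.
 */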
#define	PREEMPTION_DISABLE				\
	incl	%gs:CPU_PREEMPTION_LEVEL

#define	PREEMPTION_LEVEL_DEBUG 1
#if	PREEMPTION_LEVEL_DEBUG
#define	PREEMPTION_ENABLE				\
	decl	%gs:CPU_PREEMPTION_LEVEL	;	\
	js	17f				;	\
	jnz	19f				;	\
	testl	$AST_URGENT,%gs:CPU_PENDING_AST	;	\
	jz	19f				;	\
	PUSHF					;	\
	testl	$EFL_IF, S_PC			;	\
	jz	18f				;	\
	POPF					;	\
	int	$(T_PREEMPT)			;	\
	jmp	19f				;	\
17:	call	_preemption_underflow_panic	;	\
18:	POPF					;	\
19:
#else
#define	PREEMPTION_ENABLE				\
	decl	%gs:CPU_PREEMPTION_LEVEL	;	\
	jnz	19f				;	\
	testl	$AST_URGENT,%gs:CPU_PENDING_AST	;	\
	jz	19f				;	\
	PUSHF					;	\
	testl	$EFL_IF, S_PC			;	\
	jz	18f				;	\
	POPF					;	\
	int	$(T_PREEMPT)			;	\
	jmp	19f				;	\
18:	POPF					;	\
19:
#endif
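
/*
 * For reference, a hedged C rendering of PREEMPTION_ENABLE's logic
 * (the cpu_* names, interrupts_enabled() and trap_preempt() below are
 * illustrative, not kernel symbols):
 *
 *	if (--cpu_preemption_level == 0) {
 *		if ((cpu_pending_ast & AST_URGENT) && interrupts_enabled())
 *			trap_preempt();		// the "int $(T_PREEMPT)" path
 *	} else if (cpu_preemption_level < 0) {
 *		preemption_underflow_panic();	// debug variant only
 *	}
 */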
/*
 * For most routines, the hw_lock_t pointer is loaded into a
 * register initially, and then either a byte or register-sized
 * word is loaded/stored to the pointer.
 */

/*
 *	void hw_lock_byte_init(volatile uint8_t *)
 *
 *	Initialize a hardware byte lock.
 */
LEAF_ENTRY(hw_lock_byte_init)
	movb	$0, (%rdi)		/* clear the lock */
	LEAF_RET

/*
 *	void hw_lock_byte_lock(uint8_t *lock_byte)
 *
 *	Acquire byte sized lock operand, spinning until it becomes available.
 *	Returns with preemption disabled.
 */

LEAF_ENTRY(hw_lock_byte_lock)
	PREEMPTION_DISABLE		/* dropped again in hw_lock_byte_unlock */
	movl	$1, %ecx		/* Set lock value */
1:
	movb	(%rdi), %al		/* Load byte at address */
	testb	%al,%al			/* lock locked? */
	jne	3f			/* branch if so */
	lock; cmpxchg %cl,(%rdi)	/* attempt atomic compare exchange */
	jne	3f			/* lost the race, spin again */
	LEAF_RET			/* if yes, then nothing left to do */
3:
	PAUSE				/* pause for hyper-threading */
	jmp	1b			/* try again */
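
/*
 * The loop above is a test-and-test-and-set spinlock.  A hedged C11
 * sketch of the same protocol (0 = free, 1 = held; the
 * preemption_disable() helper is illustrative, not a kernel symbol):
 *
 *	#include <stdatomic.h>
 *
 *	void byte_lock(volatile _Atomic unsigned char *lock)
 *	{
 *		preemption_disable();
 *		for (;;) {
 *			unsigned char free = 0;
 *			if (atomic_load(lock) == 0 &&
 *			    atomic_compare_exchange_weak(lock, &free, 1))
 *				return;			// acquired
 *			__builtin_ia32_pause();		// the PAUSE hint
 *		}
 *	}
 */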

/*
 *	void hw_lock_byte_unlock(uint8_t *lock_byte)
 *
 *	Unconditionally release byte sized lock operand
 *	and drop the preemption level.
 */

LEAF_ENTRY(hw_lock_byte_unlock)
	movb	$0, (%rdi)		/* Clear the lock byte */
	PREEMPTION_ENABLE
	LEAF_RET
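
/*
 * No atomic read-modify-write is needed on the release side: x86 keeps
 * the clearing store ordered after the stores made inside the critical
 * section, so a plain byte store acts as a release.  In C11 terms
 * (sketch only, continuing the byte_lock() example above):
 *
 *	atomic_store_explicit(lock, 0, memory_order_release);
 *	preemption_enable();	// pairs with the disable in byte_lock()
 */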

LEAF_ENTRY(preemption_underflow_panic)
	FRAME
	incl	%gs:CPU_PREEMPTION_LEVEL
	ALIGN_STACK()
	LOAD_STRING_ARG0(16f)
	CALL_PANIC()
	hlt
	.data
16:	String	"Preemption level underflow, possible cause unlocking an unlocked mutex or spinlock"
	.text
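
/*
 * In effect (C sketch, not kernel source): the level is first bumped
 * back up so that locks taken on the panic path do not re-trigger the
 * underflow check, then panic() is called with the string above:
 *
 *	cpu_preemption_level++;
 *	panic("Preemption level underflow, possible cause unlocking "
 *	    "an unlocked mutex or spinlock");
 */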