/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved. The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <mach_ldebug.h>
#include <i386/asm.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <config_dtrace.h>
#include <i386/mp.h>

#include "assym.s"

#define	PAUSE		rep; nop

#include <i386/pal_lock_asm.h>

#define	LEAF_ENTRY(name)	\
	Entry(name)

#define	LEAF_RET		\
	ret

/* For x86_64, the varargs ABI requires that %al indicate
 * how many SSE registers contain arguments. In our case, 0. */
#define ALIGN_STACK()		and	$0xFFFFFFFFFFFFFFF0, %rsp ;
#define LOAD_STRING_ARG0(label)	leaq	label(%rip), %rdi ;
#define LOAD_ARG1(x)		mov	x, %esi ;
#define LOAD_PTR_ARG1(x)	mov	x, %rsi ;
#define CALL_PANIC()		xorb	%al,%al ; call	EXT(panic) ;

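/*
 * For reference, a C-level view of why CALL_PANIC zeroes %al: panic() is a
 * variadic C function, and the x86_64 SysV ABI expects %al to hold the
 * number of vector registers used to pass arguments. A rough sketch, with a
 * purely illustrative caller (panic()'s exact declaration lives in the
 * kernel's debug headers):
 *
 *	extern void panic(const char *str, ...);
 *
 *	static void
 *	example_panic_call(void)
 *	{
 *		panic("example message");	// no FP/vector args, so %al = 0
 *	}
 */
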
#define	PREEMPTION_DISABLE				\
	incl	%gs:CPU_PREEMPTION_LEVEL

#define	PREEMPTION_LEVEL_DEBUG 1
#if	PREEMPTION_LEVEL_DEBUG
#define	PREEMPTION_ENABLE				\
	decl	%gs:CPU_PREEMPTION_LEVEL	;	\
	js	17f				;	\
	jnz	19f				;	\
	testl	$AST_URGENT,%gs:CPU_PENDING_AST	;	\
	jz	19f				;	\
	PUSHF					;	\
	testl	$EFL_IF, S_PC			;	\
	jz	18f				;	\
	POPF					;	\
	int	$(T_PREEMPT)			;	\
	jmp	19f				;	\
17:						\
	call	_preemption_underflow_panic	;	\
18:						\
	POPF					;	\
19:
#else
#define	PREEMPTION_ENABLE				\
	decl	%gs:CPU_PREEMPTION_LEVEL	;	\
	jnz	19f				;	\
	testl	$AST_URGENT,%gs:CPU_PENDING_AST	;	\
	jz	19f				;	\
	PUSHF					;	\
	testl	$EFL_IF, S_PC			;	\
	jz	18f				;	\
	POPF					;	\
	int	$(T_PREEMPT)			;	\
	jmp	19f				;	\
18:						\
	POPF					;	\
19:
#endif
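
/*
 * A rough C-level sketch of the PREEMPTION_ENABLE sequence above. The
 * struct, parameters, and helper below are hypothetical stand-ins for the
 * per-cpu fields and trap reached from the assembly; this is an
 * illustration of the control flow, not the kernel's implementation:
 *
 *	extern void preemption_underflow_panic(void);	// defined at the end of this file
 *
 *	struct cpu_data_sketch {
 *		int	preemption_level;	// %gs:CPU_PREEMPTION_LEVEL
 *	};
 *
 *	static void
 *	preemption_enable_sketch(struct cpu_data_sketch *cpu,
 *	    int urgent_ast_pending, int interrupts_enabled)
 *	{
 *		if (--cpu->preemption_level < 0) {
 *			preemption_underflow_panic();	// PREEMPTION_LEVEL_DEBUG builds only
 *		} else if (cpu->preemption_level == 0 &&
 *		    urgent_ast_pending && interrupts_enabled) {
 *			// here the assembly takes the trap: int $(T_PREEMPT)
 *		}
 *	}
 */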

/*
 * For most routines, the hw_lock_t pointer is loaded into a
 * register initially, and then either a byte or register-sized
 * word is loaded/stored to the pointer.
 */

/*
 *	void hw_lock_byte_init(volatile uint8_t *)
 *
 *	Initialize a hardware byte lock.
 */
LEAF_ENTRY(hw_lock_byte_init)
	movb	$0, (%rdi)		/* clear the lock */
	LEAF_RET

/*
 *	void hw_lock_byte_lock(uint8_t *lock_byte)
 *
 *	Acquire byte sized lock operand, spinning until it becomes available.
 *	Return with preemption disabled.
 */

LEAF_ENTRY(hw_lock_byte_lock)
	PREEMPTION_DISABLE
	movl	$1, %ecx		/* Set lock value */
1:
	movb	(%rdi), %al		/* Load byte at address */
	testb	%al,%al			/* lock locked? */
	jne	3f			/* branch if so */
	lock; cmpxchg %cl,(%rdi)	/* attempt atomic compare exchange */
	jne	3f
	LEAF_RET			/* exchange succeeded; lock is held */
3:
	PAUSE				/* pause for hyper-threading */
	jmp	1b			/* try again */

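/*
 * A C-level sketch of the acquire loop above, using compiler builtins in
 * place of the inline lock; cmpxchg sequence. disable_preemption() stands
 * in for PREEMPTION_DISABLE and the function name itself is illustrative:
 *
 *	#include <stdint.h>
 *
 *	extern void disable_preemption(void);
 *
 *	static void
 *	hw_lock_byte_lock_sketch(volatile uint8_t *lock_byte)
 *	{
 *		disable_preemption();				// PREEMPTION_DISABLE
 *		for (;;) {
 *			if (*lock_byte == 0 &&
 *			    __sync_bool_compare_and_swap(lock_byte, 0, 1)) {
 *				return;				// acquired; preemption stays disabled
 *			}
 *			__builtin_ia32_pause();			// PAUSE (rep; nop)
 *		}
 *	}
 */
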
/*
 *	void hw_lock_byte_unlock(uint8_t *lock_byte)
 *
 *	Unconditionally release the byte sized lock operand
 *	and drop the preemption level.
 */

LEAF_ENTRY(hw_lock_byte_unlock)
	movb	$0, (%rdi)		/* Clear the lock byte */
	PREEMPTION_ENABLE
	LEAF_RET

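/*
 * The matching C-level sketch of the release side, plus a hypothetical
 * caller showing the usual pairing of the routines in this file
 * (enable_preemption() stands in for PREEMPTION_ENABLE):
 *
 *	#include <stdint.h>
 *
 *	extern void enable_preemption(void);
 *	extern void hw_lock_byte_init(volatile uint8_t *);
 *	extern void hw_lock_byte_lock(uint8_t *lock_byte);
 *	extern void hw_lock_byte_unlock(uint8_t *lock_byte);
 *
 *	static void
 *	hw_lock_byte_unlock_sketch(volatile uint8_t *lock_byte)
 *	{
 *		*lock_byte = 0;			// movb $0, (%rdi)
 *		enable_preemption();		// PREEMPTION_ENABLE
 *	}
 *
 *	static uint8_t example_lock;		// set up once with hw_lock_byte_init()
 *	static int example_counter;		// state protected by example_lock
 *
 *	static void
 *	example_critical_section(void)
 *	{
 *		hw_lock_byte_lock(&example_lock);	// spins, returns with preemption off
 *		example_counter++;
 *		hw_lock_byte_unlock(&example_lock);	// clears the byte, re-enables preemption
 *	}
 */
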
LEAF_ENTRY(preemption_underflow_panic)
	FRAME
	incl	%gs:CPU_PREEMPTION_LEVEL
	ALIGN_STACK()
	LOAD_STRING_ARG0(16f)
	CALL_PANIC()
	hlt
	.data
16:	String	"Preemption level underflow, possible cause unlocking an unlocked mutex or spinlock"
	.text