/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <mach_rt.h>
#include <mach_ldebug.h>
#include <i386/asm.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <config_dtrace.h>
#include <i386/mp.h>

#include "assym.s"

#define	PAUSE		rep; nop

#include <i386/pal_lock_asm.h>

#define	LEAF_ENTRY(name)	\
	Entry(name)

#define	LEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2)

#define	LEAF_RET		\
	ret

/* Non-leaf routines always have a stack frame: */

#define	NONLEAF_ENTRY(name)	\
	Entry(name);		\
	FRAME

#define	NONLEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2);		\
	FRAME

#define	NONLEAF_RET		\
	EMARF;			\
	ret

/* For x86_64, the varargs ABI requires that %al indicate
 * how many SSE registers contain arguments. In our case, 0. */
#define ALIGN_STACK()		and	$0xFFFFFFFFFFFFFFF0, %rsp ;
#define LOAD_STRING_ARG0(label)	leaq	label(%rip), %rdi ;
#define LOAD_ARG1(x)		mov	x, %esi ;
#define LOAD_PTR_ARG1(x)	mov	x, %rsi ;
#define CALL_PANIC()		xorb	%al,%al ; call EXT(panic) ;

#define	CHECK_UNLOCK(current, owner)			\
	cmp	current, owner				; \
	je	1f					; \
	ALIGN_STACK()					; \
	LOAD_STRING_ARG0(2f)				; \
	CALL_PANIC()					; \
	hlt						; \
	.data						; \
2:	String	"Mutex unlock attempted from non-owner thread"; \
	.text						; \
1:

#if	MACH_LDEBUG
/*
 *  Routines for general lock debugging.
 */

/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.
 */
#define	CHECK_MUTEX_TYPE()				\
	cmpl	$ MUTEX_TAG,M_TYPE			; \
	je	1f					; \
	ALIGN_STACK()					; \
	LOAD_STRING_ARG0(2f)				; \
	CALL_PANIC()					; \
	hlt						; \
	.data						; \
2:	String	"not a mutex!"				; \
	.text						; \
1:

/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
#if	MACH_RT
#define	CHECK_PREEMPTION_LEVEL()			\
	cmpl	$0,%gs:CPU_HIBERNATE			; \
	jne	1f					; \
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL		; \
	je	1f					; \
	ALIGN_STACK()					; \
	movl	%gs:CPU_PREEMPTION_LEVEL, %eax		; \
	LOAD_ARG1(%eax)					; \
	LOAD_STRING_ARG0(2f)				; \
	CALL_PANIC()					; \
	hlt						; \
	.data						; \
2:	String	"preemption_level(%d) != 0!"		; \
	.text						; \
1:
#else	/* MACH_RT */
#define	CHECK_PREEMPTION_LEVEL()
#endif	/* MACH_RT */

#define	CHECK_MYLOCK(current, owner)			\
	cmp	current, owner				; \
	jne	1f					; \
	ALIGN_STACK()					; \
	LOAD_STRING_ARG0(2f)				; \
	CALL_PANIC()					; \
	hlt						; \
	.data						; \
2:	String	"Attempt to recursively lock a non-recursive lock"; \
	.text						; \
1:

#else	/* MACH_LDEBUG */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_MYLOCK(thd)
#endif	/* MACH_LDEBUG */

#define	PREEMPTION_DISABLE				\
	incl	%gs:CPU_PREEMPTION_LEVEL

#define	PREEMPTION_LEVEL_DEBUG 1
#if	PREEMPTION_LEVEL_DEBUG
#define	PREEMPTION_ENABLE				\
	decl	%gs:CPU_PREEMPTION_LEVEL	;	\
	js	17f				;	\
	jnz	19f				;	\
	testl	$AST_URGENT,%gs:CPU_PENDING_AST	;	\
	jz	19f				;	\
	PUSHF					;	\
	testl	$EFL_IF, S_PC			;	\
	jz	18f				;	\
	POPF					;	\
	int	$(T_PREEMPT)			;	\
	jmp	19f				;	\
17:							\
	call	_preemption_underflow_panic	;	\
18:							\
	POPF					;	\
19:
#else
#define	PREEMPTION_ENABLE				\
	decl	%gs:CPU_PREEMPTION_LEVEL	;	\
	jnz	19f				;	\
	testl	$AST_URGENT,%gs:CPU_PENDING_AST	;	\
	jz	19f				;	\
	PUSHF					;	\
	testl	$EFL_IF, S_PC			;	\
	jz	18f				;	\
	POPF					;	\
	int	$(T_PREEMPT)			;	\
	jmp	19f				;	\
18:							\
	POPF					;	\
19:
#endif
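
/*
 * In rough C terms (a hedged sketch; helper names are illustrative,
 * and the real macro raises the urgent AST via "int $(T_PREEMPT)"):
 *
 *	static inline void preemption_enable_sketch(cpu_data_t *cpu)
 *	{
 *		if (--cpu->cpu_preemption_level == 0 &&
 *		    (cpu->cpu_pending_ast & AST_URGENT) &&
 *		    interrupts_enabled())
 *			take_preemption_trap();	// deliver the pending AST now
 *	}
 */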


#if	CONFIG_DTRACE

	.globl	_lockstat_probe
	.globl	_lockstat_probemap

/*
 * LOCKSTAT_LABEL creates a dtrace symbol which contains
 * a pointer into the lock code function body. At that
 * point is a "ret" instruction that can be patched into
 * a "nop".
 */

#define	LOCKSTAT_LABEL(lab)	\
	.data			;\
	.globl	lab		;\
	lab:			;\
	.quad	9f		;\
	.text			;\
	9:

#define	LOCKSTAT_RECORD(id, lck)			\
	push	%rbp					; \
	mov	%rsp,%rbp				; \
	movl	_lockstat_probemap + (id * 4)(%rip),%eax ; \
	test	%eax,%eax				; \
	je	9f					; \
	mov	lck, %rsi				; \
	mov	%rax, %rdi				; \
	mov	$0, %rdx				; \
	mov	$0, %rcx				; \
	mov	$0, %r8					; \
	mov	$0, %r9					; \
	call	*_lockstat_probe(%rip)			; \
9:	leave
	/* ret - left to subsequent code, e.g. return values */

#endif	/* CONFIG_DTRACE */
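
/*
 * A minimal C model of what LOCKSTAT_RECORD assembles to (the symbol
 * names are real; the surrounding logic is a sketch):
 *
 *	if (lockstat_probemap[id] != 0)		// is the probe enabled?
 *		(*lockstat_probe)(lockstat_probemap[id], lck, 0, 0, 0, 0);
 *
 * The six probe arguments travel in %rdi..%r9 per the SysV ABI, with
 * the unused trailing four passed as zero; the patched-out "ret" at
 * the LOCKSTAT_LABEL is what lets control fall through into this body.
 */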

/*
 * For most routines, the hw_lock_t pointer is loaded into a
 * register initially, and then either a byte or register-sized
 * word is loaded/stored to the pointer
 */

/*
 *	void hw_lock_init(hw_lock_t)
 *
 *	Initialize a hardware lock.
 */
LEAF_ENTRY(hw_lock_init)
	movq	$0, (%rdi)		/* clear the lock */
	LEAF_RET


/*
 *	void hw_lock_byte_init(volatile uint8_t *)
 *
 *	Initialize a hardware byte lock.
 */
LEAF_ENTRY(hw_lock_byte_init)
	movb	$0, (%rdi)		/* clear the lock */
	LEAF_RET

/*
 *	void hw_lock_lock(hw_lock_t)
 *
 *	Acquire lock, spinning until it becomes available.
 *	MACH_RT:  also return with preemption disabled.
 */
LEAF_ENTRY(hw_lock_lock)
	mov	%gs:CPU_ACTIVE_THREAD, %rcx	/* get thread pointer */

	PREEMPTION_DISABLE
1:
	mov	(%rdi), %rax
	test	%rax,%rax		/* lock locked? */
	jne	3f			/* branch if so */
	lock; cmpxchg %rcx,(%rdi)	/* try to acquire the HW lock */
	jne	3f
	movl	$1,%eax			/* In case this was a timeout call */
	LEAF_RET			/* if yes, then nothing left to do */
3:
	PAUSE				/* pause for hyper-threading */
	jmp	1b			/* try again */
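
/*
 * Hedged C sketch of the loop above ("lock" is the hw_lock_t word;
 * the CAS stands in for the lock-prefixed cmpxchg):
 *
 *	disable_preemption();
 *	for (;;) {
 *		if (*lock == 0 &&
 *		    __sync_bool_compare_and_swap(lock, 0, current_thread()))
 *			return;		// owner recorded, preemption stays off
 *		cpu_pause();		// PAUSE: yield to the HT sibling
 *	}
 */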

/*
 *	void	hw_lock_byte_lock(uint8_t *lock_byte)
 *
 *	Acquire byte sized lock operand, spinning until it becomes available.
 *	MACH_RT:  also return with preemption disabled.
 */

LEAF_ENTRY(hw_lock_byte_lock)
	PREEMPTION_DISABLE
	movl	$1, %ecx		/* Set lock value */
1:
	movb	(%rdi), %al		/* Load byte at address */
	testb	%al,%al			/* lock locked? */
	jne	3f			/* branch if so */
	lock; cmpxchg %cl,(%rdi)	/* attempt atomic compare exchange */
	jne	3f
	LEAF_RET			/* if yes, then nothing left to do */
3:
	PAUSE				/* pause for hyper-threading */
	jmp	1b			/* try again */

/*
 *	unsigned int hw_lock_to(hw_lock_t, unsigned int)
 *
 *	Acquire lock, spinning until it becomes available or timeout.
 *	MACH_RT:  also return with preemption disabled.
 */
LEAF_ENTRY(hw_lock_to)
1:
	mov	%gs:CPU_ACTIVE_THREAD, %rcx

	/*
	 * Attempt to grab the lock immediately
	 * - fastpath without timeout nonsense.
	 */
	PREEMPTION_DISABLE

	mov	(%rdi), %rax
	test	%rax,%rax		/* lock locked? */
	jne	2f			/* branch if so */
	lock; cmpxchg %rcx,(%rdi)	/* try to acquire the HW lock */
	jne	2f			/* branch on failure */
	movl	$1,%eax
	LEAF_RET

2:
#define	INNER_LOOP_COUNT	1000
	/*
	 * Failed to get the lock so set the timeout
	 * and then spin re-checking the lock but pausing
	 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
	 */
	push	%r9
	lfence
	rdtsc				/* read cyclecount into %edx:%eax */
	shlq	$32, %rdx
	orq	%rdx, %rax		/* load 64-bit quantity into %rax */
	addq	%rax, %rsi		/* %rsi is the timeout expiry */

4:
	/*
	 * The inner-loop spin to look for the lock being freed.
	 */
	mov	$(INNER_LOOP_COUNT),%r9
5:
	PAUSE				/* pause for hyper-threading */
	mov	(%rdi),%rax		/* spin checking lock value in cache */
	test	%rax,%rax
	je	6f			/* zero => unlocked, try to grab it */
	decq	%r9			/* decrement inner loop count */
	jnz	5b			/* time to check for timeout? */

	/*
	 * Here after spinning INNER_LOOP_COUNT times, check for timeout
	 */
	lfence
	rdtsc				/* cyclecount into %edx:%eax */
	shlq	$32, %rdx
	orq	%rdx, %rax		/* load 64-bit quantity into %rax */
	cmpq	%rsi, %rax		/* compare to timeout */
	jb	4b			/* continue spinning if less, or */
	xor	%rax,%rax		/* with 0 return value */
	pop	%r9
	LEAF_RET

6:
	/*
	 * Here to try to grab the lock that now appears to be free
	 * after contention.
	 */
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	lock; cmpxchg %rcx,(%rdi)	/* try to acquire the HW lock */
	jne	4b			/* no - spin again */
	movl	$1,%eax			/* yes */
	pop	%r9
	LEAF_RET
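
/*
 * The timeout structure above, as a hedged C sketch (rdtsc64() stands
 * in for the lfence/rdtsc/shlq/orq sequence; the inner loop amortizes
 * the cost of reading the timestamp counter):
 *
 *	uint64_t deadline = rdtsc64() + timeout;
 *	do {
 *		for (int i = 0; i < INNER_LOOP_COUNT; i++) {
 *			cpu_pause();
 *			if (*lock == 0 &&
 *			    __sync_bool_compare_and_swap(lock, 0,
 *			        current_thread()))
 *				return 1;	// acquired
 *		}
 *	} while (rdtsc64() < deadline);
 *	return 0;			// timed out, lock not taken
 */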

/*
 *	void hw_lock_unlock(hw_lock_t)
 *
 *	Unconditionally release lock.
 *	MACH_RT:  release preemption level.
 */
LEAF_ENTRY(hw_lock_unlock)
	movq	$0, (%rdi)		/* clear the lock */
	PREEMPTION_ENABLE
	LEAF_RET

/*
 *	void hw_lock_byte_unlock(uint8_t *lock_byte)
 *
 *	Unconditionally release byte sized lock operand.
 *	MACH_RT:  release preemption level.
 */

LEAF_ENTRY(hw_lock_byte_unlock)
	movb	$0, (%rdi)		/* Clear the lock byte */
	PREEMPTION_ENABLE
	LEAF_RET

/*
 *	unsigned int hw_lock_try(hw_lock_t)
 *	MACH_RT:  returns with preemption disabled on success.
 */
LEAF_ENTRY(hw_lock_try)
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	PREEMPTION_DISABLE

	mov	(%rdi),%rax
	test	%rax,%rax
	jne	1f
	lock; cmpxchg %rcx,(%rdi)	/* try to acquire the HW lock */
	jne	1f

	movl	$1,%eax			/* success */
	LEAF_RET

1:
	PREEMPTION_ENABLE		/* failure:  release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
	LEAF_RET

/*
 *	unsigned int hw_lock_held(hw_lock_t)
 *	MACH_RT:  doesn't change preemption state.
 *	N.B.  Racy, of course.
 */
LEAF_ENTRY(hw_lock_held)
	mov	(%rdi),%rax		/* check lock value */
	test	%rax,%rax
	movl	$1,%ecx
	cmovne	%ecx,%eax		/* 0 => unlocked, 1 => locked */
	LEAF_RET


/*
 * Reader-writer lock fastpaths. These currently exist for the
 * shared lock acquire, the exclusive lock acquire, the shared to
 * exclusive upgrade and the release paths (where they reduce overhead
 * considerably) -- these are by far the most frequently used routines
 *
 * The following should reflect the layout of the bitfield embedded within
 * the lck_rw_t structure (see i386/locks.h).
 */
#define LCK_RW_INTERLOCK	(0x1 << 16)

#define LCK_RW_PRIV_EXCL	(0x1 << 24)
#define LCK_RW_WANT_UPGRADE	(0x2 << 24)
#define LCK_RW_WANT_WRITE	(0x4 << 24)
#define LCK_R_WAITING		(0x8 << 24)
#define LCK_W_WAITING		(0x10 << 24)

#define LCK_RW_SHARED_MASK	(0xffff)

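/*
 * Sketch of how these masks carve up the 32-bit word at the front of
 * lck_rw_t (illustrative field names; i386/locks.h is authoritative):
 *
 *	struct {
 *		uint32_t shared_count : 16;	// LCK_RW_SHARED_MASK
 *		uint32_t interlock    : 1;	// LCK_RW_INTERLOCK (bit 16)
 *		uint32_t unused       : 7;
 *		uint32_t priv_excl    : 1;	// LCK_RW_PRIV_EXCL (bit 24)
 *		uint32_t want_upgrade : 1;	// LCK_RW_WANT_UPGRADE
 *		uint32_t want_write   : 1;	// LCK_RW_WANT_WRITE
 *		uint32_t r_waiting    : 1;	// LCK_R_WAITING
 *		uint32_t w_waiting    : 1;	// LCK_W_WAITING
 *	};
 */
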
/*
 * For most routines, the lck_rw_t pointer is loaded into a
 * register initially, and the flags bitfield loaded into another
 * register and examined
 */

#define	RW_LOCK_SHARED_MASK (LCK_RW_INTERLOCK | LCK_RW_WANT_UPGRADE | LCK_RW_WANT_WRITE)
/*
 *	void lck_rw_lock_shared(lck_rw_t *)
 *
 */
Entry(lck_rw_lock_shared)
	mov	%gs:CPU_ACTIVE_THREAD, %rcx	/* Load thread pointer */
	incl	TH_RWLOCK_COUNT(%rcx)		/* Increment count before atomic CAS */
1:
	mov	(%rdi), %eax		/* Load state bitfield and interlock */
	testl	$(RW_LOCK_SHARED_MASK), %eax	/* Eligible for fastpath? */
	jne	3f

	movl	%eax, %ecx		/* original value in %eax for cmpxchgl */
	incl	%ecx			/* Increment reader refcount */
	lock
	cmpxchgl %ecx, (%rdi)		/* Attempt atomic exchange */
	jne	2f

#if	CONFIG_DTRACE
	/*
	 * Dtrace lockstat event: LS_LCK_RW_LOCK_SHARED_ACQUIRE
	 * Implemented by swapping between return and no-op instructions.
	 * See bsd/dev/dtrace/lockstat.c.
	 */
	LOCKSTAT_LABEL(_lck_rw_lock_shared_lockstat_patch_point)
	ret
	/* Fall thru when patched, counting on lock pointer in %rdi */
	LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, %rdi)
#endif
	ret
2:
	PAUSE
	jmp	1b
3:
	jmp	EXT(lck_rw_lock_shared_gen)
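
/*
 * Hedged C equivalent of the fastpath above ("data" is the 32-bit
 * state word at (%rdi); lck_rw_lock_shared_gen is the slow path):
 *
 *	current_thread()->rwlock_count++;
 *	for (;;) {
 *		uint32_t data = lck->data;
 *		if (data & (LCK_RW_INTERLOCK | LCK_RW_WANT_UPGRADE |
 *		    LCK_RW_WANT_WRITE))
 *			return lck_rw_lock_shared_gen(lck);
 *		if (__sync_bool_compare_and_swap(&lck->data, data, data + 1))
 *			return;		// one more reader holds the lock
 *	}
 */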
507 | ||
508 | ||
b0d623f7 A |
509 | |
510 | #define RW_TRY_LOCK_SHARED_MASK (LCK_RW_WANT_UPGRADE | LCK_RW_WANT_WRITE) | |
2d21ac55 | 511 | /* |
b0d623f7 | 512 | * void lck_rw_try_lock_shared(lck_rw_t *) |
2d21ac55 A |
513 | * |
514 | */ | |
b0d623f7 | 515 | Entry(lck_rw_try_lock_shared) |
2d21ac55 | 516 | 1: |
fe8ab488 | 517 | mov (%rdi), %eax /* Load state bitfield and interlock */ |
b0d623f7 A |
518 | testl $(LCK_RW_INTERLOCK), %eax |
519 | jne 2f | |
520 | testl $(RW_TRY_LOCK_SHARED_MASK), %eax | |
521 | jne 3f /* lock is busy */ | |
522 | ||
523 | movl %eax, %ecx /* original value in %eax for cmpxchgl */ | |
524 | incl %ecx /* Increment reader refcount */ | |
2d21ac55 | 525 | lock |
fe8ab488 | 526 | cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ |
2d21ac55 | 527 | jne 2f |
b0d623f7 | 528 | |
39236c6e A |
529 | mov %gs:CPU_ACTIVE_THREAD, %rcx /* Load thread pointer */ |
530 | incl TH_RWLOCK_COUNT(%rcx) /* Increment count on success. */ | |
531 | /* There is a 3 instr window where preemption may not notice rwlock_count after cmpxchg */ | |
532 | ||
2d21ac55 | 533 | #if CONFIG_DTRACE |
b0d623f7 | 534 | movl $1, %eax |
2d21ac55 | 535 | /* |
b0d623f7 A |
536 | * Dtrace lockstat event: LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE |
537 | * Implemented by swapping between return and no-op instructions. | |
538 | * See bsd/dev/dtrace/lockstat.c. | |
2d21ac55 | 539 | */ |
b0d623f7 A |
540 | LOCKSTAT_LABEL(_lck_rw_try_lock_shared_lockstat_patch_point) |
541 | ret | |
fe8ab488 A |
542 | /* Fall thru when patched, counting on lock pointer in %rdi */ |
543 | LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, %rdi) | |
2d21ac55 | 544 | #endif |
b0d623f7 | 545 | movl $1, %eax /* return TRUE */ |
2d21ac55 | 546 | ret |
2d21ac55 A |
547 | 2: |
548 | PAUSE | |
549 | jmp 1b | |
550 | 3: | |
b0d623f7 A |
551 | xorl %eax, %eax |
552 | ret | |
1c79356b | 553 | |
2d21ac55 | 554 | |
b0d623f7 A |
555 | #define RW_LOCK_EXCLUSIVE_HELD (LCK_RW_WANT_WRITE | LCK_RW_WANT_UPGRADE) |
556 | /* | |
557 | * int lck_rw_grab_shared(lck_rw_t *) | |
558 | * | |
559 | */ | |
560 | Entry(lck_rw_grab_shared) | |
b0d623f7 | 561 | 1: |
fe8ab488 | 562 | mov (%rdi), %eax /* Load state bitfield and interlock */ |
b0d623f7 A |
563 | testl $(LCK_RW_INTERLOCK), %eax |
564 | jne 5f | |
565 | testl $(RW_LOCK_EXCLUSIVE_HELD), %eax | |
566 | jne 3f | |
567 | 2: | |
fe8ab488 A |
568 | movl %eax, %ecx /* original value in %eax for cmpxchgl */ |
569 | incl %ecx /* Increment reader refcount */ | |
b0d623f7 | 570 | lock |
fe8ab488 | 571 | cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ |
b0d623f7 A |
572 | jne 4f |
573 | ||
fe8ab488 | 574 | movl $1, %eax /* return success */ |
2d21ac55 | 575 | ret |
b0d623f7 A |
576 | 3: |
577 | testl $(LCK_RW_SHARED_MASK), %eax | |
578 | je 4f | |
579 | testl $(LCK_RW_PRIV_EXCL), %eax | |
580 | je 2b | |
581 | 4: | |
fe8ab488 | 582 | xorl %eax, %eax /* return failure */ |
2d21ac55 | 583 | ret |
b0d623f7 | 584 | 5: |
2d21ac55 | 585 | PAUSE |
b0d623f7 | 586 | jmp 1b |
0c530ab8 | 587 | |
91447636 | 588 | |
b0d623f7 A |
589 | |
590 | #define RW_LOCK_EXCLUSIVE_MASK (LCK_RW_SHARED_MASK | LCK_RW_INTERLOCK | \ | |
591 | LCK_RW_WANT_UPGRADE | LCK_RW_WANT_WRITE) | |
592 | /* | |
593 | * void lck_rw_lock_exclusive(lck_rw_t*) | |
594 | * | |
595 | */ | |
596 | Entry(lck_rw_lock_exclusive) | |
39236c6e A |
597 | mov %gs:CPU_ACTIVE_THREAD, %rcx /* Load thread pointer */ |
598 | incl TH_RWLOCK_COUNT(%rcx) /* Increment count before atomic CAS */ | |
b0d623f7 | 599 | 1: |
fe8ab488 | 600 | mov (%rdi), %eax /* Load state bitfield, interlock and shared count */ |
b0d623f7 A |
601 | testl $(RW_LOCK_EXCLUSIVE_MASK), %eax /* Eligible for fastpath? */ |
602 | jne 3f /* no, go slow */ | |
1c79356b | 603 | |
b0d623f7 A |
604 | movl %eax, %ecx /* original value in %eax for cmpxchgl */ |
605 | orl $(LCK_RW_WANT_WRITE), %ecx | |
606 | lock | |
fe8ab488 | 607 | cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ |
b0d623f7 | 608 | jne 2f |
0c530ab8 | 609 | |
2d21ac55 | 610 | #if CONFIG_DTRACE |
b0d623f7 A |
611 | /* |
612 | * Dtrace lockstat event: LS_LCK_RW_LOCK_EXCL_ACQUIRE | |
613 | * Implemented by swapping between return and no-op instructions. | |
614 | * See bsd/dev/dtrace/lockstat.c. | |
615 | */ | |
616 | LOCKSTAT_LABEL(_lck_rw_lock_exclusive_lockstat_patch_point) | |
2d21ac55 | 617 | ret |
fe8ab488 A |
618 | /* Fall thru when patched, counting on lock pointer in %rdi */ |
619 | LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, %rdi) | |
2d21ac55 A |
620 | #endif |
621 | ret | |
b0d623f7 | 622 | 2: |
2d21ac55 | 623 | PAUSE |
b0d623f7 A |
624 | jmp 1b |
625 | 3: | |
626 | jmp EXT(lck_rw_lock_exclusive_gen) | |
0c530ab8 | 627 | |
2d21ac55 A |
628 | |
629 | ||
b0d623f7 A |
630 | #define RW_TRY_LOCK_EXCLUSIVE_MASK (LCK_RW_SHARED_MASK | LCK_RW_WANT_UPGRADE | LCK_RW_WANT_WRITE) |
631 | /* | |
632 | * void lck_rw_try_lock_exclusive(lck_rw_t *) | |
633 | * | |
634 | * Tries to get a write lock. | |
635 | * | |
636 | * Returns FALSE if the lock is not held on return. | |
637 | */ | |
638 | Entry(lck_rw_try_lock_exclusive) | |
b0d623f7 | 639 | 1: |
fe8ab488 | 640 | mov (%rdi), %eax /* Load state bitfield, interlock and shared count */ |
b0d623f7 A |
641 | testl $(LCK_RW_INTERLOCK), %eax |
642 | jne 2f | |
643 | testl $(RW_TRY_LOCK_EXCLUSIVE_MASK), %eax | |
fe8ab488 | 644 | jne 3f /* can't get it */ |
2d21ac55 | 645 | |
fe8ab488 | 646 | movl %eax, %ecx /* original value in %eax for cmpxchgl */ |
b0d623f7 A |
647 | orl $(LCK_RW_WANT_WRITE), %ecx |
648 | lock | |
fe8ab488 | 649 | cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ |
b0d623f7 | 650 | jne 2f |
2d21ac55 | 651 | |
39236c6e A |
652 | mov %gs:CPU_ACTIVE_THREAD, %rcx /* Load thread pointer */ |
653 | incl TH_RWLOCK_COUNT(%rcx) /* Increment count on success. */ | |
654 | /* There is a 3 instr window where preemption may not notice rwlock_count after cmpxchg */ | |
655 | ||
2d21ac55 | 656 | #if CONFIG_DTRACE |
b0d623f7 A |
657 | movl $1, %eax |
658 | /* | |
659 | * Dtrace lockstat event: LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE | |
660 | * Implemented by swapping between return and no-op instructions. | |
661 | * See bsd/dev/dtrace/lockstat.c. | |
662 | */ | |
663 | LOCKSTAT_LABEL(_lck_rw_try_lock_exclusive_lockstat_patch_point) | |
2d21ac55 | 664 | ret |
fe8ab488 A |
665 | /* Fall thru when patched, counting on lock pointer in %rdi */ |
666 | LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, %rdi) | |
2d21ac55 | 667 | #endif |
b0d623f7 | 668 | movl $1, %eax /* return TRUE */ |
2d21ac55 | 669 | ret |
b0d623f7 | 670 | 2: |
2d21ac55 | 671 | PAUSE |
b0d623f7 A |
672 | jmp 1b |
673 | 3: | |
674 | xorl %eax, %eax /* return FALSE */ | |
675 | ret | |
2d21ac55 | 676 | |
0c530ab8 | 677 | |
1c79356b | 678 | |
b0d623f7 A |
679 | /* |
680 | * void lck_rw_lock_shared_to_exclusive(lck_rw_t*) | |
681 | * | |
682 | * fastpath can be taken if | |
683 | * the current rw_shared_count == 1 | |
684 | * AND the interlock is clear | |
685 | * AND RW_WANT_UPGRADE is not set | |
686 | * | |
687 | * note that RW_WANT_WRITE could be set, but will not | |
688 | * be indicative of an exclusive hold since we have | |
689 | * a read count on the lock that we have not yet released | |
690 | * we can blow by that state since the lck_rw_lock_exclusive | |
691 | * function will block until rw_shared_count == 0 and | |
692 | * RW_WANT_UPGRADE is clear... it does this check behind | |
693 | * the interlock which we are also checking for | |
694 | * | |
695 | * to make the transition we must be able to atomically | |
696 | * set RW_WANT_UPGRADE and get rid of the read count we hold | |
697 | */ | |
698 | Entry(lck_rw_lock_shared_to_exclusive) | |
b0d623f7 | 699 | 1: |
fe8ab488 | 700 | mov (%rdi), %eax /* Load state bitfield, interlock and shared count */ |
b0d623f7 A |
701 | testl $(LCK_RW_INTERLOCK), %eax |
702 | jne 7f | |
703 | testl $(LCK_RW_WANT_UPGRADE), %eax | |
704 | jne 2f | |
1c79356b | 705 | |
b0d623f7 A |
706 | movl %eax, %ecx /* original value in %eax for cmpxchgl */ |
707 | orl $(LCK_RW_WANT_UPGRADE), %ecx /* ask for WANT_UPGRADE */ | |
708 | decl %ecx /* and shed our read count */ | |
709 | lock | |
fe8ab488 | 710 | cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ |
b0d623f7 A |
711 | jne 7f |
712 | /* we now own the WANT_UPGRADE */ | |
713 | testl $(LCK_RW_SHARED_MASK), %ecx /* check to see if all of the readers are drained */ | |
714 | jne 8f /* if not, we need to go wait */ | |
1c79356b | 715 | |
2d21ac55 | 716 | #if CONFIG_DTRACE |
b0d623f7 A |
717 | movl $1, %eax |
718 | /* | |
719 | * Dtrace lockstat event: LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE | |
720 | * Implemented by swapping between return and no-op instructions. | |
721 | * See bsd/dev/dtrace/lockstat.c. | |
722 | */ | |
723 | LOCKSTAT_LABEL(_lck_rw_lock_shared_to_exclusive_lockstat_patch_point) | |
2d21ac55 | 724 | ret |
fe8ab488 A |
725 | /* Fall thru when patched, counting on lock pointer in %rdi */ |
726 | LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, %rdi) | |
2d21ac55 | 727 | #endif |
b0d623f7 | 728 | movl $1, %eax /* return success */ |
2d21ac55 | 729 | ret |
b0d623f7 A |
730 | |
731 | 2: /* someone else already holds WANT_UPGRADE */ | |
732 | movl %eax, %ecx /* original value in %eax for cmpxchgl */ | |
733 | decl %ecx /* shed our read count */ | |
734 | testl $(LCK_RW_SHARED_MASK), %ecx | |
735 | jne 3f /* we were the last reader */ | |
736 | andl $(~LCK_W_WAITING), %ecx /* so clear the wait indicator */ | |
737 | 3: | |
738 | lock | |
fe8ab488 | 739 | cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ |
b0d623f7 A |
740 | jne 7f |
741 | ||
b0d623f7 A |
742 | mov %eax, %esi /* put old flags as second arg */ |
743 | /* lock is alread in %rdi */ | |
744 | call EXT(lck_rw_lock_shared_to_exclusive_failure) | |
b0d623f7 A |
745 | ret /* and pass the failure return along */ |
746 | 7: | |
747 | PAUSE | |
748 | jmp 1b | |
749 | 8: | |
750 | jmp EXT(lck_rw_lock_shared_to_exclusive_success) | |
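
/*
 * The upgrade above, as a hedged C sketch (lck->data is the state
 * word; interlock/CAS-failure retries are elided to an outer loop,
 * and CAS() stands in for the lock-prefixed cmpxchgl):
 *
 *	uint32_t data = lck->data;
 *	if (data & LCK_RW_WANT_UPGRADE) {	// lost the race to upgrade
 *		uint32_t ndata = data - 1;	// shed our read count
 *		if ((ndata & LCK_RW_SHARED_MASK) == 0)	// last reader out
 *			ndata &= ~LCK_W_WAITING;
 *		if (CAS(&lck->data, data, ndata))
 *			return lck_rw_lock_shared_to_exclusive_failure(lck, data);
 *	} else {
 *		uint32_t ndata = (data | LCK_RW_WANT_UPGRADE) - 1;
 *		if (CAS(&lck->data, data, ndata))
 *			return (ndata & LCK_RW_SHARED_MASK) ?
 *			    lck_rw_lock_shared_to_exclusive_success(lck) :
 *			    TRUE;	// no readers left: upgraded in place
 *	}
 */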


	.cstring
rwl_release_error_str:
	.asciz	"Releasing non-exclusive RW lock without a reader refcount!"
	.text

/*
 *	lck_rw_type_t lck_rw_done(lck_rw_t *)
 *
 */
Entry(lck_rw_done)
1:
	mov	(%rdi), %eax		/* Load state bitfield, interlock and reader count */
	testl	$(LCK_RW_INTERLOCK), %eax
	jne	7f			/* wait for interlock to clear */

	movl	%eax, %ecx		/* keep original value in %eax for cmpxchgl */
	testl	$(LCK_RW_SHARED_MASK), %ecx	/* if reader count == 0, must be exclusive lock */
	je	2f
	decl	%ecx			/* Decrement reader count */
	testl	$(LCK_RW_SHARED_MASK), %ecx	/* if reader count has now gone to 0, check for waiters */
	je	4f
	jmp	6f
2:
	testl	$(LCK_RW_WANT_UPGRADE), %ecx
	je	3f
	andl	$(~LCK_RW_WANT_UPGRADE), %ecx
	jmp	4f
3:
	testl	$(LCK_RW_WANT_WRITE), %ecx
	je	8f			/* lock is not 'owned', go panic */
	andl	$(~LCK_RW_WANT_WRITE), %ecx
4:
	/*
	 * test the original values to match what
	 * lck_rw_done_gen is going to do to determine
	 * which wakeups need to happen...
	 *
	 * if !(fake_lck->lck_rw_priv_excl && fake_lck->lck_w_waiting)
	 */
	testl	$(LCK_W_WAITING), %eax
	je	5f
	andl	$(~LCK_W_WAITING), %ecx

	testl	$(LCK_RW_PRIV_EXCL), %eax
	jne	6f
5:
	andl	$(~LCK_R_WAITING), %ecx
6:
	lock
	cmpxchgl %ecx, (%rdi)		/* Attempt atomic exchange */
	jne	7f

	mov	%eax,%esi		/* old flags in %rsi */
					/* lock is in %rdi already */
	call	EXT(lck_rw_done_gen)
	ret
7:
	PAUSE
	jmp	1b
8:
	ALIGN_STACK()
	LOAD_STRING_ARG0(rwl_release_error_str)
	CALL_PANIC()



/*
 *	lck_rw_type_t lck_rw_lock_exclusive_to_shared(lck_rw_t *)
 *
 */
Entry(lck_rw_lock_exclusive_to_shared)
1:
	mov	(%rdi), %eax		/* Load state bitfield, interlock and reader count */
	testl	$(LCK_RW_INTERLOCK), %eax
	jne	6f			/* wait for interlock to clear */

	movl	%eax, %ecx		/* keep original value in %eax for cmpxchgl */
	incl	%ecx			/* Increment reader count */

	testl	$(LCK_RW_WANT_UPGRADE), %ecx
	je	2f
	andl	$(~LCK_RW_WANT_UPGRADE), %ecx
	jmp	3f
2:
	andl	$(~LCK_RW_WANT_WRITE), %ecx
3:
	/*
	 * test the original values to match what
	 * lck_rw_lock_exclusive_to_shared_gen is going to do to determine
	 * which wakeups need to happen...
	 *
	 * if !(fake_lck->lck_rw_priv_excl && fake_lck->lck_w_waiting)
	 */
	testl	$(LCK_W_WAITING), %eax
	je	4f
	testl	$(LCK_RW_PRIV_EXCL), %eax
	jne	5f
4:
	andl	$(~LCK_R_WAITING), %ecx
5:
	lock
	cmpxchgl %ecx, (%rdi)		/* Attempt atomic exchange */
	jne	6f

	mov	%eax,%esi
	call	EXT(lck_rw_lock_exclusive_to_shared_gen)
	ret
6:
	PAUSE
	jmp	1b



/*
 *	int lck_rw_grab_want(lck_rw_t *)
 *
 */
Entry(lck_rw_grab_want)
1:
	mov	(%rdi), %eax		/* Load state bitfield, interlock and reader count */
	testl	$(LCK_RW_INTERLOCK), %eax
	jne	3f			/* wait for interlock to clear */
	testl	$(LCK_RW_WANT_WRITE), %eax	/* want_write has been grabbed by someone else */
	jne	2f			/* go return failure */

	movl	%eax, %ecx		/* original value in %eax for cmpxchgl */
	orl	$(LCK_RW_WANT_WRITE), %ecx
	lock
	cmpxchgl %ecx, (%rdi)		/* Attempt atomic exchange */
	jne	2f
				/* we now own want_write */
	movl	$1, %eax	/* return success */
	ret
2:
	xorl	%eax, %eax	/* return failure */
	ret
3:
	PAUSE
	jmp	1b


#define	RW_LOCK_SHARED_OR_UPGRADE_MASK (LCK_RW_SHARED_MASK | LCK_RW_INTERLOCK | LCK_RW_WANT_UPGRADE)
/*
 *	int lck_rw_held_read_or_upgrade(lck_rw_t *)
 *
 */
Entry(lck_rw_held_read_or_upgrade)
	mov	(%rdi), %eax
	andl	$(RW_LOCK_SHARED_OR_UPGRADE_MASK), %eax
	ret



/*
 * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
 * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained
 * as a 64-bit quantity (this matches the existing PowerPC implementation,
 * and the new x86 specific statistics are also maintained as 32-bit
 * quantities).
 *
 *
 * Enable this preprocessor define to record the first miss alone.
 * By default, we count every miss, hence multiple misses may be
 * recorded for a single lock acquire attempt via lck_mtx_lock.
 */
#undef LOG_FIRST_MISS_ALONE

/*
 * This preprocessor define controls whether the R-M-W updates of the
 * per-group statistics elements are atomic (LOCK-prefixed).
 * Enabled by default.
 */
#define ATOMIC_STAT_UPDATES 1

#if defined(ATOMIC_STAT_UPDATES)
#define LOCK_IF_ATOMIC_STAT_UPDATES lock
#else
#define LOCK_IF_ATOMIC_STAT_UPDATES
#endif	/* ATOMIC_STAT_UPDATES */
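
/*
 * Hedged illustration of the difference this toggle makes: the miss
 * counter bump assembles to either a plain or a lock-prefixed R-M-W,
 * roughly (field name illustrative)
 *
 *	grp->miss++;				// plain incl: can lose counts
 *	__sync_fetch_and_add(&grp->miss, 1);	// lock incl: atomic R-M-W
 *
 * since unprefixed increments can drop updates when several CPUs miss
 * on locks of the same group at once.
 */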


/*
 * For most routines, the lck_mtx_t pointer is loaded into a
 * register initially, and the owner field checked for indirection.
 * Eventually the lock owner is loaded into a register and examined.
 */

#define M_OWNER		MUTEX_OWNER
#define M_PTR		MUTEX_PTR
#define M_STATE		MUTEX_STATE

#define LMTX_ENTER_EXTENDED			\
	mov	M_PTR(%rdx), %rdx	;	\
	xor	%r11, %r11		;	\
	mov	MUTEX_GRP(%rdx), %r10	;	\
	LOCK_IF_ATOMIC_STAT_UPDATES	;	\
	incq	GRP_MTX_STAT_UTIL(%r10)


#if	LOG_FIRST_MISS_ALONE
#define LMTX_UPDATE_MISS			\
	test	$1, %r11		;	\
	jnz	11f			;	\
	LOCK_IF_ATOMIC_STAT_UPDATES	;	\
	incl	GRP_MTX_STAT_MISS(%r10)	;	\
	or	$1, %r11		;	\
11:
#else
#define LMTX_UPDATE_MISS			\
	LOCK_IF_ATOMIC_STAT_UPDATES	;	\
	incl	GRP_MTX_STAT_MISS(%r10)
#endif


#if	LOG_FIRST_MISS_ALONE
#define LMTX_UPDATE_WAIT			\
	test	$2, %r11		;	\
	jnz	11f			;	\
	LOCK_IF_ATOMIC_STAT_UPDATES	;	\
	incl	GRP_MTX_STAT_WAIT(%r10)	;	\
	or	$2, %r11		;	\
11:
#else
#define LMTX_UPDATE_WAIT			\
	LOCK_IF_ATOMIC_STAT_UPDATES	;	\
	incl	GRP_MTX_STAT_WAIT(%r10)
#endif


/*
 * Record the "direct wait" statistic, which indicates if a
 * miss proceeded to block directly without spinning--occurs
 * if the owner of the mutex isn't running on another processor
 * at the time of the check.
 */
#define LMTX_UPDATE_DIRECT_WAIT			\
	LOCK_IF_ATOMIC_STAT_UPDATES	;	\
	incl	GRP_MTX_STAT_DIRECT_WAIT(%r10)


#define LMTX_CALLEXT1(func_name)	\
	cmp	%rdx, %rdi	;	\
	je	12f		;	\
	push	%r10		;	\
	push	%r11		;	\
12:	push	%rdi		;	\
	push	%rdx		;	\
	mov	%rdx, %rdi	;	\
	call	EXT(func_name)	;	\
	pop	%rdx		;	\
	pop	%rdi		;	\
	cmp	%rdx, %rdi	;	\
	je	12f		;	\
	pop	%r11		;	\
	pop	%r10		;	\
12:

#define LMTX_CALLEXT2(func_name, reg)	\
	cmp	%rdx, %rdi	;	\
	je	12f		;	\
	push	%r10		;	\
	push	%r11		;	\
12:	push	%rdi		;	\
	push	%rdx		;	\
	mov	reg, %rsi	;	\
	mov	%rdx, %rdi	;	\
	call	EXT(func_name)	;	\
	pop	%rdx		;	\
	pop	%rdi		;	\
	cmp	%rdx, %rdi	;	\
	je	12f		;	\
	pop	%r11		;	\
	pop	%r10		;	\
12:


#define M_WAITERS_MSK		0x0000ffff
#define M_PRIORITY_MSK		0x00ff0000
#define M_ILOCKED_MSK		0x01000000
#define M_MLOCKED_MSK		0x02000000
#define M_PROMOTED_MSK		0x04000000
#define M_SPIN_MSK		0x08000000

/*
 * void lck_mtx_assert(lck_mtx_t* l, unsigned int)
 * Takes the address of a lock, and an assertion type as parameters.
 * The assertion can take one of two forms determined by the type
 * parameter: either the lock is held by the current thread, and the
 * type is LCK_MTX_ASSERT_OWNED, or it isn't and the type is
 * LCK_MTX_ASSERT_NOTOWNED. Calls panic on assertion failure.
 *
 */

NONLEAF_ENTRY(lck_mtx_assert)
	mov	%rdi, %rdx		/* Load lock address */
	mov	%gs:CPU_ACTIVE_THREAD, %rax	/* Load current thread */

	mov	M_STATE(%rdx), %ecx
	cmp	$(MUTEX_IND), %ecx	/* Is this an indirect mutex? */
	jne	0f
	mov	M_PTR(%rdx), %rdx	/* If so, take indirection */
0:
	mov	M_OWNER(%rdx), %rcx	/* Load owner */
	cmp	$(MUTEX_ASSERT_OWNED), %rsi
	jne	2f			/* Assert ownership? */
	cmp	%rax, %rcx		/* Current thread match? */
	jne	3f			/* no, go panic */
	testl	$(M_ILOCKED_MSK | M_MLOCKED_MSK), M_STATE(%rdx)
	je	3f
1:					/* yes, we own it */
	NONLEAF_RET
2:
	cmp	%rax, %rcx		/* Current thread match? */
	jne	1b			/* No, return */
	ALIGN_STACK()
	LOAD_PTR_ARG1(%rdx)
	LOAD_STRING_ARG0(mutex_assert_owned_str)
	jmp	4f
3:
	ALIGN_STACK()
	LOAD_PTR_ARG1(%rdx)
	LOAD_STRING_ARG0(mutex_assert_not_owned_str)
4:
	CALL_PANIC()


lck_mtx_destroyed:
	ALIGN_STACK()
	LOAD_PTR_ARG1(%rdx)
	LOAD_STRING_ARG0(mutex_interlock_destroyed_str)
	CALL_PANIC()


.data
mutex_assert_not_owned_str:
	.asciz	"mutex (%p) not owned\n"
mutex_assert_owned_str:
	.asciz	"mutex (%p) owned\n"
mutex_interlock_destroyed_str:
	.asciz	"trying to interlock destroyed mutex (%p)"
.text
1096 | ||
2d21ac55 A |
1097 | |
1098 | ||
91447636 A |
1099 | /* |
1100 | * lck_mtx_lock() | |
1101 | * lck_mtx_try_lock() | |
b0d623f7 | 1102 | * lck_mtx_unlock() |
2d21ac55 | 1103 | * lck_mtx_lock_spin() |
6d2010ae | 1104 | * lck_mtx_lock_spin_always() |
39236c6e A |
1105 | * lck_mtx_try_lock_spin() |
1106 | * lck_mtx_try_lock_spin_always() | |
2d21ac55 | 1107 | * lck_mtx_convert_spin() |
91447636 | 1108 | */ |
6d2010ae | 1109 | NONLEAF_ENTRY(lck_mtx_lock_spin_always) |
fe8ab488 | 1110 | mov %rdi, %rdx /* fetch lock pointer */ |
316670eb A |
1111 | jmp Llmls_avoid_check |
1112 | ||
2d21ac55 | 1113 | NONLEAF_ENTRY(lck_mtx_lock_spin) |
fe8ab488 | 1114 | mov %rdi, %rdx /* fetch lock pointer */ |
1c79356b | 1115 | |
91447636 | 1116 | CHECK_PREEMPTION_LEVEL() |
316670eb | 1117 | Llmls_avoid_check: |
fe8ab488 A |
1118 | mov M_STATE(%rdx), %ecx |
1119 | test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */ | |
6d2010ae | 1120 | jnz Llmls_slow |
fe8ab488 A |
1121 | Llmls_try: /* no - can't be INDIRECT, DESTROYED or locked */ |
1122 | mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ | |
1123 | or $(M_ILOCKED_MSK | M_SPIN_MSK), %ecx | |
6d2010ae A |
1124 | |
1125 | PREEMPTION_DISABLE | |
b0d623f7 | 1126 | lock |
fe8ab488 | 1127 | cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ |
6d2010ae | 1128 | jne Llmls_busy_disabled |
2d21ac55 | 1129 | |
fe8ab488 A |
1130 | mov %gs:CPU_ACTIVE_THREAD, %rax |
1131 | mov %rax, M_OWNER(%rdx) /* record owner of interlock */ | |
6d2010ae | 1132 | #if MACH_LDEBUG |
fe8ab488 | 1133 | test %rax, %rax |
6d2010ae | 1134 | jz 1f |
fe8ab488 | 1135 | incl TH_MUTEX_COUNT(%rax) /* lock statistic */ |
6d2010ae A |
1136 | 1: |
1137 | #endif /* MACH_LDEBUG */ | |
0c530ab8 | 1138 | |
b0d623f7 | 1139 | /* return with the interlock held and preemption disabled */ |
2d21ac55 A |
1140 | leave |
1141 | #if CONFIG_DTRACE | |
b0d623f7 | 1142 | LOCKSTAT_LABEL(_lck_mtx_lock_spin_lockstat_patch_point) |
2d21ac55 | 1143 | ret |
fe8ab488 A |
1144 | /* inherit lock pointer in %rdx above */ |
1145 | LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, %rdx) | |
2d21ac55 A |
1146 | #endif |
1147 | ret | |
0c530ab8 | 1148 | |
6d2010ae | 1149 | Llmls_slow: |
fe8ab488 A |
1150 | test $M_ILOCKED_MSK, %ecx /* is the interlock held */ |
1151 | jz Llml_contended /* no, must have been the mutex */ | |
2d21ac55 | 1152 | |
fe8ab488 | 1153 | cmp $(MUTEX_DESTROYED), %ecx /* check to see if its marked destroyed */ |
b0d623f7 | 1154 | je lck_mtx_destroyed |
fe8ab488 A |
1155 | cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex */ |
1156 | jne Llmls_loop /* no... must be interlocked */ | |
5d5c5d0d | 1157 | |
b0d623f7 | 1158 | LMTX_ENTER_EXTENDED |
0c530ab8 | 1159 | |
fe8ab488 A |
1160 | mov M_STATE(%rdx), %ecx |
1161 | test $(M_SPIN_MSK), %ecx | |
6d2010ae | 1162 | jz Llmls_loop1 |
2d21ac55 | 1163 | |
6d2010ae A |
1164 | LMTX_UPDATE_MISS /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */ |
1165 | Llmls_loop: | |
2d21ac55 | 1166 | PAUSE |
fe8ab488 | 1167 | mov M_STATE(%rdx), %ecx |
6d2010ae | 1168 | Llmls_loop1: |
fe8ab488 | 1169 | test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx |
6d2010ae | 1170 | jz Llmls_try |
fe8ab488 A |
1171 | test $(M_MLOCKED_MSK), %ecx |
1172 | jnz Llml_contended /* mutex owned by someone else, go contend for it */ | |
6d2010ae A |
1173 | jmp Llmls_loop |
1174 | ||
1175 | Llmls_busy_disabled: | |
1176 | PREEMPTION_ENABLE | |
1177 | jmp Llmls_loop | |



NONLEAF_ENTRY(lck_mtx_lock)
	mov	%rdi, %rdx		/* fetch lock pointer */

	CHECK_PREEMPTION_LEVEL()

	mov	M_STATE(%rdx), %ecx
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx	/* is the interlock or mutex held */
	jnz	Llml_slow
Llml_try:				/* no - can't be INDIRECT, DESTROYED or locked */
	mov	%rcx, %rax		/* eax contains snapshot for cmpxchgl */
	or	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx

	PREEMPTION_DISABLE
	lock
	cmpxchg %ecx, M_STATE(%rdx)	/* atomic compare and exchange */
	jne	Llml_busy_disabled

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)	/* record owner of mutex */
#if	MACH_LDEBUG
	test	%rax, %rax
	jz	1f
	incl	TH_MUTEX_COUNT(%rax)	/* lock statistic */
1:
#endif	/* MACH_LDEBUG */

	testl	$(M_WAITERS_MSK), M_STATE(%rdx)
	jz	Llml_finish

	LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)

Llml_finish:
	andl	$(~M_ILOCKED_MSK), M_STATE(%rdx)
	PREEMPTION_ENABLE

	cmp	%rdx, %rdi		/* is this an extended mutex */
	jne	2f

	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_lck_mtx_lock_lockstat_patch_point)
	ret
	/* inherit lock pointer in %rdx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, %rdx)
#endif
	ret
2:
	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point)
	ret
	/* inherit lock pointer in %rdx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, %rdx)
#endif
	ret


Llml_slow:
	test	$M_ILOCKED_MSK, %ecx	/* is the interlock held */
	jz	Llml_contended		/* no, must have been the mutex */

	cmp	$(MUTEX_DESTROYED), %ecx	/* check to see if it's marked destroyed */
	je	lck_mtx_destroyed
	cmp	$(MUTEX_IND), %ecx	/* Is this an indirect mutex? */
	jne	Llml_loop		/* no... must be interlocked */

	LMTX_ENTER_EXTENDED

	mov	M_STATE(%rdx), %ecx
	test	$(M_SPIN_MSK), %ecx
	jz	Llml_loop1

	LMTX_UPDATE_MISS		/* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */
Llml_loop:
	PAUSE
	mov	M_STATE(%rdx), %ecx
Llml_loop1:
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
	jz	Llml_try
	test	$(M_MLOCKED_MSK), %ecx
	jnz	Llml_contended		/* mutex owned by someone else, go contend for it */
	jmp	Llml_loop

Llml_busy_disabled:
	PREEMPTION_ENABLE
	jmp	Llml_loop
6d2010ae | 1268 | |
b0d623f7 | 1269 | Llml_contended: |
fe8ab488 | 1270 | cmp %rdx, %rdi /* is this an extended mutex */ |
b0d623f7 A |
1271 | je 0f |
1272 | LMTX_UPDATE_MISS | |
1273 | 0: | |
1274 | LMTX_CALLEXT1(lck_mtx_lock_spinwait_x86) | |
1275 | ||
fe8ab488 | 1276 | test %rax, %rax |
6d2010ae A |
1277 | jz Llml_acquired /* acquired mutex, interlock held and preemption disabled */ |
1278 | ||
fe8ab488 | 1279 | cmp $1, %rax /* check for direct wait status */ |
b0d623f7 | 1280 | je 2f |
fe8ab488 | 1281 | cmp %rdx, %rdi /* is this an extended mutex */ |
b0d623f7 A |
1282 | je 2f |
1283 | LMTX_UPDATE_DIRECT_WAIT | |
1284 | 2: | |
fe8ab488 A |
1285 | mov M_STATE(%rdx), %ecx |
1286 | test $(M_ILOCKED_MSK), %ecx | |
6d2010ae | 1287 | jnz 6f |
b0d623f7 | 1288 | |
fe8ab488 A |
1289 | mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ |
1290 | or $(M_ILOCKED_MSK), %ecx /* try to take the interlock */ | |
6d2010ae A |
1291 | |
1292 | PREEMPTION_DISABLE | |
b0d623f7 | 1293 | lock |
fe8ab488 | 1294 | cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ |
b0d623f7 | 1295 | jne 5f |
2d21ac55 | 1296 | |
fe8ab488 | 1297 | test $(M_MLOCKED_MSK), %ecx /* we've got the interlock and */ |
6d2010ae | 1298 | jnz 3f |
fe8ab488 A |
1299 | or $(M_MLOCKED_MSK), %ecx /* the mutex is free... grab it directly */ |
1300 | mov %ecx, M_STATE(%rdx) | |
b0d623f7 | 1301 | |
fe8ab488 A |
1302 | mov %gs:CPU_ACTIVE_THREAD, %rax |
1303 | mov %rax, M_OWNER(%rdx) /* record owner of mutex */ | |
6d2010ae | 1304 | #if MACH_LDEBUG |
fe8ab488 | 1305 | test %rax, %rax |
6d2010ae | 1306 | jz 1f |
fe8ab488 | 1307 | incl TH_MUTEX_COUNT(%rax) /* lock statistic */ |
6d2010ae A |
1308 | 1: |
1309 | #endif /* MACH_LDEBUG */ | |
2d21ac55 | 1310 | |
6d2010ae | 1311 | Llml_acquired: |
fe8ab488 | 1312 | testl $(M_WAITERS_MSK), M_STATE(%rdx) |
6d2010ae | 1313 | jnz 1f |
fe8ab488 A |
1314 | mov M_OWNER(%rdx), %rax |
1315 | mov TH_WAS_PROMOTED_ON_WAKEUP(%rax), %eax | |
1316 | test %eax, %eax | |
6d2010ae A |
1317 | jz Llml_finish |
1318 | 1: | |
1319 | LMTX_CALLEXT1(lck_mtx_lock_acquire_x86) | |
1320 | jmp Llml_finish | |
b0d623f7 | 1321 | |
6d2010ae | 1322 | 3: /* interlock held, mutex busy */ |
fe8ab488 | 1323 | cmp %rdx, %rdi /* is this an extended mutex */ |
b0d623f7 A |
1324 | je 4f |
1325 | LMTX_UPDATE_WAIT | |
1326 | 4: | |
1327 | LMTX_CALLEXT1(lck_mtx_lock_wait_x86) | |
1328 | jmp Llml_contended | |
1329 | 5: | |
6d2010ae | 1330 | PREEMPTION_ENABLE |
b0d623f7 A |
1331 | 6: |
1332 | PAUSE | |
1333 | jmp 2b | |
2d21ac55 A |
1334 | |
1335 | ||
39236c6e | 1336 | NONLEAF_ENTRY(lck_mtx_try_lock_spin_always) |
fe8ab488 | 1337 | mov %rdi, %rdx /* fetch lock pointer */ |
39236c6e A |
1338 | jmp Llmts_avoid_check |
1339 | ||
2d21ac55 | 1340 | NONLEAF_ENTRY(lck_mtx_try_lock_spin) |
fe8ab488 | 1341 | mov %rdi, %rdx /* fetch lock pointer */ |
1c79356b | 1342 | |
39236c6e | 1343 | Llmts_avoid_check: |
fe8ab488 A |
1344 | mov M_STATE(%rdx), %ecx |
1345 | test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */ | |
6d2010ae | 1346 | jnz Llmts_slow |
fe8ab488 A |
1347 | Llmts_try: /* no - can't be INDIRECT, DESTROYED or locked */ |
1348 | mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ | |
1349 | or $(M_ILOCKED_MSK | M_SPIN_MSK), %rcx | |
6d2010ae A |
1350 | |
1351 | PREEMPTION_DISABLE | |
b0d623f7 | 1352 | lock |
fe8ab488 | 1353 | cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ |
6d2010ae | 1354 | jne Llmts_busy_disabled |
2d21ac55 | 1355 | |
fe8ab488 A |
1356 | mov %gs:CPU_ACTIVE_THREAD, %rax |
1357 | mov %rax, M_OWNER(%rdx) /* record owner of mutex */ | |
6d2010ae | 1358 | #if MACH_LDEBUG |
fe8ab488 | 1359 | test %rax, %rax |
6d2010ae | 1360 | jz 1f |
fe8ab488 | 1361 | incl TH_MUTEX_COUNT(%rax) /* lock statistic */ |
6d2010ae A |
1362 | 1: |
1363 | #endif /* MACH_LDEBUG */ | |
2d21ac55 | 1364 | |
2d21ac55 | 1365 | leave |
b0d623f7 | 1366 | |
2d21ac55 | 1367 | #if CONFIG_DTRACE |
fe8ab488 | 1368 | mov $1, %rax /* return success */ |
2d21ac55 A |
1369 | LOCKSTAT_LABEL(_lck_mtx_try_lock_spin_lockstat_patch_point) |
1370 | ret | |
fe8ab488 A |
1371 | /* inherit lock pointer in %rdx above */ |
1372 | LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, %rdx) | |
2d21ac55 | 1373 | #endif |
fe8ab488 | 1374 | mov $1, %rax /* return success */ |
2d21ac55 A |
1375 | ret |
1376 | ||
6d2010ae | 1377 | Llmts_slow: |
fe8ab488 | 1378 | test $(M_ILOCKED_MSK), %ecx /* is the interlock held */ |
6d2010ae | 1379 | jz Llmts_fail /* no, must be held as a mutex */ |
2d21ac55 | 1380 | |
fe8ab488 | 1381 | cmp $(MUTEX_DESTROYED), %ecx /* check to see if its marked destroyed */ |
b0d623f7 | 1382 | je lck_mtx_destroyed |
fe8ab488 | 1383 | cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */ |
6d2010ae | 1384 | jne Llmts_loop1 |
2d21ac55 | 1385 | |
b0d623f7 | 1386 | LMTX_ENTER_EXTENDED |
6d2010ae | 1387 | Llmts_loop: |
b0d623f7 | 1388 | PAUSE |
fe8ab488 | 1389 | mov M_STATE(%rdx), %ecx |
6d2010ae | 1390 | Llmts_loop1: |
fe8ab488 | 1391 | test $(M_MLOCKED_MSK | M_SPIN_MSK), %ecx |
6d2010ae | 1392 | jnz Llmts_fail |
fe8ab488 | 1393 | test $(M_ILOCKED_MSK), %ecx |
6d2010ae A |
1394 | jz Llmts_try |
1395 | jmp Llmts_loop | |
1396 | ||
1397 | Llmts_busy_disabled: | |
1398 | PREEMPTION_ENABLE | |
1399 | jmp Llmts_loop | |
1400 | ||
1401 | ||
1402 | ||
1403 | NONLEAF_ENTRY(lck_mtx_try_lock) | |
fe8ab488 | 1404 | mov %rdi, %rdx /* fetch lock pointer */ |
b0d623f7 | 1405 | |
fe8ab488 A |
1406 | mov M_STATE(%rdx), %ecx |
1407 | test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */ | |
6d2010ae | 1408 | jnz Llmt_slow |
fe8ab488 A |
1409 | Llmt_try: /* no - can't be INDIRECT, DESTROYED or locked */ |
1410 | mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ | |
1411 | or $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx | |
6d2010ae A |
1412 | |
1413 | PREEMPTION_DISABLE | |
b0d623f7 | 1414 | lock |
fe8ab488 | 1415 | cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ |
6d2010ae | 1416 | jne Llmt_busy_disabled |
9bccf70c | 1417 | |
fe8ab488 A |
1418 | mov %gs:CPU_ACTIVE_THREAD, %rax |
1419 | mov %rax, M_OWNER(%rdx) /* record owner of mutex */ | |
6d2010ae | 1420 | #if MACH_LDEBUG |
fe8ab488 | 1421 | test %rax, %rax |
6d2010ae | 1422 | jz 1f |
fe8ab488 | 1423 | incl TH_MUTEX_COUNT(%rax) /* lock statistic */ |
6d2010ae A |
1424 | 1: |
1425 | #endif /* MACH_LDEBUG */ | |
1c79356b | 1426 | |
fe8ab488 | 1427 | test $(M_WAITERS_MSK), %ecx |
6d2010ae A |
1428 | jz 0f |
1429 | ||
b0d623f7 | 1430 | LMTX_CALLEXT1(lck_mtx_lock_acquire_x86) |
6d2010ae | 1431 | 0: |
fe8ab488 | 1432 | andl $(~M_ILOCKED_MSK), M_STATE(%rdx) |
6d2010ae | 1433 | PREEMPTION_ENABLE |
b0d623f7 | 1434 | |
6d2010ae | 1435 | leave |
2d21ac55 | 1436 | #if CONFIG_DTRACE |
fe8ab488 | 1437 | mov $1, %rax /* return success */ |
2d21ac55 A |
1438 | /* Dtrace probe: LS_LCK_MTX_TRY_LOCK_ACQUIRE */ |
1439 | LOCKSTAT_LABEL(_lck_mtx_try_lock_lockstat_patch_point) | |
1440 | ret | |
fe8ab488 A |
1441 | /* inherit lock pointer in %rdx from above */ |
1442 | LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, %rdx) | |
b0d623f7 | 1443 | #endif |
fe8ab488 | 1444 | mov $1, %rax /* return success */ |
2d21ac55 | 1445 | ret |
1c79356b | 1446 | |
6d2010ae | 1447 | Llmt_slow: |
fe8ab488 | 1448 | test $(M_ILOCKED_MSK), %ecx /* is the interlock held */ |
6d2010ae A |
1449 | jz Llmt_fail /* no, must be held as a mutex */ |
1450 | ||
fe8ab488 | 1451 | cmp $(MUTEX_DESTROYED), %ecx /* check to see if its marked destroyed */ |
6d2010ae | 1452 | je lck_mtx_destroyed |
fe8ab488 | 1453 | cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */ |
6d2010ae A |
1454 | jne Llmt_loop |
1455 | ||
1456 | LMTX_ENTER_EXTENDED | |
1457 | Llmt_loop: | |
1458 | PAUSE | |
fe8ab488 | 1459 | mov M_STATE(%rdx), %ecx |
6d2010ae | 1460 | Llmt_loop1: |
fe8ab488 | 1461 | test $(M_MLOCKED_MSK | M_SPIN_MSK), %ecx |
6d2010ae | 1462 | jnz Llmt_fail |
fe8ab488 | 1463 | test $(M_ILOCKED_MSK), %ecx |
6d2010ae A |
1464 | jz Llmt_try |
1465 | jmp Llmt_loop | |
1466 | ||
1467 | Llmt_busy_disabled: | |
1468 | PREEMPTION_ENABLE | |
1469 | jmp Llmt_loop | |
1470 | ||
0c530ab8 A |
1471 | |
1472 | Llmt_fail: | |
b0d623f7 | 1473 | Llmts_fail: |
fe8ab488 | 1474 | cmp %rdx, %rdi /* is this an extended mutex */ |
b0d623f7 A |
1475 | je 0f |
1476 | LMTX_UPDATE_MISS | |
b0d623f7 | 1477 | 0: |
fe8ab488 | 1478 | xor %rax, %rax |
91447636 | 1479 | NONLEAF_RET |
1c79356b | 1480 | |
2d21ac55 A |
1481 | |
1482 | ||
b0d623f7 | 1483 | NONLEAF_ENTRY(lck_mtx_convert_spin) |
fe8ab488 | 1484 | mov %rdi, %rdx /* fetch lock pointer */ |
2d21ac55 | 1485 | |
fe8ab488 A |
1486 | mov M_STATE(%rdx), %ecx |
1487 | cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */ | |
6d2010ae | 1488 | jne 0f |
fe8ab488 A |
1489 | mov M_PTR(%rdx), %rdx /* If so, take indirection */ |
1490 | mov M_STATE(%rdx), %ecx | |
6d2010ae | 1491 | 0: |
fe8ab488 | 1492 | test $(M_MLOCKED_MSK), %ecx /* already owned as a mutex, just return */ |
6d2010ae | 1493 | jnz 2f |
fe8ab488 | 1494 | test $(M_WAITERS_MSK), %ecx /* are there any waiters? */ |
6d2010ae | 1495 | jz 1f |
2d21ac55 | 1496 | |
6d2010ae | 1497 | LMTX_CALLEXT1(lck_mtx_lock_acquire_x86) |
fe8ab488 | 1498 | mov M_STATE(%rdx), %ecx |
b0d623f7 | 1499 | 1: |
fe8ab488 A |
1500 | and $(~(M_ILOCKED_MSK | M_SPIN_MSK)), %ecx /* convert from spin version to mutex */ |
1501 | or $(M_MLOCKED_MSK), %ecx | |
1502 | mov %ecx, M_STATE(%rdx) /* since I own the interlock, I don't need an atomic update */ | |
2d21ac55 | 1503 | |
6d2010ae | 1504 | PREEMPTION_ENABLE |
b0d623f7 A |
1505 | 2: |
1506 | NONLEAF_RET | |
2d21ac55 | 1507 | |
6d2010ae | 1508 | |
2d21ac55 | 1509 | |
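/*
 * Illustrative note (not part of the original source): lck_mtx_convert_spin
 * promotes a lock taken with lck_mtx_lock_spin() to a full mutex hold
 * without ever releasing it; since the caller already owns the interlock,
 * the state rewrite above needs no atomic update.  A hedged usage sketch
 * (the lock and predicate names are hypothetical):
 *
 *	lck_mtx_lock_spin(&my_mtx);		// cheap spin variant
 *	if (might_need_to_block()) {
 *		lck_mtx_convert_spin(&my_mtx);	// now safe to sleep holding it
 *		// ...code that may block...
 *	}
 *	lck_mtx_unlock(&my_mtx);
 */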
b0d623f7 | 1510 | NONLEAF_ENTRY(lck_mtx_unlock) |
fe8ab488 | 1511 | mov %rdi, %rdx /* fetch lock pointer */ |
6d2010ae | 1512 | Llmu_entry: |
fe8ab488 | 1513 | mov M_STATE(%rdx), %ecx |
b0d623f7 | 1514 | Llmu_prim: |
fe8ab488 | 1515 | cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */ |
b0d623f7 | 1516 | je Llmu_ext |
1c79356b | 1517 | |
6d2010ae | 1518 | Llmu_chktype: |
fe8ab488 | 1519 | test $(M_MLOCKED_MSK), %ecx /* check for full mutex */ |
6d2010ae A |
1520 | jz Llmu_unlock |
1521 | Llmu_mutex: | |
fe8ab488 | 1522 | test $(M_ILOCKED_MSK), %rcx /* have to wait for interlock to clear */ |
6d2010ae | 1523 | jnz Llmu_busy |
b0d623f7 | 1524 | |
fe8ab488 A |
1525 | mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ |
1526 | and $(~M_MLOCKED_MSK), %ecx /* drop mutex */ | |
1527 | or $(M_ILOCKED_MSK), %ecx /* pick up interlock */ | |
6d2010ae A |
1528 | |
1529 | PREEMPTION_DISABLE | |
b0d623f7 | 1530 | lock |
fe8ab488 A |
1531 | cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ |
1532 | jne Llmu_busy_disabled /* branch on failure to spin loop */ | |
b0d623f7 | 1533 | |
6d2010ae | 1534 | Llmu_unlock: |
fe8ab488 A |
1535 | xor %rax, %rax |
1536 | mov %rax, M_OWNER(%rdx) | |
1537 | mov %rcx, %rax /* keep original state in %ecx for later evaluation */ | |
1538 | and $(~(M_ILOCKED_MSK | M_SPIN_MSK | M_PROMOTED_MSK)), %rax | |
6d2010ae | 1539 | |
fe8ab488 | 1540 | test $(M_WAITERS_MSK), %eax |
6d2010ae | 1541 | jz 2f |
fe8ab488 | 1542 | dec %eax /* decrement waiter count */ |
b0d623f7 | 1543 | 2: |
fe8ab488 | 1544 | mov %eax, M_STATE(%rdx) /* since I own the interlock, I don't need an atomic update */ |
6d2010ae A |
1545 | |
1546 | #if MACH_LDEBUG | |
1547 | /* perform lock statistics after drop to prevent delay */ | |
fe8ab488 A |
1548 | mov %gs:CPU_ACTIVE_THREAD, %rax |
1549 | test %rax, %rax | |
6d2010ae | 1550 | jz 1f |
fe8ab488 | 1551 | decl TH_MUTEX_COUNT(%rax) /* lock statistic */ |
6d2010ae A |
1552 | 1: |
1553 | #endif /* MACH_LDEBUG */ | |
1554 | ||
fe8ab488 | 1555 | test $(M_PROMOTED_MSK | M_WAITERS_MSK), %ecx |
6d2010ae | 1556 | jz 3f |
b0d623f7 | 1557 | |
fe8ab488 | 1558 | LMTX_CALLEXT2(lck_mtx_unlock_wakeup_x86, %rcx) |
b0d623f7 | 1559 | 3: |
6d2010ae A |
1560 | PREEMPTION_ENABLE |
1561 | ||
fe8ab488 | 1562 | cmp %rdx, %rdi |
b0d623f7 | 1563 | jne 4f |
2d21ac55 | 1564 | |
2d21ac55 A |
1565 | leave |
1566 | #if CONFIG_DTRACE | |
1567 | /* Dtrace: LS_LCK_MTX_UNLOCK_RELEASE */ | |
1568 | LOCKSTAT_LABEL(_lck_mtx_unlock_lockstat_patch_point) | |
1569 | ret | |
fe8ab488 A |
1570 | /* inherit lock pointer in %rdx from above */ |
1571 | LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %rdx) | |
2d21ac55 A |
1572 | #endif |
1573 | ret | |
b0d623f7 | 1574 | 4: |
2d21ac55 A |
1575 | leave |
1576 | #if CONFIG_DTRACE | |
b0d623f7 A |
1577 | /* Dtrace: LS_LCK_MTX_EXT_UNLOCK_RELEASE */ |
1578 | LOCKSTAT_LABEL(_lck_mtx_ext_unlock_lockstat_patch_point) | |
2d21ac55 | 1579 | ret |
fe8ab488 A |
1580 | /* inherit lock pointer in %rdx from above */ |
1581 | LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, %rdx) | |
2d21ac55 A |
1582 | #endif |
1583 | ret | |
6d2010ae A |
1584 | |
1585 | ||
1586 | Llmu_busy_disabled: | |
1587 | PREEMPTION_ENABLE | |
1588 | Llmu_busy: | |
b0d623f7 | 1589 | PAUSE |
fe8ab488 | 1590 | mov M_STATE(%rdx), %ecx |
6d2010ae A |
1591 | jmp Llmu_mutex |
1592 | ||
b0d623f7 | 1593 | Llmu_ext: |
fe8ab488 A |
1594 | mov M_PTR(%rdx), %rdx |
1595 | mov M_OWNER(%rdx), %rax | |
1596 | mov %gs:CPU_ACTIVE_THREAD, %rcx | |
1597 | CHECK_UNLOCK(%rcx, %rax) | |
1598 | mov M_STATE(%rdx), %ecx | |
6d2010ae | 1599 | jmp Llmu_chktype |
b0d623f7 | 1600 | |
1c79356b | 1601 | |
2d21ac55 | 1602 | |
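/*
 * Illustrative note (not part of the original source): the unlock fast
 * path above is, in rough C, one CAS that drops M_MLOCKED_MSK while
 * taking the interlock, then a plain store that releases everything;
 * atomic_cas_32 below is a hypothetical helper standing in for the
 * lock cmpxchg (preemption bracketing elided):
 *
 *	uint32_t old = m->state;
 *	if (!(old & M_ILOCKED_MSK) &&
 *	    atomic_cas_32(&m->state, old, (old & ~M_MLOCKED_MSK) | M_ILOCKED_MSK)) {
 *		m->owner = NULL;
 *		uint32_t new = old &
 *		    ~(M_MLOCKED_MSK | M_ILOCKED_MSK | M_SPIN_MSK | M_PROMOTED_MSK);
 *		if (new & M_WAITERS_MSK)
 *			new--;			// retire one waiter from the count
 *		m->state = new;			// interlock owned: plain store suffices
 *	}
 */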
b0d623f7 | 1603 | LEAF_ENTRY(lck_mtx_ilk_unlock) |
fe8ab488 | 1604 | mov %rdi, %rdx /* fetch lock pointer - no indirection here */ |
0c530ab8 | 1605 | |
fe8ab488 | 1606 | andl $(~M_ILOCKED_MSK), M_STATE(%rdx) |
2d21ac55 | 1607 | |
fe8ab488 | 1608 | PREEMPTION_ENABLE /* need to re-enable preemption */ |
2d21ac55 | 1609 | |
b0d623f7 A |
1610 | LEAF_RET |
1611 | ||
2d21ac55 | 1612 | |
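/*
 * Illustrative note (not part of the original source): the interlock is
 * always taken with preemption disabled, so dropping it is just the
 * non-atomic andl above followed by PREEMPTION_ENABLE.  In rough C:
 *
 *	m->state &= ~M_ILOCKED_MSK;	// safe: this CPU owns the interlock
 *	enable_preemption();
 */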
b0d623f7 A |
1613 | |
1614 | LEAF_ENTRY(lck_mtx_lock_grab_mutex) | |
fe8ab488 | 1615 | mov %rdi, %rdx /* fetch lock pointer - no indirection here */ |
2d21ac55 | 1616 | |
fe8ab488 | 1617 | mov M_STATE(%rdx), %ecx |
1c79356b | 1618 | |
fe8ab488 | 1619 | test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* can't have the mutex yet */ |
6d2010ae | 1620 | jnz 3f |
1c79356b | 1621 | |
fe8ab488 A |
1622 | mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ |
1623 | or $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx | |
6d2010ae A |
1624 | |
1625 | PREEMPTION_DISABLE | |
b0d623f7 | 1626 | lock |
fe8ab488 | 1627 | cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ |
b0d623f7 | 1628 | jne 2f /* branch on failure to spin loop */ |
1c79356b | 1629 | |
fe8ab488 A |
1630 | mov %gs:CPU_ACTIVE_THREAD, %rax |
1631 | mov %rax, M_OWNER(%rdx) /* record owner of mutex */ | |
6d2010ae | 1632 | #if MACH_LDEBUG |
fe8ab488 | 1633 | test %rax, %rax |
6d2010ae | 1634 | jz 1f |
fe8ab488 | 1635 | incl TH_MUTEX_COUNT(%rax) /* lock statistic */ |
6d2010ae A |
1636 | 1: |
1637 | #endif /* MACH_LDEBUG */ | |
b0d623f7 | 1638 | |
fe8ab488 | 1639 | mov $1, %rax /* return success */ |
b0d623f7 A |
1640 | LEAF_RET |
1641 | 2: | |
6d2010ae A |
1642 | PREEMPTION_ENABLE |
1643 | 3: | |
fe8ab488 | 1644 | xor %rax, %rax /* return failure */ |
91447636 | 1645 | LEAF_RET |
b0d623f7 A |
1646 | |
1647 | ||
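/*
 * Illustrative note (not part of the original source): lck_mtx_lock_grab_mutex
 * is the leaf-call form of the try-lock fast path: one CAS that sets both
 * M_ILOCKED_MSK and M_MLOCKED_MSK and records the owner, returning 1 in
 * %rax on success and 0 on failure.  The C slow path (the adaptive spin in
 * lck_mtx_lock_spinwait_x86, for example) uses it roughly like this
 * (hedged sketch; still_worth_spinning and cpu_pause are hypothetical
 * stand-ins for the spin-deadline check and the PAUSE hint):
 *
 *	while (still_worth_spinning() && !lck_mtx_lock_grab_mutex(mutex))
 *		cpu_pause();
 */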
2d21ac55 | 1648 | |
b0d623f7 | 1649 | LEAF_ENTRY(lck_mtx_lock_mark_destroyed) |
fe8ab488 | 1650 | mov %rdi, %rdx |
b0d623f7 | 1651 | 1: |
fe8ab488 A |
1652 | mov M_STATE(%rdx), %ecx |
1653 | cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */ | |
b0d623f7 A |
1654 | jne 2f |
1655 | ||
fe8ab488 | 1656 | movl $(MUTEX_DESTROYED), M_STATE(%rdx) /* convert to destroyed state */ |
b0d623f7 A |
1657 | jmp 3f |
1658 | 2: | |
fe8ab488 | 1659 | test $(M_ILOCKED_MSK), %rcx /* have to wait for interlock to clear */ |
6d2010ae | 1660 | jnz 5f |
b0d623f7 | 1661 | |
6d2010ae | 1662 | PREEMPTION_DISABLE |
fe8ab488 A |
1663 | mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ |
1664 | or $(M_ILOCKED_MSK), %ecx | |
b0d623f7 | 1665 | lock |
fe8ab488 A |
1666 | cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ |
1667 | jne 4f /* branch on failure to spin loop */ | |
1668 | movl $(MUTEX_DESTROYED), M_STATE(%rdx) /* convert to destroyed state */ | |
6d2010ae | 1669 | PREEMPTION_ENABLE |
b0d623f7 | 1670 | 3: |
fe8ab488 | 1671 | LEAF_RET /* return with M_ILOCKED set */ |
b0d623f7 | 1672 | 4: |
6d2010ae | 1673 | PREEMPTION_ENABLE |
b0d623f7 A |
1674 | 5: |
1675 | PAUSE | |
1676 | jmp 1b | |
1677 | ||
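/*
 * Illustrative note (not part of the original source): mark_destroyed
 * flips the state word to MUTEX_DESTROYED under the interlock so that any
 * racing lock/try-lock lands in the lck_mtx_destroyed handler (see the
 * cmp $(MUTEX_DESTROYED) checks earlier in this file).  In rough C, with
 * atomic_cas_32 and cpu_pause as hypothetical helpers and the preemption
 * bracketing elided:
 *
 *	for (;;) {
 *		uint32_t old = m->state;
 *		if (old == MUTEX_IND) {			// indirect header: no interlock needed
 *			m->state = MUTEX_DESTROYED;
 *			return;
 *		}
 *		if (!(old & M_ILOCKED_MSK) &&
 *		    atomic_cas_32(&m->state, old, old | M_ILOCKED_MSK)) {
 *			m->state = MUTEX_DESTROYED;	// returns with M_ILOCKED semantics
 *			return;
 *		}
 *		cpu_pause();
 *	}
 */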
6d2010ae A |
1678 | LEAF_ENTRY(preemption_underflow_panic) |
1679 | FRAME | |
1680 | incl %gs:CPU_PREEMPTION_LEVEL | |
1681 | ALIGN_STACK() | |
1682 | LOAD_STRING_ARG0(16f) | |
1683 | CALL_PANIC() | |
1684 | hlt | |
1685 | .data | |
1686 | 16: String "Preemption level underflow, possible cause: unlocking an unlocked mutex or spinlock"
1687 | .text | |
1688 | ||
1689 | ||
91447636 | 1690 | LEAF_ENTRY(_disable_preemption) |
1c79356b | 1691 | #if MACH_RT |
6d2010ae | 1692 | PREEMPTION_DISABLE |
1c79356b | 1693 | #endif /* MACH_RT */ |
91447636 | 1694 | LEAF_RET |
1c79356b | 1695 | |
91447636 | 1696 | LEAF_ENTRY(_enable_preemption) |
1c79356b A |
1697 | #if MACH_RT |
1698 | #if MACH_ASSERT | |
91447636 | 1699 | cmpl $0,%gs:CPU_PREEMPTION_LEVEL |
1c79356b | 1700 | jg 1f |
b0d623f7 | 1701 | movl %gs:CPU_PREEMPTION_LEVEL,%esi |
6d2010ae | 1702 | ALIGN_STACK() |
b0d623f7 A |
1703 | LOAD_STRING_ARG0(_enable_preemption_less_than_zero) |
1704 | CALL_PANIC() | |
1c79356b | 1705 | hlt |
b0d623f7 A |
1706 | .cstring |
1707 | _enable_preemption_less_than_zero: | |
1708 | .asciz "_enable_preemption: preemption_level(%d) < 0!" | |
1c79356b A |
1709 | .text |
1710 | 1: | |
1711 | #endif /* MACH_ASSERT */ | |
6d2010ae | 1712 | PREEMPTION_ENABLE |
1c79356b | 1713 | #endif /* MACH_RT */ |
91447636 | 1714 | LEAF_RET |
1c79356b | 1715 | |
91447636 | 1716 | LEAF_ENTRY(_enable_preemption_no_check) |
1c79356b A |
1717 | #if MACH_RT |
1718 | #if MACH_ASSERT | |
91447636 | 1719 | cmpl $0,%gs:CPU_PREEMPTION_LEVEL |
1c79356b | 1720 | jg 1f |
6d2010ae | 1721 | ALIGN_STACK() |
b0d623f7 A |
1722 | LOAD_STRING_ARG0(_enable_preemption_no_check_less_than_zero) |
1723 | CALL_PANIC() | |
1c79356b | 1724 | hlt |
b0d623f7 A |
1725 | .cstring |
1726 | _enable_preemption_no_check_less_than_zero: | |
1727 | .asciz "_enable_preemption_no_check: preemption_level <= 0!" | |
1c79356b A |
1728 | .text |
1729 | 1: | |
1730 | #endif /* MACH_ASSERT */ | |
91447636 | 1731 | _ENABLE_PREEMPTION_NO_CHECK |
1c79356b | 1732 | #endif /* MACH_RT */ |
91447636 | 1733 | LEAF_RET |
1c79356b A |
1734 | |
1735 | ||
91447636 A |
1736 | LEAF_ENTRY(_mp_disable_preemption) |
1737 | #if MACH_RT | |
6d2010ae | 1738 | PREEMPTION_DISABLE |
91447636 A |
1739 | #endif /* MACH_RT */ |
1740 | LEAF_RET | |
1c79356b | 1741 | |
91447636 A |
1742 | LEAF_ENTRY(_mp_enable_preemption) |
1743 | #if MACH_RT | |
1c79356b | 1744 | #if MACH_ASSERT |
91447636 | 1745 | cmpl $0,%gs:CPU_PREEMPTION_LEVEL |
1c79356b | 1746 | jg 1f |
b0d623f7 | 1747 | movl %gs:CPU_PREEMPTION_LEVEL,%esi |
6d2010ae | 1748 | ALIGN_STACK()
b0d623f7 A |
1749 | LOAD_STRING_ARG0(_mp_enable_preemption_less_than_zero) |
1750 | CALL_PANIC() | |
1c79356b | 1751 | hlt |
b0d623f7 A |
1752 | .cstring |
1753 | _mp_enable_preemption_less_than_zero: | |
1754 | .asciz "_mp_enable_preemption: preemption_level (%d) <= 0!" | |
1c79356b A |
1755 | .text |
1756 | 1: | |
1757 | #endif /* MACH_ASSERT */ | |
6d2010ae | 1758 | PREEMPTION_ENABLE |
91447636 A |
1759 | #endif /* MACH_RT */ |
1760 | LEAF_RET | |
1c79356b | 1761 | |
91447636 A |
1762 | LEAF_ENTRY(_mp_enable_preemption_no_check) |
1763 | #if MACH_RT | |
1c79356b | 1764 | #if MACH_ASSERT |
91447636 | 1765 | cmpl $0,%gs:CPU_PREEMPTION_LEVEL |
1c79356b | 1766 | jg 1f |
6d2010ae | 1767 | ALIGN_STACK() |
b0d623f7 A |
1768 | LOAD_STRING_ARG0(_mp_enable_preemption_no_check_less_than_zero) |
1769 | CALL_PANIC() | |
1c79356b | 1770 | hlt |
b0d623f7 A |
1771 | .cstring |
1772 | _mp_enable_preemption_no_check_less_than_zero: | |
1773 | .asciz "_mp_enable_preemption_no_check: preemption_level <= 0!" | |
1c79356b A |
1774 | .text |
1775 | 1: | |
1776 | #endif /* MACH_ASSERT */ | |
91447636 A |
1777 | _ENABLE_PREEMPTION_NO_CHECK |
1778 | #endif /* MACH_RT */ | |
1779 | LEAF_RET | |
1c79356b | 1780 | |
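/*
 * Illustrative note (not part of the original source): preemption disables
 * nest.  PREEMPTION_DISABLE bumps the per-CPU CPU_PREEMPTION_LEVEL and
 * PREEMPTION_ENABLE decrements it, honoring any pending preemption only
 * when the count returns to zero; the MACH_ASSERT blocks above panic if
 * the count ever goes negative.  Rough nesting semantics:
 *
 *	disable_preemption();	// level 0 -> 1
 *	disable_preemption();	// level 1 -> 2 (nested)
 *	enable_preemption();	// level 2 -> 1, still not preemptible
 *	enable_preemption();	// level 1 -> 0, pending preemption taken here
 */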
b0d623f7 A |
1781 | /* |
1782 | * Atomic primitives, prototyped in kern/simple_lock.h | |
1783 | */ | |
1784 | LEAF_ENTRY(hw_atomic_add) | |
316670eb A |
1785 | #if MACH_LDEBUG |
1786 | test $3, %rdi | |
1787 | jz 1f | |
1788 | ud2 | |
1789 | 1: | |
1790 | #endif | |
b0d623f7 | 1791 | movl %esi, %eax /* Load addend */ |
fe8ab488 | 1792 | lock xaddl %eax, (%rdi) /* Atomic exchange and add */ |
b0d623f7 A |
1793 | addl %esi, %eax /* Calculate result */ |
1794 | LEAF_RET | |
1795 | ||
1796 | LEAF_ENTRY(hw_atomic_sub) | |
316670eb A |
1797 | #if MACH_LDEBUG |
1798 | test $3, %rdi | |
1799 | jz 1f | |
1800 | ud2 | |
1801 | 1: | |
1802 | #endif | |
b0d623f7 A |
1803 | negl %esi |
1804 | movl %esi, %eax | |
fe8ab488 | 1805 | lock xaddl %eax, (%rdi) /* Atomic exchange and add */ |
b0d623f7 A |
1806 | addl %esi, %eax /* Calculate result */ |
1807 | LEAF_RET | |
1808 | ||
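/*
 * Illustrative note (not part of the original source): hw_atomic_add and
 * hw_atomic_sub are built on lock xaddl, which leaves the old value in
 * %eax; the trailing addl folds the addend back in so the caller gets the
 * post-operation value.  A minimal refcount sketch (the counter is
 * hypothetical):
 *
 *	static volatile uint32_t refcnt;
 *
 *	(void) hw_atomic_add(&refcnt, 1);	// take a reference
 *	if (hw_atomic_sub(&refcnt, 1) == 0) {
 *		// last reference dropped
 *	}
 */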
1809 | LEAF_ENTRY(hw_atomic_or) | |
316670eb A |
1810 | #if MACH_LDEBUG |
1811 | test $3, %rdi | |
1812 | jz 1f | |
1813 | ud2 | |
1814 | 1: | |
1815 | #endif | |
b0d623f7 A |
1816 | movl (%rdi), %eax |
1817 | 1: | |
1818 | movl %esi, %edx /* Load mask */ | |
1819 | orl %eax, %edx | |
316670eb | 1820 | lock cmpxchgl %edx, (%rdi) /* Atomic CAS */ |
b0d623f7 A |
1821 | jne 1b |
1822 | movl %edx, %eax /* Result */ | |
1823 | LEAF_RET | |
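/*
 * Illustrative note (not part of the original source): x86 has no single
 * instruction that ORs and hands back the result, so hw_atomic_or (and
 * hw_atomic_and below) use a cmpxchg retry loop; on failure, cmpxchg
 * reloads %eax with the current memory value, so only the OR is redone.
 * Rough C equivalent, with atomic_cas_32 as a hypothetical helper:
 *
 *	uint32_t old, new;
 *	do {
 *		old = *dest;
 *		new = old | mask;
 *	} while (!atomic_cas_32(dest, old, new));
 *	return new;
 */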
1824 | /* | |
1825 | * A variant of hw_atomic_or which doesn't return a value. | |
1826 | * The implementation is thus comparatively more efficient. | |
1827 | */ | |
1828 | ||
1829 | LEAF_ENTRY(hw_atomic_or_noret) | |
316670eb A |
1830 | #if MACH_LDEBUG |
1831 | test $3, %rdi | |
1832 | jz 1f | |
1833 | ud2 | |
1834 | 1: | |
1835 | #endif | |
b0d623f7 A |
1836 | lock |
1837 | orl %esi, (%rdi) /* Atomic OR */ | |
1838 | LEAF_RET | |
1839 | ||
1840 | ||
1841 | LEAF_ENTRY(hw_atomic_and) | |
316670eb A |
1842 | #if MACH_LDEBUG |
1843 | test $3, %rdi | |
1844 | jz 1f | |
1845 | ud2 | |
1846 | 1: | |
1847 | #endif | |
b0d623f7 A |
1848 | movl (%rdi), %eax |
1849 | 1: | |
1850 | movl %esi, %edx /* Load mask */ | |
1851 | andl %eax, %edx | |
316670eb | 1852 | lock cmpxchgl %edx, (%rdi) /* Atomic CAS */ |
b0d623f7 A |
1853 | jne 1b |
1854 | movl %edx, %eax /* Result */ | |
1855 | LEAF_RET | |
1856 | /* | |
1857 | * A variant of hw_atomic_and which doesn't return a value. | |
1858 | * The implementation is thus comparatively more efficient. | |
1859 | */ | |
1860 | ||
1861 | LEAF_ENTRY(hw_atomic_and_noret) | |
316670eb A |
1862 | #if MACH_LDEBUG |
1863 | test $3, %rdi | |
1864 | jz 1f | |
1865 | ud2 | |
1866 | 1: | |
1867 | #endif | |
fe8ab488 | 1868 | lock andl %esi, (%rdi) /* Atomic AND */
b0d623f7 A |
1869 | LEAF_RET |
1870 |