]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/i386_lock.s
xnu-4570.41.2.tar.gz
[apple/xnu.git] / osfmk / i386 / i386_lock.s
CommitLineData
1c79356b 1/*
39236c6e 2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1989 Carnegie-Mellon University
34 * All rights reserved. The CMU software License Agreement specifies
35 * the terms and conditions for use and redistribution.
36 */
37
1c79356b
A
38#include <mach_ldebug.h>
39#include <i386/asm.h>
2d21ac55
A
40#include <i386/eflags.h>
41#include <i386/trap.h>
42#include <config_dtrace.h>
b0d623f7
A
43#include <i386/mp.h>
44
9bccf70c 45#include "assym.s"
1c79356b 46
91447636
A
47#define PAUSE rep; nop
48
6d2010ae 49#include <i386/pal_lock_asm.h>
b0d623f7 50
91447636
A
51#define LEAF_ENTRY(name) \
52 Entry(name)
53
54#define LEAF_ENTRY2(n1,n2) \
55 Entry(n1); \
56 Entry(n2)
57
58#define LEAF_RET \
59 ret
1c79356b 60
91447636
A
61/* Non-leaf routines always have a stack frame: */
62
63#define NONLEAF_ENTRY(name) \
64 Entry(name); \
fe8ab488 65 FRAME
91447636
A
66
67#define NONLEAF_ENTRY2(n1,n2) \
68 Entry(n1); \
69 Entry(n2); \
fe8ab488 70 FRAME
91447636
A
71
72#define NONLEAF_RET \
73 EMARF; \
74 ret
1c79356b
A
75
76
b0d623f7
A
77/* For x86_64, the varargs ABI requires that %al indicate
78 * how many SSE register contain arguments. In our case, 0 */
6d2010ae 79#define ALIGN_STACK() and $0xFFFFFFFFFFFFFFF0, %rsp ;
b0d623f7 80#define LOAD_STRING_ARG0(label) leaq label(%rip), %rdi ;
6d2010ae
A
81#define LOAD_ARG1(x) mov x, %esi ;
82#define LOAD_PTR_ARG1(x) mov x, %rsi ;
b0d623f7 83#define CALL_PANIC() xorb %al,%al ; call EXT(panic) ;
1c79356b 84
b0d623f7
A
85#define CHECK_UNLOCK(current, owner) \
86 cmp current, owner ; \
87 je 1f ; \
6d2010ae 88 ALIGN_STACK() ; \
b0d623f7
A
89 LOAD_STRING_ARG0(2f) ; \
90 CALL_PANIC() ; \
91 hlt ; \
92 .data ; \
932: String "Mutex unlock attempted from non-owner thread"; \
94 .text ; \
951:
1c79356b
A
96
97#if MACH_LDEBUG
98/*
99 * Routines for general lock debugging.
100 */
1c79356b
A
101
102/*
103 * Checks for expected lock types and calls "panic" on
104 * mismatch. Detects calls to Mutex functions with
105 * type simplelock and vice versa.
106 */
107#define CHECK_MUTEX_TYPE() \
9bccf70c 108 cmpl $ MUTEX_TAG,M_TYPE ; \
1c79356b 109 je 1f ; \
6d2010ae 110 ALIGN_STACK() ; \
b0d623f7
A
111 LOAD_STRING_ARG0(2f) ; \
112 CALL_PANIC() ; \
1c79356b
A
113 hlt ; \
114 .data ; \
1152: String "not a mutex!" ; \
116 .text ; \
1171:
118
5ba3f43e
A
119#define CHECK_MYLOCK(current, owner) \
120 cmp current, owner ; \
121 jne 1f ; \
122 ALIGN_STACK() ; \
123 LOAD_STRING_ARG0(2f) ; \
124 CALL_PANIC() ; \
125 hlt ; \
126 .data ; \
1272: String "Attempt to recursively lock a non-recursive lock"; \
128 .text ; \
1291:
130
131#else /* MACH_LDEBUG */
132#define CHECK_MUTEX_TYPE()
133#define CHECK_MYLOCK(thd)
134#endif /* MACH_LDEBUG */
135
136#if DEVELOPMENT || DEBUG
1c79356b
A
137/*
138 * If one or more simplelocks are currently held by a thread,
139 * an attempt to acquire a mutex will cause this check to fail
140 * (since a mutex lock may context switch, holding a simplelock
141 * is not a good thing).
142 */
1c79356b 143#define CHECK_PREEMPTION_LEVEL() \
91447636 144 cmpl $0,%gs:CPU_PREEMPTION_LEVEL ; \
1c79356b 145 je 1f ; \
5ba3f43e
A
146 cmpl $0,EXT(LckDisablePreemptCheck)(%rip) ; \
147 jne 1f ; \
148 cmpl $0,%gs:CPU_HIBERNATE ; \
149 jne 1f ; \
6d2010ae
A
150 ALIGN_STACK() ; \
151 movl %gs:CPU_PREEMPTION_LEVEL, %eax ; \
152 LOAD_ARG1(%eax) ; \
b0d623f7
A
153 LOAD_STRING_ARG0(2f) ; \
154 CALL_PANIC() ; \
1c79356b
A
155 hlt ; \
156 .data ; \
b0d623f7 1572: String "preemption_level(%d) != 0!" ; \
1c79356b
A
158 .text ; \
1591:
5ba3f43e 160#else /* DEVELOPMENT || DEBUG */
1c79356b 161#define CHECK_PREEMPTION_LEVEL()
5ba3f43e 162#endif /* DEVELOPMENT || DEBUG */
1c79356b 163
2d21ac55 164#define PREEMPTION_DISABLE \
6d2010ae
A
165 incl %gs:CPU_PREEMPTION_LEVEL
166
6d2010ae 167#define PREEMPTION_LEVEL_DEBUG 1
6d2010ae 168#if PREEMPTION_LEVEL_DEBUG
2d21ac55
A
169#define PREEMPTION_ENABLE \
170 decl %gs:CPU_PREEMPTION_LEVEL ; \
6d2010ae
A
171 js 17f ; \
172 jnz 19f ; \
173 testl $AST_URGENT,%gs:CPU_PENDING_AST ; \
174 jz 19f ; \
b0d623f7 175 PUSHF ; \
6d2010ae
A
176 testl $EFL_IF, S_PC ; \
177 jz 18f ; \
b0d623f7 178 POPF ; \
2d21ac55 179 int $(T_PREEMPT) ; \
6d2010ae
A
180 jmp 19f ; \
18117: \
182 call _preemption_underflow_panic ; \
18318: \
b0d623f7 184 POPF ; \
6d2010ae
A
18519:
186#else
187#define PREEMPTION_ENABLE \
188 decl %gs:CPU_PREEMPTION_LEVEL ; \
189 jnz 19f ; \
190 testl $AST_URGENT,%gs:CPU_PENDING_AST ; \
191 jz 19f ; \
192 PUSHF ; \
193 testl $EFL_IF, S_PC ; \
194 jz 18f ; \
195 POPF ; \
196 int $(T_PREEMPT) ; \
197 jmp 19f ; \
19818: \
199 POPF ; \
20019:
201#endif
2d21ac55 202
2d21ac55
A
203
204#if CONFIG_DTRACE
b0d623f7
A
205
206 .globl _lockstat_probe
207 .globl _lockstat_probemap
208
209/*
210 * LOCKSTAT_LABEL creates a dtrace symbol which contains
211 * a pointer into the lock code function body. At that
212 * point is a "ret" instruction that can be patched into
213 * a "nop"
214 */
215
b0d623f7
A
216#define LOCKSTAT_LABEL(lab) \
217 .data ;\
218 .globl lab ;\
219 lab: ;\
220 .quad 9f ;\
221 .text ;\
222 9:
223
224#define LOCKSTAT_RECORD(id, lck) \
225 push %rbp ; \
226 mov %rsp,%rbp ; \
227 movl _lockstat_probemap + (id * 4)(%rip),%eax ; \
228 test %eax,%eax ; \
229 je 9f ; \
230 mov lck, %rsi ; \
231 mov %rax, %rdi ; \
232 mov $0, %rdx ; \
233 mov $0, %rcx ; \
234 mov $0, %r8 ; \
235 mov $0, %r9 ; \
236 call *_lockstat_probe(%rip) ; \
2d21ac55
A
2379: leave
238 /* ret - left to subsequent code, e.g. return values */
39236c6e 239
b0d623f7 240#endif /* CONFIG_DTRACE */
2d21ac55 241
b0d623f7
A
242/*
243 * For most routines, the hw_lock_t pointer is loaded into a
244 * register initially, and then either a byte or register-sized
245 * word is loaded/stored to the pointer
246 */
2d21ac55
A
247
248/*
316670eb 249 * void hw_lock_byte_init(volatile uint8_t *)
2d21ac55
A
250 *
251 * Initialize a hardware byte lock.
252 */
/*
 * void hw_lock_byte_init(volatile uint8_t *lock_byte)
 *
 * In:  %rdi = pointer to the byte lock
 * Initializes the lock to the released (zero) state.
 * (Reconstructed from blame-mangled residue; instruction
 * sequence unchanged.)
 */
LEAF_ENTRY(hw_lock_byte_init)
	movb	$0, (%rdi)		/* clear the lock byte */
	LEAF_RET
1c79356b 256
2d21ac55
A
257/*
258 * void hw_lock_byte_lock(uint8_t *lock_byte)
259 *
260 * Acquire byte sized lock operand, spinning until it becomes available.
5ba3f43e 261 * return with preemption disabled.
2d21ac55
A
262 */
263
/*
 * void hw_lock_byte_lock(uint8_t *lock_byte)
 *
 * In:  %rdi = pointer to the byte lock
 * Spins until the byte can be atomically changed 0 -> 1.
 * Returns with the lock held and preemption disabled
 * (PREEMPTION_DISABLE is taken before the first attempt and
 * never dropped on the success path).
 */
LEAF_ENTRY(hw_lock_byte_lock)
	PREEMPTION_DISABLE		/* hold off preemption while spinning and while held */
	movl	$1, %ecx		/* value to install: locked */
1:
	movb	(%rdi), %al		/* snapshot the lock byte */
	testb	%al, %al		/* already held? */
	jne	3f			/* yes - go spin */
	lock; cmpxchg %cl, (%rdi)	/* try to swap 0 -> 1 atomically */
	jne	3f			/* lost the race - go spin */
	LEAF_RET			/* acquired; preemption stays disabled */
3:
	PAUSE				/* hyper-threading friendly spin */
	jmp	1b			/* retry */
277
2d21ac55
A
278/*
279 * void hw_lock_byte_unlock(uint8_t *lock_byte)
280 *
5ba3f43e
A
281 * Unconditionally release byte sized lock operand,
282 * release preemption level.
2d21ac55 283 */
1c79356b 284
/*
 * void hw_lock_byte_unlock(uint8_t *lock_byte)
 *
 * In:  %rdi = pointer to the byte lock
 * Unconditionally releases the byte lock and drops the
 * preemption level taken by hw_lock_byte_lock.
 */
LEAF_ENTRY(hw_lock_byte_unlock)
	movb	$0, (%rdi)		/* clear the lock byte */
	PREEMPTION_ENABLE		/* drop preemption level taken at lock time */
	LEAF_RET
2d21ac55 289
b0d623f7
A
290/*
291 * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
292 * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained
293 * as a 64-bit quantity (this matches the existing PowerPC implementation,
294 * and the new x86 specific statistics are also maintained as 32-bit
295 * quantities).
296 *
297 *
298 * Enable this preprocessor define to record the first miss alone
299 * By default, we count every miss, hence multiple misses may be
300 * recorded for a single lock acquire attempt via lck_mtx_lock
301 */
302#undef LOG_FIRST_MISS_ALONE
1c79356b 303
b0d623f7
A
304/*
305 * This preprocessor define controls whether the R-M-W update of the
306 * per-group statistics elements are atomic (LOCK-prefixed)
307 * Enabled by default.
308 */
309#define ATOMIC_STAT_UPDATES 1
1c79356b 310
b0d623f7
A
311#if defined(ATOMIC_STAT_UPDATES)
312#define LOCK_IF_ATOMIC_STAT_UPDATES lock
313#else
314#define LOCK_IF_ATOMIC_STAT_UPDATES
315#endif /* ATOMIC_STAT_UPDATES */
2d21ac55 316
2d21ac55 317
b0d623f7
A
318/*
319 * For most routines, the lck_mtx_t pointer is loaded into a
320 * register initially, and the owner field checked for indirection.
321 * Eventually the lock owner is loaded into a register and examined.
322 */
323
324#define M_OWNER MUTEX_OWNER
325#define M_PTR MUTEX_PTR
326#define M_STATE MUTEX_STATE
327
b0d623f7
A
328
329#define LMTX_ENTER_EXTENDED \
fe8ab488
A
330 mov M_PTR(%rdx), %rdx ; \
331 xor %r11, %r11 ; \
332 mov MUTEX_GRP(%rdx), %r10 ; \
b0d623f7 333 LOCK_IF_ATOMIC_STAT_UPDATES ; \
fe8ab488 334 incq GRP_MTX_STAT_UTIL(%r10)
b0d623f7
A
335
336
337#if LOG_FIRST_MISS_ALONE
338#define LMTX_UPDATE_MISS \
fe8ab488 339 test $1, %r11 ; \
b0d623f7
A
340 jnz 11f ; \
341 LOCK_IF_ATOMIC_STAT_UPDATES ; \
fe8ab488
A
342 incl GRP_MTX_STAT_MISS(%r10) ; \
343 or $1, %r11 ; \
b0d623f7
A
34411:
345#else
346#define LMTX_UPDATE_MISS \
347 LOCK_IF_ATOMIC_STAT_UPDATES ; \
fe8ab488 348 incl GRP_MTX_STAT_MISS(%r10)
b0d623f7
A
349#endif
350
2d21ac55 351
b0d623f7
A
352#if LOG_FIRST_MISS_ALONE
353#define LMTX_UPDATE_WAIT \
fe8ab488 354 test $2, %r11 ; \
b0d623f7
A
355 jnz 11f ; \
356 LOCK_IF_ATOMIC_STAT_UPDATES ; \
fe8ab488
A
357 incl GRP_MTX_STAT_WAIT(%r10) ; \
358 or $2, %r11 ; \
b0d623f7
A
35911:
360#else
361#define LMTX_UPDATE_WAIT \
362 LOCK_IF_ATOMIC_STAT_UPDATES ; \
fe8ab488 363 incl GRP_MTX_STAT_WAIT(%r10)
b0d623f7 364#endif
0c530ab8 365
1c79356b 366
b0d623f7
A
367/*
368 * Record the "direct wait" statistic, which indicates if a
369 * miss proceeded to block directly without spinning--occurs
370 * if the owner of the mutex isn't running on another processor
371 * at the time of the check.
372 */
373#define LMTX_UPDATE_DIRECT_WAIT \
374 LOCK_IF_ATOMIC_STAT_UPDATES ; \
fe8ab488 375 incl GRP_MTX_STAT_DIRECT_WAIT(%r10)
91447636 376
b0d623f7
A
377
378#define LMTX_CALLEXT1(func_name) \
fe8ab488 379 cmp %rdx, %rdi ; \
b0d623f7 380 je 12f ; \
fe8ab488
A
381 push %r10 ; \
382 push %r11 ; \
38312: push %rdi ; \
384 push %rdx ; \
385 mov %rdx, %rdi ; \
b0d623f7 386 call EXT(func_name) ; \
fe8ab488
A
387 pop %rdx ; \
388 pop %rdi ; \
389 cmp %rdx, %rdi ; \
b0d623f7 390 je 12f ; \
fe8ab488
A
391 pop %r11 ; \
392 pop %r10 ; \
b0d623f7
A
39312:
394
395#define LMTX_CALLEXT2(func_name, reg) \
fe8ab488 396 cmp %rdx, %rdi ; \
b0d623f7 397 je 12f ; \
fe8ab488
A
398 push %r10 ; \
399 push %r11 ; \
40012: push %rdi ; \
401 push %rdx ; \
402 mov reg, %rsi ; \
403 mov %rdx, %rdi ; \
b0d623f7 404 call EXT(func_name) ; \
fe8ab488
A
405 pop %rdx ; \
406 pop %rdi ; \
407 cmp %rdx, %rdi ; \
b0d623f7 408 je 12f ; \
fe8ab488
A
409 pop %r11 ; \
410 pop %r10 ; \
b0d623f7 41112:
6d2010ae 412
2d21ac55 413
b0d623f7
A
414#define M_WAITERS_MSK 0x0000ffff
415#define M_PRIORITY_MSK 0x00ff0000
416#define M_ILOCKED_MSK 0x01000000
417#define M_MLOCKED_MSK 0x02000000
418#define M_PROMOTED_MSK 0x04000000
419#define M_SPIN_MSK 0x08000000
420
2d21ac55
A
421/*
422 * void lck_mtx_assert(lck_mtx_t* l, unsigned int)
2d21ac55
A
423 * Takes the address of a lock, and an assertion type as parameters.
424 * The assertion can take one of two forms determine by the type
425 * parameter: either the lock is held by the current thread, and the
426 * type is LCK_MTX_ASSERT_OWNED, or it isn't and the type is
b0d623f7 427 * LCK_MTX_ASSERT_NOTOWNED. Calls panic on assertion failure.
2d21ac55
A
428 *
429 */
430
/*
 * void lck_mtx_assert(lck_mtx_t *l, unsigned int type)
 *
 * In:  %rdi = lock address, %rsi = assertion type
 * If type == MUTEX_ASSERT_OWNED, panics unless the current
 * thread owns the mutex (owner matches and ILOCKED/MLOCKED set);
 * otherwise panics if the current thread *does* own it.
 * Follows M_PTR indirection for MUTEX_IND locks.
 * Does not return on assertion failure (CALL_PANIC).
 */
NONLEAF_ENTRY(lck_mtx_assert)
	mov	%rdi, %rdx			/* %rdx = lock address */
	mov	%gs:CPU_ACTIVE_THREAD, %rax	/* %rax = current thread */

	mov	M_STATE(%rdx), %ecx
	cmp	$(MUTEX_IND), %ecx		/* indirect mutex? */
	jne	0f
	mov	M_PTR(%rdx), %rdx		/* yes - chase the real lock */
0:
	mov	M_OWNER(%rdx), %rcx		/* %rcx = recorded owner */
	cmp	$(MUTEX_ASSERT_OWNED), %rsi
	jne	2f				/* asserting NOT-owned instead */
	cmp	%rax, %rcx			/* current thread the owner? */
	jne	3f				/* no - panic "not owned" */
	testl	$(M_ILOCKED_MSK | M_MLOCKED_MSK), M_STATE(%rdx)
	je	3f				/* owner set but lock bits clear - panic */
1:						/* assertion satisfied */
	NONLEAF_RET
2:
	cmp	%rax, %rcx			/* current thread the owner? */
	jne	1b				/* no - NOT-owned holds, return */
	ALIGN_STACK()
	LOAD_PTR_ARG1(%rdx)
	LOAD_STRING_ARG0(mutex_assert_owned_str)
	jmp	4f
3:
	ALIGN_STACK()
	LOAD_PTR_ARG1(%rdx)
	LOAD_STRING_ARG0(mutex_assert_not_owned_str)
4:
	CALL_PANIC()
462
463
/*
 * Common panic target for lock paths that observe a mutex in
 * the MUTEX_DESTROYED state.  In: %rdx = lock address.
 * Never returns.
 */
lck_mtx_destroyed:
	ALIGN_STACK()
	LOAD_PTR_ARG1(%rdx)
	LOAD_STRING_ARG0(mutex_interlock_destroyed_str)
	CALL_PANIC()
469
2d21ac55
A
470
471.data
472mutex_assert_not_owned_str:
473 .asciz "mutex (%p) not owned\n"
474mutex_assert_owned_str:
475 .asciz "mutex (%p) owned\n"
b0d623f7
A
476mutex_interlock_destroyed_str:
477 .asciz "trying to interlock destroyed mutex (%p)"
2d21ac55
A
478.text
479
2d21ac55
A
480
481
91447636
A
482/*
483 * lck_mtx_lock()
484 * lck_mtx_try_lock()
b0d623f7 485 * lck_mtx_unlock()
2d21ac55 486 * lck_mtx_lock_spin()
6d2010ae 487 * lck_mtx_lock_spin_always()
39236c6e
A
488 * lck_mtx_try_lock_spin()
489 * lck_mtx_try_lock_spin_always()
2d21ac55 490 * lck_mtx_convert_spin()
91447636 491 */
/*
 * void lck_mtx_lock_spin(lck_mtx_t *l)
 * void lck_mtx_lock_spin_always(lck_mtx_t *l)
 *
 * In:  %rdi = lock address (kept for extended-mutex comparison);
 *      %rdx = working lock pointer.
 * Acquires the lock in "spin" mode: sets M_ILOCKED | M_SPIN via
 * lock-prefixed cmpxchg and returns with the interlock held and
 * preemption disabled.  The _always variant skips
 * CHECK_PREEMPTION_LEVEL().  Falls through to Llml_contended
 * (in lck_mtx_lock) when the mutex itself is held.
 */
NONLEAF_ENTRY(lck_mtx_lock_spin_always)
	mov	%rdi, %rdx			/* fetch lock pointer */
	jmp	Llmls_avoid_check

NONLEAF_ENTRY(lck_mtx_lock_spin)
	mov	%rdi, %rdx			/* fetch lock pointer */

	CHECK_PREEMPTION_LEVEL()
Llmls_avoid_check:
	mov	M_STATE(%rdx), %ecx
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx	/* interlock or mutex held? */
	jnz	Llmls_slow
Llmls_try:					/* no - can't be INDIRECT, DESTROYED or locked */
	mov	%rcx, %rax			/* %eax = snapshot for cmpxchg */
	or	$(M_ILOCKED_MSK | M_SPIN_MSK), %ecx

	PREEMPTION_DISABLE
	lock
	cmpxchg %ecx, M_STATE(%rdx)		/* atomic compare and exchange */
	jne	Llmls_busy_disabled

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)		/* record owner of interlock */
#if	MACH_LDEBUG
	test	%rax, %rax
	jz	1f
	incl	TH_MUTEX_COUNT(%rax)		/* lock statistic */
1:
#endif	/* MACH_LDEBUG */

	/* return with the interlock held and preemption disabled */
	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_lck_mtx_lock_spin_lockstat_patch_point)
	ret
	/* inherit lock pointer in %rdx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, %rdx)
#endif
	ret

Llmls_slow:
	test	$M_ILOCKED_MSK, %ecx		/* interlock held? */
	jz	Llml_contended			/* no - must have been the mutex */

	cmp	$(MUTEX_DESTROYED), %ecx	/* marked destroyed? */
	je	lck_mtx_destroyed
	cmp	$(MUTEX_IND), %ecx		/* indirect mutex? */
	jne	Llmls_loop			/* no - plain interlocked, spin */

	LMTX_ENTER_EXTENDED

	mov	M_STATE(%rdx), %ecx
	test	$(M_SPIN_MSK), %ecx
	jz	Llmls_loop1

	LMTX_UPDATE_MISS			/* M_SPIN_MSK set, so M_ILOCKED_MSK must also be present */
Llmls_loop:
	PAUSE
	mov	M_STATE(%rdx), %ecx
Llmls_loop1:
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
	jz	Llmls_try
	test	$(M_MLOCKED_MSK), %ecx
	jnz	Llml_contended			/* mutex owned by someone else - contend */
	jmp	Llmls_loop

Llmls_busy_disabled:
	PREEMPTION_ENABLE
	jmp	Llmls_loop
2d21ac55 561
9bccf70c 562
6d2010ae
A
563
/*
 * void lck_mtx_lock(lck_mtx_t *l)
 *
 * In:  %rdi = lock address (preserved to distinguish extended
 *      mutexes); %rdx = working lock pointer.
 * Fast path: lock-prefixed cmpxchg sets M_ILOCKED | M_MLOCKED,
 * records the owner, then drops the interlock and re-enables
 * preemption.  Slow path (Llml_slow / Llml_contended) handles
 * DESTROYED/IND states, spin-waits via
 * lck_mtx_lock_spinwait_x86, and blocks via
 * lck_mtx_lock_wait_x86.  Also hosts Llml_contended, which the
 * spin-lock variants jump into.
 */
NONLEAF_ENTRY(lck_mtx_lock)
	mov	%rdi, %rdx			/* fetch lock pointer */

	CHECK_PREEMPTION_LEVEL()

	mov	M_STATE(%rdx), %ecx
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx	/* interlock or mutex held? */
	jnz	Llml_slow
Llml_try:					/* no - can't be INDIRECT, DESTROYED or locked */
	mov	%rcx, %rax			/* %eax = snapshot for cmpxchg */
	or	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx

	PREEMPTION_DISABLE
	lock
	cmpxchg %ecx, M_STATE(%rdx)		/* atomic compare and exchange */
	jne	Llml_busy_disabled

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)		/* record owner of mutex */
#if	MACH_LDEBUG
	test	%rax, %rax
	jz	1f
	incl	TH_MUTEX_COUNT(%rax)		/* lock statistic */
1:
#endif	/* MACH_LDEBUG */

	testl	$(M_WAITERS_MSK), M_STATE(%rdx)
	jz	Llml_finish

	LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)

Llml_finish:
	andl	$(~M_ILOCKED_MSK), M_STATE(%rdx)	/* drop the interlock */
	PREEMPTION_ENABLE

	cmp	%rdx, %rdi			/* extended mutex? */
	jne	2f

	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_lck_mtx_lock_lockstat_patch_point)
	ret
	/* inherit lock pointer in %rdx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, %rdx)
#endif
	ret
2:
	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point)
	ret
	/* inherit lock pointer in %rdx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, %rdx)
#endif
	ret


Llml_slow:
	test	$M_ILOCKED_MSK, %ecx		/* interlock held? */
	jz	Llml_contended			/* no - must have been the mutex */

	cmp	$(MUTEX_DESTROYED), %ecx	/* marked destroyed? */
	je	lck_mtx_destroyed
	cmp	$(MUTEX_IND), %ecx		/* indirect mutex? */
	jne	Llml_loop			/* no - plain interlocked, spin */

	LMTX_ENTER_EXTENDED

	mov	M_STATE(%rdx), %ecx
	test	$(M_SPIN_MSK), %ecx
	jz	Llml_loop1

	LMTX_UPDATE_MISS			/* M_SPIN_MSK set, so M_ILOCKED_MSK must also be present */
Llml_loop:
	PAUSE
	mov	M_STATE(%rdx), %ecx
Llml_loop1:
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
	jz	Llml_try
	test	$(M_MLOCKED_MSK), %ecx
	jnz	Llml_contended			/* mutex owned by someone else - contend */
	jmp	Llml_loop

Llml_busy_disabled:
	PREEMPTION_ENABLE
	jmp	Llml_loop


Llml_contended:
	cmp	%rdx, %rdi			/* extended mutex? */
	je	0f
	LMTX_UPDATE_MISS
0:
	LMTX_CALLEXT1(lck_mtx_lock_spinwait_x86)

	test	%rax, %rax
	jz	Llml_acquired			/* acquired: interlock held, preemption disabled */

	cmp	$1, %rax			/* direct-wait status? */
	je	2f
	cmp	%rdx, %rdi			/* extended mutex? */
	je	2f
	LMTX_UPDATE_DIRECT_WAIT
2:
	mov	M_STATE(%rdx), %ecx
	test	$(M_ILOCKED_MSK), %ecx
	jnz	6f

	mov	%rcx, %rax			/* %eax = snapshot for cmpxchg */
	or	$(M_ILOCKED_MSK), %ecx		/* try to take the interlock */

	PREEMPTION_DISABLE
	lock
	cmpxchg %ecx, M_STATE(%rdx)		/* atomic compare and exchange */
	jne	5f

	test	$(M_MLOCKED_MSK), %ecx		/* we've got the interlock and */
	jnz	3f
	or	$(M_MLOCKED_MSK), %ecx		/* the mutex is free... grab it directly */
	mov	%ecx, M_STATE(%rdx)

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)		/* record owner of mutex */
#if	MACH_LDEBUG
	test	%rax, %rax
	jz	1f
	incl	TH_MUTEX_COUNT(%rax)		/* lock statistic */
1:
#endif	/* MACH_LDEBUG */

Llml_acquired:
	testl	$(M_WAITERS_MSK), M_STATE(%rdx)
	jnz	1f
	mov	M_OWNER(%rdx), %rax
	mov	TH_WAS_PROMOTED_ON_WAKEUP(%rax), %eax
	test	%eax, %eax
	jz	Llml_finish
1:
	LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
	jmp	Llml_finish

3:						/* interlock held, mutex busy */
	cmp	%rdx, %rdi			/* extended mutex? */
	je	4f
	LMTX_UPDATE_WAIT
4:
	LMTX_CALLEXT1(lck_mtx_lock_wait_x86)
	jmp	Llml_contended
5:
	PREEMPTION_ENABLE
6:
	PAUSE
	jmp	2b
2d21ac55
A
717
718
/*
 * boolean_t lck_mtx_try_lock_spin(lck_mtx_t *l)
 * boolean_t lck_mtx_try_lock_spin_always(lck_mtx_t *l)
 *
 * In:  %rdi = lock address.  Out: %rax = 1 on success (returns
 * with interlock held in spin mode and preemption disabled),
 * 0 via Llmts_fail when the lock is busy.  Non-blocking.
 */
NONLEAF_ENTRY(lck_mtx_try_lock_spin_always)
	mov	%rdi, %rdx			/* fetch lock pointer */
	jmp	Llmts_avoid_check

NONLEAF_ENTRY(lck_mtx_try_lock_spin)
	mov	%rdi, %rdx			/* fetch lock pointer */

Llmts_avoid_check:
	mov	M_STATE(%rdx), %ecx
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx	/* interlock or mutex held? */
	jnz	Llmts_slow
Llmts_try:					/* no - can't be INDIRECT, DESTROYED or locked */
	mov	%rcx, %rax			/* %eax = snapshot for cmpxchg */
	or	$(M_ILOCKED_MSK | M_SPIN_MSK), %rcx

	PREEMPTION_DISABLE
	lock
	cmpxchg %ecx, M_STATE(%rdx)		/* atomic compare and exchange */
	jne	Llmts_busy_disabled

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)		/* record owner of mutex */
#if	MACH_LDEBUG
	test	%rax, %rax
	jz	1f
	incl	TH_MUTEX_COUNT(%rax)		/* lock statistic */
1:
#endif	/* MACH_LDEBUG */

	leave

#if	CONFIG_DTRACE
	mov	$1, %rax			/* return success */
	LOCKSTAT_LABEL(_lck_mtx_try_lock_spin_lockstat_patch_point)
	ret
	/* inherit lock pointer in %rdx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, %rdx)
#endif
	mov	$1, %rax			/* return success */
	ret

Llmts_slow:
	test	$(M_ILOCKED_MSK), %ecx		/* interlock held? */
	jz	Llmts_fail			/* no - held as a full mutex, fail */

	cmp	$(MUTEX_DESTROYED), %ecx	/* marked destroyed? */
	je	lck_mtx_destroyed
	cmp	$(MUTEX_IND), %ecx		/* indirect mutex? */
	jne	Llmts_loop1

	LMTX_ENTER_EXTENDED
Llmts_loop:
	PAUSE
	mov	M_STATE(%rdx), %ecx
Llmts_loop1:
	test	$(M_MLOCKED_MSK | M_SPIN_MSK), %ecx
	jnz	Llmts_fail
	test	$(M_ILOCKED_MSK), %ecx
	jz	Llmts_try
	jmp	Llmts_loop

Llmts_busy_disabled:
	PREEMPTION_ENABLE
	jmp	Llmts_loop
783
784
785
/*
 * boolean_t lck_mtx_try_lock(lck_mtx_t *l)
 *
 * In:  %rdi = lock address.  Out: %rax = 1 on success (mutex
 * acquired, interlock dropped, preemption re-enabled), 0 via
 * Llmt_fail when the lock is busy.  Non-blocking.
 */
NONLEAF_ENTRY(lck_mtx_try_lock)
	mov	%rdi, %rdx			/* fetch lock pointer */

	mov	M_STATE(%rdx), %ecx
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx	/* interlock or mutex held? */
	jnz	Llmt_slow
Llmt_try:					/* no - can't be INDIRECT, DESTROYED or locked */
	mov	%rcx, %rax			/* %eax = snapshot for cmpxchg */
	or	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx

	PREEMPTION_DISABLE
	lock
	cmpxchg %ecx, M_STATE(%rdx)		/* atomic compare and exchange */
	jne	Llmt_busy_disabled

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)		/* record owner of mutex */
#if	MACH_LDEBUG
	test	%rax, %rax
	jz	1f
	incl	TH_MUTEX_COUNT(%rax)		/* lock statistic */
1:
#endif	/* MACH_LDEBUG */

	test	$(M_WAITERS_MSK), %ecx
	jz	0f

	LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
0:
	andl	$(~M_ILOCKED_MSK), M_STATE(%rdx)	/* drop the interlock */
	PREEMPTION_ENABLE

	leave
#if	CONFIG_DTRACE
	mov	$1, %rax			/* return success */
	/* Dtrace probe: LS_LCK_MTX_TRY_LOCK_ACQUIRE */
	LOCKSTAT_LABEL(_lck_mtx_try_lock_lockstat_patch_point)
	ret
	/* inherit lock pointer in %rdx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, %rdx)
#endif
	mov	$1, %rax			/* return success */
	ret

Llmt_slow:
	test	$(M_ILOCKED_MSK), %ecx		/* interlock held? */
	jz	Llmt_fail			/* no - held as a full mutex, fail */

	cmp	$(MUTEX_DESTROYED), %ecx	/* marked destroyed? */
	je	lck_mtx_destroyed
	cmp	$(MUTEX_IND), %ecx		/* indirect mutex? */
	jne	Llmt_loop

	LMTX_ENTER_EXTENDED
Llmt_loop:
	PAUSE
	mov	M_STATE(%rdx), %ecx
Llmt_loop1:
	test	$(M_MLOCKED_MSK | M_SPIN_MSK), %ecx
	jnz	Llmt_fail
	test	$(M_ILOCKED_MSK), %ecx
	jz	Llmt_try
	jmp	Llmt_loop

Llmt_busy_disabled:
	PREEMPTION_ENABLE
	jmp	Llmt_loop
853
0c530ab8
A
854
/*
 * Shared failure exit for lck_mtx_try_lock / lck_mtx_try_lock_spin.
 * Records a miss for extended mutexes and returns 0 in %rax.
 */
Llmt_fail:
Llmts_fail:
	cmp	%rdx, %rdi			/* extended mutex? */
	je	0f
	LMTX_UPDATE_MISS
0:
	xor	%rax, %rax			/* return failure */
	NONLEAF_RET
1c79356b 863
2d21ac55
A
864
865
/*
 * void lck_mtx_convert_spin(lck_mtx_t *l)
 *
 * In:  %rdi = lock address.  Converts a lock held in spin mode
 * (M_ILOCKED | M_SPIN) into a fully held mutex (M_MLOCKED) and
 * re-enables preemption.  No-op if already held as a mutex.
 * The non-atomic M_STATE store is safe because the caller owns
 * the interlock.
 */
NONLEAF_ENTRY(lck_mtx_convert_spin)
	mov	%rdi, %rdx			/* fetch lock pointer */

	mov	M_STATE(%rdx), %ecx
	cmp	$(MUTEX_IND), %ecx		/* indirect mutex? */
	jne	0f
	mov	M_PTR(%rdx), %rdx		/* yes - chase the real lock */
	mov	M_STATE(%rdx), %ecx
0:
	test	$(M_MLOCKED_MSK), %ecx		/* already owned as a mutex? just return */
	jnz	2f
	test	$(M_WAITERS_MSK), %ecx		/* any waiters? */
	jz	1f

	LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
	mov	M_STATE(%rdx), %ecx
1:
	and	$(~(M_ILOCKED_MSK | M_SPIN_MSK)), %ecx	/* spin -> mutex */
	or	$(M_MLOCKED_MSK), %ecx
	mov	%ecx, M_STATE(%rdx)		/* interlock owned - no atomic update needed */

	PREEMPTION_ENABLE
2:
	NONLEAF_RET
2d21ac55 890
6d2010ae 891
2d21ac55 892
/*
 * void lck_mtx_unlock(lck_mtx_t *l)
 *
 * In:  %rdi = lock address.  Grabs the interlock (spinning via
 * Llmu_busy if needed), clears the owner, rebuilds M_STATE with
 * ILOCKED/SPIN/PROMOTED cleared and the waiter count decremented,
 * wakes waiters via lck_mtx_unlock_wakeup_x86 when
 * PROMOTED/WAITERS were set, then re-enables preemption.
 * Llmu_ext handles indirect mutexes, including the
 * non-owner-unlock panic check (CHECK_UNLOCK).
 */
NONLEAF_ENTRY(lck_mtx_unlock)
	mov	%rdi, %rdx			/* fetch lock pointer */
Llmu_entry:
	mov	M_STATE(%rdx), %ecx
Llmu_prim:
	cmp	$(MUTEX_IND), %ecx		/* indirect mutex? */
	je	Llmu_ext

Llmu_chktype:
	test	$(M_MLOCKED_MSK), %ecx		/* held as a full mutex? */
	jz	Llmu_unlock			/* no - spin-mode, already interlocked */
Llmu_mutex:
	test	$(M_ILOCKED_MSK), %rcx		/* must wait for interlock to clear */
	jnz	Llmu_busy

	mov	%rcx, %rax			/* %eax = snapshot for cmpxchg */
	and	$(~M_MLOCKED_MSK), %ecx		/* drop mutex */
	or	$(M_ILOCKED_MSK), %ecx		/* pick up interlock */

	PREEMPTION_DISABLE
	lock
	cmpxchg %ecx, M_STATE(%rdx)		/* atomic compare and exchange */
	jne	Llmu_busy_disabled		/* failed - back to spin loop */

Llmu_unlock:
	xor	%rax, %rax
	mov	%rax, M_OWNER(%rdx)		/* clear the owner */
	mov	%rcx, %rax			/* keep original state in %ecx for later evaluation */
	and	$(~(M_ILOCKED_MSK | M_SPIN_MSK | M_PROMOTED_MSK)), %rax

	test	$(M_WAITERS_MSK), %eax
	jz	2f
	dec	%eax				/* decrement waiter count */
2:
	mov	%eax, M_STATE(%rdx)		/* interlock owned - no atomic update needed */

#if	MACH_LDEBUG
	/* perform lock statistics after drop to prevent delay */
	mov	%gs:CPU_ACTIVE_THREAD, %rax
	test	%rax, %rax
	jz	1f
	decl	TH_MUTEX_COUNT(%rax)		/* lock statistic */
1:
#endif	/* MACH_LDEBUG */

	test	$(M_PROMOTED_MSK | M_WAITERS_MSK), %ecx
	jz	3f

	LMTX_CALLEXT2(lck_mtx_unlock_wakeup_x86, %rcx)
3:
	PREEMPTION_ENABLE

	cmp	%rdx, %rdi			/* extended mutex? */
	jne	4f

	leave
#if	CONFIG_DTRACE
	/* Dtrace: LS_LCK_MTX_UNLOCK_RELEASE */
	LOCKSTAT_LABEL(_lck_mtx_unlock_lockstat_patch_point)
	ret
	/* inherit lock pointer in %rdx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %rdx)
#endif
	ret
4:
	leave
#if	CONFIG_DTRACE
	/* Dtrace: LS_LCK_MTX_EXT_UNLOCK_RELEASE */
	LOCKSTAT_LABEL(_lck_mtx_ext_unlock_lockstat_patch_point)
	ret
	/* inherit lock pointer in %rdx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, %rdx)
#endif
	ret


Llmu_busy_disabled:
	PREEMPTION_ENABLE
Llmu_busy:
	PAUSE
	mov	M_STATE(%rdx), %ecx
	jmp	Llmu_mutex

Llmu_ext:
	mov	M_PTR(%rdx), %rdx		/* chase the real lock */
	mov	M_OWNER(%rdx), %rax
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	CHECK_UNLOCK(%rcx, %rax)		/* panic if not unlocked by owner */
	mov	M_STATE(%rdx), %ecx
	jmp	Llmu_chktype
b0d623f7 983
1c79356b 984
2d21ac55 985
3e170ce0
A
/*
 * boolean_t lck_mtx_ilk_try_lock(lck_mtx_t *l)
 *
 * In:  %rdi = lock address (no indirection here).
 * Out: %rax = 1 on success (interlock held, preemption
 * disabled), 0 on failure (preemption level unchanged).
 * Non-blocking single attempt.
 */
LEAF_ENTRY(lck_mtx_ilk_try_lock)
	mov	%rdi, %rdx			/* fetch lock pointer - no indirection here */

	mov	M_STATE(%rdx), %ecx

	test	$(M_ILOCKED_MSK), %ecx		/* can't have the interlock yet */
	jnz	3f

	mov	%rcx, %rax			/* %eax = snapshot for cmpxchg */
	or	$(M_ILOCKED_MSK), %ecx

	PREEMPTION_DISABLE
	lock
	cmpxchg %ecx, M_STATE(%rdx)		/* atomic compare and exchange */
	jne	2f				/* failure: re-enable preemption first */

	mov	$1, %rax			/* success, preemption stays disabled */
	LEAF_RET
2:
	PREEMPTION_ENABLE			/* undo the disable taken above */
3:
	xor	%rax, %rax			/* return failure */
	LEAF_RET
1009
1010
/*
 * void lck_mtx_ilk_unlock(lck_mtx_t *l)
 *
 * In:  %rdi = lock address (no indirection here).
 * Clears M_ILOCKED and re-enables preemption.
 */
LEAF_ENTRY(lck_mtx_ilk_unlock)
	mov	%rdi, %rdx			/* fetch lock pointer - no indirection here */

	andl	$(~M_ILOCKED_MSK), M_STATE(%rdx)	/* drop the interlock */

	PREEMPTION_ENABLE			/* re-enable preemption */

	LEAF_RET
2d21ac55 1019
b0d623f7
A
1020
/*
 * boolean_t lck_mtx_lock_grab_mutex(lck_mtx_t *l)
 *
 * In:  %rdi = lock address (no indirection here).
 * Single attempt to take both interlock and mutex at once.
 * Out: %rax = 1 on success (owner recorded, preemption
 * disabled), 0 on failure.
 */
LEAF_ENTRY(lck_mtx_lock_grab_mutex)
	mov	%rdi, %rdx			/* fetch lock pointer - no indirection here */

	mov	M_STATE(%rdx), %ecx

	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx	/* can't have the mutex yet */
	jnz	3f

	mov	%rcx, %rax			/* %eax = snapshot for cmpxchg */
	or	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx

	PREEMPTION_DISABLE
	lock
	cmpxchg %ecx, M_STATE(%rdx)		/* atomic compare and exchange */
	jne	2f				/* lost the race */

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)		/* record owner of mutex */
#if	MACH_LDEBUG
	test	%rax, %rax
	jz	1f
	incl	TH_MUTEX_COUNT(%rax)		/* lock statistic */
1:
#endif	/* MACH_LDEBUG */

	mov	$1, %rax			/* return success */
	LEAF_RET
2:
	PREEMPTION_ENABLE			/* undo the disable taken above */
3:
	xor	%rax, %rax			/* return failure */
	LEAF_RET
b0d623f7
A
1053
1054
2d21ac55 1055
/*
 * void lck_mtx_lock_mark_destroyed(lck_mtx_t *l)
 *
 * In:  %rdi = lock address.  Sets M_STATE to MUTEX_DESTROYED,
 * taking the interlock first for non-indirect locks (spinning
 * until it clears).  Indirect locks are marked directly.
 * Returns with M_ILOCKED effectively set via the DESTROYED state.
 */
LEAF_ENTRY(lck_mtx_lock_mark_destroyed)
	mov	%rdi, %rdx
1:
	mov	M_STATE(%rdx), %ecx
	cmp	$(MUTEX_IND), %ecx		/* indirect mutex? */
	jne	2f

	movl	$(MUTEX_DESTROYED), M_STATE(%rdx)	/* convert to destroyed state */
	jmp	3f
2:
	test	$(M_ILOCKED_MSK), %rcx		/* must wait for interlock to clear */
	jnz	5f

	PREEMPTION_DISABLE
	mov	%rcx, %rax			/* %eax = snapshot for cmpxchg */
	or	$(M_ILOCKED_MSK), %ecx
	lock
	cmpxchg %ecx, M_STATE(%rdx)		/* atomic compare and exchange */
	jne	4f				/* failed - back to spin loop */
	movl	$(MUTEX_DESTROYED), M_STATE(%rdx)	/* convert to destroyed state */
	PREEMPTION_ENABLE
3:
	LEAF_RET				/* return with M_ILOCKED set */
4:
	PREEMPTION_ENABLE
5:
	PAUSE
	jmp	1b
1084
6d2010ae
A
/*
 * Called from the PREEMPTION_ENABLE macro when the per-CPU
 * preemption level decrements below zero.  Bumps the level back
 * up (so the panic path itself can run) and panics.
 * Never returns (CALL_PANIC then hlt).
 */
LEAF_ENTRY(preemption_underflow_panic)
	FRAME
	incl	%gs:CPU_PREEMPTION_LEVEL	/* restore a sane level for the panic path */
	ALIGN_STACK()
	LOAD_STRING_ARG0(16f)
	CALL_PANIC()
	hlt
	.data
16:	String	"Preemption level underflow, possible cause unlocking an unlocked mutex or spinlock"
	.text
1095
1096