1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1989 Carnegie-Mellon University
34 * All rights reserved. The CMU software License Agreement specifies
35 * the terms and conditions for use and redistribution.
36 */
37
38 #include <mach_ldebug.h>
39 #include <i386/asm.h>
40 #include <i386/eflags.h>
41 #include <i386/trap.h>
42 #include <config_dtrace.h>
43 #include <i386/mp.h>
44
45 #include "assym.s"
46
47 #define PAUSE rep; nop
48
49 #include <i386/pal_lock_asm.h>
50
51 #define LEAF_ENTRY(name) \
52 Entry(name)
53
54 #define LEAF_ENTRY2(n1,n2) \
55 Entry(n1); \
56 Entry(n2)
57
58 #define LEAF_RET \
59 ret
60
61 /* Non-leaf routines always have a stack frame: */
62
63 #define NONLEAF_ENTRY(name) \
64 Entry(name); \
65 FRAME
66
67 #define NONLEAF_ENTRY2(n1,n2) \
68 Entry(n1); \
69 Entry(n2); \
70 FRAME
71
72 #define NONLEAF_RET \
73 EMARF; \
74 ret
75
76
77 /* For x86_64, the varargs ABI requires that %al indicate
78 * how many SSE registers contain arguments. In our case, 0. */
79 #define ALIGN_STACK() and $0xFFFFFFFFFFFFFFF0, %rsp ;
80 #define LOAD_STRING_ARG0(label) leaq label(%rip), %rdi ;
81 #define LOAD_ARG1(x) mov x, %esi ;
82 #define LOAD_PTR_ARG1(x) mov x, %rsi ;
83 #define CALL_PANIC() xorb %al,%al ; call EXT(panic) ;
84
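/*
 * CHECK_UNLOCK(current, owner): sanity check used on the extended-mutex
 * unlock path; panics if the thread releasing the mutex is not the
 * recorded owner.
 */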
85 #define CHECK_UNLOCK(current, owner) \
86 cmp current, owner ; \
87 je 1f ; \
88 ALIGN_STACK() ; \
89 LOAD_STRING_ARG0(2f) ; \
90 CALL_PANIC() ; \
91 hlt ; \
92 .data ; \
93 2: String "Mutex unlock attempted from non-owner thread"; \
94 .text ; \
95 1:
96
97 #if MACH_LDEBUG
98 /*
99 * Routines for general lock debugging.
100 */
101
102 /*
103 * Checks for the expected lock type and calls "panic" on
104 * a mismatch; detects calls to mutex functions with a
105 * simplelock type and vice versa.
106 */
107 #define CHECK_MUTEX_TYPE() \
108 cmpl $ MUTEX_TAG,M_TYPE ; \
109 je 1f ; \
110 ALIGN_STACK() ; \
111 LOAD_STRING_ARG0(2f) ; \
112 CALL_PANIC() ; \
113 hlt ; \
114 .data ; \
115 2: String "not a mutex!" ; \
116 .text ; \
117 1:
118
119 #define CHECK_MYLOCK(current, owner) \
120 cmp current, owner ; \
121 jne 1f ; \
122 ALIGN_STACK() ; \
123 LOAD_STRING_ARG0(2f) ; \
124 CALL_PANIC() ; \
125 hlt ; \
126 .data ; \
127 2: String "Attempt to recursively lock a non-recursive lock"; \
128 .text ; \
129 1:
130
131 #else /* MACH_LDEBUG */
132 #define CHECK_MUTEX_TYPE()
133 #define CHECK_MYLOCK(current, owner)
134 #endif /* MACH_LDEBUG */
135
136 #if DEVELOPMENT || DEBUG
137 /*
138 * If one or more simplelocks are currently held by a thread,
139 * an attempt to acquire a mutex will cause this check to fail
140 * (since acquiring a mutex may context switch, holding a
141 * simplelock across it is not a good thing).
142 */
143 #define CHECK_PREEMPTION_LEVEL() \
144 cmpl $0,%gs:CPU_PREEMPTION_LEVEL ; \
145 je 1f ; \
146 cmpl $0,EXT(LckDisablePreemptCheck)(%rip) ; \
147 jne 1f ; \
148 cmpl $0,%gs:CPU_HIBERNATE ; \
149 jne 1f ; \
150 ALIGN_STACK() ; \
151 movl %gs:CPU_PREEMPTION_LEVEL, %eax ; \
152 LOAD_ARG1(%eax) ; \
153 LOAD_STRING_ARG0(2f) ; \
154 CALL_PANIC() ; \
155 hlt ; \
156 .data ; \
157 2: String "preemption_level(%d) != 0!" ; \
158 .text ; \
159 1:
160 #else /* DEVELOPMENT || DEBUG */
161 #define CHECK_PREEMPTION_LEVEL()
162 #endif /* DEVELOPMENT || DEBUG */
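
/*
 * Rough C equivalent of CHECK_PREEMPTION_LEVEL() (a sketch only; the
 * accessor names stand in for the %gs-relative cpu_data fields and the
 * LckDisablePreemptCheck global referenced above):
 *
 *	if (get_preemption_level() != 0 &&
 *	    LckDisablePreemptCheck == 0 &&
 *	    !cpu_is_hibernating())
 *		panic("preemption_level(%d) != 0!", get_preemption_level());
 */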
163
164 #define PREEMPTION_DISABLE \
165 incl %gs:CPU_PREEMPTION_LEVEL
166
167 #define PREEMPTION_LEVEL_DEBUG 1
168 #if PREEMPTION_LEVEL_DEBUG
169 #define PREEMPTION_ENABLE \
170 decl %gs:CPU_PREEMPTION_LEVEL ; \
171 js 17f ; \
172 jnz 19f ; \
173 testl $AST_URGENT,%gs:CPU_PENDING_AST ; \
174 jz 19f ; \
175 PUSHF ; \
176 testl $EFL_IF, S_PC ; \
177 jz 18f ; \
178 POPF ; \
179 int $(T_PREEMPT) ; \
180 jmp 19f ; \
181 17: \
182 call _preemption_underflow_panic ; \
183 18: \
184 POPF ; \
185 19:
186 #else
187 #define PREEMPTION_ENABLE \
188 decl %gs:CPU_PREEMPTION_LEVEL ; \
189 jnz 19f ; \
190 testl $AST_URGENT,%gs:CPU_PENDING_AST ; \
191 jz 19f ; \
192 PUSHF ; \
193 testl $EFL_IF, S_PC ; \
194 jz 18f ; \
195 POPF ; \
196 int $(T_PREEMPT) ; \
197 jmp 19f ; \
198 18: \
199 POPF ; \
200 19:
201 #endif
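
/*
 * What PREEMPTION_ENABLE does, as a rough C sketch (illustrative only;
 * "preemption_level", "pending_ast", "interrupts_enabled" and
 * "take_preempt_trap" stand in for the %gs-relative fields, the EFL_IF
 * test on the saved flags, and the "int $(T_PREEMPT)" trap):
 *
 *	if (--preemption_level == 0 &&
 *	    (pending_ast & AST_URGENT) &&
 *	    interrupts_enabled())
 *		take_preempt_trap();
 *	// the debug variant additionally panics if the level underflows
 */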
202
203
204 #if CONFIG_DTRACE
205
206 .globl _lockstat_probe
207 .globl _lockstat_probemap
208
209 /*
210 * LOCKSTAT_LABEL creates a dtrace symbol which contains
211 * a pointer into the lock code function body. At that
212 * point is a "ret" instruction that can be patched into
213 * a "nop"
214 */
215
216 #define LOCKSTAT_LABEL(lab) \
217 .data ;\
218 .globl lab ;\
219 lab: ;\
220 .quad 9f ;\
221 .text ;\
222 9:
223
224 #define LOCKSTAT_RECORD(id, lck) \
225 push %rbp ; \
226 mov %rsp,%rbp ; \
227 movl _lockstat_probemap + (id * 4)(%rip),%eax ; \
228 test %eax,%eax ; \
229 je 9f ; \
230 mov lck, %rsi ; \
231 mov %rax, %rdi ; \
232 mov $0, %rdx ; \
233 mov $0, %rcx ; \
234 mov $0, %r8 ; \
235 mov $0, %r9 ; \
236 call *_lockstat_probe(%rip) ; \
237 9: leave
238 /* ret - left to subsequent code, e.g. return values */
239
240 #endif /* CONFIG_DTRACE */
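
/*
 * LOCKSTAT_RECORD(id, lck) is, roughly, the following C (a sketch; the
 * probe vector and map are the _lockstat_probe/_lockstat_probemap symbols
 * above, and this code is only reached after dtrace patches the "ret" at
 * the preceding LOCKSTAT_LABEL patch point into a "nop"):
 *
 *	if (lockstat_probemap[id] != 0)
 *		(*lockstat_probe)(lockstat_probemap[id], (uint64_t)lck,
 *		    0, 0, 0, 0);
 */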
241
242 /*
243 * For most routines, the hw_lock_t pointer is loaded into a
244 * register initially, and then either a byte or register-sized
245 * word is loaded from or stored to that location.
246 */
247
248 /*
249 * void hw_lock_byte_init(volatile uint8_t *)
250 *
251 * Initialize a hardware byte lock.
252 */
253 LEAF_ENTRY(hw_lock_byte_init)
254 movb $0, (%rdi) /* clear the lock */
255 LEAF_RET
256
257 /*
258 * void hw_lock_byte_lock(uint8_t *lock_byte)
259 *
260 * Acquire byte sized lock operand, spinning until it becomes available.
261 * return with preemption disabled.
262 */
263
264 LEAF_ENTRY(hw_lock_byte_lock)
265 PREEMPTION_DISABLE
266 movl $1, %ecx /* Set lock value */
267 1:
268 movb (%rdi), %al /* Load byte at address */
269 testb %al,%al /* lock locked? */
270 jne 3f /* branch if so */
271 lock; cmpxchg %cl,(%rdi) /* attempt atomic compare exchange */
272 jne 3f
273 LEAF_RET /* if yes, then nothing left to do */
274 3:
275 PAUSE /* pause for hyper-threading */
276 jmp 1b /* try again */
277
278 /*
279 * void hw_lock_byte_unlock(uint8_t *lock_byte)
280 *
281 * Unconditionally release byte sized lock operand,
282 * release preemption level.
283 */
284
285 LEAF_ENTRY(hw_lock_byte_unlock)
286 movb $0, (%rdi) /* Clear the lock byte */
287 PREEMPTION_ENABLE
288 LEAF_RET
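
/*
 * Illustrative C equivalent of the two byte-lock routines above (a sketch,
 * not the kernel implementation; compiler builtins stand in for the inline
 * "lock; cmpxchg" and PAUSE, and disable/enable_preemption() for the
 * PREEMPTION_* macros):
 *
 *	void hw_lock_byte_lock(volatile uint8_t *lock_byte)
 *	{
 *		disable_preemption();
 *		for (;;) {
 *			uint8_t expected = 0;
 *			if (*lock_byte == 0 &&
 *			    __atomic_compare_exchange_n(lock_byte, &expected, 1,
 *			        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *				return;			// lock acquired
 *			__builtin_ia32_pause();		// PAUSE
 *		}
 *	}
 *
 *	void hw_lock_byte_unlock(volatile uint8_t *lock_byte)
 *	{
 *		*lock_byte = 0;			// clear the lock byte
 *		enable_preemption();
 *	}
 */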
289
290 /*
291 * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
292 * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained
293 * as a 64-bit quantity (this matches the existing PowerPC implementation,
294 * while the new x86-specific statistics are maintained as 32-bit
295 * quantities).
296 *
297 *
298 * Enable this preprocessor define to record only the first miss.
299 * By default, we count every miss, hence multiple misses may be
300 * recorded for a single lock acquire attempt via lck_mtx_lock.
301 */
302 #undef LOG_FIRST_MISS_ALONE
303
304 /*
305 * This preprocessor define controls whether the R-M-W update of the
306 * per-group statistics elements is atomic (LOCK-prefixed).
307 * Enabled by default.
308 */
309 #define ATOMIC_STAT_UPDATES 1
310
311 #if defined(ATOMIC_STAT_UPDATES)
312 #define LOCK_IF_ATOMIC_STAT_UPDATES lock
313 #else
314 #define LOCK_IF_ATOMIC_STAT_UPDATES
315 #endif /* ATOMIC_STAT_UPDATES */
316
317
318 /*
319 * For most routines, the lck_mtx_t pointer is loaded into a
320 * register initially, and the owner field checked for indirection.
321 * Eventually the lock owner is loaded into a register and examined.
322 */
323
324 #define M_OWNER MUTEX_OWNER
325 #define M_PTR MUTEX_PTR
326 #define M_STATE MUTEX_STATE
327
328
329 #define LMTX_ENTER_EXTENDED \
330 mov M_PTR(%rdx), %rdx ; \
331 xor %r11, %r11 ; \
332 mov MUTEX_GRP(%rdx), %r10 ; \
333 LOCK_IF_ATOMIC_STAT_UPDATES ; \
334 incq GRP_MTX_STAT_UTIL(%r10)
335
336
337 #if LOG_FIRST_MISS_ALONE
338 #define LMTX_UPDATE_MISS \
339 test $1, %r11 ; \
340 jnz 11f ; \
341 LOCK_IF_ATOMIC_STAT_UPDATES ; \
342 incl GRP_MTX_STAT_MISS(%r10) ; \
343 or $1, %r11 ; \
344 11:
345 #else
346 #define LMTX_UPDATE_MISS \
347 LOCK_IF_ATOMIC_STAT_UPDATES ; \
348 incl GRP_MTX_STAT_MISS(%r10)
349 #endif
350
351
352 #if LOG_FIRST_MISS_ALONE
353 #define LMTX_UPDATE_WAIT \
354 test $2, %r11 ; \
355 jnz 11f ; \
356 LOCK_IF_ATOMIC_STAT_UPDATES ; \
357 incl GRP_MTX_STAT_WAIT(%r10) ; \
358 or $2, %r11 ; \
359 11:
360 #else
361 #define LMTX_UPDATE_WAIT \
362 LOCK_IF_ATOMIC_STAT_UPDATES ; \
363 incl GRP_MTX_STAT_WAIT(%r10)
364 #endif
365
366
367 /*
368 * Record the "direct wait" statistic, which indicates if a
369 * miss proceeded to block directly without spinning; this occurs
370 * if the owner of the mutex isn't running on another processor
371 * at the time of the check.
372 */
373 #define LMTX_UPDATE_DIRECT_WAIT \
374 LOCK_IF_ATOMIC_STAT_UPDATES ; \
375 incl GRP_MTX_STAT_DIRECT_WAIT(%r10)
376
377
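/*
 * LMTX_CALLEXT1/2: call a C helper with the (possibly indirected) lock as
 * its first argument, preserving %rdi/%rdx across the call and additionally
 * saving %r10/%r11 (the statistics group pointer and the first-miss flags)
 * only when operating on an extended mutex (%rdx != %rdi).
 */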
378 #define LMTX_CALLEXT1(func_name) \
379 cmp %rdx, %rdi ; \
380 je 12f ; \
381 push %r10 ; \
382 push %r11 ; \
383 12: push %rdi ; \
384 push %rdx ; \
385 mov %rdx, %rdi ; \
386 call EXT(func_name) ; \
387 pop %rdx ; \
388 pop %rdi ; \
389 cmp %rdx, %rdi ; \
390 je 12f ; \
391 pop %r11 ; \
392 pop %r10 ; \
393 12:
394
395 #define LMTX_CALLEXT2(func_name, reg) \
396 cmp %rdx, %rdi ; \
397 je 12f ; \
398 push %r10 ; \
399 push %r11 ; \
400 12: push %rdi ; \
401 push %rdx ; \
402 mov reg, %rsi ; \
403 mov %rdx, %rdi ; \
404 call EXT(func_name) ; \
405 pop %rdx ; \
406 pop %rdi ; \
407 cmp %rdx, %rdi ; \
408 je 12f ; \
409 pop %r11 ; \
410 pop %r10 ; \
411 12:
412
413
414 #define M_WAITERS_MSK 0x0000ffff
415 #define M_PRIORITY_MSK 0x00ff0000
416 #define M_ILOCKED_MSK 0x01000000
417 #define M_MLOCKED_MSK 0x02000000
418 #define M_PROMOTED_MSK 0x04000000
419 #define M_SPIN_MSK 0x08000000
420
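/*
 * Layout of the M_STATE word implied by the masks above:
 *
 *	bits  0-15	waiter count			(M_WAITERS_MSK)
 *	bits 16-23	priority			(M_PRIORITY_MSK)
 *	bit  24		interlock held			(M_ILOCKED_MSK)
 *	bit  25		held as a full mutex		(M_MLOCKED_MSK)
 *	bit  26		owner has been promoted		(M_PROMOTED_MSK)
 *	bit  27		held as a spin lock		(M_SPIN_MSK)
 */
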
421 /*
422 * void lck_mtx_assert(lck_mtx_t* l, unsigned int)
423 * Takes the address of a lock, and an assertion type as parameters.
424 * The assertion can take one of two forms, determined by the type
425 * parameter: either the lock is held by the current thread, and the
426 * type is LCK_MTX_ASSERT_OWNED, or it isn't and the type is
427 * LCK_MTX_ASSERT_NOTOWNED. Calls panic on assertion failure.
428 *
429 */
430
431 NONLEAF_ENTRY(lck_mtx_assert)
432 mov %rdi, %rdx /* Load lock address */
433 mov %gs:CPU_ACTIVE_THREAD, %rax /* Load current thread */
434
435 mov M_STATE(%rdx), %ecx
436 cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
437 jne 0f
438 mov M_PTR(%rdx), %rdx /* If so, take indirection */
439 0:
440 mov M_OWNER(%rdx), %rcx /* Load owner */
441 cmp $(MUTEX_ASSERT_OWNED), %rsi
442 jne 2f /* Assert ownership? */
443 cmp %rax, %rcx /* Current thread match? */
444 jne 3f /* no, go panic */
445 testl $(M_ILOCKED_MSK | M_MLOCKED_MSK), M_STATE(%rdx)
446 je 3f
447 1: /* yes, we own it */
448 NONLEAF_RET
449 2:
450 cmp %rax, %rcx /* Current thread match? */
451 jne 1b /* No, return */
452 ALIGN_STACK()
453 LOAD_PTR_ARG1(%rdx)
454 LOAD_STRING_ARG0(mutex_assert_owned_str)
455 jmp 4f
456 3:
457 ALIGN_STACK()
458 LOAD_PTR_ARG1(%rdx)
459 LOAD_STRING_ARG0(mutex_assert_not_owned_str)
460 4:
461 CALL_PANIC()
462
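/*
 * Typical use from C (a sketch; "my_lock" is a placeholder):
 *
 *	lck_mtx_lock(&my_lock);
 *	lck_mtx_assert(&my_lock, LCK_MTX_ASSERT_OWNED);		// passes
 *	lck_mtx_unlock(&my_lock);
 *	lck_mtx_assert(&my_lock, LCK_MTX_ASSERT_NOTOWNED);	// passes
 */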
463
464 lck_mtx_destroyed:
465 ALIGN_STACK()
466 LOAD_PTR_ARG1(%rdx)
467 LOAD_STRING_ARG0(mutex_interlock_destroyed_str)
468 CALL_PANIC()
469
470
471 .data
472 mutex_assert_not_owned_str:
473 .asciz "mutex (%p) not owned\n"
474 mutex_assert_owned_str:
475 .asciz "mutex (%p) owned\n"
476 mutex_interlock_destroyed_str:
477 .asciz "trying to interlock destroyed mutex (%p)"
478 .text
479
480
481
482 /*
483 * lck_mtx_lock()
484 * lck_mtx_try_lock()
485 * lck_mtx_unlock()
486 * lck_mtx_lock_spin()
487 * lck_mtx_lock_spin_always()
488 * lck_mtx_try_lock_spin()
489 * lck_mtx_try_lock_spin_always()
490 * lck_mtx_convert_spin()
491 */
492 NONLEAF_ENTRY(lck_mtx_lock_spin_always)
493 mov %rdi, %rdx /* fetch lock pointer */
494 jmp Llmls_avoid_check
495
496 NONLEAF_ENTRY(lck_mtx_lock_spin)
497 mov %rdi, %rdx /* fetch lock pointer */
498
499 CHECK_PREEMPTION_LEVEL()
500 Llmls_avoid_check:
501 mov M_STATE(%rdx), %ecx
502 test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */
503 jnz Llmls_slow
504 Llmls_try: /* no - can't be INDIRECT, DESTROYED or locked */
505 mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
506 or $(M_ILOCKED_MSK | M_SPIN_MSK), %ecx
507
508 PREEMPTION_DISABLE
509 lock
510 cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
511 jne Llmls_busy_disabled
512
513 mov %gs:CPU_ACTIVE_THREAD, %rax
514 mov %rax, M_OWNER(%rdx) /* record owner of interlock */
515 #if MACH_LDEBUG
516 test %rax, %rax
517 jz 1f
518 incl TH_MUTEX_COUNT(%rax) /* lock statistic */
519 1:
520 #endif /* MACH_LDEBUG */
521
522 /* return with the interlock held and preemption disabled */
523 leave
524 #if CONFIG_DTRACE
525 LOCKSTAT_LABEL(_lck_mtx_lock_spin_lockstat_patch_point)
526 ret
527 /* inherit lock pointer in %rdx above */
528 LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, %rdx)
529 #endif
530 ret
531
532 Llmls_slow:
533 test $M_ILOCKED_MSK, %ecx /* is the interlock held */
534 jz Llml_contended /* no, must have been the mutex */
535
536 cmp $(MUTEX_DESTROYED), %ecx /* check to see if it's marked destroyed */
537 je lck_mtx_destroyed
538 cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex */
539 jne Llmls_loop /* no... must be interlocked */
540
541 LMTX_ENTER_EXTENDED
542
543 mov M_STATE(%rdx), %ecx
544 test $(M_SPIN_MSK), %ecx
545 jz Llmls_loop1
546
547 LMTX_UPDATE_MISS /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */
548 Llmls_loop:
549 PAUSE
550 mov M_STATE(%rdx), %ecx
551 Llmls_loop1:
552 test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
553 jz Llmls_try
554 test $(M_MLOCKED_MSK), %ecx
555 jnz Llml_contended /* mutex owned by someone else, go contend for it */
556 jmp Llmls_loop
557
558 Llmls_busy_disabled:
559 PREEMPTION_ENABLE
560 jmp Llmls_loop
561
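/*
 * The uncontended path of lck_mtx_lock_spin above, in rough C (a sketch
 * only; "state", "owner" and atomic_cmpxchg() are illustrative stand-ins
 * for the M_STATE/M_OWNER fields and the "lock; cmpxchg" sequence, and the
 * indirect, destroyed and statistics cases are omitted). lck_mtx_lock
 * below follows the same pattern but sets M_MLOCKED_MSK instead of
 * M_SPIN_MSK and drops the interlock before returning:
 *
 *	uint32_t old = lock->state;
 *	if (!(old & (M_ILOCKED_MSK | M_MLOCKED_MSK))) {
 *		disable_preemption();
 *		if (atomic_cmpxchg(&lock->state, old,
 *		    old | M_ILOCKED_MSK | M_SPIN_MSK)) {
 *			lock->owner = current_thread();
 *			return;		// interlock held, preemption stays disabled
 *		}
 *		enable_preemption();	// lost the race
 *	}
 *	// otherwise PAUSE and retry, or fall into the contended mutex path
 */
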
562
563
564 NONLEAF_ENTRY(lck_mtx_lock)
565 mov %rdi, %rdx /* fetch lock pointer */
566
567 CHECK_PREEMPTION_LEVEL()
568
569 mov M_STATE(%rdx), %ecx
570 test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */
571 jnz Llml_slow
572 Llml_try: /* no - can't be INDIRECT, DESTROYED or locked */
573 mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
574 or $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
575
576 PREEMPTION_DISABLE
577 lock
578 cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
579 jne Llml_busy_disabled
580
581 mov %gs:CPU_ACTIVE_THREAD, %rax
582 mov %rax, M_OWNER(%rdx) /* record owner of mutex */
583 #if MACH_LDEBUG
584 test %rax, %rax
585 jz 1f
586 incl TH_MUTEX_COUNT(%rax) /* lock statistic */
587 1:
588 #endif /* MACH_LDEBUG */
589
590 testl $(M_WAITERS_MSK), M_STATE(%rdx)
591 jz Llml_finish
592
593 LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
594
595 Llml_finish:
596 andl $(~M_ILOCKED_MSK), M_STATE(%rdx)
597 PREEMPTION_ENABLE
598
599 cmp %rdx, %rdi /* is this an extended mutex */
600 jne 2f
601
602 leave
603 #if CONFIG_DTRACE
604 LOCKSTAT_LABEL(_lck_mtx_lock_lockstat_patch_point)
605 ret
606 /* inherit lock pointer in %rdx above */
607 LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, %rdx)
608 #endif
609 ret
610 2:
611 leave
612 #if CONFIG_DTRACE
613 LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point)
614 ret
615 /* inherit lock pointer in %rdx above */
616 LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, %rdx)
617 #endif
618 ret
619
620
621 Llml_slow:
622 test $M_ILOCKED_MSK, %ecx /* is the interlock held */
623 jz Llml_contended /* no, must have been the mutex */
624
625 cmp $(MUTEX_DESTROYED), %ecx /* check to see if it's marked destroyed */
626 je lck_mtx_destroyed
627 cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
628 jne Llml_loop /* no... must be interlocked */
629
630 LMTX_ENTER_EXTENDED
631
632 mov M_STATE(%rdx), %ecx
633 test $(M_SPIN_MSK), %ecx
634 jz Llml_loop1
635
636 LMTX_UPDATE_MISS /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */
637 Llml_loop:
638 PAUSE
639 mov M_STATE(%rdx), %ecx
640 Llml_loop1:
641 test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
642 jz Llml_try
643 test $(M_MLOCKED_MSK), %ecx
644 jnz Llml_contended /* mutex owned by someone else, go contend for it */
645 jmp Llml_loop
646
647 Llml_busy_disabled:
648 PREEMPTION_ENABLE
649 jmp Llml_loop
650
651
652 Llml_contended:
653 cmp %rdx, %rdi /* is this an extended mutex */
654 je 0f
655 LMTX_UPDATE_MISS
656 0:
657 LMTX_CALLEXT1(lck_mtx_lock_spinwait_x86)
658
659 test %rax, %rax
660 jz Llml_acquired /* acquired mutex, interlock held and preemption disabled */
661
662 cmp $1, %rax /* check for direct wait status */
663 je 2f
664 cmp %rdx, %rdi /* is this an extended mutex */
665 je 2f
666 LMTX_UPDATE_DIRECT_WAIT
667 2:
668 mov M_STATE(%rdx), %ecx
669 test $(M_ILOCKED_MSK), %ecx
670 jnz 6f
671
672 mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
673 or $(M_ILOCKED_MSK), %ecx /* try to take the interlock */
674
675 PREEMPTION_DISABLE
676 lock
677 cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
678 jne 5f
679
680 test $(M_MLOCKED_MSK), %ecx /* we've got the interlock and */
681 jnz 3f
682 or $(M_MLOCKED_MSK), %ecx /* the mutex is free... grab it directly */
683 mov %ecx, M_STATE(%rdx)
684
685 mov %gs:CPU_ACTIVE_THREAD, %rax
686 mov %rax, M_OWNER(%rdx) /* record owner of mutex */
687 #if MACH_LDEBUG
688 test %rax, %rax
689 jz 1f
690 incl TH_MUTEX_COUNT(%rax) /* lock statistic */
691 1:
692 #endif /* MACH_LDEBUG */
693
694 Llml_acquired:
695 testl $(M_WAITERS_MSK), M_STATE(%rdx)
696 jnz 1f
697 mov M_OWNER(%rdx), %rax
698 mov TH_WAS_PROMOTED_ON_WAKEUP(%rax), %eax
699 test %eax, %eax
700 jz Llml_finish
701 1:
702 LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
703 jmp Llml_finish
704
705 3: /* interlock held, mutex busy */
706 cmp %rdx, %rdi /* is this an extended mutex */
707 je 4f
708 LMTX_UPDATE_WAIT
709 4:
710 LMTX_CALLEXT1(lck_mtx_lock_wait_x86)
711 jmp Llml_contended
712 5:
713 PREEMPTION_ENABLE
714 6:
715 PAUSE
716 jmp 2b
717
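/*
 * The contended path above (Llml_contended) in rough C (a sketch; the
 * interlock handling and the "try_take_interlock" name are illustrative,
 * while lck_mtx_lock_spinwait_x86 and lck_mtx_lock_wait_x86 are the C
 * helpers called via LMTX_CALLEXT1):
 *
 *	for (;;) {
 *		if (lck_mtx_lock_spinwait_x86(lock) == 0)
 *			break;				// acquired while spinning
 *		if (try_take_interlock(lock)) {
 *			if (!(lock->state & M_MLOCKED_MSK)) {
 *				lock->state |= M_MLOCKED_MSK;	// mutex is free, grab it
 *				lock->owner = current_thread();
 *				break;
 *			}
 *			lck_mtx_lock_wait_x86(lock);	// block until woken
 *		}
 *	}
 *	// then handle waiters/promotion and drop the interlock (Llml_finish)
 */
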
718
719 NONLEAF_ENTRY(lck_mtx_try_lock_spin_always)
720 mov %rdi, %rdx /* fetch lock pointer */
721 jmp Llmts_avoid_check
722
723 NONLEAF_ENTRY(lck_mtx_try_lock_spin)
724 mov %rdi, %rdx /* fetch lock pointer */
725
726 Llmts_avoid_check:
727 mov M_STATE(%rdx), %ecx
728 test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */
729 jnz Llmts_slow
730 Llmts_try: /* no - can't be INDIRECT, DESTROYED or locked */
731 mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
732 or $(M_ILOCKED_MSK | M_SPIN_MSK), %rcx
733
734 PREEMPTION_DISABLE
735 lock
736 cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
737 jne Llmts_busy_disabled
738
739 mov %gs:CPU_ACTIVE_THREAD, %rax
740 mov %rax, M_OWNER(%rdx) /* record owner of mutex */
741 #if MACH_LDEBUG
742 test %rax, %rax
743 jz 1f
744 incl TH_MUTEX_COUNT(%rax) /* lock statistic */
745 1:
746 #endif /* MACH_LDEBUG */
747
748 leave
749
750 #if CONFIG_DTRACE
751 mov $1, %rax /* return success */
752 LOCKSTAT_LABEL(_lck_mtx_try_lock_spin_lockstat_patch_point)
753 ret
754 /* inherit lock pointer in %rdx above */
755 LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, %rdx)
756 #endif
757 mov $1, %rax /* return success */
758 ret
759
760 Llmts_slow:
761 test $(M_ILOCKED_MSK), %ecx /* is the interlock held */
762 jz Llmts_fail /* no, must be held as a mutex */
763
764 cmp $(MUTEX_DESTROYED), %ecx /* check to see if it's marked destroyed */
765 je lck_mtx_destroyed
766 cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
767 jne Llmts_loop1
768
769 LMTX_ENTER_EXTENDED
770 Llmts_loop:
771 PAUSE
772 mov M_STATE(%rdx), %ecx
773 Llmts_loop1:
774 test $(M_MLOCKED_MSK | M_SPIN_MSK), %ecx
775 jnz Llmts_fail
776 test $(M_ILOCKED_MSK), %ecx
777 jz Llmts_try
778 jmp Llmts_loop
779
780 Llmts_busy_disabled:
781 PREEMPTION_ENABLE
782 jmp Llmts_loop
783
784
785
786 NONLEAF_ENTRY(lck_mtx_try_lock)
787 mov %rdi, %rdx /* fetch lock pointer */
788
789 mov M_STATE(%rdx), %ecx
790 test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */
791 jnz Llmt_slow
792 Llmt_try: /* no - can't be INDIRECT, DESTROYED or locked */
793 mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
794 or $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
795
796 PREEMPTION_DISABLE
797 lock
798 cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
799 jne Llmt_busy_disabled
800
801 mov %gs:CPU_ACTIVE_THREAD, %rax
802 mov %rax, M_OWNER(%rdx) /* record owner of mutex */
803 #if MACH_LDEBUG
804 test %rax, %rax
805 jz 1f
806 incl TH_MUTEX_COUNT(%rax) /* lock statistic */
807 1:
808 #endif /* MACH_LDEBUG */
809
810 test $(M_WAITERS_MSK), %ecx
811 jz 0f
812
813 LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
814 0:
815 andl $(~M_ILOCKED_MSK), M_STATE(%rdx)
816 PREEMPTION_ENABLE
817
818 leave
819 #if CONFIG_DTRACE
820 mov $1, %rax /* return success */
821 /* Dtrace probe: LS_LCK_MTX_TRY_LOCK_ACQUIRE */
822 LOCKSTAT_LABEL(_lck_mtx_try_lock_lockstat_patch_point)
823 ret
824 /* inherit lock pointer in %rdx from above */
825 LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, %rdx)
826 #endif
827 mov $1, %rax /* return success */
828 ret
829
830 Llmt_slow:
831 test $(M_ILOCKED_MSK), %ecx /* is the interlock held */
832 jz Llmt_fail /* no, must be held as a mutex */
833
834 cmp $(MUTEX_DESTROYED), %ecx /* check to see if it's marked destroyed */
835 je lck_mtx_destroyed
836 cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
837 jne Llmt_loop
838
839 LMTX_ENTER_EXTENDED
840 Llmt_loop:
841 PAUSE
842 mov M_STATE(%rdx), %ecx
843 Llmt_loop1:
844 test $(M_MLOCKED_MSK | M_SPIN_MSK), %ecx
845 jnz Llmt_fail
846 test $(M_ILOCKED_MSK), %ecx
847 jz Llmt_try
848 jmp Llmt_loop
849
850 Llmt_busy_disabled:
851 PREEMPTION_ENABLE
852 jmp Llmt_loop
853
854
855 Llmt_fail:
856 Llmts_fail:
857 cmp %rdx, %rdi /* is this an extended mutex */
858 je 0f
859 LMTX_UPDATE_MISS
860 0:
861 xor %rax, %rax
862 NONLEAF_RET
863
864
865
866 NONLEAF_ENTRY(lck_mtx_convert_spin)
867 mov %rdi, %rdx /* fetch lock pointer */
868
869 mov M_STATE(%rdx), %ecx
870 cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
871 jne 0f
872 mov M_PTR(%rdx), %rdx /* If so, take indirection */
873 mov M_STATE(%rdx), %ecx
874 0:
875 test $(M_MLOCKED_MSK), %ecx /* already owned as a mutex, just return */
876 jnz 2f
877 test $(M_WAITERS_MSK), %ecx /* are there any waiters? */
878 jz 1f
879
880 LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
881 mov M_STATE(%rdx), %ecx
882 1:
883 and $(~(M_ILOCKED_MSK | M_SPIN_MSK)), %ecx /* convert from spin version to mutex */
884 or $(M_MLOCKED_MSK), %ecx
885 mov %ecx, M_STATE(%rdx) /* since I own the interlock, I don't need an atomic update */
886
887 PREEMPTION_ENABLE
888 2:
889 NONLEAF_RET
890
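/*
 * lck_mtx_convert_spin above, roughly (a sketch; the final store need not
 * be atomic because the caller already holds the interlock from the spin
 * acquire):
 *
 *	if (!(lock->state & M_MLOCKED_MSK)) {
 *		if (lock->state & M_WAITERS_MSK)
 *			lck_mtx_lock_acquire_x86(lock);
 *		lock->state = (lock->state & ~(M_ILOCKED_MSK | M_SPIN_MSK))
 *		    | M_MLOCKED_MSK;
 *		enable_preemption();
 *	}
 */
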
891
892
893 NONLEAF_ENTRY(lck_mtx_unlock)
894 mov %rdi, %rdx /* fetch lock pointer */
895 Llmu_entry:
896 mov M_STATE(%rdx), %ecx
897 Llmu_prim:
898 cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
899 je Llmu_ext
900
901 Llmu_chktype:
902 test $(M_MLOCKED_MSK), %ecx /* check for full mutex */
903 jz Llmu_unlock
904 Llmu_mutex:
905 test $(M_ILOCKED_MSK), %rcx /* have to wait for interlock to clear */
906 jnz Llmu_busy
907
908 mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
909 and $(~M_MLOCKED_MSK), %ecx /* drop mutex */
910 or $(M_ILOCKED_MSK), %ecx /* pick up interlock */
911
912 PREEMPTION_DISABLE
913 lock
914 cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
915 jne Llmu_busy_disabled /* branch on failure to spin loop */
916
917 Llmu_unlock:
918 xor %rax, %rax
919 mov %rax, M_OWNER(%rdx)
920 mov %rcx, %rax /* keep original state in %ecx for later evaluation */
921 and $(~(M_ILOCKED_MSK | M_SPIN_MSK | M_PROMOTED_MSK)), %rax
922
923 test $(M_WAITERS_MSK), %eax
924 jz 2f
925 dec %eax /* decrement waiter count */
926 2:
927 mov %eax, M_STATE(%rdx) /* since I own the interlock, I don't need an atomic update */
928
929 #if MACH_LDEBUG
930 /* perform lock statistics after drop to prevent delay */
931 mov %gs:CPU_ACTIVE_THREAD, %rax
932 test %rax, %rax
933 jz 1f
934 decl TH_MUTEX_COUNT(%rax) /* lock statistic */
935 1:
936 #endif /* MACH_LDEBUG */
937
938 test $(M_PROMOTED_MSK | M_WAITERS_MSK), %ecx
939 jz 3f
940
941 LMTX_CALLEXT2(lck_mtx_unlock_wakeup_x86, %rcx)
942 3:
943 PREEMPTION_ENABLE
944
945 cmp %rdx, %rdi
946 jne 4f
947
948 leave
949 #if CONFIG_DTRACE
950 /* Dtrace: LS_LCK_MTX_UNLOCK_RELEASE */
951 LOCKSTAT_LABEL(_lck_mtx_unlock_lockstat_patch_point)
952 ret
953 /* inherit lock pointer in %rdx from above */
954 LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %rdx)
955 #endif
956 ret
957 4:
958 leave
959 #if CONFIG_DTRACE
960 /* Dtrace: LS_LCK_MTX_EXT_UNLOCK_RELEASE */
961 LOCKSTAT_LABEL(_lck_mtx_ext_unlock_lockstat_patch_point)
962 ret
963 /* inherit lock pointer in %rdx from above */
964 LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, %rdx)
965 #endif
966 ret
967
968
969 Llmu_busy_disabled:
970 PREEMPTION_ENABLE
971 Llmu_busy:
972 PAUSE
973 mov M_STATE(%rdx), %ecx
974 jmp Llmu_mutex
975
976 Llmu_ext:
977 mov M_PTR(%rdx), %rdx
978 mov M_OWNER(%rdx), %rax
979 mov %gs:CPU_ACTIVE_THREAD, %rcx
980 CHECK_UNLOCK(%rcx, %rax)
981 mov M_STATE(%rdx), %ecx
982 jmp Llmu_chktype
983
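/*
 * The unlock path above in rough C (a sketch; "take_interlock_and_drop_mutex"
 * is an illustrative name for the "lock; cmpxchg" that sets M_ILOCKED_MSK
 * and clears M_MLOCKED_MSK in one step):
 *
 *	uint32_t old = take_interlock_and_drop_mutex(lock);
 *	lock->owner = NULL;
 *	uint32_t newstate =
 *	    old & ~(M_ILOCKED_MSK | M_SPIN_MSK | M_PROMOTED_MSK);
 *	if (newstate & M_WAITERS_MSK)
 *		newstate--;			// decrement waiter count
 *	lock->state = newstate;			// interlock held, plain store
 *	if (old & (M_PROMOTED_MSK | M_WAITERS_MSK))
 *		lck_mtx_unlock_wakeup_x86(lock, old);
 *	enable_preemption();
 */
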
984
985
986 LEAF_ENTRY(lck_mtx_ilk_try_lock)
987 mov %rdi, %rdx /* fetch lock pointer - no indirection here */
988
989 mov M_STATE(%rdx), %ecx
990
991 test $(M_ILOCKED_MSK), %ecx /* can't have the interlock yet */
992 jnz 3f
993
994 mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
995 or $(M_ILOCKED_MSK), %ecx
996
997 PREEMPTION_DISABLE
998 lock
999 cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
1000 jne 2f /* return failure after re-enabling preemption */
1001
1002 mov $1, %rax /* return success with preemption disabled */
1003 LEAF_RET
1004 2:
1005 PREEMPTION_ENABLE /* need to re-enable preemption */
1006 3:
1007 xor %rax, %rax /* return failure */
1008 LEAF_RET
1009
1010
1011 LEAF_ENTRY(lck_mtx_ilk_unlock)
1012 mov %rdi, %rdx /* fetch lock pointer - no indirection here */
1013
1014 andl $(~M_ILOCKED_MSK), M_STATE(%rdx)
1015
1016 PREEMPTION_ENABLE /* need to re-enable preemption */
1017
1018 LEAF_RET
1019
1020
1021 LEAF_ENTRY(lck_mtx_lock_grab_mutex)
1022 mov %rdi, %rdx /* fetch lock pointer - no indirection here */
1023
1024 mov M_STATE(%rdx), %ecx
1025
1026 test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* can't have the mutex yet */
1027 jnz 3f
1028
1029 mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
1030 or $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
1031
1032 PREEMPTION_DISABLE
1033 lock
1034 cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
1035 jne 2f /* branch on failure to spin loop */
1036
1037 mov %gs:CPU_ACTIVE_THREAD, %rax
1038 mov %rax, M_OWNER(%rdx) /* record owner of mutex */
1039 #if MACH_LDEBUG
1040 test %rax, %rax
1041 jz 1f
1042 incl TH_MUTEX_COUNT(%rax) /* lock statistic */
1043 1:
1044 #endif /* MACH_LDEBUG */
1045
1046 mov $1, %rax /* return success */
1047 LEAF_RET
1048 2:
1049 PREEMPTION_ENABLE
1050 3:
1051 xor %rax, %rax /* return failure */
1052 LEAF_RET
1053
1054
1055
1056 LEAF_ENTRY(lck_mtx_lock_mark_destroyed)
1057 mov %rdi, %rdx
1058 1:
1059 mov M_STATE(%rdx), %ecx
1060 cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
1061 jne 2f
1062
1063 movl $(MUTEX_DESTROYED), M_STATE(%rdx) /* convert to destroyed state */
1064 jmp 3f
1065 2:
1066 test $(M_ILOCKED_MSK), %rcx /* have to wait for interlock to clear */
1067 jnz 5f
1068
1069 PREEMPTION_DISABLE
1070 mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
1071 or $(M_ILOCKED_MSK), %ecx
1072 lock
1073 cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
1074 jne 4f /* branch on failure to spin loop */
1075 movl $(MUTEX_DESTROYED), M_STATE(%rdx) /* convert to destroyed state */
1076 PREEMPTION_ENABLE
1077 3:
1078 LEAF_RET /* return with M_ILOCKED set */
1079 4:
1080 PREEMPTION_ENABLE
1081 5:
1082 PAUSE
1083 jmp 1b
1084
1085 LEAF_ENTRY(preemption_underflow_panic)
1086 FRAME
1087 incl %gs:CPU_PREEMPTION_LEVEL
1088 ALIGN_STACK()
1089 LOAD_STRING_ARG0(16f)
1090 CALL_PANIC()
1091 hlt
1092 .data
1093 16: String "Preemption level underflow, possible cause unlocking an unlocked mutex or spinlock"
1094 .text
1095
1096