1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1989 Carnegie-Mellon University
34 * All rights reserved. The CMU software License Agreement specifies
35 * the terms and conditions for use and redistribution.
36 */
37
38 #include <mach_rt.h>
39 #include <platforms.h>
40 #include <mach_ldebug.h>
41 #include <i386/asm.h>
42 #include <i386/eflags.h>
43 #include <i386/trap.h>
44 #include <config_dtrace.h>
45
46 #include "assym.s"
47
48 #define PAUSE rep; nop
49
50 /*
51 * When performance isn't the only concern, it's
52 * nice to build stack frames...
53 */
54 #define BUILD_STACK_FRAMES (GPROF || \
55 ((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB))
56
57 #if BUILD_STACK_FRAMES
58
59 /* Stack-frame-relative: */
60 #define L_PC B_PC
61 #define L_ARG0 B_ARG0
62 #define L_ARG1 B_ARG1
63
64 #define LEAF_ENTRY(name) \
65 Entry(name); \
66 FRAME; \
67 MCOUNT
68
69 #define LEAF_ENTRY2(n1,n2) \
70 Entry(n1); \
71 Entry(n2); \
72 FRAME; \
73 MCOUNT
74
75 #define LEAF_RET \
76 EMARF; \
77 ret
78
79 #else /* BUILD_STACK_FRAMES */
80
81 /* Stack-pointer-relative: */
82 #define L_PC S_PC
83 #define L_ARG0 S_ARG0
84 #define L_ARG1 S_ARG1
85
86 #define LEAF_ENTRY(name) \
87 Entry(name)
88
89 #define LEAF_ENTRY2(n1,n2) \
90 Entry(n1); \
91 Entry(n2)
92
93 #define LEAF_RET \
94 ret
95
96 #endif /* BUILD_STACK_FRAMES */
97
98
99 /* Non-leaf routines always have a stack frame: */
100
101 #define NONLEAF_ENTRY(name) \
102 Entry(name); \
103 FRAME; \
104 MCOUNT
105
106 #define NONLEAF_ENTRY2(n1,n2) \
107 Entry(n1); \
108 Entry(n2); \
109 FRAME; \
110 MCOUNT
111
112 #define NONLEAF_RET \
113 EMARF; \
114 ret
115
116
117 #define M_ILK (%edx)
118 #define M_LOCKED MUTEX_LOCKED(%edx)
119 #define M_WAITERS MUTEX_WAITERS(%edx)
120 #define M_PROMOTED_PRI MUTEX_PROMOTED_PRI(%edx)
121 #define M_ITAG MUTEX_ITAG(%edx)
122 #define M_PTR MUTEX_PTR(%edx)
123 #if MACH_LDEBUG
124 #define M_TYPE MUTEX_TYPE(%edx)
125 #define M_PC MUTEX_PC(%edx)
126 #define M_THREAD MUTEX_THREAD(%edx)
127 #endif /* MACH_LDEBUG */
128
129 #include <i386/mp.h>
130 #define CX(addr,reg) addr(,reg,4)
131
132 #if MACH_LDEBUG
133 /*
134 * Routines for general lock debugging.
135 */
136
137 /*
138 * Checks for the expected lock type and calls "panic" on
139 * a mismatch. Detects calls to mutex routines on a
140 * simple lock and vice versa.
141 */
142 #define CHECK_MUTEX_TYPE() \
143 cmpl $ MUTEX_TAG,M_TYPE ; \
144 je 1f ; \
145 pushl $2f ; \
146 call EXT(panic) ; \
147 hlt ; \
148 .data ; \
149 2: String "not a mutex!" ; \
150 .text ; \
151 1:
152
153 /*
154 * If one or more simplelocks are currently held by a thread,
155 * an attempt to acquire a mutex will cause this check to fail
156 * (since acquiring a mutex may context switch, holding a
157 * simplelock across it is not a good thing).
158 */
159 #if MACH_RT
160 #define CHECK_PREEMPTION_LEVEL() \
161 cmpl $0,%gs:CPU_PREEMPTION_LEVEL ; \
162 je 1f ; \
163 pushl $2f ; \
164 call EXT(panic) ; \
165 hlt ; \
166 .data ; \
167 2: String "preemption_level != 0!" ; \
168 .text ; \
169 1:
170 #else /* MACH_RT */
171 #define CHECK_PREEMPTION_LEVEL()
172 #endif /* MACH_RT */
173
174 #define CHECK_NO_SIMPLELOCKS() \
175 cmpl $0,%gs:CPU_SIMPLE_LOCK_COUNT ; \
176 je 1f ; \
177 pushl $2f ; \
178 call EXT(panic) ; \
179 hlt ; \
180 .data ; \
181 2: String "simple_locks_held!" ; \
182 .text ; \
183 1:
184
185 /*
186 * Verifies return to the correct thread in "unlock" situations.
187 */
188 #define CHECK_THREAD(thd) \
189 movl %gs:CPU_ACTIVE_THREAD,%ecx ; \
190 testl %ecx,%ecx ; \
191 je 1f ; \
192 cmpl %ecx,thd ; \
193 je 1f ; \
194 pushl $2f ; \
195 call EXT(panic) ; \
196 hlt ; \
197 .data ; \
198 2: String "wrong thread!" ; \
199 .text ; \
200 1:
201
202 #define CHECK_MYLOCK(thd) \
203 movl %gs:CPU_ACTIVE_THREAD,%ecx ; \
204 testl %ecx,%ecx ; \
205 je 1f ; \
206 cmpl %ecx,thd ; \
207 jne 1f ; \
208 pushl $2f ; \
209 call EXT(panic) ; \
210 hlt ; \
211 .data ; \
212 2: String "mylock attempt!" ; \
213 .text ; \
214 1:
215
216 #define METER_SIMPLE_LOCK_LOCK(reg) \
217 pushl reg ; \
218 call EXT(meter_simple_lock) ; \
219 popl reg
220
221 #define METER_SIMPLE_LOCK_UNLOCK(reg) \
222 pushl reg ; \
223 call EXT(meter_simple_unlock) ; \
224 popl reg
225
226 #else /* MACH_LDEBUG */
227 #define CHECK_MUTEX_TYPE()
228 #define CHECK_SIMPLE_LOCK_TYPE
229 #define CHECK_THREAD(thd)
230 #define CHECK_PREEMPTION_LEVEL()
231 #define CHECK_NO_SIMPLELOCKS()
232 #define CHECK_MYLOCK(thd)
233 #define METER_SIMPLE_LOCK_LOCK(reg)
234 #define METER_SIMPLE_LOCK_UNLOCK(reg)
235 #endif /* MACH_LDEBUG */
236
237
238 #define PREEMPTION_DISABLE \
239 incl %gs:CPU_PREEMPTION_LEVEL
240
241
242 #define PREEMPTION_ENABLE \
243 decl %gs:CPU_PREEMPTION_LEVEL ; \
244 jne 9f ; \
245 pushf ; \
246 testl $ EFL_IF,(%esp) ; \
247 je 8f ; \
248 cli ; \
249 movl %gs:CPU_PENDING_AST,%eax ; \
250 testl $ AST_URGENT,%eax ; \
251 je 8f ; \
252 movl %gs:CPU_INTERRUPT_LEVEL,%eax ; \
253 testl %eax,%eax ; \
254 jne 8f ; \
255 popf ; \
256 int $(T_PREEMPT) ; \
257 jmp 9f ; \
258 8: \
259 popf ; \
260 9:
261
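/*
 * For reference, the policy PREEMPTION_ENABLE implements above, as a
 * minimal C sketch. The globals and trigger_preempt() are illustrative
 * stand-ins for the per-cpu fields (%gs:CPU_PREEMPTION_LEVEL and friends)
 * and for the int $(T_PREEMPT) trap; they are not kernel interfaces.
 *
 *	static int cpu_preemption_level, cpu_pending_ast_urgent;
 *	static int cpu_interrupt_level, interrupts_enabled;
 *
 *	static void trigger_preempt(void) { }	// stands in for int $(T_PREEMPT)
 *
 *	static void preemption_enable_sketch(void)
 *	{
 *		if (--cpu_preemption_level != 0)
 *			return;			// still disabled at an outer level
 *		if (!interrupts_enabled)
 *			return;			// cannot take a preemption trap now
 *		if (cpu_pending_ast_urgent && cpu_interrupt_level == 0)
 *			trigger_preempt();	// urgent AST pending: preempt now
 *	}
 */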
262
263
264 #if CONFIG_DTRACE
265 #define LOCKSTAT_LABEL(lab) \
266 .data ;\
267 .globl lab ;\
268 lab: ;\
269 .long 9f ;\
270 .text ;\
271 9:
272
273 .globl _lockstat_probe
274 .globl _lockstat_probemap
275
276 #define LOCKSTAT_RECORD(id, lck) \
277 push %ebp ; \
278 mov %esp,%ebp ; \
279 sub $0x38,%esp /* size of dtrace_probe args */ ; \
280 movl _lockstat_probemap + (id * 4),%eax ; \
281 test %eax,%eax ; \
282 je 9f ; \
283 movl $0,36(%esp) ; \
284 movl $0,40(%esp) ; \
285 movl $0,28(%esp) ; \
286 movl $0,32(%esp) ; \
287 movl $0,20(%esp) ; \
288 movl $0,24(%esp) ; \
289 movl $0,12(%esp) ; \
290 movl $0,16(%esp) ; \
291 movl lck,4(%esp) /* copy lock pointer to arg 1 */ ; \
292 movl $0,8(%esp) ; \
293 movl %eax,(%esp) ; \
294 call *_lockstat_probe ; \
295 9: leave
296 /* the ret is left to subsequent code, e.g. code that sets return values */
297
298 #define LOCKSTAT_RECORD2(id, lck, arg) \
299 push %ebp ; \
300 mov %esp,%ebp ; \
301 sub $0x38,%esp /* size of dtrace_probe args */ ; \
302 movl _lockstat_probemap + (id * 4),%eax ; \
303 test %eax,%eax ; \
304 je 9f ; \
305 movl $0,36(%esp) ; \
306 movl $0,40(%esp) ; \
307 movl $0,28(%esp) ; \
308 movl $0,32(%esp) ; \
309 movl $0,20(%esp) ; \
310 movl $0,24(%esp) ; \
311 movl $0,12(%esp) ; \
312 movl $0,16(%esp) ; \
313 movl lck,4(%esp) /* copy lock pointer to arg 1 */ ; \
314 movl arg,8(%esp) ; \
315 movl %eax,(%esp) ; \
316 call *_lockstat_probe ; \
317 9: leave
318 /* the ret is left to subsequent code, e.g. code that sets return values */
319 #endif
320
321
322 /*
323 * void hw_lock_init(hw_lock_t)
324 *
325 * Initialize a hardware lock.
326 */
327 LEAF_ENTRY(hw_lock_init)
328 movl L_ARG0,%edx /* fetch lock pointer */
329 movl $0,(%edx) /* clear the lock */
330 LEAF_RET
331
332
333 /*
334 * void hw_lock_byte_init(uint8_t *)
335 *
336 * Initialize a hardware byte lock.
337 */
338 LEAF_ENTRY(hw_lock_byte_init)
339 movl L_ARG0,%edx /* fetch lock pointer */
340 movb $0,(%edx) /* clear the lock */
341 LEAF_RET
342
343 /*
344 * void hw_lock_lock(hw_lock_t)
345 *
346 * Acquire lock, spinning until it becomes available.
347 * MACH_RT: also return with preemption disabled.
348 */
349 LEAF_ENTRY(hw_lock_lock)
350 movl L_ARG0,%edx /* fetch lock pointer */
351
352 movl %gs:CPU_ACTIVE_THREAD,%ecx
353 PREEMPTION_DISABLE
354 1:
355 movl (%edx), %eax
356 testl %eax,%eax /* lock locked? */
357 jne 3f /* branch if so */
358 lock; cmpxchgl %ecx,(%edx) /* try to acquire the HW lock */
359 jne 3f
360 movl $1,%eax /* In case this was a timeout call */
361 LEAF_RET /* if yes, then nothing left to do */
362 3:
363 PAUSE /* pause for hyper-threading */
364 jmp 1b /* try again */
365
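/*
 * Rough userspace C analogue of the acquire loop above, for illustration
 * only: it uses GCC atomic builtins and a caller-supplied owner value in
 * place of the kernel's active-thread pointer, and it omits the
 * preemption-disable step, which has no userspace equivalent here.
 *
 *	#include <stdint.h>
 *
 *	static void hw_lock_lock_sketch(volatile uintptr_t *lock, uintptr_t self)
 *	{
 *		for (;;) {
 *			if (*lock == 0 &&
 *			    __sync_bool_compare_and_swap(lock, 0, self))
 *				return;			// acquired
 *			__builtin_ia32_pause();		// rep; nop, as in PAUSE
 *		}
 *	}
 */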
366 /*
367 * void hw_lock_byte_lock(uint8_t *lock_byte)
368 *
369 * Acquire byte sized lock operand, spinning until it becomes available.
370 * MACH_RT: also return with preemption disabled.
371 */
372
373 LEAF_ENTRY(hw_lock_byte_lock)
374 movl L_ARG0,%edx /* Load lock pointer */
375 PREEMPTION_DISABLE
376 movl $1, %ecx /* Set lock value */
377 1:
378 movb (%edx), %al /* Load byte at address */
379 testb %al,%al /* lock locked? */
380 jne 3f /* branch if so */
381 lock; cmpxchgb %cl,(%edx) /* attempt atomic compare exchange */
382 jne 3f
383 LEAF_RET /* if yes, then nothing left to do */
384 3:
385 PAUSE /* pause for hyper-threading */
386 jmp 1b /* try again */
387
388 /*
389 * unsigned int hw_lock_to(hw_lock_t, unsigned int)
390 *
391 * Acquire lock, spinning until it becomes available or timeout.
392 * MACH_RT: also return with preemption disabled.
393 */
394 LEAF_ENTRY(hw_lock_to)
395 1:
396 movl L_ARG0,%edx /* fetch lock pointer */
397 movl %gs:CPU_ACTIVE_THREAD,%ecx
398 /*
399 * Attempt to grab the lock immediately
400 * - fastpath without timeout nonsense.
401 */
402 PREEMPTION_DISABLE
403 movl (%edx), %eax
404 testl %eax,%eax /* lock locked? */
405 jne 2f /* branch if so */
406 lock; cmpxchgl %ecx,(%edx) /* try to acquire the HW lock */
407 jne 2f /* branch on failure */
408 movl $1,%eax
409 LEAF_RET
410
411 2:
412 #define INNER_LOOP_COUNT 1000
413 /*
414 * Failed to get the lock so set the timeout
415 * and then spin re-checking the lock but pausing
416 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
417 */
418 movl L_ARG1,%ecx /* fetch timeout */
419 push %edi
420 push %ebx
421 mov %edx,%edi
422
423 rdtsc /* read cyclecount into %edx:%eax */
424 lfence
425 addl %ecx,%eax /* add timeout to current timestamp */
426 adcl $0,%edx /* add carry */
427 mov %edx,%ecx
428 mov %eax,%ebx /* %ecx:%ebx is the timeout expiry */
429 4:
430 /*
431 * The inner-loop spin to look for the lock being freed.
432 */
433 mov $(INNER_LOOP_COUNT),%edx
434 5:
435 PAUSE /* pause for hyper-threading */
436 movl (%edi),%eax /* spin checking lock value in cache */
437 testl %eax,%eax
438 je 6f /* zero => unlocked, try to grab it */
439 decl %edx /* decrement inner loop count */
440 jnz 5b /* time to check for timeout? */
441
442 /*
443 * Here after spinning INNER_LOOP_COUNT times, check for timeout
444 */
445 rdtsc /* cyclecount into %edx:%eax */
446 lfence
447 cmpl %ecx,%edx /* compare high-order 32-bits */
448 jb 4b /* continue spinning if less, or */
449 cmpl %ebx,%eax /* compare low-order 32-bits */
450 jb 4b /* continue if less, else bail */
451 xor %eax,%eax /* with 0 return value */
452 pop %ebx
453 pop %edi
454 LEAF_RET
455
456 6:
457 /*
458 * Here to try to grab the lock that now appears to be free
459 * after contention.
460 */
461 movl %gs:CPU_ACTIVE_THREAD,%edx
462 lock; cmpxchgl %edx,(%edi) /* try to acquire the HW lock */
463 jne 4b /* no - spin again */
464 movl $1,%eax /* yes */
465 pop %ebx
466 pop %edi
467 LEAF_RET
468
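/*
 * The timeout logic above, restated as a hedged C sketch. It assumes
 * GCC/clang x86 builtins (__rdtsc, __builtin_ia32_pause) and, as in the
 * assembly, takes the timeout in TSC ticks; the exact interleaving of the
 * timeout check differs slightly from the hand-written loop.
 *
 *	#include <stdint.h>
 *	#include <x86intrin.h>			// __rdtsc()
 *
 *	#define INNER_LOOP_COUNT 1000
 *
 *	static unsigned int
 *	hw_lock_to_sketch(volatile uintptr_t *lock, uintptr_t self, uint64_t timeout)
 *	{
 *		uint64_t deadline = __rdtsc() + timeout;
 *
 *		for (;;) {
 *			if (*lock == 0 &&
 *			    __sync_bool_compare_and_swap(lock, 0, self))
 *				return 1;		// acquired
 *			for (int i = 0; i < INNER_LOOP_COUNT; i++) {
 *				__builtin_ia32_pause();
 *				if (*lock == 0)
 *					break;		// looks free: retry the grab
 *			}
 *			if (__rdtsc() >= deadline)
 *				return 0;		// timed out
 *		}
 *	}
 */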
469 /*
470 * void hw_lock_unlock(hw_lock_t)
471 *
472 * Unconditionally release lock.
473 * MACH_RT: release preemption level.
474 */
475 LEAF_ENTRY(hw_lock_unlock)
476 movl L_ARG0,%edx /* fetch lock pointer */
477 movl $0,(%edx) /* clear the lock */
478 PREEMPTION_ENABLE
479 LEAF_RET
480 /*
481 * void hw_lock_byte_unlock(uint8_t *lock_byte)
482 *
483 * Unconditionally release byte sized lock operand.
484 * MACH_RT: release preemption level.
485 */
486
487 LEAF_ENTRY(hw_lock_byte_unlock)
488 movl L_ARG0,%edx /* Load lock pointer */
489 movb $0,(%edx) /* Clear the lock byte */
490 PREEMPTION_ENABLE
491 LEAF_RET
492
493 /*
494 * void i386_lock_unlock_with_flush(hw_lock_t)
495 *
496 * Unconditionally release lock, followed by a cacheline flush of
497 * the line corresponding to the lock dword. This routine is currently
498 * used with certain locks which are susceptible to lock starvation;
499 * the flush minimizes cache affinity for lock acquisitions. A queued spinlock
500 * or other mechanism that ensures fairness would obviate the need
501 * for this routine, but ideally few or no spinlocks should exhibit
502 * enough contention to require such measures.
503 * MACH_RT: release preemption level.
504 */
505 LEAF_ENTRY(i386_lock_unlock_with_flush)
506 movl L_ARG0,%edx /* Fetch lock pointer */
507 movl $0,(%edx) /* Clear the lock */
508 mfence /* Serialize prior stores */
509 clflush (%edx) /* Write back and invalidate line */
510 PREEMPTION_ENABLE
511 LEAF_RET
512
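/*
 * A minimal C sketch of the release-then-flush idea above, illustrative
 * only: it uses the SSE2 intrinsics _mm_mfence/_mm_clflush rather than
 * the inline mfence/clflush instructions, and omits the preemption-enable
 * step.
 *
 *	#include <stdint.h>
 *	#include <emmintrin.h>			// _mm_mfence, _mm_clflush
 *
 *	static void unlock_with_flush_sketch(volatile uintptr_t *lock)
 *	{
 *		*lock = 0;				// release the lock
 *		_mm_mfence();				// serialize prior stores
 *		_mm_clflush((const void *)lock);	// evict the lock's cacheline
 *	}
 */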
513 /*
514 * unsigned int hw_lock_try(hw_lock_t)
515 * MACH_RT: returns with preemption disabled on success.
516 */
517 LEAF_ENTRY(hw_lock_try)
518 movl L_ARG0,%edx /* fetch lock pointer */
519
520 movl %gs:CPU_ACTIVE_THREAD,%ecx
521 PREEMPTION_DISABLE
522 movl (%edx),%eax
523 testl %eax,%eax
524 jne 1f
525 lock; cmpxchgl %ecx,(%edx) /* try to acquire the HW lock */
526 jne 1f
527
528 movl $1,%eax /* success */
529 LEAF_RET
530
531 1:
532 PREEMPTION_ENABLE /* failure: release preemption... */
533 xorl %eax,%eax /* ...and return failure */
534 LEAF_RET
535
536 /*
537 * unsigned int hw_lock_held(hw_lock_t)
538 * MACH_RT: doesn't change preemption state.
539 * N.B. Racy, of course.
540 */
541 LEAF_ENTRY(hw_lock_held)
542 movl L_ARG0,%edx /* fetch lock pointer */
543
544 movl (%edx),%eax /* check lock value */
545 testl %eax,%eax
546 movl $1,%ecx
547 cmovne %ecx,%eax /* 0 => unlocked, 1 => locked */
548 LEAF_RET
549
550 LEAF_ENTRY(mutex_init)
551 movl L_ARG0,%edx /* fetch lock pointer */
552 xorl %eax,%eax
553 movl %eax,M_ILK /* clear interlock */
554 movl %eax,M_LOCKED /* clear locked flag */
555 movw %ax,M_WAITERS /* init waiter count */
556 movw %ax,M_PROMOTED_PRI
557
558 #if MACH_LDEBUG
559 movl $ MUTEX_TAG,M_TYPE /* set lock type */
560 movl %eax,M_PC /* init caller pc */
561 movl %eax,M_THREAD /* and owning thread */
562 #endif
563
564 LEAF_RET
565
566 /*
567 * Reader-writer lock fastpaths. These currently exist for the
568 * shared lock acquire and release paths (where they reduce overhead
569 * considerably)--more can be added as necessary (DRK).
570 */
571
572 /*
573 * These should reflect the layout of the bitfield embedded within
574 * the lck_rw_t structure (see i386/locks.h).
575 */
576 #define LCK_RW_INTERLOCK 0x1
577 #define LCK_RW_WANT_UPGRADE 0x2
578 #define LCK_RW_WANT_WRITE 0x4
579 #define LCK_R_WAITING 0x8
580 #define LCK_W_WAITING 0x10
581
582 #define RW_LOCK_SHARED_MASK ((LCK_RW_INTERLOCK<<16) | \
583 ((LCK_RW_WANT_UPGRADE|LCK_RW_WANT_WRITE) << 24))
584 /*
585 * void lck_rw_lock_shared(lck_rw_t*)
586 *
587 */
588
589 Entry(lck_rw_lock_shared)
590 movl S_ARG0, %edx
591 1:
592 movl (%edx), %eax /* Load state bitfield and interlock */
593 testl $(RW_LOCK_SHARED_MASK), %eax /* Eligible for fastpath? */
594 jne 3f
595 movl %eax, %ecx
596 incl %ecx /* Increment reader refcount */
597 lock
598 cmpxchgl %ecx, (%edx) /* Attempt atomic exchange */
599 jne 2f
600
601 #if CONFIG_DTRACE
602 /*
603 * Dtrace lockstat event: LS_LCK_RW_LOCK_SHARED_ACQUIRE
604 * Implemented by swapping between return and no-op instructions.
605 * See bsd/dev/dtrace/lockstat.c.
606 */
607 LOCKSTAT_LABEL(_lck_rw_lock_shared_lockstat_patch_point)
608 ret
609 /* Fall thru when patched, counting on lock pointer in %edx */
610 LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, %edx)
611 #endif
612 ret
613
614 2:
615 PAUSE
616 jmp 1b
617 3:
618 jmp EXT(lck_rw_lock_shared_gen)
619
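/*
 * Hedged C sketch of the shared-acquire fastpath above. It assumes the
 * same 32-bit layout: the low 16 bits hold the reader refcount and the
 * mask bits defined above flag the interlock and pending writers or
 * upgraders; rw_slow_path() is an illustrative stand-in for
 * lck_rw_lock_shared_gen.
 *
 *	#include <stdint.h>
 *
 *	#define RW_SHARED_MASK	((0x1u << 16) | ((0x2u | 0x4u) << 24))
 *
 *	static void rw_slow_path(volatile uint32_t *rw) { (void)rw; }
 *
 *	static void rw_lock_shared_sketch(volatile uint32_t *rw)
 *	{
 *		for (;;) {
 *			uint32_t old = *rw;
 *			if (old & RW_SHARED_MASK) {
 *				rw_slow_path(rw);	// interlocked or writer pending
 *				return;
 *			}
 *			if (__sync_bool_compare_and_swap(rw, old, old + 1))
 *				return;			// reader refcount bumped
 *			__builtin_ia32_pause();		// CAS lost a race: retry
 *		}
 *	}
 */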
620
621 /*
622 * lck_rw_type_t lck_rw_done(lck_rw_t*)
623 *
624 */
625
626 .data
627 rwl_release_error_str:
628 .asciz "Releasing non-exclusive RW lock without a reader refcount!"
629 .text
630
631 #define RW_LOCK_RELEASE_MASK ((LCK_RW_INTERLOCK<<16) | \
632 ((LCK_RW_WANT_UPGRADE|LCK_RW_WANT_WRITE|LCK_R_WAITING|LCK_W_WAITING) << 24))
633 Entry(lck_rw_done)
634 movl S_ARG0, %edx
635 1:
636 movl (%edx), %eax /* Load state bitfield and interlock */
637 testl $(RW_LOCK_RELEASE_MASK), %eax /* Eligible for fastpath? */
638 jne 3f
639 movl %eax, %ecx
640 /* Assert refcount */
641 testl $(0xFFFF), %ecx
642 jne 5f
643 movl $(rwl_release_error_str), S_ARG0
644 jmp EXT(panic)
645 5:
646 decl %ecx /* Decrement reader count */
647 lock
648 cmpxchgl %ecx, (%edx)
649 jne 2f
650 movl $(RW_SHARED), %eax /* Indicate that the lock was shared */
651 #if CONFIG_DTRACE
652 /* Dtrace lockstat probe: LS_RW_DONE_RELEASE as reader */
653 LOCKSTAT_LABEL(_lck_rw_done_lockstat_patch_point)
654 ret
655 /*
656 * Note: Dtrace's convention is 0 ==> reader, which is
657 * a different absolute value than $(RW_SHARED)
658 * %edx contains the lock address already from the above
659 */
660 LOCKSTAT_RECORD2(LS_LCK_RW_DONE_RELEASE, %edx, $0)
661 movl $(RW_SHARED), %eax /* Indicate that the lock was shared */
662 #endif
663 ret
664
665 2:
666 PAUSE
667 jmp 1b
668 3:
669 jmp EXT(lck_rw_done_gen)
670
671
672 NONLEAF_ENTRY2(mutex_lock_spin,_mutex_lock_spin)
673
674 movl B_ARG0,%edx /* fetch lock pointer */
675 pushf /* save interrupt state */
676
677 CHECK_MUTEX_TYPE()
678 CHECK_NO_SIMPLELOCKS()
679 CHECK_PREEMPTION_LEVEL()
680
681 movl M_ILK,%eax /* read interlock */
682 testl %eax,%eax /* unlocked? */
683 jne Lmls_ilk_loop /* no, go spin */
684 Lmls_retry:
685 cli /* disable interrupts */
686 movl %gs:CPU_ACTIVE_THREAD,%ecx
687
688 /* eax == 0 at this point */
689 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
690 jne Lmls_ilk_fail /* branch on failure to spin loop */
691
692 movl M_LOCKED,%ecx /* get lock owner */
693 testl %ecx,%ecx /* is the mutex locked? */
694 jne Lml_fail /* yes, fall back to a normal mutex lock */
695 movl $(MUTEX_LOCKED_AS_SPIN),M_LOCKED /* indicate ownership as a spin lock */
696
697 #if MACH_LDEBUG
698 movl %gs:CPU_ACTIVE_THREAD,%ecx
699 movl %ecx,M_THREAD
700 movl B_PC,%ecx
701 movl %ecx,M_PC
702 #endif
703 PREEMPTION_DISABLE
704 popf /* restore interrupt state */
705 leave /* return with the interlock held */
706 #if CONFIG_DTRACE
707 LOCKSTAT_LABEL(_mutex_lock_spin_lockstat_patch_point)
708 ret
709 /* %edx contains the lock address from above */
710 LOCKSTAT_RECORD(LS_MUTEX_LOCK_SPIN_ACQUIRE, %edx)
711 #endif
712 ret
713
714 Lmls_ilk_fail:
715 popf /* restore interrupt state */
716 pushf /* resave interrupt state on stack */
717
718 Lmls_ilk_loop:
719 PAUSE
720 movl M_ILK,%eax /* read interlock */
721 testl %eax,%eax /* unlocked? */
722 je Lmls_retry /* yes, go for it */
723 jmp Lmls_ilk_loop /* no, keep spinning */
724
725
726 NONLEAF_ENTRY2(mutex_lock,_mutex_lock)
727
728 movl B_ARG0,%edx /* fetch lock pointer */
729 pushf /* save interrupt state */
730
731 CHECK_MUTEX_TYPE()
732 CHECK_NO_SIMPLELOCKS()
733 CHECK_PREEMPTION_LEVEL()
734
735 movl M_ILK,%eax /* is interlock held */
736 testl %eax,%eax
737 jne Lml_ilk_loop /* yes, go do the spin loop */
738 Lml_retry:
739 cli /* disable interrupts */
740 movl %gs:CPU_ACTIVE_THREAD,%ecx
741
742 /* eax == 0 at this point */
743 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
744 jne Lml_ilk_fail /* branch on failure to spin loop */
745
746 movl M_LOCKED,%ecx /* get lock owner */
747 testl %ecx,%ecx /* is the mutex locked? */
748 jne Lml_fail /* yes, we lose */
749 Lml_acquire:
750 movl %gs:CPU_ACTIVE_THREAD,%ecx
751 movl %ecx,M_LOCKED
752
753 #if MACH_LDEBUG
754 movl %ecx,M_THREAD
755 movl B_PC,%ecx
756 movl %ecx,M_PC
757 #endif
758 cmpw $0,M_WAITERS /* are there any waiters? */
759 jne Lml_waiters /* yes, more work to do */
760 Lml_return:
761 xorl %eax,%eax
762 movl %eax,M_ILK
763
764 popf /* restore interrupt state */
765 leave
766 #if CONFIG_DTRACE
767 LOCKSTAT_LABEL(_mutex_lock_lockstat_patch_point)
768 ret
769 /* %edx still contains the lock pointer */
770 LOCKSTAT_RECORD(LS_MUTEX_LOCK_ACQUIRE, %edx)
771 #endif
772 ret
773
774 /*
775 * We got the mutex, but there are waiters. Update information
776 * on waiters.
777 */
778 Lml_waiters:
779 pushl %edx /* save mutex address */
780 pushl %edx
781 call EXT(lck_mtx_lock_acquire)
782 addl $4,%esp
783 popl %edx /* restore mutex address */
784 jmp Lml_return
785
786 Lml_restart:
787 Lml_ilk_fail:
788 popf /* restore interrupt state */
789 pushf /* resave interrupt state on stack */
790
791 Lml_ilk_loop:
792 PAUSE
793 movl M_ILK,%eax /* read interlock */
794 testl %eax,%eax /* unlocked? */
795 je Lml_retry /* yes, go try to grab it */
796 jmp Lml_ilk_loop /* no - keep spinning */
797
798 Lml_fail:
799 /*
800 * Check if the owner is on another processor and therefore
801 * we should try to spin before blocking.
802 */
803 testl $(OnProc),ACT_SPF(%ecx)
804 jz Lml_block
805
806 /*
807 * Here if owner is on another processor:
808 * - release the interlock
809 * - spin on the holder until release or timeout
810 * - in either case re-acquire the interlock
811 * - if released, acquire it
812 * - otherwise drop thru to block.
813 */
814 xorl %eax,%eax
815 movl %eax,M_ILK /* zero interlock */
816 popf
817 pushf /* restore interrupt state */
818
819 push %edx /* lock address */
820 call EXT(lck_mtx_lock_spinwait) /* call out to do spinning */
821 addl $4,%esp
822 movl B_ARG0,%edx /* refetch mutex address */
823
824 /* Re-acquire interlock - interrupts currently enabled */
825 movl M_ILK,%eax /* is interlock held */
826 testl %eax,%eax
827 jne Lml_ilk_reloop /* yes, go do the spin loop */
828 Lml_reget_retry:
829 cli /* disable interrupts */
830 movl %gs:CPU_ACTIVE_THREAD,%ecx
831
832 /* eax == 0 at this point */
833 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
834 jne Lml_ilk_refail /* branch on failure to spin loop */
835
836 movl M_LOCKED,%ecx /* get lock owner */
837 testl %ecx,%ecx /* is the mutex free? */
838 je Lml_acquire /* yes, acquire */
839
840 Lml_block:
841 CHECK_MYLOCK(M_THREAD)
842 pushl M_LOCKED
843 pushl %edx /* push mutex address */
844 call EXT(lck_mtx_lock_wait) /* wait for the lock */
845 addl $8,%esp /* returns with interlock dropped */
846 movl B_ARG0,%edx /* refetch mutex address */
847 jmp Lml_restart /* and start over */
848
849 Lml_ilk_refail:
850 popf /* restore interrupt state */
851 pushf /* resave interrupt state on stack */
852
853 Lml_ilk_reloop:
854 PAUSE
855 movl M_ILK,%eax /* read interlock */
856 testl %eax,%eax /* unlocked? */
857 je Lml_reget_retry /* yes, go try to grab it */
858 jmp Lml_ilk_reloop /* no - keep spinning */
859
860
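/*
 * The contended path above (Lml_fail) is a classic adaptive mutex: spin
 * while the owner is running on another processor, otherwise block. A
 * hedged C sketch of that policy; owner_is_on_cpu, spin_on_owner and
 * block_on_mutex are illustrative stubs standing in for the OnProc check,
 * lck_mtx_lock_spinwait and lck_mtx_lock_wait, and the interlock handling
 * is omitted.
 *
 *	struct mtx { void *owner; };
 *
 *	static int  owner_is_on_cpu(void *owner)  { return owner != 0; }
 *	static void spin_on_owner(struct mtx *m)  { (void)m; }	// bounded spin
 *	static void block_on_mutex(struct mtx *m) { (void)m; }	// sleep until woken
 *
 *	static void adaptive_lock_sketch(struct mtx *m, void *self)
 *	{
 *		for (;;) {
 *			if (__sync_bool_compare_and_swap(&m->owner, (void *)0, self))
 *				return;			// acquired
 *			if (owner_is_on_cpu(m->owner))
 *				spin_on_owner(m);	// lck_mtx_lock_spinwait
 *			else
 *				block_on_mutex(m);	// lck_mtx_lock_wait
 *		}
 *	}
 */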
861
862 NONLEAF_ENTRY2(mutex_try_spin,_mutex_try_spin)
863
864 movl B_ARG0,%edx /* fetch lock pointer */
865 pushf /* save interrupt state */
866
867 CHECK_MUTEX_TYPE()
868 CHECK_NO_SIMPLELOCKS()
869
870 movl M_ILK,%eax
871 testl %eax,%eax /* is the interlock held? */
872 jne Lmts_ilk_loop /* yes, go to spin loop */
873 Lmts_retry:
874 cli /* disable interrupts */
875 movl %gs:CPU_ACTIVE_THREAD,%ecx
876
877 /* eax == 0 at this point */
878 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
879 jne Lmts_ilk_fail /* branch on failure to spin loop */
880
881 movl M_LOCKED,%ecx /* get lock owner */
882 testl %ecx,%ecx /* is the mutex locked? */
883 jne Lmt_fail /* yes, we lose */
884 Lmts_acquire:
885 movl $(MUTEX_LOCKED_AS_SPIN),M_LOCKED /* indicate ownership as a spin lock */
886
887 #if MACH_LDEBUG
888 movl %gs:CPU_ACTIVE_THREAD,%ecx
889 movl %ecx,M_THREAD
890 movl B_PC,%ecx
891 movl %ecx,M_PC
892 #endif
893 PREEMPTION_DISABLE /* no, return with interlock held */
894 popf /* restore interrupt state */
895 movl $1,%eax
896 leave
897 #if CONFIG_DTRACE
898 LOCKSTAT_LABEL(_mutex_try_spin_lockstat_patch_point)
899 ret
900 /* %edx inherits the lock pointer from above */
901 LOCKSTAT_RECORD(LS_MUTEX_TRY_SPIN_ACQUIRE, %edx)
902 movl $1,%eax
903 #endif
904 ret
905
906 Lmts_ilk_fail:
907 popf /* restore interrupt state */
908 pushf /* resave interrupt state on stack */
909
910 Lmts_ilk_loop:
911 PAUSE
912 /*
913 * need to do this check outside of the interlock:
914 * if this lock is held as a simple lock, we won't
915 * be able to take the interlock
916 */
917 movl M_LOCKED,%eax
918 testl %eax,%eax /* is the mutex locked? */
919 jne Lmt_fail_no_ilk /* yes, go return failure */
920
921 movl M_ILK,%eax /* read interlock */
922 testl %eax,%eax /* unlocked? */
923 je Lmts_retry /* yes, go try to grab it */
924 jmp Lmts_ilk_loop /* keep spinning */
925
926
927
928 NONLEAF_ENTRY2(mutex_try,_mutex_try)
929
930 movl B_ARG0,%edx /* fetch lock pointer */
931 pushf /* save interrupt state */
932
933 CHECK_MUTEX_TYPE()
934 CHECK_NO_SIMPLELOCKS()
935
936 movl M_ILK,%eax /* read interlock */
937 testl %eax,%eax /* unlocked? */
938 jne Lmt_ilk_loop /* yes, go try to grab it */
939 Lmt_retry:
940 cli /* disable interrupts */
941 movl %gs:CPU_ACTIVE_THREAD,%ecx
942
943 /* eax == 0 at this point */
944 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
945 jne Lmt_ilk_fail /* branch on failure to spin loop */
946
947 movl M_LOCKED,%ecx /* get lock owner */
948 testl %ecx,%ecx /* is the mutex locked? */
949 jne Lmt_fail /* yes, we lose */
950 Lmt_acquire:
951 movl %gs:CPU_ACTIVE_THREAD,%ecx
952 movl %ecx,M_LOCKED
953
954 #if MACH_LDEBUG
955 movl %ecx,M_THREAD
956 movl B_PC,%ecx
957 movl %ecx,M_PC
958 #endif
959 cmpw $0,M_WAITERS /* are there any waiters? */
960 jne Lmt_waiters /* yes, more work to do */
961 Lmt_return:
962 xorl %eax,%eax
963 movl %eax,M_ILK
964 popf /* restore interrupt state */
965
966 movl $1,%eax
967 leave
968 #if CONFIG_DTRACE
969 LOCKSTAT_LABEL(_mutex_try_lockstat_patch_point)
970 ret
971 /* inherit the lock pointer in %edx from above */
972 LOCKSTAT_RECORD(LS_MUTEX_TRY_LOCK_ACQUIRE, %edx)
973 movl $1,%eax
974 #endif
975 ret
976
977 Lmt_waiters:
978 pushl %edx /* save mutex address */
979 pushl %edx
980 call EXT(lck_mtx_lock_acquire)
981 addl $4,%esp
982 popl %edx /* restore mutex address */
983 jmp Lmt_return
984
985 Lmt_ilk_fail:
986 popf /* restore interrupt state */
987 pushf /* resave interrupt state on stack */
988
989 Lmt_ilk_loop:
990 PAUSE
991 /*
992 * need to do this check outside of the interlock:
993 * if this lock is held as a simple lock, we won't
994 * be able to take the interlock
995 */
996 movl M_LOCKED,%eax /* get lock owner */
997 testl %eax,%eax /* is the mutex locked? */
998 jne Lmt_fail_no_ilk /* yes, go return failure */
999
1000 movl M_ILK,%eax /* read interlock */
1001 testl %eax,%eax /* unlocked? */
1002 je Lmt_retry /* yes, go try to grab it */
1003 jmp Lmt_ilk_loop /* no - keep spinning */
1004
1005 Lmt_fail:
1006 xorl %eax,%eax
1007 movl %eax,M_ILK
1008
1009 Lmt_fail_no_ilk:
1010 xorl %eax,%eax
1011 popf /* restore interrupt state */
1012 NONLEAF_RET
1013
1014
1015
1016 LEAF_ENTRY(mutex_convert_spin)
1017 movl L_ARG0,%edx /* fetch lock pointer */
1018
1019 movl M_LOCKED,%ecx /* is this the spin variant of the mutex */
1020 cmpl $(MUTEX_LOCKED_AS_SPIN),%ecx
1021 jne Lmcs_exit /* already owned as a mutex, just return */
1022
1023 movl M_ILK,%ecx /* convert from spin version to mutex */
1024 movl %ecx,M_LOCKED /* take control of the mutex */
1025
1026 cmpw $0,M_WAITERS /* are there any waiters? */
1027 jne Lmcs_waiters /* yes, more work to do */
1028
1029 Lmcs_return:
1030 xorl %ecx,%ecx
1031 movl %ecx,M_ILK /* clear interlock */
1032 PREEMPTION_ENABLE
1033 Lmcs_exit:
1034 #if CONFIG_DTRACE
1035 LOCKSTAT_LABEL(_mutex_convert_spin_lockstat_patch_point)
1036 ret
1037 /* inherit %edx from above */
1038 LOCKSTAT_RECORD(LS_MUTEX_CONVERT_SPIN_ACQUIRE, %edx)
1039 #endif
1040 ret
1041
1042
1043 Lmcs_waiters:
1044 pushl %edx /* save mutex address */
1045 pushl %edx
1046 call EXT(lck_mtx_lock_acquire)
1047 addl $4,%esp
1048 popl %edx /* restore mutex address */
1049 jmp Lmcs_return
1050
1051
1052
1053 NONLEAF_ENTRY(mutex_unlock)
1054 movl B_ARG0,%edx /* fetch lock pointer */
1055
1056 movl M_LOCKED,%ecx /* is this the spin variant of the mutex */
1057 cmpl $(MUTEX_LOCKED_AS_SPIN),%ecx
1058 jne Lmu_enter /* no, go treat like a real mutex */
1059
1060 cmpw $0,M_WAITERS /* are there any waiters? */
1061 jne Lmus_wakeup /* yes, more work to do */
1062
1063 Lmus_drop_ilk:
1064 xorl %ecx,%ecx
1065 movl %ecx,M_LOCKED /* yes, clear the spin indicator */
1066 movl %ecx,M_ILK /* release the interlock */
1067 PREEMPTION_ENABLE /* and re-enable preemption */
1068 leave
1069 #if CONFIG_DTRACE
1070 LOCKSTAT_LABEL(_mutex_unlock_lockstat_patch_point)
1071 ret
1072 /* inherit lock pointer in %edx from above */
1073 LOCKSTAT_RECORD(LS_MUTEX_UNLOCK_RELEASE, %edx)
1074 #endif
1075 ret
1076
1077 Lmus_wakeup:
1078 pushl %edx /* save mutex address */
1079 pushl %edx /* push mutex address */
1080 call EXT(lck_mtx_unlockspin_wakeup) /* yes, wake a thread */
1081 addl $4,%esp
1082 popl %edx /* restore mutex pointer */
1083 jmp Lmus_drop_ilk
1084
1085 Lmu_enter:
1086 pushf /* save interrupt state */
1087
1088 CHECK_MUTEX_TYPE()
1089 CHECK_THREAD(M_THREAD)
1090
1091 movl M_ILK,%eax /* read interlock */
1092 testl %eax,%eax /* unlocked? */
1093 jne Lmu_ilk_loop /* yes, go try to grab it */
1094 Lmu_retry:
1095 cli /* disable interrupts */
1096 movl %gs:CPU_ACTIVE_THREAD,%ecx
1097
1098 /* eax == 0 at this point */
1099 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
1100 jne Lmu_ilk_fail /* branch on failure to spin loop */
1101
1102 cmpw $0,M_WAITERS /* are there any waiters? */
1103 jne Lmu_wakeup /* yes, more work to do */
1104
1105 Lmu_doit:
1106 #if MACH_LDEBUG
1107 movl $0,M_THREAD /* disown thread */
1108 #endif
1109 xorl %ecx,%ecx
1110 movl %ecx,M_LOCKED /* unlock the mutex */
1111 movl %ecx,M_ILK /* release the interlock */
1112 popf /* restore interrupt state */
1113 leave
1114 #if CONFIG_DTRACE
1115 LOCKSTAT_LABEL(_mutex_unlock2_lockstat_patch_point)
1116 ret
1117 /* inherit %edx from above */
1118 LOCKSTAT_RECORD(LS_MUTEX_UNLOCK_RELEASE, %edx)
1119 #endif
1120 ret
1121
1122 Lmu_ilk_fail:
1123 popf /* restore interrupt state */
1124 pushf /* resave interrupt state on stack */
1125
1126 Lmu_ilk_loop:
1127 PAUSE
1128 movl M_ILK,%eax /* read interlock */
1129 testl %eax,%eax /* unlocked? */
1130 je Lmu_retry /* yes, go try to grab it */
1131 jmp Lmu_ilk_loop /* no - keep spinning */
1132
1133 Lmu_wakeup:
1134 pushl M_LOCKED
1135 pushl %edx /* push mutex address */
1136 call EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
1137 addl $8,%esp
1138 movl B_ARG0,%edx /* restore lock pointer */
1139 jmp Lmu_doit
1140
1141 /*
1142 * void lck_mtx_assert(lck_mtx_t* l, unsigned int)
1143 * void _mutex_assert(mutex_t, unsigned int)
1144 * Takes the address of a lock, and an assertion type as parameters.
1145 * The assertion can take one of two forms, determined by the type
1146 * parameter: either the lock is held by the current thread, and the
1147 * type is LCK_MTX_ASSERT_OWNED, or it isn't and the type is
1148 * LCK_MTX_ASSERT_NOT_OWNED. Calls panic on assertion failure.
1149 *
1150 */
1151
1152 Entry(lck_mtx_assert)
1153 Entry(_mutex_assert)
1154 movl S_ARG0,%edx /* Load lock address */
1155 movl %gs:CPU_ACTIVE_THREAD,%ecx /* Load current thread */
1156
1157 cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
1158 cmove M_PTR,%edx /* If so, take indirection */
1159
1160 movl M_LOCKED,%eax /* Load lock word */
1161 cmpl $(MUTEX_LOCKED_AS_SPIN),%eax /* check for spin variant */
1162 cmove M_ILK,%eax /* yes, spin lock owner is in the interlock */
1163
1164 cmpl $(MUTEX_ASSERT_OWNED),S_ARG1 /* Determine assert type */
1165 jne 2f /* Assert ownership? */
1166 cmpl %eax,%ecx /* Current thread match? */
1167 jne 3f /* no, go panic */
1168 1: /* yes, we own it */
1169 ret /* just return */
1170 2:
1171 cmpl %eax,%ecx /* Current thread match? */
1172 jne 1b /* No, return */
1173 movl %edx,S_ARG1 /* Prep assertion failure */
1174 movl $(mutex_assert_owned_str),S_ARG0
1175 jmp 4f
1176 3:
1177 movl %edx,S_ARG1 /* Prep assertion failure */
1178 movl $(mutex_assert_not_owned_str),S_ARG0
1179 4:
1180 jmp EXT(panic)
1181
1182 .data
1183 mutex_assert_not_owned_str:
1184 .asciz "mutex (%p) not owned\n"
1185 mutex_assert_owned_str:
1186 .asciz "mutex (%p) owned\n"
1187 .text
1188
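/*
 * The assertion logic above, restated as a hedged C sketch: owner is the
 * value recovered from M_LOCKED (or from M_ILK for a spin-held mutex),
 * self is the current thread, and the constant names and values here are
 * illustrative stand-ins for LCK_MTX_ASSERT_OWNED / LCK_MTX_ASSERT_NOT_OWNED.
 *
 *	extern void panic(const char *fmt, ...);
 *
 *	#define ASSERT_OWNED		1
 *	#define ASSERT_NOT_OWNED	2
 *
 *	static void mtx_assert_sketch(const void *mtx, const void *owner,
 *				      const void *self, unsigned int type)
 *	{
 *		if (type == ASSERT_OWNED && owner != self)
 *			panic("mutex (%p) not owned\n", mtx);
 *		if (type != ASSERT_OWNED && owner == self)
 *			panic("mutex (%p) owned\n", mtx);
 *	}
 */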
1189 /* This preprocessor define controls whether the R-M-W updates of the
1190 * per-group statistics elements are atomic (LOCK-prefixed).
1191 * Enabled by default.
1192 */
1193 #define ATOMIC_STAT_UPDATES 1
1194
1195 #if defined(ATOMIC_STAT_UPDATES)
1196 #define LOCK_IF_ATOMIC_STAT_UPDATES lock
1197 #else
1198 #define LOCK_IF_ATOMIC_STAT_UPDATES
1199 #endif /* ATOMIC_STAT_UPDATES */
1200
1201
1202 /*
1203 * lck_mtx_lock()
1204 * lck_mtx_try_lock()
1205 * lck_mtx_unlock()
1206 * lck_mtx_lock_spin()
1207 * lck_mtx_convert_spin()
1208 *
1209 * These are variants of mutex_lock(), mutex_try(), mutex_unlock(),
1210 * mutex_lock_spin() and mutex_convert_spin() without
1211 * DEBUG checks (which require fields not present in lck_mtx_t's).
1212 */
1213
1214 NONLEAF_ENTRY(lck_mtx_lock_spin)
1215
1216 movl B_ARG0,%edx /* fetch lock pointer */
1217 pushf /* save interrupt state */
1218
1219 CHECK_NO_SIMPLELOCKS()
1220 CHECK_PREEMPTION_LEVEL()
1221
1222 movl M_ILK,%eax /* read interlock */
1223 testl %eax,%eax /* unlocked? */
1224 jne Llmls_eval_ilk /* no, go see if indirect */
1225 Llmls_retry:
1226 cli /* disable interrupts */
1227 movl %gs:CPU_ACTIVE_THREAD,%ecx
1228
1229 /* eax == 0 at this point */
1230 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
1231 jne Llmls_ilk_fail /* branch on failure to spin loop */
1232
1233 movl M_LOCKED,%ecx /* get lock owner */
1234 testl %ecx,%ecx /* is the mutex locked? */
1235 jne Llml_fail /* yes, fall back to a normal mutex */
1236
1237 Llmls_acquire:
1238 movl $(MUTEX_LOCKED_AS_SPIN),M_LOCKED /* indicate ownership as a spin lock */
1239 PREEMPTION_DISABLE
1240 popf /* restore interrupt state */
1241 NONLEAF_RET /* return with the interlock held */
1242
1243 Llmls_ilk_fail:
1244 popf /* restore interrupt state */
1245 pushf /* resave interrupt state on stack */
1246
1247 Llmls_ilk_loop:
1248 PAUSE
1249 movl M_ILK,%eax /* read interlock */
1250 testl %eax,%eax /* unlocked? */
1251 je Llmls_retry /* yes - go try to grab it */
1252
1253 cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
1254 jne Llmls_ilk_loop /* no - keep spinning */
1255
1256 pushl %edx
1257 call EXT(lck_mtx_interlock_panic)
1258 /*
1259 * shouldn't return from here, but just in case
1260 */
1261 popl %edx
1262 jmp Llmls_ilk_loop
1263
1264
1265 Llmls_eval_ilk:
1266 cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
1267 cmove M_PTR,%edx /* If so, take indirection */
1268 jne Llmls_ilk_loop /* If not, go to spin loop */
1269
1270 Llmls_lck_ext:
1271 pushl %esi /* Used to hold the lock group ptr */
1272 pushl %edi /* Used for stat update records */
1273 movl MUTEX_GRP(%edx),%esi /* Load lock group */
1274 xorl %edi,%edi /* Clear stat update records */
1275 /* 64-bit increment of acquire attempt statistic (per-group) */
1276 LOCK_IF_ATOMIC_STAT_UPDATES
1277 addl $1, GRP_MTX_STAT_UTIL(%esi)
1278 jnc 1f
1279 incl GRP_MTX_STAT_UTIL+4(%esi)
1280 1:
1281 movl M_ILK,%eax /* read interlock */
1282 testl %eax,%eax /* unlocked? */
1283 jne Llmls_ext_ilk_loop /* no, go to spin loop */
1284 Llmls_ext_retry:
1285 cli /* disable interrupts */
1286 movl %gs:CPU_ACTIVE_THREAD,%ecx
1287
1288 /* eax == 0 at this point */
1289 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
1290 jne Llmls_ext_ilk_fail /* branch on failure to retry */
1291
1292 movl M_LOCKED,%ecx /* get lock owner */
1293 testl %ecx,%ecx /* is the mutex locked? */
1294 jne Llml_ext_fail /* yes, we lose */
1295
1296 popl %edi
1297 popl %esi
1298 jmp Llmls_acquire
1299
1300 Llmls_ext_ilk_fail:
1301 /*
1302 * Slow path: call out to do the spinning.
1303 */
1304 movl 8(%esp),%ecx
1305 pushl %ecx
1306 popf /* restore interrupt state */
1307
1308 Llmls_ext_ilk_loop:
1309 PAUSE
1310 movl M_ILK,%eax /* read interlock */
1311 testl %eax,%eax /* unlocked? */
1312 je Llmls_ext_retry /* yes - go try to grab it */
1313
1314 cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
1315 jne Llmls_ext_ilk_loop /* no - keep spinning */
1316
1317 pushl %edx
1318 call EXT(lck_mtx_interlock_panic)
1319 /*
1320 * shouldn't return from here, but just in case
1321 */
1322 popl %edx
1323 jmp Llmls_ext_ilk_loop /* no - keep spinning */
1324
1325
1326
1327 NONLEAF_ENTRY(lck_mtx_lock)
1328
1329 movl B_ARG0,%edx /* fetch lock pointer */
1330 pushf /* save interrupt state */
1331
1332 CHECK_NO_SIMPLELOCKS()
1333 CHECK_PREEMPTION_LEVEL()
1334
1335 movl M_ILK,%eax /* read interlock */
1336 testl %eax,%eax /* unlocked? */
1337 jne Llml_eval_ilk /* no, go see if indirect */
1338 Llml_retry:
1339 cli /* disable interrupts */
1340 movl %gs:CPU_ACTIVE_THREAD,%ecx
1341
1342 /* eax == 0 at this point */
1343 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
1344 jne Llml_ilk_fail /* branch on failure to spin loop */
1345
1346 movl M_LOCKED,%ecx /* get lock owner */
1347 testl %ecx,%ecx /* is the mutex locked? */
1348 jne Llml_fail /* yes, we lose */
1349 Llml_acquire:
1350 movl %gs:CPU_ACTIVE_THREAD,%ecx
1351 movl %ecx,M_LOCKED
1352
1353 cmpw $0,M_WAITERS /* are there any waiters? */
1354 jne Lml_waiters /* yes, more work to do */
1355 Llml_return:
1356 xorl %eax,%eax
1357 movl %eax,M_ILK
1358
1359 popf /* restore interrupt state */
1360 leave
1361 #if CONFIG_DTRACE
1362 LOCKSTAT_LABEL(_lck_mtx_lock_lockstat_patch_point)
1363 ret
1364 /* inherit lock pointer in %edx above */
1365 LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, %edx)
1366 #endif
1367 ret
1368
1369 Llml_waiters:
1370 pushl %edx /* save mutex address */
1371 pushl %edx
1372 call EXT(lck_mtx_lock_acquire)
1373 addl $4,%esp
1374 popl %edx /* restore mutex address */
1375 jmp Llml_return
1376
1377 Llml_restart:
1378 Llml_ilk_fail:
1379 popf /* restore interrupt state */
1380 pushf /* resave interrupt state on stack */
1381
1382 Llml_ilk_loop:
1383 PAUSE
1384 movl M_ILK,%eax /* read interlock */
1385 testl %eax,%eax /* unlocked? */
1386 je Llml_retry /* yes - go try to grab it */
1387
1388 cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
1389 jne Llml_ilk_loop /* no - keep spinning */
1390
1391 pushl %edx
1392 call EXT(lck_mtx_interlock_panic)
1393 /*
1394 * shouldn't return from here, but just in case
1395 */
1396 popl %edx
1397 jmp Llml_ilk_loop /* no - keep spinning */
1398
1399 Llml_fail:
1400 /*
1401 * Check if the owner is on another processor and therefore
1402 * we should try to spin before blocking.
1403 */
1404 testl $(OnProc),ACT_SPF(%ecx)
1405 jz Llml_block
1406
1407 /*
1408 * Here if owner is on another processor:
1409 * - release the interlock
1410 * - spin on the holder until release or timeout
1411 * - in either case re-acquire the interlock
1412 * - if released, acquire it
1413 * - otherwise drop thru to block.
1414 */
1415 xorl %eax,%eax
1416 movl %eax,M_ILK /* zero interlock */
1417 popf
1418 pushf /* restore interrupt state */
1419 pushl %edx /* save mutex address */
1420 pushl %edx
1421 call EXT(lck_mtx_lock_spinwait)
1422 addl $4,%esp
1423 popl %edx /* restore mutex address */
1424
1425 /* Re-acquire interlock */
1426 movl M_ILK,%eax /* read interlock */
1427 testl %eax,%eax /* unlocked? */
1428 jne Llml_ilk_refail /* no, go to spin loop */
1429 Llml_reget_retry:
1430 cli /* disable interrupts */
1431 movl %gs:CPU_ACTIVE_THREAD,%ecx
1432
1433 /* eax == 0 at this point */
1434 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
1435 jne Llml_ilk_refail /* branch on failure to retry */
1436
1437 movl M_LOCKED,%ecx /* get lock owner */
1438 testl %ecx,%ecx /* is the mutex free? */
1439 je Llml_acquire /* yes, acquire */
1440
1441 Llml_block:
1442 CHECK_MYLOCK(M_THREAD)
1443 pushl %edx /* save mutex address */
1444 pushl M_LOCKED
1445 pushl %edx /* push mutex address */
1446 /*
1447 * N.B.: lck_mtx_lock_wait is called here with interrupts disabled
1448 * Consider reworking.
1449 */
1450 call EXT(lck_mtx_lock_wait) /* wait for the lock */
1451 addl $8,%esp
1452 popl %edx /* restore mutex address */
1453 jmp Llml_restart /* and start over */
1454
1455 Llml_ilk_refail:
1456 popf /* restore interrupt state */
1457 pushf /* resave interrupt state on stack */
1458
1459 Llml_ilk_reloop:
1460 PAUSE
1461 movl M_ILK,%eax /* read interlock */
1462 testl %eax,%eax /* unlocked? */
1463 je Llml_reget_retry /* yes - go try to grab it */
1464
1465 cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
1466 jne Llml_ilk_reloop /* no - keep spinning */
1467
1468 pushl %edx
1469 call EXT(lck_mtx_interlock_panic)
1470 /*
1471 * shouldn't return from here, but just in case
1472 */
1473 popl %edx
1474 jmp Llml_ilk_reloop /* no - keep spinning */
1475
1476
1477 Llml_eval_ilk:
1478 cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
1479 cmove M_PTR,%edx /* If so, take indirection */
1480 jne Llml_ilk_loop /* If not, go to spin loop */
1481
1482 /*
1483 * Entry into statistics codepath for lck_mtx_lock:
1484 * EDX: real lock pointer
1485 * first dword on stack contains flags
1486 */
1487
1488 /* Enable this preprocessor define to record only the first miss.
1489 * By default, we count every miss, hence multiple misses may be
1490 * recorded for a single lock acquire attempt via lck_mtx_lock.
1491 */
1492 #undef LOG_FIRST_MISS_ALONE
1493
1494 /*
1495 * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
1496 * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained
1497 * as a 64-bit quantity (this matches the existing PowerPC implementation,
1498 * and the new x86 specific statistics are also maintained as 32-bit
1499 * quantities).
1500 */
1501
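/*
 * The GRP_MTX_STAT_UTIL update below keeps a 64-bit counter as two 32-bit
 * halves: add 1 to the low word and, on carry, bump the high word. A
 * hedged C equivalent of the addl/jnc/incl sequence (the optional LOCK
 * prefix and its atomicity are not reproduced here):
 *
 *	#include <stdint.h>
 *
 *	struct stat64 { uint32_t lo; uint32_t hi; };	// illustrative layout
 *
 *	static void stat_util_inc_sketch(struct stat64 *s)
 *	{
 *		if (++s->lo == 0)	// low word wrapped: propagate the carry
 *			s->hi++;
 *	}
 */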
1502 Llml_lck_ext:
1503 pushl %esi /* Used to hold the lock group ptr */
1504 pushl %edi /* Used for stat update records */
1505 movl MUTEX_GRP(%edx),%esi /* Load lock group */
1506 xorl %edi,%edi /* Clear stat update records */
1507 /* 64-bit increment of acquire attempt statistic (per-group) */
1508 LOCK_IF_ATOMIC_STAT_UPDATES
1509 addl $1, GRP_MTX_STAT_UTIL(%esi)
1510 jnc 1f
1511 incl GRP_MTX_STAT_UTIL+4(%esi)
1512 1:
1513 movl M_ILK,%eax /* read interlock */
1514 testl %eax,%eax /* unlocked? */
1515 jne Llml_ext_ilk_loop /* no, go to spin loop */
1516 Llml_ext_get_hw:
1517 cli
1518 movl %gs:CPU_ACTIVE_THREAD,%ecx
1519
1520 /* eax == 0 at this point */
1521 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
1522 jne Llml_ext_ilk_fail /* branch on failure to retry */
1523
1524 movl M_LOCKED,%ecx /* get lock owner */
1525 testl %ecx,%ecx /* is the mutex locked? */
1526 jne Llml_ext_fail /* yes, we lose */
1527
1528 Llml_ext_acquire:
1529 movl %gs:CPU_ACTIVE_THREAD,%ecx
1530 movl %ecx,M_LOCKED
1531
1532 cmpw $0,M_WAITERS /* are there any waiters? */
1533 jne Llml_ext_waiters /* yes, more work to do */
1534 Llml_ext_return:
1535 xorl %eax,%eax
1536 movl %eax,M_ILK
1537
1538 popl %edi
1539 popl %esi
1540 popf /* restore interrupt state */
1541 leave
1542 #if CONFIG_DTRACE
1543 LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point)
1544 ret
1545 /* inherit lock pointer in %edx above */
1546 LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, %edx)
1547 #endif
1548 ret
1549
1550 Llml_ext_waiters:
1551 pushl %edx /* save mutex address */
1552 pushl %edx
1553 call EXT(lck_mtx_lock_acquire)
1554 addl $4,%esp
1555 popl %edx /* restore mutex address */
1556 jmp Llml_ext_return
1557
1558 Llml_ext_restart:
1559 Llml_ext_ilk_fail:
1560 movl 8(%esp),%ecx
1561 pushl %ecx
1562 popf /* restore interrupt state */
1563
1564 Llml_ext_ilk_loop:
1565 PAUSE
1566 movl M_ILK,%eax /* read interlock */
1567 testl %eax,%eax /* unlocked? */
1568 je Llml_ext_get_hw /* yes - go try to grab it */
1569
1570 cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
1571 jne Llml_ext_ilk_loop /* no - keep spinning */
1572
1573 pushl %edx
1574 call EXT(lck_mtx_interlock_panic)
1575 /*
1576 * shouldn't return from here, but just in case
1577 */
1578 popl %edx
1579 jmp Llml_ext_ilk_loop
1580
1581
1582 Llml_ext_fail:
1583 #ifdef LOG_FIRST_MISS_ALONE
1584 testl $1, %edi
1585 jnz 1f
1586 #endif /* LOG_FIRST_MISS_ALONE */
1587 /* Record that a lock acquire attempt missed (per-group statistic) */
1588 LOCK_IF_ATOMIC_STAT_UPDATES
1589 incl GRP_MTX_STAT_MISS(%esi)
1590 #ifdef LOG_FIRST_MISS_ALONE
1591 orl $1, %edi
1592 #endif /* LOG_FIRST_MISS_ALONE */
1593 1:
1594 /*
1595 * Check if the owner is on another processor and therefore
1596 * we should try to spin before blocking.
1597 */
1598 testl $(OnProc),ACT_SPF(%ecx)
1599 jnz 2f
1600 /*
1601 * Record the "direct wait" statistic, which indicates if a
1602 * miss proceeded to block directly without spinning--occurs
1603 * if the owner of the mutex isn't running on another processor
1604 * at the time of the check.
1605 */
1606 LOCK_IF_ATOMIC_STAT_UPDATES
1607 incl GRP_MTX_STAT_DIRECT_WAIT(%esi)
1608 jmp Llml_ext_block
1609 2:
1610 /*
1611 * Here if owner is on another processor:
1612 * - release the interlock
1613 * - spin on the holder until release or timeout
1614 * - in either case re-acquire the interlock
1615 * - if released, acquire it
1616 * - otherwise drop thru to block.
1617 */
1618 xorl %eax,%eax
1619 movl %eax,M_ILK /* zero interlock */
1620
1621 pushl 8(%esp) /* Make another copy of EFLAGS image */
1622 popf /* Restore interrupt state */
1623 pushl %edx /* save mutex address */
1624 pushl %edx
1625 call EXT(lck_mtx_lock_spinwait)
1626 addl $4,%esp
1627 popl %edx /* restore mutex address */
1628
1629 /* Re-acquire interlock */
1630 movl M_ILK,%eax /* read interlock */
1631 testl %eax,%eax /* unlocked? */
1632 jne Llml_ext_ilk_refail /* no, go to spin loop */
1633 Llml_ext_reget_retry:
1634 cli /* disable interrupts */
1635 movl %gs:CPU_ACTIVE_THREAD,%ecx
1636
1637 /* eax == 0 at this point */
1638 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
1639 jne Llml_ext_ilk_refail /* branch on failure to spin loop */
1640
1641 movl M_LOCKED,%ecx /* get lock owner */
1642 testl %ecx,%ecx /* is the mutex free? */
1643 je Llml_ext_acquire /* yes, acquire */
1644
1645 Llml_ext_block:
1646 /* If we wanted to count waits just once per lock acquire, we'd
1647 * skip over the stat update here
1648 */
1649 LOCK_IF_ATOMIC_STAT_UPDATES
1650 /* Record that a lock miss proceeded to block */
1651 incl GRP_MTX_STAT_WAIT(%esi)
1652 1:
1653 CHECK_MYLOCK(M_THREAD)
1654 pushl %edx /* save mutex address */
1655 pushl M_LOCKED
1656 pushl %edx /* push mutex address */
1657 /*
1658 * N.B.: lck_mtx_lock_wait is called here with interrupts disabled
1659 * Consider reworking.
1660 */
1661 call EXT(lck_mtx_lock_wait) /* wait for the lock */
1662 addl $8,%esp
1663 popl %edx /* restore mutex address */
1664 jmp Llml_ext_restart /* and start over */
1665
1666 Llml_ext_ilk_refail:
1667 movl 8(%esp),%ecx
1668 pushl %ecx
1669 popf /* restore interrupt state */
1670
1671 Llml_ext_ilk_reloop:
1672 PAUSE
1673 movl M_ILK,%eax /* read interlock */
1674 testl %eax,%eax /* unlocked? */
1675 je Llml_ext_reget_retry /* yes - go try to grab it */
1676
1677 cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
1678 jne Llml_ext_ilk_reloop /* no - keep spinning */
1679
1680 pushl %edx
1681 call EXT(lck_mtx_interlock_panic)
1682 /*
1683 * shouldn't return from here, but just in case
1684 */
1685 popl %edx
1686 jmp Llml_ext_ilk_reloop
1687
1688
1689
1690 NONLEAF_ENTRY(lck_mtx_try_lock_spin)
1691
1692 movl B_ARG0,%edx /* fetch lock pointer */
1693 pushf /* save interrupt state */
1694
1695 CHECK_NO_SIMPLELOCKS()
1696 CHECK_PREEMPTION_LEVEL()
1697
1698 movl M_ILK,%eax /* read interlock */
1699 testl %eax,%eax /* unlocked? */
1700 jne Llmts_eval_ilk /* no, go see if indirect */
1701 Llmts_retry:
1702 cli /* disable interrupts */
1703 movl %gs:CPU_ACTIVE_THREAD,%ecx
1704
1705 /* eax == 0 at this point */
1706 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
1707 jne Llmts_ilk_fail /* branch on failure to retry */
1708
1709 movl M_LOCKED,%ecx /* get lock owner */
1710 testl %ecx,%ecx /* is the mutex locked? */
1711 jne Llmt_fail /* yes, we lose */
1712
1713 movl $(MUTEX_LOCKED_AS_SPIN),M_LOCKED /* no, indicate ownership as a spin lock */
1714 PREEMPTION_DISABLE /* and return with interlock held */
1715
1716 movl $1,%eax /* return success */
1717 popf /* restore interrupt state */
1718 leave
1719 #if CONFIG_DTRACE
1720 LOCKSTAT_LABEL(_lck_mtx_try_lock_spin_lockstat_patch_point)
1721 ret
1722 /* inherit lock pointer in %edx above */
1723 LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, %edx)
1724 movl $1,%eax /* return success */
1725 #endif
1726 ret
1727
1728 Llmts_ilk_fail:
1729 popf /* restore interrupt state */
1730 pushf /* resave interrupt state */
1731
1732 Llmts_ilk_loop:
1733 PAUSE
1734 /*
1735 * need to do this check outside of the interlock:
1736 * if this lock is held as a simple lock, we won't
1737 * be able to take the interlock
1738 */
1739 movl M_LOCKED,%eax /* get lock owner */
1740 testl %eax,%eax /* is the mutex locked? */
1741 jne Llmt_fail_no_ilk /* yes, go return failure */
1742
1743 movl M_ILK,%eax /* read interlock */
1744 testl %eax,%eax /* unlocked? */
1745 je Llmts_retry /* yes - go try to grab it */
1746
1747 cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
1748 jne Llmts_ilk_loop /* no - keep spinning */
1749
1750 pushl %edx
1751 call EXT(lck_mtx_interlock_panic)
1752 /*
1753 * shouldn't return from here, but just in case
1754 */
1755 popl %edx
1756 jmp Llmts_ilk_loop
1757
1758 Llmts_eval_ilk:
1759 cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
1760 cmove M_PTR,%edx /* If so, take indirection */
1761 jne Llmts_ilk_loop /* If not, go to spin loop */
1762
1763 /*
1764 * bump counter on indirect lock
1765 */
1766 pushl %esi /* Used to hold the lock group ptr */
1767 movl MUTEX_GRP(%edx),%esi /* Load lock group */
1768 /* 64-bit increment of acquire attempt statistic (per-group) */
1769 LOCK_IF_ATOMIC_STAT_UPDATES
1770 addl $1, GRP_MTX_STAT_UTIL(%esi)
1771 jnc 1f
1772 incl GRP_MTX_STAT_UTIL+4(%esi)
1773 1:
1774 popl %esi
1775 jmp Llmts_ilk_loop
1776
1777
1778
1779 NONLEAF_ENTRY(lck_mtx_try_lock)
1780
1781 movl B_ARG0,%edx /* fetch lock pointer */
1782 pushf /* save interrupt state */
1783
1784 CHECK_NO_SIMPLELOCKS()
1785 CHECK_PREEMPTION_LEVEL()
1786
1787 movl M_ILK,%eax /* read interlock */
1788 testl %eax,%eax /* unlocked? */
1789 jne Llmt_eval_ilk /* no, go see if indirect */
1790 Llmt_retry:
1791 cli /* disable interrupts */
1792 movl %gs:CPU_ACTIVE_THREAD,%ecx
1793
1794 /* eax == 0 at this point */
1795 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
1796 jne Llmt_ilk_fail /* branch on failure to retry */
1797
1798 movl M_LOCKED,%ecx /* get lock owner */
1799 testl %ecx,%ecx /* is the mutex locked? */
1800 jne Llmt_fail /* yes, we lose */
1801 Llmt_acquire:
1802 movl %gs:CPU_ACTIVE_THREAD,%ecx
1803 movl %ecx,M_LOCKED
1804
1805 cmpw $0,M_WAITERS /* are there any waiters? */
1806 jne Llmt_waiters /* yes, more work to do */
1807 Llmt_return:
1808 xorl %eax,%eax
1809 movl %eax,M_ILK
1810
1811 popf /* restore interrupt state */
1812
1813 movl $1,%eax /* return success */
1814 leave
1815 #if CONFIG_DTRACE
1816 /* Dtrace probe: LS_LCK_MTX_TRY_LOCK_ACQUIRE */
1817 LOCKSTAT_LABEL(_lck_mtx_try_lock_lockstat_patch_point)
1818 ret
1819 /* inherit lock pointer in %edx from above */
1820 LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, %edx)
1821 movl $1,%eax /* return success */
1822 #endif
1823 ret
1824
1825 Llmt_waiters:
1826 pushl %edx /* save mutex address */
1827 pushl %edx
1828 call EXT(lck_mtx_lock_acquire)
1829 addl $4,%esp
1830 popl %edx /* restore mutex address */
1831 jmp Llmt_return
1832
1833 Llmt_ilk_fail:
1834 popf /* restore interrupt state */
1835 pushf /* resave interrupt state */
1836
1837 Llmt_ilk_loop:
1838 PAUSE
1839 /*
1840 * need to do this check outside of the interlock:
1841 * if this lock is held as a simple lock, we won't
1842 * be able to take the interlock
1843 */
1844 movl M_LOCKED,%eax /* get lock owner */
1845 testl %eax,%eax /* is the mutex locked? */
1846 jne Llmt_fail_no_ilk /* yes, go return failure */
1847
1848 movl M_ILK,%eax /* read interlock */
1849 testl %eax,%eax /* unlocked? */
1850 je Llmt_retry /* yes - go try to grab it */
1851
1852 cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
1853 jne Llmt_ilk_loop /* no - keep spinning */
1854
1855 pushl %edx
1856 call EXT(lck_mtx_interlock_panic)
1857 /*
1858 * shouldn't return from here, but just in case
1859 */
1860 popl %edx
1861 jmp Llmt_ilk_loop
1862
1863 Llmt_fail:
1864 xorl %eax,%eax /* Zero interlock value */
1865 movl %eax,M_ILK
1866
1867 Llmt_fail_no_ilk:
1868 popf /* restore interrupt state */
1869
1870 cmpl %edx,B_ARG0
1871 jne Llmt_fail_indirect
1872
1873 xorl %eax,%eax
1874 /* Note that we don't record a dtrace event for trying and missing */
1875 NONLEAF_RET
1876
1877 Llmt_fail_indirect:
1878 pushl %esi /* Used to hold the lock group ptr */
1879 movl MUTEX_GRP(%edx),%esi /* Load lock group */
1880
1881 /* Record mutex acquire attempt miss statistic */
1882 LOCK_IF_ATOMIC_STAT_UPDATES
1883 incl GRP_MTX_STAT_MISS(%esi)
1884
1885 popl %esi
1886 xorl %eax,%eax
1887 NONLEAF_RET
1888
1889 Llmt_eval_ilk:
1890 cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
1891 cmove M_PTR,%edx /* If so, take indirection */
1892 jne Llmt_ilk_loop /* If not, go to spin loop */
1893
1894 /*
1895 * bump counter for indirect lock
1896 */
1897 pushl %esi /* Used to hold the lock group ptr */
1898 movl MUTEX_GRP(%edx),%esi /* Load lock group */
1899
1900 /* 64-bit increment of acquire attempt statistic (per-group) */
1901 LOCK_IF_ATOMIC_STAT_UPDATES
1902 addl $1, GRP_MTX_STAT_UTIL(%esi)
1903 jnc 1f
1904 incl GRP_MTX_STAT_UTIL+4(%esi)
1905 1:
1906 pop %esi
1907 jmp Llmt_ilk_loop
1908
1909
1910
1911 LEAF_ENTRY(lck_mtx_convert_spin)
1912 movl L_ARG0,%edx /* fetch lock pointer */
1913
1914 cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
1915 cmove M_PTR,%edx /* If so, take indirection */
1916
1917 movl M_LOCKED,%ecx /* is this the spin variant of the mutex */
1918 cmpl $(MUTEX_LOCKED_AS_SPIN),%ecx
1919 jne Llmcs_exit /* already owned as a mutex, just return */
1920
1921 movl M_ILK,%ecx /* convert from spin version to mutex */
1922 movl %ecx,M_LOCKED /* take control of the mutex */
1923
1924 cmpw $0,M_WAITERS /* are there any waiters? */
1925 jne Llmcs_waiters /* yes, more work to do */
1926
1927 Llmcs_return:
1928 xorl %ecx,%ecx
1929 movl %ecx,M_ILK /* clear interlock */
1930 PREEMPTION_ENABLE
1931 Llmcs_exit:
1932 LEAF_RET
1933
1934 Llmcs_waiters:
1935 pushl %edx /* save mutex address */
1936 pushl %edx
1937 call EXT(lck_mtx_lock_acquire)
1938 addl $4,%esp
1939 popl %edx /* restore mutex address */
1940 jmp Llmcs_return
1941
1942
1943
1944 NONLEAF_ENTRY(lck_mtx_unlock)
1945
1946 movl B_ARG0,%edx /* fetch lock pointer */
1947
1948 cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
1949 cmove M_PTR,%edx /* If so, take indirection */
1950
1951 movl M_LOCKED,%ecx /* is this the spin variant of the mutex */
1952 cmpl $(MUTEX_LOCKED_AS_SPIN),%ecx
1953 jne Llmu_enter /* no, go treat like a real mutex */
1954
1955 cmpw $0,M_WAITERS /* are there any waiters? */
1956 jne Llmus_wakeup /* yes, more work to do */
1957
1958 Llmu_drop_ilk:
1959 xorl %eax,%eax
1960 movl %eax,M_LOCKED /* clear spin indicator */
1961 movl %eax,M_ILK /* release the interlock */
1962
1963 PREEMPTION_ENABLE /* and re-enable preemption */
1964 leave
1965 #if CONFIG_DTRACE
1966 /* Dtrace: LS_LCK_MTX_UNLOCK_RELEASE */
1967 LOCKSTAT_LABEL(_lck_mtx_unlock_lockstat_patch_point)
1968 ret
1969 /* inherit lock pointer in %edx from above */
1970 LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %edx)
1971 #endif
1972 ret
1973
1974 Llmus_wakeup:
1975 pushl %edx /* save mutex address */
1976 pushl %edx /* push mutex address */
1977 call EXT(lck_mtx_unlockspin_wakeup) /* yes, wake a thread */
1978 addl $4,%esp
1979 popl %edx /* restore mutex pointer */
1980 jmp Llmu_drop_ilk
1981
1982
1983 Llmu_enter:
1984 pushf /* save interrupt state */
1985
1986 movl M_ILK,%eax /* read interlock */
1987 testl %eax,%eax /* unlocked? */
1988 jne Llmu_ilk_loop /* no - go to spin loop */
1989 Llmu_retry:
1990 cli /* disable interrupts */
1991 movl %gs:CPU_ACTIVE_THREAD,%ecx
1992
1993 /* eax == 0 at this point */
1994 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
1995 jne Llmu_ilk_fail /* branch on failure to spin loop */
1996
1997 cmpw $0,M_WAITERS /* are there any waiters? */
1998 jne Llmu_wakeup /* yes, more work to do */
1999
2000 Llmu_doit:
2001 xorl %ecx,%ecx
2002 movl %ecx,M_LOCKED /* unlock the mutex */
2003 movl %ecx,M_ILK /* clear the interlock */
2004
2005 popf /* restore interrupt state */
2006 leave
2007 #if CONFIG_DTRACE
2008 LOCKSTAT_LABEL(_lck_mtx_unlock2_lockstat_patch_point)
2009 ret
2010 /* inherit lock pointer in %edx above */
2011 LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %edx)
2012 #endif
2013 ret
2014
2015 Llmu_ilk_fail:
2016 popf /* restore interrupt state */
2017 pushf /* resave interrupt state */
2018
2019 Llmu_ilk_loop:
2020 PAUSE
2021 movl M_ILK,%eax /* read interlock */
2022 testl %eax,%eax /* unlocked? */
2023 je Llmu_retry /* yes - go try to grab it */
2024
2025 cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
2026 jne Llmu_ilk_loop /* no - keep spinning */
2027
2028 pushl %edx
2029 call EXT(lck_mtx_interlock_panic)
2030 /*
2031 * shouldn't return from here, but just in case
2032 */
2033 popl %edx
2034 jmp Llmu_ilk_loop
2035
2036 Llmu_wakeup:
2037 pushl %edx /* save mutex address */
2038 pushl M_LOCKED
2039 pushl %edx /* push mutex address */
2040 call EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
2041 addl $8,%esp
2042 popl %edx /* restore mutex pointer */
2043 xorl %ecx,%ecx
2044 movl %ecx,M_LOCKED /* unlock the mutex */
2045
2046 movl %ecx,M_ILK
2047
2048 popf /* restore interrupt state */
2049
2050 leave
2051 #if CONFIG_DTRACE
2052 /* Dtrace: LS_LCK_MTX_EXT_UNLOCK_RELEASE */
2053 LOCKSTAT_LABEL(_lck_mtx_ext_unlock_lockstat_patch_point)
2054 ret
2055 /* inherit lock pointer in %edx from above */
2056 LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, %edx)
2057 #endif
2058 ret
2059
2060
2061 LEAF_ENTRY(lck_mtx_ilk_unlock)
2062 movl L_ARG0,%edx /* no indirection here */
2063
2064 xorl %eax,%eax
2065 movl %eax,M_ILK
2066
2067 LEAF_RET
2068
2069
2070 LEAF_ENTRY(_disable_preemption)
2071 #if MACH_RT
2072 _DISABLE_PREEMPTION
2073 #endif /* MACH_RT */
2074 LEAF_RET
2075
2076 LEAF_ENTRY(_enable_preemption)
2077 #if MACH_RT
2078 #if MACH_ASSERT
2079 cmpl $0,%gs:CPU_PREEMPTION_LEVEL
2080 jg 1f
2081 pushl %gs:CPU_PREEMPTION_LEVEL
2082 pushl $2f
2083 call EXT(panic)
2084 hlt
2085 .data
2086 2: String "_enable_preemption: preemption_level(%d) < 0!"
2087 .text
2088 1:
2089 #endif /* MACH_ASSERT */
2090 _ENABLE_PREEMPTION
2091 #endif /* MACH_RT */
2092 LEAF_RET
2093
2094 LEAF_ENTRY(_enable_preemption_no_check)
2095 #if MACH_RT
2096 #if MACH_ASSERT
2097 cmpl $0,%gs:CPU_PREEMPTION_LEVEL
2098 jg 1f
2099 pushl $2f
2100 call EXT(panic)
2101 hlt
2102 .data
2103 2: String "_enable_preemption_no_check: preemption_level <= 0!"
2104 .text
2105 1:
2106 #endif /* MACH_ASSERT */
2107 _ENABLE_PREEMPTION_NO_CHECK
2108 #endif /* MACH_RT */
2109 LEAF_RET
2110
2111
2112 LEAF_ENTRY(_mp_disable_preemption)
2113 #if MACH_RT
2114 _DISABLE_PREEMPTION
2115 #endif /* MACH_RT */
2116 LEAF_RET
2117
2118 LEAF_ENTRY(_mp_enable_preemption)
2119 #if MACH_RT
2120 #if MACH_ASSERT
2121 cmpl $0,%gs:CPU_PREEMPTION_LEVEL
2122 jg 1f
2123 pushl %gs:CPU_PREEMPTION_LEVEL
2124 pushl $2f
2125 call EXT(panic)
2126 hlt
2127 .data
2128 2: String "_mp_enable_preemption: preemption_level (%d) <= 0!"
2129 .text
2130 1:
2131 #endif /* MACH_ASSERT */
2132 _ENABLE_PREEMPTION
2133 #endif /* MACH_RT */
2134 LEAF_RET
2135
2136 LEAF_ENTRY(_mp_enable_preemption_no_check)
2137 #if MACH_RT
2138 #if MACH_ASSERT
2139 cmpl $0,%gs:CPU_PREEMPTION_LEVEL
2140 jg 1f
2141 pushl $2f
2142 call EXT(panic)
2143 hlt
2144 .data
2145 2: String "_mp_enable_preemption_no_check: preemption_level <= 0!"
2146 .text
2147 1:
2148 #endif /* MACH_ASSERT */
2149 _ENABLE_PREEMPTION_NO_CHECK
2150 #endif /* MACH_RT */
2151 LEAF_RET
2152
2153
2154 LEAF_ENTRY(i_bit_set)
2155 movl L_ARG0,%edx
2156 movl L_ARG1,%eax
2157 lock
2158 bts %edx,(%eax)
2159 LEAF_RET
2160
2161 LEAF_ENTRY(i_bit_clear)
2162 movl L_ARG0,%edx
2163 movl L_ARG1,%eax
2164 lock
2165 btr %edx,(%eax)
2166 LEAF_RET
2167
2168
2169 LEAF_ENTRY(bit_lock)
2170 movl L_ARG0,%ecx
2171 movl L_ARG1,%eax
2172 1:
2173 lock
2174 bts %ecx,(%eax)
2175 jb 1b
2176 LEAF_RET
2177
2178
2179 LEAF_ENTRY(bit_lock_try)
2180 movl L_ARG0,%ecx
2181 movl L_ARG1,%eax
2182 lock
2183 bts %ecx,(%eax)
2184 jb bit_lock_failed
2185 LEAF_RET /* %eax better not be null ! */
2186 bit_lock_failed:
2187 xorl %eax,%eax
2188 LEAF_RET
2189
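/*
 * Hedged C sketch of the bts-based bit lock and try-lock above, assuming
 * the bit index falls within a single 32-bit word (lock;bts with a
 * register operand can address past the first dword; that case is not
 * covered here). Uses the __atomic_fetch_or builtin in place of bts.
 *
 *	#include <stdint.h>
 *
 *	static void bit_lock_sketch(int bit, volatile uint32_t *word)
 *	{
 *		uint32_t mask = 1u << bit;
 *
 *		while (__atomic_fetch_or(word, mask, __ATOMIC_ACQUIRE) & mask)
 *			;			// bit was already set: spin
 *	}
 *
 *	static int bit_lock_try_sketch(int bit, volatile uint32_t *word)
 *	{
 *		uint32_t mask = 1u << bit;
 *
 *		return (__atomic_fetch_or(word, mask, __ATOMIC_ACQUIRE) & mask) == 0;
 *	}
 */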
2190 LEAF_ENTRY(bit_unlock)
2191 movl L_ARG0,%ecx
2192 movl L_ARG1,%eax
2193 lock
2194 btr %ecx,(%eax)
2195 LEAF_RET
2196
2197 /*
2198 * Atomic primitives, prototyped in kern/simple_lock.h
2199 */
2200 LEAF_ENTRY(hw_atomic_add)
2201 movl L_ARG0, %ecx /* Load address of operand */
2202 movl L_ARG1, %eax /* Load addend */
2203 movl %eax, %edx
2204 lock
2205 xaddl %eax, (%ecx) /* Atomic exchange and add */
2206 addl %edx, %eax /* Calculate result */
2207 LEAF_RET
2208
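/*
 * A minimal C analogue of the xadd-based hw_atomic_add above: the routine
 * returns the new value, which the assembly forms by adding the addend
 * back to xadd's result. GCC builtin shown for illustration only.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t hw_atomic_add_sketch(volatile uint32_t *addr, uint32_t delta)
 *	{
 *		return __sync_add_and_fetch(addr, delta);	// lock xadd + add
 *	}
 */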
2209 LEAF_ENTRY(hw_atomic_sub)
2210 movl L_ARG0, %ecx /* Load address of operand */
2211 movl L_ARG1, %eax /* Load subtrahend */
2212 negl %eax
2213 movl %eax, %edx
2214 lock
2215 xaddl %eax, (%ecx) /* Atomic exchange and add */
2216 addl %edx, %eax /* Calculate result */
2217 LEAF_RET
2218
2219 LEAF_ENTRY(hw_atomic_or)
2220 movl L_ARG0, %ecx /* Load address of operand */
2221 movl (%ecx), %eax
2222 1:
2223 movl L_ARG1, %edx /* Load mask */
2224 orl %eax, %edx
2225 lock
2226 cmpxchgl %edx, (%ecx) /* Atomic CAS */
2227 jne 1b
2228 movl %edx, %eax /* Result */
2229 LEAF_RET
2230 /*
2231 * A variant of hw_atomic_or which doesn't return a value.
2232 * The implementation is thus comparatively more efficient.
2233 */
2234
2235 LEAF_ENTRY(hw_atomic_or_noret)
2236 movl L_ARG0, %ecx /* Load address of operand */
2237 movl L_ARG1, %edx /* Load mask */
2238 lock
2239 orl %edx, (%ecx) /* Atomic OR */
2240 LEAF_RET
2241
2242 LEAF_ENTRY(hw_atomic_and)
2243 movl L_ARG0, %ecx /* Load address of operand */
2244 movl (%ecx), %eax
2245 1:
2246 movl L_ARG1, %edx /* Load mask */
2247 andl %eax, %edx
2248 lock
2249 cmpxchgl %edx, (%ecx) /* Atomic CAS */
2250 jne 1b
2251 movl %edx, %eax /* Result */
2252 LEAF_RET
2253 /*
2254 * A variant of hw_atomic_and which doesn't return a value.
2255 * The implementation is thus comparatively more efficient.
2256 */
2257
2258 LEAF_ENTRY(hw_atomic_and_noret)
2259 movl L_ARG0, %ecx /* Load address of operand */
2260 movl L_ARG1, %edx /* Load mask */
2261 lock
2262 andl %edx, (%ecx) /* Atomic AND */
2263 LEAF_RET