]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/i386_lock.s
9f0c4b7c09f8ddea58400c372bb98d26f8756907
[apple/xnu.git] / osfmk / i386 / i386_lock.s
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1989 Carnegie-Mellon University
36 * All rights reserved. The CMU software License Agreement specifies
37 * the terms and conditions for use and redistribution.
38 */
39
40 #include <mach_rt.h>
41 #include <platforms.h>
42 #include <mach_ldebug.h>
43 #include <i386/asm.h>
44
45 #include "assym.s"
46
/* "rep; nop" is the encoding of the PAUSE instruction: a spin-wait
 * hint that relaxes the pipeline / frees SMT sibling resources inside
 * busy-wait loops, and decays to a plain NOP on older CPUs. */
47 #define PAUSE rep; nop
48 
49 /*
50 * When performance isn't the only concern, it's
51 * nice to build stack frames...
52 */
/* Frames are built when profiling (GPROF) or when lock debugging or
 * ETAP lock tracing is combined with the kernel debugger (MACH_KDB). */
53 #define BUILD_STACK_FRAMES (GPROF || \
54 ((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB))
55
56 #if BUILD_STACK_FRAMES
57 
/* With frames built, PC/argument accessors use the %ebp-relative (B_*)
 * forms and every leaf entry sets up a frame plus the mcount hook. */
58 /* STack-frame-relative: */
59 #define L_PC B_PC
60 #define L_ARG0 B_ARG0
61 #define L_ARG1 B_ARG1
62 
/* Leaf entry point: label, frame setup, profiling hook. */
63 #define LEAF_ENTRY(name) \
64 Entry(name); \
65 FRAME; \
66 MCOUNT
67 
/* Leaf entry with two public names (aliased entry points). */
68 #define LEAF_ENTRY2(n1,n2) \
69 Entry(n1); \
70 Entry(n2); \
71 FRAME; \
72 MCOUNT
73 
/* Leaf return: tear down the frame, then ret. */
74 #define LEAF_RET \
75 EMARF; \
76 ret
77 
78 #else /* BUILD_STACK_FRAMES */
79 
/* Frameless build: accessors are %esp-relative (S_*) and leaf
 * entry/return reduce to a bare label and a bare ret. */
80 /* Stack-pointer-relative: */
81 #define L_PC S_PC
82 #define L_ARG0 S_ARG0
83 #define L_ARG1 S_ARG1
84 
85 #define LEAF_ENTRY(name) \
86 Entry(name)
87 
88 #define LEAF_ENTRY2(n1,n2) \
89 Entry(n1); \
90 Entry(n2)
91 
92 #define LEAF_RET \
93 ret
94 
95 #endif /* BUILD_STACK_FRAMES */
96
97
98 /* Non-leaf routines always have a stack frame: */
99 
/* Non-leaf entry: label, %ebp frame, profiling hook — required because
 * these routines make calls and use the B_* frame-relative accessors. */
100 #define NONLEAF_ENTRY(name) \
101 Entry(name); \
102 FRAME; \
103 MCOUNT
104 
/* Non-leaf entry with two public names (aliased entry points). */
105 #define NONLEAF_ENTRY2(n1,n2) \
106 Entry(n1); \
107 Entry(n2); \
108 FRAME; \
109 MCOUNT
110 
/* Non-leaf return: tear down the frame, then ret. */
111 #define NONLEAF_RET \
112 EMARF; \
113 ret
114
115
/* Mutex field accessors.  All of them assume the mutex pointer is in
 * %edx; M_ILK (the interlock word) sits at offset 0.  The MUTEX_*
 * offsets come from assym.s. */
116 #define M_ILK (%edx)
117 #define M_LOCKED MUTEX_LOCKED(%edx)
118 #define M_WAITERS MUTEX_WAITERS(%edx)
119 #define M_PROMOTED_PRI MUTEX_PROMOTED_PRI(%edx)
120 #define M_ITAG MUTEX_ITAG(%edx)
121 #define M_PTR MUTEX_PTR(%edx)
122 #if MACH_LDEBUG
/* Debug-only fields: lock type tag, acquiring pc, owning thread. */
123 #define M_TYPE MUTEX_TYPE(%edx)
124 #define M_PC MUTEX_PC(%edx)
125 #define M_THREAD MUTEX_THREAD(%edx)
126 #endif /* MACH_LDEBUG */
127 
128 #include <i386/mp.h>
/* CX(addr,reg): index a table of 32-bit entries at addr by reg. */
129 #define CX(addr,reg) addr(,reg,4)
130
131 #if MACH_LDEBUG
132 /*
133 * Routines for general lock debugging.
134 */
/* Simple-lock debug field accessors; like the M_* macros these assume
 * the lock pointer is in %edx (SLOCK_* offsets from assym.s). */
135 #define S_TYPE SLOCK_TYPE(%edx)
136 #define S_PC SLOCK_PC(%edx)
137 #define S_THREAD SLOCK_THREAD(%edx)
138 #define S_DURATIONH SLOCK_DURATIONH(%edx)
139 #define S_DURATIONL SLOCK_DURATIONL(%edx)
140 
141 /*
142 * Checks for expected lock types and calls "panic" on
143 * mismatch. Detects calls to Mutex functions with
144 * type simplelock and vice versa.
145 */
/* Panic pattern used by all the CHECK_* macros below: push the address
 * of a .data string as panic()'s format argument and call panic(),
 * which does not return; the hlt is a safety stop.  Numeric local
 * labels (1f/2f) keep each expansion self-contained. */
146 #define CHECK_MUTEX_TYPE() \
147 cmpl $ MUTEX_TAG,M_TYPE ; \
148 je 1f ; \
149 pushl $2f ; \
150 call EXT(panic) ; \
151 hlt ; \
152 .data ; \
153 2: String "not a mutex!" ; \
154 .text ; \
155 1:
156 
/* Same check for simple locks: panic unless S_TYPE == USLOCK_TAG. */
157 #define CHECK_SIMPLE_LOCK_TYPE() \
158 cmpl $ USLOCK_TAG,S_TYPE ; \
159 je 1f ; \
160 pushl $2f ; \
161 call EXT(panic) ; \
162 hlt ; \
163 .data ; \
164 2: String "not a simple lock!" ; \
165 .text ; \
166 1:
167
168 /*
169 * If one or more simplelocks are currently held by a thread,
170 * an attempt to acquire a mutex will cause this check to fail
171 * (since a mutex lock may context switch, holding a simplelock
172 * is not a good thing).
173 */
174 #if MACH_RT
/* Panic unless the per-cpu preemption level (read via %gs) is zero. */
175 #define CHECK_PREEMPTION_LEVEL() \
176 cmpl $0,%gs:CPU_PREEMPTION_LEVEL ; \
177 je 1f ; \
178 pushl $2f ; \
179 call EXT(panic) ; \
180 hlt ; \
181 .data ; \
182 2: String "preemption_level != 0!" ; \
183 .text ; \
184 1:
185 #else /* MACH_RT */
186 #define CHECK_PREEMPTION_LEVEL()
187 #endif /* MACH_RT */
188 
/* Panic if this cpu's simple-lock count is non-zero (see comment
 * above: blocking with a simple lock held is forbidden). */
189 #define CHECK_NO_SIMPLELOCKS() \
190 cmpl $0,%gs:CPU_SIMPLE_LOCK_COUNT ; \
191 je 1f ; \
192 pushl $2f ; \
193 call EXT(panic) ; \
194 hlt ; \
195 .data ; \
196 2: String "simple_locks_held!" ; \
197 .text ; \
198 1:
199
200 /*
201 * Verifies return to the correct thread in "unlock" situations.
202 */
/* Panic if "thd" is neither NULL nor the current active thread.
 * Clobbers %ecx.  A NULL active thread (early boot) skips the check. */
203 #define CHECK_THREAD(thd) \
204 movl %gs:CPU_ACTIVE_THREAD,%ecx ; \
205 testl %ecx,%ecx ; \
206 je 1f ; \
207 cmpl %ecx,thd ; \
208 je 1f ; \
209 pushl $2f ; \
210 call EXT(panic) ; \
211 hlt ; \
212 .data ; \
213 2: String "wrong thread!" ; \
214 .text ; \
215 1:
216 
/* Inverse test: panic if "thd" IS the current thread, i.e. the caller
 * is trying to take a lock it already owns.  Clobbers %ecx. */
217 #define CHECK_MYLOCK(thd) \
218 movl %gs:CPU_ACTIVE_THREAD,%ecx ; \
219 testl %ecx,%ecx ; \
220 je 1f ; \
221 cmpl %ecx,thd ; \
222 jne 1f ; \
223 pushl $2f ; \
224 call EXT(panic) ; \
225 hlt ; \
226 .data ; \
227 2: String "mylock attempt!" ; \
228 .text ; \
229 1:
230
/* Lock-metering hooks: pass "reg" (the lock) to the C metering
 * routine, preserving the register across the call. */
231 #define METER_SIMPLE_LOCK_LOCK(reg) \
232 pushl reg ; \
233 call EXT(meter_simple_lock) ; \
234 popl reg
235 
236 #define METER_SIMPLE_LOCK_UNLOCK(reg) \
237 pushl reg ; \
238 call EXT(meter_simple_unlock) ; \
239 popl reg
240
241 #else /* MACH_LDEBUG */
/* Null versions of the debug checks for non-MACH_LDEBUG builds.
 * Each must be function-like so call sites written as MACRO() expand
 * to nothing. */
242 #define CHECK_MUTEX_TYPE()
/* Fix: was object-like "#define CHECK_SIMPLE_LOCK_TYPE" (no parens),
 * which would leave a stray "()" in the assembly at any
 * CHECK_SIMPLE_LOCK_TYPE() call site when MACH_LDEBUG is off.
 * Define it function-like to match the MACH_LDEBUG variant above. */
243 #define CHECK_SIMPLE_LOCK_TYPE()
244 #define CHECK_THREAD(thd)
245 #define CHECK_PREEMPTION_LEVEL()
246 #define CHECK_NO_SIMPLELOCKS()
247 #define CHECK_MYLOCK(thd)
248 #define METER_SIMPLE_LOCK_LOCK(reg)
249 #define METER_SIMPLE_LOCK_UNLOCK(reg)
250 #endif /* MACH_LDEBUG */
251
252
/*
 * void hw_lock_init(hw_lock_t)
 *
 * Put a hardware spin lock into its unlocked state by zeroing the
 * lock word.  Clobbers %eax and %ecx (caller-saved).
 */
LEAF_ENTRY(hw_lock_init)
	movl	L_ARG0,%eax		/* %eax = lock pointer */
	xorl	%ecx,%ecx
	movl	%ecx,(%eax)		/* zero => unlocked */
	LEAF_RET
262
263 /*
264 * void hw_lock_lock(hw_lock_t)
265 *
266 * Acquire lock, spinning until it becomes available.
267 * MACH_RT: also return with preemption disabled.
268 */
LEAF_ENTRY(hw_lock_lock)
270 movl L_ARG0,%edx /* fetch lock pointer */
271 
272 movl L_PC,%ecx
/* Preemption is disabled only for each acquisition attempt and
 * re-enabled while spinning, so a spinner remains preemptible. */
273 1: DISABLE_PREEMPTION
274 movl 0(%edx), %eax
275 testl %eax,%eax /* lock locked? */
276 jne 3f /* branch if so */
/* %eax is known to be 0 here, so cmpxchg succeeds (storing the caller
 * pc from %ecx as the lock value) only if the word is still free. */
277 lock; cmpxchgl %ecx,0(%edx) /* try to acquire the HW lock */
278 jne 3f
279 movl $1,%eax /* In case this was a timeout call */
280 LEAF_RET /* if yes, then nothing left to do */
281 
282 3: ENABLE_PREEMPTION /* no reason we can't be preemptable */
283 PAUSE /* pause for hyper-threading */
284 jmp 1b /* try again */
285
286 /*
287 * unsigned int hw_lock_to(hw_lock_t, unsigned int)
288 *
289 * Acquire lock, spinning until it becomes available or timeout.
290 * MACH_RT: also return with preemption disabled.
291 */
/* Returns 1 on acquisition, 0 on timeout.  The timeout argument is a
 * TSC-cycle count relative to now (it is added to rdtsc below). */
292 LEAF_ENTRY(hw_lock_to)
293 1:
294 movl L_ARG0,%edx /* fetch lock pointer */
295 movl L_PC,%ecx
296 /*
297 * Attempt to grab the lock immediately
298 * - fastpath without timeout nonsense.
299 */
300 DISABLE_PREEMPTION
301 movl 0(%edx), %eax
302 testl %eax,%eax /* lock locked? */
303 jne 2f /* branch if so */
/* %eax == 0 here, so cmpxchg stores the caller pc iff still free. */
304 lock; cmpxchgl %ecx,0(%edx) /* try to acquire the HW lock */
305 jne 2f /* branch on failure */
306 movl $1,%eax
307 LEAF_RET
308 
309 2:
310 #define INNER_LOOP_COUNT 1000
311 /*
312 * Failed to get the lock so set the timeout
313 * and then spin re-checking the lock but pausing
314 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
315 */
316 movl L_ARG1,%ecx /* fetch timeout */
/* %edi/%ebx are callee-saved: preserved here, restored on both exits. */
317 push %edi
318 push %ebx
319 mov %edx,%edi
320 
321 rdtsc /* read cyclecount into %edx:%eax */
322 addl %ecx,%eax /* fetch and timeout */
323 adcl $0,%edx /* add carry */
324 mov %edx,%ecx
325 mov %eax,%ebx /* %ecx:%ebx is the timeout expiry */
326 3:
327 ENABLE_PREEMPTION /* no reason not to be preempted now */
328 4:
329 /*
330 * The inner-loop spin to look for the lock being freed.
331 */
332 mov $(INNER_LOOP_COUNT),%edx
333 5:
334 PAUSE /* pause for hyper-threading */
335 movl 0(%edi),%eax /* spin checking lock value in cache */
336 testl %eax,%eax
337 je 6f /* zero => unlocked, try to grab it */
338 decl %edx /* decrement inner loop count */
339 jnz 5b /* time to check for timeout? */
340 
341 /*
342 * Here after spinning INNER_LOOP_COUNT times, check for timeout
343 */
344 rdtsc /* cyclecount into %edx:%eax */
345 cmpl %ecx,%edx /* compare high-order 32-bits */
346 jb 4b /* continue spinning if less, or */
347 cmpl %ebx,%eax /* compare low-order 32-bits */
/* NOTE(review): this branch re-enters the inner loop at 5: with %edx
 * still holding the TSC high word (not reset to INNER_LOOP_COUNT), so
 * the next timeout check happens after an arbitrary iteration count;
 * it is also taken when the high words differ.  Bounded but worth
 * confirming against the intended 64-bit comparison. */
348 jb 5b /* continue if less, else bail */
/* Timed out: restore saved registers and return 0. */
349 xor %eax,%eax /* with 0 return value */
350 pop %ebx
351 pop %edi
352 LEAF_RET
353 
354 6:
355 /*
356 * Here to try to grab the lock that now appears to be free
357 * after contention.
358 */
/* %eax is 0 (the je above), as cmpxchg requires for the free case. */
359 movl 8+L_PC,%edx /* calling pc (8+ for pushed regs) */
360 DISABLE_PREEMPTION
361 lock; cmpxchgl %edx,0(%edi) /* try to acquire the HW lock */
362 jne 3b /* no - spin again */
363 movl $1,%eax /* yes */
364 pop %ebx
365 pop %edi
366 LEAF_RET
367
/*
 * void hw_lock_unlock(hw_lock_t)
 *
 * Unconditionally release the lock by zeroing the lock word.
 * MACH_RT: drops the preemption disable taken at acquire time.
 */
LEAF_ENTRY(hw_lock_unlock)
	movl	L_ARG0,%eax		/* %eax = lock pointer */
	xorl	%ecx,%ecx
	movl	%ecx,(%eax)		/* release: zero the lock word */
	ENABLE_PREEMPTION		/* balance acquire-side disable */
	LEAF_RET
379
380 /*
381 * unsigned int hw_lock_try(hw_lock_t)
382 * MACH_RT: returns with preemption disabled on success.
383 */
/* One-shot acquisition attempt: returns 1 with preemption disabled on
 * success, 0 with preemption unchanged on failure. */
384 LEAF_ENTRY(hw_lock_try)
385 movl L_ARG0,%edx /* fetch lock pointer */
386 
387 movl L_PC,%ecx
388 DISABLE_PREEMPTION
389 movl 0(%edx),%eax
390 testl %eax,%eax
391 jne 1f
/* %eax == 0 here, so cmpxchg stores the caller pc iff still free. */
392 lock; cmpxchgl %ecx,0(%edx) /* try to acquire the HW lock */
393 jne 1f
394 
395 movl $1,%eax /* success */
396 LEAF_RET
397 
398 1: ENABLE_PREEMPTION /* failure: release preemption... */
399 xorl %eax,%eax /* ...and return failure */
400 LEAF_RET
401
/*
 * unsigned int hw_lock_held(hw_lock_t)
 *
 * Report whether the lock word is currently non-zero.
 * Returns 1 if held, 0 if free.  Inherently racy: the answer can be
 * stale by the time the caller acts on it.
 * MACH_RT: preemption state is left untouched.
 */
LEAF_ENTRY(hw_lock_held)
	movl	L_ARG0,%ecx		/* %ecx = lock pointer */
	xorl	%eax,%eax		/* assume unlocked */
	cmpl	$0,(%ecx)		/* non-zero lock word => held */
	setne	%al			/* %eax = 0 (free) or 1 (held) */
	LEAF_RET
415
/*
 * void mutex_init(mutex_t *)
 *
 * Zero the interlock, owner, waiter count and promoted priority;
 * under MACH_LDEBUG also stamp the type tag and clear the debug
 * pc/thread fields.  The M_* accessors require the mutex in %edx.
 */
LEAF_ENTRY(mutex_init)
	movl	L_ARG0,%edx		/* %edx = mutex (for M_* macros) */
	xorl	%ecx,%ecx
	movl	%ecx,M_ILK		/* interlock available */
	movl	%ecx,M_LOCKED		/* no owner */
	movw	%cx,M_WAITERS		/* no waiters (16-bit field) */
	movw	%cx,M_PROMOTED_PRI	/* no priority promotion */
#if	MACH_LDEBUG
	movl	$ MUTEX_TAG,M_TYPE	/* mark as a mutex */
	movl	%ecx,M_PC		/* no recorded caller pc */
	movl	%ecx,M_THREAD		/* no owning thread */
#endif
	LEAF_RET
431
/*
 * void mutex_lock(mutex_t *)
 *
 * Acquire the mutex, blocking via lck_mtx_lock_wait() if it is owned.
 * Protocol: with interrupts disabled, spin-acquire the interlock
 * (M_ILK) with cmpxchg, then either claim ownership or wait.
 */
432 NONLEAF_ENTRY2(mutex_lock,_mutex_lock)
433 
434 movl B_ARG0,%edx /* fetch lock pointer */
435 
436 CHECK_MUTEX_TYPE()
437 CHECK_NO_SIMPLELOCKS()
438 CHECK_PREEMPTION_LEVEL()
439 
440 pushf /* save interrupt state */
441 cli /* disable interrupts */
442 
443 ml_retry:
444 movl B_PC,%ecx
445 
446 ml_get_hw:
447 movl M_ILK,%eax /* read interlock */
448 testl %eax,%eax /* unlocked? */
449 je 1f /* yes - attempt to lock it */
450 PAUSE /* no - pause */
451 jmp ml_get_hw /* try again */
452 1:
/* %eax == 0 here: cmpxchg stores the caller pc (as the interlock
 * value) only if M_ILK is still zero. */
453 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
454 jne ml_get_hw /* branch on failure to retry */
455 
456 movl M_LOCKED,%ecx /* get lock owner */
457 testl %ecx,%ecx /* is the mutex locked? */
458 jne ml_fail /* yes, we lose */
459 movl %gs:CPU_ACTIVE_THREAD,%ecx
460 movl %ecx,M_LOCKED
461 
462 #if MACH_LDEBUG
463 movl %ecx,M_THREAD
464 movl B_PC,%ecx
465 movl %ecx,M_PC
466 #endif
467 
/* First push preserves %edx across the call; second is the argument. */
468 pushl %edx /* save mutex address */
469 pushl %edx
470 call EXT(lck_mtx_lock_acquire)
471 addl $4,%esp
472 popl %edx /* restore mutex address */
473 
/* Drop the interlock and restore the caller's interrupt state. */
474 xorl %eax,%eax
475 movl %eax,M_ILK
476 
477 popf /* restore interrupt state */
478 
479 NONLEAF_RET
480 
481 ml_fail:
482 ml_block:
483 CHECK_MYLOCK(M_THREAD)
/* Call lck_mtx_lock_wait(mutex, holder).  The interlock is not
 * released here before retrying, so it is presumably dropped inside
 * lck_mtx_lock_wait() — NOTE(review): confirm in its definition. */
484 pushl M_LOCKED
485 pushl %edx /* push mutex address */
486 call EXT(lck_mtx_lock_wait) /* wait for the lock */
487 addl $8,%esp
488 movl B_ARG0,%edx /* refetch mutex address */
489 jmp ml_retry /* and try again */
490
/*
 * boolean_t mutex_try(mutex_t *)
 *
 * One-shot mutex acquire: returns 1 on success, 0 if already owned.
 * Same interlock protocol as mutex_lock(), but never blocks.
 */
491 NONLEAF_ENTRY2(mutex_try,_mutex_try)
492 
493 movl B_ARG0,%edx /* fetch lock pointer */
494 
495 CHECK_MUTEX_TYPE()
496 CHECK_NO_SIMPLELOCKS()
497 
498 movl B_PC,%ecx
499 
500 pushf /* save interrupt state */
501 cli /* disable interrupts */
502 
503 mt_get_hw:
504 movl M_ILK,%eax /* read interlock */
505 testl %eax,%eax /* unlocked? */
506 je 1f /* yes - attempt to lock it */
507 PAUSE /* no - pause */
508 jmp mt_get_hw /* try again */
509 1:
/* %eax == 0 here: take the interlock iff it is still free. */
510 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
511 jne mt_get_hw /* branch on failure to retry */
512 
513 movl M_LOCKED,%ecx /* get lock owner */
514 testl %ecx,%ecx /* is the mutex locked? */
515 jne mt_fail /* yes, we lose */
516 movl %gs:CPU_ACTIVE_THREAD,%ecx
517 movl %ecx,M_LOCKED
518 
519 #if MACH_LDEBUG
520 movl %ecx,M_THREAD
521 movl B_PC,%ecx
522 movl %ecx,M_PC
523 #endif
524 
/* First push preserves %edx across the call; second is the argument. */
525 pushl %edx /* save mutex address */
526 pushl %edx
527 call EXT(lck_mtx_lock_acquire)
528 addl $4,%esp
529 popl %edx /* restore mutex address */
530 
531 xorl %eax,%eax
532 movl %eax,M_ILK
533 
534 popf /* restore interrupt state */
535 
536 movl $1,%eax
537 
538 NONLEAF_RET
539 
/* Mutex already owned: drop the interlock and return 0. */
540 mt_fail:
541 xorl %eax,%eax
542 movl %eax,M_ILK
543 
544 popf /* restore interrupt state */
545 
546 xorl %eax,%eax
547 
548 NONLEAF_RET
549
/*
 * void mutex_unlock(mutex_t *)
 *
 * Release the mutex: take the interlock, wake a waiter if the waiter
 * count is non-zero, then clear the owner and the interlock.
 */
550 NONLEAF_ENTRY(mutex_unlock)
551 movl B_ARG0,%edx /* fetch lock pointer */
552 
553 CHECK_MUTEX_TYPE()
554 CHECK_THREAD(M_THREAD)
555 
556 movl B_PC,%ecx
557 
558 pushf /* save interrupt state */
559 cli /* disable interrupts */
560 
561 mu_get_hw:
562 movl M_ILK,%eax /* read interlock */
563 testl %eax,%eax /* unlocked? */
564 je 1f /* yes - attempt to lock it */
565 PAUSE /* no - pause */
566 jmp mu_get_hw /* try again */
567 1:
/* %eax == 0 here: take the interlock iff it is still free. */
568 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
569 jne mu_get_hw /* branch on failure to retry */
570 
571 cmpw $0,M_WAITERS /* are there any waiters? */
572 jne mu_wakeup /* yes, more work to do */
573 
574 mu_doit:
575 
576 #if MACH_LDEBUG
577 movl $0,M_THREAD /* disown thread */
578 #endif
579 
580 xorl %ecx,%ecx
581 movl %ecx,M_LOCKED /* unlock the mutex */
582 
583 movl %ecx,M_ILK
584 
585 popf /* restore interrupt state */
586 
587 NONLEAF_RET
588 
/* Call lck_mtx_unlock_wakeup(mutex, holder), then finish the unlock.
 * %edx is refetched from B_ARG0 since the call may clobber it. */
589 mu_wakeup:
590 pushl M_LOCKED
591 pushl %edx /* push mutex address */
592 call EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
593 addl $8,%esp
594 movl B_ARG0,%edx /* restore lock pointer */
595 jmp mu_doit
596
597 /*
598 * lck_mtx_lock()
599 * lck_mtx_try_lock()
600 * lck_mtx_unlock()
601 *
602 * These are variants of mutex_lock(), mutex_try() and mutex_unlock() without
603 * DEBUG checks (which require fields not present in lck_mtx_t's).
604 */
/* Indirect locks (M_ITAG == MUTEX_IND) store the real mutex behind
 * M_PTR; the cmove below redirects %edx to it. */
605 NONLEAF_ENTRY(lck_mtx_lock)
606 
607 movl B_ARG0,%edx /* fetch lock pointer */
608 cmpl $(MUTEX_IND),M_ITAG /* is this indirect? */
609 cmove M_PTR,%edx /* yes - take indirection */
610 
611 CHECK_NO_SIMPLELOCKS()
612 CHECK_PREEMPTION_LEVEL()
613 
614 pushf /* save interrupt state */
615 cli /* disable interrupts */
616 
617 lml_retry:
618 movl B_PC,%ecx
619 
620 lml_get_hw:
621 movl M_ILK,%eax /* read interlock */
622 testl %eax,%eax /* unlocked? */
623 je 1f /* yes - attempt to lock it */
624 PAUSE /* no - pause */
625 jmp lml_get_hw /* try again */
626 1:
/* %eax == 0 here: take the interlock iff it is still free. */
627 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
628 jne lml_get_hw /* branch on failure to retry */
629 
630 movl M_LOCKED,%ecx /* get lock owner */
631 testl %ecx,%ecx /* is the mutex locked? */
632 jne lml_fail /* yes, we lose */
633 movl %gs:CPU_ACTIVE_THREAD,%ecx
634 movl %ecx,M_LOCKED
635 
/* First push preserves %edx across the call; second is the argument. */
636 pushl %edx /* save mutex address */
637 pushl %edx
638 call EXT(lck_mtx_lock_acquire)
639 addl $4,%esp
640 popl %edx /* restore mutex address */
641 
642 xorl %eax,%eax
643 movl %eax,M_ILK
644 
645 popf /* restore interrupt state */
646 
647 NONLEAF_RET
648 
/* Owned by someone else: lck_mtx_lock_wait(mutex, holder) blocks;
 * %edx is preserved around the call by the extra push/pop. */
649 lml_fail:
650 CHECK_MYLOCK(M_THREAD)
651 pushl %edx /* save mutex address */
652 pushl M_LOCKED
653 pushl %edx /* push mutex address */
654 call EXT(lck_mtx_lock_wait) /* wait for the lock */
655 addl $8,%esp
656 popl %edx /* restore mutex address */
657 jmp lml_retry /* and try again */
658
/*
 * boolean_t lck_mtx_try_lock(lck_mtx_t *)
 *
 * One-shot variant of lck_mtx_lock(): returns 1 on success, 0 if the
 * mutex is already owned.  Never blocks.
 */
659 NONLEAF_ENTRY(lck_mtx_try_lock)
660 
661 movl B_ARG0,%edx /* fetch lock pointer */
662 cmpl $(MUTEX_IND),M_ITAG /* is this indirect? */
663 cmove M_PTR,%edx /* yes - take indirection */
664 
665 CHECK_NO_SIMPLELOCKS()
666 CHECK_PREEMPTION_LEVEL()
667 
668 movl B_PC,%ecx
669 
670 pushf /* save interrupt state */
671 cli /* disable interrupts */
672 
673 lmt_get_hw:
674 movl M_ILK,%eax /* read interlock */
675 testl %eax,%eax /* unlocked? */
676 je 1f /* yes - attempt to lock it */
677 PAUSE /* no - pause */
678 jmp lmt_get_hw /* try again */
679 1:
/* %eax == 0 here: take the interlock iff it is still free. */
680 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
681 jne lmt_get_hw /* branch on failure to retry */
682 
683 movl M_LOCKED,%ecx /* get lock owner */
684 testl %ecx,%ecx /* is the mutex locked? */
685 jne lmt_fail /* yes, we lose */
686 movl %gs:CPU_ACTIVE_THREAD,%ecx
687 movl %ecx,M_LOCKED
688 
/* First push preserves %edx across the call; second is the argument. */
689 pushl %edx /* save mutex address */
690 pushl %edx
691 call EXT(lck_mtx_lock_acquire)
692 addl $4,%esp
693 popl %edx /* restore mutex address */
694 
695 xorl %eax,%eax
696 movl %eax,M_ILK
697 
698 popf /* restore interrupt state */
699 
700 movl $1,%eax /* return success */
701 NONLEAF_RET
702 
/* Mutex already owned: drop the interlock and return 0. */
703 lmt_fail:
704 xorl %eax,%eax
705 movl %eax,M_ILK
706 
707 popf /* restore interrupt state */
708 
709 xorl %eax,%eax /* return failure */
710 NONLEAF_RET
711
/*
 * void lck_mtx_unlock(lck_mtx_t *)
 *
 * Release the mutex (no DEBUG checks): take the interlock, wake a
 * waiter if any, then clear the owner and the interlock.
 */
712 NONLEAF_ENTRY(lck_mtx_unlock)
713 
714 movl B_ARG0,%edx /* fetch lock pointer */
715 cmpl $(MUTEX_IND),M_ITAG /* is this indirect? */
716 cmove M_PTR,%edx /* yes - take indirection */
717 
718 movl B_PC,%ecx
719 
720 pushf /* save interrupt state */
721 cli /* disable interrupts */
722 
723 lmu_get_hw:
724 movl M_ILK,%eax /* read interlock */
725 testl %eax,%eax /* unlocked? */
726 je 1f /* yes - attempt to lock it */
727 PAUSE /* no - pause */
728 jmp lmu_get_hw /* try again */
729 1:
/* %eax == 0 here: take the interlock iff it is still free. */
730 lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
731 jne lmu_get_hw /* branch on failure to retry */
732 
733 cmpw $0,M_WAITERS /* are there any waiters? */
734 jne lmu_wakeup /* yes, more work to do */
735 
736 lmu_doit:
737 xorl %ecx,%ecx
738 movl %ecx,M_LOCKED /* unlock the mutex */
739 
740 movl %ecx,M_ILK
741 
742 popf /* restore interrupt state */
743 
744 NONLEAF_RET
745 
/* Call lck_mtx_unlock_wakeup(mutex, holder), preserving %edx around
 * the call with the extra push/pop, then finish the unlock. */
746 lmu_wakeup:
747 pushl %edx /* save mutex address */
748 pushl M_LOCKED
749 pushl %edx /* push mutex address */
750 call EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
751 addl $8,%esp
752 popl %edx /* restore mutex pointer */
753 jmp lmu_doit
754
/*
 * void lck_mtx_ilk_unlock(lck_mtx_t *)
 *
 * Release only the mutex interlock word, not the mutex itself.
 * Callers pass the real mutex, so no MUTEX_IND indirection is done.
 */
LEAF_ENTRY(lck_mtx_ilk_unlock)
	movl	L_ARG0,%edx		/* %edx = mutex (for M_ILK) */
	movl	$0,M_ILK		/* drop the interlock */
	LEAF_RET
762
/* void _disable_preemption(void)
 * Delegates to the _DISABLE_PREEMPTION macro (defined elsewhere);
 * compiles to a bare return when MACH_RT is off. */
763 LEAF_ENTRY(_disable_preemption)
764 #if MACH_RT
765 _DISABLE_PREEMPTION
766 #endif /* MACH_RT */
767 LEAF_RET
768
/* void _enable_preemption(void)
 * Under MACH_ASSERT, panics (with the level as panic's argument) if
 * the per-cpu preemption level is not positive before enabling. */
769 LEAF_ENTRY(_enable_preemption)
770 #if MACH_RT
771 #if MACH_ASSERT
772 cmpl $0,%gs:CPU_PREEMPTION_LEVEL
773 jg 1f
774 pushl %gs:CPU_PREEMPTION_LEVEL
775 pushl $2f
776 call EXT(panic)
777 hlt
778 .data
779 2: String "_enable_preemption: preemption_level(%d) < 0!"
780 .text
781 1:
782 #endif /* MACH_ASSERT */
783 _ENABLE_PREEMPTION
784 #endif /* MACH_RT */
785 LEAF_RET
786
/* void _enable_preemption_no_check(void)
 * As _enable_preemption, but delegates to the no-check variant of the
 * enable macro.  MACH_ASSERT still validates the level is positive. */
787 LEAF_ENTRY(_enable_preemption_no_check)
788 #if MACH_RT
789 #if MACH_ASSERT
790 cmpl $0,%gs:CPU_PREEMPTION_LEVEL
791 jg 1f
792 pushl $2f
793 call EXT(panic)
794 hlt
795 .data
796 2: String "_enable_preemption_no_check: preemption_level <= 0!"
797 .text
798 1:
799 #endif /* MACH_ASSERT */
800 _ENABLE_PREEMPTION_NO_CHECK
801 #endif /* MACH_RT */
802 LEAF_RET
803
804
/* void _mp_disable_preemption(void)
 * MP-named alias; identical body to _disable_preemption above. */
805 LEAF_ENTRY(_mp_disable_preemption)
806 #if MACH_RT
807 _DISABLE_PREEMPTION
808 #endif /* MACH_RT */
809 LEAF_RET
810
/* void _mp_enable_preemption(void)
 * MP-named alias of _enable_preemption: MACH_ASSERT panics (passing
 * the level to panic) unless the preemption level is positive. */
811 LEAF_ENTRY(_mp_enable_preemption)
812 #if MACH_RT
813 #if MACH_ASSERT
814 cmpl $0,%gs:CPU_PREEMPTION_LEVEL
815 jg 1f
816 pushl %gs:CPU_PREEMPTION_LEVEL
817 pushl $2f
818 call EXT(panic)
819 hlt
820 .data
821 2: String "_mp_enable_preemption: preemption_level (%d) <= 0!"
822 .text
823 1:
824 #endif /* MACH_ASSERT */
825 _ENABLE_PREEMPTION
826 #endif /* MACH_RT */
827 LEAF_RET
828
/* void _mp_enable_preemption_no_check(void)
 * MP-named alias of _enable_preemption_no_check. */
829 LEAF_ENTRY(_mp_enable_preemption_no_check)
830 #if MACH_RT
831 #if MACH_ASSERT
832 cmpl $0,%gs:CPU_PREEMPTION_LEVEL
833 jg 1f
834 pushl $2f
835 call EXT(panic)
836 hlt
837 .data
838 2: String "_mp_enable_preemption_no_check: preemption_level <= 0!"
839 .text
840 1:
841 #endif /* MACH_ASSERT */
842 _ENABLE_PREEMPTION_NO_CHECK
843 #endif /* MACH_RT */
844 LEAF_RET
845
846
/*
 * void i_bit_set(int bit, int *addr)
 *
 * Atomically set bit "bit" in the word at "addr".
 */
LEAF_ENTRY(i_bit_set)
	movl	L_ARG0,%ecx		/* %ecx = bit number */
	movl	L_ARG1,%edx		/* %edx = target word */
	lock
	bts	%ecx,(%edx)		/* atomic set */
	LEAF_RET
853
/*
 * void i_bit_clear(int bit, int *addr)
 *
 * Atomically clear bit "bit" in the word at "addr".
 */
LEAF_ENTRY(i_bit_clear)
	movl	L_ARG0,%ecx		/* %ecx = bit number */
	movl	L_ARG1,%edx		/* %edx = target word */
	lock
	btr	%ecx,(%edx)		/* atomic clear */
	LEAF_RET
860
/*
 * void bit_lock(int bit, int *addr)
 *
 * Spin until the bit lock is acquired: atomic test-and-set, retrying
 * for as long as the previous bit value (returned in CF) was 1.
 */
LEAF_ENTRY(bit_lock)
	movl	L_ARG0,%edx		/* %edx = bit number */
	movl	L_ARG1,%eax		/* %eax = lock word address */
1:
	lock
	bts	%edx,(%eax)		/* CF = previous bit value */
	jc	1b			/* already held - spin */
	LEAF_RET
869
/*
 * unsigned int bit_lock_try(int bit, int *addr)
 *
 * One-shot attempt to take the bit lock.  Returns non-zero (the lock
 * address left in %eax) on success, 0 on failure.
 */
LEAF_ENTRY(bit_lock_try)
	movl	L_ARG0,%edx		/* %edx = bit number */
	movl	L_ARG1,%eax		/* %eax = lock word address */
	lock
	bts	%edx,(%eax)		/* CF = previous bit value */
	jc	1f			/* was set - somebody beat us */
	LEAF_RET			/* success: %eax (address) non-zero */
1:
	xorl	%eax,%eax		/* failure: return 0 */
	LEAF_RET
880
/*
 * void bit_unlock(int bit, int *addr)
 *
 * Release the bit lock by atomically clearing the bit.
 */
LEAF_ENTRY(bit_unlock)
	movl	L_ARG0,%edx		/* %edx = bit number */
	movl	L_ARG1,%eax		/* %eax = lock word address */
	lock
	btr	%edx,(%eax)		/* atomic clear releases the lock */
	LEAF_RET