/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_ldebug.h>
#include <i386/asm.h>

#include "assym.s"

#define	PAUSE	rep; nop

/*
 * When performance isn't the only concern, it's
 * nice to build stack frames...
 */
#define	BUILD_STACK_FRAMES	(GPROF || \
				((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB))

#if	BUILD_STACK_FRAMES

/* Stack-frame-relative: */
#define	L_PC		B_PC
#define	L_ARG0		B_ARG0
#define	L_ARG1		B_ARG1

#define	LEAF_ENTRY(name)	\
	Entry(name);		\
	FRAME;			\
	MCOUNT

#define	LEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2);		\
	FRAME;			\
	MCOUNT

#define	LEAF_RET		\
	EMARF;			\
	ret

#else	/* BUILD_STACK_FRAMES */

/* Stack-pointer-relative: */
#define	L_PC		S_PC
#define	L_ARG0		S_ARG0
#define	L_ARG1		S_ARG1

#define	LEAF_ENTRY(name)	\
	Entry(name)

#define	LEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2)

#define	LEAF_RET		\
	ret

#endif	/* BUILD_STACK_FRAMES */


/* Non-leaf routines always have a stack frame: */

#define	NONLEAF_ENTRY(name)	\
	Entry(name);		\
	FRAME;			\
	MCOUNT

#define	NONLEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2);		\
	FRAME;			\
	MCOUNT

#define	NONLEAF_RET		\
	EMARF;			\
	ret


#define	M_ILK		(%edx)
#define	M_LOCKED	MUTEX_LOCKED(%edx)
#define	M_WAITERS	MUTEX_WAITERS(%edx)
#define	M_PROMOTED_PRI	MUTEX_PROMOTED_PRI(%edx)
#define	M_ITAG		MUTEX_ITAG(%edx)
#define	M_PTR		MUTEX_PTR(%edx)
#if	MACH_LDEBUG
#define	M_TYPE		MUTEX_TYPE(%edx)
#define	M_PC		MUTEX_PC(%edx)
#define	M_THREAD	MUTEX_THREAD(%edx)
#endif	/* MACH_LDEBUG */

#include <i386/mp.h>
#define	CX(addr,reg)	addr(,reg,4)

#if	MACH_LDEBUG
/*
 * Routines for general lock debugging.
 */
#define	S_TYPE		SLOCK_TYPE(%edx)
#define	S_PC		SLOCK_PC(%edx)
#define	S_THREAD	SLOCK_THREAD(%edx)
#define	S_DURATIONH	SLOCK_DURATIONH(%edx)
#define	S_DURATIONL	SLOCK_DURATIONL(%edx)

/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to mutex functions on a lock of
 * type simplelock, and vice versa.
 */
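/*
 * Schematically, CHECK_MUTEX_TYPE() amounts to the following C
 * (a sketch only; "type" stands for the MUTEX_TYPE field laid
 * out in assym.s):
 *
 *	if (m->type != MUTEX_TAG)
 *		panic("not a mutex!");
 *
 * CHECK_SIMPLE_LOCK_TYPE() is the analogous test against USLOCK_TAG.
 */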
#define	CHECK_MUTEX_TYPE()				\
	cmpl	$ MUTEX_TAG,M_TYPE		;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"not a mutex!"			;	\
	.text					;	\
1:

#define	CHECK_SIMPLE_LOCK_TYPE()			\
	cmpl	$ USLOCK_TAG,S_TYPE		;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"not a simple lock!"		;	\
	.text					;	\
1:

/*
 * If one or more simple locks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since acquiring a mutex may context switch, it must never be
 * attempted while a simple lock is held).
 */
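/*
 * In C terms the checks below are (a sketch; the field names mirror
 * the %gs-relative per-cpu offsets used throughout this file):
 *
 *	if (current_cpu->cpu_preemption_level != 0)
 *		panic("preemption_level != 0!");
 *	if (current_cpu->cpu_simple_lock_count != 0)
 *		panic("simple_locks_held!");
 */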
#if	MACH_RT
#define	CHECK_PREEMPTION_LEVEL()			\
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"preemption_level != 0!"	;	\
	.text					;	\
1:
#else	/* MACH_RT */
#define	CHECK_PREEMPTION_LEVEL()
#endif	/* MACH_RT */

#define	CHECK_NO_SIMPLELOCKS()				\
	cmpl	$0,%gs:CPU_SIMPLE_LOCK_COUNT	;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"simple_locks_held!"		;	\
	.text					;	\
1:

/*
 * Verifies return to the correct thread in "unlock" situations.
 */
#define	CHECK_THREAD(thd)				\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"wrong thread!"			;	\
	.text					;	\
1:

#define	CHECK_MYLOCK(thd)				\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	jne	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"mylock attempt!"		;	\
	.text					;	\
1:

#define	METER_SIMPLE_LOCK_LOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_lock)		;	\
	popl	reg

#define	METER_SIMPLE_LOCK_UNLOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_unlock)	;	\
	popl	reg

#else	/* MACH_LDEBUG */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
#define	CHECK_THREAD(thd)
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_NO_SIMPLELOCKS()
#define	CHECK_MYLOCK(thd)
#define	METER_SIMPLE_LOCK_LOCK(reg)
#define	METER_SIMPLE_LOCK_UNLOCK(reg)
#endif	/* MACH_LDEBUG */


/*
 *	void hw_lock_init(hw_lock_t)
 *
 *	Initialize a hardware lock.
 */
LEAF_ENTRY(hw_lock_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	$0,0(%edx)		/* clear the lock */
	LEAF_RET

/*
 *	void hw_lock_lock(hw_lock_t)
 *
 *	Acquire lock, spinning until it becomes available.
 *	MACH_RT: also return with preemption disabled.
 */
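/*
 * A C sketch of the loop below (illustrative only; atomic_cmpxchg()
 * is a hypothetical compare-and-swap that returns true on success,
 * and the lock word is set to the caller's pc to aid debugging):
 *
 *	void hw_lock_lock(hw_lock_t lock)
 *	{
 *		for (;;) {
 *			disable_preemption();
 *			if (lock->lock_data == 0 &&
 *			    atomic_cmpxchg(&lock->lock_data, 0, caller_pc))
 *				return;			// acquired
 *			enable_preemption();		// spin preemptibly
 *			cpu_pause();
 *		}
 *	}
 */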
LEAF_ENTRY(hw_lock_lock)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	L_PC,%ecx
1:	DISABLE_PREEMPTION
	movl	0(%edx), %eax
	testl	%eax,%eax		/* lock locked? */
	jne	3f			/* branch if so */
	lock; cmpxchgl	%ecx,0(%edx)	/* try to acquire the HW lock */
	jne	3f
	movl	$1,%eax			/* In case this was a timeout call */
	LEAF_RET			/* if yes, then nothing left to do */

3:	ENABLE_PREEMPTION		/* no reason we can't be preemptable */
	PAUSE				/* pause for hyper-threading */
	jmp	1b			/* try again */

/*
 *	unsigned int hw_lock_to(hw_lock_t, unsigned int)
 *
 *	Acquire lock, spinning until it becomes available or timeout.
 *	MACH_RT: also return with preemption disabled.
 */
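/*
 * The control flow below, approximated in C (a sketch; rdtsc64()
 * stands in for the raw rdtsc reads, the timeout is in TSC ticks,
 * and atomic_cmpxchg() is the same hypothetical helper as above):
 *
 *	unsigned int hw_lock_to(hw_lock_t lock, unsigned int timeout)
 *	{
 *		uint64_t deadline = rdtsc64() + timeout;
 *		for (;;) {
 *			disable_preemption();
 *			if (lock->lock_data == 0 &&
 *			    atomic_cmpxchg(&lock->lock_data, 0, caller_pc))
 *				return 1;		// acquired
 *			enable_preemption();
 *			do {
 *				for (int i = 0; i < INNER_LOOP_COUNT; i++) {
 *					cpu_pause();
 *					if (lock->lock_data == 0)
 *						goto retry;	// looks free
 *				}
 *			} while (rdtsc64() < deadline);
 *			return 0;			// timed out
 *	retry:		;
 *		}
 *	}
 */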
LEAF_ENTRY(hw_lock_to)
1:
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	L_PC,%ecx
	/*
	 * Attempt to grab the lock immediately
	 * - fastpath without timeout nonsense.
	 */
	DISABLE_PREEMPTION
	movl	0(%edx), %eax
	testl	%eax,%eax		/* lock locked? */
	jne	2f			/* branch if so */
	lock; cmpxchgl	%ecx,0(%edx)	/* try to acquire the HW lock */
	jne	2f			/* branch on failure */
	movl	$1,%eax
	LEAF_RET

2:
#define	INNER_LOOP_COUNT	1000
	/*
	 * Failed to get the lock, so set up the timeout and then
	 * spin re-checking the lock, pausing every so many
	 * (INNER_LOOP_COUNT) spins to check for timeout.
	 */
	movl	L_ARG1,%ecx		/* fetch timeout */
	push	%edi
	push	%ebx
	mov	%edx,%edi

	rdtsc				/* read cyclecount into %edx:%eax */
	addl	%ecx,%eax		/* add timeout to low-order 32 bits */
	adcl	$0,%edx			/* add carry */
	mov	%edx,%ecx
	mov	%eax,%ebx		/* %ecx:%ebx is the timeout expiry */
3:
	ENABLE_PREEMPTION		/* no reason not to be preempted now */
4:
	/*
	 * The inner-loop spin to look for the lock being freed.
	 */
	mov	$(INNER_LOOP_COUNT),%edx
5:
	PAUSE				/* pause for hyper-threading */
	movl	0(%edi),%eax		/* spin checking lock value in cache */
	testl	%eax,%eax
	je	6f			/* zero => unlocked, try to grab it */
	decl	%edx			/* decrement inner loop count */
	jnz	5b			/* if not exhausted, keep spinning */

	/*
	 * Here after spinning INNER_LOOP_COUNT times, check for timeout
	 */
	rdtsc				/* cyclecount into %edx:%eax */
	cmpl	%ecx,%edx		/* compare high-order 32-bits */
	jb	4b			/* continue spinning if less, or */
	cmpl	%ebx,%eax		/* compare low-order 32-bits */
	jb	4b			/* continue if less, else bail */
	xor	%eax,%eax		/* with 0 return value */
	pop	%ebx
	pop	%edi
	LEAF_RET

6:
	/*
	 * Here to try to grab the lock that now appears to be free
	 * after contention.
	 */
	movl	8+L_PC,%edx		/* calling pc (8+ for pushed regs) */
	DISABLE_PREEMPTION
	lock; cmpxchgl	%edx,0(%edi)	/* try to acquire the HW lock */
	jne	3b			/* no - spin again */
	movl	$1,%eax			/* yes */
	pop	%ebx
	pop	%edi
	LEAF_RET

/*
 *	void hw_lock_unlock(hw_lock_t)
 *
 *	Unconditionally release lock.
 *	MACH_RT: release preemption level.
 */
LEAF_ENTRY(hw_lock_unlock)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	$0,0(%edx)		/* clear the lock */
	ENABLE_PREEMPTION
	LEAF_RET

/*
 *	unsigned int hw_lock_try(hw_lock_t)
 *	MACH_RT: returns with preemption disabled on success.
 */
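/*
 * In C (a sketch): a single acquisition attempt, leaving preemption
 * disabled only if the lock was taken:
 *
 *	unsigned int hw_lock_try(hw_lock_t lock)
 *	{
 *		disable_preemption();
 *		if (lock->lock_data == 0 &&
 *		    atomic_cmpxchg(&lock->lock_data, 0, caller_pc))
 *			return 1;		// success
 *		enable_preemption();
 *		return 0;			// failure
 *	}
 */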
LEAF_ENTRY(hw_lock_try)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	L_PC,%ecx
	DISABLE_PREEMPTION
	movl	0(%edx),%eax
	testl	%eax,%eax
	jne	1f
	lock; cmpxchgl	%ecx,0(%edx)	/* try to acquire the HW lock */
	jne	1f

	movl	$1,%eax			/* success */
	LEAF_RET

1:	ENABLE_PREEMPTION		/* failure: release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
	LEAF_RET

/*
 *	unsigned int hw_lock_held(hw_lock_t)
 *	MACH_RT: doesn't change preemption state.
 *	N.B.  Racy, of course.
 */
LEAF_ENTRY(hw_lock_held)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	0(%edx),%eax		/* check lock value */
	testl	%eax,%eax
	movl	$1,%ecx
	cmovne	%ecx,%eax		/* 0 => unlocked, 1 => locked */
	LEAF_RET

LEAF_ENTRY(mutex_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* clear interlock */
	movl	%eax,M_LOCKED		/* clear locked flag */
	movw	%ax,M_WAITERS		/* init waiter count */
	movw	%ax,M_PROMOTED_PRI

#if	MACH_LDEBUG
	movl	$ MUTEX_TAG,M_TYPE	/* set lock type */
	movl	%eax,M_PC		/* init caller pc */
	movl	%eax,M_THREAD		/* and owning thread */
#endif

	LEAF_RET

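/*
 * void mutex_lock(mutex_t*)
 *
 * Acquire a mutex, blocking if it is already owned.  The routine first
 * takes the mutex's interlock (a spin lock taken with interrupts
 * disabled), then examines the owner.  Schematically, in C (a sketch;
 * the helper names mirror the calls made below):
 *
 *	restart:
 *		interlock_acquire(m);		// the ml_get_hw spin
 *		if (m->locked != NULL) {	// already owned?
 *			lck_mtx_lock_wait(m, m->locked);  // block; drops interlock
 *			goto restart;
 *		}
 *		m->locked = current_thread();
 *		lck_mtx_lock_acquire(m);	// priority promotion accounting
 *		interlock_release(m);
 */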
NONLEAF_ENTRY2(mutex_lock,_mutex_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

ml_retry:
	movl	B_PC,%ecx

ml_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no  - pause */
	jmp	ml_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	ml_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	ml_fail			/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

#if	MACH_LDEBUG
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif

	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */

	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

ml_fail:
ml_block:
	CHECK_MYLOCK(M_THREAD)
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	movl	B_ARG0,%edx		/* refetch mutex address */
	jmp	ml_retry		/* and try again */

NONLEAF_ENTRY2(mutex_try,_mutex_try)

	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()

	movl	B_PC,%ecx

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

mt_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no  - pause */
	jmp	mt_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	mt_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	mt_fail			/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

#if	MACH_LDEBUG
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif

	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */

	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	movl	$1,%eax

	NONLEAF_RET

mt_fail:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	xorl	%eax,%eax

	NONLEAF_RET

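/*
 * void mutex_unlock(mutex_t*)
 *
 * Release a mutex, waking a waiter if one exists.  In outline (a C
 * sketch; the helper names mirror the calls below):
 *
 *	interlock_acquire(m);
 *	if (m->waiters != 0)
 *		lck_mtx_unlock_wakeup(m, m->locked);	// wake one waiter
 *	m->locked = NULL;
 *	interlock_release(m);
 */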
NONLEAF_ENTRY(mutex_unlock)
	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_THREAD(M_THREAD)

	movl	B_PC,%ecx

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

mu_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no  - pause */
	jmp	mu_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	mu_get_hw		/* branch on failure to retry */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	mu_wakeup		/* yes, more work to do */

mu_doit:

#if	MACH_LDEBUG
	movl	$0,M_THREAD		/* disown thread */
#endif

	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */

	movl	%ecx,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

mu_wakeup:
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlock_wakeup)	/* yes, wake a thread */
	addl	$8,%esp
	movl	B_ARG0,%edx		/* restore lock pointer */
	jmp	mu_doit

/*
 * lck_mtx_lock()
 * lck_mtx_try_lock()
 * lck_mtx_unlock()
 *
 * These are variants of mutex_lock(), mutex_try() and mutex_unlock() without
 * the DEBUG checks (which require fields not present in an lck_mtx_t).
 */
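/*
 * The indirection step shared by these routines, in C terms (a sketch;
 * the tag and pointer fields stand for the M_ITAG/M_PTR offsets):
 *
 *	lck_mtx_t *mutex = arg;
 *	if (mutex->tag == MUTEX_IND)	// indirect lock?
 *		mutex = mutex->ptr;	// operate on the real one
 */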
NONLEAF_ENTRY(lck_mtx_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

lml_retry:
	movl	B_PC,%ecx

lml_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no  - pause */
	jmp	lml_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	lml_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	lml_fail		/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */

	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

lml_fail:
	CHECK_MYLOCK(M_THREAD)
	pushl	%edx			/* save mutex address */
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	popl	%edx			/* restore mutex address */
	jmp	lml_retry		/* and try again */

NONLEAF_ENTRY(lck_mtx_try_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	movl	B_PC,%ecx

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

lmt_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no  - pause */
	jmp	lmt_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	lmt_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	lmt_fail		/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */

	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	movl	$1,%eax			/* return success */
	NONLEAF_RET

lmt_fail:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	xorl	%eax,%eax		/* return failure */
	NONLEAF_RET

NONLEAF_ENTRY(lck_mtx_unlock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	movl	B_PC,%ecx

	pushf				/* save interrupt state */
	cli				/* disable interrupts */

lmu_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	1f			/* yes - attempt to lock it */
	PAUSE				/* no  - pause */
	jmp	lmu_get_hw		/* try again */
1:
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	lmu_get_hw		/* branch on failure to retry */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	lmu_wakeup		/* yes, more work to do */

lmu_doit:
	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */

	movl	%ecx,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

lmu_wakeup:
	pushl	%edx			/* save mutex address */
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlock_wakeup)	/* yes, wake a thread */
	addl	$8,%esp
	popl	%edx			/* restore mutex pointer */
	jmp	lmu_doit

LEAF_ENTRY(lck_mtx_ilk_unlock)
	movl	L_ARG0,%edx		/* no indirection here */

	xorl	%eax,%eax
	movl	%eax,M_ILK

	LEAF_RET

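/*
 * The preemption entry points below maintain a per-cpu nesting count.
 * Conceptually (a C sketch; check_pending_preemption() is a
 * hypothetical stand-in for the work done by _ENABLE_PREEMPTION):
 *
 *	void _disable_preemption(void)
 *	{
 *		current_cpu->cpu_preemption_level++;
 *	}
 *
 *	void _enable_preemption(void)
 *	{
 *		assert(current_cpu->cpu_preemption_level > 0);
 *		if (--current_cpu->cpu_preemption_level == 0)
 *			check_pending_preemption();
 *	}
 */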
LEAF_ENTRY(_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption: preemption_level(%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET


LEAF_ENTRY(_mp_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_mp_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption: preemption_level (%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET


LEAF_ENTRY(i_bit_set)
	movl	L_ARG0,%edx
	movl	L_ARG1,%eax
	lock
	bts	%edx,(%eax)
	LEAF_RET

LEAF_ENTRY(i_bit_clear)
	movl	L_ARG0,%edx
	movl	L_ARG1,%eax
	lock
	btr	%edx,(%eax)
	LEAF_RET

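/*
 * The spinning bit locks below rely on "lock bts" (bit test-and-set),
 * which atomically sets a bit and returns its previous value in the
 * carry flag.  In C terms (a sketch; atomic_bit_test_set() is a
 * hypothetical helper returning the bit's old value):
 *
 *	void bit_lock(int bit, char *addr)
 *	{
 *		while (atomic_bit_test_set(addr, bit))
 *			continue;		// spin while already set
 *	}
 *
 *	unsigned int bit_lock_try(int bit, char *addr)
 *	{
 *		return !atomic_bit_test_set(addr, bit);	// nonzero => acquired
 *	}
 */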
LEAF_ENTRY(bit_lock)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
1:
	lock
	bts	%ecx,(%eax)
	jb	1b
	LEAF_RET

LEAF_ENTRY(bit_lock_try)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
	lock
	bts	%ecx,(%eax)
	jb	bit_lock_failed
	LEAF_RET		/* %eax better not be null ! */
bit_lock_failed:
	xorl	%eax,%eax
	LEAF_RET

LEAF_ENTRY(bit_unlock)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
	lock
	btr	%ecx,(%eax)
	LEAF_RET