]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/i386_lock.s
xnu-344.49.tar.gz
[apple/xnu.git] / osfmk / i386 / i386_lock.s
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1989 Carnegie-Mellon University
31 * All rights reserved. The CMU software License Agreement specifies
32 * the terms and conditions for use and redistribution.
33 */
34
35 #include <cpus.h>
36 #include <mach_rt.h>
37 #include <platforms.h>
38 #include <mach_ldebug.h>
39 #include <i386/asm.h>
40 #include <kern/etap_options.h>
41
42 #include "assym.s"
43
/*
 * When performance isn't the only concern, it's
 * nice to build stack frames...
 *
 * Debug/tracing builds keep real %ebp frames (FRAME/EMARF presumably
 * come from i386/asm.h -- TODO confirm) and address the caller's pc
 * and arguments through %ebp; otherwise FRAME/EMARF are redefined to
 * nothing and the accessors go straight through %esp.
 */
#define	BUILD_STACK_FRAMES ((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB)

#if	BUILD_STACK_FRAMES

/* caller's return pc and arguments, relative to the %ebp frame */
#define	L_PC		4(%ebp)
#define	L_ARG0		8(%ebp)
#define	L_ARG1		12(%ebp)

/* ETAP locals carved out below the frame pointer (see mutex_lock) */
#define	SWT_HI		-4(%ebp)	/* wait time, high word */
#define	SWT_LO		-8(%ebp)	/* wait time, low word */
#define	MISSED		-12(%ebp)	/* "wait timestamp taken" flag */

#else	/* BUILD_STACK_FRAMES */

/* frameless: arguments sit just above the return address */
#undef	FRAME
#undef	EMARF
#define	FRAME
#define	EMARF
#define	L_PC		(%esp)
#define	L_ARG0		4(%esp)
#define	L_ARG1		8(%esp)

#endif	/* BUILD_STACK_FRAMES */
71
72
/*
 * Byte offsets of the fields of a mutex; throughout this file %edx
 * holds the mutex pointer.  Note the packed layout: the interlock and
 * locked flags are single bytes, waiters/promoted_pri are 16-bit.
 */
#define	M_ILK		(%edx)		/* HW interlock byte */
#define	M_LOCKED	1(%edx)		/* mutex-held byte */
#define	M_WAITERS	2(%edx)		/* 16-bit waiter count */
#define	M_PROMOTED_PRI	4(%edx)		/* 16-bit promoted priority */
#if	MACH_LDEBUG
#define	M_TYPE		6(%edx)		/* tag: must be MUTEX_TAG */
#define	M_PC		10(%edx)	/* pc of last acquirer */
#define	M_THREAD	14(%edx)	/* owning thread */
#endif	/* MACH_LDEBUG */

#include <i386/AT386/mp/mp.h>
#if	(NCPUS > 1)
/* index a per-CPU array of 32-bit counters by CPU number */
#define	CX(addr,reg)	addr(,reg,4)
#else
#define	CPU_NUMBER(reg)
#define	CX(addr,reg)	addr
#endif	/* (NCPUS > 1) */
90
#if	MACH_LDEBUG
/*
 * Routines for general lock debugging.
 */

/* Byte offsets of the debug fields of a simple lock (pointer in %edx). */
#define	S_TYPE		4(%edx)		/* tag: must be SIMPLE_LOCK_TAG */
#define	S_PC		8(%edx)		/* pc of last acquirer */
#define	S_THREAD	12(%edx)	/* owning thread */
#define	S_DURATIONH	16(%edx)	/* duration, high word (unused here) */
#define	S_DURATIONL	20(%edx)	/* duration, low word (unused here) */

/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.  The panic string lives
 * in .data; 1:/2: are local labels scoped to each expansion.
 */
#define	CHECK_MUTEX_TYPE()					\
	cmpl	$ MUTEX_TAG,M_TYPE			;	\
	je	1f					;	\
	pushl	$2f					;	\
	call	EXT(panic)				;	\
	hlt						;	\
	.data						;	\
2:	String	"not a mutex!"				;	\
	.text						;	\
1:

#define	CHECK_SIMPLE_LOCK_TYPE()				\
	cmpl	$ SIMPLE_LOCK_TAG,S_TYPE		;	\
	je	1f					;	\
	pushl	$2f					;	\
	call	EXT(panic)				;	\
	hlt						;	\
	.data						;	\
2:	String	"not a simple lock!"			;	\
	.text						;	\
1:
127
/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
#if 0 /*MACH_RT - 11/12/99 - lion@apple.com disable check for now*/
/* panics if the per-CPU preemption level is non-zero */
#define	CHECK_PREEMPTION_LEVEL()				\
	movl	$ CPD_PREEMPTION_LEVEL,%eax		;	\
	cmpl	$0,%gs:(%eax)				;	\
	je	1f					;	\
	pushl	$2f					;	\
	call	EXT(panic)				;	\
	hlt						;	\
	.data						;	\
2:	String	"preemption_level != 0!"		;	\
	.text						;	\
1:
#else	/* MACH_RT */
#define	CHECK_PREEMPTION_LEVEL()
#endif	/* MACH_RT */

/* panics if this CPU's simple-lock count is non-zero */
#define	CHECK_NO_SIMPLELOCKS()					\
	movl	$ CPD_SIMPLE_LOCK_COUNT,%eax		;	\
	cmpl	$0,%gs:(%eax)				;	\
	je	1f					;	\
	pushl	$2f					;	\
	call	EXT(panic)				;	\
	hlt						;	\
	.data						;	\
2:	String	"simple_locks_held!"			;	\
	.text						;	\
1:
161
/*
 * Verifies return to the correct thread in "unlock" situations.
 * Panics if the active thread is known (non-zero) and differs from
 * the recorded owner `thd'.
 */
#define	CHECK_THREAD(thd)					\
	movl	$ CPD_ACTIVE_THREAD,%eax		;	\
	movl	%gs:(%eax),%ecx				;	\
	testl	%ecx,%ecx				;	\
	je	1f					;	\
	cmpl	%ecx,thd				;	\
	je	1f					;	\
	pushl	$2f					;	\
	call	EXT(panic)				;	\
	hlt						;	\
	.data						;	\
2:	String	"wrong thread!"				;	\
	.text						;	\
1:

/*
 * Opposite check: panics if the active thread already owns the lock
 * (recursive acquisition attempt).
 */
#define	CHECK_MYLOCK(thd)					\
	movl	$ CPD_ACTIVE_THREAD,%eax		;	\
	movl	%gs:(%eax),%ecx				;	\
	testl	%ecx,%ecx				;	\
	je	1f					;	\
	cmpl	%ecx,thd				;	\
	jne	1f					;	\
	pushl	$2f					;	\
	call	EXT(panic)				;	\
	hlt						;	\
	.data						;	\
2:	String	"mylock attempt!"			;	\
	.text						;	\
1:

/* metering hooks; `reg' (the lock pointer) is preserved across the call */
#define	METER_SIMPLE_LOCK_LOCK(reg)				\
	pushl	reg					;	\
	call	EXT(meter_simple_lock)			;	\
	popl	reg

#define	METER_SIMPLE_LOCK_UNLOCK(reg)				\
	pushl	reg					;	\
	call	EXT(meter_simple_unlock)		;	\
	popl	reg
204
#else	/* MACH_LDEBUG */
/*
 * Non-debug builds: all checks/metering compile to nothing.
 * Fix: CHECK_SIMPLE_LOCK_TYPE was defined WITHOUT parentheses, making
 * it an object-like macro; its call sites use CHECK_SIMPLE_LOCK_TYPE()
 * which would have left a stray "()" in the assembly output had the
 * (currently "#if 0"ed) simple-lock code been re-enabled.
 */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
#define	CHECK_THREAD(thd)
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_NO_SIMPLELOCKS()
#define	CHECK_MYLOCK(thd)
#define	METER_SIMPLE_LOCK_LOCK(reg)
#define	METER_SIMPLE_LOCK_UNLOCK(reg)
#endif	/* MACH_LDEBUG */
215
216
/*
 * void hw_lock_init(hw_lock_t)
 *
 * Initialize a hardware lock: mark it available by clearing
 * its first byte.
 */
ENTRY(hw_lock_init)
	FRAME
	movl	L_ARG0,%ecx		/* %ecx = lock pointer */
	movb	$0,0(%ecx)		/* store "unlocked" directly */
	EMARF
	ret
229
/*
 * void hw_lock_lock(hw_lock_t)
 * unsigned int hw_lock_to(hw_lock_t, unsigned int)
 *
 * Acquire lock, spinning until it becomes available.
 * XXX: For now, we don't actually implement the timeout.
 * MACH_RT: also return with preemption disabled.
 */
ENTRY2(hw_lock_lock,hw_lock_to)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	/*
	 * Attempt phase: take the lock with an atomic exchange while
	 * preemption is disabled, so we can never be preempted while
	 * holding it.
	 */
1:	DISABLE_PREEMPTION(%eax)
	movb	$1,%cl
	xchgb	0(%edx),%cl		/* try to acquire the HW lock */
	testb	%cl,%cl			/* old value zero => we got it */
	jne	3f
	movl	$1,%eax			/* In case this was a timeout call */
	EMARF				/* if yes, then nothing left to do */
	ret

3:	ENABLE_PREEMPTION(%eax)		/* no reason we can't be preemptable now */

	/*
	 * Spin phase: read-only test of the (cached) lock byte; only
	 * retry the expensive atomic exchange once it reads clear.
	 */
	movb	$1,%cl
2:	testb	%cl,0(%edx)		/* spin checking lock value in cache */
	jne	2b			/* non-zero means locked, keep spinning */
	jmp	1b			/* zero means unlocked, try to grab it */
257
/*
 * void hw_lock_unlock(hw_lock_t)
 *
 * Unconditionally release lock.
 * MACH_RT: release preemption level.
 */
ENTRY(hw_lock_unlock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	xchgb	0(%edx),%al		/* clear the lock... a mov instruction */
					/* ...might be cheaper and less paranoid */
					/* (xchg is a serializing store) */
	ENABLE_PREEMPTION(%eax)		/* undo hw_lock_lock's disable */
	EMARF
	ret
273
/*
 * unsigned int hw_lock_try(hw_lock_t)
 *
 * One-shot acquisition attempt; returns 1 on success, 0 on failure.
 * MACH_RT: returns with preemption disabled on success.
 */
ENTRY(hw_lock_try)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	DISABLE_PREEMPTION(%eax)
	movb	$1,%cl
	xchgb	0(%edx),%cl		/* try to acquire the HW lock */
	testb	%cl,%cl			/* old value non-zero => already held */
	jne	1f			/* failure path */

	movl	$1,%eax			/* success; preemption stays disabled */
	EMARF
	ret

1:	ENABLE_PREEMPTION(%eax)		/* failure: release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
	EMARF
	ret
296
/*
 * unsigned int hw_lock_held(hw_lock_t)
 *
 * Report whether the lock byte is currently non-zero.
 * MACH_RT: doesn't change preemption state.
 * N.B. Racy, of course.
 */
ENTRY(hw_lock_held)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	xorl	%eax,%eax		/* assume: not locked */
	cmpb	$0,0(%edx)		/* peek at the lock byte (racy) */
	je	1f			/* zero => really not locked */
	movl	$1,%eax			/* non-zero => locked */
1:	EMARF
	ret
316
317
318
319 #if 0
320
321
/* NOTE: this whole simple-lock region is compiled out ("#if 0"). */
ENTRY(_usimple_lock_init)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movb	%al,USL_INTERLOCK(%edx)	/* unlock the HW lock */
	EMARF
	ret
329
/* Compiled out ("#if 0"): spin-acquire a usimple lock. */
ENTRY(_simple_lock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

sl_get_hw:
	movb	$1,%cl
	xchgb	USL_INTERLOCK(%edx),%cl	/* try to acquire the HW lock */
	testb	%cl,%cl			/* did we succeed? */

#if	MACH_LDEBUG
	je	5f
	CHECK_MYLOCK(S_THREAD)		/* debug: panic on recursive attempt */
	jmp	sl_get_hw
5:
#else	/* MACH_LDEBUG */
	jne	sl_get_hw		/* no, try again */
#endif	/* MACH_LDEBUG */

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,S_PC		/* record acquirer's pc */
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	movl	%ecx,S_THREAD		/* record owning thread */
	/* NOTE(review): %eax still holds the CPD_ACTIVE_THREAD offset here,
	 * not a CPU number (contrast _simple_unlock, which does
	 * CPU_NUMBER(%eax) first) — this CX() index looks wrong; verify
	 * before re-enabling this code. */
	incl	CX(EXT(simple_lock_count),%eax)
#if 0
	METER_SIMPLE_LOCK_LOCK(%edx)
#endif
#if	NCPUS == 1
	pushf				/* save IF, run stack op with ints off */
	pushl	%edx
	cli
	call	EXT(lock_stack_push)
	popl	%edx
	popfl
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	EMARF
	ret
374
/* Compiled out ("#if 0"): one-shot usimple lock attempt; 1/0 result. */
ENTRY(_simple_lock_try)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

	movb	$1,%cl
	xchgb	USL_INTERLOCK(%edx),%cl	/* try to acquire the HW lock */
	testb	%cl,%cl			/* did we succeed? */
	jne	1f			/* no, return failure */

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,S_PC		/* record acquirer's pc */
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	movl	%ecx,S_THREAD		/* record owning thread */
	/* NOTE(review): same suspect CX() index as in _simple_lock —
	 * %eax holds the CPD_ACTIVE_THREAD offset, not a CPU number. */
	incl	CX(EXT(simple_lock_count),%eax)
#if 0
	METER_SIMPLE_LOCK_LOCK(%edx)
#endif
#if	NCPUS == 1
	pushf
	pushl	%edx
	cli
	call	EXT(lock_stack_push)
	popl	%edx
	popfl
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	movl	$1,%eax			/* return success */

	EMARF
	ret

1:
	ENABLE_PREEMPTION(%eax)		/* failure: drop preemption disable */

	xorl	%eax,%eax		/* and return failure */

	EMARF
	ret
420
/* Compiled out ("#if 0"): release a usimple lock. */
ENTRY(_simple_unlock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()
	CHECK_THREAD(S_THREAD)		/* debug: only the owner may unlock */

#if	MACH_LDEBUG
	xorl	%eax,%eax
	movl	%eax,S_THREAD		/* disown thread */
	MP_DISABLE_PREEMPTION(%eax)
	CPU_NUMBER(%eax)		/* %eax = this CPU's index */
	decl	CX(EXT(simple_lock_count),%eax)
	MP_ENABLE_PREEMPTION(%eax)
#if 0
	METER_SIMPLE_LOCK_UNLOCK(%edx)
#endif
#if	NCPUS == 1
	pushf				/* save IF, run stack op with ints off */
	pushl	%edx
	cli
	call	EXT(lock_stack_pop)
	popl	%edx
	popfl
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	xorb	%cl,%cl
	xchgb	USL_INTERLOCK(%edx),%cl	/* unlock the HW lock */

	ENABLE_PREEMPTION(%eax)		/* undo _simple_lock's disable */

	EMARF
	ret
455
456 #endif /* 0 */
457
458
/*
 * void mutex_init(mutex_t, etap_event_t)
 *
 * Zero all mutex fields; under ETAP also register the mutex with
 * the tracing subsystem.
 */
ENTRY(mutex_init)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movb	%al,M_ILK		/* clear interlock */
	movb	%al,M_LOCKED		/* clear locked flag */
	movw	%ax,M_WAITERS		/* init waiter count */
	movw	%ax,M_PROMOTED_PRI	/* no promoted priority */

#if	MACH_LDEBUG
	movl	$ MUTEX_TAG,M_TYPE	/* set lock type */
	movl	%eax,M_PC		/* init caller pc */
	movl	%eax,M_THREAD		/* and owning thread */
#endif
#if	ETAP_LOCK_TRACE
	movl	L_ARG1,%ecx		/* fetch event type */
	pushl	%ecx			/* push event type */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_init)	/* init ETAP data */
	addl	$8,%esp
#endif	/* ETAP_LOCK_TRACE */

	EMARF
	ret
483
/*
 * void mutex_lock(mutex_t)
 *
 * Blocking acquire: spin for the byte-sized interlock, then try to
 * set the locked flag; if the mutex is already held, record a wait
 * and block in mutex_lock_wait, retrying from the top afterwards.
 */
ENTRY2(mutex_lock,_mutex_lock)
	FRAME

#if	ETAP_LOCK_TRACE
	subl	$12,%esp		/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
	movl	$0,MISSED		/* clear local miss marker */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

ml_retry:
	DISABLE_PREEMPTION(%eax)

ml_get_hw:
	movb	$1,%cl
	xchgb	%cl,M_ILK		/* spin for the interlock */
	testb	%cl,%cl			/* did we succeed? */
	jne	ml_get_hw		/* no, try again */

	movb	$1,%cl
	xchgb	%cl,M_LOCKED		/* try to set locked flag */
	testb	%cl,%cl			/* is the mutex locked? */
	jne	ml_fail			/* yes, we lose */

	pushl	%edx
	call	EXT(mutex_lock_acquire)
	addl	$4,%esp
	movl	L_ARG0,%edx		/* refetch: call may clobber %edx */

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,M_PC		/* record acquirer's pc */
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	movl	%ecx,M_THREAD		/* record new owner */
	testl	%ecx,%ecx
	je	3f
	incl	TH_MUTEX_COUNT(%ecx)
3:
#endif

	xorb	%cl,%cl
	xchgb	%cl,M_ILK		/* release the interlock */

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* collect hold timestamp */
	addl	$16+12,%esp		/* pop args AND the 12-byte locals */
#endif	/* ETAP_LOCK_TRACE */

	EMARF
	ret

ml_fail:
#if	ETAP_LOCK_TRACE
	cmp	$0,MISSED		/* did we already take a wait timestamp? */
	jne	ml_block		/* yup. carry-on */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_miss)	/* get wait timestamp */
	movl	%eax,SWT_HI		/* set wait time (high word) */
	movl	%edx,SWT_LO		/* set wait time (low word) */
	popl	%edx			/* clean up stack */
	movl	$1,MISSED		/* mark wait timestamp as taken */
#endif	/* ETAP_LOCK_TRACE */

ml_block:
	CHECK_MYLOCK(M_THREAD)		/* debug: panic on recursive attempt */
	xorl	%eax,%eax
	pushl	%eax			/* no promotion here yet */
	pushl	%edx			/* push mutex address */
	call	EXT(mutex_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	movl	L_ARG0,%edx		/* refetch lock pointer */
	jmp	ml_retry		/* and try again */
570
/*
 * boolean_t mutex_try(mutex_t)
 *
 * Non-blocking acquire: take the interlock, attempt to set the
 * locked flag, and return 1 on success / 0 if the mutex was held.
 *
 * Fix: the failure path (mt_fail) used to run the same MACH_LDEBUG
 * bookkeeping as the success path, storing the *current* thread into
 * M_PC/M_THREAD and incrementing its TH_MUTEX_COUNT even though the
 * try FAILED.  That corrupts the debug owner of a mutex held by
 * another thread and makes CHECK_THREAD in mutex_unlock panic with
 * "wrong thread!" for the legitimate owner.  The bogus bookkeeping
 * has been removed; everything else is unchanged.
 */
ENTRY2(mutex_try,_mutex_try)
	FRAME

#if	ETAP_LOCK_TRACE
	subl	$8,%esp			/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()

	DISABLE_PREEMPTION(%eax)

mt_get_hw:
	movb	$1,%cl
	xchgb	%cl,M_ILK		/* spin for the interlock */
	testb	%cl,%cl
	jne	mt_get_hw

	movb	$1,%cl
	xchgb	%cl,M_LOCKED		/* try to set locked flag */
	testb	%cl,%cl
	jne	mt_fail			/* already held: fail, don't block */

	pushl	%edx
	call	EXT(mutex_lock_acquire)
	addl	$4,%esp
	movl	L_ARG0,%edx		/* refetch: call may clobber %edx */

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,M_PC		/* record acquirer's pc */
	movl	$ CPD_ACTIVE_THREAD,%ecx
	movl	%gs:(%ecx),%ecx
	movl	%ecx,M_THREAD		/* record new owner */
	testl	%ecx,%ecx
	je	1f
	incl	TH_MUTEX_COUNT(%ecx)
1:
#endif

	xorb	%cl,%cl
	xchgb	%cl,M_ILK		/* release the interlock */

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* get start hold timestamp */
	addl	$16,%esp		/* clean up call args */
#endif	/* ETAP_LOCK_TRACE */

	movl	$1,%eax			/* return success */

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
#if	ETAP_LOCK_TRACE
	addl	$8,%esp			/* pop stack claimed on entry */
#endif
#endif

	EMARF
	ret

mt_fail:
	/* (erroneous MACH_LDEBUG owner-recording removed here — see above) */

	xorb	%cl,%cl
	xchgb	%cl,M_ILK		/* release the interlock */

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	/* NOTE(review): taking a "hold" timestamp on the failure path
	 * looks dubious, but is preserved as-is. */
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* get start hold timestamp */
	addl	$16,%esp		/* clean up call args */
#endif	/* ETAP_LOCK_TRACE */

	xorl	%eax,%eax		/* return failure */

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
#if	ETAP_LOCK_TRACE
	addl	$8,%esp			/* pop stack claimed on entry */
#endif
#endif

	EMARF
	ret
679
/*
 * void mutex_unlock(mutex_t)
 *
 * Release the mutex: take the interlock, wake a waiter if the
 * waiter count is non-zero, then clear the locked flag and the
 * interlock.
 */
ENTRY(mutex_unlock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

#if	ETAP_LOCK_TRACE
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_unlock)	/* collect ETAP data */
	popl	%edx			/* restore mutex address */
#endif	/* ETAP_LOCK_TRACE */

	CHECK_MUTEX_TYPE()
	CHECK_THREAD(M_THREAD)		/* debug: only the owner may unlock */

	DISABLE_PREEMPTION(%eax)

mu_get_hw:
	movb	$1,%cl
	xchgb	%cl,M_ILK		/* spin for the interlock */
	testb	%cl,%cl			/* did we succeed? */
	jne	mu_get_hw		/* no, try again */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	mu_wakeup		/* yes, more work to do */

mu_doit:
#if	MACH_LDEBUG
	xorl	%eax,%eax
	movl	%eax,M_THREAD		/* disown thread */
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	testl	%ecx,%ecx
	je	0f
	decl	TH_MUTEX_COUNT(%ecx)
0:
#endif

	xorb	%cl,%cl
	xchgb	%cl,M_LOCKED		/* unlock the mutex */

	xorb	%cl,%cl
	xchgb	%cl,M_ILK		/* release the interlock */

	ENABLE_PREEMPTION(%eax)

	EMARF
	ret

mu_wakeup:
	xorl	%eax,%eax
	pushl	%eax			/* no promotion here yet */
	pushl	%edx			/* push mutex address */
	call	EXT(mutex_unlock_wakeup)/* yes, wake a thread */
	addl	$8,%esp
	movl	L_ARG0,%edx		/* refetch lock pointer */
	jmp	mu_doit			/* then do the unlock proper */
735
/*
 * void interlock_unlock(hw_lock_t)
 *
 * Release just the interlock byte (offset 0) of a lock and re-enable
 * preemption, matching the disable done when the interlock was taken.
 */
ENTRY(interlock_unlock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	xorb	%cl,%cl
	xchgb	%cl,M_ILK		/* atomically clear the interlock */

	ENABLE_PREEMPTION(%eax)

	EMARF
	ret
747
748
/*
 * void _disable_preemption(void)
 *
 * Bump this CPU's preemption level (no-op unless MACH_RT).
 */
ENTRY(_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION(%eax)
#endif	/* MACH_RT */
	ret
754
/*
 * void _enable_preemption(void)
 *
 * Drop this CPU's preemption level (no-op unless MACH_RT).  Under
 * MACH_ASSERT, panic if the level is not positive before the drop.
 * Fix: the panic text said "< 0" but the jg check fires whenever the
 * level is <= 0 (enabling when already fully enabled is also a bug),
 * so the message now matches the condition.
 */
ENTRY(_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)		/* sanity: level must be > 0 */
	jg	1f
	pushl	%gs:(%eax)		/* panic arg: the bad level */
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption: preemption_level(%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)
#endif	/* MACH_RT */
	ret
773
/*
 * void _enable_preemption_no_check(void)
 *
 * Like _enable_preemption but uses the "no check" variant of the
 * macro (no-op unless MACH_RT).  Under MACH_ASSERT, panic if the
 * level is not positive before the drop.
 */
ENTRY(_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)		/* sanity: level must be > 0 */
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)
#endif	/* MACH_RT */
	ret
791
792
/*
 * void _mp_disable_preemption(void)
 *
 * MP-only flavor: bump the preemption level, but only on
 * MACH_RT multiprocessor builds; otherwise a no-op.
 */
ENTRY(_mp_disable_preemption)
#if	MACH_RT && NCPUS > 1
	_DISABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1*/
	ret
798
/*
 * void _mp_enable_preemption(void)
 *
 * MP-only flavor of _enable_preemption (no-op unless MACH_RT and
 * NCPUS > 1).  Under MACH_ASSERT, panic if the level is not
 * positive before the drop.
 */
ENTRY(_mp_enable_preemption)
#if	MACH_RT && NCPUS > 1
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)		/* sanity: level must be > 0 */
	jg	1f
	pushl	%gs:(%eax)		/* panic arg: the bad level */
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption: preemption_level (%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
	ret
817
/*
 * void _mp_enable_preemption_no_check(void)
 *
 * MP-only, "no check" flavor (no-op unless MACH_RT and NCPUS > 1).
 * Under MACH_ASSERT, panic if the level is not positive.
 */
ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT && NCPUS > 1
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)		/* sanity: level must be > 0 */
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
	ret
835
836
/*
 * void i_bit_set(int bit, int *addr)
 *
 * Atomically set bit `bit' of the word at `addr'.
 * Fix: was "bts %dl,(%eax)" — BTS has no byte-register form
 * (Intel SDM: r/m16,r16 / r/m32,r32 / r/m,imm8), so use the
 * full 32-bit register holding the same argument value.
 */
ENTRY(i_bit_set)
	movl	S_ARG0,%edx		/* bit number */
	movl	S_ARG1,%eax		/* word address */
	lock
	bts	%edx,(%eax)		/* atomic set */
	ret
843
/*
 * void i_bit_clear(int bit, int *addr)
 *
 * Atomically clear bit `bit' of the word at `addr'.
 * Fix: was "btr %dl,(%eax)" — BTR has no byte-register form,
 * so use the full 32-bit register (same value, valid encoding).
 */
ENTRY(i_bit_clear)
	movl	S_ARG0,%edx		/* bit number */
	movl	S_ARG1,%eax		/* word address */
	lock
	btr	%edx,(%eax)		/* atomic clear */
	ret
850
/*
 * void bit_lock(int bit, void *addr)
 *
 * Spin until we are the one who sets bit `bit' at `addr'.
 */
ENTRY(bit_lock)
	movl	S_ARG0,%ecx		/* bit number */
	movl	S_ARG1,%eax		/* lock word address */
1:
	lock
	bts	%ecx,(%eax)		/* atomic test-and-set */
	jb	1b			/* CF=1: bit was already set, spin */
	ret
859
/*
 * unsigned int bit_lock_try(int bit, void *addr)
 *
 * One-shot bit lock: returns non-zero (the address, used as "true")
 * on success, 0 if the bit was already set.
 */
ENTRY(bit_lock_try)
	movl	S_ARG0,%ecx		/* bit number */
	movl	S_ARG1,%eax		/* lock word address */
	lock
	bts	%ecx,(%eax)		/* atomic test-and-set */
	jb	bit_lock_failed		/* CF=1: already held */
	ret				/* %eax better not be null ! */
bit_lock_failed:
	xorl	%eax,%eax		/* return 0: not acquired */
	ret
870
/*
 * void bit_unlock(int bit, void *addr)
 *
 * Atomically clear the lock bit.
 */
ENTRY(bit_unlock)
	movl	S_ARG1,%edx		/* lock word address */
	movl	S_ARG0,%ecx		/* bit number */
	lock
	btr	%ecx,(%edx)		/* atomic clear releases the lock */
	ret