/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <cpus.h>
#include <mach_rt.h>
#include <platforms.h>
#include <mach_ldebug.h>
#include <i386/asm.h>
#include <kern/etap_options.h>

#include "assym.s"

/*
 * When performance isn't the only concern, it's
 * nice to build stack frames...
 */
#define	BUILD_STACK_FRAMES ((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB)

#if	BUILD_STACK_FRAMES

#define	L_PC		4(%ebp)
#define	L_ARG0		8(%ebp)
#define	L_ARG1		12(%ebp)

#define	SWT_HI		-4(%ebp)
#define	SWT_LO		-8(%ebp)
#define	MISSED		-12(%ebp)

#else	/* BUILD_STACK_FRAMES */

#undef	FRAME
#undef	EMARF
#define	FRAME
#define	EMARF
#define	L_PC		(%esp)
#define	L_ARG0		4(%esp)
#define	L_ARG1		8(%esp)

#endif	/* BUILD_STACK_FRAMES */
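
/*
 * Illustration (a sketch, not build code): the L_* offsets above assume
 * the standard cdecl call frame, with FRAME/EMARF assumed to emit the
 * usual prologue/epilogue when BUILD_STACK_FRAMES is on:
 *
 *	12(%ebp)	second argument		(L_ARG1)
 *	 8(%ebp)	first argument		(L_ARG0)
 *	 4(%ebp)	caller's return pc	(L_PC)
 *	 0(%ebp)	saved %ebp
 *
 * Without frames the same slots are reached relative to %esp, one word
 * lower, since no %ebp was pushed.
 */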


#define	M_ILK		(%edx)
#define	M_LOCKED	MUTEX_LOCKED(%edx)
#define	M_WAITERS	MUTEX_WAITERS(%edx)
#define	M_PROMOTED_PRI	MUTEX_PROMOTED_PRI(%edx)
#if	MACH_LDEBUG
#define	M_TYPE		MUTEX_TYPE(%edx)
#define	M_PC		MUTEX_PC(%edx)
#define	M_THREAD	MUTEX_THREAD(%edx)
#endif	/* MACH_LDEBUG */

#include <i386/mp.h>
#if	(NCPUS > 1)
#define	CX(addr,reg)	addr(,reg,4)
#else
#define	CPU_NUMBER(reg)
#define	CX(addr,reg)	addr
#endif	/* (NCPUS > 1) */
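
/*
 * Illustration (sketch only): on multiprocessor builds CX(addr,reg)
 * scales the cpu number by the 4-byte element size, i.e. roughly the C
 *
 *	extern int simple_lock_count[NCPUS];
 *	simple_lock_count[cpu_number()]++;
 *
 * while the uniprocessor build collapses the array to a scalar and
 * makes CPU_NUMBER() a no-op.
 */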

#if	MACH_LDEBUG
/*
 * Routines for general lock debugging.
 */
#define	S_TYPE		SLOCK_TYPE(%edx)
#define	S_PC		SLOCK_PC(%edx)
#define	S_THREAD	SLOCK_THREAD(%edx)
#define	S_DURATIONH	SLOCK_DURATIONH(%edx)
#define	S_DURATIONL	SLOCK_DURATIONL(%edx)

/*
 * Check that a lock has the expected type and call "panic" on a
 * mismatch.  This catches mutex operations applied to simple locks
 * and vice versa.
 */
#define	CHECK_MUTEX_TYPE()				\
	cmpl	$ MUTEX_TAG,M_TYPE		;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"not a mutex!"			;	\
	.text					;	\
1:

#define	CHECK_SIMPLE_LOCK_TYPE()			\
	cmpl	$ USLOCK_TAG,S_TYPE		;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"not a simple lock!"		;	\
	.text					;	\
1:

/*
 * If the thread currently holds one or more simple locks, an attempt
 * to acquire a mutex fails this check: acquiring a mutex may block
 * and context switch, which must never happen while a simple lock
 * is held.
 */
#if	0 /* MACH_RT - 11/12/99 - lion@apple.com disable check for now */
#define	CHECK_PREEMPTION_LEVEL()			\
	movl	$ CPD_PREEMPTION_LEVEL,%eax	;	\
	cmpl	$0,%gs:(%eax)			;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"preemption_level != 0!"	;	\
	.text					;	\
1:
#else	/* MACH_RT */
#define	CHECK_PREEMPTION_LEVEL()
#endif	/* MACH_RT */

#define	CHECK_NO_SIMPLELOCKS()				\
	movl	$ CPD_SIMPLE_LOCK_COUNT,%eax	;	\
	cmpl	$0,%gs:(%eax)			;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"simple_locks_held!"		;	\
	.text					;	\
1:

/*
 * Verifies return to the correct thread in "unlock" situations.
 */
#define	CHECK_THREAD(thd)				\
	movl	$ CPD_ACTIVE_THREAD,%eax	;	\
	movl	%gs:(%eax),%ecx			;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"wrong thread!"			;	\
	.text					;	\
1:

#define	CHECK_MYLOCK(thd)				\
	movl	$ CPD_ACTIVE_THREAD,%eax	;	\
	movl	%gs:(%eax),%ecx			;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	jne	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"mylock attempt!"		;	\
	.text					;	\
1:

#define	METER_SIMPLE_LOCK_LOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_lock)		;	\
	popl	reg

#define	METER_SIMPLE_LOCK_UNLOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_unlock)	;	\
	popl	reg

#else	/* MACH_LDEBUG */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
#define	CHECK_THREAD(thd)
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_NO_SIMPLELOCKS()
#define	CHECK_MYLOCK(thd)
#define	METER_SIMPLE_LOCK_LOCK(reg)
#define	METER_SIMPLE_LOCK_UNLOCK(reg)
#endif	/* MACH_LDEBUG */
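
/*
 * Taken together, the debug checks above amount to this C sketch
 * (illustrative only; the cpu-data accessor is an assumed name):
 *
 *	if (m->type != MUTEX_TAG)
 *		panic("not a mutex!");
 *	if (cpu_data()->simple_lock_count != 0)
 *		panic("simple_locks_held!");
 *	thread = cpu_data()->active_thread;
 *	if (thread != NULL && thread != l->thread)
 *		panic("wrong thread!");
 */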


/*
 *	void hw_lock_init(hw_lock_t)
 *
 *	Initialize a hardware lock.
 */
ENTRY(hw_lock_init)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movl	%eax,0(%edx)		/* clear the lock */
	EMARF
	ret

/*
 *	void hw_lock_lock(hw_lock_t)
 *
 *	Acquire lock, spinning until it becomes available.
 *	MACH_RT: also return with preemption disabled.
 */
ENTRY(hw_lock_lock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

1:	DISABLE_PREEMPTION(%eax)
	movl	$1,%ecx
	xchgl	0(%edx),%ecx		/* try to acquire the HW lock */
	testl	%ecx,%ecx		/* success? */
	jne	3f			/* no, go spin */
	movl	$1,%eax			/* yes; set TRUE in case this was a timeout call */
	EMARF				/* nothing left to do */
	ret

3:	ENABLE_PREEMPTION(%eax)		/* no reason we can't be preemptable now */

	movl	$1,%ecx
2:
	rep; nop			/* pause for hyper-threading */
	testl	%ecx,0(%edx)		/* spin checking lock value in cache */
	jne	2b			/* non-zero means locked, keep spinning */
	jmp	1b			/* zero means unlocked, try to grab it */
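
/*
 * C sketch of the protocol above (illustrative only; the helper names
 * are assumptions, not kernel API).  The xchg attempts the lock; on
 * failure we spin with plain reads so the cache line stays shared
 * instead of being bounced around by locked bus cycles:
 *
 *	void hw_lock_lock(volatile int *lock)
 *	{
 *		for (;;) {
 *			disable_preemption();
 *			if (atomic_exchange(lock, 1) == 0)
 *				return;		// acquired; stay unpreemptible
 *			enable_preemption();
 *			while (*lock != 0)
 *				cpu_pause();	// rep; nop
 *		}
 *	}
 */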

/*
 *	unsigned int hw_lock_to(hw_lock_t, unsigned int)
 *
 *	Acquire lock, spinning until it becomes available or timeout.
 *	MACH_RT: also return with preemption disabled.
 */
ENTRY(hw_lock_to)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
1:
	/*
	 * Attempt to grab the lock immediately
	 * - fastpath without timeout nonsense.
	 */
	DISABLE_PREEMPTION(%eax)
	movl	$1,%eax
	xchgl	0(%edx),%eax		/* try to acquire the HW lock */
	testl	%eax,%eax		/* success? */
	jne	2f			/* no */
	movl	$1,%eax			/* yes, return true */
	EMARF
	ret

2:
#define	INNER_LOOP_COUNT	1000
	/*
	 * Failed to get the lock so set the timeout
	 * and then spin re-checking the lock but pausing
	 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
	 */
	movl	L_ARG1,%ecx		/* fetch timeout */
	push	%edi
	push	%ebx
	mov	%edx,%edi

	rdtsc				/* read cyclecount into %edx:%eax */
	addl	%ecx,%eax		/* add timeout to low-order 32 bits */
	adcl	$0,%edx			/* add carry */
	mov	%edx,%ecx
	mov	%eax,%ebx		/* %ecx:%ebx is the timeout expiry */
3:
	ENABLE_PREEMPTION(%eax)		/* no reason not to be preempted now */
4:
	/*
	 * The inner-loop spin to look for the lock being freed.
	 */
	movl	$1,%eax
	mov	$(INNER_LOOP_COUNT),%edx
5:
	rep; nop			/* pause for hyper-threading */
	testl	%eax,0(%edi)		/* spin checking lock value in cache */
	je	6f			/* zero => unlocked, try to grab it */
	decl	%edx			/* decrement inner loop count */
	jnz	5b			/* keep spinning until count expires */

	/*
	 * Here after spinning INNER_LOOP_COUNT times, check for timeout.
	 * Note that rdtsc clobbers %edx:%eax, so on a miss we must re-enter
	 * at 4: to re-establish the inner-loop test mask and count.
	 */
	rdtsc				/* cyclecount into %edx:%eax */
	cmpl	%ecx,%edx		/* compare high-order 32-bits */
	jb	4b			/* continue spinning if less, or */
	cmpl	%ebx,%eax		/* compare low-order 32-bits */
	jb	4b			/* continue if less, else bail */
	xor	%eax,%eax		/* with 0 return value */
	pop	%ebx
	pop	%edi
	EMARF
	ret

6:
	/*
	 * Here to try to grab the lock that now appears to be free
	 * after contention.
	 */
	DISABLE_PREEMPTION(%eax)
	movl	$1,%eax
	xchgl	0(%edi),%eax		/* try to acquire the HW lock */
	testl	%eax,%eax		/* success? */
	jne	3b			/* no - spin again */
	movl	$1,%eax			/* yes */
	pop	%ebx
	pop	%edi
	EMARF
	ret
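
/*
 * C sketch of the timeout path (illustrative; rdtsc64() and the other
 * helpers are assumed names, and the timeout is in TSC cycles):
 *
 *	unsigned int hw_lock_to(volatile int *lock, unsigned int timeout)
 *	{
 *		uint64_t deadline = rdtsc64() + timeout;
 *
 *		for (;;) {
 *			disable_preemption();
 *			if (atomic_exchange(lock, 1) == 0)
 *				return 1;		// acquired
 *			enable_preemption();
 *			while (*lock != 0) {
 *				int i;
 *				for (i = 0; i < INNER_LOOP_COUNT && *lock != 0; i++)
 *					cpu_pause();	// cheap inner spin
 *				if (*lock != 0 && rdtsc64() >= deadline)
 *					return 0;	// timed out
 *			}
 *		}
 *	}
 */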

/*
 *	void hw_lock_unlock(hw_lock_t)
 *
 *	Unconditionally release lock.
 *	MACH_RT: release preemption level.
 */
ENTRY(hw_lock_unlock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	xchgl	0(%edx),%eax		/* clear the lock... a mov instruction */
					/* ...might be cheaper and less paranoid */
	ENABLE_PREEMPTION(%eax)
	EMARF
	ret

/*
 *	unsigned int hw_lock_try(hw_lock_t)
 *	MACH_RT: returns with preemption disabled on success.
 */
ENTRY(hw_lock_try)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	DISABLE_PREEMPTION(%eax)
	movl	$1,%ecx
	xchgl	0(%edx),%ecx		/* try to acquire the HW lock */
	testl	%ecx,%ecx		/* success? */
	jne	1f			/* no, lock was already held */

	movl	$1,%eax			/* success */
	EMARF
	ret

1:	ENABLE_PREEMPTION(%eax)		/* failure:  release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
	EMARF
	ret
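
/*
 * C sketch (illustrative; helper names assumed):
 *
 *	unsigned int hw_lock_try(volatile int *lock)
 *	{
 *		disable_preemption();
 *		if (atomic_exchange(lock, 1) == 0)
 *			return 1;	// acquired; preemption stays disabled
 *		enable_preemption();
 *		return 0;
 *	}
 */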

/*
 *	unsigned int hw_lock_held(hw_lock_t)
 *	MACH_RT: doesn't change preemption state.
 *	N.B.  Racy, of course.
 */
ENTRY(hw_lock_held)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	$1,%ecx
	testl	%ecx,0(%edx)		/* check lock value */
	jne	1f			/* non-zero means locked */
	xorl	%eax,%eax		/* tell caller:  lock wasn't locked */
	EMARF
	ret

1:	movl	$1,%eax			/* tell caller:  lock was locked */
	EMARF
	ret
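
/*
 * C sketch (illustrative): a deliberately racy peek, useful only as a
 * hint since the answer may be stale by the time it is returned:
 *
 *	unsigned int hw_lock_held(volatile int *lock)
 *	{
 *		return *lock != 0;
 *	}
 */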



#if	0


ENTRY(_usimple_lock_init)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movl	%eax,USL_INTERLOCK(%edx) /* unlock the HW lock */
	EMARF
	ret

ENTRY(_simple_lock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

sl_get_hw:
	movl	$1,%ecx
	xchgl	USL_INTERLOCK(%edx),%ecx /* try to acquire the HW lock */
	testl	%ecx,%ecx		/* did we succeed? */

#if	MACH_LDEBUG
	je	5f
	CHECK_MYLOCK(S_THREAD)
	jmp	sl_get_hw
5:
#else	/* MACH_LDEBUG */
	jne	sl_get_hw		/* no, try again */
#endif	/* MACH_LDEBUG */

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,S_PC
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	movl	%ecx,S_THREAD
	CPU_NUMBER(%eax)		/* need the cpu number for CX() */
	incl	CX(EXT(simple_lock_count),%eax)
#if 0
	METER_SIMPLE_LOCK_LOCK(%edx)
#endif
#if	NCPUS == 1
	pushf
	pushl	%edx
	cli
	call	EXT(lock_stack_push)
	popl	%edx
	popfl
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	EMARF
	ret

ENTRY(_simple_lock_try)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

	movl	$1,%ecx
	xchgl	USL_INTERLOCK(%edx),%ecx /* try to acquire the HW lock */
	testl	%ecx,%ecx		/* did we succeed? */
	jne	1f			/* no, return failure */

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,S_PC
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	movl	%ecx,S_THREAD
	CPU_NUMBER(%eax)		/* need the cpu number for CX() */
	incl	CX(EXT(simple_lock_count),%eax)
#if 0
	METER_SIMPLE_LOCK_LOCK(%edx)
#endif
#if	NCPUS == 1
	pushf
	pushl	%edx
	cli
	call	EXT(lock_stack_push)
	popl	%edx
	popfl
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	movl	$1,%eax			/* return success */

	EMARF
	ret

1:
	ENABLE_PREEMPTION(%eax)

	xorl	%eax,%eax		/* and return failure */

	EMARF
	ret

ENTRY(_simple_unlock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()
	CHECK_THREAD(S_THREAD)

#if	MACH_LDEBUG
	xorl	%eax,%eax
	movl	%eax,S_THREAD		/* disown thread */
	MP_DISABLE_PREEMPTION(%eax)
	CPU_NUMBER(%eax)
	decl	CX(EXT(simple_lock_count),%eax)
	MP_ENABLE_PREEMPTION(%eax)
#if 0
	METER_SIMPLE_LOCK_UNLOCK(%edx)
#endif
#if	NCPUS == 1
	pushf
	pushl	%edx
	cli
	call	EXT(lock_stack_pop)
	popl	%edx
	popfl
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	xorl	%ecx,%ecx
	xchgl	USL_INTERLOCK(%edx),%ecx /* unlock the HW lock */

	ENABLE_PREEMPTION(%eax)

	EMARF
	ret

#endif	/* 0 */


ENTRY(mutex_init)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* clear interlock */
	movl	%eax,M_LOCKED		/* clear locked flag */
	movw	%ax,M_WAITERS		/* init waiter count */
	movw	%ax,M_PROMOTED_PRI

#if	MACH_LDEBUG
	movl	$ MUTEX_TAG,M_TYPE	/* set lock type */
	movl	%eax,M_PC		/* init caller pc */
	movl	%eax,M_THREAD		/* and owning thread */
#endif
#if	ETAP_LOCK_TRACE
	movl	L_ARG1,%ecx		/* fetch event type */
	pushl	%ecx			/* push event type */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_init)	/* init ETAP data */
	addl	$8,%esp
#endif	/* ETAP_LOCK_TRACE */

	EMARF
	ret
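
/*
 * Field layout implied by the initialization above (a sketch; the real
 * offsets come from assym.s and the field order here is an assumption):
 *
 *	struct mutex {
 *		unsigned int	ilk;		// M_ILK: interlock guarding the rest
 *		unsigned int	locked;		// M_LOCKED: held flag
 *		unsigned short	waiters;	// M_WAITERS: count of blocked threads
 *		unsigned short	promoted_pri;	// M_PROMOTED_PRI
 *	#if MACH_LDEBUG
 *		unsigned int	type;		// M_TYPE: MUTEX_TAG when valid
 *		void		*pc;		// M_PC: last acquirer's pc
 *		void		*thread;	// M_THREAD: owning thread
 *	#endif
 *	};
 */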

ENTRY2(mutex_lock,_mutex_lock)
	FRAME

#if	ETAP_LOCK_TRACE
	subl	$12,%esp		/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
	movl	$0,MISSED		/* clear local miss marker */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

ml_retry:
	DISABLE_PREEMPTION(%eax)

ml_get_hw:
	movl	$1,%ecx
	xchgl	%ecx,M_ILK
	testl	%ecx,%ecx		/* did we succeed? */
	jne	ml_get_hw		/* no, try again */

	movl	$1,%ecx
	xchgl	%ecx,M_LOCKED		/* try to set locked flag */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	ml_fail			/* yes, we lose */

	pushl	%edx
	call	EXT(mutex_lock_acquire)
	addl	$4,%esp
	movl	L_ARG0,%edx

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,M_PC
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	movl	%ecx,M_THREAD
	testl	%ecx,%ecx
	je	3f
	incl	TH_MUTEX_COUNT(%ecx)
3:
#endif

	xorl	%ecx,%ecx
	xchgl	%ecx,M_ILK

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* collect hold timestamp */
	addl	$16+12,%esp		/* clean up stack, adjusting for locals */
#endif	/* ETAP_LOCK_TRACE */

	EMARF
	ret

ml_fail:
#if	ETAP_LOCK_TRACE
	cmp	$0,MISSED		/* did we already take a wait timestamp? */
	jne	ml_block		/* yup. carry-on */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_miss)	/* get wait timestamp */
	movl	%eax,SWT_HI		/* set wait time (high word) */
	movl	%edx,SWT_LO		/* set wait time (low word) */
	popl	%edx			/* clean up stack */
	movl	$1,MISSED		/* mark wait timestamp as taken */
#endif	/* ETAP_LOCK_TRACE */

ml_block:
	CHECK_MYLOCK(M_THREAD)
	xorl	%eax,%eax
	pushl	%eax			/* no promotion here yet */
	pushl	%edx			/* push mutex address */
	call	EXT(mutex_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	movl	L_ARG0,%edx		/* refetch lock pointer */
	jmp	ml_retry		/* and try again */
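
/*
 * C sketch of the acquire path above (illustrative; mutex_lock_wait is
 * assumed to release the interlock, re-enable preemption, and block):
 *
 *	void mutex_lock(struct mutex *m)
 *	{
 *		for (;;) {
 *			disable_preemption();
 *			while (atomic_exchange(&m->ilk, 1) != 0)
 *				;			// spin for the interlock
 *			if (atomic_exchange(&m->locked, 1) == 0) {
 *				mutex_lock_acquire(m);	// pick up any promotion state
 *				m->ilk = 0;		// release the interlock
 *				enable_preemption();
 *				return;
 *			}
 *			mutex_lock_wait(m, 0);		// block until woken,
 *		}					// then compete again
 *	}
 */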

ENTRY2(mutex_try,_mutex_try)
	FRAME

#if	ETAP_LOCK_TRACE
	subl	$8,%esp			/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()

	DISABLE_PREEMPTION(%eax)

mt_get_hw:
	movl	$1,%ecx
	xchgl	%ecx,M_ILK
	testl	%ecx,%ecx
	jne	mt_get_hw

	movl	$1,%ecx
	xchgl	%ecx,M_LOCKED
	testl	%ecx,%ecx
	jne	mt_fail

	pushl	%edx
	call	EXT(mutex_lock_acquire)
	addl	$4,%esp
	movl	L_ARG0,%edx

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,M_PC
	movl	$ CPD_ACTIVE_THREAD,%ecx
	movl	%gs:(%ecx),%ecx
	movl	%ecx,M_THREAD
	testl	%ecx,%ecx
	je	1f
	incl	TH_MUTEX_COUNT(%ecx)
1:
#endif

	xorl	%ecx,%ecx
	xchgl	%ecx,M_ILK

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* get start hold timestamp */
	addl	$16,%esp		/* pop the four arguments */
#endif	/* ETAP_LOCK_TRACE */

	movl	$1,%eax

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
#if	ETAP_LOCK_TRACE
	addl	$8,%esp			/* pop stack claimed on entry */
#endif
#endif

	EMARF
	ret

mt_fail:
	xorl	%ecx,%ecx
	xchgl	%ecx,M_ILK

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* get start hold timestamp */
	addl	$16,%esp		/* pop the four arguments */
#endif	/* ETAP_LOCK_TRACE */

	xorl	%eax,%eax

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
#if	ETAP_LOCK_TRACE
	addl	$8,%esp			/* pop stack claimed on entry */
#endif
#endif

	EMARF
	ret

ENTRY(mutex_unlock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

#if	ETAP_LOCK_TRACE
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_unlock)	/* collect ETAP data */
	popl	%edx			/* restore mutex address */
#endif	/* ETAP_LOCK_TRACE */

	CHECK_MUTEX_TYPE()
	CHECK_THREAD(M_THREAD)

	DISABLE_PREEMPTION(%eax)

mu_get_hw:
	movl	$1,%ecx
	xchgl	%ecx,M_ILK
	testl	%ecx,%ecx		/* did we succeed? */
	jne	mu_get_hw		/* no, try again */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	mu_wakeup		/* yes, more work to do */

mu_doit:
#if	MACH_LDEBUG
	xorl	%eax,%eax
	movl	%eax,M_THREAD		/* disown thread */
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	testl	%ecx,%ecx
	je	0f
	decl	TH_MUTEX_COUNT(%ecx)
0:
#endif

	xorl	%ecx,%ecx
	xchgl	%ecx,M_LOCKED		/* unlock the mutex */

	xorl	%ecx,%ecx
	xchgl	%ecx,M_ILK

	ENABLE_PREEMPTION(%eax)

	EMARF
	ret

mu_wakeup:
	xorl	%eax,%eax
	pushl	%eax			/* no promotion here yet */
	pushl	%edx			/* push mutex address */
	call	EXT(mutex_unlock_wakeup) /* yes, wake a thread */
	addl	$8,%esp
	movl	L_ARG0,%edx		/* refetch lock pointer */
	jmp	mu_doit
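
/*
 * C sketch of the release path (illustrative; helper names follow the
 * calls above):
 *
 *	void mutex_unlock(struct mutex *m)
 *	{
 *		disable_preemption();
 *		while (atomic_exchange(&m->ilk, 1) != 0)
 *			;				// spin for the interlock
 *		if (m->waiters != 0)
 *			mutex_unlock_wakeup(m, 0);	// wake a blocked thread
 *		m->locked = 0;				// release the mutex...
 *		m->ilk = 0;				// ...and the interlock
 *		enable_preemption();
 *	}
 */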

ENTRY(interlock_unlock)
	FRAME
	movl	L_ARG0,%edx

	xorl	%ecx,%ecx
	xchgl	%ecx,M_ILK

	ENABLE_PREEMPTION(%eax)

	EMARF
	ret


ENTRY(_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION(%eax)
#endif	/* MACH_RT */
	ret

ENTRY(_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)
	jg	1f
	pushl	%gs:(%eax)
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption: preemption_level(%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)
#endif	/* MACH_RT */
	ret

ENTRY(_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)
#endif	/* MACH_RT */
	ret


ENTRY(_mp_disable_preemption)
#if	MACH_RT && NCPUS > 1
	_DISABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
	ret

ENTRY(_mp_enable_preemption)
#if	MACH_RT && NCPUS > 1
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)
	jg	1f
	pushl	%gs:(%eax)
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption: preemption_level (%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
	ret

ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT && NCPUS > 1
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
	ret
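
/*
 * C sketch of the preemption primitives (illustrative; the real
 * _ENABLE_PREEMPTION macro is also assumed to check for a pending AST
 * when the level returns to zero):
 *
 *	void _disable_preemption(void)
 *	{
 *		cpu_data()->preemption_level++;
 *	}
 *
 *	void _enable_preemption(void)
 *	{
 *		assert(cpu_data()->preemption_level > 0);
 *		if (--cpu_data()->preemption_level == 0)
 *			ast_check();	// assumed behavior, not shown above
 *	}
 */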


ENTRY(i_bit_set)
	movl	S_ARG0,%edx
	movl	S_ARG1,%eax
	lock
	bts	%edx,(%eax)		/* %edx, not %dl: bts takes a 16/32-bit bit index */
	ret

ENTRY(i_bit_clear)
	movl	S_ARG0,%edx
	movl	S_ARG1,%eax
	lock
	btr	%edx,(%eax)
	ret

ENTRY(bit_lock)
	movl	S_ARG0,%ecx
	movl	S_ARG1,%eax
1:
	lock
	bts	%ecx,(%eax)
	jb	1b
	ret
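
/*
 * C sketch (illustrative, for bit indices below 32): lock/bts leaves
 * the bit's previous value in CF, so bit_lock is an atomic
 * test-and-set spin on a single bit:
 *
 *	void bit_lock(int bit, volatile unsigned int *word)
 *	{
 *		while (atomic_fetch_or(word, 1u << bit) & (1u << bit))
 *			;	// bit was already set: lock held elsewhere
 *	}
 */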

ENTRY(bit_lock_try)
	movl	S_ARG0,%ecx
	movl	S_ARG1,%eax
	lock
	bts	%ecx,(%eax)
	jb	bit_lock_failed
	ret				/* %eax better not be null ! */
bit_lock_failed:
	xorl	%eax,%eax
	ret

ENTRY(bit_unlock)
	movl	S_ARG0,%ecx
	movl	S_ARG1,%eax
	lock
	btr	%ecx,(%eax)
	ret