/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <cpus.h>
#include <mach_rt.h>
#include <platforms.h>
#include <mach_ldebug.h>
#include <i386/asm.h>
#include <kern/etap_options.h>

#include "assym.s"

/*
 * When performance isn't the only concern, it's
 * nice to build stack frames...
 */
#define	BUILD_STACK_FRAMES	((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB)

#if	BUILD_STACK_FRAMES

#define	L_PC		4(%ebp)
#define	L_ARG0		8(%ebp)
#define	L_ARG1		12(%ebp)

#define	SWT_HI		-4(%ebp)
#define	SWT_LO		-8(%ebp)
#define	MISSED		-12(%ebp)

#else	/* BUILD_STACK_FRAMES */

#undef	FRAME
#undef	EMARF
#define	FRAME
#define	EMARF
#define	L_PC		(%esp)
#define	L_ARG0		4(%esp)
#define	L_ARG1		8(%esp)

#endif	/* BUILD_STACK_FRAMES */
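
/*
 * Illustrative stack layouts implied by the definitions above (an
 * added sketch, not part of the original build).  With frames built,
 * arguments are addressed off %ebp and the ETAP locals live below the
 * frame; without frames, everything is addressed off %esp:
 *
 *	framed:				frameless:
 *	12(%ebp)	arg1		8(%esp)		arg1
 *	 8(%ebp)	arg0		4(%esp)		arg0
 *	 4(%ebp)	return pc	 (%esp)		return pc
 *	  (%ebp)	saved %ebp
 *	 -4(%ebp)	SWT_HI
 *	 -8(%ebp)	SWT_LO
 *	-12(%ebp)	MISSED
 */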


#define	M_ILK		(%edx)
#define	M_LOCKED	MUTEX_LOCKED(%edx)
#define	M_WAITERS	MUTEX_WAITERS(%edx)
#define	M_PROMOTED_PRI	MUTEX_PROMOTED_PRI(%edx)
#if	MACH_LDEBUG
#define	M_TYPE		MUTEX_TYPE(%edx)
#define	M_PC		MUTEX_PC(%edx)
#define	M_THREAD	MUTEX_THREAD(%edx)
#endif	/* MACH_LDEBUG */

#include <i386/mp.h>
#if	(NCPUS > 1)
#define	CX(addr,reg)	addr(,reg,4)
#else
#define	CPU_NUMBER(reg)
#define	CX(addr,reg)	addr
#endif	/* (NCPUS > 1) */

#if	MACH_LDEBUG
/*
 * Routines for general lock debugging.
 */
#define	S_TYPE		SLOCK_TYPE(%edx)
#define	S_PC		SLOCK_PC(%edx)
#define	S_THREAD	SLOCK_THREAD(%edx)
#define	S_DURATIONH	SLOCK_DURATIONH(%edx)
#define	S_DURATIONL	SLOCK_DURATIONL(%edx)

/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.
 */
#define	CHECK_MUTEX_TYPE()				\
	cmpl	$ MUTEX_TAG,M_TYPE		;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"not a mutex!"			;	\
	.text					;	\
1:

#define	CHECK_SIMPLE_LOCK_TYPE()			\
	cmpl	$ USLOCK_TAG,S_TYPE		;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"not a simple lock!"		;	\
	.text					;	\
1:

/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
#if	0 /*MACH_RT - 11/12/99 - lion@apple.com disable check for now*/
#define	CHECK_PREEMPTION_LEVEL()			\
	movl	$ CPD_PREEMPTION_LEVEL,%eax	;	\
	cmpl	$0,%gs:(%eax)			;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"preemption_level != 0!"	;	\
	.text					;	\
1:
#else	/* MACH_RT */
#define	CHECK_PREEMPTION_LEVEL()
#endif	/* MACH_RT */

#define	CHECK_NO_SIMPLELOCKS()				\
	movl	$ CPD_SIMPLE_LOCK_COUNT,%eax	;	\
	cmpl	$0,%gs:(%eax)			;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"simple_locks_held!"		;	\
	.text					;	\
1:

/*
 * Verifies return to the correct thread in "unlock" situations.
 */
#define	CHECK_THREAD(thd)				\
	movl	$ CPD_ACTIVE_THREAD,%eax	;	\
	movl	%gs:(%eax),%ecx			;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"wrong thread!"			;	\
	.text					;	\
1:

#define	CHECK_MYLOCK(thd)				\
	movl	$ CPD_ACTIVE_THREAD,%eax	;	\
	movl	%gs:(%eax),%ecx			;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	jne	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"mylock attempt!"		;	\
	.text					;	\
1:
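
/*
 * All of the CHECK_* macros above share one compare-and-panic shape.
 * A rough C equivalent (illustrative only; the field names mirror the
 * S_*/M_* offsets in this file, not a compiled header):
 *
 *	void check_mutex_type(mutex_t *m)
 *	{
 *		if (m->type != MUTEX_TAG)
 *			panic("not a mutex!");
 *	}
 *
 * The assembly versions park the panic string in .data and branch
 * around the call so each macro can be dropped inline at any site.
 */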

#define	METER_SIMPLE_LOCK_LOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_lock)		;	\
	popl	reg

#define	METER_SIMPLE_LOCK_UNLOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_unlock)	;	\
	popl	reg

#else	/* MACH_LDEBUG */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
#define	CHECK_THREAD(thd)
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_NO_SIMPLELOCKS()
#define	CHECK_MYLOCK(thd)
#define	METER_SIMPLE_LOCK_LOCK(reg)
#define	METER_SIMPLE_LOCK_UNLOCK(reg)
#endif	/* MACH_LDEBUG */


/*
 *	void hw_lock_init(hw_lock_t)
 *
 *	Initialize a hardware lock.
 */
ENTRY(hw_lock_init)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movl	%eax,0(%edx)		/* clear the lock */
	EMARF
	ret

/*
 *	void hw_lock_lock(hw_lock_t)
 *
 *	Acquire lock, spinning until it becomes available.
 *	MACH_RT: also return with preemption disabled.
 */
ENTRY(hw_lock_lock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

1:	DISABLE_PREEMPTION(%eax)
	movl	$1,%ecx
	xchgl	0(%edx),%ecx		/* try to acquire the HW lock */
	testl	%ecx,%ecx		/* success? */
	jne	3f			/* no, lock was held: go spin */
	movl	$1,%eax			/* in case this was a timeout call */
	EMARF				/* acquired: nothing left to do */
	ret

3:	ENABLE_PREEMPTION(%eax)		/* no reason we can't be preemptable now */

	movl	$1,%ecx
2:
	rep; nop			/* pause for hyper-threading */
	testl	%ecx,0(%edx)		/* spin checking lock value in cache */
	jne	2b			/* non-zero means locked, keep spinning */
	jmp	1b			/* zero means unlocked, try to grab it */
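
/*
 * What the loop above implements, sketched in C (illustrative only;
 * xchg() stands for the atomic xchgl and cpu_pause for rep;nop, and a
 * hw_lock_t is assumed to point at a word that is 0 free / 1 held):
 *
 *	void hw_lock_lock(hw_lock_t lock)
 *	{
 *		for (;;) {
 *			disable_preemption();
 *			if (xchg(lock, 1) == 0)	// atomic test-and-set
 *				return;		// was free: we own it
 *			enable_preemption();
 *			while (*lock != 0)	// read-only spin on the
 *				cpu_pause();	// cached line
 *		}
 *	}
 *
 * Spinning with a plain read keeps the line shared until the holder's
 * store invalidates it; only then is the bus-locked xchg retried.
 */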

/*
 *	unsigned int hw_lock_to(hw_lock_t, unsigned int)
 *
 *	Acquire lock, spinning until it becomes available or timeout.
 *	MACH_RT: also return with preemption disabled.
 */
ENTRY(hw_lock_to)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
1:
	/*
	 * Attempt to grab the lock immediately
	 * - fastpath without timeout nonsense.
	 */
	DISABLE_PREEMPTION(%eax)
	movl	$1,%eax
	xchgl	0(%edx),%eax		/* try to acquire the HW lock */
	testl	%eax,%eax		/* success? */
	jne	2f			/* no */
	movl	$1,%eax			/* yes, return true */
	EMARF
	ret

2:
#define	INNER_LOOP_COUNT	1000
	/*
	 * Failed to get the lock so set the timeout
	 * and then spin re-checking the lock but pausing
	 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
	 */
	movl	L_ARG1,%ecx		/* fetch timeout */
	push	%edi
	push	%ebx
	mov	%edx,%edi

	rdtsc				/* read cyclecount into %edx:%eax */
	addl	%ecx,%eax		/* add timeout to cyclecount */
	adcl	$0,%edx			/* add carry */
	mov	%edx,%ecx
	mov	%eax,%ebx		/* %ecx:%ebx is the timeout expiry */
3:
	ENABLE_PREEMPTION(%eax)		/* no reason not to be preempted now */
4:
	/*
	 * The inner-loop spin to look for the lock being freed.
	 */
	movl	$1,%eax
	mov	$(INNER_LOOP_COUNT),%edx
5:
	rep; nop			/* pause for hyper-threading */
	testl	%eax,0(%edi)		/* spin checking lock value in cache */
	je	6f			/* zero => unlocked, try to grab it */
	decl	%edx			/* decrement inner loop count */
	jnz	5b			/* time to check for timeout? */

	/*
	 * Here after spinning INNER_LOOP_COUNT times, check for timeout
	 */
	rdtsc				/* cyclecount into %edx:%eax */
	cmpl	%ecx,%edx		/* compare high-order 32-bits */
	jb	4b			/* continue spinning if less, or */
	cmpl	%ebx,%eax		/* compare low-order 32-bits */
	jb	4b			/* continue if less, else bail */
	xor	%eax,%eax		/* with 0 return value */
	pop	%ebx
	pop	%edi
	EMARF
	ret

6:
	/*
	 * Here to try to grab the lock that now appears to be free
	 * after contention.
	 */
	DISABLE_PREEMPTION(%eax)
	movl	$1,%eax
	xchgl	0(%edi),%eax		/* try to acquire the HW lock */
	testl	%eax,%eax		/* success? */
	jne	3b			/* no - spin again */
	movl	$1,%eax			/* yes */
	pop	%ebx
	pop	%edi
	EMARF
	ret
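
/*
 * The timeout variant, sketched in C (illustrative only; rdtsc64()
 * stands in for the raw rdtsc read, and the timeout argument is in
 * TSC ticks as in the code above):
 *
 *	unsigned int hw_lock_to(hw_lock_t lock, unsigned int timeout)
 *	{
 *		uint64_t deadline;
 *		int i;
 *
 *		disable_preemption();
 *		if (xchg(lock, 1) == 0)
 *			return 1;		// fastpath
 *		deadline = rdtsc64() + timeout;
 *		for (;;) {
 *			enable_preemption();
 *			for (i = INNER_LOOP_COUNT; i > 0; i--) {
 *				cpu_pause();
 *				if (*lock == 0)
 *					break;	// looks free: go grab it
 *			}
 *			if (i == 0) {		// a full inner loop elapsed
 *				if (rdtsc64() >= deadline)
 *					return 0;	// timed out
 *				continue;	// not yet: keep spinning
 *			}
 *			disable_preemption();
 *			if (xchg(lock, 1) == 0)
 *				return 1;	// grabbed it
 *		}
 *	}
 *
 * The 64-bit deadline compare is done as two 32-bit compares in the
 * assembly because rdtsc returns the count split across %edx:%eax.
 */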

/*
 *	void hw_lock_unlock(hw_lock_t)
 *
 *	Unconditionally release lock.
 *	MACH_RT: release preemption level.
 */
ENTRY(hw_lock_unlock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	xchgl	0(%edx),%eax		/* clear the lock... a mov instruction */
					/* ...might be cheaper and less paranoid */
	ENABLE_PREEMPTION(%eax)
	EMARF
	ret

/*
 *	unsigned int hw_lock_try(hw_lock_t)
 *	MACH_RT: returns with preemption disabled on success.
 */
ENTRY(hw_lock_try)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	DISABLE_PREEMPTION(%eax)
	movl	$1,%ecx
	xchgl	0(%edx),%ecx		/* try to acquire the HW lock */
	testl	%ecx,%ecx		/* success? */
	jne	1f			/* no, return failure */

	movl	$1,%eax			/* success */
	EMARF
	ret

1:	ENABLE_PREEMPTION(%eax)		/* failure: release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
	EMARF
	ret
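
/*
 * hw_lock_try in the same illustrative C; note the asymmetric
 * preemption state on return:
 *
 *	unsigned int hw_lock_try(hw_lock_t lock)
 *	{
 *		disable_preemption();
 *		if (xchg(lock, 1) == 0)
 *			return 1;	// acquired; preemption stays off
 *		enable_preemption();
 *		return 0;		// already held; state restored
 *	}
 */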

/*
 *	unsigned int hw_lock_held(hw_lock_t)
 *	MACH_RT: doesn't change preemption state.
 *	N.B.  Racy, of course.
 */
ENTRY(hw_lock_held)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	$1,%ecx
	testl	%ecx,0(%edx)		/* check lock value */
	jne	1f			/* non-zero means locked */
	xorl	%eax,%eax		/* tell caller: lock wasn't locked */
	EMARF
	ret

1:	movl	$1,%eax			/* tell caller: lock was locked */
	EMARF
	ret



#if	0


ENTRY(_usimple_lock_init)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movl	%eax,USL_INTERLOCK(%edx)	/* unlock the HW lock */
	EMARF
	ret

ENTRY(_simple_lock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

sl_get_hw:
	movl	$1,%ecx
	xchgl	USL_INTERLOCK(%edx),%ecx	/* try to acquire the HW lock */
	testl	%ecx,%ecx		/* did we succeed? */

#if	MACH_LDEBUG
	je	5f
	CHECK_MYLOCK(S_THREAD)
	jmp	sl_get_hw
5:
#else	/* MACH_LDEBUG */
	jne	sl_get_hw		/* no, try again */
#endif	/* MACH_LDEBUG */

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,S_PC
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	movl	%ecx,S_THREAD
	incl	CX(EXT(simple_lock_count),%eax)
#if 0
	METER_SIMPLE_LOCK_LOCK(%edx)
#endif
#if	NCPUS == 1
	pushf
	pushl	%edx
	cli
	call	EXT(lock_stack_push)
	popl	%edx
	popfl
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	EMARF
	ret

ENTRY(_simple_lock_try)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()

	DISABLE_PREEMPTION(%eax)

	movl	$1,%ecx
	xchgl	USL_INTERLOCK(%edx),%ecx	/* try to acquire the HW lock */
	testl	%ecx,%ecx		/* did we succeed? */
	jne	1f			/* no, return failure */

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,S_PC
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	movl	%ecx,S_THREAD
	incl	CX(EXT(simple_lock_count),%eax)
#if 0
	METER_SIMPLE_LOCK_LOCK(%edx)
#endif
#if	NCPUS == 1
	pushf
	pushl	%edx
	cli
	call	EXT(lock_stack_push)
	popl	%edx
	popfl
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	movl	$1,%eax			/* return success */

	EMARF
	ret

1:
	ENABLE_PREEMPTION(%eax)

	xorl	%eax,%eax		/* and return failure */

	EMARF
	ret

ENTRY(_simple_unlock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_SIMPLE_LOCK_TYPE()
	CHECK_THREAD(S_THREAD)

#if	MACH_LDEBUG
	xorl	%eax,%eax
	movl	%eax,S_THREAD		/* disown thread */
	MP_DISABLE_PREEMPTION(%eax)
	CPU_NUMBER(%eax)
	decl	CX(EXT(simple_lock_count),%eax)
	MP_ENABLE_PREEMPTION(%eax)
#if 0
	METER_SIMPLE_LOCK_UNLOCK(%edx)
#endif
#if	NCPUS == 1
	pushf
	pushl	%edx
	cli
	call	EXT(lock_stack_pop)
	popl	%edx
	popfl
#endif	/* NCPUS == 1 */
#endif	/* MACH_LDEBUG */

	xorl	%ecx,%ecx
	xchgl	USL_INTERLOCK(%edx),%ecx	/* unlock the HW lock */

	ENABLE_PREEMPTION(%eax)

	EMARF
	ret

#endif	/* 0 */


ENTRY(mutex_init)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* clear interlock */
	movl	%eax,M_LOCKED		/* clear locked flag */
	movw	%ax,M_WAITERS		/* init waiter count */
	movw	%ax,M_PROMOTED_PRI

#if	MACH_LDEBUG
	movl	$ MUTEX_TAG,M_TYPE	/* set lock type */
	movl	%eax,M_PC		/* init caller pc */
	movl	%eax,M_THREAD		/* and owning thread */
#endif
#if	ETAP_LOCK_TRACE
	movl	L_ARG1,%ecx		/* fetch event type */
	pushl	%ecx			/* push event type */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_init)	/* init ETAP data */
	addl	$8,%esp
#endif	/* ETAP_LOCK_TRACE */

	EMARF
	ret
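
/*
 * The same initialization in illustrative C (field names mirror the
 * M_* offsets; the MACH_LDEBUG and ETAP variants additionally stamp
 * the type tag, caller pc, owner, and trace state):
 *
 *	void mutex_init(mutex_t *m)
 *	{
 *		m->ilk = 0;		// spin interlock free
 *		m->locked = 0;		// mutex itself free
 *		m->waiters = 0;		// 16-bit waiter count
 *		m->promoted_pri = 0;	// 16-bit promoted priority
 *	}
 */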

ENTRY2(mutex_lock,_mutex_lock)
	FRAME

#if	ETAP_LOCK_TRACE
	subl	$12,%esp		/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
	movl	$0,MISSED		/* clear local miss marker */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

ml_retry:
	DISABLE_PREEMPTION(%eax)

ml_get_hw:
	movl	$1,%ecx
	xchgl	%ecx,M_ILK
	testl	%ecx,%ecx		/* did we succeed? */
	jne	ml_get_hw		/* no, try again */

	movl	$1,%ecx
	xchgl	%ecx,M_LOCKED		/* try to set locked flag */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	ml_fail			/* yes, we lose */

	pushl	%edx
	call	EXT(mutex_lock_acquire)
	addl	$4,%esp
	movl	L_ARG0,%edx

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,M_PC
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	movl	%ecx,M_THREAD
	testl	%ecx,%ecx
	je	3f
	incl	TH_MUTEX_COUNT(%ecx)
3:
#endif

	xorl	%ecx,%ecx
	xchgl	%ecx,M_ILK

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* collect hold timestamp */
	addl	$16+12,%esp		/* clean up stack, adjusting for locals */
#endif	/* ETAP_LOCK_TRACE */

	EMARF
	ret

ml_fail:
#if	ETAP_LOCK_TRACE
	cmp	$0,MISSED		/* did we already take a wait timestamp? */
	jne	ml_block		/* yup. carry-on */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_miss)	/* get wait timestamp */
	movl	%eax,SWT_HI		/* set wait time (high word) */
	movl	%edx,SWT_LO		/* set wait time (low word) */
	popl	%edx			/* clean up stack */
	movl	$1,MISSED		/* mark wait timestamp as taken */
#endif	/* ETAP_LOCK_TRACE */

ml_block:
	CHECK_MYLOCK(M_THREAD)
	xorl	%eax,%eax
	pushl	%eax			/* no promotion here yet */
	pushl	%edx			/* push mutex address */
	call	EXT(mutex_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	movl	L_ARG0,%edx		/* refetch lock pointer */
	jmp	ml_retry		/* and try again */
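
/*
 * A rough C rendering of the acquire path (illustrative only; xchg()
 * stands for the atomic xchgl, and mutex_lock_acquire/mutex_lock_wait
 * are the external routines the assembly actually calls; the wait
 * path is assumed to release the interlock and preemption, since the
 * retry re-takes both):
 *
 *	void mutex_lock(mutex_t *m)
 *	{
 *		for (;;) {
 *			disable_preemption();
 *			while (xchg(&m->ilk, 1) != 0)
 *				;			// spin for interlock
 *			if (xchg(&m->locked, 1) == 0) {	// mutex was free
 *				mutex_lock_acquire(m);
 *				m->ilk = 0;		// drop interlock
 *				enable_preemption();
 *				return;
 *			}
 *			mutex_lock_wait(m, 0);		// block, then retry
 *		}
 *	}
 *
 * Note the two-level scheme: the spin interlock (ilk) only guards the
 * mutex fields; the locked flag is the mutex proper, and losing the
 * race on it means blocking in mutex_lock_wait rather than spinning.
 */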

ENTRY2(mutex_try,_mutex_try)
	FRAME

#if	ETAP_LOCK_TRACE
	subl	$8,%esp			/* make room for locals */
	movl	$0,SWT_HI		/* set wait time to zero (HI) */
	movl	$0,SWT_LO		/* set wait time to zero (LO) */
#endif	/* ETAP_LOCK_TRACE */

	movl	L_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()

	DISABLE_PREEMPTION(%eax)

mt_get_hw:
	movl	$1,%ecx
	xchgl	%ecx,M_ILK
	testl	%ecx,%ecx
	jne	mt_get_hw

	movl	$1,%ecx
	xchgl	%ecx,M_LOCKED
	testl	%ecx,%ecx
	jne	mt_fail

	pushl	%edx
	call	EXT(mutex_lock_acquire)
	addl	$4,%esp
	movl	L_ARG0,%edx

#if	MACH_LDEBUG
	movl	L_PC,%ecx
	movl	%ecx,M_PC
	movl	$ CPD_ACTIVE_THREAD,%ecx
	movl	%gs:(%ecx),%ecx
	movl	%ecx,M_THREAD
	testl	%ecx,%ecx
	je	1f
	incl	TH_MUTEX_COUNT(%ecx)
1:
#endif

	xorl	%ecx,%ecx
	xchgl	%ecx,M_ILK

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* get start hold timestamp */
	addl	$16,%esp		/* clean up stack, adjusting for locals */
#endif	/* ETAP_LOCK_TRACE */

	movl	$1,%eax

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
#if	ETAP_LOCK_TRACE
	addl	$8,%esp			/* pop stack claimed on entry */
#endif
#endif

	EMARF
	ret

mt_fail:
	xorl	%ecx,%ecx
	xchgl	%ecx,M_ILK

	ENABLE_PREEMPTION(%eax)

#if	ETAP_LOCK_TRACE
	movl	L_PC,%eax		/* fetch pc */
	pushl	SWT_LO			/* push wait time (low) */
	pushl	SWT_HI			/* push wait time (high) */
	pushl	%eax			/* push pc */
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_hold)	/* get start hold timestamp */
	addl	$16,%esp		/* clean up stack, adjusting for locals */
#endif	/* ETAP_LOCK_TRACE */

	xorl	%eax,%eax

#if	MACH_LDEBUG || ETAP_LOCK_TRACE
#if	ETAP_LOCK_TRACE
	addl	$8,%esp			/* pop stack claimed on entry */
#endif
#endif

	EMARF
	ret

ENTRY(mutex_unlock)
	FRAME
	movl	L_ARG0,%edx		/* fetch lock pointer */

#if	ETAP_LOCK_TRACE
	pushl	%edx			/* push mutex address */
	call	EXT(etap_mutex_unlock)	/* collect ETAP data */
	popl	%edx			/* restore mutex address */
#endif	/* ETAP_LOCK_TRACE */

	CHECK_MUTEX_TYPE()
	CHECK_THREAD(M_THREAD)

	DISABLE_PREEMPTION(%eax)

mu_get_hw:
	movl	$1,%ecx
	xchgl	%ecx,M_ILK
	testl	%ecx,%ecx		/* did we succeed? */
	jne	mu_get_hw		/* no, try again */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	mu_wakeup		/* yes, more work to do */

mu_doit:
#if	MACH_LDEBUG
	xorl	%eax,%eax
	movl	%eax,M_THREAD		/* disown thread */
	movl	$ CPD_ACTIVE_THREAD,%eax
	movl	%gs:(%eax),%ecx
	testl	%ecx,%ecx
	je	0f
	decl	TH_MUTEX_COUNT(%ecx)
0:
#endif

	xorl	%ecx,%ecx
	xchgl	%ecx,M_LOCKED		/* unlock the mutex */

	xorl	%ecx,%ecx
	xchgl	%ecx,M_ILK

	ENABLE_PREEMPTION(%eax)

	EMARF
	ret

mu_wakeup:
	xorl	%eax,%eax
	pushl	%eax			/* no promotion here yet */
	pushl	%edx			/* push mutex address */
	call	EXT(mutex_unlock_wakeup)	/* yes, wake a thread */
	addl	$8,%esp
	movl	L_ARG0,%edx		/* refetch lock pointer */
	jmp	mu_doit
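
/*
 * The release path in the same illustrative C; the wakeup happens
 * with the interlock still held, before the locked flag is cleared:
 *
 *	void mutex_unlock(mutex_t *m)
 *	{
 *		disable_preemption();
 *		while (xchg(&m->ilk, 1) != 0)
 *			;			// spin for interlock
 *		if (m->waiters != 0)
 *			mutex_unlock_wakeup(m, 0);	// wake a waiter
 *		m->locked = 0;			// release the mutex proper
 *		m->ilk = 0;			// drop interlock
 *		enable_preemption();
 *	}
 */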

ENTRY(interlock_unlock)
	FRAME
	movl	L_ARG0,%edx

	xorl	%ecx,%ecx
	xchgl	%ecx,M_ILK

	ENABLE_PREEMPTION(%eax)

	EMARF
	ret


ENTRY(_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION(%eax)
#endif	/* MACH_RT */
	ret

ENTRY(_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)
	jg	1f
	pushl	%gs:(%eax)
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption: preemption_level(%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)
#endif	/* MACH_RT */
	ret

ENTRY(_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)
#endif	/* MACH_RT */
	ret


ENTRY(_mp_disable_preemption)
#if	MACH_RT && NCPUS > 1
	_DISABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
	ret

ENTRY(_mp_enable_preemption)
#if	MACH_RT && NCPUS > 1
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)
	jg	1f
	pushl	%gs:(%eax)
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption: preemption_level (%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
	ret

ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT && NCPUS > 1
#if	MACH_ASSERT
	movl	$ CPD_PREEMPTION_LEVEL,%eax
	cmpl	$0,%gs:(%eax)
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK(%eax)
#endif	/* MACH_RT && NCPUS > 1 */
	ret
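
/*
 * All of the *_preemption entry points above reduce to bumping the
 * per-CPU preemption level.  In illustrative C (cpu_data() and
 * ast_check() are stand-ins for the real per-CPU accessor and the
 * pending-preemption check, not names from this file):
 *
 *	void _disable_preemption(void)
 *	{
 *		cpu_data()->preemption_level++;
 *	}
 *
 *	void _enable_preemption(void)
 *	{
 *		assert(cpu_data()->preemption_level > 0);
 *		if (--cpu_data()->preemption_level == 0)
 *			ast_check();	// may now take a pending preemption
 *	}
 *
 * The "_no_check" variants skip the pending-preemption check, and the
 * "_mp_" variants compile away entirely on uniprocessor builds.
 */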


ENTRY(i_bit_set)
	movl	S_ARG0,%edx
	movl	S_ARG1,%eax
	lock
	bts	%edx,(%eax)
	ret

ENTRY(i_bit_clear)
	movl	S_ARG0,%edx
	movl	S_ARG1,%eax
	lock
	btr	%edx,(%eax)
	ret

ENTRY(bit_lock)
	movl	S_ARG0,%ecx
	movl	S_ARG1,%eax
1:
	lock
	bts	%ecx,(%eax)
	jb	1b
	ret

ENTRY(bit_lock_try)
	movl	S_ARG0,%ecx
	movl	S_ARG1,%eax
	lock
	bts	%ecx,(%eax)
	jb	bit_lock_failed
	ret				/* %eax better not be null ! */
bit_lock_failed:
	xorl	%eax,%eax
	ret

ENTRY(bit_unlock)
	movl	S_ARG0,%ecx
	movl	S_ARG1,%eax
	lock
	btr	%ecx,(%eax)
	ret
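
/*
 * The bit-lock primitives, sketched in C (illustrative only;
 * test_and_set_bit/clear_bit stand in for the lock-prefixed bts/btr,
 * returning the bit's previous value):
 *
 *	void bit_lock(int bit, unsigned int *word)
 *	{
 *		while (test_and_set_bit(word, bit))
 *			;		// spin until the bit was clear
 *	}
 *
 *	unsigned int bit_lock_try(int bit, unsigned int *word)
 *	{
 *		return !test_and_set_bit(word, bit);
 *	}
 *
 *	void bit_unlock(int bit, unsigned int *word)
 *	{
 *		clear_bit(word, bit);
 *	}
 *
 * On success, the assembly bit_lock_try actually returns the word's
 * (non-NULL) address in %eax as its "true" value, hence the comment
 * at its ret.
 */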