/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_ldebug.h>
#include <i386/asm.h>

#include "assym.s"

#define	PAUSE		rep; nop

/*
 * When performance isn't the only concern, it's
 * nice to build stack frames...
 */
#define	BUILD_STACK_FRAMES   (GPROF || \
				((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB))

#if	BUILD_STACK_FRAMES

/* Stack-frame-relative: */
#define	L_PC		B_PC
#define	L_ARG0		B_ARG0
#define	L_ARG1		B_ARG1

#define LEAF_ENTRY(name)	\
	Entry(name);		\
	FRAME;			\
	MCOUNT

#define LEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2);		\
	FRAME;			\
	MCOUNT

#define LEAF_RET		\
	EMARF;			\
	ret

#else	/* BUILD_STACK_FRAMES */

/* Stack-pointer-relative: */
#define	L_PC		S_PC
#define	L_ARG0		S_ARG0
#define	L_ARG1		S_ARG1

#define LEAF_ENTRY(name)	\
	Entry(name)

#define LEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2)

#define LEAF_RET		\
	ret

#endif	/* BUILD_STACK_FRAMES */


/* Non-leaf routines always have a stack frame: */

#define NONLEAF_ENTRY(name)	\
	Entry(name);		\
	FRAME;			\
	MCOUNT

#define NONLEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2);		\
	FRAME;			\
	MCOUNT

#define NONLEAF_RET		\
	EMARF;			\
	ret

#define	M_ILK		(%edx)
#define	M_LOCKED	MUTEX_LOCKED(%edx)
#define	M_WAITERS	MUTEX_WAITERS(%edx)
#define	M_PROMOTED_PRI	MUTEX_PROMOTED_PRI(%edx)
#define	M_ITAG		MUTEX_ITAG(%edx)
#define	M_PTR		MUTEX_PTR(%edx)
#if	MACH_LDEBUG
#define	M_TYPE		MUTEX_TYPE(%edx)
#define	M_PC		MUTEX_PC(%edx)
#define	M_THREAD	MUTEX_THREAD(%edx)
#endif	/* MACH_LDEBUG */

#include <i386/mp.h>
#define	CX(addr,reg)	addr(,reg,4)

#if	MACH_LDEBUG
/*
 *  Routines for general lock debugging.
 */

/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.
 */
#define	CHECK_MUTEX_TYPE()				\
	cmpl	$ MUTEX_TAG,M_TYPE		;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"not a mutex!"			;	\
	.text					;	\
1:

/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
#if	MACH_RT
#define	CHECK_PREEMPTION_LEVEL()			\
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"preemption_level != 0!"	;	\
	.text					;	\
1:
#else	/* MACH_RT */
#define	CHECK_PREEMPTION_LEVEL()
#endif	/* MACH_RT */

#define	CHECK_NO_SIMPLELOCKS()				\
	cmpl	$0,%gs:CPU_SIMPLE_LOCK_COUNT	;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"simple_locks_held!"		;	\
	.text					;	\
1:

/*
 * Verifies return to the correct thread in "unlock" situations.
 */
#define	CHECK_THREAD(thd)				\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"wrong thread!"			;	\
	.text					;	\
1:

#define	CHECK_MYLOCK(thd)				\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	jne	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"mylock attempt!"		;	\
	.text					;	\
1:

#define	METER_SIMPLE_LOCK_LOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_lock)		;	\
	popl	reg

#define	METER_SIMPLE_LOCK_UNLOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_unlock)	;	\
	popl	reg

#else	/* MACH_LDEBUG */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
#define	CHECK_THREAD(thd)
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_NO_SIMPLELOCKS()
#define	CHECK_MYLOCK(thd)
#define	METER_SIMPLE_LOCK_LOCK(reg)
#define	METER_SIMPLE_LOCK_UNLOCK(reg)
#endif	/* MACH_LDEBUG */
235 | ||
236 | ||
237 | /* | |
238 | * void hw_lock_init(hw_lock_t) | |
239 | * | |
240 | * Initialize a hardware lock. | |
241 | */ | |
91447636 | 242 | LEAF_ENTRY(hw_lock_init) |
1c79356b | 243 | movl L_ARG0,%edx /* fetch lock pointer */ |
91447636 A |
244 | movl $0,0(%edx) /* clear the lock */ |
245 | LEAF_RET | |
1c79356b A |
246 | |
247 | /* | |
248 | * void hw_lock_lock(hw_lock_t) | |
249 | * | |
250 | * Acquire lock, spinning until it becomes available. | |
251 | * MACH_RT: also return with preemption disabled. | |
252 | */ | |
91447636 | 253 | LEAF_ENTRY(hw_lock_lock) |
1c79356b A |
254 | movl L_ARG0,%edx /* fetch lock pointer */ |
255 | ||
5d5c5d0d A |
256 | movl %gs:CPU_ACTIVE_THREAD,%ecx |
257 | DISABLE_PREEMPTION | |
258 | 1: | |
91447636 A |
259 | movl 0(%edx), %eax |
260 | testl %eax,%eax /* lock locked? */ | |
261 | jne 3f /* branch if so */ | |
262 | lock; cmpxchgl %ecx,0(%edx) /* try to acquire the HW lock */ | |
1c79356b | 263 | jne 3f |
9bccf70c | 264 | movl $1,%eax /* In case this was a timeout call */ |
91447636 | 265 | LEAF_RET /* if yes, then nothing left to do */ |
5d5c5d0d | 266 | 3: |
91447636 A |
267 | PAUSE /* pause for hyper-threading */ |
268 | jmp 1b /* try again */ | |
1c79356b | 269 | |
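/*
 * A rough C equivalent of the acquire loop above (helper names are
 * hypothetical; __sync_bool_compare_and_swap stands in for the
 * lock; cmpxchgl pair). The lock word is read first and the atomic
 * swap is only attempted when it reads as free, which keeps the
 * cache line shared while the lock is held instead of hammering it:
 *
 *	void hw_lock_lock_sketch(volatile long *lock)
 *	{
 *		long self = (long)current_thread();
 *		disable_preemption();
 *		for (;;) {
 *			if (*lock == 0 &&
 *			    __sync_bool_compare_and_swap(lock, 0, self))
 *				return;			// acquired
 *			cpu_pause();			// PAUSE (rep; nop)
 *		}
 *	}
 */
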
/*
 *	unsigned int hw_lock_to(hw_lock_t, unsigned int)
 *
 *	Acquire lock, spinning until it becomes available or timeout.
 *	MACH_RT: also return with preemption disabled.
 */
LEAF_ENTRY(hw_lock_to)
1:
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	/*
	 * Attempt to grab the lock immediately
	 * - fastpath without timeout nonsense.
	 */
	DISABLE_PREEMPTION
	movl	0(%edx), %eax
	testl	%eax,%eax		/* lock locked? */
	jne	2f			/* branch if so */
	lock; cmpxchgl	%ecx,0(%edx)	/* try to acquire the HW lock */
	jne	2f			/* branch on failure */
	movl	$1,%eax
	LEAF_RET

2:
#define	INNER_LOOP_COUNT	1000
	/*
	 * Failed to get the lock so set the timeout
	 * and then spin re-checking the lock but pausing
	 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
	 */
	movl	L_ARG1,%ecx		/* fetch timeout */
	push	%edi
	push	%ebx
	mov	%edx,%edi

	rdtsc				/* read cyclecount into %edx:%eax */
	addl	%ecx,%eax		/* add timeout to cyclecount */
	adcl	$0,%edx			/* add carry */
	mov	%edx,%ecx
	mov	%eax,%ebx		/* %ecx:%ebx is the timeout expiry */
4:
	/*
	 * The inner-loop spin to look for the lock being freed.
	 */
	mov	$(INNER_LOOP_COUNT),%edx
5:
	PAUSE				/* pause for hyper-threading */
	movl	0(%edi),%eax		/* spin checking lock value in cache */
	testl	%eax,%eax
	je	6f			/* zero => unlocked, try to grab it */
	decl	%edx			/* decrement inner loop count */
	jnz	5b			/* keep spinning until it's time to check for timeout */

	/*
	 * Here after spinning INNER_LOOP_COUNT times, check for timeout
	 */
	rdtsc				/* cyclecount into %edx:%eax */
	cmpl	%ecx,%edx		/* compare high-order 32-bits */
	jb	4b			/* continue spinning if less, or */
	cmpl	%ebx,%eax		/* compare low-order 32-bits */
	jb	4b			/* continue if less, else bail */
	xor	%eax,%eax		/* with 0 return value */
	pop	%ebx
	pop	%edi
	LEAF_RET

6:
	/*
	 * Here to try to grab the lock that now appears to be free
	 * after contention.
	 */
	movl	%gs:CPU_ACTIVE_THREAD,%edx
	lock; cmpxchgl	%edx,0(%edi)	/* try to acquire the HW lock */
	jne	4b			/* no - spin again */
	movl	$1,%eax			/* yes */
	pop	%ebx
	pop	%edi
	LEAF_RET

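/*
 * A rough C equivalent of the timed spin above (helper names are
 * hypothetical; rdtsc() stands for the RDTSC instruction the asm
 * uses to build a 64-bit deadline in %ecx:%ebx). The deadline is
 * only checked once per INNER_LOOP_COUNT iterations, so the hot
 * loop stays a cheap load-test-pause:
 *
 *	unsigned int hw_lock_to_sketch(volatile long *lock,
 *				       unsigned int timeout)
 *	{
 *		long self = (long)current_thread();
 *		unsigned long long deadline;
 *
 *		disable_preemption();
 *		if (*lock == 0 &&
 *		    __sync_bool_compare_and_swap(lock, 0, self))
 *			return 1;		// fastpath
 *		deadline = rdtsc() + timeout;
 *		do {
 *			for (int i = 0; i < 1000; i++) {  // INNER_LOOP_COUNT
 *				cpu_pause();
 *				if (*lock == 0 &&
 *				    __sync_bool_compare_and_swap(lock, 0, self))
 *					return 1;	// acquired
 *			}
 *		} while (rdtsc() < deadline);
 *		return 0;			// timed out
 *	}
 */
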
/*
 *	void hw_lock_unlock(hw_lock_t)
 *
 *	Unconditionally release lock.
 *	MACH_RT: release preemption level.
 */
LEAF_ENTRY(hw_lock_unlock)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	$0,0(%edx)		/* clear the lock */
	ENABLE_PREEMPTION
	LEAF_RET

/*
 *	unsigned int hw_lock_try(hw_lock_t)
 *	MACH_RT: returns with preemption disabled on success.
 */
LEAF_ENTRY(hw_lock_try)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	DISABLE_PREEMPTION
	movl	0(%edx),%eax
	testl	%eax,%eax
	jne	1f
	lock; cmpxchgl	%ecx,0(%edx)	/* try to acquire the HW lock */
	jne	1f

	movl	$1,%eax			/* success */
	LEAF_RET

1:
	ENABLE_PREEMPTION		/* failure: release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
	LEAF_RET

/*
 *	unsigned int hw_lock_held(hw_lock_t)
 *	MACH_RT: doesn't change preemption state.
 *	N.B.  Racy, of course.
 */
LEAF_ENTRY(hw_lock_held)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	0(%edx),%eax		/* check lock value */
	testl	%eax,%eax
	movl	$1,%ecx
	cmovne	%ecx,%eax		/* 0 => unlocked, 1 => locked */
	LEAF_RET

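/*
 * Hypothetical caller-side view of the try/held primitives above
 * (1 = acquired/held, 0 = not; `lock' names some hw lock variable):
 *
 *	if (hw_lock_try(&lock)) {
 *		// critical section; preemption remains disabled here
 *		hw_lock_unlock(&lock);	// also re-enables preemption
 *	}
 *	// hw_lock_held() is explicitly racy - useful as a hint or in
 *	// assertions, never for synchronization decisions.
 */
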
LEAF_ENTRY(mutex_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* clear interlock */
	movl	%eax,M_LOCKED		/* clear locked flag */
	movw	%ax,M_WAITERS		/* init waiter count */
	movw	%ax,M_PROMOTED_PRI

#if	MACH_LDEBUG
	movl	$ MUTEX_TAG,M_TYPE	/* set lock type */
	movl	%eax,M_PC		/* init caller pc */
	movl	%eax,M_THREAD		/* and owning thread */
#endif

	LEAF_RET

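/*
 * The M_* accessors index mutex fields through %edx. As a sketch
 * only (field names and widths inferred from the 16-bit movw stores
 * above and the MUTEX_* offsets generated into assym.s; the real
 * layout is defined by the C headers):
 *
 *	struct mutex_sketch {
 *		long		ilk;		// M_ILK: hw interlock word
 *		long		locked;		// M_LOCKED: owner thread, or 0
 *		unsigned short	waiters;	// M_WAITERS: blocked threads
 *		unsigned short	promoted_pri;	// M_PROMOTED_PRI
 *	#if MACH_LDEBUG
 *		long		type;		// M_TYPE: MUTEX_TAG sanity tag
 *		long		pc;		// M_PC: caller pc
 *		long		thread;		// M_THREAD: owner, for checks
 *	#endif
 *	};
 */
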
NONLEAF_ENTRY2(mutex_lock,_mutex_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Lml_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Lml_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Lml_ilk_fail		/* no - take the slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lml_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Lml_fail		/* yes, we lose */
Lml_acquire:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

#if	MACH_LDEBUG
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Lml_waiters		/* yes, more work to do */
Lml_return:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

Lml_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Lml_return

Lml_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Lml_retry		/* try again */

Lml_fail:
	/*
	 * Check if the owner is on another processor and therefore
	 * we should try to spin before blocking.
	 */
	testl	$(OnProc),ACT_SPF(%ecx)
	jz	Lml_block

	/*
	 * Here if owner is on another processor:
	 * - release the interlock
	 * - spin on the holder until release or timeout
	 * - in either case re-acquire the interlock
	 * - if released, acquire it
	 * - otherwise drop thru to block.
	 */
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* zero interlock */
	popf
	pushf				/* restore interrupt state */

	push	%edx			/* lock address */
	call	EXT(lck_mtx_lock_spin)	/* call out to do spinning */
	addl	$4,%esp
	movl	B_ARG0,%edx		/* refetch mutex address */

	/* Re-acquire interlock */
	cli				/* disable interrupts */
Lml_reget_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Lml_reget_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Lml_ilk_refail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lml_reget_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex free? */
	je	Lml_acquire		/* yes, acquire */

Lml_block:
	CHECK_MYLOCK(M_THREAD)
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	movl	B_ARG0,%edx		/* refetch mutex address */
	cli				/* ensure interrupts disabled */
	jmp	Lml_retry		/* and try again */

Lml_ilk_refail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Lml_reget_retry		/* try again */

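/*
 * The flow above, restated as a hedged C sketch. Helper names are
 * illustrative: interlock_acquire/release stand for the M_ILK
 * cmpxchg loop, and interrupt save/restore is simplified to the
 * outermost level:
 *
 *	void mutex_lock_sketch(mutex_t *m)
 *	{
 *		intr_disable();
 *		for (;;) {
 *			interlock_acquire(m);
 *			if (m->locked == NULL)		// mutex is free
 *				break;
 *			if (owner_is_on_cpu(m->locked)) {
 *				// adaptive spin: drop the interlock, spin
 *				// on the holder, then re-take the interlock
 *				interlock_release(m);
 *				lck_mtx_lock_spin(m);
 *				interlock_acquire(m);
 *				if (m->locked == NULL)
 *					break;		// as at Lml_acquire
 *			}
 *			lck_mtx_lock_wait(m, m->locked); // block, then retry
 *		}
 *		m->locked = current_thread();
 *		if (m->waiters)
 *			lck_mtx_lock_acquire(m); // waiter/priority bookkeeping
 *		interlock_release(m);
 *		intr_restore();
 *	}
 */
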
NONLEAF_ENTRY2(mutex_try,_mutex_try)

	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Lmt_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Lmt_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Lmt_ilk_fail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lmt_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Lmt_fail		/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

#if	MACH_LDEBUG
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif

	cmpl	$0,M_WAITERS		/* are there any waiters? */
	jne	Lmt_waiters		/* yes, more work to do */
Lmt_return:
	xorl	%eax,%eax
	movl	%eax,M_ILK
	popf				/* restore interrupt state */

	movl	$1,%eax

	NONLEAF_RET

Lmt_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Lmt_return

Lmt_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Lmt_retry		/* try again */

Lmt_fail:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	xorl	%eax,%eax

	NONLEAF_RET

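/*
 * Caller-side sketch of mutex_try(): it returns 1 with the mutex held
 * (same post-state as mutex_lock) or 0 if the mutex is already owned,
 * without ever blocking (it may still briefly spin on the interlock):
 *
 *	if (mutex_try(m)) {
 *		// ... critical section ...
 *		mutex_unlock(m);
 *	} else {
 *		// contended: defer, retry later, or take a fallback path
 *	}
 */
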
NONLEAF_ENTRY(mutex_unlock)
	movl	B_ARG0,%edx		/* fetch lock pointer */

	CHECK_MUTEX_TYPE()
	CHECK_THREAD(M_THREAD)

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Lmu_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Lmu_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Lmu_ilk_fail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lmu_get_hw		/* branch on failure to retry */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Lmu_wakeup		/* yes, more work to do */

Lmu_doit:

#if	MACH_LDEBUG
	movl	$0,M_THREAD		/* disown thread */
#endif

	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */

	movl	%ecx,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

Lmu_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Lmu_retry		/* try again */

Lmu_wakeup:
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
	addl	$8,%esp
	movl	B_ARG0,%edx		/* restore lock pointer */
	jmp	Lmu_doit

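/*
 * The unlock path as a rough C sketch (illustrative helper names):
 * any waiter wake-up happens before the mutex and interlock words
 * are cleared, all under the interlock:
 *
 *	void mutex_unlock_sketch(mutex_t *m)
 *	{
 *		intr_disable();
 *		interlock_acquire(m);		// spin cmpxchg on m->ilk
 *		if (m->waiters)
 *			lck_mtx_unlock_wakeup(m, m->locked);
 *		m->locked = NULL;		// release the mutex...
 *		interlock_release(m);		// ...then the interlock
 *		intr_restore();
 *	}
 */
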
/*
 * lck_mtx_lock()
 * lck_mtx_try_lock()
 * lck_mtx_unlock()
 *
 * These are variants of mutex_lock(), mutex_try() and mutex_unlock() without
 * DEBUG checks (which require fields not present in lck_mtx_t's).
 */
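/*
 * Unlike the mutex_* entries, the lck_mtx_* forms first check for an
 * indirect lock: a tag of MUTEX_IND marks the lck_mtx_t as a stub
 * whose pointer field locates the full lock (used when the lock
 * carries extended state that does not fit the compact lck_mtx_t).
 * A sketch of the two-instruction check (field names illustrative of
 * the M_ITAG/M_PTR offsets):
 *
 *	if (lck->tag == MUTEX_IND)	// cmpl $(MUTEX_IND),M_ITAG
 *		lck = lck->ptr;		// cmove M_PTR,%edx
 */
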
NONLEAF_ENTRY(lck_mtx_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Llml_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Llml_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llml_ilk_fail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llml_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Llml_fail		/* yes, we lose */
Llml_acquire:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

	cmpl	$0,M_WAITERS		/* are there any waiters? */
	jne	Llml_waiters		/* yes, more work to do */
Llml_return:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

Llml_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Llml_return

Llml_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Llml_retry		/* try again */

Llml_fail:
	/*
	 * Check if the owner is on another processor and therefore
	 * we should try to spin before blocking.
	 */
	testl	$(OnProc),ACT_SPF(%ecx)
	jz	Llml_block

	/*
	 * Here if owner is on another processor:
	 * - release the interlock
	 * - spin on the holder until release or timeout
	 * - in either case re-acquire the interlock
	 * - if released, acquire it
	 * - otherwise drop thru to block.
	 */
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* zero interlock */
	popf
	pushf				/* restore interrupt state */

	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_spin)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */

	/* Re-acquire interlock */
	cli				/* disable interrupts */
Llml_reget_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Llml_reget_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llml_ilk_refail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llml_reget_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex free? */
	je	Llml_acquire		/* yes, acquire */

Llml_block:
	CHECK_MYLOCK(M_THREAD)
	pushl	%edx			/* save mutex address */
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	popl	%edx			/* restore mutex address */
	cli				/* ensure interrupts disabled */
	jmp	Llml_retry		/* and try again */

Llml_ilk_refail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Llml_reget_retry	/* try again */

NONLEAF_ENTRY(lck_mtx_try_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Llmt_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Llmt_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llmt_ilk_fail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llmt_get_hw		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Llmt_fail		/* yes, we lose */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

	cmpl	$0,M_WAITERS		/* are there any waiters? */
	jne	Llmt_waiters		/* yes, more work to do */
Llmt_return:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	movl	$1,%eax			/* return success */
	NONLEAF_RET

Llmt_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Llmt_return

Llmt_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Llmt_retry		/* try again */

Llmt_fail:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	xorl	%eax,%eax		/* return failure */
	NONLEAF_RET

NONLEAF_ENTRY(lck_mtx_unlock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	cmpl	$(MUTEX_IND),M_ITAG	/* is this indirect? */
	cmove	M_PTR,%edx		/* yes - take indirection */

	pushf				/* save interrupt state */
	cli				/* disable interrupts */
Llmu_retry:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

Llmu_get_hw:
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llmu_ilk_fail		/* no - slow path */

	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llmu_get_hw		/* branch on failure to retry */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Llmu_wakeup		/* yes, more work to do */

Llmu_doit:
	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */

	movl	%ecx,M_ILK

	popf				/* restore interrupt state */

	NONLEAF_RET

Llmu_ilk_fail:
	/*
	 * Slow path: call out to do the spinning.
	 */
	pushl	%edx			/* lock address */
	call	EXT(lck_mtx_interlock_spin)
	popl	%edx			/* lock pointer */
	jmp	Llmu_retry		/* try again */

Llmu_wakeup:
	pushl	%edx			/* save mutex address */
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
	addl	$8,%esp
	popl	%edx			/* restore mutex pointer */
	jmp	Llmu_doit

LEAF_ENTRY(lck_mtx_ilk_unlock)
	movl	L_ARG0,%edx		/* no indirection here */

	xorl	%eax,%eax
	movl	%eax,M_ILK

	LEAF_RET

LEAF_ENTRY(_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption: preemption_level(%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_mp_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_mp_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption: preemption_level (%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(i_bit_set)
	movl	L_ARG0,%edx
	movl	L_ARG1,%eax
	lock
	bts	%edx,(%eax)
	LEAF_RET

LEAF_ENTRY(i_bit_clear)
	movl	L_ARG0,%edx
	movl	L_ARG1,%eax
	lock
	btr	%edx,(%eax)
	LEAF_RET

LEAF_ENTRY(bit_lock)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
1:
	lock
	bts	%ecx,(%eax)
	jb	1b
	LEAF_RET

LEAF_ENTRY(bit_lock_try)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
	lock
	bts	%ecx,(%eax)
	jb	bit_lock_failed
	LEAF_RET		/* %eax better not be null ! */
bit_lock_failed:
	xorl	%eax,%eax
	LEAF_RET

LEAF_ENTRY(bit_unlock)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
	lock
	btr	%ecx,(%eax)
	LEAF_RET
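
/*
 * The bit routines above rely on the locked x86 bts/btr instructions,
 * whose carry-flag result is the bit's previous value. A hedged C
 * sketch of bit_lock/bit_unlock using GCC-style atomic builtins
 * (valid for bit indices within one word; the asm's bts/btr can
 * address past the word via the bit displacement):
 *
 *	void bit_lock_sketch(int bit, volatile long *word)
 *	{
 *		long mask = 1L << bit;
 *		// spin until the bit's old value reads back as 0
 *		while (__sync_fetch_and_or(word, mask) & mask)
 *			;
 *	}
 *
 *	void bit_unlock_sketch(int bit, volatile long *word)
 *	{
 *		__sync_fetch_and_and(word, ~(1L << bit));
 *	}
 */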