/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 *	Mach Operating System
 *	Copyright (c) 1989 Carnegie-Mellon University
 *	All rights reserved.  The CMU software License Agreement specifies
 *	the terms and conditions for use and redistribution.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_ldebug.h>
#include <i386/asm.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <config_dtrace.h>

#include "assym.s"

#define	PAUSE		rep; nop

/*
 * When performance isn't the only concern, it's
 * nice to build stack frames...
 */
#define	BUILD_STACK_FRAMES   (GPROF || \
				((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB))

#if	BUILD_STACK_FRAMES

/* Stack-frame-relative: */
#define	L_PC		B_PC
#define	L_ARG0		B_ARG0
#define	L_ARG1		B_ARG1

#define LEAF_ENTRY(name)	\
	Entry(name);		\
	FRAME;			\
	MCOUNT

#define LEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2);		\
	FRAME;			\
	MCOUNT

#define LEAF_RET		\
	EMARF;			\
	ret

#else	/* BUILD_STACK_FRAMES */

/* Stack-pointer-relative: */
#define	L_PC		S_PC
#define	L_ARG0		S_ARG0
#define	L_ARG1		S_ARG1

#define LEAF_ENTRY(name)	\
	Entry(name)

#define LEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2)

#define LEAF_RET		\
	ret

#endif	/* BUILD_STACK_FRAMES */


/* Non-leaf routines always have a stack frame: */

#define NONLEAF_ENTRY(name)	\
	Entry(name);		\
	FRAME;			\
	MCOUNT

#define NONLEAF_ENTRY2(n1,n2)	\
	Entry(n1);		\
	Entry(n2);		\
	FRAME;			\
	MCOUNT

#define NONLEAF_RET		\
	EMARF;			\
	ret


#define	M_ILK		(%edx)
#define	M_LOCKED	MUTEX_LOCKED(%edx)
#define	M_WAITERS	MUTEX_WAITERS(%edx)
#define	M_PROMOTED_PRI	MUTEX_PROMOTED_PRI(%edx)
#define	M_ITAG		MUTEX_ITAG(%edx)
#define	M_PTR		MUTEX_PTR(%edx)
#if	MACH_LDEBUG
#define	M_TYPE		MUTEX_TYPE(%edx)
#define	M_PC		MUTEX_PC(%edx)
#define	M_THREAD	MUTEX_THREAD(%edx)
#endif	/* MACH_LDEBUG */

#include <i386/mp.h>
#define	CX(addr,reg)	addr(,reg,4)

#if	MACH_LDEBUG
/*
 *  Routines for general lock debugging.
 */

/*
 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.
 */
#define	CHECK_MUTEX_TYPE()				\
	cmpl	$ MUTEX_TAG,M_TYPE		;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"not a mutex!"			;	\
	.text					;	\
1:

/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
#if	MACH_RT
#define	CHECK_PREEMPTION_LEVEL()			\
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"preemption_level != 0!"	;	\
	.text					;	\
1:
#else	/* MACH_RT */
#define	CHECK_PREEMPTION_LEVEL()
#endif	/* MACH_RT */

#define	CHECK_NO_SIMPLELOCKS()				\
	cmpl	$0,%gs:CPU_SIMPLE_LOCK_COUNT	;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"simple_locks_held!"		;	\
	.text					;	\
1:

/*
 * Verifies return to the correct thread in "unlock" situations.
 */
#define	CHECK_THREAD(thd)				\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	je	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"wrong thread!"			;	\
	.text					;	\
1:

#define	CHECK_MYLOCK(thd)				\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	;	\
	testl	%ecx,%ecx			;	\
	je	1f				;	\
	cmpl	%ecx,thd			;	\
	jne	1f				;	\
	pushl	$2f				;	\
	call	EXT(panic)			;	\
	hlt					;	\
	.data					;	\
2:	String	"mylock attempt!"		;	\
	.text					;	\
1:

#define	METER_SIMPLE_LOCK_LOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_lock)		;	\
	popl	reg

#define	METER_SIMPLE_LOCK_UNLOCK(reg)			\
	pushl	reg				;	\
	call	EXT(meter_simple_unlock)	;	\
	popl	reg

#else	/* MACH_LDEBUG */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
#define	CHECK_THREAD(thd)
#define	CHECK_PREEMPTION_LEVEL()
#define	CHECK_NO_SIMPLELOCKS()
#define	CHECK_MYLOCK(thd)
#define	METER_SIMPLE_LOCK_LOCK(reg)
#define	METER_SIMPLE_LOCK_UNLOCK(reg)
#endif	/* MACH_LDEBUG */


#define PREEMPTION_DISABLE				\
	incl	%gs:CPU_PREEMPTION_LEVEL


#define	PREEMPTION_ENABLE				\
	decl	%gs:CPU_PREEMPTION_LEVEL	;	\
	jne	9f				;	\
	pushf					;	\
	testl	$ EFL_IF,(%esp)			;	\
	je	8f				;	\
	cli					;	\
	movl	%gs:CPU_PENDING_AST,%eax	;	\
	testl	$ AST_URGENT,%eax		;	\
	je	8f				;	\
	movl	%gs:CPU_INTERRUPT_LEVEL,%eax	;	\
	testl	%eax,%eax			;	\
	jne	8f				;	\
	popf					;	\
	int	$(T_PREEMPT)			;	\
	jmp	9f				;	\
8:						\
	popf					;	\
9:

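/*
 * Roughly, PREEMPTION_ENABLE in C form (an illustrative sketch, not
 * part of the build; CPU->, interrupts_enabled() and trap() are
 * hypothetical stand-ins for the %gs-relative per-cpu accesses and
 * the int instruction above):
 *
 *	void preemption_enable(void)
 *	{
 *		int level = --CPU->preemption_level;	// decl %gs:CPU_PREEMPTION_LEVEL
 *		if (level == 0 &&
 *		    interrupts_enabled() &&		// EFL_IF set in saved flags
 *		    (CPU->pending_ast & AST_URGENT) &&	// urgent AST pending
 *		    CPU->interrupt_level == 0)		// not in interrupt context
 *			trap(T_PREEMPT);		// int $(T_PREEMPT)
 *	}
 */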


#if	CONFIG_DTRACE
#define	LOCKSTAT_LABEL(lab)	\
	.data			;\
	.globl	lab		;\
	lab:			;\
	.long 9f		;\
	.text			;\
	9:

	.globl	_lockstat_probe
	.globl	_lockstat_probemap

#define	LOCKSTAT_RECORD(id, lck)				\
	push	%ebp					;	\
	mov	%esp,%ebp				;	\
	sub	$0x38,%esp	/* size of dtrace_probe args */ ; \
	movl	_lockstat_probemap + (id * 4),%eax	;	\
	test	%eax,%eax				;	\
	je	9f					;	\
	movl	$0,36(%esp)				;	\
	movl	$0,40(%esp)				;	\
	movl	$0,28(%esp)				;	\
	movl	$0,32(%esp)				;	\
	movl	$0,20(%esp)				;	\
	movl	$0,24(%esp)				;	\
	movl	$0,12(%esp)				;	\
	movl	$0,16(%esp)				;	\
	movl	lck,4(%esp)	/* copy lock pointer to arg 1 */ ; \
	movl	$0,8(%esp)				;	\
	movl	%eax,(%esp)				;	\
	call	*_lockstat_probe			;	\
9:	leave
	/* ret - left to subsequent code, e.g. return values */

#define	LOCKSTAT_RECORD2(id, lck, arg)				\
	push	%ebp					;	\
	mov	%esp,%ebp				;	\
	sub	$0x38,%esp	/* size of dtrace_probe args */ ; \
	movl	_lockstat_probemap + (id * 4),%eax	;	\
	test	%eax,%eax				;	\
	je	9f					;	\
	movl	$0,36(%esp)				;	\
	movl	$0,40(%esp)				;	\
	movl	$0,28(%esp)				;	\
	movl	$0,32(%esp)				;	\
	movl	$0,20(%esp)				;	\
	movl	$0,24(%esp)				;	\
	movl	$0,12(%esp)				;	\
	movl	$0,16(%esp)				;	\
	movl	lck,4(%esp)	/* copy lock pointer to arg 1 */ ; \
	movl	arg,8(%esp)				;	\
	movl	%eax,(%esp)				;	\
	call	*_lockstat_probe			;	\
9:	leave
	/* ret - left to subsequent code, e.g. return values */
#endif


/*
 *	void hw_lock_init(hw_lock_t)
 *
 *	Initialize a hardware lock.
 */
LEAF_ENTRY(hw_lock_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	$0,(%edx)		/* clear the lock */
	LEAF_RET


/*
 *	void hw_lock_byte_init(uint8_t *)
 *
 *	Initialize a hardware byte lock.
 */
LEAF_ENTRY(hw_lock_byte_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movb	$0,(%edx)		/* clear the lock */
	LEAF_RET

/*
 *	void hw_lock_lock(hw_lock_t)
 *
 *	Acquire lock, spinning until it becomes available.
 *	MACH_RT: also return with preemption disabled.
 */
LEAF_ENTRY(hw_lock_lock)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	PREEMPTION_DISABLE
1:
	movl	(%edx), %eax
	testl	%eax,%eax		/* lock locked? */
	jne	3f			/* branch if so */
	lock; cmpxchgl	%ecx,(%edx)	/* try to acquire the HW lock */
	jne	3f
	movl	$1,%eax			/* In case this was a timeout call */
	LEAF_RET			/* if yes, then nothing left to do */
3:
	PAUSE				/* pause for hyper-threading */
	jmp	1b			/* try again */

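/*
 * An illustrative C sketch of the acquire loop above (not part of the
 * build). The lock word is zero when free and holds the owning thread
 * pointer when taken; current_thread(), preemption_disable() and
 * cpu_pause() are hypothetical stand-ins for the %gs:CPU_ACTIVE_THREAD
 * load, PREEMPTION_DISABLE and PAUSE:
 *
 *	void hw_lock_lock(volatile uintptr_t *lock)
 *	{
 *		uintptr_t self = (uintptr_t)current_thread();
 *		preemption_disable();
 *		for (;;) {
 *			if (*lock == 0 &&	// cheap read before the
 *			    __sync_bool_compare_and_swap(lock, 0, self))
 *				return;		// locked atomic cmpxchg
 *			cpu_pause();		// be kind to the HT sibling
 *		}
 *	}
 */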
/*
 *	void hw_lock_byte_lock(uint8_t *lock_byte)
 *
 *	Acquire byte sized lock operand, spinning until it becomes available.
 *	MACH_RT: also return with preemption disabled.
 */

LEAF_ENTRY(hw_lock_byte_lock)
	movl	L_ARG0,%edx		/* Load lock pointer */
	PREEMPTION_DISABLE
	movl	$1, %ecx		/* Set lock value */
1:
	movb	(%edx), %al		/* Load byte at address */
	testb	%al,%al			/* lock locked? */
	jne	3f			/* branch if so */
	lock; cmpxchgb	%cl,(%edx)	/* attempt atomic compare exchange */
	jne	3f
	LEAF_RET			/* if yes, then nothing left to do */
3:
	PAUSE				/* pause for hyper-threading */
	jmp	1b			/* try again */

/*
 *	unsigned int hw_lock_to(hw_lock_t, unsigned int)
 *
 *	Acquire lock, spinning until it becomes available or timeout.
 *	MACH_RT: also return with preemption disabled.
 */
LEAF_ENTRY(hw_lock_to)
1:
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	/*
	 * Attempt to grab the lock immediately
	 * - fastpath without timeout nonsense.
	 */
	PREEMPTION_DISABLE
	movl	(%edx), %eax
	testl	%eax,%eax		/* lock locked? */
	jne	2f			/* branch if so */
	lock; cmpxchgl	%ecx,(%edx)	/* try to acquire the HW lock */
	jne	2f			/* branch on failure */
	movl	$1,%eax
	LEAF_RET

2:
#define	INNER_LOOP_COUNT	1000
	/*
	 * Failed to get the lock so set the timeout
	 * and then spin re-checking the lock but pausing
	 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
	 */
	movl	L_ARG1,%ecx		/* fetch timeout */
	push	%edi
	push	%ebx
	mov	%edx,%edi

	rdtsc				/* read cyclecount into %edx:%eax */
	addl	%ecx,%eax		/* add timeout to cyclecount */
	adcl	$0,%edx			/* add carry */
	mov	%edx,%ecx
	mov	%eax,%ebx		/* %ecx:%ebx is the timeout expiry */
4:
	/*
	 * The inner-loop spin to look for the lock being freed.
	 */
	mov	$(INNER_LOOP_COUNT),%edx
5:
	PAUSE				/* pause for hyper-threading */
	movl	(%edi),%eax		/* spin checking lock value in cache */
	testl	%eax,%eax
	je	6f			/* zero => unlocked, try to grab it */
	decl	%edx			/* decrement inner loop count */
	jnz	5b			/* time to check for timeout? */

	/*
	 * Here after spinning INNER_LOOP_COUNT times, check for timeout
	 */
	rdtsc				/* cyclecount into %edx:%eax */
	cmpl	%ecx,%edx		/* compare high-order 32-bits */
	jb	4b			/* continue spinning if less, or */
	cmpl	%ebx,%eax		/* compare low-order 32-bits */
	jb	4b			/* continue if less, else bail */
	xor	%eax,%eax		/* with 0 return value */
	pop	%ebx
	pop	%edi
	LEAF_RET

6:
	/*
	 * Here to try to grab the lock that now appears to be free
	 * after contention.
	 */
	movl	%gs:CPU_ACTIVE_THREAD,%edx
	lock; cmpxchgl	%edx,(%edi)	/* try to acquire the HW lock */
	jne	4b			/* no - spin again */
	movl	$1,%eax			/* yes */
	pop	%ebx
	pop	%edi
	LEAF_RET

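/*
 * An illustrative C sketch of the timeout path above (not part of the
 * build; rdtsc64() and cpu_pause() are hypothetical wrappers for the
 * rdtsc and pause instructions, and the inner loop bounds how often
 * the relatively costly rdtsc is redone):
 *
 *	unsigned int hw_lock_to(volatile uintptr_t *lock, uint32_t timeout)
 *	{
 *		uintptr_t self = (uintptr_t)current_thread();
 *		preemption_disable();
 *		if (*lock == 0 &&
 *		    __sync_bool_compare_and_swap(lock, 0, self))
 *			return 1;			// fastpath
 *		uint64_t deadline = rdtsc64() + timeout; // cycles (%ecx:%ebx)
 *		do {
 *			for (int i = 0; i < 1000; i++) { // INNER_LOOP_COUNT
 *				cpu_pause();
 *				if (*lock == 0 &&
 *				    __sync_bool_compare_and_swap(lock, 0, self))
 *					return 1;	// acquired after contention
 *			}
 *		} while (rdtsc64() < deadline);
 *		return 0;				// timed out
 *	}
 */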
/*
 *	void hw_lock_unlock(hw_lock_t)
 *
 *	Unconditionally release lock.
 *	MACH_RT: release preemption level.
 */
LEAF_ENTRY(hw_lock_unlock)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	movl	$0,(%edx)		/* clear the lock */
	PREEMPTION_ENABLE
	LEAF_RET
/*
 *	void hw_lock_byte_unlock(uint8_t *lock_byte)
 *
 *	Unconditionally release byte sized lock operand.
 *	MACH_RT: release preemption level.
 */

LEAF_ENTRY(hw_lock_byte_unlock)
	movl	L_ARG0,%edx		/* Load lock pointer */
	movb	$0,(%edx)		/* Clear the lock byte */
	PREEMPTION_ENABLE
	LEAF_RET

/*
 *	void i386_lock_unlock_with_flush(hw_lock_t)
 *
 *	Unconditionally release lock, followed by a cacheline flush of
 *	the line corresponding to the lock dword. This routine is currently
 *	used with certain locks which are susceptible to lock starvation,
 *	minimizing cache affinity for lock acquisitions. A queued spinlock
 *	or other mechanism that ensures fairness would obviate the need
 *	for this routine, but ideally few or no spinlocks should exhibit
 *	enough contention to require such measures.
 *	MACH_RT: release preemption level.
 */
LEAF_ENTRY(i386_lock_unlock_with_flush)
	movl	L_ARG0,%edx		/* Fetch lock pointer */
	movl	$0,(%edx)		/* Clear the lock */
	mfence				/* Serialize prior stores */
	clflush	(%edx)			/* Write back and invalidate line */
	PREEMPTION_ENABLE
	LEAF_RET

/*
 *	unsigned int hw_lock_try(hw_lock_t)
 *	MACH_RT: returns with preemption disabled on success.
 */
LEAF_ENTRY(hw_lock_try)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	PREEMPTION_DISABLE
	movl	(%edx),%eax
	testl	%eax,%eax
	jne	1f
	lock; cmpxchgl	%ecx,(%edx)	/* try to acquire the HW lock */
	jne	1f

	movl	$1,%eax			/* success */
	LEAF_RET

1:
	PREEMPTION_ENABLE		/* failure: release preemption... */
	xorl	%eax,%eax		/* ...and return failure */
	LEAF_RET

/*
 *	unsigned int hw_lock_held(hw_lock_t)
 *	MACH_RT: doesn't change preemption state.
 *	N.B.  Racy, of course.
 */
LEAF_ENTRY(hw_lock_held)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	(%edx),%eax		/* check lock value */
	testl	%eax,%eax
	movl	$1,%ecx
	cmovne	%ecx,%eax		/* 0 => unlocked, 1 => locked */
	LEAF_RET

LEAF_ENTRY(mutex_init)
	movl	L_ARG0,%edx		/* fetch lock pointer */
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* clear interlock */
	movl	%eax,M_LOCKED		/* clear locked flag */
	movw	%ax,M_WAITERS		/* init waiter count */
	movw	%ax,M_PROMOTED_PRI

#if	MACH_LDEBUG
	movl	$ MUTEX_TAG,M_TYPE	/* set lock type */
	movl	%eax,M_PC		/* init caller pc */
	movl	%eax,M_THREAD		/* and owning thread */
#endif

	LEAF_RET

/*
 * Reader-writer lock fastpaths. These currently exist for the
 * shared lock acquire and release paths (where they reduce overhead
 * considerably)--more can be added as necessary (DRK).
 */

/*
 * These should reflect the layout of the bitfield embedded within
 * the lck_rw_t structure (see i386/locks.h).
 */
#define LCK_RW_INTERLOCK	0x1
#define LCK_RW_WANT_UPGRADE	0x2
#define LCK_RW_WANT_WRITE	0x4
#define LCK_R_WAITING		0x8
#define LCK_W_WAITING		0x10

#define	RW_LOCK_SHARED_MASK ((LCK_RW_INTERLOCK<<16) |	\
	((LCK_RW_WANT_UPGRADE|LCK_RW_WANT_WRITE) << 24))
/*
 *		void lck_rw_lock_shared(lck_rw_t*)
 *
 */

Entry(lck_rw_lock_shared)
	movl	S_ARG0, %edx
1:
	movl	(%edx), %eax		/* Load state bitfield and interlock */
	testl	$(RW_LOCK_SHARED_MASK), %eax	/* Eligible for fastpath? */
	jne	3f
	movl	%eax, %ecx
	incl	%ecx			/* Increment reader refcount */
	lock
	cmpxchgl %ecx, (%edx)		/* Attempt atomic exchange */
	jne	2f

#if	CONFIG_DTRACE
	/*
	 * Dtrace lockstat event: LS_LCK_RW_LOCK_SHARED_ACQUIRE
	 * Implemented by swapping between return and no-op instructions.
	 * See bsd/dev/dtrace/lockstat.c.
	 */
	LOCKSTAT_LABEL(_lck_rw_lock_shared_lockstat_patch_point)
	ret
	/* Fall thru when patched, counting on lock pointer in %edx */
	LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, %edx)
#endif
	ret

2:
	PAUSE
	jmp	1b
3:
	jmp	EXT(lck_rw_lock_shared_gen)

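/*
 * An illustrative C sketch of the shared-acquire fastpath above (not
 * part of the build). Per RW_LOCK_SHARED_MASK, the interlock and the
 * want-write/want-upgrade bits live in the upper half of the lock
 * word, while the low 16 bits hold the reader refcount:
 *
 *	void lck_rw_lock_shared(volatile uint32_t *lock)
 *	{
 *		for (;;) {
 *			uint32_t old = *lock;
 *			if (old & RW_LOCK_SHARED_MASK) {
 *				// writer pending or interlock held:
 *				lck_rw_lock_shared_gen(lock);	// slow path
 *				return;
 *			}
 *			if (__sync_bool_compare_and_swap(lock, old, old + 1))
 *				return;		// reader refcount bumped
 *			cpu_pause();		// lost the race; retry
 *		}
 *	}
 */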

/*
 *		lck_rw_type_t lck_rw_done(lck_rw_t*)
 *
 */

.data
rwl_release_error_str:
	.asciz	"Releasing non-exclusive RW lock without a reader refcount!"
.text

#define	RW_LOCK_RELEASE_MASK ((LCK_RW_INTERLOCK<<16) |	\
	((LCK_RW_WANT_UPGRADE|LCK_RW_WANT_WRITE|LCK_R_WAITING|LCK_W_WAITING) << 24))
Entry(lck_rw_done)
	movl	S_ARG0, %edx
1:
	movl	(%edx), %eax		/* Load state bitfield and interlock */
	testl	$(RW_LOCK_RELEASE_MASK), %eax	/* Eligible for fastpath? */
	jne	3f
	movl	%eax, %ecx
	/* Assert refcount */
	testl	$(0xFFFF), %ecx
	jne	5f
	movl	$(rwl_release_error_str), S_ARG0
	jmp	EXT(panic)
5:
	decl	%ecx			/* Decrement reader count */
	lock
	cmpxchgl %ecx, (%edx)
	jne	2f
	movl	$(RW_SHARED), %eax	/* Indicate that the lock was shared */
#if	CONFIG_DTRACE
	/* Dtrace lockstat probe: LS_RW_DONE_RELEASE as reader */
	LOCKSTAT_LABEL(_lck_rw_done_lockstat_patch_point)
	ret
	/*
	 * Note: Dtrace's convention is 0 ==> reader, which is
	 * a different absolute value than $(RW_SHARED)
	 * %edx contains the lock address already from the above
	 */
	LOCKSTAT_RECORD2(LS_LCK_RW_DONE_RELEASE, %edx, $0)
	movl	$(RW_SHARED), %eax	/* Indicate that the lock was shared */
#endif
	ret

2:
	PAUSE
	jmp	1b
3:
	jmp	EXT(lck_rw_done_gen)


NONLEAF_ENTRY2(mutex_lock_spin,_mutex_lock_spin)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	pushf				/* save interrupt state */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Lmls_ilk_loop		/* no, go spin */
Lmls_retry:
	cli				/* disable interrupts */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	/* eax == 0 at this point */
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lmls_ilk_fail		/* branch on failure to spin loop */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Lml_fail		/* yes, fall back to a normal mutex lock */
	movl	$(MUTEX_LOCKED_AS_SPIN),M_LOCKED	/* indicate ownership as a spin lock */

#if	MACH_LDEBUG
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif
	PREEMPTION_DISABLE
	popf				/* restore interrupt state */
	leave				/* return with the interlock held */
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_mutex_lock_spin_lockstat_patch_point)
	ret
	/* %edx contains the lock address from above */
	LOCKSTAT_RECORD(LS_MUTEX_LOCK_SPIN_ACQUIRE, %edx)
#endif
	ret

Lmls_ilk_fail:
	popf				/* restore interrupt state */
	pushf				/* resave interrupt state on stack */

Lmls_ilk_loop:
	PAUSE
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Lmls_retry		/* yes, go for it */
	jmp	Lmls_ilk_loop		/* no, keep spinning */


NONLEAF_ENTRY2(mutex_lock,_mutex_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	pushf				/* save interrupt state */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	movl	M_ILK,%eax		/* is interlock held */
	testl	%eax,%eax
	jne	Lml_ilk_loop		/* yes, go do the spin loop */
Lml_retry:
	cli				/* disable interrupts */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	/* eax == 0 at this point */
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lml_ilk_fail		/* branch on failure to spin loop */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Lml_fail		/* yes, we lose */
Lml_acquire:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

#if	MACH_LDEBUG
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif
	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Lml_waiters		/* yes, more work to do */
Lml_return:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */
	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_mutex_lock_lockstat_patch_point)
	ret
	/* %edx still contains the lock pointer */
	LOCKSTAT_RECORD(LS_MUTEX_LOCK_ACQUIRE, %edx)
#endif
	ret

	/*
	 * We got the mutex, but there are waiters.  Update information
	 * on waiters.
	 */
Lml_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Lml_return

Lml_restart:
Lml_ilk_fail:
	popf				/* restore interrupt state */
	pushf				/* resave interrupt state on stack */

Lml_ilk_loop:
	PAUSE
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Lml_retry		/* yes, go try to grab it */
	jmp	Lml_ilk_loop		/* no - keep spinning */

Lml_fail:
	/*
	 * Check if the owner is on another processor and therefore
	 * we should try to spin before blocking.
	 */
	testl	$(OnProc),ACT_SPF(%ecx)
	jz	Lml_block

	/*
	 * Here if owner is on another processor:
	 *  - release the interlock
	 *  - spin on the holder until release or timeout
	 *  - in either case re-acquire the interlock
	 *  - if released, acquire it
	 *  - otherwise drop thru to block.
	 */
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* zero interlock */
	popf
	pushf				/* restore interrupt state */

	push	%edx			/* lock address */
	call	EXT(lck_mtx_lock_spinwait)	/* call out to do spinning */
	addl	$4,%esp
	movl	B_ARG0,%edx		/* refetch mutex address */

	/* Re-acquire interlock - interrupts currently enabled */
	movl	M_ILK,%eax		/* is interlock held */
	testl	%eax,%eax
	jne	Lml_ilk_reloop		/* yes, go do the spin loop */
Lml_reget_retry:
	cli				/* disable interrupts */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	/* eax == 0 at this point */
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lml_ilk_refail		/* branch on failure to spin loop */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex free? */
	je	Lml_acquire		/* yes, acquire */

Lml_block:
	CHECK_MYLOCK(M_THREAD)
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
	addl	$8,%esp			/* returns with interlock dropped */
	movl	B_ARG0,%edx		/* refetch mutex address */
	jmp	Lml_restart		/* and start over */

Lml_ilk_refail:
	popf				/* restore interrupt state */
	pushf				/* resave interrupt state on stack */

Lml_ilk_reloop:
	PAUSE
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Lml_reget_retry		/* yes, go try to grab it */
	jmp	Lml_ilk_reloop		/* no - keep spinning */

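/*
 * Rough control flow of the mutex slow path above, as a hedged C
 * sketch (not part of the build; ilk_try() stands in for the
 * cli + lock;cmpxchgl sequence on M_ILK, which leaves interrupts
 * disabled on success, and owner_on_cpu() for the OnProc test):
 *
 *	void mutex_lock(mutex_t *m)
 *	{
 *		for (;;) {
 *			while (!ilk_try(m))
 *				cpu_pause();		// spin for the interlock
 *			if (m->locked == NULL)
 *				break;			// mutex free: acquire below
 *			if (owner_on_cpu(m)) {
 *				m->ilk = NULL;		// drop interlock and spin
 *				lck_mtx_lock_spinwait(m); // on the holder instead
 *				continue;		// then retake the interlock
 *			}
 *			lck_mtx_lock_wait(m, m->locked); // block; drops interlock
 *		}
 *		m->locked = current_thread();		// take ownership
 *		if (m->waiters)
 *			lck_mtx_lock_acquire(m);	// waiter bookkeeping
 *		m->ilk = NULL;				// release the interlock
 *	}
 */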


NONLEAF_ENTRY2(mutex_try_spin,_mutex_try_spin)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	pushf				/* save interrupt state */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()

	movl	M_ILK,%eax
	testl	%eax,%eax		/* is the interlock held? */
	jne	Lmts_ilk_loop		/* yes, go to spin loop */
Lmts_retry:
	cli				/* disable interrupts */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	/* eax == 0 at this point */
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lmts_ilk_fail		/* branch on failure to spin loop */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Lmt_fail		/* yes, we lose */
Lmts_acquire:
	movl	$(MUTEX_LOCKED_AS_SPIN),M_LOCKED	/* indicate ownership as a spin lock */

#if	MACH_LDEBUG
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif
	PREEMPTION_DISABLE		/* no, return with interlock held */
	popf				/* restore interrupt state */
	movl	$1,%eax
	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_mutex_try_spin_lockstat_patch_point)
	ret
	/* %edx inherits the lock pointer from above */
	LOCKSTAT_RECORD(LS_MUTEX_TRY_SPIN_ACQUIRE, %edx)
	movl	$1,%eax
#endif
	ret

Lmts_ilk_fail:
	popf				/* restore interrupt state */
	pushf				/* resave interrupt state on stack */

Lmts_ilk_loop:
	PAUSE
	/*
	 * need to do this check outside of the interlock in
	 * case this lock is held as a simple lock which means
	 * we won't be able to take the interlock
	 */
	movl	M_LOCKED,%eax
	testl	%eax,%eax		/* is the mutex locked? */
	jne	Lmt_fail_no_ilk		/* yes, go return failure */

	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Lmts_retry		/* yes, go try to grab it */
	jmp	Lmts_ilk_loop		/* keep spinning */


NONLEAF_ENTRY2(mutex_try,_mutex_try)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	pushf				/* save interrupt state */

	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()

	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Lmt_ilk_loop		/* no, go to the spin loop */
Lmt_retry:
	cli				/* disable interrupts */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	/* eax == 0 at this point */
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lmt_ilk_fail		/* branch on failure to spin loop */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Lmt_fail		/* yes, we lose */
Lmt_acquire:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

#if	MACH_LDEBUG
	movl	%ecx,M_THREAD
	movl	B_PC,%ecx
	movl	%ecx,M_PC
#endif
	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Lmt_waiters		/* yes, more work to do */
Lmt_return:
	xorl	%eax,%eax
	movl	%eax,M_ILK
	popf				/* restore interrupt state */

	movl	$1,%eax
	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_mutex_try_lockstat_patch_point)
	ret
	/* inherit the lock pointer in %edx from above */
	LOCKSTAT_RECORD(LS_MUTEX_TRY_LOCK_ACQUIRE, %edx)
	movl	$1,%eax
#endif
	ret

Lmt_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Lmt_return

Lmt_ilk_fail:
	popf				/* restore interrupt state */
	pushf				/* resave interrupt state on stack */

Lmt_ilk_loop:
	PAUSE
	/*
	 * need to do this check outside of the interlock in
	 * case this lock is held as a simple lock which means
	 * we won't be able to take the interlock
	 */
	movl	M_LOCKED,%eax		/* get lock owner */
	testl	%eax,%eax		/* is the mutex locked? */
	jne	Lmt_fail_no_ilk		/* yes, go return failure */

	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Lmt_retry		/* yes, go try to grab it */
	jmp	Lmt_ilk_loop		/* no - keep spinning */

Lmt_fail:
	xorl	%eax,%eax
	movl	%eax,M_ILK

Lmt_fail_no_ilk:
	xorl	%eax,%eax
	popf				/* restore interrupt state */
	NONLEAF_RET


LEAF_ENTRY(mutex_convert_spin)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	movl	M_LOCKED,%ecx		/* is this the spin variant of the mutex */
	cmpl	$(MUTEX_LOCKED_AS_SPIN),%ecx
	jne	Lmcs_exit		/* already owned as a mutex, just return */

	movl	M_ILK,%ecx		/* convert from spin version to mutex */
	movl	%ecx,M_LOCKED		/* take control of the mutex */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Lmcs_waiters		/* yes, more work to do */

Lmcs_return:
	xorl	%ecx,%ecx
	movl	%ecx,M_ILK		/* clear interlock */
	PREEMPTION_ENABLE
Lmcs_exit:
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_mutex_convert_spin_lockstat_patch_point)
	ret
	/* inherit %edx from above */
	LOCKSTAT_RECORD(LS_MUTEX_CONVERT_SPIN_ACQUIRE, %edx)
#endif
	ret


Lmcs_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Lmcs_return


NONLEAF_ENTRY(mutex_unlock)
	movl	B_ARG0,%edx		/* fetch lock pointer */

	movl	M_LOCKED,%ecx		/* is this the spin variant of the mutex */
	cmpl	$(MUTEX_LOCKED_AS_SPIN),%ecx
	jne	Lmu_enter		/* no, go treat like a real mutex */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Lmus_wakeup		/* yes, more work to do */

Lmus_drop_ilk:
	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* yes, clear the spin indicator */
	movl	%ecx,M_ILK		/* release the interlock */
	PREEMPTION_ENABLE		/* and re-enable preemption */
	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_mutex_unlock_lockstat_patch_point)
	ret
	/* inherit lock pointer in %edx from above */
	LOCKSTAT_RECORD(LS_MUTEX_UNLOCK_RELEASE, %edx)
#endif
	ret

Lmus_wakeup:
	pushl	%edx			/* save mutex address */
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlockspin_wakeup)	/* yes, wake a thread */
	addl	$4,%esp
	popl	%edx			/* restore mutex pointer */
	jmp	Lmus_drop_ilk

Lmu_enter:
	pushf				/* save interrupt state */

	CHECK_MUTEX_TYPE()
	CHECK_THREAD(M_THREAD)

	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Lmu_ilk_loop		/* no, go to the spin loop */
Lmu_retry:
	cli				/* disable interrupts */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	/* eax == 0 at this point */
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Lmu_ilk_fail		/* branch on failure to spin loop */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Lmu_wakeup		/* yes, more work to do */

Lmu_doit:
#if	MACH_LDEBUG
	movl	$0,M_THREAD		/* disown thread */
#endif
	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */
	movl	%ecx,M_ILK		/* release the interlock */
	popf				/* restore interrupt state */
	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_mutex_unlock2_lockstat_patch_point)
	ret
	/* inherit %edx from above */
	LOCKSTAT_RECORD(LS_MUTEX_UNLOCK_RELEASE, %edx)
#endif
	ret

Lmu_ilk_fail:
	popf				/* restore interrupt state */
	pushf				/* resave interrupt state on stack */

Lmu_ilk_loop:
	PAUSE
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Lmu_retry		/* yes, go try to grab it */
	jmp	Lmu_ilk_loop		/* no - keep spinning */

Lmu_wakeup:
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlock_wakeup)	/* yes, wake a thread */
	addl	$8,%esp
	movl	B_ARG0,%edx		/* restore lock pointer */
	jmp	Lmu_doit

/*
 * void lck_mtx_assert(lck_mtx_t* l, unsigned int)
 * void _mutex_assert(mutex_t, unsigned int)
 * Takes the address of a lock, and an assertion type as parameters.
 * The assertion can take one of two forms determined by the type
 * parameter: either the lock is held by the current thread, and the
 * type is LCK_MTX_ASSERT_OWNED, or it isn't and the type is
 * LCK_MTX_ASSERT_NOT_OWNED. Calls panic on assertion failure.
 *
 */

Entry(lck_mtx_assert)
Entry(_mutex_assert)
	movl	S_ARG0,%edx			/* Load lock address */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* Load current thread */

	cmpl	$(MUTEX_IND),M_ITAG		/* Is this an indirect mutex? */
	cmove	M_PTR,%edx			/* If so, take indirection */

	movl	M_LOCKED,%eax			/* Load lock word */
	cmpl	$(MUTEX_LOCKED_AS_SPIN),%eax	/* check for spin variant */
	cmove	M_ILK,%eax			/* yes, spin lock owner is in the interlock */

	cmpl	$(MUTEX_ASSERT_OWNED),S_ARG1	/* Determine assert type */
	jne	2f				/* Assert ownership? */
	cmpl	%eax,%ecx			/* Current thread match? */
	jne	3f				/* no, go panic */
1:						/* yes, we own it */
	ret					/* just return */
2:
	cmpl	%eax,%ecx			/* Current thread match? */
	jne	1b				/* No, return */
	movl	%edx,S_ARG1			/* Prep assertion failure */
	movl	$(mutex_assert_owned_str),S_ARG0
	jmp	4f
3:
	movl	%edx,S_ARG1			/* Prep assertion failure */
	movl	$(mutex_assert_not_owned_str),S_ARG0
4:
	jmp	EXT(panic)

.data
mutex_assert_not_owned_str:
	.asciz	"mutex (%p) not owned\n"
mutex_assert_owned_str:
	.asciz	"mutex (%p) owned\n"
.text

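/*
 * Typical use from C (illustrative only; my_lock and my_count are
 * hypothetical):
 *
 *	lck_mtx_lock(&my_lock);
 *	lck_mtx_assert(&my_lock, LCK_MTX_ASSERT_OWNED);	// panics if not held
 *	my_count++;					// protected update
 *	lck_mtx_unlock(&my_lock);
 */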
/*
 * This preprocessor define controls whether the R-M-W updates of the
 * per-group statistics elements are atomic (LOCK-prefixed).
 * Enabled by default.
 */
#define ATOMIC_STAT_UPDATES 1

#if defined(ATOMIC_STAT_UPDATES)
#define LOCK_IF_ATOMIC_STAT_UPDATES lock
#else
#define LOCK_IF_ATOMIC_STAT_UPDATES
#endif /* ATOMIC_STAT_UPDATES */

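/*
 * The 64-bit GRP_MTX_STAT_UTIL counter used below is kept as two
 * 32-bit halves: add 1 to the low word and, only if that add carries,
 * increment the high word. A rough C equivalent (illustrative, not
 * part of the build):
 *
 *	void grp_mtx_stat_util_incr(volatile uint32_t stat[2])
 *	{
 *		if (__sync_add_and_fetch(&stat[0], 1) == 0)	// low word wrapped,
 *			__sync_add_and_fetch(&stat[1], 1);	// so propagate carry
 *	}
 *
 * Note that even with LOCK-prefixed updates the two halves are not
 * written as one atomic unit; the statistic tolerates that race.
 */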
91447636 A |
1200 | /* |
1201 | * lck_mtx_lock() | |
1202 | * lck_mtx_try_lock() | |
1203 | * lck_mutex_unlock() | |
2d21ac55 A |
1204 | * lck_mtx_lock_spin() |
1205 | * lck_mtx_convert_spin() | |
91447636 | 1206 | * |
2d21ac55 A |
1207 | * These are variants of mutex_lock(), mutex_try(), mutex_unlock() |
1208 | * mutex_lock_spin and mutex_convert_spin without | |
91447636 A |
1209 | * DEBUG checks (which require fields not present in lck_mtx_t's). |
1210 | */ | |
2d21ac55 A |
1211 | |
1212 | NONLEAF_ENTRY(lck_mtx_lock_spin) | |
1c79356b | 1213 | |
91447636 | 1214 | movl B_ARG0,%edx /* fetch lock pointer */ |
2d21ac55 | 1215 | pushf /* save interrupt state */ |
1c79356b | 1216 | |
1c79356b | 1217 | CHECK_NO_SIMPLELOCKS() |
91447636 | 1218 | CHECK_PREEMPTION_LEVEL() |
1c79356b | 1219 | |
2d21ac55 A |
1220 | movl M_ILK,%eax /* read interlock */ |
1221 | testl %eax,%eax /* unlocked? */ | |
1222 | jne Llmls_eval_ilk /* no, go see if indirect */ | |
1223 | Llmls_retry: | |
1224 | cli /* disable interrupts */ | |
1225 | movl %gs:CPU_ACTIVE_THREAD,%ecx | |
1226 | ||
1227 | /* eax == 0 at this point */ | |
1228 | lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */ | |
1229 | jne Llmls_ilk_fail /* branch on failure to spin loop */ | |
1230 | ||
1231 | movl M_LOCKED,%ecx /* get lock owner */ | |
1232 | testl %ecx,%ecx /* is the mutex locked? */ | |
1233 | jne Llml_fail /* yes, fall back to a normal mutex */ | |
1234 | ||
1235 | Llmls_acquire: | |
1236 | movl $(MUTEX_LOCKED_AS_SPIN),M_LOCKED /* indicate ownership as a spin lock */ | |
1237 | PREEMPTION_DISABLE | |
1238 | popf /* restore interrupt state */ | |
1239 | NONLEAF_RET /* return with the interlock held */ | |
1240 | ||
1241 | Llmls_ilk_fail: | |
1242 | popf /* restore interrupt state */ | |
1243 | pushf /* resave interrupt state on stack */ | |
1244 | ||
1245 | Llmls_ilk_loop: | |
1246 | PAUSE | |
1247 | movl M_ILK,%eax /* read interlock */ | |
1248 | testl %eax,%eax /* unlocked? */ | |
1249 | je Llmls_retry /* yes - go try to grab it */ | |
1250 | ||
1251 | cmpl $(MUTEX_DESTROYED),%eax /* check to see if its marked destroyed */ | |
1252 | jne Llmls_ilk_loop /* no - keep spinning */ | |
1253 | ||
1254 | pushl %edx | |
1255 | call EXT(lck_mtx_interlock_panic) | |
1256 | /* | |
1257 | * shouldn't return from here, but just in case | |
1258 | */ | |
1259 | popl %edx | |
1260 | jmp Llmls_ilk_loop | |
1261 | ||
1262 | ||
1263 | Llmls_eval_ilk: | |
1264 | cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */ | |
1265 | cmove M_PTR,%edx /* If so, take indirection */ | |
1266 | jne Llmls_ilk_loop /* If not, go to spin loop */ | |
1267 | ||
1268 | Llmls_lck_ext: | |
1269 | pushl %esi /* Used to hold the lock group ptr */ | |
1270 | pushl %edi /* Used for stat update records */ | |
1271 | movl MUTEX_GRP(%edx),%esi /* Load lock group */ | |
1272 | xorl %edi,%edi /* Clear stat update records */ | |
1273 | /* 64-bit increment of acquire attempt statistic (per-group) */ | |
1274 | LOCK_IF_ATOMIC_STAT_UPDATES | |
1275 | addl $1, GRP_MTX_STAT_UTIL(%esi) | |
1276 | jnc 1f | |
1277 | incl GRP_MTX_STAT_UTIL+4(%esi) | |
1278 | 1: | |
1279 | movl M_ILK,%eax /* read interlock */ | |
1280 | testl %eax,%eax /* unlocked? */ | |
1281 | jne Llmls_ext_ilk_loop /* no, go to spin loop */ | |
1282 | Llmls_ext_retry: | |
91447636 | 1283 | cli /* disable interrupts */ |
0c530ab8 | 1284 | movl %gs:CPU_ACTIVE_THREAD,%ecx |
9bccf70c | 1285 | |
2d21ac55 A |
1286 | /* eax == 0 at this point */ |
1287 | lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */ | |
1288 | jne Llmls_ext_ilk_fail /* branch on failure to retry */ | |
1289 | ||
1290 | movl M_LOCKED,%ecx /* get lock owner */ | |
1291 | testl %ecx,%ecx /* is the mutex locked? */ | |
1292 | jne Llml_ext_fail /* yes, we lose */ | |
1293 | ||
1294 | popl %edi | |
1295 | popl %esi | |
1296 | jmp Llmls_acquire | |
1297 | ||
1298 | Llmls_ext_ilk_fail: | |
1299 | /* | |
1300 | * Slow path: call out to do the spinning. | |
1301 | */ | |
1302 | movl 8(%esp),%ecx | |
1303 | pushl %ecx | |
1304 | popf /* restore interrupt state */ | |
1305 | ||
1306 | Llmls_ext_ilk_loop: | |
1307 | PAUSE | |
1308 | movl M_ILK,%eax /* read interlock */ | |
1309 | testl %eax,%eax /* unlocked? */ | |
1310 | je Llmls_ext_retry /* yes - go try to grab it */ | |
1311 | ||
1312 | cmpl $(MUTEX_DESTROYED),%eax /* check to see if its marked destroyed */ | |
1313 | jne Llmls_ext_ilk_loop /* no - keep spinning */ | |
1314 | ||
1315 | pushl %edx | |
1316 | call EXT(lck_mtx_interlock_panic) | |
1317 | /* | |
1318 | * shouldn't return from here, but just in case | |
1319 | */ | |
1320 | popl %edx | |
1321 | jmp Llmls_ext_ilk_loop /* no - keep spinning */ | |
1322 | ||
1323 | ||
1324 | ||
1325 | NONLEAF_ENTRY(lck_mtx_lock) | |
1326 | ||
1327 | movl B_ARG0,%edx /* fetch lock pointer */ | |
1328 | pushf /* save interrupt state */ | |
1329 | ||
1330 | CHECK_NO_SIMPLELOCKS() | |
1331 | CHECK_PREEMPTION_LEVEL() | |
1332 | ||
91447636 A |
1333 | movl M_ILK,%eax /* read interlock */ |
1334 | testl %eax,%eax /* unlocked? */ | |
2d21ac55 A |
1335 | jne Llml_eval_ilk /* no, go see if indirect */ |
1336 | Llml_retry: | |
1337 | cli /* disable interrupts */ | |
1338 | movl %gs:CPU_ACTIVE_THREAD,%ecx | |
0c530ab8 | 1339 | |
2d21ac55 | 1340 | /* eax == 0 at this point */ |
91447636 | 1341 | lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */ |
2d21ac55 | 1342 | jne Llml_ilk_fail /* branch on failure to spin loop */ |
91447636 A |
1343 | |
1344 | movl M_LOCKED,%ecx /* get lock owner */ | |
1345 | testl %ecx,%ecx /* is the mutex locked? */ | |
0c530ab8 A |
1346 | jne Llml_fail /* yes, we lose */ |
1347 | Llml_acquire: | |
91447636 A |
1348 | movl %gs:CPU_ACTIVE_THREAD,%ecx |
1349 | movl %ecx,M_LOCKED | |
9bccf70c | 1350 | |
2d21ac55 A |
1351 | cmpw $0,M_WAITERS /* are there any waiters? */ |
1352 | jne Lml_waiters /* yes, more work to do */ | |
0c530ab8 A |
1353 | Llml_return: |
1354 | xorl %eax,%eax | |
1355 | movl %eax,M_ILK | |
1356 | ||
1357 | popf /* restore interrupt state */ | |
2d21ac55 A |
1358 | leave |
1359 | #if CONFIG_DTRACE | |
1360 | LOCKSTAT_LABEL(_lck_mtx_lock_lockstat_patch_point) | |
1361 | ret | |
1362 | /* inherit lock pointer in %edx above */ | |
1363 | LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, %edx) | |
1364 | #endif | |
1365 | ret | |
0c530ab8 A |
1366 | |
1367 | Llml_waiters: | |
91447636 | 1368 | pushl %edx /* save mutex address */ |
9bccf70c | 1369 | pushl %edx |
91447636 | 1370 | call EXT(lck_mtx_lock_acquire) |
9bccf70c | 1371 | addl $4,%esp |
91447636 | 1372 | popl %edx /* restore mutex address */ |
0c530ab8 A |
1373 | jmp Llml_return |
1374 | ||
2d21ac55 | 1375 | Llml_restart: |
0c530ab8 | 1376 | Llml_ilk_fail: |
2d21ac55 A |
1377 | popf /* restore interrupt state */ |
1378 | pushf /* resave interrupt state on stack */ | |
1379 | ||
1380 | Llml_ilk_loop: | |
1381 | PAUSE | |
1382 | movl M_ILK,%eax /* read interlock */ | |
1383 | testl %eax,%eax /* unlocked? */ | |
1384 | je Llml_retry /* yes - go try to grab it */ | |
1385 | ||
1386 | cmpl $(MUTEX_DESTROYED),%eax /* check to see if its marked destroyed */ | |
1387 | jne Llml_ilk_loop /* no - keep spinning */ | |
1388 | ||
1389 | pushl %edx | |
1390 | call EXT(lck_mtx_interlock_panic) | |
0c530ab8 | 1391 | /* |
2d21ac55 | 1392 | * shouldn't return from here, but just in case |
0c530ab8 | 1393 | */ |
2d21ac55 A |
1394 | popl %edx |
1395 | jmp Llml_ilk_loop /* no - keep spinning */ | |
0c530ab8 A |
1396 | |
1397 | Llml_fail: | |
1398 | /* | |
1399 | * Check if the owner is on another processor and therefore | |
1400 | * we should try to spin before blocking. | |
1401 | */ | |
1402 | testl $(OnProc),ACT_SPF(%ecx) | |
1403 | jz Llml_block | |
5d5c5d0d | 1404 | |
0c530ab8 A |
1405 | /* |
1406 | * Here if owner is on another processor: | |
1407 | * - release the interlock | |
1408 | * - spin on the holder until release or timeout | |
1409 | * - in either case re-acquire the interlock | |
1410 | * - if released, acquire it | |
1411 | * - otherwise drop thru to block. | |
1412 | */ | |
91447636 | 1413 | xorl %eax,%eax |
0c530ab8 A |
1414 | movl %eax,M_ILK /* zero interlock */ |
1415 | popf | |
1416 | pushf /* restore interrupt state */ | |
0c530ab8 A |
1417 | pushl %edx /* save mutex address */ |
1418 | pushl %edx | |
2d21ac55 | 1419 | call EXT(lck_mtx_lock_spinwait) |
0c530ab8 A |
1420 | addl $4,%esp |
1421 | popl %edx /* restore mutex address */ | |
89b3af67 | 1422 | |
0c530ab8 | 1423 | /* Re-acquire interlock */ |
0c530ab8 A |
1424 | movl M_ILK,%eax /* read interlock */ |
1425 | testl %eax,%eax /* unlocked? */ | |
2d21ac55 A |
1426 | jne Llml_ilk_refail /* no, go to spin loop */ |
1427 | Llml_reget_retry: | |
1428 | cli /* disable interrupts */ | |
1429 | movl %gs:CPU_ACTIVE_THREAD,%ecx | |
4452a7af | 1430 | |
2d21ac55 | 1431 | /* eax == 0 at this point */ |
0c530ab8 | 1432 | lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */ |
2d21ac55 | 1433 | jne Llml_ilk_refail /* branch on failure to retry */ |
0c530ab8 A |
1434 | |
1435 | movl M_LOCKED,%ecx /* get lock owner */ | |
1436 | testl %ecx,%ecx /* is the mutex free? */ | |
1437 | je Llml_acquire /* yes, acquire */ | |
1438 | ||
1439 | Llml_block: | |
91447636 A |
1440 | CHECK_MYLOCK(M_THREAD) |
1441 | pushl %edx /* save mutex address */ | |
1442 | pushl M_LOCKED | |
9bccf70c | 1443 | pushl %edx /* push mutex address */ |
2d21ac55 A |
1444 | /* |
1445 | * N.B.: lck_mtx_lock_wait is called here with interrupts disabled | |
1446 | * Consider reworking. | |
1447 | */ | |
91447636 A |
1448 | call EXT(lck_mtx_lock_wait) /* wait for the lock */ |
1449 | addl $8,%esp | |
1450 | popl %edx /* restore mutex address */ | |
2d21ac55 | 1451 | jmp Llml_restart /* and start over */ |
0c530ab8 A |
1452 | |
1453 | Llml_ilk_refail: | |
2d21ac55 A |
1454 | popf /* restore interrupt state */ |
1455 | pushf /* resave interrupt state on stack */ | |
1456 | ||
1457 | Llml_ilk_reloop: | |
1458 | PAUSE | |
1459 | movl M_ILK,%eax /* read interlock */ | |
1460 | testl %eax,%eax /* unlocked? */ | |
1461 | je Llml_reget_retry /* yes - go try to grab it */ | |
1462 | ||
1463 | cmpl $(MUTEX_DESTROYED),%eax /* check to see if its marked destroyed */ | |
1464 | jne Llml_ilk_reloop /* no - keep spinning */ | |
1465 | ||
1466 | pushl %edx | |
1467 | call EXT(lck_mtx_interlock_panic) | |
0c530ab8 | 1468 | /* |
2d21ac55 | 1469 | * shouldn't return from here, but just in case |
0c530ab8 | 1470 | */ |
2d21ac55 A |
1471 | popl %edx |
1472 | jmp Llml_ilk_reloop /* no - keep spinning */ | |
9bccf70c | 1473 | |
2d21ac55 A |
1474 | |
1475 | Llml_eval_ilk: | |
1476 | cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */ | |
1477 | cmove M_PTR,%edx /* If so, take indirection */ | |
1478 | jne Llml_ilk_loop /* If not, go to spin loop */ | |
1479 | ||
1480 | /* | |
1481 | * Entry into statistics codepath for lck_mtx_lock: | |
1482 | * EDX: real lock pointer | |
1483 | * first dword on stack contains flags | |
1484 | */ | |
1485 | ||
1486 | /* Enable this preprocessor define to record the first miss alone | |
1487 | * By default, we count every miss, hence multiple misses may be | |
1488 | * recorded for a single lock acquire attempt via lck_mtx_lock | |
1489 | */ | |
1490 | #undef LOG_FIRST_MISS_ALONE | |
1491 | ||
1492 | /* | |
1493 | * N.B.: On x86, statistics are currently recorded for all indirect mutexes. | |
1494 | * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained | |
1495 | * as a 64-bit quantity (this matches the existing PowerPC implementation, | |
1496 | * and the new x86 specific statistics are also maintained as 32-bit | |
1497 | * quantities). | |
1498 | */ | |
1499 | ||
1500 | Llml_lck_ext: | |
1501 | pushl %esi /* Used to hold the lock group ptr */ | |
1502 | pushl %edi /* Used for stat update records */ | |
1503 | movl MUTEX_GRP(%edx),%esi /* Load lock group */ | |
1504 | xorl %edi,%edi /* Clear stat update records */ | |
1505 | /* 64-bit increment of acquire attempt statistic (per-group) */ | |
1506 | LOCK_IF_ATOMIC_STAT_UPDATES | |
1507 | addl $1, GRP_MTX_STAT_UTIL(%esi) | |
1508 | jnc 1f | |
1509 | incl GRP_MTX_STAT_UTIL+4(%esi) | |
1510 | 1: | |
1511 | movl M_ILK,%eax /* read interlock */ | |
1512 | testl %eax,%eax /* unlocked? */ | |
1513 | jne Llml_ext_ilk_loop /* no, go to spin loop */ | |
1514 | Llml_ext_get_hw: | |
1515 | cli | |
1516 | movl %gs:CPU_ACTIVE_THREAD,%ecx | |
1517 | ||
1518 | /* eax == 0 at this point */ | |
1519 | lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */ | |
1520 | jne Llml_ext_ilk_fail /* branch on failure to retry */ | |
1521 | ||
1522 | movl M_LOCKED,%ecx /* get lock owner */ | |
1523 | testl %ecx,%ecx /* is the mutex locked? */ | |
1524 | jne Llml_ext_fail /* yes, we lose */ | |
1525 | ||
1526 | Llml_ext_acquire: | |
1527 | movl %gs:CPU_ACTIVE_THREAD,%ecx | |
1528 | movl %ecx,M_LOCKED | |
1529 | ||
1530 | cmpw $0,M_WAITERS /* are there any waiters? */ | |
1531 | jne Llml_ext_waiters /* yes, more work to do */ | |
1532 | Llml_ext_return: | |
1533 | xorl %eax,%eax | |
1534 | movl %eax,M_ILK | |
1535 | ||
1536 | popl %edi | |
1537 | popl %esi | |
1538 | popf /* restore interrupt state */ | |
1539 | leave | |
1540 | #if CONFIG_DTRACE | |
1541 | LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point) | |
1542 | ret | |
1543 | /* inherit lock pointer in %edx above */ | |
1544 | LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, %edx) | |
1545 | #endif | |
1546 | ret | |

Llml_ext_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Llml_ext_return

Llml_ext_restart:
Llml_ext_ilk_fail:
	movl	8(%esp),%ecx
	pushl	%ecx
	popf				/* restore interrupt state */

Llml_ext_ilk_loop:
	PAUSE
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Llml_ext_get_hw		/* yes - go try to grab it */

	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if it's marked destroyed */
	jne	Llml_ext_ilk_loop	/* no - keep spinning */

	pushl	%edx
	call	EXT(lck_mtx_interlock_panic)
	/*
	 * shouldn't return from here, but just in case
	 */
	popl	%edx
	jmp	Llml_ext_ilk_loop


Llml_ext_fail:
#ifdef	LOG_FIRST_MISS_ALONE
	testl	$1, %edi
	jnz	1f
#endif	/* LOG_FIRST_MISS_ALONE */
	/* Record that a lock acquire attempt missed (per-group statistic) */
	LOCK_IF_ATOMIC_STAT_UPDATES
	incl	GRP_MTX_STAT_MISS(%esi)
#ifdef	LOG_FIRST_MISS_ALONE
	orl	$1, %edi
#endif	/* LOG_FIRST_MISS_ALONE */
1:
	/*
	 * Check if the owner is on another processor; if so, we should
	 * try to spin before blocking.
	 */
	testl	$(OnProc),ACT_SPF(%ecx)
	jnz	2f
	/*
	 * Record the "direct wait" statistic: a miss that proceeds to
	 * block immediately, without spinning, because the owner of the
	 * mutex isn't running on another processor at the time of the check.
	 */
	LOCK_IF_ATOMIC_STAT_UPDATES
	incl	GRP_MTX_STAT_DIRECT_WAIT(%esi)
	jmp	Llml_ext_block
2:
	/*
	 * Here if owner is on another processor:
	 *  - release the interlock
	 *  - spin on the holder until release or timeout
	 *  - in either case re-acquire the interlock
	 *  - if released, acquire it
	 *  - otherwise drop through to block.
	 */
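	/*
	 * Illustrative C sketch of this adaptive policy (hypothetical
	 * helper names, not the kernel's actual functions):
	 *
	 *	if (owner_running_elsewhere(mutex)) {
	 *		release_interlock(mutex);
	 *		lck_mtx_lock_spinwait(mutex);	// spin, bounded
	 *		take_interlock(mutex);
	 *		if (mutex->owner == NULL)
	 *			acquire(mutex);		// released: grab it
	 *		else
	 *			block(mutex);		// still held: sleep
	 *	} else {
	 *		block(mutex);	// "direct wait": no point spinning
	 *	}
	 */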
	xorl	%eax,%eax
	movl	%eax,M_ILK		/* zero interlock */

	pushl	8(%esp)			/* Make another copy of EFLAGS image */
	popf				/* Restore interrupt state */
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_spinwait)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */

	/* Re-acquire interlock */
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llml_ext_ilk_refail	/* no, go to spin loop */
Llml_ext_reget_retry:
	cli				/* disable interrupts */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	/* eax == 0 at this point */
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llml_ext_ilk_refail	/* branch on failure to spin loop */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex free? */
	je	Llml_ext_acquire	/* yes, acquire */

Llml_ext_block:
	/* If we wanted to count waits just once per lock acquire, we'd
	 * skip over the stat update here.
	 */
	LOCK_IF_ATOMIC_STAT_UPDATES
	/* Record that a lock miss proceeded to block */
	incl	GRP_MTX_STAT_WAIT(%esi)
1:
	CHECK_MYLOCK(M_THREAD)
	pushl	%edx			/* save mutex address */
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	/*
	 * N.B.: lck_mtx_lock_wait is called here with interrupts disabled.
	 * Consider reworking.
	 */
	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
	addl	$8,%esp
	popl	%edx			/* restore mutex address */
	jmp	Llml_ext_restart	/* and start over */

Llml_ext_ilk_refail:
	movl	8(%esp),%ecx
	pushl	%ecx
	popf				/* restore interrupt state */

Llml_ext_ilk_reloop:
	PAUSE
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Llml_ext_reget_retry	/* yes - go try to grab it */

	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if it's marked destroyed */
	jne	Llml_ext_ilk_reloop	/* no - keep spinning */

	pushl	%edx
	call	EXT(lck_mtx_interlock_panic)
	/*
	 * shouldn't return from here, but just in case
	 */
	popl	%edx
	jmp	Llml_ext_ilk_reloop


NONLEAF_ENTRY(lck_mtx_try_lock_spin)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	pushf				/* save interrupt state */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llmts_eval_ilk		/* no, go see if indirect */
Llmts_retry:
	cli				/* disable interrupts */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	/* eax == 0 at this point */
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llmts_ilk_fail		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Llmt_fail		/* yes, we lose */

	movl	$(MUTEX_LOCKED_AS_SPIN),M_LOCKED	/* no, indicate ownership as a spin lock */
	PREEMPTION_DISABLE		/* and return with interlock held */

	movl	$1,%eax			/* return success */
	popf				/* restore interrupt state */
	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_lck_mtx_try_lock_spin_lockstat_patch_point)
	ret
	/* inherit lock pointer in %edx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, %edx)
	movl	$1,%eax			/* return success */
#endif
	ret

Llmts_ilk_fail:
	popf				/* restore interrupt state */
	pushf				/* resave interrupt state */

Llmts_ilk_loop:
	PAUSE
	/*
	 * We need to do this check outside of the interlock: if this
	 * lock is held as a simple lock, we won't be able to take the
	 * interlock at all.
	 */
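	/*
	 * Sketch of that ordering in C (illustrative, hypothetical names):
	 *
	 *	for (;;) {
	 *		cpu_pause();
	 *		if (mutex->owner != NULL)	// read without the ILK:
	 *			return FALSE;		// a spin holder owns it
	 *		if (mutex->ilk == 0)
	 *			goto retry;		// try the ILK again
	 *	}
	 */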
	movl	M_LOCKED,%eax		/* get lock owner */
	testl	%eax,%eax		/* is the mutex locked? */
	jne	Llmt_fail_no_ilk	/* yes, go return failure */

	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Llmts_retry		/* yes - go try to grab it */

	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if it's marked destroyed */
	jne	Llmts_ilk_loop		/* no - keep spinning */

	pushl	%edx
	call	EXT(lck_mtx_interlock_panic)
	/*
	 * shouldn't return from here, but just in case
	 */
	popl	%edx
	jmp	Llmts_ilk_loop

Llmts_eval_ilk:
	cmpl	$(MUTEX_IND),M_ITAG	/* Is this an indirect mutex? */
	cmove	M_PTR,%edx		/* If so, take indirection */
	jne	Llmts_ilk_loop		/* If not, go to spin loop */

	/*
	 * bump counter on indirect lock
	 */
	pushl	%esi			/* Used to hold the lock group ptr */
	movl	MUTEX_GRP(%edx),%esi	/* Load lock group */
	/* 64-bit increment of acquire attempt statistic (per-group) */
	LOCK_IF_ATOMIC_STAT_UPDATES
	addl	$1, GRP_MTX_STAT_UTIL(%esi)
	jnc	1f
	incl	GRP_MTX_STAT_UTIL+4(%esi)
1:
	popl	%esi
	jmp	Llmts_ilk_loop


NONLEAF_ENTRY(lck_mtx_try_lock)

	movl	B_ARG0,%edx		/* fetch lock pointer */
	pushf				/* save interrupt state */

	CHECK_NO_SIMPLELOCKS()
	CHECK_PREEMPTION_LEVEL()

	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llmt_eval_ilk		/* no, go see if indirect */
Llmt_retry:
	cli				/* disable interrupts */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	/* eax == 0 at this point */
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llmt_ilk_fail		/* branch on failure to retry */

	movl	M_LOCKED,%ecx		/* get lock owner */
	testl	%ecx,%ecx		/* is the mutex locked? */
	jne	Llmt_fail		/* yes, we lose */
Llmt_acquire:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	%ecx,M_LOCKED

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Llmt_waiters		/* yes, more work to do */
Llmt_return:
	xorl	%eax,%eax
	movl	%eax,M_ILK

	popf				/* restore interrupt state */

	movl	$1,%eax			/* return success */
	leave
#if	CONFIG_DTRACE
	/* DTrace probe: LS_LCK_MTX_TRY_LOCK_ACQUIRE */
	LOCKSTAT_LABEL(_lck_mtx_try_lock_lockstat_patch_point)
	ret
	/* inherit lock pointer in %edx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, %edx)
	movl	$1,%eax			/* return success */
#endif
	ret

Llmt_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Llmt_return

Llmt_ilk_fail:
	popf				/* restore interrupt state */
	pushf				/* resave interrupt state */

Llmt_ilk_loop:
	PAUSE
	/*
	 * We need to do this check outside of the interlock: if this
	 * lock is held as a simple lock, we won't be able to take the
	 * interlock at all.
	 */
	movl	M_LOCKED,%eax		/* get lock owner */
	testl	%eax,%eax		/* is the mutex locked? */
	jne	Llmt_fail_no_ilk	/* yes, go return failure */

	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Llmt_retry		/* yes - go try to grab it */

	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if it's marked destroyed */
	jne	Llmt_ilk_loop		/* no - keep spinning */

	pushl	%edx
	call	EXT(lck_mtx_interlock_panic)
	/*
	 * shouldn't return from here, but just in case
	 */
	popl	%edx
	jmp	Llmt_ilk_loop

Llmt_fail:
	xorl	%eax,%eax		/* Zero interlock value */
	movl	%eax,M_ILK

Llmt_fail_no_ilk:
	popf				/* restore interrupt state */

	cmpl	%edx,B_ARG0
	jne	Llmt_fail_indirect

	xorl	%eax,%eax
	/* Note that we don't record a DTrace event for trying and missing */
	NONLEAF_RET

Llmt_fail_indirect:
	pushl	%esi			/* Used to hold the lock group ptr */
	movl	MUTEX_GRP(%edx),%esi	/* Load lock group */

	/* Record mutex acquire attempt miss statistic */
	LOCK_IF_ATOMIC_STAT_UPDATES
	incl	GRP_MTX_STAT_MISS(%esi)

	popl	%esi
	xorl	%eax,%eax
	NONLEAF_RET

Llmt_eval_ilk:
	cmpl	$(MUTEX_IND),M_ITAG	/* Is this an indirect mutex? */
	cmove	M_PTR,%edx		/* If so, take indirection */
	jne	Llmt_ilk_loop		/* If not, go to spin loop */

	/*
	 * bump counter for indirect lock
	 */
	pushl	%esi			/* Used to hold the lock group ptr */
	movl	MUTEX_GRP(%edx),%esi	/* Load lock group */

	/* 64-bit increment of acquire attempt statistic (per-group) */
	LOCK_IF_ATOMIC_STAT_UPDATES
	addl	$1, GRP_MTX_STAT_UTIL(%esi)
	jnc	1f
	incl	GRP_MTX_STAT_UTIL+4(%esi)
1:
	popl	%esi
	jmp	Llmt_ilk_loop


LEAF_ENTRY(lck_mtx_convert_spin)
	movl	L_ARG0,%edx		/* fetch lock pointer */

	cmpl	$(MUTEX_IND),M_ITAG	/* Is this an indirect mutex? */
	cmove	M_PTR,%edx		/* If so, take indirection */

	movl	M_LOCKED,%ecx		/* is this the spin variant of the mutex? */
	cmpl	$(MUTEX_LOCKED_AS_SPIN),%ecx
	jne	Llmcs_exit		/* already owned as a mutex, just return */

	movl	M_ILK,%ecx		/* convert from spin version to mutex */
	movl	%ecx,M_LOCKED		/* take control of the mutex */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Llmcs_waiters		/* yes, more work to do */

Llmcs_return:
	xorl	%ecx,%ecx
	movl	%ecx,M_ILK		/* clear interlock */
	PREEMPTION_ENABLE
Llmcs_exit:
	LEAF_RET

Llmcs_waiters:
	pushl	%edx			/* save mutex address */
	pushl	%edx
	call	EXT(lck_mtx_lock_acquire)
	addl	$4,%esp
	popl	%edx			/* restore mutex address */
	jmp	Llmcs_return
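
	/*
	 * C sketch of the conversion (illustrative, hypothetical field
	 * names). A spin-held mutex stores the owning thread in the
	 * interlock and MUTEX_LOCKED_AS_SPIN in the owner field:
	 *
	 *	if (mutex->locked != MUTEX_LOCKED_AS_SPIN)
	 *		return;			// already a full mutex
	 *	mutex->locked = mutex->ilk;	// ILK holder becomes owner
	 *	if (mutex->waiters)
	 *		lck_mtx_lock_acquire(mutex);
	 *	mutex->ilk = 0;			// release the interlock
	 *	enable_preemption();
	 */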


NONLEAF_ENTRY(lck_mtx_unlock)

	movl	B_ARG0,%edx		/* fetch lock pointer */

	cmpl	$(MUTEX_IND),M_ITAG	/* Is this an indirect mutex? */
	cmove	M_PTR,%edx		/* If so, take indirection */

	movl	M_LOCKED,%ecx		/* is this the spin variant of the mutex? */
	cmpl	$(MUTEX_LOCKED_AS_SPIN),%ecx
	jne	Llmu_enter		/* no, go treat like a real mutex */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Llmus_wakeup		/* yes, more work to do */

Llmu_drop_ilk:
	xorl	%eax,%eax
	movl	%eax,M_LOCKED		/* clear spin indicator */
	movl	%eax,M_ILK		/* release the interlock */

	PREEMPTION_ENABLE		/* and re-enable preemption */
	leave
#if	CONFIG_DTRACE
	/* DTrace: LS_LCK_MTX_UNLOCK_RELEASE */
	LOCKSTAT_LABEL(_lck_mtx_unlock_lockstat_patch_point)
	ret
	/* inherit lock pointer in %edx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %edx)
#endif
	ret

Llmus_wakeup:
	pushl	%edx			/* save mutex address */
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlockspin_wakeup)	/* yes, wake a thread */
	addl	$4,%esp
	popl	%edx			/* restore mutex pointer */
	jmp	Llmu_drop_ilk

Llmu_enter:
	pushf				/* save interrupt state */

	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llmu_ilk_loop		/* no - go to spin loop */
Llmu_retry:
	cli				/* disable interrupts */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	/* eax == 0 at this point */
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llmu_ilk_fail		/* branch on failure to spin loop */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Llmu_wakeup		/* yes, more work to do */

Llmu_doit:
	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */
	movl	%ecx,M_ILK		/* clear the interlock */

	popf				/* restore interrupt state */
	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_lck_mtx_unlock2_lockstat_patch_point)
	ret
	/* inherit lock pointer in %edx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %edx)
#endif
	ret

Llmu_ilk_fail:
	popf				/* restore interrupt state */
	pushf				/* resave interrupt state */

Llmu_ilk_loop:
	PAUSE
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Llmu_retry		/* yes - go try to grab it */

	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if it's marked destroyed */
	jne	Llmu_ilk_loop		/* no - keep spinning */

	pushl	%edx
	call	EXT(lck_mtx_interlock_panic)
	/*
	 * shouldn't return from here, but just in case
	 */
	popl	%edx
	jmp	Llmu_ilk_loop

Llmu_wakeup:
	pushl	%edx			/* save mutex address */
	pushl	M_LOCKED
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlock_wakeup)	/* yes, wake a thread */
	addl	$8,%esp
	popl	%edx			/* restore mutex pointer */
	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */

	movl	%ecx,M_ILK

	popf				/* restore interrupt state */

	leave
#if	CONFIG_DTRACE
	/* DTrace: LS_LCK_MTX_EXT_UNLOCK_RELEASE */
	LOCKSTAT_LABEL(_lck_mtx_ext_unlock_lockstat_patch_point)
	ret
	/* inherit lock pointer in %edx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, %edx)
#endif
	ret


LEAF_ENTRY(lck_mtx_ilk_unlock)
	movl	L_ARG0,%edx		/* no indirection here */

	xorl	%eax,%eax
	movl	%eax,M_ILK

	LEAF_RET

LEAF_ENTRY(_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption: preemption_level(%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET


LEAF_ENTRY(_mp_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_mp_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption: preemption_level (%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET


LEAF_ENTRY(i_bit_set)
	movl	L_ARG0,%edx
	movl	L_ARG1,%eax
	lock
	bts	%edx,(%eax)
	LEAF_RET

LEAF_ENTRY(i_bit_clear)
	movl	L_ARG0,%edx
	movl	L_ARG1,%eax
	lock
	btr	%edx,(%eax)
	LEAF_RET


LEAF_ENTRY(bit_lock)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
1:
	lock
	bts	%ecx,(%eax)
	jb	1b
	LEAF_RET


LEAF_ENTRY(bit_lock_try)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
	lock
	bts	%ecx,(%eax)
	jb	bit_lock_failed
	LEAF_RET		/* %eax better not be null ! */
bit_lock_failed:
	xorl	%eax,%eax
	LEAF_RET

LEAF_ENTRY(bit_unlock)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
	lock
	btr	%ecx,(%eax)
	LEAF_RET
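
	/*
	 * Rough C equivalent of bit_lock/bit_lock_try/bit_unlock
	 * (illustrative; uses a compiler builtin in place of lock;bts,
	 * and assumes the bit index stays within one 32-bit word):
	 *
	 *	void bit_lock(int bit, uint32_t *word) {
	 *		while (__atomic_fetch_or(word, 1u << bit,
	 *		    __ATOMIC_ACQUIRE) & (1u << bit))
	 *			;	// bit already set: someone holds it
	 *	}
	 *	int bit_lock_try(int bit, uint32_t *word) {
	 *		return !(__atomic_fetch_or(word, 1u << bit,
	 *		    __ATOMIC_ACQUIRE) & (1u << bit));
	 *	}
	 *	void bit_unlock(int bit, uint32_t *word) {
	 *		__atomic_fetch_and(word, ~(1u << bit),
	 *		    __ATOMIC_RELEASE);
	 *	}
	 */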

/*
 * Atomic primitives, prototyped in kern/simple_lock.h
 */
LEAF_ENTRY(hw_atomic_add)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	L_ARG1, %eax		/* Load addend */
	movl	%eax, %edx
	lock
	xaddl	%eax, (%ecx)		/* Atomic exchange and add */
	addl	%edx, %eax		/* Calculate result */
	LEAF_RET

LEAF_ENTRY(hw_atomic_sub)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	L_ARG1, %eax		/* Load subtrahend */
	negl	%eax
	movl	%eax, %edx
	lock
	xaddl	%eax, (%ecx)		/* Atomic exchange and add */
	addl	%edx, %eax		/* Calculate result */
	LEAF_RET
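
	/*
	 * xaddl leaves the *old* memory value in %eax, so the trailing
	 * addl re-applies the addend to return the *new* value.
	 * Roughly, in C (illustrative):
	 *
	 *	uint32_t hw_atomic_add(uint32_t *dest, uint32_t delta) {
	 *		uint32_t old = __atomic_fetch_add(dest, delta,
	 *		    __ATOMIC_SEQ_CST);	// lock; xaddl
	 *		return old + delta;	// trailing addl
	 *	}
	 *
	 * hw_atomic_sub is the same with delta negated.
	 */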

LEAF_ENTRY(hw_atomic_or)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	(%ecx), %eax
1:
	movl	L_ARG1, %edx		/* Load mask */
	orl	%eax, %edx
	lock
	cmpxchgl	%edx, (%ecx)	/* Atomic CAS */
	jne	1b
	movl	%edx, %eax		/* Result */
	LEAF_RET
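
	/*
	 * The loop relies on cmpxchgl reloading %eax with the current
	 * memory value on failure, so each retry works on a fresh
	 * snapshot. Roughly, in C (illustrative):
	 *
	 *	uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask) {
	 *		uint32_t old = *dest, new;
	 *		do {
	 *			new = old | mask;
	 *		} while (!__atomic_compare_exchange_n(dest, &old,
	 *		    new, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
	 *		return new;
	 *	}
	 */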
/*
 * A variant of hw_atomic_or which doesn't return a value.
 * A single locked orl suffices, so it avoids the CAS loop
 * and is comparatively more efficient.
 */

LEAF_ENTRY(hw_atomic_or_noret)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	L_ARG1, %edx		/* Load mask */
	lock
	orl	%edx, (%ecx)		/* Atomic OR */
	LEAF_RET

LEAF_ENTRY(hw_atomic_and)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	(%ecx), %eax
1:
	movl	L_ARG1, %edx		/* Load mask */
	andl	%eax, %edx
	lock
	cmpxchgl	%edx, (%ecx)	/* Atomic CAS */
	jne	1b
	movl	%edx, %eax		/* Result */
	LEAF_RET
/*
 * A variant of hw_atomic_and which doesn't return a value.
 * A single locked andl suffices, so it avoids the CAS loop
 * and is comparatively more efficient.
 */

LEAF_ENTRY(hw_atomic_and_noret)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	L_ARG1, %edx		/* Load mask */
	lock
	andl	%edx, (%ecx)		/* Atomic AND */
	LEAF_RET