/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/lock.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Locking primitives implementation
 */

#include <mach_kdb.h>
#include <mach_ldebug.h>

#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/kalloc.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/sched_prim.h>
#include <kern/xpr.h>
#include <kern/debug.h>
#include <string.h>

#if	MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif	/* MACH_KDB */

#include <i386/machine_cpu.h>

#include <sys/kdebug.h>

#define	LCK_RW_LCK_EXCLUSIVE_CODE	0x100
#define	LCK_RW_LCK_EXCLUSIVE1_CODE	0x101
#define	LCK_RW_LCK_SHARED_CODE		0x102
#define	LCK_RW_LCK_SH_TO_EX_CODE	0x103
#define	LCK_RW_LCK_SH_TO_EX1_CODE	0x104
#define	LCK_RW_LCK_EX_TO_SH_CODE	0x105

#define	LCK_MTX_LCK_SPIN		0x200

#define	ANY_LOCK_DEBUG	(USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)

unsigned int	LcksOpts = 0;
unsigned int	lock_wait_time[2] = { (unsigned int)-1, 100 };

/* Forwards */

#if	MACH_KDB
void	db_print_simple_lock(
			simple_lock_t	addr);

void	db_print_mutex(
			mutex_t		*addr);
#endif	/* MACH_KDB */


#if	USLOCK_DEBUG
/*
 *	Perform simple lock checks.
 */
int	uslock_check = 1;
int	max_lock_loops = 100000000;
decl_simple_lock_data(extern , printf_lock)
decl_simple_lock_data(extern , panic_lock)
#if	MACH_KDB
decl_simple_lock_data(extern , kdb_lock)
#endif	/* MACH_KDB */
#endif	/* USLOCK_DEBUG */


/*
 *	We often want to know the addresses of the callers
 *	of the various lock routines.  However, this information
 *	is only used for debugging and statistics.
 */
typedef void	*pc_t;
#define	INVALID_PC	((void *) VM_MAX_KERNEL_ADDRESS)
#define	INVALID_THREAD	((void *) VM_MAX_KERNEL_ADDRESS)
#if	ANY_LOCK_DEBUG
#define	OBTAIN_PC(pc,l)	((pc) = (void *) GET_RETURN_PC(&(l)))
#define	DECL_PC(pc)	pc_t pc;
#else	/* ANY_LOCK_DEBUG */
#define	DECL_PC(pc)
#ifdef	lint
/*
 *	Eliminate lint complaints about unused local pc variables.
 */
#define	OBTAIN_PC(pc,l)	++pc
#else	/* lint */
#define	OBTAIN_PC(pc,l)
#endif	/* lint */
#endif	/* ANY_LOCK_DEBUG */


/*
 *	Portable lock package implementation of usimple_locks.
 */

#if	USLOCK_DEBUG
#define	USLDBG(stmt)	stmt
void		usld_lock_init(usimple_lock_t, unsigned short);
void		usld_lock_pre(usimple_lock_t, pc_t);
void		usld_lock_post(usimple_lock_t, pc_t);
void		usld_unlock(usimple_lock_t, pc_t);
void		usld_lock_try_pre(usimple_lock_t, pc_t);
void		usld_lock_try_post(usimple_lock_t, pc_t);
int		usld_lock_common_checks(usimple_lock_t, char *);
#else	/* USLOCK_DEBUG */
#define	USLDBG(stmt)
#endif	/* USLOCK_DEBUG */

/*
 * Routine:	lck_spin_alloc_init
 */
lck_spin_t *
lck_spin_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_spin_t	*lck;

	if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
		lck_spin_init(lck, grp, attr);

	return(lck);
}

/*
 * Routine:	lck_spin_free
 */
void
lck_spin_free(
	lck_spin_t	*lck,
	lck_grp_t	*grp)
{
	lck_spin_destroy(lck, grp);
	kfree(lck, sizeof(lck_spin_t));
}

/*
 * Routine:	lck_spin_init
 */
void
lck_spin_init(
	lck_spin_t	*lck,
	lck_grp_t	*grp,
	__unused lck_attr_t	*attr)
{
	usimple_lock_init((usimple_lock_t) lck, 0);
	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
}

/*
 * Routine:	lck_spin_destroy
 */
void
lck_spin_destroy(
	lck_spin_t	*lck,
	lck_grp_t	*grp)
{
	if (lck->lck_spin_data[0] == LCK_SPIN_TAG_DESTROYED)
		return;
	lck->lck_spin_data[0] = LCK_SPIN_TAG_DESTROYED;
	lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
	lck_grp_deallocate(grp);
	return;
}

/*
 * Routine:	lck_spin_lock
 */
void
lck_spin_lock(
	lck_spin_t	*lck)
{
	usimple_lock((usimple_lock_t) lck);
}

/*
 * Routine:	lck_spin_unlock
 */
void
lck_spin_unlock(
	lck_spin_t	*lck)
{
	usimple_unlock((usimple_lock_t) lck);
}


/*
 * Routine:	lck_spin_try_lock
 */
boolean_t
lck_spin_try_lock(
	lck_spin_t	*lck)
{
	return(usimple_lock_try((usimple_lock_t) lck));
}
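
/*
 * Usage sketch (illustrative only): typical life cycle of a spin lock
 * allocated through the interface above.  Assumes a lock group created
 * elsewhere with lck_grp_alloc_init(); identifiers such as "my_grp" and
 * "my_spin" are placeholders, not names defined in this file.
 *
 *	lck_spin_t	*my_spin;
 *
 *	my_spin = lck_spin_alloc_init(my_grp, LCK_ATTR_NULL);
 *	lck_spin_lock(my_spin);		(spins; returns with preemption disabled)
 *	...short, non-blocking critical section...
 *	lck_spin_unlock(my_spin);	(preemption state restored by hw_lock layer)
 *	lck_spin_free(my_spin, my_grp);
 */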

/*
 *	Initialize a usimple_lock.
 *
 *	No change in preemption state.
 */
void
usimple_lock_init(
	usimple_lock_t	l,
	__unused unsigned short	tag)
{
#ifndef	MACHINE_SIMPLE_LOCK
	USLDBG(usld_lock_init(l, tag));
	hw_lock_init(&l->interlock);
#else
	simple_lock_init((simple_lock_t)l, tag);
#endif
}


/*
 *	Acquire a usimple_lock.
 *
 *	Returns with preemption disabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	pc_t		pc = NULL;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));

	if (!hw_lock_to(&l->interlock, LockTimeOut))	/* Try to get the lock with a timeout */
		panic("simple lock deadlock detection - l=%08X, cpu=%d, ret=%08X", l, cpu_number(), pc);

	USLDBG(usld_lock_post(l, pc));
#else
	simple_lock((simple_lock_t)l);
#endif
}


/*
 *	Release a usimple_lock.
 *
 *	Returns with preemption enabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_unlock(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	DECL_PC(pc);

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	hw_lock_unlock(&l->interlock);
#else
	simple_unlock_rwmb((simple_lock_t)l);
#endif
}


/*
 *	Conditionally acquire a usimple_lock.
 *
 *	On success, returns with preemption disabled.
 *	On failure, returns with preemption in the same state
 *	as when first invoked.  Note that the hw_lock routines
 *	are responsible for maintaining preemption state.
 *
 *	XXX No stats are gathered on a miss; I preserved this
 *	behavior from the original assembly-language code, but
 *	doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	DECL_PC(pc);
	unsigned int	success;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
	}
	return success;
#else
	return(simple_lock_try((simple_lock_t)l));
#endif
}

#if	USLOCK_DEBUG
/*
 *	States of a usimple_lock.  The default when initializing
 *	a usimple_lock is setting it up for debug checking.
 */
#define	USLOCK_CHECKED		0x0001		/* lock is being checked */
#define	USLOCK_TAKEN		0x0002		/* lock has been taken */
#define	USLOCK_INIT		0xBAA0		/* lock has been initialized */
#define	USLOCK_INITIALIZED	(USLOCK_INIT|USLOCK_CHECKED)
#define	USLOCK_CHECKING(l)	(uslock_check &&			\
				 ((l)->debug.state & USLOCK_CHECKED))

/*
 *	Trace activities of a particularly interesting lock.
 */
void	usl_trace(usimple_lock_t, int, pc_t, const char *);


/*
 *	Initialize the debugging information contained
 *	in a usimple_lock.
 */
void
usld_lock_init(
	usimple_lock_t	l,
	__unused unsigned short	tag)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("lock initialization: null lock pointer");
	l->lock_type = USLOCK_TAG;
	l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
	l->debug.lock_cpu = l->debug.unlock_cpu = 0;
	l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
	l->debug.duration[0] = l->debug.duration[1] = 0;
	l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
	l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
}


/*
 *	These checks apply to all usimple_locks, not just
 *	those with USLOCK_CHECKED turned on.
 */
int
usld_lock_common_checks(
	usimple_lock_t	l,
	char		*caller)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("%s: null lock pointer", caller);
	if (l->lock_type != USLOCK_TAG)
		panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
	if (!(l->debug.state & USLOCK_INIT))
		panic("%s: 0x%x is not an initialized lock",
		      caller, (integer_t) l);
	return USLOCK_CHECKING(l);
}


/*
 *	Debug checks on a usimple_lock just before attempting
 *	to acquire it.
 */
/* ARGSUSED */
void
usld_lock_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	caller[] = "usimple_lock";


	if (!usld_lock_common_checks(l, caller))
		return;

	/*
	 * Note that we have a weird case where we are getting a lock when we are
	 * in the process of putting the system to sleep.  We are running with no
	 * current threads, therefore we can't tell if we are trying to retake a lock
	 * we have or someone on the other processor has it.  Therefore we just
	 * ignore this test if the locking thread is 0.
	 */

	if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
	    l->debug.lock_thread == (void *) current_thread()) {
		printf("%s: lock 0x%x already locked (at 0x%x) by",
		       caller, (integer_t) l, l->debug.lock_pc);
		printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
		       l->debug.lock_thread, pc);
		panic(caller);
	}
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}


/*
 *	Debug checks on a usimple_lock just after acquiring it.
 *
 *	Pre-emption has been disabled at this point,
 *	so we are safe in using cpu_number.
 */
void
usld_lock_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	caller[] = "successful usimple_lock";


	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s: lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s: lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *)current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_trace(l, mycpu, pc, caller);
}


/*
 *	Debug checks on a usimple_lock just before
 *	releasing it.  Note that the caller has not
 *	yet released the hardware lock.
 *
 *	Preemption is still disabled, so there's
 *	no problem using cpu_number.
 */
void
usld_unlock(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	caller[] = "usimple_unlock";


	if (!usld_lock_common_checks(l, caller))
		return;

	mycpu = cpu_number();

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s: lock 0x%x hasn't been taken",
		      caller, (integer_t) l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s: unlocking lock 0x%x, owned by thread 0x%x",
		      caller, (integer_t) l, l->debug.lock_thread);
	if (l->debug.lock_cpu != mycpu) {
		printf("%s: unlocking lock 0x%x on cpu 0x%x",
		       caller, (integer_t) l, mycpu);
		printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
		panic(caller);
	}
	usl_trace(l, mycpu, pc, caller);

	l->debug.unlock_thread = l->debug.lock_thread;
	l->debug.lock_thread = INVALID_PC;
	l->debug.state &= ~USLOCK_TAKEN;
	l->debug.unlock_pc = pc;
	l->debug.unlock_cpu = mycpu;
}


/*
 *	Debug checks on a usimple_lock just before
 *	attempting to acquire it.
 *
 *	Preemption isn't guaranteed to be disabled.
 */
void
usld_lock_try_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	caller[] = "usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}


/*
 *	Debug checks on a usimple_lock just after
 *	successfully attempting to acquire it.
 *
 *	Preemption has been disabled by the
 *	lock acquisition attempt, so it's safe
 *	to use cpu_number.
 */
void
usld_lock_try_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	caller[] = "successful usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s: lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s: lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *) current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_trace(l, mycpu, pc, caller);
}


/*
 *	For very special cases, set traced_lock to point to a
 *	specific lock of interest.  The result is a series of
 *	XPRs showing lock operations on that lock.  The lock_seq
 *	value is used to show the order of those operations.
 */
usimple_lock_t		traced_lock;
unsigned int		lock_seq;

void
usl_trace(
	usimple_lock_t	l,
	int		mycpu,
	pc_t		pc,
	const char	*op_name)
{
	if (traced_lock == l) {
		XPR(XPR_SLOCK,
		    "seq %d, cpu %d, %s @ %x\n",
		    (integer_t) lock_seq, (integer_t) mycpu,
		    (integer_t) op_name, (integer_t) pc, 0);
		lock_seq++;
	}
}


#endif	/* USLOCK_DEBUG */

/*
 * Routine:	lock_alloc
 * Function:
 *	Allocate a lock for external users who cannot
 *	hard-code the structure definition into their
 *	objects.
 *	For now just use kalloc, but a zone is probably
 *	warranted.
 */
lock_t *
lock_alloc(
	boolean_t	can_sleep,
	unsigned short	tag,
	unsigned short	tag1)
{
	lock_t		*l;

	if ((l = (lock_t *)kalloc(sizeof(lock_t))) != 0)
		lock_init(l, can_sleep, tag, tag1);
	return(l);
}

/*
 * Routine:	lock_free
 * Function:
 *	Free a lock allocated for external users.
 *	For now just use kfree, but a zone is probably
 *	warranted.
 */
void
lock_free(
	lock_t	*l)
{
	kfree(l, sizeof(lock_t));
}


/*
 * Routine:	lock_init
 * Function:
 *	Initialize a lock; required before use.
 *	Note that clients declare the "struct lock"
 *	variables and then initialize them, rather
 *	than getting a new one from this module.
 */
void
lock_init(
	lock_t		*l,
	boolean_t	can_sleep,
	__unused unsigned short	tag,
	__unused unsigned short	tag1)
{
	hw_lock_init(&l->interlock);
	l->want_write = FALSE;
	l->want_upgrade = FALSE;
	l->read_count = 0;
	l->can_sleep = can_sleep;
	l->lck_rw_tag = tag;
}


/*
 * Sleep locks.  These use the same data structure and algorithm
 * as the spin locks, but the process sleeps while it is waiting
 * for the lock.  These work on uniprocessor systems.
 */

#define	DECREMENTER_TIMEOUT 1000000

void
lock_write(
	register lock_t	*l)
{
	lck_rw_lock_exclusive(l);
}

void
lock_done(
	register lock_t	*l)
{
	(void) lck_rw_done(l);
}

void
lock_read(
	register lock_t	*l)
{
	lck_rw_lock_shared(l);
}


/*
 * Routine:	lock_read_to_write
 * Function:
 *	Improves a read-only lock to one with
 *	write permission.  If another reader has
 *	already requested an upgrade to a write lock,
 *	no lock is held upon return.
 *
 *	Returns TRUE if the upgrade *failed*.
 */

boolean_t
lock_read_to_write(
	register lock_t	*l)
{
	return lck_rw_lock_shared_to_exclusive(l);
}

void
lock_write_to_read(
	register lock_t	*l)
{
	lck_rw_lock_exclusive_to_shared(l);
}
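
/*
 * Usage sketch (illustrative only): the read-to-write upgrade above can
 * fail, in which case no lock is held on return and the caller must
 * re-acquire and re-validate.  Assumes a lock_t set up elsewhere with
 * lock_init(); "l" and "state_changed()" are placeholders.
 *
 *	lock_read(l);
 *	if (lock_read_to_write(l)) {
 *		(upgrade failed: another reader asked first, lock was dropped)
 *		lock_write(l);
 *		if (state_changed())
 *			...re-validate before modifying...
 *	}
 *	...modify the protected data...
 *	lock_write_to_read(l);	(or lock_done(l) to release entirely)
 */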


/*
 * Routine:	lck_rw_alloc_init
 */
lck_rw_t *
lck_rw_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr) {
	lck_rw_t	*lck;

	if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0)
		lck_rw_init(lck, grp, attr);

	return(lck);
}

/*
 * Routine:	lck_rw_free
 */
void
lck_rw_free(
	lck_rw_t	*lck,
	lck_grp_t	*grp) {
	lck_rw_destroy(lck, grp);
	kfree(lck, sizeof(lck_rw_t));
}

/*
 * Routine:	lck_rw_init
 */
void
lck_rw_init(
	lck_rw_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_attr_t	*lck_attr = (attr != LCK_ATTR_NULL) ?
					attr : &LockDefaultLckAttr;

	hw_lock_init(&lck->interlock);
	lck->want_write = FALSE;
	lck->want_upgrade = FALSE;
	lck->read_count = 0;
	lck->can_sleep = TRUE;
	lck->lck_rw_tag = 0;
	lck->read_priority = (lck_attr->lck_attr_val &
				LCK_ATTR_RW_SHARED_PRIORITY) != 0;

	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
}

/*
 * Routine:	lck_rw_destroy
 */
void
lck_rw_destroy(
	lck_rw_t	*lck,
	lck_grp_t	*grp) {
	if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
		return;
	lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
	lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
	lck_grp_deallocate(grp);
	return;
}

/*
 * Sleep locks.  These use the same data structure and algorithm
 * as the spin locks, but the process sleeps while it is waiting
 * for the lock.  These work on uniprocessor systems.
 */

#define	DECREMENTER_TIMEOUT 1000000


/*
 * We need to disable interrupts while holding the mutex interlock
 * to prevent an IPI intervening.
 * Hence, local helper functions lck_interlock_lock()/lck_interlock_unlock().
 */
static boolean_t
lck_interlock_lock(lck_rw_t *lck)
{
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);
	hw_lock_lock(&lck->interlock);

	return istate;
}

static void
lck_interlock_unlock(lck_rw_t *lck, boolean_t istate)
{
	hw_lock_unlock(&lck->interlock);
	ml_set_interrupts_enabled(istate);
}


/*
 * This inline is used when busy-waiting for an rw lock.
 * If interrupts were disabled when the lock primitive was called,
 * we poll the IPI handler for pending tlb flushes.
 * XXX This is a hack to avoid deadlocking on the pmap_system_lock.
 */
static inline void
lck_rw_lock_pause(boolean_t interrupts_enabled)
{
	if (!interrupts_enabled)
		handle_pending_TLB_flushes();
	cpu_pause();
}

/*
 * Routine:	lck_rw_lock_exclusive
 */
void
lck_rw_lock_exclusive(
	lck_rw_t	*lck)
{
	int		i;
	boolean_t	lock_miss = FALSE;
	wait_result_t	res;
#if	MACH_LDEBUG
	int		decrementer;
#endif	/* MACH_LDEBUG */
	boolean_t	istate;

	istate = lck_interlock_lock(lck);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */

	/*
	 *	Try to acquire the want_write bit.
	 */
	while (lck->want_write) {
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);

		if (!lock_miss) {
			lock_miss = TRUE;
		}

		i = lock_wait_time[lck->can_sleep ? 1 : 0];
		if (i != 0) {
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - want_write");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && lck->want_write)
				lck_rw_lock_pause(istate);
			istate = lck_interlock_lock(lck);
		}

		if (lck->can_sleep && lck->want_write) {
			lck->waiting = TRUE;
			res = assert_wait((event_t) lck, THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
				istate = lck_interlock_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
	}
	lck->want_write = TRUE;

	/* Wait for readers (and upgrades) to finish */

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while ((lck->read_count != 0) || lck->want_upgrade) {
		if (!lock_miss) {
			lock_miss = TRUE;
		}

		i = lock_wait_time[lck->can_sleep ? 1 : 0];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
			     (int)lck, lck->read_count, lck->want_upgrade, i, 0);

		if (i != 0) {
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait for readers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && (lck->read_count != 0 ||
					    lck->want_upgrade))
				lck_rw_lock_pause(istate);
			istate = lck_interlock_lock(lck);
		}

		if (lck->can_sleep && (lck->read_count != 0 || lck->want_upgrade)) {
			lck->waiting = TRUE;
			res = assert_wait((event_t) lck, THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
				istate = lck_interlock_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
			     (int)lck, lck->read_count, lck->want_upgrade, res, 0);
	}

	lck_interlock_unlock(lck, istate);
}


/*
 * Routine:	lck_rw_done
 */
lck_rw_type_t
lck_rw_done(
	lck_rw_t	*lck)
{
	boolean_t	do_wakeup = FALSE;
	lck_rw_type_t	lck_rw_type;
	boolean_t	istate;


	istate = lck_interlock_lock(lck);

	if (lck->read_count != 0) {
		lck_rw_type = LCK_RW_TYPE_SHARED;
		lck->read_count--;
	}
	else {
		lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
		if (lck->want_upgrade)
			lck->want_upgrade = FALSE;
		else
			lck->want_write = FALSE;
	}

	/*
	 *	There is no reason to wakeup a waiting thread
	 *	if the read-count is non-zero.  Consider:
	 *		we must be dropping a read lock
	 *		threads are waiting only if one wants a write lock
	 *		if there are still readers, they can't proceed
	 */

	if (lck->waiting && (lck->read_count == 0)) {
		lck->waiting = FALSE;
		do_wakeup = TRUE;
	}

	lck_interlock_unlock(lck, istate);

	if (do_wakeup)
		thread_wakeup((event_t) lck);
	return(lck_rw_type);
}


/*
 * Routine:	lck_rw_unlock
 */
void
lck_rw_unlock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		lck_rw_unlock_shared(lck);
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		lck_rw_unlock_exclusive(lck);
	else
		panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
}
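
/*
 * Usage sketch (illustrative only): callers normally pair lck_rw_lock()
 * with lck_rw_unlock() using an explicit type, or release with
 * lck_rw_done(), which infers whether the lock was held shared or
 * exclusive.  "my_rw" is a placeholder for a lock set up with
 * lck_rw_alloc_init() or lck_rw_init().
 *
 *	lck_rw_lock(my_rw, LCK_RW_TYPE_SHARED);
 *	...readers may run concurrently...
 *	lck_rw_unlock(my_rw, LCK_RW_TYPE_SHARED);
 *
 *	lck_rw_lock_exclusive(my_rw);
 *	...single writer...
 *	(void) lck_rw_done(my_rw);	(returns LCK_RW_TYPE_EXCLUSIVE here)
 */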


/*
 * Routine:	lck_rw_unlock_shared
 */
void
lck_rw_unlock_shared(
	lck_rw_t	*lck)
{
	lck_rw_type_t	ret;

	ret = lck_rw_done(lck);

	if (ret != LCK_RW_TYPE_SHARED)
		panic("lck_rw_unlock(): lock held in mode: %d\n", ret);
}


/*
 * Routine:	lck_rw_unlock_exclusive
 */
void
lck_rw_unlock_exclusive(
	lck_rw_t	*lck)
{
	lck_rw_type_t	ret;

	ret = lck_rw_done(lck);

	if (ret != LCK_RW_TYPE_EXCLUSIVE)
		panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
}


/*
 * Routine:	lck_rw_lock
 */
void
lck_rw_lock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		lck_rw_lock_shared(lck);
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		lck_rw_lock_exclusive(lck);
	else
		panic("lck_rw_lock(): Invalid RW lock type: %x\n", lck_rw_type);
}


/*
 * Routine:	lck_rw_lock_shared
 */
void
lck_rw_lock_shared(
	lck_rw_t	*lck)
{
	int		i;
	wait_result_t	res;
#if	MACH_LDEBUG
	int		decrementer;
#endif	/* MACH_LDEBUG */
	boolean_t	istate;

	istate = lck_interlock_lock(lck);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while ((lck->want_write && (lck->read_priority ?
				    lck->read_count == 0 : TRUE)) ||
	       lck->want_upgrade) {

		i = lock_wait_time[lck->can_sleep ? 1 : 0];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
			     (int)lck, lck->want_write, lck->want_upgrade, i, 0);

		if (i != 0) {
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait no writers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 &&
			       ((lck->want_write && (lck->read_priority ?
						     lck->read_count == 0 : TRUE)) ||
				lck->want_upgrade))
				lck_rw_lock_pause(istate);
			istate = lck_interlock_lock(lck);
		}

		if (lck->can_sleep &&
		    ((lck->want_write && (lck->read_priority ?
					  lck->read_count == 0 : TRUE)) ||
		     lck->want_upgrade)) {
			lck->waiting = TRUE;
			res = assert_wait((event_t) lck, THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
				istate = lck_interlock_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
			     (int)lck, lck->want_write, lck->want_upgrade, res, 0);
	}

	lck->read_count++;

	lck_interlock_unlock(lck, istate);
}


/*
 * Routine:	lck_rw_lock_shared_to_exclusive
 * Function:
 *	Improves a read-only lock to one with
 *	write permission.  If another reader has
 *	already requested an upgrade to a write lock,
 *	no lock is held upon return.
 *
 *	Returns TRUE if the upgrade *failed*.
 */

boolean_t
lck_rw_lock_shared_to_exclusive(
	lck_rw_t	*lck)
{
	int		i;
	boolean_t	do_wakeup = FALSE;
	wait_result_t	res;
#if	MACH_LDEBUG
	int		decrementer;
#endif	/* MACH_LDEBUG */
	boolean_t	istate;

	istate = lck_interlock_lock(lck);

	lck->read_count--;

	if (lck->want_upgrade) {
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
			     (int)lck, lck->read_count, lck->want_upgrade, 0, 0);

		/*
		 *	Someone else has requested upgrade.
		 *	Since we've released a read lock, wake
		 *	him up.
		 */
		if (lck->waiting && (lck->read_count == 0)) {
			lck->waiting = FALSE;
			do_wakeup = TRUE;
		}

		lck_interlock_unlock(lck, istate);

		if (do_wakeup)
			thread_wakeup((event_t) lck);

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
			     (int)lck, lck->read_count, lck->want_upgrade, 0, 0);

		return (TRUE);
	}

	lck->want_upgrade = TRUE;

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while (lck->read_count != 0) {
		i = lock_wait_time[lck->can_sleep ? 1 : 0];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
			     (int)lck, lck->read_count, i, 0, 0);

		if (i != 0) {
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - read_count");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && lck->read_count != 0)
				lck_rw_lock_pause(istate);
			istate = lck_interlock_lock(lck);
		}

		if (lck->can_sleep && lck->read_count != 0) {
			lck->waiting = TRUE;
			res = assert_wait((event_t) lck, THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
				istate = lck_interlock_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
			     (int)lck, lck->read_count, 0, 0, 0);
	}

	lck_interlock_unlock(lck, istate);

	return (FALSE);
}

/*
 * Routine:	lck_rw_lock_exclusive_to_shared
 */
void
lck_rw_lock_exclusive_to_shared(
	lck_rw_t	*lck)
{
	boolean_t	do_wakeup = FALSE;
	boolean_t	istate;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
		     (int)lck, lck->want_write, lck->want_upgrade, 0, 0);

	istate = lck_interlock_lock(lck);

	lck->read_count++;
	if (lck->want_upgrade)
		lck->want_upgrade = FALSE;
	else
		lck->want_write = FALSE;

	if (lck->waiting) {
		lck->waiting = FALSE;
		do_wakeup = TRUE;
	}

	lck_interlock_unlock(lck, istate);

	if (do_wakeup)
		thread_wakeup((event_t) lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
		     (int)lck, lck->want_write, lck->want_upgrade, lck->read_count, 0);

}


/*
 * Routine:	lck_rw_try_lock
 */
boolean_t
lck_rw_try_lock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		return(lck_rw_try_lock_shared(lck));
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		return(lck_rw_try_lock_exclusive(lck));
	else
		panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
	return(FALSE);
}

/*
 * Routine:	lck_rw_try_lock_exclusive
 * Function:
 *	Tries to get a write lock.
 *
 *	Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_exclusive(
	lck_rw_t	*lck)
{
	boolean_t	istate;

	istate = lck_interlock_lock(lck);

	if (lck->want_write || lck->want_upgrade || lck->read_count) {
		/*
		 *	Can't get lock.
		 */
		lck_interlock_unlock(lck, istate);
		return(FALSE);
	}

	/*
	 *	Have lock.
	 */

	lck->want_write = TRUE;

	lck_interlock_unlock(lck, istate);

	return(TRUE);
}

/*
 * Routine:	lck_rw_try_lock_shared
 * Function:
 *	Tries to get a read lock.
 *
 *	Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_shared(
	lck_rw_t	*lck)
{
	boolean_t	istate;

	istate = lck_interlock_lock(lck);

	if (lck->want_write || lck->want_upgrade) {
		lck_interlock_unlock(lck, istate);
		return(FALSE);
	}

	lck->read_count++;

	lck_interlock_unlock(lck, istate);

	return(TRUE);
}
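
/*
 * Usage sketch (illustrative only): the try variants never block, so they
 * are useful when the caller already holds other locks or runs in a
 * context that must not sleep.  "my_rw" is a placeholder.
 *
 *	if (lck_rw_try_lock(my_rw, LCK_RW_TYPE_SHARED)) {
 *		...read the protected data...
 *		lck_rw_unlock(my_rw, LCK_RW_TYPE_SHARED);
 *	} else {
 *		...defer the work or retry later...
 *	}
 */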

/*
 * Routine:	lck_mtx_alloc_init
 */
lck_mtx_t *
lck_mtx_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_mtx_t	*lck;

	if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
		lck_mtx_init(lck, grp, attr);

	return(lck);
}

/*
 * Routine:	lck_mtx_free
 */
void
lck_mtx_free(
	lck_mtx_t	*lck,
	lck_grp_t	*grp)
{
	lck_mtx_destroy(lck, grp);
	kfree(lck, sizeof(lck_mtx_t));
}

/*
 * Routine:	lck_mtx_ext_init
 */
static void
lck_mtx_ext_init(
	lck_mtx_ext_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck->lck_mtx.lck_mtx_ilk = 0;
	lck->lck_mtx.lck_mtx_locked = 0;
	lck->lck_mtx.lck_mtx_waiters = 0;
	lck->lck_mtx.lck_mtx_pri = 0;
	lck->lck_mtx_attr = 0;

	if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
		lck->lck_mtx_deb.pc = 0;
		lck->lck_mtx_deb.thread = 0;
		lck->lck_mtx_deb.type = MUTEX_TAG;
		lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
	}

	lck->lck_mtx_grp = grp;
}

/*
 * Routine:	lck_mtx_init
 */
void
lck_mtx_init(
	lck_mtx_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_mtx_ext_t	*lck_ext;

	if ((attr != LCK_ATTR_NULL) && ((attr->lck_attr_val) & LCK_ATTR_DEBUG)) {
		if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
			lck_mtx_ext_init(lck_ext, grp, attr);
			lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
			lck->lck_mtx_ptr = lck_ext;
		}
	} else {
		lck->lck_mtx_ilk = 0;
		lck->lck_mtx_locked = 0;
		lck->lck_mtx_waiters = 0;
		lck->lck_mtx_pri = 0;
	}
	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
}
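
/*
 * Usage sketch (illustrative only): when a mutex is created with a debug
 * attribute, lck_mtx_init() above allocates a separate lck_mtx_ext_t and
 * marks the embedded lock LCK_MTX_TAG_INDIRECT.  Assumes lck_attr_alloc_init()
 * and lck_attr_setdebug() from kern/locks.c; "my_grp" is a placeholder.
 *
 *	lck_attr_t	*dbg_attr = lck_attr_alloc_init();
 *	lck_attr_setdebug(dbg_attr);
 *	lck_mtx_t	*m = lck_mtx_alloc_init(my_grp, dbg_attr);
 *		(m->lck_mtx_tag == LCK_MTX_TAG_INDIRECT; the real state lives
 *		 in m->lck_mtx_ptr->lck_mtx)
 *	...
 *	lck_mtx_free(m, my_grp);
 */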

/*
 * Routine:	lck_mtx_destroy
 */
void
lck_mtx_destroy(
	lck_mtx_t	*lck,
	lck_grp_t	*grp)
{
	boolean_t	lck_is_indirect;

	if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
		return;
	lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
	lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED;
	if (lck_is_indirect)
		kfree(lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));
	lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
	lck_grp_deallocate(grp);
	return;
}

/*
 * Routine:	lck_mtx_assert
 */
void
lck_mtx_assert(
	__unused lck_mtx_t	*lck,
	__unused unsigned int	type)
{
}

/*
 * Routine:	lck_mtx_lock_spin
 *
 * Invoked trying to acquire a mutex when there is contention but
 * the holder is running on another processor.  We spin for up to a maximum
 * time waiting for the lock to be released.
 *
 * Called with the interlock unlocked.
 */
void
lck_mtx_lock_spin(
	lck_mtx_t	*lck)
{
	thread_t	holder;
	lck_mtx_t	*mutex;
	uint64_t	deadline;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(
		MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN) | DBG_FUNC_START,
		(int)lck, (int)mutex->lck_mtx_locked, 0, 0, 0);

	deadline = mach_absolute_time() + MutexSpin;
	/*
	 * Spin while:
	 *   - mutex is locked, and
	 *   - owner is running on another processor, and
	 *   - owner is not in the idle delay, and
	 *   - we haven't spun for long enough.
	 */
	while ((holder = (thread_t) mutex->lck_mtx_locked) != NULL &&
	       (holder->machine.specFlags & OnProc) != 0 &&
	       (holder->options & TH_OPT_DELAYIDLE) == 0 &&
	       mach_absolute_time() < deadline)
		cpu_pause();
}

/*
 * Called from assembly code when a mutex interlock is held.
 * We spin here re-checking the interlock but panic if we timeout.
 * Note: here with interrupts disabled.
 */
void
lck_mtx_interlock_spin(
	lck_mtx_t	*lck)
{
	lck_mtx_t	*mutex;
	uint64_t	deadline;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	deadline = mach_absolute_time() + LockTimeOut;
	while (mutex->lck_mtx_ilk != 0) {
		cpu_pause();
		if (mach_absolute_time() > deadline)
			panic("interlock timeout for mutex %p", lck);
	}

}

#if	MACH_KDB

void	db_show_one_lock(lock_t *);

void
db_show_one_lock(
	lock_t	*lock)
{
	db_printf("Read_count = 0x%x, %swant_upgrade, %swant_write, ",
		  lock->read_count,
		  lock->want_upgrade ? "" : "!",
		  lock->want_write ? "" : "!");
	db_printf("%swaiting, %scan_sleep\n",
		  lock->waiting ? "" : "!", lock->can_sleep ? "" : "!");
	db_printf("Interlock:\n");
	db_show_one_simple_lock((db_expr_t) ((vm_offset_t)simple_lock_addr(lock->interlock)),
			TRUE, (db_expr_t)0, (char *)0);
}

#endif	/* MACH_KDB */

/*
 * The C portion of the mutex package.  These routines are only invoked
 * if the optimized assembler routines can't do the work.
 */

/*
 * Routine:	mutex_alloc
 * Function:
 *	Allocate a mutex for external users who cannot
 *	hard-code the structure definition into their
 *	objects.
 *	For now just use kalloc, but a zone is probably
 *	warranted.
 */
mutex_t *
mutex_alloc(
	unsigned short	tag)
{
	mutex_t		*m;

	if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
		mutex_init(m, tag);
	return(m);
}

/*
 * Routine:	mutex_free
 * Function:
 *	Free a mutex allocated for external users.
 *	For now just use kfree, but a zone is probably
 *	warranted.
 */
void
mutex_free(
	mutex_t	*m)
{
	kfree(m, sizeof(mutex_t));
}

/*
 * Routine:	_mutex_assert
 */
void
_mutex_assert (
	mutex_t		*mutex,
	unsigned int	what)
{

	thread_t	thread = current_thread();
	thread_t	holder;

	if (panicstr != NULL)
		return;

	holder = (thread_t) mutex->lck_mtx.lck_mtx_locked;

	switch (what) {
	case MA_OWNED:
		if (thread != holder)
			panic("mutex %x not owned\n", mutex);
		break;

	case MA_NOTOWNED:
		if (thread == holder)
			panic("mutex %x owned\n", mutex);
		break;
	}

}
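
/*
 * Usage sketch (illustrative only): ownership assertions are typically
 * placed at the top of functions that document a locking requirement.
 * "my_mutex" is a placeholder for a mutex_t initialized with mutex_init().
 *
 *	void
 *	my_protected_operation(void)
 *	{
 *		_mutex_assert(my_mutex, MA_OWNED);
 *		...touch state guarded by my_mutex...
 *	}
 */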

#if	MACH_KDB
/*
 * Routines to print out simple_locks and mutexes in a nicely-formatted
 * fashion.
 */

const char *simple_lock_labels =	"ENTRY    ILK THREAD	DURATION CALLER";
const char *mutex_labels =		"ENTRY    LOCKED WAITERS   THREAD        CALLER";

void
db_show_one_simple_lock (
	db_expr_t	addr,
	boolean_t	have_addr,
	__unused db_expr_t	count,
	__unused char		*modif)
{
	simple_lock_t	saddr = (simple_lock_t) ((vm_offset_t) addr);

	if (saddr == (simple_lock_t)0 || !have_addr) {
		db_error ("No simple_lock\n");
	}
#if	USLOCK_DEBUG
	else if (saddr->lock_type != USLOCK_TAG)
		db_error ("Not a simple_lock\n");
#endif	/* USLOCK_DEBUG */

	db_printf ("%s\n", simple_lock_labels);
	db_print_simple_lock (saddr);
}

void
db_print_simple_lock (
	simple_lock_t	addr)
{

	db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
#if	USLOCK_DEBUG
	db_printf (" %08x", addr->debug.lock_thread);
	db_printf (" %08x ", addr->debug.duration[1]);
	db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
#endif	/* USLOCK_DEBUG */
	db_printf ("\n");
}

void
db_show_one_mutex (
	db_expr_t	addr,
	boolean_t	have_addr,
	__unused db_expr_t	count,
	__unused char		*modif)
{
	mutex_t		*maddr = (mutex_t *)((vm_offset_t) addr);

	if (maddr == (mutex_t *)0 || !have_addr)
		db_error ("No mutex\n");
#if	MACH_LDEBUG
	else if (maddr->type != MUTEX_TAG)
		db_error ("Not a mutex\n");
#endif	/* MACH_LDEBUG */

	db_printf ("%s\n", mutex_labels);
	db_print_mutex (maddr);
}

void
db_print_mutex (
	mutex_t		*addr)
{
	db_printf ("%08x %6d %7d",
		   addr, *addr, addr->lck_mtx.lck_mtx_waiters);
#if	MACH_LDEBUG
	db_printf (" %08x ", addr->thread);
	db_printsym (addr->pc, DB_STGY_ANY);
#endif	/* MACH_LDEBUG */
	db_printf ("\n");
}

#endif	/* MACH_KDB */