/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License').  You may not use this file except in
 * compliance with the License.  Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1995
 *    The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *       This product includes software developed by the University of
 *       California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)kern_lock.c    8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>

#include <mach/mach_types.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if 0
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#if NCPUS > 1

/*
 * On a multiprocessor system, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)                        \
    if (lock_wait_time > 0) {                     \
        int i;                                    \
                                                  \
        simple_unlock(&lkp->lk_interlock);        \
        for (i = lock_wait_time; i > 0; i--)      \
            if (!(wanted))                        \
                break;                            \
        simple_lock(&lkp->lk_interlock);          \
    }                                             \
    if (!(wanted))                                \
        break;

#else	/* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif	/* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)          \
    PAUSE(lkp, wanted);                                \
    for (error = 0; wanted; ) {                        \
        (lkp)->lk_waitcount++;                         \
        simple_unlock(&(lkp)->lk_interlock);           \
        error = tsleep((void *)lkp, (lkp)->lk_prio,    \
            (lkp)->lk_wmesg, (lkp)->lk_timo);          \
        simple_lock(&(lkp)->lk_interlock);             \
        (lkp)->lk_waitcount--;                         \
        if (error)                                     \
            break;                                     \
        if ((extflags) & LK_SLEEPFAIL) {               \
            error = ENOLCK;                            \
            break;                                     \
        }                                              \
    }
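
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * with a hypothetical wait condition such as "lkp->lk_flags & LK_HAVE_EXCL",
 * an ACQUIRE use like
 *
 *    ACQUIRE(lkp, error, extflags, lkp->lk_flags & LK_HAVE_EXCL);
 *
 * first spins briefly via PAUSE (on an MP system, with the interlock
 * dropped), then repeatedly tsleep()s on the lock's address until a
 * releaser calls wakeup((void *)lkp) and the condition reads false while
 * the interlock is held, or until an error or LK_SLEEPFAIL ends the wait.
 */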

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
    struct lock__bsd__ *lkp;
    int prio;
    char *wmesg;
    int timo;
    int flags;
{

    bzero(lkp, sizeof(struct lock__bsd__));
    simple_lock_init(&lkp->lk_interlock);
    lkp->lk_flags = flags & LK_EXTFLG_MASK;
    lkp->lk_prio = prio;
    lkp->lk_timo = timo;
    lkp->lk_wmesg = wmesg;
    lkp->lk_lockholder = LK_NOPROC;
    lkp->lk_lockthread = 0;
}
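
/*
 * Usage sketch (editorial addition; the priority, message, and variable
 * names are illustrative assumptions, not from this file): a subsystem
 * typically initializes the lock once, then brackets access with lockmgr()
 * requests, using the caller's struct proc *p:
 *
 *    struct lock__bsd__ examplelock;
 *
 *    lockinit(&examplelock, PINOD, "examplk", 0, 0);
 *    (void) lockmgr(&examplelock, LK_SHARED, NULL, p);
 *    ... read the protected structure ...
 *    (void) lockmgr(&examplelock, LK_RELEASE, NULL, p);
 */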

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
    struct lock__bsd__ *lkp;
{
    int lock_type = 0;

    simple_lock(&lkp->lk_interlock);
    if (lkp->lk_exclusivecount != 0)
        lock_type = LK_EXCLUSIVE;
    else if (lkp->lk_sharecount != 0)
        lock_type = LK_SHARED;
    simple_unlock(&lkp->lk_interlock);
    return (lock_type);
}
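
/*
 * Illustrative use (editorial addition, not part of the original file):
 * lockstatus() only reports a snapshot taken under the interlock, e.g.
 *
 *    if (lockstatus(&examplelock) == LK_EXCLUSIVE)
 *        printf("exclusively held\n");
 *
 * and the answer may already be stale by the time the caller acts on it.
 */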

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp, p)
    struct lock__bsd__ *lkp;
    u_int flags;
    simple_lock_t interlkp;
    struct proc *p;
{
    int error;
    pid_t pid;
    int extflags;
    void *self;

    error = 0;
    self = current_act();
    if (p)
        pid = p->p_pid;
    else
        pid = LK_KERNPROC;
    simple_lock(&lkp->lk_interlock);
    if (flags & LK_INTERLOCK)
        simple_unlock(interlkp);
    extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
#if 0
    /*
     * Once a lock has drained, the LK_DRAINING flag is set and an
     * exclusive lock is returned. The only valid operation thereafter
     * is a single release of that exclusive lock. This final release
     * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
     * further requests of any sort will result in a panic. The bits
     * selected for these two flags are chosen so that they will be set
     * in memory that is freed (freed memory is filled with 0xdeadbeef).
     * The final release is permitted to give a new lease on life to
     * the lock by specifying LK_REENABLE.
     */
    if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
        if (lkp->lk_flags & LK_DRAINED)
            panic("lockmgr: using decommissioned lock");
        if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
            (lkp->lk_lockholder != pid && lkp->lk_lockthread != self))
            panic("lockmgr: non-release on draining lock: %d\n",
                flags & LK_TYPE_MASK);
        lkp->lk_flags &= ~LK_DRAINING;
        if ((flags & LK_REENABLE) == 0)
            lkp->lk_flags |= LK_DRAINED;
    }
#endif

    switch (flags & LK_TYPE_MASK) {

    case LK_SHARED:
        if (lkp->lk_lockholder != pid || lkp->lk_lockthread != self) {
            /*
             * If just polling, check to see if we will block.
             */
            if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
                (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
                error = EBUSY;
                break;
            }
            /*
             * Wait for exclusive locks and upgrades to clear.
             */
            ACQUIRE(lkp, error, extflags, lkp->lk_flags &
                (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
            if (error)
                break;
            lkp->lk_sharecount++;
            COUNT(p, 1);
            break;
        }
        /*
         * We hold an exclusive lock, so downgrade it to shared.
         * An alternative would be to fail with EDEADLK.
         */
        lkp->lk_sharecount++;
        COUNT(p, 1);
        /* fall into downgrade */

    case LK_DOWNGRADE:
        if (lkp->lk_lockholder != pid ||
            lkp->lk_lockthread != self ||
            lkp->lk_exclusivecount == 0)
            panic("lockmgr: not holding exclusive lock");
        lkp->lk_sharecount += lkp->lk_exclusivecount;
        lkp->lk_exclusivecount = 0;
        lkp->lk_flags &= ~LK_HAVE_EXCL;
        lkp->lk_lockholder = LK_NOPROC;
        lkp->lk_lockthread = 0;
        if (lkp->lk_waitcount)
            wakeup((void *)lkp);
        break;

    case LK_EXCLUPGRADE:
        /*
         * If another process is ahead of us to get an upgrade,
         * then we want to fail rather than have an intervening
         * exclusive access.
         */
        if (lkp->lk_flags & LK_WANT_UPGRADE) {
            lkp->lk_sharecount--;
            COUNT(p, -1);
            error = EBUSY;
            break;
        }
        /* fall into normal upgrade */

    case LK_UPGRADE:
        /*
         * Upgrade a shared lock to an exclusive one. If another
         * shared lock has already requested an upgrade to an
         * exclusive lock, our shared lock is released and an
         * exclusive lock is requested (which will be granted
         * after the upgrade). If we return an error, the file
         * will always be unlocked.
         */
        if ((lkp->lk_lockholder == pid &&
            lkp->lk_lockthread == self) ||
            lkp->lk_sharecount <= 0)
            panic("lockmgr: upgrade exclusive lock");
        lkp->lk_sharecount--;
        COUNT(p, -1);
        /*
         * If we are just polling, check to see if we will block.
         */
        if ((extflags & LK_NOWAIT) &&
            ((lkp->lk_flags & LK_WANT_UPGRADE) ||
            lkp->lk_sharecount > 1)) {
            error = EBUSY;
            break;
        }
        if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
            /*
             * We are the first shared lock to request an upgrade,
             * so request the upgrade and wait for the shared count
             * to drop to zero, then take the exclusive lock.
             */
            lkp->lk_flags |= LK_WANT_UPGRADE;
            ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
            lkp->lk_flags &= ~LK_WANT_UPGRADE;
            if (error)
                break;
            lkp->lk_flags |= LK_HAVE_EXCL;
            lkp->lk_lockholder = pid;
            lkp->lk_lockthread = self;
            if (lkp->lk_exclusivecount != 0)
                panic("lockmgr: non-zero exclusive count");
            lkp->lk_exclusivecount = 1;
            COUNT(p, 1);
            break;
        }
        /*
         * Someone else has requested the upgrade. Release our shared
         * lock, awaken the upgrade requestor if we are the last shared
         * lock, then request an exclusive lock.
         */
        if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
            wakeup((void *)lkp);
        /* fall into exclusive request */

    case LK_EXCLUSIVE:
        if (lkp->lk_lockholder == pid && lkp->lk_lockthread == self) {
            /*
             * Recursive lock.
             */
            if ((extflags & LK_CANRECURSE) == 0)
                panic("lockmgr: locking against myself");
            lkp->lk_exclusivecount++;
            COUNT(p, 1);
            break;
        }
        /*
         * If we are just polling, check to see if we will sleep.
         */
        if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
            (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
            lkp->lk_sharecount != 0)) {
            error = EBUSY;
            break;
        }
        /*
         * Try to acquire the want_exclusive flag.
         */
        ACQUIRE(lkp, error, extflags, lkp->lk_flags &
            (LK_HAVE_EXCL | LK_WANT_EXCL));
        if (error)
            break;
        lkp->lk_flags |= LK_WANT_EXCL;
        /*
         * Wait for shared locks and upgrades to finish.
         */
        ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
            (lkp->lk_flags & LK_WANT_UPGRADE));
        lkp->lk_flags &= ~LK_WANT_EXCL;
        if (error)
            break;
        lkp->lk_flags |= LK_HAVE_EXCL;
        lkp->lk_lockholder = pid;
        lkp->lk_lockthread = self;
        if (lkp->lk_exclusivecount != 0)
            panic("lockmgr: non-zero exclusive count");
        lkp->lk_exclusivecount = 1;
        COUNT(p, 1);
        break;

    case LK_RELEASE:
        if (lkp->lk_exclusivecount != 0) {
            if (pid != lkp->lk_lockholder ||
                lkp->lk_lockthread != self)
                panic("lockmgr: pid %d, thread 0x%8x,"
                    " not exclusive lock holder pid %d"
                    " thread 0x%8x unlocking, exclusive count %d",
                    pid, self, lkp->lk_lockholder,
                    lkp->lk_lockthread, lkp->lk_exclusivecount);
            lkp->lk_exclusivecount--;
            COUNT(p, -1);
            if (lkp->lk_exclusivecount == 0) {
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                lkp->lk_lockholder = LK_NOPROC;
                lkp->lk_lockthread = 0;
            }
        } else if (lkp->lk_sharecount != 0) {
            lkp->lk_sharecount--;
            COUNT(p, -1);
        }
        if (lkp->lk_waitcount)
            wakeup((void *)lkp);
        break;

    case LK_DRAIN:
        /*
         * Check that we do not already hold the lock, as it can
         * never drain if we do. Unfortunately, we have no way to
         * check for holding a shared lock, but at least we can
         * check for an exclusive one.
         */
        if (lkp->lk_lockholder == pid && lkp->lk_lockthread == self)
            panic("lockmgr: draining against myself");
        /*
         * If we are just polling, check to see if we will sleep.
         */
        if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
            (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
            lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
            error = EBUSY;
            break;
        }
        PAUSE(lkp, ((lkp->lk_flags &
            (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
            lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
        for (error = 0; ((lkp->lk_flags &
            (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
            lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
            lkp->lk_flags |= LK_WAITDRAIN;
            simple_unlock(&lkp->lk_interlock);
            if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
                lkp->lk_wmesg, lkp->lk_timo))
                return (error);
            if ((extflags) & LK_SLEEPFAIL)
                return (ENOLCK);
            simple_lock(&lkp->lk_interlock);
        }
        lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
        lkp->lk_lockholder = pid;
        lkp->lk_lockthread = self;
        lkp->lk_exclusivecount = 1;
        COUNT(p, 1);
        break;

    default:
        simple_unlock(&lkp->lk_interlock);
        panic("lockmgr: unknown locktype request %d",
            flags & LK_TYPE_MASK);
        /* NOTREACHED */
    }
    if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
        (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
        lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
        lkp->lk_flags &= ~LK_WAITDRAIN;
        wakeup((void *)&lkp->lk_flags);
    }
    simple_unlock(&lkp->lk_interlock);
    return (error);
}
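
/*
 * Illustrative sketch (editorial addition; the vnode field names are
 * hypothetical, not from this file): a caller already holding a simple
 * lock can hand it to lockmgr() atomically with LK_INTERLOCK, which
 * releases interlkp once the lock's own interlock is held:
 *
 *    simple_lock(&vp->v_interlock);
 *    error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_INTERLOCK,
 *        &vp->v_interlock, p);
 */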

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
    struct lock__bsd__ *lkp;
{

    if (lkp->lk_sharecount)
        printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
            lkp->lk_sharecount);
    else if (lkp->lk_flags & LK_HAVE_EXCL)
        printf(" lock type %s: EXCL (count %d) by pid %d",
            lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
    if (lkp->lk_waitcount > 0)
        printf(" with %d pending", lkp->lk_waitcount);
}