/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/lock.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>

#include <mach/mach_types.h>
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if 0
#define	COUNT(p, x)	if (p) (p)->p_locks += (x)
#else
#define	COUNT(p, x)
#endif

#if NCPUS > 1

/*
 * On a multiprocessor system, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define	PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
		}							\
		if (!(wanted))						\
			break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor, as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define	PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define	ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
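
/*
 * A minimal sketch (illustrative only, not compiled in) of what
 * ACQUIRE(lkp, error, extflags, wanted) expands to on a multiprocessor,
 * where PAUSE spins before sleeping; the bare "break" statements
 * target the switch case in lockmgr() that invokes the macro:
 *
 *	if (lock_wait_time > 0) {
 *		int i;
 *		for (i = lock_wait_time; i > 0; i--)
 *			if (!(wanted))
 *				break;		// cleared while spinning
 *	}
 *	if (!(wanted))
 *		break;				// acquired without sleeping; error == 0
 *	for (error = 0; wanted; ) {
 *		(lkp)->lk_waitcount++;
 *		error = tsleep((void *)lkp, (lkp)->lk_prio,
 *		    (lkp)->lk_wmesg, (lkp)->lk_timo);
 *		(lkp)->lk_waitcount--;
 *		if (error)
 *			break;			// interrupted or timed out
 *		if ((extflags) & LK_SLEEPFAIL) {
 *			error = ENOLCK;		// caller asked to fail after any sleep
 *			break;
 *		}
 *	}
 */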

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock__bsd__ *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{

	bzero(lkp, sizeof(struct lock__bsd__));
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_lockthread = 0;
}
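
/*
 * Usage sketch (illustrative only; "example_lk" is hypothetical and
 * PINOD is just one plausible sleep priority): a subsystem embeds a
 * struct lock__bsd__ in its own state and must call lockinit() once
 * before the first lockmgr() request:
 *
 *	struct lock__bsd__ example_lk;
 *
 *	lockinit(&example_lk, PINOD, "examplk", 0, 0);
 */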

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock__bsd__ *lkp;
{
	int lock_type = 0;

	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	return (lock_type);
}
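
/*
 * Usage sketch (illustrative, continuing the hypothetical example_lk
 * above): lockstatus() supports assertion-style checks without
 * peeking at lk_flags directly:
 *
 *	if (lockstatus(&example_lk) != LK_EXCLUSIVE)
 *		panic("example: exclusive lock not held");
 */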

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp, p)
	struct lock__bsd__ *lkp;
	u_int flags;
	void *interlkp;
	struct proc *p;
{
	int error;
	pid_t pid;
	int extflags;
	void *self;

	error = 0;
	self = current_thread();
	if (p)
		pid = p->p_pid;
	else
		pid = LK_KERNPROC;
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
#if 0
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    (lkp->lk_lockholder != pid && lkp->lk_lockthread != self))
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}
#endif

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid || lkp->lk_lockthread != self) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid ||
		    lkp->lk_lockthread != self ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_lockthread = 0;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == pid &&
		    lkp->lk_lockthread == self) ||
		    lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			lkp->lk_lockthread = self;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && lkp->lk_lockthread == self) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_lockthread = self;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder ||
			    lkp->lk_lockthread != self)
				panic("lockmgr: pid %d, thread 0x%8x,"
				    " not exclusive lock holder pid %d"
				    " thread 0x%8x unlocking, exclusive count %d",
				    pid, self, lkp->lk_lockholder,
				    lkp->lk_lockthread, lkp->lk_exclusivecount);
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_lockthread = 0;
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
		}
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid && lkp->lk_lockthread == self)
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			if ((error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo)) != 0)
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
		}
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_lockthread = self;
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	default:
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	return (error);
}
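
/*
 * Usage sketch (illustrative only, using the hypothetical example_lk
 * from above): the common pattern is a shared acquire around a read,
 * upgrading only when a modification turns out to be needed. Note
 * that a failed LK_UPGRADE leaves the lock entirely unlocked, so the
 * caller must not release it again:
 *
 *	error = lockmgr(&example_lk, LK_SHARED, NULL, p);
 *	if (error)
 *		return (error);
 *	// ... examine shared state ...
 *	if (need_to_modify) {
 *		error = lockmgr(&example_lk, LK_UPGRADE, NULL, p);
 *		if (error)
 *			return (error);	// lock already dropped on failure
 *		// ... modify state under the exclusive lock ...
 *	}
 *	lockmgr(&example_lk, LK_RELEASE, NULL, p);
 */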

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock__bsd__ *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
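
/*
 * Usage sketch (illustrative): a filesystem's VOP_PRINT routine might
 * report its vnode lock state like so (the surrounding printf text is
 * hypothetical):
 *
 *	printf("tag VT_EXAMPLE, example inode");
 *	lockmgr_printinfo(&example_lk);
 *	printf("\n");
 */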