/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/lock.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>

#include <mach/mach_types.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if 0
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#if NCPUS > 1

/*
 * On a multiprocessor system, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define	PAUSE(lkp, wanted) \
		if (lock_wait_time > 0) { \
			int i; \
 \
			for (i = lock_wait_time; i > 0; i--) \
				if (!(wanted)) \
					break; \
		} \
		if (!(wanted)) \
			break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor, as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define	PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define	ACQUIRE(lkp, error, extflags, wanted) \
	PAUSE(lkp, wanted); \
	for (error = 0; wanted; ) { \
		(lkp)->lk_waitcount++; \
		error = tsleep((void *)lkp, (lkp)->lk_prio, \
		    (lkp)->lk_wmesg, (lkp)->lk_timo); \
		(lkp)->lk_waitcount--; \
		if (error) \
			break; \
		if ((extflags) & LK_SLEEPFAIL) { \
			error = ENOLCK; \
			break; \
		} \
	}
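
#if 0
/*
 * Illustrative sketch (not part of the original source): the sleep
 * loop of ACQUIRE() written out as a function, to make the macro's
 * control flow easier to follow.  The spin phase (PAUSE) is omitted
 * here because its `break' statements only have meaning at the
 * macro's expansion sites inside lockmgr()'s switch.  The `wanted'
 * pointer stands in for the macro's condition argument; the helper
 * name is hypothetical.
 */
static int
example_acquire_sleep(struct lock__bsd__ *lkp, int extflags,
    volatile int *wanted)
{
	int error;

	for (error = 0; *wanted; ) {
		lkp->lk_waitcount++;
		error = tsleep((void *)lkp, lkp->lk_prio,
		    lkp->lk_wmesg, lkp->lk_timo);
		lkp->lk_waitcount--;
		if (error)
			break;		/* interrupted or timed out */
		if ((extflags) & LK_SLEEPFAIL) {
			error = ENOLCK;	/* caller asked to fail after any sleep */
			break;
		}
	}
	return (error);
}
#endif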

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock__bsd__ *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{

	bzero(lkp, sizeof(struct lock__bsd__));
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_lockthread = 0;
}
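
#if 0
/*
 * Illustrative sketch (not part of the original source): a typical
 * client embeds a struct lock__bsd__ in its own structure and calls
 * lockinit() once before any lockmgr() operation.  The structure,
 * helper name, sleep priority, message string, timeout, and flags
 * below are placeholder values chosen for illustration only.
 */
struct example_object {
	struct lock__bsd__ eo_lock;
	/* ... object state protected by eo_lock ... */
};

static void
example_object_init(struct example_object *eo)
{
	/* sleep at priority PZERO, wmesg "exlock", no timeout */
	lockinit(&eo->eo_lock, PZERO, "exlock", 0, LK_CANRECURSE);
}
#endif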

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock__bsd__ *lkp;
{
	int lock_type = 0;

	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	return (lock_type);
}
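
#if 0
/*
 * Illustrative sketch (not part of the original source): lockstatus()
 * merely samples the counts, so the answer may be stale by the time
 * the caller acts on it; it suits assertions and diagnostics rather
 * than synchronization decisions.  The helper name is hypothetical.
 */
static int
example_is_exclusive(struct lock__bsd__ *lkp)
{
	return (lockstatus(lkp) == LK_EXCLUSIVE);
}
#endif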

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp, p)
	struct lock__bsd__ *lkp;
	u_int flags;
	void * interlkp;
	struct proc *p;
{
	int error;
	pid_t pid;
	int extflags;
	void *self;

	error = 0;
	self = current_thread();
	if (p)
		pid = p->p_pid;
	else
		pid = LK_KERNPROC;
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
#if 0
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    (lkp->lk_lockholder != pid && lkp->lk_lockthread != self))
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}
#endif

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid || lkp->lk_lockthread != self) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid ||
		    lkp->lk_lockthread != self ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_lockthread = 0;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == pid &&
		    lkp->lk_lockthread == self) ||
		    lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			lkp->lk_lockthread = self;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested an upgrade. Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && lkp->lk_lockthread == self) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_lockthread = self;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder ||
			    lkp->lk_lockthread != self)
				panic("lockmgr: pid %d, thread 0x%8x,"
				    " not exclusive lock holder pid %d"
				    " thread 0x%8x unlocking, exclusive count %d",
				    pid, self, lkp->lk_lockholder,
				    lkp->lk_lockthread, lkp->lk_exclusivecount);
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_lockthread = 0;
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
		}
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid && lkp->lk_lockthread == self)
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			if ((error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo)) != 0)
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
		}
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_lockthread = self;
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	default:
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	return (error);
}
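
#if 0
/*
 * Illustrative sketch (not part of the original source): typical
 * lockmgr() call sequences.  The example takes a shared lock,
 * upgrades it to exclusive, and releases it; per the LK_UPGRADE
 * comment above, a failed upgrade leaves the lock already unlocked,
 * so no LK_RELEASE is needed on that error path.  The helper name
 * and the NULL interlock argument (unused in this implementation)
 * are illustrative assumptions.
 */
static int
example_read_modify(struct lock__bsd__ *lkp, struct proc *p)
{
	int error;

	/* Take a shared lock, sleeping until it is available. */
	if ((error = lockmgr(lkp, LK_SHARED, NULL, p)) != 0)
		return (error);

	/* ... read the protected data ... */

	/* Upgrade to exclusive; on failure the lock is already dropped. */
	if ((error = lockmgr(lkp, LK_UPGRADE, NULL, p)) != 0)
		return (error);

	/* ... modify the protected data ... */

	return (lockmgr(lkp, LK_RELEASE, NULL, p));
}
#endif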

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock__bsd__ *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}