Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
6d2010ae | 2 | * Copyright (c) 2000-2009 Apple Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
1c79356b | 5 | * |
2d21ac55 | 6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
8f6c56a5 | 14 | * |
2d21ac55 | 15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 | 20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 | 22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
8f6c56a5 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b | 27 | */ |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | * | |
31 | */ | |
32 | /* | |
33 | * File: kern/sync_lock.c | |
34 | * Author: Joseph CaraDonna | |
35 | * | |
36 | * Contains RT distributed lock synchronization services. | |
37 | */ | |
38 | ||
91447636 | 39 | #include <mach/mach_types.h> |
40 | #include <mach/lock_set_server.h> | |
41 | #include <mach/task_server.h> | |
42 | ||
1c79356b | 43 | #include <kern/misc_protos.h> |
91447636 | 44 | #include <kern/kalloc.h> |
1c79356b | 45 | #include <kern/sync_lock.h> |
46 | #include <kern/sched_prim.h> | |
47 | #include <kern/ipc_kobject.h> | |
48 | #include <kern/ipc_sync.h> | |
1c79356b | 49 | #include <kern/thread.h> |
50 | #include <kern/task.h> | |
51 | ||
52 | #include <ipc/ipc_port.h> | |
53 | #include <ipc/ipc_space.h> | |
54 | ||
55 | /* | |
56 | * Ulock ownership MACROS | |
57 | * | |
58 | * Assumes: ulock internal lock is held | |
59 | */ | |
60 | ||
61 | #define ulock_ownership_set(ul, th) \ | |
62 | MACRO_BEGIN \ | |
91447636 | 63 | thread_mtx_lock(th); \ |
64 | enqueue (&th->held_ulocks, (queue_entry_t) (ul)); \ | |
65 | thread_mtx_unlock(th); \ | |
66 | (ul)->holder = th; \ | |
1c79356b | 67 | MACRO_END |
68 | ||
69 | #define ulock_ownership_clear(ul) \ | |
70 | MACRO_BEGIN \ | |
b0d623f7 | 71 | thread_t th; \ |
91447636 | 72 | th = (ul)->holder; \ |
b0d623f7 | 73 | if ((th)->active) { \ |
91447636 | 74 | thread_mtx_lock(th); \ |
6d2010ae | 75 | remqueue((queue_entry_t) (ul)); \ |
91447636 | 76 | thread_mtx_unlock(th); \ |
1c79356b | 77 | } else { \ |
6d2010ae | 78 | remqueue((queue_entry_t) (ul)); \ |
1c79356b | 79 | } \ |
91447636 | 80 | (ul)->holder = THREAD_NULL; \ |
1c79356b | 81 | MACRO_END |
82 | ||
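The two macros above keep a ulock's `holder` field and the holding thread's `held_ulocks` queue consistent, and both assume the ulock's internal lock is already held. A minimal sketch of how they pair up when ownership moves between threads, mirroring the pattern used by `ulock_release_internal()` and `lock_handoff()` below (`ulock_transfer_ownership` is an illustrative helper, not a routine in this file):

```c
/* Illustrative only: hand a currently held ulock over to another thread.
 * Assumes `ulock' has a holder and `new_holder' is an active thread. */
static void
ulock_transfer_ownership(ulock_t ulock, thread_t new_holder)
{
	ulock_lock(ulock);                      /* the macros assume this lock is held      */
	ulock_ownership_clear(ulock);           /* unlink from the old holder's held_ulocks */
	ulock_ownership_set(ulock, new_holder); /* record the new holder and enqueue        */
	ulock_unlock(ulock);
}
```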
83 | /* | |
84 | * Lock set ownership MACROS | |
85 | */ | |
86 | ||
87 | #define lock_set_ownership_set(ls, t) \ | |
88 | MACRO_BEGIN \ | |
89 | task_lock((t)); \ | |
90 | enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\ | |
91 | (t)->lock_sets_owned++; \ | |
92 | task_unlock((t)); \ | |
93 | (ls)->owner = (t); \ | |
94 | MACRO_END | |
95 | ||
96 | #define lock_set_ownership_clear(ls, t) \ | |
97 | MACRO_BEGIN \ | |
98 | task_lock((t)); \ | |
6d2010ae | 99 | remqueue((queue_entry_t) (ls)); \ |
1c79356b | 100 | (t)->lock_sets_owned--; \ |
101 | task_unlock((t)); \ | |
102 | MACRO_END | |
103 | ||
104 | unsigned int lock_set_event; | |
cf7d32b8 | 105 | #define LOCK_SET_EVENT CAST_EVENT64_T(&lock_set_event) |
1c79356b | 106 | |
107 | unsigned int lock_set_handoff; | |
cf7d32b8 | 108 | #define LOCK_SET_HANDOFF CAST_EVENT64_T(&lock_set_handoff) |
1c79356b | 109 | |
b0d623f7 | 110 | |
111 | lck_attr_t lock_set_attr; | |
112 | lck_grp_t lock_set_grp; | |
113 | static lck_grp_attr_t lock_set_grp_attr; | |
114 | ||
115 | ||
116 | ||
1c79356b | 117 | /* |
118 | * ROUTINE: lock_set_init [private] | |
119 | * | |
120 | * Initialize the lock_set subsystem. | |
1c79356b | 121 | */ |
122 | void | |
123 | lock_set_init(void) | |
124 | { | |
b0d623f7 | 125 | lck_grp_attr_setdefault(&lock_set_grp_attr); |
126 | lck_grp_init(&lock_set_grp, "lock_set", &lock_set_grp_attr); | |
127 | lck_attr_setdefault(&lock_set_attr); | |
1c79356b | 128 | } |
129 | ||
130 | ||
131 | /* | |
132 | * ROUTINE: lock_set_create [exported] | |
133 | * | |
134 | * Creates a lock set. | |
135 | * The port representing the lock set is returned as a parameter. | |
136 | */ | |
137 | kern_return_t | |
138 | lock_set_create ( | |
139 | task_t task, | |
140 | lock_set_t *new_lock_set, | |
141 | int n_ulocks, | |
142 | int policy) | |
143 | { | |
144 | lock_set_t lock_set = LOCK_SET_NULL; | |
145 | ulock_t ulock; | |
8ad349bb | 146 | vm_size_t size; |
1c79356b | 147 | int x; |
148 | ||
149 | *new_lock_set = LOCK_SET_NULL; | |
150 | ||
151 | if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX) | |
152 | return KERN_INVALID_ARGUMENT; | |
153 | ||
2d21ac55 | 154 | if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks) |
8ad349bb | 155 | return KERN_RESOURCE_SHORTAGE; |
156 | ||
1c79356b | 157 | size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1)); |
158 | lock_set = (lock_set_t) kalloc (size); | |
159 | ||
160 | if (lock_set == LOCK_SET_NULL) | |
161 | return KERN_RESOURCE_SHORTAGE; | |
162 | ||
163 | ||
164 | lock_set_lock_init(lock_set); | |
165 | lock_set->n_ulocks = n_ulocks; | |
b0d623f7 | 166 | lock_set->ref_count = (task == kernel_task) ? 1 : 2; /* one for kernel, one for port */ |
1c79356b | 167 | |
168 | /* | |
169 | * Create and initialize the lock set port | |
170 | */ | |
171 | lock_set->port = ipc_port_alloc_kernel(); | |
172 | if (lock_set->port == IP_NULL) { | |
b0d623f7 | 173 | kfree(lock_set, size); |
1c79356b | 174 | return KERN_RESOURCE_SHORTAGE; |
175 | } | |
176 | ||
177 | ipc_kobject_set (lock_set->port, | |
178 | (ipc_kobject_t) lock_set, | |
179 | IKOT_LOCK_SET); | |
180 | ||
181 | /* | |
182 | * Initialize each ulock in the lock set | |
183 | */ | |
184 | ||
185 | for (x=0; x < n_ulocks; x++) { | |
186 | ulock = (ulock_t) &lock_set->ulock_list[x]; | |
187 | ulock_lock_init(ulock); | |
188 | ulock->lock_set = lock_set; | |
91447636 | 189 | ulock->holder = THREAD_NULL; |
1c79356b | 190 | ulock->blocked = FALSE; |
191 | ulock->unstable = FALSE; | |
192 | ulock->ho_wait = FALSE; | |
b0d623f7 | 193 | ulock->accept_wait = FALSE; |
1c79356b | 194 | wait_queue_init(&ulock->wait_queue, policy); |
195 | } | |
196 | ||
197 | lock_set_ownership_set(lock_set, task); | |
198 | ||
199 | lock_set->active = TRUE; | |
200 | *new_lock_set = lock_set; | |
201 | ||
202 | return KERN_SUCCESS; | |
203 | } | |
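A hedged user-space sketch of exercising this routine through the MIG-generated stubs of this era (declared via `<mach/mach.h>`; at user level `lock_set_t` is just a `mach_port_t`). `NUM_ULOCKS` and the helper names are illustrative, not part of this file:

```c
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/sync_policy.h>	/* SYNC_POLICY_FIFO */
#include <stdio.h>

#define NUM_ULOCKS 4		/* illustrative ulock count */

static lock_set_t
make_lock_set(void)
{
	lock_set_t	lset = MACH_PORT_NULL;
	kern_return_t	kr;

	/* On success the calling task owns the set and holds a send right to its port. */
	kr = lock_set_create(mach_task_self(), &lset, NUM_ULOCKS, SYNC_POLICY_FIFO);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "lock_set_create: %s\n", mach_error_string(kr));
		return MACH_PORT_NULL;
	}
	return lset;
}

/* Tear-down: any waiters are woken with KERN_LOCK_SET_DESTROYED (see below). */
static void
destroy_lock_set(lock_set_t lset)
{
	(void) lock_set_destroy(mach_task_self(), lset);
}
```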
204 | ||
205 | /* | |
206 | * ROUTINE: lock_set_destroy [exported] | |
207 | * | |
208 | * Destroys a lock set. This call will only succeed if the | |
209 | * specified task is the SAME task that was specified at the lock set's | |
210 | * creation. | |
211 | * | |
212 | * NOTES: | |
213 | * - All threads currently blocked on the lock set's ulocks are awoken. | |
214 | * - These threads will return with the KERN_LOCK_SET_DESTROYED error. | |
215 | */ | |
216 | kern_return_t | |
217 | lock_set_destroy (task_t task, lock_set_t lock_set) | |
218 | { | |
1c79356b | 219 | ulock_t ulock; |
220 | int i; | |
221 | ||
222 | if (task == TASK_NULL || lock_set == LOCK_SET_NULL) | |
223 | return KERN_INVALID_ARGUMENT; | |
224 | ||
225 | if (lock_set->owner != task) | |
226 | return KERN_INVALID_RIGHT; | |
227 | ||
228 | lock_set_lock(lock_set); | |
229 | if (!lock_set->active) { | |
230 | lock_set_unlock(lock_set); | |
231 | return KERN_LOCK_SET_DESTROYED; | |
232 | } | |
233 | ||
234 | /* | |
235 | * Deactivate lock set | |
236 | */ | |
237 | lock_set->active = FALSE; | |
238 | ||
239 | /* | |
240 | * If a ulock is currently held in the target lock set: | |
241 | * | |
242 | * 1) Wakeup all threads blocked on the ulock (if any). Threads | |
243 | * may be blocked waiting normally, or waiting for a handoff. | |
244 | * Blocked threads will return with KERN_LOCK_SET_DESTROYED. | |
245 | * | |
246 | * 2) ulock ownership is cleared. | |
247 | * The thread currently holding the ulock has its | |
248 | * ownership revoked. | |
249 | */ | |
250 | for (i = 0; i < lock_set->n_ulocks; i++) { | |
251 | ulock = &lock_set->ulock_list[i]; | |
252 | ||
253 | ulock_lock(ulock); | |
254 | ||
255 | if (ulock->accept_wait) { | |
256 | ulock->accept_wait = FALSE; | |
9bccf70c | 257 | wait_queue_wakeup64_one(&ulock->wait_queue, |
1c79356b | 258 | LOCK_SET_HANDOFF, |
259 | THREAD_RESTART); | |
260 | } | |
261 | ||
262 | if (ulock->holder) { | |
263 | if (ulock->blocked) { | |
264 | ulock->blocked = FALSE; | |
9bccf70c | 265 | wait_queue_wakeup64_all(&ulock->wait_queue, |
1c79356b | 266 | LOCK_SET_EVENT, |
267 | THREAD_RESTART); | |
268 | } | |
269 | if (ulock->ho_wait) { | |
270 | ulock->ho_wait = FALSE; | |
9bccf70c | 271 | wait_queue_wakeup64_one(&ulock->wait_queue, |
1c79356b | 272 | LOCK_SET_HANDOFF, |
273 | THREAD_RESTART); | |
274 | } | |
275 | ulock_ownership_clear(ulock); | |
276 | } | |
277 | ||
278 | ulock_unlock(ulock); | |
279 | } | |
280 | ||
281 | lock_set_unlock(lock_set); | |
282 | lock_set_ownership_clear(lock_set, task); | |
283 | ||
284 | /* | |
b0d623f7 | 285 | * Drop the lock set reference given to the containing task, |
286 | * which in turn destroys the lock set structure if the reference | |
287 | * count goes to zero. | |
1c79356b | 288 | */ |
1c79356b | 289 | lock_set_dereference(lock_set); |
290 | ||
291 | return KERN_SUCCESS; | |
292 | } | |
293 | ||
294 | kern_return_t | |
295 | lock_acquire (lock_set_t lock_set, int lock_id) | |
296 | { | |
297 | ulock_t ulock; | |
298 | ||
299 | if (lock_set == LOCK_SET_NULL) | |
300 | return KERN_INVALID_ARGUMENT; | |
301 | ||
302 | if (lock_id < 0 || lock_id >= lock_set->n_ulocks) | |
303 | return KERN_INVALID_ARGUMENT; | |
304 | ||
305 | retry: | |
306 | lock_set_lock(lock_set); | |
307 | if (!lock_set->active) { | |
308 | lock_set_unlock(lock_set); | |
309 | return KERN_LOCK_SET_DESTROYED; | |
310 | } | |
311 | ||
312 | ulock = (ulock_t) &lock_set->ulock_list[lock_id]; | |
313 | ulock_lock(ulock); | |
314 | lock_set_unlock(lock_set); | |
315 | ||
316 | /* | |
317 | * Block the current thread if the lock is already held. | |
318 | */ | |
319 | ||
91447636 | 320 | if (ulock->holder != THREAD_NULL) { |
1c79356b | 321 | int wait_result; |
322 | ||
91447636 | 323 | if (ulock->holder == current_thread()) { |
1c79356b | 324 | ulock_unlock(ulock); |
325 | return KERN_LOCK_OWNED_SELF; | |
326 | } | |
327 | ||
328 | ulock->blocked = TRUE; | |
9bccf70c | 329 | wait_result = wait_queue_assert_wait64(&ulock->wait_queue, |
1c79356b | 330 | LOCK_SET_EVENT, |
91447636 | 331 | THREAD_ABORTSAFE, 0); |
1c79356b | 332 | ulock_unlock(ulock); |
333 | ||
334 | /* | |
335 | * Block - Wait for lock to become available. | |
336 | */ | |
9bccf70c | 337 | if (wait_result == THREAD_WAITING) |
338 | wait_result = thread_block(THREAD_CONTINUE_NULL); | |
1c79356b | 339 | |
340 | /* | |
341 | * Check the result status: | |
342 | * | |
343 | * Check to see why thread was woken up. In all cases, we | |
344 | * already have been removed from the queue. | |
345 | */ | |
346 | switch (wait_result) { | |
347 | case THREAD_AWAKENED: | |
348 | /* lock transitioned from old locker to us */ | |
349 | /* he already made us owner */ | |
350 | return (ulock->unstable) ? KERN_LOCK_UNSTABLE : | |
351 | KERN_SUCCESS; | |
352 | ||
353 | case THREAD_INTERRUPTED: | |
354 | return KERN_ABORTED; | |
355 | ||
356 | case THREAD_RESTART: | |
357 | goto retry; /* probably a dead lock_set */ | |
358 | ||
359 | default: | |
360 | panic("lock_acquire\n"); | |
361 | } | |
362 | } | |
363 | ||
364 | /* | |
365 | * Assign lock ownership | |
366 | */ | |
367 | ulock_ownership_set(ulock, current_thread()); | |
368 | ulock_unlock(ulock); | |
369 | ||
370 | return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS; | |
371 | } | |
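For illustration, the usual blocking acquire/release pairing as seen from a user task, again assuming the user-level stubs; `shared_state_update()` is a stand-in for the caller's critical section:

```c
#include <mach/mach.h>

extern void shared_state_update(void);	/* illustrative critical section */

static kern_return_t
update_under_ulock(lock_set_t lset, int lock_id)
{
	kern_return_t kr;

	/* Blocks (THREAD_ABORTSAFE in the kernel) until the ulock is free,
	 * the wait is interrupted, or the lock set is destroyed. */
	kr = lock_acquire(lset, lock_id);
	if (kr != KERN_SUCCESS && kr != KERN_LOCK_UNSTABLE)
		return kr;	/* KERN_ABORTED, KERN_LOCK_SET_DESTROYED, ... */

	shared_state_update();

	/* Fails with KERN_INVALID_RIGHT if the caller is not the holder. */
	return lock_release(lset, lock_id);
}
```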
372 | ||
373 | kern_return_t | |
374 | lock_release (lock_set_t lock_set, int lock_id) | |
375 | { | |
376 | ulock_t ulock; | |
377 | ||
378 | if (lock_set == LOCK_SET_NULL) | |
379 | return KERN_INVALID_ARGUMENT; | |
380 | ||
381 | if (lock_id < 0 || lock_id >= lock_set->n_ulocks) | |
382 | return KERN_INVALID_ARGUMENT; | |
383 | ||
384 | ulock = (ulock_t) &lock_set->ulock_list[lock_id]; | |
385 | ||
91447636 | 386 | return (ulock_release_internal(ulock, current_thread())); |
1c79356b | 387 | } |
388 | ||
389 | kern_return_t | |
390 | lock_try (lock_set_t lock_set, int lock_id) | |
391 | { | |
392 | ulock_t ulock; | |
393 | ||
394 | ||
395 | if (lock_set == LOCK_SET_NULL) | |
396 | return KERN_INVALID_ARGUMENT; | |
397 | ||
398 | if (lock_id < 0 || lock_id >= lock_set->n_ulocks) | |
399 | return KERN_INVALID_ARGUMENT; | |
400 | ||
401 | ||
402 | lock_set_lock(lock_set); | |
403 | if (!lock_set->active) { | |
404 | lock_set_unlock(lock_set); | |
405 | return KERN_LOCK_SET_DESTROYED; | |
406 | } | |
407 | ||
408 | ulock = (ulock_t) &lock_set->ulock_list[lock_id]; | |
409 | ulock_lock(ulock); | |
410 | lock_set_unlock(lock_set); | |
411 | ||
412 | /* | |
413 | * If the lock is already owned, we return without blocking. | |
414 | * | |
415 | * An ownership status is returned to inform the caller as to | |
416 | * whether it already holds the lock or another thread does. | |
417 | */ | |
418 | ||
91447636 | 419 | if (ulock->holder != THREAD_NULL) { |
1c79356b | 420 | lock_set_unlock(lock_set); |
421 | ||
91447636 | 422 | if (ulock->holder == current_thread()) { |
1c79356b | 423 | ulock_unlock(ulock); |
424 | return KERN_LOCK_OWNED_SELF; | |
425 | } | |
426 | ||
427 | ulock_unlock(ulock); | |
428 | return KERN_LOCK_OWNED; | |
429 | } | |
430 | ||
431 | /* | |
432 | * Add the ulock to the lock set's held_ulocks list. | |
433 | */ | |
434 | ||
435 | ulock_ownership_set(ulock, current_thread()); | |
436 | ulock_unlock(ulock); | |
437 | ||
438 | return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS; | |
439 | } | |
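The non-blocking variant, sketched the same way; `KERN_LOCK_OWNED` and `KERN_LOCK_OWNED_SELF` let the caller distinguish contention from a recursive attempt:

```c
#include <mach/mach.h>

extern void shared_state_update(void);	/* illustrative critical section */

static boolean_t
try_update_under_ulock(lock_set_t lset, int lock_id)
{
	kern_return_t kr = lock_try(lset, lock_id);

	if (kr == KERN_LOCK_OWNED_SELF)
		return FALSE;	/* we already hold it; nothing to do here */
	if (kr != KERN_SUCCESS && kr != KERN_LOCK_UNSTABLE)
		return FALSE;	/* KERN_LOCK_OWNED, destroyed set, bad id, ... */

	shared_state_update();
	(void) lock_release(lset, lock_id);
	return TRUE;
}
```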
440 | ||
441 | kern_return_t | |
442 | lock_make_stable (lock_set_t lock_set, int lock_id) | |
443 | { | |
444 | ulock_t ulock; | |
445 | ||
446 | ||
447 | if (lock_set == LOCK_SET_NULL) | |
448 | return KERN_INVALID_ARGUMENT; | |
449 | ||
450 | if (lock_id < 0 || lock_id >= lock_set->n_ulocks) | |
451 | return KERN_INVALID_ARGUMENT; | |
452 | ||
453 | ||
454 | lock_set_lock(lock_set); | |
455 | if (!lock_set->active) { | |
456 | lock_set_unlock(lock_set); | |
457 | return KERN_LOCK_SET_DESTROYED; | |
458 | } | |
459 | ||
460 | ulock = (ulock_t) &lock_set->ulock_list[lock_id]; | |
461 | ulock_lock(ulock); | |
462 | lock_set_unlock(lock_set); | |
463 | ||
91447636 | 464 | if (ulock->holder != current_thread()) { |
1c79356b | 465 | ulock_unlock(ulock); |
466 | return KERN_INVALID_RIGHT; | |
467 | } | |
468 | ||
469 | ulock->unstable = FALSE; | |
470 | ulock_unlock(ulock); | |
471 | ||
472 | return KERN_SUCCESS; | |
473 | } | |
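The unstable/stable pair exists for crash recovery: when a holder dies, `ulock_release_all()` (at the end of this file) marks the lock unstable, and the next acquirer gets `KERN_LOCK_UNSTABLE` so it can repair the protected data before declaring it sound again. A hedged sketch, with `shared_state_repair()` as an illustrative recovery step:

```c
#include <mach/mach.h>

extern void shared_state_repair(void);	/* illustrative recovery of protected data */

static kern_return_t
acquire_and_heal(lock_set_t lset, int lock_id)
{
	kern_return_t kr = lock_acquire(lset, lock_id);

	if (kr == KERN_LOCK_UNSTABLE) {
		/* We do hold the ulock here; only the data is suspect. */
		shared_state_repair();
		kr = lock_make_stable(lset, lock_id);	/* later acquires see KERN_SUCCESS */
	}
	return kr;
}
```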
474 | ||
475 | /* | |
476 | * ROUTINE: lock_make_unstable [internal] | |
477 | * | |
478 | * Marks the lock as unstable. | |
479 | * | |
480 | * NOTES: | |
481 | * - All future acquisitions of the lock will return with a | |
482 | * KERN_LOCK_UNSTABLE status, until the lock is made stable again. | |
483 | */ | |
484 | kern_return_t | |
91447636 | 485 | lock_make_unstable (ulock_t ulock, thread_t thread) |
1c79356b | 486 | { |
487 | lock_set_t lock_set; | |
488 | ||
1c79356b | 489 | lock_set = ulock->lock_set; |
490 | lock_set_lock(lock_set); | |
491 | if (!lock_set->active) { | |
492 | lock_set_unlock(lock_set); | |
493 | return KERN_LOCK_SET_DESTROYED; | |
494 | } | |
495 | ||
496 | ulock_lock(ulock); | |
497 | lock_set_unlock(lock_set); | |
498 | ||
91447636 | 499 | if (ulock->holder != thread) { |
1c79356b | 500 | ulock_unlock(ulock); |
501 | return KERN_INVALID_RIGHT; | |
502 | } | |
503 | ||
504 | ulock->unstable = TRUE; | |
505 | ulock_unlock(ulock); | |
506 | ||
507 | return KERN_SUCCESS; | |
508 | } | |
509 | ||
510 | /* | |
91447636 | 511 | * ROUTINE: ulock_release_internal [internal] |
1c79356b | 512 | * |
513 | * Releases the ulock. | |
514 | * If any threads are blocked waiting for the ulock, one is woken-up. | |
515 | * | |
516 | */ | |
517 | kern_return_t | |
91447636 | 518 | ulock_release_internal (ulock_t ulock, thread_t thread) |
1c79356b | 519 | { |
520 | lock_set_t lock_set; | |
1c79356b | 521 | |
522 | if ((lock_set = ulock->lock_set) == LOCK_SET_NULL) | |
523 | return KERN_INVALID_ARGUMENT; | |
524 | ||
525 | lock_set_lock(lock_set); | |
526 | if (!lock_set->active) { | |
527 | lock_set_unlock(lock_set); | |
528 | return KERN_LOCK_SET_DESTROYED; | |
529 | } | |
530 | ulock_lock(ulock); | |
531 | lock_set_unlock(lock_set); | |
532 | ||
91447636 | 533 | if (ulock->holder != thread) { |
1c79356b | 534 | ulock_unlock(ulock); |
1c79356b | 535 | return KERN_INVALID_RIGHT; |
536 | } | |
537 | ||
538 | /* | |
539 | * If we have a hint that threads might be waiting, | |
540 | * try to transfer the lock ownership to a waiting thread | |
541 | * and wake it up. | |
542 | */ | |
543 | if (ulock->blocked) { | |
544 | wait_queue_t wq = &ulock->wait_queue; | |
91447636 | 545 | thread_t wqthread; |
1c79356b | 546 | spl_t s; |
547 | ||
548 | s = splsched(); | |
549 | wait_queue_lock(wq); | |
91447636 | 550 | wqthread = wait_queue_wakeup64_identity_locked(wq, |
1c79356b | 551 | LOCK_SET_EVENT, |
552 | THREAD_AWAKENED, | |
553 | TRUE); | |
554 | /* wait_queue now unlocked, thread locked */ | |
555 | ||
91447636 | 556 | if (wqthread != THREAD_NULL) { |
91447636 | 557 | thread_unlock(wqthread); |
1c79356b | 558 | splx(s); |
559 | ||
560 | /* | |
561 | * Transfer ulock ownership | |
562 | * from the current thread to the acquisition thread. | |
563 | */ | |
564 | ulock_ownership_clear(ulock); | |
91447636 | 565 | ulock_ownership_set(ulock, wqthread); |
1c79356b | 566 | ulock_unlock(ulock); |
567 | ||
568 | return KERN_SUCCESS; | |
569 | } else { | |
570 | ulock->blocked = FALSE; | |
571 | splx(s); | |
572 | } | |
573 | } | |
574 | ||
575 | /* | |
576 | * Disown ulock | |
577 | */ | |
578 | ulock_ownership_clear(ulock); | |
579 | ulock_unlock(ulock); | |
580 | ||
581 | return KERN_SUCCESS; | |
582 | } | |
583 | ||
584 | kern_return_t | |
585 | lock_handoff (lock_set_t lock_set, int lock_id) | |
586 | { | |
587 | ulock_t ulock; | |
588 | int wait_result; | |
589 | ||
590 | ||
591 | if (lock_set == LOCK_SET_NULL) | |
592 | return KERN_INVALID_ARGUMENT; | |
593 | ||
594 | if (lock_id < 0 || lock_id >= lock_set->n_ulocks) | |
595 | return KERN_INVALID_ARGUMENT; | |
596 | ||
597 | retry: | |
598 | lock_set_lock(lock_set); | |
599 | ||
600 | if (!lock_set->active) { | |
601 | lock_set_unlock(lock_set); | |
602 | return KERN_LOCK_SET_DESTROYED; | |
603 | } | |
604 | ||
605 | ulock = (ulock_t) &lock_set->ulock_list[lock_id]; | |
606 | ulock_lock(ulock); | |
607 | lock_set_unlock(lock_set); | |
608 | ||
91447636 | 609 | if (ulock->holder != current_thread()) { |
1c79356b | 610 | ulock_unlock(ulock); |
1c79356b | 611 | return KERN_INVALID_RIGHT; |
612 | } | |
613 | ||
614 | /* | |
615 | * If the accepting thread (the receiver) is already waiting | |
616 | * to accept the lock from the handoff thread (the sender), | |
617 | * then perform the hand-off now. | |
618 | */ | |
619 | ||
620 | if (ulock->accept_wait) { | |
621 | wait_queue_t wq = &ulock->wait_queue; | |
622 | thread_t thread; | |
623 | spl_t s; | |
624 | ||
625 | /* | |
626 | * See who the lucky devil is, if he is still there waiting. | |
627 | */ | |
628 | s = splsched(); | |
629 | wait_queue_lock(wq); | |
9bccf70c | 630 | thread = wait_queue_wakeup64_identity_locked( |
1c79356b | 631 | wq, |
632 | LOCK_SET_HANDOFF, | |
633 | THREAD_AWAKENED, | |
634 | TRUE); | |
635 | /* wait queue unlocked, thread locked */ | |
636 | ||
637 | /* | |
638 | * Transfer lock ownership | |
639 | */ | |
640 | if (thread != THREAD_NULL) { | |
b0d623f7 | 641 | /* |
642 | * The thread we are transferring to will try | |
643 | * to take the lock on the ulock, and therefore | |
644 | * will wait for us to complete the handoff even | |
645 | * though we set the thread running. | |
1c79356b | 646 | */ |
647 | thread_unlock(thread); | |
648 | splx(s); | |
649 | ||
650 | ulock_ownership_clear(ulock); | |
651 | ulock_ownership_set(ulock, thread); | |
652 | ulock->accept_wait = FALSE; | |
653 | ulock_unlock(ulock); | |
654 | return KERN_SUCCESS; | |
655 | } else { | |
656 | ||
657 | /* | |
658 | * OOPS. The accepting thread must have been aborted, | |
659 | * and is racing back to clear the flag that says it is | |
660 | * waiting for an accept. He will clear it when we | |
661 | * release the lock, so just fall thru and wait for | |
662 | * the next accept thread (that's the way it is | |
663 | * specified). | |
664 | */ | |
665 | splx(s); | |
666 | } | |
667 | } | |
668 | ||
669 | /* | |
670 | * Indicate that there is a hand-off thread waiting, and then wait | |
671 | * for an accepting thread. | |
672 | */ | |
673 | ulock->ho_wait = TRUE; | |
9bccf70c | 674 | wait_result = wait_queue_assert_wait64(&ulock->wait_queue, |
1c79356b | 675 | LOCK_SET_HANDOFF, |
91447636 | 676 | THREAD_ABORTSAFE, 0); |
1c79356b | 677 | ulock_unlock(ulock); |
678 | ||
9bccf70c | 679 | if (wait_result == THREAD_WAITING) |
680 | wait_result = thread_block(THREAD_CONTINUE_NULL); | |
1c79356b | 681 | |
682 | /* | |
683 | * If the thread was woken-up via some action other than | |
684 | * lock_handoff_accept or lock_set_destroy (i.e. thread_terminate), | |
685 | * then we need to clear the ulock's handoff state. | |
686 | */ | |
687 | switch (wait_result) { | |
688 | ||
b0d623f7 | 689 | |
1c79356b | 690 | case THREAD_AWAKENED: |
b0d623f7 | 691 | * We take the ulock lock to synchronize with the |
692 | * we take the ulock lock to syncronize with the | |
693 | * thread that is accepting ownership. | |
694 | */ | |
695 | ulock_lock(ulock); | |
696 | assert(ulock->holder != current_thread()); | |
697 | ulock_unlock(ulock); | |
1c79356b | 698 | return KERN_SUCCESS; |
699 | ||
700 | case THREAD_INTERRUPTED: | |
701 | ulock_lock(ulock); | |
91447636 | 702 | assert(ulock->holder == current_thread()); |
1c79356b | 703 | ulock->ho_wait = FALSE; |
704 | ulock_unlock(ulock); | |
705 | return KERN_ABORTED; | |
706 | ||
707 | case THREAD_RESTART: | |
708 | goto retry; | |
1c79356b | 709 | } |
91447636 | 710 | |
711 | panic("lock_handoff"); | |
712 | return KERN_FAILURE; | |
1c79356b | 713 | } |
714 | ||
715 | kern_return_t | |
716 | lock_handoff_accept (lock_set_t lock_set, int lock_id) | |
717 | { | |
718 | ulock_t ulock; | |
719 | int wait_result; | |
720 | ||
721 | ||
722 | if (lock_set == LOCK_SET_NULL) | |
723 | return KERN_INVALID_ARGUMENT; | |
724 | ||
725 | if (lock_id < 0 || lock_id >= lock_set->n_ulocks) | |
726 | return KERN_INVALID_ARGUMENT; | |
727 | ||
728 | retry: | |
729 | lock_set_lock(lock_set); | |
730 | if (!lock_set->active) { | |
731 | lock_set_unlock(lock_set); | |
732 | return KERN_LOCK_SET_DESTROYED; | |
733 | } | |
734 | ||
735 | ulock = (ulock_t) &lock_set->ulock_list[lock_id]; | |
736 | ulock_lock(ulock); | |
737 | lock_set_unlock(lock_set); | |
738 | ||
739 | /* | |
740 | * If there is another accepting thread that beat us, just | |
741 | * return with an error. | |
742 | */ | |
743 | if (ulock->accept_wait) { | |
744 | ulock_unlock(ulock); | |
745 | return KERN_ALREADY_WAITING; | |
746 | } | |
747 | ||
91447636 | 748 | if (ulock->holder == current_thread()) { |
1c79356b | 749 | ulock_unlock(ulock); |
750 | return KERN_LOCK_OWNED_SELF; | |
751 | } | |
752 | ||
753 | /* | |
754 | * If the handoff thread (the sender) is already waiting to | |
755 | * hand-off the lock to the accepting thread (the receiver), | |
756 | * then perform the hand-off now. | |
757 | */ | |
758 | if (ulock->ho_wait) { | |
759 | wait_queue_t wq = &ulock->wait_queue; | |
1c79356b | 760 | |
761 | /* | |
762 | * See who the lucky devil is, if he is still there waiting. | |
763 | */ | |
91447636 | 764 | assert(ulock->holder != THREAD_NULL); |
1c79356b | 765 | |
9bccf70c | 766 | if (wait_queue_wakeup64_thread(wq, |
1c79356b | 767 | LOCK_SET_HANDOFF, |
91447636 | 768 | ulock->holder, |
1c79356b | 769 | THREAD_AWAKENED) == KERN_SUCCESS) { |
770 | /* | |
771 | * Holder thread was still waiting to give it | |
772 | * away. Take over ownership. | |
773 | */ | |
774 | ulock_ownership_clear(ulock); | |
775 | ulock_ownership_set(ulock, current_thread()); | |
776 | ulock->ho_wait = FALSE; | |
777 | ulock_unlock(ulock); | |
778 | return (ulock->unstable) ? KERN_LOCK_UNSTABLE : | |
779 | KERN_SUCCESS; | |
780 | } | |
781 | ||
782 | /* | |
783 | * OOPS. The owner was aborted out of the handoff. | |
784 | * He will clear his own flag when he gets back. | |
785 | * In the meantime, we will wait as if we didn't | |
786 | * even see his flag (by falling thru). | |
787 | */ | |
788 | } | |
789 | ||
790 | ulock->accept_wait = TRUE; | |
9bccf70c | 791 | wait_result = wait_queue_assert_wait64(&ulock->wait_queue, |
1c79356b | 792 | LOCK_SET_HANDOFF, |
91447636 | 793 | THREAD_ABORTSAFE, 0); |
1c79356b | 794 | ulock_unlock(ulock); |
795 | ||
9bccf70c | 796 | if (wait_result == THREAD_WAITING) |
797 | wait_result = thread_block(THREAD_CONTINUE_NULL); | |
1c79356b | 798 | |
799 | /* | |
800 | * If the thread was woken-up via some action other than | |
801 | * lock_handoff_accept or lock_set_destroy (i.e. thread_terminate), | |
802 | * then we need to clear the ulock's handoff state. | |
803 | */ | |
804 | switch (wait_result) { | |
805 | ||
806 | case THREAD_AWAKENED: | |
b0d623f7 | 807 | /* |
808 | * Take the lock to synchronize with the thread handing | |
809 | * off the lock to us. We don't want to continue until | |
810 | * they complete the handoff. | |
811 | */ | |
812 | ulock_lock(ulock); | |
813 | assert(ulock->accept_wait == FALSE); | |
814 | assert(ulock->holder == current_thread()); | |
815 | ulock_unlock(ulock); | |
1c79356b | 816 | return KERN_SUCCESS; |
817 | ||
818 | case THREAD_INTERRUPTED: | |
819 | ulock_lock(ulock); | |
820 | ulock->accept_wait = FALSE; | |
821 | ulock_unlock(ulock); | |
822 | return KERN_ABORTED; | |
823 | ||
824 | case THREAD_RESTART: | |
825 | goto retry; | |
1c79356b | 826 | } |
91447636 | 827 | |
828 | panic("lock_handoff_accept"); | |
829 | return KERN_FAILURE; | |
1c79356b | 830 | } |
831 | ||
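Hand-off is a directed transfer: the holder parks in `lock_handoff()` until some thread calls `lock_handoff_accept()` on the same ulock (whichever side arrives first waits for the other). A user-space sketch of the two sides, with pthreads used purely for illustration:

```c
#include <mach/mach.h>
#include <pthread.h>

struct handoff_args {
	lock_set_t	lset;
	int		lock_id;
};

static void *
receiver(void *p)
{
	struct handoff_args *a = p;

	/* Blocks until the holder offers the lock; on success we are the holder. */
	if (lock_handoff_accept(a->lset, a->lock_id) == KERN_SUCCESS)
		(void) lock_release(a->lset, a->lock_id);
	return NULL;
}

static void
hand_lock_over(struct handoff_args *a)
{
	pthread_t t;

	(void) lock_acquire(a->lset, a->lock_id);	/* become the holder     */
	pthread_create(&t, NULL, receiver, a);
	(void) lock_handoff(a->lset, a->lock_id);	/* blocks until accepted */
	pthread_join(t, NULL);
}
```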
832 | /* | |
833 | * Routine: lock_set_reference | |
834 | * | |
835 | * Take out a reference on a lock set. This keeps the data structure | |
836 | * in existence (but the lock set may be deactivated). | |
837 | */ | |
838 | void | |
839 | lock_set_reference(lock_set_t lock_set) | |
840 | { | |
841 | lock_set_lock(lock_set); | |
842 | lock_set->ref_count++; | |
843 | lock_set_unlock(lock_set); | |
844 | } | |
845 | ||
846 | /* | |
847 | * Routine: lock_set_dereference | |
848 | * | |
849 | * Release a reference on a lock set. If this is the last reference, | |
850 | * the lock set data structure is deallocated. | |
851 | */ | |
852 | void | |
853 | lock_set_dereference(lock_set_t lock_set) | |
854 | { | |
855 | int ref_count; | |
856 | int size; | |
857 | ||
858 | lock_set_lock(lock_set); | |
859 | ref_count = --(lock_set->ref_count); | |
860 | lock_set_unlock(lock_set); | |
861 | ||
862 | if (ref_count == 0) { | |
b0d623f7 | 863 | ipc_port_dealloc_kernel(lock_set->port); |
864 | size = (int)(sizeof(struct lock_set) + | |
865 | (sizeof(struct ulock) * (lock_set->n_ulocks - 1))); | |
91447636 | 866 | kfree(lock_set, size); |
867 | } | |
868 | } | |
869 | ||
870 | void | |
871 | ulock_release_all( | |
872 | thread_t thread) | |
873 | { | |
874 | ulock_t ulock; | |
875 | ||
876 | while (!queue_empty(&thread->held_ulocks)) { | |
877 | ulock = (ulock_t)queue_first(&thread->held_ulocks); | |
878 | lock_make_unstable(ulock, thread); | |
879 | ulock_release_internal(ulock, thread); | |
1c79356b | 880 | } |
881 | } |