/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */

#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

/*
 * Ulock ownership MACROS
 *
 * Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)                             \
        MACRO_BEGIN                                             \
        thread_act_t _th_act;                                   \
        _th_act = (th)->top_act;                                \
        act_lock(_th_act);                                      \
        enqueue(&_th_act->held_ulocks, (queue_entry_t) (ul));   \
        act_unlock(_th_act);                                    \
        (ul)->holder = _th_act;                                 \
        MACRO_END

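/*
 * The active/inactive split below appears to assume that once the
 * holder's activation has gone inactive, no new ulocks can be
 * enqueued on it, so its held_ulocks chain can be unlinked without
 * taking the act lock.  That reading is an inference, not stated in
 * the original.
 */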
#define ulock_ownership_clear(ul)                               \
        MACRO_BEGIN                                             \
        thread_act_t _th_act;                                   \
        _th_act = (ul)->holder;                                 \
        if (_th_act->active) {                                  \
                act_lock(_th_act);                              \
                remqueue(&_th_act->held_ulocks,                 \
                         (queue_entry_t) (ul));                 \
                act_unlock(_th_act);                            \
        } else {                                                \
                remqueue(&_th_act->held_ulocks,                 \
                         (queue_entry_t) (ul));                 \
        }                                                       \
        (ul)->holder = THR_ACT_NULL;                            \
        MACRO_END

/*
 * Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)                           \
        MACRO_BEGIN                                             \
        task_lock((t));                                         \
        enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\
        (t)->lock_sets_owned++;                                 \
        task_unlock((t));                                       \
        (ls)->owner = (t);                                      \
        MACRO_END

#define lock_set_ownership_clear(ls, t)                         \
        MACRO_BEGIN                                             \
        task_lock((t));                                         \
        remqueue(&(t)->lock_set_list, (queue_entry_t) (ls));    \
        (t)->lock_sets_owned--;                                 \
        task_unlock((t));                                       \
        MACRO_END

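/*
 * The addresses of the two dummy variables below serve as the
 * system-wide wait events for lock-set blocking and handoff; only
 * their addresses are meaningful, never their contents.
 */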
unsigned int lock_set_event;
#define LOCK_SET_EVENT ((event64_t)&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)

/*
 * ROUTINE:	lock_set_init		[private]
 *
 * Initialize the lock_set subsystem.
 *
 * For now, we don't have anything to do here.
 */
void
lock_set_init(void)
{
	return;
}


/*
 * ROUTINE:	lock_set_create		[exported]
 *
 * Creates a lock set.
 * The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	int		size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;

	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = 1;

	/*
	 * Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		/* This will deallocate the lock set */
		lock_set_dereference(lock_set);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			 (ipc_kobject_t) lock_set,
			 IKOT_LOCK_SET);

	/*
	 * Initialize each ulock in the lock set
	 */

	for (x = 0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set = lock_set;
		ulock->holder = THR_ACT_NULL;
		ulock->blocked = FALSE;
		ulock->unstable = FALSE;
		ulock->ho_wait = FALSE;
		ulock->accept_wait = FALSE;	/* kalloc'd memory is not zeroed */
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}
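
/*
 * A minimal usage sketch (illustrative, not part of the original
 * file): an in-kernel client creates a small FIFO set, takes and
 * drops the first ulock, then destroys the set.
 *
 *	lock_set_t	lock_set;
 *	kern_return_t	kr;
 *
 *	kr = lock_set_create(current_task(), &lock_set, 4,
 *			     SYNC_POLICY_FIFO);
 *	if (kr == KERN_SUCCESS) {
 *		(void) lock_acquire(lock_set, 0);
 *		...
 *		(void) lock_release(lock_set, 0);
 *		(void) lock_set_destroy(current_task(), lock_set);
 *	}
 */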

/*
 * ROUTINE:	lock_set_destroy	[exported]
 *
 * Destroys a lock set.  This call will only succeed if the
 * specified task is the SAME task name specified at the lock set's
 * creation.
 *
 * NOTES:
 * - All threads currently blocked on the lock set's ulocks are awoken.
 * - These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	thread_t	thread;
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 * Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 * If a ulock is currently held in the target lock set:
	 *
	 * 1) Wakeup all threads blocked on the ulock (if any).  Threads
	 *    may be blocked waiting normally, or waiting for a handoff.
	 *    Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 * 2) ulock ownership is cleared.
	 *    The thread currently holding the ulock is revoked of its
	 *    ownership.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
						LOCK_SET_HANDOFF,
						THREAD_RESTART);
		}

		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
							LOCK_SET_EVENT,
							THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
							LOCK_SET_HANDOFF,
							THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 * Deallocate
	 *
	 * Drop the lock set reference, which in turn destroys the
	 * lock set structure if the reference count goes to zero.
	 */

	ipc_port_dealloc_kernel(lock_set->port);
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}

kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t	ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * Block the current thread if the lock is already held.
	 */

	if (ulock->holder != THR_ACT_NULL) {
		int wait_result;

		if (ulock->holder == current_act()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
						       LOCK_SET_EVENT,
						       THREAD_ABORTSAFE);
		ulock_unlock(ulock);

		/*
		 * Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 * Check the result status:
		 *
		 * Check to see why the thread was woken up.  In all cases,
		 * we already have been removed from the queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from old locker to us */
			/* he already made us owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;	/* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 * Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t	ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (lock_release_internal(ulock, current_act()));
}

kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t	ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If the lock is already owned, we return without blocking.
	 *
	 * An ownership status is returned to inform the caller as to
	 * whether it already holds the lock or another thread does.
	 */

	if (ulock->holder != THR_ACT_NULL) {
		if (ulock->holder == current_act()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 * Add the ulock to the current thread's held_ulocks list.
	 */

	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
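
/*
 * Illustrative non-blocking pattern (an assumption, not part of the
 * original file):
 *
 *	if (lock_try(lock_set, id) == KERN_SUCCESS) {
 *		... critical section ...
 *		(void) lock_release(lock_set, id);
 *	} else {
 *		... back off, or block via lock_acquire() ...
 *	}
 */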

kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t	ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_act()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 * ROUTINE:	lock_make_unstable	[internal]
 *
 * Marks the lock as unstable.
 *
 * NOTES:
 * - All future acquisitions of the lock will return with a
 *   KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
{
	lock_set_t	lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thr_act) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
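
/*
 * Illustrative recovery pattern (an assumption, not part of the
 * original file): a caller that acquires an unstable ulock owns it,
 * but should treat the protected data as suspect until it repairs
 * the data and marks the lock stable again.
 *
 *	kr = lock_acquire(lock_set, id);
 *	if (kr == KERN_LOCK_UNSTABLE) {
 *		repair_protected_state();	(hypothetical helper)
 *		(void) lock_make_stable(lock_set, id);
 *	}
 */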

/*
 * ROUTINE:	lock_release_internal	[internal]
 *
 * Releases the ulock.
 * If any threads are blocked waiting for the ulock, one is woken-up.
 *
 */
kern_return_t
lock_release_internal (ulock_t ulock, thread_act_t thr_act)
{
	lock_set_t	lock_set;
	int		result;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thr_act) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 * If we have a hint that threads might be waiting,
	 * try to transfer the lock ownership to a waiting thread
	 * and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(wq,
							     LOCK_SET_EVENT,
							     THREAD_AWAKENED,
							     TRUE);
		/* wait_queue now unlocked, thread locked */

		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			/*
			 * Transfer ulock ownership
			 * from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 * Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t	ulock;
	int	wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_act()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 * If the accepting thread (the receiver) is already waiting
	 * to accept the lock from the handoff thread (the sender),
	 * then perform the hand-off now.
	 */

	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 * See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
					wq,
					LOCK_SET_HANDOFF,
					THREAD_AWAKENED,
					TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 * Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {
			/*
			 * OOPS.  The accepting thread must have been
			 * aborted, and is racing back to clear the flag
			 * that says it is waiting for an accept.  He will
			 * clear it when we release the lock, so just fall
			 * thru and wait for the next accept thread (that's
			 * the way it is specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 * If the thread was woken-up via some action other than
	 * lock_handoff_accept or lock_set_destroy (e.g. thread_terminate),
	 * then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_act());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;

	default:
		panic("lock_handoff");
	}
}
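
/*
 * Illustrative pairing (an assumption, not part of the original
 * file): the current holder and the receiver rendezvous on the same
 * ulock, and whichever side arrives first blocks until the other
 * shows up.
 *
 *	holder thread:			receiver thread:
 *
 *	kr = lock_acquire(ls, id);
 *	...				kr = lock_handoff_accept(ls, id);
 *	kr = lock_handoff(ls, id);	(receiver now holds ulock id)
 */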

kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t	ulock;
	int	wait_result;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If there is another accepting thread that beat us, just
	 * return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_act()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 * If the handoff thread (the sender) is already waiting to
	 * hand-off the lock to the accepting thread (the receiver),
	 * then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;

		/*
		 * See who the lucky devil is, if he is still there waiting.
		 */
		assert(ulock->holder != THR_ACT_NULL);
		thread = ulock->holder->thread;

		if (wait_queue_wakeup64_thread(wq,
					       LOCK_SET_HANDOFF,
					       thread,
					       THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 * Holder thread was still waiting to give it
			 * away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}

		/*
		 * OOPS.  The owner was aborted out of the handoff.
		 * He will clear his own flag when he gets back.
		 * In the meantime, we will wait as if we didn't
		 * even see his flag (by falling thru).
		 */
	}

	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
					       LOCK_SET_HANDOFF,
					       THREAD_ABORTSAFE);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 * If the thread was woken-up via some action other than
	 * lock_handoff or lock_set_destroy (e.g. thread_terminate),
	 * then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;

	default:
		panic("lock_handoff_accept");
	}
}

/*
 * Routine:	lock_set_reference
 *
 * Take out a reference on a lock set.  This keeps the data structure
 * in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}

/*
 * Routine:	lock_set_dereference
 *
 * Release a reference on a lock set.  If this is the last reference,
 * the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	ref_count;
	int	size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		size = sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
		kfree((vm_offset_t) lock_set, size);
	}
}