/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:    wait_queue.c (adapted from sched_prim.c)
 * Author:  Avadis Tevanian, Jr.
 * Date:    1986
 *
 * Primitives for manipulating wait queues: either global
 * ones from sched_prim.c, or private ones associated with
 * particular structures (ports, semaphores, etc.).
 */

#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>

#include <kern/sched_prim.h>
#include <kern/wait_queue.h>

void
wait_queue_init(
    wait_queue_t wq,
    int policy)
{
    wq->wq_fifo = (policy == SYNC_POLICY_FIFO);
    wq->wq_issub = FALSE;
    queue_init(&wq->wq_queue);
    hw_lock_init(&wq->wq_interlock);
}

void
wait_queue_sub_init(
    wait_queue_sub_t wqsub,
    int policy)
{
    wait_queue_init(&wqsub->wqs_wait_queue, policy);
    wqsub->wqs_wait_queue.wq_issub = TRUE;
    if (policy & SYNC_POLICY_PREPOST) {
        wqsub->wqs_wait_queue.wq_isprepost = TRUE;
        wqsub->wqs_refcount = 0;
    } else
        wqsub->wqs_wait_queue.wq_isprepost = FALSE;
    queue_init(&wqsub->wqs_sublinks);
}
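
/*
 * Example (hypothetical sketch, not part of the original file): a
 * kernel client embedding a wait queue in its own structure would
 * initialize it once before use.  "my_object" and its fields are
 * illustrative names only.
 *
 *    struct my_object {
 *        struct wait_queue  wait_queue;
 *        int                state;
 *    };
 *
 *    void
 *    my_object_init(struct my_object *obj)
 *    {
 *        wait_queue_init(&obj->wait_queue, SYNC_POLICY_FIFO);
 *        obj->state = 0;
 *    }
 */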

void
wait_queue_sub_clearrefs(
    wait_queue_sub_t wq_sub)
{
    assert(wait_queue_is_sub(wq_sub));

    wqs_lock(wq_sub);
    wq_sub->wqs_refcount = 0;
    wqs_unlock(wq_sub);
}

void
wait_queue_link_init(
    wait_queue_link_t wql)
{
    queue_init(&wql->wql_links);
    queue_init(&wql->wql_sublinks);
    wql->wql_queue = WAIT_QUEUE_NULL;
    wql->wql_subqueue = WAIT_QUEUE_SUB_NULL;
    wql->wql_event = NO_EVENT;
}

/*
 * Routine: wait_queue_alloc
 * Purpose:
 *    Allocate and initialize a wait queue for use outside of
 *    the mach part of the kernel.
 * Conditions:
 *    Nothing locked - can block.
 * Returns:
 *    The allocated and initialized wait queue
 *    WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
    int policy)
{
    wait_queue_t wq;

    wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
    if (wq != WAIT_QUEUE_NULL)
        wait_queue_init(wq, policy);
    return wq;
}

/*
 * Routine: wait_queue_free
 * Purpose:
 *    Free an allocated wait queue.
 * Conditions:
 *    Nothing locked - can block.
 */
void
wait_queue_free(
    wait_queue_t wq)
{
    assert(queue_empty(&wq->wq_queue));
    kfree((vm_offset_t)wq, sizeof(struct wait_queue));
}
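
/*
 * Example (hypothetical sketch): the dynamic-allocation path pairs
 * wait_queue_alloc() with wait_queue_free().  The queue must be empty
 * (no waiters, no links) before it is freed.
 *
 *    wait_queue_t wq;
 *
 *    wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *    if (wq == WAIT_QUEUE_NULL)
 *        return KERN_RESOURCE_SHORTAGE;
 *    ... use the queue ...
 *    wait_queue_free(wq);
 */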

/*
 * Routine: wait_queue_lock
 * Purpose:
 *    Lock the wait queue.
 * Conditions:
 *    the appropriate spl level (if any) is already raised.
 */
void
wait_queue_lock(
    wait_queue_t wq)
{
#ifdef __ppc__
    vm_offset_t pc;

    /*
     * Double the standard lock timeout, because wait queues tend
     * to iterate over a number of threads - locking each.  If there is
     * a problem with a thread lock, it normally times out at the wait
     * queue level first, hiding the real problem.
     */
    pc = GET_RETURN_PC(&wq);
    if (!hw_lock_to(&wq->wq_interlock, LockTimeOut * 2)) {
        panic("wait queue deadlock detection - wq=0x%x, cpu=%d, ret=0x%x\n", wq, cpu_number(), pc);
    }
#else
    hw_lock_lock(&wq->wq_interlock);
#endif
}

/*
 * Routine: wait_queue_lock_try
 * Purpose:
 *    Try to lock the wait queue without waiting
 * Conditions:
 *    the appropriate spl level (if any) is already raised.
 * Returns:
 *    TRUE if the lock was acquired
 *    FALSE if we would have needed to wait
 */
boolean_t
wait_queue_lock_try(
    wait_queue_t wq)
{
    return hw_lock_try(&wq->wq_interlock);
}

/*
 * Routine: wait_queue_unlock
 * Purpose:
 *    unlock the wait queue
 * Conditions:
 *    The wait queue is assumed locked.
 *    appropriate spl level is still maintained
 */
void
wait_queue_unlock(
    wait_queue_t wq)
{
    assert(hw_lock_held(&wq->wq_interlock));
    hw_lock_unlock(&wq->wq_interlock);
}
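
/*
 * Example (hypothetical sketch): callers raise the spl level first,
 * then take the interlock, mirroring the internal pattern used by the
 * routines below.
 *
 *    spl_t s;
 *
 *    s = splsched();
 *    wait_queue_lock(wq);
 *    ... examine or modify the queue ...
 *    wait_queue_unlock(wq);
 *    splx(s);
 */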

int _wait_queue_subordinate;    /* phoney event for subordinate wait q elements */

/*
 * Routine: wait_queue_member_locked
 * Purpose:
 *    Indicate if this sub queue is a member of the queue
 * Conditions:
 *    The wait queue is locked
 *    The sub queue is just that, a sub queue
 */
boolean_t
wait_queue_member_locked(
    wait_queue_t wq,
    wait_queue_sub_t wq_sub)
{
    wait_queue_element_t wq_element;
    queue_t q;

    assert(wait_queue_held(wq));
    assert(wait_queue_is_sub(wq_sub));

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;

            if (wql->wql_subqueue == wq_sub)
                return TRUE;
        }
        wq_element = (wait_queue_element_t)
                queue_next((queue_t) wq_element);
    }
    return FALSE;
}

/*
 * Routine: wait_queue_member
 * Purpose:
 *    Indicate if this sub queue is a member of the queue
 * Conditions:
 *    The sub queue is just that, a sub queue
 */
boolean_t
wait_queue_member(
    wait_queue_t wq,
    wait_queue_sub_t wq_sub)
{
    boolean_t ret;
    spl_t s;

    assert(wait_queue_is_sub(wq_sub));

    s = splsched();
    wait_queue_lock(wq);
    ret = wait_queue_member_locked(wq, wq_sub);
    wait_queue_unlock(wq);
    splx(s);

    return ret;
}

/*
 * Routine: wait_queue_link
 * Purpose:
 *    Insert a subordinate wait queue into a wait queue.  This
 *    requires us to link the two together using a wait_queue_link
 *    structure that we allocate.
 * Conditions:
 *    The wait queue being inserted must be initialized as a sub queue
 *    The sub waitq is not already linked
 */
kern_return_t
wait_queue_link(
    wait_queue_t wq,
    wait_queue_sub_t wq_sub)
{
    wait_queue_link_t wql;
    spl_t s;

    assert(wait_queue_is_sub(wq_sub));
    assert(!wait_queue_member(wq, wq_sub));

    wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
    if (wql == WAIT_QUEUE_LINK_NULL)
        return KERN_RESOURCE_SHORTAGE;

    wait_queue_link_init(wql);

    s = splsched();
    wait_queue_lock(wq);
    wqs_lock(wq_sub);

    wql->wql_queue = wq;
    wql->wql_subqueue = wq_sub;
    wql->wql_event = WAIT_QUEUE_SUBORDINATE;
    queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
    queue_enter(&wq_sub->wqs_sublinks, wql, wait_queue_link_t, wql_sublinks);

    wqs_unlock(wq_sub);
    wait_queue_unlock(wq);
    splx(s);

    return KERN_SUCCESS;
}
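
/*
 * Example (hypothetical sketch): linking a subordinate (set) queue
 * under two ordinary wait queues, then testing membership.  wq1 and
 * wq2 are assumed to be already-initialized wait queues.
 *
 *    struct wait_queue_sub  sub;
 *    wait_queue_t           wq1, wq2;
 *
 *    wait_queue_sub_init(&sub, SYNC_POLICY_FIFO);
 *    if (wait_queue_link(wq1, &sub) != KERN_SUCCESS ||
 *        wait_queue_link(wq2, &sub) != KERN_SUCCESS)
 *        ... handle KERN_RESOURCE_SHORTAGE ...
 *    assert(wait_queue_member(wq1, &sub));
 */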

/*
 * Routine: wait_queue_link_noalloc
 * Purpose:
 *    Insert a subordinate wait queue into a wait queue.  This
 *    requires us to link the two together using a wait_queue_link
 *    structure provided by the caller (no allocation is done here).
 * Conditions:
 *    The wait queue being inserted must be initialized as a sub queue
 *    The sub waitq is not already linked
 */
kern_return_t
wait_queue_link_noalloc(
    wait_queue_t wq,
    wait_queue_sub_t wq_sub,
    wait_queue_link_t wql)
{
    spl_t s;

    assert(wait_queue_is_sub(wq_sub));
    assert(!wait_queue_member(wq, wq_sub));

    wait_queue_link_init(wql);

    s = splsched();
    wait_queue_lock(wq);
    wqs_lock(wq_sub);

    wql->wql_queue = wq;
    wql->wql_subqueue = wq_sub;
    wql->wql_event = WAIT_QUEUE_SUBORDINATE;
    queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
    queue_enter(&wq_sub->wqs_sublinks, wql, wait_queue_link_t, wql_sublinks);

    wqs_unlock(wq_sub);
    wait_queue_unlock(wq);
    splx(s);

    return KERN_SUCCESS;
}

/*
 * Routine: wait_queue_unlink
 * Purpose:
 *    Remove the linkage between a wait queue and its subordinate.
 * Conditions:
 *    The wait queue being unlinked must be a member sub queue
 */
kern_return_t
wait_queue_unlink(
    wait_queue_t wq,
    wait_queue_sub_t wq_sub)
{
    wait_queue_element_t wq_element;
    queue_t q;
    spl_t s;

    assert(wait_queue_is_sub(wq_sub));
    assert(wait_queue_member(wq, wq_sub));

    s = splsched();
    wait_queue_lock(wq);
    wqs_lock(wq_sub);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            queue_t sq;

            if (wql->wql_subqueue == wq_sub) {
                sq = &wq_sub->wqs_sublinks;
                queue_remove(q, wql, wait_queue_link_t, wql_links);
                queue_remove(sq, wql, wait_queue_link_t, wql_sublinks);
                wqs_unlock(wq_sub);
                wait_queue_unlock(wq);
                splx(s);
                kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
                return KERN_SUCCESS;
            }
        }

        wq_element = (wait_queue_element_t)
                queue_next((queue_t) wq_element);
    }
    panic("wait_queue_unlink");
    /* NOTREACHED */
    return KERN_FAILURE;
}
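
/*
 * Example (hypothetical sketch): tearing down the links created in
 * the sketch above.  wait_queue_unlink() frees the link structure it
 * finds, so it pairs with wait_queue_link(), not with
 * wait_queue_link_noalloc().
 *
 *    if (wait_queue_member(wq1, &sub))
 *        (void) wait_queue_unlink(wq1, &sub);
 */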

/*
 * Routine: wait_queue_unlink_nofree
 * Purpose:
 *    Remove the linkage between a wait queue and its subordinate.
 *    Do not deallocate the wql.
 * Conditions:
 *    The wait queue being unlinked must be a member sub queue
 */
kern_return_t
wait_queue_unlink_nofree(
    wait_queue_t wq,
    wait_queue_sub_t wq_sub)
{
    wait_queue_element_t wq_element;
    queue_t q;

    assert(wait_queue_is_sub(wq_sub));

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            queue_t sq;

            if (wql->wql_subqueue == wq_sub) {
                sq = &wq_sub->wqs_sublinks;
                queue_remove(q, wql, wait_queue_link_t, wql_links);
                queue_remove(sq, wql, wait_queue_link_t, wql_sublinks);
                return(KERN_SUCCESS);
            }
        }

        wq_element = (wait_queue_element_t)
                queue_next((queue_t) wq_element);
    }

    /*
     * Because the sub's lock is dropped on the way into this routine,
     * we may find no matching entry in the wait queue.  That is a
     * valid case, so just report it.
     */
    return(KERN_FAILURE);
}

/*
 * Routine: wait_subqueue_unlink_all
 * Purpose:
 *    Remove the linkages between a subordinate wait queue and
 *    all of the wait queues it is a member of.
 * Conditions:
 *    The queue must be a sub queue; nothing of interest locked.
 */
kern_return_t
wait_subqueue_unlink_all(
    wait_queue_sub_t wq_sub)
{
    wait_queue_link_t wql;
    wait_queue_t wq;
    queue_t q;
    kern_return_t kret;
    spl_t s;

    assert(wait_queue_is_sub(wq_sub));

retry:
    s = splsched();
    wqs_lock(wq_sub);

    q = &wq_sub->wqs_sublinks;

    wql = (wait_queue_link_t)queue_first(q);
    while (!queue_end(q, (queue_entry_t)wql)) {
        wq = wql->wql_queue;
        if (wait_queue_lock_try(wq)) {
#if 0
            queue_t q1;

            q1 = &wq->wq_queue;

            queue_remove(q1, wql, wait_queue_link_t, wql_links);
            queue_remove(q, wql, wait_queue_link_t, wql_sublinks);
#else
            if ((kret = wait_queue_unlink_nofree(wq, wq_sub)) != KERN_SUCCESS) {
                queue_remove(q, wql, wait_queue_link_t, wql_sublinks);
            }
#endif
            wait_queue_unlock(wq);
            wql = (wait_queue_link_t)queue_first(q);
        } else {
            wqs_unlock(wq_sub);
            splx(s);
            mutex_pause();
            goto retry;
        }
    }
    wqs_unlock(wq_sub);
    splx(s);
    return(KERN_SUCCESS);
}

/*
 * Routine: wait_queue_unlinkall_nofree
 * Purpose:
 *    Remove the linkage between a wait queue and all its
 *    subordinates.
 */
kern_return_t
wait_queue_unlinkall_nofree(
    wait_queue_t wq)
{
    wait_queue_element_t wq_element;
    wait_queue_sub_t wq_sub;
    queue_t q;
    spl_t s;

    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            queue_t sq;

            wq_sub = wql->wql_subqueue;
            wqs_lock(wq_sub);
            sq = &wq_sub->wqs_sublinks;
            queue_remove(q, wql, wait_queue_link_t, wql_links);
            queue_remove(sq, wql, wait_queue_link_t, wql_sublinks);
            wqs_unlock(wq_sub);
            wq_element = (wait_queue_element_t) queue_first(q);
        } else {
            wq_element = (wait_queue_element_t)
                    queue_next((queue_t) wq_element);
        }
    }
    wait_queue_unlock(wq);
    splx(s);

    return(KERN_SUCCESS);
}

/*
 * Routine: wait_queue_unlink_one
 * Purpose:
 *    Find and unlink one subordinate wait queue
 * Conditions:
 *    Nothing of interest locked.
 */
void
wait_queue_unlink_one(
    wait_queue_t wq,
    wait_queue_sub_t *wq_subp)
{
    wait_queue_element_t wq_element;
    queue_t q;
    spl_t s;

    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_sub_t wq_sub = wql->wql_subqueue;
            queue_t sq;

            wqs_lock(wq_sub);
            sq = &wq_sub->wqs_sublinks;
            queue_remove(q, wql, wait_queue_link_t, wql_links);
            queue_remove(sq, wql, wait_queue_link_t, wql_sublinks);
            wqs_unlock(wq_sub);
            wait_queue_unlock(wq);
            splx(s);
            kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
            *wq_subp = wq_sub;
            return;
        }

        wq_element = (wait_queue_element_t)
                queue_next((queue_t) wq_element);
    }
    wait_queue_unlock(wq);
    splx(s);
    *wq_subp = WAIT_QUEUE_SUB_NULL;
}

/*
 * Routine: wait_queue_assert_wait_locked
 * Purpose:
 *    Insert the current thread into the supplied wait queue
 *    waiting for a particular event to be posted to that queue.
 * Conditions:
 *    The wait queue is assumed locked.
 */
boolean_t
wait_queue_assert_wait_locked(
    wait_queue_t wq,
    event_t event,
    int interruptible,
    boolean_t unlock)
{
    thread_t thread = current_thread();

    if (wq->wq_issub && wq->wq_isprepost) {
        wait_queue_sub_t wqs = (wait_queue_sub_t)wq;

        if (wqs->wqs_refcount > 0) {
            if (unlock)
                wait_queue_unlock(wq);
            return(FALSE);
        }
    }

    thread_lock(thread);

    /*
     * This is the extent to which we currently take scheduling attributes
     * into account.  If the thread is vm privileged, we stick it at
     * the front of the queue.  Later, these queues will honor the policy
     * value set at wait_queue_init time.
     */
    if (thread->vm_privilege)
        enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
    else
        enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
    thread->wait_event = event;
    thread->wait_queue = wq;
    thread_mark_wait_locked(thread, interruptible);
    thread_unlock(thread);
    if (unlock)
        wait_queue_unlock(wq);
    return(TRUE);
}

/*
 * Routine: wait_queue_assert_wait
 * Purpose:
 *    Insert the current thread into the supplied wait queue
 *    waiting for a particular event to be posted to that queue.
 * Conditions:
 *    nothing of interest locked.
 */
boolean_t
wait_queue_assert_wait(
    wait_queue_t wq,
    event_t event,
    int interruptible)
{
    spl_t s;
    boolean_t ret;

    s = splsched();
    wait_queue_lock(wq);
    ret = wait_queue_assert_wait_locked(wq, event, interruptible, TRUE);
    /* wait queue unlocked */
    splx(s);
    return(ret);
}

/*
 * Routine: wait_queue_select_all
 * Purpose:
 *    Select all threads off a wait queue that meet the
 *    supplied criteria.
 * Conditions:
 *    at splsched
 *    wait queue locked
 *    wake_queue initialized and ready for insertion
 *    possibly recursive
 * Returns:
 *    a queue of locked threads
 */
void
_wait_queue_select_all(
    wait_queue_t wq,
    event_t event,
    queue_t wake_queue)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    queue_t q;

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        wqe_next = (wait_queue_element_t)
                queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_t sub_queue;

            /*
             * We have to check the subordinate wait queue.
             */
            sub_queue = (wait_queue_t)wql->wql_subqueue;
            wait_queue_lock(sub_queue);
            if (sub_queue->wq_isprepost) {
                wait_queue_sub_t wqs = (wait_queue_sub_t)sub_queue;

                /*
                 * Preposting is only done on subordinate wait
                 * queues; the wait queue is the first element
                 * of the subordinate structure, so this cast
                 * is safe.
                 */
                wqs->wqs_refcount++;
            }
            if (! wait_queue_empty(sub_queue))
                _wait_queue_select_all(sub_queue, event, wake_queue);
            wait_queue_unlock(sub_queue);
        } else {
            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, pull
             * it off the queue and stick it in our wake_queue.
             */
            thread_t t = (thread_t)wq_element;

            if (t->wait_event == event) {
                thread_lock(t);
                remqueue(q, (queue_entry_t) t);
                enqueue(wake_queue, (queue_entry_t) t);
                t->wait_queue = WAIT_QUEUE_NULL;
                t->wait_event = NO_EVENT;
                t->at_safe_point = FALSE;
                /* returned locked */
            }
        }
        wq_element = wqe_next;
    }
}

/*
 * Routine: wait_queue_wakeup_all_locked
 * Purpose:
 *    Wakeup some number of threads that are in the specified
 *    wait queue and waiting on the specified event.
 * Conditions:
 *    wait queue already locked (may be released).
 * Returns:
 *    KERN_SUCCESS - Threads were woken up
 *    KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all_locked(
    wait_queue_t wq,
    event_t event,
    int result,
    boolean_t unlock)
{
    queue_head_t wake_queue_head;
    queue_t q = &wake_queue_head;
    kern_return_t ret = KERN_NOT_WAITING;

    assert(wait_queue_held(wq));

    queue_init(q);

    /*
     * Select the threads that we will wake up.  The threads
     * are returned to us locked and cleanly removed from the
     * wait queue.
     */
    _wait_queue_select_all(wq, event, q);
    if (unlock)
        wait_queue_unlock(wq);

    /*
     * For each thread, set it running.
     */
    while (!queue_empty(q)) {
        thread_t thread = (thread_t) dequeue(q);
        thread_go_locked(thread, result);
        thread_unlock(thread);
        ret = KERN_SUCCESS;
    }
    return ret;
}

/*
 * Routine: wait_queue_wakeup_all
 * Purpose:
 *    Wakeup some number of threads that are in the specified
 *    wait queue and waiting on the specified event.
 * Conditions:
 *    Nothing locked
 * Returns:
 *    KERN_SUCCESS - Threads were woken up
 *    KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
    wait_queue_t wq,
    event_t event,
    int result)
{
    kern_return_t ret;
    spl_t s;

    s = splsched();
    wait_queue_lock(wq);
    ret = wait_queue_wakeup_all_locked(wq, event, result, TRUE);
    /* lock released */
    splx(s);

    return ret;
}
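
/*
 * Example (hypothetical sketch): the poster side of the pattern shown
 * after wait_queue_assert_wait() above.  All threads waiting on this
 * <wq,event> pair are made runnable with the given wait result.
 *
 *    kern_return_t kr;
 *
 *    kr = wait_queue_wakeup_all(wq, (event_t)&some_object,
 *                               THREAD_AWAKENED);
 *    if (kr == KERN_NOT_WAITING)
 *        ... no one was waiting; nothing was done ...
 */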

/*
 * Routine: wait_queue_select_one
 * Purpose:
 *    Select the best thread off a wait queue that meets the
 *    supplied criteria.
 * Conditions:
 *    at splsched
 *    wait queue locked
 *    possibly recursive
 * Returns:
 *    a locked thread - if one found
 * Note:
 *    This is where the sync policy of the wait queue comes
 *    into effect.  For now, we just assume FIFO.
 */
thread_t
_wait_queue_select_one(
    wait_queue_t wq,
    event_t event)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    thread_t t = THREAD_NULL;
    queue_t q;

    assert(wq->wq_fifo);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        wqe_next = (wait_queue_element_t)
                queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_t sub_queue;

            /*
             * We have to check the subordinate wait queue.
             */
            sub_queue = (wait_queue_t)wql->wql_subqueue;
            wait_queue_lock(sub_queue);
            if (! wait_queue_empty(sub_queue)) {
                t = _wait_queue_select_one(sub_queue, event);
            }
            wait_queue_unlock(sub_queue);
            if (t != THREAD_NULL)
                return t;
        } else {
            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, pull
             * it off the queue and return it.
             */
            thread_t t = (thread_t)wq_element;

            if (t->wait_event == event) {
                thread_lock(t);
                remqueue(q, (queue_entry_t) t);
                t->wait_queue = WAIT_QUEUE_NULL;
                t->wait_event = NO_EVENT;
                t->at_safe_point = FALSE;
                return t;    /* still locked */
            }
        }
        wq_element = wqe_next;
    }
    return THREAD_NULL;
}

/*
 * Routine: wait_queue_peek_locked
 * Purpose:
 *    Select the best thread from a wait queue that meets the
 *    supplied criteria, but leave it on the queue it was
 *    found on.  The thread, and the actual wait_queue the
 *    thread was found on are identified.
 * Conditions:
 *    at splsched
 *    wait queue locked
 *    possibly recursive
 * Returns:
 *    a locked thread - if one found
 *    a locked waitq - the one the thread was found on
 * Note:
 *    Only the waitq the thread was actually found on is locked
 *    after this.
 */
void
wait_queue_peek_locked(
    wait_queue_t wq,
    event_t event,
    thread_t *tp,
    wait_queue_t *wqp)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    thread_t t;
    queue_t q;

    assert(wq->wq_fifo);

    *tp = THREAD_NULL;

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        wqe_next = (wait_queue_element_t)
                queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_t sub_queue;

            /*
             * We have to check the subordinate wait queue.
             */
            sub_queue = (wait_queue_t)wql->wql_subqueue;
            wait_queue_lock(sub_queue);
            if (! wait_queue_empty(sub_queue)) {
                wait_queue_peek_locked(sub_queue, event, tp, wqp);
            }
            if (*tp != THREAD_NULL)
                return;    /* thread and its waitq locked */

            wait_queue_unlock(sub_queue);
        } else {
            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, return
             * it locked, but leave it on the queue.
             */
            thread_t t = (thread_t)wq_element;

            if (t->wait_event == event) {
                thread_lock(t);
                *tp = t;
                *wqp = wq;
                return;
            }
        }
        wq_element = wqe_next;
    }
}

/*
 * Routine: wait_queue_pull_thread_locked
 * Purpose:
 *    Pull a thread that was previously "peeked" off the wait
 *    queue and (possibly) unlock the waitq.
 * Conditions:
 *    at splsched
 *    wait queue locked
 *    thread locked
 * Returns:
 *    with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
    wait_queue_t waitq,
    thread_t thread,
    boolean_t unlock)
{
    assert(thread->wait_queue == waitq);

    remqueue(&waitq->wq_queue, (queue_entry_t)thread);
    thread->wait_queue = WAIT_QUEUE_NULL;
    thread->wait_event = NO_EVENT;
    thread->at_safe_point = FALSE;
    if (unlock)
        wait_queue_unlock(waitq);
}
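
/*
 * Example (hypothetical sketch): peek and pull cooperate so a caller
 * can examine a candidate waiter before committing to removing it.
 * Note that the waitq returned by the peek is the one that must be
 * passed to the pull, since it may be a subordinate of wq.
 *
 *    thread_t      t;
 *    wait_queue_t  found_wq;
 *
 *    wait_queue_peek_locked(wq, event, &t, &found_wq);
 *    if (t != THREAD_NULL) {
 *        wait_queue_pull_thread_locked(found_wq, t, TRUE);
 *        thread_go_locked(t, THREAD_AWAKENED);
 *        thread_unlock(t);
 *    }
 */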

/*
 * Routine: wait_queue_select_thread
 * Purpose:
 *    Look for a thread and remove it from the queues, if
 *    (and only if) the thread is waiting on the supplied
 *    <wait_queue, event> pair.
 * Conditions:
 *    at splsched
 *    wait queue locked
 *    possibly recursive
 * Returns:
 *    KERN_NOT_WAITING: Thread is not waiting here.
 *    KERN_SUCCESS: It was, and is now removed (returned locked)
 */
kern_return_t
_wait_queue_select_thread(
    wait_queue_t wq,
    event_t event,
    thread_t thread)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    kern_return_t res = KERN_NOT_WAITING;
    queue_t q = &wq->wq_queue;

    assert(wq->wq_fifo);

    thread_lock(thread);
    if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
        remqueue(q, (queue_entry_t) thread);
        thread->at_safe_point = FALSE;
        thread->wait_event = NO_EVENT;
        thread->wait_queue = WAIT_QUEUE_NULL;
        /* thread still locked */
        return KERN_SUCCESS;
    }
    thread_unlock(thread);

    /*
     * The wait_queue associated with the thread may be one of this
     * wait queue's subordinates.  Go see.  If so, removing it from
     * there is like removing it from here.
     */
    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        wqe_next = (wait_queue_element_t)
                queue_next((queue_t) wq_element);

        if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_t sub_queue;

            sub_queue = (wait_queue_t)wql->wql_subqueue;
            wait_queue_lock(sub_queue);
            if (! wait_queue_empty(sub_queue)) {
                res = _wait_queue_select_thread(sub_queue,
                                                event,
                                                thread);
            }
            wait_queue_unlock(sub_queue);
            if (res == KERN_SUCCESS)
                return KERN_SUCCESS;
        }
        wq_element = wqe_next;
    }
    return res;
}

/*
 * Routine: wait_queue_wakeup_identity_locked
 * Purpose:
 *    Select a single thread that is most-eligible to run and
 *    set it running.  But return the thread locked.
 * Conditions:
 *    at splsched
 *    wait queue locked
 *    possibly recursive
 * Returns:
 *    a pointer to the locked thread that was awakened
 */
thread_t
wait_queue_wakeup_identity_locked(
    wait_queue_t wq,
    event_t event,
    int result,
    boolean_t unlock)
{
    thread_t thread;

    assert(wait_queue_held(wq));

    thread = _wait_queue_select_one(wq, event);
    if (unlock)
        wait_queue_unlock(wq);

    if (thread)
        thread_go_locked(thread, result);
    return thread;    /* still locked if not NULL */
}

/*
 * Routine: wait_queue_wakeup_one_locked
 * Purpose:
 *    Select a single thread that is most-eligible to run and
 *    set it running.
 * Conditions:
 *    at splsched
 *    wait queue locked
 *    possibly recursive
 * Returns:
 *    KERN_SUCCESS - A thread was found, removed, and awakened
 *    KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one_locked(
    wait_queue_t wq,
    event_t event,
    int result,
    boolean_t unlock)
{
    thread_t thread;

    assert(wait_queue_held(wq));

    thread = _wait_queue_select_one(wq, event);
    if (unlock)
        wait_queue_unlock(wq);

    if (thread) {
        thread_go_locked(thread, result);
        thread_unlock(thread);
        return KERN_SUCCESS;
    }

    return KERN_NOT_WAITING;
}

/*
 * Routine: wait_queue_wakeup_one
 * Purpose:
 *    Wakeup the most appropriate thread that is in the specified
 *    wait queue for the specified event.
 * Conditions:
 *    Nothing locked
 * Returns:
 *    KERN_SUCCESS - Thread was woken up
 *    KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
    wait_queue_t wq,
    event_t event,
    int result)
{
    thread_t thread;
    spl_t s;

    s = splsched();
    wait_queue_lock(wq);
    thread = _wait_queue_select_one(wq, event);
    wait_queue_unlock(wq);

    if (thread) {
        thread_go_locked(thread, result);
        thread_unlock(thread);
        splx(s);
        return KERN_SUCCESS;
    }

    splx(s);
    return KERN_NOT_WAITING;
}
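
/*
 * Example (hypothetical sketch): a semaphore-style signal wakes only
 * one waiter, rather than the whole set as wakeup_all does.
 *
 *    if (wait_queue_wakeup_one(wq, (event_t)&some_object,
 *                              THREAD_AWAKENED) == KERN_NOT_WAITING)
 *        ... record the signal for a later waiter instead ...
 */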

/*
 * Routine: wait_queue_wakeup_thread_locked
 * Purpose:
 *    Wakeup the particular thread that was specified if and only
 *    if it was in this wait queue (or one of its subordinate queues)
 *    and waiting on the specified event.
 *
 *    This is much safer than just removing the thread from
 *    whatever wait queue it happens to be on.  For instance, it
 *    may have already been awoken from the wait you intended to
 *    interrupt and waited on something else (like another
 *    semaphore).
 * Conditions:
 *    at splsched
 *    wait queue already locked (may be released).
 * Returns:
 *    KERN_SUCCESS - the thread was found waiting and awakened
 *    KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread_locked(
    wait_queue_t wq,
    event_t event,
    thread_t thread,
    int result,
    boolean_t unlock)
{
    kern_return_t res;

    assert(wait_queue_held(wq));

    /*
     * See if the thread was still waiting there.  If so, it got
     * dequeued and returned locked.
     */
    res = _wait_queue_select_thread(wq, event, thread);
    if (unlock)
        wait_queue_unlock(wq);

    if (res != KERN_SUCCESS)
        return KERN_NOT_WAITING;

    thread_go_locked(thread, result);
    thread_unlock(thread);
    return KERN_SUCCESS;
}

/*
 * Routine: wait_queue_wakeup_thread
 * Purpose:
 *    Wakeup the particular thread that was specified if and only
 *    if it was in this wait queue (or one of its subordinate queues)
 *    and waiting on the specified event.
 *
 *    This is much safer than just removing the thread from
 *    whatever wait queue it happens to be on.  For instance, it
 *    may have already been awoken from the wait you intended to
 *    interrupt and waited on something else (like another
 *    semaphore).
 * Conditions:
 *    nothing of interest locked
 *    we need to assume spl needs to be raised
 * Returns:
 *    KERN_SUCCESS - the thread was found waiting and awakened
 *    KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
    wait_queue_t wq,
    event_t event,
    thread_t thread,
    int result)
{
    kern_return_t res;
    spl_t s;

    s = splsched();
    wait_queue_lock(wq);
    res = _wait_queue_select_thread(wq, event, thread);
    wait_queue_unlock(wq);

    if (res == KERN_SUCCESS) {
        thread_go_locked(thread, result);
        thread_unlock(thread);
        splx(s);
        return KERN_SUCCESS;
    }
    splx(s);
    return KERN_NOT_WAITING;
}
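
/*
 * Example (hypothetical sketch): waking a specific thread is safe even
 * if that thread has since moved on, because the <wq,event,thread>
 * triple is revalidated under the locks before anything is dequeued.
 *
 *    if (wait_queue_wakeup_thread(wq, (event_t)&some_object,
 *                                 target_thread,
 *                                 THREAD_INTERRUPTED) == KERN_NOT_WAITING)
 *        ... target was no longer waiting here; nothing happened ...
 */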

/*
 * Routine: wait_queue_remove
 * Purpose:
 *    Normal removal operations from wait queues are driven from
 *    the wait queue to select a thread.  However, if a thread is
 *    interrupted out of a wait, this routine is called to
 *    remove it from whatever wait queue it may be in.
 * Conditions:
 *    splsched
 *    thread locked on entry and exit, but may be dropped.
 * Returns:
 *    KERN_SUCCESS - if thread was in a wait queue
 *    KERN_NOT_WAITING - it was not
 */
kern_return_t
wait_queue_remove(
    thread_t thread)
{
    wait_queue_t wq = thread->wait_queue;

    if (wq == WAIT_QUEUE_NULL)
        return KERN_NOT_WAITING;

    /*
     * have to get the locks again in the right order.
     */
    thread_unlock(thread);
    wait_queue_lock(wq);
    thread_lock(thread);

    if (thread->wait_queue == wq) {
        remqueue(&wq->wq_queue, (queue_entry_t)thread);
        thread->wait_queue = WAIT_QUEUE_NULL;
        thread->wait_event = NO_EVENT;
        thread->at_safe_point = FALSE;
        wait_queue_unlock(wq);
        return KERN_SUCCESS;
    } else {
        wait_queue_unlock(wq);
        return KERN_NOT_WAITING;    /* anymore */
    }
}