/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 * File:    kern/sync_sema.c
 * Author:  Joseph CaraDonna
 *
 * Contains RT distributed semaphore synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/kern_return.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>

#include <kern/misc_protos.h>
#include <kern/sync_sema.h>
#include <kern/spl.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/ipc_tt.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/host.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/mach_param.h>

#include <libkern/OSAtomic.h>

static unsigned int semaphore_event;
#define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)

ZONE_DECLARE(semaphore_zone, "semaphores", sizeof(struct semaphore), ZC_NONE);

os_refgrp_decl(static, sema_refgrp, "semaphore", NULL);

/* Forward declarations */

kern_return_t
semaphore_wait_trap_internal(
    mach_port_name_t name,
    void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_wait_signal_trap_internal(
    mach_port_name_t wait_name,
    mach_port_name_t signal_name,
    void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_trap_internal(
    mach_port_name_t name,
    unsigned int sec,
    clock_res_t nsec,
    void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_signal_trap_internal(
    mach_port_name_t wait_name,
    mach_port_name_t signal_name,
    unsigned int sec,
    clock_res_t nsec,
    void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name);

kern_return_t
semaphore_signal_internal(
    semaphore_t semaphore,
    thread_t thread,
    int options);

kern_return_t
semaphore_convert_wait_result(
    int wait_result);

void
semaphore_wait_continue(void *arg __unused, wait_result_t wr);

static kern_return_t
semaphore_wait_internal(
    semaphore_t wait_semaphore,
    semaphore_t signal_semaphore,
    uint64_t deadline,
    int option,
    void (*caller_cont)(kern_return_t));

static __inline__ uint64_t
semaphore_deadline(
    unsigned int sec,
    clock_res_t nsec)
{
    uint64_t abstime;

    nanoseconds_to_absolutetime((uint64_t)sec * NSEC_PER_SEC + nsec, &abstime);
    clock_absolutetime_interval_to_deadline(abstime, &abstime);

    return abstime;
}

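/*
 * Worked example (illustrative note, not from the original source): a caller
 * passing sec = 1, nsec = 500000000 asks for a 1.5 second relative wait.
 * semaphore_deadline() converts 1 * NSEC_PER_SEC + 500000000 = 1500000000 ns
 * into mach absolute-time units and then adds the current time, yielding an
 * absolute deadline suitable for the waitq_assert_wait64_locked() call made
 * in semaphore_wait_internal() below.
 */
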
/*
 * Routine:  semaphore_create
 *
 * Creates a semaphore.
 * The port representing the semaphore is returned as a parameter.
 */
kern_return_t
semaphore_create(
    task_t task,
    semaphore_t *new_semaphore,
    int policy,
    int value)
{
    semaphore_t s = SEMAPHORE_NULL;
    kern_return_t kret;

    *new_semaphore = SEMAPHORE_NULL;
    if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX || policy < 0) {
        return KERN_INVALID_ARGUMENT;
    }

    s = (semaphore_t) zalloc(semaphore_zone);

    if (s == SEMAPHORE_NULL) {
        return KERN_RESOURCE_SHORTAGE;
    }

    kret = waitq_init(&s->waitq, policy | SYNC_POLICY_DISABLE_IRQ); /* also inits lock */
    if (kret != KERN_SUCCESS) {
        zfree(semaphore_zone, s);
        return kret;
    }

    /*
     * Initialize the semaphore values.
     */
    s->port = IP_NULL;
    os_ref_init(&s->ref_count, &sema_refgrp);
    s->count = value;
    s->active = TRUE;
    s->owner = task;

    /*
     * Associate the new semaphore with the task by adding
     * the new semaphore to the task's semaphore list.
     */
    task_lock(task);
    enqueue_head(&task->semaphore_list, (queue_entry_t) s);
    task->semaphores_owned++;
    task_unlock(task);

    *new_semaphore = s;

    return KERN_SUCCESS;
}
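
/*
 * Usage sketch (illustrative, not part of the original file): this routine is
 * normally reached through the user-space Mach semaphore API declared in
 * <mach/semaphore.h> and <mach/task.h>, which this sketch assumes.
 *
 *	#include <mach/mach.h>
 *	#include <mach/semaphore.h>
 *	#include <mach/task.h>
 *
 *	semaphore_t sem;
 *	kern_return_t kr;
 *
 *	// Create a counting semaphore owned by the calling task.
 *	kr = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
 *	if (kr != KERN_SUCCESS) {
 *		// handle KERN_INVALID_ARGUMENT / KERN_RESOURCE_SHORTAGE
 *	}
 *
 *	// Producer: semaphore_signal(sem);   Consumer: semaphore_wait(sem);
 *
 *	// Drop the task's ownership (and wake any waiters) when done.
 *	kr = semaphore_destroy(mach_task_self(), sem);
 */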

/*
 * Routine:  semaphore_destroy_internal
 *
 * Disassociate a semaphore from its owning task, mark it inactive,
 * and set any waiting threads running with THREAD_RESTART.
 *
 * Conditions:
 *     task is locked
 *     semaphore is locked
 *     semaphore is owned by the specified task
 * Returns:
 *     with semaphore unlocked
 */
static void
semaphore_destroy_internal(
    task_t task,
    semaphore_t semaphore)
{
    int old_count;

    /* unlink semaphore from owning task */
    assert(semaphore->owner == task);
    remqueue((queue_entry_t) semaphore);
    semaphore->owner = TASK_NULL;
    task->semaphores_owned--;

    /*
     * Deactivate semaphore
     */
    assert(semaphore->active);
    semaphore->active = FALSE;

    /*
     * Wakeup blocked threads
     */
    old_count = semaphore->count;
    semaphore->count = 0;

    if (old_count < 0) {
        waitq_wakeup64_all_locked(&semaphore->waitq,
            SEMAPHORE_EVENT,
            THREAD_RESTART, NULL,
            WAITQ_ALL_PRIORITIES,
            WAITQ_UNLOCK);
        /* waitq/semaphore is unlocked */
    } else {
        semaphore_unlock(semaphore);
    }
}

/*
 * Routine:  semaphore_destroy
 *
 * Destroys a semaphore and consumes the caller's reference on the
 * semaphore.
 */
kern_return_t
semaphore_destroy(
    task_t task,
    semaphore_t semaphore)
{
    spl_t spl_level;

    if (semaphore == SEMAPHORE_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (task == TASK_NULL) {
        semaphore_dereference(semaphore);
        return KERN_INVALID_ARGUMENT;
    }

    task_lock(task);
    spl_level = splsched();
    semaphore_lock(semaphore);

    if (semaphore->owner != task) {
        semaphore_unlock(semaphore);
        semaphore_dereference(semaphore);
        splx(spl_level);
        task_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    semaphore_destroy_internal(task, semaphore);
    /* semaphore unlocked */

    splx(spl_level);
    task_unlock(task);

    semaphore_dereference(semaphore);
    return KERN_SUCCESS;
}

/*
 * Routine:  semaphore_destroy_all
 *
 * Destroy all the semaphores associated with a given task.
 */
#define SEMASPERSPL 20  /* max number of semaphores to destroy per spl hold */

void
semaphore_destroy_all(
    task_t task)
{
    uint32_t count;
    spl_t spl_level;

    count = 0;
    task_lock(task);
    while (!queue_empty(&task->semaphore_list)) {
        semaphore_t semaphore;

        semaphore = (semaphore_t) queue_first(&task->semaphore_list);

        if (count == 0) {
            spl_level = splsched();
        }
        semaphore_lock(semaphore);

        semaphore_destroy_internal(task, semaphore);
        /* semaphore unlocked */

        /* throttle number of semaphores per interrupt disablement */
        if (++count == SEMASPERSPL) {
            count = 0;
            splx(spl_level);
        }
    }
    if (count != 0) {
        splx(spl_level);
    }

    task_unlock(task);
}

/*
 * Routine:  semaphore_signal_internal
 *
 * Signals the semaphore directly (wakes a waiter, all waiters, or a
 * specific thread, and/or adjusts the count).
 *
 * Assumptions:
 *     Semaphore is unlocked on entry; this routine takes and
 *     releases the semaphore lock itself.
 */
kern_return_t
semaphore_signal_internal(
    semaphore_t semaphore,
    thread_t thread,
    int options)
{
    kern_return_t kr;
    spl_t spl_level;

    spl_level = splsched();
    semaphore_lock(semaphore);

    if (!semaphore->active) {
        semaphore_unlock(semaphore);
        splx(spl_level);
        return KERN_TERMINATED;
    }

    if (thread != THREAD_NULL) {
        if (semaphore->count < 0) {
            kr = waitq_wakeup64_thread_locked(
                &semaphore->waitq,
                SEMAPHORE_EVENT,
                thread,
                THREAD_AWAKENED,
                WAITQ_UNLOCK);
            /* waitq/semaphore is unlocked */
        } else {
            kr = KERN_NOT_WAITING;
            semaphore_unlock(semaphore);
        }
        splx(spl_level);
        return kr;
    }

    if (options & SEMAPHORE_SIGNAL_ALL) {
        int old_count = semaphore->count;

        kr = KERN_NOT_WAITING;
        if (old_count < 0) {
            semaphore->count = 0;  /* always reset */
            kr = waitq_wakeup64_all_locked(
                &semaphore->waitq,
                SEMAPHORE_EVENT,
                THREAD_AWAKENED, NULL,
                WAITQ_ALL_PRIORITIES,
                WAITQ_UNLOCK);
            /* waitq / semaphore is unlocked */
        } else {
            if (options & SEMAPHORE_SIGNAL_PREPOST) {
                semaphore->count++;
            }
            kr = KERN_SUCCESS;
            semaphore_unlock(semaphore);
        }
        splx(spl_level);
        return kr;
    }

    if (semaphore->count < 0) {
        waitq_options_t wq_option = (options & SEMAPHORE_THREAD_HANDOFF) ?
            WQ_OPTION_HANDOFF : WQ_OPTION_NONE;
        kr = waitq_wakeup64_one_locked(
            &semaphore->waitq,
            SEMAPHORE_EVENT,
            THREAD_AWAKENED, NULL,
            WAITQ_ALL_PRIORITIES,
            WAITQ_KEEP_LOCKED,
            wq_option);
        if (kr == KERN_SUCCESS) {
            semaphore_unlock(semaphore);
            splx(spl_level);
            return KERN_SUCCESS;
        } else {
            semaphore->count = 0;  /* all waiters gone */
        }
    }

    if (options & SEMAPHORE_SIGNAL_PREPOST) {
        semaphore->count++;
    }

    semaphore_unlock(semaphore);
    splx(spl_level);
    return KERN_NOT_WAITING;
}

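/*
 * Summary of the option handling above (descriptive note, not in the
 * original source):
 *
 *   thread != THREAD_NULL       wake only that thread, else KERN_NOT_WAITING.
 *   SEMAPHORE_SIGNAL_ALL        wake every waiter and reset the count to 0.
 *   SEMAPHORE_THREAD_HANDOFF    if a single waiter is woken, request a direct
 *                               handoff to it (WQ_OPTION_HANDOFF); the handoff
 *                               is consumed by semaphore_wait_internal().
 *   SEMAPHORE_SIGNAL_PREPOST    if nobody is waiting, increment the count so
 *                               a later wait returns immediately.
 */
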
/*
 * Routine:  semaphore_signal_thread
 *
 * If the specified thread is blocked on the semaphore, it is
 * woken up.  If a NULL thread was supplied, then any one
 * thread is woken up.  Otherwise the caller gets KERN_NOT_WAITING
 * and the semaphore is unchanged.
 */
kern_return_t
semaphore_signal_thread(
    semaphore_t semaphore,
    thread_t thread)
{
    kern_return_t ret;

    if (semaphore == SEMAPHORE_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    ret = semaphore_signal_internal(semaphore,
        thread,
        SEMAPHORE_OPTION_NONE);
    return ret;
}

/*
 * Routine:  semaphore_signal_thread_trap
 *
 * Trap interface to the semaphore_signal_thread function.
 */
kern_return_t
semaphore_signal_thread_trap(
    struct semaphore_signal_thread_trap_args *args)
{
    mach_port_name_t sema_name = args->signal_name;
    mach_port_name_t thread_name = args->thread_name;
    semaphore_t semaphore;
    thread_t thread;
    kern_return_t kr;

    /*
     * MACH_PORT_NULL is not an error. It means that we want to
     * select any one thread that is already waiting, but not to
     * pre-post the semaphore.
     */
    if (thread_name != MACH_PORT_NULL) {
        thread = port_name_to_thread(thread_name, PORT_TO_THREAD_NONE);
        if (thread == THREAD_NULL) {
            return KERN_INVALID_ARGUMENT;
        }
    } else {
        thread = THREAD_NULL;
    }

    kr = port_name_to_semaphore(sema_name, &semaphore);
    if (kr == KERN_SUCCESS) {
        kr = semaphore_signal_internal(semaphore,
            thread,
            SEMAPHORE_OPTION_NONE);
        semaphore_dereference(semaphore);
    }
    if (thread != THREAD_NULL) {
        thread_deallocate(thread);
    }
    return kr;
}


/*
 * Routine:  semaphore_signal
 *
 * Traditional (in-kernel client and MIG interface) semaphore
 * signal routine.  Most users will access the trap version.
 *
 * This interface is not defined to return info about whether
 * this call found a thread waiting or not.  The internal
 * routines (and future external routines) do.  We have to
 * convert those into plain KERN_SUCCESS returns.
 */
kern_return_t
semaphore_signal(
    semaphore_t semaphore)
{
    kern_return_t kr;

    if (semaphore == SEMAPHORE_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = semaphore_signal_internal(semaphore,
        THREAD_NULL,
        SEMAPHORE_SIGNAL_PREPOST);
    if (kr == KERN_NOT_WAITING) {
        return KERN_SUCCESS;
    }
    return kr;
}

/*
 * Routine:  semaphore_signal_trap
 *
 * Trap interface to the semaphore_signal function.
 */
kern_return_t
semaphore_signal_trap(
    struct semaphore_signal_trap_args *args)
{
    mach_port_name_t sema_name = args->signal_name;

    return semaphore_signal_internal_trap(sema_name);
}

kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name)
{
    semaphore_t semaphore;
    kern_return_t kr;

    kr = port_name_to_semaphore(sema_name, &semaphore);
    if (kr == KERN_SUCCESS) {
        kr = semaphore_signal_internal(semaphore,
            THREAD_NULL,
            SEMAPHORE_SIGNAL_PREPOST);
        semaphore_dereference(semaphore);
        if (kr == KERN_NOT_WAITING) {
            kr = KERN_SUCCESS;
        }
    }
    return kr;
}

/*
 * Routine:  semaphore_signal_all
 *
 * Awakens ALL threads currently blocked on the semaphore.
 * The semaphore count returns to zero.
 */
kern_return_t
semaphore_signal_all(
    semaphore_t semaphore)
{
    kern_return_t kr;

    if (semaphore == SEMAPHORE_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = semaphore_signal_internal(semaphore,
        THREAD_NULL,
        SEMAPHORE_SIGNAL_ALL);
    if (kr == KERN_NOT_WAITING) {
        return KERN_SUCCESS;
    }
    return kr;
}

/*
 * Routine:  semaphore_signal_all_trap
 *
 * Trap interface to the semaphore_signal_all function.
 */
kern_return_t
semaphore_signal_all_trap(
    struct semaphore_signal_all_trap_args *args)
{
    mach_port_name_t sema_name = args->signal_name;
    semaphore_t semaphore;
    kern_return_t kr;

    kr = port_name_to_semaphore(sema_name, &semaphore);
    if (kr == KERN_SUCCESS) {
        kr = semaphore_signal_internal(semaphore,
            THREAD_NULL,
            SEMAPHORE_SIGNAL_ALL);
        semaphore_dereference(semaphore);
        if (kr == KERN_NOT_WAITING) {
            kr = KERN_SUCCESS;
        }
    }
    return kr;
}

/*
 * Routine:  semaphore_convert_wait_result
 *
 * Generate the return code after a semaphore wait/block.  It
 * takes the wait result as an input and converts that to an
 * appropriate result.
 */
kern_return_t
semaphore_convert_wait_result(int wait_result)
{
    switch (wait_result) {
    case THREAD_AWAKENED:
        return KERN_SUCCESS;

    case THREAD_TIMED_OUT:
        return KERN_OPERATION_TIMED_OUT;

    case THREAD_INTERRUPTED:
        return KERN_ABORTED;

    case THREAD_RESTART:
        return KERN_TERMINATED;

    default:
        panic("semaphore_block\n");
        return KERN_FAILURE;
    }
}

/*
 * Routine:  semaphore_wait_continue
 *
 * Common continuation routine after waiting on a semaphore.
 * It returns directly to user space.
 */
void
semaphore_wait_continue(void *arg __unused, wait_result_t wr)
{
    thread_t self = current_thread();
    void (*caller_cont)(kern_return_t) = self->sth_continuation;

    assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
    semaphore_dereference(self->sth_waitsemaphore);
    if (self->sth_signalsemaphore != SEMAPHORE_NULL) {
        semaphore_dereference(self->sth_signalsemaphore);
    }

    assert(self->handoff_thread == THREAD_NULL);
    assert(caller_cont != (void (*)(kern_return_t))0);
    (*caller_cont)(semaphore_convert_wait_result(wr));
}

/*
 * Routine:  semaphore_wait_internal
 *
 * Decrements the semaphore count by one.  If the count is
 * negative after the decrement, the calling thread blocks
 * (possibly at a continuation and/or with a timeout).
 *
 * Assumptions:
 *     A reference is held on the wait semaphore.
 *     A reference is held on the signal semaphore, if one is provided.
 */
static kern_return_t
semaphore_wait_internal(
    semaphore_t wait_semaphore,
    semaphore_t signal_semaphore,
    uint64_t deadline,
    int option,
    void (*caller_cont)(kern_return_t))
{
    int wait_result;
    spl_t spl_level;
    kern_return_t kr = KERN_ALREADY_WAITING;

    spl_level = splsched();
    semaphore_lock(wait_semaphore);
    thread_t self = current_thread();
    thread_t handoff_thread = THREAD_NULL;
    thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE;
    int semaphore_signal_options = SEMAPHORE_SIGNAL_PREPOST;

    if (!wait_semaphore->active) {
        kr = KERN_TERMINATED;
    } else if (wait_semaphore->count > 0) {
        wait_semaphore->count--;
        kr = KERN_SUCCESS;
    } else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) {
        kr = KERN_OPERATION_TIMED_OUT;
    } else {
        wait_semaphore->count = -1;  /* we don't keep an actual count */

        thread_set_pending_block_hint(self, kThreadWaitSemaphore);
        (void)waitq_assert_wait64_locked(
            &wait_semaphore->waitq,
            SEMAPHORE_EVENT,
            THREAD_ABORTSAFE,
            TIMEOUT_URGENCY_USER_NORMAL,
            deadline, TIMEOUT_NO_LEEWAY,
            self);

        semaphore_signal_options |= SEMAPHORE_THREAD_HANDOFF;
    }
    semaphore_unlock(wait_semaphore);
    splx(spl_level);

    /*
     * wait_semaphore is unlocked so we are free to go ahead and
     * signal the signal_semaphore (if one was provided).
     */
    if (signal_semaphore != SEMAPHORE_NULL) {
        kern_return_t signal_kr;

        /*
         * lock the signal semaphore reference we got and signal it.
         * This will NOT block (we cannot block after having asserted
         * our intention to wait above).
         */
        signal_kr = semaphore_signal_internal(signal_semaphore,
            THREAD_NULL, semaphore_signal_options);

        if (signal_kr == KERN_NOT_WAITING) {
            assert(self->handoff_thread == THREAD_NULL);
            signal_kr = KERN_SUCCESS;
        } else if (signal_kr == KERN_TERMINATED) {
            /*
             * Uh!Oh!  The semaphore we were to signal died.
             * We have to get ourselves out of the wait in
             * case we get stuck here forever (it is assumed
             * that the semaphore we were posting is gating
             * the decision by someone else to post the
             * semaphore we are waiting on).  People will
             * discover the other dead semaphore soon enough.
             * If we got out of the wait cleanly (someone
             * already posted a wakeup to us) then return that
             * (most important) result.  Otherwise,
             * return the KERN_TERMINATED status.
             */
            assert(self->handoff_thread == THREAD_NULL);
            clear_wait(self, THREAD_INTERRUPTED);
            kr = semaphore_convert_wait_result(self->wait_result);
            if (kr == KERN_ABORTED) {
                kr = KERN_TERMINATED;
            }
        }
    }

    /*
     * If we had an error, or we didn't really need to wait we can
     * return now that we have signalled the signal semaphore.
     */
    if (kr != KERN_ALREADY_WAITING) {
        assert(self->handoff_thread == THREAD_NULL);
        return kr;
    }

    if (self->handoff_thread) {
        handoff_thread = self->handoff_thread;
        self->handoff_thread = THREAD_NULL;
        handoff_option = THREAD_HANDOFF_SETRUN_NEEDED;
    }
    /*
     * Now, we can block.  If the caller supplied a continuation
     * pointer of his own for after the block, block with the
     * appropriate semaphore continuation.  This will gather the
     * semaphore results, release references on the semaphore(s),
     * and then call the caller's continuation.
     */
    if (caller_cont) {
        self->sth_continuation = caller_cont;
        self->sth_waitsemaphore = wait_semaphore;
        self->sth_signalsemaphore = signal_semaphore;

        thread_handoff_parameter(handoff_thread, semaphore_wait_continue,
            NULL, handoff_option);
    } else {
        wait_result = thread_handoff_deallocate(handoff_thread, handoff_option);
    }

    assert(self->handoff_thread == THREAD_NULL);
    return semaphore_convert_wait_result(wait_result);
}


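/*
 * Descriptive note (not in the original source): while any thread is blocked,
 * the count above is pinned at -1 rather than tracking the number of waiters;
 * the waitq itself remembers who is queued, and a wakeup that finds the queue
 * empty resets the count to 0.  The SEMAPHORE_THREAD_HANDOFF option set in
 * the blocking path lets a wait-and-signal pair donate the processor straight
 * to the thread woken on the signal semaphore via thread_handoff_parameter().
 */
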
/*
 * Routine:  semaphore_wait
 *
 * Traditional (non-continuation) interface presented to
 * in-kernel clients to wait on a semaphore.
 */
kern_return_t
semaphore_wait(
    semaphore_t semaphore)
{
    if (semaphore == SEMAPHORE_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return semaphore_wait_internal(semaphore,
               SEMAPHORE_NULL,
               0ULL, SEMAPHORE_OPTION_NONE,
               (void (*)(kern_return_t))0);
}

kern_return_t
semaphore_wait_noblock(
    semaphore_t semaphore)
{
    if (semaphore == SEMAPHORE_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return semaphore_wait_internal(semaphore,
               SEMAPHORE_NULL,
               0ULL, SEMAPHORE_TIMEOUT_NOBLOCK,
               (void (*)(kern_return_t))0);
}

kern_return_t
semaphore_wait_deadline(
    semaphore_t semaphore,
    uint64_t deadline)
{
    if (semaphore == SEMAPHORE_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return semaphore_wait_internal(semaphore,
               SEMAPHORE_NULL,
               deadline, SEMAPHORE_OPTION_NONE,
               (void (*)(kern_return_t))0);
}

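/*
 * Usage sketch for the in-kernel wait interfaces (illustrative only; the
 * helper below is hypothetical and not part of this file).  A kernel-resident
 * caller holding a semaphore reference could bound a wait by combining
 * semaphore_deadline() with semaphore_wait_deadline():
 *
 *	static kern_return_t
 *	wait_up_to_one_second(semaphore_t sem)
 *	{
 *		uint64_t deadline = semaphore_deadline(1, 0);
 *
 *		// Returns KERN_SUCCESS, KERN_OPERATION_TIMED_OUT, KERN_ABORTED,
 *		// or KERN_TERMINATED per semaphore_convert_wait_result().
 *		return semaphore_wait_deadline(sem, deadline);
 *	}
 */
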
/*
 * Trap:  semaphore_wait_trap
 *
 * Trap version of semaphore wait.  Called on behalf of user-level
 * clients.
 */

kern_return_t
semaphore_wait_trap(
    struct semaphore_wait_trap_args *args)
{
    return semaphore_wait_trap_internal(args->wait_name, thread_syscall_return);
}


kern_return_t
semaphore_wait_trap_internal(
    mach_port_name_t name,
    void (*caller_cont)(kern_return_t))
{
    semaphore_t semaphore;
    kern_return_t kr;

    kr = port_name_to_semaphore(name, &semaphore);
    if (kr == KERN_SUCCESS) {
        kr = semaphore_wait_internal(semaphore,
            SEMAPHORE_NULL,
            0ULL, SEMAPHORE_OPTION_NONE,
            caller_cont);
        semaphore_dereference(semaphore);
    }
    return kr;
}

/*
 * Routine:  semaphore_timedwait
 *
 * Traditional (non-continuation) interface presented to
 * in-kernel clients to wait on a semaphore with a timeout.
 *
 * A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait(
    semaphore_t semaphore,
    mach_timespec_t wait_time)
{
    int option = SEMAPHORE_OPTION_NONE;
    uint64_t deadline = 0;

    if (semaphore == SEMAPHORE_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (BAD_MACH_TIMESPEC(&wait_time)) {
        return KERN_INVALID_VALUE;
    }

    if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
        option = SEMAPHORE_TIMEOUT_NOBLOCK;
    } else {
        deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
    }

    return semaphore_wait_internal(semaphore,
               SEMAPHORE_NULL,
               deadline, option,
               (void (*)(kern_return_t))0);
}

/*
 * Trap:  semaphore_timedwait_trap
 *
 * Trap version of a semaphore_timedwait.  The timeout parameter
 * is passed in two distinct parts and re-assembled on this side
 * of the trap interface (to accommodate calling conventions that
 * pass structures as pointers instead of inline in registers without
 * having to add a copyin).
 *
 * A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_trap(
    struct semaphore_timedwait_trap_args *args)
{
    return semaphore_timedwait_trap_internal(args->wait_name, args->sec, args->nsec, thread_syscall_return);
}


kern_return_t
semaphore_timedwait_trap_internal(
    mach_port_name_t name,
    unsigned int sec,
    clock_res_t nsec,
    void (*caller_cont)(kern_return_t))
{
    semaphore_t semaphore;
    mach_timespec_t wait_time;
    kern_return_t kr;

    wait_time.tv_sec = sec;
    wait_time.tv_nsec = nsec;
    if (BAD_MACH_TIMESPEC(&wait_time)) {
        return KERN_INVALID_VALUE;
    }

    kr = port_name_to_semaphore(name, &semaphore);
    if (kr == KERN_SUCCESS) {
        int option = SEMAPHORE_OPTION_NONE;
        uint64_t deadline = 0;

        if (sec == 0 && nsec == 0) {
            option = SEMAPHORE_TIMEOUT_NOBLOCK;
        } else {
            deadline = semaphore_deadline(sec, nsec);
        }

        kr = semaphore_wait_internal(semaphore,
            SEMAPHORE_NULL,
            deadline, option,
            caller_cont);
        semaphore_dereference(semaphore);
    }
    return kr;
}

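/*
 * Usage sketch (illustrative, not part of the original file): from user space
 * the timed wait is reached through semaphore_timedwait() as declared in
 * <mach/semaphore.h>, which this sketch assumes:
 *
 *	mach_timespec_t ts = { .tv_sec = 0, .tv_nsec = 250000000 };  // 250 ms
 *	kern_return_t kr = semaphore_timedwait(sem, ts);
 *	if (kr == KERN_OPERATION_TIMED_OUT) {
 *		// nothing was signalled within 250 ms
 *	}
 */
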
/*
 * Routine:  semaphore_wait_signal
 *
 * Atomically register a wait on a semaphore and THEN signal
 * another.  This is the in-kernel entry point that does not
 * block at a continuation and does not free a signal_semaphore
 * reference.
 */
kern_return_t
semaphore_wait_signal(
    semaphore_t wait_semaphore,
    semaphore_t signal_semaphore)
{
    if (wait_semaphore == SEMAPHORE_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return semaphore_wait_internal(wait_semaphore,
               signal_semaphore,
               0ULL, SEMAPHORE_OPTION_NONE,
               (void (*)(kern_return_t))0);
}

/*
 * Trap:  semaphore_wait_signal_trap
 *
 * Atomically register a wait on a semaphore and THEN signal
 * another.  This is the trap version from user space.
 */
kern_return_t
semaphore_wait_signal_trap(
    struct semaphore_wait_signal_trap_args *args)
{
    return semaphore_wait_signal_trap_internal(args->wait_name, args->signal_name, thread_syscall_return);
}

kern_return_t
semaphore_wait_signal_trap_internal(
    mach_port_name_t wait_name,
    mach_port_name_t signal_name,
    void (*caller_cont)(kern_return_t))
{
    semaphore_t wait_semaphore;
    semaphore_t signal_semaphore;
    kern_return_t kr;

    kr = port_name_to_semaphore(signal_name, &signal_semaphore);
    if (kr == KERN_SUCCESS) {
        kr = port_name_to_semaphore(wait_name, &wait_semaphore);
        if (kr == KERN_SUCCESS) {
            kr = semaphore_wait_internal(wait_semaphore,
                signal_semaphore,
                0ULL, SEMAPHORE_OPTION_NONE,
                caller_cont);
            semaphore_dereference(wait_semaphore);
        }
        semaphore_dereference(signal_semaphore);
    }
    return kr;
}


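/*
 * Descriptive note (not in the original source): the wait-signal forms exist
 * so that "signal B, then wait on A" cannot lose a wakeup.  Inside
 * semaphore_wait_internal() the wait is registered on the wait queue first,
 * and only then is the signal semaphore posted, so a peer that reacts to the
 * signal before this thread actually blocks still finds it on the queue.
 */
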
/*
 * Routine:  semaphore_timedwait_signal
 *
 * Atomically register a wait on a semaphore and THEN signal
 * another.  This is the in-kernel entry point that does not
 * block at a continuation.
 *
 * A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_signal(
    semaphore_t wait_semaphore,
    semaphore_t signal_semaphore,
    mach_timespec_t wait_time)
{
    int option = SEMAPHORE_OPTION_NONE;
    uint64_t deadline = 0;

    if (wait_semaphore == SEMAPHORE_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (BAD_MACH_TIMESPEC(&wait_time)) {
        return KERN_INVALID_VALUE;
    }

    if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
        option = SEMAPHORE_TIMEOUT_NOBLOCK;
    } else {
        deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
    }

    return semaphore_wait_internal(wait_semaphore,
               signal_semaphore,
               deadline, option,
               (void (*)(kern_return_t))0);
}

/*
 * Trap:  semaphore_timedwait_signal_trap
 *
 * Atomically register a timed wait on a semaphore and THEN signal
 * another.  This is the trap version from user space.
 */
kern_return_t
semaphore_timedwait_signal_trap(
    struct semaphore_timedwait_signal_trap_args *args)
{
    return semaphore_timedwait_signal_trap_internal(args->wait_name, args->signal_name, args->sec, args->nsec, thread_syscall_return);
}

kern_return_t
semaphore_timedwait_signal_trap_internal(
    mach_port_name_t wait_name,
    mach_port_name_t signal_name,
    unsigned int sec,
    clock_res_t nsec,
    void (*caller_cont)(kern_return_t))
{
    semaphore_t wait_semaphore;
    semaphore_t signal_semaphore;
    mach_timespec_t wait_time;
    kern_return_t kr;

    wait_time.tv_sec = sec;
    wait_time.tv_nsec = nsec;
    if (BAD_MACH_TIMESPEC(&wait_time)) {
        return KERN_INVALID_VALUE;
    }

    kr = port_name_to_semaphore(signal_name, &signal_semaphore);
    if (kr == KERN_SUCCESS) {
        kr = port_name_to_semaphore(wait_name, &wait_semaphore);
        if (kr == KERN_SUCCESS) {
            int option = SEMAPHORE_OPTION_NONE;
            uint64_t deadline = 0;

            if (sec == 0 && nsec == 0) {
                option = SEMAPHORE_TIMEOUT_NOBLOCK;
            } else {
                deadline = semaphore_deadline(sec, nsec);
            }

            kr = semaphore_wait_internal(wait_semaphore,
                signal_semaphore,
                deadline, option,
                caller_cont);
            semaphore_dereference(wait_semaphore);
        }
        semaphore_dereference(signal_semaphore);
    }
    return kr;
}


/*
 * Routine:  semaphore_reference
 *
 * Take out a reference on a semaphore.  This keeps the data structure
 * in existence (but the semaphore may be deactivated).
 */
void
semaphore_reference(
    semaphore_t semaphore)
{
    os_ref_retain(&semaphore->ref_count);
}

/*
 * Routine:  semaphore_dereference
 *
 * Release a reference on a semaphore.  If this is the last reference,
 * the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
    semaphore_t semaphore)
{
    uint32_t collisions;
    spl_t spl_level;

    if (semaphore == NULL) {
        return;
    }

    if (os_ref_release(&semaphore->ref_count) > 0) {
        return;
    }

    /*
     * Last ref, clean up the port [if any]
     * associated with the semaphore, destroy
     * it (if still active) and then free
     * the semaphore.
     */
    ipc_port_t port = semaphore->port;

    if (IP_VALID(port)) {
        assert(!port->ip_srights);
        ipc_port_dealloc_kernel(port);
    }

    /*
     * Lock the semaphore to lock in the owner task reference.
     * Then continue to try to lock the task (inverse order).
     */
    spl_level = splsched();
    semaphore_lock(semaphore);
    for (collisions = 0; semaphore->active; collisions++) {
        task_t task = semaphore->owner;

        assert(task != TASK_NULL);

        if (task_lock_try(task)) {
            semaphore_destroy_internal(task, semaphore);
            /* semaphore unlocked */
            splx(spl_level);
            task_unlock(task);
            goto out;
        }

        /* failed to get out-of-order locks */
        semaphore_unlock(semaphore);
        splx(spl_level);
        mutex_pause(collisions);
        spl_level = splsched();
        semaphore_lock(semaphore);
    }
    semaphore_unlock(semaphore);
    splx(spl_level);

out:
    zfree(semaphore_zone, semaphore);
}

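/*
 * Descriptive note (not in the original source): the normal lock order is
 * task lock before semaphore lock, as in semaphore_destroy() above.  The last
 * dereference only holds the semaphore lock, so it must use task_lock_try()
 * and back off with mutex_pause() on contention rather than blocking, which
 * would invert the ordering and risk deadlock.
 */
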
#define WAITQ_TO_SEMA(wq) ((semaphore_t) ((uintptr_t)(wq) - offsetof(struct semaphore, waitq)))
void
kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
{
    semaphore_t sem = WAITQ_TO_SEMA(waitq);
    assert(event == SEMAPHORE_EVENT);

    zone_require(semaphore_zone, sem);

    waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
    if (sem->owner) {
        waitinfo->owner = pid_from_task(sem->owner);
    }
}
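
/*
 * Descriptive note (not in the original source): WAITQ_TO_SEMA() recovers the
 * enclosing struct semaphore from its embedded waitq (a container-of style
 * pointer adjustment), and kdp_sema_find_owner() is the debugger/stackshot
 * hook that reports which task owns the semaphore a thread is blocked on.
 * zone_require() validates that the recovered pointer really lives in
 * semaphore_zone before it is dereferenced.
 */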