zone_t semaphore_zone;
unsigned int semaphore_max;
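+/* reference-count group for semaphore objects (os_refcnt accounting/debugging) */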
+os_refgrp_decl(static, sema_refgrp, "semaphore", NULL);
+
/* Forward declarations */
/*
 * Initialize the semaphore values.
 */
s->port = IP_NULL;
- s->ref_count = 1;
+ os_ref_init(&s->ref_count, &sema_refgrp);
s->count = value;
s->active = TRUE;
s->owner = task;
/*
* Routine: semaphore_destroy_internal
*
- * This call will only succeed if the specified task is the SAME task
- * specified at the semaphore's creation.
+ * Disassociate a semaphore from its owning task, mark it inactive,
+ * and set any waiting threads running with THREAD_RESTART.
*
- * All threads currently blocked on the semaphore are awoken. These
- * threads will return with the KERN_TERMINATED error.
+ * Conditions:
+ * task is locked
+ * semaphore is locked
+ * semaphore is owned by the specified task
+ * Returns:
+ * with semaphore unlocked
*/
-kern_return_t
+static void
semaphore_destroy_internal(
task_t task,
semaphore_t semaphore)
{
int old_count;
- spl_t spl_level;
-
- /*
- * Disown semaphore
- */
- task_lock(task);
- if (semaphore->owner != task) {
- task_unlock(task);
- return KERN_INVALID_ARGUMENT;
- }
- spl_level = splsched();
- semaphore_lock(semaphore);
+ /* unlink semaphore from owning task */
+ assert(semaphore->owner == task);
remqueue((queue_entry_t) semaphore);
semaphore->owner = TASK_NULL;
task->semaphores_owned--;
- task_unlock(task);
-
/*
* Deactivate semaphore
*/
} else {
semaphore_unlock(semaphore);
}
- splx(spl_level);
-
- return KERN_SUCCESS;
}
/*
task_t task,
semaphore_t semaphore)
{
- kern_return_t kr;
+ spl_t spl_level;
if (semaphore == SEMAPHORE_NULL)
return KERN_INVALID_ARGUMENT;
if (task == TASK_NULL) {
- kr = KERN_INVALID_ARGUMENT;
- } else {
- kr = semaphore_destroy_internal(task, semaphore);
+ semaphore_dereference(semaphore);
+ return KERN_INVALID_ARGUMENT;
}
+
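+ /* take the locks in order: task lock, then spl/semaphore lock, as semaphore_destroy_internal expects */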
+ task_lock(task);
+ spl_level = splsched();
+ semaphore_lock(semaphore);
+
+ if (semaphore->owner != task) {
+ semaphore_unlock(semaphore);
+ semaphore_dereference(semaphore);
+ splx(spl_level);
+ task_unlock(task);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ semaphore_destroy_internal(task, semaphore);
+ /* semaphore unlocked */
+
+ splx(spl_level);
+ task_unlock(task);
+
semaphore_dereference(semaphore);
- return kr;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: semaphore_destroy_all
+ *
+ * Destroy all the semaphores owned by a given task.
+ */
+#define SEMASPERSPL 20 /* max number of semaphores to destroy per spl hold */
+
+void
+semaphore_destroy_all(
+ task_t task)
+{
+ uint32_t count;
+ spl_t spl_level;
+
+ count = 0;
+ task_lock(task);
+ while (!queue_empty(&task->semaphore_list)) {
+ semaphore_t semaphore;
+
+ semaphore = (semaphore_t) queue_first(&task->semaphore_list);
+
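+ /* raise spl only at the start of each batch; it is dropped every SEMASPERSPL destroys below */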
+ if (count == 0)
+ spl_level = splsched();
+ semaphore_lock(semaphore);
+
+ semaphore_destroy_internal(task, semaphore);
+ /* semaphore unlocked */
+
+ /* throttle number of semaphores per interrupt disablement */
+ if (++count == SEMASPERSPL) {
+ count = 0;
+ splx(spl_level);
+ }
+ }
+ if (count != 0)
+ splx(spl_level);
+
+ task_unlock(task);
}
/*
thread_t self = current_thread();
wait_semaphore->count = -1; /* we don't keep an actual count */
- thread_lock(self);
+
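+ /* record what this thread is about to block on so stackshot can report the semaphore */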
+ thread_set_pending_block_hint(self, kThreadWaitSemaphore);
(void)waitq_assert_wait64_locked(
&wait_semaphore->waitq,
SEMAPHORE_EVENT,
TIMEOUT_URGENCY_USER_NORMAL,
deadline, TIMEOUT_NO_LEEWAY,
self);
- thread_unlock(self);
}
semaphore_unlock(wait_semaphore);
splx(spl_level);
semaphore_reference(
semaphore_t semaphore)
{
- (void)hw_atomic_add(&semaphore->ref_count, 1);
+ os_ref_retain(&semaphore->ref_count);
}
/*
semaphore_dereference(
semaphore_t semaphore)
{
+ uint32_t collisions;
+ spl_t spl_level;
+
if (semaphore == NULL)
return;
- if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
+ if (os_ref_release(&semaphore->ref_count) > 0) {
return;
+ }
/*
* Last ref, clean up the port [if any]
assert(!port->ip_srights);
ipc_port_dealloc_kernel(port);
}
- if (semaphore->active) {
- assert(semaphore->owner != TASK_NULL);
- semaphore_destroy_internal(semaphore->owner, semaphore);
+
+ /*
+ * Lock the semaphore to pin down the owner task reference,
+ * then try to take the task lock (out of the normal lock order).
+ */
+ spl_level = splsched();
+ semaphore_lock(semaphore);
+ for (collisions = 0; semaphore->active; collisions++) {
+ task_t task = semaphore->owner;
+
+ assert(task != TASK_NULL);
+
+ if (task_lock_try(task)) {
+ semaphore_destroy_internal(task, semaphore);
+ /* semaphore unlocked */
+ splx(spl_level);
+ task_unlock(task);
+ goto out;
+ }
+
+ /* failed to get out-of-order locks */
+ semaphore_unlock(semaphore);
+ splx(spl_level);
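+ /* brief back-off, scaled by the collision count, before retrying */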
+ mutex_pause(collisions);
+ spl_level = splsched();
+ semaphore_lock(semaphore);
}
+ semaphore_unlock(semaphore);
+ splx(spl_level);
+
+ out:
zfree(semaphore_zone, semaphore);
}
+#define WAITQ_TO_SEMA(wq) ((semaphore_t) ((uintptr_t)(wq) - offsetof(struct semaphore, waitq)))
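+
+/*
+ * Routine: kdp_sema_find_owner
+ *
+ * Report the port and owning task (if any) of the semaphore that a
+ * blocked thread is waiting on, for debugger/stackshot wait reporting.
+ */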
+void
+kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
+{
+ semaphore_t sem = WAITQ_TO_SEMA(waitq);
+ assert(event == SEMAPHORE_EVENT);
+ assert(kdp_is_in_zone(sem, "semaphores"));
+ waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
+ if (sem->owner)
+ waitinfo->owner = pid_from_task(sem->owner);
+}