/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/host.h>
-#include <kern/wait_queue.h>
+#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/mach_param.h>
+#include <libkern/OSAtomic.h>
+
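/*
 * All semaphore waits are posted against this one event. Each
 * semaphore embeds its own waitq, so a single shared event value
 * is enough to identify semaphore waits within a given queue.
 */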
static unsigned int semaphore_event;
#define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)
semaphore_max * sizeof(struct semaphore),
sizeof(struct semaphore),
"semaphores");
+ zone_change(semaphore_zone, Z_NOENCRYPT, TRUE);
}
/*
 * Routine: semaphore_create
 *
 * Creates a semaphore.
 */
if (s == SEMAPHORE_NULL)
return KERN_RESOURCE_SHORTAGE;
- kret = wait_queue_init(&s->wait_queue, policy); /* also inits lock */
+ kret = waitq_init(&s->waitq, policy | SYNC_POLICY_DISABLE_IRQ); /* also inits lock */
if (kret != KERN_SUCCESS) {
zfree(semaphore_zone, s);
return kret;
}
- s->count = value;
- s->ref_count = (task == kernel_task) ? 1 : 2;
-
/*
- * Create and initialize the semaphore port
+ * Initialize the semaphore values.
*/
- s->port = ipc_port_alloc_kernel();
- if (s->port == IP_NULL) {
- zfree(semaphore_zone, s);
- return KERN_RESOURCE_SHORTAGE;
- }
-
- ipc_kobject_set (s->port, (ipc_kobject_t) s, IKOT_SEMAPHORE);
+ s->port = IP_NULL;
+ s->ref_count = 1;
+ s->count = value;
+ s->active = TRUE;
+ s->owner = task;
/*
* Associate the new semaphore with the task by adding
* the new semaphore to the task's semaphore list.
- *
- * Associate the task with the new semaphore by having the
- * semaphores task pointer point to the owning task's structure.
*/
task_lock(task);
enqueue_head(&task->semaphore_list, (queue_entry_t) s);
task->semaphores_owned++;
- s->owner = task;
- s->active = TRUE;
task_unlock(task);
*new_semaphore = s;
}
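
/*
 * Illustrative usage sketch (not from the original source; the helper
 * name is hypothetical). A kernel client pairs semaphore_create with
 * semaphore_destroy, which consumes the create reference:
 */
#if 0
static kern_return_t
example_semaphore_roundtrip(void)
{
	semaphore_t sem;
	kern_return_t kr;

	kr = semaphore_create(kernel_task, &sem, SYNC_POLICY_FIFO, 0);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = semaphore_signal(sem);		/* count 0 -> 1, no waiters */
	if (kr == KERN_SUCCESS)
		kr = semaphore_wait(sem);	/* count 1 -> 0, no block */

	(void) semaphore_destroy(kernel_task, sem);
	return kr;
}
#endif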
/*
- * Routine: semaphore_destroy
+ * Routine: semaphore_destroy_internal
*
- * Destroys a semaphore. This call will only succeed if the
- * specified task is the SAME task name specified at the semaphore's
- * creation.
+ * Disassociate a semaphore from its owning task, mark it inactive,
+ * and set any waiting threads running with THREAD_RESTART.
*
- * All threads currently blocked on the semaphore are awoken. These
- * threads will return with the KERN_TERMINATED error.
+ * Conditions:
+ * task is locked
+ * semaphore is locked
+ * semaphore is owned by the specified task
+ * Returns:
+ * with semaphore unlocked
*/
-kern_return_t
-semaphore_destroy(
+static void
+semaphore_destroy_internal(
task_t task,
semaphore_t semaphore)
{
- int old_count;
- spl_t spl_level;
+ int old_count;
-
- if (task == TASK_NULL || semaphore == SEMAPHORE_NULL)
- return KERN_INVALID_ARGUMENT;
-
- /*
- * Disown semaphore
- */
- task_lock(task);
- if (semaphore->owner != task) {
- task_unlock(task);
- return KERN_INVALID_ARGUMENT;
- }
- remqueue(&task->semaphore_list, (queue_entry_t) semaphore);
+ /* unlink semaphore from owning task */
+ assert(semaphore->owner == task);
+ remqueue((queue_entry_t) semaphore);
semaphore->owner = TASK_NULL;
task->semaphores_owned--;
- task_unlock(task);
-
- spl_level = splsched();
- semaphore_lock(semaphore);
/*
 * Deactivate semaphore and wake up any blocked threads.
 */
semaphore->active = FALSE;
old_count = semaphore->count;
semaphore->count = 0;
if (old_count < 0) {
- wait_queue_wakeup64_all_locked(&semaphore->wait_queue,
- SEMAPHORE_EVENT,
- THREAD_RESTART,
- TRUE); /* unlock? */
+ waitq_wakeup64_all_locked(&semaphore->waitq,
+ SEMAPHORE_EVENT,
+ THREAD_RESTART, NULL,
+ WAITQ_ALL_PRIORITIES,
+ WAITQ_UNLOCK);
+ /* waitq/semaphore is unlocked */
} else {
semaphore_unlock(semaphore);
}
+}
+
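+/*
+ * Caller-side sketch of the contract above (illustrative; it mirrors
+ * the real callers below). The task lock, splsched() and the semaphore
+ * lock are all held on entry; the semaphore lock is dropped on return:
+ *
+ *	task_lock(task);
+ *	s = splsched();			// waitq was created IRQ-safe
+ *	semaphore_lock(sem);
+ *	semaphore_destroy_internal(task, sem);
+ *	// semaphore now unlocked
+ *	splx(s);
+ *	task_unlock(task);
+ */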
+/*
+ * Routine: semaphore_destroy
+ *
+ * Destroys a semaphore and consumes the caller's reference on the
+ * semaphore.
+ */
+kern_return_t
+semaphore_destroy(
+ task_t task,
+ semaphore_t semaphore)
+{
+ spl_t spl_level;
+
+ if (semaphore == SEMAPHORE_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (task == TASK_NULL) {
+ semaphore_dereference(semaphore);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ task_lock(task);
+ spl_level = splsched();
+ semaphore_lock(semaphore);
+
+ if (semaphore->owner != task) {
+ semaphore_unlock(semaphore);
+ splx(spl_level);
+ task_unlock(task);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ semaphore_destroy_internal(task, semaphore);
+ /* semaphore unlocked */
+
splx(spl_level);
+ task_unlock(task);
- /*
- * Deallocate
- *
- * Drop the task's semaphore reference, which in turn deallocates
- * the semaphore structure if the reference count goes to zero.
- */
semaphore_dereference(semaphore);
return KERN_SUCCESS;
}
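+
+/*
+ * Note the reference discipline above: the caller's semaphore reference
+ * is consumed on every exit path, including the TASK_NULL failure case.
+ */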
+/*
+ * Routine: semaphore_destroy_all
+ *
+ * Destroy all the semaphores associated with a given task.
+ */
+#define SEMASPERSPL 20 /* max number of semaphores to destroy per spl hold */
+
+void
+semaphore_destroy_all(
+ task_t task)
+{
+ uint32_t count;
+ spl_t spl_level;
+
+ count = 0;
+ task_lock(task);
+ while (!queue_empty(&task->semaphore_list)) {
+ semaphore_t semaphore;
+
+ semaphore = (semaphore_t) queue_first(&task->semaphore_list);
+
+ if (count == 0)
+ spl_level = splsched();
+ semaphore_lock(semaphore);
+
+ semaphore_destroy_internal(task, semaphore);
+ /* semaphore unlocked */
+
+ /* throttle number of semaphores per interrupt disablement */
+ if (++count == SEMASPERSPL) {
+ count = 0;
+ splx(spl_level);
+ }
+ }
+ if (count != 0)
+ splx(spl_level);
+
+ task_unlock(task);
+}
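+
+/*
+ * Illustrative caller note (hedged: based on how task teardown is
+ * expected to use this). The list is drained via queue_first() because
+ * each semaphore_destroy_internal() call unlinks the head, while
+ * SEMASPERSPL caps how long interrupts stay disabled:
+ *
+ *	semaphore_destroy_all(task);	// e.g. from task teardown
+ */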
+
/*
 * Routine: semaphore_signal_internal
 *
 * Signals the semaphore, waking a specified thread, a single
 * waiter, or all waiters according to the options given.
 */
if (thread != THREAD_NULL) {
if (semaphore->count < 0) {
- kr = wait_queue_wakeup64_thread_locked(
- &semaphore->wait_queue,
+ kr = waitq_wakeup64_thread_locked(
+ &semaphore->waitq,
SEMAPHORE_EVENT,
thread,
THREAD_AWAKENED,
- TRUE); /* unlock? */
+ WAITQ_UNLOCK);
+ /* waitq/semaphore is unlocked */
} else {
- semaphore_unlock(semaphore);
kr = KERN_NOT_WAITING;
+ semaphore_unlock(semaphore);
}
splx(spl_level);
return kr;
if (options & SEMAPHORE_SIGNAL_ALL) {
int old_count = semaphore->count;
+ kr = KERN_NOT_WAITING;
if (old_count < 0) {
semaphore->count = 0; /* always reset */
- kr = wait_queue_wakeup64_all_locked(
- &semaphore->wait_queue,
+ kr = waitq_wakeup64_all_locked(
+ &semaphore->waitq,
SEMAPHORE_EVENT,
- THREAD_AWAKENED,
- TRUE); /* unlock? */
+ THREAD_AWAKENED, NULL,
+ WAITQ_ALL_PRIORITIES,
+ WAITQ_UNLOCK);
+ /* waitq/semaphore is unlocked */
} else {
if (options & SEMAPHORE_SIGNAL_PREPOST)
semaphore->count++;
- semaphore_unlock(semaphore);
kr = KERN_SUCCESS;
+ semaphore_unlock(semaphore);
}
splx(spl_level);
return kr;
}
if (semaphore->count < 0) {
- if (wait_queue_wakeup64_one_locked(
- &semaphore->wait_queue,
+ kr = waitq_wakeup64_one_locked(
+ &semaphore->waitq,
SEMAPHORE_EVENT,
- THREAD_AWAKENED,
- FALSE) == KERN_SUCCESS) {
+ THREAD_AWAKENED, NULL,
+ WAITQ_ALL_PRIORITIES,
+ WAITQ_KEEP_LOCKED);
+ if (kr == KERN_SUCCESS) {
semaphore_unlock(semaphore);
splx(spl_level);
return KERN_SUCCESS;
- } else
+ } else {
semaphore->count = 0; /* all waiters gone */
+ }
}
if (options & SEMAPHORE_SIGNAL_PREPOST) {
thread_t self = current_thread();
wait_semaphore->count = -1; /* we don't keep an actual count */
- thread_lock(self);
- (void)wait_queue_assert_wait64_locked(
- &wait_semaphore->wait_queue,
+
+ thread_set_pending_block_hint(self, kThreadWaitSemaphore);
+ (void)waitq_assert_wait64_locked(
+ &wait_semaphore->waitq,
SEMAPHORE_EVENT,
- THREAD_ABORTSAFE, deadline,
+ THREAD_ABORTSAFE,
+ TIMEOUT_URGENCY_USER_NORMAL,
+ deadline, TIMEOUT_NO_LEEWAY,
self);
- thread_unlock(self);
}
semaphore_unlock(wait_semaphore);
splx(spl_level);
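
/*
 * With the wait asserted and both locks dropped, the caller blocks and
 * then maps the wakeup code back to a kern_return_t. A sketch, assuming
 * the file's semaphore_convert_wait_result() helper:
 *
 *	wait_result = thread_block(THREAD_CONTINUE_NULL);
 *	return semaphore_convert_wait_result(wait_result);
 *
 * where THREAD_AWAKENED maps to KERN_SUCCESS, THREAD_TIMED_OUT to
 * KERN_OPERATION_TIMED_OUT, THREAD_INTERRUPTED to KERN_ABORTED, and
 * THREAD_RESTART (used by semaphore_destroy_internal above) to
 * KERN_TERMINATED.
 */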
/*
 * Routine: semaphore_dereference
 *
 * Release a reference on a semaphore; the last reference triggers
 * destruction (if still active) and frees the structure.
 */
void
semaphore_dereference(
semaphore_t semaphore)
{
- int ref_count;
+ uint32_t collisions;
+ spl_t spl_level;
+
+ if (semaphore == NULL)
+ return;
- if (semaphore != NULL) {
- ref_count = hw_atomic_sub(&semaphore->ref_count, 1);
+ if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
+ return;
- if (ref_count == 0) {
- assert(wait_queue_empty(&semaphore->wait_queue));
- ipc_port_dealloc_kernel(semaphore->port);
- zfree(semaphore_zone, semaphore);
+ /*
+ * Last reference: deallocate the port (if any) associated with
+ * the semaphore, destroy the semaphore if it is still active,
+ * and then free its memory.
+ */
+ ipc_port_t port = semaphore->port;
+
+ if (IP_VALID(port)) {
+ assert(!port->ip_srights);
+ ipc_port_dealloc_kernel(port);
+ }
+
+ /*
+ * Take the semaphore lock to pin the owner task reference, then
+ * try to take the task lock (the inverse of the normal
+ * task-then-semaphore order).
+ */
+ spl_level = splsched();
+ semaphore_lock(semaphore);
+ for (collisions = 0; semaphore->active; collisions++) {
+ task_t task = semaphore->owner;
+
+ assert(task != TASK_NULL);
+
+ if (task_lock_try(task)) {
+ semaphore_destroy_internal(task, semaphore);
+ /* semaphore unlocked */
+ splx(spl_level);
+ task_unlock(task);
+ goto out;
}
+
+ /* failed to get out-of-order locks */
+ semaphore_unlock(semaphore);
+ splx(spl_level);
+ mutex_pause(collisions);
+ spl_level = splsched();
+ semaphore_lock(semaphore);
}
+ semaphore_unlock(semaphore);
+ splx(spl_level);
+
+ out:
+ zfree(semaphore_zone, semaphore);
+}
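+
+/*
+ * The loop above resolves a lock-order inversion: the normal order is
+ * task lock before semaphore lock, but the last-reference path arrives
+ * holding only the semaphore. A generic sketch of the try-lock/backoff
+ * idiom, with hypothetical lock names:
+ *
+ *	lock(B);
+ *	while (!lock_try(A)) {		// A normally precedes B
+ *		unlock(B);
+ *		mutex_pause(collisions++);	// brief, escalating backoff
+ *		lock(B);
+ *	}
+ *	// both locks held; safe to proceed as if taken in order
+ */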
+
+#define WAITQ_TO_SEMA(wq) ((semaphore_t) ((uintptr_t)(wq) - offsetof(struct semaphore, waitq)))
+void
+kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
+{
+ semaphore_t sem = WAITQ_TO_SEMA(waitq);
+ assert(event == SEMAPHORE_EVENT);
+ assert(kdp_is_in_zone(sem, "semaphores"));
+
+ waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
+ if (sem->owner)
+ waitinfo->owner = pid_from_task(sem->owner);
}
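+
+/*
+ * WAITQ_TO_SEMA recovers the enclosing semaphore from its embedded
+ * waitq: the classic offsetof()-based container-of idiom. A generic
+ * sketch with hypothetical types:
+ *
+ *	struct outer { long other; struct inner member; };
+ *	#define MEMBER_TO_OUTER(p) \
+ *		((struct outer *)((uintptr_t)(p) - offsetof(struct outer, member)))
+ */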