X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/316670eb35587141e969394ae8537d66b9211e80..5ba3f43ea354af8ad55bea84372a2bc834d8757c:/osfmk/kern/sync_sema.c

diff --git a/osfmk/kern/sync_sema.c b/osfmk/kern/sync_sema.c
index 687387b3c..0a6f7b33d 100644
--- a/osfmk/kern/sync_sema.c
+++ b/osfmk/kern/sync_sema.c
@@ -54,7 +54,7 @@
 #include <ipc/ipc_port.h>
 #include <ipc/ipc_space.h>
 #include <kern/host.h>
-#include <kern/wait_queue.h>
+#include <kern/waitq.h>
 #include <kern/zalloc.h>
 #include <kern/mach_param.h>
 
@@ -174,43 +174,28 @@ semaphore_create(
 	if (s == SEMAPHORE_NULL)
 		return KERN_RESOURCE_SHORTAGE;
 
-	kret = wait_queue_init(&s->wait_queue, policy); /* also inits lock */
+	kret = waitq_init(&s->waitq, policy | SYNC_POLICY_DISABLE_IRQ); /* also inits lock */
 	if (kret != KERN_SUCCESS) {
 		zfree(semaphore_zone, s);
 		return kret;
 	}
 
-	s->count = value;
-
-	/*
-	 * One reference for caller, one for port, and one for owner
-	 * task (if not the kernel itself).
-	 */
-	s->ref_count = (task == kernel_task) ? 2 : 3;
-
 	/*
-	 * Create and initialize the semaphore port
+	 * Initialize the semaphore values.
 	 */
-	s->port = ipc_port_alloc_kernel();
-	if (s->port == IP_NULL) {
-		zfree(semaphore_zone, s);
-		return KERN_RESOURCE_SHORTAGE;
-	}
-
-	ipc_kobject_set (s->port, (ipc_kobject_t) s, IKOT_SEMAPHORE);
+	s->port = IP_NULL;
+	s->ref_count = 1;
+	s->count = value;
+	s->active = TRUE;
+	s->owner = task;
 
 	/*
 	 * Associate the new semaphore with the task by adding
 	 * the new semaphore to the task's semaphore list.
-	 *
-	 * Associate the task with the new semaphore by having the
-	 * semaphores task pointer point to the owning task's structure.
 	 */
 	task_lock(task);
 	enqueue_head(&task->semaphore_list, (queue_entry_t) s);
 	task->semaphores_owned++;
-	s->owner = task;
-	s->active = TRUE;
 	task_unlock(task);
 
 	*new_semaphore = s;
@@ -219,42 +204,30 @@ semaphore_create(
 }
 
 /*
- *	Routine:	semaphore_destroy
+ *	Routine:	semaphore_destroy_internal
  *
- *	Destroys a semaphore.  This call will only succeed if the
- *	specified task is the SAME task name specified at the semaphore's
- *	creation.
+ *	Disassociate a semaphore from its owning task, mark it inactive,
+ *	and set any waiting threads running with THREAD_RESTART.
  *
- *	All threads currently blocked on the semaphore are awoken.  These
- *	threads will return with the KERN_TERMINATED error.
+ *	Conditions:
+ *			task is locked
+ *			semaphore is locked
+ *			semaphore is owned by the specified task
+ *	Returns:
+ *			with semaphore unlocked
  */
-kern_return_t
-semaphore_destroy(
+static void
+semaphore_destroy_internal(
 	task_t			task,
 	semaphore_t		semaphore)
 {
-	int 			old_count;
-	spl_t			spl_level;
-
-
-	if (task == TASK_NULL || semaphore == SEMAPHORE_NULL)
-		return KERN_INVALID_ARGUMENT;
+	int old_count;
 
-	/*
-	 *  Disown semaphore
-	 */
-	task_lock(task);
-	if (semaphore->owner != task) {
-		task_unlock(task);
-		return KERN_INVALID_ARGUMENT;
-	}
+	/* unlink semaphore from owning task */
+	assert(semaphore->owner == task);
 	remqueue((queue_entry_t) semaphore);
 	semaphore->owner = TASK_NULL;
 	task->semaphores_owned--;
-	task_unlock(task);
-
-	spl_level = splsched();
-	semaphore_lock(semaphore);
 
 	/*
 	 * Deactivate semaphore
@@ -269,25 +242,99 @@
 	semaphore->count = 0;
 
 	if (old_count < 0) {
-		wait_queue_wakeup64_all_locked(&semaphore->wait_queue,
-					       SEMAPHORE_EVENT,
-					       THREAD_RESTART,
-					       TRUE);		/* unlock? */
+		waitq_wakeup64_all_locked(&semaphore->waitq,
+					  SEMAPHORE_EVENT,
+					  THREAD_RESTART, NULL,
+					  WAITQ_ALL_PRIORITIES,
+					  WAITQ_UNLOCK);
+		/* waitq/semaphore is unlocked */
 	} else {
 		semaphore_unlock(semaphore);
 	}
+}
+
+/*
+ *	Routine:	semaphore_destroy
+ *
+ *	Destroys a semaphore and consume the caller's reference on the
+ *	semaphore.
+ */
+kern_return_t
+semaphore_destroy(
+	task_t			task,
+	semaphore_t		semaphore)
+{
+	spl_t spl_level;
+
+	if (semaphore == SEMAPHORE_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	if (task == TASK_NULL) {
+		semaphore_dereference(semaphore);
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	task_lock(task);
+	spl_level = splsched();
+	semaphore_lock(semaphore);
+
+	if (semaphore->owner != task) {
+		semaphore_unlock(semaphore);
+		splx(spl_level);
+		task_unlock(task);
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	semaphore_destroy_internal(task, semaphore);
+	/* semaphore unlocked */
+
+	splx(spl_level);
+	task_unlock(task);
 
-	/*
-	 * Deallocate
-	 *
-	 * Drop the task's semaphore reference, which in turn deallocates
-	 * the semaphore structure if the reference count goes to zero.
-	 */
 	semaphore_dereference(semaphore);
 	return KERN_SUCCESS;
 }
 
+/*
+ *	Routine:	semaphore_destroy_all
+ *
+ *	Destroy all the semaphores associated with a given task.
+ */
+#define SEMASPERSPL 20  /* max number of semaphores to destroy per spl hold */
+
+void
+semaphore_destroy_all(
+	task_t			task)
+{
+	uint32_t count;
+	spl_t spl_level;
+
+	count = 0;
+	task_lock(task);
+	while (!queue_empty(&task->semaphore_list)) {
+		semaphore_t semaphore;
+
+		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
+
+		if (count == 0)
+			spl_level = splsched();
+		semaphore_lock(semaphore);
+
+		semaphore_destroy_internal(task, semaphore);
+		/* semaphore unlocked */
+
+		/* throttle number of semaphores per interrupt disablement */
+		if (++count == SEMASPERSPL) {
+			count = 0;
+			splx(spl_level);
+		}
+	}
+	if (count != 0)
+		splx(spl_level);
+
+	task_unlock(task);
+}
+
 /*
  *	Routine:	semaphore_signal_internal
  *
@@ -315,15 +362,16 @@ semaphore_signal_internal(
 
 	if (thread != THREAD_NULL) {
 		if (semaphore->count < 0) {
-			kr = wait_queue_wakeup64_thread_locked(
-					&semaphore->wait_queue,
+			kr = waitq_wakeup64_thread_locked(
+					&semaphore->waitq,
 					SEMAPHORE_EVENT,
 					thread,
 					THREAD_AWAKENED,
-					TRUE);		/* unlock? */
+					WAITQ_UNLOCK);
+			/* waitq/semaphore is unlocked */
 		} else {
-			semaphore_unlock(semaphore);
 			kr = KERN_NOT_WAITING;
+			semaphore_unlock(semaphore);
 		}
 		splx(spl_level);
 		return kr;
@@ -332,34 +380,40 @@ semaphore_signal_internal(
 	if (options & SEMAPHORE_SIGNAL_ALL) {
 		int old_count = semaphore->count;
 
+		kr = KERN_NOT_WAITING;
 		if (old_count < 0) {
 			semaphore->count = 0;  /* always reset */
-			kr = wait_queue_wakeup64_all_locked(
-					&semaphore->wait_queue,
+			kr = waitq_wakeup64_all_locked(
+					&semaphore->waitq,
 					SEMAPHORE_EVENT,
-					THREAD_AWAKENED,
-					TRUE);		/* unlock? */
+					THREAD_AWAKENED, NULL,
+					WAITQ_ALL_PRIORITIES,
+					WAITQ_UNLOCK);
+			/* waitq / semaphore is unlocked */
 		} else {
 			if (options & SEMAPHORE_SIGNAL_PREPOST)
 				semaphore->count++;
-			semaphore_unlock(semaphore);
 			kr = KERN_SUCCESS;
+			semaphore_unlock(semaphore);
 		}
 		splx(spl_level);
 		return kr;
 	}
 
 	if (semaphore->count < 0) {
-		if (wait_queue_wakeup64_one_locked(
-					&semaphore->wait_queue,
+		kr = waitq_wakeup64_one_locked(
+					&semaphore->waitq,
 					SEMAPHORE_EVENT,
-					THREAD_AWAKENED,
-					FALSE) == KERN_SUCCESS) {
+					THREAD_AWAKENED, NULL,
+					WAITQ_ALL_PRIORITIES,
+					WAITQ_KEEP_LOCKED);
+		if (kr == KERN_SUCCESS) {
 			semaphore_unlock(semaphore);
 			splx(spl_level);
 			return KERN_SUCCESS;
-		} else
+		} else {
 			semaphore->count = 0;  /* all waiters gone */
+		}
 	}
 
 	if (options & SEMAPHORE_SIGNAL_PREPOST) {
@@ -633,13 +687,15 @@ semaphore_wait_internal(
 		thread_t self = current_thread();
 
 		wait_semaphore->count = -1;  /* we don't keep an actual count */
-		thread_lock(self);
-		(void)wait_queue_assert_wait64_locked(
-					&wait_semaphore->wait_queue,
+
+		thread_set_pending_block_hint(self, kThreadWaitSemaphore);
+		(void)waitq_assert_wait64_locked(
+					&wait_semaphore->waitq,
 					SEMAPHORE_EVENT,
-					THREAD_ABORTSAFE, deadline,
+					THREAD_ABORTSAFE,
+					TIMEOUT_URGENCY_USER_NORMAL,
+					deadline, TIMEOUT_NO_LEEWAY,
 					self);
-		thread_unlock(self);
 	}
 	semaphore_unlock(wait_semaphore);
 	splx(spl_level);
@@ -1062,27 +1118,70 @@ void
 semaphore_dereference(
 	semaphore_t		semaphore)
 {
-	int			ref_count;
-
-	if (semaphore != NULL) {
-		ref_count = hw_atomic_sub(&semaphore->ref_count, 1);
-
-		if (ref_count == 1) {
-			ipc_port_t port = semaphore->port;
-
-			if (IP_VALID(port) &&
-			    OSCompareAndSwapPtr(port, IP_NULL, &semaphore->port)) {
-				/*
-				 * We get to disassociate the port from the sema and
-				 * drop the port's reference on the sema.
-				 */
-				ipc_port_dealloc_kernel(port);
-				ref_count = hw_atomic_sub(&semaphore->ref_count, 1);
-			}
-		}
-		if (ref_count == 0) {
-			assert(wait_queue_empty(&semaphore->wait_queue));
-			zfree(semaphore_zone, semaphore);
+	uint32_t collisions;
+	spl_t spl_level;
+
+	if (semaphore == NULL)
+		return;
+
+	if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
+		return;
+
+	/*
+	 * Last ref, clean up the port [if any]
+	 * associated with the semaphore, destroy
+	 * it (if still active) and then free
+	 * the semaphore.
+	 */
+	ipc_port_t port = semaphore->port;
+
+	if (IP_VALID(port)) {
+		assert(!port->ip_srights);
+		ipc_port_dealloc_kernel(port);
+	}
+
+	/*
+	 * Lock the semaphore to lock in the owner task reference.
+	 * Then continue to try to lock the task (inverse order).
+	 */
+	spl_level = splsched();
+	semaphore_lock(semaphore);
+	for (collisions = 0; semaphore->active; collisions++) {
+		task_t task = semaphore->owner;
+
+		assert(task != TASK_NULL);
+
+		if (task_lock_try(task)) {
+			semaphore_destroy_internal(task, semaphore);
+			/* semaphore unlocked */
+			splx(spl_level);
+			task_unlock(task);
+			goto out;
 		}
+
+		/* failed to get out-of-order locks */
+		semaphore_unlock(semaphore);
+		splx(spl_level);
+		mutex_pause(collisions);
+		spl_level = splsched();
+		semaphore_lock(semaphore);
 	}
+	semaphore_unlock(semaphore);
+	splx(spl_level);
+
+ out:
+	zfree(semaphore_zone, semaphore);
+}
+
+#define WAITQ_TO_SEMA(wq)  ((semaphore_t) ((uintptr_t)(wq) - offsetof(struct semaphore, waitq)))
+void
+kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
+{
+	semaphore_t sem = WAITQ_TO_SEMA(waitq);
+	assert(event == SEMAPHORE_EVENT);
+	assert(kdp_is_in_zone(sem, "semaphores"));
+
+	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
+	if (sem->owner)
+		waitinfo->owner = pid_from_task(sem->owner);
 }
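
The kernel routines reworked in this diff implement the user-visible Mach semaphore API declared in <mach/semaphore.h> and <mach/task.h>. A minimal user-space sketch of that API on macOS follows; the one-second sleep is only an illustrative way to let the waiter block before the signal, not part of the API.

#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static semaphore_t sem;

static void *
waiter(void *arg)
{
	/* blocks in semaphore_wait_internal() until signaled; if the
	 * semaphore is destroyed instead, KERN_TERMINATED comes back */
	kern_return_t kr = semaphore_wait(sem);
	printf("semaphore_wait returned %d\n", kr);
	return arg;
}

int
main(void)
{
	pthread_t t;

	/* semaphore_create(): allocates the structure, initializes the
	 * waitq, and links the semaphore onto the task's semaphore list */
	if (semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0) != KERN_SUCCESS)
		return 1;

	pthread_create(&t, NULL, waiter, NULL);
	sleep(1);			/* let the waiter block first */

	semaphore_signal(sem);		/* semaphore_signal_internal() */
	pthread_join(t, NULL);

	/* semaphore_destroy(): unlinks the semaphore from the owning task
	 * and wakes any remaining waiters with THREAD_RESTART */
	semaphore_destroy(mach_task_self(), sem);
	return 0;
}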
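The new semaphore_dereference() may have to tear down a still-active semaphore while holding only the semaphore lock, which is the inverse of the usual task-then-semaphore lock order. It therefore try-locks the owning task, and on failure drops both the semaphore lock and the spl, backs off with mutex_pause(collisions), and retries. A minimal user-space sketch of the same try-lock/backoff pattern, using pthread stand-ins (struct task/struct sema here are hypothetical illustration types, not the XNU structures):

#include <pthread.h>
#include <unistd.h>

struct task {
	pthread_mutex_t lock;		/* plays the role of task_lock() */
};

struct sema {
	pthread_mutex_t lock;		/* plays the role of semaphore_lock() */
	struct task *owner;
	int active;
};

/* Normal order is task lock -> semaphore lock; here we already hold the
 * semaphore lock, so the task lock may only be *tried*, never waited on. */
static void
destroy_with_backoff(struct sema *s)
{
	unsigned collisions = 0;

	pthread_mutex_lock(&s->lock);
	while (s->active) {
		struct task *t = s->owner;

		if (pthread_mutex_trylock(&t->lock) == 0) {
			/* got both locks: safe to tear down
			 * (cf. semaphore_destroy_internal()) */
			s->active = 0;
			pthread_mutex_unlock(&s->lock);
			pthread_mutex_unlock(&t->lock);
			return;
		}

		/* failed to get the out-of-order lock: drop everything,
		 * pause briefly, and retry (cf. mutex_pause(collisions)) */
		pthread_mutex_unlock(&s->lock);
		usleep(1u << (collisions < 10 ? collisions : 10));
		collisions++;
		pthread_mutex_lock(&s->lock);
	}
	pthread_mutex_unlock(&s->lock);
}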
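semaphore_destroy_all() bounds how long interrupts stay disabled by re-enabling them after every SEMASPERSPL (20) destroyed semaphores instead of holding splsched() across the whole list. The same batching shape in a self-contained sketch, where disable_window()/enable_window() are hypothetical stand-ins for splsched()/splx():

#include <stddef.h>

#define ITEMS_PER_WINDOW 20		/* cf. SEMASPERSPL */

struct item {
	struct item *next;
};

/* Hypothetical stand-ins: a kernel would disable and re-enable
 * interrupts (or preemption) here. */
static void disable_window(void) { }
static void enable_window(void) { }

static void
destroy_all_items(struct item **head)
{
	unsigned count = 0;

	while (*head != NULL) {
		struct item *it = *head;

		/* open a critical window only at the start of a batch */
		if (count == 0)
			disable_window();

		*head = it->next;	/* unlink (and destroy) the item */

		/* throttle: close the window every ITEMS_PER_WINDOW items
		 * so pending interrupts get a chance to run */
		if (++count == ITEMS_PER_WINDOW) {
			count = 0;
			enable_window();
		}
	}
	if (count != 0)
		enable_window();
}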