]> git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/kern/sync_sema.c
xnu-4903.221.2.tar.gz
[apple/xnu.git] / osfmk / kern / sync_sema.c
index 0cba287b0ad96c207619958ef8c5a271275bcb53..98f33ba8d69dae1e37e6ce1111ee1a92ec1a5911 100644 (file)
@@ -66,6 +66,8 @@ static unsigned int semaphore_event;
 zone_t semaphore_zone;
 unsigned int semaphore_max;
 
+os_refgrp_decl(static, sema_refgrp, "semaphore", NULL);
+
 /* Forward declarations */
 
 
@@ -184,7 +186,7 @@ semaphore_create(
         * Initialize the semaphore values.
         */
        s->port = IP_NULL;
-       s->ref_count = 1;
+       os_ref_init(&s->ref_count, &sema_refgrp);
        s->count = value;
        s->active = TRUE;
        s->owner = task;
@@ -206,37 +208,29 @@ semaphore_create(
 /*
  *     Routine:        semaphore_destroy_internal
  *
- *     This call will only succeed if the specified task is the SAME task
- *     specified at the semaphore's creation.
+ *     Disassociate a semaphore from its owning task, mark it inactive,
+ *     and set any waiting threads running with THREAD_RESTART.
  *
- *     All threads currently blocked on the semaphore are awoken.  These
- *     threads will return with the KERN_TERMINATED error.
+ *     Conditions:
+ *                     task is locked
+ *                     semaphore is locked
+ *                     semaphore is owned by the specified task
+ *     Returns:
+ *                     with semaphore unlocked
  */
-kern_return_t
+static void
 semaphore_destroy_internal(
        task_t                  task,
        semaphore_t             semaphore)
 {
        int                     old_count;
-       spl_t                   spl_level;
-
-       /*
-        *  Disown semaphore
-        */
-       task_lock(task);
-       if (semaphore->owner != task) {
-               task_unlock(task);
-               return KERN_INVALID_ARGUMENT;
-       }
-       spl_level = splsched();
-       semaphore_lock(semaphore);
 
+       /* unlink semaphore from owning task */
+       assert(semaphore->owner == task);
        remqueue((queue_entry_t) semaphore);
        semaphore->owner = TASK_NULL;
        task->semaphores_owned--;
 
-       task_unlock(task);
-
        /*
         *  Deactivate semaphore
         */
@@ -259,9 +253,6 @@ semaphore_destroy_internal(
        } else {
                semaphore_unlock(semaphore);
        }
-       splx(spl_level);
-
-       return KERN_SUCCESS;
 }
 
 /*
 *	Routine:	semaphore_destroy
 *
 *	Destroys a semaphore and consumes the caller's reference on it.
 *	Only the owning task may destroy an active semaphore; a NULL or
 *	mismatched task returns KERN_INVALID_ARGUMENT.  The caller's
 *	semaphore reference is consumed on every path, success or failure.
 */
kern_return_t
semaphore_destroy(
	task_t			task,
	semaphore_t		semaphore)
{
	spl_t spl_level;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		/* no owner supplied: still drop the caller's semaphore ref */
		semaphore_dereference(semaphore);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Lock order: task lock first, then the semaphore lock at
	 * splsched() — the ordering semaphore_destroy_internal() requires.
	 */
	task_lock(task);
	spl_level = splsched();
	semaphore_lock(semaphore);

	if (semaphore->owner != task) {
		/* caller is not the owner: unwind locks, drop caller's ref */
		semaphore_unlock(semaphore);
		semaphore_dereference(semaphore);
		splx(spl_level);
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	semaphore_destroy_internal(task, semaphore);
	/* semaphore unlocked */

	splx(spl_level);
	task_unlock(task);

	/* consume the caller's reference (may free if it was the last) */
	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}
+
+/*
+ *     Routine:        semaphore_destroy_all
+ *
+ *     Destroy all the semaphores associated with a given task.
+ */
+#define SEMASPERSPL 20  /* max number of semaphores to destroy per spl hold */
+
+void
+semaphore_destroy_all(
+       task_t                  task)
+{
+       uint32_t count;
+       spl_t spl_level;
+
+       count = 0;
+       task_lock(task);
+       while (!queue_empty(&task->semaphore_list)) {
+               semaphore_t semaphore;
+
+               semaphore = (semaphore_t) queue_first(&task->semaphore_list);
+
+               if (count == 0) 
+                       spl_level = splsched();
+               semaphore_lock(semaphore);
+
+               semaphore_destroy_internal(task, semaphore);
+               /* semaphore unlocked */
+
+               /* throttle number of semaphores per interrupt disablement */
+               if (++count == SEMASPERSPL) {
+                       count = 0;
+                       splx(spl_level);
+               }
+       }
+       if (count != 0)
+               splx(spl_level);
+
+       task_unlock(task);
 }
 
 /*
@@ -641,7 +690,8 @@ semaphore_wait_internal(
                thread_t        self = current_thread();
 
                wait_semaphore->count = -1;  /* we don't keep an actual count */
-               thread_lock(self);
+
+               thread_set_pending_block_hint(self, kThreadWaitSemaphore);
                (void)waitq_assert_wait64_locked(
                                        &wait_semaphore->waitq,
                                        SEMAPHORE_EVENT,
@@ -649,7 +699,6 @@ semaphore_wait_internal(
                                        TIMEOUT_URGENCY_USER_NORMAL,
                                        deadline, TIMEOUT_NO_LEEWAY,
                                        self);
-               thread_unlock(self);
        }
        semaphore_unlock(wait_semaphore);
        splx(spl_level);
@@ -1059,7 +1108,7 @@ void
 semaphore_reference(
        semaphore_t             semaphore)
 {
-       (void)hw_atomic_add(&semaphore->ref_count, 1);
+       os_ref_retain(&semaphore->ref_count);
 }
 
 /*
@@ -1072,11 +1121,15 @@ void
 semaphore_dereference(
        semaphore_t             semaphore)
 {
+       uint32_t collisions;
+       spl_t spl_level;
+
        if (semaphore == NULL)
                return;
 
-       if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
+       if (os_ref_release(&semaphore->ref_count) > 0) {
                return;
+       }
 
        /*
         * Last ref, clean up the port [if any]
@@ -1090,11 +1143,49 @@ semaphore_dereference(
                assert(!port->ip_srights);
                ipc_port_dealloc_kernel(port);
        }
-       if (semaphore->active) {
-               assert(semaphore->owner != TASK_NULL);
-               semaphore_destroy_internal(semaphore->owner, semaphore);
+
+       /*
+        * Lock the semaphore to lock in the owner task reference.
+        * Then continue to try to lock the task (inverse order).
+        */
+       spl_level = splsched();
+       semaphore_lock(semaphore);
+       for (collisions = 0; semaphore->active; collisions++) {
+               task_t task = semaphore->owner;
+
+               assert(task != TASK_NULL);
+               
+               if (task_lock_try(task)) {
+                       semaphore_destroy_internal(task, semaphore);
+                       /* semaphore unlocked */
+                       splx(spl_level);
+                       task_unlock(task);
+                       goto out;
+               }
+               
+               /* failed to get out-of-order locks */
+               semaphore_unlock(semaphore);
+               splx(spl_level);
+               mutex_pause(collisions);
+               spl_level = splsched();
+               semaphore_lock(semaphore);
        }
+       semaphore_unlock(semaphore);
+       splx(spl_level);
+
+ out:
        zfree(semaphore_zone, semaphore);
 }
 
+#define WAITQ_TO_SEMA(wq) ((semaphore_t) ((uintptr_t)(wq) - offsetof(struct semaphore, waitq)))
+void
+kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
+{
+       semaphore_t sem = WAITQ_TO_SEMA(waitq);
+       assert(event == SEMAPHORE_EVENT);
+       assert(kdp_is_in_zone(sem, "semaphores"));
 
+       waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
+       if (sem->owner)
+               waitinfo->owner = pid_from_task(sem->owner);
+}