+static aio_anchor_cb aio_anchor;
+static lck_grp_t *aio_proc_lock_grp;
+static lck_grp_t *aio_entry_lock_grp;
+static lck_grp_t *aio_queue_lock_grp;
+static lck_attr_t *aio_lock_attr;
+static lck_grp_attr_t *aio_lock_grp_attr;
+static struct zone *aio_workq_zonep;
+static lck_mtx_t aio_entry_mtx;
+static lck_mtx_t aio_proc_mtx;
+
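+/*
+ * Entry locking.  All entries currently share one global mutex,
+ * aio_entry_mtx; the entryp argument is unused but presumably kept
+ * so the interface can move to per-entry locks without touching
+ * callers.
+ */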
+static void
+aio_entry_lock(__unused aio_workq_entry *entryp)
+{
+ lck_mtx_lock(&aio_entry_mtx);
+}
+
+static void
+aio_entry_lock_spin(__unused aio_workq_entry *entryp)
+{
+ lck_mtx_lock_spin(&aio_entry_mtx);
+}
+
+static void
+aio_entry_unlock(__unused aio_workq_entry *entryp)
+{
+ lck_mtx_unlock(&aio_entry_mtx);
+}
+
+/* Hash entries to a work queue: all entries currently map to queue zero. */
+static aio_workq_t
+aio_entry_workq(__unused aio_workq_entry *entryp)
+{
+ return &aio_anchor.aio_async_workqs[0];
+}
+
+static lck_mtx_t*
+aio_entry_mutex(__unused aio_workq_entry *entryp)
+{
+ return &aio_entry_mtx;
+}
+
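+/*
+ * Initialize one async work queue: an empty entry list, a zero
+ * count, its own mutex, and a FIFO wait queue (presumably for the
+ * worker threads that service the queue).
+ */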
+static void
+aio_workq_init(aio_workq_t wq)
+{
+ TAILQ_INIT(&wq->aioq_entries);
+ wq->aioq_count = 0;
+ lck_mtx_init(&wq->aioq_mtx, aio_queue_lock_grp, aio_lock_attr);
+ wq->aioq_waitq = wait_queue_alloc(SYNC_POLICY_FIFO);
+}
+
+
+/*
+ * Remove an entry from its work queue.  May be called with the
+ * queue lock held in spin mode.
+ */
+static void
+aio_workq_remove_entry_locked(aio_workq_t queue, aio_workq_entry *entryp)
+{
+ ASSERT_AIO_WORKQ_LOCK_OWNED(queue);
+
+ if (entryp->aio_workq_link.tqe_prev == NULL) {
+ panic("Trying to remove an entry from a work queue, but it is not on a queue\n");
+ }
+
+ TAILQ_REMOVE(&queue->aioq_entries, entryp, aio_workq_link);
+ queue->aioq_count--;
+ entryp->aio_workq_link.tqe_prev = NULL; /* Not on a workq */
+
+ if (queue->aioq_count < 0) {
+ panic("Negative count on a queue.\n");
+ }
+}
+
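+/*
+ * Append an entry to the tail of a work queue.  The queue lock must
+ * be held; the count check catches corruption from before the add.
+ */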
+static void
+aio_workq_add_entry_locked(aio_workq_t queue, aio_workq_entry *entryp)
+{
+ ASSERT_AIO_WORKQ_LOCK_OWNED(queue);
+
+ TAILQ_INSERT_TAIL(&queue->aioq_entries, entryp, aio_workq_link);
+ if (queue->aioq_count < 0) {
+ panic("Negative count on a queue.\n");
+ }
+ queue->aioq_count++;
+}
+
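+/*
+ * Per-process AIO locking is layered on the proc's own p_mlock
+ * (see aio_proc_mutex() below).
+ */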
+static void
+aio_proc_lock(proc_t procp)
+{
+ lck_mtx_lock(aio_proc_mutex(procp));
+}
+
+static void
+aio_proc_lock_spin(proc_t procp)
+{
+ lck_mtx_lock_spin(aio_proc_mutex(procp));
+}
+
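+/*
+ * Move an entry from the process's active queue to its done queue,
+ * updating the per-proc active count and the global done count.
+ */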
+static void
+aio_proc_move_done_locked(proc_t procp, aio_workq_entry *entryp)
+{
+ ASSERT_AIO_PROC_LOCK_OWNED(procp);
+
+ TAILQ_REMOVE(&procp->p_aio_activeq, entryp, aio_proc_link);
+ TAILQ_INSERT_TAIL(&procp->p_aio_doneq, entryp, aio_proc_link);
+ procp->p_aio_active_count--;
+ OSIncrementAtomic(&aio_anchor.aio_done_count);
+}
+
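+/*
+ * Remove an entry from the process's done queue and drop it from
+ * the global and per-proc totals.  The proc lock is presumably
+ * held, as with aio_proc_move_done_locked() above.
+ */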
+static void
+aio_proc_remove_done_locked(proc_t procp, aio_workq_entry *entryp)
+{
+ TAILQ_REMOVE(&procp->p_aio_doneq, entryp, aio_proc_link);
+ OSDecrementAtomic(&aio_anchor.aio_done_count);
+ aio_decrement_total_count();
+ procp->p_aio_total_count--;
+}
+
+static void
+aio_proc_unlock(proc_t procp)
+{
+ lck_mtx_unlock(aio_proc_mutex(procp));
+}
+
+static lck_mtx_t*
+aio_proc_mutex(proc_t procp)
+{
+ return &procp->p_mlock;
+}
+
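+/*
+ * Entry reference counting.  The _locked variants require the entry
+ * lock to be held; a negative count at any point indicates
+ * corruption and panics.
+ */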
+static void
+aio_entry_ref_locked(aio_workq_entry *entryp)
+{
+ ASSERT_AIO_ENTRY_LOCK_OWNED(entryp);
+
+ if (entryp->aio_refcount < 0) {
+ panic("AIO workq entry with a negative refcount.\n");
+ }
+ entryp->aio_refcount++;
+}
+
+
+/*
+ * Drop a reference under the entry lock.  This does not free the
+ * entry; callers such as aio_entry_unref() check for a zero count
+ * plus AIO_DO_FREE and free it themselves.
+ */
+static void
+aio_entry_unref_locked(aio_workq_entry *entryp)
+{
+ ASSERT_AIO_ENTRY_LOCK_OWNED(entryp);
+
+ entryp->aio_refcount--;
+ if (entryp->aio_refcount < 0) {
+ panic("AIO workq entry with a negative refcount.\n");
+ }
+}
+
+static void
+aio_entry_ref(aio_workq_entry *entryp)
+{
+ aio_entry_lock_spin(entryp);
+ aio_entry_ref_locked(entryp);
+ aio_entry_unlock(entryp);
+}
+
+static void
+aio_entry_unref(aio_workq_entry *entryp)
+{
+ aio_entry_lock_spin(entryp);
+ aio_entry_unref_locked(entryp);
+
+ if ((entryp->aio_refcount == 0) && ((entryp->flags & AIO_DO_FREE) != 0)) {
+ aio_entry_unlock(entryp);
+ aio_free_request(entryp);
+ } else {
+ aio_entry_unlock(entryp);
+ }
+}
+
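+/*
+ * Record the outcome of a cancellation attempt on an entry.  A
+ * cancelled entry gains a reference and has its error/return values
+ * set to ECANCELED/-1; the wait_for_completion value is OR'd
+ * directly into the entry's flags.
+ */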
+static void
+aio_entry_update_for_cancel(aio_workq_entry *entryp, boolean_t cancelled, int wait_for_completion, boolean_t disable_notification)
+{
+ aio_entry_lock_spin(entryp);
+
+ if (cancelled) {
+ aio_entry_ref_locked(entryp);
+ entryp->errorval = ECANCELED;
+ entryp->returnval = -1;
+ }
+
+ if (wait_for_completion) {
+ entryp->flags |= wait_for_completion; /* flag for special completion processing */
+ }
+
+ if (disable_notification) {
+ entryp->flags |= AIO_DISABLE; /* Don't want a signal */
+ }
+
+ aio_entry_unlock(entryp);
+}
+
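+/*
+ * Try to pull an entry off its work queue.  The tqe_prev pointer is
+ * tested unlocked as a fast path, then rechecked under the queue
+ * lock before removal.  Returns 1 if the entry was removed, 0 if it
+ * was no longer on a queue.
+ */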
+static int
+aio_entry_try_workq_remove(aio_workq_entry *entryp)
+{
+ /* Can only be cancelled if it's still on a work queue */
+ if (entryp->aio_workq_link.tqe_prev != NULL) {
+ aio_workq_t queue;
+
+ /* Will have to check again under the lock */
+ queue = aio_entry_workq(entryp);
+ aio_workq_lock_spin(queue);
+ if (entryp->aio_workq_link.tqe_prev != NULL) {
+ aio_workq_remove_entry_locked(queue, entryp);
+ aio_workq_unlock(queue);
+ return 1;
+ } else {
+ aio_workq_unlock(queue);
+ }
+ }
+
+ return 0;
+}