+/*
+ * Remove a completed request from its proc's done queue and drop the
+ * matching global and per-proc counts.  Per the "_locked" suffix, the
+ * caller must hold the proc lock (see aio_proc_mutex() below).
+ */
+static void
+aio_proc_remove_done_locked(proc_t procp, aio_workq_entry *entryp)
+{
+	TAILQ_REMOVE(&procp->p_aio_doneq, entryp, aio_proc_link);
+	OSDecrementAtomic(&aio_anchor.aio_done_count);
+	aio_decrement_total_count();
+	procp->p_aio_total_count--;
+}
+
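+/*
+ * Per-proc AIO state is guarded by the proc's p_mlock; aio_proc_mutex()
+ * is the one place that mapping is spelled out, so callers go through
+ * these wrappers rather than touching p_mlock directly.
+ */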
+static void
+aio_proc_unlock(proc_t procp)
+{
+	lck_mtx_unlock(aio_proc_mutex(procp));
+}
+
+static lck_mtx_t *
+aio_proc_mutex(proc_t procp)
+{
+	return &procp->p_mlock;
+}
+
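+/*
+ * Reference counting on individual workq entries.  The _locked variants
+ * assume the caller already holds the entry lock; the plain variants
+ * further below take it themselves.
+ */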
+static void
+aio_entry_ref_locked(aio_workq_entry *entryp)
+{
+	ASSERT_AIO_ENTRY_LOCK_OWNED(entryp);
+
+	if (entryp->aio_refcount < 0) {
+		panic("aio_entry_ref_locked: AIO workq entry with a negative refcount");
+	}
+	entryp->aio_refcount++;
+}
+
+/*
+ * Drop a reference with the entry lock already held.  Note this never
+ * frees the entry; the final-reference AIO_DO_FREE case is handled by
+ * the unlocked wrapper aio_entry_unref() below.
+ */
+static void
+aio_entry_unref_locked(aio_workq_entry *entryp)
+{
+	ASSERT_AIO_ENTRY_LOCK_OWNED(entryp);
+
+	entryp->aio_refcount--;
+	if (entryp->aio_refcount < 0) {
+		panic("aio_entry_unref_locked: AIO workq entry refcount underflow");
+	}
+}
+
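+/*
+ * Unlocked convenience wrappers.  Take a ref while an entry is being
+ * worked on, then unref when done; if the final unref finds AIO_DO_FREE
+ * set, the entry is freed here rather than by the code that marked it
+ * for deferred free.
+ */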
+static void
+aio_entry_ref(aio_workq_entry *entryp)
+{
+	aio_entry_lock_spin(entryp);
+	aio_entry_ref_locked(entryp);
+	aio_entry_unlock(entryp);
+}
+
+static void
+aio_entry_unref(aio_workq_entry *entryp)
+{
+	aio_entry_lock_spin(entryp);
+	aio_entry_unref_locked(entryp);
+
+	/* Last reference and marked for deferred free: free it now. */
+	if ((entryp->aio_refcount == 0) && ((entryp->flags & AIO_DO_FREE) != 0)) {
+		aio_entry_unlock(entryp);
+		aio_free_request(entryp);
+	} else {
+		aio_entry_unlock(entryp);
+	}
+}
+
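+/*
+ * Update an entry for cancellation: a cancelled entry takes an extra
+ * reference and will report ECANCELED/-1 to its caller; the other two
+ * arguments fold special-completion and notification-disable flags into
+ * entryp->flags.
+ */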
+static void
+aio_entry_update_for_cancel(aio_workq_entry *entryp, boolean_t cancelled, int wait_for_completion, boolean_t disable_notification)
+{
+	aio_entry_lock_spin(entryp);
+
+	if (cancelled) {
+		aio_entry_ref_locked(entryp);
+		entryp->errorval = ECANCELED;
+		entryp->returnval = -1;
+	}
+
+	if (wait_for_completion) {
+		entryp->flags |= wait_for_completion;	/* flag for special completion processing */
+	}
+
+	if (disable_notification) {
+		entryp->flags |= AIO_DISABLE;	/* Don't want a signal */
+	}
+
+	aio_entry_unlock(entryp);
+}
+
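+/*
+ * Try to pull an entry off its work queue so it can be cancelled.  The
+ * unlocked tqe_prev check is only a hint that the entry may still be
+ * queued (tqe_prev is NULLed when an entry is removed); the check is
+ * repeated under the workq lock before the entry is actually removed.
+ * Returns 1 if the entry was removed, 0 if it was no longer queued.
+ */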
+static int
+aio_entry_try_workq_remove(aio_workq_entry *entryp)
+{
+	/* Can only be cancelled if it's still on a work queue */
+	if (entryp->aio_workq_link.tqe_prev != NULL) {
+		aio_workq_t queue;
+
+		/* Will have to check again under the lock */
+		queue = aio_entry_workq(entryp);
+		aio_workq_lock_spin(queue);
+		if (entryp->aio_workq_link.tqe_prev != NULL) {
+			aio_workq_remove_entry_locked(queue, entryp);
+			aio_workq_unlock(queue);
+			return 1;
+		} else {
+			aio_workq_unlock(queue);
+		}
+	}
+
+	return 0;
+}
+
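+/*
+ * Work-queue lock wrapper; uses the spin variant on the assumption that
+ * hold times on the workq lock are short.
+ */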
+static void
+aio_workq_lock_spin(aio_workq_t wq)
+{
+	lck_mtx_lock_spin(aio_workq_mutex(wq));
+}