*
* @APPLE_LICENSE_HEADER_START@
*
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
goto ExitThisRoutine;
}
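+ /* sanity check nent; AIO_LISTIO_MAX is the POSIX limit on entries in one list I/O call */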
- if ( uap->nent < 1 || uap->nent > aio_max_requests_per_process ) {
+ if ( uap->nent < 1 || uap->nent > AIO_LISTIO_MAX ) {
error = EINVAL;
goto ExitThisRoutine;
}
goto ExitThisRoutine;
}
- /* copyin our aiocb pointers from list */
+ /* check list of aio requests to see if any have completed */
for ( i = 0; i < uap->nent; i++ ) {
struct aiocb *aiocbp;
/* copy in an aiocb pointer from the list */
- error = copyin( (void *)(uap->aiocblist + i), (aiocbpp + i), sizeof(*aiocbpp) );
+ error = copyin( (void *)(uap->aiocblist + i), (aiocbpp + i), sizeof(aiocbp) );
if ( error != 0 ) {
error = EAGAIN;
goto ExitThisRoutine;
}
- } /* for ( ; i < uap->nent; ) */
-
- /* check list of aio requests to see if any have completed */
- AIO_LOCK;
- for ( i = 0; i < uap->nent; i++ ) {
- struct aiocb *aiocbp;
-
+
/* NULL elements are legal so check for 'em */
aiocbp = *(aiocbpp + i);
if ( aiocbp == NULL )
continue;
-
+
/* return immediately if any aio request in the list is done */
+ AIO_LOCK;
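+ /* aio_doneq may only be walked while holding AIO_LOCK */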
TAILQ_FOREACH( entryp, &p->aio_doneq, aio_workq_link ) {
if ( entryp->uaiocbp == aiocbp ) {
*retval = 0;
+ AIO_UNLOCK;
goto ExitThisRoutine;
}
}
+ AIO_UNLOCK;
} /* for ( ; i < uap->nent; ) */
KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend_sleep)) | DBG_FUNC_NONE,
* We do not use tsleep() here in order to avoid getting the kernel funnel lock.
*/
assert_wait( (event_t) &p->AIO_SUSPEND_SLEEP_CHAN, THREAD_ABORTSAFE );
- AIO_UNLOCK;
-
if ( abstime > 0 ) {
thread_set_timer_deadline( abstime );
}
error = thread_block( THREAD_CONTINUE_NULL );
-
if ( error == THREAD_AWAKENED ) {
/* got our wakeup call from aio_work_thread() */
if ( abstime > 0 ) {
thread_cancel_timer();
}
}
if ( uap->mode == LIO_NOWAIT ) {
/* caller does not want to wait so we'll fire off a worker thread and return */
wakeup_one( &aio_anchor.aio_async_workq );
}
else {
aio_workq_entry *entryp;
int error;
/*
* mode is LIO_WAIT - handle the IO requests now.
*/
+ AIO_LOCK;
entryp = TAILQ_FIRST( &aio_anchor.lio_sync_workq );
while ( entryp != NULL ) {
if ( p == entryp->procp && group_tag == entryp->group_tag ) {
entryp = TAILQ_NEXT( entryp, aio_workq_link );
} /* while ( entryp != NULL ) */
+ AIO_UNLOCK;
} /* uap->mode == LIO_WAIT */
- AIO_UNLOCK;
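+ /*
+ * Illustrative caller's view (userland sketch, not part of this change;
+ * cb1, cb2 and sev are placeholder names):
+ *	struct aiocb *list[2] = { &cb1, &cb2 };
+ *	lio_listio( LIO_NOWAIT, list, 2, &sev );  queues both and returns at once
+ *	lio_listio( LIO_WAIT, list, 2, NULL );    blocks until both requests finish
+ */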
/* call_result == -1 means we had no trouble queueing up requests */
if ( call_result == -1 ) {
struct uthread *uthread = (struct uthread *)get_bsdthread_info(current_act());
for( ;; ) {
- AIO_LOCK;
entryp = aio_get_some_work();
if ( entryp == NULL ) {
/*
* We do not use tsleep() here in order to avoid getting the kernel funnel lock.
*/
assert_wait( (event_t) &aio_anchor.aio_async_workq, THREAD_UNINT );
- AIO_UNLOCK;
thread_block( THREAD_CONTINUE_NULL );
KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_worker_wake)) | DBG_FUNC_NONE,
vm_map_t oldmap = VM_MAP_NULL;
task_t oldaiotask = TASK_NULL;
- AIO_UNLOCK;
-
KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_worker_thread)) | DBG_FUNC_START,
(int)entryp->procp, (int)entryp->uaiocbp, entryp->flags, 0, 0 );
* aio_get_some_work - get the next async IO request that is ready to be executed.
* aio_fsync complicates matters a bit since we cannot do the fsync until all async
* IO requests at the time the aio_fsync call came in have completed.
- * NOTE - AIO_LOCK must be held by caller
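+ * NOTE - AIO_LOCK is now taken and released inside this routine.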
*/
static aio_workq_entry *
int skip_count = 0;
/* pop some work off the work queue and add to our active queue */
+ AIO_LOCK;
for ( entryp = TAILQ_FIRST( &aio_anchor.aio_async_workq );
entryp != NULL;
entryp = TAILQ_NEXT( entryp, aio_workq_link ) ) {
aio_anchor.aio_active_count++;
entryp->procp->aio_active_count++;
}
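+ /* the entry we return has already been moved to our active queue, so it is safe to drop the lock here */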
+ AIO_UNLOCK;
return( entryp );
TAILQ_INSERT_TAIL( &aio_anchor.aio_async_workq, entryp, aio_workq_link );
aio_anchor.aio_async_workq_count++;
- wakeup_one( &aio_anchor.aio_async_workq );
- AIO_UNLOCK;
+ AIO_UNLOCK;
KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued)) | DBG_FUNC_NONE,
(int)procp, (int)aiocbp, 0, 0, 0 );
-
+
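+ /* do the wakeup after dropping AIO_LOCK so the awakened worker does not immediately block on it */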
+ wakeup_one( &aio_anchor.aio_async_workq );
+
return( 0 );
error_exit:
AIO_LOCK;
active_requests = aio_active_requests_for_process( entryp->procp );
- //AIO_UNLOCK;
+ AIO_UNLOCK;
if ( active_requests < 1 ) {
/* no active aio requests for this process, continue exiting */
- wakeup_one( &entryp->procp->AIO_CLEANUP_SLEEP_CHAN );
KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wake)) | DBG_FUNC_NONE,
(int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 );
+
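+ /* wake the exiting process waiting in the aio cleanup path for its last request to finish */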
+ wakeup_one( &entryp->procp->AIO_CLEANUP_SLEEP_CHAN );
}
- AIO_UNLOCK;
return;
}
* call wakeup for them. If we do mark them we should unmark them after
* the aio_suspend wakes up.
*/
- AIO_LOCK;
- wakeup_one( &entryp->procp->AIO_SUSPEND_SLEEP_CHAN );
- AIO_UNLOCK;
-
KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_suspend_wake)) | DBG_FUNC_NONE,
(int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 );
+
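+ /* wake any threads blocked in aio_suspend() for this process */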
+ wakeup_one( &entryp->procp->AIO_SUSPEND_SLEEP_CHAN );
return;