X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..935ed37a5c468c8a1c07408573c08b8b7ef80e8b:/bsd/kern/kern_aio.c

diff --git a/bsd/kern/kern_aio.c b/bsd/kern/kern_aio.c
index af4c6fe96..813456ab9 100644
--- a/bsd/kern/kern_aio.c
+++ b/bsd/kern/kern_aio.c
@@ -135,8 +135,8 @@ typedef struct aio_anchor_cb aio_anchor_cb;
  * us sleep channels that currently do not collide with any other kernel routines.
  * At this time, for binary compatibility reasons, we cannot create new proc fields.
  */
-#define AIO_SUSPEND_SLEEP_CHAN  p_estcpu
-#define AIO_CLEANUP_SLEEP_CHAN  p_pctcpu
+#define AIO_SUSPEND_SLEEP_CHAN  aio_active_count
+#define AIO_CLEANUP_SLEEP_CHAN  aio_done_count
 
 
 /*
@@ -149,20 +149,20 @@ typedef struct aio_anchor_cb aio_anchor_cb;
 /*
  * LOCAL PROTOTYPES
  */
 
-static int	aio_active_requests_for_process( struct proc *procp );
+static int	aio_active_requests_for_process(proc_t procp );
 static boolean_t	aio_delay_fsync_request( aio_workq_entry *entryp );
 static int	aio_free_request( aio_workq_entry *entryp, vm_map_t the_map );
 static int	aio_get_all_queues_count( void );
-static int	aio_get_process_count( struct proc *procp );
+static int	aio_get_process_count(proc_t procp );
 static aio_workq_entry *	aio_get_some_work( void );
 static boolean_t	aio_last_group_io( aio_workq_entry *entryp );
 static void	aio_mark_requests( aio_workq_entry *entryp );
-static int	aio_queue_async_request( struct proc *procp,
+static int	aio_queue_async_request(proc_t procp,
 					user_addr_t aiocbp, int kindOfIO );
 static int	aio_validate( aio_workq_entry *entryp );
 static void	aio_work_thread( void );
-static int	do_aio_cancel( struct proc *p,
+static int	do_aio_cancel(proc_t p,
 				int fd,
 				user_addr_t aiocbp,
 				boolean_t wait_for_completion,
@@ -172,14 +172,14 @@ static int	do_aio_fsync( aio_workq_entry *entryp );
 static int	do_aio_read( aio_workq_entry *entryp );
 static int	do_aio_write( aio_workq_entry *entryp );
 static void	do_munge_aiocb( struct aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp );
-static boolean_t	is_already_queued( struct proc *procp,
+static boolean_t	is_already_queued(proc_t procp,
 					user_addr_t aiocbp );
-static int	lio_create_async_entry( struct proc *procp,
+static int	lio_create_async_entry(proc_t procp,
 					user_addr_t aiocbp,
 					user_addr_t sigp,
 					long group_tag,
 					aio_workq_entry **entrypp );
-static int	lio_create_sync_entry( struct proc *procp,
+static int	lio_create_sync_entry(proc_t procp,
 					user_addr_t aiocbp,
 					long group_tag,
 					aio_workq_entry **entrypp );
@@ -190,10 +190,10 @@ static int	lio_create_sync_entry( struct proc *procp,
  */
 
 /* in ...bsd/kern/sys_generic.c */
-extern int dofileread( struct proc *p, struct fileproc *fp, int fd,
+extern int dofileread(vfs_context_t ctx, struct fileproc *fp,
 			user_addr_t bufp, user_size_t nbyte, off_t offset,
 			int flags, user_ssize_t *retval );
-extern int dofilewrite( struct proc *p, struct fileproc *fp, int fd,
+extern int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
 			user_addr_t bufp, user_size_t nbyte, off_t offset,
 			int flags, user_ssize_t *retval );
 
@@ -227,7 +227,7 @@ static struct zone	*aio_workq_zonep;
  */
 
 int
-aio_cancel( struct proc *p, struct aio_cancel_args *uap, int *retval )
+aio_cancel(proc_t p, struct aio_cancel_args *uap, int *retval )
 {
 	struct user_aiocb		my_aiocb;
 	int						result;
@@ -240,7 +240,8 @@ aio_cancel( struct proc *p, struct aio_cancel_args *uap, int *retval )
 	result = aio_get_all_queues_count( );
 	AIO_UNLOCK;
 	if ( result < 1 ) {
-		result = EBADF;
+		result = 0;
+		*retval = AIO_ALLDONE;
 		goto ExitRoutine;
 	}
 
@@ -295,7 +296,7 @@ ExitRoutine:
  */
 
 __private_extern__ void
-_aio_close( struct proc *p, int fd )
+_aio_close(proc_t p, int fd )
 {
 	int			error, count;
 
@@ -343,7 +344,7 @@ _aio_close( struct proc *p, int fd )
  */
 
 int
-aio_error( struct proc *p, struct aio_error_args *uap, int *retval )
+aio_error(proc_t p, struct aio_error_args *uap, int *retval )
 {
 	aio_workq_entry		*entryp;
 	int					error;
@@ -412,7 +413,7 @@ ExitRoutine:
  */
 
 int
-aio_fsync( struct proc *p, struct aio_fsync_args *uap, int *retval )
+aio_fsync(proc_t p, struct aio_fsync_args *uap, int *retval )
 {
 	int			error;
 	int			fsync_kind;
@@ -453,7 +454,7 @@ ExitRoutine:
  */
 
 int
-aio_read( struct proc *p, struct aio_read_args *uap, int *retval )
+aio_read(proc_t p, struct aio_read_args *uap, int *retval )
 {
 	int			error;
 
@@ -483,7 +484,7 @@ aio_read( struct proc *p, struct aio_read_args *uap, int *retval )
  */
 
 int
-aio_return( struct proc *p, struct aio_return_args *uap, user_ssize_t *retval )
+aio_return(proc_t p, struct aio_return_args *uap, user_ssize_t *retval )
 {
 	aio_workq_entry		*entryp;
 	int					error;
@@ -572,7 +573,7 @@ ExitRoutine:
  */
 
 __private_extern__ void
-_aio_exec( struct proc *p )
+_aio_exec(proc_t p )
 {
 
 	KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_exec)) | DBG_FUNC_START,
@@ -596,7 +597,7 @@ _aio_exec( struct proc *p )
  */
 
 __private_extern__ void
-_aio_exit( struct proc *p )
+_aio_exit(proc_t p )
 {
 	int						error, count;
 	aio_workq_entry 		*entryp;
@@ -690,7 +691,7 @@ _aio_exit( struct proc *p )
  */
 
 static int
-do_aio_cancel( struct proc *p, int fd, user_addr_t aiocbp,
+do_aio_cancel(proc_t p, int fd, user_addr_t aiocbp,
 			boolean_t wait_for_completion, boolean_t disable_notification )
 {
 	aio_workq_entry		*entryp;
@@ -859,9 +860,16 @@ do_aio_cancel( struct proc *p, int fd, user_addr_t aiocbp,
  * set appropriately - EAGAIN if timeout elapses or EINTR if an interrupt
  * woke us up.
  */
+int
+aio_suspend(proc_t p, struct aio_suspend_args *uap, int *retval )
+{
+	__pthread_testcancel(1);
+	return(aio_suspend_nocancel(p, (struct aio_suspend_nocancel_args *)uap, retval));
+}
+
 
 int
-aio_suspend( struct proc *p, struct aio_suspend_args *uap, int *retval )
+aio_suspend_nocancel(proc_t p, struct aio_suspend_nocancel_args *uap, int *retval )
 {
 	int					error;
 	int					i, count;
@@ -908,7 +916,7 @@ aio_suspend( struct proc *p, struct aio_suspend_args *uap, int *retval )
 			goto ExitThisRoutine;
 		}
 
-		if ( ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000 ) {
+		if ( ts.tv_sec < 0 || ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000 ) {
 			error = EINVAL;
 			goto ExitThisRoutine;
 		}
@@ -946,6 +954,7 @@ aio_suspend( struct proc *p, struct aio_suspend_args *uap, int *retval )
 	}
 
 	/* check list of aio requests to see if any have completed */
+check_for_our_aiocbp:
 	AIO_LOCK;
 	for ( i = 0; i < uap->nent; i++ ) {
 		user_addr_t	aiocbp;
@@ -981,9 +990,15 @@ aio_suspend( struct proc *p, struct aio_suspend_args *uap, int *retval )
 	error = thread_block( THREAD_CONTINUE_NULL );
 
 	if ( error == THREAD_AWAKENED ) {
-		/* got our wakeup call from aio_work_thread() */
-		*retval = 0;
-		error = 0;
+		/*
+		 * got our wakeup call from aio_work_thread().
+		 * Since we can get a wakeup on this channel from another thread in the
+		 * same process we head back up to make sure this is for the correct aiocbp.
+		 * If it is the correct aiocbp we will return from where we do the check
+		 * (see entryp->uaiocbp == aiocbp after check_for_our_aiocbp label)
+		 * else we will fall out and just sleep again.
+		 */
+		goto check_for_our_aiocbp;
 	}
 	else if ( error == THREAD_TIMED_OUT ) {
 		/* our timeout expired */
@@ -1012,7 +1027,7 @@ ExitThisRoutine:
  */
 
 int
-aio_write( struct proc *p, struct aio_write_args *uap, int *retval )
+aio_write(proc_t p, struct aio_write_args *uap, int *retval )
 {
 	int			error;
 
@@ -1042,7 +1057,7 @@ aio_write( struct proc *p, struct aio_write_args *uap, int *retval )
  */
 
 int
-lio_listio( struct proc *p, struct lio_listio_args *uap, int *retval )
+lio_listio(proc_t p, struct lio_listio_args *uap, int *retval )
 {
 	int							i;
 	int							call_result;
@@ -1448,7 +1463,7 @@ aio_delay_fsync_request( aio_workq_entry *entryp )
  */
 
 static int
-aio_queue_async_request( struct proc *procp, user_addr_t aiocbp, int kindOfIO )
+aio_queue_async_request(proc_t procp, user_addr_t aiocbp, int kindOfIO )
 {
 	aio_workq_entry		*entryp;
 	int					result;
@@ -1551,7 +1566,7 @@ error_exit:
  */
 
 static int
-lio_create_async_entry( struct proc *procp, user_addr_t aiocbp,
+lio_create_async_entry(proc_t procp, user_addr_t aiocbp,
 						user_addr_t sigp, long group_tag,
 						aio_workq_entry **entrypp )
 {
@@ -1687,7 +1702,7 @@ aio_mark_requests( aio_workq_entry *entryp )
  */
 
 static int
-lio_create_sync_entry( struct proc *procp, user_addr_t aiocbp,
+lio_create_sync_entry(proc_t procp, user_addr_t aiocbp,
 						long group_tag, aio_workq_entry **entrypp )
 {
 	aio_workq_entry		*entryp;
@@ -1856,7 +1871,7 @@ aio_validate( aio_workq_entry *entryp )
  */
 
 static int
-aio_get_process_count( struct proc *procp )
+aio_get_process_count(proc_t procp )
 {
 	aio_workq_entry		*entryp;
 	int					count;
@@ -2024,8 +2039,9 @@ aio_last_group_io( aio_workq_entry *entryp )
 static int
 do_aio_read( aio_workq_entry *entryp )
 {
-	struct fileproc		*fp;
-	int					error;
+	struct fileproc				*fp;
+	int							error;
+	struct vfs_context			context;
 
 	if ( (error = fp_lookup(entryp->procp, entryp->aiocb.aio_fildes, &fp , 0)) )
 		return(error);
@@ -2033,18 +2049,20 @@ do_aio_read( aio_workq_entry *entryp )
 		fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0);
 		return(EBADF);
 	}
-	if ( fp != NULL ) {
-		error = dofileread( entryp->procp, fp, entryp->aiocb.aio_fildes,
-							entryp->aiocb.aio_buf,
-							entryp->aiocb.aio_nbytes,
-							entryp->aiocb.aio_offset, FOF_OFFSET,
-							&entryp->returnval );
-		fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0);
-	}
-	else {
-		fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0);
-		error = EBADF;
-	}
+
+	/*
+	 *
+	 * Needs vfs_context_t from vfs_context_create() in entryp!
+	 */
+	context.vc_thread = proc_thread(entryp->procp);	/* XXX */
+	context.vc_ucred = fp->f_fglob->fg_cred;
+
+	error = dofileread(&context, fp,
+				entryp->aiocb.aio_buf,
+				entryp->aiocb.aio_nbytes,
+				entryp->aiocb.aio_offset, FOF_OFFSET,
+				&entryp->returnval);
+	fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0);
 
 	return( error );
 
@@ -2059,6 +2077,7 @@ do_aio_write( aio_workq_entry *entryp )
 {
 	struct fileproc 		*fp;
 	int						error;
+	struct vfs_context		context;
 
 	if ( (error = fp_lookup(entryp->procp, entryp->aiocb.aio_fildes, &fp , 0)) )
 		return(error);
@@ -2066,23 +2085,24 @@ do_aio_write( aio_workq_entry *entryp )
 		fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0);
 		return(EBADF);
 	}
-	if ( fp != NULL ) {
-		/* NB: tell dofilewrite the offset, and to use the proc cred */
-		error = dofilewrite( entryp->procp,
-							 fp,
-							 entryp->aiocb.aio_fildes,
-							 entryp->aiocb.aio_buf,
-							 entryp->aiocb.aio_nbytes,
-							 entryp->aiocb.aio_offset,
-							 FOF_OFFSET | FOF_PCRED,
-							 &entryp->returnval);
-
-		fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0);
-	}
-	else {
-		fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0);
-		error = EBADF;
-	}
+
+	/*
+	 *
+	 * Needs vfs_context_t from vfs_context_create() in entryp!
+	 */
+	context.vc_thread = proc_thread(entryp->procp);	/* XXX */
+	context.vc_ucred = fp->f_fglob->fg_cred;
+
+	/* NB: tell dofilewrite the offset, and to use the proc cred */
+	error = dofilewrite(&context,
+				fp,
+				entryp->aiocb.aio_buf,
+				entryp->aiocb.aio_nbytes,
+				entryp->aiocb.aio_offset,
+				FOF_OFFSET | FOF_PCRED,
+				&entryp->returnval);
+
+	fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0);
 
 	return( error );
 
@@ -2096,7 +2116,7 @@ do_aio_write( aio_workq_entry *entryp )
  */
 
 static int
-aio_active_requests_for_process( struct proc *procp )
+aio_active_requests_for_process(proc_t procp )
 {
 
 	return( procp->aio_active_count );
@@ -2128,7 +2148,7 @@ do_aio_fsync( aio_workq_entry *entryp )
 		entryp->returnval = -1;
 		return(error);
 	}
-	context.vc_proc = entryp->procp;
+	context.vc_thread = current_thread();
 	context.vc_ucred = fp->f_fglob->fg_cred;
 
 	error = VNOP_FSYNC( vp, MNT_WAIT, &context);
@@ -2153,7 +2173,7 @@ do_aio_fsync( aio_workq_entry *entryp )
  */
 
 static boolean_t
-is_already_queued( struct proc *procp,
+is_already_queued(proc_t procp,
 				user_addr_t aiocbp )
 {
 	aio_workq_entry		*entryp;
@@ -2208,10 +2228,8 @@ aio_init( void )
 	int			i;
 
 	aio_lock_grp_attr = lck_grp_attr_alloc_init();
-	lck_grp_attr_setstat(aio_lock_grp_attr);
 	aio_lock_grp = lck_grp_alloc_init("aio", aio_lock_grp_attr);
 	aio_lock_attr = lck_attr_alloc_init();
-	//lck_attr_setdebug(aio_lock_attr);
 
 	aio_lock = lck_mtx_alloc_init(aio_lock_grp, aio_lock_attr);