diff --git a/bsd/miscfs/specfs/spec_vnops.c b/bsd/miscfs/specfs/spec_vnops.c index 625e9736f..300894634 100644 --- a/bsd/miscfs/specfs/spec_vnops.c +++ b/bsd/miscfs/specfs/spec_vnops.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -100,98 +100,99 @@ #include /* XXX following three prototypes should be in a header file somewhere */ -extern dev_t chrtoblk(dev_t dev); -extern boolean_t iskmemdev(dev_t dev); -extern int bpfkqfilter(dev_t dev, struct knote *kn); +extern dev_t chrtoblk(dev_t dev); +extern boolean_t iskmemdev(dev_t dev); +extern int bpfkqfilter(dev_t dev, struct knote *kn); extern int ptsd_kqfilter(dev_t, struct knote *); +extern int ptmx_kqfilter(dev_t, struct knote *); struct vnode *speclisth[SPECHSZ]; /* symbolic sleep message strings for devices */ -char devopn[] = "devopn"; -char devio[] = "devio"; -char devwait[] = "devwait"; -char devin[] = "devin"; -char devout[] = "devout"; -char devioc[] = "devioc"; -char devcls[] = "devcls"; +char devopn[] = "devopn"; +char devio[] = "devio"; +char devwait[] = "devwait"; +char devin[] = "devin"; +char devout[] = "devout"; +char devioc[] = "devioc"; +char devcls[] = "devcls"; #define VOPFUNC int (*)(void *) -int (**spec_vnodeop_p)(void *); -struct vnodeopv_entry_desc spec_vnodeop_entries[] = { - { &vnop_default_desc, (VOPFUNC)vn_default_error }, - { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */ - { &vnop_create_desc, (VOPFUNC)err_create }, /* create */ - { &vnop_mknod_desc, (VOPFUNC)err_mknod }, /* mknod */ - { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */ - { &vnop_close_desc, (VOPFUNC)spec_close }, /* close */ - { &vnop_access_desc, (VOPFUNC)spec_access }, /* access */ - { &vnop_getattr_desc, (VOPFUNC)spec_getattr }, /* getattr */ - { &vnop_setattr_desc, (VOPFUNC)spec_setattr }, /* setattr */ - { &vnop_read_desc, (VOPFUNC)spec_read }, /* read */ - { &vnop_write_desc, (VOPFUNC)spec_write }, /* write */ - { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */ - { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */ - { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */ - { &vnop_mmap_desc, 
(VOPFUNC)err_mmap }, /* mmap */ - { &vnop_fsync_desc, (VOPFUNC)spec_fsync }, /* fsync */ - { &vnop_remove_desc, (VOPFUNC)err_remove }, /* remove */ - { &vnop_link_desc, (VOPFUNC)err_link }, /* link */ - { &vnop_rename_desc, (VOPFUNC)err_rename }, /* rename */ - { &vnop_mkdir_desc, (VOPFUNC)err_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (VOPFUNC)err_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (VOPFUNC)err_symlink }, /* symlink */ - { &vnop_readdir_desc, (VOPFUNC)err_readdir }, /* readdir */ - { &vnop_readlink_desc, (VOPFUNC)err_readlink }, /* readlink */ - { &vnop_inactive_desc, (VOPFUNC)nop_inactive }, /* inactive */ - { &vnop_reclaim_desc, (VOPFUNC)nop_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */ - { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ - { &vnop_bwrite_desc, (VOPFUNC)spec_bwrite }, /* bwrite */ - { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ - { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ - { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ - { &vnop_blktooff_desc, (VOPFUNC)spec_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (VOPFUNC)spec_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (VOPFUNC)spec_blockmap }, /* blockmap */ - { (struct vnodeop_desc*)NULL, (int(*)(void *))NULL } +int(**spec_vnodeop_p)(void *); +const struct vnodeopv_entry_desc spec_vnodeop_entries[] = { + { .opve_op = &vnop_default_desc, .opve_impl = (VOPFUNC)vn_default_error }, + { .opve_op = &vnop_lookup_desc, .opve_impl = (VOPFUNC)spec_lookup }, /* lookup */ + { .opve_op = &vnop_create_desc, .opve_impl = (VOPFUNC)err_create }, /* create */ + { .opve_op = &vnop_mknod_desc, .opve_impl = (VOPFUNC)err_mknod }, /* mknod */ + { .opve_op = &vnop_open_desc, .opve_impl = (VOPFUNC)spec_open }, /* open */ + { .opve_op = &vnop_close_desc, .opve_impl = (VOPFUNC)spec_close }, /* close */ + { .opve_op = &vnop_access_desc, .opve_impl = (VOPFUNC)spec_access }, /* access */ + { .opve_op = &vnop_getattr_desc, .opve_impl = (VOPFUNC)spec_getattr }, /* getattr */ + { .opve_op = &vnop_setattr_desc, .opve_impl = (VOPFUNC)spec_setattr }, /* setattr */ + { .opve_op = &vnop_read_desc, .opve_impl = (VOPFUNC)spec_read }, /* read */ + { .opve_op = &vnop_write_desc, .opve_impl = (VOPFUNC)spec_write }, /* write */ + { .opve_op = &vnop_ioctl_desc, .opve_impl = (VOPFUNC)spec_ioctl }, /* ioctl */ + { .opve_op = &vnop_select_desc, .opve_impl = (VOPFUNC)spec_select }, /* select */ + { .opve_op = &vnop_revoke_desc, .opve_impl = (VOPFUNC)nop_revoke }, /* revoke */ + { .opve_op = &vnop_mmap_desc, .opve_impl = (VOPFUNC)err_mmap }, /* mmap */ + { .opve_op = &vnop_fsync_desc, .opve_impl = (VOPFUNC)spec_fsync }, /* fsync */ + { .opve_op = &vnop_remove_desc, .opve_impl = (VOPFUNC)err_remove }, /* remove */ + { .opve_op = &vnop_link_desc, .opve_impl = (VOPFUNC)err_link }, /* link */ + { .opve_op = &vnop_rename_desc, .opve_impl = (VOPFUNC)err_rename }, /* rename */ + { .opve_op = &vnop_mkdir_desc, .opve_impl = (VOPFUNC)err_mkdir }, /* mkdir */ + { .opve_op = &vnop_rmdir_desc, .opve_impl = (VOPFUNC)err_rmdir }, /* rmdir */ + { .opve_op = &vnop_symlink_desc, .opve_impl = (VOPFUNC)err_symlink }, /* symlink */ + { .opve_op = &vnop_readdir_desc, .opve_impl = (VOPFUNC)err_readdir }, /* readdir */ + { .opve_op = &vnop_readlink_desc, .opve_impl = (VOPFUNC)err_readlink }, /* readlink */ + { .opve_op = &vnop_inactive_desc, .opve_impl = (VOPFUNC)nop_inactive }, /* inactive */ + { .opve_op = 
&vnop_reclaim_desc, .opve_impl = (VOPFUNC)nop_reclaim }, /* reclaim */ + { .opve_op = &vnop_strategy_desc, .opve_impl = (VOPFUNC)spec_strategy }, /* strategy */ + { .opve_op = &vnop_pathconf_desc, .opve_impl = (VOPFUNC)spec_pathconf }, /* pathconf */ + { .opve_op = &vnop_advlock_desc, .opve_impl = (VOPFUNC)err_advlock }, /* advlock */ + { .opve_op = &vnop_bwrite_desc, .opve_impl = (VOPFUNC)spec_bwrite }, /* bwrite */ + { .opve_op = &vnop_pagein_desc, .opve_impl = (VOPFUNC)err_pagein }, /* Pagein */ + { .opve_op = &vnop_pageout_desc, .opve_impl = (VOPFUNC)err_pageout }, /* Pageout */ + { .opve_op = &vnop_copyfile_desc, .opve_impl = (VOPFUNC)err_copyfile }, /* Copyfile */ + { .opve_op = &vnop_blktooff_desc, .opve_impl = (VOPFUNC)spec_blktooff }, /* blktooff */ + { .opve_op = &vnop_offtoblk_desc, .opve_impl = (VOPFUNC)spec_offtoblk }, /* offtoblk */ + { .opve_op = &vnop_blockmap_desc, .opve_impl = (VOPFUNC)spec_blockmap }, /* blockmap */ + { .opve_op = (struct vnodeop_desc*)NULL, .opve_impl = (int (*)(void *))NULL } }; -struct vnodeopv_desc spec_vnodeop_opv_desc = - { &spec_vnodeop_p, spec_vnodeop_entries }; +const struct vnodeopv_desc spec_vnodeop_opv_desc = +{ .opv_desc_vector_p = &spec_vnodeop_p, .opv_desc_ops = spec_vnodeop_entries }; static void set_blocksize(vnode_t, dev_t); -#define LOWPRI_TIER1_WINDOW_MSECS 25 -#define LOWPRI_TIER2_WINDOW_MSECS 100 -#define LOWPRI_TIER3_WINDOW_MSECS 500 +#define LOWPRI_TIER1_WINDOW_MSECS 25 +#define LOWPRI_TIER2_WINDOW_MSECS 100 +#define LOWPRI_TIER3_WINDOW_MSECS 500 -#define LOWPRI_TIER1_IO_PERIOD_MSECS 40 -#define LOWPRI_TIER2_IO_PERIOD_MSECS 85 -#define LOWPRI_TIER3_IO_PERIOD_MSECS 200 +#define LOWPRI_TIER1_IO_PERIOD_MSECS 40 +#define LOWPRI_TIER2_IO_PERIOD_MSECS 85 +#define LOWPRI_TIER3_IO_PERIOD_MSECS 200 #define LOWPRI_TIER1_IO_PERIOD_SSD_MSECS 5 #define LOWPRI_TIER2_IO_PERIOD_SSD_MSECS 15 #define LOWPRI_TIER3_IO_PERIOD_SSD_MSECS 25 -int throttle_windows_msecs[THROTTLE_LEVEL_END + 1] = { +int throttle_windows_msecs[THROTTLE_LEVEL_END + 1] = { 0, LOWPRI_TIER1_WINDOW_MSECS, LOWPRI_TIER2_WINDOW_MSECS, LOWPRI_TIER3_WINDOW_MSECS, }; -int throttle_io_period_msecs[THROTTLE_LEVEL_END + 1] = { +int throttle_io_period_msecs[THROTTLE_LEVEL_END + 1] = { 0, LOWPRI_TIER1_IO_PERIOD_MSECS, LOWPRI_TIER2_IO_PERIOD_MSECS, LOWPRI_TIER3_IO_PERIOD_MSECS, }; -int throttle_io_period_ssd_msecs[THROTTLE_LEVEL_END + 1] = { +int throttle_io_period_ssd_msecs[THROTTLE_LEVEL_END + 1] = { 0, LOWPRI_TIER1_IO_PERIOD_SSD_MSECS, LOWPRI_TIER2_IO_PERIOD_SSD_MSECS, @@ -199,29 +200,29 @@ int throttle_io_period_ssd_msecs[THROTTLE_LEVEL_END + 1] = { }; -int throttled_count[THROTTLE_LEVEL_END + 1]; +int throttled_count[THROTTLE_LEVEL_END + 1]; struct _throttle_io_info_t { - lck_mtx_t throttle_lock; - - struct timeval throttle_last_write_timestamp; - struct timeval throttle_min_timer_deadline; - struct timeval throttle_window_start_timestamp[THROTTLE_LEVEL_END + 1]; /* window starts at both the beginning and completion of an I/O */ - struct timeval throttle_last_IO_timestamp[THROTTLE_LEVEL_END + 1]; - pid_t throttle_last_IO_pid[THROTTLE_LEVEL_END + 1]; - struct timeval throttle_start_IO_period_timestamp[THROTTLE_LEVEL_END + 1]; + lck_mtx_t throttle_lock; + + struct timeval throttle_last_write_timestamp; + struct timeval throttle_min_timer_deadline; + struct timeval throttle_window_start_timestamp[THROTTLE_LEVEL_END + 1]; /* window starts at both the beginning and completion of an I/O */ + struct timeval throttle_last_IO_timestamp[THROTTLE_LEVEL_END + 1]; + pid_t 
throttle_last_IO_pid[THROTTLE_LEVEL_END + 1]; + struct timeval throttle_start_IO_period_timestamp[THROTTLE_LEVEL_END + 1]; int32_t throttle_inflight_count[THROTTLE_LEVEL_END + 1]; - TAILQ_HEAD( , uthread) throttle_uthlist[THROTTLE_LEVEL_END + 1]; /* Lists of throttled uthreads */ - int throttle_next_wake_level; + TAILQ_HEAD(, uthread) throttle_uthlist[THROTTLE_LEVEL_END + 1]; /* Lists of throttled uthreads */ + int throttle_next_wake_level; - thread_call_t throttle_timer_call; - int32_t throttle_timer_ref; - int32_t throttle_timer_active; + thread_call_t throttle_timer_call; + int32_t throttle_timer_ref; + int32_t throttle_timer_active; - int32_t throttle_io_count; - int32_t throttle_io_count_begin; - int *throttle_io_periods; + int32_t throttle_io_count; + int32_t throttle_io_count_begin; + int *throttle_io_periods; uint32_t throttle_io_period_num; int32_t throttle_refcnt; @@ -233,7 +234,7 @@ struct _throttle_io_info_t { struct _throttle_io_info_t _throttle_io_info[LOWPRI_MAX_NUM_DEV]; -int lowpri_throttle_enabled = 1; +int lowpri_throttle_enabled = 1; static void throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level); @@ -248,44 +249,43 @@ void throttle_info_mount_reset_period(mount_t mp, int isssd); int spec_lookup(struct vnop_lookup_args *ap) { - *ap->a_vpp = NULL; - return (ENOTDIR); + return ENOTDIR; } static void set_blocksize(struct vnode *vp, dev_t dev) { - int (*size)(dev_t); - int rsize; + int (*size)(dev_t); + int rsize; - if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) { - rsize = (*size)(dev); - if (rsize <= 0) /* did size fail? */ - vp->v_specsize = DEV_BSIZE; - else - vp->v_specsize = rsize; - } - else - vp->v_specsize = DEV_BSIZE; + if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) { + rsize = (*size)(dev); + if (rsize <= 0) { /* did size fail? */ + vp->v_specsize = DEV_BSIZE; + } else { + vp->v_specsize = rsize; + } + } else { + vp->v_specsize = DEV_BSIZE; + } } void set_fsblocksize(struct vnode *vp) { - if (vp->v_type == VBLK) { dev_t dev = (dev_t)vp->v_rdev; int maj = major(dev); - if ((u_int)maj >= (u_int)nblkdev) + if ((u_int)maj >= (u_int)nblkdev) { return; + } vnode_lock(vp); set_blocksize(vp, dev); vnode_unlock(vp); } - } @@ -305,33 +305,39 @@ spec_open(struct vnop_open_args *ap) /* * Don't allow open if fs is mounted -nodev. */ - if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV)) - return (ENXIO); + if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV)) { + return ENXIO; + } switch (vp->v_type) { - case VCHR: - if ((u_int)maj >= (u_int)nchrdev) - return (ENXIO); + if ((u_int)maj >= (u_int)nchrdev) { + return ENXIO; + } if (cred != FSCRED && (ap->a_mode & FWRITE)) { +#if 0 /* * When running in very secure mode, do not allow * opens for writing of any disk character devices. */ - if (securelevel >= 2 && isdisk(dev, VCHR)) - return (EPERM); + if (securelevel >= 2 && isdisk(dev, VCHR)) { + return EPERM; + } +#endif /* Never allow writing to /dev/mem or /dev/kmem */ - if (iskmemdev(dev)) - return (EPERM); + if (iskmemdev(dev)) { + return EPERM; + } /* * When running in secure mode, do not allow opens for * writing of character devices whose corresponding block * devices are currently mounted. 
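[Annotation] As context for the VCHR security checks in spec_open() here: a minimal userspace sketch of the gate they implement. The *_model helpers are hypothetical stand-ins for the kernel's iskmemdev(), chrtoblk() and check_mountedon(); the FSCRED exemption and the newly #if 0'd securelevel >= 2 disk check are left out, and EBUSY stands in for whatever error check_mountedon() reports.

#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>

#define FWRITE 0x0002                       /* matches the kernel's FWRITE */
#define NODEV  ((dev_t)-1)

/* Placeholder implementations so the sketch links; the real logic
 * lives in the kernel. */
static bool  iskmemdev_model(dev_t dev)    { return dev == 0x0301; /* hypothetical */ }
static dev_t chrtoblk_model(dev_t dev)     { (void)dev; return NODEV; }
static bool  blkdev_is_mounted(dev_t bdev) { (void)bdev; return false; }

/*
 * Write-permission gate for character devices: kernel-memory devices
 * are never writable, and at securelevel >= 1 a character disk whose
 * block-device twin is currently mounted is refused as well.
 */
static int
vchr_open_check(dev_t dev, int mode, int securelevel)
{
    if ((mode & FWRITE) == 0) {
        return 0;                   /* read-only opens pass this gate */
    }
    if (iskmemdev_model(dev)) {
        return EPERM;               /* never write /dev/mem, /dev/kmem */
    }
    if (securelevel >= 1) {
        dev_t bdev = chrtoblk_model(dev);
        if (bdev != NODEV && blkdev_is_mounted(bdev)) {
            return EBUSY;           /* stands in for check_mountedon()'s error */
        }
    }
    return 0;
}

int
main(void)
{
    return vchr_open_check(0x0301, FWRITE, 1);  /* EPERM for the kmem stand-in */
}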
*/ if (securelevel >= 1) { - if ((bdev = chrtoblk(dev)) != NODEV && check_mountedon(bdev, VBLK, &error)) - return (error); + if ((bdev = chrtoblk(dev)) != NODEV && check_mountedon(bdev, VBLK, &error)) { + return error; + } } } @@ -345,12 +351,11 @@ spec_open(struct vnop_open_args *ap) devsw_unlock(dev, S_IFCHR); if (error == 0 && cdevsw[maj].d_type == D_DISK && !vp->v_un.vu_specinfo->si_initted) { - int isssd = 0; + int isssd = 0; uint64_t throttle_mask = 0; uint32_t devbsdunit = 0; if (VNOP_IOCTL(vp, DKIOCGETTHROTTLEMASK, (caddr_t)&throttle_mask, 0, NULL) == 0) { - if (throttle_mask != 0 && VNOP_IOCTL(vp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ap->a_context) == 0) { /* @@ -360,7 +365,7 @@ spec_open(struct vnop_open_args *ap) devbsdunit = num_trailing_0(throttle_mask); vnode_lock(vp); - + vp->v_un.vu_specinfo->si_isssd = isssd; vp->v_un.vu_specinfo->si_devbsdunit = devbsdunit; vp->v_un.vu_specinfo->si_throttle_mask = throttle_mask; @@ -376,24 +381,27 @@ spec_open(struct vnop_open_args *ap) vnode_unlock(vp); } } - return (error); + return error; case VBLK: - if ((u_int)maj >= (u_int)nblkdev) - return (ENXIO); + if ((u_int)maj >= (u_int)nblkdev) { + return ENXIO; + } /* * When running in very secure mode, do not allow * opens for writing of any disk block devices. */ if (securelevel >= 2 && cred != FSCRED && - (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK) - return (EPERM); + (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK) { + return EPERM; + } /* * Do not allow opens of block devices that are * currently mounted. */ - if ( (error = vfs_mountedon(vp)) ) - return (error); + if ((error = vfs_mountedon(vp))) { + return error; + } devsw_lock(dev, S_IFBLK); error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p); @@ -403,47 +411,47 @@ spec_open(struct vnop_open_args *ap) devsw_unlock(dev, S_IFBLK); if (!error) { - u_int64_t blkcnt; - u_int32_t blksize; + u_int64_t blkcnt; + u_int32_t blksize; int setsize = 0; u_int32_t size512 = 512; - if (!VNOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, ap->a_context)) { + if (!VNOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, ap->a_context)) { /* Switch to 512 byte sectors (temporarily) */ if (!VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, ap->a_context)) { - /* Get the number of 512 byte physical blocks. */ - if (!VNOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, ap->a_context)) { + /* Get the number of 512 byte physical blocks. */ + if (!VNOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, ap->a_context)) { setsize = 1; - } + } } /* If it doesn't set back, we can't recover */ - if (VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, ap->a_context)) - error = ENXIO; - } + if (VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, ap->a_context)) { + error = ENXIO; + } + } vnode_lock(vp); - set_blocksize(vp, dev); + set_blocksize(vp, dev); - /* - * Cache the size in bytes of the block device for later - * use by spec_write(). - */ - if (setsize) + /* + * Cache the size in bytes of the block device for later + * use by spec_write(). 
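[Annotation] The sizing dance in the VBLK branch below switches the device to 512-byte sectors so that DKIOCGETBLOCKCOUNT reports a count in 512-byte units, restores the native size afterwards, and treats a failed restore as fatal (ENXIO). Userspace can get the same byte total directly, since native block count times native block size yields the same product. A macOS-only sketch using the real <sys/disk.h> ioctls; /dev/disk0 is only an example path and typically needs elevated privileges.

/* cc -o devsize devsize.c && sudo ./devsize /dev/disk0 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/disk.h>
#include <sys/ioctl.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
    const char *path = (argc > 1) ? argv[1] : "/dev/disk0";
    uint32_t blksize = 0;
    uint64_t blkcnt = 0;

    int fd = open(path, O_RDONLY);
    if (fd < 0) {
        perror("open");
        return EXIT_FAILURE;
    }
    /* The same two queries spec_open() issues while sizing the device. */
    if (ioctl(fd, DKIOCGETBLOCKSIZE, &blksize) == -1 ||
        ioctl(fd, DKIOCGETBLOCKCOUNT, &blkcnt) == -1) {
        perror("ioctl");
        close(fd);
        return EXIT_FAILURE;
    }
    printf("%s: %u-byte blocks x %llu = %llu bytes\n", path, blksize,
        (unsigned long long)blkcnt,
        (unsigned long long)blkcnt * blksize);
    close(fd);
    return EXIT_SUCCESS;
}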
+ */ + if (setsize) { vp->v_specdevsize = blkcnt * (u_int64_t)size512; - else - vp->v_specdevsize = (u_int64_t)0; /* Default: Can't get */ - + } else { + vp->v_specdevsize = (u_int64_t)0; /* Default: Can't get */ + } vnode_unlock(vp); - } - return(error); + return error; default: - panic("spec_open type"); + panic("spec_open type"); } - return (0); + return 0; } /* @@ -457,65 +465,106 @@ spec_read(struct vnop_read_args *ap) struct buf *bp; daddr64_t bn, nextbn; long bsize, bscale; - int devBlockSize=0; + int devBlockSize = 0; int n, on; int error = 0; dev_t dev; #if DIAGNOSTIC - if (uio->uio_rw != UIO_READ) + if (uio->uio_rw != UIO_READ) { panic("spec_read mode"); - if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) + } + if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) { panic("spec_read proc"); + } #endif - if (uio_resid(uio) == 0) - return (0); + if (uio_resid(uio) == 0) { + return 0; + } switch (vp->v_type) { - case VCHR: - { - struct _throttle_io_info_t *throttle_info = NULL; - int thread_throttle_level; - if (cdevsw[major(vp->v_rdev)].d_type == D_DISK && vp->v_un.vu_specinfo->si_throttleable) { + { + struct _throttle_io_info_t *throttle_info = NULL; + int thread_throttle_level; + uint64_t blkno = 0; + uint32_t iolen = 0; + int ddisk = 0; + int ktrace_code = DKIO_READ; + devBlockSize = vp->v_specsize; + uintptr_t our_id; + + if (cdevsw[major(vp->v_rdev)].d_type == D_DISK) { + ddisk = 1; + } + + if (ddisk && vp->v_un.vu_specinfo->si_throttleable) { throttle_info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit]; - thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL); - } - error = (*cdevsw[major(vp->v_rdev)].d_read) - (vp->v_rdev, uio, ap->a_ioflag); + thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL); + } - if (throttle_info) { - throttle_info_end_io_internal(throttle_info, thread_throttle_level); + if (kdebug_enable && ddisk) { + if (devBlockSize == 0) { + devBlockSize = 512; // default sector size } - return (error); + if (uio_offset(uio) && devBlockSize) { + blkno = ((uint64_t) uio_offset(uio) / ((uint64_t)devBlockSize)); + } + iolen = (int) uio_resid(uio); + our_id = (uintptr_t)thread_tid(current_thread()); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, + (FSDBG_CODE(DBG_DKRW, ktrace_code)) | DBG_FUNC_NONE, our_id, + vp->v_rdev, blkno, iolen, 0); } + error = (*cdevsw[major(vp->v_rdev)].d_read) + (vp->v_rdev, uio, ap->a_ioflag); + + + if (kdebug_enable && ddisk) { + uint32_t residual = (uint32_t)uio_resid(uio); + ktrace_code |= DKIO_DONE; + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, + (FSDBG_CODE(DBG_DKRW, ktrace_code)) | DBG_FUNC_NONE, our_id, + (uintptr_t)VM_KERNEL_ADDRPERM(vp), residual, error, 0); + } + + if (throttle_info) { + throttle_info_end_io_internal(throttle_info, thread_throttle_level); + } + + return error; + } + case VBLK: - if (uio->uio_offset < 0) - return (EINVAL); + if (uio->uio_offset < 0) { + return EINVAL; + } dev = vp->v_rdev; devBlockSize = vp->v_specsize; - if (devBlockSize > PAGE_SIZE) - return (EINVAL); + if (devBlockSize > PAGE_SIZE) { + return EINVAL; + } - bscale = PAGE_SIZE / devBlockSize; + bscale = PAGE_SIZE / devBlockSize; bsize = bscale * devBlockSize; do { on = uio->uio_offset % bsize; - bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ (bscale - 1)); - + bn = (daddr64_t)((uio->uio_offset / devBlockSize) & ~(bscale - 1)); + if (vp->v_speclastr + bscale == bn) { - nextbn = bn + bscale; + nextbn = bn + bscale; error 
= buf_breadn(vp, bn, (int)bsize, &nextbn, - (int *)&bsize, 1, NOCRED, &bp); - } else - error = buf_bread(vp, bn, (int)bsize, NOCRED, &bp); + (int *)&bsize, 1, NOCRED, &bp); + } else { + error = buf_bread(vp, bn, (int)bsize, NOCRED, &bp); + } vnode_lock(vp); vp->v_speclastr = bn; @@ -523,26 +572,28 @@ spec_read(struct vnop_read_args *ap) n = bsize - buf_resid(bp); if ((on > n) || error) { - if (!error) - error = EINVAL; + if (!error) { + error = EINVAL; + } buf_brelse(bp); - return (error); + return error; } n = min((unsigned)(n - on), uio_resid(uio)); error = uiomove((char *)buf_dataptr(bp) + on, n, uio); - if (n + on == bsize) + if (n + on == bsize) { buf_markaged(bp); + } buf_brelse(bp); } while (error == 0 && uio_resid(uio) > 0 && n != 0); - return (error); + return error; default: panic("spec_read type"); } /* NOTREACHED */ - return (0); + return 0; } /* @@ -557,62 +608,101 @@ spec_write(struct vnop_write_args *ap) daddr64_t bn; int bsize, blkmask, bscale; int io_sync; - int devBlockSize=0; + int devBlockSize = 0; int n, on; int error = 0; dev_t dev; #if DIAGNOSTIC - if (uio->uio_rw != UIO_WRITE) + if (uio->uio_rw != UIO_WRITE) { panic("spec_write mode"); - if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) + } + if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) { panic("spec_write proc"); + } #endif switch (vp->v_type) { - case VCHR: - { - struct _throttle_io_info_t *throttle_info = NULL; - int thread_throttle_level; - if (cdevsw[major(vp->v_rdev)].d_type == D_DISK && vp->v_un.vu_specinfo->si_throttleable) { + { + struct _throttle_io_info_t *throttle_info = NULL; + int thread_throttle_level; + dev = vp->v_rdev; + devBlockSize = vp->v_specsize; + uint32_t iolen = 0; + uint64_t blkno = 0; + int ddisk = 0; + int ktrace_code = 0; // write is implied; read must be OR'd in. 
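[Annotation] This revision makes both spec_read() and spec_write() bracket the driver call with a pair of DKIO trace events. A standalone sketch of the convention, using illustrative flag values rather than the real <sys/kdebug.h> constants: write is implied by the absence of DKIO_READ, DKIO_DONE is OR'd in on the completion event, and the block number falls back to a 512-byte sector when the vnode's block size is still zero.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the DKIO_* trace flags; the real values
 * live in <sys/kdebug.h>. */
#define DKIO_DONE 0x01
#define DKIO_READ 0x02

/* Mirrors the block-number math in the traced paths: fall back to a
 * 512-byte sector when the vnode's block size has not been set yet. */
static uint64_t
trace_blkno(int64_t offset, uint32_t dev_block_size)
{
    if (dev_block_size == 0) {
        dev_block_size = 512;       /* default sector size */
    }
    return (uint64_t)offset / dev_block_size;
}

/* Stands in for the two KERNEL_DEBUG_CONSTANT_IST() calls that bracket
 * the driver's d_read/d_write entry point. */
static void
trace_io(int is_read, int done, uint64_t tid, uint64_t blkno, uint32_t len)
{
    unsigned code = (is_read ? DKIO_READ : 0u) | (done ? DKIO_DONE : 0u);
    printf("dkrw code=%#x tid=%llu blkno=%llu len=%u\n",
        code, (unsigned long long)tid, (unsigned long long)blkno, len);
}

int
main(void)
{
    uint64_t blkno = trace_blkno(40960, 4096);
    trace_io(0, 0, 1, blkno, 8192);     /* write issued: 8 KiB at block 10 */
    trace_io(0, 1, 1, blkno, 0);        /* write done: residual 0 */
    return 0;
}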
+ uintptr_t our_id; + + if (cdevsw[major(dev)].d_type == D_DISK) { + ddisk = 1; + } + + if (ddisk && vp->v_un.vu_specinfo->si_throttleable) { throttle_info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit]; - thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL); + thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL); microuptime(&throttle_info->throttle_last_write_timestamp); - } - error = (*cdevsw[major(vp->v_rdev)].d_write) - (vp->v_rdev, uio, ap->a_ioflag); + } - if (throttle_info) { - throttle_info_end_io_internal(throttle_info, thread_throttle_level); + if (kdebug_enable && ddisk) { + if (devBlockSize == 0) { + devBlockSize = 512; // default sector size + } + if ((uio_offset(uio) != 0) && devBlockSize) { + blkno = ((uint64_t)uio_offset(uio)) / ((uint64_t)devBlockSize); } + iolen = (int)uio_resid(uio); + our_id = (uintptr_t)thread_tid(current_thread()); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, + (FSDBG_CODE(DBG_DKRW, ktrace_code)) | DBG_FUNC_NONE, our_id, + vp->v_rdev, blkno, iolen, 0); + } + error = (*cdevsw[major(vp->v_rdev)].d_write) + (vp->v_rdev, uio, ap->a_ioflag); + + if (kdebug_enable && ddisk) { + //emit the I/O completion + uint32_t residual = (uint32_t)uio_resid(uio); + ktrace_code |= DKIO_DONE; + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, + (FSDBG_CODE(DBG_DKRW, ktrace_code)) | DBG_FUNC_NONE, our_id, + (uintptr_t)VM_KERNEL_ADDRPERM(vp), residual, error, 0); + } - return (error); + if (throttle_info) { + throttle_info_end_io_internal(throttle_info, thread_throttle_level); } + return error; + } + case VBLK: - if (uio_resid(uio) == 0) - return (0); - if (uio->uio_offset < 0) - return (EINVAL); + if (uio_resid(uio) == 0) { + return 0; + } + if (uio->uio_offset < 0) { + return EINVAL; + } io_sync = (ap->a_ioflag & IO_SYNC); dev = (vp->v_rdev); devBlockSize = vp->v_specsize; - if (devBlockSize > PAGE_SIZE) - return(EINVAL); + if (devBlockSize > PAGE_SIZE) { + return EINVAL; + } - bscale = PAGE_SIZE / devBlockSize; + bscale = PAGE_SIZE / devBlockSize; blkmask = bscale - 1; bsize = bscale * devBlockSize; - + do { - bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ blkmask); + bn = (daddr64_t)((uio->uio_offset / devBlockSize) & ~blkmask); on = uio->uio_offset % bsize; n = min((unsigned)(bsize - on), uio_resid(uio)); @@ -630,48 +720,51 @@ spec_write(struct vnop_write_args *ap) if (n == bsize && vp->v_specdevsize != (u_int64_t)0 && (uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) { - /* reduce the size of the read to what is there */ - n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize; + /* reduce the size of the read to what is there */ + n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize; } - if (n == bsize) - bp = buf_getblk(vp, bn, bsize, 0, 0, BLK_WRITE); - else - error = (int)buf_bread(vp, bn, bsize, NOCRED, &bp); + if (n == bsize) { + bp = buf_getblk(vp, bn, bsize, 0, 0, BLK_WRITE); + } else { + error = (int)buf_bread(vp, bn, bsize, NOCRED, &bp); + } /* Translate downstream error for upstream, if needed */ - if (!error) + if (!error) { error = (int)buf_error(bp); + } if (error) { buf_brelse(bp); - return (error); + return error; } n = min(n, bsize - buf_resid(bp)); error = uiomove((char *)buf_dataptr(bp) + on, n, uio); if (error) { buf_brelse(bp); - return (error); + return error; } buf_markaged(bp); - if (io_sync) - error = buf_bwrite(bp); - else { - if ((n + on) == bsize) - error = buf_bawrite(bp); - else - 
error = buf_bdwrite(bp); + if (io_sync) { + error = buf_bwrite(bp); + } else { + if ((n + on) == bsize) { + error = buf_bawrite(bp); + } else { + error = buf_bdwrite(bp); + } } } while (error == 0 && uio_resid(uio) > 0 && n != 0); - return (error); + return error; default: panic("spec_write type"); } /* NOTREACHED */ - return (0); + return 0; } /* @@ -682,22 +775,22 @@ spec_ioctl(struct vnop_ioctl_args *ap) { proc_t p = vfs_context_proc(ap->a_context); dev_t dev = ap->a_vp->v_rdev; - int retval = 0; + int retval = 0; KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_START, - dev, ap->a_command, ap->a_fflag, ap->a_vp->v_type, 0); + dev, ap->a_command, ap->a_fflag, ap->a_vp->v_type, 0); switch (ap->a_vp->v_type) { - case VCHR: retval = (*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data, - ap->a_fflag, p); + ap->a_fflag, p); break; case VBLK: retval = (*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data, ap->a_fflag, p); - if (!retval && ap->a_command == DKIOCSETBLOCKSIZE) + if (!retval && ap->a_command == DKIOCSETBLOCKSIZE) { ap->a_vp->v_specsize = *(uint32_t *)ap->a_data; + } break; default: @@ -705,9 +798,9 @@ spec_ioctl(struct vnop_ioctl_args *ap) /* NOTREACHED */ } KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_END, - dev, ap->a_command, ap->a_fflag, retval, 0); + dev, ap->a_command, ap->a_fflag, retval, 0); - return (retval); + return retval; } int @@ -717,9 +810,8 @@ spec_select(struct vnop_select_args *ap) dev_t dev; switch (ap->a_vp->v_type) { - default: - return (1); /* XXX */ + return 1; /* XXX */ case VCHR: dev = ap->a_vp->v_rdev; @@ -727,10 +819,10 @@ spec_select(struct vnop_select_args *ap) } } -static int filt_specattach(struct knote *kn, struct kevent_internal_s *kev); +static int filt_specattach(struct knote *kn, struct kevent_qos_s *kev); int -spec_kqfilter(vnode_t vp, struct knote *kn, struct kevent_internal_s *kev) +spec_kqfilter(vnode_t vp, struct knote *kn, struct kevent_qos_s *kev) { dev_t dev; @@ -746,7 +838,7 @@ spec_kqfilter(vnode_t vp, struct knote *kn, struct kevent_internal_s *kev) * other attaches. */ int32_t tmp_flags = kn->kn_flags; - int64_t tmp_data = kn->kn_data; + int64_t tmp_sdata = kn->kn_sdata; int res; res = bpfkqfilter(dev, kn); @@ -754,7 +846,7 @@ spec_kqfilter(vnode_t vp, struct knote *kn, struct kevent_internal_s *kev) return res; } kn->kn_flags = tmp_flags; - kn->kn_data = tmp_data; + kn->kn_sdata = tmp_sdata; #endif if (major(dev) > nchrdev) { @@ -768,9 +860,10 @@ spec_kqfilter(vnode_t vp, struct knote *kn, struct kevent_internal_s *kev) if (cdevsw_flags[major(dev)] & CDEVSW_IS_PTS) { kn->kn_filtid = EVFILTID_PTSD; return ptsd_kqfilter(dev, kn); - } else if (cdevsw[major(dev)].d_type == D_TTY && - !(cdevsw_flags[major(dev)] & CDEVSW_IS_PTC) && - kn->kn_vnode_kqok) { + } else if (cdevsw_flags[major(dev)] & CDEVSW_IS_PTC) { + kn->kn_filtid = EVFILTID_PTMX; + return ptmx_kqfilter(dev, kn); + } else if (cdevsw[major(dev)].d_type == D_TTY && kn->kn_vnode_kqok) { /* * TTYs from drivers that use struct ttys use their own filter * routines. The PTC driver doesn't use the tty for character @@ -790,14 +883,15 @@ spec_kqfilter(vnode_t vp, struct knote *kn, struct kevent_internal_s *kev) int spec_fsync_internal(vnode_t vp, int waitfor, __unused vfs_context_t context) { - if (vp->v_type == VCHR) - return (0); + if (vp->v_type == VCHR) { + return 0; + } /* * Flush all dirty buffers associated with a block device. 
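[Annotation] spec_fsync_internal(), just below, returns immediately for character devices and flushes dirty buffers for block devices. The closest userspace analogue, given as a loose sketch: fsync(2) pushes the file's dirty buffers, and on macOS the F_FULLFSYNC fcntl additionally asks the storage device to drain its own write cache.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int
flush_all_the_way(int fd)
{
    if (fsync(fd) == -1) {
        return -1;
    }
#ifdef F_FULLFSYNC
    if (fcntl(fd, F_FULLFSYNC, 0) == -1) {
        return -1;      /* some devices do not support a full flush */
    }
#endif
    return 0;
}

int
main(void)
{
    int fd = open("/tmp/flush-demo", O_CREAT | O_WRONLY | O_TRUNC, 0644);
    if (fd < 0 || write(fd, "x", 1) != 1 || flush_all_the_way(fd) == -1) {
        perror("flush-demo");
        return 1;
    }
    close(fd);
    return 0;
}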
*/ buf_flushdirtyblks(vp, (waitfor == MNT_WAIT || waitfor == MNT_DWAIT), 0, "spec_fsync"); - return (0); + return 0; } int @@ -813,14 +907,14 @@ spec_fsync(struct vnop_fsync_args *ap) void throttle_init(void); -#if 0 -#define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...) \ - do { \ - if ((debug_info)->alloc) \ - printf("%s: "format, __FUNCTION__, ## args); \ +#if 0 +#define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...) \ + do { \ + if ((debug_info)->alloc) \ + printf("%s: "format, __FUNCTION__, ## args); \ } while(0) -#else +#else #define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...) #endif @@ -856,8 +950,9 @@ num_trailing_0(uint64_t n) * since in most cases the number of trailing 0s is very small, * we simply counting sequentially from the lowest bit */ - if (n == 0) + if (n == 0) { return sizeof(n) * 8; + } int count = 0; while (!ISSET(n, 1)) { n >>= 1; @@ -878,22 +973,23 @@ throttle_info_rel(struct _throttle_io_info_t *info) { SInt32 oldValue = OSDecrementAtomic(&info->throttle_refcnt); - DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n", - info, (int)(oldValue -1), info ); + DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n", + info, (int)(oldValue - 1), info ); /* The reference count just went negative, very bad */ - if (oldValue == 0) + if (oldValue == 0) { panic("throttle info ref cnt went negative!"); + } - /* - * Once reference count is zero, no one else should be able to take a - * reference + /* + * Once reference count is zero, no one else should be able to take a + * reference */ if ((info->throttle_refcnt == 0) && (info->throttle_alloc)) { DEBUG_ALLOC_THROTTLE_INFO("Freeing info = %p\n", info); - + lck_mtx_destroy(&info->throttle_lock, throttle_lock_grp); - FREE(info, M_TEMP); + FREE(info, M_TEMP); } return oldValue; } @@ -909,11 +1005,12 @@ throttle_info_ref(struct _throttle_io_info_t *info) { SInt32 oldValue = OSIncrementAtomic(&info->throttle_refcnt); - DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n", - info, (int)(oldValue -1), info ); + DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n", + info, (int)(oldValue - 1), info ); /* Allocated items should never have a reference of zero */ - if (info->throttle_alloc && (oldValue == 0)) + if (info->throttle_alloc && (oldValue == 0)) { panic("Taking a reference without calling create throttle info!\n"); + } return oldValue; } @@ -930,16 +1027,16 @@ throttle_info_ref(struct _throttle_io_info_t *info) */ static uint32_t throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count, int wakelevel) -{ +{ struct timeval elapsed; struct timeval now; struct timeval period; - uint64_t elapsed_msecs; - int throttle_level; - int level; - int msecs; - boolean_t throttled = FALSE; - boolean_t need_timer = FALSE; + uint64_t elapsed_msecs; + int throttle_level; + int level; + int msecs; + boolean_t throttled = FALSE; + boolean_t need_timer = FALSE; microuptime(&now); @@ -947,8 +1044,9 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count info->throttle_io_count_begin = info->throttle_io_count; info->throttle_io_period_num++; - while (wakelevel >= THROTTLE_LEVEL_THROTTLED) + while (wakelevel >= THROTTLE_LEVEL_THROTTLED) { info->throttle_start_IO_period_timestamp[wakelevel--] = now; + } info->throttle_min_timer_deadline = now; @@ -959,15 +1057,12 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count timevaladd(&info->throttle_min_timer_deadline, &period); } for (throttle_level = THROTTLE_LEVEL_START; throttle_level < 
THROTTLE_LEVEL_END; throttle_level++) { - elapsed = now; timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]); elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000); for (level = throttle_level + 1; level <= THROTTLE_LEVEL_END; level++) { - if (!TAILQ_EMPTY(&info->throttle_uthlist[level])) { - if (elapsed_msecs < (uint64_t)throttle_windows_msecs[level] || info->throttle_inflight_count[throttle_level]) { /* * we had an I/O occur at a higher priority tier within @@ -985,23 +1080,24 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count break; } } - if (throttled == TRUE) + if (throttled == TRUE) { break; + } } if (throttled == TRUE) { - uint64_t deadline = 0; + uint64_t deadline = 0; struct timeval target; struct timeval min_target; - /* + /* * we've got at least one tier still in a throttled window * so we need a timer running... compute the next deadline * and schedule it */ - for (level = throttle_level+1; level <= THROTTLE_LEVEL_END; level++) { - - if (TAILQ_EMPTY(&info->throttle_uthlist[level])) + for (level = throttle_level + 1; level <= THROTTLE_LEVEL_END; level++) { + if (TAILQ_EMPTY(&info->throttle_uthlist[level])) { continue; + } target = info->throttle_start_IO_period_timestamp[level]; @@ -1010,15 +1106,16 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count period.tv_usec = (msecs % 1000) * 1000; timevaladd(&target, &period); - + if (need_timer == FALSE || timevalcmp(&target, &min_target, <)) { min_target = target; need_timer = TRUE; } } if (timevalcmp(&info->throttle_min_timer_deadline, &now, >)) { - if (timevalcmp(&info->throttle_min_timer_deadline, &min_target, >)) - min_target = info->throttle_min_timer_deadline; + if (timevalcmp(&info->throttle_min_timer_deadline, &min_target, >)) { + min_target = info->throttle_min_timer_deadline; + } } if (info->throttle_timer_active) { @@ -1030,8 +1127,9 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count * proceed and eventually re-run this function */ need_timer = FALSE; - } else + } else { info->throttle_timer_active = 0; + } } if (need_timer == TRUE) { /* @@ -1041,7 +1139,7 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count * 32-bit which allows us to use the clock_interval_to_deadline() * routine. 
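[Annotation] The deadline scan in throttle_timer_start() can be modeled compactly in userspace with the BSD timeradd()/timercmp() macros (the kernel uses its private timevaladd()/timevalcmp()). The tier count, the waiters flags, and the period table below are illustrative stand-ins for fields of struct _throttle_io_info_t.

#include <stdbool.h>
#include <stdio.h>
#include <sys/time.h>

#define LEVEL_START 0
#define LEVEL_END   3

static struct timeval start_io_period_ts[LEVEL_END + 1];
static bool waiters[LEVEL_END + 1];
static int io_period_msecs[LEVEL_END + 1] = { 0, 40, 85, 200 };

/*
 * For every tier below the first unthrottled one that still has
 * waiters, the wakeup target is the start of its I/O period plus the
 * period length; the timer is armed for the earliest such target.
 * Returns false when no timer is needed.
 */
static bool
next_deadline(int throttle_level, struct timeval *min_target)
{
    bool need_timer = false;

    for (int level = throttle_level + 1; level <= LEVEL_END; level++) {
        if (!waiters[level]) {
            continue;
        }
        int msecs = io_period_msecs[level];
        struct timeval period = {
            .tv_sec = msecs / 1000,
            .tv_usec = (msecs % 1000) * 1000,
        };
        struct timeval target;
        timeradd(&start_io_period_ts[level], &period, &target);

        if (!need_timer || timercmp(&target, min_target, <)) {
            *min_target = target;
            need_timer = true;
        }
    }
    return need_timer;
}

int
main(void)
{
    struct timeval deadline;
    gettimeofday(&start_io_period_ts[2], NULL);
    waiters[2] = true;
    if (next_deadline(LEVEL_START, &deadline)) {
        printf("timer due at %ld.%06d\n",
            (long)deadline.tv_sec, (int)deadline.tv_usec);
    }
    return 0;
}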
*/ - int target_msecs; + int target_msecs; if (info->throttle_timer_ref == 0) { /* @@ -1069,7 +1167,7 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count info->throttle_timer_active = 1; } } - return (throttle_level); + return throttle_level; } @@ -1077,19 +1175,19 @@ static void throttle_timer(struct _throttle_io_info_t *info) { uthread_t ut, utlist; - struct timeval elapsed; - struct timeval now; - uint64_t elapsed_msecs; - int throttle_level; - int level; - int wake_level; - caddr_t wake_address = NULL; - boolean_t update_io_count = FALSE; - boolean_t need_wakeup = FALSE; - boolean_t need_release = FALSE; + struct timeval elapsed; + struct timeval now; + uint64_t elapsed_msecs; + int throttle_level; + int level; + int wake_level; + caddr_t wake_address = NULL; + boolean_t update_io_count = FALSE; + boolean_t need_wakeup = FALSE; + boolean_t need_release = FALSE; ut = NULL; - lck_mtx_lock(&info->throttle_lock); + lck_mtx_lock(&info->throttle_lock); info->throttle_timer_active = 0; microuptime(&now); @@ -1099,11 +1197,9 @@ throttle_timer(struct _throttle_io_info_t *info) elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000); if (elapsed_msecs >= (uint64_t)info->throttle_io_periods[THROTTLE_LEVEL_THROTTLED]) { - wake_level = info->throttle_next_wake_level; for (level = THROTTLE_LEVEL_START; level < THROTTLE_LEVEL_END; level++) { - elapsed = now; timevalsub(&elapsed, &info->throttle_start_IO_period_timestamp[wake_level]); elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000); @@ -1119,42 +1215,43 @@ throttle_timer(struct _throttle_io_info_t *info) info->throttle_next_wake_level = wake_level - 1; - if (info->throttle_next_wake_level == THROTTLE_LEVEL_START) + if (info->throttle_next_wake_level == THROTTLE_LEVEL_START) { info->throttle_next_wake_level = THROTTLE_LEVEL_END; + } break; } wake_level--; - if (wake_level == THROTTLE_LEVEL_START) + if (wake_level == THROTTLE_LEVEL_START) { wake_level = THROTTLE_LEVEL_END; + } } } if (need_wakeup == TRUE) { if (!TAILQ_EMPTY(&info->throttle_uthlist[wake_level])) { - ut = (uthread_t)TAILQ_FIRST(&info->throttle_uthlist[wake_level]); TAILQ_REMOVE(&info->throttle_uthlist[wake_level], ut, uu_throttlelist); ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE; - ut->uu_is_throttled = FALSE; + ut->uu_is_throttled = false; wake_address = (caddr_t)&ut->uu_on_throttlelist; } - } else + } else { wake_level = THROTTLE_LEVEL_START; + } - throttle_level = throttle_timer_start(info, update_io_count, wake_level); + throttle_level = throttle_timer_start(info, update_io_count, wake_level); - if (wake_address != NULL) + if (wake_address != NULL) { wakeup(wake_address); + } for (level = THROTTLE_LEVEL_THROTTLED; level <= throttle_level; level++) { - TAILQ_FOREACH_SAFE(ut, &info->throttle_uthlist[level], uu_throttlelist, utlist) { - TAILQ_REMOVE(&info->throttle_uthlist[level], ut, uu_throttlelist); ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE; - ut->uu_is_throttled = FALSE; + ut->uu_is_throttled = false; wakeup(&ut->uu_on_throttlelist); } @@ -1163,10 +1260,11 @@ throttle_timer(struct _throttle_io_info_t *info) info->throttle_timer_ref = 0; need_release = TRUE; } - lck_mtx_unlock(&info->throttle_lock); + lck_mtx_unlock(&info->throttle_lock); - if (need_release == TRUE) + if (need_release == TRUE) { throttle_info_rel(info); + } } @@ -1181,10 +1279,11 @@ throttle_add_to_list(struct _throttle_io_info_t *info, uthread_t ut, int mylevel start_timer = TRUE; } - if (insert_tail == TRUE) + if 
(insert_tail == TRUE) { TAILQ_INSERT_TAIL(&info->throttle_uthlist[mylevel], ut, uu_throttlelist); - else + } else { TAILQ_INSERT_HEAD(&info->throttle_uthlist[mylevel], ut, uu_throttlelist); + } ut->uu_on_throttlelist = mylevel; @@ -1200,7 +1299,7 @@ throttle_add_to_list(struct _throttle_io_info_t *info, uthread_t ut, int mylevel } } } - return (level); + return level; } static void @@ -1217,24 +1316,30 @@ throttle_init_throttle_window(void) */ /* Override global values with device-tree properties */ - if (PE_get_default("kern.io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size))) + if (PE_get_default("kern.io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER1] = throttle_window_size; + } - if (PE_get_default("kern.io_throttle_window_tier2", &throttle_window_size, sizeof(throttle_window_size))) + if (PE_get_default("kern.io_throttle_window_tier2", &throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER2] = throttle_window_size; + } - if (PE_get_default("kern.io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size))) + if (PE_get_default("kern.io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER3] = throttle_window_size; - + } + /* Override with boot-args */ - if (PE_parse_boot_argn("io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size))) + if (PE_parse_boot_argn("io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER1] = throttle_window_size; + } - if (PE_parse_boot_argn("io_throttle_window_tier2", &throttle_window_size, sizeof(throttle_window_size))) + if (PE_parse_boot_argn("io_throttle_window_tier2", &throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER2] = throttle_window_size; - - if (PE_parse_boot_argn("io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size))) + } + + if (PE_parse_boot_argn("io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER3] = throttle_window_size; + } } static void @@ -1251,65 +1356,71 @@ throttle_init_throttle_period(struct _throttle_io_info_t *info, boolean_t isssd) */ /* Assign global defaults */ - if ((isssd == TRUE) && (info->throttle_is_fusion_with_priority == 0)) + if ((isssd == TRUE) && (info->throttle_is_fusion_with_priority == 0)) { info->throttle_io_periods = &throttle_io_period_ssd_msecs[0]; - else + } else { info->throttle_io_periods = &throttle_io_period_msecs[0]; + } /* Override global values with device-tree properties */ - if (PE_get_default("kern.io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size))) + if (PE_get_default("kern.io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER1] = throttle_period_size; - - if (PE_get_default("kern.io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size))) + } + + if (PE_get_default("kern.io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER2] = throttle_period_size; + } - if (PE_get_default("kern.io_throttle_period_tier3", &throttle_period_size, sizeof(throttle_period_size))) + if (PE_get_default("kern.io_throttle_period_tier3", 
&throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER3] = throttle_period_size; - + } + /* Override with boot-args */ - if (PE_parse_boot_argn("io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size))) + if (PE_parse_boot_argn("io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER1] = throttle_period_size; - - if (PE_parse_boot_argn("io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size))) + } + + if (PE_parse_boot_argn("io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER2] = throttle_period_size; + } - if (PE_parse_boot_argn("io_throttle_period_tier3", &throttle_period_size, sizeof(throttle_period_size))) + if (PE_parse_boot_argn("io_throttle_period_tier3", &throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER3] = throttle_period_size; - + } } #if CONFIG_IOSCHED -extern void vm_io_reprioritize_init(void); -int iosched_enabled = 1; +extern void vm_io_reprioritize_init(void); +int iosched_enabled = 1; #endif void throttle_init(void) { - struct _throttle_io_info_t *info; - int i; - int level; + struct _throttle_io_info_t *info; + int i; + int level; #if CONFIG_IOSCHED - int iosched; + int iosched; #endif - /* - * allocate lock group attribute and group - */ - throttle_lock_grp_attr = lck_grp_attr_alloc_init(); - throttle_lock_grp = lck_grp_alloc_init("throttle I/O", throttle_lock_grp_attr); + /* + * allocate lock group attribute and group + */ + throttle_lock_grp_attr = lck_grp_attr_alloc_init(); + throttle_lock_grp = lck_grp_alloc_init("throttle I/O", throttle_lock_grp_attr); /* Update throttle parameters based on device tree configuration */ throttle_init_throttle_window(); - /* - * allocate the lock attribute - */ - throttle_lock_attr = lck_attr_alloc_init(); + /* + * allocate the lock attribute + */ + throttle_lock_attr = lck_attr_alloc_init(); for (i = 0; i < LOWPRI_MAX_NUM_DEV; i++) { - info = &_throttle_io_info[i]; - - lck_mtx_init(&info->throttle_lock, throttle_lock_grp, throttle_lock_attr); + info = &_throttle_io_info[i]; + + lck_mtx_init(&info->throttle_lock, throttle_lock_grp, throttle_lock_attr); info->throttle_timer_call = thread_call_allocate((thread_call_func_t)throttle_timer, (thread_call_param_t)info); for (level = 0; level <= THROTTLE_LEVEL_END; level++) { @@ -1333,13 +1444,13 @@ throttle_init(void) } void -sys_override_io_throttle(int flag) +sys_override_io_throttle(boolean_t enable_override) { - if (flag == THROTTLE_IO_ENABLE) - lowpri_throttle_enabled = 1; - - if (flag == THROTTLE_IO_DISABLE) + if (enable_override) { lowpri_throttle_enabled = 0; + } else { + lowpri_throttle_enabled = 1; + } } int rethrottle_wakeups = 0; @@ -1374,25 +1485,26 @@ rethrottle_thread(uthread_t ut) * If uthread doesn't have throttle state, then there's no chance * of it needing a rethrottle. 
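[Annotation] The rest of rethrottle_thread() reduces to a small state machine over three uthread fields. A cut-down model, with struct uthread_model as a hypothetical stand-in for the relevant slice of struct uthread; the lock and interrupt-disable dance is omitted.

#include <stdbool.h>
#include <stdio.h>

#define THROTTLE_LEVEL_NONE (-1)

struct uthread_model {
    bool is_throttled;      /* blocked in throttle_lowpri_io()? */
    bool was_rethrottled;   /* policy changed while it was running */
    int  on_throttlelist;   /* queued tier, or THROTTLE_LEVEL_NONE */
};

/*
 * Core decision: a running thread merely records that its policy
 * changed (it re-checks before the next sleep), while a blocked thread
 * whose tier no longer matches the queue it sleeps on must be woken so
 * it can requeue at the new tier. Returns true when a wakeup is needed.
 */
static bool
rethrottle(struct uthread_model *ut, int new_level)
{
    if (!ut->is_throttled) {
        ut->was_rethrottled = true;
        return false;
    }
    if (new_level != ut->on_throttlelist) {
        ut->is_throttled = false;   /* models wakeup(&ut->uu_on_throttlelist) */
        return true;
    }
    return false;
}

int
main(void)
{
    struct uthread_model ut = { .is_throttled = true, .on_throttlelist = 3 };
    printf("wakeup needed: %d\n", rethrottle(&ut, 2));
    return 0;
}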
*/ - if (ut->uu_throttle_info == NULL) + if (ut->uu_throttle_info == NULL) { return; + } boolean_t s = ml_set_interrupts_enabled(FALSE); lck_spin_lock(&ut->uu_rethrottle_lock); - if (ut->uu_is_throttled == FALSE) - ut->uu_was_rethrottled = TRUE; - else { + if (!ut->uu_is_throttled) { + ut->uu_was_rethrottled = true; + } else { int my_new_level = throttle_get_thread_throttle_level(ut); if (my_new_level != ut->uu_on_throttlelist) { /* * ut is currently blocked (as indicated by - * ut->uu_is_throttled == TRUE) + * ut->uu_is_throttled == true) * and we're changing it's throttle level, so * we need to wake it up. */ - ut->uu_is_throttled = FALSE; + ut->uu_is_throttled = false; wakeup(&ut->uu_on_throttlelist); rethrottle_wakeups++; @@ -1414,13 +1526,14 @@ rethrottle_thread(uthread_t ut) void * throttle_info_create(void) { - struct _throttle_io_info_t *info; - int level; + struct _throttle_io_info_t *info; + int level; MALLOC(info, struct _throttle_io_info_t *, sizeof(*info), M_TEMP, M_ZERO | M_WAITOK); /* Should never happen but just in case */ - if (info == NULL) + if (info == NULL) { return NULL; + } /* Mark that this one was allocated and needs to be freed */ DEBUG_ALLOC_THROTTLE_INFO("Creating info = %p\n", info, info ); info->throttle_alloc = TRUE; @@ -1441,17 +1554,18 @@ throttle_info_create(void) /* * KPI routine * - * Release the throttle info pointer if all the reference are gone. Should be - * called to release reference taken by throttle_info_create - */ + * Release the throttle info pointer if all the reference are gone. Should be + * called to release reference taken by throttle_info_create + */ void throttle_info_release(void *throttle_info) { DEBUG_ALLOC_THROTTLE_INFO("Releaseing info = %p\n", - (struct _throttle_io_info_t *)throttle_info, - (struct _throttle_io_info_t *)throttle_info); - if (throttle_info) /* Just to be careful */ + (struct _throttle_io_info_t *)throttle_info, + (struct _throttle_io_info_t *)throttle_info); + if (throttle_info) { /* Just to be careful */ throttle_info_rel(throttle_info); + } } /* @@ -1460,20 +1574,22 @@ throttle_info_release(void *throttle_info) * File Systems that create an info structure, need to call this routine in * their mount routine (used by cluster code). File Systems that call this in * their mount routines must call throttle_info_mount_rel in their unmount - * routines. + * routines. */ -void +void throttle_info_mount_ref(mount_t mp, void *throttle_info) { - if ((throttle_info == NULL) || (mp == NULL)) + if ((throttle_info == NULL) || (mp == NULL)) { return; + } throttle_info_ref(throttle_info); /* * We already have a reference release it before adding the new one */ - if (mp->mnt_throttle_info) + if (mp->mnt_throttle_info) { throttle_info_rel(mp->mnt_throttle_info); + } mp->mnt_throttle_info = throttle_info; } @@ -1486,12 +1602,13 @@ throttle_info_mount_ref(mount_t mp, void *throttle_info) int throttle_info_ref_by_mask(uint64_t throttle_mask, throttle_info_handle_t *throttle_info_handle) { - int dev_index; + int dev_index; struct _throttle_io_info_t *info; - if (throttle_info_handle == NULL) + if (throttle_info_handle == NULL) { return EINVAL; - + } + dev_index = num_trailing_0(throttle_mask); info = &_throttle_io_info[dev_index]; throttle_info_ref(info); @@ -1519,12 +1636,13 @@ throttle_info_rel_by_mask(throttle_info_handle_t throttle_info_handle) * * File Systems that throttle_info_mount_ref, must call this routine in their * umount routine. 
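[Annotation] The create/ref/rel pairing documented in these KPI comments can be sketched with C11 atomics standing in for the kernel's OSIncrementAtomic()/OSDecrementAtomic(); the asserts mirror the panics in throttle_info_ref() and throttle_info_rel().

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* Minimal model of the throttle_info lifetime rules. */
struct tinfo {
    atomic_int refcnt;
    bool alloc;             /* heap-allocated via throttle_info_create()? */
};

static int
tinfo_ref(struct tinfo *info)
{
    int old = atomic_fetch_add(&info->refcnt, 1);
    assert(!(info->alloc && old == 0));     /* ref taken after the last rel */
    return old;
}

static int
tinfo_rel(struct tinfo *info)
{
    int old = atomic_fetch_sub(&info->refcnt, 1);
    assert(old != 0);                       /* count just went negative */
    if (old == 1 && info->alloc) {
        free(info);                         /* last reference: tear down */
    }
    return old;
}

int
main(void)
{
    struct tinfo *info = calloc(1, sizeof(*info));
    if (info == NULL) {
        return 1;
    }
    info->alloc = true;
    info->refcnt = 1;       /* the reference create() hands back */
    tinfo_ref(info);        /* e.g. throttle_info_mount_ref() */
    tinfo_rel(info);        /* throttle_info_mount_rel() */
    tinfo_rel(info);        /* throttle_info_release(): frees */
    return 0;
}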
- */ + */ void throttle_info_mount_rel(mount_t mp) { - if (mp->mnt_throttle_info) + if (mp->mnt_throttle_info) { throttle_info_rel(mp->mnt_throttle_info); + } mp->mnt_throttle_info = NULL; } @@ -1539,12 +1657,13 @@ throttle_info_mount_reset_period(mount_t mp, int isssd) { struct _throttle_io_info_t *info; - if (mp == NULL) + if (mp == NULL) { info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; - else if (mp->mnt_throttle_info == NULL) + } else if (mp->mnt_throttle_info == NULL) { info = &_throttle_io_info[mp->mnt_devbsdunit]; - else + } else { info = mp->mnt_throttle_info; + } throttle_init_throttle_period(info, isssd); } @@ -1552,14 +1671,15 @@ throttle_info_mount_reset_period(mount_t mp, int isssd) void throttle_info_get_last_io_time(mount_t mp, struct timeval *tv) { - struct _throttle_io_info_t *info; + struct _throttle_io_info_t *info; - if (mp == NULL) + if (mp == NULL) { info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; - else if (mp->mnt_throttle_info == NULL) + } else if (mp->mnt_throttle_info == NULL) { info = &_throttle_io_info[mp->mnt_devbsdunit]; - else + } else { info = mp->mnt_throttle_info; + } *tv = info->throttle_last_write_timestamp; } @@ -1567,36 +1687,40 @@ throttle_info_get_last_io_time(mount_t mp, struct timeval *tv) void update_last_io_time(mount_t mp) { - struct _throttle_io_info_t *info; - - if (mp == NULL) + struct _throttle_io_info_t *info; + + if (mp == NULL) { info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; - else if (mp->mnt_throttle_info == NULL) + } else if (mp->mnt_throttle_info == NULL) { info = &_throttle_io_info[mp->mnt_devbsdunit]; - else + } else { info = mp->mnt_throttle_info; + } microuptime(&info->throttle_last_write_timestamp); - if (mp != NULL) + if (mp != NULL) { mp->mnt_last_write_completed_timestamp = info->throttle_last_write_timestamp; + } } int throttle_get_io_policy(uthread_t *ut) { - if (ut != NULL) + if (ut != NULL) { *ut = get_bsdthread_info(current_thread()); + } - return (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO)); + return proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO); } int throttle_get_passive_io_policy(uthread_t *ut) { - if (ut != NULL) + if (ut != NULL) { *ut = get_bsdthread_info(current_thread()); + } - return (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_PASSIVE_IO)); + return proc_get_effective_thread_policy(current_thread(), TASK_POLICY_PASSIVE_IO); } @@ -1613,15 +1737,17 @@ throttle_get_thread_throttle_level(uthread_t ut) * Return a throttle level given an existing I/O tier (such as returned by throttle_get_io_policy) */ static int -throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier) { +throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier) +{ int thread_throttle_level = io_tier; int user_idle_level; assert(ut != NULL); /* Bootcache misses should always be throttled */ - if (ut->uu_throttle_bc == TRUE) + if (ut->uu_throttle_bc) { thread_throttle_level = THROTTLE_LEVEL_TIER3; + } /* * Issue tier3 I/O as tier2 when the user is idle @@ -1637,7 +1763,7 @@ throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier) { } } - return (thread_throttle_level); + return thread_throttle_level; } /* @@ -1650,15 +1776,16 @@ throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier) { static int throttle_io_will_be_throttled_internal(void * throttle_info, int * mylevel, int * throttling_level) { - struct _throttle_io_info_t *info = throttle_info; + struct _throttle_io_info_t *info = throttle_info; struct timeval 
elapsed; struct timeval now; uint64_t elapsed_msecs; - int thread_throttle_level; - int throttle_level; + int thread_throttle_level; + int throttle_level; - if ((thread_throttle_level = throttle_get_thread_throttle_level(NULL)) < THROTTLE_LEVEL_THROTTLED) - return (THROTTLE_DISENGAGED); + if ((thread_throttle_level = throttle_get_thread_throttle_level(NULL)) < THROTTLE_LEVEL_THROTTLED) { + return THROTTLE_DISENGAGED; + } microuptime(&now); @@ -1670,8 +1797,9 @@ throttle_io_will_be_throttled_internal(void * throttle_info, int * mylevel, int timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]); elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000); - if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level]) + if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level]) { break; + } } if (throttle_level >= thread_throttle_level) { /* @@ -1679,28 +1807,30 @@ throttle_io_will_be_throttled_internal(void * throttle_info, int * mylevel, int * that affect the throttle level of this thread, * so go ahead and treat as normal I/O */ - return (THROTTLE_DISENGAGED); + return THROTTLE_DISENGAGED; } - if (mylevel) + if (mylevel) { *mylevel = thread_throttle_level; - if (throttling_level) + } + if (throttling_level) { *throttling_level = throttle_level; + } if (info->throttle_io_count != info->throttle_io_count_begin) { /* * we've already issued at least one throttleable I/O * in the current I/O window, so avoid issuing another one */ - return (THROTTLE_NOW); + return THROTTLE_NOW; } /* * we're in the throttle window, so * cut the I/O size back */ - return (THROTTLE_ENGAGED); + return THROTTLE_ENGAGED; } -/* +/* * If we have a mount point and it has a throttle info pointer then * use it to do the check, otherwise use the device unit number to find * the correct throttle info array element. 
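[Annotation] throttle_io_will_be_throttled_internal(), used throughout the hunks below, answers a three-way question. A self-contained model of its core scan, with illustrative window lengths and enum values; the real THROTTLE_* constants and the per-tier timestamps live in the kernel headers and struct _throttle_io_info_t.

#include <stdint.h>
#include <stdio.h>

enum { THROTTLE_DISENGAGED, THROTTLE_ENGAGED, THROTTLE_NOW };

#define LEVEL_START     0
#define LEVEL_THROTTLED 1
#define LEVEL_END       3

/* Illustrative window lengths in msecs; the kernel's live in
 * throttle_windows_msecs[]. */
static const uint64_t window_msecs[LEVEL_END + 1] = { 0, 25, 100, 500 };

/*
 * Find the highest-priority tier whose throttle window is still open.
 * If it outranks the calling thread's tier, the thread is throttled:
 * THROTTLE_NOW if an I/O was already issued in the current window,
 * otherwise THROTTLE_ENGAGED (issue, but with a reduced I/O size).
 */
static int
will_be_throttled(int my_tier, const uint64_t elapsed_msecs[],
    int io_count, int io_count_begin)
{
    int level;

    if (my_tier < LEVEL_THROTTLED) {
        return THROTTLE_DISENGAGED;     /* tier 0 is never throttled */
    }
    for (level = LEVEL_START; level < my_tier; level++) {
        if (elapsed_msecs[level] < window_msecs[my_tier]) {
            break;                      /* this tier holds a window open */
        }
    }
    if (level >= my_tier) {
        return THROTTLE_DISENGAGED;     /* no higher tier is active */
    }
    return (io_count != io_count_begin) ? THROTTLE_NOW : THROTTLE_ENGAGED;
}

int
main(void)
{
    const uint64_t elapsed[LEVEL_END + 1] = { 10, 10, 10, 10 };
    printf("%d\n", will_be_throttled(3, elapsed, 5, 5));   /* 1 = engaged */
    return 0;
}

Per the source comments, THROTTLE_NOW is what forces the caller to sleep (one throttleable I/O per window has already gone out), while THROTTLE_ENGAGED merely trims the I/O size.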
@@ -1708,35 +1838,38 @@ throttle_io_will_be_throttled_internal(void * throttle_info, int * mylevel, int int throttle_io_will_be_throttled(__unused int lowpri_window_msecs, mount_t mp) { - struct _throttle_io_info_t *info; + struct _throttle_io_info_t *info; /* * Should we just return zero if no mount point */ - if (mp == NULL) - info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; - else if (mp->mnt_throttle_info == NULL) - info = &_throttle_io_info[mp->mnt_devbsdunit]; - else - info = mp->mnt_throttle_info; + if (mp == NULL) { + info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; + } else if (mp->mnt_throttle_info == NULL) { + info = &_throttle_io_info[mp->mnt_devbsdunit]; + } else { + info = mp->mnt_throttle_info; + } if (info->throttle_is_fusion_with_priority) { uthread_t ut = get_bsdthread_info(current_thread()); - if (ut->uu_lowpri_window == 0) - return (THROTTLE_DISENGAGED); + if (ut->uu_lowpri_window == 0) { + return THROTTLE_DISENGAGED; + } } - if (info->throttle_disabled) - return (THROTTLE_DISENGAGED); - else + if (info->throttle_disabled) { + return THROTTLE_DISENGAGED; + } else { return throttle_io_will_be_throttled_internal(info, NULL, NULL); + } } -/* +/* * Routine to increment I/O throttling counters maintained in the proc */ -static void +static void throttle_update_proc_stats(pid_t throttling_pid, int count) { proc_t throttling_proc; @@ -1744,7 +1877,7 @@ throttle_update_proc_stats(pid_t throttling_pid, int count) /* The throttled_proc is always the current proc; so we are not concerned with refs */ OSAddAtomic64(count, &(throttled_proc->was_throttled)); - + /* The throttling pid might have exited by now */ throttling_proc = proc_find(throttling_pid); if (throttling_proc != PROC_NULL) { @@ -1763,48 +1896,53 @@ throttle_lowpri_io(int sleep_amount) { uthread_t ut; struct _throttle_io_info_t *info; - int throttle_type = 0; - int mylevel = 0; - int throttling_level = THROTTLE_LEVEL_NONE; - int sleep_cnt = 0; + int throttle_type = 0; + int mylevel = 0; + int throttling_level = THROTTLE_LEVEL_NONE; + int sleep_cnt = 0; uint32_t throttle_io_period_num = 0; boolean_t insert_tail = TRUE; boolean_t s; ut = get_bsdthread_info(current_thread()); - if (ut->uu_lowpri_window == 0) - return (0); + if (ut->uu_lowpri_window == 0) { + return 0; + } info = ut->uu_throttle_info; if (info == NULL) { - ut->uu_throttle_bc = FALSE; + ut->uu_throttle_bc = false; ut->uu_lowpri_window = 0; - return (0); + return 0; } lck_mtx_lock(&info->throttle_lock); assert(ut->uu_on_throttlelist < THROTTLE_LEVEL_THROTTLED); - if (sleep_amount == 0) + if (sleep_amount == 0) { goto done; + } - if (sleep_amount == 1 && ut->uu_throttle_bc == FALSE) + if (sleep_amount == 1 && !ut->uu_throttle_bc) { sleep_amount = 0; + } throttle_io_period_num = info->throttle_io_period_num; - ut->uu_was_rethrottled = FALSE; - - while ( (throttle_type = throttle_io_will_be_throttled_internal(info, &mylevel, &throttling_level)) ) { + ut->uu_was_rethrottled = false; + while ((throttle_type = throttle_io_will_be_throttled_internal(info, &mylevel, &throttling_level))) { if (throttle_type == THROTTLE_ENGAGED) { - if (sleep_amount == 0) - break; - if (info->throttle_io_period_num < throttle_io_period_num) + if (sleep_amount == 0) { + break; + } + if (info->throttle_io_period_num < throttle_io_period_num) { break; - if ((info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) + } + if ((info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) { break; + } } /* * keep the same position in the list 
if "rethrottle_thread" changes our throttle level and @@ -1822,8 +1960,9 @@ throttle_lowpri_io(int sleep_amount) insert_tail = TRUE; } if (ut->uu_on_throttlelist < THROTTLE_LEVEL_THROTTLED) { - if (throttle_add_to_list(info, ut, mylevel, insert_tail) == THROTTLE_LEVEL_END) + if (throttle_add_to_list(info, ut, mylevel, insert_tail) == THROTTLE_LEVEL_END) { goto done; + } } assert(throttling_level >= THROTTLE_LEVEL_START && throttling_level <= THROTTLE_LEVEL_END); @@ -1834,30 +1973,29 @@ throttle_lowpri_io(int sleep_amount) * this is the critical section w/r to our interaction * with "rethrottle_thread" */ - if (ut->uu_was_rethrottled == TRUE) { - + if (ut->uu_was_rethrottled) { lck_spin_unlock(&ut->uu_rethrottle_lock); ml_set_interrupts_enabled(s); lck_mtx_yield(&info->throttle_lock); KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 103)), thread_tid(ut->uu_thread), ut->uu_on_throttlelist, 0, 0, 0); - ut->uu_was_rethrottled = FALSE; + ut->uu_was_rethrottled = false; continue; } KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, PROCESS_THROTTLED)) | DBG_FUNC_NONE, - info->throttle_last_IO_pid[throttling_level], throttling_level, proc_selfpid(), mylevel, 0); - + info->throttle_last_IO_pid[throttling_level], throttling_level, proc_selfpid(), mylevel, 0); + if (sleep_cnt == 0) { KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START, - throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0); + throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0); throttled_count[mylevel]++; } ut->uu_wmesg = "throttle_lowpri_io"; assert_wait((caddr_t)&ut->uu_on_throttlelist, THREAD_UNINT); - ut->uu_is_throttled = TRUE; + ut->uu_is_throttled = true; lck_spin_unlock(&ut->uu_rethrottle_lock); ml_set_interrupts_enabled(s); @@ -1867,17 +2005,17 @@ throttle_lowpri_io(int sleep_amount) ut->uu_wmesg = NULL; - ut->uu_is_throttled = FALSE; - ut->uu_was_rethrottled = FALSE; + ut->uu_is_throttled = false; + ut->uu_was_rethrottled = false; lck_mtx_lock(&info->throttle_lock); sleep_cnt++; - - if (sleep_amount == 0) + + if (sleep_amount == 0) { insert_tail = FALSE; - else if (info->throttle_io_period_num < throttle_io_period_num || - (info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) { + } else if (info->throttle_io_period_num < throttle_io_period_num || + (info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) { insert_tail = FALSE; sleep_amount = 0; } @@ -1891,25 +2029,69 @@ done: if (sleep_cnt) { KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END, - throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0); + throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0); /* * We update the stats for the last pid which opened a throttle window for the throttled thread. * This might not be completely accurate since the multiple throttles seen by the lower tier pid - * might have been caused by various higher prio pids. However, updating these stats accurately + * might have been caused by various higher prio pids. However, updating these stats accurately * means doing a proc_find while holding the throttle lock which leads to deadlock. 
		 */
		throttle_update_proc_stats(info->throttle_last_IO_pid[throttling_level], sleep_cnt);
	}

	ut->uu_throttle_info = NULL;
-	ut->uu_throttle_bc = FALSE;
+	ut->uu_throttle_bc = false;
	ut->uu_lowpri_window = 0;

	throttle_info_rel(info);

-	return (sleep_cnt);
+	return sleep_cnt;
}

+/*
+ * Returns TRUE if a throttle_lowpri_io call with the same sleep_amount would have slept.
+ * This function mimics most of the throttle_lowpri_io checks, but without actually sleeping.
+ */
+int
+throttle_lowpri_io_will_be_throttled(int sleep_amount)
+{
+	if (sleep_amount == 0) {
+		return FALSE;
+	}
+
+	uthread_t ut = get_bsdthread_info(current_thread());
+	if (ut->uu_lowpri_window == 0) {
+		return FALSE;
+	}
+
+	struct _throttle_io_info_t *info = ut->uu_throttle_info;
+	if (info == NULL) {
+		return FALSE;
+	}
+
+	lck_mtx_lock(&info->throttle_lock);
+	assert(ut->uu_on_throttlelist < THROTTLE_LEVEL_THROTTLED);
+
+	if (sleep_amount == 1 && !ut->uu_throttle_bc) {
+		sleep_amount = 0;
+	}
+
+	int result = FALSE;
+
+	int throttle_type = throttle_io_will_be_throttled_internal(info, NULL, NULL);
+	if (throttle_type > THROTTLE_DISENGAGED) {
+		result = TRUE;
+		if ((throttle_type == THROTTLE_ENGAGED) && (sleep_amount == 0)) {
+			result = FALSE;
+		}
+	}
+
+	lck_mtx_unlock(&info->throttle_lock);
+
+	return result;
+}
+
+
/*
 * KPI routine
 *
@@ -1918,38 +2100,48 @@ done:
 *
 * explanations about these policies are in the man page of setiopolicy_np
 */
-void throttle_set_thread_io_policy(int policy)
+void
+throttle_set_thread_io_policy(int policy)
{
	proc_set_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_IOPOL, policy);
}

-void throttle_info_reset_window(uthread_t ut)
+int
+throttle_get_thread_effective_io_policy()
+{
+	return proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
+}
+
+void
+throttle_info_reset_window(uthread_t ut)
{
	struct _throttle_io_info_t *info;

-	if (ut == NULL)
+	if (ut == NULL) {
		ut = get_bsdthread_info(current_thread());
+	}

-	if ( (info = ut->uu_throttle_info) ) {
+	if ((info = ut->uu_throttle_info)) {
		throttle_info_rel(info);

		ut->uu_throttle_info = NULL;
		ut->uu_lowpri_window = 0;
-		ut->uu_throttle_bc = FALSE;
+		ut->uu_throttle_bc = false;
	}
}

static
-void throttle_info_set_initial_window(uthread_t ut, struct _throttle_io_info_t *info, boolean_t BC_throttle, boolean_t isssd)
+void
+throttle_info_set_initial_window(uthread_t ut, struct _throttle_io_info_t *info, boolean_t BC_throttle, boolean_t isssd)
{
-	if (lowpri_throttle_enabled == 0 || info->throttle_disabled)
+	if (lowpri_throttle_enabled == 0 || info->throttle_disabled) {
		return;
+	}

	if (info->throttle_io_periods == 0) {
		throttle_init_throttle_period(info, isssd);
	}
	if (ut->uu_throttle_info == NULL) {
-
		ut->uu_throttle_info = info;
		throttle_info_ref(info);
		DEBUG_ALLOC_THROTTLE_INFO("updating info = %p\n", info, info );
@@ -1965,7 +2157,9 @@ void throttle_info_set_initial_window(uthread_t ut, struct _throttle_io_info_t *
 *
 * Only affects IO that was sent through spec_strategy
 */
-void throttle_info_end_io(buf_t bp) {
+void
+throttle_info_end_io(buf_t bp)
+{
	mount_t mp;
	struct bufattr *bap;
	struct _throttle_io_info_t *info;
@@ -1996,7 +2190,9 @@ void throttle_info_end_io(buf_t bp) {
 * Decrement inflight count initially incremented by throttle_info_update_internal
 */
static
-void throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level) {
+void
+throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level)
+{
	if (throttle_level == THROTTLE_LEVEL_NONE) {
		return;
	}
@@ -2011,15
+2207,18 @@ void throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttl * throttle_info_end_io_internal to avoid leaking in-flight I/O. */ static -int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int flags, boolean_t isssd, boolean_t inflight, struct bufattr *bap) +int +throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int flags, boolean_t isssd, boolean_t inflight, struct bufattr *bap) { - int thread_throttle_level; + int thread_throttle_level; - if (lowpri_throttle_enabled == 0 || info->throttle_disabled) + if (lowpri_throttle_enabled == 0 || info->throttle_disabled) { return THROTTLE_LEVEL_NONE; + } - if (ut == NULL) + if (ut == NULL) { ut = get_bsdthread_info(current_thread()); + } if (bap && inflight && !ut->uu_throttle_bc) { thread_throttle_level = GET_BUFATTR_IO_TIER(bap); @@ -2031,7 +2230,7 @@ int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut } if (thread_throttle_level != THROTTLE_LEVEL_NONE) { - if(!ISSET(flags, B_PASSIVE)) { + if (!ISSET(flags, B_PASSIVE)) { info->throttle_last_IO_pid[thread_throttle_level] = proc_selfpid(); if (inflight && !ut->uu_throttle_bc) { if (NULL != bap) { @@ -2042,7 +2241,7 @@ int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut microuptime(&info->throttle_window_start_timestamp[thread_throttle_level]); } KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, OPEN_THROTTLE_WINDOW)) | DBG_FUNC_NONE, - current_proc()->p_pid, thread_throttle_level, 0, 0, 0); + current_proc()->p_pid, thread_throttle_level, 0, 0, 0); } microuptime(&info->throttle_last_IO_timestamp[thread_throttle_level]); } @@ -2059,7 +2258,7 @@ int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut * do the delay just before we return from the system * call that triggered this I/O or from vnode_pagein */ - OSAddAtomic(1, &info->throttle_io_count); + OSAddAtomic(1, &info->throttle_io_count); throttle_info_set_initial_window(ut, info, FALSE, isssd); } @@ -2067,7 +2266,8 @@ int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut return thread_throttle_level; } -void *throttle_info_update_by_mount(mount_t mp) +void * +throttle_info_update_by_mount(mount_t mp) { struct _throttle_io_info_t *info; uthread_t ut; @@ -2076,14 +2276,17 @@ void *throttle_info_update_by_mount(mount_t mp) ut = get_bsdthread_info(current_thread()); if (mp != NULL) { - if (disk_conditioner_mount_is_ssd(mp)) + if (disk_conditioner_mount_is_ssd(mp)) { isssd = TRUE; + } info = &_throttle_io_info[mp->mnt_devbsdunit]; - } else + } else { info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; + } - if (!ut->uu_lowpri_window) + if (!ut->uu_lowpri_window) { throttle_info_set_initial_window(ut, info, FALSE, isssd); + } return info; } @@ -2095,10 +2298,12 @@ void *throttle_info_update_by_mount(mount_t mp) * this is usually called before every I/O, used for throttled I/O * book keeping. This routine has low overhead and does not sleep */ -void throttle_info_update(void *throttle_info, int flags) +void +throttle_info_update(void *throttle_info, int flags) { - if (throttle_info) + if (throttle_info) { throttle_info_update_internal(throttle_info, NULL, flags, FALSE, FALSE, NULL); + } } /* @@ -2107,7 +2312,8 @@ void throttle_info_update(void *throttle_info, int flags) * this is usually called before every I/O, used for throttled I/O * book keeping. 
This routine has low overhead and does not sleep */ -void throttle_info_update_by_mask(void *throttle_info_handle, int flags) +void +throttle_info_update_by_mask(void *throttle_info_handle, int flags) { void *throttle_info = throttle_info_handle; @@ -2121,17 +2327,19 @@ void throttle_info_update_by_mask(void *throttle_info_handle, int flags) } /* * KPI routine - * - * This routine marks the throttle info as disabled. Used for mount points which + * + * This routine marks the throttle info as disabled. Used for mount points which * support I/O scheduling. */ -void throttle_info_disable_throttle(int devno, boolean_t isfusion) +void +throttle_info_disable_throttle(int devno, boolean_t isfusion) { struct _throttle_io_info_t *info; - if (devno < 0 || devno >= LOWPRI_MAX_NUM_DEV) + if (devno < 0 || devno >= LOWPRI_MAX_NUM_DEV) { panic("Illegal devno (%d) passed into throttle_info_disable_throttle()", devno); + } info = &_throttle_io_info[devno]; // don't disable software throttling on devices that are part of a fusion device @@ -2142,34 +2350,34 @@ void throttle_info_disable_throttle(int devno, boolean_t isfusion) } info->throttle_disabled = !info->throttle_is_fusion_with_priority; return; -} +} /* * KPI routine (private) * Called to determine if this IO is being throttled to this level so that it can be treated specially */ -int throttle_info_io_will_be_throttled(void * throttle_info, int policy) +int +throttle_info_io_will_be_throttled(void * throttle_info, int policy) { - struct _throttle_io_info_t *info = throttle_info; + struct _throttle_io_info_t *info = throttle_info; struct timeval elapsed; uint64_t elapsed_msecs; - int throttle_level; - int thread_throttle_level; - - switch (policy) { - - case IOPOL_THROTTLE: - thread_throttle_level = THROTTLE_LEVEL_TIER3; - break; - case IOPOL_UTILITY: - thread_throttle_level = THROTTLE_LEVEL_TIER2; - break; - case IOPOL_STANDARD: - thread_throttle_level = THROTTLE_LEVEL_TIER1; - break; - default: - thread_throttle_level = THROTTLE_LEVEL_TIER0; + int throttle_level; + int thread_throttle_level; + + switch (policy) { + case IOPOL_THROTTLE: + thread_throttle_level = THROTTLE_LEVEL_TIER3; + break; + case IOPOL_UTILITY: + thread_throttle_level = THROTTLE_LEVEL_TIER2; + break; + case IOPOL_STANDARD: + thread_throttle_level = THROTTLE_LEVEL_TIER1; + break; + default: + thread_throttle_level = THROTTLE_LEVEL_TIER0; break; } for (throttle_level = THROTTLE_LEVEL_START; throttle_level < thread_throttle_level; throttle_level++) { @@ -2181,23 +2389,25 @@ int throttle_info_io_will_be_throttled(void * throttle_info, int policy) timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]); elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000); - if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level]) + if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level]) { break; + } } if (throttle_level >= thread_throttle_level) { /* * we're beyond all of the throttle windows * so go ahead and treat as normal I/O */ - return (THROTTLE_DISENGAGED); + return THROTTLE_DISENGAGED; } /* * we're in the throttle window */ - return (THROTTLE_ENGAGED); + return THROTTLE_ENGAGED; } -int throttle_lowpri_window(void) +int +throttle_lowpri_window(void) { struct uthread *ut = get_bsdthread_info(current_thread()); return ut->uu_lowpri_window; @@ -2211,15 +2421,15 @@ int upl_get_cached_tier(void *); int spec_strategy(struct vnop_strategy_args *ap) { - buf_t bp; - int bflags; - int io_tier; - int passive; - 
dev_t bdev; + buf_t bp; + int bflags; + int io_tier; + int passive; + dev_t bdev; uthread_t ut; mount_t mp; - struct bufattr *bap; - int strategy_ret; + struct bufattr *bap; + int strategy_ret; struct _throttle_io_info_t *throttle_info; boolean_t isssd = FALSE; boolean_t inflight = FALSE; @@ -2230,28 +2440,30 @@ spec_strategy(struct vnop_strategy_args *ap) proc_t curproc = current_proc(); #endif /* !CONFIG_EMBEDDED */ - bp = ap->a_bp; + bp = ap->a_bp; bdev = buf_device(bp); mp = buf_vnode(bp)->v_mount; bap = &bp->b_attr; #if CONFIG_IOSCHED - if (bp->b_flags & B_CLUSTER) { - - io_tier = upl_get_cached_tier(bp->b_upl); + if (bp->b_flags & B_CLUSTER) { + io_tier = upl_get_cached_tier(bp->b_upl); - if (io_tier == -1) - io_tier = throttle_get_io_policy(&ut); + if (io_tier == -1) { + io_tier = throttle_get_io_policy(&ut); + } #if DEVELOPMENT || DEBUG - else { - int my_io_tier = throttle_get_io_policy(&ut); + else { + int my_io_tier = throttle_get_io_policy(&ut); - if (io_tier != my_io_tier) - KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, IO_TIER_UPL_MISMATCH)) | DBG_FUNC_NONE, buf_kernel_addrperm_addr(bp), my_io_tier, io_tier, 0, 0); - } + if (io_tier != my_io_tier) { + KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, IO_TIER_UPL_MISMATCH)) | DBG_FUNC_NONE, buf_kernel_addrperm_addr(bp), my_io_tier, io_tier, 0, 0); + } + } #endif - } else - io_tier = throttle_get_io_policy(&ut); + } else { + io_tier = throttle_get_io_policy(&ut); + } #else io_tier = throttle_get_io_policy(&ut); #endif @@ -2272,18 +2484,19 @@ spec_strategy(struct vnop_strategy_args *ap) #endif /* CONFIG_IOSCHED */ } - if (bp->b_flags & B_META) + if (bp->b_flags & B_META) { bap->ba_flags |= BA_META; + } #if CONFIG_IOSCHED - /* + /* * For I/O Scheduling, we currently do not have a way to track and expedite metadata I/Os. 
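	 * (For a concrete instance of the rules below, assuming IOSCHED_METADATA_TIER
	 * is a more-favored, numerically lower tier: a TIER3 thread's metadata read is
	 * issued at IOSCHED_METADATA_TIER and marked passive, so the promoted I/O does
	 * not itself open a throttle window against lower tiers.)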
	 * To ensure we don't get into priority inversions due to metadata I/Os, we use the following rules:
	 * For metadata reads, ceil all I/Os to IOSCHED_METADATA_TIER & mark them passive if the I/O tier was upgraded
	 * For metadata writes, unconditionally mark them as IOSCHED_METADATA_TIER and passive
	 */
	if (bap->ba_flags & BA_META) {
-		if (mp && (mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED)) {
+		if ((mp && (mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED)) || (bap->ba_flags & BA_IO_SCHEDULED)) {
			if (bp->b_flags & B_READ) {
				if (io_tier > IOSCHED_METADATA_TIER) {
					io_tier = IOSCHED_METADATA_TIER;
@@ -2296,7 +2509,7 @@ spec_strategy(struct vnop_strategy_args *ap)
		}
	}
#endif /* CONFIG_IOSCHED */
-	
+
	SET_BUFATTR_IO_TIER(bap, io_tier);

	if (passive) {
@@ -2305,35 +2518,43 @@ spec_strategy(struct vnop_strategy_args *ap)
	}

#if !CONFIG_EMBEDDED
-	if ((curproc != NULL) && ((curproc->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP))
+	if ((curproc != NULL) && ((curproc->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP)) {
		bap->ba_flags |= BA_DELAYIDLESLEEP;
+	}
#endif /* !CONFIG_EMBEDDED */
-	
+
	bflags = bp->b_flags;

-	if (((bflags & B_READ) == 0) && ((bflags & B_ASYNC) == 0))
+	if (((bflags & B_READ) == 0) && ((bflags & B_ASYNC) == 0)) {
		bufattr_markquickcomplete(bap);
+	}

-	if (bflags & B_READ)
-		code |= DKIO_READ;
-	if (bflags & B_ASYNC)
-		code |= DKIO_ASYNC;
+	if (bflags & B_READ) {
+		code |= DKIO_READ;
+	}
+	if (bflags & B_ASYNC) {
+		code |= DKIO_ASYNC;
+	}

-	if (bap->ba_flags & BA_META)
-		code |= DKIO_META;
-	else if (bflags & B_PAGEIO)
-		code |= DKIO_PAGING;
+	if (bap->ba_flags & BA_META) {
+		code |= DKIO_META;
+	} else if (bflags & B_PAGEIO) {
+		code |= DKIO_PAGING;
+	}

-	if (io_tier != 0)
+	if (io_tier != 0) {
		code |= DKIO_THROTTLE;
+	}

	code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK);

-	if (bflags & B_PASSIVE)
+	if (bflags & B_PASSIVE) {
		code |= DKIO_PASSIVE;
+	}

-	if (bap->ba_flags & BA_NOCACHE)
+	if (bap->ba_flags & BA_NOCACHE) {
		code |= DKIO_NOCACHE;
+	}

	if (upgrade) {
		code |= DKIO_TIER_UPGRADE;
@@ -2342,14 +2563,15 @@ spec_strategy(struct vnop_strategy_args *ap)

	if (kdebug_enable) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
-		                          buf_kernel_addrperm_addr(bp), bdev, (int)buf_blkno(bp), buf_count(bp), 0);
-	}
+		    buf_kernel_addrperm_addr(bp), bdev, buf_blkno(bp), buf_count(bp), 0);
+	}

	thread_update_io_stats(current_thread(), buf_count(bp), code);

	if (mp != NULL) {
-		if (disk_conditioner_mount_is_ssd(mp))
+		if (disk_conditioner_mount_is_ssd(mp)) {
			isssd = TRUE;
+		}
		/*
		 * Partially initialized mounts don't have a final devbsdunit and should not be tracked.
		 * Verify that devbsdunit is initialized (non-zero) or that 0 is the correct initialized value
@@ -2359,9 +2581,9 @@ spec_strategy(struct vnop_strategy_args *ap)
			inflight = TRUE;
		}
		throttle_info = &_throttle_io_info[mp->mnt_devbsdunit];
-
-	} else
+	} else {
		throttle_info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
+	}

	throttle_info_update_internal(throttle_info, ut, bflags, isssd, inflight, bap);

@@ -2390,29 +2612,28 @@ spec_strategy(struct vnop_strategy_args *ap)
	 * the boot cache too often.
	 *
	 * Note that typical strategy routines are defined with
-	 * a void return so we'll get garbage here. In the 
+	 * a void return so we'll get garbage here. In the
	 * unlikely case the garbage matches our special return
	 * value, it's not a big deal since we're only adjusting
	 * the throttling delay.
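	 *
	 * For illustration, a cache-aware strategy routine could look like this
	 * sketch (the bc_* helpers are hypothetical; only the magic return values
	 * defined just below are real):
	 *
	 *	int bc_strategy(struct buf *bp)
	 *	{
	 *		if (bc_satisfy_from_cache(bp)) {	// hypothetical cache hit path
	 *			buf_biodone(bp);
	 *			return IO_SATISFIED_BY_CACHE;
	 *		}
	 *		bc_passthrough(bp);			// hypothetical: send to the disk driver
	 *		return bc_should_throttle(bp) ? IO_SHOULD_BE_THROTTLED : 0;
	 *	}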
- */ + */ #define IO_SATISFIED_BY_CACHE ((int)0xcafefeed) #define IO_SHOULD_BE_THROTTLED ((int)0xcafebeef) - typedef int strategy_fcn_ret_t(struct buf *bp); - + typedef int strategy_fcn_ret_t(struct buf *bp); + strategy_ret = (*(strategy_fcn_ret_t*)bdevsw[major(bdev)].d_strategy)(bp); // disk conditioner needs to track when this I/O actually starts // which means track it after `strategy` which may include delays // from inflight I/Os microuptime(&bp->b_timestamp_tv); - + if (IO_SATISFIED_BY_CACHE == strategy_ret) { /* * If this was a throttled IO satisfied by the boot cache, * don't delay the thread. */ throttle_info_reset_window(ut); - } else if (IO_SHOULD_BE_THROTTLED == strategy_ret) { /* * If the boot cache indicates this IO should be throttled, @@ -2420,7 +2641,7 @@ spec_strategy(struct vnop_strategy_args *ap) */ throttle_info_set_initial_window(ut, throttle_info, TRUE, isssd); } - return (0); + return 0; } @@ -2430,7 +2651,7 @@ spec_strategy(struct vnop_strategy_args *ap) int spec_blockmap(__unused struct vnop_blockmap_args *ap) { - return (ENOTSUP); + return ENOTSUP; } @@ -2448,7 +2669,6 @@ spec_close(struct vnop_close_args *ap) struct session *sessp; switch (vp->v_type) { - case VCHR: /* * Hack: a tty device that is a controlling terminal @@ -2473,7 +2693,7 @@ spec_close(struct vnop_close_args *ap) sessp->s_ttyvid = 0; sessp->s_ttyp = TTY_NULL; sessp->s_ttypgrpid = NO_PID; - } + } session_unlock(sessp); if (tp != TTY_NULL) { @@ -2493,14 +2713,16 @@ spec_close(struct vnop_close_args *ap) session_rele(sessp); } - if (--vp->v_specinfo->si_opencount < 0) + if (--vp->v_specinfo->si_opencount < 0) { panic("negative open count (c, %u, %u)", major(dev), minor(dev)); + } /* * close on last reference or on vnode revoke call */ - if (vcount(vp) == 0 || (flags & IO_REVOKE) != 0) + if (vcount(vp) == 0 || (flags & IO_REVOKE) != 0) { error = cdevsw[major(dev)].d_close(dev, flags, S_IFCHR, p); + } devsw_unlock(dev, S_IFCHR); break; @@ -2514,7 +2736,7 @@ spec_close(struct vnop_close_args *ap) if (vcount(vp) > 1) { vp->v_specinfo->si_opencount--; devsw_unlock(dev, S_IFBLK); - return (0); + return 0; } devsw_unlock(dev, S_IFBLK); @@ -2523,27 +2745,31 @@ spec_close(struct vnop_close_args *ap) * we must invalidate any in core blocks, so that * we can, for instance, change floppy disks. 
*/ - if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context))) - return (error); + if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context))) { + return error; + } error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0); - if (error) - return (error); + if (error) { + return error; + } devsw_lock(dev, S_IFBLK); - if (--vp->v_specinfo->si_opencount < 0) + if (--vp->v_specinfo->si_opencount < 0) { panic("negative open count (b, %u, %u)", major(dev), minor(dev)); + } - if (vcount(vp) == 0) + if (vcount(vp) == 0) { error = bdevsw[major(dev)].d_close(dev, flags, S_IFBLK, p); + } devsw_unlock(dev, S_IFBLK); break; default: panic("spec_close: not special"); - return(EBADF); + return EBADF; } return error; @@ -2555,28 +2781,27 @@ spec_close(struct vnop_close_args *ap) int spec_pathconf(struct vnop_pathconf_args *ap) { - switch (ap->a_name) { case _PC_LINK_MAX: *ap->a_retval = LINK_MAX; - return (0); + return 0; case _PC_MAX_CANON: *ap->a_retval = MAX_CANON; - return (0); + return 0; case _PC_MAX_INPUT: *ap->a_retval = MAX_INPUT; - return (0); + return 0; case _PC_PIPE_BUF: *ap->a_retval = PIPE_BUF; - return (0); + return 0; case _PC_CHOWN_RESTRICTED: - *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */ - return (0); + *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */ + return 0; case _PC_VDISABLE: *ap->a_retval = _POSIX_VDISABLE; - return (0); + return 0; default: - return (EINVAL); + return EINVAL; } /* NOTREACHED */ } @@ -2587,8 +2812,7 @@ spec_pathconf(struct vnop_pathconf_args *ap) int spec_ebadf(__unused void *dummy) { - - return (EBADF); + return EBADF; } /* Blktooff derives file offset from logical block number */ @@ -2600,19 +2824,19 @@ spec_blktooff(struct vnop_blktooff_args *ap) switch (vp->v_type) { case VCHR: *ap->a_offset = (off_t)-1; /* failure */ - return (ENOTSUP); + return ENOTSUP; case VBLK: printf("spec_blktooff: not implemented for VBLK\n"); *ap->a_offset = (off_t)-1; /* failure */ - return (ENOTSUP); + return ENOTSUP; default: panic("spec_blktooff type"); } /* NOTREACHED */ - return (0); + return 0; } /* Offtoblk derives logical block number from file offset */ @@ -2624,26 +2848,26 @@ spec_offtoblk(struct vnop_offtoblk_args *ap) switch (vp->v_type) { case VCHR: *ap->a_lblkno = (daddr64_t)-1; /* failure */ - return (ENOTSUP); + return ENOTSUP; case VBLK: printf("spec_offtoblk: not implemented for VBLK\n"); *ap->a_lblkno = (daddr64_t)-1; /* failure */ - return (ENOTSUP); + return ENOTSUP; default: panic("spec_offtoblk type"); } /* NOTREACHED */ - return (0); + return 0; } static void filt_specdetach(struct knote *kn); static int filt_specevent(struct knote *kn, long hint); -static int filt_spectouch(struct knote *kn, struct kevent_internal_s *kev); -static int filt_specprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); -static unsigned filt_specpeek(struct knote *kn); +static int filt_spectouch(struct knote *kn, struct kevent_qos_s *kev); +static int filt_specprocess(struct knote *kn, struct kevent_qos_s *kev); +static int filt_specpeek(struct knote *kn); SECURITY_READ_ONLY_EARLY(struct filterops) spec_filtops = { .f_isfd = 1, @@ -2682,7 +2906,7 @@ spec_knote_select_and_link(struct knote *kn) ctx = vfs_context_current(); vp = (vnode_t)kn->kn_fp->f_fglob->fg_data; - int error = vnode_getwithvid(vp, kn->kn_hookid); + int error = vnode_getwithvid(vp, vnode_vid(vp)); if (error != 0) { knote_set_error(kn, ENOENT); return 0; @@ -2691,7 +2915,7 @@ spec_knote_select_and_link(struct knote *kn) /* * This function may be called many times 
to link or re-link the
	 * underlying vnode to the kqueue. If we've already linked the two,
-	 * we will have a valid kn_hook_data which ties us to the underlying
+	 * we will have a valid kn_hook_waitqid which ties us to the underlying
	 * device's waitq via the waitq's prepost table object. However,
	 * devices can abort any select action by calling selthreadclear().
	 * This is OK because the table object will be invalidated by the
@@ -2707,21 +2931,43 @@ spec_knote_select_and_link(struct knote *kn)
	rlptr = (void *)&rsvd_arg;

	/*
-	 * Trick selrecord() into hooking kqueue's wait queue set
-	 * set into device's selinfo wait queue
+	 * Trick selrecord() into hooking kqueue's wait queue set into the device's
+	 * selinfo wait queue.
	 */
	old_wqs = uth->uu_wqset;
	uth->uu_wqset = &(knote_get_kq(kn)->kq_wqs);
+
+	/*
+	 * Be sure that the waitq set is linked
+	 * before calling select to avoid possible
+	 * allocation under spinlocks.
+	 */
+	waitq_set_lazy_init_link(uth->uu_wqset);
+
+	/*
+	 * Now these are the laws of VNOP_SELECT, as old and as true as the sky,
+	 * And the device that shall keep it may prosper, but the device that shall
+	 * break it must receive ENODEV:
+	 *
+	 * 1. Take a lock to protect against other selects on the same vnode.
+	 * 2. Return 1 if data is ready to be read.
+	 * 3. Return 0 and call `selrecord` on a handy `selinfo` structure if there
+	 *    is no data.
+	 * 4. Call `selwakeup` when the vnode has an active `selrecord` and data
+	 *    can be read or written (depending on the seltype).
+	 * 5. If there's a `selrecord` and no corresponding `selwakeup`, but the
+	 *    vnode is going away, call `selthreadclear`.
+	 */
	selres = VNOP_SELECT(vp, knote_get_seltype(kn), 0, rlptr, ctx);
	uth->uu_wqset = old_wqs;

	/*
-	 * make sure to cleanup the reserved link - this guards against
+	 * Make sure to clean up the reserved link - this guards against
	 * drivers that may not actually call selrecord().
	 */
	waitq_link_release(rsvd);
	if (rsvd != rsvd_arg) {
-		/* the driver / handler called selrecord() */
+		/* The driver / handler called selrecord() */
		struct waitq *wq;

		memcpy(&wq, rlptr, sizeof(void *));

@@ -2739,15 +2985,23 @@ spec_knote_select_and_link(struct knote *kn)
		 * the table object's ID to us. It will also set the
		 * waitq_prepost_id field within the waitq structure.
		 *
-		 * We can just overwrite kn_hook_data because it's simply a
+		 * We can just overwrite kn_hook_waitqid because it's simply a
		 * table ID used to grab a reference when needed.
		 *
		 * We have a reference on the vnode, so we know that the
		 * device won't go away while we get this ID.
+		 *
+		 * Note: on 32bit this field is 32bit only.
		 */
-		kn->kn_hook_data = waitq_get_prepost_id(wq);
-	} else {
-		assert(selres != 0);
+		kn->kn_hook_waitqid = (typeof(kn->kn_hook_waitqid))waitq_get_prepost_id(wq);
+	} else if (selres == 0) {
+		/*
+		 * The device indicated that there's no data to read, but didn't call
+		 * `selrecord`. Nothing will be notified of changes to this vnode, so
+		 * return an error back to user space, to make it clear that the knote
+		 * is not attached.
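+		 *
+		 * For illustration, a select method that follows the laws above might
+		 * look like this sketch (the mydev_* names are hypothetical):
+		 *
+		 *	int mydev_select(dev_t dev, int which, void *wql, proc_t p)
+		 *	{
+		 *		struct mydev_state *s = mydev_state_for(dev);
+		 *		int ready = 0;
+		 *
+		 *		lck_mtx_lock(s->lock);			// law 1
+		 *		if (which == FREAD && s->bytes_avail > 0) {
+		 *			ready = 1;			// law 2
+		 *		} else {
+		 *			selrecord(p, &s->rsel, wql);	// law 3
+		 *		}
+		 *		lck_mtx_unlock(s->lock);
+		 *		return ready;
+		 *	}
+		 *
+		 * with selwakeup(&s->rsel) once data arrives (law 4) and
+		 * selthreadclear(&s->rsel) when the device goes away (law 5).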
+ */ + knote_set_error(kn, ENODEV); } vnode_put(vp); @@ -2755,21 +3009,33 @@ spec_knote_select_and_link(struct knote *kn) return selres; } -static void filt_spec_common(struct knote *kn, int selres) +static int +filt_spec_common(struct knote *kn, struct kevent_qos_s *kev, int selres) { + int64_t data; + int ret; + if (kn->kn_vnode_use_ofst) { if (kn->kn_fp->f_fglob->fg_offset >= (uint32_t)selres) { - kn->kn_data = 0; + data = 0; } else { - kn->kn_data = ((uint32_t)selres) - kn->kn_fp->f_fglob->fg_offset; + data = ((uint32_t)selres) - kn->kn_fp->f_fglob->fg_offset; } } else { - kn->kn_data = selres; + data = selres; + } + + ret = data >= knote_low_watermark(kn); + + if (ret && kev) { + knote_fill_kevent(kn, kev, data); } + + return ret; } static int -filt_specattach(struct knote *kn, __unused struct kevent_internal_s *kev) +filt_specattach(struct knote *kn, __unused struct kevent_qos_s *kev) { vnode_t vp; dev_t dev; @@ -2805,8 +3071,7 @@ filt_specattach(struct knote *kn, __unused struct kevent_internal_s *kev) } kn->kn_filtid = EVFILTID_SPEC; - kn->kn_hook_data = 0; - kn->kn_hookid = vnode_vid(vp); + kn->kn_hook_waitqid = 0; knote_markstayactive(kn); return spec_knote_select_and_link(kn); @@ -2821,7 +3086,7 @@ filt_specdetach(struct knote *kn) * This is potentially tricky: the device's selinfo waitq that was * tricked into being part of this knote's waitq set may not be a part * of any other set, and the device itself may have revoked the memory - * in which the waitq was held. We use the knote's kn_hook_data field + * in which the waitq was held. We use the knote's kn_hook_waitqid field * to keep the ID of the waitq's prepost table object. This * object keeps a pointer back to the waitq, and gives us a safe way * to decouple the dereferencing of driver allocated memory: if the @@ -2829,9 +3094,9 @@ filt_specdetach(struct knote *kn) * object will be invalidated. The waitq details are handled in the * waitq API invoked here. */ - if (kn->kn_hook_data) { - waitq_unlink_by_prepost_id(kn->kn_hook_data, &(knote_get_kq(kn)->kq_wqs)); - kn->kn_hook_data = 0; + if (kn->kn_hook_waitqid) { + waitq_unlink_by_prepost_id(kn->kn_hook_waitqid, &(knote_get_kq(kn)->kq_wqs)); + kn->kn_hook_waitqid = 0; } } @@ -2846,12 +3111,10 @@ filt_specevent(struct knote *kn, __unused long hint) } static int -filt_spectouch(struct knote *kn, struct kevent_internal_s *kev) +filt_spectouch(struct knote *kn, struct kevent_qos_s *kev) { kn->kn_sdata = kev->data; kn->kn_sfflags = kev->fflags; - if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) - kn->kn_udata = kev->udata; if (kev->flags & EV_ENABLE) { return spec_knote_select_and_link(kn); @@ -2861,9 +3124,8 @@ filt_spectouch(struct knote *kn, struct kevent_internal_s *kev) } static int -filt_specprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev) +filt_specprocess(struct knote *kn, struct kevent_qos_s *kev) { -#pragma unused(data) vnode_t vp; uthread_t uth; vfs_context_t ctx; @@ -2875,42 +3137,26 @@ filt_specprocess(struct knote *kn, struct filt_process_s *data, struct kevent_in ctx = vfs_context_current(); vp = (vnode_t)kn->kn_fp->f_fglob->fg_data; - /* FIXME JMM - locking against touches? 
*/ - - error = vnode_getwithvid(vp, kn->kn_hookid); + error = vnode_getwithvid(vp, vnode_vid(vp)); if (error != 0) { kn->kn_flags |= (EV_EOF | EV_ONESHOT); - *kev = kn->kn_kevent; + knote_fill_kevent(kn, kev, 0); return 1; } selres = spec_knote_select_and_link(kn); - filt_spec_common(kn, selres); + res = filt_spec_common(kn, kev, selres); vnode_put(vp); - res = ((kn->kn_sfflags & NOTE_LOWAT) != 0) ? - (kn->kn_data >= kn->kn_sdata) : kn->kn_data; - - if (res) { - *kev = kn->kn_kevent; - if (kn->kn_flags & EV_CLEAR) { - kn->kn_fflags = 0; - kn->kn_data = 0; - } - } - return res; } -static unsigned +static int filt_specpeek(struct knote *kn) { int selres = 0; selres = spec_knote_select_and_link(kn); - filt_spec_common(kn, selres); - - return kn->kn_data; + return filt_spec_common(kn, NULL, selres); } -
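For context, the knote path above (filt_specattach -> spec_knote_select_and_link
-> filt_spec_common) is what services a user-space kevent registered on a device
vnode. A minimal consumer exercising the low-watermark handling might look like
the following sketch (/dev/example is a hypothetical character device):

	#include <sys/types.h>
	#include <sys/event.h>
	#include <sys/time.h>
	#include <fcntl.h>
	#include <unistd.h>

	int
	main(void)
	{
		int fd = open("/dev/example", O_RDONLY);	/* hypothetical device */
		int kq = kqueue();
		struct kevent kev;

		/*
		 * NOTE_LOWAT puts the watermark in `data`; the knote only fires
		 * once filt_spec_common() derives at least this many readable
		 * bytes from the driver's select result.
		 */
		EV_SET(&kev, fd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
		kevent(kq, &kev, 1, NULL, 0, NULL);

		/* Blocks until the device's selwakeup() path reports readiness. */
		kevent(kq, NULL, 0, &kev, 1, NULL);

		close(kq);
		close(fd);
		return 0;
	}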