/*
 * Per-I/O size cap applied to throttled (low-priority) I/O.
 */
#if CONFIG_EMBEDDED
#define THROTTLE_MAX_IOSIZE (hard_throttle_max_iosize)
#else
/*
 * On non-embedded configs: while any legacy-throttled processes exist
 * (throttle_legacy_process_count != 0), fall back to the legacy cap.
 */
#define THROTTLE_MAX_IOSIZE (throttle_legacy_process_count == 0 ? hard_throttle_max_iosize : legacy_hard_throttle_max_iosize)
#endif


/* Tunables for the two caps above, exported under debug.* (read-write). */
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_max_iosize, CTLFLAG_RW | CTLFLAG_LOCKED, &hard_throttle_max_iosize, 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_legacy_throttle_max_iosize, CTLFLAG_RW | CTLFLAG_LOCKED, &legacy_hard_throttle_max_iosize, 0, "");
+
+
+void
+cluster_init(void) {
+ /*
+ * allocate lock group attribute and group
+ */
+ cl_mtx_grp_attr = lck_grp_attr_alloc_init();
+ cl_mtx_grp = lck_grp_alloc_init("cluster I/O", cl_mtx_grp_attr);
+
+ /*
+ * allocate the lock attribute
+ */
+ cl_mtx_attr = lck_attr_alloc_init();
+
+ cl_transaction_mtxp = lck_mtx_alloc_init(cl_mtx_grp, cl_mtx_attr);
+
+ if (cl_transaction_mtxp == NULL)
+ panic("cluster_init: failed to allocate cl_transaction_mtxp");
+}
+
+
+uint32_t
+cluster_max_io_size(mount_t mp, int type)
+{
+ uint32_t max_io_size;
+ uint32_t segcnt;
+ uint32_t maxcnt;
+
+ switch(type) {
+
+ case CL_READ:
+ segcnt = mp->mnt_segreadcnt;
+ maxcnt = mp->mnt_maxreadcnt;
+ break;
+ case CL_WRITE:
+ segcnt = mp->mnt_segwritecnt;
+ maxcnt = mp->mnt_maxwritecnt;
+ break;
+ default:
+ segcnt = min(mp->mnt_segreadcnt, mp->mnt_segwritecnt);
+ maxcnt = min(mp->mnt_maxreadcnt, mp->mnt_maxwritecnt);
+ break;
+ }
+ if (segcnt > MAX_UPL_SIZE) {
+ /*
+ * don't allow a size beyond the max UPL size we can create
+ */
+ segcnt = MAX_UPL_SIZE;
+ }
+ max_io_size = min((segcnt * PAGE_SIZE), maxcnt);
+
+ if (max_io_size < (MAX_UPL_TRANSFER * PAGE_SIZE)) {
+ /*
+ * don't allow a size smaller than the old fixed limit
+ */
+ max_io_size = (MAX_UPL_TRANSFER * PAGE_SIZE);
+ } else {
+ /*
+ * make sure the size specified is a multiple of PAGE_SIZE
+ */
+ max_io_size &= ~PAGE_MASK;
+ }
+ return (max_io_size);
+}
+
+
+
+
#define CLW_ALLOCATE		0x01	/* create the write-behind context if it doesn't exist yet */
#define CLW_RETURNLOCKED	0x02	/* return with the context's cl_lockw mutex held */
#define CLW_IONOCACHE  		0x04
#define CLW_IOPASSIVE	0x08
+
+/*
+ * if the read ahead context doesn't yet exist,
+ * allocate and initialize it...
+ * the vnode lock serializes multiple callers
+ * during the actual assignment... first one
+ * to grab the lock wins... the other callers
+ * will release the now unnecessary storage
+ *
+ * once the context is present, try to grab (but don't block on)
+ * the lock associated with it... if someone
+ * else currently owns it, than the read
+ * will run without read-ahead. this allows
+ * multiple readers to run in parallel and
+ * since there's only 1 read ahead context,
+ * there's no real loss in only allowing 1
+ * reader to have read-ahead enabled.
+ */
+static struct cl_readahead *
+cluster_get_rap(vnode_t vp)
+{
+ struct ubc_info *ubc;
+ struct cl_readahead *rap;
+
+ ubc = vp->v_ubcinfo;
+
+ if ((rap = ubc->cl_rahead) == NULL) {
+ MALLOC_ZONE(rap, struct cl_readahead *, sizeof *rap, M_CLRDAHEAD, M_WAITOK);
+
+ bzero(rap, sizeof *rap);
+ rap->cl_lastr = -1;
+ lck_mtx_init(&rap->cl_lockr, cl_mtx_grp, cl_mtx_attr);
+
+ vnode_lock(vp);
+
+ if (ubc->cl_rahead == NULL)
+ ubc->cl_rahead = rap;
+ else {
+ lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
+ FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
+ rap = ubc->cl_rahead;
+ }
+ vnode_unlock(vp);
+ }
+ if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE)
+ return(rap);
+
+ return ((struct cl_readahead *)NULL);
+}
+
+
+/*
+ * if the write behind context doesn't yet exist,
+ * and CLW_ALLOCATE is specified, allocate and initialize it...
+ * the vnode lock serializes multiple callers
+ * during the actual assignment... first one
+ * to grab the lock wins... the other callers
+ * will release the now unnecessary storage
+ *
+ * if CLW_RETURNLOCKED is set, grab (blocking if necessary)
+ * the lock associated with the write behind context before
+ * returning
+ */
+
+static struct cl_writebehind *
+cluster_get_wbp(vnode_t vp, int flags)
+{
+ struct ubc_info *ubc;
+ struct cl_writebehind *wbp;
+
+ ubc = vp->v_ubcinfo;
+
+ if ((wbp = ubc->cl_wbehind) == NULL) {
+
+ if ( !(flags & CLW_ALLOCATE))
+ return ((struct cl_writebehind *)NULL);
+
+ MALLOC_ZONE(wbp, struct cl_writebehind *, sizeof *wbp, M_CLWRBEHIND, M_WAITOK);
+
+ bzero(wbp, sizeof *wbp);
+ lck_mtx_init(&wbp->cl_lockw, cl_mtx_grp, cl_mtx_attr);
+
+ vnode_lock(vp);
+
+ if (ubc->cl_wbehind == NULL)
+ ubc->cl_wbehind = wbp;
+ else {
+ lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
+ FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
+ wbp = ubc->cl_wbehind;
+ }
+ vnode_unlock(vp);
+ }
+ if (flags & CLW_RETURNLOCKED)
+ lck_mtx_lock(&wbp->cl_lockw);
+
+ return (wbp);
+}
+
+
+static void
+cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg)
+{
+ struct cl_writebehind *wbp;
+
+ if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {
+
+ if (wbp->cl_number) {
+ lck_mtx_lock(&wbp->cl_lockw);
+
+ cluster_try_push(wbp, vp, newEOF, PUSH_ALL | PUSH_SYNC, 0, callback, callback_arg);
+
+ lck_mtx_unlock(&wbp->cl_lockw);
+ }
+ }
+}
+
+
+static int
+cluster_io_present_in_BC(vnode_t vp, off_t f_offset)
+{
+ daddr64_t blkno;
+ size_t io_size;
+ int (*bootcache_check_fn)(dev_t device, u_int64_t blkno) = bootcache_contains_block;
+
+ if (bootcache_check_fn) {
+ if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ, NULL))
+ return(0);
+
+ if (io_size == 0)
+ return (0);
+
+ if (bootcache_check_fn(vp->v_mount->mnt_devvp->v_rdev, blkno))
+ return(1);
+ }
+ return(0);
+}
+