+ if ( (error = copyin(argp, (caddr_t)&ra_struct, sizeof(ra_struct))) )
+ goto outdrop;
+ if ( (error = vnode_getwithref(vp)) == 0 ) {
+ error = VNOP_IOCTL(vp, F_RDADVISE, (caddr_t)&ra_struct, 0, &context);
+
+ (void)vnode_put(vp);
+ }
+ goto outdrop;
+ }
+
+ case F_FLUSH_DATA:
+ /*
+  * Push any dirty cluster-layer buffers for this vnode to disk.
+  * Requires a vnode-backed fd; fd lock is dropped before the I/O.
+  */
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ /* Take an iocount; skip the push entirely if the vnode is being reclaimed. */
+ if ( (error = vnode_getwithref(vp)) == 0 ) {
+ error = cluster_push(vp, 0);
+
+ (void)vnode_put(vp);
+ }
+ goto outdrop;
+
+ case F_LOG2PHYS:
+ case F_LOG2PHYS_EXT: {
+ /*
+  * Map a logical file offset to its physical device offset.
+  * F_LOG2PHYS uses the fd's current offset; F_LOG2PHYS_EXT takes the
+  * offset and a requested contiguous byte count from userland and
+  * reports back how many contiguous bytes are allocated on-device.
+  */
+ struct log2phys l2p_struct; /* structure for allocate command */
+ int devBlockSize;
+
+ off_t file_offset = 0;
+ size_t a_size = 0;
+ size_t run = 0;
+
+ if (uap->cmd == F_LOG2PHYS_EXT) {
+ error = copyin(argp, (caddr_t)&l2p_struct, sizeof(l2p_struct));
+ if (error)
+ goto out;
+ file_offset = l2p_struct.l2p_devoffset;
+ } else {
+ file_offset = fp->f_offset;
+ }
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+ if ( (error = vnode_getwithref(vp)) ) {
+ goto outdrop;
+ }
+ /* Convert the byte offset to a logical block, then back to the block's
+  * starting offset so we can compute the intra-block remainder below. */
+ error = VNOP_OFFTOBLK(vp, file_offset, &lbn);
+ if (error) {
+ (void)vnode_put(vp);
+ goto outdrop;
+ }
+ error = VNOP_BLKTOOFF(vp, lbn, &offset);
+ if (error) {
+ (void)vnode_put(vp);
+ goto outdrop;
+ }
+ devBlockSize = vfs_devblocksize(vnode_mount(vp));
+ if (uap->cmd == F_LOG2PHYS_EXT) {
+#if defined(__LP64__)
+ a_size = l2p_struct.l2p_contigbytes;
+#else
+ if ((l2p_struct.l2p_contigbytes > SIZE_MAX) || (l2p_struct.l2p_contigbytes < 0)) {
+ /* size_t is 32-bit on a 32-bit kernel, therefore
+ * assigning l2p_contigbytes to a_size may have
+ * caused integer overflow. We, therefore, return
+ * an error here instead of calculating incorrect
+ * value.
+ */
+ printf ("fcntl: F_LOG2PHYS_EXT: l2p_contigbytes=%lld will overflow, returning error\n", l2p_struct.l2p_contigbytes);
+ error = EFBIG;
+ /* Fix: drop the iocount taken by vnode_getwithref() above;
+ * this path previously leaked the vnode reference. */
+ (void)vnode_put(vp);
+ goto outdrop;
+ } else {
+ a_size = l2p_struct.l2p_contigbytes;
+ }
+#endif
+ } else {
+ a_size = devBlockSize;
+ }
+
+ error = VNOP_BLOCKMAP(vp, offset, a_size, &bn, &run, NULL, 0, &context);
+
+ (void)vnode_put(vp);
+
+ if (!error) {
+ l2p_struct.l2p_flags = 0; /* for now */
+ if (uap->cmd == F_LOG2PHYS_EXT) {
+ /* Report contiguity from the requested offset, not the block start. */
+ l2p_struct.l2p_contigbytes = run - (file_offset - offset);
+ } else {
+ l2p_struct.l2p_contigbytes = 0; /* for now */
+ }
+
+ /*
+ * The block number being -1 suggests that the file offset is not backed
+ * by any real blocks on-disk. As a result, just let it be passed back up wholesale.
+ */
+ if (bn == -1) {
+ /* Don't multiply it by the block size */
+ l2p_struct.l2p_devoffset = bn;
+ }
+ else {
+ l2p_struct.l2p_devoffset = bn * devBlockSize;
+ l2p_struct.l2p_devoffset += file_offset - offset;
+ }
+ error = copyout((caddr_t)&l2p_struct, argp, sizeof(l2p_struct));
+ }
+ goto outdrop;
+ }
+ case F_GETPATH: {
+ /*
+  * Return the full path of the file backing this fd.
+  * Copies out pathlen bytes (vn_getpath updates pathlen to the
+  * actual length, including the NUL terminator).
+  */
+ char *pathbufp;
+ int pathlen;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ pathlen = MAXPATHLEN;
+ MALLOC(pathbufp, char *, pathlen, M_TEMP, M_WAITOK);
+ if (pathbufp == NULL) {
+ error = ENOMEM;
+ goto outdrop;
+ }
+ if ( (error = vnode_getwithref(vp)) == 0 ) {
+ error = vn_getpath(vp, pathbufp, &pathlen);
+ (void)vnode_put(vp);
+
+ if (error == 0)
+ error = copyout((caddr_t)pathbufp, argp, pathlen);
+ }
+ FREE(pathbufp, M_TEMP);
+ goto outdrop;
+ }
+
+ case F_PATHPKG_CHECK: {
+ /*
+  * Check whether the user-supplied path names a "package" relative
+  * to this vnode; result is returned through retval.
+  */
+ char *pathbufp;
+ size_t pathlen;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ pathlen = MAXPATHLEN;
+ /* NOTE(review): kalloc() result is not NULL-checked before copyinstr()
+  * writes into it — confirm kalloc cannot fail here, or add a check. */
+ pathbufp = kalloc(MAXPATHLEN);
+
+ if ( (error = copyinstr(argp, pathbufp, MAXPATHLEN, &pathlen)) == 0 ) {
+ if ( (error = vnode_getwithref(vp)) == 0 ) {
+ AUDIT_ARG(text, pathbufp);
+ error = vn_path_package_check(vp, pathbufp, pathlen, retval);
+
+ (void)vnode_put(vp);
+ }
+ }
+ kfree(pathbufp, MAXPATHLEN);
+ goto outdrop;
+ }
+
+ case F_CHKCLEAN: // used by regression tests to see if all dirty pages got cleaned by fsync()
+ case F_FULLFSYNC: // fsync + flush the journal + DKIOCSYNCHRONIZECACHE
+ case F_FREEZE_FS: // freeze all other fs operations for the fs of this fd
+ case F_THAW_FS: { // thaw all frozen fs operations for the fs of this fd
+ /* All four commands are passed straight to the filesystem via
+  * VNOP_IOCTL with no argument data. */
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ( (error = vnode_getwithref(vp)) == 0 ) {
+ error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)NULL, 0, &context);
+
+ (void)vnode_put(vp);
+ }
+ break;
+ }
+
+ /*
+ * SPI (private) for opening a file starting from a dir fd
+ */
+ case F_OPENFROM: {
+ /*
+  * Open a file by a path interpreted relative to this (directory) fd.
+  * The fd must be a readable vnode fd referring to a directory.
+  */
+ struct user_fopenfrom fopen;
+ struct vnode_attr va;
+ struct nameidata nd;
+ int cmode;
+
+ /* Check if this isn't a valid file descriptor */
+ if ((fp->f_type != DTYPE_VNODE) ||
+ (fp->f_flag & FREAD) == 0) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only valid for directories */
+ if (vp->v_type != VDIR) {
+ vnode_put(vp);
+ error = ENOTDIR;
+ goto outdrop;
+ }
+
+ /* Get flags, mode and pathname arguments. */
+ if (IS_64BIT_PROCESS(p)) {
+ error = copyin(argp, &fopen, sizeof(fopen));
+ } else {
+ /* 32-bit callers pass the narrower layout; widen it into fopen. */
+ struct user32_fopenfrom fopen32;
+
+ error = copyin(argp, &fopen32, sizeof(fopen32));
+ fopen.o_flags = fopen32.o_flags;
+ fopen.o_mode = fopen32.o_mode;
+ fopen.o_pathname = CAST_USER_ADDR_T(fopen32.o_pathname);
+ }
+ if (error) {
+ vnode_put(vp);
+ goto outdrop;
+ }
+ AUDIT_ARG(fflags, fopen.o_flags);
+ AUDIT_ARG(mode, fopen.o_mode);
+ VATTR_INIT(&va);
+ /* Mask off all but regular access permissions */
+ cmode = ((fopen.o_mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
+ VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
+
+ /* Start the lookup relative to the file descriptor's vnode. */
+ NDINIT(&nd, LOOKUP, OP_OPEN, USEDVP | FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
+ fopen.o_pathname, &context);
+ nd.ni_dvp = vp;
+
+ error = open1(&context, &nd, fopen.o_flags, &va,
+ fileproc_alloc_init, NULL, retval);
+
+ vnode_put(vp);
+ break;
+ }
+ /*
+ * SPI (private) for unlinking a file starting from a dir fd
+ */
+ case F_UNLINKFROM: {
+ /*
+  * Unlink a file by a path interpreted relative to this (directory) fd.
+  * Mirrors F_OPENFROM: readable vnode fd, must be a directory.
+  */
+ struct nameidata nd;
+ user_addr_t pathname;
+
+ /* Check if this isn't a valid file descriptor */
+ if ((fp->f_type != DTYPE_VNODE) ||
+ (fp->f_flag & FREAD) == 0) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only valid for directories */
+ if (vp->v_type != VDIR) {
+ vnode_put(vp);
+ error = ENOTDIR;
+ goto outdrop;
+ }
+
+ /* Get flags, mode and pathname arguments. */
+ if (IS_64BIT_PROCESS(p)) {
+ pathname = (user_addr_t)argp;
+ } else {
+ pathname = CAST_USER_ADDR_T(argp);
+ }
+
+ /* Start the lookup relative to the file descriptor's vnode. */
+ NDINIT(&nd, DELETE, OP_UNLINK, USEDVP | AUDITVNPATH1, UIO_USERSPACE,
+ pathname, &context);
+ nd.ni_dvp = vp;
+
+ error = unlink1(&context, &nd, 0);
+
+ vnode_put(vp);
+ break;
+
+ }
+
+ case F_ADDSIGS:
+ case F_ADDFILESIGS:
+ {
+ /*
+  * Register a code-signature blob for this vnode.
+  * F_ADDSIGS copies the blob in from user memory at fs_blob_start;
+  * F_ADDFILESIGS reads it from the file itself at
+  * fs_file_start + fs_blob_start.
+  */
+ struct user_fsignatures fs;
+ kern_return_t kr;
+ vm_offset_t kernel_blob_addr;
+ vm_size_t kernel_blob_size;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+ error = vnode_getwithref(vp);
+ if (error)
+ goto outdrop;
+
+ if (IS_64BIT_PROCESS(p)) {
+ error = copyin(argp, &fs, sizeof (fs));
+ } else {
+ /* 32-bit callers pass the narrower layout; widen it into fs. */
+ struct user32_fsignatures fs32;
+
+ error = copyin(argp, &fs32, sizeof (fs32));
+ fs.fs_file_start = fs32.fs_file_start;
+ fs.fs_blob_start = CAST_USER_ADDR_T(fs32.fs_blob_start);
+ fs.fs_blob_size = fs32.fs_blob_size;
+ }
+
+ if (error) {
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ /* A blob is already registered at this offset: treat as success
+  * (error is still 0 here) and skip the add. */
+ if(ubc_cs_blob_get(vp, CPU_TYPE_ANY, fs.fs_file_start))
+ {
+ vnode_put(vp);
+ goto outdrop;
+ }
+/*
+ * An arbitrary limit, to prevent someone from mapping in a 20GB blob. This should cover
+ * our use cases for the immediate future, but note that at the time of this commit, some
+ * platforms are nearing 2MB blob sizes (with a prior soft limit of 2.5MB).
+ *
+ * We should consider how we can manage this more effectively; the above means that some
+ * platforms are using megabytes of memory for signing data; it merely hasn't crossed the
+ * threshold considered ridiculous at the time of this change.
+ */
+#define CS_MAX_BLOB_SIZE (10ULL * 1024ULL * 1024ULL)
+ if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
+ error = E2BIG;
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size);
+ kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size);
+ if (kr != KERN_SUCCESS) {
+ error = ENOMEM;
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ if(uap->cmd == F_ADDSIGS) {
+ error = copyin(fs.fs_blob_start,
+ (void *) kernel_blob_addr,
+ kernel_blob_size);
+ } else /* F_ADDFILESIGS */ {
+ error = vn_rdwr(UIO_READ,
+ vp,
+ (caddr_t) kernel_blob_addr,
+ kernel_blob_size,
+ fs.fs_file_start + fs.fs_blob_start,
+ UIO_SYSSPACE,
+ 0,
+ kauth_cred_get(),
+ 0,
+ p);
+ }
+
+ if (error) {
+ ubc_cs_blob_deallocate(kernel_blob_addr,
+ kernel_blob_size);
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ error = ubc_cs_blob_add(
+ vp,
+ CPU_TYPE_ANY, /* not for a specific architecture */
+ fs.fs_file_start,
+ kernel_blob_addr,
+ kernel_blob_size);
+ if (error) {
+ ubc_cs_blob_deallocate(kernel_blob_addr,
+ kernel_blob_size);
+ } else {
+ /* ubc_blob_add() has consumed "kernel_blob_addr" */
+#if CHECK_CS_VALIDATION_BITMAP
+ ubc_cs_validation_bitmap_allocate( vp );
+#endif
+ }
+
+ (void) vnode_put(vp);
+ break;
+ }
+ case F_FINDSIGS: {
+ /*
+  * Ask the MAC layer to locate code signatures in the Mach-O at the
+  * given file offset. Unsupported on SECURE_KERNEL builds.
+  */
+#ifdef SECURE_KERNEL
+ error = ENOTSUP;
+#else /* !SECURE_KERNEL */
+ off_t offsetMacho;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+ error = vnode_getwithref(vp);
+ if (error)
+ goto outdrop;
+
+ error = copyin(argp, &offsetMacho, sizeof(offsetMacho));
+ if (error) {
+ (void)vnode_put(vp);
+ goto outdrop;
+ }
+
+#if CONFIG_MACF
+ error = mac_vnode_find_sigs(p, vp, offsetMacho);
+#else
+ error = EPERM;
+#endif
+ if (error) {
+ (void)vnode_put(vp);
+ goto outdrop;
+ }
+ /* Fix: the success path must also drop the iocount taken by
+  * vnode_getwithref() above (compare F_ADDSIGS, which puts the
+  * vnode before its break); previously it was leaked. */
+ (void)vnode_put(vp);
+#endif /* SECURE_KERNEL */
+ break;
+ }
+#if CONFIG_PROTECT
+ case F_GETPROTECTIONCLASS: {
+ /* Return the content-protection class of this vnode via retval. */
+ int class = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ error = cp_vnode_getclass (vp, &class);
+ if (error == 0) {
+ *retval = class;
+ }
+
+ vnode_put(vp);
+ break;
+ }
+
+ case F_SETPROTECTIONCLASS: {
+ /* Set the content-protection class; requires write access to the file. */
+ /* tmp must be a valid PROTECTION_CLASS_* */
+ tmp = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+ error = cp_vnode_setclass (vp, tmp);
+ vnode_put(vp);
+ break;
+ }
+
+ case F_TRANSCODEKEY: {
+ /*
+  * Transcode the file's wrapped content-protection key into a backup
+  * form and copy it out to the caller; retval gets the key length.
+  */
+
+ char *backup_keyp = NULL;
+ unsigned backup_key_len = CP_MAX_WRAPPEDKEYSIZE;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ MALLOC(backup_keyp, char *, backup_key_len, M_TEMP, M_WAITOK);
+ if (backup_keyp == NULL) {
+ /* Fix: drop the iocount taken by vnode_getwithref() above;
+  * this path previously leaked the vnode reference. */
+ vnode_put(vp);
+ error = ENOMEM;
+ goto outdrop;
+ }
+
+ error = cp_vnode_transcode (vp, backup_keyp, &backup_key_len);
+ vnode_put(vp);
+
+ if (error == 0) {
+ error = copyout((caddr_t)backup_keyp, argp, backup_key_len);
+ *retval = backup_key_len;
+ }
+
+ FREE(backup_keyp, M_TEMP);
+
+ break;
+ }
+
+ case F_GETPROTECTIONLEVEL: {
+ /* Return the content-protection major version of the fd's root volume. */
+ uint32_t cp_version = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * if cp_get_major_vers fails, error will be set to proper errno
+ * and cp_version will still be 0.
+ */
+
+ error = cp_get_root_major_vers (vp, &cp_version);
+ *retval = cp_version;
+
+ vnode_put (vp);
+ break;
+ }
+
+ case F_GETDEFAULTPROTLEVEL: {
+ /* Return the default content-protection level for this vnode. */
+ uint32_t cp_default = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * if cp_get_major_vers fails, error will be set to proper errno
+ * and cp_version will still be 0.
+ */
+
+ error = cp_get_default_level(vp, &cp_default);
+ *retval = cp_default;
+
+ vnode_put (vp);
+ break;
+ }
+
+
+#endif /* CONFIG_PROTECT */
+
+ case F_MOVEDATAEXTENTS: {
+ /*
+  * Exchange the on-disk data extents between this fd and a second fd
+  * (passed in uap->arg). HFS+-only SPI: both fds must be regular
+  * files on the same HFS mount, distinct, and writable.
+  * Cleanup ordering is critical: every failure path after the
+  * vnode_getwithref() calls must put both vnodes and fp_drop fd2.
+  */
+ struct fileproc *fp2 = NULL;
+ struct vnode *src_vp = NULLVP;
+ struct vnode *dst_vp = NULLVP;
+ /* We need to grab the 2nd FD out of the argments before moving on. */
+ int fd2 = CAST_DOWN_EXPLICIT(int32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ /* For now, special case HFS+ only, since this is SPI. */
+ src_vp = (struct vnode *)fp->f_data;
+ if (src_vp->v_tag != VT_HFS) {
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * Get the references before we start acquiring iocounts on the vnodes,
+ * while we still hold the proc fd lock
+ */
+ if ( (error = fp_lookup(p, fd2, &fp2, 1)) ) {
+ error = EBADF;
+ goto out;
+ }
+ if (fp2->f_type != DTYPE_VNODE) {
+ fp_drop(p, fd2, fp2, 1);
+ error = EBADF;
+ goto out;
+ }
+ dst_vp = (struct vnode *)fp2->f_data;
+ if (dst_vp->v_tag != VT_HFS) {
+ fp_drop(p, fd2, fp2, 1);
+ error = EINVAL;
+ goto out;
+ }
+
+#if CONFIG_MACF
+ /* Re-do MAC checks against the new FD, pass in a fake argument */
+ error = mac_file_check_fcntl(proc_ucred(p), fp2->f_fglob, uap->cmd, 0);
+ if (error) {
+ fp_drop(p, fd2, fp2, 1);
+ goto out;
+ }
+#endif
+ /* Audit the 2nd FD */
+ AUDIT_ARG(fd, fd2);
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(src_vp)) {
+ fp_drop(p, fd2, fp2, 0);
+ error = ENOENT;
+ goto outdrop;
+ }
+ if (vnode_getwithref(dst_vp)) {
+ vnode_put (src_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * Basic asserts; validate they are not the same and that
+ * both live on the same filesystem.
+ */
+ if (dst_vp == src_vp) {
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ error = EINVAL;
+ goto outdrop;
+ }
+
+ if (dst_vp->v_mount != src_vp->v_mount) {
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ error = EXDEV;
+ goto outdrop;
+ }
+
+ /* Now we have a legit pair of FDs. Go to work */
+
+ /* Now check for write access to the target files */
+ if(vnode_authorize(src_vp, NULLVP,
+ (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
+ vnode_put(src_vp);
+ vnode_put(dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ if(vnode_authorize(dst_vp, NULLVP,
+ (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
+ vnode_put(src_vp);
+ vnode_put(dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* Verify that both vps point to files and not directories */
+ if ( !vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) {
+ error = EINVAL;
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ goto outdrop;
+ }
+
+ /*
+ * The exchangedata syscall handler passes in 0 for the flags to VNOP_EXCHANGE.
+ * We'll pass in our special bit indicating that the new behavior is expected
+ */
+
+ error = VNOP_EXCHANGE(src_vp, dst_vp, FSOPT_EXCHANGE_DATA_ONLY, &context);
+
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ break;
+ }
+
+ /*
+ * SPI for making a file compressed.
+ */
+ case F_MAKECOMPRESSED: {
+ /*
+  * Mark the file as compressed via a filesystem ioctl; gcounter
+  * (from uap->arg) is passed through as the ioctl argument.
+  * Valid for regular files and symlinks; requires write access.
+  */
+ uint32_t gcounter = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ /* get the vnode */
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Is it a file? */
+ if ((vnode_isreg(vp) == 0) && (vnode_islnk(vp) == 0)) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* invoke ioctl to pass off to FS */
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)&gcounter, 0, &context);
+
+ vnode_put (vp);
+ break;
+ }
+
+ /*
+ * SPI (private) for indicating to a filesystem that subsequent writes to
+ * the open FD will written to the Fastflow.
+ */
+ case F_SET_GREEDY_MODE:
+ /* intentionally drop through to the same handler as F_SETSTATIC.
+ * both fcntls should pass the argument and their selector into VNOP_IOCTL.
+ */
+
+ /*
+ * SPI (private) for indicating to a filesystem that subsequent writes to
+ * the open FD will represent static content.
+ */
+ case F_SETSTATICCONTENT: {
+ /* uap->arg is treated as a boolean: any non-zero value becomes 1. */
+ caddr_t ioctl_arg = NULL;
+
+ if (uap->arg) {
+ ioctl_arg = (caddr_t) 1;
+ }
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, ioctl_arg, 0, &context);
+ (void)vnode_put(vp);
+
+ break;
+ }
+
+ /*
+ * SPI (private) for indicating to the lower level storage driver that the
+ * subsequent writes should be of a particular IO type (burst, greedy, static),
+ * or other flavors that may be necessary.
+ */
+ case F_SETIOTYPE: {
+ /*
+  * Tell the storage driver what IO type subsequent writes will be.
+  * Only F_IOTYPE_ISOCHRONOUS is currently accepted.
+  */
+ caddr_t param_ptr;
+ uint32_t param;
+
+ if (uap->arg) {
+ /* extract 32 bits of flags from userland */
+ /* NOTE(review): uap->arg carries a flag word, not a real pointer;
+  * the caddr_t round-trip deliberately truncates it to its low
+  * 32 bits — confirm callers never pass a wider value. */
+ param_ptr = (caddr_t) uap->arg;
+ param = (uint32_t) param_ptr;
+ }
+ else {
+ /* If no argument is specified, error out */
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * Validate the different types of flags that can be specified:
+ * all of them are mutually exclusive for now.
+ */
+ switch (param) {
+ case F_IOTYPE_ISOCHRONOUS:
+ break;
+
+ default:
+ error = EINVAL;
+ goto out;
+ }
+
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, param_ptr, 0, &context);
+ (void)vnode_put(vp);
+
+ break;
+ }
+
+
+ /*
+ * Extract the CodeDirectory of the vnode associated with
+ * the file descriptor and copy it back to user space
+ */
+ case F_GETCODEDIR: {
+ struct user_fcodeblobs args;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ((fp->f_flag & FREAD) == 0) {
+ error = EBADF;