+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ MALLOC(backup_keyp, char *, backup_key_len, M_TEMP, M_WAITOK);
+ if (backup_keyp == NULL) {
+ error = ENOMEM;
+ goto outdrop;
+ }
+
+ error = cp_vnode_transcode (vp, backup_keyp, &backup_key_len);
+ vnode_put(vp);
+
+ if (error == 0) {
+ error = copyout((caddr_t)backup_keyp, argp, backup_key_len);
+ *retval = backup_key_len;
+ }
+
+ FREE(backup_keyp, M_TEMP);
+
+ break;
+ }
+
+ /*
+  * F_GETPROTECTIONLEVEL: report the content-protection major version of
+  * the root filesystem backing this fd's vnode, returned in *retval.
+  * Entered with the proc fd lock held (released below before taking an
+  * iocount); 'error' of 0 with *retval == 0 means the query failed and
+  * cp_get_root_major_vers set errno accordingly (see comment below).
+  */
+ case F_GETPROTECTIONLEVEL: {
+ uint32_t cp_version = 0;
+
+ /* Only meaningful for vnode-backed descriptors. */
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ /* Take an iocount so the vnode cannot be reclaimed while we use it. */
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * if cp_get_major_vers fails, error will be set to proper errno
+ * and cp_version will still be 0.
+ */
+
+ error = cp_get_root_major_vers (vp, &cp_version);
+ *retval = cp_version;
+
+ vnode_put (vp);
+ break;
+ }
+
+ /*
+  * F_GETDEFAULTPROTLEVEL: report the default content-protection class
+  * for new files on this fd's filesystem, returned in *retval.
+  * Same lock/iocount discipline as F_GETPROTECTIONLEVEL above.
+  */
+ case F_GETDEFAULTPROTLEVEL: {
+ uint32_t cp_default = 0;
+
+ /* Only meaningful for vnode-backed descriptors. */
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ /* Take an iocount so the vnode cannot be reclaimed while we use it. */
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * if cp_get_default_level fails, error will be set to the proper errno
+ * and cp_default will still be 0, so *retval reports 0 on failure.
+ */
+
+ error = cp_get_default_level(vp, &cp_default);
+ *retval = cp_default;
+
+ vnode_put (vp);
+ break;
+ }
+
+
+#endif /* CONFIG_PROTECT */
+
+ /*
+  * F_MOVEDATAEXTENTS: exchange the data extents of this fd's vnode with
+  * those of a second fd passed in uap->arg, via VNOP_EXCHANGE with
+  * FSOPT_EXCHANGE_DATA_ONLY.  SPI; restricted to HFS+ (VT_HFS) vnodes.
+  *
+  * Locking/refcount protocol: entered with the proc fd lock held.  The
+  * second fileproc ref is taken (fp_lookup) while still holding the fd
+  * lock, so early failures use fp_drop(..., locked == 1) and 'goto out';
+  * after proc_fdunlock, failures use fp_drop(..., locked == 0) and
+  * 'goto outdrop'.  Iocounts on both vnodes bracket the exchange.
+  */
+ case F_MOVEDATAEXTENTS: {
+ struct fileproc *fp2 = NULL;
+ struct vnode *src_vp = NULLVP;
+ struct vnode *dst_vp = NULLVP;
+ /* We need to grab the 2nd FD out of the argments before moving on. */
+ int fd2 = CAST_DOWN_EXPLICIT(int32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ /* For now, special case HFS+ only, since this is SPI. */
+ src_vp = (struct vnode *)fp->f_data;
+ if (src_vp->v_tag != VT_HFS) {
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * Get the references before we start acquiring iocounts on the vnodes,
+ * while we still hold the proc fd lock
+ */
+ if ( (error = fp_lookup(p, fd2, &fp2, 1)) ) {
+ error = EBADF;
+ goto out;
+ }
+ if (fp2->f_type != DTYPE_VNODE) {
+ fp_drop(p, fd2, fp2, 1);
+ error = EBADF;
+ goto out;
+ }
+ dst_vp = (struct vnode *)fp2->f_data;
+ if (dst_vp->v_tag != VT_HFS) {
+ fp_drop(p, fd2, fp2, 1);
+ error = EINVAL;
+ goto out;
+ }
+
+#if CONFIG_MACF
+ /* Re-do MAC checks against the new FD, pass in a fake argument */
+ error = mac_file_check_fcntl(proc_ucred(p), fp2->f_fglob, uap->cmd, 0);
+ if (error) {
+ fp_drop(p, fd2, fp2, 1);
+ goto out;
+ }
+#endif
+ /* Audit the 2nd FD */
+ AUDIT_ARG(fd, fd2);
+
+ proc_fdunlock(p);
+
+ /* fd lock is dropped: from here on, fp_drop takes locked == 0. */
+ if (vnode_getwithref(src_vp)) {
+ fp_drop(p, fd2, fp2, 0);
+ error = ENOENT;
+ goto outdrop;
+ }
+ if (vnode_getwithref(dst_vp)) {
+ vnode_put (src_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * Basic asserts; validate they are not the same and that
+ * both live on the same filesystem.
+ */
+ if (dst_vp == src_vp) {
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ error = EINVAL;
+ goto outdrop;
+ }
+
+ if (dst_vp->v_mount != src_vp->v_mount) {
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ error = EXDEV;
+ goto outdrop;
+ }
+
+ /* Now we have a legit pair of FDs. Go to work */
+
+ /* Now check for write access to the target files */
+ if(vnode_authorize(src_vp, NULLVP,
+ (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
+ vnode_put(src_vp);
+ vnode_put(dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ if(vnode_authorize(dst_vp, NULLVP,
+ (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
+ vnode_put(src_vp);
+ vnode_put(dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* Verify that both vps point to files and not directories */
+ if ( !vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) {
+ error = EINVAL;
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ goto outdrop;
+ }
+
+ /*
+ * The exchangedata syscall handler passes in 0 for the flags to VNOP_EXCHANGE.
+ * We'll pass in our special bit indicating that the new behavior is expected
+ */
+
+ error = VNOP_EXCHANGE(src_vp, dst_vp, FSOPT_EXCHANGE_DATA_ONLY, &context);
+
+ /* Release iocounts and the 2nd fileproc ref regardless of outcome. */
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ break;
+ }
+
+ /*
+  * SPI for making a file compressed.
+  * Passes a 32-bit generation counter (from uap->arg) down to the
+  * filesystem via VNOP_IOCTL.  Requires write access; regular files
+  * and symlinks only.
+  */
+ case F_MAKECOMPRESSED: {
+ uint32_t gcounter = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ /* get the vnode */
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Is it a file? */
+ if ((vnode_isreg(vp) == 0) && (vnode_islnk(vp) == 0)) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* invoke ioctl to pass off to FS */
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /*
+ * NOTE(review): authorize uses the freshly-fetched 'ctx' while the
+ * ioctl uses the outer '&context' — presumably the same context;
+ * confirm and consider using one consistently.
+ */
+ error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)&gcounter, 0, &context);
+
+ vnode_put (vp);
+ break;
+ }
+
+ /*
+  * SPI (private) for indicating to a filesystem that subsequent writes to
+  * the open FD will written to the Fastflow.
+  */
+ case F_SET_GREEDY_MODE:
+ /* intentionally drop through to the same handler as F_SETSTATIC.
+ * both fcntls should pass the argument and their selector into VNOP_IOCTL.
+ */
+
+ /*
+ * SPI (private) for indicating to a filesystem that subsequent writes to
+ * the open FD will represent static content.
+ */
+ case F_SETSTATICCONTENT: {
+ caddr_t ioctl_arg = NULL;
+
+ /* Collapse uap->arg to a boolean: any non-zero value enables the mode. */
+ if (uap->arg) {
+ ioctl_arg = (caddr_t) 1;
+ }
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ /* Take an iocount; map any failure to ENOENT. */
+ error = vnode_getwithref(vp);
+ if (error) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* uap->cmd distinguishes greedy-mode vs static-content for the FS. */
+ error = VNOP_IOCTL(vp, uap->cmd, ioctl_arg, 0, &context);
+ (void)vnode_put(vp);
+
+ break;
+ }
+
+ /*
+  * SPI (private) for indicating to the lower level storage driver that the
+  * subsequent writes should be of a particular IO type (burst, greedy, static),
+  * or other flavors that may be necessary.
+  */
+ case F_SETIOTYPE: {
+ caddr_t param_ptr;
+ uint32_t param;
+
+ if (uap->arg) {
+ /* extract 32 bits of flags from userland */
+ param_ptr = (caddr_t) uap->arg;
+ /*
+ * NOTE(review): uap->arg carries the flag VALUE (not a user
+ * pointer); this direct pointer-to-uint32_t cast truncates to the
+ * low 32 bits.  Sibling cases use CAST_DOWN_EXPLICIT for this —
+ * consider the same here for consistency and to avoid warnings.
+ */
+ param = (uint32_t) param_ptr;
+ }
+ else {
+ /* If no argument is specified, error out */
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * Validate the different types of flags that can be specified:
+ * all of them are mutually exclusive for now.
+ */
+ switch (param) {
+ case F_IOTYPE_ISOCHRONOUS:
+ break;
+
+ default:
+ error = EINVAL;
+ goto out;
+ }
+
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ /* Take an iocount; map any failure to ENOENT. */
+ error = vnode_getwithref(vp);
+ if (error) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* Pass the validated flag value through as the ioctl argument. */
+ error = VNOP_IOCTL(vp, uap->cmd, param_ptr, 0, &context);
+ (void)vnode_put(vp);
+
+ break;
+ }
+
+
+ /*
+  * Extract the CodeDirectory of the vnode associated with
+  * the file descriptor and copy it back to user space.
+  *
+  * User argument is a struct fcodeblobs (32/64-bit variants normalized
+  * below).  On success, copies out: the blob's SHA-1 hash, the
+  * CodeDirectory bytes, and the CodeDirectory size (as unsigned int).
+  */
+ case F_GETCODEDIR: {
+ struct user_fcodeblobs args;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ /*
+ * NOTE(review): unlike the sibling cases, no vnode_getwithref iocount
+ * is taken on vp before dereferencing vp->v_ubcinfo below, and
+ * fp->f_flag is read after the fd lock is dropped.  Presumably the
+ * fileproc reference pins the vnode sufficiently here — confirm.
+ */
+ if ((fp->f_flag & FREAD) == 0) {
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* Normalize the user argument struct for 64- and 32-bit callers. */
+ if (IS_64BIT_PROCESS(p)) {
+ struct user64_fcodeblobs args64;
+
+ error = copyin(argp, &args64, sizeof(args64));
+ if (error)
+ goto outdrop;
+
+ args.f_cd_hash = args64.f_cd_hash;
+ args.f_hash_size = args64.f_hash_size;
+ args.f_cd_buffer = args64.f_cd_buffer;
+ args.f_cd_size = args64.f_cd_size;
+ args.f_out_size = args64.f_out_size;
+ args.f_arch = args64.f_arch;
+ } else {
+ struct user32_fcodeblobs args32;
+
+ error = copyin(argp, &args32, sizeof(args32));
+ if (error)
+ goto outdrop;
+
+ args.f_cd_hash = CAST_USER_ADDR_T(args32.f_cd_hash);
+ args.f_hash_size = args32.f_hash_size;
+ args.f_cd_buffer = CAST_USER_ADDR_T(args32.f_cd_buffer);
+ args.f_cd_size = args32.f_cd_size;
+ args.f_out_size = CAST_USER_ADDR_T(args32.f_out_size);
+ args.f_arch = args32.f_arch;
+ }
+
+ /* No ubc info means no code-signing blobs can exist for this vnode. */
+ if (vp->v_ubcinfo == NULL) {
+ error = EINVAL;
+ goto outdrop;
+ }
+
+ struct cs_blob *t_blob = vp->v_ubcinfo->cs_blobs;
+
+ /*
+ * This call fails if there is no cs_blob corresponding to the
+ * vnode, or if there are multiple cs_blobs present, and the caller
+ * did not specify which cpu_type they want the cs_blob for
+ */
+ if (t_blob == NULL) {
+ error = ENOENT; /* there is no codesigning blob for this process */
+ goto outdrop;
+ } else if (args.f_arch == 0 && t_blob->csb_next != NULL) {
+ error = ENOENT; /* too many architectures and none specified */
+ goto outdrop;
+ }
+
+ /* If the user specified an architecture, find the right blob */
+ if (args.f_arch != 0) {
+ while (t_blob) {
+ if (t_blob->csb_cpu_type == args.f_arch)
+ break;
+ t_blob = t_blob->csb_next;
+ }
+ /* The cpu_type the user requested could not be found */
+ if (t_blob == NULL) {
+ error = ENOENT;
+ goto outdrop;
+ }
+ }
+
+ /* Locate the CodeDirectory inside the blob's in-kernel mapping. */
+ const CS_SuperBlob *super_blob = (void *)t_blob->csb_mem_kaddr;
+ const CS_CodeDirectory *cd = findCodeDirectory(super_blob,
+ (char *) super_blob,
+ (char *) super_blob + t_blob->csb_mem_size);
+ if (cd == NULL) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* cd->length is big-endian on disk; size is reported as unsigned int. */
+ uint64_t buffer_size = ntohl(cd->length);
+
+ if (buffer_size > UINT_MAX) {
+ error = ERANGE;
+ goto outdrop;
+ }
+
+ /* Report the required size first so callers can retry with a bigger buffer. */
+ error = copyout(&buffer_size, args.f_out_size, sizeof(unsigned int));
+ if (error)
+ goto outdrop;
+
+ /* Fail before partial copyout if either user buffer is too small. */
+ if (sizeof(t_blob->csb_sha1) > args.f_hash_size ||
+ buffer_size > args.f_cd_size) {
+ error = ERANGE;
+ goto outdrop;
+ }
+
+ error = copyout(t_blob->csb_sha1, args.f_cd_hash, sizeof(t_blob->csb_sha1));
+ if (error)
+ goto outdrop;
+ error = copyout(cd, args.f_cd_buffer, buffer_size);
+ if (error)
+ goto outdrop;
+
+ break;
+ }
+
+ /*
+  * Set the vnode pointed to by 'fd'
+  * and tag it as the (potentially future) backing store
+  * for another filesystem.
+  * HFS+ (VT_HFS) only; requires write access.  uap->arg != 0 sets the
+  * tag, uap->arg == 0 clears it, passed through via VNOP_IOCTL.
+  */
+ case F_SETBACKINGSTORE: {
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+
+ /* Restricted to HFS+, checked while the fd lock is still held. */
+ if (vp->v_tag != VT_HFS) {
+ error = EINVAL;
+ goto out;
+ }
+ proc_fdunlock(p);
+
+ /* Take an iocount so the vnode cannot be reclaimed while we use it. */
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* only proceed if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+
+ /* If arg != 0, set, otherwise unset */
+ if (uap->arg) {
+ error = VNOP_IOCTL (vp, uap->cmd, (caddr_t)1, 0, &context);
+ }
+ else {
+ error = VNOP_IOCTL (vp, uap->cmd, (caddr_t)NULL, 0, &context);
+ }
+
+ vnode_put(vp);
+ break;
+ }
+
+ /*
+ * like F_GETPATH, but special semantics for
+ * the mobile time machine handler.
+ */
+ case F_GETPATH_MTMINFO: {
+ char *pathbufp;
+ int pathlen;
+