+ /*
+ * Round the file offset down to a page-size boundary (or to 0).
+ * The filesystem will need to round the length up to the end of the page boundary
+ * or to the EOF of the file.
+ */
+ uint64_t foff = (((uint64_t)args.fsr_offset) & ~((uint64_t)PAGE_MASK));
+ uint64_t foff_delta = args.fsr_offset - foff;
+ args.fsr_offset = (off_t) foff;
+
+ /*
+ * Now add in the delta to the supplied length. Since we may have adjusted the
+ * offset, increase it by the amount that we adjusted.
+ */
+ args.fsr_length += foff_delta;
+
+ if ((error = vnode_getwithref(vp))) {
+ goto outdrop;
+ }
+ error = VNOP_IOCTL(vp, F_SPECULATIVE_READ, (caddr_t)&args, 0, &context);
+ (void)vnode_put(vp);
+
+ goto outdrop;
+ }
+ /*
+ * F_SETSIZE: set the file's logical size to a caller-supplied off_t.
+ * Copies the new size in from user space, runs the MAC truncate check,
+ * and then sets the size via vnode_setsize(). Only the superuser may
+ * reach the setsize path at all; holders of PRIV_VFS_SETSIZE may skip
+ * zero-filling (IO_NOZEROFILL), everyone else gets zero-filled growth.
+ */
+ case F_SETSIZE:
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ /* Drop the fd table lock before any copyin / vnode I/O. */
+ proc_fdunlock(p);
+
+ error = copyin(argp, (caddr_t)&offset, sizeof(off_t));
+ if (error) {
+ goto outdrop;
+ }
+ AUDIT_ARG(value64, offset);
+
+ /* Take an iocount on the vnode; released below before exiting. */
+ error = vnode_getwithref(vp);
+ if (error) {
+ goto outdrop;
+ }
+
+#if CONFIG_MACF
+ error = mac_vnode_check_truncate(&context,
+ fp->fp_glob->fg_cred, vp);
+ if (error) {
+ (void)vnode_put(vp);
+ goto outdrop;
+ }
+#endif
+ /*
+ * Make sure that we are root. Growing a file
+ * without zero filling the data is a security hole.
+ */
+ if (!kauth_cred_issuser(kauth_cred_get())) {
+ error = EACCES;
+ } else {
+ /*
+ * Require privilege to change file size without zerofill,
+ * else will change the file size and zerofill it.
+ */
+ error = priv_check_cred(kauth_cred_get(), PRIV_VFS_SETSIZE, 0);
+ if (error == 0) {
+ error = vnode_setsize(vp, offset, IO_NOZEROFILL, &context);
+ } else {
+ error = vnode_setsize(vp, offset, 0, &context);
+ }
+
+#if CONFIG_MACF
+ /* Notify MAC modules only on a successful truncate/extend. */
+ if (error == 0) {
+ mac_vnode_notify_truncate(&context, fp->fp_glob->fg_cred, vp);
+ }
+#endif
+ }
+
+ (void)vnode_put(vp);
+ goto outdrop;
+
+ /*
+ * Four parallel per-fileglob flag toggles. Each sets or clears a flag
+ * bit in fp->fp_glob->fg_flag with relaxed atomics, keyed on whether
+ * uap->arg is non-zero. Note the sense inversion for F_RDAHEAD: a
+ * non-zero arg ENABLES read-ahead by CLEARING FNORDAHEAD; the other
+ * three set their flag when arg is non-zero. No vnode iocount is
+ * needed since only the fileglob is touched; exit via "out" (fd
+ * table lock still held at this point).
+ */
+ case F_RDAHEAD:
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ if (uap->arg) {
+ os_atomic_andnot(&fp->fp_glob->fg_flag, FNORDAHEAD, relaxed);
+ } else {
+ os_atomic_or(&fp->fp_glob->fg_flag, FNORDAHEAD, relaxed);
+ }
+ goto out;
+
+ /* Non-zero arg disables data caching on this open file. */
+ case F_NOCACHE:
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ if (uap->arg) {
+ os_atomic_or(&fp->fp_glob->fg_flag, FNOCACHE, relaxed);
+ } else {
+ os_atomic_andnot(&fp->fp_glob->fg_flag, FNOCACHE, relaxed);
+ }
+ goto out;
+
+ case F_NODIRECT:
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ if (uap->arg) {
+ os_atomic_or(&fp->fp_glob->fg_flag, FNODIRECT, relaxed);
+ } else {
+ os_atomic_andnot(&fp->fp_glob->fg_flag, FNODIRECT, relaxed);
+ }
+ goto out;
+
+ case F_SINGLE_WRITER:
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ if (uap->arg) {
+ os_atomic_or(&fp->fp_glob->fg_flag, FSINGLE_WRITER, relaxed);
+ } else {
+ os_atomic_andnot(&fp->fp_glob->fg_flag, FSINGLE_WRITER, relaxed);
+ }
+ goto out;
+
+
+ /*
+ * Two parallel per-VNODE (not per-open-file) toggles. Each returns
+ * the flag's previous state in *retval, then sets or clears it
+ * according to uap->arg. Both require an iocount on the vnode
+ * because they mutate vnode state shared by all opens of the file.
+ */
+ case F_GLOBAL_NOCACHE:
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ((error = vnode_getwithref(vp)) == 0) {
+ /* Report the prior setting back to the caller. */
+ *retval = vnode_isnocache(vp);
+
+ if (uap->arg) {
+ vnode_setnocache(vp);
+ } else {
+ vnode_clearnocache(vp);
+ }
+
+ (void)vnode_put(vp);
+ }
+ goto outdrop;
+
+ case F_CHECK_OPENEVT:
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ((error = vnode_getwithref(vp)) == 0) {
+ /* Report the prior open-event setting back to the caller. */
+ *retval = vnode_is_openevt(vp);
+
+ if (uap->arg) {
+ vnode_set_openevt(vp);
+ } else {
+ vnode_clear_openevt(vp);
+ }
+
+ (void)vnode_put(vp);
+ }
+ goto outdrop;
+
+
+ /*
+ * F_RDADVISE: issue a read-ahead advisory for a file region. The
+ * struct radvisory (offset + count) is copied in, validated to be
+ * non-negative, and handed to the filesystem via VNOP_IOCTL.
+ */
+ case F_RDADVISE: {
+ struct radvisory ra_struct;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ((error = copyin(argp, (caddr_t)&ra_struct, sizeof(ra_struct)))) {
+ goto outdrop;
+ }
+ /* Reject negative offsets/lengths before reaching the filesystem. */
+ if (ra_struct.ra_offset < 0 || ra_struct.ra_count < 0) {
+ error = EINVAL;
+ goto outdrop;
+ }
+ if ((error = vnode_getwithref(vp)) == 0) {
+ error = VNOP_IOCTL(vp, F_RDADVISE, (caddr_t)&ra_struct, 0, &context);
+
+ (void)vnode_put(vp);
+ }
+ goto outdrop;
+ }
+
+ /*
+ * F_FLUSH_DATA: push dirty data for this vnode without waiting
+ * (MNT_NOWAIT fsync).
+ */
+ case F_FLUSH_DATA:
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ((error = vnode_getwithref(vp)) == 0) {
+ error = VNOP_FSYNC(vp, MNT_NOWAIT, &context);
+
+ (void)vnode_put(vp);
+ }
+ goto outdrop;
+
+
+ /*
+ * F_LOG2PHYS / F_LOG2PHYS_EXT: translate a logical file offset into a
+ * physical device offset. The _EXT variant takes the file offset (and
+ * a contiguous-bytes request) from the user's struct log2phys; the
+ * plain variant uses the fd's current offset. The offset is converted
+ * to a logical block (VNOP_OFFTOBLK), back to the block-aligned file
+ * offset (VNOP_BLKTOOFF), then mapped to a device block (VNOP_BLOCKMAP)
+ * and scaled by the device block size for the result copied out.
+ */
+ case F_LOG2PHYS:
+ case F_LOG2PHYS_EXT: {
+ struct log2phys l2p_struct = {}; /* structure for allocate command */
+ int devBlockSize;
+
+ off_t file_offset = 0;
+ size_t a_size = 0;
+ size_t run = 0;
+
+ if (uap->cmd == F_LOG2PHYS_EXT) {
+ error = copyin(argp, (caddr_t)&l2p_struct, sizeof(l2p_struct));
+ if (error) {
+ goto out;
+ }
+ file_offset = l2p_struct.l2p_devoffset;
+ } else {
+ file_offset = fp->f_offset;
+ }
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+ if ((error = vnode_getwithref(vp))) {
+ goto outdrop;
+ }
+ error = VNOP_OFFTOBLK(vp, file_offset, &lbn);
+ if (error) {
+ (void)vnode_put(vp);
+ goto outdrop;
+ }
+ /* offset becomes the block-aligned start of the block holding file_offset. */
+ error = VNOP_BLKTOOFF(vp, lbn, &offset);
+ if (error) {
+ (void)vnode_put(vp);
+ goto outdrop;
+ }
+ devBlockSize = vfs_devblocksize(vnode_mount(vp));
+ if (uap->cmd == F_LOG2PHYS_EXT) {
+ if (l2p_struct.l2p_contigbytes < 0) {
+ vnode_put(vp);
+ error = EINVAL;
+ goto outdrop;
+ }
+
+ /* Clamp the (signed 64-bit) request to what size_t can carry. */
+ a_size = (size_t)MIN((uint64_t)l2p_struct.l2p_contigbytes, SIZE_MAX);
+ } else {
+ a_size = devBlockSize;
+ }
+
+ error = VNOP_BLOCKMAP(vp, offset, a_size, &bn, &run, NULL, 0, &context);
+
+ (void)vnode_put(vp);
+
+ if (!error) {
+ l2p_struct.l2p_flags = 0; /* for now */
+ if (uap->cmd == F_LOG2PHYS_EXT) {
+ /* Bytes contiguous past file_offset, not past the block start. */
+ l2p_struct.l2p_contigbytes = run - (file_offset - offset);
+ } else {
+ l2p_struct.l2p_contigbytes = 0; /* for now */
+ }
+
+ /*
+ * The block number being -1 suggests that the file offset is not backed
+ * by any real blocks on-disk. As a result, just let it be passed back up wholesale.
+ */
+ if (bn == -1) {
+ /* Don't multiply it by the block size */
+ l2p_struct.l2p_devoffset = bn;
+ } else {
+ l2p_struct.l2p_devoffset = bn * devBlockSize;
+ l2p_struct.l2p_devoffset += file_offset - offset;
+ }
+ error = copyout((caddr_t)&l2p_struct, argp, sizeof(l2p_struct));
+ }
+ goto outdrop;
+ }
+ /*
+ * F_GETPATH / F_GETPATH_NOFIRMLINK: copy the vnode's path (up to
+ * MAXPATHLEN, NUL included via pathlen updated by vn_getpath*) out to
+ * the caller. The NOFIRMLINK variant resolves without firmlink
+ * translation (VN_GETPATH_NO_FIRMLINK).
+ */
+ case F_GETPATH:
+ case F_GETPATH_NOFIRMLINK: {
+ char *pathbufp;
+ int pathlen;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ pathlen = MAXPATHLEN;
+ MALLOC(pathbufp, char *, pathlen, M_TEMP, M_WAITOK);
+ if (pathbufp == NULL) {
+ error = ENOMEM;
+ goto outdrop;
+ }
+ if ((error = vnode_getwithref(vp)) == 0) {
+ if (uap->cmd == F_GETPATH_NOFIRMLINK) {
+ error = vn_getpath_ext(vp, NULL, pathbufp, &pathlen, VN_GETPATH_NO_FIRMLINK);
+ } else {
+ error = vn_getpath(vp, pathbufp, &pathlen);
+ }
+ (void)vnode_put(vp);
+
+ if (error == 0) {
+ /* pathlen was updated to the actual length by vn_getpath*. */
+ error = copyout((caddr_t)pathbufp, argp, pathlen);
+ }
+ }
+ FREE(pathbufp, M_TEMP);
+ goto outdrop;
+ }
+
+
+ /*
+ * F_PATHPKG_CHECK: copy a path string in from user space and ask the
+ * VFS layer whether it identifies a "package" relative to this vnode;
+ * result is returned via *retval. The name buffer comes from the
+ * ZV_NAMEI zone and is freed on every path out of the case.
+ */
+ case F_PATHPKG_CHECK: {
+ char *pathbufp;
+ size_t pathlen;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ pathlen = MAXPATHLEN;
+ pathbufp = zalloc(ZV_NAMEI);
+
+ /* copyinstr sets pathlen to the copied length, NUL included. */
+ if ((error = copyinstr(argp, pathbufp, MAXPATHLEN, &pathlen)) == 0) {
+ if ((error = vnode_getwithref(vp)) == 0) {
+ AUDIT_ARG(text, pathbufp);
+ error = vn_path_package_check(vp, pathbufp, (int)pathlen, retval);
+
+ (void)vnode_put(vp);
+ }
+ }
+ zfree(ZV_NAMEI, pathbufp);
+ goto outdrop;
+ }
+
+
+ /*
+ * Pass-through ioctl group: each of these commands is forwarded
+ * verbatim to the filesystem via VNOP_IOCTL with no argument data.
+ * Exits via break (falls to the common post-switch path) rather than
+ * goto outdrop.
+ */
+ case F_CHKCLEAN: // used by regression tests to see if all dirty pages got cleaned by fsync()
+ case F_FULLFSYNC: // fsync + flush the journal + DKIOCSYNCHRONIZE
+ case F_BARRIERFSYNC: // fsync + barrier
+ case F_FREEZE_FS: // freeze all other fs operations for the fs of this fd
+ case F_THAW_FS: { // thaw all frozen fs operations for the fs of this fd
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ((error = vnode_getwithref(vp)) == 0) {
+ error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)NULL, 0, &context);
+
+ (void)vnode_put(vp);
+ }
+ break;
+ }
+
+
+ /*
+ * SPI (private) for opening a file starting from a dir fd
+ */
+ case F_OPENFROM: {
+ struct user_fopenfrom fopen;
+ struct vnode_attr va;
+ struct nameidata nd;
+ int cmode;
+
+ /* Check if this isn't a valid file descriptor */
+ /* The directory fd must itself have been opened for reading. */
+ if ((fp->f_type != DTYPE_VNODE) ||
+ (fp->f_flag & FREAD) == 0) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only valid for directories */
+ if (vp->v_type != VDIR) {
+ vnode_put(vp);
+ error = ENOTDIR;
+ goto outdrop;
+ }
+
+ /*
+ * Only entitled apps may use the credentials of the thread
+ * that opened the file descriptor.
+ * Non-entitled threads will use their own context.
+ */
+ if (IOTaskHasEntitlement(current_task(), ACCOUNT_OPENFROM_ENTITLEMENT)) {
+ has_entitlement = 1;
+ }
+
+ /* Get flags, mode and pathname arguments. */
+ if (IS_64BIT_PROCESS(p)) {
+ error = copyin(argp, &fopen, sizeof(fopen));
+ } else {
+ /* 32-bit caller: widen the 32-bit layout into user_fopenfrom. */
+ struct user32_fopenfrom fopen32;
+
+ error = copyin(argp, &fopen32, sizeof(fopen32));
+ fopen.o_flags = fopen32.o_flags;
+ fopen.o_mode = fopen32.o_mode;
+ fopen.o_pathname = CAST_USER_ADDR_T(fopen32.o_pathname);
+ }
+ if (error) {
+ vnode_put(vp);
+ goto outdrop;
+ }
+ AUDIT_ARG(fflags, fopen.o_flags);
+ AUDIT_ARG(mode, fopen.o_mode);
+ VATTR_INIT(&va);
+ /* Mask off all but regular access permissions */
+ cmode = ((fopen.o_mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
+ VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
+
+ /* Start the lookup relative to the file descriptor's vnode. */
+ /* USEDVP makes namei treat nd.ni_dvp (this dir) as the starting point. */
+ NDINIT(&nd, LOOKUP, OP_OPEN, USEDVP | FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
+ fopen.o_pathname, has_entitlement ? &context : vfs_context_current());
+ nd.ni_dvp = vp;
+
+ error = open1(has_entitlement ? &context : vfs_context_current(),
+ &nd, fopen.o_flags, &va, fileproc_alloc_init, NULL, retval);
+
+ vnode_put(vp);
+ break;
+ }
+ /*
+ * SPI (private) for unlinking a file starting from a dir fd
+ */
+ case F_UNLINKFROM: {
+ user_addr_t pathname;
+
+ /* Check if this isn't a valid file descriptor */
+ /* The directory fd must itself have been opened for reading. */
+ if ((fp->f_type != DTYPE_VNODE) ||
+ (fp->f_flag & FREAD) == 0) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only valid for directories */
+ if (vp->v_type != VDIR) {
+ vnode_put(vp);
+ error = ENOTDIR;
+ goto outdrop;
+ }
+
+ /*
+ * Only entitled apps may use the credentials of the thread
+ * that opened the file descriptor.
+ * Non-entitled threads will use their own context.
+ */
+ if (IOTaskHasEntitlement(current_task(), ACCOUNT_OPENFROM_ENTITLEMENT)) {
+ has_entitlement = 1;
+ }
+
+ /* Get flags, mode and pathname arguments. */
+ /* argp itself is the user-space pathname pointer for this command. */
+ if (IS_64BIT_PROCESS(p)) {
+ pathname = (user_addr_t)argp;
+ } else {
+ pathname = CAST_USER_ADDR_T(argp);
+ }
+
+ /* Start the lookup relative to the file descriptor's vnode. */
+ error = unlink1(has_entitlement ? &context : vfs_context_current(),
+ vp, pathname, UIO_USERSPACE, 0);
+
+ vnode_put(vp);
+ break;
+ }
+
+
+ /*
+ * F_ADDSIGS family: attach a code-signature blob to the file backing
+ * this fd. F_ADDSIGS copies the blob directly from user memory;
+ * the F_ADDFILESIGS* variants read it out of the file itself at
+ * fs_file_start + fs_blob_start. _RETURN/_FOR_DYLD_SIM/_INFO also
+ * copy results back out (blob end offset; _INFO additionally the
+ * cdhash and hash type). If a blob is already registered at this
+ * file offset it is reused (revalidated for dyld_sim).
+ */
+ case F_ADDSIGS:
+ case F_ADDFILESIGS:
+ case F_ADDFILESIGS_FOR_DYLD_SIM:
+ case F_ADDFILESIGS_RETURN:
+ case F_ADDFILESIGS_INFO:
+ {
+ struct cs_blob *blob = NULL;
+ struct user_fsignatures fs;
+ kern_return_t kr;
+ vm_offset_t kernel_blob_addr;
+ vm_size_t kernel_blob_size;
+ int blob_add_flags = 0;
+ /* Only copy in the input portion of the struct; _INFO's input ends
+ * where its first output field (fs_cdhash) begins. */
+ const size_t sizeof_fs = (uap->cmd == F_ADDFILESIGS_INFO ?
+ offsetof(struct user_fsignatures, fs_cdhash /* first output element */) :
+ offsetof(struct user_fsignatures, fs_fsignatures_size /* compat */));
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if (uap->cmd == F_ADDFILESIGS_FOR_DYLD_SIM) {
+ blob_add_flags |= MAC_VNODE_CHECK_DYLD_SIM;
+ /* dyld_sim loads force CS_KILL on the process. */
+ if ((p->p_csflags & CS_KILL) == 0) {
+ proc_lock(p);
+ p->p_csflags |= CS_KILL;
+ proc_unlock(p);
+ }
+ }
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ goto outdrop;
+ }
+
+ if (IS_64BIT_PROCESS(p)) {
+ error = copyin(argp, &fs, sizeof_fs);
+ } else {
+ /* _INFO has no 32-bit layout; reject it for 32-bit callers. */
+ if (uap->cmd == F_ADDFILESIGS_INFO) {
+ error = EINVAL;
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ struct user32_fsignatures fs32;
+
+ error = copyin(argp, &fs32, sizeof(fs32));
+ fs.fs_file_start = fs32.fs_file_start;
+ fs.fs_blob_start = CAST_USER_ADDR_T(fs32.fs_blob_start);
+ fs.fs_blob_size = fs32.fs_blob_size;
+ }
+
+ if (error) {
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ /*
+ * First check if we have something loaded a this offset
+ */
+ blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, CPU_SUBTYPE_ANY, fs.fs_file_start);
+ if (blob != NULL) {
+ /* If this is for dyld_sim revalidate the blob */
+ if (uap->cmd == F_ADDFILESIGS_FOR_DYLD_SIM) {
+ error = ubc_cs_blob_revalidate(vp, blob, NULL, blob_add_flags, proc_platform(p));
+ if (error) {
+ blob = NULL;
+ /* EAGAIN falls through to re-add the blob below. */
+ if (error != EAGAIN) {
+ vnode_put(vp);
+ goto outdrop;
+ }
+ }
+ }
+ }
+
+ if (blob == NULL) {
+ /*
+ * An arbitrary limit, to prevent someone from mapping in a 20GB blob. This should cover
+ * our use cases for the immediate future, but note that at the time of this commit, some
+ * platforms are nearing 2MB blob sizes (with a prior soft limit of 2.5MB).
+ *
+ * We should consider how we can manage this more effectively; the above means that some
+ * platforms are using megabytes of memory for signing data; it merely hasn't crossed the
+ * threshold considered ridiculous at the time of this change.
+ */
+#define CS_MAX_BLOB_SIZE (40ULL * 1024ULL * 1024ULL)
+ if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
+ error = E2BIG;
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size);
+ kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size);
+ /* Allocation may round the size; it must still cover the blob. */
+ if (kr != KERN_SUCCESS || kernel_blob_size < fs.fs_blob_size) {
+ error = ENOMEM;
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ if (uap->cmd == F_ADDSIGS) {
+ error = copyin(fs.fs_blob_start,
+ (void *) kernel_blob_addr,
+ fs.fs_blob_size);
+ } else { /* F_ADDFILESIGS || F_ADDFILESIGS_RETURN || F_ADDFILESIGS_FOR_DYLD_SIM || F_ADDFILESIGS_INFO */
+ int resid;
+
+ /* Read the signature out of the file itself. */
+ error = vn_rdwr(UIO_READ,
+ vp,
+ (caddr_t) kernel_blob_addr,
+ (int)kernel_blob_size,
+ fs.fs_file_start + fs.fs_blob_start,
+ UIO_SYSSPACE,
+ 0,
+ kauth_cred_get(),
+ &resid,
+ p);
+ if ((error == 0) && resid) {
+ /* kernel_blob_size rounded to a page size, but signature may be at end of file */
+ memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid);
+ }
+ }
+
+ if (error) {
+ ubc_cs_blob_deallocate(kernel_blob_addr,
+ kernel_blob_size);
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ blob = NULL;
+ error = ubc_cs_blob_add(vp,
+ proc_platform(p),
+ CPU_TYPE_ANY, /* not for a specific architecture */
+ CPU_SUBTYPE_ANY,
+ fs.fs_file_start,
+ &kernel_blob_addr,
+ kernel_blob_size,
+ NULL,
+ blob_add_flags,
+ &blob);
+
+ /* ubc_blob_add() has consumed "kernel_blob_addr" if it is zeroed */
+ if (error) {
+ if (kernel_blob_addr) {
+ ubc_cs_blob_deallocate(kernel_blob_addr,
+ kernel_blob_size);
+ }
+ vnode_put(vp);
+ goto outdrop;
+ } else {
+#if CHECK_CS_VALIDATION_BITMAP
+ ubc_cs_validation_bitmap_allocate( vp );
+#endif
+ }
+ }
+
+ if (uap->cmd == F_ADDFILESIGS_RETURN || uap->cmd == F_ADDFILESIGS_FOR_DYLD_SIM ||
+ uap->cmd == F_ADDFILESIGS_INFO) {
+ /*
+ * The first element of the structure is a
+ * off_t that happen to have the same size for
+ * all archs. Lets overwrite that.
+ */
+ off_t end_offset = 0;
+ if (blob) {
+ end_offset = blob->csb_end_offset;
+ }
+ error = copyout(&end_offset, argp, sizeof(end_offset));
+
+ if (error) {
+ vnode_put(vp);
+ goto outdrop;
+ }
+ }
+
+ if (uap->cmd == F_ADDFILESIGS_INFO) {
+ /* Return information. What we copy out depends on the size of the
+ * passed in structure, to keep binary compatibility. */
+
+ if (fs.fs_fsignatures_size >= sizeof(struct user_fsignatures)) {
+ // enough room for fs_cdhash[20]+fs_hash_type
+
+ if (blob != NULL) {
+ error = copyout(blob->csb_cdhash,
+ (vm_address_t)argp + offsetof(struct user_fsignatures, fs_cdhash),
+ USER_FSIGNATURES_CDHASH_LEN);
+ if (error) {
+ vnode_put(vp);
+ goto outdrop;
+ }
+ int hashtype = cs_hash_type(blob->csb_hashtype);
+ error = copyout(&hashtype,
+ (vm_address_t)argp + offsetof(struct user_fsignatures, fs_hash_type),
+ sizeof(int));
+ if (error) {
+ vnode_put(vp);
+ goto outdrop;
+ }
+ }
+ }
+ }
+
+ (void) vnode_put(vp);
+ break;
+ }
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+ /*
+ * F_ADDFILESUPPL: attach a supplemental code signature (read from the
+ * file behind THIS fd) to the "original" file identified by
+ * fs.fs_orig_fd. 64-bit callers only. Holds iocounts on both vnodes
+ * and a reference on the original fileproc; the dropboth label
+ * unwinds all three on error before taking the common outdrop path.
+ */
+ case F_ADDFILESUPPL:
+ {
+ struct vnode *ivp;
+ struct cs_blob *blob = NULL;
+ struct user_fsupplement fs;
+ int orig_fd;
+ struct fileproc* orig_fp = NULL;
+ kern_return_t kr;
+ vm_offset_t kernel_blob_addr;
+ vm_size_t kernel_blob_size;
+
+ if (!IS_64BIT_PROCESS(p)) {
+ error = EINVAL;
+ goto out; // drop fp and unlock fds
+ }
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ error = copyin(argp, &fs, sizeof(fs));
+ if (error) {
+ goto out;
+ }
+
+ /* Take a reference on the original file's fileproc (fd table locked). */
+ orig_fd = fs.fs_orig_fd;
+ if ((error = fp_lookup(p, orig_fd, &orig_fp, 1))) {
+ printf("CODE SIGNING: Failed to find original file for supplemental signature attachment\n");
+ goto out;
+ }
+
+ if (orig_fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ fp_drop(p, orig_fd, orig_fp, 1);
+ goto out;
+ }
+
+ ivp = (struct vnode *)orig_fp->f_data;
+
+ vp = (struct vnode *)fp->f_data;
+
+ proc_fdunlock(p);
+
+ error = vnode_getwithref(ivp);
+ if (error) {
+ fp_drop(p, orig_fd, orig_fp, 0);
+ goto outdrop; //drop fp
+ }
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ vnode_put(ivp);
+ fp_drop(p, orig_fd, orig_fp, 0);
+ goto outdrop;
+ }
+
+ /* Same blob-size cap as the F_ADDSIGS family. */
+ if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
+ error = E2BIG;
+ goto dropboth; // drop iocounts on vp and ivp, drop orig_fp then drop fp via outdrop
+ }
+
+ kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size);
+ kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size);
+ if (kr != KERN_SUCCESS) {
+ error = ENOMEM;
+ goto dropboth;
+ }
+
+ /* Read the supplemental signature out of the file behind this fd. */
+ int resid;
+ error = vn_rdwr(UIO_READ, vp,
+ (caddr_t)kernel_blob_addr, (int)kernel_blob_size,
+ fs.fs_file_start + fs.fs_blob_start,
+ UIO_SYSSPACE, 0,
+ kauth_cred_get(), &resid, p);
+ if ((error == 0) && resid) {
+ /* kernel_blob_size rounded to a page size, but signature may be at end of file */
+ memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid);
+ }
+
+ if (error) {
+ ubc_cs_blob_deallocate(kernel_blob_addr,
+ kernel_blob_size);
+ goto dropboth;
+ }
+
+ error = ubc_cs_blob_add_supplement(vp, ivp, fs.fs_file_start,
+ &kernel_blob_addr, kernel_blob_size, &blob);
+
+ /* ubc_blob_add_supplement() has consumed kernel_blob_addr if it is zeroed */
+ if (error) {
+ if (kernel_blob_addr) {
+ ubc_cs_blob_deallocate(kernel_blob_addr,
+ kernel_blob_size);
+ }
+ goto dropboth;
+ }
+ vnode_put(ivp);
+ vnode_put(vp);
+ fp_drop(p, orig_fd, orig_fp, 0);
+ break;
+
+dropboth:
+ vnode_put(ivp);
+ vnode_put(vp);
+ fp_drop(p, orig_fd, orig_fp, 0);
+ goto outdrop;
+ }
+#endif
+ /* Unimplemented code-signing queries. */
+ case F_GETCODEDIR:
+ case F_FINDSIGS: {
+ error = ENOTSUP;
+ goto out;
+ }
+ /*
+ * F_CHECK_LV: ask the MAC layer whether this fileglob passes library
+ * validation for the given file start; on failure an error message may
+ * be copied to the caller-supplied buffer described in fchecklv.
+ * NOTE(review): without CONFIG_MACF this case breaks with error == 0
+ * (no check performed) — appears intentional, confirm.
+ */
+ case F_CHECK_LV: {
+ struct fileglob *fg;
+ fchecklv_t lv = {};
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ fg = fp->fp_glob;
+ proc_fdunlock(p);
+
+ if (IS_64BIT_PROCESS(p)) {
+ error = copyin(argp, &lv, sizeof(lv));
+ } else {
+ /* 32-bit caller: widen the 32-bit layout into fchecklv_t. */
+ struct user32_fchecklv lv32 = {};
+
+ error = copyin(argp, &lv32, sizeof(lv32));
+ lv.lv_file_start = lv32.lv_file_start;
+ lv.lv_error_message = (void *)(uintptr_t)lv32.lv_error_message;
+ lv.lv_error_message_size = lv32.lv_error_message_size;
+ }
+ if (error) {
+ goto outdrop;
+ }
+
+#if CONFIG_MACF
+ error = mac_file_check_library_validation(p, fg, lv.lv_file_start,
+ (user_long_t)lv.lv_error_message, lv.lv_error_message_size);
+#endif
+
+ break;
+ }
+ /*
+ * F_GETSIGSINFO: query properties of the code-signature blob
+ * registered at fg_file_start on this vnode. Currently only the
+ * GETSIGSINFO_PLATFORM_BINARY request is supported; the answer is
+ * copied out into the fg_sig_is_platform field of the caller's struct.
+ * ENOENT if no blob is registered at that offset.
+ */
+ case F_GETSIGSINFO: {
+ struct cs_blob *blob = NULL;
+ fgetsigsinfo_t sigsinfo = {};
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ goto outdrop;
+ }
+
+ error = copyin(argp, &sigsinfo, sizeof(sigsinfo));
+ if (error) {
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, CPU_SUBTYPE_ANY, sigsinfo.fg_file_start);
+ if (blob == NULL) {
+ error = ENOENT;
+ vnode_put(vp);
+ goto outdrop;
+ }
+ switch (sigsinfo.fg_info_request) {
+ case GETSIGSINFO_PLATFORM_BINARY:
+ sigsinfo.fg_sig_is_platform = blob->csb_platform_binary;
+ /* Copy out only the answer field, not the whole struct. */
+ error = copyout(&sigsinfo.fg_sig_is_platform,
+ (vm_address_t)argp + offsetof(struct fgetsigsinfo, fg_sig_is_platform),
+ sizeof(sigsinfo.fg_sig_is_platform));
+ if (error) {
+ vnode_put(vp);
+ goto outdrop;
+ }
+ break;
+ default:
+ error = EINVAL;
+ vnode_put(vp);
+ goto outdrop;
+ }
+ vnode_put(vp);
+ break;
+ }
+#if CONFIG_PROTECT
+ /*
+ * F_GETPROTECTIONCLASS: return the vnode's data-protection class in
+ * *retval via a VNOP_GETATTR of va_dataprotect_class. ENOTSUP if the
+ * filesystem does not report that attribute.
+ */
+ case F_GETPROTECTIONCLASS: {
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ struct vnode_attr va;
+
+ VATTR_INIT(&va);
+ VATTR_WANTED(&va, va_dataprotect_class);
+ error = VNOP_GETATTR(vp, &va, &context);
+ if (!error) {
+ if (VATTR_IS_SUPPORTED(&va, va_dataprotect_class)) {
+ *retval = va.va_dataprotect_class;
+ } else {
+ error = ENOTSUP;
+ }
+ }
+
+ vnode_put(vp);
+ break;
+ }
+
+ case F_SETPROTECTIONCLASS: {
+ /* tmp must be a valid PROTECTION_CLASS_* */
+ tmp = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
+