+ error = unlink1(&context, &nd, 0);
+
+ vnode_put(vp);
+ break;
+
+ }
+
+ case F_ADDSIGS:
+ case F_ADDFILESIGS:
+ {
+ struct user_fsignatures fs;
+ kern_return_t kr;
+ vm_offset_t kernel_blob_addr;
+ vm_size_t kernel_blob_size;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+ error = vnode_getwithref(vp);
+ if (error)
+ goto outdrop;
+
+ if (IS_64BIT_PROCESS(p)) {
+ error = copyin(argp, &fs, sizeof (fs));
+ } else {
+ struct user32_fsignatures fs32;
+
+ error = copyin(argp, &fs32, sizeof (fs32));
+ fs.fs_file_start = fs32.fs_file_start;
+ fs.fs_blob_start = CAST_USER_ADDR_T(fs32.fs_blob_start);
+ fs.fs_blob_size = fs32.fs_blob_size;
+ }
+
+ if (error) {
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ if (ubc_cs_blob_get(vp, CPU_TYPE_ANY, fs.fs_file_start)) {
+ /* a blob is already registered at this offset; nothing more to do */
+ vnode_put(vp);
+ goto outdrop;
+ }
+/*
+ * An arbitrary limit, to prevent someone from mapping in a 20GB blob. This should cover
+ * our use cases for the immediate future, but note that at the time of this commit, some
+ * platforms are nearing 2MB blob sizes (with a prior soft limit of 2.5MB).
+ *
+ * We should consider how we can manage this more effectively; the above means that some
+ * platforms are using megabytes of memory for signing data; it merely hasn't crossed the
+ * threshold considered ridiculous at the time of this change.
+ */
+#define CS_MAX_BLOB_SIZE (10ULL * 1024ULL * 1024ULL)
+ if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
+ error = E2BIG;
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size);
+ kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size);
+ if (kr != KERN_SUCCESS) {
+ error = ENOMEM;
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ if(uap->cmd == F_ADDSIGS) {
+ error = copyin(fs.fs_blob_start,
+ (void *) kernel_blob_addr,
+ kernel_blob_size);
+ } else /* F_ADDFILESIGS */ {
+ error = vn_rdwr(UIO_READ,
+ vp,
+ (caddr_t) kernel_blob_addr,
+ kernel_blob_size,
+ fs.fs_file_start + fs.fs_blob_start,
+ UIO_SYSSPACE,
+ 0,
+ kauth_cred_get(),
+ 0,
+ p);
+ }
+
+ if (error) {
+ ubc_cs_blob_deallocate(kernel_blob_addr,
+ kernel_blob_size);
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ error = ubc_cs_blob_add(
+ vp,
+ CPU_TYPE_ANY, /* not for a specific architecture */
+ fs.fs_file_start,
+ kernel_blob_addr,
+ kernel_blob_size);
+ if (error) {
+ ubc_cs_blob_deallocate(kernel_blob_addr,
+ kernel_blob_size);
+ } else {
+ /* ubc_cs_blob_add() has consumed "kernel_blob_addr" */
+#if CHECK_CS_VALIDATION_BITMAP
+ ubc_cs_validation_bitmap_allocate( vp );
+#endif
+ }
+
+ (void) vnode_put(vp);
+ break;
+ }
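+ /*
+ * Illustrative sketch (hedged, not authoritative): a userland caller
+ * describes the detached signature with a struct fsignatures from
+ * <sys/fcntl.h>, which the copyin above mirrors as user_fsignatures:
+ *
+ *   struct fsignatures sigs;
+ *   sigs.fs_file_start = slice_offset;     offset of the Mach-O slice
+ *   sigs.fs_blob_start = blob_buf;         signature blob in memory
+ *   sigs.fs_blob_size  = blob_len;
+ *   int ret = fcntl(fd, F_ADDSIGS, &sigs);
+ *
+ * For F_ADDFILESIGS, fs_blob_start is instead treated as an offset from
+ * fs_file_start and the blob is read from the file itself via vn_rdwr()
+ * as above. "slice_offset", "blob_buf" and "blob_len" are hypothetical
+ * names used only for this sketch.
+ */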
+ case F_FINDSIGS: {
+#ifdef SECURE_KERNEL
+ error = ENOTSUP;
+#else /* !SECURE_KERNEL */
+ off_t offsetMacho;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+ error = vnode_getwithref(vp);
+ if (error)
+ goto outdrop;
+
+ error = copyin(argp, &offsetMacho, sizeof(offsetMacho));
+ if (error) {
+ (void)vnode_put(vp);
+ goto outdrop;
+ }
+
+#if CONFIG_MACF
+ error = mac_vnode_find_sigs(p, vp, offsetMacho);
+#else
+ error = EPERM;
+#endif
+ if (error) {
+ (void)vnode_put(vp);
+ goto outdrop;
+ }
+#endif /* SECURE_KERNEL */
+ break;
+ }
+#if CONFIG_PROTECT
+ case F_GETPROTECTIONCLASS: {
+ int class = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ error = cp_vnode_getclass (vp, &class);
+ if (error == 0) {
+ *retval = class;
+ }
+
+ vnode_put(vp);
+ break;
+ }
+
+ case F_SETPROTECTIONCLASS: {
+ /* tmp must be a valid PROTECTION_CLASS_* */
+ tmp = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+ error = cp_vnode_setclass (vp, tmp);
+ vnode_put(vp);
+ break;
+ }
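+ /*
+ * Hedged usage sketch (assumes the PROTECTION_CLASS_* constants the
+ * comment above refers to): userland reads the class back as the
+ * fcntl() return value and sets it by passing the class as the
+ * argument, e.g.
+ *
+ *   int cur = fcntl(fd, F_GETPROTECTIONCLASS);
+ *   int ret = fcntl(fd, F_SETPROTECTIONCLASS, PROTECTION_CLASS_C);
+ *
+ * Both return -1 with errno set on failure; F_SETPROTECTIONCLASS fails
+ * with EBADF here unless the caller has write access to the file.
+ */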
+
+ case F_TRANSCODEKEY: {
+
+ char *backup_keyp = NULL;
+ unsigned backup_key_len = CP_MAX_WRAPPEDKEYSIZE;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ MALLOC(backup_keyp, char *, backup_key_len, M_TEMP, M_WAITOK);
+ if (backup_keyp == NULL) {
+ error = ENOMEM;
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ error = cp_vnode_transcode (vp, backup_keyp, &backup_key_len);
+ vnode_put(vp);
+
+ if (error == 0) {
+ error = copyout((caddr_t)backup_keyp, argp, backup_key_len);
+ *retval = backup_key_len;
+ }
+
+ FREE(backup_keyp, M_TEMP);
+
+ break;
+ }
+
+ case F_GETPROTECTIONLEVEL: {
+ uint32_t cp_version = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * if cp_get_root_major_vers fails, error will be set to the proper errno
+ * and cp_version will still be 0.
+ */
+
+ error = cp_get_root_major_vers (vp, &cp_version);
+ *retval = cp_version;
+
+ vnode_put (vp);
+ break;
+ }
+
+ case F_GETDEFAULTPROTLEVEL: {
+ uint32_t cp_default = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * if cp_get_default_level fails, error will be set to the proper errno
+ * and cp_default will still be 0.
+ */
+
+ error = cp_get_default_level(vp, &cp_default);
+ *retval = cp_default;
+
+ vnode_put (vp);
+ break;
+ }
+
+
+#endif /* CONFIG_PROTECT */
+
+ case F_MOVEDATAEXTENTS: {
+ struct fileproc *fp2 = NULL;
+ struct vnode *src_vp = NULLVP;
+ struct vnode *dst_vp = NULLVP;
+ /* We need to grab the 2nd FD out of the arguments before moving on. */
+ int fd2 = CAST_DOWN_EXPLICIT(int32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ /* For now, special case HFS+ only, since this is SPI. */
+ src_vp = (struct vnode *)fp->f_data;
+ if (src_vp->v_tag != VT_HFS) {
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * Get the references before we start acquiring iocounts on the vnodes,
+ * while we still hold the proc fd lock
+ */
+ if ( (error = fp_lookup(p, fd2, &fp2, 1)) ) {
+ error = EBADF;
+ goto out;
+ }
+ if (fp2->f_type != DTYPE_VNODE) {
+ fp_drop(p, fd2, fp2, 1);
+ error = EBADF;
+ goto out;
+ }
+ dst_vp = (struct vnode *)fp2->f_data;
+ if (dst_vp->v_tag != VT_HFS) {
+ fp_drop(p, fd2, fp2, 1);
+ error = EINVAL;
+ goto out;
+ }
+
+#if CONFIG_MACF
+ /* Re-do MAC checks against the new FD, pass in a fake argument */
+ error = mac_file_check_fcntl(proc_ucred(p), fp2->f_fglob, uap->cmd, 0);
+ if (error) {
+ fp_drop(p, fd2, fp2, 1);
+ goto out;
+ }
+#endif
+ /* Audit the 2nd FD */
+ AUDIT_ARG(fd, fd2);
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(src_vp)) {
+ fp_drop(p, fd2, fp2, 0);
+ error = ENOENT;
+ goto outdrop;
+ }
+ if (vnode_getwithref(dst_vp)) {
+ vnode_put (src_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * Basic asserts; validate they are not the same and that
+ * both live on the same filesystem.
+ */
+ if (dst_vp == src_vp) {
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ error = EINVAL;
+ goto outdrop;
+ }
+
+ if (dst_vp->v_mount != src_vp->v_mount) {
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ error = EXDEV;
+ goto outdrop;
+ }
+
+ /* Now we have a legit pair of FDs. Go to work */
+
+ /* Now check for write access to the target files */
+ if(vnode_authorize(src_vp, NULLVP,
+ (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
+ vnode_put(src_vp);
+ vnode_put(dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ if(vnode_authorize(dst_vp, NULLVP,
+ (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
+ vnode_put(src_vp);
+ vnode_put(dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* Verify that both vps point to files and not directories */
+ if ( !vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) {
+ error = EINVAL;
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ goto outdrop;
+ }
+
+ /*
+ * The exchangedata syscall handler passes in 0 for the flags to VNOP_EXCHANGE.
+ * We'll pass in our special bit indicating that the new behavior is expected
+ */
+
+ error = VNOP_EXCHANGE(src_vp, dst_vp, FSOPT_EXCHANGE_DATA_ONLY, &context);
+
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ break;
+ }
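+ /*
+ * Hedged usage sketch: the second descriptor travels in the fcntl()
+ * argument, so a caller swapping the data extents of two HFS+ regular
+ * files on the same volume would do roughly
+ *
+ *   int ret = fcntl(src_fd, F_MOVEDATAEXTENTS, dst_fd);
+ *
+ * which, per the checks above, fails with EINVAL for non-HFS or
+ * identical vnodes, EXDEV for different volumes, and EBADF when write
+ * access to either file is missing.
+ */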
+
+ /*
+ * SPI for making a file compressed.
+ */
+ case F_MAKECOMPRESSED: {
+ uint32_t gcounter = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ /* get the vnode */
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Is it a file? */
+ if ((vnode_isreg(vp) == 0) && (vnode_islnk(vp) == 0)) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* invoke ioctl to pass off to FS */
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)&gcounter, 0, &context);
+
+ vnode_put (vp);
+ break;
+ }
+
+ /*
+ * SPI (private) for indicating to a filesystem that subsequent writes to
+ * the open FD will be written to the Fastflow.
+ */
+ case F_SET_GREEDY_MODE:
+ /* Intentionally fall through to the same handler as F_SETSTATICCONTENT;
+ * both fcntls pass their argument and selector into VNOP_IOCTL.
+ */
+
+ /*
+ * SPI (private) for indicating to a filesystem that subsequent writes to
+ * the open FD will represent static content.
+ */
+ case F_SETSTATICCONTENT: {
+ caddr_t ioctl_arg = NULL;
+
+ if (uap->arg) {
+ ioctl_arg = (caddr_t) 1;
+ }
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, ioctl_arg, 0, &context);
+ (void)vnode_put(vp);
+
+ break;
+ }
+
+ /*
+ * SPI (private) for indicating to the lower level storage driver that the
+ * subsequent writes should be of a particular IO type (burst, greedy, static),
+ * or other flavors that may be necessary.
+ */
+ case F_SETIOTYPE: {
+ caddr_t param_ptr;
+ uint32_t param;
+
+ if (uap->arg) {
+ /* extract 32 bits of flags from userland */
+ param_ptr = (caddr_t) uap->arg;
+ param = (uint32_t) param_ptr;
+ }
+ else {
+ /* If no argument is specified, error out */
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * Validate the different types of flags that can be specified:
+ * all of them are mutually exclusive for now.
+ */
+ switch (param) {
+ case F_IOTYPE_ISOCHRONOUS:
+ break;
+
+ default:
+ error = EINVAL;
+ goto out;
+ }
+
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, param_ptr, 0, &context);
+ (void)vnode_put(vp);
+
+ break;
+ }
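+ /*
+ * Hedged usage sketch: the 32-bit IO-type flag is passed directly as
+ * the fcntl() argument; only F_IOTYPE_ISOCHRONOUS is accepted for now,
+ * so a caller would do roughly
+ *
+ *   int ret = fcntl(fd, F_SETIOTYPE, F_IOTYPE_ISOCHRONOUS);
+ *
+ * Any other flag value is rejected with EINVAL before the vnode is
+ * touched.
+ */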
+
+
+ /*
+ * Extract the CodeDirectory of the vnode associated with
+ * the file descriptor and copy it back to user space
+ */
+ case F_GETCODEDIR: {
+ struct user_fcodeblobs args;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ((fp->f_flag & FREAD) == 0) {
+ error = EBADF;
+ goto outdrop;
+ }
+
+ if (IS_64BIT_PROCESS(p)) {
+ struct user64_fcodeblobs args64;
+
+ error = copyin(argp, &args64, sizeof(args64));
+ if (error)
+ goto outdrop;
+
+ args.f_cd_hash = args64.f_cd_hash;
+ args.f_hash_size = args64.f_hash_size;
+ args.f_cd_buffer = args64.f_cd_buffer;
+ args.f_cd_size = args64.f_cd_size;
+ args.f_out_size = args64.f_out_size;
+ args.f_arch = args64.f_arch;
+ } else {
+ struct user32_fcodeblobs args32;
+
+ error = copyin(argp, &args32, sizeof(args32));
+ if (error)
+ goto outdrop;
+
+ args.f_cd_hash = CAST_USER_ADDR_T(args32.f_cd_hash);
+ args.f_hash_size = args32.f_hash_size;
+ args.f_cd_buffer = CAST_USER_ADDR_T(args32.f_cd_buffer);
+ args.f_cd_size = args32.f_cd_size;
+ args.f_out_size = CAST_USER_ADDR_T(args32.f_out_size);
+ args.f_arch = args32.f_arch;
+ }
+
+ if (vp->v_ubcinfo == NULL) {
+ error = EINVAL;
+ goto outdrop;
+ }
+
+ struct cs_blob *t_blob = vp->v_ubcinfo->cs_blobs;
+
+ /*
+ * This call fails if there is no cs_blob corresponding to the
+ * vnode, or if there are multiple cs_blobs present, and the caller
+ * did not specify which cpu_type they want the cs_blob for
+ */
+ if (t_blob == NULL) {
+ error = ENOENT; /* there is no codesigning blob for this process */
+ goto outdrop;
+ } else if (args.f_arch == 0 && t_blob->csb_next != NULL) {
+ error = ENOENT; /* too many architectures and none specified */
+ goto outdrop;
+ }
+
+ /* If the user specified an architecture, find the right blob */
+ if (args.f_arch != 0) {
+ while (t_blob) {
+ if (t_blob->csb_cpu_type == args.f_arch)
+ break;
+ t_blob = t_blob->csb_next;
+ }
+ /* The cpu_type the user requested could not be found */
+ if (t_blob == NULL) {
+ error = ENOENT;
+ goto outdrop;
+ }
+ }
+
+ const CS_SuperBlob *super_blob = (void *)t_blob->csb_mem_kaddr;
+ const CS_CodeDirectory *cd = findCodeDirectory(super_blob,
+ (char *) super_blob,
+ (char *) super_blob + t_blob->csb_mem_size);
+ if (cd == NULL) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ uint64_t buffer_size = ntohl(cd->length);
+
+ if (buffer_size > UINT_MAX) {
+ error = ERANGE;
+ goto outdrop;
+ }
+
+ error = copyout(&buffer_size, args.f_out_size, sizeof(unsigned int));
+ if (error)
+ goto outdrop;
+
+ if (sizeof(t_blob->csb_sha1) > args.f_hash_size ||
+ buffer_size > args.f_cd_size) {
+ error = ERANGE;
+ goto outdrop;
+ }
+
+ error = copyout(t_blob->csb_sha1, args.f_cd_hash, sizeof(t_blob->csb_sha1));
+ if (error)
+ goto outdrop;
+ error = copyout(cd, args.f_cd_buffer, buffer_size);
+ if (error)
+ goto outdrop;
+
+ break;
+ }
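+ /*
+ * Hedged usage sketch (assuming the userland struct fcodeblobs mirrors
+ * the user_fcodeblobs fields copied in above): the caller supplies
+ * output buffers for the blob's SHA-1 and for the CodeDirectory, plus
+ * a place to receive its size, e.g.
+ *
+ *   struct fcodeblobs args;
+ *   unsigned int cd_size;
+ *   uint8_t hash[20];
+ *   args.f_cd_hash = hash;
+ *   args.f_hash_size = sizeof(hash);
+ *   args.f_cd_buffer = cd_buf;
+ *   args.f_cd_size = cd_buf_len;
+ *   args.f_out_size = &cd_size;
+ *   args.f_arch = 0;            single-architecture file
+ *   int ret = fcntl(fd, F_GETCODEDIR, &args);
+ *
+ * "cd_buf" and "cd_buf_len" are hypothetical; ERANGE is returned when
+ * either buffer is too small for the data described above.
+ */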
+
+ /*
+ * Tag the vnode pointed to by 'fd' as the
+ * (potentially future) backing store for another filesystem
+ */
+ case F_SETBACKINGSTORE: {
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+
+ if (vp->v_tag != VT_HFS) {
+ error = EINVAL;
+ goto out;
+ }
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* only proceed if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+
+ /* If arg != 0, set, otherwise unset */
+ if (uap->arg) {
+ error = VNOP_IOCTL (vp, uap->cmd, (caddr_t)1, 0, &context);
+ }
+ else {
+ error = VNOP_IOCTL (vp, uap->cmd, (caddr_t)NULL, 0, &context);
+ }
+
+ vnode_put(vp);
+ break;
+ }
+
+ /*
+ * Like F_GETPATH, but with special semantics for
+ * the mobile time machine handler.
+ */
+ case F_GETPATH_MTMINFO: {
+ char *pathbufp;
+ int pathlen;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ pathlen = MAXPATHLEN;
+ MALLOC(pathbufp, char *, pathlen, M_TEMP, M_WAITOK);
+ if (pathbufp == NULL) {
+ error = ENOMEM;
+ goto outdrop;
+ }
+ if ( (error = vnode_getwithref(vp)) == 0 ) {
+ int backingstore = 0;
+
+ /* Check for error from vn_getpath before moving on */
+ if ((error = vn_getpath(vp, pathbufp, &pathlen)) == 0) {
+ if (vp->v_tag == VT_HFS) {
+ error = VNOP_IOCTL (vp, uap->cmd, (caddr_t) &backingstore, 0, &context);
+ }
+ (void)vnode_put(vp);
+
+ if (error == 0) {
+ error = copyout((caddr_t)pathbufp, argp, pathlen);
+ }
+ if (error == 0) {
+ /*
+ * If the copyout was successful, now check to ensure
+ * that this vnode is not a BACKINGSTORE vnode. mtmd
+ * wants the path regardless.
+ */
+ if (backingstore) {
+ error = EBUSY;
+ }
+ }
+ } else
+ (void)vnode_put(vp);
+ }
+ FREE(pathbufp, M_TEMP);
+ goto outdrop;
+ }
+
+ default:
+ /*
+ * This is an fcntl() that we do not recognize at this level;
+ * if this is a vnode, we send it down into the VNOP_IOCTL
+ * for this vnode; this can include special devices, and will
+ * effectively overload fcntl() to send ioctl()'s.
+ */
+ if ((uap->cmd & IOC_VOID) && (uap->cmd & IOC_INOUT)) {
+ error = EINVAL;
+ goto out;
+ }
+
+ /* Catch any now-invalid fcntl() selectors */
+ switch (uap->cmd) {
+ case F_MARKDEPENDENCY:
+ error = EINVAL;
+ goto out;
+ default:
+ break;
+ }
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ( (error = vnode_getwithref(vp)) == 0 ) {
+#define STK_PARAMS 128
+ char stkbuf[STK_PARAMS];
+ unsigned int size;
+ caddr_t data, memp;
+ /*
+ * For this to work properly, we have to copy in the
+ * ioctl() cmd argument if there is one; we must also
+ * check that a command parameter, if present, does
+ * not exceed the maximum command length dictated by
+ * the number of bits we have available in the command
+ * to represent a structure length. Finally, we have
+ * to copy the results back out, if it is that type of
+ * ioctl().
+ */
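+ /*
+ * For example (illustrative only): a command defined in the usual BSD
+ * style as _IOWR('f', 1, struct foo) encodes sizeof(struct foo) in its
+ * IOCPARM_LEN field and sets both IOC_IN and IOC_OUT, so the code below
+ * copies a struct foo in from "argp", hands it to VNOP_IOCTL(), and
+ * copies it back out; a plain _IO() command carries a zero length and
+ * passes "argp" itself as the argument word. "struct foo" is a
+ * hypothetical type used only for this sketch.
+ */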
+ size = IOCPARM_LEN(uap->cmd);
+ if (size > IOCPARM_MAX) {
+ (void)vnode_put(vp);
+ error = EINVAL;
+ break;
+ }
+
+ memp = NULL;
+ if (size > sizeof (stkbuf)) {
+ if ((memp = (caddr_t)kalloc(size)) == 0) {
+ (void)vnode_put(vp);
+ error = ENOMEM;
+ goto outdrop;
+ }
+ data = memp;
+ } else {
+ data = &stkbuf[0];
+ }
+
+ if (uap->cmd & IOC_IN) {
+ if (size) {
+ /* structure */
+ error = copyin(argp, data, size);
+ if (error) {
+ (void)vnode_put(vp);
+ if (memp)
+ kfree(memp, size);
+ goto outdrop;
+ }
+
+ /* Bzero the section beyond that which was needed */
+ if (size <= sizeof(stkbuf)) {
+ bzero ( (((uint8_t*)data) + size), (sizeof(stkbuf) - size));
+ }
+ } else {
+ /* int */
+ if (is64bit) {
+ *(user_addr_t *)data = argp;
+ } else {
+ *(uint32_t *)data = (uint32_t)argp;
+ }
+ }
+ } else if ((uap->cmd & IOC_OUT) && size) {
+ /*
+ * Zero the buffer so the user always
+ * gets back something deterministic.
+ */
+ bzero(data, size);
+ } else if (uap->cmd & IOC_VOID) {
+ if (is64bit) {
+ *(user_addr_t *)data = argp;
+ } else {
+ *(uint32_t *)data = (uint32_t)argp;
+ }
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, CAST_DOWN(caddr_t, data), 0, &context);
+
+ (void)vnode_put(vp);
+
+ /* Copy any output data to user */
+ if (error == 0 && (uap->cmd & IOC_OUT) && size)
+ error = copyout(data, argp, size);
+ if (memp)
+ kfree(memp, size);
+ }
+ break;
+ }
+
+outdrop:
+ AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1);
+ fp_drop(p, fd, fp, 0);
+ return(error);
+out:
+ fp_drop(p, fd, fp, 1);
+ proc_fdunlock(p);
+ return(error);
+}
+
+
+/*
+ * finishdup
+ *
+ * Description: Common code for dup, dup2, and fcntl(F_DUPFD).
+ *
+ * Parameters: p Process performing the dup
+ * old The fd to dup
+ * new The fd to dup it to
+ * fd_flags Flags to augment the new fd
+ * retval Pointer to the call return area
+ *
+ * Returns: 0 Success
+ * EBADF
+ * ENOMEM
+ *
+ * Implicit returns:
+ * *retval (modified) The new descriptor
+ *
+ * Locks: Assumes proc_fdlock for process pointing to fdp is held by
+ * the caller
+ *
+ * Notes: This function may drop and reacquire this lock; it is unsafe
+ * for a caller to assume that other state protected by the lock
+ * has not been subsequently changed out from under it.
+ */
+int
+finishdup(proc_t p,
+ struct filedesc *fdp, int old, int new, int fd_flags, int32_t *retval)
+{
+ struct fileproc *nfp;
+ struct fileproc *ofp;
+#if CONFIG_MACF
+ int error;
+#endif
+
+#if DIAGNOSTIC
+ proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
+#endif
+ if ((ofp = fdp->fd_ofiles[old]) == NULL ||
+ (fdp->fd_ofileflags[old] & UF_RESERVED)) {
+ fdrelse(p, new);
+ return (EBADF);
+ }
+ fg_ref(ofp);
+
+#if CONFIG_MACF
+ error = mac_file_check_dup(proc_ucred(p), ofp->f_fglob, new);
+ if (error) {
+ fg_drop(ofp);
+ fdrelse(p, new);
+ return (error);
+ }
+#endif
+
+ proc_fdunlock(p);
+
+ nfp = fileproc_alloc_init(NULL);
+
+ proc_fdlock(p);
+
+ if (nfp == NULL) {
+ fg_drop(ofp);
+ fdrelse(p, new);
+ return (ENOMEM);
+ }
+
+ nfp->f_fglob = ofp->f_fglob;
+
+#if DIAGNOSTIC
+ if (fdp->fd_ofiles[new] != 0)
+ panic("finishdup: overwriting fd_ofiles with new %d", new);
+ if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0)
+ panic("finishdup: unreserved fileflags with new %d", new);
+#endif
+
+ if (new > fdp->fd_lastfile)
+ fdp->fd_lastfile = new;
+ *fdflags(p, new) |= fd_flags;
+ procfdtbl_releasefd(p, new, nfp);
+ *retval = new;
+ return (0);
+}
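+/*
+ * Hedged usage sketch: from userland this common path is reached via
+ * dup(2), dup2(2), or fcntl(F_DUPFD), e.g.
+ *
+ *   int a = dup(fd);                    lowest free descriptor
+ *   int b = fcntl(fd, F_DUPFD, 10);     lowest free descriptor >= 10
+ *   int c = dup2(fd, 42);               exactly 42, closing it first
+ *
+ * All three leave the new descriptor sharing the same fileglob (and
+ * thus file offset and flags) as the original.
+ */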
+
+
+/*
+ * close
+ *
+ * Description: The implementation of the close(2) system call
+ *
+ * Parameters: p Process in whose per process file table
+ * the close is to occur
+ * uap->fd fd to be closed
+ * retval <unused>
+ *
+ * Returns: 0 Success
+ * fp_lookup:EBADF Bad file descriptor
+ * fp_guard_exception:??? Guarded file descriptor
+ * close_internal_locked:EBADF
+ * close_internal_locked:??? Anything returnable by a per-fileops
+ * close function
+ */
+int
+close(proc_t p, struct close_args *uap, int32_t *retval)
+{
+ __pthread_testcancel(1);
+ return(close_nocancel(p, (struct close_nocancel_args *)uap, retval));
+}
+
+
+int
+close_nocancel(proc_t p, struct close_nocancel_args *uap, __unused int32_t *retval)
+{
+ struct fileproc *fp;
+ int fd = uap->fd;
+ int error;
+
+ AUDIT_SYSCLOSE(p, fd);
+
+ proc_fdlock(p);
+
+ if ( (error = fp_lookup(p,fd,&fp, 1)) ) {
+ proc_fdunlock(p);
+ return(error);
+ }
+
+ if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
+ error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
+ (void) fp_drop(p, fd, fp, 1);
+ proc_fdunlock(p);
+ return (error);
+ }
+
+ error = close_internal_locked(p, fd, fp, 0);
+
+ proc_fdunlock(p);
+
+ return (error);
+}
+
+
+/*
+ * close_internal_locked
+ *
+ * Close a file descriptor.
+ *
+ * Parameters: p Process in whose per process file table
+ * the close is to occur
+ * fd fd to be closed
+ * fp fileproc associated with the fd
+ *
+ * Returns: 0 Success
+ * EBADF fd already in close wait state
+ * closef_locked:??? Anything returnable by a per-fileops
+ * close function
+ *
+ * Locks: Assumes proc_fdlock for process is held by the caller and returns
+ * with lock held
+ *
+ * Notes: This function may drop and reacquire this lock; it is unsafe
+ * for a caller to assume that other state protected by the lock
+ * has not been subsequently changed out from under it.
+ */
+int
+close_internal_locked(proc_t p, int fd, struct fileproc *fp, int flags)
+{
+ struct filedesc *fdp = p->p_fd;
+ int error = 0;
+ int resvfd = flags & FD_DUP2RESV;
+
+
+#if DIAGNOSTIC
+ proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
+#endif
+
+ /* Keep people from using the filedesc while we are closing it */
+ procfdtbl_markclosefd(p, fd);
+
+
+ if ((fp->f_flags & FP_CLOSING) == FP_CLOSING) {
+ panic("close_internal_locked: being called on already closing fd");
+ }
+
+
+#if DIAGNOSTIC
+ if ((fdp->fd_ofileflags[fd] & UF_RESERVED) == 0)
+ panic("close_internal: unreserved fileflags with fd %d", fd);
+#endif
+
+ fp->f_flags |= FP_CLOSING;
+
+ if ( (fp->f_flags & FP_AIOISSUED) || kauth_authorize_fileop_has_listeners() ) {
+
+ proc_fdunlock(p);
+
+ if ( (fp->f_type == DTYPE_VNODE) && kauth_authorize_fileop_has_listeners() ) {
+ /*
+ * call out to allow 3rd party notification of close.
+ * Ignore result of kauth_authorize_fileop call.
+ */
+ if (vnode_getwithref((vnode_t)fp->f_data) == 0) {
+ u_int fileop_flags = 0;
+ if ((fp->f_flags & FP_WRITTEN) != 0)
+ fileop_flags |= KAUTH_FILEOP_CLOSE_MODIFIED;
+ kauth_authorize_fileop(fp->f_fglob->fg_cred, KAUTH_FILEOP_CLOSE,
+ (uintptr_t)fp->f_data, (uintptr_t)fileop_flags);
+ vnode_put((vnode_t)fp->f_data);
+ }
+ }
+ if (fp->f_flags & FP_AIOISSUED)
+ /*
+ * cancel all async IO requests that can be cancelled.
+ */
+ _aio_close( p, fd );
+
+ proc_fdlock(p);
+ }
+
+ if (fd < fdp->fd_knlistsize)
+ knote_fdclose(p, fd);
+
+ if (fp->f_flags & FP_WAITEVENT)
+ (void)waitevent_close(p, fp);
+
+ fileproc_drain(p, fp);
+
+ if (resvfd == 0) {
+ _fdrelse(p, fd);
+ } else {
+ procfdtbl_reservefd(p, fd);
+ }
+
+ error = closef_locked(fp, fp->f_fglob, p);
+ if ((fp->f_flags & FP_WAITCLOSE) == FP_WAITCLOSE)
+ wakeup(&fp->f_flags);
+ fp->f_flags &= ~(FP_WAITCLOSE | FP_CLOSING);
+
+ proc_fdunlock(p);
+
+ fileproc_free(fp);
+
+ proc_fdlock(p);
+
+#if DIAGNOSTIC
+ if (resvfd != 0) {
+ if ((fdp->fd_ofileflags[fd] & UF_RESERVED) == 0)
+ panic("close with reserved fd returns with freed fd:%d: proc: %p", fd, p);
+ }
+#endif
+
+ return(error);
+}
+
+
+/*
+ * fstat1
+ *
+ * Description: Return status information about a file descriptor.
+ *
+ * Parameters: p The process doing the fstat
+ * fd The fd to stat
+ * ub The user stat buffer
+ * xsecurity The user extended security
+ * buffer, or 0 if none
+ * xsecurity_size The size of xsecurity, or 0
+ * if no xsecurity
+ * isstat64 Flag to indicate 64 bit version
+ * for inode size, etc.
+ *
+ * Returns: 0 Success
+ * EBADF
+ * EFAULT
+ * fp_lookup:EBADF Bad file descriptor
+ * vnode_getwithref:???
+ * copyout:EFAULT
+ * vn_stat:???
+ * soo_stat:???
+ * pipe_stat:???
+ * pshm_stat:???
+ * kqueue_stat:???
+ *
+ * Notes: Internal implementation for all other fstat() related
+ * functions
+ *
+ * XXX switch on node type is bogus; need a stat in struct
+ * XXX fileops instead.
+ */
+static int
+fstat1(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size, int isstat64)
+{
+ struct fileproc *fp;
+ union {
+ struct stat sb;
+ struct stat64 sb64;
+ } source;
+ union {
+ struct user64_stat user64_sb;
+ struct user32_stat user32_sb;
+ struct user64_stat64 user64_sb64;
+ struct user32_stat64 user32_sb64;
+ } dest;
+ int error, my_size;
+ file_type_t type;
+ caddr_t data;
+ kauth_filesec_t fsec;
+ user_size_t xsecurity_bufsize;
+ vfs_context_t ctx = vfs_context_current();
+ void * sbptr;
+
+
+ AUDIT_ARG(fd, fd);
+
+ if ((error = fp_lookup(p, fd, &fp, 0)) != 0) {
+ return(error);
+ }
+ type = fp->f_type;
+ data = fp->f_data;
+ fsec = KAUTH_FILESEC_NONE;
+
+ sbptr = (void *)&source;
+
+ switch (type) {
+
+ case DTYPE_VNODE:
+ if ((error = vnode_getwithref((vnode_t)data)) == 0) {
+ /*
+ * If the caller has the file open, and is not
+ * requesting extended security information, we are
+ * going to let them get the basic stat information.
+ */
+ if (xsecurity == USER_ADDR_NULL) {
+ error = vn_stat_noauth((vnode_t)data, sbptr, NULL, isstat64, ctx);
+ } else {
+ error = vn_stat((vnode_t)data, sbptr, &fsec, isstat64, ctx);
+ }
+
+ AUDIT_ARG(vnpath, (struct vnode *)data, ARG_VNODE1);
+ (void)vnode_put((vnode_t)data);
+ }
+ break;
+
+#if SOCKETS
+ case DTYPE_SOCKET:
+ error = soo_stat((struct socket *)data, sbptr, isstat64);
+ break;
+#endif /* SOCKETS */
+
+ case DTYPE_PIPE:
+ error = pipe_stat((void *)data, sbptr, isstat64);
+ break;
+
+ case DTYPE_PSXSHM:
+ error = pshm_stat((void *)data, sbptr, isstat64);
+ break;
+
+ case DTYPE_KQUEUE:
+ error = kqueue_stat((void *)data, sbptr, isstat64, p);
+ break;
+
+ default:
+ error = EBADF;
+ goto out;
+ }
+ if (error == 0) {
+ caddr_t sbp;
+
+ if (isstat64 != 0) {
+ source.sb64.st_lspare = 0;
+ source.sb64.st_qspare[0] = 0LL;
+ source.sb64.st_qspare[1] = 0LL;
+
+ if (IS_64BIT_PROCESS(current_proc())) {
+ munge_user64_stat64(&source.sb64, &dest.user64_sb64);
+ my_size = sizeof(dest.user64_sb64);
+ sbp = (caddr_t)&dest.user64_sb64;
+ } else {
+ munge_user32_stat64(&source.sb64, &dest.user32_sb64);
+ my_size = sizeof(dest.user32_sb64);
+ sbp = (caddr_t)&dest.user32_sb64;
+ }
+ } else {
+ source.sb.st_lspare = 0;
+ source.sb.st_qspare[0] = 0LL;
+ source.sb.st_qspare[1] = 0LL;
+ if (IS_64BIT_PROCESS(current_proc())) {
+ munge_user64_stat(&source.sb, &dest.user64_sb);
+ my_size = sizeof(dest.user64_sb);
+ sbp = (caddr_t)&dest.user64_sb;
+ } else {
+ munge_user32_stat(&source.sb, &dest.user32_sb);
+ my_size = sizeof(dest.user32_sb);
+ sbp = (caddr_t)&dest.user32_sb;
+ }
+ }
+
+ error = copyout(sbp, ub, my_size);
+ }
+
+ /* caller wants extended security information? */
+ if (xsecurity != USER_ADDR_NULL) {
+
+ /* did we get any? */
+ if (fsec == KAUTH_FILESEC_NONE) {
+ if (susize(xsecurity_size, 0) != 0) {
+ error = EFAULT;
+ goto out;
+ }
+ } else {
+ /* find the user buffer size */
+ xsecurity_bufsize = fusize(xsecurity_size);
+
+ /* copy out the actual data size */
+ if (susize(xsecurity_size, KAUTH_FILESEC_COPYSIZE(fsec)) != 0) {
+ error = EFAULT;
+ goto out;
+ }
+
+ /* if the caller supplied enough room, copy out to it */
+ if (xsecurity_bufsize >= KAUTH_FILESEC_COPYSIZE(fsec))
+ error = copyout(fsec, xsecurity, KAUTH_FILESEC_COPYSIZE(fsec));
+ }
+ }
+out:
+ fp_drop(p, fd, fp, 0);
+ if (fsec != NULL)
+ kauth_filesec_free(fsec);
+ return (error);
+}
+
+
+/*
+ * fstat_extended
+ *
+ * Description: Extended version of fstat supporting returning extended
+ * security information
+ *
+ * Parameters: p The process doing the fstat
+ * uap->fd The fd to stat
+ * uap->ub The user stat buffer
+ * uap->xsecurity The user extended security
+ * buffer, or 0 if none
+ * uap->xsecurity_size The size of xsecurity, or 0
+ *
+ * Returns: 0 Success
+ * !0 Errno (see fstat1)
+ */
+int
+fstat_extended(proc_t p, struct fstat_extended_args *uap, __unused int32_t *retval)
+{
+ return(fstat1(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 0));
+}
+
+
+/*
+ * fstat
+ *
+ * Description: Get file status for the file associated with fd
+ *
+ * Parameters: p The process doing the fstat
+ * uap->fd The fd to stat
+ * uap->ub The user stat buffer
+ *
+ * Returns: 0 Success
+ * !0 Errno (see fstat1)
+ */
+int
+fstat(proc_t p, register struct fstat_args *uap, __unused int32_t *retval)
+{
+ return(fstat1(p, uap->fd, uap->ub, 0, 0, 0));
+}
+
+
+/*
+ * fstat64_extended
+ *
+ * Description: Extended version of fstat64 supporting returning extended
+ * security information
+ *
+ * Parameters: p The process doing the fstat
+ * uap->fd The fd to stat
+ * uap->ub The user stat buffer
+ * uap->xsecurity The user extended security
+ * buffer, or 0 if none
+ * uap->xsecurity_size The size of xsecurity, or 0
+ *
+ * Returns: 0 Success
+ * !0 Errno (see fstat1)
+ */
+int
+fstat64_extended(proc_t p, struct fstat64_extended_args *uap, __unused int32_t *retval)
+{
+ return(fstat1(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 1));
+}
+
+
+/*
+ * fstat64
+ *
+ * Description: Get 64 bit version of the file status for the file associated
+ * with fd
+ *
+ * Parameters: p The process doing the fstat
+ * uap->fd The fd to stat
+ * uap->ub The user stat buffer
+ *
+ * Returns: 0 Success
+ * !0 Errno (see fstat1)
+ */
+int
+fstat64(proc_t p, register struct fstat64_args *uap, __unused int32_t *retval)
+{
+ return(fstat1(p, uap->fd, uap->ub, 0, 0, 1));
+}
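+/*
+ * Hedged usage sketch: userland reaches fstat1() through fstat(2) or
+ * fstat64(2); the 64-bit-inode variant simply requests the larger
+ * stat64 layout, e.g.
+ *
+ *   struct stat64 sb;
+ *   if (fstat64(fd, &sb) == 0)
+ *       use sb.st_size, sb.st_ino, and so on
+ */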
+
+
+/*
+ * fpathconf
+ *
+ * Description: Return pathconf information about a file descriptor.
+ *
+ * Parameters: p Process making the request
+ * uap->fd fd to get information about
+ * uap->name Name of information desired
+ * retval Pointer to the call return area
+ *
+ * Returns: 0 Success
+ * EINVAL
+ * fp_lookup:EBADF Bad file descriptor
+ * vnode_getwithref:???
+ * vn_pathconf:???
+ *
+ * Implicit returns:
+ * *retval (modified) Returned information (numeric)
+ */
+int
+fpathconf(proc_t p, struct fpathconf_args *uap, int32_t *retval)
+{
+ int fd = uap->fd;
+ struct fileproc *fp;
+ struct vnode *vp;
+ int error = 0;
+ file_type_t type;
+ caddr_t data;
+
+
+ AUDIT_ARG(fd, uap->fd);
+ if ( (error = fp_lookup(p, fd, &fp, 0)) )
+ return(error);
+ type = fp->f_type;
+ data = fp->f_data;
+
+ switch (type) {
+
+ case DTYPE_SOCKET:
+ if (uap->name != _PC_PIPE_BUF) {
+ error = EINVAL;
+ goto out;
+ }
+ *retval = PIPE_BUF;
+ error = 0;
+ goto out;
+
+ case DTYPE_PIPE:
+ if (uap->name != _PC_PIPE_BUF) {
+ error = EINVAL;
+ goto out;
+ }
+ *retval = PIPE_BUF;
+ error = 0;
+ goto out;
+
+ case DTYPE_VNODE:
+ vp = (struct vnode *)data;
+
+ if ( (error = vnode_getwithref(vp)) == 0) {
+ AUDIT_ARG(vnpath, vp, ARG_VNODE1);
+
+ error = vn_pathconf(vp, uap->name, retval, vfs_context_current());
+
+ (void)vnode_put(vp);
+ }
+ goto out;
+
+ default:
+ error = EINVAL;
+ goto out;