+ } else {
+ /*
+ * An arbitrary limit, to prevent someone from mapping in a 20GB blob. This should cover
+ * our use cases for the immediate future, but note that at the time of this commit, some
+ * platforms are nearing 2MB blob sizes (with a prior soft limit of 2.5MB).
+ *
+ * We should consider how we can manage this more effectively; the above means that some
+ * platforms are using megabytes of memory for signing data; it merely hasn't crossed the
+ * threshold considered ridiculous at the time of this change.
+ */
+#define CS_MAX_BLOB_SIZE (40ULL * 1024ULL * 1024ULL)
+ if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
+ error = E2BIG;
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size);
+ kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size);
+ if (kr != KERN_SUCCESS) {
+ error = ENOMEM;
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ if(uap->cmd == F_ADDSIGS) {
+ error = copyin(fs.fs_blob_start,
+ (void *) kernel_blob_addr,
+ kernel_blob_size);
+ } else /* F_ADDFILESIGS || F_ADDFILESIGS_RETURN || F_ADDFILESIGS_FOR_DYLD_SIM */ {
+ int resid;
+
+ error = vn_rdwr(UIO_READ,
+ vp,
+ (caddr_t) kernel_blob_addr,
+ kernel_blob_size,
+ fs.fs_file_start + fs.fs_blob_start,
+ UIO_SYSSPACE,
+ 0,
+ kauth_cred_get(),
+ &resid,
+ p);
+ if ((error == 0) && resid) {
+ /* kernel_blob_size is rounded up to a page size, but the blob may sit at the end of the file; zero whatever vn_rdwr could not read */
+ memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid);
+ }
+ }
+
+ if (error) {
+ ubc_cs_blob_deallocate(kernel_blob_addr,
+ kernel_blob_size);
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ blob = NULL;
+ error = ubc_cs_blob_add(vp,
+ CPU_TYPE_ANY, /* not for a specific architecture */
+ fs.fs_file_start,
+ kernel_blob_addr,
+ kernel_blob_size,
+ blob_add_flags,
+ &blob);
+ if (error) {
+ ubc_cs_blob_deallocate(kernel_blob_addr,
+ kernel_blob_size);
+ } else {
+ /* ubc_cs_blob_add() has consumed "kernel_blob_addr" */
+#if CHECK_CS_VALIDATION_BITMAP
+ ubc_cs_validation_bitmap_allocate( vp );
+#endif
+ }
+ }
+
+ if (uap->cmd == F_ADDFILESIGS_RETURN || uap->cmd == F_ADDFILESIGS_FOR_DYLD_SIM) {
+ /*
+ * The first element of the structure is an
+ * off_t that happens to have the same size on
+ * all architectures. Let's overwrite that.
+ */
+ off_t end_offset = 0;
+ if (blob)
+ end_offset = blob->csb_end_offset;
+ error = copyout(&end_offset, argp, sizeof (end_offset));
+ }
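+
+ /*
+ * Illustrative userland sketch (an assumption for illustration, not part
+ * of this change) of driving F_ADDFILESIGS_RETURN, assuming the
+ * fsignatures structure from <sys/fcntl.h>:
+ *
+ *	fsignatures_t sigs = { .fs_file_start = macho_offset,
+ *	    .fs_blob_start = (void *)(uintptr_t)sig_file_offset,
+ *	    .fs_blob_size = sig_size };
+ *	if (fcntl(fd, F_ADDFILESIGS_RETURN, &sigs) != -1)
+ *		covered = sigs.fs_file_start;	// end offset, per the copyout above
+ */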
+
+ (void) vnode_put(vp);
+ break;
+ }
+ case F_FINDSIGS: {
+ error = ENOTSUP;
+ goto out;
+ }
+#if CONFIG_PROTECT
+ case F_GETPROTECTIONCLASS: {
+ int class = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ error = cp_vnode_getclass (vp, &class);
+ if (error == 0) {
+ *retval = class;
+ }
+
+ vnode_put(vp);
+ break;
+ }
+
+ case F_SETPROTECTIONCLASS: {
+ /* tmp must be a valid PROTECTION_CLASS_* */
+ tmp = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+ error = cp_vnode_setclass (vp, tmp);
+ vnode_put(vp);
+ break;
+ }
+
+ case F_TRANSCODEKEY: {
+
+ char *backup_keyp = NULL;
+ unsigned backup_key_len = CP_MAX_WRAPPEDKEYSIZE;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ MALLOC(backup_keyp, char *, backup_key_len, M_TEMP, M_WAITOK);
+ if (backup_keyp == NULL) {
+ error = ENOMEM;
+ vnode_put(vp);
+ goto outdrop;
+ }
+
+ error = cp_vnode_transcode (vp, backup_keyp, &backup_key_len);
+ vnode_put(vp);
+
+ if (error == 0) {
+ error = copyout((caddr_t)backup_keyp, argp, backup_key_len);
+ *retval = backup_key_len;
+ }
+
+ FREE(backup_keyp, M_TEMP);
+
+ break;
+ }
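+
+ /*
+ * Illustrative userland sketch (an assumption, not part of this change):
+ * the caller supplies a buffer of at least CP_MAX_WRAPPEDKEYSIZE bytes
+ * (a kernel-side constant; userland must size its buffer to match) and
+ * receives the wrapped key length as the fcntl() return value:
+ *
+ *	char wrapped[CP_MAX_WRAPPEDKEYSIZE];
+ *	int len = fcntl(fd, F_TRANSCODEKEY, wrapped);
+ */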
+
+ case F_GETPROTECTIONLEVEL: {
+ uint32_t cp_version = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * if cp_get_root_major_vers fails, error will be set to the proper errno
+ * and cp_version will still be 0.
+ */
+
+ error = cp_get_root_major_vers (vp, &cp_version);
+ *retval = cp_version;
+
+ vnode_put (vp);
+ break;
+ }
+
+ case F_GETDEFAULTPROTLEVEL: {
+ uint32_t cp_default = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * if cp_get_default_level fails, error will be set to the proper errno
+ * and cp_default will still be 0.
+ */
+
+ error = cp_get_default_level(vp, &cp_default);
+ *retval = cp_default;
+
+ vnode_put (vp);
+ break;
+ }
+
+
+#endif /* CONFIG_PROTECT */
+
+ case F_MOVEDATAEXTENTS: {
+ struct fileproc *fp2 = NULL;
+ struct vnode *src_vp = NULLVP;
+ struct vnode *dst_vp = NULLVP;
+ /* We need to grab the 2nd FD out of the arguments before moving on. */
+ int fd2 = CAST_DOWN_EXPLICIT(int32_t, uap->arg);
+
+ error = priv_check_cred(kauth_cred_get(), PRIV_VFS_MOVE_DATA_EXTENTS, 0);
+ if (error)
+ goto out;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ /* For now, special case HFS+ only, since this is SPI. */
+ src_vp = (struct vnode *)fp->f_data;
+ if (src_vp->v_tag != VT_HFS) {
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * Get the references before we start acquiring iocounts on the vnodes,
+ * while we still hold the proc fd lock
+ */
+ if ( (error = fp_lookup(p, fd2, &fp2, 1)) ) {
+ error = EBADF;
+ goto out;
+ }
+ if (fp2->f_type != DTYPE_VNODE) {
+ fp_drop(p, fd2, fp2, 1);
+ error = EBADF;
+ goto out;
+ }
+ dst_vp = (struct vnode *)fp2->f_data;
+ if (dst_vp->v_tag != VT_HFS) {
+ fp_drop(p, fd2, fp2, 1);
+ error = EINVAL;
+ goto out;
+ }
+
+#if CONFIG_MACF
+ /* Re-do MAC checks against the new FD, pass in a fake argument */
+ error = mac_file_check_fcntl(proc_ucred(p), fp2->f_fglob, uap->cmd, 0);
+ if (error) {
+ fp_drop(p, fd2, fp2, 1);
+ goto out;
+ }
+#endif
+ /* Audit the 2nd FD */
+ AUDIT_ARG(fd, fd2);
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(src_vp)) {
+ fp_drop(p, fd2, fp2, 0);
+ error = ENOENT;
+ goto outdrop;
+ }
+ if (vnode_getwithref(dst_vp)) {
+ vnode_put (src_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * Basic sanity checks: verify that the two vnodes are distinct
+ * and that both live on the same filesystem.
+ */
+ if (dst_vp == src_vp) {
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ error = EINVAL;
+ goto outdrop;
+ }
+
+ if (dst_vp->v_mount != src_vp->v_mount) {
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ error = EXDEV;
+ goto outdrop;
+ }
+
+ /* Now we have a legit pair of FDs. Go to work */
+
+ /* Now check for write access to the target files */
+ if(vnode_authorize(src_vp, NULLVP,
+ (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
+ vnode_put(src_vp);
+ vnode_put(dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ if(vnode_authorize(dst_vp, NULLVP,
+ (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
+ vnode_put(src_vp);
+ vnode_put(dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* Verify that both vps point to files and not directories */
+ if ( !vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) {
+ error = EINVAL;
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ goto outdrop;
+ }
+
+ /*
+ * The exchangedata syscall handler passes in 0 for the flags to VNOP_EXCHANGE.
+ * Here we pass FSOPT_EXCHANGE_DATA_ONLY, our special bit indicating that the
+ * new data-only behavior is expected.
+ */
+
+ error = VNOP_EXCHANGE(src_vp, dst_vp, FSOPT_EXCHANGE_DATA_ONLY, &context);
+
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ break;
+ }
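+
+ /*
+ * Illustrative call pattern for the SPI above (an assumption, not part
+ * of this change): both descriptors must reference regular files on the
+ * same HFS+ volume, and the second fd travels in the argument word:
+ *
+ *	if (fcntl(src_fd, F_MOVEDATAEXTENTS, dst_fd) == -1)
+ *		err(1, "F_MOVEDATAEXTENTS");
+ */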
+
+ /*
+ * SPI for making a file compressed.
+ */
+ case F_MAKECOMPRESSED: {
+ uint32_t gcounter = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ /* get the vnode */
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Is it a regular file or a symlink? */
+ if ((vnode_isreg(vp) == 0) && (vnode_islnk(vp) == 0)) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* invoke ioctl to pass off to FS */
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)&gcounter, 0, &context);
+
+ vnode_put (vp);
+ break;
+ }
+
+ /*
+ * SPI (private) for indicating to a filesystem that subsequent writes to
+ * the open FD will be written to the Fastflow.
+ */
+ case F_SET_GREEDY_MODE:
+ /* Intentionally fall through to the same handler as F_SETSTATICCONTENT;
+ * both fcntls pass their argument and selector into VNOP_IOCTL.
+ */
+
+ /*
+ * SPI (private) for indicating to a filesystem that subsequent writes to
+ * the open FD will represent static content.
+ */
+ case F_SETSTATICCONTENT: {
+ caddr_t ioctl_arg = NULL;
+
+ if (uap->arg) {
+ ioctl_arg = (caddr_t) 1;
+ }
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, ioctl_arg, 0, &context);
+ (void)vnode_put(vp);
+
+ break;
+ }
+
+ /*
+ * SPI (private) for indicating to the lower level storage driver that the
+ * subsequent writes should be of a particular IO type (burst, greedy, static),
+ * or other flavors that may be necessary.
+ */
+ case F_SETIOTYPE: {
+ caddr_t param_ptr;
+ uint32_t param;
+
+ if (uap->arg) {
+ /* extract 32 bits of flags from userland */
+ param_ptr = (caddr_t) uap->arg;
+ param = (uint32_t) param_ptr;
+ }
+ else {
+ /* If no argument is specified, error out */
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * Validate the different types of flags that can be specified:
+ * all of them are mutually exclusive for now.
+ */
+ switch (param) {
+ case F_IOTYPE_ISOCHRONOUS:
+ break;
+
+ default:
+ error = EINVAL;
+ goto out;
+ }
+
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, param_ptr, 0, &context);
+ (void)vnode_put(vp);
+
+ break;
+ }
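+
+ /*
+ * Illustrative userland usage (an assumption, not part of this change):
+ * F_IOTYPE_ISOCHRONOUS is the only flag accepted today, and the flags
+ * travel in the argument word itself rather than through a pointer:
+ *
+ *	(void) fcntl(fd, F_SETIOTYPE, F_IOTYPE_ISOCHRONOUS);
+ */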
+
+
+ /*
+ * Extract the CodeDirectory of the vnode associated with
+ * the file descriptor and copy it back to user space
+ */
+ case F_GETCODEDIR: {
+ struct user_fcodeblobs args;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ((fp->f_flag & FREAD) == 0) {
+ error = EBADF;
+ goto outdrop;
+ }
+
+ if (IS_64BIT_PROCESS(p)) {
+ struct user64_fcodeblobs args64;
+
+ error = copyin(argp, &args64, sizeof(args64));
+ if (error)
+ goto outdrop;
+
+ args.f_cd_hash = args64.f_cd_hash;
+ args.f_hash_size = args64.f_hash_size;
+ args.f_cd_buffer = args64.f_cd_buffer;
+ args.f_cd_size = args64.f_cd_size;
+ args.f_out_size = args64.f_out_size;
+ args.f_arch = args64.f_arch;
+ } else {
+ struct user32_fcodeblobs args32;
+
+ error = copyin(argp, &args32, sizeof(args32));
+ if (error)
+ goto outdrop;
+
+ args.f_cd_hash = CAST_USER_ADDR_T(args32.f_cd_hash);
+ args.f_hash_size = args32.f_hash_size;
+ args.f_cd_buffer = CAST_USER_ADDR_T(args32.f_cd_buffer);
+ args.f_cd_size = args32.f_cd_size;
+ args.f_out_size = CAST_USER_ADDR_T(args32.f_out_size);
+ args.f_arch = args32.f_arch;
+ }
+
+ if (vp->v_ubcinfo == NULL) {
+ error = EINVAL;
+ goto outdrop;
+ }
+
+ struct cs_blob *t_blob = vp->v_ubcinfo->cs_blobs;
+
+ /*
+ * This call fails if there is no cs_blob corresponding to the
+ * vnode, or if there are multiple cs_blobs present, and the caller
+ * did not specify which cpu_type they want the cs_blob for
+ */
+ if (t_blob == NULL) {
+ error = ENOENT; /* there is no codesigning blob for this file */
+ goto outdrop;
+ } else if (args.f_arch == 0 && t_blob->csb_next != NULL) {
+ error = ENOENT; /* too many architectures and none specified */
+ goto outdrop;
+ }
+
+ /* If the user specified an architecture, find the right blob */
+ if (args.f_arch != 0) {
+ while (t_blob) {
+ if (t_blob->csb_cpu_type == args.f_arch)
+ break;
+ t_blob = t_blob->csb_next;
+ }
+ /* The cpu_type the user requested could not be found */
+ if (t_blob == NULL) {
+ error = ENOENT;
+ goto outdrop;
+ }
+ }
+
+ const CS_SuperBlob *super_blob = (void *)t_blob->csb_mem_kaddr;
+ const CS_CodeDirectory *cd = findCodeDirectory(super_blob,
+ (const char *) super_blob,
+ (const char *) super_blob + t_blob->csb_mem_size);
+ if (cd == NULL) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ uint64_t buffer_size = ntohl(cd->length);
+
+ if (buffer_size > UINT_MAX) {
+ error = ERANGE;
+ goto outdrop;
+ }
+
+ error = copyout(&buffer_size, args.f_out_size, sizeof(unsigned int));
+ if (error)
+ goto outdrop;
+
+ if (sizeof(t_blob->csb_cdhash) > args.f_hash_size ||
+ buffer_size > args.f_cd_size) {
+ error = ERANGE;
+ goto outdrop;
+ }
+
+ error = copyout(t_blob->csb_cdhash, args.f_cd_hash, sizeof(t_blob->csb_cdhash));
+ if (error)
+ goto outdrop;
+ error = copyout(cd, args.f_cd_buffer, buffer_size);
+ if (error)
+ goto outdrop;
+
+ break;
+ }
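+
+ /*
+ * Hypothetical userland sketch (an assumption, not part of this change),
+ * assuming the fcodeblobs_t layout from <sys/fcntl.h>; buffer sizes are
+ * illustrative, f_arch == 0 requests the sole blob, and the cdhash is
+ * assumed to be 20 bytes (SHA-1) here:
+ *
+ *	uint8_t hash[20];
+ *	static char cd[64 * 1024];
+ *	unsigned int cd_size = 0;
+ *	fcodeblobs_t args = { .f_cd_hash = hash, .f_hash_size = sizeof(hash),
+ *	    .f_cd_buffer = cd, .f_cd_size = sizeof(cd),
+ *	    .f_out_size = &cd_size, .f_arch = 0 };
+ *	if (fcntl(fd, F_GETCODEDIR, &args) == 0)
+ *		use(hash, cd, cd_size);
+ */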
+
+ /*
+ * Set the vnode pointed to by 'fd'
+ * and tag it as the (potentially future) backing store
+ * for another filesystem
+ */
+ case F_SETBACKINGSTORE: {
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+
+ if (vp->v_tag != VT_HFS) {
+ error = EINVAL;
+ goto out;
+ }
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* only proceed if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+
+ /* If arg != 0, set, otherwise unset */
+ if (uap->arg) {
+ error = VNOP_IOCTL (vp, uap->cmd, (caddr_t)1, 0, &context);
+ }
+ else {
+ error = VNOP_IOCTL (vp, uap->cmd, (caddr_t)NULL, 0, &context);
+ }
+
+ vnode_put(vp);
+ break;
+ }
+
+ /*
+ * Like F_GETPATH, but with special semantics for
+ * the mobile time machine handler.
+ */
+ case F_GETPATH_MTMINFO: {
+ char *pathbufp;
+ int pathlen;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ pathlen = MAXPATHLEN;
+ MALLOC(pathbufp, char *, pathlen, M_TEMP, M_WAITOK);
+ if (pathbufp == NULL) {
+ error = ENOMEM;
+ goto outdrop;
+ }
+ if ( (error = vnode_getwithref(vp)) == 0 ) {
+ int backingstore = 0;
+
+ /* Check for error from vn_getpath before moving on */
+ if ((error = vn_getpath(vp, pathbufp, &pathlen)) == 0) {
+ if (vp->v_tag == VT_HFS) {
+ error = VNOP_IOCTL (vp, uap->cmd, (caddr_t) &backingstore, 0, &context);
+ }
+ (void)vnode_put(vp);
+
+ if (error == 0) {
+ error = copyout((caddr_t)pathbufp, argp, pathlen);
+ }
+ if (error == 0) {
+ /*
+ * If the copyout was successful, now check to ensure
+ * that this vnode is not a BACKINGSTORE vnode. mtmd
+ * wants the path regardless.
+ */
+ if (backingstore) {
+ error = EBUSY;
+ }
+ }
+ } else
+ (void)vnode_put(vp);
+ }
+ FREE(pathbufp, M_TEMP);
+ goto outdrop;
+ }
+
+#if DEBUG || DEVELOPMENT
+ case F_RECYCLE:
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ vnode_recycle(vp);
+ break;
+#endif
+
+ default:
+ /*
+ * This is an fcntl() that we do not recognize at this level;
+ * if this is a vnode, we send it down into the VNOP_IOCTL
+ * for this vnode; this can include special devices, and will
+ * effectively overload fcntl() to send ioctl()'s.
+ */
+ if((uap->cmd & IOC_VOID) && (uap->cmd & IOC_INOUT)){
+ error = EINVAL;
+ goto out;
+ }
+
+ /* Catch any now-invalid fcntl() selectors */
+ switch (uap->cmd) {
+ case F_MARKDEPENDENCY:
+ error = EINVAL;
+ goto out;
+ default:
+ break;
+ }
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ( (error = vnode_getwithref(vp)) == 0 ) {
+#define STK_PARAMS 128
+ char stkbuf[STK_PARAMS];
+ unsigned int size;
+ caddr_t data, memp;
+ /*
+ * For this to work properly, we have to copy in the
+ * ioctl() cmd argument if there is one; we must also
+ * check that a command parameter, if present, does
+ * not exceed the maximum command length dictated by
+ * the number of bits we have available in the command
+ * to represent a structure length. Finally, we have
+ * to copy the results back out, if it is that type of
+ * ioctl().
+ */
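+ /*
+ * For example (sketch): a command defined as _IOW('z', 1, struct foo)
+ * encodes sizeof(struct foo) in its IOCPARM_LEN bits, so "size" below
+ * becomes sizeof(struct foo) and the structure is copied in from argp.
+ */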
+ size = IOCPARM_LEN(uap->cmd);
+ if (size > IOCPARM_MAX) {
+ (void)vnode_put(vp);
+ error = EINVAL;
+ break;
+ }
+
+ memp = NULL;
+ if (size > sizeof (stkbuf)) {
+ if ((memp = (caddr_t)kalloc(size)) == 0) {
+ (void)vnode_put(vp);
+ error = ENOMEM;
+ goto outdrop;
+ }
+ data = memp;
+ } else {
+ data = &stkbuf[0];
+ }
+
+ if (uap->cmd & IOC_IN) {
+ if (size) {
+ /* structure */
+ error = copyin(argp, data, size);
+ if (error) {
+ (void)vnode_put(vp);
+ if (memp)
+ kfree(memp, size);
+ goto outdrop;
+ }
+
+ /* Bzero the section beyond that which was needed */
+ if (size <= sizeof(stkbuf)) {
+ bzero ( (((uint8_t*)data) + size), (sizeof(stkbuf) - size));
+ }
+ } else {
+ /* int */
+ if (is64bit) {
+ *(user_addr_t *)data = argp;
+ } else {
+ *(uint32_t *)data = (uint32_t)argp;
+ }
+ }
+ } else if ((uap->cmd & IOC_OUT) && size) {
+ /*
+ * Zero the buffer so the user always
+ * gets back something deterministic.
+ */
+ bzero(data, size);
+ } else if (uap->cmd & IOC_VOID) {
+ if (is64bit) {
+ *(user_addr_t *)data = argp;
+ } else {
+ *(uint32_t *)data = (uint32_t)argp;
+ }
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, CAST_DOWN(caddr_t, data), 0, &context);
+
+ (void)vnode_put(vp);
+
+ /* Copy any output data to user */
+ if (error == 0 && (uap->cmd & IOC_OUT) && size)
+ error = copyout(data, argp, size);
+ if (memp)
+ kfree(memp, size);
+ }
+ break;
+ }
+
+outdrop:
+ AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1);
+ fp_drop(p, fd, fp, 0);
+ return(error);
+out:
+ fp_drop(p, fd, fp, 1);
+ proc_fdunlock(p);
+ return(error);
+}
+
+
+/*
+ * finishdup
+ *
+ * Description: Common code for dup, dup2, and fcntl(F_DUPFD).
+ *
+ * Parameters: p Process performing the dup
+ * fdp Per-process descriptor table of p
+ * old The fd to dup
+ * new The fd to dup it to
+ * fd_flags Flags to augment the new fd
+ * retval Pointer to the call return area
+ *
+ * Returns: 0 Success
+ * EBADF
+ * ENOMEM
+ *
+ * Implicit returns:
+ * *retval (modified) The new descriptor
+ *
+ * Locks: Assumes proc_fdlock for process pointing to fdp is held by
+ * the caller
+ *
+ * Notes: This function may drop and reacquire this lock; it is unsafe
+ * for a caller to assume that other state protected by the lock
+ * has not been subsequently changed out from under it.
+ */
+int
+finishdup(proc_t p,
+ struct filedesc *fdp, int old, int new, int fd_flags, int32_t *retval)
+{
+ struct fileproc *nfp;
+ struct fileproc *ofp;
+#if CONFIG_MACF
+ int error;
+#endif
+
+#if DIAGNOSTIC
+ proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
+#endif
+ if ((ofp = fdp->fd_ofiles[old]) == NULL ||
+ (fdp->fd_ofileflags[old] & UF_RESERVED)) {
+ fdrelse(p, new);
+ return (EBADF);
+ }
+ fg_ref(ofp);
+
+#if CONFIG_MACF
+ error = mac_file_check_dup(proc_ucred(p), ofp->f_fglob, new);
+ if (error) {
+ fg_drop(ofp);
+ fdrelse(p, new);
+ return (error);
+ }
+#endif
+
+ proc_fdunlock(p);
+
+ nfp = fileproc_alloc_init(NULL);
+
+ proc_fdlock(p);
+
+ if (nfp == NULL) {
+ fg_drop(ofp);
+ fdrelse(p, new);
+ return (ENOMEM);
+ }
+
+ nfp->f_fglob = ofp->f_fglob;
+
+#if DIAGNOSTIC
+ if (fdp->fd_ofiles[new] != 0)
+ panic("finishdup: overwriting fd_ofiles with new %d", new);
+ if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0)
+ panic("finishdup: unreserved fileflags with new %d", new);
+#endif
+
+ if (new > fdp->fd_lastfile)
+ fdp->fd_lastfile = new;
+ *fdflags(p, new) |= fd_flags;
+ procfdtbl_releasefd(p, new, nfp);
+ *retval = new;
+ return (0);
+}
+
+
+/*
+ * close
+ *
+ * Description: The implementation of the close(2) system call
+ *
+ * Parameters: p Process in whose per process file table
+ * the close is to occur
+ * uap->fd fd to be closed
+ * retval <unused>
+ *
+ * Returns: 0 Success
+ * fp_lookup:EBADF Bad file descriptor
+ * fp_guard_exception:??? Guarded file descriptor
+ * close_internal:EBADF
+ * close_internal:??? Anything returnable by a per-fileops
+ * close function
+ */
+int
+close(proc_t p, struct close_args *uap, int32_t *retval)
+{
+ __pthread_testcancel(1);
+ return(close_nocancel(p, (struct close_nocancel_args *)uap, retval));
+}
+
+
+int
+close_nocancel(proc_t p, struct close_nocancel_args *uap, __unused int32_t *retval)
+{
+ struct fileproc *fp;
+ int fd = uap->fd;
+ int error;
+
+ AUDIT_SYSCLOSE(p, fd);
+
+ proc_fdlock(p);
+
+ if ( (error = fp_lookup(p,fd,&fp, 1)) ) {
+ proc_fdunlock(p);
+ return(error);
+ }
+
+ if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
+ error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
+ (void) fp_drop(p, fd, fp, 1);
+ proc_fdunlock(p);
+ return (error);
+ }
+
+ error = close_internal_locked(p, fd, fp, 0);
+
+ proc_fdunlock(p);
+
+ return (error);
+}