/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
+#include <sys/guarded.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/resourcevar.h>
#include <sys/aio_kern.h>
#include <sys/ev.h>
-#include <kern/lock.h>
+#include <kern/locks.h>
#include <sys/uio_internal.h>
+#include <sys/codesign.h>
+#include <sys/codedir_internal.h>
#include <security/audit/audit.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
+#include <sys/spawn.h>
#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_protos.h>
#include <mach/mach_port.h>
+#include <stdbool.h>
+
+#if CONFIG_PROTECT
+#include <sys/cprotect.h>
+#endif
+#include <hfs/hfs.h>
kern_return_t ipc_object_copyin(ipc_space_t, mach_port_name_t,
mach_msg_type_name_t, ipc_port_t *);
struct psemnode;
struct pshmnode;
-int fdopen(dev_t dev, int mode, int type, proc_t p);
-int finishdup(proc_t p, struct filedesc *fdp, int old, int new, int32_t *retval);
+static int finishdup(proc_t p,
+ struct filedesc *fdp, int old, int new, int flags, int32_t *retval);
int falloc_locked(proc_t p, struct fileproc **resultfp, int *resultfd, vfs_context_t ctx, int locked);
void fg_drop(struct fileproc * fp);
void fg_free(struct fileglob *fg);
void fg_ref(struct fileproc * fp);
-#if CONFIG_EMBEDDED
void fileport_releasefg(struct fileglob *fg);
-#endif /* CONFIG_EMBEDDED */
/* flags for close_internal_locked */
#define FD_DUP2RESV 1
-static int close_internal_locked(struct proc *p, int fd, struct fileproc *fp, int flags);
-
-static int closef_finish(struct fileproc *fp, struct fileglob *fg, proc_t p, vfs_context_t ctx);
/* We don't want these exported */
-__private_extern__
-int open1(vfs_context_t, struct nameidata *, int, struct vnode_attr *, int32_t *);
__private_extern__
int unlink1(vfs_context_t, struct nameidata *, int);
static void _fdrelse(struct proc * p, int fd);
-extern void file_lock_init(void) __attribute__((section("__TEXT, initcode")));
-extern int kqueue_stat(struct fileproc *fp, void *ub, int isstat4, proc_t p);
-#if SOCKETS
-extern int soo_stat(struct socket *so, void *ub, int isstat64);
-#endif /* SOCKETS */
+extern void file_lock_init(void);
extern kauth_scope_t kauth_scope_fileop;
-extern int cs_debug;
+/* Conflict wait queue for when selects collide (opaque type) */
+extern struct wait_queue select_conflict_queue;
#define f_flag f_fglob->fg_flag
-#define f_type f_fglob->fg_type
+#define f_type f_fglob->fg_ops->fo_type
#define f_msgcount f_fglob->fg_msgcount
#define f_cred f_fglob->fg_cred
#define f_ops f_fglob->fg_ops
#define f_offset f_fglob->fg_offset
#define f_data f_fglob->fg_data
/*
 * CHECK_ADD_OVERFLOW_INT64L(x, y)
 *
 * Evaluates to 1 when the 64-bit signed sum (x) + (y) cannot be
 * represented (it would exceed LLONG_MAX or fall below LLONG_MIN),
 * and to 0 otherwise.  Each argument is evaluated more than once,
 * so callers must pass side-effect-free expressions.
 */
#define CHECK_ADD_OVERFLOW_INT64L(x, y)				\
	((((x) > 0 && (y) > 0 && (x) > LLONG_MAX - (y)) ||	\
	  ((x) < 0 && (y) < 0 && (x) < LLONG_MIN - (y)))	\
	 ? 1 : 0)
/*
* Descriptor management.
*/
-struct filelist filehead; /* head of list of open files */
struct fmsglist fmsghead; /* head of list of open files */
struct fmsglist fmsg_ithead; /* head of list of open files */
int nfiles; /* actual number of open files */
lck_attr_t * file_lck_attr;
lck_mtx_t * uipc_lock;
-lck_mtx_t * file_flist_lock;
+
+
+/*
+ * check_file_seek_range
+ *
+ * Description: Checks if seek offsets are in the range of 0 to LLONG_MAX.
+ *
+ * Parameters: fl Flock structure.
+ * cur_file_offset Current offset in the file.
+ *
+ * Returns: 0 on Success.
+ * EOVERFLOW on overflow.
+ * EINVAL on offset less than zero.
+ */
+
/*
 * check_file_seek_range
 *
 * Description: Validate that the lock range described by 'fl', once
 *		resolved against the current file offset, lies entirely
 *		within [0, LLONG_MAX].
 *
 * Parameters:	fl		Flock structure.
 *		cur_file_offset	Current offset in the file.
 *
 * Returns:	0		Success.
 *		EOVERFLOW	A marker would exceed LLONG_MAX.
 *		EINVAL		A marker would be negative.
 */
static int
check_file_seek_range(struct flock *fl, off_t cur_file_offset)
{
	off_t start;

	switch (fl->l_whence) {
	case SEEK_CUR:
		/*
		 * Adding the current offset can wrap; detect that before
		 * performing the (otherwise undefined) signed addition.
		 */
		if ((fl->l_start > 0 && cur_file_offset > 0 &&
		    fl->l_start > LLONG_MAX - cur_file_offset) ||
		    (fl->l_start < 0 && cur_file_offset < 0 &&
		    fl->l_start < LLONG_MIN - cur_file_offset)) {
			/* Negative l_start means the start marker is negative. */
			return (fl->l_start < 0) ? EINVAL : EOVERFLOW;
		}
		start = fl->l_start + cur_file_offset;
		/* The start marker must not be negative. */
		if (start < 0) {
			return EINVAL;
		}
		/* Positive length: the end marker is start + l_len - 1. */
		if (fl->l_len > 0 && start > 0 && fl->l_len - 1 > 0 &&
		    start > LLONG_MAX - (fl->l_len - 1)) {
			return EOVERFLOW;
		}
		/* Non-positive length: the end marker is start + l_len. */
		if (fl->l_len <= 0 && start + fl->l_len < 0) {
			return EINVAL;
		}
		break;

	case SEEK_SET:
		/* The start marker must not be negative. */
		if (fl->l_start < 0) {
			return EINVAL;
		}
		/* Positive length: the end marker is l_start + l_len - 1. */
		if (fl->l_len > 0 && fl->l_start > 0 && fl->l_len - 1 > 0 &&
		    fl->l_start > LLONG_MAX - (fl->l_len - 1)) {
			return EOVERFLOW;
		}
		/* Negative length: the end marker is l_start + l_len. */
		if (fl->l_len < 0 && fl->l_start + fl->l_len < 0) {
			return EINVAL;
		}
		break;

	default:
		/* Other whence values are validated elsewhere. */
		break;
	}
	return 0;
}
/*
file_lck_attr = lck_attr_alloc_init();
uipc_lock = lck_mtx_alloc_init(file_lck_grp, file_lck_attr);
- file_flist_lock = lck_mtx_alloc_init(file_lck_grp, file_lck_attr);
}
uio_t auio = NULL;
char uio_buf[ UIO_SIZEOF(1) ];
struct vfs_context context = *(vfs_context_current());
+ bool wrote_some = false;
p = current_proc();
if ( !(io_flg & IO_APPEND))
flags = FOF_OFFSET;
- if (rw == UIO_WRITE)
+ if (rw == UIO_WRITE) {
+ user_ssize_t orig_resid = uio_resid(auio);
error = fo_write(fp, auio, flags, &context);
- else
+ wrote_some = uio_resid(auio) < orig_resid;
+ } else
error = fo_read(fp, auio, flags, &context);
if (aresid)
error = EIO;
}
out:
- if (rw == UIO_WRITE && error == 0)
+ if (wrote_some)
fp_drop_written(p, fd, fp);
else
fp_drop(p, fd, fp, 0);
proc_fdunlock(p);
return(error);
}
+ if (FP_ISGUARDED(fp, GUARD_DUP)) {
+ error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
+ (void) fp_drop(p, old, fp, 1);
+ proc_fdunlock(p);
+ return (error);
+ }
if ( (error = fdalloc(p, 0, &new)) ) {
fp_drop(p, old, fp, 1);
proc_fdunlock(p);
return (error);
}
- error = finishdup(p, fdp, old, new, retval);
+ error = finishdup(p, fdp, old, new, 0, retval);
fp_drop(p, old, fp, 1);
proc_fdunlock(p);
return (error);
}
-
/*
* dup2
*
* Description: Duplicate a file descriptor to a particular value.
*
* Parameters: p Process performing the dup
- * uap->fd The fd to dup
+ * uap->from The fd to dup
* uap->to The fd to dup it to
* retval Pointer to the call return area
*
proc_fdunlock(p);
return(error);
}
+ if (FP_ISGUARDED(fp, GUARD_DUP)) {
+ error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
+ (void) fp_drop(p, old, fp, 1);
+ proc_fdunlock(p);
+ return (error);
+ }
if (new < 0 ||
(rlim_t)new >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
new >= maxfiles) {
goto startover;
}
- if ((fdp->fd_ofiles[new] != NULL) && ((error = fp_lookup(p, new, &nfp, 1)) == 0)) {
+ if ((fdp->fd_ofiles[new] != NULL) &&
+ ((error = fp_lookup(p, new, &nfp, 1)) == 0)) {
fp_drop(p, old, fp, 1);
+ if (FP_ISGUARDED(nfp, GUARD_CLOSE)) {
+ error = fp_guard_exception(p,
+ new, nfp, kGUARD_EXC_CLOSE);
+ (void) fp_drop(p, new, nfp, 1);
+ proc_fdunlock(p);
+ return (error);
+ }
(void)close_internal_locked(p, new, nfp, FD_DUP2RESV);
#if DIAGNOSTIC
proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
} else {
#if DIAGNOSTIC
if (fdp->fd_ofiles[new] != NULL)
- panic("dup2: unable to get ref on a fileproc %d\n", new);
+ panic("dup2: no ref on fileproc %d", new);
#endif
procfdtbl_reservefd(p, new);
}
}
#if DIAGNOSTIC
if (fdp->fd_ofiles[new] != 0)
- panic("dup2-1: overwriting fd_ofiles with new %d\n", new);
+ panic("dup2: overwriting fd_ofiles with new %d", new);
if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0)
- panic("dup2-1: unreserved fileflags with new %d\n", new);
+ panic("dup2: unreserved fileflags with new %d", new);
#endif
- error = finishdup(p, fdp, old, new, retval);
+ error = finishdup(p, fdp, old, new, 0, retval);
fp_drop(p, old, fp, 1);
proc_fdunlock(p);
* copyin:EFAULT
* vnode_getwithref:???
* VNOP_ADVLOCK:???
+ * msleep:ETIMEDOUT
* [F_GETLK]
* EBADF
* EOVERFLOW
struct vnode *vp = NULLVP; /* for AUDIT_ARG() at end */
int i, tmp, error, error2, flg = F_POSIX;
struct flock fl;
+ struct flocktimeout fltimeout;
+ struct timespec *timeout = NULL;
struct vfs_context context;
off_t offset;
int newmin;
daddr64_t lbn, bn;
- int devBlockSize = 0;
unsigned int fflag;
user_addr_t argp;
boolean_t is64bit;
switch (uap->cmd) {
case F_DUPFD:
+ case F_DUPFD_CLOEXEC:
+ if (FP_ISGUARDED(fp, GUARD_DUP)) {
+ error = fp_guard_exception(p, fd, fp, kGUARD_EXC_DUP);
+ goto out;
+ }
newmin = CAST_DOWN_EXPLICIT(int, uap->arg); /* arg is an int, so we won't lose bits */
AUDIT_ARG(value32, newmin);
if ((u_int)newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
}
if ( (error = fdalloc(p, newmin, &i)) )
goto out;
- error = finishdup(p, fdp, fd, i, retval);
+ error = finishdup(p, fdp, fd, i,
+ uap->cmd == F_DUPFD_CLOEXEC ? UF_EXCLOSE : 0, retval);
goto out;
case F_GETFD:
- *retval = (*pop & UF_EXCLOSE)? 1 : 0;
+ *retval = (*pop & UF_EXCLOSE)? FD_CLOEXEC : 0;
error = 0;
goto out;
case F_SETFD:
AUDIT_ARG(value32, uap->arg);
- *pop = (*pop &~ UF_EXCLOSE) |
- (uap->arg & 1)? UF_EXCLOSE : 0;
+ if (uap->arg & FD_CLOEXEC)
+ *pop |= UF_EXCLOSE;
+ else {
+ if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
+ error = fp_guard_exception(p,
+ fd, fp, kGUARD_EXC_NOCLOEXEC);
+ goto out;
+ }
+ *pop &= ~UF_EXCLOSE;
+ }
error = 0;
goto out;
goto out;
}
if (fp->f_type == DTYPE_PIPE) {
- error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context);
+ error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
goto out;
}
error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context);
goto out;
+ case F_SETNOSIGPIPE:
+ tmp = CAST_DOWN_EXPLICIT(int, uap->arg);
+ if (fp->f_type == DTYPE_SOCKET) {
+#if SOCKETS
+ error = sock_setsockopt((struct socket *)fp->f_data,
+ SOL_SOCKET, SO_NOSIGPIPE, &tmp, sizeof (tmp));
+#else
+ error = EINVAL;
+#endif
+ } else {
+ struct fileglob *fg = fp->f_fglob;
+
+ lck_mtx_lock_spin(&fg->fg_lock);
+ if (tmp)
+ fg->fg_lflags |= FG_NOSIGPIPE;
+ else
+ fg->fg_lflags &= FG_NOSIGPIPE;
+ lck_mtx_unlock(&fg->fg_lock);
+ error = 0;
+ }
+ goto out;
+
+ case F_GETNOSIGPIPE:
+ if (fp->f_type == DTYPE_SOCKET) {
+#if SOCKETS
+ int retsize = sizeof (*retval);
+ error = sock_getsockopt((struct socket *)fp->f_data,
+ SOL_SOCKET, SO_NOSIGPIPE, retval, &retsize);
+#else
+ error = EINVAL;
+#endif
+ } else {
+ *retval = (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) ?
+ 1 : 0;
+ error = 0;
+ }
+ goto out;
+
+ case F_SETLKWTIMEOUT:
case F_SETLKW:
flg |= F_WAIT;
/* Fall into F_SETLK */
proc_fdunlock(p);
/* Copy in the lock structure */
- error = copyin(argp, (caddr_t)&fl, sizeof(fl));
- if (error) {
- goto outdrop;
+ if (uap->cmd == F_SETLKWTIMEOUT) {
+ error = copyin(argp, (caddr_t) &fltimeout, sizeof(fltimeout));
+ if (error) {
+ goto outdrop;
+ }
+ fl = fltimeout.fl;
+ timeout = &fltimeout.timeout;
+ } else {
+ error = copyin(argp, (caddr_t)&fl, sizeof(fl));
+ if (error) {
+ goto outdrop;
+ }
}
- if ((fl.l_whence == SEEK_CUR) && (fl.l_start + offset < fl.l_start)) {
- error = EOVERFLOW;
- goto outdrop;
+ /* Check starting byte and ending byte for EOVERFLOW in SEEK_CUR */
+ /* and ending byte for EOVERFLOW in SEEK_SET */
+ error = check_file_seek_range(&fl, offset);
+ if (error) {
+ goto outdrop;
}
if ( (error = vnode_getwithref(vp)) ) {
}
// XXX UInt32 unsafe for LP64 kernel
OSBitOrAtomic(P_LADVLOCK, &p->p_ladvflag);
- error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context);
+ error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context, timeout);
(void)vnode_put(vp);
goto outdrop;
}
// XXX UInt32 unsafe for LP64 kernel
OSBitOrAtomic(P_LADVLOCK, &p->p_ladvflag);
- error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context);
+ error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context, timeout);
(void)vnode_put(vp);
goto outdrop;
case F_UNLCK:
error = VNOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &fl,
- F_POSIX, &context);
+ F_POSIX, &context, timeout);
(void)vnode_put(vp);
goto outdrop;
/* Check starting byte and ending byte for EOVERFLOW in SEEK_CUR */
/* and ending byte for EOVERFLOW in SEEK_SET */
- if (((fl.l_whence == SEEK_CUR) &&
- ((fl.l_start + offset < fl.l_start) ||
- ((fl.l_len > 0) && (fl.l_start+offset + fl.l_len - 1 < fl.l_start+offset)))) ||
- ((fl.l_whence == SEEK_SET) && (fl.l_len > 0) && (fl.l_start + fl.l_len - 1 < fl.l_start)))
- {
- /* lf_advlock doesn't check start/end for F_GETLK if file has no locks */
- error = EOVERFLOW;
+ error = check_file_seek_range(&fl, offset);
+ if (error) {
goto outdrop;
}
#if CONFIG_MACF
error = mac_file_check_lock(proc_ucred(p), fp->f_fglob,
- F_GETLK, &fl);
+ uap->cmd, &fl);
if (error == 0)
#endif
- error = VNOP_ADVLOCK(vp, (caddr_t)p, F_GETLK, &fl, F_POSIX, &context);
+ error = VNOP_ADVLOCK(vp, (caddr_t)p, uap->cmd, &fl, F_POSIX, &context, NULL);
(void)vnode_put(vp);
* without zero filling the data is a security hole
* root would have access anyway so we'll allow it
*/
- if (!is_suser()) {
+ if (!kauth_cred_issuser(kauth_cred_get())) {
error = EACCES;
} else {
/*
goto out;
+ case F_NODIRECT:
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ if (uap->arg)
+ fp->f_fglob->fg_flag |= FNODIRECT;
+ else
+ fp->f_fglob->fg_flag &= ~FNODIRECT;
+
+ goto out;
+
+ case F_SINGLE_WRITER:
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ if (uap->arg)
+ fp->f_fglob->fg_flag |= FSINGLE_WRITER;
+ else
+ fp->f_fglob->fg_flag &= ~FSINGLE_WRITER;
+
+ goto out;
+
case F_GLOBAL_NOCACHE:
if (fp->f_type != DTYPE_VNODE) {
error = EBADF;
goto outdrop;
}
- case F_READBOOTSTRAP:
- case F_WRITEBOOTSTRAP: {
- user32_fbootstraptransfer_t user32_fbt_struct;
- user_fbootstraptransfer_t user_fbt_struct;
- int sizeof_struct;
- caddr_t boot_structp;
+ case F_FLUSH_DATA:
- if (fp->f_type != DTYPE_VNODE) {
- error = EBADF;
- goto out;
- }
- vp = (struct vnode *)fp->f_data;
- proc_fdunlock(p);
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
- if (IS_64BIT_PROCESS(p)) {
- sizeof_struct = sizeof(user_fbt_struct);
- boot_structp = (caddr_t) &user_fbt_struct;
- }
- else {
- sizeof_struct = sizeof(user32_fbt_struct);
- boot_structp = (caddr_t) &user32_fbt_struct;
- }
- error = copyin(argp, boot_structp, sizeof_struct);
- if (error)
- goto outdrop;
- if ( (error = vnode_getwithref(vp)) ) {
- goto outdrop;
- }
- if (uap->cmd == F_WRITEBOOTSTRAP) {
- /*
- * Make sure that we are root. Updating the
- * bootstrap on a disk could be a security hole
- */
- if (!is_suser()) {
- (void)vnode_put(vp);
- error = EACCES;
- goto outdrop;
- }
- }
- if (strncmp(vnode_mount(vp)->mnt_vfsstat.f_fstypename, "hfs",
- sizeof(vnode_mount(vp)->mnt_vfsstat.f_fstypename)) != 0) {
- error = EINVAL;
- } else {
- /*
- * call vnop_ioctl to handle the I/O
- */
- error = VNOP_IOCTL(vp, uap->cmd, boot_structp, 0, &context);
- }
- (void)vnode_put(vp);
- goto outdrop;
- }
- case F_LOG2PHYS: {
+ if ( (error = vnode_getwithref(vp)) == 0 ) {
+ error = cluster_push(vp, 0);
+
+ (void)vnode_put(vp);
+ }
+ goto outdrop;
+
+ case F_LOG2PHYS:
+ case F_LOG2PHYS_EXT: {
struct log2phys l2p_struct; /* structure for allocate command */
+ int devBlockSize;
+ off_t file_offset = 0;
+ size_t a_size = 0;
+ size_t run = 0;
+
+ if (uap->cmd == F_LOG2PHYS_EXT) {
+ error = copyin(argp, (caddr_t)&l2p_struct, sizeof(l2p_struct));
+ if (error)
+ goto out;
+ file_offset = l2p_struct.l2p_devoffset;
+ } else {
+ file_offset = fp->f_offset;
+ }
if (fp->f_type != DTYPE_VNODE) {
error = EBADF;
goto out;
if ( (error = vnode_getwithref(vp)) ) {
goto outdrop;
}
- error = VNOP_OFFTOBLK(vp, fp->f_offset, &lbn);
+ error = VNOP_OFFTOBLK(vp, file_offset, &lbn);
if (error) {
(void)vnode_put(vp);
goto outdrop;
goto outdrop;
}
devBlockSize = vfs_devblocksize(vnode_mount(vp));
-
- error = VNOP_BLOCKMAP(vp, offset, devBlockSize, &bn, NULL, NULL, 0, &context);
+ if (uap->cmd == F_LOG2PHYS_EXT) {
+#if defined(__LP64__)
+ a_size = l2p_struct.l2p_contigbytes;
+#else
+ if ((l2p_struct.l2p_contigbytes > SIZE_MAX) || (l2p_struct.l2p_contigbytes < 0)) {
+ /* size_t is 32-bit on a 32-bit kernel, therefore
+ * assigning l2p_contigbytes to a_size may have
+ * caused integer overflow. We, therefore, return
+ * an error here instead of calculating incorrect
+ * value.
+ */
+ printf ("fcntl: F_LOG2PHYS_EXT: l2p_contigbytes=%lld will overflow, returning error\n", l2p_struct.l2p_contigbytes);
+ error = EFBIG;
+ goto outdrop;
+ } else {
+ a_size = l2p_struct.l2p_contigbytes;
+ }
+#endif
+ } else {
+ a_size = devBlockSize;
+ }
+
+ error = VNOP_BLOCKMAP(vp, offset, a_size, &bn, &run, NULL, 0, &context);
(void)vnode_put(vp);
if (!error) {
l2p_struct.l2p_flags = 0; /* for now */
- l2p_struct.l2p_contigbytes = 0; /* for now */
- l2p_struct.l2p_devoffset = bn * devBlockSize;
- l2p_struct.l2p_devoffset += fp->f_offset - offset;
+ if (uap->cmd == F_LOG2PHYS_EXT) {
+ l2p_struct.l2p_contigbytes = run - (file_offset - offset);
+ } else {
+ l2p_struct.l2p_contigbytes = 0; /* for now */
+ }
+
+ /*
+ * The block number being -1 suggests that the file offset is not backed
+ * by any real blocks on-disk. As a result, just let it be passed back up wholesale.
+ */
+ if (bn == -1) {
+ /* Don't multiply it by the block size */
+ l2p_struct.l2p_devoffset = bn;
+ }
+ else {
+ l2p_struct.l2p_devoffset = bn * devBlockSize;
+ l2p_struct.l2p_devoffset += file_offset - offset;
+ }
error = copyout((caddr_t)&l2p_struct, argp, sizeof(l2p_struct));
}
goto outdrop;
VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
/* Start the lookup relative to the file descriptor's vnode. */
- NDINIT(&nd, LOOKUP, USEDVP | FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
+ NDINIT(&nd, LOOKUP, OP_OPEN, USEDVP | FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
fopen.o_pathname, &context);
nd.ni_dvp = vp;
- error = open1(&context, &nd, fopen.o_flags, &va, retval);
+ error = open1(&context, &nd, fopen.o_flags, &va,
+ fileproc_alloc_init, NULL, retval);
vnode_put(vp);
break;
}
/* Start the lookup relative to the file descriptor's vnode. */
- NDINIT(&nd, DELETE, USEDVP | AUDITVNPATH1, UIO_USERSPACE, pathname, &context);
+ NDINIT(&nd, DELETE, OP_UNLINK, USEDVP | AUDITVNPATH1, UIO_USERSPACE,
+ pathname, &context);
nd.ni_dvp = vp;
error = unlink1(&context, &nd, 0);
if(ubc_cs_blob_get(vp, CPU_TYPE_ANY, fs.fs_file_start))
{
- /*
- if(cs_debug)
- printf("CODE SIGNING: resident blob offered for: %s\n", vp->v_name);
- */
vnode_put(vp);
goto outdrop;
}
-
-#define CS_MAX_BLOB_SIZE (1ULL * 1024 * 1024) /* XXX ? */
+/*
+ * An arbitrary limit, to prevent someone from mapping in a 20GB blob. This should cover
+ * our use cases for the immediate future, but note that at the time of this commit, some
+ * platforms are nearing 2MB blob sizes (with a prior soft limit of 2.5MB).
+ *
+ * We should consider how we can manage this more effectively; the above means that some
+ * platforms are using megabytes of memory for signing data; it merely hasn't crossed the
+ * threshold considered ridiculous at the time of this change.
+ */
+#define CS_MAX_BLOB_SIZE (10ULL * 1024ULL * 1024ULL)
if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
error = E2BIG;
vnode_put(vp);
kernel_blob_size);
} else {
/* ubc_blob_add() has consumed "kernel_blob_addr" */
+#if CHECK_CS_VALIDATION_BITMAP
+ ubc_cs_validation_bitmap_allocate( vp );
+#endif
}
(void) vnode_put(vp);
break;
}
+ case F_FINDSIGS: {
+#ifdef SECURE_KERNEL
+ error = ENOTSUP;
+#else /* !SECURE_KERNEL */
+ off_t offsetMacho;
- case F_MARKDEPENDENCY: {
- struct vnode *root_vp;
- struct vnode_attr va;
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+ error = vnode_getwithref(vp);
+ if (error)
+ goto outdrop;
+
+ error = copyin(argp, &offsetMacho, sizeof(offsetMacho));
+ if (error) {
+ (void)vnode_put(vp);
+ goto outdrop;
+ }
+
+#if CONFIG_MACF
+ error = mac_vnode_find_sigs(p, vp, offsetMacho);
+#else
+ error = EPERM;
+#endif
+ if (error) {
+ (void)vnode_put(vp);
+ goto outdrop;
+ }
+#endif /* SECURE_KERNEL */
+ break;
+ }
+#if CONFIG_PROTECT
+ case F_GETPROTECTIONCLASS: {
+ int class = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ error = cp_vnode_getclass (vp, &class);
+ if (error == 0) {
+ *retval = class;
+ }
+
+ vnode_put(vp);
+ break;
+ }
+
+ case F_SETPROTECTIONCLASS: {
+ /* tmp must be a valid PROTECTION_CLASS_* */
+ tmp = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
vfs_context_t ctx = vfs_context_current();
- kauth_cred_t cred;
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+ error = cp_vnode_setclass (vp, tmp);
+ vnode_put(vp);
+ break;
+ }
+
+ case F_TRANSCODEKEY: {
+
+ char *backup_keyp = NULL;
+ unsigned backup_key_len = CP_MAX_WRAPPEDKEYSIZE;
- if ((current_proc()->p_flag & P_DEPENDENCY_CAPABLE) == 0) {
- error = EPERM;
- goto out;
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
}
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ MALLOC(backup_keyp, char *, backup_key_len, M_TEMP, M_WAITOK);
+ if (backup_keyp == NULL) {
+ error = ENOMEM;
+ goto outdrop;
+ }
+
+ error = cp_vnode_transcode (vp, backup_keyp, &backup_key_len);
+ vnode_put(vp);
+
+ if (error == 0) {
+ error = copyout((caddr_t)backup_keyp, argp, backup_key_len);
+ *retval = backup_key_len;
+ }
+
+ FREE(backup_keyp, M_TEMP);
+
+ break;
+ }
+
+ case F_GETPROTECTIONLEVEL: {
+ uint32_t cp_version = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * if cp_get_major_vers fails, error will be set to proper errno
+ * and cp_version will still be 0.
+ */
+
+ error = cp_get_root_major_vers (vp, &cp_version);
+ *retval = cp_version;
+
+ vnode_put (vp);
+ break;
+ }
+
+ case F_GETDEFAULTPROTLEVEL: {
+ uint32_t cp_default = 0;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * if cp_get_major_vers fails, error will be set to proper errno
+ * and cp_version will still be 0.
+ */
+
+ error = cp_get_default_level(vp, &cp_default);
+ *retval = cp_default;
+
+ vnode_put (vp);
+ break;
+ }
+
+
+#endif /* CONFIG_PROTECT */
+
+ case F_MOVEDATAEXTENTS: {
+ struct fileproc *fp2 = NULL;
+ struct vnode *src_vp = NULLVP;
+ struct vnode *dst_vp = NULLVP;
+ /* We need to grab the 2nd FD out of the argments before moving on. */
+ int fd2 = CAST_DOWN_EXPLICIT(int32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ /* For now, special case HFS+ only, since this is SPI. */
+ src_vp = (struct vnode *)fp->f_data;
+ if (src_vp->v_tag != VT_HFS) {
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * Get the references before we start acquiring iocounts on the vnodes,
+ * while we still hold the proc fd lock
+ */
+ if ( (error = fp_lookup(p, fd2, &fp2, 1)) ) {
+ error = EBADF;
+ goto out;
+ }
+ if (fp2->f_type != DTYPE_VNODE) {
+ fp_drop(p, fd2, fp2, 1);
+ error = EBADF;
+ goto out;
+ }
+ dst_vp = (struct vnode *)fp2->f_data;
+ if (dst_vp->v_tag != VT_HFS) {
+ fp_drop(p, fd2, fp2, 1);
+ error = EINVAL;
+ goto out;
+ }
+
+#if CONFIG_MACF
+ /* Re-do MAC checks against the new FD, pass in a fake argument */
+ error = mac_file_check_fcntl(proc_ucred(p), fp2->f_fglob, uap->cmd, 0);
+ if (error) {
+ fp_drop(p, fd2, fp2, 1);
+ goto out;
+ }
+#endif
+ /* Audit the 2nd FD */
+ AUDIT_ARG(fd, fd2);
+
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(src_vp)) {
+ fp_drop(p, fd2, fp2, 0);
+ error = ENOENT;
+ goto outdrop;
+ }
+ if (vnode_getwithref(dst_vp)) {
+ vnode_put (src_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /*
+ * Basic asserts; validate they are not the same and that
+ * both live on the same filesystem.
+ */
+ if (dst_vp == src_vp) {
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ error = EINVAL;
+ goto outdrop;
+ }
+
+ if (dst_vp->v_mount != src_vp->v_mount) {
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ error = EXDEV;
+ goto outdrop;
+ }
+
+ /* Now we have a legit pair of FDs. Go to work */
+
+ /* Now check for write access to the target files */
+ if(vnode_authorize(src_vp, NULLVP,
+ (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
+ vnode_put(src_vp);
+ vnode_put(dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ if(vnode_authorize(dst_vp, NULLVP,
+ (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
+ vnode_put(src_vp);
+ vnode_put(dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* Verify that both vps point to files and not directories */
+ if ( !vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) {
+ error = EINVAL;
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop (p, fd2, fp2, 0);
+ goto outdrop;
+ }
+
+ /*
+ * The exchangedata syscall handler passes in 0 for the flags to VNOP_EXCHANGE.
+ * We'll pass in our special bit indicating that the new behavior is expected
+ */
+
+ error = VNOP_EXCHANGE(src_vp, dst_vp, FSOPT_EXCHANGE_DATA_ONLY, &context);
+
+ vnode_put (src_vp);
+ vnode_put (dst_vp);
+ fp_drop(p, fd2, fp2, 0);
+ break;
+ }
+
+ /*
+ * SPI for making a file compressed.
+ */
+ case F_MAKECOMPRESSED: {
+ uint32_t gcounter = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode*) fp->f_data;
+ proc_fdunlock (p);
+
+ /* get the vnode */
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Is it a file? */
+ if ((vnode_isreg(vp) == 0) && (vnode_islnk(vp) == 0)) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ /* invoke ioctl to pass off to FS */
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)&gcounter, 0, &context);
+
+ vnode_put (vp);
+ break;
+ }
+
+ /*
+ * SPI (private) for indicating to a filesystem that subsequent writes to
+ * the open FD will written to the Fastflow.
+ */
+ case F_SET_GREEDY_MODE:
+ /* intentionally drop through to the same handler as F_SETSTATIC.
+ * both fcntls should pass the argument and their selector into VNOP_IOCTL.
+ */
+
+ /*
+ * SPI (private) for indicating to a filesystem that subsequent writes to
+ * the open FD will represent static content.
+ */
+ case F_SETSTATICCONTENT: {
+ caddr_t ioctl_arg = NULL;
+
+ if (uap->arg) {
+ ioctl_arg = (caddr_t) 1;
+ }
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, ioctl_arg, 0, &context);
+ (void)vnode_put(vp);
+
+ break;
+ }
+
+ /*
+ * SPI (private) for indicating to the lower level storage driver that the
+ * subsequent writes should be of a particular IO type (burst, greedy, static),
+ * or other flavors that may be necessary.
+ */
+ case F_SETIOTYPE: {
+ caddr_t param_ptr;
+ uint32_t param;
+
+ if (uap->arg) {
+ /* extract 32 bits of flags from userland */
+ param_ptr = (caddr_t) uap->arg;
+ param = (uint32_t) param_ptr;
+ }
+ else {
+ /* If no argument is specified, error out */
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * Validate the different types of flags that can be specified:
+ * all of them are mutually exclusive for now.
+ */
+ switch (param) {
+ case F_IOTYPE_ISOCHRONOUS:
+ break;
+
+ default:
+ error = EINVAL;
+ goto out;
+ }
+
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* Only go forward if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+ error = VNOP_IOCTL(vp, uap->cmd, param_ptr, 0, &context);
+ (void)vnode_put(vp);
+
+ break;
+ }
+
+
+ /*
+ * Extract the CodeDirectory of the vnode associated with
+ * the file descriptor and copy it back to user space
+ */
+ case F_GETCODEDIR: {
+ struct user_fcodeblobs args;
+
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+ proc_fdunlock(p);
+
+ if ((fp->f_flag & FREAD) == 0) {
+ error = EBADF;
+ goto outdrop;
+ }
+
+ if (IS_64BIT_PROCESS(p)) {
+ struct user64_fcodeblobs args64;
+
+ error = copyin(argp, &args64, sizeof(args64));
+ if (error)
+ goto outdrop;
+
+ args.f_cd_hash = args64.f_cd_hash;
+ args.f_hash_size = args64.f_hash_size;
+ args.f_cd_buffer = args64.f_cd_buffer;
+ args.f_cd_size = args64.f_cd_size;
+ args.f_out_size = args64.f_out_size;
+ args.f_arch = args64.f_arch;
+ } else {
+ struct user32_fcodeblobs args32;
+
+ error = copyin(argp, &args32, sizeof(args32));
+ if (error)
+ goto outdrop;
+
+ args.f_cd_hash = CAST_USER_ADDR_T(args32.f_cd_hash);
+ args.f_hash_size = args32.f_hash_size;
+ args.f_cd_buffer = CAST_USER_ADDR_T(args32.f_cd_buffer);
+ args.f_cd_size = args32.f_cd_size;
+ args.f_out_size = CAST_USER_ADDR_T(args32.f_out_size);
+ args.f_arch = args32.f_arch;
+ }
+
+ if (vp->v_ubcinfo == NULL) {
+ error = EINVAL;
+ goto outdrop;
+ }
+
+ struct cs_blob *t_blob = vp->v_ubcinfo->cs_blobs;
+
+ /*
+ * This call fails if there is no cs_blob corresponding to the
+ * vnode, or if there are multiple cs_blobs present, and the caller
+ * did not specify which cpu_type they want the cs_blob for
+ */
+ if (t_blob == NULL) {
+ error = ENOENT; /* there is no codesigning blob for this process */
+ goto outdrop;
+ } else if (args.f_arch == 0 && t_blob->csb_next != NULL) {
+ error = ENOENT; /* too many architectures and none specified */
+ goto outdrop;
+ }
+
+ /* If the user specified an architecture, find the right blob */
+ if (args.f_arch != 0) {
+ while (t_blob) {
+ if (t_blob->csb_cpu_type == args.f_arch)
+ break;
+ t_blob = t_blob->csb_next;
+ }
+ /* The cpu_type the user requested could not be found */
+ if (t_blob == NULL) {
+ error = ENOENT;
+ goto outdrop;
+ }
+ }
+
+ const CS_SuperBlob *super_blob = (void *)t_blob->csb_mem_kaddr;
+ const CS_CodeDirectory *cd = findCodeDirectory(super_blob,
+ (char *) super_blob,
+ (char *) super_blob + t_blob->csb_mem_size);
+ if (cd == NULL) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ uint64_t buffer_size = ntohl(cd->length);
+
+ if (buffer_size > UINT_MAX) {
+ error = ERANGE;
+ goto outdrop;
+ }
+
+ error = copyout(&buffer_size, args.f_out_size, sizeof(unsigned int));
+ if (error)
+ goto outdrop;
+
+ if (sizeof(t_blob->csb_sha1) > args.f_hash_size ||
+ buffer_size > args.f_cd_size) {
+ error = ERANGE;
+ goto outdrop;
+ }
+
+ error = copyout(t_blob->csb_sha1, args.f_cd_hash, sizeof(t_blob->csb_sha1));
+ if (error)
+ goto outdrop;
+ error = copyout(cd, args.f_cd_buffer, buffer_size);
+ if (error)
+ goto outdrop;
+
+ break;
+ }
+
+ /*
+ * Set the vnode pointed to by 'fd'
+ * and tag it as the (potentially future) backing store
+ * for another filesystem
+ */
+ case F_SETBACKINGSTORE: {
+ if (fp->f_type != DTYPE_VNODE) {
+ error = EBADF;
+ goto out;
+ }
+
+ vp = (struct vnode *)fp->f_data;
+
+ if (vp->v_tag != VT_HFS) {
+ error = EINVAL;
+ goto out;
+ }
+ proc_fdunlock(p);
+
+ if (vnode_getwithref(vp)) {
+ error = ENOENT;
+ goto outdrop;
+ }
+
+ /* only proceed if you have write access */
+ vfs_context_t ctx = vfs_context_current();
+ if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
+ vnode_put(vp);
+ error = EBADF;
+ goto outdrop;
+ }
+
+
+ /* If arg != 0, set, otherwise unset */
+ if (uap->arg) {
+ error = VNOP_IOCTL (vp, uap->cmd, (caddr_t)1, 0, &context);
+ }
+ else {
+ error = VNOP_IOCTL (vp, uap->cmd, (caddr_t)NULL, 0, &context);
+ }
+
+ vnode_put(vp);
+ break;
+ }
+
+ /*
+ * like F_GETPATH, but special semantics for
+ * the mobile time machine handler.
+ */
+ case F_GETPATH_MTMINFO: {
+ char *pathbufp;
+ int pathlen;
+
if (fp->f_type != DTYPE_VNODE) {
error = EBADF;
goto out;
}
-
vp = (struct vnode *)fp->f_data;
proc_fdunlock(p);
- if (vnode_getwithref(vp)) {
- error = ENOENT;
+ pathlen = MAXPATHLEN;
+ MALLOC(pathbufp, char *, pathlen, M_TEMP, M_WAITOK);
+ if (pathbufp == NULL) {
+ error = ENOMEM;
goto outdrop;
}
+ if ( (error = vnode_getwithref(vp)) == 0 ) {
+ int backingstore = 0;
+
+ /* Check for error from vn_getpath before moving on */
+ if ((error = vn_getpath(vp, pathbufp, &pathlen)) == 0) {
+ if (vp->v_tag == VT_HFS) {
+ error = VNOP_IOCTL (vp, uap->cmd, (caddr_t) &backingstore, 0, &context);
+ }
+ (void)vnode_put(vp);
- // the passed in vnode must be the root dir of the file system
- if (VFS_ROOT(vp->v_mount, &root_vp, ctx) != 0 || vp != root_vp) {
- error = EINVAL;
- vnode_put(vp);
- goto outdrop;
- }
- vnode_put(root_vp);
-
- // get the owner of the root dir
- VATTR_INIT(&va);
- VATTR_WANTED(&va, va_uid);
- if (vnode_getattr(vp, &va, ctx) != 0) {
- error = EINVAL;
- vnode_put(vp);
- goto outdrop;
- }
-
- // and last, check that the caller is the super user or
- // the owner of the mount point
- cred = vfs_context_ucred(ctx);
- if (!is_suser() && va.va_uid != kauth_cred_getuid(cred)) {
- error = EACCES;
- vnode_put(vp);
- goto outdrop;
+ if (error == 0) {
+ error = copyout((caddr_t)pathbufp, argp, pathlen);
+ }
+ if (error == 0) {
+ /*
+ * If the copyout was successful, now check to ensure
+ * that this vnode is not a BACKINGSTORE vnode. mtmd
+ * wants the path regardless.
+ */
+ if (backingstore) {
+ error = EBUSY;
+ }
+ }
+ } else
+ (void)vnode_put(vp);
}
-
- // if all those checks pass then we can mark the dependency
- vfs_markdependency(vp->v_mount);
- error = 0;
-
- vnode_put(vp);
-
- break;
- }
-
- case F_GETPROTECTIONCLASS: {
- // stub to make the API work
- printf("Reached F_GETPROTECTIONCLASS, returning without action\n");
- error = 0;
- goto out;
- }
-
- case F_SETPROTECTIONCLASS: {
- // stub to make the API work
- printf("Reached F_SETPROTECTIONCLASS, returning without action\n");
- error = 0;
- goto out;
+ FREE(pathbufp, M_TEMP);
+ goto outdrop;
}
-
default:
/*
* This is an fcntl() that we d not recognize at this level;
* effectively overload fcntl() to send ioctl()'s.
*/
if((uap->cmd & IOC_VOID) && (uap->cmd & IOC_INOUT)){
- error = EINVAL;
+ error = EINVAL;
goto out;
}
+ /* Catch any now-invalid fcntl() selectors */
+ switch (uap->cmd) {
+ case F_MARKDEPENDENCY:
+ error = EINVAL;
+ goto out;
+ default:
+ break;
+ }
+
if (fp->f_type != DTYPE_VNODE) {
error = EBADF;
goto out;
kfree(memp, size);
goto outdrop;
}
+
+ /* Bzero the section beyond that which was needed */
+ if (size <= sizeof(stkbuf)) {
+ bzero ( (((uint8_t*)data) + size), (sizeof(stkbuf) - size));
+ }
} else {
/* int */
if (is64bit) {
* Parameters: p Process performing the dup
* old The fd to dup
* new The fd to dup it to
+ * fd_flags Flags to augment the new fd
* retval Pointer to the call return area
*
* Returns: 0 Success
*
* Notes: This function may drop and reacquire this lock; it is unsafe
* for a caller to assume that other state protected by the lock
- * has not been subsequently changes out from under it.
+ * has not been subsequently changed out from under it.
*/
int
-finishdup(proc_t p, struct filedesc *fdp, int old, int new, int32_t *retval)
+finishdup(proc_t p,
+ struct filedesc *fdp, int old, int new, int fd_flags, int32_t *retval)
{
struct fileproc *nfp;
struct fileproc *ofp;
#if DIAGNOSTIC
proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
#endif
-
if ((ofp = fdp->fd_ofiles[old]) == NULL ||
- (fdp->fd_ofileflags[old] & UF_RESERVED)) {
+ (fdp->fd_ofileflags[old] & UF_RESERVED)) {
fdrelse(p, new);
return (EBADF);
}
proc_fdunlock(p);
- MALLOC_ZONE(nfp, struct fileproc *, sizeof(struct fileproc), M_FILEPROC, M_WAITOK);
- /* Failure check follows proc_fdlock() due to handling requirements */
+ nfp = fileproc_alloc_init(NULL);
proc_fdlock(p);
return (ENOMEM);
}
- bzero(nfp, sizeof(struct fileproc));
-
- nfp->f_flags = 0;
nfp->f_fglob = ofp->f_fglob;
- nfp->f_iocount = 0;
#if DIAGNOSTIC
if (fdp->fd_ofiles[new] != 0)
- panic("finishdup: overwriting fd_ofiles with new %d\n", new);
+ panic("finishdup: overwriting fd_ofiles with new %d", new);
if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0)
- panic("finishdup: unreserved fileflags with new %d\n", new);
+ panic("finishdup: unreserved fileflags with new %d", new);
#endif
if (new > fdp->fd_lastfile)
fdp->fd_lastfile = new;
+ *fdflags(p, new) |= fd_flags;
procfdtbl_releasefd(p, new, nfp);
*retval = new;
return (0);
*
* Returns: 0 Success
* fp_lookup:EBADF Bad file descriptor
+ * fp_guard_exception:??? Guarded file descriptor
* close_internal:EBADF
* close_internal:??? Anything returnable by a per-fileops
* close function
{
struct fileproc *fp;
int fd = uap->fd;
- int error =0;
+ int error;
AUDIT_SYSCLOSE(p, fd);
return(error);
}
+ if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
+ error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
+ (void) fp_drop(p, fd, fp, 1);
+ proc_fdunlock(p);
+ return (error);
+ }
+
error = close_internal_locked(p, fd, fp, 0);
proc_fdunlock(p);
- return(error);
+ return (error);
}
*
* Notes: This function may drop and reacquire this lock; it is unsafe
* for a caller to assume that other state protected by the lock
- * has not been subsequently changes out from under it, if the
- * caller made the call with the lock held.
+ * has not been subsequently changed out from under it.
*/
-static int
+int
close_internal_locked(proc_t p, int fd, struct fileproc *fp, int flags)
{
struct filedesc *fdp = p->p_fd;
if ((fp->f_flags & FP_CLOSING) == FP_CLOSING) {
- panic("close_internal_locked: being called on already closing fd\n");
+ panic("close_internal_locked: being called on already closing fd");
}
#if DIAGNOSTIC
if ((fdp->fd_ofileflags[fd] & UF_RESERVED) == 0)
- panic("close_internal: unreserved fileflags with fd %d\n", fd);
+ panic("close_internal: unreserved fileflags with fd %d", fd);
#endif
fp->f_flags |= FP_CLOSING;
if (fp->f_flags & FP_WAITEVENT)
(void)waitevent_close(p, fp);
- if ((fp->f_flags & FP_INCHRREAD) == 0)
- fileproc_drain(p, fp);
+ fileproc_drain(p, fp);
- if (resvfd == 0)
+ if (resvfd == 0) {
_fdrelse(p, fd);
+ } else {
+ procfdtbl_reservefd(p, fd);
+ }
error = closef_locked(fp, fp->f_fglob, p);
if ((fp->f_flags & FP_WAITCLOSE) == FP_WAITCLOSE)
proc_fdunlock(p);
- FREE_ZONE(fp, sizeof(*fp), M_FILEPROC);
+ fileproc_free(fp);
proc_fdlock(p);
#if DIAGNOSTIC
if (resvfd != 0) {
if ((fdp->fd_ofileflags[fd] & UF_RESERVED) == 0)
- panic("close with reserved fd returns with freed fd:%d: proc: %x\n", fd, (unsigned int)p);
+ panic("close with reserved fd returns with freed fd:%d: proc: %p", fd, p);
}
#endif
struct user32_stat64 user32_sb64;
} dest;
int error, my_size;
- int funnel_state;
file_type_t type;
caddr_t data;
kauth_filesec_t fsec;
break;
case DTYPE_KQUEUE:
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- error = kqueue_stat(fp, sbptr, isstat64, p);
- thread_funnel_set(kernel_flock, funnel_state);
+ error = kqueue_stat((void *)data, sbptr, isstat64, p);
break;
default:
}
goto out;
- case DTYPE_PSXSHM:
- case DTYPE_PSXSEM:
- case DTYPE_KQUEUE:
- case DTYPE_FSEVENTS:
+ default:
error = EINVAL;
goto out;
return (0);
}
-#if NETAT
-#define DTYPE_ATALK -1 /* XXX This does not belong here */
-
-
-/*
- * fp_getfatalk
- *
- * Description: Get fileproc and atalk pointer for a given fd from the
- * per process open file table of the specified process
- * and if successful, increment the f_iocount
- *
- * Parameters: p Process in which fd lives
- * fd fd to get information for
- * resultfp Pointer to result fileproc
- * pointer area, or 0 if none
- * resultatalk Pointer to result atalk
- * pointer area, or 0 if none
- * Returns: EBADF The file descriptor is invalid
- * EBADF The file descriptor is not a socket
- * 0 Success
- *
- * Implicit returns:
- * *resultfp (modified) Fileproc pointer
- * *resultatalk (modified) atalk pointer
- *
- * Notes: The second EBADF should probably be something else to make
- * the error condition distinct.
- *
- * XXX This code is specific to AppleTalk protocol support, and
- * XXX should be conditionally compiled
- */
-int
-fp_getfatalk(proc_t p, int fd, struct fileproc **resultfp,
- struct atalk **resultatalk)
-{
- struct filedesc *fdp = p->p_fd;
- struct fileproc *fp;
-
- proc_fdlock_spin(p);
- if (fd < 0 || fd >= fdp->fd_nfiles ||
- (fp = fdp->fd_ofiles[fd]) == NULL ||
- (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
- proc_fdunlock(p);
- return (EBADF);
- }
- if (fp->f_type != (DTYPE_ATALK+1)) {
- proc_fdunlock(p);
- return(EBADF);
- }
- fp->f_iocount++;
-
- if (resultfp)
- *resultfp = fp;
- if (resultatalk)
- *resultatalk = (struct atalk *)fp->f_data;
- proc_fdunlock(p);
-
- return (0);
-}
-
-#endif /* NETAT */
-
/*
* fp_lookup
*
}
+/*
+ * fp_tryswap
+ *
+ * Description: Swap the fileproc pointer for a given fd with a new
+ *		fileproc pointer in the per-process open file table of
+ *		the specified process.  The fdlock must be held at entry.
+ *
+ * Parameters:  p		Process containing the fd
+ *		fd		The fd of interest
+ *		nfp		Pointer to the newfp
+ *
+ * Returns:	0		Success
+ *		EBADF		Bad file descriptor
+ *		EINTR		Interrupted
+ *		EKEEPLOOKING	f_iocount changed while lock was dropped.
+ */
+int
+fp_tryswap(proc_t p, int fd, struct fileproc *nfp)
+{
+	struct fileproc *fp;
+	int error;
+
+	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
+
+	/* Takes a second f_iocount reference on the fd's current fileproc */
+	if (0 != (error = fp_lookup(p, fd, &fp, 1)))
+		return (error);
+	/*
+	 * At this point, our caller (change_guardedfd_np) has
+	 * one f_iocount reference, and we just took another
+	 * one to begin the replacement.
+	 */
+	if (fp->f_iocount < 2) {
+		panic("f_iocount too small %d", fp->f_iocount);
+	} else if (2 == fp->f_iocount) {
+
+		/* Copy the contents of *fp, preserving the "type" of *nfp */
+
+		nfp->f_flags = (nfp->f_flags & FP_TYPEMASK) |
+			(fp->f_flags & ~FP_TYPEMASK);
+		nfp->f_iocount = fp->f_iocount;
+		nfp->f_fglob = fp->f_fglob;
+		nfp->f_waddr = fp->f_waddr;
+
+		/*
+		 * Install the replacement and release the reference taken
+		 * above -- it is now accounted against nfp.  The caller
+		 * still owns the old fp via its original reference.
+		 */
+		p->p_fd->fd_ofiles[fd] = nfp;
+		(void) fp_drop(p, fd, nfp, 1);
+	} else {
+		/*
+		 * Wait for all other active references to evaporate.
+		 */
+		p->p_fpdrainwait = 1;
+		error = msleep(&p->p_fpdrainwait, &p->p_fdmlock,
+		    PRIBIO | PCATCH, "tryswap fpdrain", NULL);
+		if (0 == error) {
+			/*
+			 * Return an "internal" errno to trigger a full
+			 * reevaluation of the change-guard attempt.
+			 */
+			error = EKEEPLOOKING;
+			printf("%s: lookup collision fd %d\n", __func__, fd);
+		}
+		(void) fp_drop(p, fd, fp, 1);
+	}
+	return (error);
+}
+
+
/*
* fp_drop_written
*
}
fp->f_iocount--;
- if (p->p_fpdrainwait && fp->f_iocount == 0) {
- p->p_fpdrainwait = 0;
- needwakeup = 1;
+ if (fp->f_iocount == 0) {
+ if (fp->f_flags & FP_SELCONFLICT)
+ fp->f_flags &= ~FP_SELCONFLICT;
+
+ if (p->p_fpdrainwait) {
+ p->p_fpdrainwait = 0;
+ needwakeup = 1;
+ }
}
if (!locked)
proc_fdunlock(p);
*
* The fileproc referenced is not returned; because of this, care
* must be taken to not drop the last reference (e.g. by closing
- * the file). This is inhernely unsafe, since the reference may
+ * the file). This is inherently unsafe, since the reference may
* not be recoverable from the vnode, if there is a subsequent
* close that destroys the associate fileproc. The caller should
* therefore retain their own reference on the fileproc so that
*
* The fileproc referenced is not returned; because of this, care
* must be taken to not drop the last reference (e.g. by closing
- * the file). This is inhernely unsafe, since the reference may
+ * the file). This is inherently unsafe, since the reference may
* not be recoverable from the vnode, if there is a subsequent
* close that destroys the associate fileproc. The caller should
* therefore retain their own reference on the fileproc so that
*
* The fileproc referenced is not returned; because of this, care
* must be taken to not drop the last reference (e.g. by closing
- * the file). This is inhernely unsafe, since the reference may
+ * the file). This is inherently unsafe, since the reference may
* not be recoverable from the socket, if there is a subsequent
* close that destroys the associate fileproc. The caller should
* therefore retain their own reference on the fileproc so that
}
fp->f_iocount --;
- if (p->p_fpdrainwait && fp->f_iocount == 0) {
- p->p_fpdrainwait = 0;
- needwakeup = 1;
+ if (fp->f_iocount == 0) {
+ if (fp->f_flags & FP_SELCONFLICT)
+ fp->f_flags &= ~FP_SELCONFLICT;
+
+ if (p->p_fpdrainwait) {
+ p->p_fpdrainwait = 0;
+ needwakeup = 1;
+ }
}
proc_fdunlock(p);
}
+static int falloc_withalloc_locked(proc_t, struct fileproc **, int *,
+ vfs_context_t, struct fileproc * (*)(void *), void *, int);
+
/*
* falloc
*
* *resultfd (modified) Returned fd
*
* Locks: This function takes and drops the proc_fdlock; if this lock
- * is alread held, use falloc_locked() instead.
+ * is already held, use falloc_locked() instead.
*
* Notes: This function takes separate process and context arguments
* solely to support kern_exec.c; otherwise, it would take
*/
int
falloc(proc_t p, struct fileproc **resultfp, int *resultfd, vfs_context_t ctx)
+{
+ return (falloc_withalloc(p, resultfp, resultfd, ctx,
+ fileproc_alloc_init, NULL));
+}
+
+/*
+ * Like falloc, but including the fileproc allocator and create-args
+ */
+int
+falloc_withalloc(proc_t p, struct fileproc **resultfp, int *resultfd,
+ vfs_context_t ctx, fp_allocfn_t fp_zalloc, void *arg)
{
int error;
proc_fdlock(p);
- error = falloc_locked(p, resultfp, resultfd, ctx, 1);
+ error = falloc_withalloc_locked(p,
+ resultfp, resultfd, ctx, fp_zalloc, arg, 1);
proc_fdunlock(p);
- return(error);
+ return (error);
}
+/*
+ * "uninitialized" ops -- ensure fg->fg_ops->fo_type always exists
+ */
+static const struct fileops uninitops;
/*
* falloc_locked
*
* Create a new open file structure and allocate
- * a file decriptor for the process that refers to it.
+ * a file descriptor for the process that refers to it.
*
* Returns: 0 Success
*
falloc_locked(proc_t p, struct fileproc **resultfp, int *resultfd,
vfs_context_t ctx, int locked)
{
- struct fileproc *fp, *fq;
+ return (falloc_withalloc_locked(p, resultfp, resultfd, ctx,
+ fileproc_alloc_init, NULL, locked));
+}
+
+static int
+falloc_withalloc_locked(proc_t p, struct fileproc **resultfp, int *resultfd,
+ vfs_context_t ctx, fp_allocfn_t fp_zalloc, void *crarg,
+ int locked)
+{
+ struct fileproc *fp;
struct fileglob *fg;
int error, nfd;
*/
proc_fdunlock(p);
- MALLOC_ZONE(fp, struct fileproc *, sizeof(struct fileproc), M_FILEPROC, M_WAITOK);
+ fp = (*fp_zalloc)(crarg);
if (fp == NULL) {
if (locked)
proc_fdlock(p);
}
MALLOC_ZONE(fg, struct fileglob *, sizeof(struct fileglob), M_FILEGLOB, M_WAITOK);
if (fg == NULL) {
- FREE_ZONE(fp, sizeof(*fp), M_FILEPROC);
+ fileproc_free(fp);
if (locked)
proc_fdlock(p);
return (ENOMEM);
}
- bzero(fp, sizeof(struct fileproc));
bzero(fg, sizeof(struct fileglob));
lck_mtx_init(&fg->fg_lock, file_lck_grp, file_lck_attr);
fp->f_iocount = 1;
fg->fg_count = 1;
+ fg->fg_ops = &uninitops;
fp->f_fglob = fg;
#if CONFIG_MACF
mac_file_label_init(fg);
mac_file_label_associate(fp->f_cred, fg);
#endif
- lck_mtx_lock_spin(file_flist_lock);
-
- nfiles++;
-
- if ( (fq = p->p_fd->fd_ofiles[0]) ) {
- LIST_INSERT_AFTER(fq->f_fglob, fg, f_list);
- } else {
- LIST_INSERT_HEAD(&filehead, fg, f_list);
- }
- lck_mtx_unlock(file_flist_lock);
+ OSAddAtomic(1, &nfiles);
p->p_fd->fd_ofiles[nfd] = fp;
void
fg_free(struct fileglob *fg)
{
- lck_mtx_lock_spin(file_flist_lock);
- LIST_REMOVE(fg, f_list);
- nfiles--;
- lck_mtx_unlock(file_flist_lock);
+ OSAddAtomic(-1, &nfiles);
+
+ if (fg->fg_vn_data) {
+ fg_vn_data_free(fg->fg_vn_data);
+ fg->fg_vn_data = NULL;
+ }
if (IS_VALID_CRED(fg->fg_cred)) {
kauth_cred_unref(&fg->fg_cred);
* that are either marked as close-on-exec, or which were in the
* process of being opened at the time of the execve
*
+ * Also handles the case (via posix_spawn()) where -all-
+ * files except those marked with "inherit" as treated as
+ * close-on-exec.
+ *
* Parameters: p Pointer to process calling
* execve
*
*
* Locks: This function internally takes and drops proc_fdlock()
*
- * Notes: This function drops and retakes the kernel funnel; this is
- * inherently unsafe, since another thread may have the
- * proc_fdlock.
- *
- * XXX: We should likely reverse the lock and funnel drop/acquire
- * order to avoid the small race window; it's also possible that
- * if the program doing the exec has an outstanding listen socket
- * and a network connection is completed asyncrhonously that we
- * will end up with a "ghost" socket reference in the new process.
- *
- * This needs reworking to make it safe to remove the funnel from
- * the execve and posix_spawn system calls.
*/
void
-fdexec(proc_t p)
+fdexec(proc_t p, short flags)
{
struct filedesc *fdp = p->p_fd;
int i;
- struct fileproc *fp;
+ boolean_t cloexec_default = (flags & POSIX_SPAWN_CLOEXEC_DEFAULT) != 0;
proc_fdlock(p);
- i = fdp->fd_lastfile;
+ for (i = fdp->fd_lastfile; i >= 0; i--) {
- while (i >= 0) {
+ struct fileproc *fp = fdp->fd_ofiles[i];
+ char *flagp = &fdp->fd_ofileflags[i];
+
+ if (fp && cloexec_default) {
+ /*
+ * Reverse the usual semantics of file descriptor
+ * inheritance - all of them should be closed
+ * except files marked explicitly as "inherit" and
+ * not marked close-on-exec.
+ */
+ if ((*flagp & (UF_EXCLOSE|UF_INHERIT)) != UF_INHERIT)
+ *flagp |= UF_EXCLOSE;
+ *flagp &= ~UF_INHERIT;
+ }
- fp = fdp->fd_ofiles[i];
if (
- ((fdp->fd_ofileflags[i] & (UF_RESERVED|UF_EXCLOSE)) == UF_EXCLOSE)
+ ((*flagp & (UF_RESERVED|UF_EXCLOSE)) == UF_EXCLOSE)
#if CONFIG_MACF
|| (fp && mac_file_check_inherit(proc_ucred(p), fp->f_fglob))
#endif
fdp->fd_lastfile--;
if (i < fdp->fd_freefile)
fdp->fd_freefile = i;
+
+ /*
+ * Wait for any third party viewers (e.g., lsof)
+ * to release their references to this fileproc.
+ */
+ while (fp->f_iocount > 0) {
+ p->p_fpdrainwait = 1;
+ msleep(&p->p_fpdrainwait, &p->p_fdmlock, PRIBIO,
+ "fpdrain", NULL);
+ }
+
closef_locked(fp, fp->f_fglob, p);
- FREE_ZONE(fp, sizeof(*fp), M_FILEPROC);
+
+ fileproc_free(fp);
}
- i--;
}
proc_fdunlock(p);
}
* thread making the call, rather than from the process.
*
* In the case of a failure to obtain a reference, for most cases,
- * the file entry will be silently droppped. There's an exception
+ * the file entry will be silently dropped. There's an exception
* for the case of a chroot dir, since a failure to to obtain a
* reference there would constitute an "escape" from the chroot
* environment, which must not be allowed. In that case, we will
* our reference from the parent also
* since the vnode has gone DEAD making
* it useless... by dropping it we'll
- * be that much closer to recyling it
+ * be that much closer to recycling it
*/
vnode_rele(fdp->fd_cdir);
fdp->fd_cdir = NULL;
FREE_ZONE(newfdp, sizeof *newfdp, M_FILEDESC);
return(NULL);
}
- newfdp->fd_refcnt = 1;
/*
* If the number of open files fits in the internal arrays
*/
if (newfdp->fd_knlistsize != -1) {
fpp = &newfdp->fd_ofiles[newfdp->fd_lastfile];
- for (i = newfdp->fd_lastfile; i >= 0; i--, fpp--) {
+ flags = &newfdp->fd_ofileflags[newfdp->fd_lastfile];
+ for (i = newfdp->fd_lastfile;
+ i >= 0; i--, fpp--, flags--) {
+ if (*flags & UF_RESERVED)
+ continue; /* (removed below) */
if (*fpp != NULL && (*fpp)->f_type == DTYPE_KQUEUE) {
*fpp = NULL;
- newfdp->fd_ofileflags[i] = 0;
+ *flags = 0;
if (i < newfdp->fd_freefile)
newfdp->fd_freefile = i;
}
flags = newfdp->fd_ofileflags;
for (i = newfdp->fd_lastfile + 1; --i >= 0; fpp++, flags++)
- if ((ofp = *fpp) != NULL && !(*flags & UF_RESERVED)) {
- MALLOC_ZONE(fp, struct fileproc *, sizeof(struct fileproc), M_FILEPROC, M_WAITOK);
+ if ((ofp = *fpp) != NULL &&
+ 0 == (*flags & (UF_FORKCLOSE|UF_RESERVED))) {
+#if DEBUG
+ if (FILEPROC_TYPE(ofp) != FTYPE_SIMPLE)
+ panic("complex fileproc");
+#endif
+ fp = fileproc_alloc_init(NULL);
if (fp == NULL) {
/*
* XXX no room to copy, unable to
*/
*fpp = NULL;
} else {
- bzero(fp, sizeof(struct fileproc));
- fp->f_flags = ofp->f_flags;
- //fp->f_iocount = ofp->f_iocount;
- fp->f_iocount = 0;
+ fp->f_flags |=
+ (ofp->f_flags & ~FP_TYPEMASK);
fp->f_fglob = ofp->f_fglob;
(void)fg_ref(fp);
*fpp = fp;
proc_fdlock(p);
- /* Certain daemons might not have file descriptors */
- fdp = p->p_fd;
-
- if ((fdp == NULL) || (--fdp->fd_refcnt > 0)) {
+ if (p == kernproc || NULL == (fdp = p->p_fd)) {
proc_fdunlock(p);
return;
}
- if (fdp->fd_refcnt == 0xffff)
- panic("fdfree: bad fd_refcnt");
- /* Last reference: the structure can't change out from under us */
+ extern struct filedesc filedesc0;
+
+ if (&filedesc0 == fdp)
+ panic("filedesc0");
if (fdp->fd_nfiles > 0 && fdp->fd_ofiles) {
for (i = fdp->fd_lastfile; i >= 0; i--) {
if ((fp = fdp->fd_ofiles[i]) != NULL) {
if (fdp->fd_ofileflags[i] & UF_RESERVED)
- panic("fdfree: found fp with UF_RESERVED\n");
+ panic("fdfree: found fp with UF_RESERVED");
- /* closef drops the iocount ... */
- if ((fp->f_flags & FP_INCHRREAD) != 0)
- fp->f_iocount++;
procfdtbl_reservefd(p, i);
if (i < fdp->fd_knlistsize)
if (fp->f_flags & FP_WAITEVENT)
(void)waitevent_close(p, fp);
(void) closef_locked(fp, fp->f_fglob, p);
- FREE_ZONE(fp, sizeof(*fp), M_FILEPROC);
+ fileproc_free(fp);
}
}
FREE_ZONE(fdp->fd_ofiles, fdp->fd_nfiles * OFILESIZE, M_OFILETABL);
FREE_ZONE(fdp, sizeof(*fdp), M_FILEDESC);
}
-
-/*
- * closef_finish
- *
- * Description: Called on last open instance for a fileglob for a file being
- * closed.
- *
- * Parameters: fp Pointer to fileproc for fd
- * fg Pointer to fileglob for fd
- * p Pointer to proc structure
- *
- * Returns: 0 Success
- * <fo_close>:??? Anything returnable by a per-fileops
- * close function
- *
- * Note: fp can only be non-NULL if p is also non-NULL. If p is NULL,
- * then fg must eith be locked (FHASLOCK) or must not have a
- * type of DTYPE_VNODE.
- *
- * On return, the fg is freed.
- *
- * This function may block draining output to a character
- * device on last close of that device.
- */
-static int
-closef_finish(struct fileproc *fp, struct fileglob *fg, proc_t p, vfs_context_t ctx)
-{
- int error;
-
-
- /* fg_ops completed initialization? */
- if (fg->fg_ops)
- error = fo_close(fg, ctx);
- else
- error = 0;
-
- /* if fp is non-NULL, drain it out */
- if (((fp != (struct fileproc *)0) && ((fp->f_flags & FP_INCHRREAD) != 0))) {
- proc_fdlock_spin(p);
- if ( ((fp->f_flags & FP_INCHRREAD) != 0) ) {
- fileproc_drain(p, fp);
- }
- proc_fdunlock(p);
- }
- fg_free(fg);
-
- return (error);
-}
-
/*
* closef_locked
*
* If the descriptor was in a message, POSIX-style locks
* aren't passed with the descriptor.
*/
- if (p && (p->p_ladvflag & P_LADVLOCK) && fg->fg_type == DTYPE_VNODE) {
+ if (p && (p->p_ladvflag & P_LADVLOCK) &&
+ DTYPE_VNODE == FILEGLOB_DTYPE(fg)) {
proc_fdunlock(p);
lf.l_whence = SEEK_SET;
vp = (struct vnode *)fg->fg_data;
if ( (error = vnode_getwithref(vp)) == 0 ) {
- (void) VNOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_POSIX, &context);
+ (void) VNOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_POSIX, &context, NULL);
(void)vnode_put(vp);
}
proc_fdlock(p);
if (p)
proc_fdunlock(p);
- error = closef_finish(fp, fg, p, &context);
+
+ /* Since we ensure that fg->fg_ops is always initialized,
+ * it is safe to invoke fo_close on the fg */
+ error = fo_close(fg, &context);
+
+ fg_free(fg);
+
if (p)
proc_fdlock(p);
* Locks: Assumes the caller holds the proc_fdlock
*
* Notes: For character devices, this occurs on the last close of the
- * device; for all other file descriptos, this occurs on each
+ * device; for all other file descriptors, this occurs on each
* close to prevent fd's from being closed out from under
* operations currently in progress and blocked
*
if (fp->f_fglob->fg_ops->fo_drain) {
(*fp->f_fglob->fg_ops->fo_drain)(fp, &context);
}
- if (((fp->f_flags & FP_INSELECT)== FP_INSELECT)) {
- wait_queue_wakeup_all((wait_queue_t)fp->f_waddr, NULL, THREAD_INTERRUPTED);
+ if ((fp->f_flags & FP_INSELECT) == FP_INSELECT) {
+ if (wait_queue_wakeup_all((wait_queue_t)fp->f_waddr, NULL, THREAD_INTERRUPTED) == KERN_INVALID_ARGUMENT)
+ panic("bad wait queue for wait_queue_wakeup_all %p", fp->f_waddr);
}
+ if ((fp->f_flags & FP_SELCONFLICT) == FP_SELCONFLICT) {
+ if (wait_queue_wakeup_all(&select_conflict_queue, NULL, THREAD_INTERRUPTED) == KERN_INVALID_ARGUMENT)
+ panic("bad select_conflict_queue");
+ }
p->p_fpdrainwait = 1;
msleep(&p->p_fpdrainwait, &p->p_fdmlock, PRIBIO, "fpdrain", NULL);
}
+#if DIAGNOSTIC
+ if ((fp->f_flags & FP_INSELECT) != 0)
+ panic("FP_INSELECT set on drained fp");
+#endif
+ if ((fp->f_flags & FP_SELCONFLICT) == FP_SELCONFLICT)
+ fp->f_flags &= ~FP_SELCONFLICT;
}
proc_fdunlock(p);
fg_free(fp->f_fglob);
- FREE_ZONE(fp, sizeof(*fp), M_FILEPROC);
+ fileproc_free(fp);
return(0);
}
if (how & LOCK_UN) {
lf.l_type = F_UNLCK;
fp->f_flag &= ~FHASLOCK;
- error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_UNLCK, &lf, F_FLOCK, ctx);
+ error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_UNLCK, &lf, F_FLOCK, ctx, NULL);
goto out;
}
if (how & LOCK_EX)
#endif
fp->f_flag |= FHASLOCK;
if (how & LOCK_NB) {
- error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, F_FLOCK, ctx);
+ error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, F_FLOCK, ctx, NULL);
goto out;
}
- error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, F_FLOCK|F_WAIT, ctx);
+ error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, F_FLOCK|F_WAIT, ctx, NULL);
out:
(void)vnode_put(vp);
out1:
}
-#if CONFIG_EMBEDDED
/*
* fileport_makeport
*
goto out;
}
+ if (FP_ISGUARDED(fp, GUARD_FILEPORT)) {
+ proc_fdlock(p);
+ err = fp_guard_exception(p, fd, fp, kGUARD_EXC_FILEPORT);
+ proc_fdunlock(p);
+ goto out;
+ }
+
/* Dropped when port is deallocated */
fg = fp->f_fglob;
fg_ref(fp);
err = EINVAL;
goto out;
}
-
- MALLOC_ZONE(fp, struct fileproc *, sizeof(*fp), M_FILEPROC, M_WAITOK);
+
+ fp = fileproc_alloc_init(NULL);
if (fp == FILEPROC_NULL) {
err = ENOMEM;
goto out;
}
- bzero(fp, sizeof(*fp));
-
fp->f_fglob = fg;
fg_ref(fp);
proc_fdunlock(p);
goto out;
}
+ *fdflags(p, fd) |= UF_EXCLOSE;
procfdtbl_releasefd(p, fd, fp);
proc_fdunlock(p);
err = 0;
out:
if ((fp != NULL) && (0 != err)) {
- FREE_ZONE(fp, sizeof(*fp), M_FILEPROC);
+ fileproc_free(fp);
}
if (IPC_PORT_NULL != port) {
return err;
}
-#endif /* CONFIG_EMBEDDED */
/*
* Notes: XXX This is not thread safe; see fdopen() above
*/
int
-dupfdopen(struct filedesc *fdp, int indx, int dfd, int mode, int error)
+dupfdopen(struct filedesc *fdp, int indx, int dfd, int flags, int error)
{
struct fileproc *wfp;
struct fileproc *fp;
*/
switch (error) {
case ENODEV:
+ if (FP_ISGUARDED(wfp, GUARD_DUP)) {
+ int err = fp_guard_exception(p,
+ dfd, wfp, kGUARD_EXC_DUP);
+ proc_fdunlock(p);
+ return (err);
+ }
+
/*
* Check that the mode the file is being opened for is a
* subset of the mode of the existing descriptor.
*/
- if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
+ if (((flags & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
proc_fdunlock(p);
return (EACCES);
}
fg_free(fp->f_fglob);
fp->f_fglob = wfp->f_fglob;
- fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
+			/*
+			 * Inherit the per-fd flags of the duplicated
+			 * descriptor, adding close-on-exec if O_CLOEXEC was
+			 * requested.  The conditional must be parenthesized:
+			 * '|' binds tighter than '?:', so without parens the
+			 * inherited flags become the ternary's condition and
+			 * are discarded, leaving either UF_EXCLOSE or 0.
+			 */
+			fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd] |
+			    ((flags & O_CLOEXEC) ? UF_EXCLOSE : 0);
proc_fdunlock(p);
return (0);
#if DIAGNOSTIC
if ((fp->f_flags & ~((unsigned int)FP_VALID_FLAGS)) != 0)
- panic("fg_ref: invalid bits on fp%x\n", (unsigned int)fp);
+ panic("fg_ref: invalid bits on fp %p", fp);
if (fg->fg_count == 0)
- panic("fg_ref: adding fgcount to zeroed fg :fp %x, fg%x\n ", (unsigned int)fp, (unsigned int)fg);
+ panic("fg_ref: adding fgcount to zeroed fg: fp %p fg %p",
+ fp, fg);
#endif
fg->fg_count++;
lck_mtx_unlock(&fg->fg_lock);
lck_mtx_unlock(&fg->fg_lock);
}
-
+#if SOCKETS
/*
* fg_insertuipc
*
lck_mtx_unlock(&fg->fg_lock);
}
}
-
+#endif /* SOCKETS */
/*
* fo_read
return FALSE;
}
}
+
+
+/*
+ * fileproc_alloc_init
+ *
+ * Description: Allocate and zero-fill a fileproc from the M_FILEPROC
+ *		zone.  Serves as the default fp_allocfn_t used by
+ *		falloc()/falloc_withalloc() and finishdup().
+ *
+ * Parameters:	arg		Unused; present so the signature matches
+ *				the fp_allocfn_t callback type.
+ *
+ * Returns:	Pointer to the new fileproc, or NULL if the zone
+ *		allocation failed.
+ */
+struct fileproc *
+fileproc_alloc_init(__unused void *arg)
+{
+	struct fileproc *fp;
+
+	MALLOC_ZONE(fp, struct fileproc *, sizeof (*fp), M_FILEPROC, M_WAITOK);
+	if (fp)
+		bzero(fp, sizeof (*fp));
+
+	return (fp);
+}
+
+/*
+ * fileproc_free
+ *
+ * Description: Release a fileproc, dispatching on the type bits kept
+ *		in fp->f_flags: a simple fileproc goes back to the
+ *		M_FILEPROC zone, a guarded one is handed to
+ *		guarded_fileproc_free().
+ *
+ * Parameters:	fp		The fileproc to free
+ *
+ * Notes:	Panics on any other FILEPROC_TYPE value, since that
+ *		indicates a corrupted fileproc.
+ */
+void
+fileproc_free(struct fileproc *fp)
+{
+	switch (FILEPROC_TYPE(fp)) {
+	case FTYPE_SIMPLE:
+		FREE_ZONE(fp, sizeof (*fp), M_FILEPROC);
+		break;
+	case FTYPE_GUARDED:
+		guarded_fileproc_free(fp);
+		break;
+	default:
+		panic("%s: corrupt fp %p flags %x", __func__, fp, fp->f_flags);
+	}
+}