#include <security/mac_framework.h>
#endif
-#if CONFIG_PROTECT
-#include <sys/cprotect.h>
-#endif
-
-extern void sigpup_attach_vnode(vnode_t); /* XXX */
+#include <IOKit/IOBSD.h>
static int vn_closefile(struct fileglob *fp, vfs_context_t ctx);
static int vn_ioctl(struct fileproc *fp, u_long com, caddr_t data,
vfs_context_t ctx);
static void filt_vndetach(struct knote *kn);
static int filt_vnode(struct knote *kn, long hint);
+static int filt_vnode_common(struct knote *kn, vnode_t vp, long hint);
static int vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx);
#if 0
static int vn_kqfilt_remove(struct vnode *vp, uintptr_t ident,
#endif
const struct fileops vnops = {
- DTYPE_VNODE,
- vn_read,
- vn_write,
- vn_ioctl,
- vn_select,
- vn_closefile,
- vn_kqfilt_add,
- NULL
+ .fo_type = DTYPE_VNODE,
+ .fo_read = vn_read,
+ .fo_write = vn_write,
+ .fo_ioctl = vn_ioctl,
+ .fo_select = vn_select,
+ .fo_close = vn_closefile,
+ .fo_kqfilter = vn_kqfilt_add,
+ .fo_drain = NULL,
};
+static int filt_vntouch(struct knote *kn, struct kevent_internal_s *kev);
+static int filt_vnprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
+
struct filterops vnode_filtops = {
.f_isfd = 1,
.f_attach = NULL,
.f_detach = filt_vndetach,
- .f_event = filt_vnode
+ .f_event = filt_vnode,
+ .f_touch = filt_vntouch,
+ .f_process = filt_vnprocess,
};
/*
kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_OPEN,
(uintptr_t)vp, 0);
- sigpup_attach_vnode(vp);
-
return 0;
bad:
return error;
}
+/*
+ * This is the number of times we'll loop in vn_open_auth without explicitly
+ * yielding the CPU when we determine we have to retry.
+ */
+#define RETRY_NO_YIELD_COUNT 5
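+/*
+ * With the traditional hz of 100, retries beyond RETRY_NO_YIELD_COUNT sleep
+ * for nretries * (hz/100) ticks, i.e. roughly 10 msecs more per retry,
+ * capped at one second (hz ticks) per iteration.
+ */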
+
/*
* Open a file with authorization, updating the contents of the structures
* pointed to by ndp, fmodep, and vap as necessary to perform the requested
boolean_t need_vnop_open;
boolean_t batched;
boolean_t ref_failed;
+ int nretries = 0;
again:
vp = NULL;
fmode = *fmodep;
origcnflags = ndp->ni_cnd.cn_flags;
+ // If raw encrypted mode is requested, handle that here
+ if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags)
+ && ISSET(vap->va_dataprotect_flags, VA_DP_RAWENCRYPTED)) {
+ fmode |= FENCRYPTED;
+ }
+
/*
* O_CREAT
*/
if (error) {
/*
- * Check for a creation or unlink race.
+ * Check for a create race.
*/
- if (((error == EEXIST) && !(fmode & O_EXCL)) ||
- ((error == ENOENT) && (fmode & O_CREAT))){
+ if ((error == EEXIST) && !(fmode & O_EXCL)){
if (vp)
vnode_put(vp);
goto again;
/* open calls are allowed for resource forks. */
ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
#endif
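+	/* flag the lookup as raw-encrypted and have it skip the name cache */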
+ if (fmode & FENCRYPTED)
+ ndp->ni_cnd.cn_flags |= CN_RAW_ENCRYPTED | CN_SKIPNAMECACHE;
ndp->ni_flag = NAMEI_COMPOUNDOPEN;
/* preserve NOFOLLOW from vnode_open() */
panic("Haven't cleaned up adequately in vn_open_auth()");
}
+#if DEVELOPMENT || DEBUG
+ /*
+ * XXX VSWAP: Check for entitlements or special flag here
+ * so we can restrict access appropriately.
+ */
+#else /* DEVELOPMENT || DEBUG */
+
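+	/* on release kernels, only the kernel itself may open a swapfile for writing */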
+ if (vnode_isswap(vp) && (fmode & (FWRITE | O_TRUNC)) && (ctx != vfs_context_kernel())) {
+ error = EPERM;
+ goto bad;
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
/*
* Expect to use this code for filesystems without compound VNOPs, for the root
* of a filesystem, which can't be "looked up" in the sense of VNOP_LOOKUP(),
}
}
-#if CONFIG_PROTECT
- /*
- * Perform any content protection access checks prior to calling
- * into the filesystem, if the raw encrypted mode was not
- * requested.
- *
- * If the va_dataprotect_flags are NOT active, or if they are,
- * but they do not have the VA_DP_RAWENCRYPTED bit set, then we need
- * to perform the checks.
- */
- if (!(VATTR_IS_ACTIVE (vap, va_dataprotect_flags)) ||
- ((vap->va_dataprotect_flags & VA_DP_RAWENCRYPTED) == 0)) {
- error = cp_handle_open (vp, fmode);
- if (error) {
+ if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags)
+ && ISSET(vap->va_dataprotect_flags, VA_DP_RAWUNENCRYPTED)) {
+		/* Don't allow unencrypted I/O requests from user space unless entitled */
+ boolean_t entitled = FALSE;
+#if !SECURE_KERNEL
+ entitled = IOTaskHasEntitlement(current_task(), "com.apple.private.security.file-unencrypt-access");
+#endif
+ if (!entitled) {
+ error = EPERM;
goto bad;
}
+ fmode |= FUNENCRYPTED;
}
-#endif
error = VNOP_OPEN(vp, fmode, ctx);
if (error) {
* EREDRIVEOPEN: means that we were hit by the tty allocation race.
*/
if (((error == ENOENT) && (*fmodep & O_CREAT)) || (error == EREDRIVEOPEN) || ref_failed) {
+		/*
+		 * We'll retry here, but it is possible to get into a
+		 * retry "spin" inside the kernel that starves the very
+		 * threads that need to run for the retry loop to end.
+		 * An example is an open of a terminal that is being
+		 * revoked: we spin here without yielding because namei
+		 * and VNOP_OPEN succeed but vnode_ref fails. The revoke
+		 * needs threads with an iocount to run, but by spinning
+		 * here we may be blocking them.
+		 *
+		 * We start yielding the CPU after some number of
+		 * retries for increasing durations. Note that this is
+		 * still a loop without an exit condition.
+		 */
+ nretries += 1;
+ if (nretries > RETRY_NO_YIELD_COUNT) {
+			/* Every hz/100 ticks is 10 msecs ... */
+ tsleep(&nretries, PVFS, "vn_open_auth_retry",
+ MIN((nretries * (hz/100)), hz));
+ }
goto again;
}
}
error = VNOP_READ(vp, auio, ioflg, &context);
}
} else {
+
+#if DEVELOPMENT || DEBUG
+ /*
+ * XXX VSWAP: Check for entitlements or special flag here
+ * so we can restrict access appropriately.
+ */
error = VNOP_WRITE(vp, auio, ioflg, &context);
+#else /* DEVELOPMENT || DEBUG */
+
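+	/* only swap-dispatched or encryption-skipping I/O may write a swapfile */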
+ if (vnode_isswap(vp) && ((ioflg & (IO_SWAP_DISPATCH | IO_SKIP_ENCRYPTION)) == 0)) {
+ error = EPERM;
+ } else {
+ error = VNOP_WRITE(vp, auio, ioflg, &context);
+ }
+#endif /* DEVELOPMENT || DEBUG */
}
}
if (fp->f_fglob->fg_flag & FENCRYPTED) {
ioflag |= IO_ENCRYPTED;
}
+ if (fp->f_fglob->fg_flag & FUNENCRYPTED) {
+ ioflag |= IO_SKIP_ENCRYPTION;
+ }
+ if (fp->f_fglob->fg_flag & O_EVTONLY) {
+ ioflag |= IO_EVTONLY;
+ }
if (fp->f_fglob->fg_flag & FNORDAHEAD)
ioflag |= IO_RAOFF;
}
count = uio_resid(uio);
- if (vnode_isswap(vp)) {
+ if (vnode_isswap(vp) && !(IO_SKIP_ENCRYPTION & ioflag)) {
+
/* special case for swap files */
error = vn_read_swapfile(vp, uio);
} else {
error = VNOP_READ(vp, uio, ioflag, ctx);
}
+
if ((flags & FOF_OFFSET) == 0) {
fp->f_fglob->fg_offset += count - uio_resid(uio);
if (offset_locked) {
return(error);
}
+#if DEVELOPMENT || DEBUG
+ /*
+ * XXX VSWAP: Check for entitlements or special flag here
+ * so we can restrict access appropriately.
+ */
+#else /* DEVELOPMENT || DEBUG */
+
+ if (vnode_isswap(vp)) {
+ (void)vnode_put(vp);
+ error = EPERM;
+ return (error);
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
+
#if CONFIG_MACF
error = mac_vnode_check_write(ctx, vfs_context_ucred(ctx), vp);
if (error) {
ioflag |= IO_NODIRECT;
if (fp->f_fglob->fg_flag & FSINGLE_WRITER)
ioflag |= IO_SINGLE_WRITER;
+ if (fp->f_fglob->fg_flag & O_EVTONLY)
+ ioflag |= IO_EVTONLY;
/*
* Treat synchronous mounts and O_FSYNC on the fd as equivalent.
* vnode_getattr:???
*/
int
-vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat64, vfs_context_t ctx)
+vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat64,
+ vfs_context_t ctx, struct ucred *file_cred)
{
struct vnode_attr va;
int error;
error = vnode_getattr(vp, &va, ctx);
if (error)
goto out;
+#if CONFIG_MACF
+ /*
+	 * Give MAC policies a chance to reject or filter the attributes
+	 * returned by the filesystem. Note that MAC policies are consulted
+	 * *after* calling the filesystem because filesystems can return more
+	 * attributes than were requested, so policies wouldn't be
+	 * authoritative if consulted beforehand. This also gives policies an
+	 * opportunity to change the values of attributes retrieved.
+ */
+ error = mac_vnode_check_getattr(ctx, file_cred, vp, &va);
+ if (error)
+ goto out;
+#endif
/*
* Copy from vattr table
*/
sb64->st_atimespec = va.va_access_time;
sb64->st_mtimespec = va.va_modify_time;
sb64->st_ctimespec = va.va_change_time;
- sb64->st_birthtimespec =
- VATTR_IS_SUPPORTED(&va, va_create_time) ? va.va_create_time : va.va_change_time;
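+	/*
+	 * If the filesystem can't report a create time, return a zero
+	 * birthtime rather than substituting the change time.
+	 */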
+ if (VATTR_IS_SUPPORTED(&va, va_create_time)) {
+ sb64->st_birthtimespec = va.va_create_time;
+ } else {
+ sb64->st_birthtimespec.tv_sec = sb64->st_birthtimespec.tv_nsec = 0;
+ }
sb64->st_blksize = va.va_iosize;
sb64->st_flags = va.va_flags;
sb64->st_blocks = roundup(va.va_total_alloc, 512) / 512;
return(error);
/* actual stat */
- return(vn_stat_noauth(vp, sb, xsec, isstat64, ctx));
+ return(vn_stat_noauth(vp, sb, xsec, isstat64, ctx, NOCRED));
}
static int
vn_closefile(struct fileglob *fg, vfs_context_t ctx)
{
- struct vnode *vp = (struct vnode *)fg->fg_data;
+ struct vnode *vp = fg->fg_data;
int error;
- struct flock lf;
if ( (error = vnode_getwithref(vp)) == 0 ) {
-
- if ((fg->fg_flag & FHASLOCK) &&
- FILEGLOB_DTYPE(fg) == DTYPE_VNODE) {
- lf.l_whence = SEEK_SET;
- lf.l_start = 0;
- lf.l_len = 0;
- lf.l_type = F_UNLCK;
-
- (void)VNOP_ADVLOCK(vp, (caddr_t)fg, F_UNLCK, &lf, F_FLOCK, ctx, NULL);
+ if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE &&
+ ((fg->fg_flag & FHASLOCK) != 0 ||
+ (fg->fg_lflags & FG_HAS_OFDLOCK) != 0)) {
+ struct flock lf = {
+ .l_whence = SEEK_SET,
+ .l_start = 0,
+ .l_len = 0,
+ .l_type = F_UNLCK
+ };
+
+ if ((fg->fg_flag & FHASLOCK) != 0)
+ (void) VNOP_ADVLOCK(vp, (caddr_t)fg,
+ F_UNLCK, &lf, F_FLOCK, ctx, NULL);
+
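+			/* OFD locks are owned by the open file description, i.e. the fileglob */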
+ if ((fg->fg_lflags & FG_HAS_OFDLOCK) != 0)
+ (void) VNOP_ADVLOCK(vp, (caddr_t)fg,
+ F_UNLCK, &lf, F_OFD_LOCK, ctx, NULL);
}
error = vn_close(vp, fg->fg_flag, ctx);
-
- (void)vnode_put(vp);
+ (void) vnode_put(vp);
}
- return(error);
+ return (error);
}
/*
static int
vn_kqfilt_add(struct fileproc *fp, struct knote *kn, vfs_context_t ctx)
{
- int error;
struct vnode *vp;
+ int error = 0;
+ int result = 0;
vp = (struct vnode *)fp->f_fglob->fg_data;
}
} else if (!vnode_isreg(vp)) {
- if (vnode_ischr(vp) &&
- (error = spec_kqfilter(vp, kn)) == 0) {
- /* claimed by a special device */
- vnode_put(vp);
- return 0;
+ if (vnode_ischr(vp)) {
+ result = spec_kqfilter(vp, kn);
+ if ((kn->kn_flags & EV_ERROR) == 0) {
+ /* claimed by a special device */
+ vnode_put(vp);
+ return result;
+ }
}
-
error = EINVAL;
}
break;
error = EINVAL;
}
- if (error) {
- vnode_put(vp);
- return error;
- }
+ if (error == 0) {
#if CONFIG_MACF
- error = mac_vnode_check_kqfilter(ctx, fp->f_fglob->fg_cred, kn, vp);
- if (error) {
- vnode_put(vp);
- return error;
- }
+ error = mac_vnode_check_kqfilter(ctx, fp->f_fglob->fg_cred, kn, vp);
+ if (error) {
+ vnode_put(vp);
+ goto out;
+ }
#endif
- kn->kn_hook = (void*)vp;
- kn->kn_hookid = vnode_vid(vp);
- kn->kn_fop = &vnode_filtops;
+ kn->kn_hook = (void*)vp;
+ kn->kn_hookid = vnode_vid(vp);
+ kn->kn_filtid = EVFILTID_VN;
- vnode_lock(vp);
- KNOTE_ATTACH(&vp->v_knotes, kn);
- vnode_unlock(vp);
+ vnode_lock(vp);
+ KNOTE_ATTACH(&vp->v_knotes, kn);
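+			/* evaluate the initial event state while the vnode is still locked */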
+ result = filt_vnode_common(kn, vp, 0);
+ vnode_unlock(vp);
- /* Ask the filesystem to provide remove notifications, but ignore failure */
- VNOP_MONITOR(vp, 0, VNODE_MONITOR_BEGIN, (void*) kn, ctx);
+ /*
+ * Ask the filesystem to provide remove notifications,
+ * but ignore failure
+ */
+ VNOP_MONITOR(vp, 0, VNODE_MONITOR_BEGIN, (void*) kn, ctx);
+ }
vnode_put(vp);
}
- return (error);
+ out:
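+	/* knote attach errors are reported via EV_ERROR and kn_data, not the return value */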
+ if (error) {
+ kn->kn_flags = EV_ERROR;
+ kn->kn_data = error;
+ }
+
+ return result;
}
static void
* --If hint is revoke, set special flags and activate
*/
static int
-filt_vnode(struct knote *kn, long hint)
+filt_vnode_common(struct knote *kn, vnode_t vp, long hint)
{
- vnode_t vp = (struct vnode *)kn->kn_hook;
int activate = 0;
- long orig_hint = hint;
-
- if (0 == hint) {
- vnode_lock(vp);
- if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) {
- /* Is recycled */
- hint = NOTE_REVOKE;
- }
- } else {
- lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
- }
+ lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
/* Special handling for vnodes that are in recycle or already gone */
if (NOTE_REVOKE == hint) {
panic("Invalid knote filter on a vnode!\n");
}
}
+ return (activate);
+}
- if (orig_hint == 0) {
- /*
- * Definitely need to unlock, may need to put
- */
- if (hint == 0) {
- vnode_put_locked(vp);
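+/*
+ * The f_event callback: callers (via KNOTE) already hold the vnode lock,
+ * which filt_vnode_common asserts.
+ */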
+static int
+filt_vnode(struct knote *kn, long hint)
+{
+ vnode_t vp = (struct vnode *)kn->kn_hook;
+
+ return filt_vnode_common(kn, vp, hint);
+}
+
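+/*
+ * The f_touch callback: a kevent(2) re-registration latches new fflags and
+ * udata, then re-evaluates the knote. We take our own iocount here; if
+ * vnode_getiocount fails, the vnode has been recycled and is treated as
+ * revoked (NOTE_REVOKE).
+ */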
+static int
+filt_vntouch(struct knote *kn, struct kevent_internal_s *kev)
+{
+ vnode_t vp = (struct vnode *)kn->kn_hook;
+ int activate;
+ int hint = 0;
+
+ vnode_lock(vp);
+ if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) {
+ /* is recycled */
+ hint = NOTE_REVOKE;
+ }
+
+ /* accept new input fflags mask */
+ kn->kn_sfflags = kev->fflags;
+ if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
+ kn->kn_udata = kev->udata;
+
+ activate = filt_vnode_common(kn, vp, hint);
+
+ if (hint == 0)
+ vnode_put_locked(vp);
+ vnode_unlock(vp);
+
+ return activate;
+}
+
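+/*
+ * The f_process callback: deliver the event to userspace. For EV_CLEAR
+ * knotes, the accumulated data and fflags are consumed by the delivery.
+ */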
+static int
+filt_vnprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
+{
+#pragma unused(data)
+ vnode_t vp = (struct vnode *)kn->kn_hook;
+ int activate;
+ int hint = 0;
+
+ vnode_lock(vp);
+ if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) {
+ /* Is recycled */
+ hint = NOTE_REVOKE;
+ }
+ activate = filt_vnode_common(kn, vp, hint);
+ if (activate) {
+ *kev = kn->kn_kevent;
+ if (kn->kn_flags & EV_CLEAR) {
+ kn->kn_data = 0;
+ kn->kn_fflags = 0;
}
- vnode_unlock(vp);
}
- return (activate);
+ /* Definitely need to unlock, may need to put */
+ if (hint == 0)
+ vnode_put_locked(vp);
+ vnode_unlock(vp);
+
+ return activate;
}
+