#include <security/mac_framework.h>
#endif
-#if CONFIG_PROTECT
-#include <sys/cprotect.h>
-#endif
-
#include <IOKit/IOBSD.h>
static int vn_closefile(struct fileglob *fp, vfs_context_t ctx);
static int vn_kqfilt_add(struct fileproc *fp, struct knote *kn,
vfs_context_t ctx);
static void filt_vndetach(struct knote *kn);
static int filt_vnode(struct knote *kn, long hint);
+static int filt_vnode_common(struct knote *kn, vnode_t vp, long hint);
static int vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx);
#if 0
static int vn_kqfilt_remove(struct vnode *vp, uintptr_t ident,
vfs_context_t ctx);
#endif
const struct fileops vnops = {
- DTYPE_VNODE,
- vn_read,
- vn_write,
- vn_ioctl,
- vn_select,
- vn_closefile,
- vn_kqfilt_add,
- NULL
+ .fo_type = DTYPE_VNODE,
+ .fo_read = vn_read,
+ .fo_write = vn_write,
+ .fo_ioctl = vn_ioctl,
+ .fo_select = vn_select,
+ .fo_close = vn_closefile,
+ .fo_kqfilter = vn_kqfilt_add,
+ .fo_drain = NULL,
};
+static int filt_vntouch(struct knote *kn, struct kevent_internal_s *kev);
+static int filt_vnprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
+
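+/*
+ * Vnode knote filter operations. filt_vnode() is the f_event hook and runs
+ * with the vnode lock already held by the KNOTE() caller; the f_touch and
+ * f_process hooks below run from the kqueue side, so they take their own
+ * iocount (validated against the vid captured at attach time) to detect a
+ * recycled vnode before examining it.
+ */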
struct filterops vnode_filtops = {
.f_isfd = 1,
.f_attach = NULL,
.f_detach = filt_vndetach,
- .f_event = filt_vnode
+ .f_event = filt_vnode,
+ .f_touch = filt_vntouch,
+ .f_process = filt_vnprocess,
};
/*
fmode = *fmodep;
origcnflags = ndp->ni_cnd.cn_flags;
+ // If raw encrypted mode is requested, handle that here
+ if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags)
+ && ISSET(vap->va_dataprotect_flags, VA_DP_RAWENCRYPTED)) {
+ fmode |= FENCRYPTED;
+ }
+
/*
* O_CREAT
*/
/* open calls are allowed for resource forks. */
ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
#endif
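+ /*
+ * For raw-encrypted opens, skip the name cache so the lookup
+ * reaches the filesystem, which handles CN_RAW_ENCRYPTED itself.
+ */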
+ if (fmode & FENCRYPTED)
+ ndp->ni_cnd.cn_flags |= CN_RAW_ENCRYPTED | CN_SKIPNAMECACHE;
ndp->ni_flag = NAMEI_COMPOUNDOPEN;
/* preserve NOFOLLOW from vnode_open() */
panic("Haven't cleaned up adequately in vn_open_auth()");
}
+#if DEVELOPMENT || DEBUG
+ /*
+ * XXX VSWAP: Check for entitlements or special flag here
+ * so we can restrict access appropriately.
+ */
+#else /* DEVELOPMENT || DEBUG */
+
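+ /*
+ * On release kernels, only the kernel itself may open a swap
+ * file for writing or truncation.
+ */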
+ if (vnode_isswap(vp) && (fmode & (FWRITE | O_TRUNC)) && (ctx != vfs_context_kernel())) {
+ error = EPERM;
+ goto bad;
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
/*
* Expect to use this code for filesystems without compound VNOPs, for the root
* of a filesystem, which can't be "looked up" in the sense of VNOP_LOOKUP(),
}
}
-#if CONFIG_PROTECT
- // If raw encrypted mode is requested, handle that here
- if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags)
- && ISSET(vap->va_dataprotect_flags, VA_DP_RAWENCRYPTED)) {
- fmode |= FENCRYPTED;
- }
if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags)
&& ISSET(vap->va_dataprotect_flags, VA_DP_RAWUNENCRYPTED)) {
/* Don't allow unencrypted io request from user space unless entitled */
fmode |= FUNENCRYPTED;
}
- /*
- * Perform any content protection access checks prior to calling
- * into the filesystem.
- */
- error = cp_handle_open (vp, fmode);
- if (error) {
- goto bad;
- }
-#endif
-
error = VNOP_OPEN(vp, fmode, ctx);
if (error) {
goto bad;
error = VNOP_READ(vp, auio, ioflg, &context);
}
} else {
+
+#if DEVELOPMENT || DEBUG
+ /*
+ * XXX VSWAP: Check for entitlements or special flag here
+ * so we can restrict access appropriately.
+ */
error = VNOP_WRITE(vp, auio, ioflg, &context);
+#else /* DEVELOPMENT || DEBUG */
+
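+ /*
+ * On release kernels, writes to a swap file must come from the
+ * swap-dispatch path or explicitly skip encryption handling.
+ */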
+ if (vnode_isswap(vp) && ((ioflg & (IO_SWAP_DISPATCH | IO_SKIP_ENCRYPTION)) == 0)) {
+ error = EPERM;
+ } else {
+ error = VNOP_WRITE(vp, auio, ioflg, &context);
+ }
+#endif /* DEVELOPMENT || DEBUG */
}
}
count = uio_resid(uio);
if (vnode_isswap(vp) && !(IO_SKIP_ENCRYPTION & ioflag)) {
+
/* special case for swap files */
error = vn_read_swapfile(vp, uio);
} else {
error = VNOP_READ(vp, uio, ioflag, ctx);
}
+
if ((flags & FOF_OFFSET) == 0) {
fp->f_fglob->fg_offset += count - uio_resid(uio);
if (offset_locked) {
return(error);
}
+#if DEVELOPMENT || DEBUG
+ /*
+ * XXX VSWAP: Check for entitlements or special flag here
+ * so we can restrict access appropriately.
+ */
+#else /* DEVELOPMENT || DEBUG */
+
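+ /*
+ * On release kernels, reject write(2) to a swap file outright.
+ */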
+ if (vnode_isswap(vp)) {
+ (void)vnode_put(vp);
+ error = EPERM;
+ return (error);
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
#if CONFIG_MACF
error = mac_vnode_check_write(ctx, vfs_context_ucred(ctx), vp);
if (error) {
* vnode_getattr:???
*/
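+/*
+ * file_cred is the credential the file was opened with, when the caller
+ * has one (descriptor-based callers such as the fstat(2) path are
+ * expected to supply the fileglob's credential); path-based callers
+ * such as vn_stat() pass NOCRED.
+ */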
int
-vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat64, vfs_context_t ctx)
+vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat64,
+ vfs_context_t ctx, struct ucred *file_cred)
{
struct vnode_attr va;
int error;
error = vnode_getattr(vp, &va, ctx);
if (error)
goto out;
+#if CONFIG_MACF
+ /*
+ * Give MAC policies a chance to reject or filter the attributes
+ * returned by the filesystem. Note that MAC policies are consulted
+ * *after* calling the filesystem because filesystems can return more
+ * attributes than were requested, so policies wouldn't be authoritative
+ * were they to be consulted beforehand. This also gives policies an
+ * opportunity to change the values of attributes retrieved.
+ */
+ error = mac_vnode_check_getattr(ctx, file_cred, vp, &va);
+ if (error)
+ goto out;
+#endif
/*
* Copy from vattr table
*/
return(error);
/* actual stat */
- return(vn_stat_noauth(vp, sb, xsec, isstat64, ctx));
+ return(vn_stat_noauth(vp, sb, xsec, isstat64, ctx, NOCRED));
}
static int
vn_kqfilt_add(struct fileproc *fp, struct knote *kn, vfs_context_t ctx)
{
- int error;
struct vnode *vp;
+ int error = 0;
+ int result = 0;
vp = (struct vnode *)fp->f_fglob->fg_data;
}
} else if (!vnode_isreg(vp)) {
- if (vnode_ischr(vp) &&
- (error = spec_kqfilter(vp, kn)) == 0) {
- /* claimed by a special device */
- vnode_put(vp);
- return 0;
+ if (vnode_ischr(vp)) {
+ result = spec_kqfilter(vp, kn);
+ if ((kn->kn_flags & EV_ERROR) == 0) {
+ /* claimed by a special device */
+ vnode_put(vp);
+ return result;
+ }
}
-
error = EINVAL;
}
break;
error = EINVAL;
}
- if (error) {
- vnode_put(vp);
- return error;
- }
+ if (error == 0) {
#if CONFIG_MACF
- error = mac_vnode_check_kqfilter(ctx, fp->f_fglob->fg_cred, kn, vp);
- if (error) {
- vnode_put(vp);
- return error;
- }
+ error = mac_vnode_check_kqfilter(ctx, fp->f_fglob->fg_cred, kn, vp);
+ if (error) {
+ vnode_put(vp);
+ goto out;
+ }
#endif
- kn->kn_hook = (void*)vp;
- kn->kn_hookid = vnode_vid(vp);
- kn->kn_fop = &vnode_filtops;
+ kn->kn_hook = (void*)vp;
+ kn->kn_hookid = vnode_vid(vp);
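+ /* identify the filter by id; kqueue resolves the filterops from its table */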
+ kn->kn_filtid = EVFILTID_VN;
- vnode_lock(vp);
- KNOTE_ATTACH(&vp->v_knotes, kn);
- vnode_unlock(vp);
+ vnode_lock(vp);
+ KNOTE_ATTACH(&vp->v_knotes, kn);
+ result = filt_vnode_common(kn, vp, 0);
+ vnode_unlock(vp);
- /* Ask the filesystem to provide remove notifications, but ignore failure */
- VNOP_MONITOR(vp, 0, VNODE_MONITOR_BEGIN, (void*) kn, ctx);
+ /*
+ * Ask the filesystem to provide remove notifications,
+ * but ignore failure
+ */
+ VNOP_MONITOR(vp, 0, VNODE_MONITOR_BEGIN, (void*) kn, ctx);
+ }
vnode_put(vp);
}
- return (error);
+ out:
+ if (error) {
+ kn->kn_flags = EV_ERROR;
+ kn->kn_data = error;
+ }
+
+ return result;
}
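+/*
+ * Sketch of a typical user-space consumer of this filter (illustrative
+ * only; error handling omitted, watched path is an example):
+ *
+ *	int kq = kqueue();
+ *	int fd = open("/path/to/watch", O_EVTONLY);
+ *	struct kevent ev;
+ *	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
+ *	    NOTE_DELETE | NOTE_WRITE | NOTE_RENAME, 0, NULL);
+ *	kevent(kq, &ev, 1, NULL, 0, NULL);	// registration -> vn_kqfilt_add()
+ *	kevent(kq, NULL, 0, &ev, 1, NULL);	// delivery -> filt_vnprocess()
+ */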
static void
* --If hint is revoke, set special flags and activate
*/
static int
-filt_vnode(struct knote *kn, long hint)
+filt_vnode_common(struct knote *kn, vnode_t vp, long hint)
{
- vnode_t vp = (struct vnode *)kn->kn_hook;
int activate = 0;
- long orig_hint = hint;
- if (0 == hint) {
- vnode_lock(vp);
-
- if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) {
- /* Is recycled */
- hint = NOTE_REVOKE;
- }
- } else {
- lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
- }
+ lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
/* Special handling for vnodes that are in recycle or already gone */
if (NOTE_REVOKE == hint) {
panic("Invalid knote filter on a vnode!\n");
}
}
+ return (activate);
+}
- if (orig_hint == 0) {
- /*
- * Definitely need to unlock, may need to put
- */
- if (hint == 0) {
- vnode_put_locked(vp);
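+/*
+ * f_event callback: invoked via KNOTE() with the vnode lock held, so the
+ * iocount/vid dance in the touch and process paths is unnecessary here.
+ */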
+static int
+filt_vnode(struct knote *kn, long hint)
+{
+ vnode_t vp = (struct vnode *)kn->kn_hook;
+
+ return filt_vnode_common(kn, vp, hint);
+}
+
+static int
+filt_vntouch(struct knote *kn, struct kevent_internal_s *kev)
+{
+ vnode_t vp = (struct vnode *)kn->kn_hook;
+ int activate;
+ int hint = 0;
+
+ vnode_lock(vp);
+ if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) {
+ /* is recycled */
+ hint = NOTE_REVOKE;
+ }
+
+ /* accept new input fflags mask */
+ kn->kn_sfflags = kev->fflags;
+ if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
+ kn->kn_udata = kev->udata;
+
+ activate = filt_vnode_common(kn, vp, hint);
+
+ if (hint == 0)
+ vnode_put_locked(vp);
+ vnode_unlock(vp);
+
+ return activate;
+}
+
+static int
+filt_vnprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
+{
+#pragma unused(data)
+ vnode_t vp = (struct vnode *)kn->kn_hook;
+ int activate;
+ int hint = 0;
+
+ vnode_lock(vp);
+ if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) {
+ /* Is recycled */
+ hint = NOTE_REVOKE;
+ }
+ activate = filt_vnode_common(kn, vp, hint);
+ if (activate) {
+ *kev = kn->kn_kevent;
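+ /* EV_CLEAR: consume the accumulated state once it has been delivered */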
+ if (kn->kn_flags & EV_CLEAR) {
+ kn->kn_data = 0;
+ kn->kn_fflags = 0;
}
- vnode_unlock(vp);
}
- return (activate);
+ /* Definitely need to unlock, may need to put */
+ if (hint == 0)
+ vnode_put_locked(vp);
+ vnode_unlock(vp);
+
+ return activate;
}
+