/*
- * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/kauth.h>
#include <kern/kalloc.h>
#include <security/audit/audit.h>
-
+#include <sys/dtrace.h> /* to get the prototype for strstr() in sys/dtrace_glue.h */
#if CONFIG_MACF
#include <security/mac_framework.h>
#endif
#define VOLFS_MIN_PATH_LEN 9
-static void kdebug_lookup(struct vnode *dp, struct componentname *cnp);
-
#if CONFIG_VOLFS
static int vfs_getrealpath(const char * path, char * realpath, size_t bufsize, vfs_context_t ctx);
#define MAX_VOLFS_RESTARTS 5
#if CONFIG_VOLFS
int volfs_restarts = 0;
#endif
+ size_t bytes_copied = 0;
fdp = p->p_fd;
retry_copy:
if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) {
error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf,
- cnp->cn_pnlen, (size_t *)&ndp->ni_pathlen);
+ cnp->cn_pnlen, &bytes_copied);
} else {
error = copystr(CAST_DOWN(void *, ndp->ni_dirp), cnp->cn_pnbuf,
- cnp->cn_pnlen, (size_t *)&ndp->ni_pathlen);
+ cnp->cn_pnlen, &bytes_copied);
}
if (error == ENAMETOOLONG && !(cnp->cn_flags & HASBUF)) {
MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
cnp->cn_flags |= HASBUF;
cnp->cn_pnlen = MAXPATHLEN;
+ bytes_copied = 0;
goto retry_copy;
}
if (error)
goto error_out;
+ ndp->ni_pathlen = bytes_copied;
+ bytes_copied = 0;
/*
* Since the name cache may contain positive entries of
ndp->ni_vp = NULLVP;
for (;;) {
+#if CONFIG_MACF
+ /*
+ * Give MACF policies a chance to reject the lookup
+ * before performing any filesystem operations.
+ * This hook is called before resolving the path and
+ * again each time a symlink is encountered.
+ * NB: policies receive path information as supplied
+ * by the caller and thus cannot be trusted.
+ */
+ error = mac_vnode_check_lookup_preflight(ctx, dp, cnp->cn_nameptr, cnp->cn_namelen);
+ if (error) {
+ goto error_out;
+ }
+#endif
+
ndp->ni_startdir = dp;
if ( (error = lookup(ndp)) ) {
goto error_out;
}
+
/*
* Check for symbolic link
*/
return 0;
}
+
static int
lookup_authorize_search(vnode_t dp, struct componentname *cnp, int dp_authorized_in_cache, vfs_context_t ctx)
{
{
vnode_t svp = NULLVP;
enum nsoperation nsop;
+ int nsflags;
int error;
if (dp->v_type != VREG) {
error = EPERM;
goto out;
}
+
+ nsflags = 0;
+ if (cnp->cn_flags & CN_RAW_ENCRYPTED)
+ nsflags |= NS_GETRAWENCRYPTED;
+
/* Ask the file system for the resource fork. */
- error = vnode_getnamedstream(dp, &svp, XATTR_RESOURCEFORK_NAME, nsop, 0, ctx);
+ error = vnode_getnamedstream(dp, &svp, XATTR_RESOURCEFORK_NAME, nsop, nsflags, ctx);
/* During a create, it is OK for the stream vnode to be missing. */
if (error == ENOATTR || error == ENOENT) {
goto nextname;
}
-#if CONFIG_TRIGGERS
- if (dp->v_resolve) {
- error = vnode_trigger_resolve(dp, ndp, ctx);
- if (error) {
- goto out;
- }
- }
-#endif /* CONFIG_TRIGGERS */
-
/*
* Take into account any additional components consumed by
* the underlying filesystem.
* .. in the other file system.
*/
if ( (cnp->cn_flags & ISDOTDOT) ) {
+ /*
+ * if this is a chroot'ed process, check if the current
+ * directory is still a subdirectory of the process's
+ * root directory.
+ */
+ if (ndp->ni_rootdir && (ndp->ni_rootdir != rootvnode) &&
+ dp != ndp->ni_rootdir) {
+ int sdir_error;
+ int is_subdir = FALSE;
+
+ sdir_error = vnode_issubdir(dp, ndp->ni_rootdir,
+ &is_subdir, vfs_context_kernel());
+
+ /*
+ * If we couldn't determine if dp is a subdirectory of
+ * ndp->ni_rootdir (sdir_error != 0), we let the request
+ * proceed.
+ */
+ if (!sdir_error && !is_subdir) {
+ vnode_put(dp);
+ dp = ndp->ni_rootdir;
+ /*
+ * There's a ref on the process's root directory
+ * but we can't use vnode_getwithref here as
+ * there is nothing preventing that ref being
+ * released by another thread.
+ */
+ if (vnode_get(dp)) {
+ error = ENOENT;
+ goto bad;
+ }
+ }
+ }
+
for (;;) {
if (dp == ndp->ni_rootdir || dp == rootvnode) {
ndp->ni_dvp = dp;
uint32_t depth = 0;
vnode_t mounted_on_dp;
int current_mount_generation = 0;
+#if CONFIG_TRIGGERS
+ vnode_t triggered_dp = NULLVP;
+ int retry_cnt = 0;
+#define MAX_TRIGGER_RETRIES 1
+#endif
- mounted_on_dp = dp;
- current_mount_generation = mount_generation;
-
- while ((dp->v_type == VDIR) && dp->v_mountedhere &&
- ((cnp->cn_flags & NOCROSSMOUNT) == 0)) {
+ if (dp->v_type != VDIR || cnp->cn_flags & NOCROSSMOUNT)
+ return 0;
- if (dp->v_mountedhere->mnt_lflag & MNT_LFORCE) {
- break; // don't traverse into a forced unmount
- }
+ mounted_on_dp = dp;
#if CONFIG_TRIGGERS
- /*
- * For a trigger vnode, call its resolver when crossing its mount (if requested)
- */
- if (dp->v_resolve) {
- (void) vnode_trigger_resolve(dp, ndp, ctx);
- }
+restart:
#endif
- vnode_lock(dp);
-
- if ((dp->v_type == VDIR) && (mp = dp->v_mountedhere)) {
+ current_mount_generation = mount_generation;
+ while (dp->v_mountedhere) {
+ vnode_lock_spin(dp);
+ if ((mp = dp->v_mountedhere)) {
mp->mnt_crossref++;
vnode_unlock(dp);
+ } else {
+ vnode_unlock(dp);
+ break;
+ }
+ if (ISSET(mp->mnt_lflag, MNT_LFORCE)) {
+ mount_dropcrossref(mp, dp, 0);
+ break; // don't traverse into a forced unmount
+ }
- if (vfs_busy(mp, vbusyflags)) {
- mount_dropcrossref(mp, dp, 0);
- if (vbusyflags == LK_NOWAIT) {
- error = ENOENT;
- goto out;
- }
-
- continue;
- }
-
- error = VFS_ROOT(mp, &tdp, ctx);
+ if (vfs_busy(mp, vbusyflags)) {
mount_dropcrossref(mp, dp, 0);
- vfs_unbusy(mp);
-
- if (error) {
+ if (vbusyflags == LK_NOWAIT) {
+ error = ENOENT;
goto out;
}
- vnode_put(dp);
- ndp->ni_vp = dp = tdp;
- depth++;
+ continue;
+ }
-#if CONFIG_TRIGGERS
- /*
- * Check if root dir is a trigger vnode
- */
- if (dp->v_resolve) {
- error = vnode_trigger_resolve(dp, ndp, ctx);
- if (error) {
- goto out;
- }
- }
-#endif
+ error = VFS_ROOT(mp, &tdp, ctx);
- } else {
- vnode_unlock(dp);
+ mount_dropcrossref(mp, dp, 0);
+ vfs_unbusy(mp);
+
+ if (error) {
+ goto out;
+ }
+
+ vnode_put(dp);
+ ndp->ni_vp = dp = tdp;
+ if (dp->v_type != VDIR) {
+#if DEVELOPMENT || DEBUG
+ panic("%s : Root of filesystem not a directory\n",
+ __FUNCTION__);
+#else
break;
+#endif
}
+ depth++;
}
+#if CONFIG_TRIGGERS
+ /*
+ * The triggered_dp check here is required but is susceptible to an
+ * (unlikely) race in which a trigger mount is done from here and is
+ * unmounted before we get past vfs_busy above. We retry to deal with
+ * that case, but it has the side effect of unwanted retries for
+ * "special" processes which don't want to trigger mounts.
+ */
+ if (dp->v_resolve && retry_cnt < MAX_TRIGGER_RETRIES) {
+ error = vnode_trigger_resolve(dp, ndp, ctx);
+ if (error)
+ goto out;
+ if (dp == triggered_dp)
+ retry_cnt += 1;
+ else
+ retry_cnt = 0;
+ triggered_dp = dp;
+ goto restart;
+ }
+#endif /* CONFIG_TRIGGERS */
+
if (depth) {
mp = mounted_on_dp->v_mountedhere;
int error;
char *cp; /* pointer into pathname argument */
uio_t auio;
- char uio_buf[ UIO_SIZEOF(1) ];
+ union {
+ union {
+ struct user_iovec s_uiovec;
+ struct kern_iovec s_kiovec;
+ } u_iovec;
+ struct uio s_uio;
+ char uio_buf[ UIO_SIZEOF(1) ];
+ } u_uio_buf; /* union only for aligning uio_buf correctly */
int need_newpathbuf;
u_int linklen;
struct componentname *cnp = &ndp->ni_cnd;
} else {
cp = cnp->cn_pnbuf;
}
- auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
+ auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
+ &u_uio_buf.uio_buf[0], sizeof(u_uio_buf.uio_buf));
uio_addiov(auio, CAST_USER_ADDR_T(cp), MAXPATHLEN);
#if (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST)
void
-kdebug_lookup_gen_events(long *dbg_parms, int dbg_namelen, void *dp, boolean_t lookup)
+kdebug_vfs_lookup(long *dbg_parms, int dbg_namelen, void *dp, uint32_t flags)
{
int code;
unsigned int i;
+ bool lookup = flags & KDBG_VFS_LOOKUP_FLAG_LOOKUP;
+ bool noprocfilt = flags & KDBG_VFS_LOOKUP_FLAG_NOPROCFILT;
/*
* In the event that we collect multiple, consecutive pathname
* entries, we must mark the start of the path's string and the end.
*/
- if (lookup == TRUE)
+ if (lookup) {
code = VFS_LOOKUP | DBG_FUNC_START;
- else
+ } else {
code = VFS_LOOKUP_DONE | DBG_FUNC_START;
+ }
if (dbg_namelen <= (int)(3 * sizeof(long)))
code |= DBG_FUNC_END;
- KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, code, VM_KERNEL_ADDRPERM(dp), dbg_parms[0], dbg_parms[1], dbg_parms[2], 0);
+ if (noprocfilt) {
+ KDBG_RELEASE_NOPROCFILT(code, kdebug_vnode(dp), dbg_parms[0],
+ dbg_parms[1], dbg_parms[2]);
+ } else {
+ KDBG_RELEASE(code, kdebug_vnode(dp), dbg_parms[0], dbg_parms[1],
+ dbg_parms[2]);
+ }
code &= ~DBG_FUNC_START;
if (dbg_namelen <= (int)(4 * sizeof(long)))
code |= DBG_FUNC_END;
- KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, code, dbg_parms[i], dbg_parms[i+1], dbg_parms[i+2], dbg_parms[i+3], 0);
+ if (noprocfilt) {
+ KDBG_RELEASE_NOPROCFILT(code, dbg_parms[i], dbg_parms[i + 1],
+ dbg_parms[i + 2], dbg_parms[i + 3]);
+ } else {
+ KDBG_RELEASE(code, dbg_parms[i], dbg_parms[i + 1], dbg_parms[i + 2],
+ dbg_parms[i + 3]);
+ }
}
}
-static void
+/*
+ * Compatibility wrapper: preserves the historical kdebug_lookup_gen_events()
+ * interface (boolean_t lookup flag) by translating it into the flags argument
+ * of kdebug_vfs_lookup() — TRUE maps to KDBG_VFS_LOOKUP_FLAG_LOOKUP, FALSE to
+ * no flags (a lookup-done event).
+ */
+void
+kdebug_lookup_gen_events(long *dbg_parms, int dbg_namelen, void *dp,
+    boolean_t lookup)
+{
+	kdebug_vfs_lookup(dbg_parms, dbg_namelen, dp,
+	    lookup ? KDBG_VFS_LOOKUP_FLAG_LOOKUP : 0);
+}
+
+
+void
kdebug_lookup(vnode_t dp, struct componentname *cnp)
{
int dbg_namelen;
*(cnp->cn_nameptr + cnp->cn_namelen) ? '>' : 0,
sizeof(dbg_parms) - dbg_namelen);
}
- kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)dp, TRUE);
-}
+ kdebug_vfs_lookup(dbg_parms, dbg_namelen, (void *)dp,
+ KDBG_VFS_LOOKUP_FLAG_LOOKUP);
+}
#else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) */
+/*
+ * No-op stub for configurations where kdebug tracing is compiled out
+ * (KDEBUG_LEVEL < KDEBUG_LEVEL_IST): emits no trace events; all
+ * arguments are intentionally unused.
+ */
void
-kdebug_lookup_gen_events(long *dbg_parms __unused, int dbg_namelen __unused, void *dp __unused)
+kdebug_vfs_lookup(long *dbg_parms __unused, int dbg_namelen __unused,
+    void *dp __unused, __unused uint32_t flags)
{
}