#include <vm/vm_protos.h>
#define f_flag f_fglob->fg_flag
-#define f_type f_fglob->fg_type
+#define f_type f_fglob->fg_ops->fo_type
#define f_msgcount f_fglob->fg_msgcount
#define f_cred f_fglob->fg_cred
#define f_ops f_fglob->fg_ops
};
#define PSHMCACHE_NULL (struct pshmcache *)0
+#define PSHMCACHE_NOTFOUND (0)
+#define PSHMCACHE_FOUND (-1)
+#define PSHMCACHE_NEGATIVE (ENOENT)
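+/*
+ * pshm_cache_search() return values: PSHMCACHE_NOTFOUND means no entry for
+ * the name, PSHMCACHE_FOUND means a positive entry was found and returned
+ * through *pcache, and PSHMCACHE_NEGATIVE means a negative entry (one with
+ * no pshminfo attached) was hit.
+ */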
+
struct pshmstats {
long goodhits; /* hits that we can really use */
long neghits; /* negative hits that we can use */
static int pshm_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx);
int pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, proc_t p);
+int pshm_cache_purge_all(proc_t p);
+
static int pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp);
static void pshm_cache_delete(struct pshmcache *pcp);
-#if NOT_USED
-static void pshm_cache_purge(void);
-#endif /* NOT_USED */
static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
struct pshmcache **pcache, int addref);
-
-struct fileops pshmops =
- { pshm_read, pshm_write, pshm_ioctl, pshm_select, pshm_closefile, pshm_kqfilter, 0 };
+static int pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache);
+
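+/*
+ * The descriptor type used to be stored per file in the fileglob (fg_type)
+ * and set at open time; it now lives in the fileops (fo_type, see the f_type
+ * macro above), so the table carries DTYPE_PSXSHM and can be const. The
+ * initializer is positional, following the field order of struct fileops:
+ * fo_type, fo_read, fo_write, fo_ioctl, fo_select, fo_close, fo_kqfilter
+ * and fo_drain (the trailing 0).
+ */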
+static const struct fileops pshmops = {
+ DTYPE_PSXSHM,
+ pshm_read,
+ pshm_write,
+ pshm_ioctl,
+ pshm_select,
+ pshm_closefile,
+ pshm_kqfilter,
+ 0
+};
static lck_grp_t *psx_shm_subsys_lck_grp;
static lck_grp_attr_t *psx_shm_subsys_lck_grp_attr;
#define PSHM_SUBSYS_LOCK() lck_mtx_lock(& psx_shm_subsys_mutex)
#define PSHM_SUBSYS_UNLOCK() lck_mtx_unlock(& psx_shm_subsys_mutex)
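+/* assert that the calling thread holds the POSIX shm subsystem lock */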
+#define PSHM_SUBSYS_ASSERT_HELD() lck_mtx_assert(&psx_shm_subsys_mutex, LCK_MTX_ASSERT_OWNED)
/* Initialize the mutex governing access to the posix shm subsystem */
if (pnp->pshm_namelen > PSHMNAMLEN) {
pshmstats.longnames++;
- return (0);
+ return PSHMCACHE_NOTFOUND;
}
pcpp = PSHMHASH(pnp);
if (pcp == 0) {
pshmstats.miss++;
- return (0);
+ return PSHMCACHE_NOTFOUND;
}
/* We found a "positive" match, return the vnode */
*pcache = pcp;
if (addref)
pcp->pshminfo->pshm_usecount++;
- return (-1);
+ return PSHMCACHE_FOUND;
}
/*
* We found a "negative" match, ENOENT notifies client of this match.
- * The nc_vpid field records whether this is a whiteout.
*/
pshmstats.neghits++;
- return (ENOENT);
+ return PSHMCACHE_NEGATIVE;
}
/*
/* if the entry has already been added by someone else, return */
- if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == -1) {
- return(EEXIST);
+ if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == PSHMCACHE_FOUND) {
+ return EEXIST;
}
pshmnument++;
/*
* Fill in cache info.
- * For negative entries, we have to record whether it is a whiteout.
- * the whiteout flag is stored in the nc_vpid field which is
- * otherwise unused.
*/
pcp->pshminfo = pshmp;
pcp->pshm_nlen = pnp->pshm_namelen;
}
#endif
LIST_INSERT_HEAD(pcpp, pcp, pshm_hash);
- return(0);
+ return 0;
}
/*
pshmhashtbl = hashinit(desiredvnodes / 8, M_SHM, &pshmhash);
}
-#if NOT_USED
/*
- * Invalidate a all entries to particular vnode.
- *
- * We actually just increment the v_id, that will do it. The entries will
- * be purged by lookup as they get found. If the v_id wraps around, we
- * need to ditch the entire cache, to avoid confusion. No valid vnode will
- * ever have (v_id == 0).
+ * Invalidate all cache entries and delete all objects associated with them.
+ * Every non-kernel entry is going away; just dump them all.
+ *
+ * Callers must not hold the PSHM_SUBSYS lock (it is taken here) and must be
+ * the superuser.
 */
-static void
-pshm_cache_purge(void)
+int
+pshm_cache_purge_all(__unused proc_t p)
{
- struct pshmcache *pcp;
+ struct pshmcache *pcp, *tmppcp;
struct pshmhashhead *pcpp;
+ int error = 0;
+
+ if (kauth_cred_issuser(kauth_cred_get()) == 0)
+ return EPERM;
+ PSHM_SUBSYS_LOCK();
for (pcpp = &pshmhashtbl[pshmhash]; pcpp >= pshmhashtbl; pcpp--) {
- while ( (pcp = pcpp->lh_first) )
- pshm_cache_delete(pcp);
+ LIST_FOREACH_SAFE(pcp, pcpp, pshm_hash, tmppcp) {
+ assert(pcp->pshm_nlen);
+ error = pshm_unlink_internal(pcp->pshminfo, pcp);
+ if (error)
+ goto out;
+ }
}
+ assert(pshmnument == 0);
+
+out:
+ PSHM_SUBSYS_UNLOCK();
+
+ if (error)
+ printf("%s: Error %d removing shm cache: %ld remain!\n",
+ __func__, error, pshmnument);
+ return error;
}
-#endif /* NOT_USED */
static void
pshm_cache_delete(struct pshmcache *pcp)
PSHM_SUBSYS_UNLOCK();
- if (error == ENOENT) {
+ if (error == PSHMCACHE_NEGATIVE) {
error = EINVAL;
goto bad;
}
- if (!error) {
+ if (error == PSHMCACHE_NOTFOUND) {
incache = 0;
if (fmode & O_CREAT) {
/* create a new one (commit the allocation) */
proc_fdlock(p);
fp->f_flag = fmode & FMASK;
- fp->f_type = DTYPE_PSXSHM;
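+ /* no need to set f_type here any more; it now comes from pshmops (fo_type = DTYPE_PSXSHM) */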
fp->f_ops = &pshmops;
fp->f_data = (caddr_t)new_pnode;
*fdflags(p, indx) |= UF_EXCLOSE;
mach_vm_size_t total_size, alloc_size;
memory_object_size_t mosize;
struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p;
+ vm_map_t user_map;
#if CONFIG_MACF
int error;
#endif
+ user_map = current_map();
+
if (fp->f_type != DTYPE_PSXSHM) {
return(EINVAL);
}
#endif
pinfo->pshm_flags |= PSHM_ALLOCATING;
- total_size = round_page_64(length);
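+ /* round the requested length to the page size of the user's map rather than the kernel page size */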
+ total_size = vm_map_round_page(length,
+ vm_map_page_mask(user_map));
pshmobj_next_p = &pinfo->pshm_memobjects;
for (alloc_size = 0;
pshmobj_next_p = &pshmobj->pshmo_next;
}
- pinfo->pshm_flags = PSHM_ALLOCATED;
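+ /* mark the object ALLOCATED and clear ALLOCATING without disturbing the other flag bits */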
+ pinfo->pshm_flags |= PSHM_ALLOCATED;
+ pinfo->pshm_flags &= ~(PSHM_ALLOCATING);
pinfo->pshm_length = total_size;
PSHM_SUBSYS_UNLOCK();
return(0);
if ((flags & MAP_FIXED) == 0) {
alloc_flags = VM_FLAGS_ANYWHERE;
- user_addr = vm_map_round_page(user_addr);
+ user_addr = vm_map_round_page(user_addr,
+ vm_map_page_mask(user_map));
} else {
- if (user_addr != vm_map_round_page(user_addr))
+ if (user_addr != vm_map_round_page(user_addr,
+ vm_map_page_mask(user_map)))
return (EINVAL);
/*
* We do not get rid of the existing mappings here because
}
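+/*
+ * Remove a cache entry and drop the name's use-count reference on its
+ * pshminfo. Must be called with the PSHM_SUBSYS lock held (asserted below);
+ * the caller remains responsible for dropping the lock. When the last
+ * reference goes away, the backing memory objects and the pshminfo itself
+ * are released as well.
+ */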
+static int
+pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache)
+{
+ struct pshmobj *pshmobj, *pshmobj_next;
+
+ PSHM_SUBSYS_ASSERT_HELD();
+
+ if (!pinfo || !pcache)
+ return EINVAL;
+
+ if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) == 0)
+ return EINVAL;
+
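+ /* someone else is already tearing this object down; nothing more to do */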
+ if (pinfo->pshm_flags & PSHM_INDELETE)
+ return 0;
+
+ pinfo->pshm_flags |= PSHM_INDELETE;
+ pinfo->pshm_usecount--;
+
+ pshm_cache_delete(pcache);
+ pinfo->pshm_flags |= PSHM_REMOVED;
+
+ /* release the existence reference */
+ if (!pinfo->pshm_usecount) {
+#if CONFIG_MACF
+ mac_posixshm_label_destroy(pinfo);
+#endif
+ /*
+ * If this is the last reference going away on the object,
+ * then we need to destroy the backing object. The name
+ * has an implied but uncounted reference on the object,
+ * once it's created, since it's used as a rendezvous, and
+ * therefore may be subsequently reopened.
+ */
+ for (pshmobj = pinfo->pshm_memobjects;
+ pshmobj != NULL;
+ pshmobj = pshmobj_next) {
+ mach_memory_entry_port_release(pshmobj->pshmo_memobject);
+ pshmobj_next = pshmobj->pshmo_next;
+ FREE(pshmobj, M_SHM);
+ }
+ FREE(pinfo, M_SHM);
+ }
+
+ FREE(pcache, M_SHM);
+
+ return 0;
+}
+
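+/*
+ * shm_unlink(2): remove a name. A lookup miss or a negative cache hit now
+ * reports ENOENT (the old code returned EINVAL); the actual teardown is
+ * shared with pshm_cache_purge_all() via pshm_unlink_internal().
+ */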
int
-shm_unlink(__unused proc_t p, struct shm_unlink_args *uap,
- __unused int32_t *retval)
+shm_unlink(proc_t p, struct shm_unlink_args *uap, __unused int32_t *retval)
{
size_t i;
- int error=0;
+ char * pnbuf;
+ size_t pathlen;
+ int error = 0;
+
struct pshmname nd;
struct pshminfo *pinfo;
- char * pnbuf;
char * nameptr;
char * cp;
- size_t pathlen, plen;
- int incache = 0;
struct pshmcache *pcache = PSHMCACHE_NULL;
- struct pshmobj *pshmobj, *pshmobj_next;
pinfo = PSHMINFO_NULL;
+
MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
if (pnbuf == NULL) {
return(ENOSPC); /* XXX non-standard */
goto bad;
}
+ nameptr = pnbuf;
#ifdef PSXSHM_NAME_RESTRICT
- nameptr = pnbuf;
if (*nameptr == '/') {
while (*(nameptr++) == '/') {
- plen--;
+ pathlen--;
error = EINVAL;
goto bad;
}
}
#endif /* PSXSHM_NAME_RESTRICT */
- plen = pathlen;
- nameptr = pnbuf;
nd.pshm_nameptr = nameptr;
- nd.pshm_namelen = plen;
- nd. pshm_hash =0;
+ nd.pshm_namelen = pathlen;
+ nd.pshm_hash = 0;
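+ /* simple position-weighted hash of the name bytes */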
- for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) {
+ for (cp = nameptr, i=1; *cp != 0 && i <= pathlen; i++, cp++) {
nd.pshm_hash += (unsigned char)*cp * i;
}
PSHM_SUBSYS_LOCK();
error = pshm_cache_search(&pinfo, &nd, &pcache, 0);
- if (error == ENOENT) {
+ /* for unlink, a lookup miss or a negative hit both mean ENOENT */
+ if (error != PSHMCACHE_FOUND) {
PSHM_SUBSYS_UNLOCK();
- error = EINVAL;
+ error = ENOENT;
goto bad;
}
- if (!error) {
- PSHM_SUBSYS_UNLOCK();
- error = EINVAL;
- goto bad;
- } else
- incache = 1;
if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED))==0) {
PSHM_SUBSYS_UNLOCK();
error = 0;
goto bad;
}
+
#if CONFIG_MACF
error = mac_posixshm_check_unlink(kauth_cred_get(), pinfo, nameptr);
if (error) {
pinfo->pshm_mode);
/*
- * following file semantics, unlink should be allowed
- * for users with write permission only.
+ * following file semantics, unlink should be allowed
+ * for users with write permission only.
*/
if ( (error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p)) ) {
PSHM_SUBSYS_UNLOCK();
goto bad;
}
- pinfo->pshm_flags |= PSHM_INDELETE;
- pshm_cache_delete(pcache);
- pinfo->pshm_flags |= PSHM_REMOVED;
- /* release the existence reference */
- if (!--pinfo->pshm_usecount) {
-#if CONFIG_MACF
- mac_posixshm_label_destroy(pinfo);
-#endif
- PSHM_SUBSYS_UNLOCK();
- /*
- * If this is the last reference going away on the object,
- * then we need to destroy the backing object. The name
- * has an implied but uncounted reference on the object,
- * once it's created, since it's used as a rendezvous, and
- * therefore may be subsequently reopened.
- */
- for (pshmobj = pinfo->pshm_memobjects;
- pshmobj != NULL;
- pshmobj = pshmobj_next) {
- mach_memory_entry_port_release(pshmobj->pshmo_memobject);
- pshmobj_next = pshmobj->pshmo_next;
- FREE(pshmobj, M_SHM);
- }
- FREE(pinfo,M_SHM);
- } else {
- PSHM_SUBSYS_UNLOCK();
- }
- FREE(pcache, M_SHM);
- error = 0;
+ error = pshm_unlink_internal(pinfo, pcache);
+ PSHM_SUBSYS_UNLOCK();
+
bad:
FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
- return (error);
+ return error;
}
/* already called locked */