#include <vm/vm_protos.h>
#define f_flag f_fglob->fg_flag
-#define f_type f_fglob->fg_type
+#define f_type f_fglob->fg_ops->fo_type
#define f_msgcount f_fglob->fg_msgcount
#define f_cred f_fglob->fg_cred
#define f_ops f_fglob->fg_ops
static int pshm_ioctl (struct fileproc *fp, u_long com,
caddr_t data, vfs_context_t ctx);
static int pshm_select (struct fileproc *fp, int which, void *wql, vfs_context_t ctx);
-static int pshm_close(struct pshmnode *pnode);
+static int pshm_close(struct pshminfo *pinfo, int dropref);
static int pshm_closefile (struct fileglob *fg, vfs_context_t ctx);
static int pshm_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx);
static void pshm_cache_purge(void);
#endif /* NOT_USED */
static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
- struct pshmcache **pcache);
-
-struct fileops pshmops =
- { pshm_read, pshm_write, pshm_ioctl, pshm_select, pshm_closefile, pshm_kqfilter, 0 };
+ struct pshmcache **pcache, int addref);
+
/*
 * NOTE(review): post-patch fileops table.  It is now const and begins with
 * DTYPE_PSXSHM: the descriptor type moves into the fileops structure
 * (reached via fg_ops->fo_type, per the f_type macro change at the top of
 * this patch), replacing the old per-fileglob fg_type field.
 */
+static const struct fileops pshmops = {
+	DTYPE_PSXSHM,
+	pshm_read,
+	pshm_write,
+	pshm_ioctl,
+	pshm_select,
+	pshm_closefile,
+	pshm_kqfilter,
+	0
+};
static lck_grp_t *psx_shm_subsys_lck_grp;
static lck_grp_attr_t *psx_shm_subsys_lck_grp_attr;
/*
 * pshm_cache_search - look up a POSIX shm name in the cache.
 *
 * NOTE(review): diff fragment.  The unchanged hunk context between the
 * visible lines is omitted (in particular the hash-chain walk that sets
 * 'pcp'), so the body shown here is incomplete — read against the full
 * file before drawing conclusions.
 *
 * Patch change: new 'addref' argument.  On a positive hit it bumps
 * pshm_usecount on the cached pshminfo (L46-L47 below), presumably so the
 * caller can drop the subsystem lock while still holding a reference —
 * the shm_open changes elsewhere in this patch pass addref=1 and then
 * unlock; confirm the locking contract against the full file.
 * Returns -1 on a hit, ENOENT on a cached negative entry.
 */
static int
pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
-	struct pshmcache **pcache)
+	struct pshmcache **pcache, int addref)
{
	struct pshmcache *pcp, *nnp;
	struct pshmhashhead *pcpp;
	/* TOUCH(ncp); */
	*pshmp = pcp->pshminfo;
	*pcache = pcp;
+	if (addref)
+		pcp->pshminfo->pshm_usecount++;
	return (-1);
}
/*
 * We found a "negative" match, ENOENT notifies client of this match.
-	 * The nc_vpid field records whether this is a whiteout.
 */
pshmstats.neghits++;
return (ENOENT);
/* if the entry has already been added by some one else return */
- if (pshm_cache_search(&dpinfo, pnp, &dpcp) == -1) {
+ if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == -1) {
return(EEXIST);
}
pshmnument++;
/*
* Fill in cache info, if vp is NULL this is a "negative" cache entry.
- * For negative entries, we have to record whether it is a whiteout.
- * the whiteout flag is stored in the nc_vpid field which is
- * otherwise unused.
*/
pcp->pshminfo = pshmp;
pcp->pshm_nlen = pnp->pshm_namelen;
if (error)
goto bad;
+ cmode &= ALLPERMS;
+
+ fmode = FFLAGS(uap->oflag);
+ if ((fmode & (FREAD | FWRITE)) == 0) {
+ error = EINVAL;
+ goto bad;
+ }
+
/*
* We allocate a new entry if we are less than the maximum
* allowed and the one at the front of the LRU list is in use.
PSHM_SUBSYS_LOCK();
- error = pshm_cache_search(&pinfo, &nd, &pcache);
+ /*
+ * If we find the entry in the cache, this will take a reference,
+ * allowing us to unlock it for the permissions check.
+ */
+ error = pshm_cache_search(&pinfo, &nd, &pcache, 1);
+
+ PSHM_SUBSYS_UNLOCK();
if (error == ENOENT) {
error = EINVAL;
- goto bad_locked;
-
+ goto bad;
}
+
if (!error) {
incache = 0;
- } else
+ if (fmode & O_CREAT) {
+ /* create a new one (commit the allocation) */
+ pinfo = new_pinfo;
+ pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE;
+ pinfo->pshm_usecount = 1; /* existence reference */
+ pinfo->pshm_mode = cmode;
+ pinfo->pshm_uid = kauth_getuid();
+ pinfo->pshm_gid = kauth_getgid();
+ bcopy(pnbuf, &pinfo->pshm_name[0], pathlen);
+ pinfo->pshm_name[pathlen]=0;
+#if CONFIG_MACF
+ error = mac_posixshm_check_create(kauth_cred_get(), nameptr);
+ if (error) {
+ goto bad;
+ }
+ mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr);
+#endif
+ }
+ } else {
incache = 1;
- fmode = FFLAGS(uap->oflag);
- if ((fmode & (FREAD | FWRITE))==0) {
- error = EINVAL;
- goto bad_locked;
- }
-
- cmode &= ALLPERMS;
-
- if (fmode & O_CREAT) {
- if (incache) {
+ if (fmode & O_CREAT) {
/* already exists */
if ((fmode & O_EXCL)) {
AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
/* shm obj exists and opened O_EXCL */
error = EEXIST;
- goto bad_locked;
+ goto bad;
}
if( pinfo->pshm_flags & PSHM_INDELETE) {
error = ENOENT;
- goto bad_locked;
+ goto bad;
}
AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
pinfo->pshm_gid, pinfo->pshm_mode);
#if CONFIG_MACF
- if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo))) {
- goto bad_locked;
+ if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
+ goto bad;
}
#endif
if ( (error = pshm_access(pinfo, fmode, kauth_cred_get(), p)) ) {
- goto bad_locked;
+ goto bad;
}
- } else {
- /* create a new one (commit the allocation) */
- pinfo = new_pinfo;
- pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE;
- pinfo->pshm_usecount = 1; /* existence reference */
- pinfo->pshm_mode = cmode;
- pinfo->pshm_uid = kauth_cred_getuid(kauth_cred_get());
- pinfo->pshm_gid = kauth_cred_get()->cr_gid;
- bcopy(pnbuf, &pinfo->pshm_name[0], PSHMNAMLEN);
- pinfo->pshm_name[PSHMNAMLEN]=0;
-#if CONFIG_MACF
- error = mac_posixshm_check_create(kauth_cred_get(), nameptr);
- if (error) {
- goto bad_locked;
- }
- mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr);
-#endif
}
- } else {
+ }
+ if (!(fmode & O_CREAT)) {
if (!incache) {
/* O_CREAT is not set and the object does not exist */
error = ENOENT;
- goto bad_locked;
+ goto bad;
}
if( pinfo->pshm_flags & PSHM_INDELETE) {
error = ENOENT;
- goto bad_locked;
+ goto bad;
}
#if CONFIG_MACF
- if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo))) {
- goto bad_locked;
+ if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
+ goto bad;
}
#endif
if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {
- goto bad_locked;
+ goto bad;
}
}
if (fmode & O_TRUNC) {
error = EINVAL;
- goto bad_locked;
+ goto bad;
}
+
+
+ PSHM_SUBSYS_LOCK();
+
#if DIAGNOSTIC
if (fmode & FWRITE)
pinfo->pshm_writecount++;
if ( (error = pshm_cache_add(pinfo, &nd, pcp)) ) {
goto bad_locked;
}
+ /*
+ * add reference for the new entry; otherwise, we obtained
+ * one from the cache hit earlier.
+ */
+ pinfo->pshm_usecount++;
}
pinfo->pshm_flags &= ~PSHM_INCREATE;
- pinfo->pshm_usecount++; /* extra reference for the new fd */
new_pnode->pinfo = pinfo;
PSHM_SUBSYS_UNLOCK();
proc_fdlock(p);
fp->f_flag = fmode & FMASK;
- fp->f_type = DTYPE_PSXSHM;
fp->f_ops = &pshmops;
fp->f_data = (caddr_t)new_pnode;
*fdflags(p, indx) |= UF_EXCLOSE;
bad_locked:
PSHM_SUBSYS_UNLOCK();
bad:
+ /*
+ * If we obtained the entry from the cache, we need to drop the
+ * reference; holding the reference may have prevented unlinking,
+ * so we need to call pshm_close() to get the full effect.
+ */
+ if (incache) {
+ PSHM_SUBSYS_LOCK();
+ pshm_close(pinfo, 1);
+ PSHM_SUBSYS_UNLOCK();
+ }
+
if (pcp != NULL)
FREE(pcp, M_SHM);
struct pshmnode * pnode ;
kern_return_t kret;
mem_entry_name_port_t mem_object;
- mach_vm_size_t size, total_size, alloc_size;
+ mach_vm_size_t total_size, alloc_size;
+ memory_object_size_t mosize;
struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p;
+ vm_map_t user_map;
#if CONFIG_MACF
int error;
#endif
+ user_map = current_map();
+
if (fp->f_type != DTYPE_PSXSHM) {
return(EINVAL);
}
return(EINVAL);
}
#if CONFIG_MACF
- error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, size);
+ error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, length);
if (error) {
PSHM_SUBSYS_UNLOCK();
return(error);
#endif
pinfo->pshm_flags |= PSHM_ALLOCATING;
- total_size = round_page_64(length);
+ total_size = vm_map_round_page(length,
+ vm_map_page_mask(user_map));
pshmobj_next_p = &pinfo->pshm_memobjects;
for (alloc_size = 0;
alloc_size < total_size;
- alloc_size += size) {
+ alloc_size += mosize) {
PSHM_SUBSYS_UNLOCK();
- size = MIN(total_size - alloc_size, ANON_MAX_SIZE);
+ mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE);
kret = mach_make_memory_entry_64(
VM_MAP_NULL,
- &size,
+ &mosize,
0,
MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
&mem_object,
PSHM_SUBSYS_LOCK();
pshmobj->pshmo_memobject = (void *) mem_object;
- pshmobj->pshmo_size = size;
+ pshmobj->pshmo_size = mosize;
pshmobj->pshmo_next = NULL;
*pshmobj_next_p = pshmobj;
pshmobj_next_p = &pshmobj->pshmo_next;
}
- pinfo->pshm_flags = PSHM_ALLOCATED;
+ pinfo->pshm_flags |= PSHM_ALLOCATED;
+ pinfo->pshm_flags &= ~(PSHM_ALLOCATING);
pinfo->pshm_length = total_size;
PSHM_SUBSYS_UNLOCK();
return(0);
/*
 * pshm_access - permission check for a POSIX shm object.
 *
 * Patch change: the hand-rolled owner/group/other mode tests (the removed
 * '-' lines below) are replaced by a single posix_cred_access() call; the
 * requested FREAD/FWRITE bits are first mapped to S_IRUSR/S_IWUSR in
 * mode_req.  Root (suser) is still granted access unconditionally.
 * NOTE(review): assumes posix_cred_access() reproduces the removed
 * owner/group/other shifting and returns 0 or EACCES like the old code —
 * confirm against its definition.
 */
int
pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, __unused proc_t p)
{
-	mode_t mask;
-	int is_member;
+	int mode_req = ((mode & FREAD) ? S_IRUSR : 0) |
+		((mode & FWRITE) ? S_IWUSR : 0);
	/* Otherwise, user id 0 always gets access. */
	if (!suser(cred, NULL))
		return (0);
-	mask = 0;
-
-	/* Otherwise, check the owner. */
-	if (kauth_cred_getuid(cred) == pinfo->pshm_uid) {
-		if (mode & FREAD)
-			mask |= S_IRUSR;
-		if (mode & FWRITE)
-			mask |= S_IWUSR;
-		return ((pinfo->pshm_mode & mask) == mask ? 0 : EACCES);
-	}
-
-	/* Otherwise, check the groups. */
-	if (kauth_cred_ismember_gid(cred, pinfo->pshm_gid, &is_member) == 0 && is_member) {
-		if (mode & FREAD)
-			mask |= S_IRGRP;
-		if (mode & FWRITE)
-			mask |= S_IWGRP;
-		return ((pinfo->pshm_mode & mask) == mask ? 0 : EACCES);
-	}
-
-	/* Otherwise, check everyone else. */
-	if (mode & FREAD)
-		mask |= S_IROTH;
-	if (mode & FWRITE)
-		mask |= S_IWOTH;
-	return ((pinfo->pshm_mode & mask) == mask ? 0 : EACCES);
+	return(posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req));
}
int
pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct fileproc *fp, off_t pageoff)
{
- mach_vm_offset_t user_addr = (mach_vm_offset_t)uap->addr;
- mach_vm_size_t user_size = (mach_vm_size_t)uap->len ;
- mach_vm_offset_t user_start_addr;
- mach_vm_size_t map_size, mapped_size;
+ vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr;
+ vm_map_size_t user_size = (vm_map_size_t)uap->len ;
+ vm_map_offset_t user_start_addr;
+ vm_map_size_t map_size, mapped_size;
int prot = uap->prot;
int flags = uap->flags;
vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
if ((flags & MAP_FIXED) == 0) {
alloc_flags = VM_FLAGS_ANYWHERE;
- user_addr = mach_vm_round_page(user_addr);
+ user_addr = vm_map_round_page(user_addr,
+ vm_map_page_mask(user_map));
} else {
- if (user_addr != mach_vm_trunc_page(user_addr))
+ if (user_addr != vm_map_round_page(user_addr,
+ vm_map_page_mask(user_map)))
return (EINVAL);
/*
* We do not get rid of the existing mappings here because
}
PSHM_SUBSYS_LOCK();
- error = pshm_cache_search(&pinfo, &nd, &pcache);
+ error = pshm_cache_search(&pinfo, &nd, &pcache, 0);
if (error == ENOENT) {
PSHM_SUBSYS_UNLOCK();
- error = EINVAL;
goto bad;
}
+ /* During unlink lookup failure also implies ENOENT */
if (!error) {
PSHM_SUBSYS_UNLOCK();
- error = EINVAL;
+ error = ENOENT;
goto bad;
} else
incache = 1;
AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid,
pinfo->pshm_mode);
- /*
- * JMM - How should permissions be checked?
+ /*
+ * following file semantics, unlink should be allowed
+ * for users with write permission only.
*/
+ if ( (error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p)) ) {
+ PSHM_SUBSYS_UNLOCK();
+ goto bad;
+ }
pinfo->pshm_flags |= PSHM_INDELETE;
pshm_cache_delete(pcache);
pinfo->pshm_flags |= PSHM_REMOVED;
/* release the existence reference */
if (!--pinfo->pshm_usecount) {
+#if CONFIG_MACF
+ mac_posixshm_label_destroy(pinfo);
+#endif
PSHM_SUBSYS_UNLOCK();
/*
* If this is the last reference going away on the object,
/* already called locked */
/*
 * NOTE(review): diff fragment — the unchanged hunk context between
 * "#if DIAGNOSTIC" and the final return is not shown, so the visible
 * body is incomplete (the object teardown / usecount logic is elided).
 *
 * Interface change in this patch: pshm_close() now takes the pshminfo
 * directly plus a 'dropref' flag instead of a pshmnode; when dropref is
 * set (dropping a cache-lookup reference) the PSHM_ALLOCATED check is
 * skipped, since holding that reference may have prevented allocation
 * from completing.  Freeing the pshmnode moves to the caller (see the
 * pshm_closefile change in this same patch).
 */
static int
-pshm_close(struct pshmnode *pnode)
+pshm_close(struct pshminfo *pinfo, int dropref)
{
-	int error=0;
-	struct pshminfo *pinfo;
+	int error = 0;
	struct pshmobj *pshmobj, *pshmobj_next;
-	if ((pinfo = pnode->pinfo) == PSHMINFO_NULL)
-		return(EINVAL);
-
-	if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) {
+	/*
+	 * If we are dropping the reference we took on the cache object, don't
+	 * enforce the allocation requirement.
+	 */
+	if ( !dropref && ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED)) {
		return(EINVAL);
	}
#if DIAGNOSTIC
	PSHM_SUBSYS_LOCK();
	FREE(pinfo,M_SHM);
	}
-	FREE(pnode, M_SHM);
	return (error);
}
/*
 * pshm_closefile - fo_close handler for POSIX shm descriptors.
 *
 * Post-patch behaviour visible here: fg_data may now be NULL (in which
 * case the new EINVAL default is returned), pshm_close() is called with
 * the pshminfo and dropref=0, and the pshmnode itself is freed here
 * rather than inside pshm_close().  The whole sequence runs under
 * PSHM_SUBSYS_LOCK().
 */
static int
pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx)
{
-	int error;
+	int error = EINVAL;
+	struct pshmnode *pnode;
	PSHM_SUBSYS_LOCK();
-	error = pshm_close(((struct pshmnode *)fg->fg_data));
+
+	if ((pnode = (struct pshmnode *)fg->fg_data) != NULL) {
+		if (pnode->pinfo != PSHMINFO_NULL) {
+			error = pshm_close(pnode->pinfo, 0);
+		}
+		FREE(pnode, M_SHM);
+	}
+
	PSHM_SUBSYS_UNLOCK();
+
	return(error);
}