#include <sys/stat.h>
#include <sys/sysproto.h>
#include <sys/proc_info.h>
-#include <bsm/audit_kernel.h>
+#include <security/audit/audit.h>
#if CONFIG_MACF
#include <security/mac_framework.h>
#include <vm/vm_protos.h>
#define f_flag f_fglob->fg_flag
-#define f_type f_fglob->fg_type
+#define f_type f_fglob->fg_ops->fo_type
#define f_msgcount f_fglob->fg_msgcount
#define f_cred f_fglob->fg_cred
#define f_ops f_fglob->fg_ops
#define f_data f_fglob->fg_data
#define PSHMNAMLEN 31 /* maximum name segment length we bother with */
+struct pshmobj {
+ void * pshmo_memobject;
+ memory_object_size_t pshmo_size;
+ struct pshmobj * pshmo_next;
+};
+
struct pshminfo {
unsigned int pshm_flags;
unsigned int pshm_usecount;
uid_t pshm_uid;
gid_t pshm_gid;
char pshm_name[PSHMNAMLEN + 1]; /* segment name */
- void * pshm_memobject;
+ struct pshmobj *pshm_memobjects;
#if DIAGNOSTIC
unsigned int pshm_readcount;
unsigned int pshm_writecount;
};
#define PSHMINFO_NULL (struct pshminfo *)0
-#define PSHM_NONE 1
-#define PSHM_DEFINED 2
-#define PSHM_ALLOCATED 4
-#define PSHM_MAPPED 8
-#define PSHM_INUSE 0x10
-#define PSHM_REMOVED 0x20
-#define PSHM_INCREATE 0x40
-#define PSHM_INDELETE 0x80
+#define PSHM_NONE 0x001
+#define PSHM_DEFINED 0x002
+#define PSHM_ALLOCATED 0x004
+#define PSHM_MAPPED 0x008
+#define PSHM_INUSE 0x010
+#define PSHM_REMOVED 0x020
+#define PSHM_INCREATE 0x040
+#define PSHM_INDELETE 0x080
+#define PSHM_ALLOCATING 0x100
struct pshmcache {
LIST_ENTRY(pshmcache) pshm_hash; /* hash chain */
struct pshmnode {
off_t mapp_addr;
- user_size_t map_size;
+ user_size_t map_size; /* XXX unused ? */
struct pshminfo *pinfo;
unsigned int pshm_usecount;
#if DIAGNOSTIC
static int pshm_ioctl (struct fileproc *fp, u_long com,
caddr_t data, vfs_context_t ctx);
static int pshm_select (struct fileproc *fp, int which, void *wql, vfs_context_t ctx);
-static int pshm_close(struct pshmnode *pnode);
+static int pshm_close(struct pshminfo *pinfo, int dropref);
static int pshm_closefile (struct fileglob *fg, vfs_context_t ctx);
static int pshm_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx);
static void pshm_cache_purge(void);
#endif /* NOT_USED */
static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
- struct pshmcache **pcache);
-
-struct fileops pshmops =
- { pshm_read, pshm_write, pshm_ioctl, pshm_select, pshm_closefile, pshm_kqfilter, 0 };
+ struct pshmcache **pcache, int addref);
+
+static const struct fileops pshmops = {
+ DTYPE_PSXSHM,
+ pshm_read,
+ pshm_write,
+ pshm_ioctl,
+ pshm_select,
+ pshm_closefile,
+ pshm_kqfilter,
+ 0
+};
static lck_grp_t *psx_shm_subsys_lck_grp;
static lck_grp_attr_t *psx_shm_subsys_lck_grp_attr;
static int
pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
- struct pshmcache **pcache)
+ struct pshmcache **pcache, int addref)
{
struct pshmcache *pcp, *nnp;
struct pshmhashhead *pcpp;
/* TOUCH(ncp); */
*pshmp = pcp->pshminfo;
*pcache = pcp;
+ if (addref)
+ pcp->pshminfo->pshm_usecount++;
return (-1);
}
/*
* We found a "negative" match, ENOENT notifies client of this match.
- * The nc_vpid field records whether this is a whiteout.
*/
pshmstats.neghits++;
return (ENOENT);
/* if the entry has already been added by some one else return */
- if (pshm_cache_search(&dpinfo, pnp, &dpcp) == -1) {
+ if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == -1) {
return(EEXIST);
}
pshmnument++;
/*
* Fill in cache info, if vp is NULL this is a "negative" cache entry.
- * For negative entries, we have to record whether it is a whiteout.
- * the whiteout flag is stored in the nc_vpid field which is
- * otherwise unused.
*/
pcp->pshminfo = pshmp;
pcp->pshm_nlen = pnp->pshm_namelen;
int
-shm_open(proc_t p, struct shm_open_args *uap, register_t *retval)
+shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval)
{
- struct fileproc *fp;
size_t i;
- struct fileproc *nfp;
int indx, error;
struct pshmname nd;
struct pshminfo *pinfo;
- char * pnbuf;
+ struct fileproc *fp = NULL;
+ char *pnbuf = NULL;
+ struct pshminfo *new_pinfo = PSHMINFO_NULL;
+ struct pshmnode *new_pnode = PSHMNODE_NULL;
+ struct pshmcache *pcache = PSHMCACHE_NULL; /* ignored on return */
char * nameptr;
char * cp;
size_t pathlen, plen;
int fmode ;
int cmode = uap->mode;
int incache = 0;
- struct pshmnode * pnode = PSHMNODE_NULL;
- struct pshmcache * pcache = PSHMCACHE_NULL;
- struct pshmcache *pcp = NULL; /* protected by !incache */
- int pinfo_alloc=0;
+ struct pshmcache *pcp = NULL;
AUDIT_ARG(fflags, uap->oflag);
AUDIT_ARG(mode, uap->mode);
pinfo = PSHMINFO_NULL;
+ /*
+ * Preallocate everything we might need up front to avoid taking
+ * and dropping the lock, opening us up to race conditions.
+ */
MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
if (pnbuf == NULL) {
- return(ENOSPC);
+ error = ENOSPC;
+ goto bad;
}
pathlen = MAXPATHLEN;
error = ENAMETOOLONG;
goto bad;
}
-
-
#ifdef PSXSHM_NAME_RESTRICT
nameptr = pnbuf;
if (*nameptr == '/') {
nd.pshm_hash += (unsigned char)*cp * i;
}
- PSHM_SUBSYS_LOCK();
- error = pshm_cache_search(&pinfo, &nd, &pcache);
-
- if (error == ENOENT) {
- PSHM_SUBSYS_UNLOCK();
- error = EINVAL;
+ /*
+ * attempt to allocate a new fp; if unsuccessful, the fp will be
+ * left unmodified (NULL).
+ */
+ error = falloc(p, &fp, &indx, vfs_context_current());
+ if (error)
goto bad;
- }
- if (!error) {
- incache = 0;
- } else
- incache = 1;
+ cmode &= ALLPERMS;
+
fmode = FFLAGS(uap->oflag);
- if ((fmode & (FREAD | FWRITE))==0) {
- PSHM_SUBSYS_UNLOCK();
+ if ((fmode & (FREAD | FWRITE)) == 0) {
error = EINVAL;
goto bad;
}
/*
- * XXXXXXXXXX TBD XXXXXXXXXX
- * There is a race that existed with the funnels as well.
- * Need to be fixed later
+ * We allocate a new entry if we are less than the maximum
+ * allowed and the one at the front of the LRU list is in use.
+ * Otherwise we use the one at the front of the LRU list.
*/
- PSHM_SUBSYS_UNLOCK();
- error = falloc(p, &nfp, &indx, vfs_context_current());
- if (error )
+ MALLOC(pcp, struct pshmcache *, sizeof(struct pshmcache), M_SHM, M_WAITOK|M_ZERO);
+ if (pcp == NULL) {
+ error = ENOSPC;
+ goto bad;
+ }
+
+ MALLOC(new_pinfo, struct pshminfo *, sizeof(struct pshminfo), M_SHM, M_WAITOK|M_ZERO);
+ if (new_pinfo == PSHMINFO_NULL) {
+ error = ENOSPC;
goto bad;
+ }
+#if CONFIG_MACF
+ mac_posixshm_label_init(new_pinfo);
+#endif
+
+ MALLOC(new_pnode, struct pshmnode *, sizeof(struct pshmnode), M_SHM, M_WAITOK|M_ZERO);
+ if (new_pnode == PSHMNODE_NULL) {
+ error = ENOSPC;
+ goto bad;
+ }
+
PSHM_SUBSYS_LOCK();
- fp = nfp;
+ /*
+ * If we find the entry in the cache, this will take a reference,
+ * allowing us to unlock it for the permissions check.
+ */
+ error = pshm_cache_search(&pinfo, &nd, &pcache, 1);
- cmode &= ALLPERMS;
+ PSHM_SUBSYS_UNLOCK();
- if (fmode & O_CREAT) {
- if ((fmode & O_EXCL) && incache) {
- AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
- pinfo->pshm_gid, pinfo->pshm_mode);
+ if (error == ENOENT) {
+ error = EINVAL;
+ goto bad;
+ }
- /* shm obj exists and opened O_EXCL */
-#if notyet
- if (pinfo->pshm_flags & PSHM_INDELETE) {
- }
-#endif
- error = EEXIST;
- PSHM_SUBSYS_UNLOCK();
- goto bad1;
- }
- if (!incache) {
- PSHM_SUBSYS_UNLOCK();
- /* create a new one */
- MALLOC(pinfo, struct pshminfo *, sizeof(struct pshminfo), M_SHM, M_WAITOK|M_ZERO);
- if (pinfo == NULL) {
- error = ENOSPC;
- goto bad1;
- }
- PSHM_SUBSYS_LOCK();
- pinfo_alloc = 1;
+ if (!error) {
+ incache = 0;
+ if (fmode & O_CREAT) {
+ /* create a new one (commit the allocation) */
+ pinfo = new_pinfo;
pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE;
pinfo->pshm_usecount = 1; /* existence reference */
pinfo->pshm_mode = cmode;
- pinfo->pshm_uid = kauth_cred_getuid(kauth_cred_get());
- pinfo->pshm_gid = kauth_cred_get()->cr_gid;
- bcopy(pnbuf, &pinfo->pshm_name[0], PSHMNAMLEN);
- pinfo->pshm_name[PSHMNAMLEN]=0;
+ pinfo->pshm_uid = kauth_getuid();
+ pinfo->pshm_gid = kauth_getgid();
+ bcopy(pnbuf, &pinfo->pshm_name[0], pathlen);
+ pinfo->pshm_name[pathlen]=0;
#if CONFIG_MACF
- PSHM_SUBSYS_UNLOCK();
- mac_posixshm_label_init(pinfo);
- PSHM_SUBSYS_LOCK();
error = mac_posixshm_check_create(kauth_cred_get(), nameptr);
if (error) {
- PSHM_SUBSYS_UNLOCK();
- goto bad2;
+ goto bad;
}
mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr);
#endif
- } else {
+ }
+ } else {
+ incache = 1;
+ if (fmode & O_CREAT) {
/* already exists */
+ if ((fmode & O_EXCL)) {
+ AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
+ pinfo->pshm_gid,
+ pinfo->pshm_mode);
+
+ /* shm obj exists and opened O_EXCL */
+ error = EEXIST;
+ goto bad;
+ }
+
if( pinfo->pshm_flags & PSHM_INDELETE) {
- PSHM_SUBSYS_UNLOCK();
error = ENOENT;
- goto bad1;
+ goto bad;
}
AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
pinfo->pshm_gid, pinfo->pshm_mode);
#if CONFIG_MACF
- if ((error = mac_posixshm_check_open(
- kauth_cred_get(), pinfo))) {
- PSHM_SUBSYS_UNLOCK();
- goto bad1;
+ if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
+ goto bad;
}
#endif
if ( (error = pshm_access(pinfo, fmode, kauth_cred_get(), p)) ) {
- PSHM_SUBSYS_UNLOCK();
- goto bad1;
+ goto bad;
}
}
- } else {
+ }
+ if (!(fmode & O_CREAT)) {
if (!incache) {
- /* O_CREAT is not set and the shm obecj does not exist */
- PSHM_SUBSYS_UNLOCK();
+ /* O_CREAT is not set and the object does not exist */
error = ENOENT;
- goto bad1;
+ goto bad;
}
if( pinfo->pshm_flags & PSHM_INDELETE) {
- PSHM_SUBSYS_UNLOCK();
error = ENOENT;
- goto bad1;
+ goto bad;
}
#if CONFIG_MACF
- if ((error = mac_posixshm_check_open(
- kauth_cred_get(), pinfo))) {
- PSHM_SUBSYS_UNLOCK();
- goto bad1;
+ if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
+ goto bad;
}
#endif
- if ( (error = pshm_access(pinfo, fmode, kauth_cred_get(), p)) ) {
- PSHM_SUBSYS_UNLOCK();
- goto bad1;
+ if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {
+ goto bad;
}
}
if (fmode & O_TRUNC) {
- PSHM_SUBSYS_UNLOCK();
error = EINVAL;
- goto bad2;
+ goto bad;
}
+
+
+ PSHM_SUBSYS_LOCK();
+
#if DIAGNOSTIC
if (fmode & FWRITE)
pinfo->pshm_writecount++;
if (fmode & FREAD)
pinfo->pshm_readcount++;
#endif
- PSHM_SUBSYS_UNLOCK();
- MALLOC(pnode, struct pshmnode *, sizeof(struct pshmnode), M_SHM, M_WAITOK|M_ZERO);
- if (pnode == NULL) {
- error = ENOSPC;
- goto bad2;
- }
if (!incache) {
+ /* if successful, this will consume the pcp */
+ if ( (error = pshm_cache_add(pinfo, &nd, pcp)) ) {
+ goto bad_locked;
+ }
/*
- * We allocate a new entry if we are less than the maximum
- * allowed and the one at the front of the LRU list is in use.
- * Otherwise we use the one at the front of the LRU list.
+ * add reference for the new entry; otherwise, we obtained
+ * one from the cache hit earlier.
*/
- MALLOC(pcp, struct pshmcache *, sizeof(struct pshmcache), M_SHM, M_WAITOK|M_ZERO);
- if (pcp == NULL) {
- error = ENOSPC;
- goto bad2;
- }
-
+ pinfo->pshm_usecount++;
}
- PSHM_SUBSYS_LOCK();
+ pinfo->pshm_flags &= ~PSHM_INCREATE;
+ new_pnode->pinfo = pinfo;
- if (!incache) {
- if ( (error = pshm_cache_add(pinfo, &nd, pcp)) ) {
- PSHM_SUBSYS_UNLOCK();
- FREE(pcp, M_SHM);
- goto bad3;
+ PSHM_SUBSYS_UNLOCK();
+
+ /*
+ * if incache, we did not use the new pcp or new_pinfo and must
+ * free them
+ */
+ if (incache) {
+ FREE(pcp, M_SHM);
+
+ if (new_pinfo != PSHMINFO_NULL) {
+#if CONFIG_MACF
+ mac_posixshm_label_destroy(new_pinfo);
+#endif
+ FREE(new_pinfo, M_SHM);
}
}
- pinfo->pshm_flags &= ~PSHM_INCREATE;
- pinfo->pshm_usecount++; /* extra reference for the new fd */
- pnode->pinfo = pinfo;
- PSHM_SUBSYS_UNLOCK();
proc_fdlock(p);
fp->f_flag = fmode & FMASK;
- fp->f_type = DTYPE_PSXSHM;
fp->f_ops = &pshmops;
- fp->f_data = (caddr_t)pnode;
+ fp->f_data = (caddr_t)new_pnode;
*fdflags(p, indx) |= UF_EXCLOSE;
procfdtbl_releasefd(p, indx, NULL);
fp_drop(p, indx, fp, 1);
*retval = indx;
FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
return (0);
-bad3:
- FREE(pnode, M_SHM);
-bad2:
- if (pinfo_alloc) {
+bad_locked:
+ PSHM_SUBSYS_UNLOCK();
+bad:
+ /*
+ * If we obtained the entry from the cache, we need to drop the
+ * reference; holding the reference may have prevented unlinking,
+ * so we need to call pshm_close() to get the full effect.
+ */
+ if (incache) {
+ PSHM_SUBSYS_LOCK();
+ pshm_close(pinfo, 1);
+ PSHM_SUBSYS_UNLOCK();
+ }
+
+ if (pcp != NULL)
+ FREE(pcp, M_SHM);
+
+ if (new_pnode != PSHMNODE_NULL)
+ FREE(new_pnode, M_SHM);
+
+ if (fp != NULL)
+ fp_free(p, indx, fp);
+
+ if (new_pinfo != PSHMINFO_NULL) {
#if CONFIG_MACF
- mac_posixshm_label_destroy(pinfo);
+ mac_posixshm_label_destroy(new_pinfo);
#endif
- FREE(pinfo, M_SHM);
+ FREE(new_pinfo, M_SHM);
}
-bad1:
- fp_free(p, indx, fp);
-bad:
- FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
+ if (pnbuf != NULL)
+ FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
return (error);
}
int
pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd,
- off_t length, __unused register_t *retval)
+ off_t length, __unused int32_t *retval)
{
struct pshminfo * pinfo;
struct pshmnode * pnode ;
kern_return_t kret;
- mach_vm_offset_t user_addr;
mem_entry_name_port_t mem_object;
- mach_vm_size_t size;
+ mach_vm_size_t total_size, alloc_size;
+ memory_object_size_t mosize;
+ struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p;
+ vm_map_t user_map;
#if CONFIG_MACF
int error;
#endif
+ user_map = current_map();
+
if (fp->f_type != DTYPE_PSXSHM) {
return(EINVAL);
}
PSHM_SUBSYS_UNLOCK();
return(EINVAL);
}
- if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED))
+ if ((pinfo->pshm_flags & (PSHM_DEFINED|PSHM_ALLOCATING|PSHM_ALLOCATED))
!= PSHM_DEFINED) {
PSHM_SUBSYS_UNLOCK();
return(EINVAL);
}
#if CONFIG_MACF
- error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, size);
+ error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, length);
if (error) {
PSHM_SUBSYS_UNLOCK();
return(error);
}
#endif
- PSHM_SUBSYS_UNLOCK();
- size = round_page_64(length);
- kret = mach_vm_allocate(current_map(), &user_addr, size, VM_FLAGS_ANYWHERE);
- if (kret != KERN_SUCCESS)
- goto out;
- kret = mach_make_memory_entry_64 (current_map(), &size,
- user_addr, VM_PROT_DEFAULT, &mem_object, 0);
+ pinfo->pshm_flags |= PSHM_ALLOCATING;
+ total_size = vm_map_round_page(length,
+ vm_map_page_mask(user_map));
+ pshmobj_next_p = &pinfo->pshm_memobjects;
- if (kret != KERN_SUCCESS)
- goto out;
-
- mach_vm_deallocate(current_map(), user_addr, size);
+ for (alloc_size = 0;
+ alloc_size < total_size;
+ alloc_size += mosize) {
- PSHM_SUBSYS_LOCK();
- pinfo->pshm_flags &= ~PSHM_DEFINED;
+ PSHM_SUBSYS_UNLOCK();
+
+ mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE);
+ kret = mach_make_memory_entry_64(
+ VM_MAP_NULL,
+ &mosize,
+ 0,
+ MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
+ &mem_object,
+ 0);
+
+ if (kret != KERN_SUCCESS)
+ goto out;
+
+ MALLOC(pshmobj, struct pshmobj *, sizeof (struct pshmobj),
+ M_SHM, M_WAITOK);
+ if (pshmobj == NULL) {
+ kret = KERN_NO_SPACE;
+ mach_memory_entry_port_release(mem_object);
+ mem_object = NULL;
+ goto out;
+ }
+
+ PSHM_SUBSYS_LOCK();
+
+ pshmobj->pshmo_memobject = (void *) mem_object;
+ pshmobj->pshmo_size = mosize;
+ pshmobj->pshmo_next = NULL;
+
+ *pshmobj_next_p = pshmobj;
+ pshmobj_next_p = &pshmobj->pshmo_next;
+ }
+
pinfo->pshm_flags = PSHM_ALLOCATED;
- pinfo->pshm_memobject = (void *)mem_object;
- pinfo->pshm_length = size;
+ pinfo->pshm_length = total_size;
PSHM_SUBSYS_UNLOCK();
return(0);
out:
+ PSHM_SUBSYS_LOCK();
+ for (pshmobj = pinfo->pshm_memobjects;
+ pshmobj != NULL;
+ pshmobj = pshmobj_next) {
+ pshmobj_next = pshmobj->pshmo_next;
+ mach_memory_entry_port_release(pshmobj->pshmo_memobject);
+ FREE(pshmobj, M_SHM);
+ }
+ pinfo->pshm_memobjects = NULL;
+ pinfo->pshm_flags &= ~PSHM_ALLOCATING;
+ PSHM_SUBSYS_UNLOCK();
+
switch (kret) {
case KERN_INVALID_ADDRESS:
case KERN_NO_SPACE:
int
pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, __unused proc_t p)
{
- mode_t mask;
- int is_member;
+ int mode_req = ((mode & FREAD) ? S_IRUSR : 0) |
+ ((mode & FWRITE) ? S_IWUSR : 0);
/* Otherwise, user id 0 always gets access. */
if (!suser(cred, NULL))
return (0);
- mask = 0;
-
- /* Otherwise, check the owner. */
- if (kauth_cred_getuid(cred) == pinfo->pshm_uid) {
- if (mode & FREAD)
- mask |= S_IRUSR;
- if (mode & FWRITE)
- mask |= S_IWUSR;
- return ((pinfo->pshm_mode & mask) == mask ? 0 : EACCES);
- }
-
- /* Otherwise, check the groups. */
- if (kauth_cred_ismember_gid(cred, pinfo->pshm_gid, &is_member) == 0 && is_member) {
- if (mode & FREAD)
- mask |= S_IRGRP;
- if (mode & FWRITE)
- mask |= S_IWGRP;
- return ((pinfo->pshm_mode & mask) == mask ? 0 : EACCES);
- }
-
- /* Otherwise, check everyone else. */
- if (mode & FREAD)
- mask |= S_IROTH;
- if (mode & FWRITE)
- mask |= S_IWOTH;
- return ((pinfo->pshm_mode & mask) == mask ? 0 : EACCES);
+ return(posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req));
}
int
pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct fileproc *fp, off_t pageoff)
{
- mach_vm_offset_t user_addr = (mach_vm_offset_t)uap->addr;
- mach_vm_size_t user_size = (mach_vm_size_t)uap->len ;
+ vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr;
+ vm_map_size_t user_size = (vm_map_size_t)uap->len ;
+ vm_map_offset_t user_start_addr;
+ vm_map_size_t map_size, mapped_size;
int prot = uap->prot;
int flags = uap->flags;
vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
+ vm_object_offset_t map_pos;
vm_map_t user_map;
int alloc_flags;
boolean_t docow;
kern_return_t kret;
struct pshminfo * pinfo;
struct pshmnode * pnode;
- void * mem_object;
+ struct pshmobj * pshmobj;
#if CONFIG_MACF
int error;
#endif
PSHM_SUBSYS_UNLOCK();
return(EINVAL);
}
- if ((mem_object = pinfo->pshm_memobject) == NULL) {
+ if ((pshmobj = pinfo->pshm_memobjects) == NULL) {
PSHM_SUBSYS_UNLOCK();
return(EINVAL);
}
if ((flags & MAP_FIXED) == 0) {
alloc_flags = VM_FLAGS_ANYWHERE;
- user_addr = mach_vm_round_page(user_addr);
+ user_addr = vm_map_round_page(user_addr,
+ vm_map_page_mask(user_map));
} else {
- if (user_addr != mach_vm_trunc_page(user_addr))
+ if (user_addr != vm_map_round_page(user_addr,
+ vm_map_page_mask(user_map)))
return (EINVAL);
/*
* We do not get rid of the existing mappings here because
}
docow = FALSE;
- kret = vm_map_enter_mem_object(user_map, &user_addr, user_size,
- 0, alloc_flags,
- pinfo->pshm_memobject, file_pos, docow,
- prot, VM_PROT_DEFAULT,
- VM_INHERIT_SHARE);
- if (kret != KERN_SUCCESS)
- goto out;
- /* LP64todo - this should be superfluous at this point */
- kret = mach_vm_inherit(user_map, user_addr, user_size,
- VM_INHERIT_SHARE);
+ mapped_size = 0;
+
+	/* reserve the entire space first... */
+ kret = vm_map_enter_mem_object(user_map,
+ &user_addr,
+ user_size,
+ 0,
+ alloc_flags,
+ IPC_PORT_NULL,
+ 0,
+ FALSE,
+ VM_PROT_NONE,
+ VM_PROT_NONE,
+ VM_INHERIT_NONE);
+ user_start_addr = user_addr;
if (kret != KERN_SUCCESS) {
- (void) mach_vm_deallocate(user_map, user_addr, user_size);
goto out;
}
+
+ /* ... and overwrite with the real mappings */
+ for (map_pos = 0, pshmobj = pinfo->pshm_memobjects;
+ user_size != 0;
+ map_pos += pshmobj->pshmo_size, pshmobj = pshmobj->pshmo_next) {
+ if (pshmobj == NULL) {
+ /* nothing there to map !? */
+ goto out;
+ }
+ if (file_pos >= map_pos + pshmobj->pshmo_size) {
+ continue;
+ }
+ map_size = pshmobj->pshmo_size - (file_pos - map_pos);
+ if (map_size > user_size) {
+ map_size = user_size;
+ }
+ kret = vm_map_enter_mem_object(
+ user_map,
+ &user_addr,
+ map_size,
+ 0,
+ VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
+ pshmobj->pshmo_memobject,
+ file_pos - map_pos,
+ docow,
+ prot,
+ VM_PROT_DEFAULT,
+ VM_INHERIT_SHARE);
+ if (kret != KERN_SUCCESS)
+ goto out;
+
+ user_addr += map_size;
+ user_size -= map_size;
+ mapped_size += map_size;
+ file_pos += map_size;
+ }
+
PSHM_SUBSYS_LOCK();
- pnode->mapp_addr = user_addr;
- pnode->map_size = user_size;
+ pnode->mapp_addr = user_start_addr;
+ pnode->map_size = mapped_size;
pinfo->pshm_flags |= (PSHM_MAPPED | PSHM_INUSE);
PSHM_SUBSYS_UNLOCK();
out:
+ if (kret != KERN_SUCCESS) {
+ if (mapped_size != 0) {
+ (void) mach_vm_deallocate(current_map(),
+ user_start_addr,
+ mapped_size);
+ }
+ }
+
switch (kret) {
case KERN_SUCCESS:
- *retval = (user_addr + pageoff);
+ *retval = (user_start_addr + pageoff);
return (0);
case KERN_INVALID_ADDRESS:
case KERN_NO_SPACE:
int
shm_unlink(__unused proc_t p, struct shm_unlink_args *uap,
- __unused register_t *retval)
+ __unused int32_t *retval)
{
size_t i;
int error=0;
size_t pathlen, plen;
int incache = 0;
struct pshmcache *pcache = PSHMCACHE_NULL;
+ struct pshmobj *pshmobj, *pshmobj_next;
pinfo = PSHMINFO_NULL;
}
PSHM_SUBSYS_LOCK();
- error = pshm_cache_search(&pinfo, &nd, &pcache);
+ error = pshm_cache_search(&pinfo, &nd, &pcache, 0);
if (error == ENOENT) {
PSHM_SUBSYS_UNLOCK();
- error = EINVAL;
goto bad;
}
+	/* During unlink, a lookup failure also implies ENOENT. */
if (!error) {
PSHM_SUBSYS_UNLOCK();
- error = EINVAL;
+ error = ENOENT;
goto bad;
} else
incache = 1;
goto bad;
}
+ if (pinfo->pshm_flags & PSHM_ALLOCATING) {
+ /* XXX should we wait for flag to clear and then proceed ? */
+ PSHM_SUBSYS_UNLOCK();
+ error = EAGAIN;
+ goto bad;
+ }
+
if (pinfo->pshm_flags & PSHM_INDELETE) {
PSHM_SUBSYS_UNLOCK();
error = 0;
AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid,
pinfo->pshm_mode);
- /*
- * JMM - How should permissions be checked?
+ /*
+ * following file semantics, unlink should be allowed
+ * for users with write permission only.
*/
+ if ( (error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p)) ) {
+ PSHM_SUBSYS_UNLOCK();
+ goto bad;
+ }
pinfo->pshm_flags |= PSHM_INDELETE;
pshm_cache_delete(pcache);
pinfo->pshm_flags |= PSHM_REMOVED;
/* release the existence reference */
if (!--pinfo->pshm_usecount) {
+#if CONFIG_MACF
+ mac_posixshm_label_destroy(pinfo);
+#endif
PSHM_SUBSYS_UNLOCK();
/*
* If this is the last reference going away on the object,
* then we need to destroy the backing object. The name
* has an implied but uncounted reference on the object,
- * once it's created, since it's used as a rendesvous, and
+ * once it's created, since it's used as a rendezvous, and
* therefore may be subsequently reopened.
*/
- if (pinfo->pshm_memobject != NULL)
- mach_memory_entry_port_release(pinfo->pshm_memobject);
- PSHM_SUBSYS_LOCK();
+ for (pshmobj = pinfo->pshm_memobjects;
+ pshmobj != NULL;
+ pshmobj = pshmobj_next) {
+ mach_memory_entry_port_release(pshmobj->pshmo_memobject);
+ pshmobj_next = pshmobj->pshmo_next;
+ FREE(pshmobj, M_SHM);
+ }
FREE(pinfo,M_SHM);
+ } else {
+ PSHM_SUBSYS_UNLOCK();
}
- PSHM_SUBSYS_UNLOCK();
FREE(pcache, M_SHM);
error = 0;
bad:
/* already called locked */
static int
-pshm_close(struct pshmnode *pnode)
+pshm_close(struct pshminfo *pinfo, int dropref)
{
- int error=0;
- struct pshminfo *pinfo;
-
- if ((pinfo = pnode->pinfo) == PSHMINFO_NULL)
- return(EINVAL);
+ int error = 0;
+ struct pshmobj *pshmobj, *pshmobj_next;
- if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) {
+ /*
+ * If we are dropping the reference we took on the cache object, don't
+ * enforce the allocation requirement.
+ */
+ if ( !dropref && ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED)) {
return(EINVAL);
}
#if DIAGNOSTIC
pinfo->pshm_usecount--; /* release this fd's reference */
if ((pinfo->pshm_flags & PSHM_REMOVED) && !pinfo->pshm_usecount) {
+#if CONFIG_MACF
+ mac_posixshm_label_destroy(pinfo);
+#endif
PSHM_SUBSYS_UNLOCK();
/*
* If this is the last reference going away on the object,
* then we need to destroy the backing object.
*/
- if (pinfo->pshm_memobject != NULL)
- mach_memory_entry_port_release(pinfo->pshm_memobject);
+ for (pshmobj = pinfo->pshm_memobjects;
+ pshmobj != NULL;
+ pshmobj = pshmobj_next) {
+ mach_memory_entry_port_release(pshmobj->pshmo_memobject);
+ pshmobj_next = pshmobj->pshmo_next;
+ FREE(pshmobj, M_SHM);
+ }
PSHM_SUBSYS_LOCK();
-#if CONFIG_MACF
- mac_posixshm_label_destroy(pinfo);
-#endif
FREE(pinfo,M_SHM);
}
- FREE(pnode, M_SHM);
return (error);
}
static int
pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx)
{
- int error;
+ int error = EINVAL;
+ struct pshmnode *pnode;
PSHM_SUBSYS_LOCK();
- error = pshm_close(((struct pshmnode *)fg->fg_data));
+
+ if ((pnode = (struct pshmnode *)fg->fg_data) != NULL) {
+ if (pnode->pinfo != PSHMINFO_NULL) {
+ error = pshm_close(pnode->pinfo, 0);
+ }
+ FREE(pnode, M_SHM);
+ }
+
PSHM_SUBSYS_UNLOCK();
+
return(error);
}