#include <vm/vm_protos.h>
#define f_flag f_fglob->fg_flag
-#define f_type f_fglob->fg_type
+#define f_type f_fglob->fg_ops->fo_type
#define f_msgcount f_fglob->fg_msgcount
#define f_cred f_fglob->fg_cred
#define f_ops f_fglob->fg_ops
static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
struct pshmcache **pcache, int addref);
-struct fileops pshmops =
- { pshm_read, pshm_write, pshm_ioctl, pshm_select, pshm_closefile, pshm_kqfilter, 0 };
+static const struct fileops pshmops = {
+ DTYPE_PSXSHM,
+ pshm_read,
+ pshm_write,
+ pshm_ioctl,
+ pshm_select,
+ pshm_closefile,
+ pshm_kqfilter,
+ 0
+};
static lck_grp_t *psx_shm_subsys_lck_grp;
static lck_grp_attr_t *psx_shm_subsys_lck_grp_attr;
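The rewritten pshmops table is positional, so DTYPE_PSXSHM has to remain the first initializer to land in fo_type, the field the redefined f_type macro above now reaches through fg_ops; that is also why the explicit fp->f_type assignment in pshm_open below is dropped. As a standalone sketch (hypothetical struct and handler names, not the kernel's definitions), a designated initializer makes this kind of ops table immune to member reordering:

/* Illustration only: designated initializers for an ops-style table. */
#include <stdio.h>

struct example_ops {
    int type;                 /* plays the role of fo_type */
    int (*read_fn)(void);     /* plays the role of fo_read */
    int (*write_fn)(void);    /* plays the role of fo_write */
};

static int demo_read(void)  { return 1; }
static int demo_write(void) { return 2; }

/* Each member is named, so the initializer stays correct even if the
 * struct's field order changes later. */
static const struct example_ops demo_ops = {
    .type     = 42,
    .read_fn  = demo_read,
    .write_fn = demo_write,
};

int main(void)
{
    printf("type=%d read=%d write=%d\n",
           demo_ops.type, demo_ops.read_fn(), demo_ops.write_fn());
    return 0;
}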
/*
* We found a "negative" match, ENOENT notifies client of this match.
- * The nc_vpid field records whether this is a whiteout.
*/
pshmstats.neghits++;
return (ENOENT);
/*
* Fill in cache info, if vp is NULL this is a "negative" cache entry.
- * For negative entries, we have to record whether it is a whiteout.
- * the whiteout flag is stored in the nc_vpid field which is
- * otherwise unused.
*/
pcp->pshminfo = pshmp;
pcp->pshm_nlen = pnp->pshm_namelen;
proc_fdlock(p);
fp->f_flag = fmode & FMASK;
- fp->f_type = DTYPE_PSXSHM;
fp->f_ops = &pshmops;
fp->f_data = (caddr_t)new_pnode;
*fdflags(p, indx) |= UF_EXCLOSE;
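Setting UF_EXCLOSE here is what user space observes as FD_CLOEXEC on descriptors returned by shm_open(), the close-on-exec behavior POSIX specifies for shared memory objects. A minimal user-space check (the object name is illustrative):

/* Confirm that a shm_open() descriptor comes back close-on-exec. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    const char *name = "/pshm-cloexec-demo";   /* illustrative name */
    int fd = shm_open(name, O_CREAT | O_RDWR, 0600);
    if (fd < 0) {
        fprintf(stderr, "shm_open: %s\n", strerror(errno));
        return 1;
    }
    int fd_flags = fcntl(fd, F_GETFD);
    printf("FD_CLOEXEC is %s\n",
           (fd_flags != -1 && (fd_flags & FD_CLOEXEC)) ? "set" : "not set");
    close(fd);
    shm_unlink(name);
    return 0;
}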
mach_vm_size_t total_size, alloc_size;
memory_object_size_t mosize;
struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p;
+ vm_map_t user_map;
#if CONFIG_MACF
int error;
#endif
+ user_map = current_map();
+
if (fp->f_type != DTYPE_PSXSHM) {
return(EINVAL);
}
#endif
pinfo->pshm_flags |= PSHM_ALLOCATING;
- total_size = round_page_64(length);
+ total_size = vm_map_round_page(length,
+ vm_map_page_mask(user_map));
pshmobj_next_p = &pinfo->pshm_memobjects;
for (alloc_size = 0;
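The length is now rounded with the page mask of the user's VM map rather than the kernel's fixed page size; the arithmetic itself is the familiar round-up-to-boundary idiom, (length + mask) & ~mask. A small user-space sketch of the same idiom, using sysconf() in place of a vm_map_t (illustrative only):

/* Round a length up to a page boundary with a mask, the same idiom
 * vm_map_round_page() applies with the target map's page mask. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint64_t round_up(uint64_t len, uint64_t mask)
{
    return (len + mask) & ~mask;    /* mask = page_size - 1 */
}

int main(void)
{
    uint64_t page_size = (uint64_t)sysconf(_SC_PAGESIZE);
    uint64_t mask = page_size - 1;
    uint64_t lengths[] = { 1, page_size - 1, page_size, page_size + 1 };

    for (size_t i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
        printf("%llu -> %llu\n",
               (unsigned long long)lengths[i],
               (unsigned long long)round_up(lengths[i], mask));
    return 0;
}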
if ((flags & MAP_FIXED) == 0) {
alloc_flags = VM_FLAGS_ANYWHERE;
- user_addr = vm_map_round_page(user_addr);
+ user_addr = vm_map_round_page(user_addr,
+ vm_map_page_mask(user_map));
} else {
- if (user_addr != vm_map_round_page(user_addr))
+ if (user_addr != vm_map_round_page(user_addr,
+ vm_map_page_mask(user_map)))
return (EINVAL);
/*
* We do not get rid of the existing mappings here because
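The alignment check above means a MAP_FIXED mapping of a POSIX shared memory object is rejected with EINVAL unless the requested address sits on a page boundary. One way to observe this from user space, assuming an otherwise valid reserved region (object name and sizes are illustrative):

/* MAP_FIXED at an unaligned address is expected to fail; the aligned
 * address over the same reserved region is expected to succeed. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    const char *name = "/pshm-fixed-demo";     /* illustrative name */
    long page = sysconf(_SC_PAGESIZE);

    int fd = shm_open(name, O_CREAT | O_RDWR, 0600);
    if (fd < 0 || ftruncate(fd, page) < 0) {
        perror("setup");
        return 1;
    }

    /* Reserve an address range we are allowed to map over. */
    char *base = mmap(NULL, 2 * page, PROT_NONE,
                      MAP_ANON | MAP_PRIVATE, -1, 0);
    if (base == MAP_FAILED) {
        perror("mmap reserve");
        return 1;
    }

    /* Unaligned fixed address: expected to fail with EINVAL. */
    void *p = mmap(base + 1, page, PROT_READ | PROT_WRITE,
                   MAP_FIXED | MAP_SHARED, fd, 0);
    printf("unaligned MAP_FIXED: %s\n",
           p == MAP_FAILED ? strerror(errno) : "unexpectedly succeeded");

    /* Page-aligned fixed address: expected to succeed. */
    p = mmap(base, page, PROT_READ | PROT_WRITE,
             MAP_FIXED | MAP_SHARED, fd, 0);
    printf("aligned MAP_FIXED:   %s\n",
           p == MAP_FAILED ? strerror(errno) : "ok");

    shm_unlink(name);
    return 0;
}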
if (error == ENOENT) {
PSHM_SUBSYS_UNLOCK();
- error = EINVAL;
goto bad;
}
+ /* During unlink, a lookup failure also implies ENOENT */
if (!error) {
PSHM_SUBSYS_UNLOCK();
- error = EINVAL;
+ error = ENOENT;
goto bad;
} else
incache = 1;
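With the change above, a failed lookup during unlink is reported as ENOENT instead of EINVAL, matching what POSIX specifies for shm_unlink() of a name that does not exist. A minimal user-space check (the object name is assumed not to exist):

/* shm_unlink() of a nonexistent object should fail with ENOENT. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    const char *name = "/pshm-noexist-demo";   /* assumed not to exist */
    if (shm_unlink(name) == -1)
        printf("shm_unlink: errno=%d (%s)\n", errno, strerror(errno));
    else
        printf("shm_unlink unexpectedly succeeded\n");
    return 0;
}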