X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/55e303ae13a4cf49d70f2294092726f2fffb9ef2..7e41aa883dd258f888d0470250eead40a53ef1f5:/bsd/kern/posix_shm.c diff --git a/bsd/kern/posix_shm.c b/bsd/kern/posix_shm.c index 11c319808..38faf2939 100644 --- a/bsd/kern/posix_shm.c +++ b/bsd/kern/posix_shm.c @@ -1,16 +1,19 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER @@ -20,7 +23,7 @@ * Please see the License for the specific language governing rights and * limitations under the License. * - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * Copyright (c) 1990, 1996-1998 Apple Computer, Inc. @@ -37,33 +40,65 @@ * Created for MacOSX * */ +/* + * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce + * support for mandatory and extensible security protections. This notice + * is included in support of clause 2.2 (b) of the Apple Public License, + * Version 2.0. 
+ */ #include #include #include #include -#include +#include #include #include -#include -#include +#include +#include #include #include #include +#include #include #include #include #include #include +#include +#include +#include + +#if CONFIG_MACF +#include +#endif + #include +#include +#include #include #include #include #include +#include +#include +#define f_flag f_fglob->fg_flag +#define f_type f_fglob->fg_ops->fo_type +#define f_msgcount f_fglob->fg_msgcount +#define f_cred f_fglob->fg_cred +#define f_ops f_fglob->fg_ops +#define f_offset f_fglob->fg_offset +#define f_data f_fglob->fg_data #define PSHMNAMLEN 31 /* maximum name segment length we bother with */ +struct pshmobj { + void * pshmo_memobject; + memory_object_size_t pshmo_size; + struct pshmobj * pshmo_next; +}; + struct pshminfo { unsigned int pshm_flags; unsigned int pshm_usecount; @@ -72,23 +107,25 @@ struct pshminfo { uid_t pshm_uid; gid_t pshm_gid; char pshm_name[PSHMNAMLEN + 1]; /* segment name */ - void * pshm_memobject; + struct pshmobj *pshm_memobjects; #if DIAGNOSTIC unsigned int pshm_readcount; unsigned int pshm_writecount; - struct proc * pshm_proc; + proc_t pshm_proc; #endif /* DIAGNOSTIC */ + struct label* pshm_label; }; #define PSHMINFO_NULL (struct pshminfo *)0 -#define PSHM_NONE 1 -#define PSHM_DEFINED 2 -#define PSHM_ALLOCATED 4 -#define PSHM_MAPPED 8 -#define PSHM_INUSE 0x10 -#define PSHM_REMOVED 0x20 -#define PSHM_INCREATE 0x40 -#define PSHM_INDELETE 0x80 +#define PSHM_NONE 0x001 +#define PSHM_DEFINED 0x002 +#define PSHM_ALLOCATED 0x004 +#define PSHM_MAPPED 0x008 +#define PSHM_INUSE 0x010 +#define PSHM_REMOVED 0x020 +#define PSHM_INCREATE 0x040 +#define PSHM_INDELETE 0x080 +#define PSHM_ALLOCATING 0x100 struct pshmcache { LIST_ENTRY(pshmcache) pshm_hash; /* hash chain */ @@ -98,6 +135,10 @@ struct pshmcache { }; #define PSHMCACHE_NULL (struct pshmcache *)0 +#define PSHMCACHE_NOTFOUND (0) +#define PSHMCACHE_FOUND (-1) +#define PSHMCACHE_NEGATIVE (ENOENT) + struct pshmstats { long goodhits; /* hits that we can really use */ long neghits; /* negative hits that we can use */ @@ -114,8 +155,8 @@ struct pshmname { }; struct pshmnode { - off_t mapp_addr; - size_t map_size; + off_t mapp_addr; + user_size_t map_size; /* XXX unused ? 
*/ struct pshminfo *pinfo; unsigned int pshm_usecount; #if DIAGNOSTIC @@ -128,25 +169,66 @@ struct pshmnode { #define PSHMHASH(pnp) \ (&pshmhashtbl[(pnp)->pshm_hash & pshmhash]) + LIST_HEAD(pshmhashhead, pshmcache) *pshmhashtbl; /* Hash Table */ u_long pshmhash; /* size of hash table - 1 */ long pshmnument; /* number of cache entries allocated */ struct pshmstats pshmstats; /* cache effectiveness statistics */ -static int pshm_read __P((struct file *fp, struct uio *uio, - struct ucred *cred, int flags, struct proc *p)); -static int pshm_write __P((struct file *fp, struct uio *uio, - struct ucred *cred, int flags, struct proc *p)); -static int pshm_ioctl __P((struct file *fp, u_long com, - caddr_t data, struct proc *p)); -static int pshm_select __P((struct file *fp, int which, void *wql, - struct proc *p)); -static int pshm_closefile __P((struct file *fp, struct proc *p)); +static int pshm_read (struct fileproc *fp, struct uio *uio, + int flags, vfs_context_t ctx); +static int pshm_write (struct fileproc *fp, struct uio *uio, + int flags, vfs_context_t ctx); +static int pshm_ioctl (struct fileproc *fp, u_long com, + caddr_t data, vfs_context_t ctx); +static int pshm_select (struct fileproc *fp, int which, void *wql, vfs_context_t ctx); +static int pshm_close(struct pshminfo *pinfo, int dropref); +static int pshm_closefile (struct fileglob *fg, vfs_context_t ctx); + +static int pshm_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx); + +int pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, proc_t p); +int pshm_cache_purge_all(proc_t p); + +static int pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp); +static void pshm_cache_delete(struct pshmcache *pcp); +static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp, + struct pshmcache **pcache, int addref); +static int pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache); + +static const struct fileops pshmops = { + DTYPE_PSXSHM, + pshm_read, + pshm_write, + pshm_ioctl, + pshm_select, + pshm_closefile, + pshm_kqfilter, + 0 +}; + +static lck_grp_t *psx_shm_subsys_lck_grp; +static lck_grp_attr_t *psx_shm_subsys_lck_grp_attr; +static lck_attr_t *psx_shm_subsys_lck_attr; +static lck_mtx_t psx_shm_subsys_mutex; + +#define PSHM_SUBSYS_LOCK() lck_mtx_lock(& psx_shm_subsys_mutex) +#define PSHM_SUBSYS_UNLOCK() lck_mtx_unlock(& psx_shm_subsys_mutex) +#define PSHM_SUBSYS_ASSERT_HELD() lck_mtx_assert(&psx_shm_subsys_mutex, LCK_MTX_ASSERT_OWNED) + + +/* Initialize the mutex governing access to the posix shm subsystem */ +__private_extern__ void +pshm_lock_init( void ) +{ + + psx_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init(); -static int pshm_kqfilter __P((struct file *fp, struct knote *kn, struct proc *p)); + psx_shm_subsys_lck_grp = lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr); -struct fileops pshmops = - { pshm_read, pshm_write, pshm_ioctl, pshm_select, pshm_closefile, pshm_kqfilter }; + psx_shm_subsys_lck_attr = lck_attr_alloc_init(); + lck_mtx_init(& psx_shm_subsys_mutex, psx_shm_subsys_lck_grp, psx_shm_subsys_lck_attr); +} /* * Lookup an entry in the cache @@ -158,18 +240,16 @@ struct fileops pshmops = * fails, a status of zero is returned. 
*/ -int -pshm_cache_search(pshmp, pnp, pcache) - struct pshminfo **pshmp; - struct pshmname *pnp; - struct pshmcache **pcache; +static int +pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp, + struct pshmcache **pcache, int addref) { - register struct pshmcache *pcp, *nnp; - register struct pshmhashhead *pcpp; + struct pshmcache *pcp, *nnp; + struct pshmhashhead *pcpp; if (pnp->pshm_namelen > PSHMNAMLEN) { pshmstats.longnames++; - return (0); + return PSHMCACHE_NOTFOUND; } pcpp = PSHMHASH(pnp); @@ -182,7 +262,7 @@ pshm_cache_search(pshmp, pnp, pcache) if (pcp == 0) { pshmstats.miss++; - return (0); + return PSHMCACHE_NOTFOUND; } /* We found a "positive" match, return the vnode */ @@ -191,54 +271,43 @@ pshm_cache_search(pshmp, pnp, pcache) /* TOUCH(ncp); */ *pshmp = pcp->pshminfo; *pcache = pcp; - return (-1); + if (addref) + pcp->pshminfo->pshm_usecount++; + return PSHMCACHE_FOUND; } /* * We found a "negative" match, ENOENT notifies client of this match. - * The nc_vpid field records whether this is a whiteout. */ pshmstats.neghits++; - return (ENOENT); + return PSHMCACHE_NEGATIVE; } /* * Add an entry to the cache. + * XXX should be static? */ -int -pshm_cache_add(pshmp, pnp) - struct pshminfo *pshmp; - struct pshmname *pnp; +static int +pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp) { - register struct pshmcache *pcp; - register struct pshmhashhead *pcpp; + struct pshmhashhead *pcpp; struct pshminfo *dpinfo; struct pshmcache *dpcp; #if DIAGNOSTIC - if (pnp->pshm_namelen > NCHNAMLEN) + if (pnp->pshm_namelen > PSHMNAMLEN) panic("cache_enter: name too long"); #endif - /* - * We allocate a new entry if we are less than the maximum - * allowed and the one at the front of the LRU list is in use. - * Otherwise we use the one at the front of the LRU list. - */ - pcp = (struct pshmcache *)_MALLOC(sizeof(struct pshmcache), M_SHM, M_WAITOK); + /* if the entry has already been added by some one else return */ - if (pshm_cache_search(&dpinfo, pnp, &dpcp) == -1) { - _FREE(pcp, M_SHM); - return(EEXIST); + if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == PSHMCACHE_FOUND) { + return EEXIST; } pshmnument++; - bzero(pcp, sizeof(struct pshmcache)); /* * Fill in cache info, if vp is NULL this is a "negative" cache entry. - * For negative entries, we have to record whether it is a whiteout. - * the whiteout flag is stored in the nc_vpid field which is - * otherwise unused. */ pcp->pshminfo = pshmp; pcp->pshm_nlen = pnp->pshm_namelen; @@ -246,7 +315,7 @@ pshm_cache_add(pshmp, pnp) pcpp = PSHMHASH(pnp); #if DIAGNOSTIC { - register struct pshmcache *p; + struct pshmcache *p; for (p = pcpp->lh_first; p != 0; p = p->pshm_hash.le_next) if (p == pcp) @@ -254,40 +323,59 @@ pshm_cache_add(pshmp, pnp) } #endif LIST_INSERT_HEAD(pcpp, pcp, pshm_hash); - return(0); + return 0; } /* * Name cache initialization, from vfs_init() when we are booting */ void -pshm_cache_init() +pshm_cache_init(void) { - pshmhashtbl = hashinit(desiredvnodes, M_SHM, &pshmhash); + pshmhashtbl = hashinit(desiredvnodes / 8, M_SHM, &pshmhash); } /* - * Invalidate a all entries to particular vnode. - * + * Invalidate all entries and delete all objects associated with it. Entire + * non Kernel entries are going away. Just dump'em all + * * We actually just increment the v_id, that will do it. The entries will * be purged by lookup as they get found. If the v_id wraps around, we * need to ditch the entire cache, to avoid confusion. No valid vnode will * ever have (v_id == 0). 
*/ -void -pshm_cache_purge(void) +int +pshm_cache_purge_all(__unused proc_t p) { - struct pshmcache *pcp; + struct pshmcache *pcp, *tmppcp; struct pshmhashhead *pcpp; + int error = 0; + + if (kauth_cred_issuser(kauth_cred_get()) == 0) + return EPERM; + PSHM_SUBSYS_LOCK(); for (pcpp = &pshmhashtbl[pshmhash]; pcpp >= pshmhashtbl; pcpp--) { - while (pcp = pcpp->lh_first) - pshm_cache_delete(pcp); + LIST_FOREACH_SAFE(pcp, pcpp, pshm_hash, tmppcp) { + assert(pcp->pshm_nlen); + error = pshm_unlink_internal(pcp->pshminfo, pcp); + if (error) + goto out; + } } + assert(pshmnument == 0); + +out: + PSHM_SUBSYS_UNLOCK(); + + if (error) + printf("%s: Error %d removing shm cache: %ld remain!\n", + __func__, error, pshmnument); + return error; } -pshm_cache_delete(pcp) - struct pshmcache *pcp; +static void +pshm_cache_delete(struct pshmcache *pcp) { #if DIAGNOSTIC if (pcp->pshm_hash.le_prev == 0) @@ -301,55 +389,51 @@ pshm_cache_delete(pcp) } -struct shm_open_args { - const char *name; - int oflag; - int mode; -}; - int -shm_open(p, uap, retval) - struct proc *p; - register struct shm_open_args *uap; - register_t *retval; +shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval) { - register struct filedesc *fdp = p->p_fd; - register struct file *fp; - register struct vnode *vp; - int i; - struct file *nfp; - int type, indx, error; + size_t i; + int indx, error; struct pshmname nd; struct pshminfo *pinfo; - extern struct fileops pshmops; - char * pnbuf; + struct fileproc *fp = NULL; + char *pnbuf = NULL; + struct pshminfo *new_pinfo = PSHMINFO_NULL; + struct pshmnode *new_pnode = PSHMNODE_NULL; + struct pshmcache *pcache = PSHMCACHE_NULL; /* ignored on return */ char * nameptr; char * cp; size_t pathlen, plen; int fmode ; int cmode = uap->mode; int incache = 0; - struct pshmnode * pnode = PSHMNODE_NULL; - struct pshmcache * pcache = PSHMCACHE_NULL; - int pinfo_alloc=0; + struct pshmcache *pcp = NULL; + AUDIT_ARG(fflags, uap->oflag); + AUDIT_ARG(mode, uap->mode); pinfo = PSHMINFO_NULL; - MALLOC_ZONE(pnbuf, caddr_t, - MAXPATHLEN, M_NAMEI, M_WAITOK); + /* + * Preallocate everything we might need up front to avoid taking + * and dropping the lock, opening us up to race conditions. + */ + MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); + if (pnbuf == NULL) { + error = ENOSPC; + goto bad; + } + pathlen = MAXPATHLEN; - error = copyinstr((void *)uap->name, (void *)pnbuf, - MAXPATHLEN, &pathlen); + error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen); if (error) { goto bad; } + AUDIT_ARG(text, pnbuf); if (pathlen > PSHMNAMLEN) { error = ENAMETOOLONG; goto bad; } - - #ifdef PSXSHM_NAME_RESTRICT nameptr = pnbuf; if (*nameptr == '/') { @@ -358,7 +442,7 @@ shm_open(p, uap, retval) error = EINVAL; goto bad; } - } else { + } else { error = EINVAL; goto bad; } @@ -370,134 +454,247 @@ shm_open(p, uap, retval) nd.pshm_namelen = plen; nd. pshm_hash =0; - for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) { - nd.pshm_hash += (unsigned char)*cp * i; + for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) { + nd.pshm_hash += (unsigned char)*cp * i; } - error = pshm_cache_search(&pinfo, &nd, &pcache); + /* + * attempt to allocate a new fp; if unsuccessful, the fp will be + * left unmodified (NULL). 
+ */ + error = falloc(p, &fp, &indx, vfs_context_current()); + if (error) + goto bad; - if (error == ENOENT) { + cmode &= ALLPERMS; + + fmode = FFLAGS(uap->oflag); + if ((fmode & (FREAD | FWRITE)) == 0) { error = EINVAL; goto bad; + } + /* + * We allocate a new entry if we are less than the maximum + * allowed and the one at the front of the LRU list is in use. + * Otherwise we use the one at the front of the LRU list. + */ + MALLOC(pcp, struct pshmcache *, sizeof(struct pshmcache), M_SHM, M_WAITOK|M_ZERO); + if (pcp == NULL) { + error = ENOSPC; + goto bad; } - if (!error) { - incache = 0; - } else - incache = 1; - fmode = FFLAGS(uap->oflag); - if ((fmode & (FREAD | FWRITE))==0) { - error = EINVAL; + + MALLOC(new_pinfo, struct pshminfo *, sizeof(struct pshminfo), M_SHM, M_WAITOK|M_ZERO); + if (new_pinfo == PSHMINFO_NULL) { + error = ENOSPC; goto bad; } +#if CONFIG_MACF + mac_posixshm_label_init(new_pinfo); +#endif - if (error = falloc(p, &nfp, &indx)) + MALLOC(new_pnode, struct pshmnode *, sizeof(struct pshmnode), M_SHM, M_WAITOK|M_ZERO); + if (new_pnode == PSHMNODE_NULL) { + error = ENOSPC; goto bad; - fp = nfp; + } - cmode &= ALLPERMS; + PSHM_SUBSYS_LOCK(); + + /* + * If we find the entry in the cache, this will take a reference, + * allowing us to unlock it for the permissions check. + */ + error = pshm_cache_search(&pinfo, &nd, &pcache, 1); + + PSHM_SUBSYS_UNLOCK(); - if (fmode & O_CREAT) { - if ((fmode & O_EXCL) && incache) { - /* shm obj exists and opened O_EXCL */ -#if notyet - if (pinfo->pshm_flags & PSHM_INDELETE) { - } -#endif - error = EEXIST; - goto bad1; - } - if (!incache) { - /* create a new one */ - pinfo = (struct pshminfo *)_MALLOC(sizeof(struct pshminfo), M_SHM, M_WAITOK); - bzero(pinfo, sizeof(struct pshminfo)); - pinfo_alloc = 1; - pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE; - pinfo->pshm_usecount = 1; - pinfo->pshm_mode = cmode; - pinfo->pshm_uid = p->p_ucred->cr_uid; - pinfo->pshm_gid = p->p_ucred->cr_gid; - } else { - /* already exists */ - if( pinfo->pshm_flags & PSHM_INDELETE) { - error = ENOENT; - goto bad1; - } - if (error = pshm_access(pinfo, fmode, p->p_ucred, p)) - goto bad1; - } + if (error == PSHMCACHE_NEGATIVE) { + error = EINVAL; + goto bad; + } + + if (error == PSHMCACHE_NOTFOUND) { + incache = 0; + if (fmode & O_CREAT) { + /* create a new one (commit the allocation) */ + pinfo = new_pinfo; + pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE; + pinfo->pshm_usecount = 1; /* existence reference */ + pinfo->pshm_mode = cmode; + pinfo->pshm_uid = kauth_getuid(); + pinfo->pshm_gid = kauth_getgid(); + bcopy(pnbuf, &pinfo->pshm_name[0], pathlen); + pinfo->pshm_name[pathlen]=0; +#if CONFIG_MACF + error = mac_posixshm_check_create(kauth_cred_get(), nameptr); + if (error) { + goto bad; + } + mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr); +#endif + } } else { + incache = 1; + if (fmode & O_CREAT) { + /* already exists */ + if ((fmode & O_EXCL)) { + AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, + pinfo->pshm_gid, + pinfo->pshm_mode); + + /* shm obj exists and opened O_EXCL */ + error = EEXIST; + goto bad; + } + + if( pinfo->pshm_flags & PSHM_INDELETE) { + error = ENOENT; + goto bad; + } + AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, + pinfo->pshm_gid, pinfo->pshm_mode); +#if CONFIG_MACF + if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) { + goto bad; + } +#endif + if ( (error = pshm_access(pinfo, fmode, kauth_cred_get(), p)) ) { + goto bad; + } + } + } + if (!(fmode & O_CREAT)) { if (!incache) { - /* O_CREAT is not set and the 
shm obecj does not exist */ + /* O_CREAT is not set and the object does not exist */ error = ENOENT; - goto bad1; + goto bad; } if( pinfo->pshm_flags & PSHM_INDELETE) { error = ENOENT; - goto bad1; + goto bad; } - if (error = pshm_access(pinfo, fmode, p->p_ucred, p)) - goto bad1; +#if CONFIG_MACF + if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) { + goto bad; + } +#endif + + if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) { + goto bad; + } } if (fmode & O_TRUNC) { error = EINVAL; - goto bad2; + goto bad; } + + + PSHM_SUBSYS_LOCK(); + #if DIAGNOSTIC if (fmode & FWRITE) pinfo->pshm_writecount++; if (fmode & FREAD) pinfo->pshm_readcount++; #endif - pnode = (struct pshmnode *)_MALLOC(sizeof(struct pshmnode), M_SHM, M_WAITOK); - bzero(pnode, sizeof(struct pshmnode)); - if (!incache) { - if (error = pshm_cache_add(pinfo, &nd)) { - goto bad3; + /* if successful, this will consume the pcp */ + if ( (error = pshm_cache_add(pinfo, &nd, pcp)) ) { + goto bad_locked; } + /* + * add reference for the new entry; otherwise, we obtained + * one from the cache hit earlier. + */ + pinfo->pshm_usecount++; } pinfo->pshm_flags &= ~PSHM_INCREATE; - pinfo->pshm_usecount++; - pnode->pinfo = pinfo; + new_pnode->pinfo = pinfo; + + PSHM_SUBSYS_UNLOCK(); + + /* + * if incache, we did not use the new pcp or new_pinfo and must + * free them + */ + if (incache) { + FREE(pcp, M_SHM); + + if (new_pinfo != PSHMINFO_NULL) { +#if CONFIG_MACF + mac_posixshm_label_destroy(new_pinfo); +#endif + FREE(new_pinfo, M_SHM); + } + } + + proc_fdlock(p); fp->f_flag = fmode & FMASK; - fp->f_type = DTYPE_PSXSHM; fp->f_ops = &pshmops; - fp->f_data = (caddr_t)pnode; - *fdflags(p, indx) &= ~UF_RESERVED; + fp->f_data = (caddr_t)new_pnode; + *fdflags(p, indx) |= UF_EXCLOSE; + procfdtbl_releasefd(p, indx, NULL); + fp_drop(p, indx, fp, 1); + proc_fdunlock(p); + *retval = indx; FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); return (0); -bad3: - _FREE(pnode, M_SHM); - -bad2: - if (pinfo_alloc) - _FREE(pinfo, M_SHM); -bad1: - fdrelse(p, indx); - ffree(nfp); + +bad_locked: + PSHM_SUBSYS_UNLOCK(); bad: - FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + /* + * If we obtained the entry from the cache, we need to drop the + * reference; holding the reference may have prevented unlinking, + * so we need to call pshm_close() to get the full effect. 
+ */ + if (incache) { + PSHM_SUBSYS_LOCK(); + pshm_close(pinfo, 1); + PSHM_SUBSYS_UNLOCK(); + } + + if (pcp != NULL) + FREE(pcp, M_SHM); + + if (new_pnode != PSHMNODE_NULL) + FREE(new_pnode, M_SHM); + + if (fp != NULL) + fp_free(p, indx, fp); + + if (new_pinfo != PSHMINFO_NULL) { +#if CONFIG_MACF + mac_posixshm_label_destroy(new_pinfo); +#endif + FREE(new_pinfo, M_SHM); + } + if (pnbuf != NULL) + FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); return (error); } -/* ARGSUSED */ int -pshm_truncate(p, fp, fd, length, retval) - struct proc *p; - struct file *fp; - int fd; - off_t length; - register_t *retval; +pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd, + off_t length, __unused int32_t *retval) { struct pshminfo * pinfo; struct pshmnode * pnode ; kern_return_t kret; - vm_offset_t user_addr; - void * mem_object; - vm_size_t size; + mem_entry_name_port_t mem_object; + mach_vm_size_t total_size, alloc_size; + memory_object_size_t mosize; + struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p; + vm_map_t user_map; +#if CONFIG_MACF + int error; +#endif + + user_map = current_map(); if (fp->f_type != DTYPE_PSXSHM) { return(EINVAL); @@ -507,33 +704,85 @@ pshm_truncate(p, fp, fd, length, retval) if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL ) return(EINVAL); - if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) + PSHM_SUBSYS_LOCK(); + if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) { + PSHM_SUBSYS_UNLOCK(); return(EINVAL); - if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) + } + if ((pinfo->pshm_flags & (PSHM_DEFINED|PSHM_ALLOCATING|PSHM_ALLOCATED)) != PSHM_DEFINED) { + PSHM_SUBSYS_UNLOCK(); return(EINVAL); } +#if CONFIG_MACF + error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, length); + if (error) { + PSHM_SUBSYS_UNLOCK(); + return(error); + } +#endif - size = round_page_64(length); - kret = vm_allocate(current_map(), &user_addr, size, TRUE); - if (kret != KERN_SUCCESS) - goto out; + pinfo->pshm_flags |= PSHM_ALLOCATING; + total_size = vm_map_round_page(length, + vm_map_page_mask(user_map)); + pshmobj_next_p = &pinfo->pshm_memobjects; - kret = mach_make_memory_entry (current_map(), &size, - user_addr, VM_PROT_DEFAULT, &mem_object, 0); + for (alloc_size = 0; + alloc_size < total_size; + alloc_size += mosize) { - if (kret != KERN_SUCCESS) - goto out; - - vm_deallocate(current_map(), user_addr, size); + PSHM_SUBSYS_UNLOCK(); + + mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE); + kret = mach_make_memory_entry_64( + VM_MAP_NULL, + &mosize, + 0, + MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT, + &mem_object, + 0); + + if (kret != KERN_SUCCESS) + goto out; + + MALLOC(pshmobj, struct pshmobj *, sizeof (struct pshmobj), + M_SHM, M_WAITOK); + if (pshmobj == NULL) { + kret = KERN_NO_SPACE; + mach_memory_entry_port_release(mem_object); + mem_object = NULL; + goto out; + } - pinfo->pshm_flags &= ~PSHM_DEFINED; - pinfo->pshm_flags = PSHM_ALLOCATED; - pinfo->pshm_memobject = mem_object; - pinfo->pshm_length = size; + PSHM_SUBSYS_LOCK(); + + pshmobj->pshmo_memobject = (void *) mem_object; + pshmobj->pshmo_size = mosize; + pshmobj->pshmo_next = NULL; + + *pshmobj_next_p = pshmobj; + pshmobj_next_p = &pshmobj->pshmo_next; + } + + pinfo->pshm_flags |= PSHM_ALLOCATED; + pinfo->pshm_flags &= ~(PSHM_ALLOCATING); + pinfo->pshm_length = total_size; + PSHM_SUBSYS_UNLOCK(); return(0); out: + PSHM_SUBSYS_LOCK(); + for (pshmobj = pinfo->pshm_memobjects; + pshmobj != NULL; + pshmobj = pshmobj_next) { + pshmobj_next = pshmobj->pshmo_next; + 
mach_memory_entry_port_release(pshmobj->pshmo_memobject); + FREE(pshmobj, M_SHM); + } + pinfo->pshm_memobjects = NULL; + pinfo->pshm_flags &= ~PSHM_ALLOCATING; + PSHM_SUBSYS_UNLOCK(); + switch (kret) { case KERN_INVALID_ADDRESS: case KERN_NO_SPACE: @@ -547,91 +796,87 @@ out: } int -pshm_stat(pnode, sb) -struct pshmnode *pnode; -struct stat *sb; +pshm_stat(struct pshmnode *pnode, void *ub, int isstat64) { + struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */ + struct stat64 * sb64 = (struct stat64 *)0; /* warning avoidance ; protected by isstat64 */ struct pshminfo *pinfo; +#if CONFIG_MACF + int error; +#endif - if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) + PSHM_SUBSYS_LOCK(); + if ((pinfo = pnode->pinfo) == PSHMINFO_NULL){ + PSHM_SUBSYS_UNLOCK(); return(EINVAL); + } + +#if CONFIG_MACF + error = mac_posixshm_check_stat(kauth_cred_get(), pinfo); + if (error) { + PSHM_SUBSYS_UNLOCK(); + return(error); + } +#endif - bzero(sb, sizeof(struct stat)); - sb->st_mode = pinfo->pshm_mode; - sb->st_uid = pinfo->pshm_uid; - sb->st_gid = pinfo->pshm_gid; - sb->st_size = pinfo->pshm_length; + if (isstat64 != 0) { + sb64 = (struct stat64 *)ub; + bzero(sb64, sizeof(struct stat64)); + sb64->st_mode = pinfo->pshm_mode; + sb64->st_uid = pinfo->pshm_uid; + sb64->st_gid = pinfo->pshm_gid; + sb64->st_size = pinfo->pshm_length; + } else { + sb = (struct stat *)ub; + bzero(sb, sizeof(struct stat)); + sb->st_mode = pinfo->pshm_mode; + sb->st_uid = pinfo->pshm_uid; + sb->st_gid = pinfo->pshm_gid; + sb->st_size = pinfo->pshm_length; + } + PSHM_SUBSYS_UNLOCK(); return(0); } +/* + * This is called only from shm_open which holds pshm_lock(); + * XXX This code is repeated many times + */ int -pshm_access(struct pshminfo *pinfo, int mode, struct ucred *cred, struct proc *p) +pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, __unused proc_t p) { - mode_t mask; - register gid_t *gp; - int i, error; + int mode_req = ((mode & FREAD) ? S_IRUSR : 0) | + ((mode & FWRITE) ? S_IWUSR : 0); /* Otherwise, user id 0 always gets access. */ - if (cred->cr_uid == 0) + if (!suser(cred, NULL)) return (0); - mask = 0; - - /* Otherwise, check the owner. */ - if (cred->cr_uid == pinfo->pshm_uid) { - if (mode & FREAD) - mask |= S_IRUSR; - if (mode & FWRITE) - mask |= S_IWUSR; - return ((pinfo->pshm_mode & mask) == mask ? 0 : EACCES); - } - - /* Otherwise, check the groups. */ - for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) - if (pinfo->pshm_gid == *gp) { - if (mode & FREAD) - mask |= S_IRGRP; - if (mode & FWRITE) - mask |= S_IWGRP; - return ((pinfo->pshm_mode & mask) == mask ? 0 : EACCES); - } - - /* Otherwise, check everyone else. */ - if (mode & FREAD) - mask |= S_IROTH; - if (mode & FWRITE) - mask |= S_IWOTH; - return ((pinfo->pshm_mode & mask) == mask ? 
0 : EACCES); + return(posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req)); } -struct mmap_args { - caddr_t addr; - size_t len; - int prot; - int flags; - int fd; -#ifdef DOUBLE_ALIGN_PARAMS - long pad; -#endif - off_t pos; -}; - int -pshm_mmap(struct proc *p, struct mmap_args *uap, register_t *retval, struct file *fp, vm_size_t pageoff) +pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct fileproc *fp, off_t pageoff) { - vm_offset_t user_addr = (vm_offset_t)uap->addr; - vm_size_t user_size = (vm_size_t)uap->len ; + vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr; + vm_map_size_t user_size = (vm_map_size_t)uap->len ; + vm_map_offset_t user_start_addr; + vm_map_size_t map_size, mapped_size; int prot = uap->prot; int flags = uap->flags; vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos; - int fd = uap->fd; + vm_object_offset_t map_pos; vm_map_t user_map; - boolean_t find_space,docow; + int alloc_flags; + boolean_t docow; kern_return_t kret; struct pshminfo * pinfo; struct pshmnode * pnode; - void * mem_object; + struct pshmobj * pshmobj; +#if CONFIG_MACF + int error; +#endif if (user_size == 0) return(0); @@ -647,57 +892,130 @@ pshm_mmap(struct proc *p, struct mmap_args *uap, register_t *retval, struct file if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL ) return(EINVAL); - if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) + PSHM_SUBSYS_LOCK(); + if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) { + PSHM_SUBSYS_UNLOCK(); return(EINVAL); + } if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) { + PSHM_SUBSYS_UNLOCK(); return(EINVAL); } - if (user_size > pinfo->pshm_length) { + if ((off_t)user_size > pinfo->pshm_length) { + PSHM_SUBSYS_UNLOCK(); return(EINVAL); } - if ((off_t)user_size + file_pos > pinfo->pshm_length) { + if ((off_t)(user_size + file_pos) > pinfo->pshm_length) { + PSHM_SUBSYS_UNLOCK(); return(EINVAL); } - if ((mem_object = pinfo->pshm_memobject) == NULL) { + if ((pshmobj = pinfo->pshm_memobjects) == NULL) { + PSHM_SUBSYS_UNLOCK(); return(EINVAL); } - +#if CONFIG_MACF + error = mac_posixshm_check_mmap(kauth_cred_get(), pinfo, prot, flags); + if (error) { + PSHM_SUBSYS_UNLOCK(); + return(error); + } +#endif + + PSHM_SUBSYS_UNLOCK(); user_map = current_map(); if ((flags & MAP_FIXED) == 0) { - find_space = TRUE; - user_addr = round_page_32(user_addr); + alloc_flags = VM_FLAGS_ANYWHERE; + user_addr = vm_map_round_page(user_addr, + vm_map_page_mask(user_map)); } else { - if (user_addr != trunc_page_32(user_addr)) + if (user_addr != vm_map_round_page(user_addr, + vm_map_page_mask(user_map))) return (EINVAL); - find_space = FALSE; - (void) vm_deallocate(user_map, user_addr, user_size); + /* + * We do not get rid of the existing mappings here because + * it wouldn't be atomic (see comment in mmap()). We let + * Mach VM know that we want it to replace any existing + * mapping with the new one. + */ + alloc_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE; } docow = FALSE; - kret = vm_map_64(user_map, &user_addr, user_size, - 0, find_space, pinfo->pshm_memobject, file_pos, docow, - prot, VM_PROT_DEFAULT, - VM_INHERIT_DEFAULT); - - if (kret != KERN_SUCCESS) - goto out; - kret = vm_inherit(user_map, user_addr, user_size, - VM_INHERIT_SHARE); + mapped_size = 0; + + /* reserver the entire space first... 
*/ + kret = vm_map_enter_mem_object(user_map, + &user_addr, + user_size, + 0, + alloc_flags, + IPC_PORT_NULL, + 0, + FALSE, + VM_PROT_NONE, + VM_PROT_NONE, + VM_INHERIT_NONE); + user_start_addr = user_addr; if (kret != KERN_SUCCESS) { - (void) vm_deallocate(user_map, user_addr, user_size); goto out; } - pnode->mapp_addr = user_addr; - pnode->map_size = user_size; + + /* ... and overwrite with the real mappings */ + for (map_pos = 0, pshmobj = pinfo->pshm_memobjects; + user_size != 0; + map_pos += pshmobj->pshmo_size, pshmobj = pshmobj->pshmo_next) { + if (pshmobj == NULL) { + /* nothing there to map !? */ + goto out; + } + if (file_pos >= map_pos + pshmobj->pshmo_size) { + continue; + } + map_size = pshmobj->pshmo_size - (file_pos - map_pos); + if (map_size > user_size) { + map_size = user_size; + } + kret = vm_map_enter_mem_object( + user_map, + &user_addr, + map_size, + 0, + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, + pshmobj->pshmo_memobject, + file_pos - map_pos, + docow, + prot, + VM_PROT_DEFAULT, + VM_INHERIT_SHARE); + if (kret != KERN_SUCCESS) + goto out; + + user_addr += map_size; + user_size -= map_size; + mapped_size += map_size; + file_pos += map_size; + } + + PSHM_SUBSYS_LOCK(); + pnode->mapp_addr = user_start_addr; + pnode->map_size = mapped_size; pinfo->pshm_flags |= (PSHM_MAPPED | PSHM_INUSE); + PSHM_SUBSYS_UNLOCK(); out: + if (kret != KERN_SUCCESS) { + if (mapped_size != 0) { + (void) mach_vm_deallocate(current_map(), + user_start_addr, + mapped_size); + } + } + switch (kret) { case KERN_SUCCESS: - *fdflags(p, fd) |= UF_MAPPED; - *retval = (register_t)(user_addr + pageoff); + *retval = (user_start_addr + pageoff); return (0); case KERN_INVALID_ADDRESS: case KERN_NO_SPACE: @@ -710,54 +1028,93 @@ out: } -struct shm_unlink_args { - const char *name; -}; +static int +pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache) +{ + struct pshmobj *pshmobj, *pshmobj_next; + + PSHM_SUBSYS_ASSERT_HELD(); + + if (!pinfo || !pcache) + return EINVAL; + + if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) == 0) + return EINVAL; + + if (pinfo->pshm_flags & PSHM_INDELETE) + return 0; + + pinfo->pshm_flags |= PSHM_INDELETE; + pinfo->pshm_usecount--; + + pshm_cache_delete(pcache); + pinfo->pshm_flags |= PSHM_REMOVED; + + /* release the existence reference */ + if (!pinfo->pshm_usecount) { +#if CONFIG_MACF + mac_posixshm_label_destroy(pinfo); +#endif + /* + * If this is the last reference going away on the object, + * then we need to destroy the backing object. The name + * has an implied but uncounted reference on the object, + * once it's created, since it's used as a rendezvous, and + * therefore may be subsequently reopened. 
+ */ + for (pshmobj = pinfo->pshm_memobjects; + pshmobj != NULL; + pshmobj = pshmobj_next) { + mach_memory_entry_port_release(pshmobj->pshmo_memobject); + pshmobj_next = pshmobj->pshmo_next; + FREE(pshmobj, M_SHM); + } + FREE(pinfo,M_SHM); + } + + FREE(pcache, M_SHM); + + return 0; +} int -shm_unlink(p, uap, retval) - struct proc *p; - register struct shm_unlink_args *uap; - register_t *retval; +shm_unlink(proc_t p, struct shm_unlink_args *uap, __unused int32_t *retval) { - register struct filedesc *fdp = p->p_fd; - register struct file *fp; - int flags, i; - int error=0; + size_t i; + char * pnbuf; + size_t pathlen; + int error = 0; + struct pshmname nd; struct pshminfo *pinfo; - extern struct fileops pshmops; - char * pnbuf; char * nameptr; char * cp; - size_t pathlen, plen; - int fmode, cmode ; - int incache = 0; - struct pshmnode * pnode = PSHMNODE_NULL; struct pshmcache *pcache = PSHMCACHE_NULL; - kern_return_t kret; pinfo = PSHMINFO_NULL; - MALLOC_ZONE(pnbuf, caddr_t, - MAXPATHLEN, M_NAMEI, M_WAITOK); + + MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); + if (pnbuf == NULL) { + return(ENOSPC); /* XXX non-standard */ + } pathlen = MAXPATHLEN; - error = copyinstr((void *)uap->name, (void *)pnbuf, - MAXPATHLEN, &pathlen); + error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen); if (error) { goto bad; } + AUDIT_ARG(text, pnbuf); if (pathlen > PSHMNAMLEN) { error = ENAMETOOLONG; goto bad; } + nameptr = pnbuf; #ifdef PSXSHM_NAME_RESTRICT - nameptr = pnbuf; if (*nameptr == '/') { while (*(nameptr++) == '/') { - plen--; + pathlen--; error = EINVAL; goto bad; } @@ -767,78 +1124,84 @@ shm_unlink(p, uap, retval) } #endif /* PSXSHM_NAME_RESTRICT */ - plen = pathlen; - nameptr = pnbuf; nd.pshm_nameptr = nameptr; - nd.pshm_namelen = plen; - nd. pshm_hash =0; + nd.pshm_namelen = pathlen; + nd.pshm_hash = 0; - for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) { + for (cp = nameptr, i=1; *cp != 0 && i <= pathlen; i++, cp++) { nd.pshm_hash += (unsigned char)*cp * i; } - error = pshm_cache_search(&pinfo, &nd, &pcache); + PSHM_SUBSYS_LOCK(); + error = pshm_cache_search(&pinfo, &nd, &pcache, 0); - if (error == ENOENT) { - error = EINVAL; + /* During unlink lookup failure also implies ENOENT */ + if (error != PSHMCACHE_FOUND) { + PSHM_SUBSYS_UNLOCK(); + error = ENOENT; goto bad; } - if (!error) { + + if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED))==0) { + PSHM_SUBSYS_UNLOCK(); error = EINVAL; goto bad; - } else - incache = 1; + } - if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED))==0) { - return (EINVAL); + if (pinfo->pshm_flags & PSHM_ALLOCATING) { + /* XXX should we wait for flag to clear and then proceed ? */ + PSHM_SUBSYS_UNLOCK(); + error = EAGAIN; + goto bad; } if (pinfo->pshm_flags & PSHM_INDELETE) { + PSHM_SUBSYS_UNLOCK(); error = 0; goto bad; } - if (pinfo->pshm_memobject == NULL) { - error = EINVAL; +#if CONFIG_MACF + error = mac_posixshm_check_unlink(kauth_cred_get(), pinfo, nameptr); + if (error) { + PSHM_SUBSYS_UNLOCK(); goto bad; } +#endif + + AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid, + pinfo->pshm_mode); + + /* + * following file semantics, unlink should be allowed + * for users with write permission only. 
+ */ + if ( (error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p)) ) { + PSHM_SUBSYS_UNLOCK(); + goto bad; + } + + error = pshm_unlink_internal(pinfo, pcache); + PSHM_SUBSYS_UNLOCK(); - pinfo->pshm_flags |= PSHM_INDELETE; - pinfo->pshm_usecount--; - kret = mach_destroy_memory_entry(pinfo->pshm_memobject); - pshm_cache_delete(pcache); - _FREE(pcache, M_SHM); - pinfo->pshm_flags |= PSHM_REMOVED; - error = 0; bad: FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); - return (error); -out: - switch (kret) { - case KERN_INVALID_ADDRESS: - case KERN_PROTECTION_FAILURE: - return (EACCES); - default: - return (EINVAL); - } + return error; } -int -pshm_close(pnode, flags, cred, p) - register struct pshmnode *pnode; - int flags; - struct ucred *cred; - struct proc *p; +/* already called locked */ +static int +pshm_close(struct pshminfo *pinfo, int dropref) { - int error=0; - kern_return_t kret; - register struct pshminfo *pinfo; - - if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) - return(EINVAL); + int error = 0; + struct pshmobj *pshmobj, *pshmobj_next; - if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) { + /* + * If we are dropping the reference we took on the cache object, don't + * enforce the allocation requirement. + */ + if ( !dropref && ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED)) { return(EINVAL); } #if DIAGNOSTIC @@ -846,71 +1209,129 @@ pshm_close(pnode, flags, cred, p) kprintf("negative usecount in pshm_close\n"); } #endif /* DIAGNOSTIC */ - pinfo->pshm_usecount--; + pinfo->pshm_usecount--; /* release this fd's reference */ if ((pinfo->pshm_flags & PSHM_REMOVED) && !pinfo->pshm_usecount) { - _FREE(pinfo,M_SHM); +#if CONFIG_MACF + mac_posixshm_label_destroy(pinfo); +#endif + PSHM_SUBSYS_UNLOCK(); + /* + * If this is the last reference going away on the object, + * then we need to destroy the backing object. 
+ */ + for (pshmobj = pinfo->pshm_memobjects; + pshmobj != NULL; + pshmobj = pshmobj_next) { + mach_memory_entry_port_release(pshmobj->pshmo_memobject); + pshmobj_next = pshmobj->pshmo_next; + FREE(pshmobj, M_SHM); + } + PSHM_SUBSYS_LOCK(); + FREE(pinfo,M_SHM); } - _FREE(pnode, M_SHM); return (error); } +/* vfs_context_t passed to match prototype for struct fileops */ static int -pshm_closefile(fp, p) - struct file *fp; - struct proc *p; +pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx) { - return (pshm_close(((struct pshmnode *)fp->f_data), fp->f_flag, - fp->f_cred, p)); + int error = EINVAL; + struct pshmnode *pnode; + + PSHM_SUBSYS_LOCK(); + + if ((pnode = (struct pshmnode *)fg->fg_data) != NULL) { + if (pnode->pinfo != PSHMINFO_NULL) { + error = pshm_close(pnode->pinfo, 0); + } + FREE(pnode, M_SHM); + } + + PSHM_SUBSYS_UNLOCK(); + + return(error); } static int -pshm_read(fp, uio, cred, flags, p) - struct file *fp; - struct uio *uio; - struct ucred *cred; - int flags; - struct proc *p; +pshm_read(__unused struct fileproc *fp, __unused struct uio *uio, + __unused int flags, __unused vfs_context_t ctx) { - return(EOPNOTSUPP); + return(ENOTSUP); } static int -pshm_write(fp, uio, cred, flags, p) - struct file *fp; - struct uio *uio; - struct ucred *cred; - int flags; - struct proc *p; +pshm_write(__unused struct fileproc *fp, __unused struct uio *uio, + __unused int flags, __unused vfs_context_t ctx) { - return(EOPNOTSUPP); + return(ENOTSUP); } static int -pshm_ioctl(fp, com, data, p) - struct file *fp; - u_long com; - caddr_t data; - struct proc *p; +pshm_ioctl(__unused struct fileproc *fp, __unused u_long com, + __unused caddr_t data, __unused vfs_context_t ctx) { - return(EOPNOTSUPP); + return(ENOTSUP); } static int -pshm_select(fp, which, wql, p) - struct file *fp; - int which; - void *wql; - struct proc *p; +pshm_select(__unused struct fileproc *fp, __unused int which, __unused void *wql, + __unused vfs_context_t ctx) { - return(EOPNOTSUPP); + return(ENOTSUP); } static int -pshm_kqfilter(fp, kn, p) - struct file *fp; - struct knote *kn; - struct proc *p; +pshm_kqfilter(__unused struct fileproc *fp, __unused struct knote *kn, + __unused vfs_context_t ctx) +{ + return(ENOTSUP); +} + +int +fill_pshminfo(struct pshmnode * pshm, struct pshm_info * info) +{ + struct pshminfo *pinfo; + struct vinfo_stat *sb; + + PSHM_SUBSYS_LOCK(); + if ((pinfo = pshm->pinfo) == PSHMINFO_NULL){ + PSHM_SUBSYS_UNLOCK(); + return(EINVAL); + } + + sb = &info->pshm_stat; + + bzero(sb, sizeof(struct vinfo_stat)); + sb->vst_mode = pinfo->pshm_mode; + sb->vst_uid = pinfo->pshm_uid; + sb->vst_gid = pinfo->pshm_gid; + sb->vst_size = pinfo->pshm_length; + + info->pshm_mappaddr = pshm->mapp_addr; + bcopy(&pinfo->pshm_name[0], &info->pshm_name[0], PSHMNAMLEN+1); + + PSHM_SUBSYS_UNLOCK(); + return(0); +} + +#if CONFIG_MACF +void +pshm_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx) { - return(EOPNOTSUPP); + struct pshmnode *pnode; + struct pshminfo *pshm; + + PSHM_SUBSYS_LOCK(); + pnode = (struct pshmnode *)fp->f_fglob->fg_data; + if (pnode != NULL) { + pshm = pnode->pinfo; + if (pshm != NULL) + mac_posixshm_vnode_label_associate( + vfs_context_ucred(ctx), pshm, pshm->pshm_label, + vp, vp->v_label); + } + PSHM_SUBSYS_UNLOCK(); } +#endif
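
A note on the lookup path added above: shm_open() and shm_unlink() both hash the segment name with the same position-weighted byte sum before probing the cache through PSHMHASH(). Below is a minimal standalone sketch of that computation; the helper name pshm_name_hash and the exact integer width are mine, not part of the kernel sources.

#include <stddef.h>

/*
 * Mirrors the loop "nd.pshm_hash += (unsigned char)*cp * i" used in
 * shm_open()/shm_unlink(): each byte is weighted by its 1-based position.
 * The cache bucket is then pshmhashtbl[hash & pshmhash], per PSHMHASH().
 */
static unsigned long
pshm_name_hash(const char *name, size_t namelen)
{
	unsigned long hash = 0;
	const char *cp;
	size_t i;

	for (cp = name, i = 1; *cp != 0 && i <= namelen; i++, cp++) {
		hash += (unsigned char)*cp * (unsigned long)i;
	}
	return hash;
}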
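
The rewritten pshm_mmap() above no longer maps one monolithic memory object; it walks the pshmobj chain (each entry at most ANON_MAX_SIZE bytes, as built by pshm_truncate()) and maps a slice of every chunk that overlaps the request. The offset arithmetic is easy to lose among the vm_map_enter_mem_object() calls, so here is a self-contained sketch of just that slicing, with made-up chunk sizes; it is an illustration, not kernel code.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the slicing loop in pshm_mmap(): skip chunks that end before the
 * requested file offset, then take "chunk size minus the offset into that
 * chunk", clipped to whatever is left of the request.
 */
static void
walk_chunks(const uint64_t *chunk_sizes, int nchunks,
    uint64_t file_pos, uint64_t user_size)
{
	uint64_t map_pos = 0;	/* file offset where the current chunk starts */
	int i;

	for (i = 0; i < nchunks && user_size != 0;
	    map_pos += chunk_sizes[i], i++) {
		if (file_pos >= map_pos + chunk_sizes[i]) {
			continue;	/* chunk lies wholly before the request */
		}
		uint64_t map_size = chunk_sizes[i] - (file_pos - map_pos);
		if (map_size > user_size) {
			map_size = user_size;
		}
		printf("chunk %d: %llu bytes at in-chunk offset %llu\n",
		    i, (unsigned long long)map_size,
		    (unsigned long long)(file_pos - map_pos));
		file_pos += map_size;
		user_size -= map_size;
	}
}

int
main(void)
{
	/* e.g. two 16 MiB backing chunks, request 20 MiB starting at offset 8 MiB */
	uint64_t sizes[] = { 16 << 20, 16 << 20 };
	walk_chunks(sizes, 2, 8 << 20, 20 << 20);
	return 0;
}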
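
Finally, for orientation, a hedged userland sketch of the calls that exercise the kernel paths changed in this diff: shm_open() enters shm_open() above, ftruncate() on the descriptor reaches pshm_truncate(), mmap() reaches pshm_mmap(), and shm_unlink() reaches shm_unlink(). The segment name and size below are arbitrary; note that this implementation allows the object to be sized only once (a second ftruncate() fails because of the PSHM_DEFINED/PSHM_ALLOCATED check).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	const char *name = "/example-shm";	/* hypothetical segment name */
	const size_t len = 4096;
	int fd;
	void *p;

	fd = shm_open(name, O_CREAT | O_RDWR, 0600);	/* -> shm_open() in this file */
	if (fd < 0) {
		perror("shm_open");
		return 1;
	}
	if (ftruncate(fd, (off_t)len) != 0) {		/* -> pshm_truncate() */
		perror("ftruncate");
		return 1;
	}
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);	/* -> pshm_mmap() */
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memcpy(p, "hello", 6);	/* visible to any other process mapping the same name */

	(void)munmap(p, len);
	(void)close(fd);	/* drops this fd's reference (pshm_closefile/pshm_close) */
	(void)shm_unlink(name);	/* removes the name (shm_unlink/pshm_unlink_internal) */
	return 0;
}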