/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1996-1998 Apple Computer, Inc.
 * All Rights Reserved.
 */
/*
 * posix_shm.c : Support for POSIX shared memory APIs
 *
 * Author: Ananthakrishna Ramesh
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/filedesc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc_info.h>
#include <security/audit/audit.h>
#include <security/mac_framework.h>

#include <mach/mach_types.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#define f_flag f_fglob->fg_flag
#define f_type f_fglob->fg_ops->fo_type
#define f_msgcount f_fglob->fg_msgcount
#define f_cred f_fglob->fg_cred
#define f_ops f_fglob->fg_ops
#define f_offset f_fglob->fg_offset
#define f_data f_fglob->fg_data

#define PSHMNAMLEN      31      /* maximum name segment length we bother with */
struct pshmobj {
    void                    *pshmo_memobject;
    memory_object_size_t    pshmo_size;
    struct pshmobj          *pshmo_next;
};

struct pshminfo {
    unsigned int    pshm_flags;
    unsigned int    pshm_usecount;
    off_t           pshm_length;
    mode_t          pshm_mode;
    uid_t           pshm_uid;
    gid_t           pshm_gid;
    char            pshm_name[PSHMNAMLEN + 1];      /* segment name */
    struct pshmobj  *pshm_memobjects;
#if DIAGNOSTIC
    unsigned int    pshm_readcount;
    unsigned int    pshm_writecount;
#endif /* DIAGNOSTIC */
    struct label    *pshm_label;
};
#define PSHMINFO_NULL (struct pshminfo *)0
#define PSHM_NONE       0x001
#define PSHM_DEFINED    0x002
#define PSHM_ALLOCATED  0x004
#define PSHM_MAPPED     0x008
#define PSHM_INUSE      0x010
#define PSHM_REMOVED    0x020
#define PSHM_INCREATE   0x040
#define PSHM_INDELETE   0x080
#define PSHM_ALLOCATING 0x100
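
/*
 * Rough lifecycle of the pshm_flags bits as used below (summary added for
 * clarity): shm_open() with O_CREAT marks a new object PSHM_DEFINED |
 * PSHM_INCREATE; ftruncate() moves it through PSHM_ALLOCATING to
 * PSHM_ALLOCATED; mmap() adds PSHM_MAPPED | PSHM_INUSE; shm_unlink() marks
 * it PSHM_INDELETE and then PSHM_REMOVED, after which the object is torn
 * down once its use count drops to zero.
 */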
struct pshmcache {
    LIST_ENTRY(pshmcache) pshm_hash;            /* hash chain */
    struct pshminfo *pshminfo;                  /* vnode the name refers to */
    int     pshm_nlen;                          /* length of name */
    char    pshm_name[PSHMNAMLEN + 1];          /* segment name */
};
#define PSHMCACHE_NULL (struct pshmcache *)0
struct pshmstats {
    long    goodhits;       /* hits that we can really use */
    long    neghits;        /* negative hits that we can use */
    long    badhits;        /* hits we must drop */
    long    falsehits;      /* hits with id mismatch */
    long    miss;           /* misses */
    long    longnames;      /* long names that ignore cache */
};
struct pshmname {
    char    *pshm_nameptr;  /* pointer to looked up name */
    long    pshm_namelen;   /* length of looked up component */
    u_long  pshm_hash;      /* hash value of looked up name */
};
struct pshmnode {
    off_t           mapp_addr;
    user_size_t     map_size;       /* XXX unused ? */
    struct pshminfo *pinfo;
    unsigned int    pshm_usecount;
#if DIAGNOSTIC
    unsigned int    readcnt;
    unsigned int    writecnt;
#endif /* DIAGNOSTIC */
};
#define PSHMNODE_NULL (struct pshmnode *)0
#define PSHMHASH(pnp) \
    (&pshmhashtbl[(pnp)->pshm_hash & pshmhash])

LIST_HEAD(pshmhashhead, pshmcache) *pshmhashtbl;        /* Hash Table */
u_long  pshmhash;               /* size of hash table - 1 */
long    pshmnument;             /* number of cache entries allocated */
struct pshmstats pshmstats;     /* cache effectiveness statistics */
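
/*
 * Illustrative sketch (added; not part of the original file): the name hash
 * used with PSHMHASH() is the simple position-weighted sum computed inline
 * in shm_open()/shm_unlink() below; a hypothetical standalone version would
 * look like this.
 */
#if 0
static u_long
pshm_hash_name(const char *name, u_long table_mask)
{
    u_long hash = 0;
    u_long i;

    /* same arithmetic as the hash loops in shm_open()/shm_unlink() */
    for (i = 1; *name != 0; i++, name++)
        hash += (unsigned char)*name * i;

    /* PSHMHASH() masks with pshmhash (table size - 1) to pick a bucket */
    return (hash & table_mask);
}
#endif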
static int pshm_read(struct fileproc *fp, struct uio *uio,
    int flags, vfs_context_t ctx);
static int pshm_write(struct fileproc *fp, struct uio *uio,
    int flags, vfs_context_t ctx);
static int pshm_ioctl(struct fileproc *fp, u_long com,
    caddr_t data, vfs_context_t ctx);
static int pshm_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx);
static int pshm_close(struct pshminfo *pinfo, int dropref);
static int pshm_closefile(struct fileglob *fg, vfs_context_t ctx);

static int pshm_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx);

int pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, proc_t p);
static int pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp);
static void pshm_cache_delete(struct pshmcache *pcp);
#if NOT_USED
static void pshm_cache_purge(void);
#endif /* NOT_USED */
static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
    struct pshmcache **pcache, int addref);

static const struct fileops pshmops = {
static lck_grp_t       *psx_shm_subsys_lck_grp;
static lck_grp_attr_t  *psx_shm_subsys_lck_grp_attr;
static lck_attr_t      *psx_shm_subsys_lck_attr;
static lck_mtx_t        psx_shm_subsys_mutex;

#define PSHM_SUBSYS_LOCK() lck_mtx_lock(& psx_shm_subsys_mutex)
#define PSHM_SUBSYS_UNLOCK() lck_mtx_unlock(& psx_shm_subsys_mutex)
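
/*
 * Usage note (added): psx_shm_subsys_mutex serializes the name cache and
 * all pshminfo flag/use-count updates; the pattern throughout this file is
 *
 *      PSHM_SUBSYS_LOCK();
 *      error = pshm_cache_search(&pinfo, &nd, &pcache, addref);
 *      ...examine or update pinfo...
 *      PSHM_SUBSYS_UNLOCK();
 *
 * with the lock dropped around copyin and Mach memory-entry/VM calls.
 */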
/* Initialize the mutex governing access to the posix shm subsystem */
__private_extern__ void
pshm_lock_init( void )
{
    psx_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

    psx_shm_subsys_lck_grp = lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr);

    psx_shm_subsys_lck_attr = lck_attr_alloc_init();
    lck_mtx_init(& psx_shm_subsys_mutex, psx_shm_subsys_lck_grp, psx_shm_subsys_lck_attr);
}
/*
 * Lookup an entry in the cache.
 *
 * A status of -1 is returned if a match is found.
 * If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned.  If the lookup
 * fails, a status of zero is returned.
 */
static int
pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
    struct pshmcache **pcache, int addref)
{
    struct pshmcache *pcp, *nnp;
    struct pshmhashhead *pcpp;

    if (pnp->pshm_namelen > PSHMNAMLEN) {
        pshmstats.longnames++;
        return (0);
    }

    pcpp = PSHMHASH(pnp);
    for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) {
        nnp = pcp->pshm_hash.le_next;
        if (pcp->pshm_nlen == pnp->pshm_namelen &&
            !bcmp(pcp->pshm_name, pnp->pshm_nameptr, (u_int)pcp->pshm_nlen))
            break;
    }
    /* We found a "positive" match, return the pshminfo */
    pshmstats.goodhits++;

    *pshmp = pcp->pshminfo;

    if (addref)
        pcp->pshminfo->pshm_usecount++;

    /*
     * We found a "negative" match; ENOENT notifies the client of this match.
     * The nc_vpid field records whether this is a whiteout.
     */
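
/*
 * Caller-side sketch (added for clarity), mirroring how shm_open() and
 * shm_unlink() below consume the return value:
 *
 *      error = pshm_cache_search(&pinfo, &nd, &pcache, addref);
 *      if (error == -1)          -> positive hit: pinfo is valid (referenced if addref)
 *      else if (error == ENOENT) -> negative entry: the name is known not to exist
 *      else                      -> miss (0): nothing cached for this name
 */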
/*
 * Add an entry to the cache.
 * XXX should be static?
 */
static int
pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp)
{
    struct pshmhashhead *pcpp;
    struct pshminfo *dpinfo;
    struct pshmcache *dpcp;

    if (pnp->pshm_namelen > PSHMNAMLEN)
        panic("cache_enter: name too long");
    /* if the entry has already been added by someone else, return */
    if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == -1) {
        return (EEXIST);
    }

    /*
     * Fill in cache info; if vp is NULL this is a "negative" cache entry.
     * For negative entries, we have to record whether it is a whiteout;
     * the whiteout flag is stored in the nc_vpid field, which is
     * otherwise unused.
     */
    pcp->pshminfo = pshmp;
    pcp->pshm_nlen = pnp->pshm_namelen;
    bcopy(pnp->pshm_nameptr, pcp->pshm_name, (unsigned)pcp->pshm_nlen);
    pcpp = PSHMHASH(pnp);
#if DIAGNOSTIC
    {
        struct pshmcache *p;

        for (p = pcpp->lh_first; p != 0; p = p->pshm_hash.le_next)
            if (p == pcp)
                panic("cache_enter: duplicate");
    }
#endif
    LIST_INSERT_HEAD(pcpp, pcp, pshm_hash);
/*
 * Name cache initialization, from vfs_init() when we are booting.
 */
void
pshm_cache_init(void)
{
    pshmhashtbl = hashinit(desiredvnodes / 8, M_SHM, &pshmhash);
}
#if NOT_USED
/*
 * Invalidate all entries to a particular vnode.
 *
 * We actually just increment the v_id; that will do it.  The entries will
 * be purged by lookup as they get found.  If the v_id wraps around, we
 * need to ditch the entire cache to avoid confusion.  No valid vnode will
 * ever have (v_id == 0).
 */
static void
pshm_cache_purge(void)
{
    struct pshmcache *pcp;
    struct pshmhashhead *pcpp;

    for (pcpp = &pshmhashtbl[pshmhash]; pcpp >= pshmhashtbl; pcpp--) {
        while ( (pcp = pcpp->lh_first) )
            pshm_cache_delete(pcp);
    }
}
#endif /* NOT_USED */
static void
pshm_cache_delete(struct pshmcache *pcp)
{
#if DIAGNOSTIC
    if (pcp->pshm_hash.le_prev == 0)
        panic("namecache purge le_prev");
    if (pcp->pshm_hash.le_next == pcp)
        panic("namecache purge le_next");
#endif /* DIAGNOSTIC */
    LIST_REMOVE(pcp, pshm_hash);
    pcp->pshm_hash.le_prev = 0;
int
shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval)
{
    struct pshminfo *pinfo;
    struct fileproc *fp = NULL;
    struct pshminfo *new_pinfo = PSHMINFO_NULL;
    struct pshmnode *new_pnode = PSHMNODE_NULL;
    struct pshmcache *pcache = PSHMCACHE_NULL;  /* ignored on return */
    size_t pathlen, plen;
    int cmode = uap->mode;
    struct pshmcache *pcp = NULL;

    AUDIT_ARG(fflags, uap->oflag);
    AUDIT_ARG(mode, uap->mode);

    pinfo = PSHMINFO_NULL;
    /*
     * Preallocate everything we might need up front to avoid taking
     * and dropping the lock, opening us up to race conditions.
     */
    MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);

    pathlen = MAXPATHLEN;
    error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);

    AUDIT_ARG(text, pnbuf);
    if (pathlen > PSHMNAMLEN) {
        error = ENAMETOOLONG;

#ifdef PSXSHM_NAME_RESTRICT
    if (*nameptr == '/') {
        while (*(nameptr++) == '/') {
#endif /* PSXSHM_NAME_RESTRICT */

    nd.pshm_nameptr = nameptr;
    nd.pshm_namelen = plen;

    for (cp = nameptr, i = 1; *cp != 0 && i <= plen; i++, cp++) {
        nd.pshm_hash += (unsigned char)*cp * i;
    }
    /*
     * attempt to allocate a new fp; if unsuccessful, the fp will be
     * left unmodified (NULL).
     */
    error = falloc(p, &fp, &indx, vfs_context_current());

    fmode = FFLAGS(uap->oflag);
    if ((fmode & (FREAD | FWRITE)) == 0) {

    /*
     * We allocate a new entry if we are less than the maximum
     * allowed and the one at the front of the LRU list is in use.
     * Otherwise we use the one at the front of the LRU list.
     */
    MALLOC(pcp, struct pshmcache *, sizeof(struct pshmcache), M_SHM, M_WAITOK|M_ZERO);

    MALLOC(new_pinfo, struct pshminfo *, sizeof(struct pshminfo), M_SHM, M_WAITOK|M_ZERO);
    if (new_pinfo == PSHMINFO_NULL) {

    mac_posixshm_label_init(new_pinfo);

    MALLOC(new_pnode, struct pshmnode *, sizeof(struct pshmnode), M_SHM, M_WAITOK|M_ZERO);
    if (new_pnode == PSHMNODE_NULL) {

    /*
     * If we find the entry in the cache, this will take a reference,
     * allowing us to unlock it for the permissions check.
     */
    error = pshm_cache_search(&pinfo, &nd, &pcache, 1);

    PSHM_SUBSYS_UNLOCK();

    if (error == ENOENT) {
    if (fmode & O_CREAT) {
        /* create a new one (commit the allocation) */
        pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE;
        pinfo->pshm_usecount = 1;       /* existence reference */
        pinfo->pshm_mode = cmode;
        pinfo->pshm_uid = kauth_getuid();
        pinfo->pshm_gid = kauth_getgid();
        bcopy(pnbuf, &pinfo->pshm_name[0], pathlen);
        pinfo->pshm_name[pathlen] = 0;

        error = mac_posixshm_check_create(kauth_cred_get(), nameptr);

        mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr);

    if (fmode & O_CREAT) {
        if ((fmode & O_EXCL)) {
            AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
                pinfo->pshm_gid, pinfo->pshm_mode);

            /* shm obj exists and opened O_EXCL */

        if (pinfo->pshm_flags & PSHM_INDELETE) {

        AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
            pinfo->pshm_gid, pinfo->pshm_mode);

        if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {

        if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {

    if (!(fmode & O_CREAT)) {
        /* O_CREAT is not set and the object does not exist */

        if (pinfo->pshm_flags & PSHM_INDELETE) {

        if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {

        if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {

    if (fmode & O_TRUNC) {

    pinfo->pshm_writecount++;

    pinfo->pshm_readcount++;

    /* if successful, this will consume the pcp */
    if ((error = pshm_cache_add(pinfo, &nd, pcp))) {

    /*
     * add reference for the new entry; otherwise, we obtained
     * one from the cache hit earlier.
     */
    pinfo->pshm_usecount++;

    pinfo->pshm_flags &= ~PSHM_INCREATE;
    new_pnode->pinfo = pinfo;

    PSHM_SUBSYS_UNLOCK();
    /*
     * if incache, we did not use the new pcp or new_pinfo and must
     * free them
     */
    if (new_pinfo != PSHMINFO_NULL) {
        mac_posixshm_label_destroy(new_pinfo);
        FREE(new_pinfo, M_SHM);
    }

    fp->f_flag = fmode & FMASK;
    fp->f_ops = &pshmops;
    fp->f_data = (caddr_t)new_pnode;
    *fdflags(p, indx) |= UF_EXCLOSE;
    procfdtbl_releasefd(p, indx, NULL);
    fp_drop(p, indx, fp, 1);

    FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
    PSHM_SUBSYS_UNLOCK();

    /*
     * If we obtained the entry from the cache, we need to drop the
     * reference; holding the reference may have prevented unlinking,
     * so we need to call pshm_close() to get the full effect.
     */
    pshm_close(pinfo, 1);
    PSHM_SUBSYS_UNLOCK();

    if (new_pnode != PSHMNODE_NULL)
        FREE(new_pnode, M_SHM);

    fp_free(p, indx, fp);

    if (new_pinfo != PSHMINFO_NULL) {
        mac_posixshm_label_destroy(new_pinfo);
        FREE(new_pinfo, M_SHM);
    }

    FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
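
/*
 * Illustrative userland sketch (added; not part of this file) of the call
 * sequence the handlers in this file service: shm_open() above,
 * ftruncate() -> pshm_truncate(), mmap() -> pshm_mmap(), and shm_unlink()
 * below.  The name and size are arbitrary examples.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static int
example_posix_shm_usage(void)
{
    int fd = shm_open("/example.shm", O_CREAT | O_RDWR, 0600);
    if (fd < 0)
        return (-1);

    /* establishes the backing memory entries via pshm_truncate() */
    if (ftruncate(fd, 4096) != 0) {
        close(fd);
        return (-1);
    }

    /* builds the user mapping via pshm_mmap() */
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) {
        close(fd);
        return (-1);
    }

    /* ... use the shared region ... */

    munmap(p, 4096);
    close(fd);
    shm_unlink("/example.shm");     /* releases the existence reference */
    return (0);
}
#endif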
int
pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd,
    off_t length, __unused int32_t *retval)
{
    struct pshminfo *pinfo;
    struct pshmnode *pnode;
    mem_entry_name_port_t mem_object;
    mach_vm_size_t total_size, alloc_size;
    memory_object_size_t mosize;
    struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p;

    user_map = current_map();

    if (fp->f_type != DTYPE_PSXSHM) {

    if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL)

    if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
        PSHM_SUBSYS_UNLOCK();

    if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATING | PSHM_ALLOCATED))

        PSHM_SUBSYS_UNLOCK();

    error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, length);

        PSHM_SUBSYS_UNLOCK();

    pinfo->pshm_flags |= PSHM_ALLOCATING;
    total_size = vm_map_round_page(length,
                                   vm_map_page_mask(user_map));
    pshmobj_next_p = &pinfo->pshm_memobjects;
    for (alloc_size = 0;
         alloc_size < total_size;
         alloc_size += mosize) {

        PSHM_SUBSYS_UNLOCK();

        mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE);
        kret = mach_make_memory_entry_64(
            MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,

        if (kret != KERN_SUCCESS)

        MALLOC(pshmobj, struct pshmobj *, sizeof (struct pshmobj),
               M_SHM, M_WAITOK);
        if (pshmobj == NULL) {
            kret = KERN_NO_SPACE;
            mach_memory_entry_port_release(mem_object);

        pshmobj->pshmo_memobject = (void *)mem_object;
        pshmobj->pshmo_size = mosize;
        pshmobj->pshmo_next = NULL;

        *pshmobj_next_p = pshmobj;
        pshmobj_next_p = &pshmobj->pshmo_next;

    pinfo->pshm_flags = PSHM_ALLOCATED;
    pinfo->pshm_length = total_size;
    PSHM_SUBSYS_UNLOCK();
    for (pshmobj = pinfo->pshm_memobjects;
         pshmobj != NULL;
         pshmobj = pshmobj_next) {
        pshmobj_next = pshmobj->pshmo_next;
        mach_memory_entry_port_release(pshmobj->pshmo_memobject);
        FREE(pshmobj, M_SHM);
    }
    pinfo->pshm_memobjects = NULL;
    pinfo->pshm_flags &= ~PSHM_ALLOCATING;
    PSHM_SUBSYS_UNLOCK();

    switch (kret) {
    case KERN_INVALID_ADDRESS:

    case KERN_PROTECTION_FAILURE:
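
/*
 * Note (added): pshm_truncate() rounds the requested length to a page
 * boundary and backs it with a chain of named memory entries of at most
 * ANON_MAX_SIZE bytes each (mosize = MIN(total_size - alloc_size,
 * ANON_MAX_SIZE)).  A request slightly larger than ANON_MAX_SIZE therefore
 * yields one full-size pshmobj plus one small one; pshm_mmap() below walks
 * this chain to build a single contiguous user mapping.
 */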
int
pshm_stat(struct pshmnode *pnode, void *ub, int isstat64)
{
    struct stat *sb = (struct stat *)0;         /* warning avoidance ; protected by isstat64 */
    struct stat64 *sb64 = (struct stat64 *)0;   /* warning avoidance ; protected by isstat64 */
    struct pshminfo *pinfo;

    if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
        PSHM_SUBSYS_UNLOCK();

    error = mac_posixshm_check_stat(kauth_cred_get(), pinfo);

        PSHM_SUBSYS_UNLOCK();

    if (isstat64 != 0) {
        sb64 = (struct stat64 *)ub;
        bzero(sb64, sizeof(struct stat64));
        sb64->st_mode = pinfo->pshm_mode;
        sb64->st_uid = pinfo->pshm_uid;
        sb64->st_gid = pinfo->pshm_gid;
        sb64->st_size = pinfo->pshm_length;
    } else {
        sb = (struct stat *)ub;
        bzero(sb, sizeof(struct stat));
        sb->st_mode = pinfo->pshm_mode;
        sb->st_uid = pinfo->pshm_uid;
        sb->st_gid = pinfo->pshm_gid;
        sb->st_size = pinfo->pshm_length;
    }
    PSHM_SUBSYS_UNLOCK();
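
/*
 * Illustrative userland counterpart (added): fstat() on a descriptor from
 * shm_open() reaches pshm_stat(), so st_size reports the length set by
 * ftruncate(), e.g.
 *
 *      struct stat st;
 *      if (fstat(fd, &st) == 0)
 *              printf("%lld\n", (long long)st.st_size);
 */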
/*
 * This is called only from shm_open which holds pshm_lock();
 * XXX This code is repeated many times
 */
int
pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, __unused proc_t p)
{
    int mode_req = ((mode & FREAD) ? S_IRUSR : 0) |
        ((mode & FWRITE) ? S_IWUSR : 0);

    /* Otherwise, user id 0 always gets access. */
    if (!suser(cred, NULL))
        return (0);

    return (posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req));
}
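
/*
 * Sketch of the check above (added): FREAD maps to S_IRUSR and FWRITE to
 * S_IWUSR, and posix_cred_access() scales those owner-class bits to
 * owner/group/other against pshm_uid/pshm_gid/pshm_mode; suser()
 * short-circuits the check so that user id 0 is always granted access.
 */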
int
pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct fileproc *fp, off_t pageoff)
{
    vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr;
    vm_map_size_t user_size = (vm_map_size_t)uap->len;
    vm_map_offset_t user_start_addr;
    vm_map_size_t map_size, mapped_size;
    int prot = uap->prot;
    int flags = uap->flags;
    vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
    vm_object_offset_t map_pos;
    struct pshminfo *pinfo;
    struct pshmnode *pnode;
    struct pshmobj *pshmobj;
    if ((flags & MAP_SHARED) == 0)

    if ((prot & PROT_WRITE) && ((fp->f_flag & FWRITE) == 0)) {

    if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL)

    if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
        PSHM_SUBSYS_UNLOCK();

    if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) {
        PSHM_SUBSYS_UNLOCK();

    if ((off_t)user_size > pinfo->pshm_length) {
        PSHM_SUBSYS_UNLOCK();

    if ((off_t)(user_size + file_pos) > pinfo->pshm_length) {
        PSHM_SUBSYS_UNLOCK();

    if ((pshmobj = pinfo->pshm_memobjects) == NULL) {
        PSHM_SUBSYS_UNLOCK();

    error = mac_posixshm_check_mmap(kauth_cred_get(), pinfo, prot, flags);

        PSHM_SUBSYS_UNLOCK();
908 user_map
= current_map();
910 if ((flags
& MAP_FIXED
) == 0) {
911 alloc_flags
= VM_FLAGS_ANYWHERE
;
912 user_addr
= vm_map_round_page(user_addr
,
913 vm_map_page_mask(user_map
));
915 if (user_addr
!= vm_map_round_page(user_addr
,
916 vm_map_page_mask(user_map
)))
919 * We do not get rid of the existing mappings here because
920 * it wouldn't be atomic (see comment in mmap()). We let
921 * Mach VM know that we want it to replace any existing
922 * mapping with the new one.
924 alloc_flags
= VM_FLAGS_FIXED
| VM_FLAGS_OVERWRITE
;
930 /* reserver the entire space first... */
931 kret
= vm_map_enter_mem_object(user_map
,
942 user_start_addr
= user_addr
;
943 if (kret
!= KERN_SUCCESS
) {
947 /* ... and overwrite with the real mappings */
948 for (map_pos
= 0, pshmobj
= pinfo
->pshm_memobjects
;
950 map_pos
+= pshmobj
->pshmo_size
, pshmobj
= pshmobj
->pshmo_next
) {
951 if (pshmobj
== NULL
) {
952 /* nothing there to map !? */
955 if (file_pos
>= map_pos
+ pshmobj
->pshmo_size
) {
958 map_size
= pshmobj
->pshmo_size
- (file_pos
- map_pos
);
959 if (map_size
> user_size
) {
960 map_size
= user_size
;
962 kret
= vm_map_enter_mem_object(
967 VM_FLAGS_FIXED
| VM_FLAGS_OVERWRITE
,
968 pshmobj
->pshmo_memobject
,
974 if (kret
!= KERN_SUCCESS
)
977 user_addr
+= map_size
;
978 user_size
-= map_size
;
979 mapped_size
+= map_size
;
980 file_pos
+= map_size
;
984 pnode
->mapp_addr
= user_start_addr
;
985 pnode
->map_size
= mapped_size
;
986 pinfo
->pshm_flags
|= (PSHM_MAPPED
| PSHM_INUSE
);
987 PSHM_SUBSYS_UNLOCK();
989 if (kret
!= KERN_SUCCESS
) {
990 if (mapped_size
!= 0) {
991 (void) mach_vm_deallocate(current_map(),
999 *retval
= (user_start_addr
+ pageoff
);
1001 case KERN_INVALID_ADDRESS
:
1004 case KERN_PROTECTION_FAILURE
:
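
/*
 * Note (added): pshm_mmap() first reserves the entire requested range with a
 * single vm_map_enter_mem_object() call, then maps each backing pshmobj
 * chunk over that reservation with VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, so
 * the caller sees one contiguous mapping even though the object may be
 * backed by several memory entries created in pshm_truncate().
 */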
int
shm_unlink(__unused proc_t p, struct shm_unlink_args *uap,
    __unused int32_t *retval)
{
    struct pshminfo *pinfo;
    size_t pathlen, plen;
    struct pshmcache *pcache = PSHMCACHE_NULL;
    struct pshmobj *pshmobj, *pshmobj_next;

    pinfo = PSHMINFO_NULL;

    MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
    if (pnbuf == NULL) {
        return (ENOSPC);        /* XXX non-standard */
    }
    pathlen = MAXPATHLEN;
    error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);

    AUDIT_ARG(text, pnbuf);
    if (pathlen > PSHMNAMLEN) {
        error = ENAMETOOLONG;

#ifdef PSXSHM_NAME_RESTRICT
    if (*nameptr == '/') {
        while (*(nameptr++) == '/') {
#endif /* PSXSHM_NAME_RESTRICT */

    nd.pshm_nameptr = nameptr;
    nd.pshm_namelen = plen;

    for (cp = nameptr, i = 1; *cp != 0 && i <= plen; i++, cp++) {
        nd.pshm_hash += (unsigned char)*cp * i;
    }
    error = pshm_cache_search(&pinfo, &nd, &pcache, 0);

    if (error == ENOENT) {
        PSHM_SUBSYS_UNLOCK();

    /* During unlink, lookup failure also implies ENOENT */

        PSHM_SUBSYS_UNLOCK();

    if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) == 0) {
        PSHM_SUBSYS_UNLOCK();

    if (pinfo->pshm_flags & PSHM_ALLOCATING) {
        /* XXX should we wait for the flag to clear and then proceed? */
        PSHM_SUBSYS_UNLOCK();

    if (pinfo->pshm_flags & PSHM_INDELETE) {
        PSHM_SUBSYS_UNLOCK();

    error = mac_posixshm_check_unlink(kauth_cred_get(), pinfo, nameptr);

        PSHM_SUBSYS_UNLOCK();

    AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid,
        pinfo->pshm_mode);

    /*
     * Following file semantics, unlink should be allowed
     * for users with write permission only.
     */
    if ((error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p))) {
        PSHM_SUBSYS_UNLOCK();

    pinfo->pshm_flags |= PSHM_INDELETE;
    pshm_cache_delete(pcache);
    pinfo->pshm_flags |= PSHM_REMOVED;
    /* release the existence reference */
    if (!--pinfo->pshm_usecount) {
        mac_posixshm_label_destroy(pinfo);
        PSHM_SUBSYS_UNLOCK();
        /*
         * If this is the last reference going away on the object,
         * then we need to destroy the backing object.  The name
         * has an implied but uncounted reference on the object,
         * once it's created, since it's used as a rendezvous, and
         * therefore may be subsequently reopened.
         */
        for (pshmobj = pinfo->pshm_memobjects;
             pshmobj != NULL;
             pshmobj = pshmobj_next) {
            mach_memory_entry_port_release(pshmobj->pshmo_memobject);
            pshmobj_next = pshmobj->pshmo_next;
            FREE(pshmobj, M_SHM);
        }

    PSHM_SUBSYS_UNLOCK();

    FREE(pcache, M_SHM);

    FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
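
/*
 * Note (added): shm_unlink() removes only the name and drops the "existence"
 * reference taken when the object was created; open descriptors and live
 * mappings hold their own references, so the backing memory entries are
 * released here only if that was the last reference, and otherwise in
 * pshm_close() when the final reference goes away.
 */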
/* already called locked */
static int
pshm_close(struct pshminfo *pinfo, int dropref)
{
    struct pshmobj *pshmobj, *pshmobj_next;

    /*
     * If we are dropping the reference we took on the cache object, don't
     * enforce the allocation requirement.
     */
    if (!dropref && ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED)) {

#if DIAGNOSTIC
    if (!pinfo->pshm_usecount) {
        kprintf("negative usecount in pshm_close\n");
    }
#endif /* DIAGNOSTIC */
    pinfo->pshm_usecount--;     /* release this fd's reference */

    if ((pinfo->pshm_flags & PSHM_REMOVED) && !pinfo->pshm_usecount) {
        mac_posixshm_label_destroy(pinfo);
        PSHM_SUBSYS_UNLOCK();
        /*
         * If this is the last reference going away on the object,
         * then we need to destroy the backing object.
         */
        for (pshmobj = pinfo->pshm_memobjects;
             pshmobj != NULL;
             pshmobj = pshmobj_next) {
            mach_memory_entry_port_release(pshmobj->pshmo_memobject);
            pshmobj_next = pshmobj->pshmo_next;
            FREE(pshmobj, M_SHM);
        }
/* vfs_context_t passed to match prototype for struct fileops */
static int
pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx)
{
    struct pshmnode *pnode;

    if ((pnode = (struct pshmnode *)fg->fg_data) != NULL) {
        if (pnode->pinfo != PSHMINFO_NULL) {
            error = pshm_close(pnode->pinfo, 0);
        }

    PSHM_SUBSYS_UNLOCK();
static int
pshm_read(__unused struct fileproc *fp, __unused struct uio *uio,
    __unused int flags, __unused vfs_context_t ctx)
{
    return (ENOTSUP);
}

static int
pshm_write(__unused struct fileproc *fp, __unused struct uio *uio,
    __unused int flags, __unused vfs_context_t ctx)
{
    return (ENOTSUP);
}

static int
pshm_ioctl(__unused struct fileproc *fp, __unused u_long com,
    __unused caddr_t data, __unused vfs_context_t ctx)
{
    return (ENOTSUP);
}

static int
pshm_select(__unused struct fileproc *fp, __unused int which, __unused void *wql,
    __unused vfs_context_t ctx)
{
    return (ENOTSUP);
}

static int
pshm_kqfilter(__unused struct fileproc *fp, __unused struct knote *kn,
    __unused vfs_context_t ctx)
{
    return (ENOTSUP);
}
int
fill_pshminfo(struct pshmnode *pshm, struct pshm_info *info)
{
    struct pshminfo *pinfo;
    struct vinfo_stat *sb;

    if ((pinfo = pshm->pinfo) == PSHMINFO_NULL) {
        PSHM_SUBSYS_UNLOCK();

    sb = &info->pshm_stat;

    bzero(sb, sizeof(struct vinfo_stat));
    sb->vst_mode = pinfo->pshm_mode;
    sb->vst_uid = pinfo->pshm_uid;
    sb->vst_gid = pinfo->pshm_gid;
    sb->vst_size = pinfo->pshm_length;

    info->pshm_mappaddr = pshm->mapp_addr;
    bcopy(&pinfo->pshm_name[0], &info->pshm_name[0], PSHMNAMLEN + 1);

    PSHM_SUBSYS_UNLOCK();
void
pshm_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx)
{
    struct pshmnode *pnode;
    struct pshminfo *pshm;

    pnode = (struct pshmnode *)fp->f_fglob->fg_data;
    if (pnode != NULL) {
        pshm = pnode->pinfo;

        mac_posixshm_vnode_label_associate(
            vfs_context_ucred(ctx), pshm, pshm->pshm_label,

    PSHM_SUBSYS_UNLOCK();