/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1996-1998 Apple Computer, Inc.
 * All Rights Reserved.
 */

/*
 * posix_shm.c : Support for POSIX shared memory APIs
 *
 * Author: Ananthakrishna Ramesh
 */

/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
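/*
 * For orientation, a typical userspace consumer of the syscalls implemented
 * here looks roughly like the sketch below (illustrative only, not part of
 * this file; error handling omitted and the name "/example.shm" is
 * arbitrary).  ftruncate() on the descriptor reaches pshm_truncate() and
 * mmap() reaches pshm_mmap() below.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/example.shm", O_CREAT | O_RDWR, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 'x';
 *	munmap(p, 4096);
 *	close(fd);
 *	shm_unlink("/example.shm");
 */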
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/filedesc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc_info.h>
#include <security/audit/audit.h>
#include <security/mac_framework.h>

#include <mach/mach_types.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#define f_flag f_fglob->fg_flag
#define f_type f_fglob->fg_ops->fo_type
#define f_msgcount f_fglob->fg_msgcount
#define f_cred f_fglob->fg_cred
#define f_ops f_fglob->fg_ops
#define f_offset f_fglob->fg_offset
#define f_data f_fglob->fg_data

#define PSHMNAMLEN	31	/* maximum name segment length we bother with */
struct pshmobj {
	void			*pshmo_memobject;
	memory_object_size_t	pshmo_size;
	struct pshmobj		*pshmo_next;
};
struct pshminfo {
	unsigned int	pshm_flags;
	unsigned int	pshm_usecount;
	off_t		pshm_length;
	mode_t		pshm_mode;
	uid_t		pshm_uid;
	gid_t		pshm_gid;
	char		pshm_name[PSHMNAMLEN + 1];	/* segment name */
	struct pshmobj	*pshm_memobjects;
#if DIAGNOSTIC
	unsigned int	pshm_readcount;
	unsigned int	pshm_writecount;
#endif /* DIAGNOSTIC */
	struct label	*pshm_label;
};
#define PSHMINFO_NULL	(struct pshminfo *)0

#define	PSHM_NONE	0x001
#define	PSHM_DEFINED	0x002
#define	PSHM_ALLOCATED	0x004
#define	PSHM_MAPPED	0x008
#define	PSHM_INUSE	0x010
#define	PSHM_REMOVED	0x020
#define	PSHM_INCREATE	0x040
#define	PSHM_INDELETE	0x080
#define	PSHM_ALLOCATING	0x100
struct pshmcache {
	LIST_ENTRY(pshmcache) pshm_hash;	/* hash chain */
	struct pshminfo	*pshminfo;		/* pshminfo the name refers to */
	int	pshm_nlen;			/* length of name */
	char	pshm_name[PSHMNAMLEN + 1];	/* segment name */
};
#define PSHMCACHE_NULL		(struct pshmcache *)0

#define PSHMCACHE_NOTFOUND	(0)
#define PSHMCACHE_FOUND		(-1)
#define PSHMCACHE_NEGATIVE	(ENOENT)
struct pshmstats {
	long	goodhits;	/* hits that we can really use */
	long	neghits;	/* negative hits that we can use */
	long	badhits;	/* hits we must drop */
	long	falsehits;	/* hits with id mismatch */
	long	miss;		/* misses */
	long	longnames;	/* long names that ignore cache */
};
struct pshmname {
	char	*pshm_nameptr;	/* pointer to looked up name */
	long	pshm_namelen;	/* length of looked up component */
	u_long	pshm_hash;	/* hash value of looked up name */
};
struct pshmnode {
	off_t		mapp_addr;
	user_size_t	map_size;	/* XXX unused ? */
	struct pshminfo	*pinfo;
	unsigned int	pshm_usecount;
	unsigned int	readcnt;
	unsigned int	writecnt;
};

#define PSHMNODE_NULL	(struct pshmnode *)0
#define PSHMHASH(pnp) \
	(&pshmhashtbl[(pnp)->pshm_hash & pshmhash])

LIST_HEAD(pshmhashhead, pshmcache) *pshmhashtbl;	/* Hash Table */
u_long	pshmhash;			/* size of hash table - 1 */
long	pshmnument;			/* number of cache entries allocated */
struct pshmstats pshmstats;		/* cache effectiveness statistics */
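/*
 * Cached names are looked up by hashing the name with the simple weighted
 * byte sum computed in shm_open()/shm_unlink() (nd.pshm_hash += byte * i)
 * and then indexing pshmhashtbl through PSHMHASH(), which masks the hash
 * with pshmhash (the table size minus one).
 */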
static int pshm_read(struct fileproc *fp, struct uio *uio,
    int flags, vfs_context_t ctx);
static int pshm_write(struct fileproc *fp, struct uio *uio,
    int flags, vfs_context_t ctx);
static int pshm_ioctl(struct fileproc *fp, u_long com,
    caddr_t data, vfs_context_t ctx);
static int pshm_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx);
static int pshm_close(struct pshminfo *pinfo, int dropref);
static int pshm_closefile(struct fileglob *fg, vfs_context_t ctx);

static int pshm_kqfilter(struct fileproc *fp, struct knote *kn,
    struct kevent_internal_s *kev, vfs_context_t ctx);

int pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, proc_t p);
int pshm_cache_purge_all(proc_t p);

static int pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp,
    struct pshmcache *pcp);
static void pshm_cache_delete(struct pshmcache *pcp);
static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
    struct pshmcache **pcache, int addref);
static int pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache);
static const struct fileops pshmops = {
	.fo_type = DTYPE_PSXSHM,
	.fo_read = pshm_read,
	.fo_write = pshm_write,
	.fo_ioctl = pshm_ioctl,
	.fo_select = pshm_select,
	.fo_close = pshm_closefile,
	.fo_kqfilter = pshm_kqfilter,
};
static lck_grp_t       *psx_shm_subsys_lck_grp;
static lck_grp_attr_t  *psx_shm_subsys_lck_grp_attr;
static lck_attr_t      *psx_shm_subsys_lck_attr;
static lck_mtx_t        psx_shm_subsys_mutex;

#define PSHM_SUBSYS_LOCK()		lck_mtx_lock(&psx_shm_subsys_mutex)
#define PSHM_SUBSYS_UNLOCK()		lck_mtx_unlock(&psx_shm_subsys_mutex)
#define PSHM_SUBSYS_ASSERT_HELD()	LCK_MTX_ASSERT(&psx_shm_subsys_mutex, LCK_MTX_ASSERT_OWNED)
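/*
 * psx_shm_subsys_mutex serializes access to the name cache, the pshminfo
 * flags and use counts, and the per-object memory entry lists; helpers
 * such as pshm_unlink_internal() expect it to be held by the caller.
 */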
/* Initialize the mutex governing access to the posix shm subsystem */
__private_extern__ void
pshm_lock_init( void )
{
	psx_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	psx_shm_subsys_lck_grp =
	    lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr);

	psx_shm_subsys_lck_attr = lck_attr_alloc_init();
	lck_mtx_init(&psx_shm_subsys_mutex, psx_shm_subsys_lck_grp, psx_shm_subsys_lck_attr);
}
/*
 * Lookup an entry in the cache.
 *
 * A status of PSHMCACHE_FOUND (-1) is returned if the name matches a
 * cached entry.  If the lookup determines that the name does not exist
 * (negative caching), a status of PSHMCACHE_NEGATIVE (ENOENT) is returned.
 * If the lookup fails, a status of PSHMCACHE_NOTFOUND (0) is returned.
 */
static int
pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
    struct pshmcache **pcache, int addref)
{
	struct pshmcache *pcp, *nnp;
	struct pshmhashhead *pcpp;

	if (pnp->pshm_namelen > PSHMNAMLEN) {
		pshmstats.longnames++;
		return PSHMCACHE_NOTFOUND;
	}

	pcpp = PSHMHASH(pnp);
	for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) {
		nnp = pcp->pshm_hash.le_next;
		if (pcp->pshm_nlen == pnp->pshm_namelen &&
		    !bcmp(pcp->pshm_name, pnp->pshm_nameptr, (u_int)pcp->pshm_nlen))
			break;
	}

	if (pcp == 0) {
		pshmstats.miss++;
		return PSHMCACHE_NOTFOUND;
	}

	/* We found a "positive" match, return the pshminfo */
	if (pcp->pshminfo) {
		pshmstats.goodhits++;

		*pshmp = pcp->pshminfo;
		*pcache = pcp;
		if (addref)
			pcp->pshminfo->pshm_usecount++;
		return PSHMCACHE_FOUND;
	}

	/*
	 * We found a "negative" match, ENOENT notifies client of this match.
	 */
	pshmstats.neghits++;
	return PSHMCACHE_NEGATIVE;
}
/*
 * Add an entry to the cache.
 * XXX should be static?
 */
static int
pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp)
{
	struct pshmhashhead *pcpp;
	struct pshminfo *dpinfo;
	struct pshmcache *dpcp;
	struct pshmcache *p;

	if (pnp->pshm_namelen > PSHMNAMLEN)
		panic("cache_enter: name too long");

	/* if the entry has already been added by someone else, return */
	if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == PSHMCACHE_FOUND) {
		return EEXIST;
	}
	pshmnument++;

	/*
	 * Fill in cache info; if pshmp is NULL this is a "negative" cache entry.
	 */
	pcp->pshminfo = pshmp;
	pcp->pshm_nlen = pnp->pshm_namelen;
	bcopy(pnp->pshm_nameptr, pcp->pshm_name, (unsigned)pcp->pshm_nlen);

	pcpp = PSHMHASH(pnp);
	for (p = pcpp->lh_first; p != 0; p = p->pshm_hash.le_next)
		if (p == pcp)
			panic("cache_enter: duplicate");

	LIST_INSERT_HEAD(pcpp, pcp, pshm_hash);
	return 0;
}
/*
 * Name cache initialization, from vfs_init() when we are booting.
 */
void
pshm_cache_init(void)
{
	pshmhashtbl = hashinit(desiredvnodes / 8, M_SHM, &pshmhash);
}
/*
 * Invalidate all entries and delete all objects associated with them.
 * All of the (non-kernel) entries are going away, so just dump them all:
 * each cached entry is unlinked, which also drops the name's existence
 * reference on the underlying object.
 */
int
pshm_cache_purge_all(__unused proc_t p)
{
	struct pshmcache *pcp, *tmppcp;
	struct pshmhashhead *pcpp;
	int error = 0;

	if (kauth_cred_issuser(kauth_cred_get()) == 0)
		return EPERM;

	PSHM_SUBSYS_LOCK();
	for (pcpp = &pshmhashtbl[pshmhash]; pcpp >= pshmhashtbl; pcpp--) {
		LIST_FOREACH_SAFE(pcp, pcpp, pshm_hash, tmppcp) {
			assert(pcp->pshm_nlen);
			error = pshm_unlink_internal(pcp->pshminfo, pcp);
			if (error)
				goto out;
		}
	}
	assert(pshmnument == 0);

out:
	PSHM_SUBSYS_UNLOCK();

	if (error)
		printf("%s: Error %d removing shm cache: %ld remain!\n",
		    __func__, error, pshmnument);
	return error;
}
static void
pshm_cache_delete(struct pshmcache *pcp)
{
#if DIAGNOSTIC
	if (pcp->pshm_hash.le_prev == 0)
		panic("namecache purge le_prev");
	if (pcp->pshm_hash.le_next == pcp)
		panic("namecache purge le_next");
#endif /* DIAGNOSTIC */
	LIST_REMOVE(pcp, pshm_hash);
	pcp->pshm_hash.le_prev = 0;
	pshmnument--;
}
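/*
 * shm_open system call: translate a name into a POSIX shared memory object.
 * The buffers, fileproc, cache entry, and pshminfo that might be needed are
 * all preallocated before the subsystem lock is taken; the name is then
 * looked up in the cache and either a new object is created (O_CREAT) or
 * the existing one is opened after MAC and mode checks, and the descriptor
 * is wired to a pshmnode through the pshmops fileops table.
 */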
int
shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval)
{
	size_t i;
	int indx, error;
	struct pshmname nd;
	struct pshminfo *pinfo;
	struct fileproc *fp = NULL;
	char *pnbuf = NULL;
	struct pshminfo *new_pinfo = PSHMINFO_NULL;
	struct pshmnode *new_pnode = PSHMNODE_NULL;
	struct pshmcache *pcache = PSHMCACHE_NULL;	/* ignored on return */
	char *nameptr;
	char *cp;
	size_t pathlen, plen;
	int fmode;
	int cmode = uap->mode;
	int incache = 0;
	struct pshmcache *pcp = NULL;

	AUDIT_ARG(fflags, uap->oflag);
	AUDIT_ARG(mode, uap->mode);

	pinfo = PSHMINFO_NULL;

	/*
	 * Preallocate everything we might need up front to avoid taking
	 * and dropping the lock, opening us up to race conditions.
	 */
	MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
	if (pnbuf == NULL) {
		error = ENOSPC;
		goto bad;
	}

	pathlen = MAXPATHLEN;
	error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);
	if (error) {
		goto bad;
	}
	AUDIT_ARG(text, pnbuf);
	if (pathlen > PSHMNAMLEN) {
		error = ENAMETOOLONG;
		goto bad;
	}
#ifdef PSXSHM_NAME_RESTRICT
	nameptr = pnbuf;
	if (*nameptr == '/') {
		while (*(nameptr++) == '/') {
			plen--;
			error = EINVAL;
			goto bad;
		}
	} else {
		error = EINVAL;
		goto bad;
	}
#endif /* PSXSHM_NAME_RESTRICT */

	plen = pathlen;
	nameptr = pnbuf;
	nd.pshm_nameptr = nameptr;
	nd.pshm_namelen = plen;
	nd.pshm_hash = 0;

	for (cp = nameptr, i = 1; *cp != 0 && i <= plen; i++, cp++) {
		nd.pshm_hash += (unsigned char)*cp * i;
	}

	/*
	 * attempt to allocate a new fp; if unsuccessful, the fp will be
	 * left unmodified (NULL).
	 */
	error = falloc(p, &fp, &indx, vfs_context_current());
	if (error)
		goto bad;

	cmode &= ALLPERMS;

	fmode = FFLAGS(uap->oflag);
	if ((fmode & (FREAD | FWRITE)) == 0) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * Preallocate a cache entry and a pshminfo in case we need to add
	 * a new object below.
	 */
	MALLOC(pcp, struct pshmcache *, sizeof(struct pshmcache), M_SHM, M_WAITOK|M_ZERO);
	if (pcp == NULL) {
		error = ENOSPC;
		goto bad;
	}

	MALLOC(new_pinfo, struct pshminfo *, sizeof(struct pshminfo), M_SHM, M_WAITOK|M_ZERO);
	if (new_pinfo == PSHMINFO_NULL) {
		error = ENOSPC;
		goto bad;
	}
	mac_posixshm_label_init(new_pinfo);

	MALLOC(new_pnode, struct pshmnode *, sizeof(struct pshmnode), M_SHM, M_WAITOK|M_ZERO);
	if (new_pnode == PSHMNODE_NULL) {
		error = ENOSPC;
		goto bad;
	}

	PSHM_SUBSYS_LOCK();

	/*
	 * If we find the entry in the cache, this will take a reference,
	 * allowing us to unlock it for the permissions check.
	 */
	error = pshm_cache_search(&pinfo, &nd, &pcache, 1);

	PSHM_SUBSYS_UNLOCK();

	if (error == PSHMCACHE_NEGATIVE) {
		error = EINVAL;
		goto bad;
	}

	if (error == PSHMCACHE_NOTFOUND) {
		incache = 0;
		if (fmode & O_CREAT) {
			/* create a new one (commit the allocation) */
			pinfo = new_pinfo;
			pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE;
			pinfo->pshm_usecount = 1; /* existence reference */
			pinfo->pshm_mode = cmode;
			pinfo->pshm_uid = kauth_getuid();
			pinfo->pshm_gid = kauth_getgid();
			bcopy(pnbuf, &pinfo->pshm_name[0], pathlen);
			pinfo->pshm_name[pathlen] = 0;

			error = mac_posixshm_check_create(kauth_cred_get(), nameptr);
			if (error) {
				goto bad;
			}
			mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr);
		}
	} else {
		incache = 1;
		if (fmode & O_CREAT) {
			/* already exists */
			if ((fmode & O_EXCL)) {
				AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
				    pinfo->pshm_gid, pinfo->pshm_mode);

				/* shm obj exists and opened O_EXCL */
				error = EEXIST;
				goto bad;
			}

			if (pinfo->pshm_flags & PSHM_INDELETE) {
				error = ENOENT;
				goto bad;
			}
			AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
			    pinfo->pshm_gid, pinfo->pshm_mode);

			if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
				goto bad;
			}
			if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {
				goto bad;
			}
		}
	}
	if (!(fmode & O_CREAT)) {
		if (!incache) {
			/* O_CREAT is not set and the object does not exist */
			error = ENOENT;
			goto bad;
		}
		if (pinfo->pshm_flags & PSHM_INDELETE) {
			error = ENOENT;
			goto bad;
		}

		if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
			goto bad;
		}

		if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {
			goto bad;
		}
	}
	if (fmode & O_TRUNC) {
		error = EINVAL;
		goto bad;
	}

	PSHM_SUBSYS_LOCK();

#if DIAGNOSTIC
	if (fmode & FWRITE)
		pinfo->pshm_writecount++;
	if (fmode & FREAD)
		pinfo->pshm_readcount++;
#endif
	if (!incache) {
		/* if successful, this will consume the pcp */
		if ((error = pshm_cache_add(pinfo, &nd, pcp))) {
			goto bad_locked;
		}
		/*
		 * add reference for the new entry; otherwise, we obtained
		 * one from the cache hit earlier.
		 */
		pinfo->pshm_usecount++;
	}
	pinfo->pshm_flags &= ~PSHM_INCREATE;
	new_pnode->pinfo = pinfo;

	PSHM_SUBSYS_UNLOCK();

	/*
	 * if incache, we did not use the new pcp or new_pinfo and must
	 * free them
	 */
	if (incache) {
		FREE(pcp, M_SHM);

		if (new_pinfo != PSHMINFO_NULL) {
			mac_posixshm_label_destroy(new_pinfo);
			FREE(new_pinfo, M_SHM);
		}
	}

	proc_fdlock(p);
	fp->f_flag = fmode & FMASK;
	fp->f_ops = &pshmops;
	fp->f_data = (caddr_t)new_pnode;
	*fdflags(p, indx) |= UF_EXCLOSE;
	procfdtbl_releasefd(p, indx, NULL);
	fp_drop(p, indx, fp, 1);
	proc_fdunlock(p);

	*retval = indx;
	FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
	return 0;

bad_locked:
	PSHM_SUBSYS_UNLOCK();
bad:
	/*
	 * If we obtained the entry from the cache, we need to drop the
	 * reference; holding the reference may have prevented unlinking,
	 * so we need to call pshm_close() to get the full effect.
	 */
	if (incache) {
		PSHM_SUBSYS_LOCK();
		pshm_close(pinfo, 1);
		PSHM_SUBSYS_UNLOCK();
	}

	if (pcp != NULL)
		FREE(pcp, M_SHM);

	if (new_pnode != PSHMNODE_NULL)
		FREE(new_pnode, M_SHM);

	if (fp != NULL)
		fp_free(p, indx, fp);

	if (new_pinfo != PSHMINFO_NULL) {
		mac_posixshm_label_destroy(new_pinfo);
		FREE(new_pinfo, M_SHM);
	}
	if (pnbuf != NULL)
		FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
	return error;
}
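/*
 * pshm_truncate: establish the backing store for a shared memory object.
 * Reached via ftruncate() on a DTYPE_PSXSHM descriptor; the requested
 * length is rounded up to a page boundary and Mach memory entries are
 * created in chunks of at most ANON_MAX_SIZE, chained onto
 * pshm_memobjects.  On failure, any entries created so far are released.
 */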
int
pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd,
    off_t length, __unused int32_t *retval)
{
	struct pshminfo *pinfo;
	struct pshmnode *pnode;
	kern_return_t kret;
	mem_entry_name_port_t mem_object;
	mach_vm_size_t total_size, alloc_size;
	memory_object_size_t mosize;
	struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p;
	vm_map_t user_map;
	int error;

	user_map = current_map();

	if (fp->f_type != DTYPE_PSXSHM) {
		return EINVAL;
	}

	if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL)
		return EINVAL;

	PSHM_SUBSYS_LOCK();
	if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}
	if ((pinfo->pshm_flags & (PSHM_DEFINED|PSHM_ALLOCATING|PSHM_ALLOCATED))
	    != PSHM_DEFINED) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, length);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		return error;
	}

	pinfo->pshm_flags |= PSHM_ALLOCATING;
	total_size = vm_map_round_page(length,
	    vm_map_page_mask(user_map));
	pshmobj_next_p = &pinfo->pshm_memobjects;

	for (alloc_size = 0;
	    alloc_size < total_size;
	    alloc_size += mosize) {

		PSHM_SUBSYS_UNLOCK();

		mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE);
		kret = mach_make_memory_entry_64(
			VM_MAP_NULL,
			&mosize,
			0,
			MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
			&mem_object,
			0);

		if (kret != KERN_SUCCESS)
			goto out;

		MALLOC(pshmobj, struct pshmobj *, sizeof (struct pshmobj),
		    M_SHM, M_WAITOK);
		if (pshmobj == NULL) {
			kret = KERN_NO_SPACE;
			mach_memory_entry_port_release(mem_object);
			goto out;
		}

		PSHM_SUBSYS_LOCK();

		pshmobj->pshmo_memobject = (void *) mem_object;
		pshmobj->pshmo_size = mosize;
		pshmobj->pshmo_next = NULL;

		*pshmobj_next_p = pshmobj;
		pshmobj_next_p = &pshmobj->pshmo_next;
	}

	pinfo->pshm_flags |= PSHM_ALLOCATED;
	pinfo->pshm_flags &= ~(PSHM_ALLOCATING);
	pinfo->pshm_length = total_size;
	PSHM_SUBSYS_UNLOCK();
	return 0;

out:
	PSHM_SUBSYS_LOCK();
	for (pshmobj = pinfo->pshm_memobjects;
	    pshmobj != NULL;
	    pshmobj = pshmobj_next) {
		pshmobj_next = pshmobj->pshmo_next;
		mach_memory_entry_port_release(pshmobj->pshmo_memobject);
		FREE(pshmobj, M_SHM);
	}
	pinfo->pshm_memobjects = NULL;
	pinfo->pshm_flags &= ~PSHM_ALLOCATING;
	PSHM_SUBSYS_UNLOCK();

	switch (kret) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return ENOMEM;
	case KERN_PROTECTION_FAILURE:
		return EACCES;
	default:
		return EINVAL;
	}
}
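/*
 * pshm_stat: fill a stat or stat64 buffer (selected by isstat64) with the
 * object's mode, owner, group and length, after a MAC stat check.
 */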
int
pshm_stat(struct pshmnode *pnode, void *ub, int isstat64)
{
	struct stat *sb = (struct stat *)0;	/* warning avoidance; protected by isstat64 */
	struct stat64 *sb64 = (struct stat64 *)0;	/* warning avoidance; protected by isstat64 */
	struct pshminfo *pinfo;
	int error;

	PSHM_SUBSYS_LOCK();
	if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	error = mac_posixshm_check_stat(kauth_cred_get(), pinfo);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		return error;
	}

	if (isstat64 != 0) {
		sb64 = (struct stat64 *)ub;
		bzero(sb64, sizeof(struct stat64));
		sb64->st_mode = pinfo->pshm_mode;
		sb64->st_uid = pinfo->pshm_uid;
		sb64->st_gid = pinfo->pshm_gid;
		sb64->st_size = pinfo->pshm_length;
	} else {
		sb = (struct stat *)ub;
		bzero(sb, sizeof(struct stat));
		sb->st_mode = pinfo->pshm_mode;
		sb->st_uid = pinfo->pshm_uid;
		sb->st_gid = pinfo->pshm_gid;
		sb->st_size = pinfo->pshm_length;
	}
	PSHM_SUBSYS_UNLOCK();
	return 0;
}
/*
 * This is called only from shm_open which holds pshm_lock();
 * XXX This code is repeated many times
 */
int
pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, __unused proc_t p)
{
	int mode_req = ((mode & FREAD) ? S_IRUSR : 0) |
	    ((mode & FWRITE) ? S_IWUSR : 0);

	/* User id 0 always gets access. */
	if (!suser(cred, NULL))
		return 0;

	return posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req);
}
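/*
 * pshm_mmap: map an shm object into the calling task.  After validating the
 * request against the object's length and MAC policy, the whole range is
 * first reserved with a VM_PROT_NONE placeholder mapping and then
 * overwritten, chunk by chunk, with mappings of the object's memory entries
 * (see the "reserve the entire space first" / "overwrite with the real
 * mappings" steps below).  Partial mappings are torn down on failure.
 */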
int
pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct fileproc *fp, off_t pageoff)
{
	vm_map_offset_t	user_addr = (vm_map_offset_t)uap->addr;
	vm_map_size_t	user_size = (vm_map_size_t)uap->len;
	vm_map_offset_t	user_start_addr;
	vm_map_size_t	map_size, mapped_size;
	int		prot = uap->prot;
	int		flags = uap->flags;
	vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
	vm_object_offset_t map_pos;
	vm_map_t	user_map;
	int		alloc_flags;
	vm_map_kernel_flags_t vmk_flags;
	boolean_t	docow;
	kern_return_t	kret;
	struct pshminfo	*pinfo;
	struct pshmnode	*pnode;
	struct pshmobj	*pshmobj;
	int		error;

	if ((flags & MAP_SHARED) == 0)
		return EINVAL;

	/* Can't allow write permission if the shm_open() didn't */
	if ((prot & PROT_WRITE) && ((fp->f_flag & FWRITE) == 0)) {
		return EPERM;
	}

	if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL)
		return EINVAL;

	PSHM_SUBSYS_LOCK();
	if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}
	if (user_size > (vm_map_size_t)pinfo->pshm_length) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}
	vm_map_size_t end_pos = 0;
	if (os_add_overflow(user_size, file_pos, &end_pos)) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}
	if (end_pos > (vm_map_size_t)pinfo->pshm_length) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}
	if ((pshmobj = pinfo->pshm_memobjects) == NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	error = mac_posixshm_check_mmap(kauth_cred_get(), pinfo, prot, flags);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		return error;
	}

	PSHM_SUBSYS_UNLOCK();
	user_map = current_map();

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags = VM_FLAGS_ANYWHERE;
		user_addr = vm_map_round_page(user_addr,
		    vm_map_page_mask(user_map));
	} else {
		if (user_addr != vm_map_round_page(user_addr,
		    vm_map_page_mask(user_map)))
			return EINVAL;
		/*
		 * We do not get rid of the existing mappings here because
		 * it wouldn't be atomic (see comment in mmap()). We let
		 * Mach VM know that we want it to replace any existing
		 * mapping with the new one.
		 */
		alloc_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}
	docow = FALSE;

	mapped_size = 0;
	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	/* reserve the entire space first... */
	kret = vm_map_enter_mem_object(user_map,
	    &user_addr,
	    user_size,
	    0,
	    alloc_flags,
	    vmk_flags,
	    VM_KERN_MEMORY_NONE,
	    IPC_PORT_NULL,
	    0,
	    FALSE,
	    VM_PROT_NONE,
	    VM_PROT_NONE,
	    VM_INHERIT_NONE);
	user_start_addr = user_addr;
	if (kret != KERN_SUCCESS) {
		goto out;
	}

	/* ... and overwrite with the real mappings */
	for (map_pos = 0, pshmobj = pinfo->pshm_memobjects;
	    user_size != 0;
	    map_pos += pshmobj->pshmo_size, pshmobj = pshmobj->pshmo_next) {
		if (pshmobj == NULL) {
			/* nothing there to map !? */
			goto out;
		}
		if (file_pos >= map_pos + pshmobj->pshmo_size) {
			continue;
		}
		map_size = pshmobj->pshmo_size - (file_pos - map_pos);
		if (map_size > user_size) {
			map_size = user_size;
		}
		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		kret = vm_map_enter_mem_object(
			user_map,
			&user_addr,
			map_size,
			0,
			VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
			vmk_flags,
			VM_KERN_MEMORY_NONE,
			pshmobj->pshmo_memobject,
			file_pos - map_pos,
			docow,
			prot,
			VM_PROT_DEFAULT,
			VM_INHERIT_SHARE);
		if (kret != KERN_SUCCESS)
			goto out;

		user_addr += map_size;
		user_size -= map_size;
		mapped_size += map_size;
		file_pos += map_size;
	}

	PSHM_SUBSYS_LOCK();
	pnode->mapp_addr = user_start_addr;
	pnode->map_size = mapped_size;
	pinfo->pshm_flags |= (PSHM_MAPPED | PSHM_INUSE);
	PSHM_SUBSYS_UNLOCK();
out:
	if (kret != KERN_SUCCESS) {
		if (mapped_size != 0) {
			(void) mach_vm_deallocate(current_map(),
			    user_start_addr,
			    mapped_size);
		}
	}

	switch (kret) {
	case KERN_SUCCESS:
		*retval = (user_start_addr + pageoff);
		return 0;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return ENOMEM;
	case KERN_PROTECTION_FAILURE:
		return EACCES;
	default:
		return EINVAL;
	}
}
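/*
 * pshm_unlink_internal: remove a name from the cache and drop the name's
 * existence reference.  Must be called with the subsystem lock held.  If
 * that was the last reference, the backing memory entries and the pshminfo
 * itself are freed; otherwise the object lingers, marked PSHM_REMOVED,
 * until the last descriptor referencing it is closed.
 */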
static int
pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache)
{
	struct pshmobj *pshmobj, *pshmobj_next;

	PSHM_SUBSYS_ASSERT_HELD();

	if (!pinfo || !pcache)
		return EINVAL;

	if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) == 0)
		return EINVAL;

	if (pinfo->pshm_flags & PSHM_INDELETE)
		return 0;

	pinfo->pshm_flags |= PSHM_INDELETE;
	pinfo->pshm_usecount--;

	pshm_cache_delete(pcache);
	pinfo->pshm_flags |= PSHM_REMOVED;

	/* release the existence reference */
	if (!pinfo->pshm_usecount) {
		mac_posixshm_label_destroy(pinfo);
		/*
		 * If this is the last reference going away on the object,
		 * then we need to destroy the backing object.  The name
		 * has an implied but uncounted reference on the object,
		 * once it's created, since it's used as a rendezvous, and
		 * therefore may be subsequently reopened.
		 */
		for (pshmobj = pinfo->pshm_memobjects;
		    pshmobj != NULL;
		    pshmobj = pshmobj_next) {
			mach_memory_entry_port_release(pshmobj->pshmo_memobject);
			pshmobj_next = pshmobj->pshmo_next;
			FREE(pshmobj, M_SHM);
		}
		FREE(pinfo, M_SHM);
	}

	FREE(pcache, M_SHM);

	return 0;
}
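/*
 * shm_unlink system call: copy in and hash the name the same way shm_open()
 * does, look it up in the cache, and, after MAC and write-permission checks,
 * remove it via pshm_unlink_internal().  Unlinking is refused while the
 * object is still being allocated; an object already marked for deletion is
 * treated as already unlinked.
 */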
int
shm_unlink(proc_t p, struct shm_unlink_args *uap, __unused int32_t *retval)
{
	size_t i;
	char *pnbuf;
	size_t pathlen;
	int error = 0;

	struct pshmname nd;
	struct pshminfo *pinfo;
	char *nameptr;
	char *cp;
	struct pshmcache *pcache = PSHMCACHE_NULL;

	pinfo = PSHMINFO_NULL;

	MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
	if (pnbuf == NULL) {
		return ENOSPC;	/* XXX non-standard */
	}
	pathlen = MAXPATHLEN;
	error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);
	if (error) {
		goto bad;
	}
	AUDIT_ARG(text, pnbuf);
	if (pathlen > PSHMNAMLEN) {
		error = ENAMETOOLONG;
		goto bad;
	}

	nameptr = pnbuf;

#ifdef PSXSHM_NAME_RESTRICT
	if (*nameptr == '/') {
		while (*(nameptr++) == '/') {
			pathlen--;
			error = EINVAL;
			goto bad;
		}
	} else {
		error = EINVAL;
		goto bad;
	}
#endif /* PSXSHM_NAME_RESTRICT */

	nd.pshm_nameptr = nameptr;
	nd.pshm_namelen = pathlen;
	nd.pshm_hash = 0;

	for (cp = nameptr, i = 1; *cp != 0 && i <= pathlen; i++, cp++) {
		nd.pshm_hash += (unsigned char)*cp * i;
	}

	PSHM_SUBSYS_LOCK();
	error = pshm_cache_search(&pinfo, &nd, &pcache, 0);

	/* During unlink, a lookup failure also implies ENOENT */
	if (error != PSHMCACHE_FOUND) {
		PSHM_SUBSYS_UNLOCK();
		error = ENOENT;
		goto bad;
	}

	if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) == 0) {
		PSHM_SUBSYS_UNLOCK();
		error = EINVAL;
		goto bad;
	}

	if (pinfo->pshm_flags & PSHM_ALLOCATING) {
		/* XXX should we wait for flag to clear and then proceed ? */
		PSHM_SUBSYS_UNLOCK();
		error = EAGAIN;
		goto bad;
	}

	if (pinfo->pshm_flags & PSHM_INDELETE) {
		PSHM_SUBSYS_UNLOCK();
		error = 0;
		goto bad;
	}

	error = mac_posixshm_check_unlink(kauth_cred_get(), pinfo, nameptr);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		goto bad;
	}

	AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid,
	    pinfo->pshm_mode);

	/*
	 * following file semantics, unlink should be allowed
	 * for users with write permission only.
	 */
	if ((error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p))) {
		PSHM_SUBSYS_UNLOCK();
		goto bad;
	}

	error = pshm_unlink_internal(pinfo, pcache);
	PSHM_SUBSYS_UNLOCK();

bad:
	FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
	return error;
}
/* called with the subsystem lock already held */
static int
pshm_close(struct pshminfo *pinfo, int dropref)
{
	int error = 0;
	struct pshmobj *pshmobj, *pshmobj_next;

	/*
	 * If we are dropping the reference we took on the cache object, don't
	 * enforce the allocation requirement.
	 */
	if (!dropref && ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED)) {
		return EINVAL;
	}
#if DIAGNOSTIC
	if (!pinfo->pshm_usecount) {
		kprintf("negative usecount in pshm_close\n");
	}
#endif /* DIAGNOSTIC */
	pinfo->pshm_usecount--; /* release this fd's reference */

	if ((pinfo->pshm_flags & PSHM_REMOVED) && !pinfo->pshm_usecount) {
		mac_posixshm_label_destroy(pinfo);
		PSHM_SUBSYS_UNLOCK();
		/*
		 * If this is the last reference going away on the object,
		 * then we need to destroy the backing object.
		 */
		for (pshmobj = pinfo->pshm_memobjects;
		    pshmobj != NULL;
		    pshmobj = pshmobj_next) {
			mach_memory_entry_port_release(pshmobj->pshmo_memobject);
			pshmobj_next = pshmobj->pshmo_next;
			FREE(pshmobj, M_SHM);
		}
		PSHM_SUBSYS_LOCK();
		FREE(pinfo, M_SHM);
	}
	return error;
}
/* vfs_context_t passed to match prototype for struct fileops */
static int
pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx)
{
	int error = EINVAL;
	struct pshmnode *pnode;

	PSHM_SUBSYS_LOCK();

	if ((pnode = (struct pshmnode *)fg->fg_data) != NULL) {
		if (pnode->pinfo != PSHMINFO_NULL) {
			error = pshm_close(pnode->pinfo, 0);
		}
		FREE(pnode, M_SHM);
	}

	PSHM_SUBSYS_UNLOCK();

	return error;
}
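/*
 * read/write/ioctl/select make no sense on a POSIX shared memory object, so
 * the corresponding fileops below are stubs that simply refuse the
 * operation; pshm_kqfilter likewise rejects attaching knotes.
 */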
static int
pshm_read(__unused struct fileproc *fp, __unused struct uio *uio,
    __unused int flags, __unused vfs_context_t ctx)
{
	return ENOTSUP;
}

static int
pshm_write(__unused struct fileproc *fp, __unused struct uio *uio,
    __unused int flags, __unused vfs_context_t ctx)
{
	return ENOTSUP;
}

static int
pshm_ioctl(__unused struct fileproc *fp, __unused u_long com,
    __unused caddr_t data, __unused vfs_context_t ctx)
{
	return ENOTSUP;
}

static int
pshm_select(__unused struct fileproc *fp, __unused int which, __unused void *wql,
    __unused vfs_context_t ctx)
{
	return ENOTSUP;
}

static int
pshm_kqfilter(__unused struct fileproc *fp, struct knote *kn,
    __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx)
{
	kn->kn_flags = EV_ERROR;
	kn->kn_data = ENOTSUP;
	return 0;
}
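/*
 * fill_pshminfo: export a pshmnode's state (stat-like fields, mapped
 * address, and name) into a struct pshm_info, presumably for the proc_info
 * interface that reports per-fd information.
 */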
int
fill_pshminfo(struct pshmnode *pshm, struct pshm_info *info)
{
	struct pshminfo *pinfo;
	struct vinfo_stat *sb;

	PSHM_SUBSYS_LOCK();
	if ((pinfo = pshm->pinfo) == PSHMINFO_NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	sb = &info->pshm_stat;

	bzero(sb, sizeof(struct vinfo_stat));
	sb->vst_mode = pinfo->pshm_mode;
	sb->vst_uid = pinfo->pshm_uid;
	sb->vst_gid = pinfo->pshm_gid;
	sb->vst_size = pinfo->pshm_length;

	info->pshm_mappaddr = pshm->mapp_addr;
	bcopy(&pinfo->pshm_name[0], &info->pshm_name[0], PSHMNAMLEN + 1);

	PSHM_SUBSYS_UNLOCK();
	return 0;
}
void
pshm_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx)
{
	struct pshmnode *pnode;
	struct pshminfo *pshm;

	PSHM_SUBSYS_LOCK();
	pnode = (struct pshmnode *)fp->f_fglob->fg_data;
	if (pnode != NULL) {
		pshm = pnode->pinfo;
		if (pshm != NULL)
			mac_posixshm_vnode_label_associate(
				vfs_context_ucred(ctx), pshm, pshm->pshm_label,
				vp, vp->v_label);
	}
	PSHM_SUBSYS_UNLOCK();
}