/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1996-1998 Apple Computer, Inc.
 * All Rights Reserved.
 */
/*
 * posix_shm.c : Support for POSIX shared memory APIs
 *
 * Author: Ananthakrishna Ramesh
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/filedesc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc_info.h>
#include <security/audit/audit.h>

#include <security/mac_framework.h>

#include <mach/mach_types.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#define f_flag f_fglob->fg_flag
#define f_type f_fglob->fg_ops->fo_type
#define f_msgcount f_fglob->fg_msgcount
#define f_cred f_fglob->fg_cred
#define f_ops f_fglob->fg_ops
#define f_offset f_fglob->fg_offset
#define f_data f_fglob->fg_data

#define	PSHMNAMLEN	31	/* maximum name segment length we bother with */
struct pshmobj {
	void			*pshmo_memobject;
	memory_object_size_t	pshmo_size;
	struct pshmobj		*pshmo_next;
};

struct pshminfo {
	unsigned int	pshm_flags;
	unsigned int	pshm_usecount;
	off_t		pshm_length;
	mode_t		pshm_mode;
	uid_t		pshm_uid;
	gid_t		pshm_gid;
	char		pshm_name[PSHMNAMLEN + 1];	/* segment name */
	struct pshmobj	*pshm_memobjects;
#if DIAGNOSTIC
	unsigned int	pshm_readcount;
	unsigned int	pshm_writecount;
#endif /* DIAGNOSTIC */
	struct label	*pshm_label;
};
#define PSHMINFO_NULL (struct pshminfo *)0

#define	PSHM_NONE	0x001
#define	PSHM_DEFINED	0x002
#define	PSHM_ALLOCATED	0x004
#define	PSHM_MAPPED	0x008
#define	PSHM_INUSE	0x010
#define	PSHM_REMOVED	0x020
#define	PSHM_INCREATE	0x040
#define	PSHM_INDELETE	0x080
#define	PSHM_ALLOCATING	0x100
struct pshmcache {
	LIST_ENTRY(pshmcache) pshm_hash;	/* hash chain */
	struct pshminfo	*pshminfo;		/* vnode the name refers to */
	int		pshm_nlen;		/* length of name */
	char		pshm_name[PSHMNAMLEN + 1];	/* segment name */
};
#define PSHMCACHE_NULL (struct pshmcache *)0

#define PSHMCACHE_NOTFOUND (0)
#define PSHMCACHE_FOUND (-1)
#define PSHMCACHE_NEGATIVE (ENOENT)
struct pshmstats {
	long	goodhits;	/* hits that we can really use */
	long	neghits;	/* negative hits that we can use */
	long	badhits;	/* hits we must drop */
	long	falsehits;	/* hits with id mismatch */
	long	miss;		/* misses */
	long	longnames;	/* long names that ignore cache */
};
struct pshmname {
	char	*pshm_nameptr;	/* pointer to looked up name */
	long	pshm_namelen;	/* length of looked up component */
	u_long	pshm_hash;	/* hash value of looked up name */
};
struct pshmnode {
	off_t		mapp_addr;
	user_size_t	map_size;	/* XXX unused ? */
	struct pshminfo	*pinfo;
	unsigned int	pshm_usecount;
#if DIAGNOSTIC
	unsigned int	readcnt;
	unsigned int	writecnt;
#endif /* DIAGNOSTIC */
};
#define PSHMNODE_NULL (struct pshmnode *)0

#define PSHMHASH(pnp) \
	(&pshmhashtbl[(pnp)->pshm_hash & pshmhash])
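/*
 * Illustrative sketch (not part of the kernel source): the pshm_hash value
 * consumed by PSHMHASH() is computed by the open/unlink paths as a
 * position-weighted sum of the name's bytes, and the bucket is chosen by
 * masking with pshmhash (the table size minus one). A minimal user-space
 * model of that computation, using a hypothetical helper name:
 *
 *	unsigned long
 *	pshm_name_hash(const char *name, unsigned long mask)
 *	{
 *		unsigned long hash = 0;
 *		unsigned long i = 1;
 *		const char *cp;
 *
 *		for (cp = name; *cp != 0; cp++, i++)
 *			hash += (unsigned char)*cp * i;
 *		return hash & mask;	// index into pshmhashtbl[]
 *	}
 */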
LIST_HEAD(pshmhashhead, pshmcache) *pshmhashtbl;	/* Hash Table */
u_long	pshmhash;			/* size of hash table - 1 */
long	pshmnument;			/* number of cache entries allocated */
struct pshmstats pshmstats;		/* cache effectiveness statistics */
static int pshm_read (struct fileproc *fp, struct uio *uio,
    int flags, vfs_context_t ctx);
static int pshm_write (struct fileproc *fp, struct uio *uio,
    int flags, vfs_context_t ctx);
static int pshm_ioctl (struct fileproc *fp, u_long com,
    caddr_t data, vfs_context_t ctx);
static int pshm_select (struct fileproc *fp, int which, void *wql, vfs_context_t ctx);
static int pshm_close(struct pshminfo *pinfo, int dropref);
static int pshm_closefile (struct fileglob *fg, vfs_context_t ctx);

static int pshm_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx);

int pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, proc_t p);
int pshm_cache_purge_all(proc_t p);

static int pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp);
static void pshm_cache_delete(struct pshmcache *pcp);
static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
    struct pshmcache **pcache, int addref);
static int pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache);
static const struct fileops pshmops = {
	.fo_type = DTYPE_PSXSHM,
	.fo_read = pshm_read,
	.fo_write = pshm_write,
	.fo_ioctl = pshm_ioctl,
	.fo_select = pshm_select,
	.fo_close = pshm_closefile,
	.fo_kqfilter = pshm_kqfilter,
};
static lck_grp_t       *psx_shm_subsys_lck_grp;
static lck_grp_attr_t  *psx_shm_subsys_lck_grp_attr;
static lck_attr_t      *psx_shm_subsys_lck_attr;
static lck_mtx_t        psx_shm_subsys_mutex;

#define PSHM_SUBSYS_LOCK() lck_mtx_lock(& psx_shm_subsys_mutex)
#define PSHM_SUBSYS_UNLOCK() lck_mtx_unlock(& psx_shm_subsys_mutex)
#define PSHM_SUBSYS_ASSERT_HELD() LCK_MTX_ASSERT(&psx_shm_subsys_mutex, LCK_MTX_ASSERT_OWNED)
/* Initialize the mutex governing access to the posix shm subsystem */
__private_extern__ void
pshm_lock_init( void )
{
	psx_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	psx_shm_subsys_lck_grp = lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr);

	psx_shm_subsys_lck_attr = lck_attr_alloc_init();
	lck_mtx_init(&psx_shm_subsys_mutex, psx_shm_subsys_lck_grp, psx_shm_subsys_lck_attr);
}
/*
 * Lookup an entry in the cache.
 *
 * If a match is found, a status of -1 (PSHMCACHE_FOUND) is returned.
 * If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned. If the lookup
 * fails, a status of zero is returned.
 */
static int
pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
    struct pshmcache **pcache, int addref)
{
	struct pshmcache *pcp, *nnp;
	struct pshmhashhead *pcpp;

	if (pnp->pshm_namelen > PSHMNAMLEN) {
		pshmstats.longnames++;
		return PSHMCACHE_NOTFOUND;
	}

	pcpp = PSHMHASH(pnp);
	for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) {
		nnp = pcp->pshm_hash.le_next;
		if (pcp->pshm_nlen == pnp->pshm_namelen &&
		    !bcmp(pcp->pshm_name, pnp->pshm_nameptr, (u_int)pcp->pshm_nlen))
			break;
	}

	if (pcp == 0) {
		pshmstats.miss++;
		return PSHMCACHE_NOTFOUND;
	}

	/* We found a "positive" match, return the vnode */
	if (pcp->pshminfo) {
		pshmstats.goodhits++;

		*pshmp = pcp->pshminfo;
		*pcache = pcp;
		if (addref)
			pcp->pshminfo->pshm_usecount++;
		return PSHMCACHE_FOUND;
	}

	/*
	 * We found a "negative" match, ENOENT notifies client of this match.
	 */
	pshmstats.neghits++;
	return PSHMCACHE_NEGATIVE;
}
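/*
 * A minimal sketch (not from this file) of how a caller is expected to act
 * on the three lookup results; shm_open() below follows this pattern while
 * holding the subsystem lock:
 *
 *	error = pshm_cache_search(&pinfo, &nd, &pcache, 1);
 *	if (error == PSHMCACHE_NEGATIVE) {
 *		// name is known not to exist
 *	} else if (error == PSHMCACHE_NOTFOUND) {
 *		// no entry: create one if O_CREAT was given
 *	} else {
 *		// PSHMCACHE_FOUND: positive hit, a reference was taken
 *	}
 */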
/*
 * Add an entry to the cache.
 * XXX should be static?
 */
static int
pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp)
{
	struct pshmhashhead *pcpp;
	struct pshminfo *dpinfo;
	struct pshmcache *dpcp;

	if (pnp->pshm_namelen > PSHMNAMLEN)
		panic("cache_enter: name too long");

	/* if the entry has already been added by some one else return */
	if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == PSHMCACHE_FOUND) {
		return EEXIST;
	}
	pshmnument++;

	/*
	 * Fill in cache info, if vp is NULL this is a "negative" cache entry.
	 */
	pcp->pshminfo = pshmp;
	pcp->pshm_nlen = pnp->pshm_namelen;
	bcopy(pnp->pshm_nameptr, pcp->pshm_name, (unsigned)pcp->pshm_nlen);
	pcpp = PSHMHASH(pnp);
#if DIAGNOSTIC
	{
		struct pshmcache *p;

		for (p = pcpp->lh_first; p != 0; p = p->pshm_hash.le_next)
			if (p == pcp)
				panic("cache_enter: duplicate");
	}
#endif
	LIST_INSERT_HEAD(pcpp, pcp, pshm_hash);
	return 0;
}
/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
pshm_cache_init(void)
{
	pshmhashtbl = hashinit(desiredvnodes / 8, M_SHM, &pshmhash);
}
/*
 * Invalidate all cache entries and release the objects associated with them:
 * every non-kernel entry is going away, so just dump them all.
 *
 * Each entry is removed via pshm_unlink_internal(), which drops the name's
 * existence reference and, once the last reference is gone, releases the
 * backing memory objects.
 */
int
pshm_cache_purge_all(__unused proc_t p)
{
	struct pshmcache *pcp, *tmppcp;
	struct pshmhashhead *pcpp;
	int error = 0;

	if (kauth_cred_issuser(kauth_cred_get()) == 0)
		return EPERM;

	PSHM_SUBSYS_LOCK();
	for (pcpp = &pshmhashtbl[pshmhash]; pcpp >= pshmhashtbl; pcpp--) {
		LIST_FOREACH_SAFE(pcp, pcpp, pshm_hash, tmppcp) {
			assert(pcp->pshm_nlen);
			error = pshm_unlink_internal(pcp->pshminfo, pcp);
			if (error)
				goto out;
		}
	}
	assert(pshmnument == 0);

out:
	PSHM_SUBSYS_UNLOCK();

	if (error)
		printf("%s: Error %d removing shm cache: %ld remain!\n",
		    __func__, error, pshmnument);
	return error;
}
static void
pshm_cache_delete(struct pshmcache *pcp)
{
#if DIAGNOSTIC
	if (pcp->pshm_hash.le_prev == 0)
		panic("namecache purge le_prev");
	if (pcp->pshm_hash.le_next == pcp)
		panic("namecache purge le_next");
#endif /* DIAGNOSTIC */
	LIST_REMOVE(pcp, pshm_hash);
	pcp->pshm_hash.le_prev = 0;
	pshmnument--;
}
int
shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval)
{
	struct pshminfo *pinfo;
	struct fileproc *fp = NULL;
	struct pshminfo *new_pinfo = PSHMINFO_NULL;
	struct pshmnode *new_pnode = PSHMNODE_NULL;
	struct pshmcache *pcache = PSHMCACHE_NULL;	/* ignored on return */
	size_t pathlen, plen;
	int cmode = uap->mode;
	struct pshmcache *pcp = NULL;

	AUDIT_ARG(fflags, uap->oflag);
	AUDIT_ARG(mode, uap->mode);

	pinfo = PSHMINFO_NULL;

	/*
	 * Preallocate everything we might need up front to avoid taking
	 * and dropping the lock, opening us up to race conditions.
	 */
	MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);

	pathlen = MAXPATHLEN;
	error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);

	AUDIT_ARG(text, pnbuf);
	if (pathlen > PSHMNAMLEN) {
		error = ENAMETOOLONG;
	}

#ifdef PSXSHM_NAME_RESTRICT
	if (*nameptr == '/') {
		while (*(nameptr++) == '/') {
#endif /* PSXSHM_NAME_RESTRICT */

	nd.pshm_nameptr = nameptr;
	nd.pshm_namelen = plen;

	for (cp = nameptr, i = 1; *cp != 0 && i <= plen; i++, cp++) {
		nd.pshm_hash += (unsigned char)*cp * i;
	}

	/*
	 * attempt to allocate a new fp; if unsuccessful, the fp will be
	 * left unmodified (NULL).
	 */
	error = falloc(p, &fp, &indx, vfs_context_current());

	fmode = FFLAGS(uap->oflag);
	if ((fmode & (FREAD | FWRITE)) == 0) {

	/*
	 * We allocate a new entry if we are less than the maximum
	 * allowed and the one at the front of the LRU list is in use.
	 * Otherwise we use the one at the front of the LRU list.
	 */
	MALLOC(pcp, struct pshmcache *, sizeof(struct pshmcache), M_SHM, M_WAITOK | M_ZERO);

	MALLOC(new_pinfo, struct pshminfo *, sizeof(struct pshminfo), M_SHM, M_WAITOK | M_ZERO);
	if (new_pinfo == PSHMINFO_NULL) {

	mac_posixshm_label_init(new_pinfo);

	MALLOC(new_pnode, struct pshmnode *, sizeof(struct pshmnode), M_SHM, M_WAITOK | M_ZERO);
	if (new_pnode == PSHMNODE_NULL) {

	/*
	 * If we find the entry in the cache, this will take a reference,
	 * allowing us to unlock it for the permissions check.
	 */
	error = pshm_cache_search(&pinfo, &nd, &pcache, 1);

	PSHM_SUBSYS_UNLOCK();

	if (error == PSHMCACHE_NEGATIVE) {

	if (error == PSHMCACHE_NOTFOUND) {
		if (fmode & O_CREAT) {
			/* create a new one (commit the allocation) */
			pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE;
			pinfo->pshm_usecount = 1; /* existence reference */
			pinfo->pshm_mode = cmode;
			pinfo->pshm_uid = kauth_getuid();
			pinfo->pshm_gid = kauth_getgid();
			bcopy(pnbuf, &pinfo->pshm_name[0], pathlen);
			pinfo->pshm_name[pathlen] = 0;

			error = mac_posixshm_check_create(kauth_cred_get(), nameptr);

			mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr);

	if (fmode & O_CREAT) {
		if ((fmode & O_EXCL)) {
			AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
			    pinfo->pshm_gid, pinfo->pshm_mode);

			/* shm obj exists and opened O_EXCL */

		if (pinfo->pshm_flags & PSHM_INDELETE) {

		AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
		    pinfo->pshm_gid, pinfo->pshm_mode);

		if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {

		if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {

	if (!(fmode & O_CREAT)) {
		/* O_CREAT is not set and the object does not exist */

	if (pinfo->pshm_flags & PSHM_INDELETE) {

	if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {

	if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {

	if (fmode & O_TRUNC) {

	pinfo->pshm_writecount++;

	pinfo->pshm_readcount++;

	/* if successful, this will consume the pcp */
	if ((error = pshm_cache_add(pinfo, &nd, pcp))) {

	/*
	 * add reference for the new entry; otherwise, we obtained
	 * one from the cache hit earlier.
	 */
	pinfo->pshm_usecount++;

	pinfo->pshm_flags &= ~PSHM_INCREATE;
	new_pnode->pinfo = pinfo;

	PSHM_SUBSYS_UNLOCK();

	/*
	 * if incache, we did not use the new pcp or new_pinfo and must
	 * free them
	 */
	if (new_pinfo != PSHMINFO_NULL) {
		mac_posixshm_label_destroy(new_pinfo);
		FREE(new_pinfo, M_SHM);
	}

	fp->f_flag = fmode & FMASK;
	fp->f_ops = &pshmops;
	fp->f_data = (caddr_t)new_pnode;
	*fdflags(p, indx) |= UF_EXCLOSE;
	procfdtbl_releasefd(p, indx, NULL);
	fp_drop(p, indx, fp, 1);

	FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);

	PSHM_SUBSYS_UNLOCK();

	/*
	 * If we obtained the entry from the cache, we need to drop the
	 * reference; holding the reference may have prevented unlinking,
	 * so we need to call pshm_close() to get the full effect.
	 */
	pshm_close(pinfo, 1);
	PSHM_SUBSYS_UNLOCK();

	if (new_pnode != PSHMNODE_NULL)
		FREE(new_pnode, M_SHM);

	fp_free(p, indx, fp);

	if (new_pinfo != PSHMINFO_NULL) {
		mac_posixshm_label_destroy(new_pinfo);
		FREE(new_pinfo, M_SHM);
	}

	FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
}
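/*
 * Example (user space, illustrative only -- not part of this file): this
 * syscall is normally reached through the libc shm_open() wrapper, followed
 * by ftruncate() and mmap() on the returned descriptor. "/my_region" is a
 * hypothetical name.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/my_region", O_RDWR | O_CREAT | O_EXCL, 0600);
 *	if (fd >= 0) {
 *		int rc = ftruncate(fd, 4096);		// serviced by pshm_truncate()
 *		void *p = (rc == 0) ? mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0) : MAP_FAILED;	// serviced by pshm_mmap()
 *	}
 */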
int
pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd,
    off_t length, __unused int32_t *retval)
{
	struct pshminfo *pinfo;
	struct pshmnode *pnode;
	mem_entry_name_port_t mem_object;
	mach_vm_size_t total_size, alloc_size;
	memory_object_size_t mosize;
	struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p;

	user_map = current_map();

	if (fp->f_type != DTYPE_PSXSHM) {
		return EINVAL;
	}

	if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL)
		return EINVAL;

	PSHM_SUBSYS_LOCK();
	if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}
	if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATING | PSHM_ALLOCATED))
	    != PSHM_DEFINED) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, length);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		return error;
	}

	pinfo->pshm_flags |= PSHM_ALLOCATING;
	total_size = vm_map_round_page(length,
	    vm_map_page_mask(user_map));
	pshmobj_next_p = &pinfo->pshm_memobjects;

	for (alloc_size = 0;
	    alloc_size < total_size;
	    alloc_size += mosize) {
		PSHM_SUBSYS_UNLOCK();

		mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE);
		kret = mach_make_memory_entry_64(
			MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,

		if (kret != KERN_SUCCESS)
			goto out;

		MALLOC(pshmobj, struct pshmobj *, sizeof(struct pshmobj),
		    M_SHM, M_WAITOK);
		if (pshmobj == NULL) {
			kret = KERN_NO_SPACE;
			mach_memory_entry_port_release(mem_object);
			goto out;
		}

		PSHM_SUBSYS_LOCK();

		pshmobj->pshmo_memobject = (void *)mem_object;
		pshmobj->pshmo_size = mosize;
		pshmobj->pshmo_next = NULL;

		*pshmobj_next_p = pshmobj;
		pshmobj_next_p = &pshmobj->pshmo_next;
	}

	pinfo->pshm_flags |= PSHM_ALLOCATED;
	pinfo->pshm_flags &= ~(PSHM_ALLOCATING);
	pinfo->pshm_length = total_size;
	PSHM_SUBSYS_UNLOCK();
	return 0;

out:
	PSHM_SUBSYS_LOCK();
	for (pshmobj = pinfo->pshm_memobjects;
	    pshmobj;
	    pshmobj = pshmobj_next) {
		pshmobj_next = pshmobj->pshmo_next;
		mach_memory_entry_port_release(pshmobj->pshmo_memobject);
		FREE(pshmobj, M_SHM);
	}
	pinfo->pshm_memobjects = NULL;
	pinfo->pshm_flags &= ~PSHM_ALLOCATING;
	PSHM_SUBSYS_UNLOCK();

	switch (kret) {
	case KERN_INVALID_ADDRESS:

	case KERN_PROTECTION_FAILURE:

	}
}
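/*
 * Illustrative note (not from this file): because the loop above allocates
 * one Mach memory entry of at most ANON_MAX_SIZE bytes per iteration, a
 * single user-space ftruncate() on a shm descriptor may chain several
 * pshmobj records, and the recorded length is rounded to a page boundary:
 *
 *	if (ftruncate(fd, len) == 0) {
 *		// the region is now backed by ceil(len / ANON_MAX_SIZE)
 *		// memory objects, and pshm_length == vm_map_round_page(len)
 *	}
 */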
int
pshm_stat(struct pshmnode *pnode, void *ub, int isstat64)
{
	struct stat *sb = (struct stat *)0;	/* warning avoidance ; protected by isstat64 */
	struct stat64 *sb64 = (struct stat64 *)0;	/* warning avoidance ; protected by isstat64 */
	struct pshminfo *pinfo;
	int error;

	if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	error = mac_posixshm_check_stat(kauth_cred_get(), pinfo);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		return error;
	}

	if (isstat64 != 0) {
		sb64 = (struct stat64 *)ub;
		bzero(sb64, sizeof(struct stat64));
		sb64->st_mode = pinfo->pshm_mode;
		sb64->st_uid = pinfo->pshm_uid;
		sb64->st_gid = pinfo->pshm_gid;
		sb64->st_size = pinfo->pshm_length;
	} else {
		sb = (struct stat *)ub;
		bzero(sb, sizeof(struct stat));
		sb->st_mode = pinfo->pshm_mode;
		sb->st_uid = pinfo->pshm_uid;
		sb->st_gid = pinfo->pshm_gid;
		sb->st_size = pinfo->pshm_length;
	}

	PSHM_SUBSYS_UNLOCK();
	return 0;
}
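/*
 * Illustrative note (not from this file): pshm_stat() is what backs
 * fstat()/fstat64() on a POSIX shm descriptor; only the mode, owner, group
 * and size fields are meaningful:
 *
 *	struct stat st;
 *	if (fstat(fd, &st) == 0) {
 *		// st.st_mode, st.st_uid, st.st_gid, st.st_size come from pshminfo
 *	}
 */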
/*
 * This is called only from shm_open which holds pshm_lock();
 * XXX This code is repeated many times
 */
int
pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, __unused proc_t p)
{
	int mode_req = ((mode & FREAD) ? S_IRUSR : 0) |
	    ((mode & FWRITE) ? S_IWUSR : 0);

	/* Otherwise, user id 0 always gets access. */
	if (!suser(cred, NULL))
		return 0;

	return (posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req));
}
int
pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval,
    struct fileproc *fp, off_t pageoff)
{
	vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr;
	vm_map_size_t user_size = (vm_map_size_t)uap->len;
	vm_map_offset_t user_start_addr;
	vm_map_size_t map_size, mapped_size;
	int prot = uap->prot;
	int flags = uap->flags;
	vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
	vm_object_offset_t map_pos;
	struct pshminfo *pinfo;
	struct pshmnode *pnode;
	struct pshmobj *pshmobj;

	if ((flags & MAP_SHARED) == 0)
		return EINVAL;

	if ((prot & PROT_WRITE) && ((fp->f_flag & FWRITE) == 0)) {
		return EPERM;
	}

	if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL)
		return EINVAL;

	PSHM_SUBSYS_LOCK();
	if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}
	if ((off_t)user_size > pinfo->pshm_length) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}
	if ((off_t)(user_size + file_pos) > pinfo->pshm_length) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}
	if ((pshmobj = pinfo->pshm_memobjects) == NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	error = mac_posixshm_check_mmap(kauth_cred_get(), pinfo, prot, flags);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		return error;
	}

	PSHM_SUBSYS_UNLOCK();
	user_map = current_map();

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags = VM_FLAGS_ANYWHERE;
		user_addr = vm_map_round_page(user_addr,
		    vm_map_page_mask(user_map));
	} else {
		if (user_addr != vm_map_round_page(user_addr,
		    vm_map_page_mask(user_map)))
			return EINVAL;
		/*
		 * We do not get rid of the existing mappings here because
		 * it wouldn't be atomic (see comment in mmap()). We let
		 * Mach VM know that we want it to replace any existing
		 * mapping with the new one.
		 */
		alloc_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}

	/* reserve the entire space first... */
	kret = vm_map_enter_mem_object(user_map,

	user_start_addr = user_addr;
	if (kret != KERN_SUCCESS) {

	/* ... and overwrite with the real mappings */
	for (map_pos = 0, pshmobj = pinfo->pshm_memobjects;
	    user_size != 0;
	    map_pos += pshmobj->pshmo_size, pshmobj = pshmobj->pshmo_next) {
		if (pshmobj == NULL) {
			/* nothing there to map !? */
			break;
		}
		if (file_pos >= map_pos + pshmobj->pshmo_size) {
			continue;
		}
		map_size = pshmobj->pshmo_size - (file_pos - map_pos);
		if (map_size > user_size) {
			map_size = user_size;
		}
		kret = vm_map_enter_mem_object(
			VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
			pshmobj->pshmo_memobject,

		if (kret != KERN_SUCCESS)
			break;

		user_addr += map_size;
		user_size -= map_size;
		mapped_size += map_size;
		file_pos += map_size;
	}

	pnode->mapp_addr = user_start_addr;
	pnode->map_size = mapped_size;
	pinfo->pshm_flags |= (PSHM_MAPPED | PSHM_INUSE);
	PSHM_SUBSYS_UNLOCK();

	if (kret != KERN_SUCCESS) {
		if (mapped_size != 0) {
			(void) mach_vm_deallocate(current_map(),
			    user_start_addr, mapped_size);
		}
	}

	*retval = (user_start_addr + pageoff);

	switch (kret) {
	case KERN_INVALID_ADDRESS:

	case KERN_PROTECTION_FAILURE:

	}
}
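/*
 * Example (user space, illustrative only): mapping a region created and
 * sized as in the earlier examples; pshm_mmap() reserves the whole range
 * first and then overlays one fixed mapping per backing pshmobj chunk:
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	if (p != MAP_FAILED) {
 *		// the pages are shared with every other process mapping "/my_region"
 *	}
 */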
static int
pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache)
{
	struct pshmobj *pshmobj, *pshmobj_next;

	PSHM_SUBSYS_ASSERT_HELD();

	if (!pinfo || !pcache)
		return EINVAL;

	if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) == 0)
		return EINVAL;

	if (pinfo->pshm_flags & PSHM_INDELETE)
		return 0;

	pinfo->pshm_flags |= PSHM_INDELETE;
	pinfo->pshm_usecount--;

	pshm_cache_delete(pcache);
	pinfo->pshm_flags |= PSHM_REMOVED;

	/* release the existence reference */
	if (!pinfo->pshm_usecount) {
		mac_posixshm_label_destroy(pinfo);
		/*
		 * If this is the last reference going away on the object,
		 * then we need to destroy the backing object. The name
		 * has an implied but uncounted reference on the object,
		 * once it's created, since it's used as a rendezvous, and
		 * therefore may be subsequently reopened.
		 */
		for (pshmobj = pinfo->pshm_memobjects;
		    pshmobj;
		    pshmobj = pshmobj_next) {
			mach_memory_entry_port_release(pshmobj->pshmo_memobject);
			pshmobj_next = pshmobj->pshmo_next;
			FREE(pshmobj, M_SHM);
		}
		FREE(pinfo, M_SHM);
	}
	FREE(pcache, M_SHM);

	return 0;
}
int
shm_unlink(proc_t p, struct shm_unlink_args *uap, __unused int32_t *retval)
{
	struct pshminfo *pinfo;
	struct pshmcache *pcache = PSHMCACHE_NULL;

	pinfo = PSHMINFO_NULL;

	MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
	if (pnbuf == NULL) {
		return ENOSPC;	/* XXX non-standard */
	}
	pathlen = MAXPATHLEN;
	error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);

	AUDIT_ARG(text, pnbuf);
	if (pathlen > PSHMNAMLEN) {
		error = ENAMETOOLONG;
		goto bad;
	}

#ifdef PSXSHM_NAME_RESTRICT
	if (*nameptr == '/') {
		while (*(nameptr++) == '/') {
#endif /* PSXSHM_NAME_RESTRICT */

	nd.pshm_nameptr = nameptr;
	nd.pshm_namelen = pathlen;

	for (cp = nameptr, i = 1; *cp != 0 && i <= pathlen; i++, cp++) {
		nd.pshm_hash += (unsigned char)*cp * i;
	}

	error = pshm_cache_search(&pinfo, &nd, &pcache, 0);

	/* During unlink lookup failure also implies ENOENT */
	if (error != PSHMCACHE_FOUND) {
		PSHM_SUBSYS_UNLOCK();
		error = ENOENT;
		goto bad;
	}

	if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) == 0) {
		PSHM_SUBSYS_UNLOCK();
		error = EINVAL;
		goto bad;
	}

	if (pinfo->pshm_flags & PSHM_ALLOCATING) {
		/* XXX should we wait for flag to clear and then proceed ? */
		PSHM_SUBSYS_UNLOCK();
		error = EAGAIN;
		goto bad;
	}

	if (pinfo->pshm_flags & PSHM_INDELETE) {
		PSHM_SUBSYS_UNLOCK();
		error = ENOENT;
		goto bad;
	}

	error = mac_posixshm_check_unlink(kauth_cred_get(), pinfo, nameptr);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		goto bad;
	}

	AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid,
	    pinfo->pshm_mode);

	/*
	 * following file semantics, unlink should be allowed
	 * for users with write permission only.
	 */
	if ((error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p))) {
		PSHM_SUBSYS_UNLOCK();
		goto bad;
	}

	error = pshm_unlink_internal(pinfo, pcache);
	PSHM_SUBSYS_UNLOCK();

bad:
	FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
	return error;
}
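/*
 * Example (user space, illustrative only): unlinking removes the name from
 * the cache right away, but the segment persists until the last descriptor
 * and mapping referencing it are gone:
 *
 *	shm_unlink("/my_region");	// hypothetical name from the earlier examples
 *	// existing fds and mappings keep working; a new
 *	// shm_open("/my_region", O_RDWR, 0) now fails with ENOENT
 */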
/* already called locked */
static int
pshm_close(struct pshminfo *pinfo, int dropref)
{
	struct pshmobj *pshmobj, *pshmobj_next;

	/*
	 * If we are dropping the reference we took on the cache object, don't
	 * enforce the allocation requirement.
	 */
	if (!dropref && ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED)) {
		return EINVAL;
	}
#if DIAGNOSTIC
	if (!pinfo->pshm_usecount) {
		kprintf("negative usecount in pshm_close\n");
	}
#endif /* DIAGNOSTIC */
	pinfo->pshm_usecount--; /* release this fd's reference */

	if ((pinfo->pshm_flags & PSHM_REMOVED) && !pinfo->pshm_usecount) {
		mac_posixshm_label_destroy(pinfo);
		PSHM_SUBSYS_UNLOCK();
		/*
		 * If this is the last reference going away on the object,
		 * then we need to destroy the backing object.
		 */
		for (pshmobj = pinfo->pshm_memobjects;
		    pshmobj;
		    pshmobj = pshmobj_next) {
			mach_memory_entry_port_release(pshmobj->pshmo_memobject);
			pshmobj_next = pshmobj->pshmo_next;
			FREE(pshmobj, M_SHM);
		}
	}
	return 0;
}
/* vfs_context_t passed to match prototype for struct fileops */
static int
pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx)
{
	int error = EINVAL;
	struct pshmnode *pnode;

	PSHM_SUBSYS_LOCK();

	if ((pnode = (struct pshmnode *)fg->fg_data) != NULL) {
		if (pnode->pinfo != PSHMINFO_NULL) {
			error = pshm_close(pnode->pinfo, 0);
		}
		FREE(pnode, M_SHM);
	}

	PSHM_SUBSYS_UNLOCK();

	return error;
}
static int
pshm_read(__unused struct fileproc *fp, __unused struct uio *uio,
    __unused int flags, __unused vfs_context_t ctx)
{
	return ENOTSUP;
}

static int
pshm_write(__unused struct fileproc *fp, __unused struct uio *uio,
    __unused int flags, __unused vfs_context_t ctx)
{
	return ENOTSUP;
}

static int
pshm_ioctl(__unused struct fileproc *fp, __unused u_long com,
    __unused caddr_t data, __unused vfs_context_t ctx)
{
	return ENOTSUP;
}

static int
pshm_select(__unused struct fileproc *fp, __unused int which, __unused void *wql,
    __unused vfs_context_t ctx)
{
	return ENOTSUP;
}
static int
pshm_kqfilter(__unused struct fileproc *fp, struct knote *kn,
    __unused vfs_context_t ctx)
{
	kn->kn_flags = EV_ERROR;
	kn->kn_data = ENOTSUP;
	return 0;
}
int
fill_pshminfo(struct pshmnode *pshm, struct pshm_info *info)
{
	struct pshminfo *pinfo;
	struct vinfo_stat *sb;

	PSHM_SUBSYS_LOCK();
	if ((pinfo = pshm->pinfo) == PSHMINFO_NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	sb = &info->pshm_stat;

	bzero(sb, sizeof(struct vinfo_stat));
	sb->vst_mode = pinfo->pshm_mode;
	sb->vst_uid = pinfo->pshm_uid;
	sb->vst_gid = pinfo->pshm_gid;
	sb->vst_size = pinfo->pshm_length;

	info->pshm_mappaddr = pshm->mapp_addr;
	bcopy(&pinfo->pshm_name[0], &info->pshm_name[0], PSHMNAMLEN + 1);

	PSHM_SUBSYS_UNLOCK();
	return 0;
}
void
pshm_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx)
{
	struct pshmnode *pnode;
	struct pshminfo *pshm;

	PSHM_SUBSYS_LOCK();
	pnode = (struct pshmnode *)fp->f_fglob->fg_data;
	if (pnode != NULL) {
		pshm = pnode->pinfo;
		if (pshm != NULL)
			mac_posixshm_vnode_label_associate(
				vfs_context_ucred(ctx), pshm, pshm->pshm_label,
				vp, vp->v_label);
	}
	PSHM_SUBSYS_UNLOCK();
}