/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1996-1998 Apple Computer, Inc.
 * All Rights Reserved.
 */
/*
 * posix_shm.c : Support for POSIX shared memory APIs
 *
 * Author: Ananthakrishna Ramesh
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/filedesc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc_info.h>
#include <sys/posix_shm.h>
#include <security/audit/audit.h>
#include <security/mac_framework.h>

#include <mach/mach_types.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#define f_flag fp_glob->fg_flag
#define f_ops  fp_glob->fg_ops
#define f_data fp_glob->fg_data
/*
 * Used to construct the list of memory objects
 * assigned to a populated shared memory segment.
 */
typedef struct pshm_mobj {
	void                    *pshmo_memobject;
	memory_object_size_t    pshmo_size;
	SLIST_ENTRY(pshm_mobj)  pshmo_next;
} pshm_mobj_t;
/*
 * This represents an existing POSIX shared memory object.
 *
 * It comes into existence with a shm_open(...O_CREAT...)
 * call and goes away only after it has been shm_unlink()ed
 * and the last remaining shm_open() file reference is closed.
 *
 * To keep track of that lifetime, pshm_usecount is used as a reference
 * counter. It is incremented for every successful shm_open() and one extra
 * time for shm_unlink() to release. Internally, an additional temporary
 * reference is taken whenever the subsystem lock has to be dropped for
 * other reasons.
 */
typedef struct internal_pshminfo {
	struct pshminfo pshm_hdr;
	SLIST_HEAD(pshm_mobjhead, pshm_mobj) pshm_mobjs;
	RB_ENTRY(internal_pshminfo) pshm_links; /* links for red/black tree */
} pshm_info_t;
#define pshm_flags    pshm_hdr.pshm_flags
#define pshm_usecount pshm_hdr.pshm_usecount
#define pshm_length   pshm_hdr.pshm_length
#define pshm_mode     pshm_hdr.pshm_mode
#define pshm_uid      pshm_hdr.pshm_uid
#define pshm_gid      pshm_hdr.pshm_gid
#define pshm_label    pshm_hdr.pshm_label

/* Values for pshm_flags that are still used */
#define PSHM_ALLOCATED  0x004   /* backing storage is allocated */
#define PSHM_MAPPED     0x008   /* mapped at least once */
#define PSHM_INUSE      0x010   /* mapped at least once */
#define PSHM_REMOVED    0x020   /* no longer in the name cache due to shm_unlink() */
#define PSHM_ALLOCATING 0x100   /* storage is being allocated */
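
/*
 * Illustrative userspace lifecycle for one of these objects (a sketch using
 * the standard POSIX shm API, not kernel code; error handling omitted and
 * the name "/example" is arbitrary):
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600); // usecount = 2 (fd + unlink ref)
 *	ftruncate(fd, 4096);     // one-time backing store allocation: PSHM_ALLOCATED
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);  // PSHM_MAPPED | PSHM_INUSE
 *	shm_unlink("/example");  // drops the "unlink" reference; name leaves the cache
 *	close(fd);               // last reference: the pshm_info and its memory entries are released
 */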
/*
 * These handle reference counting pshm_info_t structs using pshm_usecount.
 */
static int pshm_ref(pshm_info_t *pinfo);
static void pshm_deref(pshm_info_t *pinfo);
#define PSHM_MAXCOUNT UINT_MAX
/*
 * For every shm_open, we get a new one of these.
 * The only reason we don't just use pshm_info directly is that the mapped
 * address of the memory objects can be queried via proc_pidinfo(). Note that
 * even this is a hack: if you mmap() the same fd multiple times, we only
 * save/report one address.
 */
typedef struct pshmnode {
	pshm_info_t      *pinfo;
	mach_vm_offset_t mapp_addr;
} pshmnode_t;
/* compare function for the red black tree */
static int
pshm_compare(pshm_info_t *a, pshm_info_t *b)
{
	int cmp = strncmp(a->pshm_hdr.pshm_name, b->pshm_hdr.pshm_name, PSHMNAMLEN + 1);

	if (cmp < 0) {
		return -1;
	}
	if (cmp > 0) {
		return 1;
	}
	return 0;
}
/*
 * shared memory "paths" are stored in a red black tree for lookup
 */
u_long pshmnument;      /* count of entries allocated in the red black tree */
RB_HEAD(pshmhead, internal_pshminfo) pshm_head;
RB_PROTOTYPE(pshmhead, internal_pshminfo, pshm_links, pshm_compare)
RB_GENERATE(pshmhead, internal_pshminfo, pshm_links, pshm_compare)

/* lookup, add, remove functions */
static pshm_info_t *pshm_cache_search(pshm_info_t *look);
static void pshm_cache_add(pshm_info_t *entry);
static void pshm_cache_delete(pshm_info_t *entry);

static int pshm_closefile(struct fileglob *fg, vfs_context_t ctx);

static int pshm_access(pshm_info_t *pinfo, int mode, kauth_cred_t cred, proc_t p);
int pshm_cache_purge_all(proc_t p);

static int pshm_unlink_internal(pshm_info_t *pinfo);

static const struct fileops pshmops = {
	.fo_type     = DTYPE_PSXSHM,
	.fo_read     = fo_no_read,
	.fo_write    = fo_no_write,
	.fo_ioctl    = fo_no_ioctl,
	.fo_select   = fo_no_select,
	.fo_close    = pshm_closefile,
	.fo_drain    = fo_no_drain,
	.fo_kqfilter = fo_no_kqfilter,
};
/*
 * Everything here is protected by a single mutex.
 */
static lck_grp_t       *psx_shm_subsys_lck_grp;
static lck_grp_attr_t  *psx_shm_subsys_lck_grp_attr;
static lck_attr_t      *psx_shm_subsys_lck_attr;
static lck_mtx_t       psx_shm_subsys_mutex;

#define PSHM_SUBSYS_LOCK()        lck_mtx_lock(&psx_shm_subsys_mutex)
#define PSHM_SUBSYS_UNLOCK()      lck_mtx_unlock(&psx_shm_subsys_mutex)
#define PSHM_SUBSYS_ASSERT_HELD() LCK_MTX_ASSERT(&psx_shm_subsys_mutex, LCK_MTX_ASSERT_OWNED)

__private_extern__ void
pshm_lock_init(void)
{
	psx_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	psx_shm_subsys_lck_grp =
	    lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr);

	psx_shm_subsys_lck_attr = lck_attr_alloc_init();
	lck_mtx_init(&psx_shm_subsys_mutex, psx_shm_subsys_lck_grp, psx_shm_subsys_lck_attr);
}
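
/*
 * Typical pattern used below when the subsystem lock has to be dropped around
 * a blocking operation (illustrative sketch only; do_blocking_work() is a
 * hypothetical placeholder for work such as the memory-entry allocation in
 * the truncate path, and "lookup" is a caller-filled pshm_info_t holding just
 * the name):
 *
 *	PSHM_SUBSYS_LOCK();
 *	pinfo = pshm_cache_search(&lookup);
 *	if (pinfo != NULL && pshm_ref(pinfo) == 0) {   // pin it across the unlock
 *		PSHM_SUBSYS_UNLOCK();
 *		do_blocking_work(pinfo);
 *		PSHM_SUBSYS_LOCK();
 *		pshm_deref(pinfo);                     // may free pinfo
 *	}
 *	PSHM_SUBSYS_UNLOCK();
 */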
/*
 * Lookup an entry in the cache. Only the name is used from "look".
 */
static pshm_info_t *
pshm_cache_search(pshm_info_t *look)
{
	PSHM_SUBSYS_ASSERT_HELD();
	return RB_FIND(pshmhead, &pshm_head, look);
}
/*
 * Add a new entry to the cache.
 */
static void
pshm_cache_add(pshm_info_t *entry)
{
	pshm_info_t *conflict;

	PSHM_SUBSYS_ASSERT_HELD();
	conflict = RB_INSERT(pshmhead, &pshm_head, entry);
	if (conflict != NULL) {
		panic("pshm_cache_add() found %p", conflict);
	}
	pshmnument++;
}

/*
 * Remove the given entry from the red black tree.
 */
static void
pshm_cache_delete(pshm_info_t *entry)
{
	PSHM_SUBSYS_ASSERT_HELD();
	assert(!(entry->pshm_flags & PSHM_REMOVED));
	RB_REMOVE(pshmhead, &pshm_head, entry);
	pshmnument--;
}

/*
 * Initialize the red black tree.
 */
void
pshm_cache_init(void)
{
	RB_INIT(&pshm_head);
}
/*
 * Invalidate all entries and delete all objects associated with them.
 * XXX - due to the reference counting, this only works if all userland
 * references to it via file descriptors are also closed already. Is this
 * known to be called after all user processes are killed?
 */
int
pshm_cache_purge_all(__unused proc_t proc)
{
	pshm_info_t *p;
	pshm_info_t *tmp;
	int error = 0;

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}

	PSHM_SUBSYS_LOCK();
	RB_FOREACH_SAFE(p, pshmhead, &pshm_head, tmp) {
		error = pshm_unlink_internal(p);
		if (error) { /* XXX: why give up on failure, should keep going */
			break;
		}
	}
	assert(pshmnument == 0);

	PSHM_SUBSYS_UNLOCK();

	if (error) {
		printf("%s: Error %d removing posix shm cache: %ld remain!\n",
		    __func__, error, pshmnument);
	}
	return error;
}
/*
 * Utility to get the shared memory name from userspace and
 * populate a pshm_info_t with it. If there's a problem
 * reading the name or it's malformed, an error code is returned.
 */
static int
pshm_get_name(pshm_info_t *pinfo, const user_addr_t user_addr)
{
	size_t bytes_copied = 0;
	int error;

	error = copyinstr(user_addr, &pinfo->pshm_hdr.pshm_name[0], PSHMNAMLEN + 1, &bytes_copied);
	if (error != 0) {
		return error;
	}
	assert(bytes_copied <= PSHMNAMLEN + 1);
	assert(pinfo->pshm_hdr.pshm_name[bytes_copied - 1] == 0);
	if (bytes_copied < 2) { /* 2: expect at least one character and terminating zero */
		return EINVAL;
	}
	AUDIT_ARG(text, &pinfo->pshm_hdr.pshm_name[0]);

	return 0;
}
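
/*
 * Name handling from the userspace side, illustrative only ("/ok_name" is an
 * arbitrary example): a name must be 1..PSHMNAMLEN characters plus the
 * terminating NUL.
 *
 *	shm_open("/ok_name", O_RDWR | O_CREAT, 0600);  // accepted
 *	shm_open("", O_RDWR);                          // rejected: fewer than 2 bytes copied in
 *
 * Names longer than PSHMNAMLEN make copyinstr() fail with ENAMETOOLONG.
 */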
/*
 * Process a shm_open() system call.
 */
int
shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval)
{
	int indx;
	int error = 0;
	int fmode;
	pshm_info_t *pinfo = NULL;
	pshm_info_t *new_pinfo = NULL;
	pshmnode_t *new_pnode = NULL;
	struct fileproc *fp = NULL;
	mode_t cmode = (mode_t)uap->mode;
	bool incache = false;
	bool have_label = false;

	AUDIT_ARG(fflags, uap->oflag);
	AUDIT_ARG(mode, cmode);
	/*
	 * Allocate data structures we need. We parse the userspace name into
	 * a pshm_info_t, even when we don't need to O_CREAT.
	 */
	MALLOC(new_pinfo, pshm_info_t *, sizeof(pshm_info_t), M_SHM, M_WAITOK | M_ZERO);
	if (new_pinfo == NULL) {
		error = ENOSPC;
		goto bad;
	}

	/*
	 * Get and check the name.
	 */
	error = pshm_get_name(new_pinfo, uap->name);
	if (error != 0) {
		goto bad;
	}

	/*
	 * Attempt to allocate a new fp. If unsuccessful, the fp will be
	 * left unmodified (NULL).
	 */
	error = falloc(p, &fp, &indx, vfs_context_current());
	if (error) {
		goto bad;
	}

	fmode = FFLAGS(uap->oflag);
	if ((fmode & (FREAD | FWRITE)) == 0) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * Will need a new pnode for the file pointer
	 */
	MALLOC(new_pnode, pshmnode_t *, sizeof(pshmnode_t), M_SHM, M_WAITOK | M_ZERO);
	if (new_pnode == NULL) {
		error = ENOSPC;
		goto bad;
	}
	/*
	 * If creating a new segment, fill in its information.
	 * If we find a pre-existing one in the cache lookup we'll just toss this one later.
	 */
	if (fmode & O_CREAT) {
		new_pinfo->pshm_usecount = 2; /* one each for: file pointer, shm_unlink */
		new_pinfo->pshm_length = 0;
		new_pinfo->pshm_mode = cmode;
		new_pinfo->pshm_uid = kauth_getuid();
		new_pinfo->pshm_gid = kauth_getgid();
		SLIST_INIT(&new_pinfo->pshm_mobjs);

		mac_posixshm_label_init(&new_pinfo->pshm_hdr);
		have_label = true;
		error = mac_posixshm_check_create(kauth_cred_get(), new_pinfo->pshm_hdr.pshm_name);
		if (error) {
			goto bad;
		}
	}

	/*
	 * Look up the named shared memory segment in the cache, possibly adding
	 * it for O_CREAT.
	 */
	PSHM_SUBSYS_LOCK();

	pinfo = pshm_cache_search(new_pinfo);
	if (pinfo != NULL) {
		incache = true;

		/* Get a new reference to go with the file pointer.*/
		error = pshm_ref(pinfo);
		if (error) {
			pinfo = NULL; /* so cleanup code doesn't deref */
			goto bad_locked;
		}

		/* can't have pre-existing if O_EXCL */
		if ((fmode & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) {
			error = EEXIST;
			goto bad_locked;
		}

		/* O_TRUNC is only valid while length is not yet set */
		if ((fmode & O_TRUNC) &&
		    (pinfo->pshm_flags & (PSHM_ALLOCATING | PSHM_ALLOCATED))) {
			error = EINVAL;
			goto bad_locked;
		}
	} else {
		/* if it wasn't found, must have O_CREAT */
		if (!(fmode & O_CREAT)) {
			error = ENOENT;
			goto bad_locked;
		}

		/* Add the new region to the cache. */
		pinfo = new_pinfo;
		pshm_cache_add(pinfo);
		new_pinfo = NULL; /* so that it doesn't get free'd */
	}

	PSHM_SUBSYS_UNLOCK();
	/*
	 * Check we have permission to access any pre-existing segment.
	 */
	if (incache) {
		if (fmode & O_CREAT) {
			AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
			    pinfo->pshm_gid, pinfo->pshm_mode);
		}
		if ((error = mac_posixshm_check_open(kauth_cred_get(), &pinfo->pshm_hdr, fmode))) {
			goto bad;
		}
		if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {
			goto bad;
		}
	} else {
		mac_posixshm_label_associate(kauth_cred_get(), &pinfo->pshm_hdr, pinfo->pshm_hdr.pshm_name);
	}

	fp->f_flag = fmode & FMASK;
	fp->f_ops = &pshmops;
	new_pnode->pinfo = pinfo;
	fp->f_data = (caddr_t)new_pnode;
	*fdflags(p, indx) |= UF_EXCLOSE;
	procfdtbl_releasefd(p, indx, NULL);
	fp_drop(p, indx, fp, 1);

	*retval = indx;
	error = 0;
	goto done;

bad_locked:
	PSHM_SUBSYS_UNLOCK();
bad:
	/*
	 * Drop any new reference to a pre-existing shared memory region.
	 */
	if (incache && pinfo != NULL) {
		PSHM_SUBSYS_LOCK();
		pshm_deref(pinfo);
		PSHM_SUBSYS_UNLOCK();
	}

	/*
	 * Delete any allocated unused data structures.
	 */
	if (new_pnode != NULL) {
		FREE(new_pnode, M_SHM);
	}

	if (fp != NULL) {
		fp_free(p, indx, fp);
	}

	if (new_pinfo != NULL) {
		if (have_label) {
			mac_posixshm_label_destroy(&new_pinfo->pshm_hdr);
		}
		FREE(new_pinfo, M_SHM);
	}
done:
	return error;
}
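
/*
 * Illustrative shm_open() outcomes given the checks above (userspace sketch;
 * the names are arbitrary and the errnos follow POSIX semantics):
 *
 *	int fd = shm_open("/seg", O_RDWR | O_CREAT, 0600);  // create or open existing
 *	shm_open("/seg", O_RDWR | O_CREAT | O_EXCL, 0600);  // EEXIST: name already in the cache
 *	shm_open("/missing", O_RDWR);                       // ENOENT: not found and no O_CREAT
 *	ftruncate(fd, 4096);
 *	shm_open("/seg", O_RDWR | O_TRUNC);                 // rejected: O_TRUNC only valid before the length is set
 */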
/*
 * The truncate call associates memory with the shared memory region. It can
 * only be successfully done with a non-zero length once per shared memory region.
 */
int
pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd,
    off_t length, __unused int32_t *retval)
{
	pshm_info_t *pinfo;
	pshmnode_t *pnode;
	kern_return_t kret;
	mem_entry_name_port_t mem_object;
	mach_vm_size_t total_size, alloc_size;
	memory_object_size_t mosize;
	pshm_mobj_t *pshmobj, *pshmobj_last;
	vm_map_t user_map;
	int error;

	user_map = current_map();

	if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_PSXSHM) {
		return EINVAL;
	}

#if 0
	/*
	 * Can't enforce this yet, some third party tools don't
	 * specify O_RDWR like they ought to. See radar 48692182
	 */
	/* ftruncate() requires write permission */
	if (!(fp->f_flag & FWRITE)) {
		return EINVAL;
	}
#endif

	PSHM_SUBSYS_LOCK();
	if (((pnode = (pshmnode_t *)fp->f_data)) == NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	if ((pinfo = pnode->pinfo) == NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	/* We only allow one ftruncate() per lifetime of the shm object. */
	if (pinfo->pshm_flags & (PSHM_ALLOCATING | PSHM_ALLOCATED)) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	error = mac_posixshm_check_truncate(kauth_cred_get(), &pinfo->pshm_hdr, length);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		return error;
	}

	/*
	 * Grab an extra reference, so we can drop the lock while allocating and
	 * ensure the objects don't disappear.
	 */
	error = pshm_ref(pinfo);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		return error;
	}

	/* set ALLOCATING, so another truncate can't start */
	pinfo->pshm_flags |= PSHM_ALLOCATING;
	total_size = vm_map_round_page(length, vm_map_page_mask(user_map));

	pshmobj_last = NULL;
	for (alloc_size = 0; alloc_size < total_size; alloc_size += mosize) {
		PSHM_SUBSYS_UNLOCK();

		/* get a memory object back some of the shared memory */
		mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE);
		kret = mach_make_memory_entry_64(VM_MAP_NULL, &mosize, 0,
		    MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT, &mem_object, 0);

		if (kret != KERN_SUCCESS) {
			goto out;
		}

		/* get a list entry to track the memory object */
		MALLOC(pshmobj, pshm_mobj_t *, sizeof(pshm_mobj_t), M_SHM, M_WAITOK);
		if (pshmobj == NULL) {
			kret = KERN_NO_SPACE;
			mach_memory_entry_port_release(mem_object);
			goto out;
		}

		PSHM_SUBSYS_LOCK();

		/* link in the new entry */
		pshmobj->pshmo_memobject = (void *)mem_object;
		pshmobj->pshmo_size = mosize;
		SLIST_NEXT(pshmobj, pshmo_next) = NULL;

		if (pshmobj_last == NULL) {
			SLIST_FIRST(&pinfo->pshm_mobjs) = pshmobj;
		} else {
			SLIST_INSERT_AFTER(pshmobj_last, pshmobj, pshmo_next);
		}
		pshmobj_last = pshmobj;
	}

	/* all done, change flags to ALLOCATED and return success */
	pinfo->pshm_flags |= PSHM_ALLOCATED;
	pinfo->pshm_flags &= ~(PSHM_ALLOCATING);
	pinfo->pshm_length = total_size;
	pshm_deref(pinfo); /* drop the "allocating" reference */
	PSHM_SUBSYS_UNLOCK();
	return 0;

out:
	/* clean up any partially allocated objects */
	PSHM_SUBSYS_LOCK();
	while ((pshmobj = SLIST_FIRST(&pinfo->pshm_mobjs)) != NULL) {
		SLIST_REMOVE_HEAD(&pinfo->pshm_mobjs, pshmo_next);
		PSHM_SUBSYS_UNLOCK();
		mach_memory_entry_port_release(pshmobj->pshmo_memobject);
		FREE(pshmobj, M_SHM);
		PSHM_SUBSYS_LOCK();
	}
	pinfo->pshm_flags &= ~PSHM_ALLOCATING;
	pshm_deref(pinfo); /* drop the "allocating" reference */
	PSHM_SUBSYS_UNLOCK();

	switch (kret) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return ENOMEM;
	case KERN_PROTECTION_FAILURE:
		return EACCES;
	default:
		return EINVAL;
	}
}
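
/*
 * The one-shot allocation above means a region is sized exactly once.
 * Userspace sketch (illustrative; the name "/seg" is arbitrary):
 *
 *	int fd = shm_open("/seg", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 16384);  // succeeds: length rounded up to a page multiple, backing store allocated
 *	ftruncate(fd, 32768);  // fails: PSHM_ALLOCATED is already set
 *
 * Resizing an existing POSIX shm region therefore requires unlinking it and
 * creating a new one.
 */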
int
pshm_stat(pshmnode_t *pnode, void *ub, int isstat64)
{
	struct stat *sb = (struct stat *)0;        /* warning avoidance ; protected by isstat64 */
	struct stat64 *sb64 = (struct stat64 *)0;  /* warning avoidance ; protected by isstat64 */
	pshm_info_t *pinfo;
	int error;

	PSHM_SUBSYS_LOCK();
	if ((pinfo = pnode->pinfo) == NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	error = mac_posixshm_check_stat(kauth_cred_get(), &pinfo->pshm_hdr);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		return error;
	}

	if (isstat64 != 0) {
		sb64 = (struct stat64 *)ub;
		bzero(sb64, sizeof(struct stat64));
		sb64->st_mode = pinfo->pshm_mode;
		sb64->st_uid = pinfo->pshm_uid;
		sb64->st_gid = pinfo->pshm_gid;
		sb64->st_size = pinfo->pshm_length;
	} else {
		sb = (struct stat *)ub;
		bzero(sb, sizeof(struct stat));
		sb->st_mode = pinfo->pshm_mode;
		sb->st_uid = pinfo->pshm_uid;
		sb->st_gid = pinfo->pshm_gid;
		sb->st_size = pinfo->pshm_length;
	}

	PSHM_SUBSYS_UNLOCK();
	return 0;
}
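
/*
 * Only the fields filled in above are meaningful to callers. Userspace sketch
 * (illustrative; the name "/seg" is arbitrary):
 *
 *	struct stat st;
 *	int fd = shm_open("/seg", O_RDONLY, 0);
 *	if (fstat(fd, &st) == 0) {
 *		// st.st_size                == length set by the single ftruncate()
 *		// st.st_mode/st_uid/st_gid  == values captured at O_CREAT time
 *		// everything else (timestamps, st_dev, st_ino, ...) reads as zero
 *	}
 */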
/*
 * Verify access to a shared memory region.
 */
static int
pshm_access(pshm_info_t *pinfo, int mode, kauth_cred_t cred, __unused proc_t p)
{
	mode_t mode_req = ((mode & FREAD) ? S_IRUSR : 0) |
	    ((mode & FWRITE) ? S_IWUSR : 0);

	/* User id 0 always gets access. */
	if (!suser(cred, NULL)) {
		return 0;
	}

	return posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req);
}
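
/*
 * The open-mode to permission-bit mapping used above, spelled out
 * (illustrative):
 *
 *	FREAD          -> request S_IRUSR
 *	FWRITE         -> request S_IWUSR
 *	FREAD | FWRITE -> request S_IRUSR | S_IWUSR
 *
 * posix_cred_access() then applies the usual owner/group/other rules against
 * pshm_mode, with uid 0 short-circuiting to success above.
 */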
int
pshm_mmap(
	__unused proc_t p,
	struct mmap_args *uap,
	user_addr_t *retval,
	struct fileproc *fp,
	off_t pageoff)
{
	vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr;
	vm_map_size_t user_size = (vm_map_size_t)uap->len;
	vm_map_offset_t user_start_addr;
	vm_map_size_t map_size, mapped_size;
	int prot = uap->prot;
	int max_prot = VM_PROT_DEFAULT;
	int flags = uap->flags;
	vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
	vm_object_offset_t map_pos;
	vm_map_t user_map;
	int alloc_flags;
	vm_map_kernel_flags_t vmk_flags;
	kern_return_t kret = KERN_SUCCESS;
	pshm_info_t *pinfo;
	pshmnode_t *pnode;
	pshm_mobj_t *pshmobj;
	int error = 0;

	if (user_size == 0) {
		return 0;
	}

	if (!(flags & MAP_SHARED)) {
		return EINVAL;
	}

	/* Can't allow write permission if the shm_open() didn't allow them. */
	if (!(fp->f_flag & FWRITE)) {
		if (prot & VM_PROT_WRITE) {
			return EPERM;
		}
		max_prot &= ~VM_PROT_WRITE;
	}
	PSHM_SUBSYS_LOCK();
	pnode = (pshmnode_t *)fp->f_data;
	if (pnode == NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	pinfo = pnode->pinfo;
	if (pinfo == NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	if (!(pinfo->pshm_flags & PSHM_ALLOCATED)) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	if (user_size > (vm_map_size_t)pinfo->pshm_length) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	vm_map_size_t end_pos = 0;
	if (os_add_overflow(user_size, file_pos, &end_pos)) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}
	if (end_pos > (vm_map_size_t)pinfo->pshm_length) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	pshmobj = SLIST_FIRST(&pinfo->pshm_mobjs);
	if (pshmobj == NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	error = mac_posixshm_check_mmap(kauth_cred_get(), &pinfo->pshm_hdr, prot, flags);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		return error;
	}

	/* Grab an extra reference, so we can drop the lock while mapping. */
	error = pshm_ref(pinfo);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		return error;
	}

	PSHM_SUBSYS_UNLOCK();
	user_map = current_map();

	if (!(flags & MAP_FIXED)) {
		alloc_flags = VM_FLAGS_ANYWHERE;
		user_addr = vm_map_round_page(user_addr,
		    vm_map_page_mask(user_map));
	} else {
		if (user_addr != vm_map_round_page(user_addr,
		    vm_map_page_mask(user_map))) {
			error = EINVAL;
			goto out_deref;
		}

		/*
		 * We do not get rid of the existing mappings here because
		 * it wouldn't be atomic (see comment in mmap()). We let
		 * Mach VM know that we want it to replace any existing
		 * mapping with the new one.
		 */
		alloc_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}

	mapped_size = 0;
	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	/* reserve the entire space first... */
	kret = vm_map_enter_mem_object(user_map,
	    &user_addr,
	    user_size,
	    0,
	    alloc_flags,
	    vmk_flags,
	    VM_KERN_MEMORY_NONE,
	    IPC_PORT_NULL,
	    0,
	    FALSE,
	    VM_PROT_NONE,
	    VM_PROT_NONE,
	    VM_INHERIT_NONE);
	user_start_addr = user_addr;
	if (kret != KERN_SUCCESS) {
		goto out_deref;
	}
	/* Now overwrite with the real mappings. */
	for (map_pos = 0, pshmobj = SLIST_FIRST(&pinfo->pshm_mobjs);
	    user_size != 0;
	    map_pos += pshmobj->pshmo_size, pshmobj = SLIST_NEXT(pshmobj, pshmo_next)) {
		if (pshmobj == NULL) {
			/* nothing there to map !? */
			break;
		}
		if (file_pos >= map_pos + pshmobj->pshmo_size) {
			continue;
		}
		map_size = (vm_map_size_t)(pshmobj->pshmo_size - (file_pos - map_pos));
		if (map_size > user_size) {
			map_size = user_size;
		}
		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		kret = vm_map_enter_mem_object(
			user_map,
			&user_addr,
			map_size,
			(vm_map_offset_t)0,
			VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
			vmk_flags,
			VM_KERN_MEMORY_NONE,
			pshmobj->pshmo_memobject,
			file_pos - map_pos,
			FALSE,
			prot,
			max_prot,
			VM_INHERIT_SHARE);
		if (kret != KERN_SUCCESS) {
			goto out_deref;
		}

		user_addr += map_size;
		user_size -= map_size;
		mapped_size += map_size;
		file_pos += map_size;
	}
	PSHM_SUBSYS_LOCK();
	pnode->mapp_addr = user_start_addr;
	pinfo->pshm_flags |= (PSHM_MAPPED | PSHM_INUSE);
	PSHM_SUBSYS_UNLOCK();

out_deref:
	PSHM_SUBSYS_LOCK();
	pshm_deref(pinfo); /* drop the extra reference we had while mapping. */
	PSHM_SUBSYS_UNLOCK();

	if (error != 0) {
		return error;
	}
	if (kret != KERN_SUCCESS) {
		if (mapped_size != 0) {
			(void) mach_vm_deallocate(current_map(),
			    user_start_addr, mapped_size);
		}
	} else {
		*retval = (user_addr_t)(user_start_addr + pageoff);
		return 0;
	}

	switch (kret) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return ENOMEM;
	case KERN_PROTECTION_FAILURE:
		return EACCES;
	default:
		return EINVAL;
	}
}
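
/*
 * Mapping constraints enforced above, seen from userspace (illustrative
 * sketch; "/seg" and "len" are arbitrary, with len no larger than the region):
 *
 *	int fd = shm_open("/seg", O_RDONLY, 0);
 *	mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);               // OK
 *	mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);              // fails: MAP_SHARED is required
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);  // fails: fd was not opened O_RDWR
 *	mmap(NULL, len, PROT_READ, MAP_SHARED, fd, len + PAGE_SIZE); // fails: extends past pshm_length
 */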
/*
 * Remove a shared memory region name from the name lookup cache.
 */
static int
pshm_unlink_internal(pshm_info_t *pinfo)
{
	PSHM_SUBSYS_ASSERT_HELD();

	pshm_cache_delete(pinfo);
	pinfo->pshm_flags |= PSHM_REMOVED;

	/* release the "unlink" reference */
	pshm_deref(pinfo);

	return 0;
}
int
shm_unlink(proc_t p, struct shm_unlink_args *uap, __unused int32_t *retval)
{
	int error = 0;
	pshm_info_t *pinfo = NULL;
	pshm_info_t *name_pinfo = NULL;

	/*
	 * Get the name from user args.
	 */
	MALLOC(name_pinfo, pshm_info_t *, sizeof(pshm_info_t), M_SHM, M_WAITOK | M_ZERO);
	if (name_pinfo == NULL) {
		error = ENOSPC;
		goto bad;
	}
	error = pshm_get_name(name_pinfo, uap->name);
	if (error != 0) {
		goto bad;
	}

	PSHM_SUBSYS_LOCK();
	pinfo = pshm_cache_search(name_pinfo);

	if (pinfo == NULL) {
		error = ENOENT;
		goto bad_unlock;
	}

	error = mac_posixshm_check_unlink(kauth_cred_get(), &pinfo->pshm_hdr, name_pinfo->pshm_hdr.pshm_name);
	if (error != 0) {
		goto bad_unlock;
	}

	AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode);

	/*
	 * Following file semantics, unlink should normally be allowed
	 * for users with write permission only. We also allow the creator
	 * of a segment to delete it, even without write permission.
	 * That's because there's no equivalent of write permission for the
	 * directory containing a file.
	 */
	error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p);
	if (error != 0 && pinfo->pshm_uid != kauth_getuid()) {
		goto bad_unlock;
	}

	error = pshm_unlink_internal(pinfo);
bad_unlock:
	PSHM_SUBSYS_UNLOCK();
bad:
	if (name_pinfo != NULL) {
		FREE(name_pinfo, M_SHM);
	}
	return error;
}
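
/*
 * As with file unlink, removing the name does not invalidate existing
 * references: only the "unlink" reference is dropped above, while every open
 * fd and mapping keeps its own. Illustrative userspace sketch ("/seg" is
 * arbitrary):
 *
 *	int fd = shm_open("/seg", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	shm_unlink("/seg");  // name gone: a later shm_open("/seg", O_RDWR) sees ENOENT
 *	p[0] = 1;            // still valid; the object is freed only after the last close()
 */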
/*
 * Add a new reference to a shared memory region.
 * Fails if we will overflow the reference counter.
 */
static int
pshm_ref(pshm_info_t *pinfo)
{
	PSHM_SUBSYS_ASSERT_HELD();

	if (pinfo->pshm_usecount == PSHM_MAXCOUNT) {
		return EMFILE;
	}
	pinfo->pshm_usecount++;
	return 0;
}
/*
 * Dereference a pshm_info_t. Delete the region if
 * this was the final reference count.
 */
static void
pshm_deref(pshm_info_t *pinfo)
{
	pshm_mobj_t *pshmobj;

	PSHM_SUBSYS_ASSERT_HELD();
	if (pinfo->pshm_usecount == 0) {
		panic("negative usecount in pshm_close\n");
	}
	pinfo->pshm_usecount--; /* release this fd's reference */

	if (pinfo->pshm_usecount == 0) {
		mac_posixshm_label_destroy(&pinfo->pshm_hdr);
		PSHM_SUBSYS_UNLOCK();

		/*
		 * Release references to any backing objects.
		 */
		while ((pshmobj = SLIST_FIRST(&pinfo->pshm_mobjs)) != NULL) {
			SLIST_REMOVE_HEAD(&pinfo->pshm_mobjs, pshmo_next);
			mach_memory_entry_port_release(pshmobj->pshmo_memobject);
			FREE(pshmobj, M_SHM);
		}

		/* free the pinfo itself */
		FREE(pinfo, M_SHM);

		PSHM_SUBSYS_LOCK();
	}
}
/* vfs_context_t passed to match prototype for struct fileops */
static int
pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx)
{
	int error = EINVAL;
	pshmnode_t *pnode;

	PSHM_SUBSYS_LOCK();

	pnode = (pshmnode_t *)fg->fg_data;
	if (pnode != NULL) {
		error = 0;
		fg->fg_data = NULL; /* set fg_data to NULL to avoid racing close()es */
		if (pnode->pinfo != NULL) {
			pshm_deref(pnode->pinfo);
			pnode->pinfo = NULL;
		}
	}

	PSHM_SUBSYS_UNLOCK();
	if (pnode != NULL) {
		FREE(pnode, M_SHM);
	}

	return error;
}
int
fill_pshminfo(pshmnode_t *pshm, struct pshm_info *info)
{
	pshm_info_t *pinfo;
	struct vinfo_stat *sb;

	PSHM_SUBSYS_LOCK();
	if ((pinfo = pshm->pinfo) == NULL) {
		PSHM_SUBSYS_UNLOCK();
		return EINVAL;
	}

	sb = &info->pshm_stat;

	bzero(sb, sizeof(struct vinfo_stat));
	sb->vst_mode = pinfo->pshm_mode;
	sb->vst_uid = pinfo->pshm_uid;
	sb->vst_gid = pinfo->pshm_gid;
	sb->vst_size = pinfo->pshm_length;

	info->pshm_mappaddr = pshm->mapp_addr;
	bcopy(&pinfo->pshm_hdr.pshm_name[0], &info->pshm_name[0], PSHMNAMLEN + 1);

	PSHM_SUBSYS_UNLOCK();
	return 0;
}
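
/*
 * This is what backs the proc_pidinfo/proc_pidfdinfo path mentioned near the
 * top of the file. Userspace sketch, assuming the PROC_PIDFDPSHMINFO flavor
 * and the struct pshm_fdinfo wrapper declared in <sys/proc_info.h>
 * (illustrative only):
 *
 *	#include <libproc.h>
 *	#include <sys/proc_info.h>
 *
 *	struct pshm_fdinfo pfi;
 *	if (proc_pidfdinfo(pid, fd, PROC_PIDFDPSHMINFO, &pfi, sizeof(pfi)) == sizeof(pfi)) {
 *		// pfi.pshminfo.pshm_name     - name passed to shm_open()
 *		// pfi.pshminfo.pshm_mappaddr - last mapped address (one per fd, see note above)
 *		// pfi.pshminfo.pshm_stat     - mode/uid/gid/size as filled in here
 *	}
 */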
void
pshm_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx)
{
	pshmnode_t *pnode;
	pshm_info_t *pshm;

	PSHM_SUBSYS_LOCK();
	pnode = (pshmnode_t *)fp->f_data;
	if (pnode != NULL) {
		pshm = pnode->pinfo;
		if (pshm != NULL) {
			mac_posixshm_vnode_label_associate(
				vfs_context_ucred(ctx), &pshm->pshm_hdr, pshm->pshm_label,
				vp, vp->v_label);
		}
	}
	PSHM_SUBSYS_UNLOCK();
}