2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1990, 1996-1998 Apple Computer, Inc.
30 * All Rights Reserved.
33 * posix_shm.c : Support for POSIX shared memory APIs
36 * Author: Ananthakrishna Ramesh
44 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
45 * support for mandatory and extensible security protections. This notice
46 * is included in support of clause 2.2 (b) of the Apple Public License,
50 #include <sys/cdefs.h>
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/file_internal.h>
55 #include <sys/filedesc.h>
57 #include <sys/proc_internal.h>
58 #include <sys/kauth.h>
59 #include <sys/mount.h>
60 #include <sys/namei.h>
61 #include <sys/vnode.h>
62 #include <sys/vnode_internal.h>
63 #include <sys/ioctl.h>
65 #include <sys/malloc.h>
68 #include <sys/sysproto.h>
69 #include <sys/proc_info.h>
70 #include <sys/posix_shm.h>
71 #include <security/audit/audit.h>
75 #include <security/mac_framework.h>
78 #include <mach/mach_types.h>
79 #include <mach/mach_vm.h>
80 #include <mach/vm_map.h>
81 #include <mach/vm_prot.h>
82 #include <mach/vm_inherit.h>
83 #include <mach/kern_return.h>
84 #include <mach/memory_object_control.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_protos.h>
/*
 * Shorthand accessors: reach through a struct fileproc's fileglob to the
 * per-open-file state. Used throughout this file as fp->f_flag etc.
 */
89 #define f_flag f_fglob->fg_flag
90 #define f_type f_fglob->fg_ops->fo_type
91 #define f_msgcount f_fglob->fg_msgcount
92 #define f_cred f_fglob->fg_cred
93 #define f_ops f_fglob->fg_ops
94 #define f_offset f_fglob->fg_offset
95 #define f_data f_fglob->fg_data
98 * Used to construct the list of memory objects
99 * assigned to a populated shared memory segment.
/*
 * One Mach memory object backing part of a populated shared memory region.
 * A region is covered by a singly linked list of these; pshm_truncate()
 * allocates them in chunks of at most ANON_MAX_SIZE each.
 * NOTE(review): the closing "} pshm_mobj_t;" of this typedef was elided
 * from this extract.
 */
101 typedef struct pshm_mobj
{
/* Mach memory entry port (held as a reference; released on teardown) */
102 void *pshmo_memobject
;
/* number of bytes of the region this object covers */
103 memory_object_size_t pshmo_size
;
/* linkage on the owning region's pshm_mobjs list */
104 SLIST_ENTRY(pshm_mobj
) pshmo_next
;
108 * This represents an existing Posix shared memory object.
110 * It comes into existence with a shm_open(...O_CREAT...)
111 * call and goes away only after it has been shm_unlink()ed
112 * and the last remaining shm_open() file reference is closed.
114 * To keep track of that lifetime, pshm_usecount is used as a reference
115 * counter. It's incremented for every successful shm_open() and
116 * one extra time for the shm_unlink() to release. Internally
117 * you can temporarily use an additional reference whenever the
118 * subsystem lock has to be dropped for other reasons.
/*
 * Kernel-internal representation of a POSIX shared memory region:
 * the externally visible pshminfo header plus the list of backing
 * memory objects and the red/black tree linkage used for name lookup.
 * NOTE(review): the closing "} pshm_info_t;" was elided from this extract.
 */
120 typedef struct internal_pshminfo
{
121 struct pshminfo pshm_hdr
;
/* backing Mach memory objects, filled in by pshm_truncate() */
122 SLIST_HEAD(pshm_mobjhead
, pshm_mobj
) pshm_mobjs
;
123 RB_ENTRY(internal_pshminfo
) pshm_links
; /* links for red/black tree */
/* Convenience accessors into the embedded pshm_hdr. */
125 #define pshm_flags pshm_hdr.pshm_flags
126 #define pshm_usecount pshm_hdr.pshm_usecount
127 #define pshm_length pshm_hdr.pshm_length
128 #define pshm_mode pshm_hdr.pshm_mode
129 #define pshm_uid pshm_hdr.pshm_uid
130 #define pshm_gid pshm_hdr.pshm_gid
131 #define pshm_label pshm_hdr.pshm_label
133 /* Values for pshm_flags that are still used */
134 #define PSHM_ALLOCATED 0x004 /* backing storage is allocated */
135 #define PSHM_MAPPED 0x008 /* mapped at least once */
136 #define PSHM_INUSE 0x010 /* mapped at least once */
137 #define PSHM_REMOVED 0x020 /* no longer in the name cache due to shm_unlink() */
138 #define PSHM_ALLOCATING 0x100 /* storage is being allocated */
/*
 * Reference counting for pshm_info_t via pshm_usecount; see the
 * definitions of pshm_ref()/pshm_deref() below.
 */
141 * These handle reference counting pshm_info_t structs using pshm_usecount.
143 static int pshm_ref(pshm_info_t
*pinfo
);
144 static void pshm_deref(pshm_info_t
*pinfo
);
/* Ceiling for pshm_usecount; pshm_ref() fails rather than wrap. */
145 #define PSHM_MAXCOUNT UINT_MAX
148 * For every shm_open, we get a new one of these.
149 * The only reason we don't just use pshm_info directly is that
150 * you can query the mapped memory objects via proc_pidinfo to
151 * query the mapped address. Note that even this is a hack. If
152 * you mmap() the same fd multiple times, we only save/report
/*
 * Per-file-descriptor node stored in fp->f_data.
 * NOTE(review): the members (pinfo, mapp_addr — both referenced later in
 * this file) and the closing "} pshmnode_t;" were elided from this extract.
 */
155 typedef struct pshmnode
{
161 /* compare function for the red black tree */
/*
 * Orders tree entries by region name (bounded compare over the fixed-size
 * pshm_name buffer, including the NUL).
 * NOTE(review): the return-type line and the function's return statement(s)
 * were elided from this extract; only the strncmp into "cmp" is visible.
 */
163 pshm_compare(pshm_info_t
*a
, pshm_info_t
*b
)
165 int cmp
= strncmp(a
->pshm_hdr
.pshm_name
, b
->pshm_hdr
.pshm_name
, PSHMNAMLEN
+ 1);
/*
 * Global name-lookup structures. pshmnument counts live entries; the
 * RB_PROTOTYPE/RB_GENERATE macros expand the tree operations used by
 * pshm_cache_search/add/delete below. All access is serialized by
 * the subsystem mutex.
 */
178 * shared memory "paths" are stored in a red black tree for lookup
180 u_long pshmnument
; /* count of entries allocated in the red black tree */
181 RB_HEAD(pshmhead
, internal_pshminfo
) pshm_head
;
182 RB_PROTOTYPE(pshmhead
, internal_pshminfo
, pshm_links
, pshm_compare
)
183 RB_GENERATE(pshmhead
, internal_pshminfo
, pshm_links
, pshm_compare
)
/*
 * Forward declarations: name-cache operations, the fileops implementations
 * for DTYPE_PSXSHM descriptors, and internal helpers. Definitions appear
 * later in this file.
 */
185 /* lookup, add, remove functions */
186 static pshm_info_t
*pshm_cache_search(pshm_info_t
* look
);
187 static void pshm_cache_add(pshm_info_t
*entry
);
188 static void pshm_cache_delete(pshm_info_t
*entry
);
190 static int pshm_read(struct fileproc
*fp
, struct uio
*uio
, int flags
, vfs_context_t ctx
);
191 static int pshm_write(struct fileproc
*fp
, struct uio
*uio
, int flags
, vfs_context_t ctx
);
192 static int pshm_ioctl(struct fileproc
*fp
, u_long com
, caddr_t data
, vfs_context_t ctx
);
193 static int pshm_select(struct fileproc
*fp
, int which
, void *wql
, vfs_context_t ctx
);
194 static int pshm_closefile(struct fileglob
*fg
, vfs_context_t ctx
);
196 static int pshm_kqfilter(struct fileproc
*fp
, struct knote
*kn
,
197 struct kevent_internal_s
*kev
, vfs_context_t ctx
);
199 static int pshm_access(pshm_info_t
*pinfo
, int mode
, kauth_cred_t cred
, proc_t p
);
200 int pshm_cache_purge_all(proc_t p
);
202 static int pshm_unlink_internal(pshm_info_t
*pinfo
);
/*
 * fileops vector installed into fp->f_ops by shm_open() so generic file
 * descriptor operations dispatch to the pshm_* implementations below.
 * NOTE(review): the closing "};" (and any remaining initializer, e.g.
 * .fo_drain) was elided from this extract.
 */
204 static const struct fileops pshmops
= {
205 .fo_type
= DTYPE_PSXSHM
,
206 .fo_read
= pshm_read
,
207 .fo_write
= pshm_write
,
208 .fo_ioctl
= pshm_ioctl
,
209 .fo_select
= pshm_select
,
210 .fo_close
= pshm_closefile
,
211 .fo_kqfilter
= pshm_kqfilter
,
/*
 * Subsystem locking: one mutex guards the name tree, reference counts
 * and all pshm_info_t mutation. Initialized by pshm_lock_init().
 */
216 * Everything here is protected by a single mutex.
218 static lck_grp_t
*psx_shm_subsys_lck_grp
;
219 static lck_grp_attr_t
*psx_shm_subsys_lck_grp_attr
;
220 static lck_attr_t
*psx_shm_subsys_lck_attr
;
221 static lck_mtx_t psx_shm_subsys_mutex
;
223 #define PSHM_SUBSYS_LOCK() lck_mtx_lock(& psx_shm_subsys_mutex)
224 #define PSHM_SUBSYS_UNLOCK() lck_mtx_unlock(& psx_shm_subsys_mutex)
225 #define PSHM_SUBSYS_ASSERT_HELD() LCK_MTX_ASSERT(&psx_shm_subsys_mutex, LCK_MTX_ASSERT_OWNED)
/*
 * One-time initialization of the subsystem mutex (group, attributes,
 * and the mutex itself). Called during kernel startup, before any of
 * the PSHM_SUBSYS_LOCK users can run.
 */
228 __private_extern__
void
229 pshm_lock_init( void )
231 psx_shm_subsys_lck_grp_attr
= lck_grp_attr_alloc_init();
233 psx_shm_subsys_lck_grp
=
234 lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr
);
236 psx_shm_subsys_lck_attr
= lck_attr_alloc_init();
237 lck_mtx_init(&psx_shm_subsys_mutex
, psx_shm_subsys_lck_grp
, psx_shm_subsys_lck_attr
);
241 * Lookup an entry in the cache. Only the name is used from "look".
/*
 * Returns the matching tree entry or NULL. Caller must hold the
 * subsystem mutex (asserted below); the result is only stable while
 * the lock is held or a reference has been taken.
 */
244 pshm_cache_search(pshm_info_t
*look
)
246 PSHM_SUBSYS_ASSERT_HELD();
247 return RB_FIND(pshmhead
, &pshm_head
, look
);
251 * Add a new entry to the cache.
/*
 * Inserts "entry" into the name tree; caller must hold the subsystem
 * mutex and must already have verified the name is not present
 * (a duplicate is a programming error, hence the panic).
 * NOTE(review): the pshmnument increment visible in upstream versions
 * of this function was elided from this extract.
 */
254 pshm_cache_add(pshm_info_t
*entry
)
256 pshm_info_t
*conflict
;
258 PSHM_SUBSYS_ASSERT_HELD();
259 conflict
= RB_INSERT(pshmhead
, &pshm_head
, entry
);
/* RB_INSERT returns the colliding node on duplicate key. */
260 if (conflict
!= NULL
) {
261 panic("pshm_cache_add() found %p", conflict
);
267 * Remove the given entry from the red black tree.
/*
 * Caller must hold the subsystem mutex. The assert enforces that an
 * entry is removed at most once: PSHM_REMOVED is set (by
 * pshm_unlink_internal) only after the entry has left the tree.
 */
270 pshm_cache_delete(pshm_info_t
*entry
)
272 PSHM_SUBSYS_ASSERT_HELD();
273 assert(!(entry
->pshm_flags
& PSHM_REMOVED
));
274 RB_REMOVE(pshmhead
, &pshm_head
, entry
);
279 * Initialize the red black tree.
/* NOTE(review): the body of pshm_cache_init() was elided from this extract. */
282 pshm_cache_init(void)
288 * Invalidate all entries and delete all objects associated with them
289 * XXX - due to the reference counting, this only works if all userland
290 * references to it via file descriptors are also closed already. Is this
291 * known to be called after all user processes are killed?
/*
 * Unlinks every region in the name tree. Restricted to superuser
 * (kauth_cred_issuser check below); on success asserts the tree count
 * dropped to zero, otherwise logs how many entries remain.
 * NOTE(review): lock acquisition, error returns and several statements
 * between the visible lines were elided from this extract.
 */
294 pshm_cache_purge_all(__unused proc_t proc
)
300 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
/* Safe iteration: p may be freed by the unlink inside the loop. */
305 RB_FOREACH_SAFE(p
, pshmhead
, &pshm_head
, tmp
) {
306 error
= pshm_unlink_internal(p
);
307 if (error
) { /* XXX: why give up on failure, should keep going */
311 assert(pshmnument
== 0);
314 PSHM_SUBSYS_UNLOCK();
317 printf("%s: Error %d removing posix shm cache: %ld remain!\n",
318 __func__
, error
, pshmnument
);
324 * Utility to get the shared memory name from userspace and
325 * populate a pshm_info_t with it. If there's a problem
326 * reading the name or it's malformed, will return an error code.
/*
 * Copies at most PSHMNAMLEN characters plus the NUL from user_addr into
 * pinfo->pshm_hdr.pshm_name, rejects empty names (< 2 bytes copied
 * including the terminator), and records the name for auditing.
 * NOTE(review): the copyinstr error check and the return statements were
 * elided from this extract.
 */
329 pshm_get_name(pshm_info_t
*pinfo
, const user_addr_t user_addr
)
331 size_t bytes_copied
= 0;
335 error
= copyinstr(user_addr
, &pinfo
->pshm_hdr
.pshm_name
[0], PSHMNAMLEN
+ 1, &bytes_copied
);
/* copyinstr guarantees NUL termination within the bound on success. */
339 assert(bytes_copied
<= PSHMNAMLEN
+ 1);
340 assert(pinfo
->pshm_hdr
.pshm_name
[bytes_copied
- 1] == 0);
341 if (bytes_copied
< 2) { /* 2: expect at least one character and terminating zero */
344 AUDIT_ARG(text
, &pinfo
->pshm_hdr
.pshm_name
[0]);
349 * Process a shm_open() system call.
/*
 * Creates or opens a named POSIX shared memory region and returns a file
 * descriptor for it. Outline of the visible flow:
 *   1. allocate a pshm_info_t and parse/validate the user-supplied name;
 *   2. allocate a file descriptor (falloc) and a per-fd pshmnode_t;
 *   3. under the subsystem lock, look the name up in the RB cache —
 *      either take a new reference on a pre-existing region (honoring
 *      O_EXCL / O_TRUNC restrictions) or insert the freshly built one;
 *   4. run MAC open/access checks, then wire fp->f_ops to pshmops and
 *      fp->f_data to the new pshmnode.
 * NOTE(review): many error-path statements, labels, braces and returns
 * between the visible lines were elided from this extract; the cleanup
 * tail (starting at "Drop any new reference...") frees whatever was not
 * consumed by the success path.
 */
352 shm_open(proc_t p
, struct shm_open_args
*uap
, int32_t *retval
)
356 pshm_info_t
*pinfo
= NULL
;
357 pshm_info_t
*new_pinfo
= NULL
;
358 pshmnode_t
*new_pnode
= NULL
;
359 struct fileproc
*fp
= NULL
;
361 int cmode
= uap
->mode
;
362 bool incache
= false;
363 bool have_label
= false;
365 AUDIT_ARG(fflags
, uap
->oflag
);
366 AUDIT_ARG(mode
, uap
->mode
);
369 * Allocate data structures we need. We parse the userspace name into
370 * a pshm_info_t, even when we don't need to O_CREAT.
372 MALLOC(new_pinfo
, pshm_info_t
*, sizeof(pshm_info_t
), M_SHM
, M_WAITOK
| M_ZERO
);
373 if (new_pinfo
== NULL
) {
379 * Get and check the name.
381 error
= pshm_get_name(new_pinfo
, uap
->name
);
387 * Attempt to allocate a new fp. If unsuccessful, the fp will be
388 * left unmodified (NULL).
390 error
= falloc(p
, &fp
, &indx
, vfs_context_current());
/* Convert open(2)-style O_* flags to kernel FREAD/FWRITE form. */
397 fmode
= FFLAGS(uap
->oflag
);
398 if ((fmode
& (FREAD
| FWRITE
)) == 0) {
404 * Will need a new pnode for the file pointer
406 MALLOC(new_pnode
, pshmnode_t
*, sizeof(pshmnode_t
), M_SHM
, M_WAITOK
| M_ZERO
);
407 if (new_pnode
== NULL
) {
413 * If creating a new segment, fill in its information.
414 * If we find a pre-exisitng one in cache lookup we'll just toss this one later.
416 if (fmode
& O_CREAT
) {
417 new_pinfo
->pshm_usecount
= 2; /* one each for: file pointer, shm_unlink */
418 new_pinfo
->pshm_length
= 0;
419 new_pinfo
->pshm_mode
= cmode
;
420 new_pinfo
->pshm_uid
= kauth_getuid();
421 new_pinfo
->pshm_gid
= kauth_getgid();
422 SLIST_INIT(&new_pinfo
->pshm_mobjs
);
424 mac_posixshm_label_init(&new_pinfo
->pshm_hdr
);
426 error
= mac_posixshm_check_create(kauth_cred_get(), new_pinfo
->pshm_hdr
.pshm_name
);
434 * Look up the named shared memory segment in the cache, possibly adding
439 pinfo
= pshm_cache_search(new_pinfo
);
443 /* Get a new reference to go with the file pointer.*/
444 error
= pshm_ref(pinfo
);
446 pinfo
= NULL
; /* so cleanup code doesn't deref */
450 /* can't have pre-existing if O_EXCL */
451 if ((fmode
& (O_CREAT
| O_EXCL
)) == (O_CREAT
| O_EXCL
)) {
456 /* O_TRUNC is only valid while length is not yet set */
457 if ((fmode
& O_TRUNC
) &&
458 (pinfo
->pshm_flags
& (PSHM_ALLOCATING
| PSHM_ALLOCATED
))) {
465 /* if it wasn't found, must have O_CREAT */
466 if (!(fmode
& O_CREAT
)) {
471 /* Add the new region to the cache. */
473 pshm_cache_add(pinfo
);
474 new_pinfo
= NULL
; /* so that it doesn't get free'd */
477 PSHM_SUBSYS_UNLOCK();
480 * Check we have permission to access any pre-existing segment
483 if (fmode
& O_CREAT
) {
484 AUDIT_ARG(posix_ipc_perm
, pinfo
->pshm_uid
,
485 pinfo
->pshm_gid
, pinfo
->pshm_mode
);
488 if ((error
= mac_posixshm_check_open(kauth_cred_get(), &pinfo
->pshm_hdr
, fmode
))) {
492 if ((error
= pshm_access(pinfo
, fmode
, kauth_cred_get(), p
))) {
497 mac_posixshm_label_associate(kauth_cred_get(), &pinfo
->pshm_hdr
, pinfo
->pshm_hdr
.pshm_name
);
/* Success path: publish the descriptor. */
502 fp
->f_flag
= fmode
& FMASK
;
503 fp
->f_ops
= &pshmops
;
504 new_pnode
->pinfo
= pinfo
;
505 fp
->f_data
= (caddr_t
)new_pnode
;
506 *fdflags(p
, indx
) |= UF_EXCLOSE
;
507 procfdtbl_releasefd(p
, indx
, NULL
);
508 fp_drop(p
, indx
, fp
, 1);
516 PSHM_SUBSYS_UNLOCK();
/* Error cleanup: undo whatever the failing path left behind. */
519 * Drop any new reference to a pre-existing shared memory region.
521 if (incache
&& pinfo
!= NULL
) {
524 PSHM_SUBSYS_UNLOCK();
528 * Delete any allocated unused data structures.
530 if (new_pnode
!= NULL
) {
531 FREE(new_pnode
, M_SHM
);
535 fp_free(p
, indx
, fp
);
539 if (new_pinfo
!= NULL
) {
542 mac_posixshm_label_destroy(&new_pinfo
->pshm_hdr
);
545 FREE(new_pinfo
, M_SHM
);
552 * The truncate call associates memory with shared memory region. It can
553 * only be succesfully done with a non-zero length once per shared memory region.
/*
 * ftruncate() backend for POSIX shm: allocates the backing Mach memory
 * objects for a region, in chunks of at most ANON_MAX_SIZE, linking each
 * onto pinfo->pshm_mobjs. PSHM_ALLOCATING guards against a concurrent
 * truncate while the lock is dropped for allocation; on full success the
 * flags transition to PSHM_ALLOCATED and pshm_length is set.
 * NOTE(review): the function's return-type/signature head, lock
 * re-acquisitions, several error branches and the final return/switch
 * scaffolding were elided from this extract.
 */
561 __unused
int32_t *retval
)
566 mem_entry_name_port_t mem_object
;
567 mach_vm_size_t total_size
, alloc_size
;
568 memory_object_size_t mosize
;
569 pshm_mobj_t
*pshmobj
, *pshmobj_last
;
573 user_map
= current_map();
575 if (fp
->f_type
!= DTYPE_PSXSHM
) {
581 * Can't enforce this yet, some third party tools don't
582 * specify O_RDWR like they ought to. See radar 48692182
584 /* ftruncate() requires write permission */
585 if (!(fp
->f_flag
& FWRITE
)) {
591 if (((pnode
= (pshmnode_t
*)fp
->f_data
)) == NULL
) {
592 PSHM_SUBSYS_UNLOCK();
596 if ((pinfo
= pnode
->pinfo
) == NULL
) {
597 PSHM_SUBSYS_UNLOCK();
601 /* We only allow one ftruncate() per lifetime of the shm object. */
602 if (pinfo
->pshm_flags
& (PSHM_ALLOCATING
| PSHM_ALLOCATED
)) {
603 PSHM_SUBSYS_UNLOCK();
608 error
= mac_posixshm_check_truncate(kauth_cred_get(), &pinfo
->pshm_hdr
, length
);
610 PSHM_SUBSYS_UNLOCK();
615 * Grab an extra reference, so we can drop the lock while allocating and
616 * ensure the objects don't disappear.
618 error
= pshm_ref(pinfo
);
620 PSHM_SUBSYS_UNLOCK();
624 /* set ALLOCATING, so another truncate can't start */
625 pinfo
->pshm_flags
|= PSHM_ALLOCATING
;
/* Round the requested length up to the target map's page size. */
626 total_size
= vm_map_round_page(length
, vm_map_page_mask(user_map
));
629 for (alloc_size
= 0; alloc_size
< total_size
; alloc_size
+= mosize
) {
630 PSHM_SUBSYS_UNLOCK();
632 /* get a memory object back some of the shared memory */
633 mosize
= MIN(total_size
- alloc_size
, ANON_MAX_SIZE
);
634 kret
= mach_make_memory_entry_64(VM_MAP_NULL
, &mosize
, 0,
635 MAP_MEM_NAMED_CREATE
| VM_PROT_DEFAULT
, &mem_object
, 0);
637 if (kret
!= KERN_SUCCESS
) {
641 /* get a list entry to track the memory object */
642 MALLOC(pshmobj
, pshm_mobj_t
*, sizeof(pshm_mobj_t
), M_SHM
, M_WAITOK
);
643 if (pshmobj
== NULL
) {
644 kret
= KERN_NO_SPACE
;
/* Don't leak the just-created memory entry on allocation failure. */
645 mach_memory_entry_port_release(mem_object
);
652 /* link in the new entry */
653 pshmobj
->pshmo_memobject
= (void *)mem_object
;
654 pshmobj
->pshmo_size
= mosize
;
655 SLIST_NEXT(pshmobj
, pshmo_next
) = NULL
;
657 if (pshmobj_last
== NULL
) {
658 SLIST_FIRST(&pinfo
->pshm_mobjs
) = pshmobj
;
660 SLIST_INSERT_AFTER(pshmobj_last
, pshmobj
, pshmo_next
);
662 pshmobj_last
= pshmobj
;
665 /* all done, change flags to ALLOCATED and return success */
666 pinfo
->pshm_flags
|= PSHM_ALLOCATED
;
667 pinfo
->pshm_flags
&= ~(PSHM_ALLOCATING
);
668 pinfo
->pshm_length
= total_size
;
669 pshm_deref(pinfo
); /* drop the "allocating" reference */
670 PSHM_SUBSYS_UNLOCK();
674 /* clean up any partially allocated objects */
676 while ((pshmobj
= SLIST_FIRST(&pinfo
->pshm_mobjs
)) != NULL
) {
677 SLIST_REMOVE_HEAD(&pinfo
->pshm_mobjs
, pshmo_next
);
678 PSHM_SUBSYS_UNLOCK();
679 mach_memory_entry_port_release(pshmobj
->pshmo_memobject
);
680 FREE(pshmobj
, M_SHM
);
683 pinfo
->pshm_flags
&= ~PSHM_ALLOCATING
;
684 pshm_deref(pinfo
); /* drop the "allocating" reference */
685 PSHM_SUBSYS_UNLOCK();
/* Map selected Mach errors to errno values (switch head elided). */
688 case KERN_INVALID_ADDRESS
:
691 case KERN_PROTECTION_FAILURE
:
/*
 * fstat() backend: fills a caller-supplied struct stat or struct stat64
 * (selected by isstat64) with the region's mode/uid/gid/length after a
 * MAC stat check. Only the subset of stat fields meaningful for shm is
 * populated; everything else is zeroed.
 * NOTE(review): the return-type line, lock acquisition, the
 * isstat64 if/else scaffolding and the return statements were elided
 * from this extract.
 */
699 pshm_stat(pshmnode_t
*pnode
, void *ub
, int isstat64
)
701 struct stat
*sb
= (struct stat
*)0; /* warning avoidance ; protected by isstat64 */
702 struct stat64
* sb64
= (struct stat64
*)0; /* warning avoidance ; protected by isstat64 */
709 if ((pinfo
= pnode
->pinfo
) == NULL
) {
710 PSHM_SUBSYS_UNLOCK();
715 error
= mac_posixshm_check_stat(kauth_cred_get(), &pinfo
->pshm_hdr
);
717 PSHM_SUBSYS_UNLOCK();
/* 64-bit stat variant */
723 sb64
= (struct stat64
*)ub
;
724 bzero(sb64
, sizeof(struct stat64
));
725 sb64
->st_mode
= pinfo
->pshm_mode
;
726 sb64
->st_uid
= pinfo
->pshm_uid
;
727 sb64
->st_gid
= pinfo
->pshm_gid
;
728 sb64
->st_size
= pinfo
->pshm_length
;
/* 32-bit stat variant */
730 sb
= (struct stat
*)ub
;
731 bzero(sb
, sizeof(struct stat
));
732 sb
->st_mode
= pinfo
->pshm_mode
;
733 sb
->st_uid
= pinfo
->pshm_uid
;
734 sb
->st_gid
= pinfo
->pshm_gid
;
735 sb
->st_size
= pinfo
->pshm_length
;
737 PSHM_SUBSYS_UNLOCK();
743 * Verify access to a shared memory region.
/*
 * Maps the open mode (FREAD/FWRITE) onto owner-class permission bits and
 * defers to the generic POSIX credential check against the region's
 * uid/gid/mode. Superuser is granted access unconditionally.
 * NOTE(review): the return-type line and the body of the suser branch
 * were elided from this extract.
 */
746 pshm_access(pshm_info_t
*pinfo
, int mode
, kauth_cred_t cred
, __unused proc_t p
)
748 int mode_req
= ((mode
& FREAD
) ? S_IRUSR
: 0) |
749 ((mode
& FWRITE
) ? S_IWUSR
: 0);
751 /* Otherwise, user id 0 always gets access. */
752 if (!suser(cred
, NULL
)) {
756 return posix_cred_access(cred
, pinfo
->pshm_uid
, pinfo
->pshm_gid
, pinfo
->pshm_mode
, mode_req
);
/*
 * mmap() backend for POSIX shm. Visible flow:
 *   1. validate the request (non-zero size, MAP_SHARED required, writes
 *      only if the fd was opened FWRITE, fully allocated region, the
 *      requested [pos, pos+len) window inside pshm_length with overflow
 *      checked via os_add_overflow);
 *   2. take an extra region reference, drop the lock, reserve the whole
 *      address range with one vm_map_enter_mem_object call;
 *   3. walk the pshm_mobjs list, overlaying each backing memory object
 *      over its slice of the reservation (VM_FLAGS_FIXED|OVERWRITE);
 *   4. record the mapped address in the pshmnode, set PSHM_MAPPED/INUSE,
 *      deref, and on failure tear down any partial mapping.
 * NOTE(review): the function signature head, several argument lists of
 * the vm_map_enter_mem_object calls, error branches, braces and the final
 * return/switch scaffolding were elided from this extract.
 */
762 struct mmap_args
*uap
,
767 vm_map_offset_t user_addr
= (vm_map_offset_t
)uap
->addr
;
768 vm_map_size_t user_size
= (vm_map_size_t
)uap
->len
;
769 vm_map_offset_t user_start_addr
;
770 vm_map_size_t map_size
, mapped_size
;
771 int prot
= uap
->prot
;
772 int max_prot
= VM_PROT_DEFAULT
;
773 int flags
= uap
->flags
;
774 vm_object_offset_t file_pos
= (vm_object_offset_t
)uap
->pos
;
775 vm_object_offset_t map_pos
;
778 vm_map_kernel_flags_t vmk_flags
;
780 kern_return_t kret
= KERN_SUCCESS
;
783 pshm_mobj_t
*pshmobj
;
786 if (user_size
== 0) {
790 if (!(flags
& MAP_SHARED
)) {
794 /* Can't allow write permission if the shm_open() didn't allow them. */
795 if (!(fp
->f_flag
& FWRITE
)) {
796 if (prot
& VM_PROT_WRITE
) {
799 max_prot
&= ~VM_PROT_WRITE
;
803 pnode
= (pshmnode_t
*)fp
->f_data
;
805 PSHM_SUBSYS_UNLOCK();
809 pinfo
= pnode
->pinfo
;
811 PSHM_SUBSYS_UNLOCK();
815 if (!(pinfo
->pshm_flags
& PSHM_ALLOCATED
)) {
816 PSHM_SUBSYS_UNLOCK();
820 if (user_size
> (vm_map_size_t
)pinfo
->pshm_length
) {
821 PSHM_SUBSYS_UNLOCK();
/* Overflow-safe check that pos + len stays inside the region. */
825 vm_map_size_t end_pos
= 0;
826 if (os_add_overflow(user_size
, file_pos
, &end_pos
)) {
827 PSHM_SUBSYS_UNLOCK();
830 if (end_pos
> (vm_map_size_t
)pinfo
->pshm_length
) {
831 PSHM_SUBSYS_UNLOCK();
835 pshmobj
= SLIST_FIRST(&pinfo
->pshm_mobjs
);
836 if (pshmobj
== NULL
) {
837 PSHM_SUBSYS_UNLOCK();
842 error
= mac_posixshm_check_mmap(kauth_cred_get(), &pinfo
->pshm_hdr
, prot
, flags
);
844 PSHM_SUBSYS_UNLOCK();
848 /* Grab an extra reference, so we can drop the lock while mapping. */
849 error
= pshm_ref(pinfo
);
851 PSHM_SUBSYS_UNLOCK();
855 PSHM_SUBSYS_UNLOCK();
856 user_map
= current_map();
858 if (!(flags
& MAP_FIXED
)) {
859 alloc_flags
= VM_FLAGS_ANYWHERE
;
860 user_addr
= vm_map_round_page(user_addr
,
861 vm_map_page_mask(user_map
));
/* MAP_FIXED requires a page-aligned address. */
863 if (user_addr
!= vm_map_round_page(user_addr
,
864 vm_map_page_mask(user_map
))) {
870 * We do not get rid of the existing mappings here because
871 * it wouldn't be atomic (see comment in mmap()). We let
872 * Mach VM know that we want it to replace any existing
873 * mapping with the new one.
875 alloc_flags
= VM_FLAGS_FIXED
| VM_FLAGS_OVERWRITE
;
880 vmk_flags
= VM_MAP_KERNEL_FLAGS_NONE
;
881 /* reserve the entire space first... */
882 kret
= vm_map_enter_mem_object(user_map
,
895 user_start_addr
= user_addr
;
896 if (kret
!= KERN_SUCCESS
) {
900 /* Now overwrite with the real mappings. */
901 for (map_pos
= 0, pshmobj
= SLIST_FIRST(&pinfo
->pshm_mobjs
);
903 map_pos
+= pshmobj
->pshmo_size
, pshmobj
= SLIST_NEXT(pshmobj
, pshmo_next
)) {
904 if (pshmobj
== NULL
) {
905 /* nothing there to map !? */
/* Skip objects entirely before the requested file offset. */
908 if (file_pos
>= map_pos
+ pshmobj
->pshmo_size
) {
911 map_size
= pshmobj
->pshmo_size
- (file_pos
- map_pos
);
912 if (map_size
> user_size
) {
913 map_size
= user_size
;
915 vmk_flags
= VM_MAP_KERNEL_FLAGS_NONE
;
916 kret
= vm_map_enter_mem_object(
921 VM_FLAGS_FIXED
| VM_FLAGS_OVERWRITE
,
924 pshmobj
->pshmo_memobject
,
930 if (kret
!= KERN_SUCCESS
) {
934 user_addr
+= map_size
;
935 user_size
-= map_size
;
936 mapped_size
+= map_size
;
937 file_pos
+= map_size
;
941 pnode
->mapp_addr
= user_start_addr
;
942 pinfo
->pshm_flags
|= (PSHM_MAPPED
| PSHM_INUSE
);
943 PSHM_SUBSYS_UNLOCK();
946 pshm_deref(pinfo
); /* drop the extra reference we had while mapping. */
947 PSHM_SUBSYS_UNLOCK();
/* Undo any partial overlay if one of the mappings failed. */
948 if (kret
!= KERN_SUCCESS
) {
949 if (mapped_size
!= 0) {
950 (void) mach_vm_deallocate(current_map(),
958 *retval
= (user_start_addr
+ pageoff
);
/* Map selected Mach errors to errno values (switch head elided). */
960 case KERN_INVALID_ADDRESS
:
963 case KERN_PROTECTION_FAILURE
:
971 * Remove a shared memory region name from the name lookup cache.
/*
 * Caller must hold the subsystem mutex. Deletes the entry from the RB
 * tree, marks it PSHM_REMOVED so later lookups/asserts know it is gone,
 * then releases the reference that shm_open(O_CREAT) reserved for
 * shm_unlink.
 * NOTE(review): the return-type line, braces, the pshm_deref call and
 * the return statement were elided from this extract.
 */
974 pshm_unlink_internal(pshm_info_t
*pinfo
)
976 PSHM_SUBSYS_ASSERT_HELD();
982 pshm_cache_delete(pinfo
);
983 pinfo
->pshm_flags
|= PSHM_REMOVED
;
985 /* release the "unlink" reference */
/*
 * shm_unlink() system call: look the user-supplied name up in the cache,
 * run MAC and permission checks, then remove the name via
 * pshm_unlink_internal(). The region itself persists until the last
 * file-descriptor reference is dropped.
 * NOTE(review): error-path gotos, lock acquisition and the return
 * statement were elided from this extract.
 */
992 shm_unlink(proc_t p
, struct shm_unlink_args
*uap
, __unused
int32_t *retval
)
995 pshm_info_t
*pinfo
= NULL
;
996 pshm_info_t
*name_pinfo
= NULL
;
999 * Get the name from user args.
1001 MALLOC(name_pinfo
, pshm_info_t
*, sizeof(pshm_info_t
), M_SHM
, M_WAITOK
| M_ZERO
);
1002 if (name_pinfo
== NULL
) {
1006 error
= pshm_get_name(name_pinfo
, uap
->name
);
1013 pinfo
= pshm_cache_search(name_pinfo
);
1015 if (pinfo
== NULL
) {
1021 error
= mac_posixshm_check_unlink(kauth_cred_get(), &pinfo
->pshm_hdr
, name_pinfo
->pshm_hdr
.pshm_name
);
1027 AUDIT_ARG(posix_ipc_perm
, pinfo
->pshm_uid
, pinfo
->pshm_gid
, pinfo
->pshm_mode
);
1030 * Following file semantics, unlink should normally be allowed
1031 * for users with write permission only. We also allow the creator
1032 * of a segment to be able to delete, even w/o write permission.
1033 * That's because there's no equivalent of write permission for the
1034 * directory containing a file.
1036 error
= pshm_access(pinfo
, FWRITE
, kauth_cred_get(), p
);
1037 if (error
!= 0 && pinfo
->pshm_uid
!= kauth_getuid()) {
1041 error
= pshm_unlink_internal(pinfo
);
1043 PSHM_SUBSYS_UNLOCK();
1045 if (name_pinfo
!= NULL
) {
1046 FREE(name_pinfo
, M_SHM
);
1052 * Add a new reference to a shared memory region.
1053 * Fails if we will overflow the reference counter.
/*
 * Caller must hold the subsystem mutex. Refuses to increment past
 * PSHM_MAXCOUNT (UINT_MAX) so the counter cannot wrap.
 * NOTE(review): the return-type line, the error return in the overflow
 * branch and the success return were elided from this extract.
 */
1056 pshm_ref(pshm_info_t
*pinfo
)
1058 PSHM_SUBSYS_ASSERT_HELD();
1060 if (pinfo
->pshm_usecount
== PSHM_MAXCOUNT
) {
1063 pinfo
->pshm_usecount
++;
1068 * Dereference a pshm_info_t. Delete the region if
1069 * this was the final reference count.
/*
 * Caller must hold the subsystem mutex. A drop to zero triggers full
 * teardown: destroy the MAC label, drop the lock, release every backing
 * Mach memory object, and free the pinfo. Underflow is a fatal
 * programming error (panic).
 * NOTE(review): braces, lock re-acquisition and the final FREE of the
 * pinfo were elided from this extract.
 */
1072 pshm_deref(pshm_info_t
*pinfo
)
1074 pshm_mobj_t
*pshmobj
;
1076 PSHM_SUBSYS_ASSERT_HELD();
1077 if (pinfo
->pshm_usecount
== 0) {
1078 panic("negative usecount in pshm_close\n");
1080 pinfo
->pshm_usecount
--; /* release this fd's reference */
1082 if (pinfo
->pshm_usecount
== 0) {
1084 mac_posixshm_label_destroy(&pinfo
->pshm_hdr
);
1086 PSHM_SUBSYS_UNLOCK();
1089 * Release references to any backing objects.
1091 while ((pshmobj
= SLIST_FIRST(&pinfo
->pshm_mobjs
)) != NULL
) {
1092 SLIST_REMOVE_HEAD(&pinfo
->pshm_mobjs
, pshmo_next
);
1093 mach_memory_entry_port_release(pshmobj
->pshmo_memobject
);
1094 FREE(pshmobj
, M_SHM
);
1097 /* free the pinfo itself */
/*
 * fileops close: detach the pshmnode from the fileglob (fg_data = NULL
 * first, to win any race with a concurrent close), drop the node's
 * region reference, then free the node outside the lock.
 * NOTE(review): the return-type line, lock acquisition, the FREE of
 * pnode and the return statement were elided from this extract.
 */
1104 /* vfs_context_t passed to match prototype for struct fileops */
1106 pshm_closefile(struct fileglob
*fg
, __unused vfs_context_t ctx
)
1113 pnode
= (pshmnode_t
*)fg
->fg_data
;
1114 if (pnode
!= NULL
) {
1116 fg
->fg_data
= NULL
; /* set fg_data to NULL to avoid racing close()es */
1117 if (pnode
->pinfo
!= NULL
) {
1118 pshm_deref(pnode
->pinfo
);
1119 pnode
->pinfo
= NULL
;
1123 PSHM_SUBSYS_UNLOCK();
1124 if (pnode
!= NULL
) {
/*
 * Unsupported fileops for shm descriptors: read/write/ioctl/select are
 * not meaningful on POSIX shared memory (access is via mmap only).
 * NOTE(review): the return-type lines and the function bodies (which
 * presumably return an error — TODO confirm, e.g. ENOTSUP) were elided
 * from this extract; only the signatures are visible.
 */
1132 pshm_read(__unused
struct fileproc
*fp
, __unused
struct uio
*uio
,
1133 __unused
int flags
, __unused vfs_context_t ctx
)
1139 pshm_write(__unused
struct fileproc
*fp
, __unused
struct uio
*uio
,
1140 __unused
int flags
, __unused vfs_context_t ctx
)
1146 pshm_ioctl(__unused
struct fileproc
*fp
, __unused u_long com
,
1147 __unused caddr_t data
, __unused vfs_context_t ctx
)
1153 pshm_select(__unused
struct fileproc
*fp
, __unused
int which
, __unused
void *wql
,
1154 __unused vfs_context_t ctx
)
/*
 * kqueue is not supported on shm descriptors: mark the knote as an
 * error event carrying ENOTSUP so the attach fails cleanly.
 * NOTE(review): the return-type line and the return statement were
 * elided from this extract.
 */
1160 pshm_kqfilter(__unused
struct fileproc
*fp
, struct knote
*kn
,
1161 __unused
struct kevent_internal_s
*kev
, __unused vfs_context_t ctx
)
1163 kn
->kn_flags
= EV_ERROR
;
1164 kn
->kn_data
= ENOTSUP
;
/*
 * proc_pidinfo() support: copy a region's stat-like attributes, its
 * mapped address (as recorded at mmap time) and its name into the
 * caller-supplied struct pshm_info.
 * NOTE(review): the return-type line, lock acquisition and the return
 * statement were elided from this extract.
 */
1169 fill_pshminfo(pshmnode_t
* pshm
, struct pshm_info
* info
)
1172 struct vinfo_stat
*sb
;
1175 if ((pinfo
= pshm
->pinfo
) == NULL
) {
1176 PSHM_SUBSYS_UNLOCK();
1180 sb
= &info
->pshm_stat
;
1182 bzero(sb
, sizeof(struct vinfo_stat
));
1183 sb
->vst_mode
= pinfo
->pshm_mode
;
1184 sb
->vst_uid
= pinfo
->pshm_uid
;
1185 sb
->vst_gid
= pinfo
->pshm_gid
;
1186 sb
->vst_size
= pinfo
->pshm_length
;
1188 info
->pshm_mappaddr
= pshm
->mapp_addr
;
1189 bcopy(&pinfo
->pshm_hdr
.pshm_name
[0], &info
->pshm_name
[0], PSHMNAMLEN
+ 1);
1191 PSHM_SUBSYS_UNLOCK();
/*
 * MAC hook: associate the shm region's label with a vnode-style label
 * for the given open file. Looks up the region through fp->f_data under
 * the subsystem lock.
 * NOTE(review): the return-type line, guards around the pshm pointer,
 * the trailing arguments of the mac_posixshm_vnode_label_associate call
 * (presumably including vp — TODO confirm) and the closing braces were
 * elided from this extract.
 */
1197 pshm_label_associate(struct fileproc
*fp
, struct vnode
*vp
, vfs_context_t ctx
)
1203 pnode
= (pshmnode_t
*)fp
->f_data
;
1204 if (pnode
!= NULL
) {
1205 pshm
= pnode
->pinfo
;
1207 mac_posixshm_vnode_label_associate(
1208 vfs_context_ucred(ctx
), &pshm
->pshm_hdr
, pshm
->pshm_label
,
1212 PSHM_SUBSYS_UNLOCK();