/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 *
 * Copyright (c) 2005-2006 SPARTA, Inc.
 */
#include <sys/appleapiopts.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

#include <security/mac_framework.h>

#include <security/audit/audit.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <kern/locks.h>
#include <os/overflow.h>
/* Uncomment this line to see MAC debugging output. */
/* #define MAC_DEBUG */
#if CONFIG_MACF_DEBUG
#define MPRINTF(a)      printf a
#else
#define MPRINTF(a)
#endif

static int shminit(void);
static lck_grp_t       *sysv_shm_subsys_lck_grp;
static lck_grp_attr_t  *sysv_shm_subsys_lck_grp_attr;
static lck_attr_t      *sysv_shm_subsys_lck_attr;
static lck_mtx_t        sysv_shm_subsys_mutex;

#define SYSV_SHM_SUBSYS_LOCK()   lck_mtx_lock(&sysv_shm_subsys_mutex)
#define SYSV_SHM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_shm_subsys_mutex)
static int oshmctl(void *p, void *uap, void *retval);
static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, int *retval);
static int shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval);
static void shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out);
static void shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *const shmcalls[] = {
    (sy_call_t *)shmat, (sy_call_t *)oshmctl,
    (sy_call_t *)shmdt, (sy_call_t *)shmget,
    (sy_call_t *)shmctl
};
#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_kernel *shmsegs;   /* 64 bit version */
static int shm_inited = 0;
/*
 * Since anonymous memory chunks are limited to ANON_MAX_SIZE bytes,
 * we have to keep a list of chunks when we want to handle a shared memory
 * segment bigger than ANON_MAX_SIZE.
 * Each chunk points to a VM named entry of up to ANON_MAX_SIZE bytes
 * of anonymous memory.
 */
struct shm_handle {
    void *shm_object;                       /* named entry for this chunk */
    memory_object_size_t shm_handle_size;   /* size of this chunk */
    struct shm_handle *shm_handle_next;     /* next chunk */
};
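/*
 * Illustrative layout (a sketch, not part of the build): assuming for the
 * example that ANON_MAX_SIZE were 4 GB, a 10 GB segment would be carried
 * as a chain of three chunks hanging off shm_internal,
 *
 *   shm_internal -> [4 GB entry] -> [4 GB entry] -> [2 GB entry] -> NULL
 *
 * and shmat() maps each chunk's named entry back-to-back at the attach
 * address until the whole rounded segment size is covered.
 */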
struct shmmap_state {
    mach_vm_address_t va;   /* user address */
    int shmid;              /* segment id */
};
static void shm_deallocate_segment(struct shmid_kernel *);
static int shm_find_segment_by_key(key_t);
static struct shmid_kernel *shm_find_segment_by_shmid(int);
static int shm_delete_mapping(struct proc *, struct shmmap_state *, int);
#ifdef __APPLE_API_PRIVATE
#define DEFAULT_SHMMAX  (4 * 1024 * 1024)
#define DEFAULT_SHMMIN  1
#define DEFAULT_SHMMNI  32
#define DEFAULT_SHMSEG  8
#define DEFAULT_SHMALL  1024

struct shminfo shminfo = {
    .shmmax = DEFAULT_SHMMAX,
    .shmmin = DEFAULT_SHMMIN,
    .shmmni = DEFAULT_SHMMNI,
    .shmseg = DEFAULT_SHMSEG,
    .shmall = DEFAULT_SHMALL
};
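/*
 * Quick sanity check on the defaults above (illustration only): shmall is
 * accounted in pages (see the btoc() uses below) while shmmax is in bytes,
 * so with a 4 KB page size DEFAULT_SHMALL covers 1024 * 4096 = 4 MB, which
 * matches DEFAULT_SHMMAX exactly.
 */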
#define SHMID_IS_VALID(x) ((x) >= 0)
#define SHMID_UNALLOCATED (-1)
#define SHMID_SENTINEL    (-2)

#endif /* __APPLE_API_PRIVATE */
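/*
 * Example of the per-process attach map these values describe (a sketch,
 * assuming shminfo.shmseg == 8): p->vm_shm points at an array of
 * shmseg + 1 struct shmmap_state entries, where a used slot holds a valid
 * (non-negative) shmid, an unused slot holds SHMID_UNALLOCATED, and the
 * extra final slot holds SHMID_SENTINEL so the walkers in shmat()/shmdt()
 * know where the array ends:
 *
 *   [ {va, 65537}, {0, -1}, {0, -1}, ..., {0, -2} ]
 */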
void sysv_shm_lock_init(void);

static __inline__ time_t
sysv_shmtime(void)
{
    struct timeval tv;
    microtime(&tv);
    return tv.tv_sec;
}
/*
 * This conversion is safe, since if we are converting for a 32 bit process,
 * then its value of (struct shmid_ds)->shm_segsz will never exceed 4G.
 *
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out)
{
    out->shm_perm = in->shm_perm;
    out->shm_segsz = in->shm_segsz;
    out->shm_lpid = in->shm_lpid;
    out->shm_cpid = in->shm_cpid;
    out->shm_nattch = in->shm_nattch;
    out->shm_atime = in->shm_atime;
    out->shm_dtime = in->shm_dtime;
    out->shm_ctime = in->shm_ctime;
    out->shm_internal = CAST_DOWN_EXPLICIT(int, in->shm_internal);
}
/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out)
{
    out->shm_internal = in->shm_internal;
    out->shm_ctime = in->shm_ctime;
    out->shm_dtime = in->shm_dtime;
    out->shm_atime = in->shm_atime;
    out->shm_nattch = in->shm_nattch;
    out->shm_cpid = in->shm_cpid;
    out->shm_lpid = in->shm_lpid;
    out->shm_segsz = in->shm_segsz;
    out->shm_perm = in->shm_perm;
}
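/*
 * Worked illustration of the overlap note above (assumes the usual layout
 * with shm_perm first and shm_internal last): if `in` and `out` were to
 * share a buffer, every 32 bit source field sits at an offset no greater
 * than its 64 bit destination, so writing the fields from the last one
 * back to the first means each destination write lands at or beyond any
 * source bytes that have not been read yet -- the same trick memmove()
 * uses when it copies backwards for a destination above the source.
 */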
static int
shm_find_segment_by_key(key_t key)
{
    int i;

    for (i = 0; i < shminfo.shmmni; i++) {
        if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
            shmsegs[i].u.shm_perm._key == key) {
            return i;
        }
    }
    return -1;
}
static struct shmid_kernel *
shm_find_segment_by_shmid(int shmid)
{
    int segnum;
    struct shmid_kernel *shmseg;

    segnum = IPCID_TO_IX(shmid);
    if (segnum < 0 || segnum >= shminfo.shmmni) {
        return NULL;
    }
    shmseg = &shmsegs[segnum];
    if ((shmseg->u.shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
        != SHMSEG_ALLOCATED ||
        shmseg->u.shm_perm._seq != IPCID_TO_SEQ(shmid)) {
        return NULL;
    }
    return shmseg;
}
static void
shm_deallocate_segment(struct shmid_kernel *shmseg)
{
    struct shm_handle *shm_handle, *shm_handle_next;
    mach_vm_size_t size;

    for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
        shm_handle != NULL;
        shm_handle = shm_handle_next) {
        shm_handle_next = shm_handle->shm_handle_next;
        mach_memory_entry_port_release(shm_handle->shm_object);
        FREE(shm_handle, M_SHM);
    }
    shmseg->u.shm_internal = USER_ADDR_NULL;    /* tunnel */
    size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map()));
    shm_committed -= btoc(size);
    shm_nused--;
    shmseg->u.shm_perm.mode = SHMSEG_FREE;
    /* Reset the MAC label */
    mac_sysvshm_label_recycle(shmseg);
}
static int
shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s,
    int deallocate)
{
    struct shmid_kernel *shmseg;
    int segnum, result;
    mach_vm_size_t size;

    segnum = IPCID_TO_IX(shmmap_s->shmid);
    shmseg = &shmsegs[segnum];
    size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map())); /* XXX done for us? */
    if (deallocate) {
        result = mach_vm_deallocate(current_map(), shmmap_s->va, size);
        if (result != KERN_SUCCESS) {
            return EINVAL;
        }
    }
    shmmap_s->shmid = SHMID_UNALLOCATED;
    shmseg->u.shm_dtime = sysv_shmtime();
    if ((--shmseg->u.shm_nattch <= 0) &&
        (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
        shm_deallocate_segment(shmseg);
        shm_last_free = segnum;
    }
    return 0;
}
int
shmdt(struct proc *p, struct shmdt_args *uap, int32_t *retval)
{
    struct shmid_kernel *shmsegptr;
    struct shmmap_state *shmmap_s;
    int i;
    int shmdtret = 0;

    AUDIT_ARG(svipc_addr, uap->shmaddr);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmdtret = shminit())) {
        goto shmdt_out;
    }

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    if (shmmap_s == NULL) {
        shmdtret = EINVAL;
        goto shmdt_out;
    }

    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        if (SHMID_IS_VALID(shmmap_s->shmid) &&
            shmmap_s->va == (mach_vm_offset_t)uap->shmaddr) {
            break;
        }
    }

    if (!SHMID_IS_VALID(shmmap_s->shmid)) {
        shmdtret = EINVAL;
        goto shmdt_out;
    }

    /*
     * XXX: It might be useful to move this into the shm_delete_mapping
     * function
     */
    shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
    shmdtret = mac_sysvshm_check_shmdt(kauth_cred_get(), shmsegptr);
    if (shmdtret) {
        goto shmdt_out;
    }

    i = shm_delete_mapping(p, shmmap_s, 1);

    if (i == 0) {
        *retval = 0;
    }
    shmdtret = i;
shmdt_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmdtret;
}
int
shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval)
{
    int error, i, flags;
    struct shmid_kernel *shmseg;
    struct shmmap_state *shmmap_s = NULL;
    struct shm_handle *shm_handle;
    mach_vm_address_t attach_va;    /* attach address in/out */
    mach_vm_address_t shmlba;
    mach_vm_size_t map_size;        /* size of map entry */
    mach_vm_size_t mapped_size;
    vm_prot_t prot;
    size_t size;
    kern_return_t rv;
    int shmat_ret = 0;
    int vm_flags;

    AUDIT_ARG(svipc_id, uap->shmid);
    AUDIT_ARG(svipc_addr, uap->shmaddr);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmat_ret = shminit())) {
        goto shmat_out;
    }

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    if (shmmap_s == NULL) {
        /* lazily allocate the shm map */

        int nsegs = shminfo.shmseg;

        /* +1 for the sentinel */
        if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
            shmat_ret = ENOMEM;
            goto shmat_out;
        }

        MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK | M_NULL);
        if (shmmap_s == NULL) {
            shmat_ret = ENOMEM;
            goto shmat_out;
        }

        /* initialize the entries */
        for (i = 0; i < nsegs; i++) {
            shmmap_s[i].shmid = SHMID_UNALLOCATED;
        }
        shmmap_s[i].shmid = SHMID_SENTINEL;

        p->vm_shm = (caddr_t)shmmap_s;
    }
    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {
        shmat_ret = EINVAL;
        goto shmat_out;
    }

    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
    error = ipcperm(kauth_cred_get(), &shmseg->u.shm_perm,
        (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R | IPC_W);
    if (error) {
        shmat_ret = error;
        goto shmat_out;
    }

    error = mac_sysvshm_check_shmat(kauth_cred_get(), shmseg, uap->shmflg);
    if (error) {
        shmat_ret = error;
        goto shmat_out;
    }

    /* find a free shmid */
    while (SHMID_IS_VALID(shmmap_s->shmid)) {
        shmmap_s++;
    }
    if (shmmap_s->shmid != SHMID_UNALLOCATED) {
        /* no free slots */
        shmat_ret = EMFILE;
        goto shmat_out;
    }

    map_size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map()));
    prot = VM_PROT_READ;
    if ((uap->shmflg & SHM_RDONLY) == 0) {
        prot |= VM_PROT_WRITE;
    }
    flags = MAP_ANON | MAP_SHARED;
    if (uap->shmaddr) {
        flags |= MAP_FIXED;
    }

    attach_va = (mach_vm_address_t)uap->shmaddr;
    shmlba = vm_map_page_size(current_map()); /* XXX instead of SHMLBA */
    if (uap->shmflg & SHM_RND) {
        attach_va &= ~(shmlba - 1);
    } else if ((attach_va & (shmlba - 1)) != 0) {
        shmat_ret = EINVAL;
        goto shmat_out;
    }

    if (flags & MAP_FIXED) {
        vm_flags = VM_FLAGS_FIXED;
    } else {
        vm_flags = VM_FLAGS_ANYWHERE;
    }
    mapped_size = 0;

    /* first reserve enough space... */
    rv = mach_vm_map_kernel(current_map(),
        &attach_va,
        map_size,
        0,
        vm_flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        VM_KERN_MEMORY_NONE,
        IPC_PORT_NULL,
        0,
        FALSE,
        VM_PROT_NONE,
        VM_PROT_NONE,
        VM_INHERIT_NONE);
    if (rv != KERN_SUCCESS) {
        goto out;
    }

    shmmap_s->va = attach_va;

    /* ... then map the shared memory over the reserved space */
    for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal);/* tunnel */
        shm_handle != NULL;
        shm_handle = shm_handle->shm_handle_next) {
        vm_map_size_t chunk_size;

        assert(mapped_size < map_size);
        chunk_size = shm_handle->shm_handle_size;
        if (chunk_size > map_size - mapped_size) {
            /*
             * Partial mapping of last chunk due to
             * page size mismatch.
             */
            assert(vm_map_page_shift(current_map()) < PAGE_SHIFT);
            assert(shm_handle->shm_handle_next == NULL);
            chunk_size = map_size - mapped_size;
        }
        rv = vm_map_enter_mem_object(
            current_map(),          /* process map */
            &attach_va,             /* attach address */
            chunk_size,             /* size to map */
            (mach_vm_offset_t)0,    /* alignment mask */
            VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_NONE,
            shm_handle->shm_object,
            (memory_object_offset_t)0,
            FALSE,
            prot,
            prot,
            VM_INHERIT_SHARE);
        if (rv != KERN_SUCCESS) {
            goto out;
        }

        mapped_size += chunk_size;
        attach_va = attach_va + chunk_size;
    }

    shmmap_s->shmid = uap->shmid;
    shmseg->u.shm_lpid = p->p_pid;
    shmseg->u.shm_atime = sysv_shmtime();
    shmseg->u.shm_nattch++;
    *retval = shmmap_s->va; /* XXX return -1 on error */
    shmat_ret = 0;
    goto shmat_out;

out:
    if (mapped_size > 0) {
        (void) mach_vm_deallocate(current_map(),
            shmmap_s->va,
            mapped_size);
    }
    switch (rv) {
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        shmat_ret = ENOMEM;
        break;
    case KERN_PROTECTION_FAILURE:
        shmat_ret = EACCES;
        break;
    default:
        shmat_ret = EINVAL;
        break;
    }
shmat_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmat_ret;
}
static int
oshmctl(__unused void *p, __unused void *uap, __unused void *retval)
{
    return EINVAL;
}
int
shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval)
{
    int error;
    kauth_cred_t cred = kauth_cred_get();
    struct user_shmid_ds inbuf;
    struct shmid_kernel *shmseg;
    int shmctl_ret = 0;

    AUDIT_ARG(svipc_cmd, uap->cmd);
    AUDIT_ARG(svipc_id, uap->shmid);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmctl_ret = shminit())) {
        goto shmctl_out;
    }

    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {
        shmctl_ret = EINVAL;
        goto shmctl_out;
    }

    /* XXAUDIT: This is the perms BEFORE any change by this call. This
     * may not be what is desired.
     */
    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);

    error = mac_sysvshm_check_shmctl(cred, shmseg, uap->cmd);
    if (error) {
        shmctl_ret = error;
        goto shmctl_out;
    }

    switch (uap->cmd) {
    case IPC_STAT:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_R);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }

        if (IS_64BIT_PROCESS(p)) {
            struct user_shmid_ds shmid_ds = {};
            memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds));

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds.shm_internal = USER_ADDR_NULL;

            error = copyout(&shmid_ds, uap->buf, sizeof(shmid_ds));
        } else {
            struct user32_shmid_ds shmid_ds32 = {};
            shmid_ds_64to32(&shmseg->u, &shmid_ds32);

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds32.shm_internal = (user32_addr_t)0;

            error = copyout(&shmid_ds32, uap->buf, sizeof(shmid_ds32));
        }
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        break;
    case IPC_SET:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        if (IS_64BIT_PROCESS(p)) {
            error = copyin(uap->buf, &inbuf, sizeof(struct user_shmid_ds));
        } else {
            struct user32_shmid_ds shmid_ds32;
            error = copyin(uap->buf, &shmid_ds32, sizeof(shmid_ds32));
            /* convert in place; ugly, but safe */
            shmid_ds_32to64(&shmid_ds32, &inbuf);
        }
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }

        shmseg->u.shm_perm.uid = inbuf.shm_perm.uid;
        shmseg->u.shm_perm.gid = inbuf.shm_perm.gid;
        shmseg->u.shm_perm.mode =
            (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
            (inbuf.shm_perm.mode & ACCESSPERMS);
        shmseg->u.shm_ctime = sysv_shmtime();
        break;
    case IPC_RMID:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        shmseg->u.shm_perm._key = IPC_PRIVATE;
        shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
        if (shmseg->u.shm_nattch <= 0) {
            shm_deallocate_segment(shmseg);
            shm_last_free = IPCID_TO_IX(uap->shmid);
        }
        break;
    default:
        shmctl_ret = EINVAL;
        goto shmctl_out;
    }

    shmctl_ret = 0;
shmctl_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmctl_ret;
}
static int
shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval)
{
    struct shmid_kernel *shmseg;
    int error = 0;

    shmseg = &shmsegs[segnum];
    if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
        /*
         * This segment is in the process of being allocated.  Wait
         * until it's done, and look the key up again (in case the
         * allocation failed or it was freed).
         */
        shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
        error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
        if (error) {
            return error;
        }
        return EAGAIN;
    }

    /*
     * The low 9 bits of shmflg are the mode bits being requested, which
     * are the actual mode bits desired on the segment, and not in IPC_R
     * form; therefore it would be incorrect to call ipcperm() to validate
     * them; instead, we AND the existing mode with the requested mode, and
     * verify that it matches the requested mode; otherwise, we fail with
     * EACCES (access denied).
     */
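    /*
     * Worked example (illustration only): for a segment created with mode
     * 0640, a caller passing shmget(key, size, 0600) gets
     * (0640 & 0600) == 0600 and is allowed, while a caller asking for 0660
     * gets (0640 & 0660) == 0640 != 0660 and fails with EACCES.
     */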
    if ((shmseg->u.shm_perm.mode & mode) != mode) {
        return EACCES;
    }

    error = mac_sysvshm_check_shmget(kauth_cred_get(), shmseg, uap->shmflg);
    if (error) {
        return error;
    }

    if (uap->size && uap->size > shmseg->u.shm_segsz) {
        return EINVAL;
    }

    if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) {
        return EEXIST;
    }

    *retval = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
    return 0;
}
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode,
    int *retval)
{
    int i, segnum, shmid;
    kauth_cred_t cred = kauth_cred_get();
    struct shmid_kernel *shmseg;
    struct shm_handle *shm_handle;
    kern_return_t kret;
    mach_vm_size_t total_size, size, alloc_size;
    void *mem_object;
    struct shm_handle *shm_handle_next, **shm_handle_next_p;

    if (uap->size <= 0 ||
        uap->size < (user_size_t)shminfo.shmmin ||
        uap->size > (user_size_t)shminfo.shmmax) {
        return EINVAL;
    }
    if (shm_nused >= shminfo.shmmni) { /* any shmids left? */
        return ENOSPC;
    }
    if (mach_vm_round_page_overflow(uap->size, &total_size)) {
        return EINVAL;
    }
    if ((user_ssize_t)(shm_committed + btoc(total_size)) > shminfo.shmall) {
        return ENOMEM;
    }
    if (shm_last_free < 0) {
        for (i = 0; i < shminfo.shmmni; i++) {
            if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE) {
                break;
            }
        }
        if (i == shminfo.shmmni) {
            panic("shmseg free count inconsistent");
        }
        segnum = i;
    } else {
        segnum = shm_last_free;
        shm_last_free = -1;
    }
    shmseg = &shmsegs[segnum];

    /*
     * In case we sleep in malloc(), mark the segment present but deleted
     * so that no one else tries to create the same key.
     * XXX but we don't release the global lock !?
     */
    shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
    shmseg->u.shm_perm._key = uap->key;
    shmseg->u.shm_perm._seq = (shmseg->u.shm_perm._seq + 1) & 0x7fff;
    shm_handle_next_p = NULL;
    for (alloc_size = 0;
        alloc_size < total_size;
        alloc_size += size) {
        size = MIN(total_size - alloc_size, ANON_MAX_SIZE);
        kret = mach_make_memory_entry_64(
            VM_MAP_NULL,
            (memory_object_size_t *) &size,
            (memory_object_offset_t) 0,
            MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
            (ipc_port_t *) &mem_object, 0);
        if (kret != KERN_SUCCESS) {
            goto out;
        }

        MALLOC(shm_handle, struct shm_handle *, sizeof(struct shm_handle), M_SHM, M_WAITOK);
        if (shm_handle == NULL) {
            kret = KERN_NO_SPACE;
            mach_memory_entry_port_release(mem_object);
            goto out;
        }
        shm_handle->shm_object = mem_object;
        shm_handle->shm_handle_size = size;
        shm_handle->shm_handle_next = NULL;
        if (shm_handle_next_p == NULL) {
            shmseg->u.shm_internal = CAST_USER_ADDR_T(shm_handle);/* tunnel */
        } else {
            *shm_handle_next_p = shm_handle;
        }
        shm_handle_next_p = &shm_handle->shm_handle_next;
    }

    shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);

    shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = kauth_cred_getuid(cred);
    shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = kauth_cred_getgid(cred);
    shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
        (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
    shmseg->u.shm_segsz = uap->size;
    shmseg->u.shm_cpid = p->p_pid;
    shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
    shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
    mac_sysvshm_label_associate(cred, shmseg);
    shmseg->u.shm_ctime = sysv_shmtime();
    shm_committed += btoc(size);
    shm_nused++;
    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
    if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
        /*
         * Somebody else wanted this key while we were asleep.  Wake
         * them up now.
         */
        shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
        wakeup((caddr_t)shmseg);
    }
    *retval = shmid;
    AUDIT_ARG(svipc_id, shmid);
    return 0;
out:
    if (kret != KERN_SUCCESS) {
        for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
            shm_handle != NULL;
            shm_handle = shm_handle_next) {
            shm_handle_next = shm_handle->shm_handle_next;
            mach_memory_entry_port_release(shm_handle->shm_object);
            FREE(shm_handle, M_SHM);
        }
        shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
    }

    switch (kret) {
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return ENOMEM;
    case KERN_PROTECTION_FAILURE:
        return EACCES;
    default:
        return EINVAL;
    }
}
int
shmget(struct proc *p, struct shmget_args *uap, int32_t *retval)
{
    int segnum, mode, error;
    int shmget_ret = 0;

    /* Auditing is actually done in shmget_allocate_segment() */

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmget_ret = shminit())) {
        goto shmget_out;
    }

    mode = uap->shmflg & ACCESSPERMS;
    if (uap->key != IPC_PRIVATE) {
again:
        segnum = shm_find_segment_by_key(uap->key);
        if (segnum >= 0) {
            error = shmget_existing(uap, mode, segnum, retval);
            if (error == EAGAIN) {
                goto again;
            }
            shmget_ret = error;
            goto shmget_out;
        }
        if ((uap->shmflg & IPC_CREAT) == 0) {
            shmget_ret = ENOENT;
            goto shmget_out;
        }
    }
    shmget_ret = shmget_allocate_segment(p, uap, mode, retval);
shmget_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmget_ret;
}
/*
 * Entry point for all SHM calls: shmat, oshmctl, shmdt, shmget, shmctl
 *
 * Parameters:  p       Process requesting the call
 *              uap     User argument descriptor (see below)
 *              retval  Return value of the selected shm call
 *
 * Indirect parameters:  uap->which   shm call to invoke (index in array of shm calls)
 *                       uap->a2      User argument descriptor
 *
 * Implicit returns:     retval       Return value of the selected shm call
 *
 * DEPRECATED:  This interface should not be used to call the other SHM
 *              functions (shmat, oshmctl, shmdt, shmget, shmctl). The correct
 *              usage is to call the other SHM functions directly.
 */
int
shmsys(struct proc *p, struct shmsys_args *uap, int32_t *retval)
{
    /* The routine that we are dispatching already does this */

    if (uap->which >= sizeof(shmcalls) / sizeof(shmcalls[0])) {
        return EINVAL;
    }
    return (*shmcalls[uap->which])(p, &uap->a2, retval);
}
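/*
 * Usage note (hedged sketch, not taken from the original sources): a legacy
 * caller would reach shmget() through this trampoline roughly as
 *
 *   shmsys(3, key, size, shmflg);   // 3 == index of shmget in shmcalls[]
 *
 * whereas new code should simply call shmget(key, size, shmflg) directly,
 * which is why the interface is documented as deprecated above.
 */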
/*
 * Return 0 on success, 1 on failure.
 */
int
shmfork(struct proc *p1, struct proc *p2)
{
    struct shmmap_state *shmmap_s;
    size_t size;
    int nsegs = 0;
    int ret = 0;

    SYSV_SHM_SUBSYS_LOCK();

    if (shminit()) {
        ret = 1;
        goto shmfork_out;
    }

    struct shmmap_state *src = (struct shmmap_state *)p1->vm_shm;

    /* count number of shmid entries in src */
    for (struct shmmap_state *s = src; s->shmid != SHMID_SENTINEL; s++) {
        nsegs++;
    }

    if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
        ret = 1;
        goto shmfork_out;
    }
    MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK);
    if (shmmap_s == NULL) {
        ret = 1;
        goto shmfork_out;
    }

    bcopy(src, (caddr_t)shmmap_s, size);
    p2->vm_shm = (caddr_t)shmmap_s;
    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        if (SHMID_IS_VALID(shmmap_s->shmid)) {
            shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
        }
    }

shmfork_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return ret;
}
static void
shmcleanup(struct proc *p, int deallocate)
{
    struct shmmap_state *shmmap_s;

    SYSV_SHM_SUBSYS_LOCK();

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        if (SHMID_IS_VALID(shmmap_s->shmid)) {
            /*
             * XXX: Should the MAC framework enforce a
             * check here as well?
             */
            shm_delete_mapping(p, shmmap_s, deallocate);
        }
    }

    FREE(p->vm_shm, M_SHM);
    SYSV_SHM_SUBSYS_UNLOCK();
}
void
shmexit(struct proc *p)
{
    shmcleanup(p, 1);
}

/*
 * shmexec() is like shmexit(), only it doesn't delete the mappings,
 * since the old address space has already been destroyed and the new
 * one instantiated.  Instead, it just does the housekeeping work we
 * need to do to keep the System V shared memory subsystem sane.
 */
__private_extern__ void
shmexec(struct proc *p)
{
    shmcleanup(p, 0);
}
static int
shminit(void)
{
    size_t sz;
    int i;

    if (!shm_inited) {
        /*
         * we store internally 64 bit, since if we didn't, we would
         * be unable to represent a segment size in excess of 32 bits
         * with the (struct shmid_ds)->shm_segsz field; also, POSIX
         * dictates this field be a size_t, which is 64 bits when
         * running 64 bit binaries.
         */
        if (os_mul_overflow(shminfo.shmmni, sizeof(struct shmid_kernel), &sz)) {
            return ENOMEM;
        }

        MALLOC(shmsegs, struct shmid_kernel *, sz, M_SHM, M_WAITOK | M_ZERO);
        if (shmsegs == NULL) {
            return ENOMEM;
        }
        for (i = 0; i < shminfo.shmmni; i++) {
            shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
            shmsegs[i].u.shm_perm._seq = 0;
            mac_sysvshm_label_init(&shmsegs[i]);
        }
        shm_last_free = -1;
        shm_inited = 1;
    }

    return 0;
}
/* Initialize the mutex governing access to the SysV shm subsystem */
__private_extern__ void
sysv_shm_lock_init(void)
{
    sysv_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

    sysv_shm_subsys_lck_grp = lck_grp_alloc_init("sysv_shm_subsys_lock", sysv_shm_subsys_lck_grp_attr);

    sysv_shm_subsys_lck_attr = lck_attr_alloc_init();
    lck_mtx_init(&sysv_shm_subsys_mutex, sysv_shm_subsys_lck_grp, sysv_shm_subsys_lck_attr);
}
/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
 *  struct sysctl_req *req) */
static int
sysctl_shminfo(__unused struct sysctl_oid *oidp, void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error = 0;
    int sysctl_shminfo_ret = 0;
    int64_t saved_shmmax;
    int64_t saved_shmmin;
    int64_t saved_shmseg;
    int64_t saved_shmmni;
    int64_t saved_shmall;

    error = SYSCTL_OUT(req, arg1, sizeof(int64_t));
    if (error || req->newptr == USER_ADDR_NULL) {
        return error;
    }

    SYSV_SHM_SUBSYS_LOCK();

    /* shmmni can not be changed after SysV SHM has been initialized */
    if (shm_inited && arg1 == &shminfo.shmmni) {
        sysctl_shminfo_ret = EPERM;
        goto sysctl_shminfo_out;
    }
    saved_shmmax = shminfo.shmmax;
    saved_shmmin = shminfo.shmmin;
    saved_shmseg = shminfo.shmseg;
    saved_shmmni = shminfo.shmmni;
    saved_shmall = shminfo.shmall;

    if ((error = SYSCTL_IN(req, arg1, sizeof(int64_t))) != 0) {
        sysctl_shminfo_ret = error;
        goto sysctl_shminfo_out;
    }

    if (arg1 == &shminfo.shmmax) {
        /* shmmax needs to be page-aligned */
        if (shminfo.shmmax & PAGE_MASK_64 || shminfo.shmmax < 0) {
            shminfo.shmmax = saved_shmmax;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmmin) {
        if (shminfo.shmmin < 0) {
            shminfo.shmmin = saved_shmmin;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmseg) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmseg > INT32_MAX || shminfo.shmseg < 0) {
            shminfo.shmseg = saved_shmseg;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmmni) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmmni > INT32_MAX || shminfo.shmmni < 0) {
            shminfo.shmmni = saved_shmmni;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmall) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmall > INT32_MAX || shminfo.shmall < 0) {
            shminfo.shmall = saved_shmall;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    }
    sysctl_shminfo_ret = 0;
sysctl_shminfo_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return sysctl_shminfo_ret;
}
static int
IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error;
    int cursor;
    union {
        struct user32_IPCS_command u32;
        struct user_IPCS_command u64;
    } ipcs;
    struct user32_shmid_ds shmid_ds32 = { };    /* post conversion, 32 bit version */
    struct user_shmid_ds shmid_ds = { };        /* 64 bit version */
    void *shmid_dsp;
    size_t ipcs_sz = sizeof(struct user_IPCS_command);
    size_t shmid_ds_sz = sizeof(struct user_shmid_ds);
    struct proc *p = current_proc();

    SYSV_SHM_SUBSYS_LOCK();

    if ((error = shminit())) {
        goto ipcs_shm_sysctl_out;
    }

    if (!IS_64BIT_PROCESS(p)) {
        ipcs_sz = sizeof(struct user32_IPCS_command);
        shmid_ds_sz = sizeof(struct user32_shmid_ds);
    }

    /* Copy in the command structure */
    if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
        goto ipcs_shm_sysctl_out;
    }

    if (!IS_64BIT_PROCESS(p)) {     /* convert in place */
        ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);
    }

    /* Let us version this interface... */
    if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
        error = EINVAL;
        goto ipcs_shm_sysctl_out;
    }

    switch (ipcs.u64.ipcs_op) {
    case IPCS_SHM_CONF:     /* Obtain global configuration data */
        if (ipcs.u64.ipcs_datalen != sizeof(struct shminfo)) {
            if (ipcs.u64.ipcs_cursor != 0) {    /* fwd. compat. */
                error = 0;
                break;
            }
            error = EINVAL;
            break;
        }
        error = copyout(&shminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
        break;

    case IPCS_SHM_ITER:     /* Iterate over existing segments */
        cursor = ipcs.u64.ipcs_cursor;
        if (cursor < 0 || cursor >= shminfo.shmmni) {
            error = ERANGE;
            break;
        }
        if (ipcs.u64.ipcs_datalen != (int)shmid_ds_sz) {
            error = EINVAL;
            break;
        }
        for (; cursor < shminfo.shmmni; cursor++) {
            if (shmsegs[cursor].u.shm_perm.mode & SHMSEG_ALLOCATED) {
                break;
            }
        }
        if (cursor == shminfo.shmmni) {
            error = ENOENT;
            break;
        }

        shmid_dsp = &shmsegs[cursor];   /* default: 64 bit */

        /*
         * If necessary, convert the 64 bit kernel segment
         * descriptor to a 32 bit user one.
         */
        if (!IS_64BIT_PROCESS(p)) {
            shmid_ds_64to32(shmid_dsp, &shmid_ds32);

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds32.shm_internal = (user32_addr_t)0;

            shmid_dsp = &shmid_ds32;
        } else {
            memcpy(&shmid_ds, shmid_dsp, sizeof(shmid_ds));

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds.shm_internal = USER_ADDR_NULL;

            shmid_dsp = &shmid_ds;
        }
        error = copyout(shmid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
        if (!error) {
            /* update cursor */
            ipcs.u64.ipcs_cursor = cursor + 1;

            if (!IS_64BIT_PROCESS(p)) {     /* convert in place */
                ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t, ipcs.u64.ipcs_data);
            }

            error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
        }
        break;

    default:
        error = EINVAL;
        break;
    }
ipcs_shm_sysctl_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return error;
}
SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSV");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmax, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmax, 0, &sysctl_shminfo, "Q", "shmmax");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmin, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmin, 0, &sysctl_shminfo, "Q", "shmmin");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmni, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmni, 0, &sysctl_shminfo, "Q", "shmmni");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmseg, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmseg, 0, &sysctl_shminfo, "Q", "shmseg");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmall, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmall, 0, &sysctl_shminfo, "Q", "shmall");
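/*
 * Illustrative tuning example (not from the original sources): the knobs
 * above surface as kern.sysv.*, so an administrator could raise the limits
 * with something like
 *
 *   sysctl -w kern.sysv.shmmax=16777216    # must remain page-aligned
 *   sysctl -w kern.sysv.shmseg=16
 *
 * subject to the validation in sysctl_shminfo() above; shmmni additionally
 * cannot be changed once the subsystem has initialized.
 */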
SYSCTL_NODE(_kern_sysv, OID_AUTO, ipcs, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSVIPCS");

SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, shm, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, IPCS_shm_sysctl,
    "S,IPCS_shm_command",
    "ipcs shm command interface");
#endif /* SYSV_SHM */
/* DSEP Review Done pl-20051108-v02 @2743,@2908,@2913,@3009 */