/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005-2006 SPARTA, Inc.
 */
#include <sys/appleapiopts.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

#include <security/mac_framework.h>
#include <security/audit/audit.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <kern/locks.h>
#include <os/overflow.h>
/* Uncomment this line to see MAC debugging output. */
/* #define MAC_DEBUG */
#if CONFIG_MACF_DEBUG
#define MPRINTF(a)      printf a
#else
#define MPRINTF(a)
#endif

static int shminit(void);
static LCK_GRP_DECLARE(sysv_shm_subsys_lck_grp, "sysv_shm_subsys_lock");
static LCK_MTX_DECLARE(sysv_shm_subsys_mutex, &sysv_shm_subsys_lck_grp);

#define SYSV_SHM_SUBSYS_LOCK()   lck_mtx_lock(&sysv_shm_subsys_mutex)
#define SYSV_SHM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_shm_subsys_mutex)
static int oshmctl(void *p, void *uap, void *retval);
static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap,
    int mode, int *retval);
static int shmget_existing(struct shmget_args *uap, int mode, int segnum,
    int *retval);
static void shmid_ds_64to32(struct user_shmid_ds *in,
    struct user32_shmid_ds *out);
static void shmid_ds_32to64(struct user32_shmid_ds *in,
    struct user_shmid_ds *out);
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *const shmcalls[] = {
    (sy_call_t *)shmat, (sy_call_t *)oshmctl,
    (sy_call_t *)shmdt, (sy_call_t *)shmget,
    (sy_call_t *)shmctl
};
#define SHMSEG_FREE       0x0200
#define SHMSEG_REMOVED    0x0400
#define SHMSEG_ALLOCATED  0x0800
#define SHMSEG_WANTED     0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_kernel *shmsegs;   /* 64 bit version */
static int shm_inited = 0;
/*
 * Since anonymous memory chunks are limited to ANON_MAX_SIZE bytes,
 * we have to keep a list of chunks when we want to handle a shared memory
 * segment bigger than ANON_MAX_SIZE.
 * Each chunk points to a VM named entry of up to ANON_MAX_SIZE bytes
 * of anonymous memory.
 */
struct shm_handle {
    void * shm_object;                      /* named entry for this chunk */
    memory_object_size_t shm_handle_size;   /* size of this chunk */
    struct shm_handle *shm_handle_next;     /* next chunk */
};
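/*
 * Illustrative sketch (comment only, not part of the build): how a segment
 * larger than ANON_MAX_SIZE is carved into a chunk list, mirroring the
 * allocation loop in shmget_allocate_segment() below.  The concrete size
 * here is hypothetical.
 *
 *    mach_vm_size_t total = 3 * ANON_MAX_SIZE + PAGE_SIZE; // rounded request
 *    for (mach_vm_size_t done = 0; done < total; done += size) {
 *        size = MIN(total - done, ANON_MAX_SIZE);
 *        // make a named entry of `size` bytes and append it to the
 *        // shm_handle list headed at shmseg->u.shm_internal
 *    }
 *
 * This yields four chunks: three of ANON_MAX_SIZE and one for the remainder.
 */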
struct shmmap_state {
    mach_vm_address_t va;   /* user address */
    int shmid;              /* segment id */
};
static void shm_deallocate_segment(struct shmid_kernel *);
static int shm_find_segment_by_key(key_t);
static struct shmid_kernel *shm_find_segment_by_shmid(int);
static int shm_delete_mapping(struct proc *, struct shmmap_state *, int);
#ifdef __APPLE_API_PRIVATE
#define DEFAULT_SHMMAX  (4 * 1024 * 1024)
#define DEFAULT_SHMMIN  1
#define DEFAULT_SHMMNI  32
#define DEFAULT_SHMSEG  8
#define DEFAULT_SHMALL  1024

struct shminfo shminfo = {
    .shmmax = DEFAULT_SHMMAX,
    .shmmin = DEFAULT_SHMMIN,
    .shmmni = DEFAULT_SHMMNI,
    .shmseg = DEFAULT_SHMSEG,
    .shmall = DEFAULT_SHMALL
};

#define SHMID_IS_VALID(x)  ((x) >= 0)
#define SHMID_UNALLOCATED  (-1)
#define SHMID_SENTINEL     (-2)
#endif /* __APPLE_API_PRIVATE */
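/*
 * Illustrative layout (comment only): with the default shmseg == 8, the
 * per-process map lazily allocated in shmat() looks like
 *
 *    struct shmmap_state map[8 + 1];        // +1 for the sentinel
 *    map[0..7].shmid = SHMID_UNALLOCATED;   // free slots
 *    map[8].shmid    = SHMID_SENTINEL;      // terminator
 *
 * Attached segments replace SHMID_UNALLOCATED entries with a valid
 * (non-negative) shmid; iteration always stops at SHMID_SENTINEL.
 */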
static __inline__ time_t
sysv_shmtime(void)
{
    struct timeval tv;
    microtime(&tv);
    return tv.tv_sec;
}
/*
 * This conversion is safe, since if we are converting for a 32 bit process,
 * then its value of (struct shmid_ds)->shm_segsz will never exceed 4G.
 *
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out)
{
    out->shm_perm = in->shm_perm;
    out->shm_segsz = in->shm_segsz;
    out->shm_lpid = in->shm_lpid;
    out->shm_cpid = in->shm_cpid;
    out->shm_nattch = in->shm_nattch;
    out->shm_atime = in->shm_atime;
    out->shm_dtime = in->shm_dtime;
    out->shm_ctime = in->shm_ctime;
    out->shm_internal = CAST_DOWN_EXPLICIT(int, in->shm_internal);  /* tunnel */
}
/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out)
{
    out->shm_internal = in->shm_internal;
    out->shm_ctime = in->shm_ctime;
    out->shm_dtime = in->shm_dtime;
    out->shm_atime = in->shm_atime;
    out->shm_nattch = in->shm_nattch;
    out->shm_cpid = in->shm_cpid;
    out->shm_lpid = in->shm_lpid;
    out->shm_segsz = in->shm_segsz;
    out->shm_perm = in->shm_perm;
}
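/*
 * Why the reverse field order above is safe for in-place conversion
 * (comment only): when `in` and `out` alias the same buffer, each 64 bit
 * destination field lies at or beyond the offset of the 32 bit source
 * field it is copied from, so writing from the end of the struct back to
 * the beginning never clobbers a source field before it has been read.
 * This appears to be what the "convert in place; ugly, but safe" comment
 * in shmctl()'s IPC_SET path below is relying on.
 */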
static int
shm_find_segment_by_key(key_t key)
{
    int i;

    for (i = 0; i < shminfo.shmmni; i++) {
        if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
            shmsegs[i].u.shm_perm._key == key) {
            return i;
        }
    }
    return -1;
}
static struct shmid_kernel *
shm_find_segment_by_shmid(int shmid)
{
    int segnum;
    struct shmid_kernel *shmseg;

    segnum = IPCID_TO_IX(shmid);
    if (segnum < 0 || segnum >= shminfo.shmmni) {
        return NULL;
    }
    shmseg = &shmsegs[segnum];
    if ((shmseg->u.shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
        != SHMSEG_ALLOCATED ||
        shmseg->u.shm_perm._seq != IPCID_TO_SEQ(shmid)) {
        return NULL;
    }
    return shmseg;
}
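/*
 * Illustrative note (comment only) on the id checks above: a SysV shm id
 * packs a slot index and a generation sequence, roughly
 *
 *    shmid  = IXSEQ_TO_IPCID(segnum, perm);  // index + perm._seq
 *    segnum = IPCID_TO_IX(shmid);            // recover the slot
 *    seq    = IPCID_TO_SEQ(shmid);           // recover the generation
 *
 * Because _seq is bumped each time a slot is reused (see
 * shmget_allocate_segment()), a stale id whose sequence no longer matches
 * is rejected here instead of silently naming the slot's new segment.
 */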
static void
shm_deallocate_segment(struct shmid_kernel *shmseg)
{
    struct shm_handle *shm_handle, *shm_handle_next;
    mach_vm_size_t size;

    for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
        shm_handle != NULL;
        shm_handle = shm_handle_next) {
        shm_handle_next = shm_handle->shm_handle_next;
        mach_memory_entry_port_release(shm_handle->shm_object);
        kheap_free(KM_SHM, shm_handle, sizeof(struct shm_handle));
    }
    shmseg->u.shm_internal = USER_ADDR_NULL;        /* tunnel */
    size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map()));
    shm_committed -= btoc(size);
    shmseg->u.shm_perm.mode = SHMSEG_FREE;

    /* Reset the MAC label */
    mac_sysvshm_label_recycle(shmseg);
}
static int
shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s,
    int deallocate)
{
    int segnum, result;
    struct shmid_kernel *shmseg;
    mach_vm_size_t size;

    segnum = IPCID_TO_IX(shmmap_s->shmid);
    shmseg = &shmsegs[segnum];
    size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map())); /* XXX done for us? */
    if (deallocate) {
        result = mach_vm_deallocate(current_map(), shmmap_s->va, size);
        if (result != KERN_SUCCESS) {
            return EINVAL;
        }
    }
    shmmap_s->shmid = SHMID_UNALLOCATED;
    shmseg->u.shm_dtime = sysv_shmtime();
    if ((--shmseg->u.shm_nattch <= 0) &&
        (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
        shm_deallocate_segment(shmseg);
        shm_last_free = segnum;
    }
    return 0;
}
int
shmdt(struct proc *p, struct shmdt_args *uap, int32_t *retval)
{
    struct shmid_kernel *shmsegptr;
    struct shmmap_state *shmmap_s;
    int i;
    int shmdtret = 0;

    AUDIT_ARG(svipc_addr, uap->shmaddr);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmdtret = shminit())) {
        goto shmdt_out;
    }

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    if (shmmap_s == NULL) {
        shmdtret = EINVAL;
        goto shmdt_out;
    }

    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        if (SHMID_IS_VALID(shmmap_s->shmid) &&
            shmmap_s->va == (mach_vm_offset_t)uap->shmaddr) {
            break;
        }
    }

    if (!SHMID_IS_VALID(shmmap_s->shmid)) {
        shmdtret = EINVAL;
        goto shmdt_out;
    }

    /*
     * XXX: It might be useful to move this into the shm_delete_mapping
     * function
     */
    shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
    shmdtret = mac_sysvshm_check_shmdt(kauth_cred_get(), shmsegptr);
    if (shmdtret) {
        goto shmdt_out;
    }

    i = shm_delete_mapping(p, shmmap_s, 1);
    if (i == 0) {
        *retval = 0;
    }
    shmdtret = i;
shmdt_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmdtret;
}
int
shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval)
{
    int error, i, flags;
    struct shmid_kernel *shmseg;
    struct shmmap_state *shmmap_s = NULL;
    struct shm_handle *shm_handle;
    mach_vm_address_t attach_va;    /* attach address in/out */
    mach_vm_address_t shmlba;
    mach_vm_size_t map_size;        /* size of map entry */
    mach_vm_size_t mapped_size;
    vm_prot_t prot;
    size_t size;
    kern_return_t rv;
    int shmat_ret;
    int vm_flags;

    shmat_ret = 0;

    AUDIT_ARG(svipc_id, uap->shmid);
    AUDIT_ARG(svipc_addr, uap->shmaddr);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmat_ret = shminit())) {
        goto shmat_out;
    }

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    if (shmmap_s == NULL) {
        /* lazily allocate the shm map */

        int nsegs = shminfo.shmseg;
        if (nsegs <= 0) {
            shmat_ret = EMFILE;
            goto shmat_out;
        }

        /* +1 for the sentinel */
        if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
            shmat_ret = ENOMEM;
            goto shmat_out;
        }

        shmmap_s = kheap_alloc(KM_SHM, size, Z_WAITOK);
        if (shmmap_s == NULL) {
            shmat_ret = ENOMEM;
            goto shmat_out;
        }

        /* initialize the entries */
        for (i = 0; i < nsegs; i++) {
            shmmap_s[i].shmid = SHMID_UNALLOCATED;
        }
        shmmap_s[i].shmid = SHMID_SENTINEL;

        p->vm_shm = (caddr_t)shmmap_s;
    }

    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {
        shmat_ret = EINVAL;
        goto shmat_out;
    }

    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
    error = ipcperm(kauth_cred_get(), &shmseg->u.shm_perm,
        (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R | IPC_W);
    if (error) {
        shmat_ret = error;
        goto shmat_out;
    }

    error = mac_sysvshm_check_shmat(kauth_cred_get(), shmseg, uap->shmflg);
    if (error) {
        shmat_ret = error;
        goto shmat_out;
    }

    /* find a free shmid */
    while (SHMID_IS_VALID(shmmap_s->shmid)) {
        shmmap_s++;
    }
    if (shmmap_s->shmid != SHMID_UNALLOCATED) {
        /* no free shmids */
        shmat_ret = EMFILE;
        goto shmat_out;
    }

    map_size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map()));
    prot = VM_PROT_READ;
    if ((uap->shmflg & SHM_RDONLY) == 0) {
        prot |= VM_PROT_WRITE;
    }
    flags = MAP_ANON | MAP_SHARED;
    if (uap->shmaddr) {
        flags |= MAP_FIXED;
    }

    attach_va = (mach_vm_address_t)uap->shmaddr;
    shmlba = vm_map_page_size(current_map()); /* XXX instead of SHMLBA */
    if (uap->shmflg & SHM_RND) {
        attach_va &= ~(shmlba - 1);
    } else if ((attach_va & (shmlba - 1)) != 0) {
        shmat_ret = EINVAL;
        goto shmat_out;
    }

    if (flags & MAP_FIXED) {
        vm_flags = VM_FLAGS_FIXED;
    } else {
        vm_flags = VM_FLAGS_ANYWHERE;
    }

    mapped_size = 0;

    /* first reserve enough space... */
    rv = mach_vm_map_kernel(current_map(),
        &attach_va,
        map_size,
        0,
        vm_flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        VM_KERN_MEMORY_NONE,
        IPC_PORT_NULL,
        0,
        FALSE,
        VM_PROT_NONE,
        VM_PROT_NONE,
        VM_INHERIT_NONE);
    if (rv != KERN_SUCCESS) {
        goto out;
    }

    shmmap_s->va = attach_va;

    /* ... then map the shared memory over the reserved space */
    for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal);/* tunnel */
        shm_handle != NULL;
        shm_handle = shm_handle->shm_handle_next) {
        vm_map_size_t chunk_size;

        assert(mapped_size < map_size);
        chunk_size = shm_handle->shm_handle_size;
        if (chunk_size > map_size - mapped_size) {
            /*
             * Partial mapping of last chunk due to
             * page size mismatch.
             */
            assert(vm_map_page_shift(current_map()) < PAGE_SHIFT);
            assert(shm_handle->shm_handle_next == NULL);
            chunk_size = map_size - mapped_size;
        }
        rv = vm_map_enter_mem_object(
            current_map(),          /* process map */
            &attach_va,             /* attach address */
            chunk_size,             /* size to map */
            (mach_vm_offset_t)0,    /* alignment mask */
            VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_NONE,
            shm_handle->shm_object,
            (memory_object_offset_t)0,
            FALSE,
            prot,
            prot,
            VM_INHERIT_SHARE);
        if (rv != KERN_SUCCESS) {
            goto out;
        }

        mapped_size += chunk_size;
        attach_va = attach_va + chunk_size;
    }

    shmmap_s->shmid = uap->shmid;
    shmseg->u.shm_lpid = p->p_pid;
    shmseg->u.shm_atime = sysv_shmtime();
    shmseg->u.shm_nattch++;
    *retval = shmmap_s->va; /* XXX return -1 on error */
    shmat_ret = 0;
    goto shmat_out;

out:
    if (mapped_size > 0) {
        (void) mach_vm_deallocate(current_map(),
            shmmap_s->va,
            mapped_size);
    }
    switch (rv) {
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        shmat_ret = ENOMEM;
        break;
    case KERN_PROTECTION_FAILURE:
        shmat_ret = EACCES;
        break;
    default:
        shmat_ret = EINVAL;
        break;
    }
shmat_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmat_ret;
}
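/*
 * Userspace view (comment only): a minimal sketch of the syscall sequence
 * this file services, using the standard <sys/shm.h> API.  Error handling
 * is elided; the key and size are arbitrary.
 *
 *    #include <sys/shm.h>
 *
 *    int id = shmget((key_t)0x1234, 4096, IPC_CREAT | 0600);
 *    void *addr = shmat(id, NULL, 0);   // kernel picks attach_va
 *    // ... use the segment ...
 *    shmdt(addr);                       // tears down the mapping above
 *    shmctl(id, IPC_RMID, NULL);        // segment freed at last detach
 */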
static int
oshmctl(__unused void *p, __unused void *uap, __unused void *retval)
{
    return EINVAL;
}
int
shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval)
{
    int error;
    kauth_cred_t cred = kauth_cred_get();
    struct user_shmid_ds inbuf;
    struct shmid_kernel *shmseg;
    int shmctl_ret = 0;

    AUDIT_ARG(svipc_cmd, uap->cmd);
    AUDIT_ARG(svipc_id, uap->shmid);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmctl_ret = shminit())) {
        goto shmctl_out;
    }

    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {
        shmctl_ret = EINVAL;
        goto shmctl_out;
    }

    /* XXAUDIT: This is the perms BEFORE any change by this call. This
     * may not be what is desired.
     */
    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);

    error = mac_sysvshm_check_shmctl(cred, shmseg, uap->cmd);
    if (error) {
        shmctl_ret = error;
        goto shmctl_out;
    }

    switch (uap->cmd) {
    case IPC_STAT:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_R);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }

        if (IS_64BIT_PROCESS(p)) {
            struct user_shmid_ds shmid_ds = {};
            memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds));

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds.shm_internal = USER_ADDR_NULL;

            error = copyout(&shmid_ds, uap->buf, sizeof(shmid_ds));
        } else {
            struct user32_shmid_ds shmid_ds32 = {};
            shmid_ds_64to32(&shmseg->u, &shmid_ds32);

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds32.shm_internal = (user32_addr_t)0;

            error = copyout(&shmid_ds32, uap->buf, sizeof(shmid_ds32));
        }
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        break;
    case IPC_SET:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        if (IS_64BIT_PROCESS(p)) {
            error = copyin(uap->buf, &inbuf, sizeof(struct user_shmid_ds));
        } else {
            struct user32_shmid_ds shmid_ds32;
            error = copyin(uap->buf, &shmid_ds32, sizeof(shmid_ds32));
            /* convert in place; ugly, but safe */
            shmid_ds_32to64(&shmid_ds32, &inbuf);
        }
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }

        shmseg->u.shm_perm.uid = inbuf.shm_perm.uid;
        shmseg->u.shm_perm.gid = inbuf.shm_perm.gid;
        shmseg->u.shm_perm.mode =
            (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
            (inbuf.shm_perm.mode & ACCESSPERMS);
        shmseg->u.shm_ctime = sysv_shmtime();
        break;
    case IPC_RMID:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        shmseg->u.shm_perm._key = IPC_PRIVATE;
        shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
        if (shmseg->u.shm_nattch <= 0) {
            shm_deallocate_segment(shmseg);
            shm_last_free = IPCID_TO_IX(uap->shmid);
        }
        break;
    default:
        shmctl_ret = EINVAL;
        goto shmctl_out;
    }
    *retval = 0;
    shmctl_ret = 0;
shmctl_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmctl_ret;
}
static int
shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval)
{
    struct shmid_kernel *shmseg;
    int error = 0;

    shmseg = &shmsegs[segnum];
    if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
        /*
         * This segment is in the process of being allocated.  Wait
         * until it's done, and look the key up again (in case the
         * allocation failed or it was freed).
         */
        shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
        error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
        if (error) {
            return error;
        }
        return EAGAIN;
    }

    /*
     * The low 9 bits of shmflg are the mode bits being requested, which
     * are the actual mode bits desired on the segment, and not in IPC_R
     * form; therefore it would be incorrect to call ipcperm() to validate
     * them; instead, we AND the existing mode with the requested mode, and
     * verify that it matches the requested mode; otherwise, we fail with
     * EACCES (access denied).
     */
    if ((shmseg->u.shm_perm.mode & mode) != mode) {
        return EACCES;
    }

    error = mac_sysvshm_check_shmget(kauth_cred_get(), shmseg, uap->shmflg);
    if (error) {
        return error;
    }

    if (uap->size && uap->size > shmseg->u.shm_segsz) {
        return EINVAL;
    }

    if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) {
        return EEXIST;
    }

    *retval = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
    return 0;
}
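/*
 * Worked example (comment only) of the mode check above: for a segment
 * created with mode 0640, a caller passing shmget(key, size, 0600) gets
 * 0640 & 0600 == 0600, which equals the request, so access is granted;
 * a caller passing 0660 gets 0640 & 0660 == 0640 != 0660 and fails with
 * EACCES.
 */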
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode,
    int *retval)
{
    int i, segnum, shmid;
    kauth_cred_t cred = kauth_cred_get();
    struct shmid_kernel *shmseg;
    struct shm_handle *shm_handle;
    kern_return_t kret;
    mach_vm_size_t total_size, size, alloc_size;
    void * mem_object;
    struct shm_handle *shm_handle_next, **shm_handle_next_p;

    if (uap->size <= 0 ||
        uap->size < (user_size_t)shminfo.shmmin ||
        uap->size > (user_size_t)shminfo.shmmax) {
        return EINVAL;
    }
    if (shm_nused >= shminfo.shmmni) { /* any shmids left? */
        return ENOSPC;
    }
    if (mach_vm_round_page_overflow(uap->size, &total_size)) {
        return EINVAL;
    }
    if ((user_ssize_t)(shm_committed + btoc(total_size)) > shminfo.shmall) {
        return ENOMEM;
    }
    if (shm_last_free < 0) {
        for (i = 0; i < shminfo.shmmni; i++) {
            if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE) {
                break;
            }
        }
        if (i == shminfo.shmmni) {
            panic("shmseg free count inconsistent");
        }
        segnum = i;
    } else {
        segnum = shm_last_free;
        shm_last_free = -1;
    }
    shmseg = &shmsegs[segnum];

    /*
     * In case we sleep in malloc(), mark the segment present but deleted
     * so that no one else tries to create the same key.
     * XXX but we don't release the global lock !?
     */
    shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
    shmseg->u.shm_perm._key = uap->key;
    shmseg->u.shm_perm._seq = (shmseg->u.shm_perm._seq + 1) & 0x7fff;

    shm_handle_next_p = NULL;
    for (alloc_size = 0;
        alloc_size < total_size;
        alloc_size += size) {
        size = MIN(total_size - alloc_size, ANON_MAX_SIZE);
        kret = mach_make_memory_entry_64(
            VM_MAP_NULL,
            (memory_object_size_t *) &size,
            (memory_object_offset_t) 0,
            MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
            (ipc_port_t *) &mem_object, 0);
        if (kret != KERN_SUCCESS) {
            goto out;
        }

        shm_handle = kheap_alloc(KM_SHM, sizeof(struct shm_handle), Z_WAITOK);
        if (shm_handle == NULL) {
            kret = KERN_NO_SPACE;
            mach_memory_entry_port_release(mem_object);
            mem_object = NULL;
            goto out;
        }
        shm_handle->shm_object = mem_object;
        shm_handle->shm_handle_size = size;
        shm_handle->shm_handle_next = NULL;
        if (shm_handle_next_p == NULL) {
            shmseg->u.shm_internal = CAST_USER_ADDR_T(shm_handle);/* tunnel */
        } else {
            *shm_handle_next_p = shm_handle;
        }
        shm_handle_next_p = &shm_handle->shm_handle_next;
    }

    shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);

    shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = kauth_cred_getuid(cred);
    shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = kauth_cred_getgid(cred);
    shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
        (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
    shmseg->u.shm_segsz = uap->size;
    shmseg->u.shm_cpid = p->p_pid;
    shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
    shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;

    mac_sysvshm_label_associate(cred, shmseg);

    shmseg->u.shm_ctime = sysv_shmtime();
    shm_committed += btoc(size);
    shm_nused++;
    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
    if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
        /*
         * Somebody else wanted this key while we were asleep.  Wake
         * them up now.
         */
        shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
        wakeup((caddr_t)shmseg);
    }
    *retval = shmid;
    AUDIT_ARG(svipc_id, shmid);
    return 0;
out:
    if (kret != KERN_SUCCESS) {
        for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
            shm_handle != NULL;
            shm_handle = shm_handle_next) {
            shm_handle_next = shm_handle->shm_handle_next;
            mach_memory_entry_port_release(shm_handle->shm_object);
            kheap_free(KM_SHM, shm_handle, sizeof(struct shm_handle));
        }
        shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
    }

    switch (kret) {
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return ENOMEM;
    case KERN_PROTECTION_FAILURE:
        return EACCES;
    default:
        return EINVAL;
    }
}
int
shmget(struct proc *p, struct shmget_args *uap, int32_t *retval)
{
    int segnum, mode, error;
    int shmget_ret = 0;

    /* Auditing is actually done in shmget_allocate_segment() */

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmget_ret = shminit())) {
        goto shmget_out;
    }

    mode = uap->shmflg & ACCESSPERMS;
    if (uap->key != IPC_PRIVATE) {
again:
        segnum = shm_find_segment_by_key(uap->key);
        if (segnum >= 0) {
            error = shmget_existing(uap, mode, segnum, retval);
            if (error == EAGAIN) {
                goto again;
            }
            shmget_ret = error;
            goto shmget_out;
        }
        if ((uap->shmflg & IPC_CREAT) == 0) {
            shmget_ret = ENOENT;
            goto shmget_out;
        }
    }
    shmget_ret = shmget_allocate_segment(p, uap, mode, retval);
shmget_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmget_ret;
}
/*
 * Entry point for all SHM calls: shmat, oshmctl, shmdt, shmget, shmctl
 *
 * Parameters:  p       Process requesting the call
 *              uap     User argument descriptor (see below)
 *              retval  Return value of the selected shm call
 *
 * Indirect parameters: uap->which  shm call to invoke (index in array of shm calls)
 *                      uap->a2     User argument descriptor
 *
 * Implicit returns: retval  Return value of the selected shm call
 *
 * DEPRECATED:  This interface should not be used to call the other SHM
 *              functions (shmat, oshmctl, shmdt, shmget, shmctl). The correct
 *              usage is to call the other SHM functions directly.
 */
int
shmsys(struct proc *p, struct shmsys_args *uap, int32_t *retval)
{
    /* The routine that we are dispatching already does this */

    if (uap->which >= sizeof(shmcalls) / sizeof(shmcalls[0])) {
        return EINVAL;
    }
    return (*shmcalls[uap->which])(p, &uap->a2, retval);
}
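/*
 * Dispatch example (comment only): per the shmcalls[] table above,
 * shmsys(1, ...) would invoke oshmctl() and shmsys(3, ...) shmget(), with
 * uap->a2 reinterpreted as the start of that call's own argument block.
 * New code should call shmat()/shmctl()/shmdt()/shmget() directly, as the
 * DEPRECATED note above says.
 */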
/*
 * Return 0 on success, 1 on failure.
 */
int
shmfork(struct proc *p1, struct proc *p2)
{
    struct shmmap_state *shmmap_s;
    size_t size;
    int nsegs = 0;
    int ret = 0;

    SYSV_SHM_SUBSYS_LOCK();

    if (shminit()) {
        ret = 1;
        goto shmfork_out;
    }

    struct shmmap_state *src = (struct shmmap_state *)p1->vm_shm;
    assert(src);

    /* count number of shmid entries in src */
    for (struct shmmap_state *s = src; s->shmid != SHMID_SENTINEL; s++) {
        nsegs++;
    }

    if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
        ret = 1;
        goto shmfork_out;
    }
    shmmap_s = kheap_alloc(KM_SHM, size, Z_WAITOK);
    if (shmmap_s == NULL) {
        ret = 1;
        goto shmfork_out;
    }

    bcopy(src, (caddr_t)shmmap_s, size);
    p2->vm_shm = (caddr_t)shmmap_s;
    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        if (SHMID_IS_VALID(shmmap_s->shmid)) {
            shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
        }
    }

shmfork_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return ret;
}
static void
shmcleanup(struct proc *p, int deallocate)
{
    struct shmmap_state *shmmap_s;
    size_t size = 0;
    int nsegs = 0;

    SYSV_SHM_SUBSYS_LOCK();

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        nsegs++;
        if (SHMID_IS_VALID(shmmap_s->shmid)) {
            /*
             * XXX: Should the MAC framework enforce a
             * check here as well?
             */
            shm_delete_mapping(p, shmmap_s, deallocate);
        }
    }

    if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
        panic("shmcleanup: p->vm_shm buffer was corrupted\n");
    }
    kheap_free(KM_SHM, p->vm_shm, size);
    SYSV_SHM_SUBSYS_UNLOCK();
}
void
shmexit(struct proc *p)
{
    shmcleanup(p, 1);
}
/*
 * shmexec() is like shmexit(), only it doesn't delete the mappings,
 * since the old address space has already been destroyed and the new
 * one instantiated.  Instead, it just does the housekeeping work we
 * need to do to keep the System V shared memory subsystem sane.
 */
__private_extern__ void
shmexec(struct proc *p)
{
    shmcleanup(p, 0);
}
int
shminit(void)
{
    size_t sz;
    int i;

    if (!shm_inited) {
        /*
         * we store internally 64 bit, since if we didn't, we would
         * be unable to represent a segment size in excess of 32 bits
         * with the (struct shmid_ds)->shm_segsz field; also, POSIX
         * dictates this field be a size_t, which is 64 bits when
         * running 64 bit binaries.
         */
        if (os_mul_overflow(shminfo.shmmni, sizeof(struct shmid_kernel), &sz)) {
            return ENOMEM;
        }

        shmsegs = zalloc_permanent(sz, ZALIGN_PTR);
        if (shmsegs == NULL) {
            return ENOMEM;
        }
        for (i = 0; i < shminfo.shmmni; i++) {
            shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
            shmsegs[i].u.shm_perm._seq = 0;

            mac_sysvshm_label_init(&shmsegs[i]);
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;

        shm_inited = 1;
    }

    return 0;
}
/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
 *  struct sysctl_req *req) */
static int
sysctl_shminfo(__unused struct sysctl_oid *oidp, void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error = 0;
    int sysctl_shminfo_ret = 0;
    int64_t saved_shmmax;
    int64_t saved_shmmin;
    int64_t saved_shmseg;
    int64_t saved_shmmni;
    int64_t saved_shmall;

    error = SYSCTL_OUT(req, arg1, sizeof(int64_t));
    if (error || req->newptr == USER_ADDR_NULL) {
        return error;
    }

    SYSV_SHM_SUBSYS_LOCK();

    /* shmmni can not be changed after SysV SHM has been initialized */
    if (shm_inited && arg1 == &shminfo.shmmni) {
        sysctl_shminfo_ret = EPERM;
        goto sysctl_shminfo_out;
    }
    saved_shmmax = shminfo.shmmax;
    saved_shmmin = shminfo.shmmin;
    saved_shmseg = shminfo.shmseg;
    saved_shmmni = shminfo.shmmni;
    saved_shmall = shminfo.shmall;

    if ((error = SYSCTL_IN(req, arg1, sizeof(int64_t))) != 0) {
        sysctl_shminfo_ret = error;
        goto sysctl_shminfo_out;
    }

    if (arg1 == &shminfo.shmmax) {
        /* shmmax needs to be page-aligned */
        if (shminfo.shmmax & PAGE_MASK_64 || shminfo.shmmax < 0) {
            shminfo.shmmax = saved_shmmax;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmmin) {
        if (shminfo.shmmin < 0) {
            shminfo.shmmin = saved_shmmin;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmseg) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmseg > INT32_MAX || shminfo.shmseg < 0) {
            shminfo.shmseg = saved_shmseg;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmmni) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmmni > INT32_MAX || shminfo.shmmni < 0) {
            shminfo.shmmni = saved_shmmni;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmall) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmall > INT32_MAX || shminfo.shmall < 0) {
            shminfo.shmall = saved_shmall;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    }
    sysctl_shminfo_ret = 0;
sysctl_shminfo_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return sysctl_shminfo_ret;
}
static int
IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error;
    int cursor;
    union {
        struct user32_IPCS_command u32;
        struct user_IPCS_command u64;
    } ipcs = { };
    struct user32_shmid_ds shmid_ds32 = { }; /* post conversion, 32 bit version */
    struct user_shmid_ds shmid_ds = { };     /* 64 bit version */
    void *shmid_dsp;
    size_t ipcs_sz = sizeof(struct user_IPCS_command);
    size_t shmid_ds_sz = sizeof(struct user_shmid_ds);
    struct proc *p = current_proc();

    SYSV_SHM_SUBSYS_LOCK();

    if ((error = shminit())) {
        goto ipcs_shm_sysctl_out;
    }

    if (!IS_64BIT_PROCESS(p)) {
        ipcs_sz = sizeof(struct user32_IPCS_command);
        shmid_ds_sz = sizeof(struct user32_shmid_ds);
    }

    /* Copy in the command structure */
    if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
        goto ipcs_shm_sysctl_out;
    }

    if (!IS_64BIT_PROCESS(p)) {     /* convert in place */
        ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);
    }

    /* Let us version this interface... */
    if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
        error = EINVAL;
        goto ipcs_shm_sysctl_out;
    }

    switch (ipcs.u64.ipcs_op) {
    case IPCS_SHM_CONF:     /* Obtain global configuration data */
        if (ipcs.u64.ipcs_datalen != sizeof(struct shminfo)) {
            if (ipcs.u64.ipcs_cursor != 0) {        /* fwd. compat. */
                error = ENOMEM;
                break;
            }
            error = ERANGE;
            break;
        }
        error = copyout(&shminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
        break;

    case IPCS_SHM_ITER:     /* Iterate over existing segments */
        cursor = ipcs.u64.ipcs_cursor;
        if (cursor < 0 || cursor >= shminfo.shmmni) {
            error = ERANGE;
            break;
        }
        if (ipcs.u64.ipcs_datalen != (int)shmid_ds_sz) {
            error = EINVAL;
            break;
        }
        for (; cursor < shminfo.shmmni; cursor++) {
            if (shmsegs[cursor].u.shm_perm.mode & SHMSEG_ALLOCATED) {
                break;
            }
        }
        if (cursor == shminfo.shmmni) {
            error = ENOENT;
            break;
        }

        shmid_dsp = &shmsegs[cursor];   /* default: 64 bit */

        /*
         * If necessary, convert the 64 bit kernel segment
         * descriptor to a 32 bit user one.
         */
        if (!IS_64BIT_PROCESS(p)) {
            shmid_ds_64to32(shmid_dsp, &shmid_ds32);

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds32.shm_internal = (user32_addr_t)0;

            shmid_dsp = &shmid_ds32;
        } else {
            memcpy(&shmid_ds, shmid_dsp, sizeof(shmid_ds));

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds.shm_internal = USER_ADDR_NULL;

            shmid_dsp = &shmid_ds;
        }
        error = copyout(shmid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
        if (!error) {
            /* update cursor */
            ipcs.u64.ipcs_cursor = cursor + 1;

            if (!IS_64BIT_PROCESS(p)) {     /* convert in place */
                ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t, ipcs.u64.ipcs_data);
            }

            error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
        }
        break;

    default:
        error = EINVAL;
        break;
    }
ipcs_shm_sysctl_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return error;
}
SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSV");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmax, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmax, 0, &sysctl_shminfo, "Q", "shmmax");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmin, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmin, 0, &sysctl_shminfo, "Q", "shmmin");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmni, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmni, 0, &sysctl_shminfo, "Q", "shmmni");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmseg, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmseg, 0, &sysctl_shminfo, "Q", "shmseg");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmall, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmall, 0, &sysctl_shminfo, "Q", "shmall");

SYSCTL_NODE(_kern_sysv, OID_AUTO, ipcs, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSVIPCS");

SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, shm, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, IPCS_shm_sysctl,
    "S,IPCS_shm_command",
    "ipcs shm command interface");
#endif /* SYSV_SHM */

/* DSEP Review Done pl-20051108-v02 @2743,@2908,@2913,@3009 */