/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/appleapiopts.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/ipcs.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

#include <bsm/audit_kernel.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_shared_memory_server.h>
#include <vm/vm_protos.h>

#include <kern/locks.h>

static void shminit(void *);
#if 0
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)
#endif

static lck_grp_t       *sysv_shm_subsys_lck_grp;
static lck_grp_attr_t  *sysv_shm_subsys_lck_grp_attr;
static lck_attr_t      *sysv_shm_subsys_lck_attr;
static lck_mtx_t        sysv_shm_subsys_mutex;

#define SYSV_SHM_SUBSYS_LOCK() lck_mtx_lock(&sysv_shm_subsys_mutex)
#define SYSV_SHM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_shm_subsys_mutex)
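
/*
 * Locking discipline, as visible in the code below: every entry point
 * (shmat, shmdt, shmctl, shmget, shmfork, shmexit, shmexec, and the two
 * sysctl handlers; shmsys only indirectly, via the routine it dispatches)
 * takes SYSV_SHM_SUBSYS_LOCK() before touching shmsegs, shm_nused,
 * shm_committed, or a process's shmmap_state array, and releases it on
 * every exit path.  One coarse subsystem mutex keeps the invariants
 * simple: no segment changes state while any caller is inside.
 */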

static int oshmctl(void *p, void *uap, void *retval);
static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, int *retval);
static int shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval);
static void shmid_ds_64to32(struct user_shmid_ds *in, struct shmid_ds *out);
static void shmid_ds_32to64(struct shmid_ds *in, struct user_shmid_ds *out);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000
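
/*
 * Segment lifecycle, as implemented below: a slot starts SHMSEG_FREE;
 * shmget_allocate_segment() marks it SHMSEG_ALLOCATED | SHMSEG_REMOVED
 * while it may sleep, then clears REMOVED once the segment is fully
 * constructed.  shmctl(IPC_RMID) sets SHMSEG_REMOVED again, and the last
 * detach (shm_delete_mapping) returns the slot to SHMSEG_FREE.
 * SHMSEG_WANTED marks a slot another shmget() caller is sleeping on in
 * shmget_existing().
 */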

static int shm_last_free, shm_nused, shm_committed;
struct user_shmid_ds	*shmsegs;	/* 64 bit version */
static int shm_inited = 0;

struct shm_handle {
	void * shm_object;		/* vm_offset_t kva; */
};

struct shmmap_state {
	mach_vm_address_t va;		/* user address */
	int shmid;			/* segment id */
};

static void shm_deallocate_segment(struct user_shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct user_shmid_ds *shm_find_segment_by_shmid(int);
static int shm_delete_mapping(struct proc *, struct shmmap_state *, int);

#ifdef __APPLE_API_PRIVATE
struct shminfo shminfo = {
	-1,	/* SHMMAX 4096 *1024 */
	-1,	/* SHMMIN = 1 */
	-1,	/* SHMMNI = 1 */
	-1,	/* SHMSEG = 8 */
	-1	/* SHMALL = 1024 */
};
#endif /* __APPLE_API_PRIVATE */

void sysv_shm_lock_init(void);

static __inline__ time_t
sysv_shmtime(void)
{
	struct timeval	tv;
	microtime(&tv);
	return (tv.tv_sec);
}

/*
 * This conversion is safe, since if we are converting for a 32 bit process,
 * then the value of (struct shmid_ds)->shm_segsz will never exceed 4G.
 *
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
shmid_ds_64to32(struct user_shmid_ds *in, struct shmid_ds *out)
{
	out->shm_perm = in->shm_perm;
	out->shm_segsz = (size_t)in->shm_segsz;
	out->shm_lpid = in->shm_lpid;
	out->shm_cpid = in->shm_cpid;
	out->shm_nattch = in->shm_nattch;
	out->shm_atime = in->shm_atime;
	out->shm_dtime = in->shm_dtime;
	out->shm_ctime = in->shm_ctime;
	out->shm_internal = CAST_DOWN(void *, in->shm_internal);
}

/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
shmid_ds_32to64(struct shmid_ds *in, struct user_shmid_ds *out)
{
	out->shm_internal = CAST_USER_ADDR_T(in->shm_internal);
	out->shm_ctime = in->shm_ctime;
	out->shm_dtime = in->shm_dtime;
	out->shm_atime = in->shm_atime;
	out->shm_nattch = in->shm_nattch;
	out->shm_cpid = in->shm_cpid;
	out->shm_lpid = in->shm_lpid;
	out->shm_segsz = (user_size_t)in->shm_segsz;
	out->shm_perm = in->shm_perm;
}
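
/*
 * Worked example of the overlap guarantee above: shmctl(IPC_SET) for a
 * 32 bit caller does copyin() into the front of a user_shmid_ds buffer and
 * then calls shmid_ds_32to64((struct shmid_ds *)&inbuf, &inbuf).  Copying
 * back-to-front means each widened 64 bit field is written at or beyond
 * the offsets of the 32 bit fields still waiting to be read, so no unread
 * source field is clobbered.  The reverse direction (64to32) writes
 * front-to-back into a smaller layout and would overwrite its own source,
 * hence the "may *NOT* overlap" warning on shmid_ds_64to32().
 */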

static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

static struct user_shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct user_shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

static void
shm_deallocate_segment(struct user_shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	mach_vm_size_t size;

	shm_handle = CAST_DOWN(void *, shmseg->shm_internal);	/* tunnel */
	size = mach_vm_round_page(shmseg->shm_segsz);
	mach_memory_entry_port_release(shm_handle->shm_object);
	shm_handle->shm_object = NULL;
	FREE((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = USER_ADDR_NULL;		/* tunnel */
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}
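
/*
 * A note on the "tunnel" comments above: shm_internal is a user_addr_t in
 * struct user_shmid_ds because that structure is copied out to user space,
 * but the kernel smuggles ("tunnels") a kernel pointer to the segment's
 * struct shm_handle through it, converting with CAST_USER_ADDR_T on the
 * way in and CAST_DOWN on the way out.  The value is meaningless outside
 * the kernel; user programs treat the field as opaque.
 */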

static int
shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s,
	int deallocate)
{
	struct user_shmid_ds *shmseg;
	int segnum, result;
	mach_vm_size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = mach_vm_round_page(shmseg->shm_segsz);	/* XXX done for us? */
	if (deallocate) {
		result = mach_vm_deallocate(current_map(), shmmap_s->va, size);
		if (result != KERN_SUCCESS)
			return EINVAL;
	}
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = sysv_shmtime();
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

int
shmdt(struct proc *p, struct shmdt_args *uap, register_t *retval)
{
	struct shmmap_state *shmmap_s;
	int i;
	int shmdtret = 0;

	// LP64todo - fix this
	AUDIT_ARG(svipc_addr, CAST_DOWN(void *, uap->shmaddr));

	SYSV_SHM_SUBSYS_LOCK();

	if (!shm_inited) {
		shmdtret = EINVAL;
		goto shmdt_out;
	}
	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL) {
		shmdtret = EINVAL;
		goto shmdt_out;
	}

	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (mach_vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg) {
		shmdtret = EINVAL;
		goto shmdt_out;
	}
	i = shm_delete_mapping(p, shmmap_s, 1);

	if (i == 0)
		*retval = 0;
	shmdtret = i;
shmdt_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return shmdtret;
}

int
shmat(struct proc *p, struct shmat_args *uap, register_t *retval)
{
	int error, i, flags;
	struct user_shmid_ds	*shmseg;
	struct shmmap_state	*shmmap_s = NULL;
	struct shm_handle	*shm_handle;
	mach_vm_address_t	attach_va;	/* attach address in/out */
	mach_vm_size_t		map_size;	/* size of map entry */
	vm_prot_t		prot;
	size_t			size;
	kern_return_t		rv;
	int shmat_ret = 0;

	AUDIT_ARG(svipc_id, uap->shmid);
	// LP64todo - fix this
	AUDIT_ARG(svipc_addr, CAST_DOWN(void *, uap->shmaddr));

	SYSV_SHM_SUBSYS_LOCK();

	if (!shm_inited) {
		shmat_ret = EINVAL;
		goto shmat_out;
	}

	shmmap_s = (struct shmmap_state *)p->vm_shm;

	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK);
		if (shmmap_s == NULL) {
			shmat_ret = ENOMEM;
			goto shmat_out;
		}
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->vm_shm = (caddr_t)shmmap_s;
	}

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		shmat_ret = EINVAL;
		goto shmat_out;
	}

	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);
	error = ipcperm(kauth_cred_get(), &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error) {
		shmat_ret = error;
		goto shmat_out;
	}

	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		shmat_ret = EMFILE;
		goto shmat_out;
	}

	map_size = mach_vm_round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr)
		flags |= MAP_FIXED;

	attach_va = (mach_vm_address_t)uap->shmaddr;
	if (uap->shmflg & SHM_RND)
		attach_va &= ~(SHMLBA-1);
	else if ((attach_va & (SHMLBA-1)) != 0) {
		shmat_ret = EINVAL;
		goto shmat_out;
	}

	shm_handle = CAST_DOWN(void *, shmseg->shm_internal);	/* tunnel */

	rv = mach_vm_map(current_map(),		/* process map */
		&attach_va,			/* attach address */
		map_size,			/* segment size */
		(mach_vm_offset_t)0,		/* alignment mask */
		(flags & MAP_FIXED) ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
		shm_handle->shm_object,		/* memory entry */
		(mach_vm_offset_t)0,		/* offset */
		FALSE,				/* copy */
		prot,				/* current protection */
		VM_PROT_DEFAULT,		/* maximum protection */
		VM_INHERIT_NONE);
	if (rv != KERN_SUCCESS)
		goto out;

	rv = mach_vm_inherit(current_map(), attach_va, map_size, VM_INHERIT_SHARE);
	if (rv != KERN_SUCCESS) {
		(void)mach_vm_deallocate(current_map(), attach_va, map_size);
		goto out;
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = sysv_shmtime();
	shmseg->shm_nattch++;
	*retval = attach_va;	/* XXX return -1 on error */
	shmat_ret = 0;
	goto shmat_out;
out:
	switch (rv) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		shmat_ret = ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		shmat_ret = EACCES;
		break;
	default:
		shmat_ret = EINVAL;
		break;
	}
shmat_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return shmat_ret;
}

static int
oshmctl(__unused void *p, __unused void *uap, __unused void *retval)
{
	return EINVAL;
}

int
shmctl(__unused struct proc *p, struct shmctl_args *uap, register_t *retval)
{
	int error;
	kauth_cred_t cred = kauth_cred_get();
	struct user_shmid_ds inbuf;
	struct user_shmid_ds *shmseg;
	size_t shmid_ds_sz = sizeof(struct user_shmid_ds);

	int shmctl_ret = 0;

	AUDIT_ARG(svipc_cmd, uap->cmd);
	AUDIT_ARG(svipc_id, uap->shmid);

	SYSV_SHM_SUBSYS_LOCK();

	if (!shm_inited) {
		shmctl_ret = EINVAL;
		goto shmctl_out;
	}

	if (!IS_64BIT_PROCESS(p))
		shmid_ds_sz = sizeof(struct shmid_ds);

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		shmctl_ret = EINVAL;
		goto shmctl_out;
	}

	/* XXAUDIT: This is the perms BEFORE any change by this call.  This
	 * may not be what is desired.
	 */
	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error) {
			shmctl_ret = error;
			goto shmctl_out;
		}

		if (IS_64BIT_PROCESS(p)) {
			error = copyout(shmseg, uap->buf, sizeof(struct user_shmid_ds));
		} else {
			struct shmid_ds shmid_ds32;
			shmid_ds_64to32(shmseg, &shmid_ds32);
			error = copyout(&shmid_ds32, uap->buf, sizeof(struct shmid_ds));
		}
		if (error) {
			shmctl_ret = error;
			goto shmctl_out;
		}
		break;
	case IPC_SET:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error) {
			shmctl_ret = error;
			goto shmctl_out;
		}
		if (IS_64BIT_PROCESS(p)) {
			error = copyin(uap->buf, &inbuf, sizeof(struct user_shmid_ds));
		} else {
			error = copyin(uap->buf, &inbuf, sizeof(struct shmid_ds));
			/* convert in place; ugly, but safe */
			shmid_ds_32to64((struct shmid_ds *)&inbuf, &inbuf);
		}
		if (error) {
			shmctl_ret = error;
			goto shmctl_out;
		}
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = sysv_shmtime();
		break;
	case IPC_RMID:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error) {
			shmctl_ret = error;
			goto shmctl_out;
		}
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
	default:
		shmctl_ret = EINVAL;
		goto shmctl_out;
	}
	*retval = 0;
	shmctl_ret = 0;
shmctl_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return shmctl_ret;
}

static int
shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval)
{
	struct user_shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	error = ipcperm(kauth_cred_get(), &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode,
	int *retval)
{
	int i, segnum, shmid, size;
	kauth_cred_t cred = kauth_cred_get();
	struct user_shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	kern_return_t kret;
	vm_offset_t user_addr;
	void *mem_object;

	if (uap->size < (user_size_t)shminfo.shmmin ||
	    uap->size > (user_size_t)shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = mach_vm_round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	kret = vm_allocate(current_map(), &user_addr, size, VM_FLAGS_ANYWHERE);
	if (kret != KERN_SUCCESS)
		goto out;

	kret = mach_make_memory_entry(current_map(), &size, user_addr,
	    VM_PROT_DEFAULT, (mem_entry_name_port_t *)&mem_object, 0);

	if (kret != KERN_SUCCESS)
		goto out;

	vm_deallocate(current_map(), user_addr, size);

	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	MALLOC(shm_handle, struct shm_handle *, sizeof(struct shm_handle), M_SHM, M_WAITOK);
	if (shm_handle == NULL) {
		kret = KERN_NO_SPACE;
		mach_memory_entry_port_release(mem_object);
		mem_object = NULL;
		goto out;
	}
	shm_handle->shm_object = mem_object;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->shm_internal = CAST_USER_ADDR_T(shm_handle);	/* tunnel */
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = kauth_cred_getuid(cred);
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = sysv_shmtime();
	shm_committed += btoc(size);
	shm_nused++;
	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	AUDIT_ARG(svipc_id, shmid);
	return 0;
out:
	switch (kret) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

int
shmget(struct proc *p, struct shmget_args *uap, register_t *retval)
{
	int segnum, mode, error;
	int shmget_ret = 0;

	/* Auditing is actually done in shmget_allocate_segment() */

	SYSV_SHM_SUBSYS_LOCK();

	if (!shm_inited) {
		shmget_ret = EINVAL;
		goto shmget_out;
	}

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			shmget_ret = error;
			goto shmget_out;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			shmget_ret = ENOENT;
			goto shmget_out;
		}
	}
	shmget_ret = shmget_allocate_segment(p, uap, mode, retval);
shmget_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return shmget_ret;
}
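
/*
 * For reference, the userland sequence these entry points serve is the
 * classic System V one (a minimal sketch; the 0600 mode and 4096 byte
 * size are arbitrary example values, not anything this file mandates):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *addr = shmat(id, NULL, 0);	// map the segment
 *	...use addr...
 *	shmdt(addr);				// drop the mapping
 *	shmctl(id, IPC_RMID, NULL);		// free at last detach
 */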

/* XXX actually varargs. */
int
shmsys(struct proc *p, struct shmsys_args *uap, register_t *retval)
{

	/* The routine that we are dispatching already does this */

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2, retval));
}
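
/*
 * Note on the dispatch above: uap->which selects an entry in shmcalls[]
 * (shmat, oshmctl, shmdt, shmget, shmctl), and &uap->a2 hands the rest of
 * the shmsys() argument block to the target routine as if it were that
 * routine's own args structure, which is why the sy_call_t casts in the
 * table are flagged as bogus.
 */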

/*
 * Return 0 on success, 1 on failure.
 */
int
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;
	int shmfork_ret = 0;

	SYSV_SHM_SUBSYS_LOCK();

	if (!shm_inited) {
		shmfork_ret = 0;
		goto shmfork_out;
	}

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK);
	if (shmmap_s != NULL) {
		bcopy((caddr_t)p1->vm_shm, (caddr_t)shmmap_s, size);
		p2->vm_shm = (caddr_t)shmmap_s;
		for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
			if (shmmap_s->shmid != -1)
				shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
		shmfork_ret = 0;
		goto shmfork_out;
	}

	shmfork_ret = 1;	/* failed to copy to child - ENOMEM */
shmfork_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return shmfork_ret;
}

void
shmexit(struct proc *p)
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->vm_shm;

	SYSV_SHM_SUBSYS_LOCK();
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s, 1);
	FREE((caddr_t)p->vm_shm, M_SHM);
	p->vm_shm = NULL;
	SYSV_SHM_SUBSYS_UNLOCK();
}

/*
 * shmexec() is like shmexit(), only it doesn't delete the mappings,
 * since the old address space has already been destroyed and the new
 * one instantiated.  Instead, it just does the housekeeping work we
 * need to do to keep the System V shared memory subsystem sane.
 */
__private_extern__ void
shmexec(struct proc *p)
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->vm_shm;
	SYSV_SHM_SUBSYS_LOCK();
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s, 0);
	FREE((caddr_t)p->vm_shm, M_SHM);
	p->vm_shm = NULL;
	SYSV_SHM_SUBSYS_UNLOCK();
}

void
shminit(__unused void *dummy)
{
	int i;
	int s;

	if (!shm_inited) {
		/*
		 * we store internally 64 bit, since if we didn't, we would
		 * be unable to represent a segment size in excess of 32 bits
		 * with the (struct shmid_ds)->shm_segsz field; also, POSIX
		 * dictates this field be a size_t, which is 64 bits when
		 * running 64 bit binaries.
		 */
		s = sizeof(struct user_shmid_ds) * shminfo.shmmni;

		MALLOC(shmsegs, struct user_shmid_ds *, s, M_SHM, M_WAITOK);
		if (shmsegs == NULL) {
			/* XXX fail safely: leave shared memory uninited */
			return;
		}
		for (i = 0; i < shminfo.shmmni; i++) {
			shmsegs[i].shm_perm.mode = SHMSEG_FREE;
			shmsegs[i].shm_perm.seq = 0;
		}
		shm_last_free = 0;
		shm_nused = 0;
		shm_committed = 0;
		shm_inited = 1;
	}
}

/* Initialize the mutex governing access to the SysV shm subsystem */
__private_extern__ void
sysv_shm_lock_init( void )
{
	sysv_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	sysv_shm_subsys_lck_grp = lck_grp_alloc_init("sysv_shm_subsys_lock", sysv_shm_subsys_lck_grp_attr);

	sysv_shm_subsys_lck_attr = lck_attr_alloc_init();
	lck_mtx_init(&sysv_shm_subsys_mutex, sysv_shm_subsys_lck_grp, sysv_shm_subsys_lck_attr);
}

/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
	struct sysctl_req *req) */
static int
sysctl_shminfo(__unused struct sysctl_oid *oidp, void *arg1,
	__unused int arg2, struct sysctl_req *req)
{
	int error = 0;
	int sysctl_shminfo_ret = 0;

	error = SYSCTL_OUT(req, arg1, sizeof(int64_t));
	if (error || req->newptr == USER_ADDR_NULL)
		return(error);

	SYSV_SHM_SUBSYS_LOCK();
	/* Set the values only if shared memory is not initialised */
	if (!shm_inited) {
		if ((error = SYSCTL_IN(req, arg1, sizeof(int64_t))) != 0) {
			sysctl_shminfo_ret = error;
			goto sysctl_shminfo_out;
		}

		if (arg1 == &shminfo.shmmax) {
			if (shminfo.shmmax & PAGE_MASK_64) {
				shminfo.shmmax = (int64_t)-1;
				sysctl_shminfo_ret = EINVAL;
				goto sysctl_shminfo_out;
			}
		}

		/* Initialize only when all values are set */
		if ((shminfo.shmmax != (int64_t)-1) &&
		    (shminfo.shmmin != (int64_t)-1) &&
		    (shminfo.shmmni != (int64_t)-1) &&
		    (shminfo.shmseg != (int64_t)-1) &&
		    (shminfo.shmall != (int64_t)-1)) {
			shminit(NULL);
		}
	}
	sysctl_shminfo_ret = 0;
sysctl_shminfo_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return sysctl_shminfo_ret;
}

static int
IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
	__unused int arg2, struct sysctl_req *req)
{
	int error;
	int cursor;
	union {
		struct IPCS_command u32;
		struct user_IPCS_command u64;
	} ipcs;
	struct shmid_ds shmid_ds32;	/* post conversion, 32 bit version */
	void *shmid_dsp;
	size_t ipcs_sz = sizeof(struct user_IPCS_command);
	size_t shmid_ds_sz = sizeof(struct user_shmid_ds);
	struct proc *p = current_proc();

	int ipcs__shminfo_ret = 0;

	SYSV_SHM_SUBSYS_LOCK();

	if (!shm_inited) {
		error = EINVAL;
		goto ipcs_shm_sysctl_out;
	}

	if (!IS_64BIT_PROCESS(p)) {
		ipcs_sz = sizeof(struct IPCS_command);
		shmid_ds_sz = sizeof(struct shmid_ds);
	}

	/* Copy in the command structure */
	if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
		goto ipcs_shm_sysctl_out;
	}

	if (!IS_64BIT_PROCESS(p))	/* convert in place */
		ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);

	/* Let us version this interface... */
	if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
		error = EINVAL;
		goto ipcs_shm_sysctl_out;
	}

	switch (ipcs.u64.ipcs_op) {
	case IPCS_SHM_CONF:	/* Obtain global configuration data */
		if (ipcs.u64.ipcs_datalen != sizeof(struct shminfo)) {
			if (ipcs.u64.ipcs_cursor != 0) {	/* fwd. compat. */
				error = ENOMEM;
				break;
			}
			error = ERANGE;
			break;
		}
		error = copyout(&shminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
		break;

	case IPCS_SHM_ITER:	/* Iterate over existing segments */
		cursor = ipcs.u64.ipcs_cursor;
		if (cursor < 0 || cursor >= shminfo.shmmni) {
			error = ERANGE;
			break;
		}
		if (ipcs.u64.ipcs_datalen != (int)shmid_ds_sz) {
			error = ENOMEM;
			break;
		}
		for ( ; cursor < shminfo.shmmni; cursor++) {
			if (shmsegs[cursor].shm_perm.mode & SHMSEG_ALLOCATED)
				break;
		}
		if (cursor == shminfo.shmmni) {
			error = ENOENT;
			break;
		}

		shmid_dsp = &shmsegs[cursor];	/* default: 64 bit */

		/*
		 * If necessary, convert the 64 bit kernel segment
		 * descriptor to a 32 bit user one.
		 */
		if (!IS_64BIT_PROCESS(p)) {
			shmid_ds_64to32(shmid_dsp, &shmid_ds32);
			shmid_dsp = &shmid_ds32;
		}
		error = copyout(shmid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
		if (!error) {
			/* update cursor */
			ipcs.u64.ipcs_cursor = cursor + 1;

			if (!IS_64BIT_PROCESS(p))	/* convert in place */
				ipcs.u32.ipcs_data = CAST_DOWN(void *, ipcs.u64.ipcs_data);
			error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
		}
		break;

	default:
		error = EINVAL;
		break;
	}
ipcs_shm_sysctl_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return(error);
}

SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMAX, shmmax, CTLTYPE_QUAD | CTLFLAG_RW,
    &shminfo.shmmax, 0, &sysctl_shminfo, "Q", "shmmax");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMIN, shmmin, CTLTYPE_QUAD | CTLFLAG_RW,
    &shminfo.shmmin, 0, &sysctl_shminfo, "Q", "shmmin");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMNI, shmmni, CTLTYPE_QUAD | CTLFLAG_RW,
    &shminfo.shmmni, 0, &sysctl_shminfo, "Q", "shmmni");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMSEG, shmseg, CTLTYPE_QUAD | CTLFLAG_RW,
    &shminfo.shmseg, 0, &sysctl_shminfo, "Q", "shmseg");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMALL, shmall, CTLTYPE_QUAD | CTLFLAG_RW,
    &shminfo.shmall, 0, &sysctl_shminfo, "Q", "shmall");

SYSCTL_NODE(_kern_sysv, OID_AUTO, ipcs, CTLFLAG_RW, 0, "SYSVIPCS");

SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, shm, CTLFLAG_RW|CTLFLAG_ANYBODY,
	0, 0, IPCS_shm_sysctl,
	"S,IPCS_shm_command",
	"ipcs shm command interface");