/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/appleapiopts.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/kern_audit.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <vm/vm_map.h>
extern int shmat __P((struct proc *p, struct shmat_args *uap, int *retval));
extern int shmctl __P((struct proc *p, struct shmctl_args *uap, int *retval));
extern int shmdt __P((struct proc *p, struct shmdt_args *uap, int *retval));
extern int shmget __P((struct proc *p, struct shmget_args *uap, int *retval));

static void shminit __P((void *));
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap, int *retval));
static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode, int *retval));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum, int *retval));

typedef int sy_call_t __P((struct proc *, void *, int *));
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
};
#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;
static int shm_inited = 0;
struct shm_handle {
	/* vm_offset_t kva; */
	void *shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *, int));
#ifdef __APPLE_API_PRIVATE
struct shminfo shminfo = {
	-1,	/* SHMMAX 4096 *1024 */
	-1,	/* SHMMIN = 1 */
	-1,	/* SHMMNI = 1 */
	-1,	/* SHMSEG = 8 */
	-1	/* SHMALL = 1024 */
};
#endif /* __APPLE_API_PRIVATE */
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}
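/*
 * Hedged sketch (not part of the original source) of the shmid round trip
 * the lookup above relies on: the classic BSD <sys/ipc.h> macros pack a
 * slot index and a generation sequence into one id, roughly
 * id = (seq << 16) | index, so a stale id whose slot has been reused fails
 * the seq comparison.  The exact shift is an assumption; the authoritative
 * definitions are IXSEQ_TO_IPCID/IPCID_TO_IX/IPCID_TO_SEQ in <sys/ipc.h>.
 *
 *	int id  = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
 *	int ix  = IPCID_TO_IX(id);	// recovers segnum
 *	int seq = IPCID_TO_SEQ(id);	// must match shm_perm.seq
 */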
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	struct shmmap_state *shmmap_s = NULL;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = round_page_32(shmseg->shm_segsz);
	mach_destroy_memory_entry(shm_handle->shm_object);
	FREE((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}
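/*
 * Worked example of the accounting above (illustrative only, assuming a
 * 4 KB page size): a segment created with shm_segsz = 10000 bytes rounds
 * up to round_page_32(10000) = 12288 bytes, i.e. btoc(12288) = 3 clicks,
 * so shm_committed drops by 3 here, balancing the 3 clicks that
 * shmget_allocate_segment() charged against shminfo.shmall at creation.
 */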
static int
shm_delete_mapping(p, shmmap_s, deallocate)
	struct proc *p;
	struct shmmap_state *shmmap_s;
	int deallocate;
{
	int segnum, result;
	size_t size;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page_32(shmseg->shm_segsz);
	if (deallocate) {
		result = vm_deallocate(current_map(), shmmap_s->va, size);
		if (result != KERN_SUCCESS)
			return EINVAL;
	}
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}
#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	void *shmaddr;
};
#endif

int
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	struct shmmap_state *shmmap_s;
	int i;

	AUDIT_ARG(svipc_addr, uap->shmaddr);
	if (!shm_inited)
		return(EINVAL);
	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s, 1);
}
#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	int *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	kern_return_t rv;

	AUDIT_ARG(svipc_id, uap->shmid);
	AUDIT_ARG(svipc_addr, uap->shmaddr);
	if (!shm_inited)
		return(EINVAL);
	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;

	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);
	error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;

	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;

	size = round_page_32(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		attach_va = round_page_32((unsigned int)uap->shmaddr);
	}

	shm_handle = shmseg->shm_internal;
	rv = vm_map(current_map(), &attach_va, size, 0,
	    (flags & MAP_FIXED) ? FALSE : TRUE,
	    shm_handle->shm_object, 0, FALSE, prot, prot, VM_INHERIT_DEFAULT);
	if (rv != KERN_SUCCESS)
		goto out;
	rv = vm_inherit(current_map(), attach_va, size, VM_INHERIT_SHARE);
	if (rv != KERN_SUCCESS) {
		(void) vm_deallocate(current_map(), attach_va, size);
		goto out;
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
out:
	switch (rv) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
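/*
 * Hedged userland sketch of the attach path implemented above (not part
 * of the original file); this is the standard <sys/shm.h> API, with error
 * handling elided for brevity:
 *
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *base = (char *)shmat(id, NULL, 0); // kernel picks attach_va
 *	base[0] = 'x';                           // segment is now mapped
 *	shmdt(base);                             // drops shm_nattch
 *	shmctl(id, IPC_RMID, NULL);              // destroy once detached
 */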
struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};
static int
oshmctl(p, uap, retval)
	struct proc *p;
	struct oshmctl_args *uap;
	int *retval;
{
#ifdef COMPAT_43
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!shm_inited)
		return(EINVAL);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(p, uap, retval);
	}
	return 0;
#else
	return EINVAL;
#endif
}
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	int *retval;
{
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	AUDIT_ARG(svipc_cmd, uap->cmd);
	AUDIT_ARG(svipc_id, uap->shmid);
	if (!shm_inited)
		return(EINVAL);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;

	/*
	 * XXAUDIT: This is the perms BEFORE any change by this call. This
	 * may not be what is desired.
	 */
	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
	default:
		return EINVAL;
	}
	return 0;
}
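/*
 * Illustrative note on the IPC_RMID semantics above (sketch, not part of
 * the original source): removal is deferred while attachments remain.
 * From userland:
 *
 *	shmctl(id, IPC_RMID, NULL); // sets SHMSEG_REMOVED, hides the key
 *	// ...existing attaches keep working...
 *	shmdt(addr);                // last detach frees the segment
 *	                            // (see shm_delete_mapping())
 */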
#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
	int *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	error = ipcperm(cred, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	kern_return_t kret;
	vm_offset_t user_addr;
	void *mem_object;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page_32(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	kret = vm_allocate(current_map(), &user_addr, size, TRUE);
	if (kret != KERN_SUCCESS)
		goto out;
	kret = mach_make_memory_entry(current_map(), &size,
	    user_addr, VM_PROT_DEFAULT, &mem_object, 0);
	if (kret != KERN_SUCCESS)
		goto out;
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    _MALLOC(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shm_handle->shm_object = mem_object;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	AUDIT_ARG(svipc_id, shmid);
	return 0;
out:
	switch (kret) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
int
shmget(p, uap, retval)
	struct proc *p;
	struct shmget_args *uap;
	int *retval;
{
	int segnum, mode, error;

	/* Auditing is actually done in shmget_allocate_segment() */
	if (!shm_inited)
		return(EINVAL);

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return (shmget_allocate_segment(p, uap, mode, retval));
}
#ifndef _SYS_SYSPROTO_H_
struct shmsys_args {
	u_int which;
	int a2;
	int a3;
	int a4;
};
#endif

int
shmsys(p, uap, retval)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args *uap;
	int *retval;
{

	if (!shm_inited)
		return(EINVAL);

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2, retval));
}
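/*
 * Dispatch sketch for the shmcalls[] table consumed above (illustrative;
 * the indices come straight from the table's initializer near the top of
 * this file):
 *
 *	shmsys(0, ...) -> shmat()
 *	shmsys(1, ...) -> oshmctl()	(old-style shmctl)
 *	shmsys(2, ...) -> shmdt()
 *	shmsys(3, ...) -> shmget()
 */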
void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	if (!shm_inited)
		return;
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->vm_shm, (caddr_t)shmmap_s, size);
	p2->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s, 1);
	FREE((caddr_t)p->vm_shm, M_SHM);
	p->vm_shm = NULL;
}
/*
 * shmexec() is like shmexit(), only it doesn't delete the mappings,
 * since the old address space has already been destroyed and the new
 * one instantiated.  Instead, it just does the housekeeping work we
 * need to do to keep the System V shared memory subsystem sane.
 */
__private_extern__ void
shmexec(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s, 0);
	FREE((caddr_t)p->vm_shm, M_SHM);
	p->vm_shm = NULL;
}
void
shminit(dummy)
	void *dummy;
{
	int i;
	int s;

	if (!shm_inited) {
		s = sizeof(struct shmid_ds) * shminfo.shmmni;

		MALLOC(shmsegs, struct shmid_ds *, s,
		    M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmmni; i++) {
			shmsegs[i].shm_perm.mode = SHMSEG_FREE;
			shmsegs[i].shm_perm.seq = 0;
		}
		shm_last_free = 0;
		shm_nused = 0;
		shm_committed = 0;
		shm_inited = 1;
	}
}
/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
	struct sysctl_req *req) */
static int
sysctl_shminfo SYSCTL_HANDLER_ARGS
{
	int error = 0;

	error = SYSCTL_OUT(req, arg1, sizeof(int));
	if (error || !req->newptr)
		return(error);

	/* Set the values only if shared memory is not initialised */
	if (!shm_inited) {
		if ((error = SYSCTL_IN(req, arg1, sizeof(int))))
			return(error);
		if (arg1 == &shminfo.shmmax) {
			/* shmmax must be a multiple of the page size */
			if (shminfo.shmmax & PAGE_MASK) {
				return(EINVAL);
			}
		}

		/* Initialize only when all values are set */
		if ((shminfo.shmmax != -1) &&
		    (shminfo.shmmin != -1) &&
		    (shminfo.shmmni != -1) &&
		    (shminfo.shmseg != -1) &&
		    (shminfo.shmall != -1)) {
			shminit(NULL);
		}
	}
	return(0);
}
SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMAX, shmmax, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmax, 0, &sysctl_shminfo, "I", "shmmax");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMIN, shmmin, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmin, 0, &sysctl_shminfo, "I", "shmmin");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMNI, shmmni, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmni, 0, &sysctl_shminfo, "I", "shmmni");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMSEG, shmseg, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmseg, 0, &sysctl_shminfo, "I", "shmseg");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMALL, shmall, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmall, 0, &sysctl_shminfo, "I", "shmall");