/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/appleapiopts.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>

#include <bsm/audit_kernel.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <vm/vm_map.h>
extern int shmat __P((struct proc *p, struct shmat_args *uap, int *retval));
extern int shmctl __P((struct proc *p, struct shmctl_args *uap, int *retval));
extern int shmdt __P((struct proc *p, struct shmdt_args *uap, int *retval));
extern int shmget __P((struct proc *p, struct shmget_args *uap, int *retval));
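
/*
 * Illustrative userland sketch (not part of this file) of the System V
 * shared memory lifecycle these four entry points implement:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *base = shmat(id, NULL, 0);	-- maps the segment
 *	base[0] = 'x';
 *	shmdt(base);				-- drops this mapping
 *	shmctl(id, IPC_RMID, NULL);		-- freed once unattached
 */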
static void shminit __P((void *));
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap, int *retval));
static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode, int *retval));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum, int *retval));

typedef int sy_call_t __P((struct proc *, void *, int *));
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
};
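
/*
 * Read together with shmsys() below: the old-style multiplexed syscall
 * dispatches through this table by index, so which == 0 reaches shmat,
 * 1 reaches oshmctl, 2 reaches shmdt, and 3 reaches shmget.
 */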
#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000
static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;
static int shm_inited = 0;

struct shm_handle {
	/* vm_offset_t kva; */
	void * shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};
static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *, int));

#ifdef __APPLE_API_PRIVATE
struct shminfo shminfo = {
	-1,	/* SHMMAX 4096 *1024 */
	-1,	/* SHMMIN = 1 */
	-1,	/* SHMMNI = 1 */
	-1,	/* SHMSEG = 8 */
	-1	/* SHMALL = 1024 */
};
#endif /* __APPLE_API_PRIVATE */
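
/*
 * All five limits start out at -1 (unset); the calls below fail with
 * EINVAL until every limit has been set through the kern.sysv sysctls
 * declared at the end of this file, at which point sysctl_shminfo()
 * runs shminit().  See the illustrative sysctl sequence there.
 */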
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}
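
/*
 * Sketch of the shmid encoding the IPCID_TO_IX/IPCID_TO_SEQ/
 * IXSEQ_TO_IPCID macros (from <sys/ipc.h>) assume: the low 16 bits of
 * an id select the slot in shmsegs[], and the upper bits carry a
 * per-slot sequence number that is bumped each time the slot is
 * reused, so a stale id for a recycled slot fails the seq comparison
 * above instead of aliasing the new segment.
 */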
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	struct shmmap_state *shmmap_s = NULL;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = round_page_32(shmseg->shm_segsz);
	mach_destroy_memory_entry(shm_handle->shm_object);
	FREE((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}
static int
shm_delete_mapping(p, shmmap_s, deallocate)
	struct proc *p;
	struct shmmap_state *shmmap_s;
	int deallocate;
{
	int segnum, result;
	size_t size;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page_32(shmseg->shm_segsz);
	if (deallocate) {
		result = vm_deallocate(current_map(), shmmap_s->va, size);
		if (result != KERN_SUCCESS)
			return EINVAL;
	}
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}
int
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	struct shmmap_state *shmmap_s;
	int i;

	AUDIT_ARG(svipc_addr, uap->shmaddr);
	if (!shm_inited)
		return(EINVAL);
	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s, 1);
}
#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif
int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	int *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	kern_return_t rv;

	AUDIT_ARG(svipc_id, uap->shmid);
	AUDIT_ARG(svipc_addr, uap->shmaddr);
	if (!shm_inited)
		return(EINVAL);
	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;

	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);
	error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;

	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;

	size = round_page_32(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		attach_va = round_page_32((unsigned int)uap->shmaddr);
	}

	shm_handle = shmseg->shm_internal;
	rv = vm_map(current_map(), &attach_va, size, 0,
	    (flags & MAP_FIXED) ? FALSE : TRUE,
	    shm_handle->shm_object, 0, FALSE, prot, prot, VM_INHERIT_DEFAULT);
	if (rv != KERN_SUCCESS)
		goto out;
	rv = vm_inherit(current_map(), attach_va, size,
	    VM_INHERIT_SHARE);
	if (rv != KERN_SUCCESS) {
		(void) vm_deallocate(current_map(), attach_va, size);
		goto out;
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return( 0);
out:
	switch (rv) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
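
/*
 * Worked example of the SHM_RND path above, assuming SHMLBA is the 4K
 * page size: shmat(id, (void *)0x5432, SHM_RND) truncates the hint to
 * 0x5000 (0x5432 & ~0xfff), while the same unaligned hint without
 * SHM_RND is rejected with EINVAL.
 */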
struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};
static int
oshmctl(p, uap, retval)
	struct proc *p;
	struct oshmctl_args *uap;
	int *retval;
{
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!shm_inited)
		return(EINVAL);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(p, uap, retval);
	}
	return 0;
}
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif
int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	int *retval;
{
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	AUDIT_ARG(svipc_cmd, uap->cmd);
	AUDIT_ARG(svipc_id, uap->shmid);
	if (!shm_inited)
		return(EINVAL);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;

	/* XXAUDIT: This is the perms BEFORE any change by this call. This
	 * may not be what is desired.
	 */
	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
	default:
		return EINVAL;
	}
	return 0;
}
#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
	int *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error = 0;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	error = ipcperm(cred, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	kern_return_t kret;
	vm_offset_t user_addr;
	void *mem_object;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page_32(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that noone else tries to create the same key.
	 */
	kret = vm_allocate(current_map(), &user_addr, size, TRUE);
	if (kret != KERN_SUCCESS)
		goto out;

	kret = mach_make_memory_entry(current_map(), &size,
	    user_addr, VM_PROT_DEFAULT, &mem_object, 0);

	if (kret != KERN_SUCCESS)
		goto out;
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    _MALLOC(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shm_handle->shm_object = mem_object;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	AUDIT_ARG(svipc_perm, &shmseg->shm_perm);
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	AUDIT_ARG(svipc_id, shmid);
	return 0;
out:
	switch (kret) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
int
shmget(p, uap, retval)
	struct proc *p;
	struct shmget_args *uap;
	int *retval;
{
	int segnum, mode, error;

	/* Auditing is actually done in shmget_allocate_segment() */
	if (!shm_inited)
		return(EINVAL);

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return( shmget_allocate_segment(p, uap, mode, retval));
}
#ifndef _SYS_SYSPROTO_H_
struct shmsys_args {
	u_int	which;
	int	a2;
	int	a3;
	int	a4;
};
#endif

int
shmsys(p, uap, retval)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args *uap;
	int *retval;
{
	if (!shm_inited)
		return(EINVAL);

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2, retval));
}
void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	if (!shm_inited)
		return;
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->vm_shm, (caddr_t)shmmap_s, size);
	p2->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s, 1);
	FREE((caddr_t)p->vm_shm, M_SHM);
	p->vm_shm = NULL;
}
/*
 * shmexec() is like shmexit(), only it doesn't delete the mappings,
 * since the old address space has already been destroyed and the new
 * one instantiated.  Instead, it just does the housekeeping work we
 * need to do to keep the System V shared memory subsystem sane.
 */
__private_extern__ void
shmexec(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s, 0);
	FREE((caddr_t)p->vm_shm, M_SHM);
	p->vm_shm = NULL;
}
static void
shminit(dummy)
	void *dummy;
{
	int i;
	int s;

	if (!shm_inited) {
		s = sizeof(struct shmid_ds) * shminfo.shmmni;

		MALLOC(shmsegs, struct shmid_ds *, s, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmmni; i++) {
			shmsegs[i].shm_perm.mode = SHMSEG_FREE;
			shmsegs[i].shm_perm.seq = 0;
		}
		shm_last_free = 0;
		shm_nused = 0;
		shm_committed = 0;
		shm_inited = 1;
	}
}
/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
	struct sysctl_req *req) */
static int
sysctl_shminfo SYSCTL_HANDLER_ARGS
{
	int error = 0;

	error = SYSCTL_OUT(req, arg1, sizeof(int));
	if (error || !req->newptr)
		return(error);

	/* Set the values only if shared memory is not initialised */
	if (!shm_inited) {
		if (error = SYSCTL_IN(req, arg1, sizeof(int)))
			return(error);
		if (arg1 == &shminfo.shmmax) {
			if (shminfo.shmmax & PAGE_MASK) {
				shminfo.shmmax = -1;
				return(EINVAL);
			}
		}

		/* Initialize only when all values are set */
		if ((shminfo.shmmax != -1) &&
		    (shminfo.shmmin != -1) &&
		    (shminfo.shmmni != -1) &&
		    (shminfo.shmseg != -1) &&
		    (shminfo.shmall != -1)) {
			shminit(NULL);
		}
	}
	return(0);
}
SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMAX, shmmax, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmax, 0, &sysctl_shminfo, "I", "shmmax");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMIN, shmmin, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmin, 0, &sysctl_shminfo, "I", "shmmin");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMNI, shmmni, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmni, 0, &sysctl_shminfo, "I", "shmmni");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMSEG, shmseg, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmseg, 0, &sysctl_shminfo, "I", "shmseg");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMALL, shmall, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmall, 0, &sysctl_shminfo, "I", "shmall");
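
/*
 * Illustrative configuration sequence (example values only; shmmax
 * must be a multiple of the page size or sysctl_shminfo() rejects it):
 *
 *	sysctl -w kern.sysv.shmmax=4194304
 *	sysctl -w kern.sysv.shmmin=1
 *	sysctl -w kern.sysv.shmmni=32
 *	sysctl -w kern.sysv.shmseg=8
 *	sysctl -w kern.sysv.shmall=1024
 *
 * Setting the last of the five triggers shminit() and enables the
 * shared memory calls above.
 */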