/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Implementation of SVID semaphores
 *
 * Author: Daniel Boulet
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 *
 * John Bellardo modified the implementation for Darwin. 12/2000
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 *
 * Copyright (c) 2005-2006 SPARTA, Inc.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/sem_internal.h>
#include <sys/malloc.h>
#include <mach/mach_types.h>

#include <sys/filedesc.h>
#include <sys/file_internal.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

#include <security/mac_framework.h>

#include <security/audit/audit.h>
#if SYSV_SEM

/* Uncomment this line to see the debugging output */
/* #define SEM_DEBUG */

/* Uncomment this line to see MAC debugging output. */
/* #define MAC_DEBUG */

#define MPRINTF(a) printf(a)

#define KM_SYSVSEM KHEAP_DEFAULT
/* Hard system limits to avoid resource starvation / DOS attacks.
 * These are not needed if we can make the semaphore pages swappable.
 */
static struct seminfo limitseminfo = {
    .semmap = SEMMAP,   /* # of entries in semaphore map */
    .semmni = SEMMNI,   /* # of semaphore identifiers */
    .semmns = SEMMNS,   /* # of semaphores in system */
    .semmnu = SEMMNU,   /* # of undo structures in system */
    .semmsl = SEMMSL,   /* max # of semaphores per id */
    .semopm = SEMOPM,   /* max # of operations per semop call */
    .semume = SEMUME,   /* max # of undo entries per process */
    .semusz = SEMUSZ,   /* size in bytes of undo structure */
    .semvmx = SEMVMX,   /* semaphore maximum value */
    .semaem = SEMAEM    /* adjust on exit max value */
};
/* Current system allocations. We use this structure to track how many
 * resources we have allocated so far. This way we can set large hard limits
 * and not allocate the memory for them up front.
 */
struct seminfo seminfo = {
    .semmap = SEMMAP,   /* Unused, # of entries in semaphore map */
    .semmni = 0,        /* # of semaphore identifiers */
    .semmns = 0,        /* # of semaphores in system */
    .semmnu = 0,        /* # of undo entries in system */
    .semmsl = SEMMSL,   /* max # of semaphores per id */
    .semopm = SEMOPM,   /* max # of operations per semop call */
    .semume = SEMUME,   /* max # of undo entries per process */
    .semusz = SEMUSZ,   /* size in bytes of undo structure */
    .semvmx = SEMVMX,   /* semaphore maximum value */
    .semaem = SEMAEM    /* adjust on exit max value */
};
static int semu_alloc(struct proc *p);
static int semundo_adjust(struct proc *p, int *supidx,
    int semid, int semnum, int adjval);
static void semundo_clear(int semid, int semnum);
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *const semcalls[] = {
    (sy_call_t *)semctl, (sy_call_t *)semget,
    (sy_call_t *)semop
};
static int semtot = 0;                  /* # of used semaphores */
struct semid_kernel *sema = NULL;       /* semaphore id pool */
struct sem *sem_pool = NULL;            /* semaphore pool */
static int semu_list_idx = -1;          /* active undo structures */
struct sem_undo *semu = NULL;           /* semaphore undo pool */

static LCK_GRP_DECLARE(sysv_sem_subsys_lck_grp, "sysv_sem_subsys_lock");
static LCK_MTX_DECLARE(sysv_sem_subsys_mutex, &sysv_sem_subsys_lck_grp);

#define SYSV_SEM_SUBSYS_LOCK() lck_mtx_lock(&sysv_sem_subsys_mutex)
#define SYSV_SEM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_sem_subsys_mutex)
static __inline__ user_time_t
sysv_semtime(void)
{
    struct timeval tv;

    microtime(&tv);
    return tv.tv_sec;
}

/*
 * XXX conversion of internal user_time_t to external time_t loses
 * XXX precision; not an issue for us now, since we are only ever
 * XXX setting 32 bits worth of time into it.
 *
 * pad field contents are not moved correspondingly; contents will be lost
 *
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
semid_ds_kernelto32(struct user_semid_ds *in, struct user32_semid_ds *out)
{
    out->sem_perm = in->sem_perm;
    out->sem_base = CAST_DOWN_EXPLICIT(__int32_t, in->sem_base);
    out->sem_nsems = in->sem_nsems;
    out->sem_otime = in->sem_otime;     /* XXX loses precision */
    out->sem_ctime = in->sem_ctime;     /* XXX loses precision */
}
static void
semid_ds_kernelto64(struct user_semid_ds *in, struct user64_semid_ds *out)
{
    out->sem_perm = in->sem_perm;
    out->sem_base = CAST_DOWN_EXPLICIT(__int32_t, in->sem_base);
    out->sem_nsems = in->sem_nsems;
    out->sem_otime = in->sem_otime;     /* XXX loses precision */
    out->sem_ctime = in->sem_ctime;     /* XXX loses precision */
}
/*
 * pad field contents are not moved correspondingly; contents will be lost
 *
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 *
 * XXX use CAST_USER_ADDR_T() for lack of a CAST_USER_TIME_T(); net effect
 * XXX is the same.
 */
static void
semid_ds_32tokernel(struct user32_semid_ds *in, struct user_semid_ds *out)
{
    out->sem_ctime = in->sem_ctime;
    out->sem_otime = in->sem_otime;
    out->sem_nsems = in->sem_nsems;
    out->sem_base = (void *)(uintptr_t)in->sem_base;
    out->sem_perm = in->sem_perm;
}
static void
semid_ds_64tokernel(struct user64_semid_ds *in, struct user_semid_ds *out)
{
    out->sem_ctime = in->sem_ctime;
    out->sem_otime = in->sem_otime;
    out->sem_nsems = in->sem_nsems;
    out->sem_base = (void *)(uintptr_t)in->sem_base;
    out->sem_perm = in->sem_perm;
}
/*
 * Entry point for all SEM calls: semctl, semget, semop
 *
 * Parameters:  p       Process requesting the call
 *              uap     User argument descriptor (see below)
 *              retval  Return value of the selected sem call
 *
 * Indirect parameters: uap->which   sem call to invoke (index in array of sem calls)
 *                      uap->a2      User argument descriptor
 *
 * Implicit returns:    retval       Return value of the selected sem call
 *
 * DEPRECATED:  This interface should not be used to call the other SEM
 *              functions (semctl, semget, semop). The correct usage is
 *              to call the other SEM functions directly.
 */
int
semsys(struct proc *p, struct semsys_args *uap, int32_t *retval)
{
    /* The individual calls handling the locking now */

    if (uap->which >= sizeof(semcalls) / sizeof(semcalls[0])) {
        return EINVAL;
    }
    return (*semcalls[uap->which])(p, &uap->a2, retval);
}
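/*
 * Illustrative user-space sketch (not part of this kernel file): per the
 * DEPRECATED note above, callers are expected to invoke semget/semop/semctl
 * directly rather than go through the semsys() multiplexer. The key value,
 * permission bits, and function name below are example inputs only.
 *
 *	#include <sys/sem.h>
 *	#include <err.h>
 *
 *	int
 *	example_create_and_remove(void)
 *	{
 *		// Create (or look up) a set of 2 semaphores, rw for the owner.
 *		int id = semget((key_t)0x1234, 2, IPC_CREAT | 0600);
 *		if (id == -1) {
 *			err(1, "semget");
 *		}
 *		// Release the set when it is no longer needed.
 *		if (semctl(id, 0, IPC_RMID) == -1) {
 *			err(1, "semctl(IPC_RMID)");
 *		}
 *		return 0;
 *	}
 */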
/*
 * Expand the semu array to the given capacity. If the expansion fails
 * return 0, otherwise return 1.
 *
 * Assumes we already have the subsystem lock.
 */
static int
grow_semu_array(int newSize)
{
    int i;
    struct sem_undo *newSemu;

    if (newSize <= seminfo.semmnu) {
        return 1;
    }
    if (newSize > limitseminfo.semmnu) {        /* enforce hard limit */
        printf("undo structure hard limit of %d reached, requested %d\n",
            limitseminfo.semmnu, newSize);
        return 0;
    }
    newSize = (newSize / SEMMNU_INC + 1) * SEMMNU_INC;
    newSize = newSize > limitseminfo.semmnu ? limitseminfo.semmnu : newSize;

    printf("growing semu[] from %d to %d\n", seminfo.semmnu, newSize);

    newSemu = kheap_alloc(KM_SYSVSEM, sizeof(struct sem_undo) * newSize,
        Z_WAITOK | Z_ZERO);
    if (NULL == newSemu) {
        printf("allocation failed. no changes made.\n");
        return 0;
    }

    /* copy the old data to the new array */
    for (i = 0; i < seminfo.semmnu; i++) {
        newSemu[i] = semu[i];
    }
    /*
     * The new elements (from newSemu[i] to newSemu[newSize-1]) have their
     * "un_proc" set to 0 (i.e. NULL) by the Z_ZERO flag to kheap_alloc
     * above, so they're already marked as "not in use".
     */

    /* Clean up the old array */
    kheap_free(KM_SYSVSEM, semu, sizeof(struct sem_undo) * seminfo.semmnu);
    semu = newSemu;
    seminfo.semmnu = newSize;

    printf("expansion successful\n");
    return 1;
}
/*
 * Expand the sema array to the given capacity. If the expansion fails
 * we return 0, otherwise we return 1.
 *
 * Assumes we already have the subsystem lock.
 */
static int
grow_sema_array(int newSize)
{
    int i;
    struct semid_kernel *newSema;

    if (newSize <= seminfo.semmni) {
        return 1;
    }
    if (newSize > limitseminfo.semmni) {        /* enforce hard limit */
        printf("identifier hard limit of %d reached, requested %d\n",
            limitseminfo.semmni, newSize);
        return 0;
    }
    newSize = (newSize / SEMMNI_INC + 1) * SEMMNI_INC;
    newSize = newSize > limitseminfo.semmni ? limitseminfo.semmni : newSize;

    printf("growing sema[] from %d to %d\n", seminfo.semmni, newSize);

    newSema = kheap_alloc(KM_SYSVSEM, sizeof(struct semid_kernel) * newSize,
        Z_WAITOK | Z_ZERO);
    if (NULL == newSema) {
        printf("allocation failed. no changes made.\n");
        return 0;
    }

    /* copy over the old ids */
    for (i = 0; i < seminfo.semmni; i++) {
        newSema[i] = sema[i];
        /* This is a hack. What we really want to be able to
         * do is change the value a process is waiting on
         * without waking it up, but I don't know how to do
         * this with the existing code, so we wake up the
         * process and let it do a lot of work to determine the
         * semaphore set is really not available yet, and then
         * sleep on the correct, reallocated semid_kernel pointer.
         */
        if (sema[i].u.sem_perm.mode & SEM_ALLOC) {
            wakeup((caddr_t)&sema[i]);
        }
    }

    for (i = seminfo.semmni; i < newSize; i++) {
        mac_sysvsem_label_init(&newSema[i]);
    }

    /*
     * The new elements (from newSema[i] to newSema[newSize-1]) have their
     * "sem_base" and "sem_perm.mode" set to 0 (i.e. NULL) by the Z_ZERO
     * flag to kheap_alloc above, so they're already marked as "not in use".
     */

    /* Clean up the old array */
    kheap_free(KM_SYSVSEM, sema,
        sizeof(struct semid_kernel) * seminfo.semmni);
    sema = newSema;
    seminfo.semmni = newSize;

    printf("expansion successful\n");
    return 1;
}
/*
 * Expand the sem_pool array to the given capacity. If the expansion fails
 * we return 0 (fail), otherwise we return 1 (success).
 *
 * Assumes we already hold the subsystem lock.
 */
static int
grow_sem_pool(int new_pool_size)
{
    int i;
    struct sem *new_sem_pool = NULL;
    struct sem *sem_free;

    if (new_pool_size < semtot) {
        return 0;
    }
    /* enforce hard limit */
    if (new_pool_size > limitseminfo.semmns) {
        printf("semaphore hard limit of %d reached, requested %d\n",
            limitseminfo.semmns, new_pool_size);
        return 0;
    }

    new_pool_size = (new_pool_size / SEMMNS_INC + 1) * SEMMNS_INC;
    new_pool_size = new_pool_size > limitseminfo.semmns ? limitseminfo.semmns : new_pool_size;

    printf("growing sem_pool array from %d to %d\n", seminfo.semmns, new_pool_size);

    new_sem_pool = kheap_alloc(KM_SYSVSEM, sizeof(struct sem) * new_pool_size,
        Z_WAITOK | Z_ZERO);
    if (NULL == new_sem_pool) {
        printf("allocation failed. no changes made.\n");
        return 0;
    }

    /* We have our new memory, now copy the old contents over */
    for (i = 0; i < seminfo.semmns; i++) {
        new_sem_pool[i] = sem_pool[i];
    }

    /* Update our id structures to point to the new semaphores */
    for (i = 0; i < seminfo.semmni; i++) {
        if (sema[i].u.sem_perm.mode & SEM_ALLOC) {      /* ID in use */
            sema[i].u.sem_base = new_sem_pool +
                (sema[i].u.sem_base - sem_pool);
        }
    }

    sem_free = sem_pool;
    sem_pool = new_sem_pool;

    /* clean up the old array */
    kheap_free(KM_SYSVSEM, sem_free, sizeof(struct sem) * seminfo.semmns);

    seminfo.semmns = new_pool_size;
    printf("expansion complete\n");
    return 1;
}
/*
 * Allocate a new sem_undo structure for a process
 * (returns the index of the structure, or -1 if no more room)
 *
 * Assumes we already hold the subsystem lock.
 */
static int
semu_alloc(struct proc *p)
{
    int i;
    struct sem_undo *suptr;
    int *supidx;
    int attempt;

    /*
     * Try twice to allocate something.
     * (we'll purge any empty structures after the first pass so
     * two passes are always enough)
     */
    for (attempt = 0; attempt < 2; attempt++) {
        /*
         * Look for a free structure.
         * Fill it in and return it if we find one.
         */
        for (i = 0; i < seminfo.semmnu; i++) {
            suptr = SEMU(i);
            if (suptr->un_proc == NULL) {
                suptr->un_next_idx = semu_list_idx;
                semu_list_idx = i;
                suptr->un_cnt = 0;
                suptr->un_ent = NULL;
                suptr->un_proc = p;
                return i;
            }
        }

        /*
         * We didn't find a free one, if this is the first attempt
         * then try to free some structures.
         */
        if (attempt == 0) {
            /* All the structures are in use - try to free some */
            int did_something = 0;

            supidx = &semu_list_idx;
            while (*supidx != -1) {
                suptr = SEMU(*supidx);
                if (suptr->un_cnt == 0) {
                    suptr->un_proc = NULL;
                    *supidx = suptr->un_next_idx;
                    did_something = 1;
                } else {
                    supidx = &(suptr->un_next_idx);
                }
            }

            /* If we didn't free anything, try expanding
             * the semu[] array. If that doesn't work
             * then fail. We expand last to get the
             * most reuse out of existing resources.
             */
            if (!did_something) {
                if (!grow_semu_array(seminfo.semmnu + 1)) {
                    return -1;
                }
            }
        } else {
            /*
             * The second pass failed even though we freed
             * something after the first pass!
             * This is IMPOSSIBLE!
             */
            panic("semu_alloc - second attempt failed");
        }
    }
    return -1;
}
/*
 * Adjust a particular entry for a particular proc
 *
 * Assumes we already hold the subsystem lock.
 */
static int
semundo_adjust(struct proc *p, int *supidx, int semid,
    int semnum, int adjval)
{
    struct sem_undo *suptr;
    int suidx;
    struct undo *sueptr, **suepptr, *new_sueptr;
    int i;

    /*
     * Look for and remember the sem_undo if the caller doesn't provide it
     */
    suidx = *supidx;
    if (suidx == -1) {
        for (suidx = semu_list_idx; suidx != -1;
            suidx = suptr->un_next_idx) {
            suptr = SEMU(suidx);
            if (suptr->un_proc == p) {
                *supidx = suidx;
                break;
            }
        }
        if (suidx == -1) {
            suidx = semu_alloc(p);
            if (suidx == -1) {
                return ENOSPC;
            }
            *supidx = suidx;
        }
    }
    suptr = SEMU(suidx);

    /*
     * Look for the requested entry and adjust it (delete if adjval becomes
     * 0).
     */
    new_sueptr = NULL;
    for (i = 0, suepptr = &suptr->un_ent, sueptr = suptr->un_ent;
        i < suptr->un_cnt;
        i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) {
        if (sueptr->une_id != semid || sueptr->une_num != semnum) {
            continue;
        }
        if (adjval == 0) {
            sueptr->une_adjval = 0;
        } else {
            sueptr->une_adjval += adjval;
        }
        if (sueptr->une_adjval == 0) {
            suptr->un_cnt--;
            *suepptr = sueptr->une_next;
            kheap_free(KM_SYSVSEM, sueptr, sizeof(struct undo));
        }
        return 0;
    }

    /* Didn't find the right entry - create it */
    if (adjval == 0) {
        /* no adjustment: no need for a new entry */
        return 0;
    }

    if (suptr->un_cnt == limitseminfo.semume) {
        /* reached the limit number of semaphore undo entries */
        return EINVAL;
    }

    /* allocate a new semaphore undo entry */
    new_sueptr = kheap_alloc(KM_SYSVSEM, sizeof(struct undo), Z_WAITOK);
    if (new_sueptr == NULL) {
        return ENOMEM;
    }

    /* fill in the new semaphore undo entry */
    new_sueptr->une_next = suptr->un_ent;
    suptr->un_ent = new_sueptr;
    suptr->un_cnt++;
    new_sueptr->une_adjval = adjval;
    new_sueptr->une_id = semid;
    new_sueptr->une_num = semnum;
    return 0;
}
/* Assumes we already hold the subsystem lock.
 */
static void
semundo_clear(int semid, int semnum)
{
    struct sem_undo *suptr;
    int suidx;

    for (suidx = semu_list_idx; suidx != -1; suidx = suptr->un_next_idx) {
        struct undo *sueptr;
        struct undo **suepptr;
        int i = 0;

        suptr = SEMU(suidx);
        sueptr = suptr->un_ent;
        suepptr = &suptr->un_ent;
        while (i < suptr->un_cnt) {
            if (sueptr->une_id == semid) {
                if (semnum == -1 || sueptr->une_num == semnum) {
                    suptr->un_cnt--;
                    *suepptr = sueptr->une_next;
                    kheap_free(KM_SYSVSEM, sueptr, sizeof(struct undo));
                    sueptr = *suepptr;
                    continue;
                }
            }
            i++;
            suepptr = &sueptr->une_next;
            sueptr = sueptr->une_next;
        }
    }
}
/*
 * Note that the user-mode half of this passes a union coerced to a
 * user_addr_t. The union contains either an int or a pointer, and
 * so we have to coerce it back, depending on whether the calling
 * process is 64 bit or not. The coercion works for the 'val' element
 * because the alignment is the same in user and kernel space.
 */
int
semctl(struct proc *p, struct semctl_args *uap, int32_t *retval)
{
    int semid = uap->semid;
    int semnum = uap->semnum;
    int cmd = uap->cmd;
    user_semun_t user_arg = (user_semun_t)uap->arg;
    kauth_cred_t cred = kauth_cred_get();
    int i, rval = 0, eval = 0;
    struct user_semid_ds sbuf;
    struct semid_kernel *semakptr;

    AUDIT_ARG(svipc_cmd, cmd);
    AUDIT_ARG(svipc_id, semid);

    SYSV_SEM_SUBSYS_LOCK();
    printf("call to semctl(%d, %d, %d, 0x%qx)\n", semid, semnum, cmd, user_arg);

    semid = IPCID_TO_IX(semid);

    if (semid < 0 || semid >= seminfo.semmni) {
        printf("Invalid semid\n");
        eval = EINVAL;
        goto semctlout;
    }

    semakptr = &sema[semid];
    if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 ||
        semakptr->u.sem_perm._seq != IPCID_TO_SEQ(uap->semid)) {
        eval = EINVAL;
        goto semctlout;
    }

    eval = mac_sysvsem_check_semctl(cred, semakptr, cmd);
    if (eval) {
        goto semctlout;
    }
    switch (cmd) {
    case IPC_RMID:
        if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M))) {
            goto semctlout;
        }

        semakptr->u.sem_perm.cuid = kauth_cred_getuid(cred);
        semakptr->u.sem_perm.uid = kauth_cred_getuid(cred);
        semtot -= semakptr->u.sem_nsems;
        for (i = semakptr->u.sem_base - sem_pool; i < semtot; i++) {
            sem_pool[i] = sem_pool[i + semakptr->u.sem_nsems];
        }
        for (i = 0; i < seminfo.semmni; i++) {
            if ((sema[i].u.sem_perm.mode & SEM_ALLOC) &&
                sema[i].u.sem_base > semakptr->u.sem_base) {
                sema[i].u.sem_base -= semakptr->u.sem_nsems;
            }
        }
        semakptr->u.sem_perm.mode = 0;
        mac_sysvsem_label_recycle(semakptr);
        semundo_clear(semid, -1);
        wakeup((caddr_t)semakptr);
        break;
    case IPC_SET:
        if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M))) {
            goto semctlout;
        }

        if (IS_64BIT_PROCESS(p)) {
            struct user64_semid_ds ds64;
            eval = copyin(user_arg.buf, &ds64, sizeof(ds64));
            semid_ds_64tokernel(&ds64, &sbuf);
        } else {
            struct user32_semid_ds ds32;
            eval = copyin(user_arg.buf, &ds32, sizeof(ds32));
            semid_ds_32tokernel(&ds32, &sbuf);
        }
        if (eval != 0) {
            goto semctlout;
        }

        semakptr->u.sem_perm.uid = sbuf.sem_perm.uid;
        semakptr->u.sem_perm.gid = sbuf.sem_perm.gid;
        semakptr->u.sem_perm.mode = (semakptr->u.sem_perm.mode &
            ~0777) | (sbuf.sem_perm.mode & 0777);
        semakptr->u.sem_ctime = sysv_semtime();
        break;
    case IPC_STAT:
        if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
            goto semctlout;
        }

        if (IS_64BIT_PROCESS(p)) {
            struct user64_semid_ds semid_ds64;
            bzero(&semid_ds64, sizeof(semid_ds64));
            semid_ds_kernelto64(&semakptr->u, &semid_ds64);
            eval = copyout(&semid_ds64, user_arg.buf, sizeof(semid_ds64));
        } else {
            struct user32_semid_ds semid_ds32;
            bzero(&semid_ds32, sizeof(semid_ds32));
            semid_ds_kernelto32(&semakptr->u, &semid_ds32);
            eval = copyout(&semid_ds32, user_arg.buf, sizeof(semid_ds32));
        }
        break;
    case GETNCNT:
        if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
            goto semctlout;
        }
        if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
            eval = EINVAL;
            goto semctlout;
        }
        rval = semakptr->u.sem_base[semnum].semncnt;
        break;
    case GETPID:
        if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
            goto semctlout;
        }
        if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
            eval = EINVAL;
            goto semctlout;
        }
        rval = semakptr->u.sem_base[semnum].sempid;
        break;
    case GETVAL:
        if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
            goto semctlout;
        }
        if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
            eval = EINVAL;
            goto semctlout;
        }
        rval = semakptr->u.sem_base[semnum].semval;
        break;
    case GETALL:
        if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
            goto semctlout;
        }
        /* XXXXXXXXXXXXXXXX TBD XXXXXXXXXXXXXXXX */
        for (i = 0; i < semakptr->u.sem_nsems; i++) {
            /* XXX could be done in one go... */
            eval = copyout((caddr_t)&semakptr->u.sem_base[i].semval,
                user_arg.array + (i * sizeof(unsigned short)),
                sizeof(unsigned short));
            if (eval != 0) {
                break;
            }
        }
        break;
    case GETZCNT:
        if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
            goto semctlout;
        }
        if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
            eval = EINVAL;
            goto semctlout;
        }
        rval = semakptr->u.sem_base[semnum].semzcnt;
        break;
    case SETVAL:
        if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W))) {
            printf("Invalid credentials for write\n");
            goto semctlout;
        }
        if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
            printf("Invalid number out of range for set\n");
            eval = EINVAL;
            goto semctlout;
        }

        /*
         * Cast down a pointer instead of using 'val' member directly
         * to avoid introducing endianness and a pad field into the
         * header file. Ugly, but it works.
         */
        {
            u_int newsemval = CAST_DOWN_EXPLICIT(u_int, user_arg.buf);

            /*
             * The check is being performed as unsigned values to match
             * eventual destination
             */
            if (newsemval > (u_int)seminfo.semvmx) {
                printf("Out of range sem value for set\n");
                eval = ERANGE;
                goto semctlout;
            }
            semakptr->u.sem_base[semnum].semval = newsemval;
        }
        semakptr->u.sem_base[semnum].sempid = p->p_pid;
        /* XXX scottl Should there be a MAC call here? */
        semundo_clear(semid, semnum);
        wakeup((caddr_t)semakptr);
        break;
    case SETALL:
        if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W))) {
            goto semctlout;
        }
        /*** XXXXXXXXXXXX TBD ********/
        for (i = 0; i < semakptr->u.sem_nsems; i++) {
            /* XXX could be done in one go... */
            eval = copyin(user_arg.array + (i * sizeof(unsigned short)),
                (caddr_t)&semakptr->u.sem_base[i].semval,
                sizeof(unsigned short));
            if (eval != 0) {
                break;
            }
            semakptr->u.sem_base[i].sempid = p->p_pid;
        }
        /* XXX scottl Should there be a MAC call here? */
        semundo_clear(semid, -1);
        wakeup((caddr_t)semakptr);
        break;
    default:
        eval = EINVAL;
        goto semctlout;
    }

    if (eval == 0) {
        *retval = rval;
    }
semctlout:
    SYSV_SEM_SUBSYS_UNLOCK();
    return eval;
}
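/*
 * Illustrative user-space sketch (not part of this kernel file): the 'arg'
 * that arrives above as a user_addr_t is a union semun on the caller's side,
 * which is why SETVAL recovers the value with CAST_DOWN_EXPLICIT while
 * IPC_STAT/IPC_SET treat it as a buffer pointer. The identifiers below are
 * example values only; union semun is declared by <sys/sem.h> on Darwin, but
 * some systems require the caller to define it.
 *
 *	#include <sys/sem.h>
 *	#include <err.h>
 *
 *	void
 *	example_setval_stat(int id)
 *	{
 *		union semun arg;
 *		struct semid_ds ds;
 *
 *		arg.val = 1;            // 'val' travels by value in the union
 *		if (semctl(id, 0, SETVAL, arg) == -1) {
 *			err(1, "semctl(SETVAL)");
 *		}
 *		arg.buf = &ds;          // 'buf' is a pointer the kernel copies out to
 *		if (semctl(id, 0, IPC_STAT, arg) == -1) {
 *			err(1, "semctl(IPC_STAT)");
 *		}
 *	}
 */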
int
semget(__unused struct proc *p, struct semget_args *uap, int32_t *retval)
{
    int semid, eval;
    int key = uap->key;
    int nsems = uap->nsems;
    int semflg = uap->semflg;
    kauth_cred_t cred = kauth_cred_get();

    if (key != IPC_PRIVATE) {
        printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
    } else {
        printf("semget(IPC_PRIVATE, %d, 0%o)\n", nsems, semflg);
    }

    SYSV_SEM_SUBSYS_LOCK();
    if (key != IPC_PRIVATE) {
        for (semid = 0; semid < seminfo.semmni; semid++) {
            if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) &&
                sema[semid].u.sem_perm._key == key) {
                break;
            }
        }
        if (semid < seminfo.semmni) {
            printf("found public key\n");
            if ((eval = ipcperm(cred, &sema[semid].u.sem_perm,
                semflg & 0700))) {
                goto semgetout;
            }
            if (nsems < 0 || sema[semid].u.sem_nsems < nsems) {
                printf("too small\n");
                eval = EINVAL;
                goto semgetout;
            }
            if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
                printf("not exclusive\n");
                eval = EEXIST;
                goto semgetout;
            }

            eval = mac_sysvsem_check_semget(cred, &sema[semid]);
            if (eval) {
                goto semgetout;
            }

            goto found;
        }
    }

    printf("need to allocate an id for the request\n");
    if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
        if (nsems <= 0 || nsems > limitseminfo.semmsl) {
            printf("nsems out of range (0<%d<=%d)\n", nsems,
                limitseminfo.semmsl);
            eval = EINVAL;
            goto semgetout;
        }
        if (nsems > seminfo.semmns - semtot) {
            printf("not enough semaphores left (need %d, got %d)\n",
                nsems, seminfo.semmns - semtot);
            if (!grow_sem_pool(semtot + nsems)) {
                printf("failed to grow the sem array\n");
                eval = ENOSPC;
                goto semgetout;
            }
        }
        for (semid = 0; semid < seminfo.semmni; semid++) {
            if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) == 0) {
                break;
            }
        }
        if (semid == seminfo.semmni) {
            printf("no more id's available\n");
            if (!grow_sema_array(seminfo.semmni + 1)) {
                printf("failed to grow sema array\n");
                eval = ENOSPC;
                goto semgetout;
            }
        }

        printf("semid %d is available\n", semid);
        sema[semid].u.sem_perm._key = key;
        sema[semid].u.sem_perm.cuid = kauth_cred_getuid(cred);
        sema[semid].u.sem_perm.uid = kauth_cred_getuid(cred);
        sema[semid].u.sem_perm.cgid = kauth_cred_getgid(cred);
        sema[semid].u.sem_perm.gid = kauth_cred_getgid(cred);
        sema[semid].u.sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
        sema[semid].u.sem_perm._seq =
            (sema[semid].u.sem_perm._seq + 1) & 0x7fff;
        sema[semid].u.sem_nsems = nsems;
        sema[semid].u.sem_otime = 0;
        sema[semid].u.sem_ctime = sysv_semtime();
        sema[semid].u.sem_base = &sem_pool[semtot];
        semtot += nsems;
        bzero(sema[semid].u.sem_base,
            sizeof(sema[semid].u.sem_base[0]) * nsems);

        mac_sysvsem_label_associate(cred, &sema[semid]);

        printf("sembase = 0x%x, next = 0x%x\n", sema[semid].u.sem_base,
            &sem_pool[semtot]);
    } else {
        printf("didn't find it and wasn't asked to create it\n");
        eval = ENOENT;
        goto semgetout;
    }
found:
    *retval = IXSEQ_TO_IPCID(semid, sema[semid].u.sem_perm);
    AUDIT_ARG(svipc_id, *retval);
    printf("semget is done, returning %d\n", *retval);
    eval = 0;

semgetout:
    SYSV_SEM_SUBSYS_UNLOCK();
    return eval;
}
int
semop(struct proc *p, struct semop_args *uap, int32_t *retval)
{
    int semid = uap->semid;
    int nsops = uap->nsops;
    struct sembuf sops[seminfo.semopm];
    struct semid_kernel *semakptr;
    struct sembuf *sopptr = NULL;       /* protected by 'semptr' */
    struct sem *semptr = NULL;          /* protected by 'if' */
    int supidx = -1;
    int i, j, eval;
    int do_wakeup, do_undos;

    AUDIT_ARG(svipc_id, uap->semid);

    SYSV_SEM_SUBSYS_LOCK();
    printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops);

    semid = IPCID_TO_IX(semid);         /* Convert back to zero origin */

    if (semid < 0 || semid >= seminfo.semmni) {
        eval = EINVAL;
        goto semopout;
    }

    semakptr = &sema[semid];
    if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0) {
        eval = EINVAL;
        goto semopout;
    }
    if (semakptr->u.sem_perm._seq != IPCID_TO_SEQ(uap->semid)) {
        eval = EINVAL;
        goto semopout;
    }

    if ((eval = ipcperm(kauth_cred_get(), &semakptr->u.sem_perm, IPC_W))) {
        printf("eval = %d from ipaccess\n", eval);
        goto semopout;
    }

    if (nsops < 0 || nsops > seminfo.semopm) {
        printf("too many sops (max=%d, nsops=%d)\n",
            seminfo.semopm, nsops);
        eval = E2BIG;
        goto semopout;
    }

    /* OK for LP64, since sizeof(struct sembuf) is currently invariant */
    if ((eval = copyin(uap->sops, &sops, nsops * sizeof(struct sembuf))) != 0) {
        printf("eval = %d from copyin(%08x, %08x, %ld)\n", eval,
            uap->sops, &sops, nsops * sizeof(struct sembuf));
        goto semopout;
    }
    /*
     * Initial pass thru sops to see what permissions are needed.
     */
    j = 0;          /* permission needed */
    for (i = 0; i < nsops; i++) {
        j |= (sops[i].sem_op == 0) ? SEM_R : SEM_A;
    }

    /*
     * The MAC hook checks whether the thread has read (and possibly
     * write) permissions to the semaphore array based on the
     * sopptr->sem_op value.
     */
    eval = mac_sysvsem_check_semop(kauth_cred_get(), semakptr, j);
    if (eval) {
        goto semopout;
    }
    /*
     * Loop trying to satisfy the vector of requests.
     * If we reach a point where we must wait, any requests already
     * performed are rolled back and we go to sleep until some other
     * process wakes us up. At this point, we start all over again.
     *
     * This ensures that from the perspective of other tasks, a set
     * of requests is atomic (never partially satisfied).
     */
    do_undos = 0;

    for (;;) {
        do_wakeup = 0;

        for (i = 0; i < nsops; i++) {
            sopptr = &sops[i];

            if (sopptr->sem_num >= semakptr->u.sem_nsems) {
                eval = EFBIG;
                goto semopout;
            }

            semptr = &semakptr->u.sem_base[sopptr->sem_num];

            printf("semop: semakptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n",
                semakptr, semakptr->u.sem_base, semptr,
                sopptr->sem_num, semptr->semval, sopptr->sem_op,
                (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");

            if (sopptr->sem_op < 0) {
                if (semptr->semval + sopptr->sem_op < 0) {
                    printf("semop: can't do it now\n");
                    break;
                }
                semptr->semval += sopptr->sem_op;
                if (semptr->semval == 0 &&
                    semptr->semzcnt > 0) {
                    do_wakeup = 1;
                }
                if (sopptr->sem_flg & SEM_UNDO) {
                    do_undos = 1;
                }
            } else if (sopptr->sem_op == 0) {
                if (semptr->semval > 0) {
                    printf("semop: not zero now\n");
                    break;
                }
            } else {
                if (semptr->semncnt > 0) {
                    do_wakeup = 1;
                }
                semptr->semval += sopptr->sem_op;
                if (sopptr->sem_flg & SEM_UNDO) {
                    do_undos = 1;
                }
            }
        }
        /*
         * Did we get through the entire vector?
         */
        if (i >= nsops) {
            break;
        }

        /*
         * No ... rollback anything that we've already done
         */
        printf("semop: rollback 0 through %d\n", i - 1);
        for (j = 0; j < i; j++) {
            semakptr->u.sem_base[sops[j].sem_num].semval -=
                sops[j].sem_op;
        }

        /*
         * If the request that we couldn't satisfy has the
         * NOWAIT flag set then return with EAGAIN.
         */
        if (sopptr->sem_flg & IPC_NOWAIT) {
            eval = EAGAIN;
            goto semopout;
        }

        if (sopptr->sem_op == 0) {
            semptr->semzcnt++;
        } else {
            semptr->semncnt++;
        }

        printf("semop: good night!\n");
        /* Release our lock on the semaphore subsystem so
         * another thread can get at the semaphore we are
         * waiting for. We will get the lock back after we
         * wake up.
         */
        eval = msleep((caddr_t)semakptr, &sysv_sem_subsys_mutex, (PZERO - 4) | PCATCH,
            "semwait", 0);
        printf("semop: good morning (eval=%d)!\n", eval);

        /*
         * IMPORTANT: while we were asleep, the semaphore array might
         * have been reallocated somewhere else (see grow_sema_array()).
         * When we wake up, we have to re-lookup the semaphore
         * structures and re-validate them.
         */

        /*
         * Make sure that the semaphore still exists
         *
         * XXX POSIX: Third test this 'if' and 'EINTR' precedence may
         * fail testing; if so, we will need to revert this code.
         */
        semakptr = &sema[semid];        /* sema may have been reallocated */
        if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 ||
            semakptr->u.sem_perm._seq != IPCID_TO_SEQ(uap->semid) ||
            sopptr->sem_num >= semakptr->u.sem_nsems) {
            /* The man page says to return EIDRM. */
            /* Unfortunately, BSD doesn't define that code! */
            if (eval == EINTR) {
                /*
                 * EINTR takes precedence over the fact that
                 * the semaphore disappeared while we were
                 * sleeping.
                 */
            } else {
                eval = EINVAL;          /* Ancient past */
            }
            goto semopout;
        }
        /*
         * The semaphore is still alive. Readjust the count of
         * waiting processes. semptr needs to be recomputed
         * because the sem[] may have been reallocated while
         * we were sleeping, updating our sem_base pointer.
         */
        semptr = &semakptr->u.sem_base[sopptr->sem_num];
        if (sopptr->sem_op == 0) {
            semptr->semzcnt--;
        } else {
            semptr->semncnt--;
        }

        if (eval != 0) {        /* EINTR */
            goto semopout;
        }
    }
    /*
     * Process any SEM_UNDO requests.
     */
    if (do_undos) {
        short adjval;

        for (i = 0; i < nsops; i++) {
            /*
             * We only need to deal with SEM_UNDO's for non-zero
             * op's.
             */
            if ((sops[i].sem_flg & SEM_UNDO) == 0) {
                continue;
            }
            adjval = sops[i].sem_op;
            if (adjval == 0) {
                continue;
            }
            eval = semundo_adjust(p, &supidx, semid,
                sops[i].sem_num, -adjval);
            if (eval == 0) {
                continue;
            }

            /*
             * Oh-Oh! We ran out of either sem_undo's or undo's.
             * Rollback the adjustments to this point and then
             * rollback the semaphore ups and down so we can return
             * with an error with all structures restored. We
             * rollback the undo's in the exact reverse order that
             * we applied them. This guarantees that we won't run
             * out of space as we roll things back out.
             */
            for (j = i - 1; j >= 0; j--) {
                if ((sops[j].sem_flg & SEM_UNDO) == 0) {
                    continue;
                }
                adjval = sops[j].sem_op;
                if (adjval == 0) {
                    continue;
                }
                if (semundo_adjust(p, &supidx, semid,
                    sops[j].sem_num, adjval) != 0) {
                    panic("semop - can't undo undos");
                }
            }

            for (j = 0; j < nsops; j++) {
                semakptr->u.sem_base[sops[j].sem_num].semval -=
                    sops[j].sem_op;
            }

            printf("eval = %d from semundo_adjust\n", eval);
            goto semopout;
        } /* loop through the sops */
    } /* if (do_undos) */
    /* We're definitely done - set the sempid's */
    for (i = 0; i < nsops; i++) {
        sopptr = &sops[i];
        semptr = &semakptr->u.sem_base[sopptr->sem_num];
        semptr->sempid = p->p_pid;
    }
    semakptr->u.sem_otime = sysv_semtime();

    if (do_wakeup) {
        printf("semop: doing wakeup\n");
#ifdef SEM_WAKEUP
        sem_wakeup((caddr_t)semakptr);
#else
        wakeup((caddr_t)semakptr);
#endif
        printf("semop: back from wakeup\n");
    }

    printf("semop: done\n");
    *retval = 0;
    eval = 0;
semopout:
    SYSV_SEM_SUBSYS_UNLOCK();
    return eval;
}
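/*
 * Illustrative user-space sketch (not part of this kernel file): because the
 * loop above retries the whole vector after every sleep, a multi-operation
 * semop() is atomic from the caller's point of view, and SEM_UNDO adjustments
 * recorded here are reversed by semexit() if the process dies while holding
 * the semaphore. The semaphore numbers and function name are example values.
 *
 *	#include <sys/sem.h>
 *	#include <err.h>
 *
 *	void
 *	example_lock_pair(int id)
 *	{
 *		// Atomically: wait for sem 0 to become zero AND take sem 1,
 *		// with the sem 1 decrement undone automatically on exit.
 *		struct sembuf ops[2] = {
 *			{ .sem_num = 0, .sem_op = 0,  .sem_flg = 0 },
 *			{ .sem_num = 1, .sem_op = -1, .sem_flg = SEM_UNDO },
 *		};
 *		if (semop(id, ops, 2) == -1) {
 *			err(1, "semop");
 *		}
 *	}
 */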
/*
 * Go through the undo structures for this process and apply the adjustments to
 * semaphores.
 */
void
semexit(struct proc *p)
{
    struct sem_undo *suptr = NULL;
    int *supidx;
    int suidx;

    /* If we have not allocated our semaphores yet there can't be
     * anything to undo, but we need the lock to prevent
     * dynamic memory race conditions.
     */
    SYSV_SEM_SUBSYS_LOCK();

    if (sem_pool == NULL) {
        SYSV_SEM_SUBSYS_UNLOCK();
        return;
    }
    /*
     * Go through the chain of undo vectors looking for one
     * associated with this process.
     */
    for (supidx = &semu_list_idx; (suidx = *supidx) != -1;
        supidx = &suptr->un_next_idx) {
        suptr = SEMU(suidx);
        if (suptr->un_proc == p) {
            break;
        }
    }

    if (suidx == -1) {
        SYSV_SEM_SUBSYS_UNLOCK();
        return;
    }

    printf("proc @%08x has undo structure with %d entries\n", p,
        suptr->un_cnt);
    /*
     * If there are any active undo elements then process them.
     */
    if (suptr->un_cnt > 0) {
        while (suptr->un_ent != NULL) {
            struct undo *sueptr;
            int semid;
            int semnum;
            int adjval;
            struct semid_kernel *semakptr;

            sueptr = suptr->un_ent;
            semid = sueptr->une_id;
            semnum = sueptr->une_num;
            adjval = sueptr->une_adjval;

            semakptr = &sema[semid];
            if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0) {
                panic("semexit - semid not allocated");
            }
            if (semnum >= semakptr->u.sem_nsems) {
                panic("semexit - semnum out of range");
            }

            printf("semexit: %08x id=%d num=%d(adj=%d) ; sem=%d\n",
                suptr->un_proc, semid, semnum, adjval,
                semakptr->u.sem_base[semnum].semval);

            if (adjval < 0) {
                if (semakptr->u.sem_base[semnum].semval < -adjval) {
                    semakptr->u.sem_base[semnum].semval = 0;
                } else {
                    semakptr->u.sem_base[semnum].semval +=
                        adjval;
                }
            } else {
                semakptr->u.sem_base[semnum].semval += adjval;
            }

            /* Maybe we should build a list of semakptr's to wake
             * up, finish all access to data structures, release the
             * subsystem lock, and wake all the processes.  Something
             * to think about.
             */
#ifdef SEM_WAKEUP
            sem_wakeup((caddr_t)semakptr);
#else
            wakeup((caddr_t)semakptr);
#endif
            printf("semexit: back from wakeup\n");

            suptr->un_cnt--;
            suptr->un_ent = sueptr->une_next;
            kheap_free(KM_SYSVSEM, sueptr, sizeof(struct undo));
        }
    }
    /*
     * Deallocate the undo vector.
     */
    printf("removing vector\n");
    suptr->un_proc = NULL;
    *supidx = suptr->un_next_idx;
    /*
     * There is a semaphore leak (i.e. memory leak) in this code.
     * We should be deleting the IPC_PRIVATE semaphores when they are
     * no longer needed, and we don't. We would have to track which processes
     * know about which IPC_PRIVATE semaphores, updating the list after
     * every fork. We can't just delete the semaphore when the process
     * that created it dies, because that process may well have forked
     * some children. So we need to wait until all of its children have
     * died, and so on. Maybe we should tag each IPC_PRIVATE semaphore
     * with the creating group ID, count the number of processes left in
     * that group, and delete the semaphore when the group is gone.
     * Until that code gets implemented we will leak IPC_PRIVATE semaphores.
     * There is an upper bound on the size of our semaphore array, so
     * leaking the semaphores should not work as a DOS attack.
     *
     * Please note that the original BSD code this file is based on had the
     * same leaky semaphore problem.
     */

    SYSV_SEM_SUBSYS_UNLOCK();
}
/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
 *  struct sysctl_req *req) */
static int
sysctl_seminfo(__unused struct sysctl_oid *oidp, void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error = 0;

    error = SYSCTL_OUT(req, arg1, sizeof(int));
    if (error || req->newptr == USER_ADDR_NULL) {
        return error;
    }

    SYSV_SEM_SUBSYS_LOCK();

    /* Set the values only if the semaphore subsystem is not initialised */
    if ((sem_pool == NULL) &&
        (semu_list_idx == -1)) {
        if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) {
            goto out;
        }
    } else {
        error = EINVAL;
    }
out:
    SYSV_SEM_SUBSYS_UNLOCK();
    return error;
}
/* SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV"); */
extern struct sysctl_oid_list sysctl__kern_sysv_children;
SYSCTL_PROC(_kern_sysv, OID_AUTO, semmni, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &limitseminfo.semmni, 0, &sysctl_seminfo, "I", "semmni");

SYSCTL_PROC(_kern_sysv, OID_AUTO, semmns, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &limitseminfo.semmns, 0, &sysctl_seminfo, "I", "semmns");

SYSCTL_PROC(_kern_sysv, OID_AUTO, semmnu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &limitseminfo.semmnu, 0, &sysctl_seminfo, "I", "semmnu");

SYSCTL_PROC(_kern_sysv, OID_AUTO, semmsl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &limitseminfo.semmsl, 0, &sysctl_seminfo, "I", "semmsl");

SYSCTL_PROC(_kern_sysv, OID_AUTO, semume, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &limitseminfo.semume, 0, &sysctl_seminfo, "I", "semume");
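/*
 * Illustrative user-space sketch (not part of this kernel file): the limits
 * registered above are visible as kern.sysv.* sysctls; sysctl_seminfo() only
 * accepts new values while the subsystem is still uninitialised, so tuning is
 * normally done early in boot. The function name below is an example only.
 *
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	void
 *	example_read_semmns(void)
 *	{
 *		int semmns = 0;
 *		size_t len = sizeof(semmns);
 *
 *		if (sysctlbyname("kern.sysv.semmns", &semmns, &len, NULL, 0) == -1) {
 *			err(1, "sysctlbyname");
 *		}
 *		printf("kern.sysv.semmns = %d\n", semmns);
 *	}
 */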
static int
IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error;
    int cursor;
    union {
        struct user32_IPCS_command u32;
        struct user_IPCS_command u64;
    } ipcs = { };
    struct user32_semid_ds semid_ds32 = { };    /* post conversion, 32 bit version */
    struct user64_semid_ds semid_ds64 = { };    /* post conversion, 64 bit version */
    void *semid_dsp;
    size_t ipcs_sz;
    size_t semid_ds_sz;
    struct proc *p = current_proc();

    if (IS_64BIT_PROCESS(p)) {
        ipcs_sz = sizeof(struct user_IPCS_command);
        semid_ds_sz = sizeof(struct user64_semid_ds);
    } else {
        ipcs_sz = sizeof(struct user32_IPCS_command);
        semid_ds_sz = sizeof(struct user32_semid_ds);
    }
    /* Copy in the command structure */
    if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
        return error;
    }

    if (!IS_64BIT_PROCESS(p)) {         /* convert in place */
        ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);
    }

    /* Let us version this interface... */
    if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
        return EINVAL;
    }

    SYSV_SEM_SUBSYS_LOCK();
    switch (ipcs.u64.ipcs_op) {
    case IPCS_SEM_CONF:         /* Obtain global configuration data */
        if (ipcs.u64.ipcs_datalen != sizeof(struct seminfo)) {
            error = EINVAL;
            break;
        }
        if (ipcs.u64.ipcs_cursor != 0) {        /* fwd. compat. */
            error = EINVAL;
            break;
        }
        error = copyout(&seminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
        break;
    case IPCS_SEM_ITER:         /* Iterate over existing segments */
        cursor = ipcs.u64.ipcs_cursor;
        if (cursor < 0 || cursor >= seminfo.semmni) {
            error = ERANGE;
            break;
        }
        if (ipcs.u64.ipcs_datalen != (int)semid_ds_sz) {
            error = EINVAL;
            break;
        }
        for (; cursor < seminfo.semmni; cursor++) {
            if (sema[cursor].u.sem_perm.mode & SEM_ALLOC) {
                break;
            }
        }
        if (cursor == seminfo.semmni) {
            error = ENOENT;
            break;
        }
        semid_dsp = &sema[cursor].u;    /* default: 64 bit */

        /*
         * If necessary, convert the 64 bit kernel segment
         * descriptor to a 32 bit user one.
         */
        if (!IS_64BIT_PROCESS(p)) {
            bzero(&semid_ds32, sizeof(semid_ds32));
            semid_ds_kernelto32(semid_dsp, &semid_ds32);
            semid_dsp = &semid_ds32;
        } else {
            bzero(&semid_ds64, sizeof(semid_ds64));
            semid_ds_kernelto64(semid_dsp, &semid_ds64);
            semid_dsp = &semid_ds64;
        }

        error = copyout(semid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
        if (!error) {
            /* update cursor */
            ipcs.u64.ipcs_cursor = cursor + 1;

            if (!IS_64BIT_PROCESS(p)) {         /* convert in place */
                ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t, ipcs.u64.ipcs_data);
            }

            error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
        }
        break;

    default:
        error = EINVAL;
        break;
    }

    SYSV_SEM_SUBSYS_UNLOCK();
    return error;
}
SYSCTL_DECL(_kern_sysv_ipcs);
SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, IPCS_sem_sysctl,
    "S,IPCS_sem_command",
    "ipcs sem command interface");

#endif /* SYSV_SEM */