* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
/* Hard system limits to avoid resource starvation / DOS attacks.
* These are not needed if we can make the semaphore pages swappable.
*/
static struct seminfo limitseminfo = {
- SEMMAP, /* # of entries in semaphore map */
- SEMMNI, /* # of semaphore identifiers */
- SEMMNS, /* # of semaphores in system */
- SEMMNU, /* # of undo structures in system */
- SEMMSL, /* max # of semaphores per id */
- SEMOPM, /* max # of operations per semop call */
- SEMUME, /* max # of undo entries per process */
- SEMUSZ, /* size in bytes of undo structure */
- SEMVMX, /* semaphore maximum value */
- SEMAEM /* adjust on exit max value */
+ .semmap = SEMMAP, /* # of entries in semaphore map */
+ .semmni = SEMMNI, /* # of semaphore identifiers */
+ .semmns = SEMMNS, /* # of semaphores in system */
+ .semmnu = SEMMNU, /* # of undo structures in system */
+ .semmsl = SEMMSL, /* max # of semaphores per id */
+ .semopm = SEMOPM, /* max # of operations per semop call */
+ .semume = SEMUME, /* max # of undo entries per process */
+ .semusz = SEMUSZ, /* size in bytes of undo structure */
+ .semvmx = SEMVMX, /* semaphore maximum value */
+ .semaem = SEMAEM /* adjust on exit max value */
- SEMMAP, /* Unused, # of entries in semaphore map */
- 0, /* # of semaphore identifiers */
- 0, /* # of semaphores in system */
- 0, /* # of undo entries in system */
- SEMMSL, /* max # of semaphores per id */
- SEMOPM, /* max # of operations per semop call */
- SEMUME, /* max # of undo entries per process */
- SEMUSZ, /* size in bytes of undo structure */
- SEMVMX, /* semaphore maximum value */
- SEMAEM /* adjust on exit max value */
+ .semmap = SEMMAP, /* Unused, # of entries in semaphore map */
+ .semmni = 0, /* # of semaphore identifiers */
+ .semmns = 0, /* # of semaphores in system */
+ .semmnu = 0, /* # of undo entries in system */
+ .semmsl = SEMMSL, /* max # of semaphores per id */
+ .semopm = SEMOPM, /* max # of operations per semop call */
+ .semume = SEMUME, /* max # of undo entries per process */
+ .semusz = SEMUSZ, /* size in bytes of undo structure */
+ .semvmx = SEMVMX, /* semaphore maximum value */
+ .semaem = SEMAEM /* adjust on exit max value */
-static int semundo_adjust(struct proc *p, int *supidx,
- int semid, int semnum, int adjval);
+static int semundo_adjust(struct proc *p, int *supidx,
+ int semid, int semnum, int adjval);
static void semundo_clear(int semid, int semnum);
/* XXX casting to (sy_call_t *) is bogus, as usual. */
-static int semtot = 0; /* # of used semaphores */
-struct semid_kernel *sema = NULL; /* semaphore id pool */
-struct sem *sem_pool = NULL; /* semaphore pool */
-static int semu_list_idx = -1; /* active undo structures */
-struct sem_undo *semu = NULL; /* semaphore undo pool */
+static int semtot = 0; /* # of used semaphores */
+struct semid_kernel *sema = NULL; /* semaphore id pool */
+struct sem *sem_pool = NULL; /* semaphore pool */
+static int semu_list_idx = -1; /* active undo structures */
+struct sem_undo *semu = NULL; /* semaphore undo pool */
-void sysv_sem_lock_init(void);
-static lck_grp_t *sysv_sem_subsys_lck_grp;
-static lck_grp_attr_t *sysv_sem_subsys_lck_grp_attr;
-static lck_attr_t *sysv_sem_subsys_lck_attr;
-static lck_mtx_t sysv_sem_subsys_mutex;
+static LCK_GRP_DECLARE(sysv_sem_subsys_lck_grp, "sysv_sem_subsys_lock");
+static LCK_MTX_DECLARE(sysv_sem_subsys_mutex, &sysv_sem_subsys_lck_grp);
#define SYSV_SEM_SUBSYS_LOCK() lck_mtx_lock(&sysv_sem_subsys_mutex)
#define SYSV_SEM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_sem_subsys_mutex)
-
-__private_extern__ void
-sysv_sem_lock_init( void )
-{
-
- sysv_sem_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
-
- sysv_sem_subsys_lck_grp = lck_grp_alloc_init("sysv_sem_subsys_lock", sysv_sem_subsys_lck_grp_attr);
-
- sysv_sem_subsys_lck_attr = lck_attr_alloc_init();
- lck_mtx_init(&sysv_sem_subsys_mutex, sysv_sem_subsys_lck_grp, sysv_sem_subsys_lck_attr);
-}
-
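/*
 * Usage sketch (illustrative, not part of this change): with the lock
 * group and mutex statically declared above, there is no init routine
 * left to sequence; call sites simply bracket the critical section with
 * the existing macros.  The helper name below is hypothetical.
 */
static int
sysv_sem_used_count(void)
{
	int n;

	SYSV_SEM_SUBSYS_LOCK();
	n = semtot;	/* semtot is protected by sysv_sem_subsys_mutex */
	SYSV_SEM_SUBSYS_UNLOCK();
	return n;
}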
- out->sem_otime = in->sem_otime; /* XXX loses precision */
- out->sem_ctime = in->sem_ctime; /* XXX loses precision */
+ out->sem_otime = in->sem_otime; /* XXX loses precision */
+ out->sem_ctime = in->sem_ctime; /* XXX loses precision */
+}
+
+static void
+semid_ds_kernelto64(struct user_semid_ds *in, struct user64_semid_ds *out)
+{
+ out->sem_perm = in->sem_perm;
+ out->sem_base = CAST_DOWN_EXPLICIT(__int32_t, in->sem_base);
+ out->sem_nsems = in->sem_nsems;
+ out->sem_otime = in->sem_otime; /* XXX loses precision */
+ out->sem_ctime = in->sem_ctime; /* XXX loses precision */
{
out->sem_ctime = in->sem_ctime;
out->sem_otime = in->sem_otime;
out->sem_nsems = in->sem_nsems;
- out->sem_base = (void *)in->sem_base;
+ out->sem_base = (void *)(uintptr_t)in->sem_base;
+ out->sem_perm = in->sem_perm;
+}
+
+static void
+semid_ds_64tokernel(struct user64_semid_ds *in, struct user_semid_ds *out)
+{
+ out->sem_ctime = in->sem_ctime;
+ out->sem_otime = in->sem_otime;
+ out->sem_nsems = in->sem_nsems;
+ out->sem_base = (void *)(uintptr_t)in->sem_base;
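/*
 * User-space view these converters serve (illustrative sketch, not part
 * of this change): IPC_STAT copies a semid_ds out in whichever of the
 * two layouts matches the caller's ABI, so a plain 32- or 64-bit program
 * can simply do the following (error handling trimmed):
 *
 *	#include <sys/sem.h>
 *	#include <stdio.h>
 *
 *	struct semid_ds ds;
 *	union semun arg;	/* see semctl(2) */
 *	arg.buf = &ds;
 *	if (semctl(semid, 0, IPC_STAT, arg) == 0)
 *		printf("nsems=%d otime=%ld\n",
 *		    (int)ds.sem_nsems, (long)ds.sem_otime);
 */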
- * Entry point for all SEM calls
+ * semsys
+ *
+ * Entry point for all SEM calls: semctl, semget, semop
+ *
+ * Parameters: p Process requesting the call
+ * uap User argument descriptor (see below)
+ * retval Return value of the selected sem call
+ *
+ * Indirect parameters: uap->which sem call to invoke (index in array of sem calls)
+ * uap->a2 User argument descriptor
+ *
+ * Returns: 0 Success
+ * !0 Not success
+ *
+ * Implicit returns: retval Return value of the selected sem call
+ *
+ * DEPRECATED: This interface should not be used to call the other SEM
+ * functions (semctl, semget, semop). The correct usage is
+ * to call the other SEM functions directly.
- if (uap->which >= sizeof(semcalls)/sizeof(semcalls[0]))
- return (EINVAL);
- return ((*semcalls[uap->which])(p, &uap->a2, retval));
+ if (uap->which >= sizeof(semcalls) / sizeof(semcalls[0])) {
+ return EINVAL;
+ }
+ return (*semcalls[uap->which])(p, &uap->a2, retval);
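/*
 * Shape of the dispatch table semsys() indexes (illustrative; the actual
 * definition appears elsewhere in this file next to the sy_call_t cast
 * comment above, with one entry per SEM call):
 *
 *	static sy_call_t *semcalls[] = {
 *		(sy_call_t *)semctl, (sy_call_t *)semget,
 *		(sy_call_t *)semop
 *	};
 *
 * uap->which selects the entry, and &uap->a2 is passed through as that
 * call's own argument block.
 */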
newSize = newSize > limitseminfo.semmnu ? limitseminfo.semmnu : newSize;
#ifdef SEM_DEBUG
printf("growing semu[] from %d to %d\n", seminfo.semmnu, newSize);
#endif
- MALLOC(newSemu, struct sem_undo *, sizeof (struct sem_undo) * newSize,
- M_SYSVSEM, M_WAITOK | M_ZERO);
- if (NULL == newSemu)
- {
+ newSemu = kheap_alloc(KM_SYSVSEM, sizeof(struct sem_undo) * newSize,
+ Z_WAITOK | Z_ZERO);
+ if (NULL == newSemu) {
- * "un_proc" set to 0 (i.e. NULL) by the M_ZERO flag to MALLOC() above,
- * so they're already marked as "not in use".
+ * "un_proc" set to 0 (i.e. NULL) by the Z_ZERO flag to kheap_alloc
+ * above, so they're already marked as "not in use".
newSize = newSize > limitseminfo.semmni ? limitseminfo.semmni : newSize;
#ifdef SEM_DEBUG
printf("growing sema[] from %d to %d\n", seminfo.semmni, newSize);
#endif
- MALLOC(newSema, struct semid_kernel *,
- sizeof (struct semid_kernel) * newSize,
- M_SYSVSEM, M_WAITOK | M_ZERO);
- if (NULL == newSema)
- {
+ newSema = kheap_alloc(KM_SYSVSEM, sizeof(struct semid_kernel) * newSize,
+ Z_WAITOK | Z_ZERO);
+ if (NULL == newSema) {
* semaphore set is really not available yet, and then
* sleep on the correct, reallocated semid_kernel pointer.
*/
- * "sem_base" and "sem_perm.mode" set to 0 (i.e. NULL) by the M_ZERO
- * flag to MALLOC() above, so they're already marked as "not in use".
+ * "sem_base" and "sem_perm.mode" set to 0 (i.e. NULL) by the Z_ZERO
+ * flag to kheap_alloc above, so they're already marked as "not in use".
/* enforce hard limit */
if (new_pool_size > limitseminfo.semmns) {
#ifdef SEM_DEBUG
printf("semaphore hard limit of %d reached, requested %d\n",
new_pool_size = new_pool_size > limitseminfo.semmns ? limitseminfo.semmns : new_pool_size;
#ifdef SEM_DEBUG
printf("growing sem_pool array from %d to %d\n", seminfo.semmns, new_pool_size);
#endif
- MALLOC(new_sem_pool, struct sem *, sizeof (struct sem) * new_pool_size,
- M_SYSVSEM, M_WAITOK | M_ZERO);
+ new_sem_pool = kheap_alloc(KM_SYSVSEM, sizeof(struct sem) * new_pool_size,
+ Z_WAITOK | Z_ZERO);
- for(i = 0; i < seminfo.semmni; i++) {
- if (sema[i].u.sem_perm.mode & SEM_ALLOC) /* ID in use */
- sema[i].u.sem_base += (new_sem_pool - sem_pool);
+ for (i = 0; i < seminfo.semmni; i++) {
+ if (sema[i].u.sem_perm.mode & SEM_ALLOC) { /* ID in use */
+ sema[i].u.sem_base = new_sem_pool +
+ (sema[i].u.sem_base - sem_pool);
+ }
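/*
 * Rebasing note (illustrative): for an id whose semaphores begin at
 * offset k into the old pool,
 *
 *	k        = sema[i].u.sem_base - sem_pool;	/* offset in old pool */
 *	new base = new_sem_pool + k;			/* same offset in new pool */
 *
 * Each subtraction and addition stays within a single allocation, unlike
 * the old form, which added the cross-allocation difference
 * (new_sem_pool - sem_pool) to the stale pointer.
 */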
*/
static int
semundo_adjust(struct proc *p, int *supidx, int semid,
- i < suptr->un_cnt;
- i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) {
- if (sueptr->une_id != semid || sueptr->une_num != semnum)
+ i < suptr->un_cnt;
+ i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) {
+ if (sueptr->une_id != semid || sueptr->une_num != semnum) {
AUDIT_ARG(svipc_cmd, cmd);
AUDIT_ARG(svipc_id, semid);
semakptr->u.sem_perm.cuid = kauth_cred_getuid(cred);
semakptr->u.sem_perm.uid = kauth_cred_getuid(cred);
semtot -= semakptr->u.sem_nsems;
- eval = copyin(user_arg.buf, &sbuf, sizeof(struct user_semid_ds));
+ struct user64_semid_ds ds64;
+ eval = copyin(user_arg.buf, &ds64, sizeof(ds64));
+ semid_ds_64tokernel(&ds64, &sbuf);
- eval = copyin(user_arg.buf, &sbuf, sizeof(struct semid_ds));
- /* convert in place; ugly, but safe */
- semid_ds_32to64((struct semid_ds *)&sbuf, &sbuf);
+ struct user32_semid_ds ds32;
+ eval = copyin(user_arg.buf, &ds32, sizeof(ds32));
+ semid_ds_32tokernel(&ds32, &sbuf);
- if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R)))
- goto semctlout;
- bcopy((caddr_t)&semakptr->u, &uds, sizeof(struct user_semid_ds));
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
+
- eval = copyout(&uds, user_arg.buf, sizeof(struct user_semid_ds));
+ struct user64_semid_ds semid_ds64;
+ bzero(&semid_ds64, sizeof(semid_ds64));
+ semid_ds_kernelto64(&semakptr->u, &semid_ds64);
+ eval = copyout(&semid_ds64, user_arg.buf, sizeof(semid_ds64));
- struct semid_ds semid_ds32;
- semid_ds_64to32(&uds, &semid_ds32);
- eval = copyout(&semid_ds32, user_arg.buf, sizeof(struct semid_ds));
+ struct user32_semid_ds semid_ds32;
+ bzero(&semid_ds32, sizeof(semid_ds32));
+ semid_ds_kernelto32(&semakptr->u, &semid_ds32);
+ eval = copyout(&semid_ds32, user_arg.buf, sizeof(semid_ds32));
/* XXXXXXXXXXXXXXXX TBD XXXXXXXXXXXXXXXX */
for (i = 0; i < semakptr->u.sem_nsems; i++) {
/* XXX could be done in one go... */
eval = copyout((caddr_t)&semakptr->u.sem_base[i].semval,
user_arg.array + (i * sizeof(unsigned short)),
sizeof(unsigned short));
/*
 * Cast down a pointer instead of using 'val' member directly
 * to avoid introducing endianness and a pad field into the
 * header file. Ugly, but it works.
 */
- semakptr->u.sem_base[semnum].semval = CAST_DOWN(int,user_arg.buf);
+ u_int newsemval = CAST_DOWN_EXPLICIT(u_int, user_arg.buf);
+
+ /*
+ * The check is being performed as unsigned values to match
+ * eventual destination
+ */
+ if (newsemval > (u_int)seminfo.semvmx) {
+#ifdef SEM_DEBUG
+ printf("Out of range sem value for set\n");
+#endif
+ eval = ERANGE;
+ goto semctlout;
+ }
+ semakptr->u.sem_base[semnum].semval = newsemval;
semakptr->u.sem_base[semnum].sempid = p->p_pid;
/* XXX scottl Should there be a MAC call here? */
semundo_clear(semid, semnum);
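/*
 * Worked example for the unsigned comparison above (illustrative): a
 * caller passing -1 arrives here as newsemval == 0xffffffff after
 * CAST_DOWN_EXPLICIT, so the test against (u_int)seminfo.semvmx rejects
 * it with ERANGE.  A signed comparison (-1 > semvmx) would be false and
 * the bogus value would be stored into semval.
 */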
/*** XXXXXXXXXXXX TBD ********/
for (i = 0; i < semakptr->u.sem_nsems; i++) {
/* XXX could be done in one go... */
eval = copyin(user_arg.array + (i * sizeof(unsigned short)),
(caddr_t)&semakptr->u.sem_base[i].semval,
sizeof(unsigned short));
printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
printf("semget(IPC_PRIVATE, %d, 0%o)\n", nsems, semflg);
printf("semget(IPC_PRIVATE, %d, 0%o)\n", nsems, semflg);
if (key != IPC_PRIVATE) {
for (semid = 0; semid < seminfo.semmni; semid++) {
if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) &&
}
if (semid < seminfo.semmni) {
#ifdef SEM_DEBUG
printf("found public key\n");
#endif
if ((eval = ipcperm(cred, &sema[semid].u.sem_perm,
sema[semid].u.sem_perm._key = key;
sema[semid].u.sem_perm.cuid = kauth_cred_getuid(cred);
sema[semid].u.sem_perm.uid = kauth_cred_getuid(cred);
sema[semid].u.sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
sema[semid].u.sem_perm._seq =
(sema[semid].u.sem_perm._seq + 1) & 0x7fff;
sema[semid].u.sem_base = &sem_pool[semtot];
semtot += nsems;
bzero(sema[semid].u.sem_base,
- struct sembuf sops[MAX_SOPS];
- register struct semid_kernel *semakptr;
- register struct sembuf *sopptr = NULL; /* protected by 'semptr' */
- register struct sem *semptr = NULL; /* protected by 'if' */
+ struct sembuf sops[seminfo.semopm];
+ struct semid_kernel *semakptr;
+ struct sembuf *sopptr = NULL; /* protected by 'semptr' */
+ struct sem *semptr = NULL; /* protected by 'if' */
printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops);
#endif
printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops);
#endif
- printf("too many sops (max=%d, nsops=%d)\n", MAX_SOPS, nsops);
+ printf("too many sops (max=%d, nsops=%d)\n",
+ seminfo.semopm, nsops);
+ /* OK for LP64, since sizeof(struct sembuf) is currently invariant */
+ if ((eval = copyin(uap->sops, &sops, nsops * sizeof(struct sembuf))) != 0) {
+#ifdef SEM_DEBUG
+ printf("eval = %d from copyin(%08x, %08x, %ld)\n", eval,
+ uap->sops, &sops, nsops * sizeof(struct sembuf));
+#endif
+ goto semopout;
+ }
+
- if (eval)
- goto semopout;
-#endif
-
- /* OK for LP64, since sizeof(struct sembuf) is currently invariant */
- if ((eval = copyin(uap->sops, &sops, nsops * sizeof(struct sembuf))) != 0) {
-#ifdef SEM_DEBUG
- printf("eval = %d from copyin(%08x, %08x, %ld)\n", eval,
- uap->sops, &sops, nsops * sizeof(struct sembuf));
-#endif
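/*
 * The "invariant" claim above can be checked at build time (illustrative
 * sketch, assuming the traditional all-short layout of struct sembuf in
 * <sys/sem.h>):
 */
_Static_assert(sizeof(struct sembuf) == 3 * sizeof(unsigned short),
    "struct sembuf must have the same size for ILP32 and LP64 callers");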
- eval = msleep((caddr_t)semakptr, &sysv_sem_subsys_mutex , (PZERO - 4) | PCATCH,
+ eval = msleep((caddr_t)semakptr, &sysv_sem_subsys_mutex, (PZERO - 4) | PCATCH,
/*
* IMPORTANT: while we were asleep, the semaphore array might
* have been reallocated somewhere else (see grow_sema_array()).
* XXX POSIX: Third test this 'if' and 'EINTR' precedence may
* fail testing; if so, we will need to revert this code.
*/
if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 ||
semakptr->u.sem_perm._seq != IPCID_TO_SEQ(uap->semid) ||
sopptr->sem_num >= semakptr->u.sem_nsems) {
* we were sleeping, updating our sem_base pointer.
*/
semptr = &semakptr->u.sem_base[sopptr->sem_num];
eval = semundo_adjust(p, &supidx, semid,
sops[i].sem_num, -adjval);
if (semundo_adjust(p, &supidx, semid,
- /* Maybe we should build a list of semakptr's to wake
- * up, finish all access to data structures, release the
- * subsystem lock, and wake all the processes. Something
- * to think about. It wouldn't buy us anything unless
- * wakeup had the potential to block, or the syscall
- * funnel state was changed to allow multiple threads
- * in the BSD code at once.
- */
+ /* Maybe we should build a list of semakptr's to wake
+ * up, finish all access to data structures, release the
+ * subsystem lock, and wake all the processes. Something
+ * to think about.
+ */
- * There is a semaphore leak (i.e. memory leak) in this code.
- * We should be deleting the IPC_PRIVATE semaphores when they are
- * no longer needed, and we dont. We would have to track which processes
- * know about which IPC_PRIVATE semaphores, updating the list after
- * every fork. We can't just delete them semaphore when the process
- * that created it dies, because that process may well have forked
- * some children. So we need to wait until all of it's children have
- * died, and so on. Maybe we should tag each IPC_PRIVATE sempahore
- * with the creating group ID, count the number of processes left in
- * that group, and delete the semaphore when the group is gone.
- * Until that code gets implemented we will leak IPC_PRIVATE semaphores.
- * There is an upper bound on the size of our semaphore array, so
- * leaking the semaphores should not work as a DOS attack.
- *
- * Please note that the original BSD code this file is based on had the
- * same leaky semaphore problem.
- */
+ * There is a semaphore leak (i.e. memory leak) in this code.
+ * We should be deleting the IPC_PRIVATE semaphores when they are
+ * no longer needed, and we don't. We would have to track which processes
+ * know about which IPC_PRIVATE semaphores, updating the list after
+ * every fork. We can't just delete the semaphore when the process
+ * that created it dies, because that process may well have forked
+ * some children. So we need to wait until all of its children have
+ * died, and so on. Maybe we should tag each IPC_PRIVATE semaphore
+ * with the creating group ID, count the number of processes left in
+ * that group, and delete the semaphore when the group is gone.
+ * Until that code gets implemented we will leak IPC_PRIVATE semaphores.
+ * There is an upper bound on the size of our semaphore array, so
+ * leaking the semaphores should not work as a DOS attack.
+ *
+ * Please note that the original BSD code this file is based on had the
+ * same leaky semaphore problem.
+ */
SYSV_SEM_SUBSYS_UNLOCK();
}
/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
- if ((sem_pool == NULL) &&
- (sema == NULL) &&
- (semu == NULL) &&
- (semu_list_idx == -1)) {
- if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) {
- goto out;
- }
- } else
+ if ((sem_pool == NULL) &&
+ (sema == NULL) &&
+ (semu == NULL) &&
+ (semu_list_idx == -1)) {
+ if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) {
+ goto out;
+ }
+ } else {
}
/* SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV"); */
extern struct sysctl_oid_list sysctl__kern_sysv_children;
-SYSCTL_PROC(_kern_sysv, OID_AUTO, semmni, CTLTYPE_INT | CTLFLAG_RW,
- &limitseminfo.semmni, 0, &sysctl_seminfo ,"I","semmni");
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmni, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &limitseminfo.semmni, 0, &sysctl_seminfo, "I", "semmni");
-SYSCTL_PROC(_kern_sysv, OID_AUTO, semmns, CTLTYPE_INT | CTLFLAG_RW,
- &limitseminfo.semmns, 0, &sysctl_seminfo ,"I","semmns");
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmns, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &limitseminfo.semmns, 0, &sysctl_seminfo, "I", "semmns");
-SYSCTL_PROC(_kern_sysv, OID_AUTO, semmnu, CTLTYPE_INT | CTLFLAG_RW,
- &limitseminfo.semmnu, 0, &sysctl_seminfo ,"I","semmnu");
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmnu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &limitseminfo.semmnu, 0, &sysctl_seminfo, "I", "semmnu");
-SYSCTL_PROC(_kern_sysv, OID_AUTO, semmsl, CTLTYPE_INT | CTLFLAG_RW,
- &limitseminfo.semmsl, 0, &sysctl_seminfo ,"I","semmsl");
-
-SYSCTL_PROC(_kern_sysv, OID_AUTO, semume, CTLTYPE_INT | CTLFLAG_RW,
- &limitseminfo.semume, 0, &sysctl_seminfo ,"I","semume");
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmsl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &limitseminfo.semmsl, 0, &sysctl_seminfo, "I", "semmsl");
+
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semume, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &limitseminfo.semume, 0, &sysctl_seminfo, "I", "semume");
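/*
 * User-space view (illustrative sketch): these limits can be read, and
 * written until the subsystem has allocated its pools (see the
 * sysctl_seminfo handler above), with sysctlbyname(3), e.g.:
 *
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int semmns;
 *	size_t len = sizeof(semmns);
 *	if (sysctlbyname("kern.sysv.semmns", &semmns, &len, NULL, 0) == 0)
 *		printf("kern.sysv.semmns = %d\n", semmns);
 */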
- } ipcs;
- struct semid_ds semid_ds32; /* post conversion, 32 bit version */
+ } ipcs = { };
+ struct user32_semid_ds semid_ds32 = { }; /* post conversion, 32 bit version */
+ struct user64_semid_ds semid_ds64 = { }; /* post conversion, 64 bit version */
- if (!IS_64BIT_PROCESS(p)) {
- ipcs_sz = sizeof(struct IPCS_command);
- semid_ds_sz = sizeof(struct semid_ds);
+ if (IS_64BIT_PROCESS(p)) {
+ ipcs_sz = sizeof(struct user_IPCS_command);
+ semid_ds_sz = sizeof(struct user64_semid_ds);
+ } else {
+ ipcs_sz = sizeof(struct user32_IPCS_command);
+ semid_ds_sz = sizeof(struct user32_semid_ds);
}
/* Copy in the command structure */
if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
/*
* If necessary, convert the 64 bit kernel segment
* descriptor to a 32 bit user one.
*/
if (!IS_64BIT_PROCESS(p)) {
error = copyout(semid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
if (!error) {
/* update cursor */
ipcs.u64.ipcs_cursor = cursor + 1;
-SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW|CTLFLAG_ANYBODY,
- 0, 0, IPCS_sem_sysctl,
- "S,IPCS_sem_command",
- "ipcs sem command interface");
+SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
+ 0, 0, IPCS_sem_sysctl,
+ "S,IPCS_sem_command",
+ "ipcs sem command interface");