- * @APPLE_LICENSE_HEADER_START@
- *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+/*
+ * NOTICE: This file was modified by McAfee Research in 2004 to introduce
+ * support for mandatory and extensible security protections. This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ * Copyright (c) 2005-2006 SPARTA, Inc.
+ */
/* Hard system limits to avoid resource starvation / DOS attacks.
* These are not needed if we can make the semaphore pages swappable.
*/
static struct seminfo limitseminfo = {
- SEMMAP, /* # of entries in semaphore map */
- SEMMNI, /* # of semaphore identifiers */
- SEMMNS, /* # of semaphores in system */
- SEMMNU, /* # of undo structures in system */
- SEMMSL, /* max # of semaphores per id */
- SEMOPM, /* max # of operations per semop call */
- SEMUME, /* max # of undo entries per process */
- SEMUSZ, /* size in bytes of undo structure */
- SEMVMX, /* semaphore maximum value */
- SEMAEM /* adjust on exit max value */
+ .semmap = SEMMAP, /* # of entries in semaphore map */
+ .semmni = SEMMNI, /* # of semaphore identifiers */
+ .semmns = SEMMNS, /* # of semaphores in system */
+ .semmnu = SEMMNU, /* # of undo structures in system */
+ .semmsl = SEMMSL, /* max # of semaphores per id */
+ .semopm = SEMOPM, /* max # of operations per semop call */
+ .semume = SEMUME, /* max # of undo entries per process */
+ .semusz = SEMUSZ, /* size in bytes of undo structure */
+ .semvmx = SEMVMX, /* semaphore maximum value */
+ .semaem = SEMAEM /* adjust on exit max value */
- SEMMAP, /* Unused, # of entries in semaphore map */
- 0, /* # of semaphore identifiers */
- 0, /* # of semaphores in system */
- 0, /* # of undo entries in system */
- SEMMSL, /* max # of semaphores per id */
- SEMOPM, /* max # of operations per semop call */
- SEMUME, /* max # of undo entries per process */
- SEMUSZ, /* size in bytes of undo structure */
- SEMVMX, /* semaphore maximum value */
- SEMAEM /* adjust on exit max value */
+ .semmap = SEMMAP, /* Unused, # of entries in semaphore map */
+ .semmni = 0, /* # of semaphore identifiers */
+ .semmns = 0, /* # of semaphores in system */
+ .semmnu = 0, /* # of undo entries in system */
+ .semmsl = SEMMSL, /* max # of semaphores per id */
+ .semopm = SEMOPM, /* max # of operations per semop call */
+ .semume = SEMUME, /* max # of undo entries per process */
+ .semusz = SEMUSZ, /* size in bytes of undo structure */
+ .semvmx = SEMVMX, /* semaphore maximum value */
+ .semaem = SEMAEM /* adjust on exit max value */
-static struct sem_undo *semu_alloc(struct proc *p);
-static int semundo_adjust(struct proc *p, struct sem_undo **supptr,
- int semid, int semnum, int adjval);
+static int semu_alloc(struct proc *p);
+static int semundo_adjust(struct proc *p, int *supidx,
+ int semid, int semnum, int adjval);
static void semundo_clear(int semid, int semnum);
/* XXX casting to (sy_call_t *) is bogus, as usual. */
-static int semtot = 0; /* # of used semaphores */
-struct user_semid_ds *sema = NULL; /* semaphore id pool */
-struct sem *sem_pool = NULL; /* semaphore pool */
-static struct sem_undo *semu_list = NULL; /* active undo structures */
-struct sem_undo *semu = NULL; /* semaphore undo pool */
+static int semtot = 0; /* # of used semaphores */
+struct semid_kernel *sema = NULL; /* semaphore id pool */
+struct sem *sem_pool = NULL; /* semaphore pool */
+static int semu_list_idx = -1; /* active undo structures */
+struct sem_undo *semu = NULL; /* semaphore undo pool */
-void sysv_sem_lock_init(void);
-static lck_grp_t *sysv_sem_subsys_lck_grp;
-static lck_grp_attr_t *sysv_sem_subsys_lck_grp_attr;
-static lck_attr_t *sysv_sem_subsys_lck_attr;
-static lck_mtx_t sysv_sem_subsys_mutex;
+static LCK_GRP_DECLARE(sysv_sem_subsys_lck_grp, "sysv_sem_subsys_lock");
+static LCK_MTX_DECLARE(sysv_sem_subsys_mutex, &sysv_sem_subsys_lck_grp);
#define SYSV_SEM_SUBSYS_LOCK() lck_mtx_lock(&sysv_sem_subsys_mutex)
#define SYSV_SEM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_sem_subsys_mutex)
-
-__private_extern__ void
-sysv_sem_lock_init( void )
-{
-
- sysv_sem_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
-
- sysv_sem_subsys_lck_grp = lck_grp_alloc_init("sysv_sem_subsys_lock", sysv_sem_subsys_lck_grp_attr);
-
- sysv_sem_subsys_lck_attr = lck_attr_alloc_init();
- lck_mtx_init(&sysv_sem_subsys_mutex, sysv_sem_subsys_lck_grp, sysv_sem_subsys_lck_attr);
-}
-
-semid_ds_64to32(struct user_semid_ds *in, struct semid_ds *out)
+semid_ds_kernelto32(struct user_semid_ds *in, struct user32_semid_ds *out)
+{
+ out->sem_perm = in->sem_perm;
+ out->sem_base = CAST_DOWN_EXPLICIT(__int32_t, in->sem_base);
+ out->sem_nsems = in->sem_nsems;
+ out->sem_otime = in->sem_otime; /* XXX loses precision */
+ out->sem_ctime = in->sem_ctime; /* XXX loses precision */
+}
+
+static void
+semid_ds_kernelto64(struct user_semid_ds *in, struct user64_semid_ds *out)
-semid_ds_32to64(struct semid_ds *in, struct user_semid_ds *out)
+semid_ds_32tokernel(struct user32_semid_ds *in, struct user_semid_ds *out)
+{
+ out->sem_ctime = in->sem_ctime;
+ out->sem_otime = in->sem_otime;
+ out->sem_nsems = in->sem_nsems;
+ out->sem_base = (void *)(uintptr_t)in->sem_base;
+ out->sem_perm = in->sem_perm;
+}
+
+static void
+semid_ds_64tokernel(struct user64_semid_ds *in, struct user_semid_ds *out)
{
out->sem_ctime = in->sem_ctime;
out->sem_otime = in->sem_otime;
out->sem_nsems = in->sem_nsems;
- * Entry point for all SEM calls
+ * semsys
+ *
+ * Entry point for all SEM calls: semctl, semget, semop
+ *
+ * Parameters: p Process requesting the call
+ * uap User argument descriptor (see below)
+ * retval Return value of the selected sem call
+ *
+ * Indirect parameters: uap->which sem call to invoke (index in array of sem calls)
+ * uap->a2 User argument descriptor
+ *
+ * Returns: 0 Success
+ * !0 Not success
+ *
+ * Implicit returns: retval Return value of the selected sem call
+ *
+ * DEPRECATED: This interface should not be used to call the other SEM
+ * functions (semctl, semget, semop). The correct usage is
+ * to call the other SEM functions directly.
- if (uap->which >= sizeof(semcalls)/sizeof(semcalls[0]))
- return (EINVAL);
- return ((*semcalls[uap->which])(p, &uap->a2, retval));
+ if (uap->which >= sizeof(semcalls) / sizeof(semcalls[0])) {
+ return EINVAL;
+ }
+ return (*semcalls[uap->which])(p, &uap->a2, retval);
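The block comment above flags semsys() as a deprecated multiplexor whose only job is to dispatch into semctl, semget, and semop; the supported usage is to call those interfaces directly. A minimal userspace sketch of the direct calls (illustrative only, not part of this patch; the function name sem_demo is made up, error handling is omitted, and new semaphore values start at zero in this implementation per the bzero() in semget() further down):

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int
sem_demo(void)
{
	struct sembuf op;

	/* One private semaphore; semval starts at 0 in this implementation. */
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	op.sem_num = 0;
	op.sem_op = 1;          /* post: semval 0 -> 1 */
	op.sem_flg = 0;
	semop(semid, &op, 1);

	op.sem_op = -1;         /* wait: semval 1 -> 0, would block at 0 */
	semop(semid, &op, 1);

	return semctl(semid, 0, IPC_RMID);      /* remove the set */
}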
newSize = newSize > limitseminfo.semmnu ? limitseminfo.semmnu : newSize;
#ifdef SEM_DEBUG
printf("growing semu[] from %d to %d\n", seminfo.semmnu, newSize);
#endif
- MALLOC(newSemu, struct sem_undo *, sizeof (struct sem_undo) * newSize,
- M_SYSVSEM, M_WAITOK | M_ZERO);
- if (NULL == newSemu)
- {
+ newSemu = kheap_alloc(KM_SYSVSEM, sizeof(struct sem_undo) * newSize,
+ Z_WAITOK | Z_ZERO);
+ if (NULL == newSemu) {
- * "un_proc" set to 0 (i.e. NULL) by the M_ZERO flag to MALLOC() above,
- * so they're already marked as "not in use".
+ * "un_proc" set to 0 (i.e. NULL) by the Z_ZERO flag to kheap_alloc
+ * above, so they're already marked as "not in use".
newSize = newSize > limitseminfo.semmni ? limitseminfo.semmni : newSize;
#ifdef SEM_DEBUG
printf("growing sema[] from %d to %d\n", seminfo.semmni, newSize);
#endif
- MALLOC(newSema, struct user_semid_ds *,
- sizeof (struct user_semid_ds) * newSize,
- M_SYSVSEM, M_WAITOK | M_ZERO);
- if (NULL == newSema)
- {
+ newSema = kheap_alloc(KM_SYSVSEM, sizeof(struct semid_kernel) * newSize,
+ Z_WAITOK | Z_ZERO);
+ if (NULL == newSema) {
* this with the existing code, so we wake up the
* process and let it do a lot of work to determine the
* semaphore set is really not available yet, and then
- * "sem_base" and "sem_perm.mode" set to 0 (i.e. NULL) by the M_ZERO
- * flag to MALLOC() above, so they're already marked as "not in use".
+ * "sem_base" and "sem_perm.mode" set to 0 (i.e. NULL) by the Z_ZERO
+ * flag to kheap_alloc above, so they're already marked as "not in use".
/* enforce hard limit */
if (new_pool_size > limitseminfo.semmns) {
#ifdef SEM_DEBUG
printf("semaphore hard limit of %d reached, requested %d\n",
new_pool_size = new_pool_size > limitseminfo.semmns ? limitseminfo.semmns : new_pool_size;
#ifdef SEM_DEBUG
printf("growing sem_pool array from %d to %d\n", seminfo.semmns, new_pool_size);
#endif
- MALLOC(new_sem_pool, struct sem *, sizeof (struct sem) * new_pool_size,
- M_SYSVSEM, M_WAITOK | M_ZERO);
+ new_sem_pool = kheap_alloc(KM_SYSVSEM, sizeof(struct sem) * new_pool_size,
+ Z_WAITOK | Z_ZERO);
- for(i = 0; i < seminfo.semmni; i++) {
- if (sema[i].sem_perm.mode & SEM_ALLOC) /* ID in use */
- sema[i].sem_base += (new_sem_pool - sem_pool);
+ for (i = 0; i < seminfo.semmni; i++) {
+ if (sema[i].u.sem_perm.mode & SEM_ALLOC) { /* ID in use */
+ sema[i].u.sem_base = new_sem_pool +
+ (sema[i].u.sem_base - sem_pool);
+ }
- supptr = &semu_list;
- while ((suptr = *supptr) != NULL) {
- if (suptr->un_cnt == 0) {
+ supidx = &semu_list_idx;
+ while (*supidx != -1) {
+ suptr = SEMU(*supidx);
+ if (suptr->un_cnt == 0) {
-semundo_adjust(struct proc *p, struct sem_undo **supptr, int semid,
- int semnum, int adjval)
+semundo_adjust(struct proc *p, int *supidx, int semid,
+ int semnum, int adjval)
- register struct sem_undo *suptr;
- register struct undo *sueptr, **suepptr, *new_sueptr;
+ struct sem_undo *suptr;
+ int suidx;
+ struct undo *sueptr, **suepptr, *new_sueptr;
- i < suptr->un_cnt;
- i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) {
- if (sueptr->une_id != semid || sueptr->une_num != semnum)
+ i < suptr->un_cnt;
+ i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) {
+ if (sueptr->une_id != semid || sueptr->une_num != semnum) {
AUDIT_ARG(svipc_cmd, cmd);
AUDIT_ARG(svipc_id, semid);
- semaptr = &sema[semid];
- if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
- semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) {
+ semakptr = &sema[semid];
+ if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 ||
+ semakptr->u.sem_perm._seq != IPCID_TO_SEQ(uap->semid)) {
- semaptr->sem_perm.cuid = kauth_cred_getuid(cred);
- semaptr->sem_perm.uid = kauth_cred_getuid(cred);
- semtot -= semaptr->sem_nsems;
- for (i = semaptr->sem_base - sem_pool; i < semtot; i++)
- sem_pool[i] = sem_pool[i + semaptr->sem_nsems];
+ semakptr->u.sem_perm.cuid = kauth_cred_getuid(cred);
+ semakptr->u.sem_perm.uid = kauth_cred_getuid(cred);
+ semtot -= semakptr->u.sem_nsems;
+ for (i = semakptr->u.sem_base - sem_pool; i < semtot; i++) {
+ sem_pool[i] = sem_pool[i + semakptr->u.sem_nsems];
+ }
- if ((sema[i].sem_perm.mode & SEM_ALLOC) &&
- sema[i].sem_base > semaptr->sem_base)
- sema[i].sem_base -= semaptr->sem_nsems;
+ if ((sema[i].u.sem_perm.mode & SEM_ALLOC) &&
+ sema[i].u.sem_base > semakptr->u.sem_base) {
+ sema[i].u.sem_base -= semakptr->u.sem_nsems;
+ }
- eval = copyin(user_arg.buf, &sbuf, sizeof(struct user_semid_ds));
+ struct user64_semid_ds ds64;
+ eval = copyin(user_arg.buf, &ds64, sizeof(ds64));
+ semid_ds_64tokernel(&ds64, &sbuf);
- eval = copyin(user_arg.buf, &sbuf, sizeof(struct semid_ds));
- /* convert in place; ugly, but safe */
- semid_ds_32to64((struct semid_ds *)&sbuf, &sbuf);
+ struct user32_semid_ds ds32;
+ eval = copyin(user_arg.buf, &ds32, sizeof(ds32));
+ semid_ds_32tokernel(&ds32, &sbuf);
- semaptr->sem_perm.uid = sbuf.sem_perm.uid;
- semaptr->sem_perm.gid = sbuf.sem_perm.gid;
- semaptr->sem_perm.mode = (semaptr->sem_perm.mode & ~0777) |
- (sbuf.sem_perm.mode & 0777);
- semaptr->sem_ctime = sysv_semtime();
+ semakptr->u.sem_perm.uid = sbuf.sem_perm.uid;
+ semakptr->u.sem_perm.gid = sbuf.sem_perm.gid;
+ semakptr->u.sem_perm.mode = (semakptr->u.sem_perm.mode &
+ ~0777) | (sbuf.sem_perm.mode & 0777);
+ semakptr->u.sem_ctime = sysv_semtime();
- if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
- goto semctlout;
- bcopy(semaptr, &uds, sizeof(struct user_semid_ds));
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
+
- eval = copyout(&uds, user_arg.buf, sizeof(struct user_semid_ds));
+ struct user64_semid_ds semid_ds64;
+ bzero(&semid_ds64, sizeof(semid_ds64));
+ semid_ds_kernelto64(&semakptr->u, &semid_ds64);
+ eval = copyout(&semid_ds64, user_arg.buf, sizeof(semid_ds64));
- struct semid_ds semid_ds32;
- semid_ds_64to32(&uds, &semid_ds32);
- eval = copyout(&semid_ds32, user_arg.buf, sizeof(struct semid_ds));
+ struct user32_semid_ds semid_ds32;
+ bzero(&semid_ds32, sizeof(semid_ds32));
+ semid_ds_kernelto32(&semakptr->u, &semid_ds32);
+ eval = copyout(&semid_ds32, user_arg.buf, sizeof(semid_ds32));
- if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
- goto semctlout;
- if (semnum < 0 || semnum >= semaptr->sem_nsems) {
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
+ if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
- if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
- goto semctlout;
- if (semnum < 0 || semnum >= semaptr->sem_nsems) {
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
+ if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
- if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
- goto semctlout;
- if (semnum < 0 || semnum >= semaptr->sem_nsems) {
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
+ if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
- if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
- goto semctlout;
- if (semnum < 0 || semnum >= semaptr->sem_nsems) {
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
+ if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
/*
* Cast down a pointer instead of using 'val' member directly
* to avoid introducing endieness and a pad field into the
* header file. Ugly, but it works.
*/
- semaptr->sem_base[semnum].semval = CAST_DOWN(int,user_arg.buf);
+ u_int newsemval = CAST_DOWN_EXPLICIT(u_int, user_arg.buf);
+
+ /*
+ * The check is being performed as unsigned values to match
+ * eventual destination
+ */
+ if (newsemval > (u_int)seminfo.semvmx) {
+#ifdef SEM_DEBUG
+ printf("Out of range sem value for set\n");
+#endif
+ eval = ERANGE;
+ goto semctlout;
+ }
+ semakptr->u.sem_base[semnum].semval = newsemval;
+ semakptr->u.sem_base[semnum].sempid = p->p_pid;
+ /* XXX scottl Should there be a MAC call here? */
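The SETVAL case above now validates the incoming value against seminfo.semvmx and fails with ERANGE instead of silently storing an out-of-range value. From userspace that value travels in the val member of union semun; a hedged caller-side sketch (the helper name set_sem_value is hypothetical, and union semun is assumed to be provided by <sys/sem.h> on Darwin):

#include <sys/sem.h>
#include <errno.h>
#include <stdio.h>

/* Hypothetical helper: set semaphore 'semnum' of set 'semid' to 'value'. */
static int
set_sem_value(int semid, int semnum, int value)
{
	union semun arg;        /* from <sys/sem.h> on Darwin */

	arg.val = value;
	if (semctl(semid, semnum, SETVAL, arg) == -1) {
		if (errno == ERANGE) {
			/* rejected by the semvmx check added above */
			fprintf(stderr, "%d exceeds the semvmx limit\n", value);
		}
		return -1;
	}
	return 0;
}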
/* XXX could be done in one go... */
eval = copyin(user_arg.array + (i * sizeof(unsigned short)),
printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
printf("semget(IPC_PRIVATE, %d, 0%o)\n", nsems, semflg);
printf("semget(IPC_PRIVATE, %d, 0%o)\n", nsems, semflg);
- sema[semid].sem_perm.key = key;
- sema[semid].sem_perm.cuid = kauth_cred_getuid(cred);
- sema[semid].sem_perm.uid = kauth_cred_getuid(cred);
- sema[semid].sem_perm.cgid = cred->cr_gid;
- sema[semid].sem_perm.gid = cred->cr_gid;
- sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
- sema[semid].sem_perm.seq =
- (sema[semid].sem_perm.seq + 1) & 0x7fff;
- sema[semid].sem_nsems = nsems;
- sema[semid].sem_otime = 0;
- sema[semid].sem_ctime = sysv_semtime();
- sema[semid].sem_base = &sem_pool[semtot];
+ sema[semid].u.sem_perm._key = key;
+ sema[semid].u.sem_perm.cuid = kauth_cred_getuid(cred);
+ sema[semid].u.sem_perm.uid = kauth_cred_getuid(cred);
+ sema[semid].u.sem_perm.cgid = kauth_cred_getgid(cred);
+ sema[semid].u.sem_perm.gid = kauth_cred_getgid(cred);
+ sema[semid].u.sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
+ sema[semid].u.sem_perm._seq =
+ (sema[semid].u.sem_perm._seq + 1) & 0x7fff;
+ sema[semid].u.sem_nsems = nsems;
+ sema[semid].u.sem_otime = 0;
+ sema[semid].u.sem_ctime = sysv_semtime();
+ sema[semid].u.sem_base = &sem_pool[semtot];
- bzero(sema[semid].sem_base,
- sizeof(sema[semid].sem_base[0])*nsems);
+ bzero(sema[semid].u.sem_base,
+ sizeof(sema[semid].u.sem_base[0]) * nsems);
+#if CONFIG_MACF
+ mac_sysvsem_label_associate(cred, &sema[semid]);
+#endif
- printf("sembase = 0x%x, next = 0x%x\n", sema[semid].sem_base,
+ printf("sembase = 0x%x, next = 0x%x\n", sema[semid].u.sem_base,
AUDIT_ARG(svipc_id, *retval);
#ifdef SEM_DEBUG
printf("semget is done, returning %d\n", *retval);
- struct sembuf sops[MAX_SOPS];
- register struct user_semid_ds *semaptr;
- register struct sembuf *sopptr = NULL; /* protected by 'semptr' */
- register struct sem *semptr = NULL; /* protected by 'if' */
- struct sem_undo *suptr = NULL;
+ struct sembuf sops[seminfo.semopm];
+ struct semid_kernel *semakptr;
+ struct sembuf *sopptr = NULL; /* protected by 'semptr' */
+ struct sem *semptr = NULL; /* protected by 'if' */
+ int supidx = -1;
printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops);
#endif
printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops);
#endif
- printf("too many sops (max=%d, nsops=%d)\n", MAX_SOPS, nsops);
+ printf("too many sops (max=%d, nsops=%d)\n",
+ seminfo.semopm, nsops);
+#if CONFIG_MACF
+ /*
+ * Initial pass thru sops to see what permissions are needed.
+ */
+ j = 0; /* permission needed */
+ for (i = 0; i < nsops; i++) {
+ j |= (sops[i].sem_op == 0) ? SEM_R : SEM_A;
+ }
+
+ /*
+ * The MAC hook checks whether the thread has read (and possibly
+ * write) permissions to the semaphore array based on the
+ * sopptr->sem_op value.
+ */
+ eval = mac_sysvsem_check_semop(kauth_cred_get(), semakptr, j);
+ if (eval) {
+ goto semopout;
+ }
+#endif
+
/*
* Loop trying to satisfy the vector of requests.
* If we reach a point where we must wait, any requests already
- printf("semop: semaptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n",
- semaptr, semaptr->sem_base, semptr,
+ printf("semop: semakptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n",
+ semakptr, semakptr->u.sem_base, semptr,
sopptr->sem_num, semptr->semval, sopptr->sem_op,
(sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
#endif
- eval = msleep((caddr_t)semaptr, &sysv_sem_subsys_mutex , (PZERO - 4) | PCATCH,
+ eval = msleep((caddr_t)semakptr, &sysv_sem_subsys_mutex, (PZERO - 4) | PCATCH,
/*
* IMPORTANT: while we were asleep, the semaphore array might
* have been reallocated somewhere else (see grow_sema_array()).
- if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
- semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid) ||
- sopptr->sem_num >= semaptr->sem_nsems) {
+ semakptr = &sema[semid]; /* sema may have been reallocated */
+ if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 ||
+ semakptr->u.sem_perm._seq != IPCID_TO_SEQ(uap->semid) ||
+ sopptr->sem_num >= semakptr->u.sem_nsems) {
+ /* The man page says to return EIDRM. */
+ /* Unfortunately, BSD doesn't define that code! */
* because the sem[] may have been reallocated while
* we were sleeping, updating our sem_base pointer.
*/
- eval = semundo_adjust(p, &suptr, semid,
+ }
+ eval = semundo_adjust(p, &supidx, semid,
- if (semundo_adjust(p, &suptr, semid,
- sops[j].sem_num, adjval) != 0)
+ }
+ if (semundo_adjust(p, &supidx, semid,
+ sops[j].sem_num, adjval) != 0) {
- for (supptr = &semu_list; (suptr = *supptr) != NULL;
- supptr = &suptr->un_next) {
- if (suptr->un_proc == p)
+ for (supidx = &semu_list_idx; (suidx = *supidx) != -1;
+ supidx = &suptr->un_next_idx) {
+ suptr = SEMU(suidx);
+ if (suptr->un_proc == p) {
- if (semaptr->sem_base[semnum].semval < -adjval)
- semaptr->sem_base[semnum].semval = 0;
- else
- semaptr->sem_base[semnum].semval +=
+ if (semakptr->u.sem_base[semnum].semval < -adjval) {
+ semakptr->u.sem_base[semnum].semval = 0;
+ } else {
+ semakptr->u.sem_base[semnum].semval +=
- } else
- semaptr->sem_base[semnum].semval += adjval;
-
- /* Maybe we should build a list of semaptr's to wake
- * up, finish all access to data structures, release the
- * subsystem lock, and wake all the processes. Something
- * to think about. It wouldn't buy us anything unless
- * wakeup had the potential to block, or the syscall
- * funnel state was changed to allow multiple threads
- * in the BSD code at once.
- */
+ }
+ } else {
+ semakptr->u.sem_base[semnum].semval += adjval;
+ }
+
+ /* Maybe we should build a list of semakptr's to wake
+ * up, finish all access to data structures, release the
+ * subsystem lock, and wake all the processes. Something
+ * to think about.
+ */
- * There is a semaphore leak (i.e. memory leak) in this code.
- * We should be deleting the IPC_PRIVATE semaphores when they are
- * no longer needed, and we dont. We would have to track which processes
- * know about which IPC_PRIVATE semaphores, updating the list after
- * every fork. We can't just delete them semaphore when the process
- * that created it dies, because that process may well have forked
- * some children. So we need to wait until all of it's children have
- * died, and so on. Maybe we should tag each IPC_PRIVATE sempahore
- * with the creating group ID, count the number of processes left in
- * that group, and delete the semaphore when the group is gone.
- * Until that code gets implemented we will leak IPC_PRIVATE semaphores.
- * There is an upper bound on the size of our semaphore array, so
- * leaking the semaphores should not work as a DOS attack.
- *
- * Please note that the original BSD code this file is based on had the
- * same leaky semaphore problem.
- */
+ * There is a semaphore leak (i.e. memory leak) in this code.
+ * We should be deleting the IPC_PRIVATE semaphores when they are
+ * no longer needed, and we don't. We would have to track which processes
+ * know about which IPC_PRIVATE semaphores, updating the list after
+ * every fork. We can't just delete the semaphore when the process
+ * that created it dies, because that process may well have forked
+ * some children. So we need to wait until all of its children have
+ * died, and so on. Maybe we should tag each IPC_PRIVATE semaphore
+ * with the creating group ID, count the number of processes left in
+ * that group, and delete the semaphore when the group is gone.
+ * Until that code gets implemented we will leak IPC_PRIVATE semaphores.
+ * There is an upper bound on the size of our semaphore array, so
+ * leaking the semaphores should not work as a DOS attack.
+ *
+ * Please note that the original BSD code this file is based on had the
+ * same leaky semaphore problem.
+ */
SYSV_SEM_SUBSYS_UNLOCK();
}
/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
- if ((sem_pool == NULL) &&
- (sema == NULL) &&
- (semu == NULL) &&
- (semu_list == NULL)) {
- if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) {
- goto out;
- }
- } else
+ if ((sem_pool == NULL) &&
+ (sema == NULL) &&
+ (semu == NULL) &&
+ (semu_list_idx == -1)) {
+ if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) {
+ goto out;
+ }
+ } else {
}
/* SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV"); */
extern struct sysctl_oid_list sysctl__kern_sysv_children;
-SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNI, semmni, CTLTYPE_INT | CTLFLAG_RW,
- &limitseminfo.semmni, 0, &sysctl_seminfo ,"I","semmni");
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmni, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &limitseminfo.semmni, 0, &sysctl_seminfo, "I", "semmni");
+
-SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNS, semmns, CTLTYPE_INT | CTLFLAG_RW,
-    &limitseminfo.semmns, 0, &sysctl_seminfo ,"I","semmns");
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmns, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+    &limitseminfo.semmns, 0, &sysctl_seminfo, "I", "semmns");
-SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNU, semmnu, CTLTYPE_INT | CTLFLAG_RW,
-    &limitseminfo.semmnu, 0, &sysctl_seminfo ,"I","semmnu");
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmnu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+    &limitseminfo.semmnu, 0, &sysctl_seminfo, "I", "semmnu");
-SYSCTL_PROC(_kern_sysv, KSYSV_SEMMSL, semmsl, CTLTYPE_INT | CTLFLAG_RW,
-    &limitseminfo.semmsl, 0, &sysctl_seminfo ,"I","semmsl");
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmsl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+    &limitseminfo.semmsl, 0, &sysctl_seminfo, "I", "semmsl");
-
-SYSCTL_PROC(_kern_sysv, KSYSV_SEMUNE, semume, CTLTYPE_INT | CTLFLAG_RW,
- &limitseminfo.semume, 0, &sysctl_seminfo ,"I","semume");
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semume, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &limitseminfo.semume, 0, &sysctl_seminfo, "I", "semume");
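With these declarations the tunables are registered via OID_AUTO with CTLFLAG_LOCKED under the existing kern.sysv node, so the MIB names should come out as kern.sysv.semmni, kern.sysv.semmns, and so on (treat the exact names as an assumption). A userspace sketch using sysctlbyname() with a hypothetical helper name; as the sysctl_seminfo() handler earlier in the diff suggests, writes only take effect while the semaphore pools are still unallocated:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

/* Hypothetical helper: print kern.sysv.semmns, then try to raise it. */
static int
show_and_raise_semmns(int new_value)
{
	int cur = 0;
	size_t len = sizeof(cur);

	if (sysctlbyname("kern.sysv.semmns", &cur, &len, NULL, 0) == -1) {
		return -1;
	}
	printf("kern.sysv.semmns = %d\n", cur);

	/* Needs privilege; rejected once the subsystem is in use. */
	return sysctlbyname("kern.sysv.semmns", NULL, NULL,
	    &new_value, sizeof(new_value));
}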
- } ipcs;
- struct semid_ds semid_ds32; /* post conversion, 32 bit version */
+ } ipcs = { };
+ struct user32_semid_ds semid_ds32 = { }; /* post conversion, 32 bit version */
+ struct user64_semid_ds semid_ds64 = { }; /* post conversion, 64 bit version */
+ if (IS_64BIT_PROCESS(p)) {
+ ipcs_sz = sizeof(struct user_IPCS_command);
+ semid_ds_sz = sizeof(struct user64_semid_ds);
+ } else {
+ ipcs_sz = sizeof(struct user32_IPCS_command);
+ semid_ds_sz = sizeof(struct user32_semid_ds);
+ }
+
/* Copy in the command structure */
if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
- if (!IS_64BIT_PROCESS(p)) {
- ipcs_sz = sizeof(struct IPCS_command);
- semid_ds_sz = sizeof(struct semid_ds);
+ if (!IS_64BIT_PROCESS(p)) { /* convert in place */
+ ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);
/*
* If necessary, convert the 64 bit kernel segment
* descriptor to a 32 bit user one.
*/
if (!IS_64BIT_PROCESS(p)) {
error = copyout(semid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
if (!error) {
/* update cursor */
ipcs.u64.ipcs_cursor = cursor + 1;
-SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW|CTLFLAG_ANYBODY,
- 0, 0, IPCS_sem_sysctl,
- "S,IPCS_sem_command",
- "ipcs sem command interface");
+SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
+ 0, 0, IPCS_sem_sysctl,
+ "S,IPCS_sem_command",
+ "ipcs sem command interface");
+
+#endif /* SYSV_SEM */