/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
/* Uncomment this line to see MAC debugging output. */
/* #define MAC_DEBUG */
#if CONFIG_MACF_DEBUG
-#define MPRINTF(a) printf(a)
+#define MPRINTF(a) printf(a)
#else
-#define MPRINTF(a)
+#define MPRINTF(a)
#endif
-#define M_SYSVSEM M_TEMP
+#define KM_SYSVSEM KHEAP_DEFAULT
/* Hard system limits to avoid resource starvation / DOS attacks.
* These are not needed if we can make the semaphore pages swappable.
*/
static struct seminfo limitseminfo = {
- SEMMAP, /* # of entries in semaphore map */
- SEMMNI, /* # of semaphore identifiers */
- SEMMNS, /* # of semaphores in system */
- SEMMNU, /* # of undo structures in system */
- SEMMSL, /* max # of semaphores per id */
- SEMOPM, /* max # of operations per semop call */
- SEMUME, /* max # of undo entries per process */
- SEMUSZ, /* size in bytes of undo structure */
- SEMVMX, /* semaphore maximum value */
- SEMAEM /* adjust on exit max value */
+ .semmap = SEMMAP, /* # of entries in semaphore map */
+ .semmni = SEMMNI, /* # of semaphore identifiers */
+ .semmns = SEMMNS, /* # of semaphores in system */
+ .semmnu = SEMMNU, /* # of undo structures in system */
+ .semmsl = SEMMSL, /* max # of semaphores per id */
+ .semopm = SEMOPM, /* max # of operations per semop call */
+ .semume = SEMUME, /* max # of undo entries per process */
+ .semusz = SEMUSZ, /* size in bytes of undo structure */
+ .semvmx = SEMVMX, /* semaphore maximum value */
+ .semaem = SEMAEM /* adjust on exit max value */
};
/* Current system allocations. We use this structure to track how many
* and not allocate the memory for them up front.
*/
struct seminfo seminfo = {
- SEMMAP, /* Unused, # of entries in semaphore map */
- 0, /* # of semaphore identifiers */
- 0, /* # of semaphores in system */
- 0, /* # of undo entries in system */
- SEMMSL, /* max # of semaphores per id */
- SEMOPM, /* max # of operations per semop call */
- SEMUME, /* max # of undo entries per process */
- SEMUSZ, /* size in bytes of undo structure */
- SEMVMX, /* semaphore maximum value */
- SEMAEM /* adjust on exit max value */
+ .semmap = SEMMAP, /* Unused, # of entries in semaphore map */
+ .semmni = 0, /* # of semaphore identifiers */
+ .semmns = 0, /* # of semaphores in system */
+ .semmnu = 0, /* # of undo entries in system */
+ .semmsl = SEMMSL, /* max # of semaphores per id */
+ .semopm = SEMOPM, /* max # of operations per semop call */
+ .semume = SEMUME, /* max # of undo entries per process */
+ .semusz = SEMUSZ, /* size in bytes of undo structure */
+ .semvmx = SEMVMX, /* semaphore maximum value */
+ .semaem = SEMAEM /* adjust on exit max value */
};
static int semu_alloc(struct proc *p);
-static int semundo_adjust(struct proc *p, int *supidx,
- int semid, int semnum, int adjval);
+static int semundo_adjust(struct proc *p, int *supidx,
+ int semid, int semnum, int adjval);
static void semundo_clear(int semid, int semnum);
/* XXX casting to (sy_call_t *) is bogus, as usual. */
-static sy_call_t *semcalls[] = {
+static sy_call_t* const semcalls[] = {
(sy_call_t *)semctl, (sy_call_t *)semget,
(sy_call_t *)semop
};
-static int semtot = 0; /* # of used semaphores */
-struct semid_kernel *sema = NULL; /* semaphore id pool */
-struct sem *sem_pool = NULL; /* semaphore pool */
-static int semu_list_idx = -1; /* active undo structures */
-struct sem_undo *semu = NULL; /* semaphore undo pool */
+static int semtot = 0; /* # of used semaphores */
+struct semid_kernel *sema = NULL; /* semaphore id pool */
+struct sem *sem_pool = NULL; /* semaphore pool */
+static int semu_list_idx = -1; /* active undo structures */
+struct sem_undo *semu = NULL; /* semaphore undo pool */
-void sysv_sem_lock_init(void);
-static lck_grp_t *sysv_sem_subsys_lck_grp;
-static lck_grp_attr_t *sysv_sem_subsys_lck_grp_attr;
-static lck_attr_t *sysv_sem_subsys_lck_attr;
-static lck_mtx_t sysv_sem_subsys_mutex;
+static LCK_GRP_DECLARE(sysv_sem_subsys_lck_grp, "sysv_sem_subsys_lock");
+static LCK_MTX_DECLARE(sysv_sem_subsys_mutex, &sysv_sem_subsys_lck_grp);
#define SYSV_SEM_SUBSYS_LOCK() lck_mtx_lock(&sysv_sem_subsys_mutex)
#define SYSV_SEM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_sem_subsys_mutex)
-
-__private_extern__ void
-sysv_sem_lock_init( void )
-{
-
- sysv_sem_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
-
- sysv_sem_subsys_lck_grp = lck_grp_alloc_init("sysv_sem_subsys_lock", sysv_sem_subsys_lck_grp_attr);
-
- sysv_sem_subsys_lck_attr = lck_attr_alloc_init();
- lck_mtx_init(&sysv_sem_subsys_mutex, sysv_sem_subsys_lck_grp, sysv_sem_subsys_lck_attr);
-}
-
static __inline__ user_time_t
sysv_semtime(void)
{
- struct timeval tv;
+ struct timeval tv;
microtime(&tv);
- return (tv.tv_sec);
+ return tv.tv_sec;
}
/*
semid_ds_kernelto32(struct user_semid_ds *in, struct user32_semid_ds *out)
{
out->sem_perm = in->sem_perm;
- out->sem_base = CAST_DOWN_EXPLICIT(__int32_t,in->sem_base);
+ out->sem_base = CAST_DOWN_EXPLICIT(__int32_t, in->sem_base);
out->sem_nsems = in->sem_nsems;
- out->sem_otime = in->sem_otime; /* XXX loses precision */
- out->sem_ctime = in->sem_ctime; /* XXX loses precision */
+ out->sem_otime = in->sem_otime; /* XXX loses precision */
+ out->sem_ctime = in->sem_ctime; /* XXX loses precision */
}
static void
semid_ds_kernelto64(struct user_semid_ds *in, struct user64_semid_ds *out)
{
out->sem_perm = in->sem_perm;
- out->sem_base = CAST_DOWN_EXPLICIT(__int32_t,in->sem_base);
+ out->sem_base = CAST_DOWN_EXPLICIT(__int32_t, in->sem_base);
out->sem_nsems = in->sem_nsems;
- out->sem_otime = in->sem_otime; /* XXX loses precision */
- out->sem_ctime = in->sem_ctime; /* XXX loses precision */
+ out->sem_otime = in->sem_otime; /* XXX loses precision */
+ out->sem_ctime = in->sem_ctime; /* XXX loses precision */
}
/*
* Entry point for all SEM calls: semctl, semget, semop
*
* Parameters: p Process requesting the call
- * uap User argument descriptor (see below)
- * retval Return value of the selected sem call
+ * uap User argument descriptor (see below)
+ * retval Return value of the selected sem call
*
* Indirect parameters: uap->which sem call to invoke (index in array of sem calls)
- * uap->a2 User argument descriptor
- *
+ * uap->a2 User argument descriptor
+ *
* Returns: 0 Success
* !0 Not success
*
* Implicit returns: retval Return value of the selected sem call
*
* DEPRECATED: This interface should not be used to call the other SEM
- * functions (semctl, semget, semop). The correct usage is
- * to call the other SEM functions directly.
+ * functions (semctl, semget, semop). The correct usage is
+ * to call the other SEM functions directly.
*
*/
int
semsys(struct proc *p, struct semsys_args *uap, int32_t *retval)
{
-
/* The individual calls handling the locking now */
- if (uap->which >= sizeof(semcalls)/sizeof(semcalls[0]))
- return (EINVAL);
- return ((*semcalls[uap->which])(p, &uap->a2, retval));
+ if (uap->which >= sizeof(semcalls) / sizeof(semcalls[0])) {
+ return EINVAL;
+ }
+ return (*semcalls[uap->which])(p, &uap->a2, retval);
}
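/*
 * For reference, userspace normally reaches these handlers through the
 * direct wrappers rather than semsys(); a rough sketch of that direct
 * usage (standard SysV API, values chosen purely for illustration):
 *
 *   #include <sys/sem.h>
 *   int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *   struct sembuf post = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };
 *   if (id != -1 && semop(id, &post, 1) == 0)
 *           (void)semctl(id, 0, IPC_RMID);
 */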
/*
static int
grow_semu_array(int newSize)
{
- register int i;
- register struct sem_undo *newSemu;
+ int i;
+ struct sem_undo *newSemu;
- if (newSize <= seminfo.semmnu)
+ if (newSize <= seminfo.semmnu) {
return 1;
- if (newSize > limitseminfo.semmnu) /* enforce hard limit */
- {
+ }
+ if (newSize > limitseminfo.semmnu) { /* enforce hard limit */
#ifdef SEM_DEBUG
printf("undo structure hard limit of %d reached, requested %d\n",
- limitseminfo.semmnu, newSize);
+ limitseminfo.semmnu, newSize);
#endif
return 0;
}
- newSize = (newSize/SEMMNU_INC + 1) * SEMMNU_INC;
+ newSize = (newSize / SEMMNU_INC + 1) * SEMMNU_INC;
newSize = newSize > limitseminfo.semmnu ? limitseminfo.semmnu : newSize;
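/*
 * The two lines above pick the smallest multiple of SEMMNU_INC that is
 * strictly greater than the request, then clamp it to the hard limit.
 * For example, with a hypothetical increment of 32, a request for 33
 * undo structures becomes (33/32 + 1) * 32 = 64 before clamping.
 */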
#ifdef SEM_DEBUG
printf("growing semu[] from %d to %d\n", seminfo.semmnu, newSize);
#endif
- MALLOC(newSemu, struct sem_undo *, sizeof (struct sem_undo) * newSize,
- M_SYSVSEM, M_WAITOK | M_ZERO);
- if (NULL == newSemu)
- {
+ newSemu = kheap_alloc(KM_SYSVSEM, sizeof(struct sem_undo) * newSize,
+ Z_WAITOK | Z_ZERO);
+ if (NULL == newSemu) {
#ifdef SEM_DEBUG
printf("allocation failed. no changes made.\n");
#endif
return 0;
}
- /* copy the old data to the new array */
- for (i = 0; i < seminfo.semmnu; i++)
- {
+ /* copy the old data to the new array */
+ for (i = 0; i < seminfo.semmnu; i++) {
newSemu[i] = semu[i];
}
/*
* The new elements (from newSemu[i] to newSemu[newSize-1]) have their
- * "un_proc" set to 0 (i.e. NULL) by the M_ZERO flag to MALLOC() above,
- * so they're already marked as "not in use".
+ * "un_proc" set to 0 (i.e. NULL) by the Z_ZERO flag to kheap_alloc
+ * above, so they're already marked as "not in use".
*/
/* Clean up the old array */
- if (semu)
- FREE(semu, M_SYSVSEM);
+ kheap_free(KM_SYSVSEM, semu, sizeof(struct sem_undo) * seminfo.semmnu);
semu = newSemu;
seminfo.semmnu = newSize;
static int
grow_sema_array(int newSize)
{
- register struct semid_kernel *newSema;
- register int i;
+ struct semid_kernel *newSema;
+ int i;
- if (newSize <= seminfo.semmni)
+ if (newSize <= seminfo.semmni) {
return 0;
- if (newSize > limitseminfo.semmni) /* enforce hard limit */
- {
+ }
+ if (newSize > limitseminfo.semmni) { /* enforce hard limit */
#ifdef SEM_DEBUG
printf("identifier hard limit of %d reached, requested %d\n",
- limitseminfo.semmni, newSize);
+ limitseminfo.semmni, newSize);
#endif
return 0;
}
- newSize = (newSize/SEMMNI_INC + 1) * SEMMNI_INC;
+ newSize = (newSize / SEMMNI_INC + 1) * SEMMNI_INC;
newSize = newSize > limitseminfo.semmni ? limitseminfo.semmni : newSize;
#ifdef SEM_DEBUG
printf("growing sema[] from %d to %d\n", seminfo.semmni, newSize);
#endif
- MALLOC(newSema, struct semid_kernel *,
- sizeof (struct semid_kernel) * newSize,
- M_SYSVSEM, M_WAITOK | M_ZERO);
- if (NULL == newSema)
- {
+ newSema = kheap_alloc(KM_SYSVSEM, sizeof(struct semid_kernel) * newSize,
+ Z_WAITOK | Z_ZERO);
+ if (NULL == newSema) {
#ifdef SEM_DEBUG
printf("allocation failed. no changes made.\n");
#endif
}
/* copy over the old ids */
- for (i = 0; i < seminfo.semmni; i++)
- {
+ for (i = 0; i < seminfo.semmni; i++) {
newSema[i] = sema[i];
/* This is a hack. What we really want to be able to
* do is change the value a process is waiting on
* semaphore set is really not available yet, and then
* sleep on the correct, reallocated semid_kernel pointer.
*/
- if (sema[i].u.sem_perm.mode & SEM_ALLOC)
+ if (sema[i].u.sem_perm.mode & SEM_ALLOC) {
wakeup((caddr_t)&sema[i]);
+ }
}
#if CONFIG_MACF
- for (i = seminfo.semmni; i < newSize; i++)
- {
+ for (i = seminfo.semmni; i < newSize; i++) {
mac_sysvsem_label_init(&newSema[i]);
}
#endif
-
+
/*
* The new elements (from newSema[i] to newSema[newSize-1]) have their
- * "sem_base" and "sem_perm.mode" set to 0 (i.e. NULL) by the M_ZERO
- * flag to MALLOC() above, so they're already marked as "not in use".
+ * "sem_base" and "sem_perm.mode" set to 0 (i.e. NULL) by the Z_ZERO
+ * flag to kheap_alloc above, so they're already marked as "not in use".
*/
/* Clean up the old array */
- if (sema)
- FREE(sema, M_SYSVSEM);
+ kheap_free(KM_SYSVSEM, sema,
+ sizeof(struct semid_kernel) * seminfo.semmni);
sema = newSema;
seminfo.semmni = newSize;
struct sem *sem_free;
int i;
- if (new_pool_size < semtot)
+ if (new_pool_size < semtot) {
return 0;
+ }
/* enforce hard limit */
if (new_pool_size > limitseminfo.semmns) {
#ifdef SEM_DEBUG
printf("semaphore hard limit of %d reached, requested %d\n",
- limitseminfo.semmns, new_pool_size);
+ limitseminfo.semmns, new_pool_size);
#endif
return 0;
}
- new_pool_size = (new_pool_size/SEMMNS_INC + 1) * SEMMNS_INC;
+ new_pool_size = (new_pool_size / SEMMNS_INC + 1) * SEMMNS_INC;
new_pool_size = new_pool_size > limitseminfo.semmns ? limitseminfo.semmns : new_pool_size;
#ifdef SEM_DEBUG
printf("growing sem_pool array from %d to %d\n", seminfo.semmns, new_pool_size);
#endif
- MALLOC(new_sem_pool, struct sem *, sizeof (struct sem) * new_pool_size,
- M_SYSVSEM, M_WAITOK | M_ZERO);
+ new_sem_pool = kheap_alloc(KM_SYSVSEM, sizeof(struct sem) * new_pool_size,
+ Z_WAITOK | Z_ZERO);
if (NULL == new_sem_pool) {
#ifdef SEM_DEBUG
printf("allocation failed. no changes made.\n");
}
/* We have our new memory, now copy the old contents over */
- if (sem_pool)
- for(i = 0; i < seminfo.semmns; i++)
+ if (sem_pool) {
+ for (i = 0; i < seminfo.semmns; i++) {
new_sem_pool[i] = sem_pool[i];
+ }
+ }
/* Update our id structures to point to the new semaphores */
- for(i = 0; i < seminfo.semmni; i++) {
- if (sema[i].u.sem_perm.mode & SEM_ALLOC) /* ID in use */
- sema[i].u.sem_base = new_sem_pool +
- (sema[i].u.sem_base - sem_pool);
+ for (i = 0; i < seminfo.semmni; i++) {
+ if (sema[i].u.sem_perm.mode & SEM_ALLOC) { /* ID in use */
+ sema[i].u.sem_base = new_sem_pool +
+ (sema[i].u.sem_base - sem_pool);
+ }
}
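/*
 * (The pointer arithmetic above preserves each id's offset into the
 * pool: a sem_base that was k entries into the old array ends up k
 * entries into the new one.)
 */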
sem_free = sem_pool;
sem_pool = new_sem_pool;
/* clean up the old array */
- if (sem_free != NULL)
- FREE(sem_free, M_SYSVSEM);
+ kheap_free(KM_SYSVSEM, sem_free, sizeof(struct sem) * seminfo.semmns);
seminfo.semmns = new_pool_size;
#ifdef SEM_DEBUG
static int
semu_alloc(struct proc *p)
{
- register int i;
- register struct sem_undo *suptr;
+ int i;
+ struct sem_undo *suptr;
int *supidx;
int attempt;
supidx = &semu_list_idx;
while (*supidx != -1) {
suptr = SEMU(*supidx);
- if (suptr->un_cnt == 0) {
+ if (suptr->un_cnt == 0) {
suptr->un_proc = NULL;
*supidx = suptr->un_next_idx;
did_something = 1;
- } else
+ } else {
supidx = &(suptr->un_next_idx);
+ }
}
/* If we didn't free anything. Try expanding
* then fail. We expand last to get the
* most reuse out of existing resources.
*/
- if (!did_something)
- if (!grow_semu_array(seminfo.semmnu + 1))
+ if (!did_something) {
+ if (!grow_semu_array(seminfo.semmnu + 1)) {
return -1;
+ }
+ }
} else {
/*
* The second pass failed even though we freed
*/
static int
semundo_adjust(struct proc *p, int *supidx, int semid,
- int semnum, int adjval)
+ int semnum, int adjval)
{
- register struct sem_undo *suptr;
+ struct sem_undo *suptr;
int suidx;
- register struct undo *sueptr, **suepptr, *new_sueptr;
+ struct undo *sueptr, **suepptr, *new_sueptr;
int i;
/*
}
}
if (suidx == -1) {
- if (adjval == 0)
- return(0);
+ if (adjval == 0) {
+ return 0;
+ }
suidx = semu_alloc(p);
- if (suidx == -1)
- return(ENOSPC);
+ if (suidx == -1) {
+ return ENOSPC;
+ }
*supidx = suidx;
}
}
suptr = SEMU(suidx);
new_sueptr = NULL;
for (i = 0, suepptr = &suptr->un_ent, sueptr = suptr->un_ent;
- i < suptr->un_cnt;
- i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) {
- if (sueptr->une_id != semid || sueptr->une_num != semnum)
+ i < suptr->un_cnt;
+ i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) {
+ if (sueptr->une_id != semid || sueptr->une_num != semnum) {
continue;
- if (adjval == 0)
+ }
+ if (adjval == 0) {
sueptr->une_adjval = 0;
- else
+ } else {
sueptr->une_adjval += adjval;
+ }
if (sueptr->une_adjval == 0) {
suptr->un_cnt--;
*suepptr = sueptr->une_next;
- FREE(sueptr, M_SYSVSEM);
- sueptr = NULL;
+ kheap_free(KM_SYSVSEM, sueptr, sizeof(struct undo));
}
return 0;
}
}
/* allocate a new semaphore undo entry */
- MALLOC(new_sueptr, struct undo *, sizeof (struct undo),
- M_SYSVSEM, M_WAITOK);
+ new_sueptr = kheap_alloc(KM_SYSVSEM, sizeof(struct undo), Z_WAITOK);
if (new_sueptr == NULL) {
return ENOMEM;
}
if (semnum == -1 || sueptr->une_num == semnum) {
suptr->un_cnt--;
*suepptr = sueptr->une_next;
- FREE(sueptr, M_SYSVSEM);
+ kheap_free(KM_SYSVSEM, sueptr, sizeof(struct undo));
sueptr = *suepptr;
continue;
}
- if (semnum != -1)
+ if (semnum != -1) {
break;
+ }
}
i++;
suepptr = &sueptr->une_next;
int i, rval, eval;
struct user_semid_ds sbuf;
struct semid_kernel *semakptr;
-
+
AUDIT_ARG(svipc_cmd, cmd);
AUDIT_ARG(svipc_id, semid);
}
#if CONFIG_MACF
eval = mac_sysvsem_check_semctl(cred, semakptr, cmd);
- if (eval)
+ if (eval) {
goto semctlout;
+ }
#endif
eval = 0;
switch (cmd) {
case IPC_RMID:
- if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M)))
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M))) {
goto semctlout;
+ }
semakptr->u.sem_perm.cuid = kauth_cred_getuid(cred);
semakptr->u.sem_perm.uid = kauth_cred_getuid(cred);
semtot -= semakptr->u.sem_nsems;
- for (i = semakptr->u.sem_base - sem_pool; i < semtot; i++)
+ for (i = semakptr->u.sem_base - sem_pool; i < semtot; i++) {
sem_pool[i] = sem_pool[i + semakptr->u.sem_nsems];
+ }
for (i = 0; i < seminfo.semmni; i++) {
if ((sema[i].u.sem_perm.mode & SEM_ALLOC) &&
- sema[i].u.sem_base > semakptr->u.sem_base)
+ sema[i].u.sem_base > semakptr->u.sem_base) {
sema[i].u.sem_base -= semakptr->u.sem_nsems;
+ }
}
semakptr->u.sem_perm.mode = 0;
#if CONFIG_MACF
break;
case IPC_SET:
- if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M)))
- goto semctlout;
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M))) {
+ goto semctlout;
+ }
if (IS_64BIT_PROCESS(p)) {
struct user64_semid_ds ds64;
eval = copyin(user_arg.buf, &ds32, sizeof(ds32));
semid_ds_32tokernel(&ds32, &sbuf);
}
-
+
if (eval != 0) {
goto semctlout;
}
break;
case IPC_STAT:
- if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R)))
- goto semctlout;
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
if (IS_64BIT_PROCESS(p)) {
struct user64_semid_ds semid_ds64;
break;
case GETNCNT:
- if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R)))
- goto semctlout;
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
eval = EINVAL;
goto semctlout;
break;
case GETPID:
- if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R)))
- goto semctlout;
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
eval = EINVAL;
goto semctlout;
break;
case GETVAL:
- if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R)))
- goto semctlout;
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
eval = EINVAL;
goto semctlout;
break;
case GETALL:
- if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R)))
- goto semctlout;
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
/* XXXXXXXXXXXXXXXX TBD XXXXXXXXXXXXXXXX */
for (i = 0; i < semakptr->u.sem_nsems; i++) {
/* XXX could be done in one go... */
eval = copyout((caddr_t)&semakptr->u.sem_base[i].semval,
user_arg.array + (i * sizeof(unsigned short)),
sizeof(unsigned short));
- if (eval != 0)
+ if (eval != 0) {
break;
+ }
}
break;
case GETZCNT:
- if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R)))
- goto semctlout;
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
+ goto semctlout;
+ }
if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
eval = EINVAL;
goto semctlout;
break;
case SETVAL:
- if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W)))
- {
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W))) {
#ifdef SEM_DEBUG
printf("Invalid credentials for write\n");
#endif
- goto semctlout;
+ goto semctlout;
}
- if (semnum < 0 || semnum >= semakptr->u.sem_nsems)
- {
+ if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
#ifdef SEM_DEBUG
printf("Invalid number out of range for set\n");
#endif
eval = EINVAL;
goto semctlout;
}
-
+
/*
* Cast down a pointer instead of using 'val' member directly
* to avoid introducing endianness and a pad field into the
* header file. Ugly, but it works.
*/
u_int newsemval = CAST_DOWN_EXPLICIT(u_int, user_arg.buf);
-
+
/*
- * The check is being performed as unsigned values to match
+ * The check is being performed as unsigned values to match
* eventual destination
- */
- if (newsemval > (u_int)seminfo.semvmx)
- {
+ */
+ if (newsemval > (u_int)seminfo.semvmx) {
#ifdef SEM_DEBUG
printf("Out of range sem value for set\n");
#endif
break;
case SETALL:
- if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W)))
- goto semctlout;
+ if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W))) {
+ goto semctlout;
+ }
/*** XXXXXXXXXXXX TBD ********/
for (i = 0; i < semakptr->u.sem_nsems; i++) {
/* XXX could be done in one go... */
eval = copyin(user_arg.array + (i * sizeof(unsigned short)),
(caddr_t)&semakptr->u.sem_base[i].semval,
sizeof(unsigned short));
- if (eval != 0)
+ if (eval != 0) {
break;
+ }
semakptr->u.sem_base[i].sempid = p->p_pid;
}
/* XXX scottl Should there be a MAC call here? */
break;
default:
- eval = EINVAL;
- goto semctlout;
+ eval = EINVAL;
+ goto semctlout;
}
- if (eval == 0)
+ if (eval == 0) {
*retval = rval;
+ }
semctlout:
SYSV_SEM_SUBSYS_UNLOCK();
- return(eval);
+ return eval;
}
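/*
 * From userspace the SETVAL/GETVAL cases above are reached through the
 * variadic semctl() wrapper; a rough sketch, assuming an existing id and
 * a union semun (declared by the caller where the headers do not provide
 * one):
 *
 *   union semun arg;
 *   arg.val = 1;
 *   if (semctl(id, 0, SETVAL, arg) == 0)
 *           printf("value now %d\n", semctl(id, 0, GETVAL));
 */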
int
kauth_cred_t cred = kauth_cred_get();
#ifdef SEM_DEBUG
- if (key != IPC_PRIVATE)
+ if (key != IPC_PRIVATE) {
printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
- else
+ } else {
printf("semget(IPC_PRIVATE, %d, 0%o)\n", nsems, semflg);
+ }
#endif
SYSV_SEM_SUBSYS_LOCK();
-
+
if (key != IPC_PRIVATE) {
for (semid = 0; semid < seminfo.semmni; semid++) {
if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) &&
- sema[semid].u.sem_perm._key == key)
+ sema[semid].u.sem_perm._key == key) {
break;
+ }
}
if (semid < seminfo.semmni) {
#ifdef SEM_DEBUG
printf("found public key\n");
#endif
if ((eval = ipcperm(cred, &sema[semid].u.sem_perm,
- semflg & 0700)))
+ semflg & 0700))) {
goto semgetout;
+ }
if (nsems < 0 || sema[semid].u.sem_nsems < nsems) {
#ifdef SEM_DEBUG
printf("too small\n");
}
#if CONFIG_MACF
eval = mac_sysvsem_check_semget(cred, &sema[semid]);
- if (eval)
+ if (eval) {
goto semgetout;
+ }
#endif
goto found;
}
}
}
for (semid = 0; semid < seminfo.semmni; semid++) {
- if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) == 0)
+ if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) == 0) {
break;
+ }
}
if (semid == seminfo.semmni) {
#ifdef SEM_DEBUG
printf("no more id's available\n");
#endif
- if (!grow_sema_array(seminfo.semmni + 1))
- {
+ if (!grow_sema_array(seminfo.semmni + 1)) {
#ifdef SEM_DEBUG
printf("failed to grow sema array\n");
#endif
sema[semid].u.sem_base = &sem_pool[semtot];
semtot += nsems;
bzero(sema[semid].u.sem_base,
- sizeof(sema[semid].u.sem_base[0])*nsems);
+ sizeof(sema[semid].u.sem_base[0]) * nsems);
#if CONFIG_MACF
mac_sysvsem_label_associate(cred, &sema[semid]);
#endif
semgetout:
SYSV_SEM_SUBSYS_UNLOCK();
- return(eval);
+ return eval;
}
int
int semid = uap->semid;
int nsops = uap->nsops;
struct sembuf sops[seminfo.semopm];
- register struct semid_kernel *semakptr;
- register struct sembuf *sopptr = NULL; /* protected by 'semptr' */
- register struct sem *semptr = NULL; /* protected by 'if' */
+ struct semid_kernel *semakptr;
+ struct sembuf *sopptr = NULL; /* protected by 'semptr' */
+ struct sem *semptr = NULL; /* protected by 'if' */
int supidx = -1;
int i, j, eval;
int do_wakeup, do_undos;
printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops);
#endif
- semid = IPCID_TO_IX(semid); /* Convert back to zero origin */
+ semid = IPCID_TO_IX(semid); /* Convert back to zero origin */
if (semid < 0 || semid >= seminfo.semmni) {
eval = EINVAL;
eval = E2BIG;
goto semopout;
}
-
+
/* OK for LP64, since sizeof(struct sembuf) is currently invariant */
if ((eval = copyin(uap->sops, &sops, nsops * sizeof(struct sembuf))) != 0) {
#ifdef SEM_DEBUG
/*
* Initial pass thru sops to see what permissions are needed.
*/
- j = 0; /* permission needed */
- for (i = 0; i < nsops; i++)
+ j = 0; /* permission needed */
+ for (i = 0; i < nsops; i++) {
j |= (sops[i].sem_op == 0) ? SEM_R : SEM_A;
+ }
/*
* The MAC hook checks whether the thread has read (and possibly
* sopptr->sem_op value.
*/
eval = mac_sysvsem_check_semop(kauth_cred_get(), semakptr, j);
- if (eval)
+ if (eval) {
goto semopout;
+ }
#endif
/*
} else {
semptr->semval += sopptr->sem_op;
if (semptr->semval == 0 &&
- semptr->semzcnt > 0)
+ semptr->semzcnt > 0) {
do_wakeup = 1;
+ }
}
- if (sopptr->sem_flg & SEM_UNDO)
+ if (sopptr->sem_flg & SEM_UNDO) {
do_undos = 1;
+ }
} else if (sopptr->sem_op == 0) {
if (semptr->semval > 0) {
#ifdef SEM_DEBUG
break;
}
} else {
- if (semptr->semncnt > 0)
+ if (semptr->semncnt > 0) {
do_wakeup = 1;
+ }
semptr->semval += sopptr->sem_op;
- if (sopptr->sem_flg & SEM_UNDO)
+ if (sopptr->sem_flg & SEM_UNDO) {
do_undos = 1;
+ }
}
}
/*
* Did we get through the entire vector?
*/
- if (i >= nsops)
+ if (i >= nsops) {
goto done;
+ }
/*
* No ... rollback anything that we've already done
*/
#ifdef SEM_DEBUG
- printf("semop: rollback 0 through %d\n", i-1);
+ printf("semop: rollback 0 through %d\n", i - 1);
#endif
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
semakptr->u.sem_base[sops[j].sem_num].semval -=
sops[j].sem_op;
+ }
/*
* If the request that we couldn't satisfy has the
goto semopout;
}
- if (sopptr->sem_op == 0)
+ if (sopptr->sem_op == 0) {
semptr->semzcnt++;
- else
+ } else {
semptr->semncnt++;
+ }
#ifdef SEM_DEBUG
printf("semop: good night!\n");
* waiting for. We will get the lock back after we
* wake up.
*/
- eval = msleep((caddr_t)semakptr, &sysv_sem_subsys_mutex , (PZERO - 4) | PCATCH,
+ eval = msleep((caddr_t)semakptr, &sysv_sem_subsys_mutex, (PZERO - 4) | PCATCH,
"semwait", 0);
-
+
#ifdef SEM_DEBUG
printf("semop: good morning (eval=%d)!\n", eval);
#endif
/*
* IMPORTANT: while we were asleep, the semaphore array might
* have been reallocated somewhere else (see grow_sema_array()).
- * When we wake up, we have to re-lookup the semaphore
+ * When we wake up, we have to re-lookup the semaphore
* structures and re-validate them.
*/
* XXX POSIX: Third test this 'if' and 'EINTR' precedence may
* fail testing; if so, we will need to revert this code.
*/
- semakptr = &sema[semid]; /* sema may have been reallocated */
+ semakptr = &sema[semid]; /* sema may have been reallocated */
if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 ||
semakptr->u.sem_perm._seq != IPCID_TO_SEQ(uap->semid) ||
sopptr->sem_num >= semakptr->u.sem_nsems) {
#ifdef EIDRM
eval = EIDRM;
#else
- eval = EINVAL; /* Ancient past */
+ eval = EINVAL; /* Ancient past */
#endif
}
goto semopout;
* we were sleeping, updating our sem_base pointer.
*/
semptr = &semakptr->u.sem_base[sopptr->sem_num];
- if (sopptr->sem_op == 0)
+ if (sopptr->sem_op == 0) {
semptr->semzcnt--;
- else
+ } else {
semptr->semncnt--;
+ }
if (eval != 0) { /* EINTR */
goto semopout;
*/
int adjval;
- if ((sops[i].sem_flg & SEM_UNDO) == 0)
+ if ((sops[i].sem_flg & SEM_UNDO) == 0) {
continue;
+ }
adjval = sops[i].sem_op;
- if (adjval == 0)
+ if (adjval == 0) {
continue;
+ }
eval = semundo_adjust(p, &supidx, semid,
sops[i].sem_num, -adjval);
- if (eval == 0)
+ if (eval == 0) {
continue;
+ }
/*
* Oh-Oh! We ran out of either sem_undo's or undo's.
* out of space as we roll things back out.
*/
for (j = i - 1; j >= 0; j--) {
- if ((sops[j].sem_flg & SEM_UNDO) == 0)
+ if ((sops[j].sem_flg & SEM_UNDO) == 0) {
continue;
+ }
adjval = sops[j].sem_op;
- if (adjval == 0)
+ if (adjval == 0) {
continue;
+ }
if (semundo_adjust(p, &supidx, semid,
- sops[j].sem_num, adjval) != 0)
+ sops[j].sem_num, adjval) != 0) {
panic("semop - can't undo undos");
+ }
}
- for (j = 0; j < nsops; j++)
+ for (j = 0; j < nsops; j++) {
semakptr->u.sem_base[sops[j].sem_num].semval -=
sops[j].sem_op;
+ }
#ifdef SEM_DEBUG
printf("eval = %d from semundo_adjust\n", eval);
eval = 0;
semopout:
SYSV_SEM_SUBSYS_UNLOCK();
- return(eval);
+ return eval;
}
/*
void
semexit(struct proc *p)
{
- register struct sem_undo *suptr = NULL;
+ struct sem_undo *suptr = NULL;
int suidx;
int *supidx;
int did_something;
*/
SYSV_SEM_SUBSYS_LOCK();
- if (!sem_pool)
- {
+ if (!sem_pool) {
SYSV_SEM_SUBSYS_UNLOCK();
return;
}
for (supidx = &semu_list_idx; (suidx = *supidx) != -1;
supidx = &suptr->un_next_idx) {
suptr = SEMU(suidx);
- if (suptr->un_proc == p)
+ if (suptr->un_proc == p) {
break;
+ }
}
- if (suidx == -1)
+ if (suidx == -1) {
goto unlock;
+ }
#ifdef SEM_DEBUG
printf("proc @%08x has undo structure with %d entries\n", p,
adjval = sueptr->une_adjval;
semakptr = &sema[semid];
- if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0)
+ if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0) {
panic("semexit - semid not allocated");
- if (semnum >= semakptr->u.sem_nsems)
+ }
+ if (semnum >= semakptr->u.sem_nsems) {
panic("semexit - semnum out of range");
+ }
#ifdef SEM_DEBUG
printf("semexit: %08x id=%d num=%d(adj=%d) ; sem=%d\n",
- suptr->un_proc,
- semid,
- semnum,
- adjval,
- semakptr->u.sem_base[semnum].semval);
+ suptr->un_proc,
+ semid,
+ semnum,
+ adjval,
+ semakptr->u.sem_base[semnum].semval);
#endif
if (adjval < 0) {
- if (semakptr->u.sem_base[semnum].semval < -adjval)
+ if (semakptr->u.sem_base[semnum].semval < -adjval) {
semakptr->u.sem_base[semnum].semval = 0;
- else
+ } else {
semakptr->u.sem_base[semnum].semval +=
adjval;
- } else
+ }
+ } else {
semakptr->u.sem_base[semnum].semval += adjval;
+ }
- /* Maybe we should build a list of semakptr's to wake
- * up, finish all access to data structures, release the
- * subsystem lock, and wake all the processes. Something
- * to think about. It wouldn't buy us anything unless
- * wakeup had the potential to block, or the syscall
- * funnel state was changed to allow multiple threads
- * in the BSD code at once.
- */
+ /* Maybe we should build a list of semakptr's to wake
+ * up, finish all access to data structures, release the
+ * subsystem lock, and wake all the processes. Something
+ * to think about.
+ */
#ifdef SEM_WAKEUP
sem_wakeup((caddr_t)semakptr);
#else
#endif
suptr->un_cnt--;
suptr->un_ent = sueptr->une_next;
- FREE(sueptr, M_SYSVSEM);
- sueptr = NULL;
+ kheap_free(KM_SYSVSEM, sueptr, sizeof(struct undo));
}
}
unlock:
/*
- * There is a semaphore leak (i.e. memory leak) in this code.
- * We should be deleting the IPC_PRIVATE semaphores when they are
- * no longer needed, and we dont. We would have to track which processes
- * know about which IPC_PRIVATE semaphores, updating the list after
- * every fork. We can't just delete them semaphore when the process
- * that created it dies, because that process may well have forked
- * some children. So we need to wait until all of it's children have
- * died, and so on. Maybe we should tag each IPC_PRIVATE sempahore
- * with the creating group ID, count the number of processes left in
- * that group, and delete the semaphore when the group is gone.
- * Until that code gets implemented we will leak IPC_PRIVATE semaphores.
- * There is an upper bound on the size of our semaphore array, so
- * leaking the semaphores should not work as a DOS attack.
- *
- * Please note that the original BSD code this file is based on had the
- * same leaky semaphore problem.
- */
+ * There is a semaphore leak (i.e. memory leak) in this code.
+ * We should be deleting the IPC_PRIVATE semaphores when they are
+ * no longer needed, and we don't. We would have to track which processes
+ * know about which IPC_PRIVATE semaphores, updating the list after
+ * every fork. We can't just delete the semaphore when the process
+ * that created it dies, because that process may well have forked
+ * some children. So we need to wait until all of its children have
+ * died, and so on. Maybe we should tag each IPC_PRIVATE semaphore
+ * with the creating group ID, count the number of processes left in
+ * that group, and delete the semaphore when the group is gone.
+ * Until that code gets implemented we will leak IPC_PRIVATE semaphores.
+ * There is an upper bound on the size of our semaphore array, so
+ * leaking the semaphores should not work as a DOS attack.
+ *
+ * Please note that the original BSD code this file is based on had the
+ * same leaky semaphore problem.
+ */
SYSV_SEM_SUBSYS_UNLOCK();
}
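/*
 * The undo processing above is what gives SEM_UNDO its effect: a caller
 * that decrements a semaphore with, for example,
 *
 *   struct sembuf take = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *   semop(id, &take, 1);
 *
 * and then exits without posting it back has the recorded +1 adjustment
 * applied here, so the value is restored for other waiters (semaphore
 * numbers and values here are purely illustrative).
 */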
/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
- struct sysctl_req *req) */
+ * struct sysctl_req *req) */
static int
sysctl_seminfo(__unused struct sysctl_oid *oidp, void *arg1,
- __unused int arg2, struct sysctl_req *req)
+ __unused int arg2, struct sysctl_req *req)
{
int error = 0;
error = SYSCTL_OUT(req, arg1, sizeof(int));
- if (error || req->newptr == USER_ADDR_NULL)
- return(error);
+ if (error || req->newptr == USER_ADDR_NULL) {
+ return error;
+ }
SYSV_SEM_SUBSYS_LOCK();
/* Set the values only if shared memory is not initialised */
- if ((sem_pool == NULL) &&
- (sema == NULL) &&
- (semu == NULL) &&
- (semu_list_idx == -1)) {
- if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) {
- goto out;
- }
- } else
+ if ((sem_pool == NULL) &&
+ (sema == NULL) &&
+ (semu == NULL) &&
+ (semu_list_idx == -1)) {
+ if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) {
+ goto out;
+ }
+ } else {
error = EINVAL;
+ }
out:
SYSV_SEM_SUBSYS_UNLOCK();
- return(error);
-
+ return error;
}
/* SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV"); */
extern struct sysctl_oid_list sysctl__kern_sysv_children;
SYSCTL_PROC(_kern_sysv, OID_AUTO, semmni, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
- &limitseminfo.semmni, 0, &sysctl_seminfo ,"I","semmni");
+ &limitseminfo.semmni, 0, &sysctl_seminfo, "I", "semmni");
SYSCTL_PROC(_kern_sysv, OID_AUTO, semmns, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
- &limitseminfo.semmns, 0, &sysctl_seminfo ,"I","semmns");
+ &limitseminfo.semmns, 0, &sysctl_seminfo, "I", "semmns");
SYSCTL_PROC(_kern_sysv, OID_AUTO, semmnu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
- &limitseminfo.semmnu, 0, &sysctl_seminfo ,"I","semmnu");
+ &limitseminfo.semmnu, 0, &sysctl_seminfo, "I", "semmnu");
SYSCTL_PROC(_kern_sysv, OID_AUTO, semmsl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
- &limitseminfo.semmsl, 0, &sysctl_seminfo ,"I","semmsl");
-
+ &limitseminfo.semmsl, 0, &sysctl_seminfo, "I", "semmsl");
+
SYSCTL_PROC(_kern_sysv, OID_AUTO, semume, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
- &limitseminfo.semume, 0, &sysctl_seminfo ,"I","semume");
+ &limitseminfo.semume, 0, &sysctl_seminfo, "I", "semume");
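/*
 * The limits above are exposed as sysctl tunables under kern.sysv and can
 * only be changed before the subsystem allocates its arrays (see the
 * initialisation check in sysctl_seminfo() above); for example, from a
 * shell:
 *
 *   sysctl kern.sysv.semmns
 *   sysctl -w kern.sysv.semmns=512      (value chosen only as an example)
 */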
static int
IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
- __unused int arg2, struct sysctl_req *req)
+ __unused int arg2, struct sysctl_req *req)
{
int error;
int cursor;
union {
struct user32_IPCS_command u32;
struct user_IPCS_command u64;
- } ipcs;
- struct user32_semid_ds semid_ds32; /* post conversion, 32 bit version */
- struct user64_semid_ds semid_ds64; /* post conversion, 64 bit version */
+ } ipcs = { };
+ struct user32_semid_ds semid_ds32 = { }; /* post conversion, 32 bit version */
+ struct user64_semid_ds semid_ds64 = { }; /* post conversion, 64 bit version */
void *semid_dsp;
size_t ipcs_sz;
size_t semid_ds_sz;
/* Copy in the command structure */
if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
- return(error);
+ return error;
}
- if (!IS_64BIT_PROCESS(p)) /* convert in place */
+ if (!IS_64BIT_PROCESS(p)) { /* convert in place */
ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);
+ }
/* Let us version this interface... */
if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
- return(EINVAL);
+ return EINVAL;
}
SYSV_SEM_SUBSYS_LOCK();
- switch(ipcs.u64.ipcs_op) {
- case IPCS_SEM_CONF: /* Obtain global configuration data */
+ switch (ipcs.u64.ipcs_op) {
+ case IPCS_SEM_CONF: /* Obtain global configuration data */
if (ipcs.u64.ipcs_datalen != sizeof(struct seminfo)) {
error = ERANGE;
break;
}
- if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */
+ if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */
error = EINVAL;
break;
}
error = copyout(&seminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
break;
- case IPCS_SEM_ITER: /* Iterate over existing segments */
+ case IPCS_SEM_ITER: /* Iterate over existing segments */
cursor = ipcs.u64.ipcs_cursor;
if (cursor < 0 || cursor >= seminfo.semmni) {
error = ERANGE;
break;
}
- if (ipcs.u64.ipcs_datalen != (int)semid_ds_sz ) {
+ if (ipcs.u64.ipcs_datalen != (int)semid_ds_sz) {
error = EINVAL;
break;
}
- for( ; cursor < seminfo.semmni; cursor++) {
- if (sema[cursor].u.sem_perm.mode & SEM_ALLOC)
+ for (; cursor < seminfo.semmni; cursor++) {
+ if (sema[cursor].u.sem_perm.mode & SEM_ALLOC) {
break;
+ }
continue;
}
if (cursor == seminfo.semmni) {
break;
}
- semid_dsp = &sema[cursor].u; /* default: 64 bit */
+ semid_dsp = &sema[cursor].u; /* default: 64 bit */
/*
* If necessary, convert the 64 bit kernel segment
/* update cursor */
ipcs.u64.ipcs_cursor = cursor + 1;
- if (!IS_64BIT_PROCESS(p)) /* convert in place */
- ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t,ipcs.u64.ipcs_data);
+ if (!IS_64BIT_PROCESS(p)) { /* convert in place */
+ ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t, ipcs.u64.ipcs_data);
+ }
error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
}
break;
}
SYSV_SEM_SUBSYS_UNLOCK();
- return(error);
+ return error;
}
SYSCTL_DECL(_kern_sysv_ipcs);
SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
- 0, 0, IPCS_sem_sysctl,
- "S,IPCS_sem_command",
- "ipcs sem command interface");
+ 0, 0, IPCS_sem_sysctl,
+ "S,IPCS_sem_command",
+ "ipcs sem command interface");
#endif /* SYSV_SEM */