/* Update our id structures to point to the new semaphores */
for(i = 0; i < seminfo.semmni; i++) {
if (sema[i].u.sem_perm.mode & SEM_ALLOC) /* ID in use */
- sema[i].u.sem_base += (new_sem_pool - sem_pool);
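+ /*
+ * Recompute sem_base as an offset into the old pool applied to the
+ * new pool; equivalent to the old '+=' form, but the subtraction
+ * stays within a single allocation.
+ */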
+ sema[i].u.sem_base = new_sem_pool +
+ (sema[i].u.sem_base - sem_pool);
}
sem_free = sem_pool;
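+ /*
+ * Zero the user-visible structures before filling them in so that
+ * padding and unset fields are not copied out to user space
+ * uninitialized.
+ */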
if (IS_64BIT_PROCESS(p)) {
struct user64_semid_ds semid_ds64;
+ bzero(&semid_ds64, sizeof(semid_ds64));
semid_ds_kernelto64(&semakptr->u, &semid_ds64);
eval = copyout(&semid_ds64, user_arg.buf, sizeof(semid_ds64));
} else {
struct user32_semid_ds semid_ds32;
+ bzero(&semid_ds32, sizeof(semid_ds32));
semid_ds_kernelto32(&semakptr->u, &semid_ds32);
eval = copyout(&semid_ds32, user_arg.buf, sizeof(semid_ds32));
}
eval = EINVAL;
goto semctlout;
}
+
/*
* Cast down a pointer instead of using 'val' member directly
* to avoid introducing endianness and a pad field into the
* header file. Ugly, but it works.
*/
- semakptr->u.sem_base[semnum].semval = CAST_DOWN_EXPLICIT(int,user_arg.buf);
+ u_int newsemval = CAST_DOWN_EXPLICIT(u_int, user_arg.buf);
+
+ /*
+ * The check is performed on unsigned values to match the
+ * (unsigned) eventual destination.
+ */
+ if (newsemval > (u_int)seminfo.semvmx) {
+#ifdef SEM_DEBUG
+ printf("Out of range sem value for set\n");
+#endif
+ eval = ERANGE;
+ goto semctlout;
+ }
+ semakptr->u.sem_base[semnum].semval = newsemval;
semakptr->u.sem_base[semnum].sempid = p->p_pid;
/* XXX scottl Should there be a MAC call here? */
semundo_clear(semid, semnum);
sema[semid].u.sem_perm._key = key;
sema[semid].u.sem_perm.cuid = kauth_cred_getuid(cred);
sema[semid].u.sem_perm.uid = kauth_cred_getuid(cred);
- sema[semid].u.sem_perm.cgid = cred->cr_gid;
- sema[semid].u.sem_perm.gid = cred->cr_gid;
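+ /* go through the kauth accessor instead of reading the credential directly */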
+ sema[semid].u.sem_perm.cgid = kauth_cred_getgid(cred);
+ sema[semid].u.sem_perm.gid = kauth_cred_getgid(cred);
sema[semid].u.sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
sema[semid].u.sem_perm._seq =
(sema[semid].u.sem_perm._seq + 1) & 0x7fff;
{
int semid = uap->semid;
int nsops = uap->nsops;
- struct sembuf sops[MAX_SOPS];
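+ /* variable-length, sized by the run-time limit seminfo.semopm; nsops is checked against the same limit below */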
+ struct sembuf sops[seminfo.semopm];
register struct semid_kernel *semakptr;
register struct sembuf *sopptr = NULL; /* protected by 'semptr' */
register struct sem *semptr = NULL; /* protected by 'if' */
goto semopout;
}
- if (nsops < 0 || nsops > MAX_SOPS) {
+ if (nsops < 0 || nsops > seminfo.semopm) {
#ifdef SEM_DEBUG
- printf("too many sops (max=%d, nsops=%d)\n", MAX_SOPS, nsops);
+ printf("too many sops (max=%d, nsops=%d)\n",
+ seminfo.semopm, nsops);
#endif
eval = E2BIG;
goto semopout;
}
+
+ /* OK for LP64, since sizeof(struct sembuf) is currently invariant */
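+ /* Copy the operations in up front; the MAC permission pass below examines them. */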
+ if ((eval = copyin(uap->sops, &sops, nsops * sizeof(struct sembuf))) != 0) {
+#ifdef SEM_DEBUG
+ printf("eval = %d from copyin(%08x, %08x, %ld)\n", eval,
+ uap->sops, &sops, nsops * sizeof(struct sembuf));
+#endif
+ goto semopout;
+ }
#if CONFIG_MACF
/*
goto semopout;
#endif
- /* OK for LP64, since sizeof(struct sembuf) is currently invariant */
- if ((eval = copyin(uap->sops, &sops, nsops * sizeof(struct sembuf))) != 0) {
-#ifdef SEM_DEBUG
- printf("eval = %d from copyin(%08x, %08x, %ld)\n", eval,
- uap->sops, &sops, nsops * sizeof(struct sembuf));
-#endif
- goto semopout;
- }
-
/*
* Loop trying to satisfy the vector of requests.
* If we reach a point where we must wait, any requests already
/* Maybe we should build a list of semakptr's to wake
* up, finish all access to data structures, release the
* subsystem lock, and wake all the processes. Something
- * to think about. It wouldn't buy us anything unless
- * wakeup had the potential to block, or the syscall
- * funnel state was changed to allow multiple threads
- * in the BSD code at once.
+ * to think about.
*/
#ifdef SEM_WAKEUP
sem_wakeup((caddr_t)semakptr);
/* SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV"); */
extern struct sysctl_oid_list sysctl__kern_sysv_children;
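+/* CTLFLAG_LOCKED: these handlers perform their own locking */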
-SYSCTL_PROC(_kern_sysv, OID_AUTO, semmni, CTLTYPE_INT | CTLFLAG_RW,
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmni, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
&limitseminfo.semmni, 0, &sysctl_seminfo ,"I","semmni");
-SYSCTL_PROC(_kern_sysv, OID_AUTO, semmns, CTLTYPE_INT | CTLFLAG_RW,
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmns, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
&limitseminfo.semmns, 0, &sysctl_seminfo ,"I","semmns");
-SYSCTL_PROC(_kern_sysv, OID_AUTO, semmnu, CTLTYPE_INT | CTLFLAG_RW,
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmnu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
&limitseminfo.semmnu, 0, &sysctl_seminfo ,"I","semmnu");
-SYSCTL_PROC(_kern_sysv, OID_AUTO, semmsl, CTLTYPE_INT | CTLFLAG_RW,
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semmsl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
&limitseminfo.semmsl, 0, &sysctl_seminfo ,"I","semmsl");
-SYSCTL_PROC(_kern_sysv, OID_AUTO, semume, CTLTYPE_INT | CTLFLAG_RW,
+SYSCTL_PROC(_kern_sysv, OID_AUTO, semume, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
&limitseminfo.semume, 0, &sysctl_seminfo ,"I","semume");
* descriptor to a 32 bit user one.
*/
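+ /* Zero the conversion buffers so struct padding is not copied out uninitialized. */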
if (!IS_64BIT_PROCESS(p)) {
+ bzero(&semid_ds32, sizeof(semid_ds32));
semid_ds_kernelto32(semid_dsp, &semid_ds32);
semid_dsp = &semid_ds32;
} else {
+ bzero(&semid_ds64, sizeof(semid_ds64));
semid_ds_kernelto64(semid_dsp, &semid_ds64);
semid_dsp = &semid_ds64;
}
}
SYSCTL_DECL(_kern_sysv_ipcs);
-SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW|CTLFLAG_ANYBODY,
+SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
0, 0, IPCS_sem_sysctl,
"S,IPCS_sem_command",
"ipcs sem command interface");