1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Implementation of SVID semaphores
30 *
31 * Author: Daniel Boulet
32 *
33 * This software is provided ``AS IS'' without any warranties of any kind.
34 */
35 /*
36 * John Bellardo modified the implementation for Darwin. 12/2000
37 */
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/proc_internal.h>
43 #include <sys/kauth.h>
44 #include <sys/sem_internal.h>
45 #include <sys/malloc.h>
46 #include <mach/mach_types.h>
47
48 #include <sys/filedesc.h>
49 #include <sys/file_internal.h>
50 #include <sys/sysctl.h>
51 #include <sys/ipcs.h>
52 #include <sys/sysent.h>
53 #include <sys/sysproto.h>
54
55 #include <bsm/audit_kernel.h>
56
57
58 /* Uncomment this line to see the debugging output */
59 /* #define SEM_DEBUG */
60
61 #define M_SYSVSEM M_TEMP
62
63
64 /* Hard system limits to avoid resource starvation / DOS attacks.
65 * These are not needed if we can make the semaphore pages swappable.
66 */
67 static struct seminfo limitseminfo = {
68 SEMMAP, /* # of entries in semaphore map */
69 SEMMNI, /* # of semaphore identifiers */
70 SEMMNS, /* # of semaphores in system */
71 SEMMNU, /* # of undo structures in system */
72 SEMMSL, /* max # of semaphores per id */
73 SEMOPM, /* max # of operations per semop call */
74 SEMUME, /* max # of undo entries per process */
75 SEMUSZ, /* size in bytes of undo structure */
76 SEMVMX, /* semaphore maximum value */
77 SEMAEM /* adjust on exit max value */
78 };
79
80 /* Current system allocations. We use this structure to track how many
81 * resources we have allocated so far. This way we can set large hard limits
82 * and not allocate the memory for them up front.
83 */
84 struct seminfo seminfo = {
85 SEMMAP, /* Unused, # of entries in semaphore map */
86 0, /* # of semaphore identifiers */
87 0, /* # of semaphores in system */
88 0, /* # of undo entries in system */
89 SEMMSL, /* max # of semaphores per id */
90 SEMOPM, /* max # of operations per semop call */
91 SEMUME, /* max # of undo entries per process */
92 SEMUSZ, /* size in bytes of undo structure */
93 SEMVMX, /* semaphore maximum value */
94 SEMAEM /* adjust on exit max value */
95 };
96
97
98 static struct sem_undo *semu_alloc(struct proc *p);
99 static int semundo_adjust(struct proc *p, struct sem_undo **supptr,
100 int semid, int semnum, int adjval);
101 static void semundo_clear(int semid, int semnum);
102
103 /* XXX casting to (sy_call_t *) is bogus, as usual. */
104 static sy_call_t *semcalls[] = {
105 (sy_call_t *)semctl, (sy_call_t *)semget,
106 (sy_call_t *)semop
107 };
108
109 static int semtot = 0; /* # of used semaphores */
110 struct user_semid_ds *sema = NULL; /* semaphore id pool */
111 struct sem *sem_pool = NULL; /* semaphore pool */
112 static struct sem_undo *semu_list = NULL; /* active undo structures */
113 struct sem_undo *semu = NULL; /* semaphore undo pool */
114
115
116 void sysv_sem_lock_init(void);
117 static lck_grp_t *sysv_sem_subsys_lck_grp;
118 static lck_grp_attr_t *sysv_sem_subsys_lck_grp_attr;
119 static lck_attr_t *sysv_sem_subsys_lck_attr;
120 static lck_mtx_t sysv_sem_subsys_mutex;
121
122 #define SYSV_SEM_SUBSYS_LOCK() lck_mtx_lock(&sysv_sem_subsys_mutex)
123 #define SYSV_SEM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_sem_subsys_mutex)
124
125
126 __private_extern__ void
127 sysv_sem_lock_init( void )
128 {
129
130 sysv_sem_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
131 lck_grp_attr_setstat(sysv_sem_subsys_lck_grp_attr);
132
133 sysv_sem_subsys_lck_grp = lck_grp_alloc_init("sysv_sem_subsys_lock", sysv_sem_subsys_lck_grp_attr);
134
135 sysv_sem_subsys_lck_attr = lck_attr_alloc_init();
136 lck_attr_setdebug(sysv_sem_subsys_lck_attr);
137 lck_mtx_init(&sysv_sem_subsys_mutex, sysv_sem_subsys_lck_grp, sysv_sem_subsys_lck_attr);
138 }
139
140 static __inline__ user_time_t
141 sysv_semtime(void)
142 {
143 struct timeval tv;
144 microtime(&tv);
145 return (tv.tv_sec);
146 }
147
148 /*
149 * XXX conversion of internal user_time_t to external time_t loses
150 * XXX precision; not an issue for us now, since we are only ever
151 * XXX setting 32 bits worth of time into it.
152 *
153 * pad field contents are not moved correspondingly; contents will be lost
154 *
155 * NOTE: Source and target may *NOT* overlap! (target is smaller)
156 */
157 static void
158 semid_ds_64to32(struct user_semid_ds *in, struct semid_ds *out)
159 {
160 out->sem_perm = in->sem_perm;
161 out->sem_base = (__int32_t)in->sem_base;
162 out->sem_nsems = in->sem_nsems;
163 out->sem_otime = in->sem_otime; /* XXX loses precision */
164 out->sem_ctime = in->sem_ctime; /* XXX loses precision */
165 }
166
167 /*
168 * pad field contents are not moved correspondingly; contents will be lost
169 *
170 * NOTE: Source and target are permitted to overlap! (source is smaller);
171 * this works because we copy fields in order from the end of the struct to
172 * the beginning.
173 *
174 * XXX use CAST_USER_ADDR_T() for lack of a CAST_USER_TIME_T(); net effect
175 * XXX is the same.
176 */
177 static void
178 semid_ds_32to64(struct semid_ds *in, struct user_semid_ds *out)
179 {
180 out->sem_ctime = in->sem_ctime;
181 out->sem_otime = in->sem_otime;
182 out->sem_nsems = in->sem_nsems;
183 out->sem_base = (void *)in->sem_base;
184 out->sem_perm = in->sem_perm;
185 }
186
187
188 /*
189 * Entry point for all SEM calls
190 *
191 * In Darwin this is no longer the entry point. It will be removed after
192 * the code has been tested better.
193 */
194 /* XXX actually varargs. */
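/*
 * Dispatch note: uap->which indexes the semcalls[] table above
 * (0 = semctl, 1 = semget, 2 = semop), and the remaining arguments are
 * reinterpreted as that call's argument structure starting at uap->a2.
 */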
195 int
196 semsys(struct proc *p, struct semsys_args *uap, register_t *retval)
197 {
198
199 /* The individual calls handling the locking now */
200
201 if (uap->which >= sizeof(semcalls)/sizeof(semcalls[0]))
202 return (EINVAL);
203 return ((*semcalls[uap->which])(p, &uap->a2, retval));
204 }
205
206 /*
207 * Expand the semu array to the given capacity. If the expansion fails
208 * return 0, otherwise return 1.
209 *
210 * Assumes we already have the subsystem lock.
211 */
212 static int
213 grow_semu_array(int newSize)
214 {
215 register int i;
216 register struct sem_undo *newSemu;
217
218 if (newSize <= seminfo.semmnu)
219 return 1;
220 if (newSize > limitseminfo.semmnu) /* enforce hard limit */
221 {
222 #ifdef SEM_DEBUG
223 printf("undo structure hard limit of %d reached, requested %d\n",
224 limitseminfo.semmnu, newSize);
225 #endif
226 return 0;
227 }
228 newSize = (newSize/SEMMNU_INC + 1) * SEMMNU_INC;
229 newSize = newSize > limitseminfo.semmnu ? limitseminfo.semmnu : newSize;
230
231 #ifdef SEM_DEBUG
232 printf("growing semu[] from %d to %d\n", seminfo.semmnu, newSize);
233 #endif
234 MALLOC(newSemu, struct sem_undo *, sizeof (struct sem_undo) * newSize,
235 M_SYSVSEM, M_WAITOK | M_ZERO);
236 if (NULL == newSemu)
237 {
238 #ifdef SEM_DEBUG
239 printf("allocation failed. no changes made.\n");
240 #endif
241 return 0;
242 }
243
244 /* copy the old data to the new array */
245 for (i = 0; i < seminfo.semmnu; i++)
246 {
247 newSemu[i] = semu[i];
248 }
249 /*
250 * The new elements (from newSemu[i] to newSemu[newSize-1]) have their
251 * "un_proc" set to 0 (i.e. NULL) by the M_ZERO flag to MALLOC() above,
252 * so they're already marked as "not in use".
253 */
254
255 /* Clean up the old array */
256 if (semu)
257 FREE(semu, M_SYSVSEM);
258
259 semu = newSemu;
260 seminfo.semmnu = newSize;
261 #ifdef SEM_DEBUG
262 printf("expansion successful\n");
263 #endif
264 return 1;
265 }
266
267 /*
268 * Expand the sema array to the given capacity. If the expansion fails
269 * we return 0, otherwise we return 1.
270 *
271 * Assumes we already have the subsystem lock.
272 */
273 static int
274 grow_sema_array(int newSize)
275 {
276 register struct user_semid_ds *newSema;
277 register int i;
278
279 if (newSize <= seminfo.semmni)
280 return 0;
281 if (newSize > limitseminfo.semmni) /* enforce hard limit */
282 {
283 #ifdef SEM_DEBUG
284 printf("identifier hard limit of %d reached, requested %d\n",
285 limitseminfo.semmni, newSize);
286 #endif
287 return 0;
288 }
289 newSize = (newSize/SEMMNI_INC + 1) * SEMMNI_INC;
290 newSize = newSize > limitseminfo.semmni ? limitseminfo.semmni : newSize;
291
292 #ifdef SEM_DEBUG
293 printf("growing sema[] from %d to %d\n", seminfo.semmni, newSize);
294 #endif
295 MALLOC(newSema, struct user_semid_ds *,
296 sizeof (struct user_semid_ds) * newSize,
297 M_SYSVSEM, M_WAITOK | M_ZERO);
298 if (NULL == newSema)
299 {
300 #ifdef SEM_DEBUG
301 printf("allocation failed. no changes made.\n");
302 #endif
303 return 0;
304 }
305
306 /* copy over the old ids */
307 for (i = 0; i < seminfo.semmni; i++)
308 {
309 newSema[i] = sema[i];
310 /* This is a hack. What we really want to be able to
311 * do is change the value a process is waiting on
312 * without waking it up, but I don't know how to do
313 * this with the existing code, so we wake up the
314 * process and let it do a lot of work to determine the
315 * semaphore set is really not available yet, and then
316 * sleep on the correct, reallocated user_semid_ds pointer.
317 */
318 if (sema[i].sem_perm.mode & SEM_ALLOC)
319 wakeup((caddr_t)&sema[i]);
320 }
321 /*
322 * The new elements (from newSema[i] to newSema[newSize-1]) have their
323 * "sem_base" and "sem_perm.mode" set to 0 (i.e. NULL) by the M_ZERO
324 * flag to MALLOC() above, so they're already marked as "not in use".
325 */
326
327 /* Clean up the old array */
328 if (sema)
329 FREE(sema, M_SYSVSEM);
330
331 sema = newSema;
332 seminfo.semmni = newSize;
333 #ifdef SEM_DEBUG
334 printf("expansion successful\n");
335 #endif
336 return 1;
337 }
338
339 /*
340 * Expand the sem_pool array to the given capacity. If the expansion fails
341 * we return 0 (fail), otherwise we return 1 (success).
342 *
343 * Assumes we already hold the subsystem lock.
344 */
345 static int
346 grow_sem_pool(int new_pool_size)
347 {
348 struct sem *new_sem_pool = NULL;
349 struct sem *sem_free;
350 int i;
351
352 if (new_pool_size < semtot)
353 return 0;
354 /* enforce hard limit */
355 if (new_pool_size > limitseminfo.semmns) {
356 #ifdef SEM_DEBUG
357 printf("semaphore hard limit of %d reached, requested %d\n",
358 limitseminfo.semmns, new_pool_size);
359 #endif
360 return 0;
361 }
362
363 new_pool_size = (new_pool_size/SEMMNS_INC + 1) * SEMMNS_INC;
364 new_pool_size = new_pool_size > limitseminfo.semmns ? limitseminfo.semmns : new_pool_size;
365
366 #ifdef SEM_DEBUG
367 printf("growing sem_pool array from %d to %d\n", seminfo.semmns, new_pool_size);
368 #endif
369 MALLOC(new_sem_pool, struct sem *, sizeof (struct sem) * new_pool_size,
370 M_SYSVSEM, M_WAITOK | M_ZERO);
371 if (NULL == new_sem_pool) {
372 #ifdef SEM_DEBUG
373 printf("allocation failed. no changes made.\n");
374 #endif
375 return 0;
376 }
377
378 /* We have our new memory, now copy the old contents over */
379 if (sem_pool)
380 for(i = 0; i < seminfo.semmns; i++)
381 new_sem_pool[i] = sem_pool[i];
382
383 /* Update our id structures to point to the new semaphores */
384 for(i = 0; i < seminfo.semmni; i++) {
385 if (sema[i].sem_perm.mode & SEM_ALLOC) /* ID in use */
386 sema[i].sem_base += (new_sem_pool - sem_pool);
387 }
388
389 sem_free = sem_pool;
390 sem_pool = new_sem_pool;
391
392 /* clean up the old array */
393 if (sem_free != NULL)
394 FREE(sem_free, M_SYSVSEM);
395
396 seminfo.semmns = new_pool_size;
397 #ifdef SEM_DEBUG
398 printf("expansion complete\n");
399 #endif
400 return 1;
401 }
402
403 /*
404 * Allocate a new sem_undo structure for a process
405 * (returns ptr to structure or NULL if no more room)
406 *
407 * Assumes we already hold the subsystem lock.
408 */
409
410 static struct sem_undo *
411 semu_alloc(struct proc *p)
412 {
413 register int i;
414 register struct sem_undo *suptr;
415 register struct sem_undo **supptr;
416 int attempt;
417
418 /*
419 * Try twice to allocate something.
420 * (we'll purge any empty structures after the first pass so
421 * two passes are always enough)
422 */
423
424 for (attempt = 0; attempt < 2; attempt++) {
425 /*
426 * Look for a free structure.
427 * Fill it in and return it if we find one.
428 */
429
430 for (i = 0; i < seminfo.semmnu; i++) {
431 suptr = SEMU(i);
432 if (suptr->un_proc == NULL) {
433 suptr->un_next = semu_list;
434 semu_list = suptr;
435 suptr->un_cnt = 0;
436 suptr->un_ent = NULL;
437 suptr->un_proc = p;
438 return(suptr);
439 }
440 }
441
442 /*
443 * We didn't find a free one; if this is the first attempt
444 * then try to free some structures.
445 */
446
447 if (attempt == 0) {
448 /* All the structures are in use - try to free some */
449 int did_something = 0;
450
451 supptr = &semu_list;
452 while ((suptr = *supptr) != NULL) {
453 if (suptr->un_cnt == 0) {
454 suptr->un_proc = NULL;
455 *supptr = suptr->un_next;
456 did_something = 1;
457 } else
458 supptr = &(suptr->un_next);
459 }
460
461 /* If we didn't free anything, try expanding
462 * the semu[] array. If that doesn't work
463 * then fail. We expand last to get the
464 * most reuse out of existing resources.
465 */
466 if (!did_something)
467 if (!grow_semu_array(seminfo.semmnu + 1))
468 return(NULL);
469 } else {
470 /*
471 * The second pass failed even though we freed
472 * something after the first pass!
473 * This is IMPOSSIBLE!
474 */
475 panic("semu_alloc - second attempt failed");
476 }
477 }
478 return (NULL);
479 }
480
481 /*
482 * Adjust a particular entry for a particular proc
483 *
484 * Assumes we already hold the subsystem lock.
485 */
486 static int
487 semundo_adjust(struct proc *p, struct sem_undo **supptr, int semid,
488 int semnum, int adjval)
489 {
490 register struct sem_undo *suptr;
491 register struct undo *sueptr, **suepptr, *new_sueptr;
492 int i;
493
494 /*
495 * Look for and remember the sem_undo if the caller doesn't provide it
496 */
497
498 suptr = *supptr;
499 if (suptr == NULL) {
500 for (suptr = semu_list; suptr != NULL;
501 suptr = suptr->un_next) {
502 if (suptr->un_proc == p) {
503 *supptr = suptr;
504 break;
505 }
506 }
507 if (suptr == NULL) {
508 if (adjval == 0)
509 return(0);
510 suptr = semu_alloc(p);
511 if (suptr == NULL)
512 return(ENOSPC);
513 *supptr = suptr;
514 }
515 }
516
517 /*
518 * Look for the requested entry and adjust it (delete if adjval becomes
519 * 0).
520 */
521 new_sueptr = NULL;
522 for (i = 0, suepptr = &suptr->un_ent, sueptr = suptr->un_ent;
523 i < suptr->un_cnt;
524 i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) {
525 if (sueptr->une_id != semid || sueptr->une_num != semnum)
526 continue;
527 if (adjval == 0)
528 sueptr->une_adjval = 0;
529 else
530 sueptr->une_adjval += adjval;
531 if (sueptr->une_adjval == 0) {
532 suptr->un_cnt--;
533 *suepptr = sueptr->une_next;
534 FREE(sueptr, M_SYSVSEM);
535 sueptr = NULL;
536 }
537 return 0;
538 }
539
540 /* Didn't find the right entry - create it */
541 if (adjval == 0) {
542 /* no adjustment: no need for a new entry */
543 return 0;
544 }
545
546 if (suptr->un_cnt == limitseminfo.semume) {
547 /* reached the limit number of semaphore undo entries */
548 return EINVAL;
549 }
550
551 /* allocate a new semaphore undo entry */
552 MALLOC(new_sueptr, struct undo *, sizeof (struct undo),
553 M_SYSVSEM, M_WAITOK);
554 if (new_sueptr == NULL) {
555 return ENOMEM;
556 }
557
558 /* fill in the new semaphore undo entry */
559 new_sueptr->une_next = suptr->un_ent;
560 suptr->un_ent = new_sueptr;
561 suptr->un_cnt++;
562 new_sueptr->une_adjval = adjval;
563 new_sueptr->une_id = semid;
564 new_sueptr->une_num = semnum;
565
566 return 0;
567 }
568
569 /* Assumes we already hold the subsystem lock.
570 */
571 static void
572 semundo_clear(int semid, int semnum)
573 {
574 struct sem_undo *suptr;
575
576 for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next) {
577 struct undo *sueptr;
578 struct undo **suepptr;
579 int i = 0;
580
581 sueptr = suptr->un_ent;
582 suepptr = &suptr->un_ent;
583 while (i < suptr->un_cnt) {
584 if (sueptr->une_id == semid) {
585 if (semnum == -1 || sueptr->une_num == semnum) {
586 suptr->un_cnt--;
587 *suepptr = sueptr->une_next;
588 FREE(sueptr, M_SYSVSEM);
589 sueptr = *suepptr;
590 continue;
591 }
592 if (semnum != -1)
593 break;
594 }
595 i++;
596 suepptr = &sueptr->une_next;
597 sueptr = sueptr->une_next;
598 }
599 }
600 }
601
602 /*
603 * Note that the user-mode half of this passes a union coerced to a
604 * user_addr_t. The union contains either an int or a pointer, and
605 * so we have to coerce it back, depending on whether the calling
606 * process is 64 bit or not. The coercion works for the 'val' element
607 * because the alignment is the same in user and kernel space.
608 */
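/*
 * Illustrative sketch (hypothetical userland code, not part of this file):
 * the caller fills in the classic SysV "semun" union and the libc stub
 * forwards it as a single register-sized value, which arrives here as
 * uap->arg:
 *
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *	union semun arg;
 *
 *	arg.val = 1;
 *	semctl(id, 0, SETVAL, arg);	// kernel recovers the int via CAST_DOWN()
 *
 *	arg.buf = &ds;
 *	semctl(id, 0, IPC_STAT, arg);	// kernel treats uap->arg as a user pointer
 */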
609 int
610 semctl(struct proc *p, struct semctl_args *uap, register_t *retval)
611 {
612 int semid = uap->semid;
613 int semnum = uap->semnum;
614 int cmd = uap->cmd;
615 user_semun_t user_arg = (user_semun_t)uap->arg;
616 kauth_cred_t cred = kauth_cred_get();
617 int i, rval, eval;
618 struct user_semid_ds sbuf;
619 struct user_semid_ds *semaptr;
620 struct user_semid_ds uds;
621
622
623 AUDIT_ARG(svipc_cmd, cmd);
624 AUDIT_ARG(svipc_id, semid);
625
626 SYSV_SEM_SUBSYS_LOCK();
627
628 #ifdef SEM_DEBUG
629 printf("call to semctl(%d, %d, %d, 0x%qx)\n", semid, semnum, cmd, user_arg);
630 #endif
631
632 semid = IPCID_TO_IX(semid);
633
634 if (semid < 0 || semid >= seminfo.semmni) {
635 #ifdef SEM_DEBUG
636 printf("Invalid semid\n");
637 #endif
638 eval = EINVAL;
639 goto semctlout;
640 }
641
642 semaptr = &sema[semid];
643 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
644 semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) {
645 eval = EINVAL;
646 goto semctlout;
647 }
648
649 eval = 0;
650 rval = 0;
651
652 switch (cmd) {
653 case IPC_RMID:
654 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_M)))
655 goto semctlout;
656
657 semaptr->sem_perm.cuid = kauth_cred_getuid(cred);
658 semaptr->sem_perm.uid = kauth_cred_getuid(cred);
659 semtot -= semaptr->sem_nsems;
660 for (i = semaptr->sem_base - sem_pool; i < semtot; i++)
661 sem_pool[i] = sem_pool[i + semaptr->sem_nsems];
662 for (i = 0; i < seminfo.semmni; i++) {
663 if ((sema[i].sem_perm.mode & SEM_ALLOC) &&
664 sema[i].sem_base > semaptr->sem_base)
665 sema[i].sem_base -= semaptr->sem_nsems;
666 }
667 semaptr->sem_perm.mode = 0;
668 semundo_clear(semid, -1);
669 wakeup((caddr_t)semaptr);
670 break;
671
672 case IPC_SET:
673 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_M)))
674 goto semctlout;
675
676 if (IS_64BIT_PROCESS(p)) {
677 eval = copyin(user_arg.buf, &sbuf, sizeof(struct user_semid_ds));
678 } else {
679 eval = copyin(user_arg.buf, &sbuf, sizeof(struct semid_ds));
680 /* convert in place; ugly, but safe */
681 semid_ds_32to64((struct semid_ds *)&sbuf, &sbuf);
682 }
683
684 if (eval != 0) {
685 goto semctlout;
686 }
687
688 semaptr->sem_perm.uid = sbuf.sem_perm.uid;
689 semaptr->sem_perm.gid = sbuf.sem_perm.gid;
690 semaptr->sem_perm.mode = (semaptr->sem_perm.mode & ~0777) |
691 (sbuf.sem_perm.mode & 0777);
692 semaptr->sem_ctime = sysv_semtime();
693 break;
694
695 case IPC_STAT:
696 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
697 goto semctlout;
698 bcopy(semaptr, &uds, sizeof(struct user_semid_ds));
699 if (IS_64BIT_PROCESS(p)) {
700 eval = copyout(&uds, user_arg.buf, sizeof(struct user_semid_ds));
701 } else {
702 struct semid_ds semid_ds32;
703 semid_ds_64to32(&uds, &semid_ds32);
704 eval = copyout(&semid_ds32, user_arg.buf, sizeof(struct semid_ds));
705 }
706 break;
707
708 case GETNCNT:
709 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
710 goto semctlout;
711 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
712 eval = EINVAL;
713 goto semctlout;
714 }
715 rval = semaptr->sem_base[semnum].semncnt;
716 break;
717
718 case GETPID:
719 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
720 goto semctlout;
721 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
722 eval = EINVAL;
723 goto semctlout;
724 }
725 rval = semaptr->sem_base[semnum].sempid;
726 break;
727
728 case GETVAL:
729 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
730 goto semctlout;
731 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
732 eval = EINVAL;
733 goto semctlout;
734 }
735 rval = semaptr->sem_base[semnum].semval;
736 break;
737
738 case GETALL:
739 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
740 goto semctlout;
741 /* XXXXXXXXXXXXXXXX TBD XXXXXXXXXXXXXXXX */
742 for (i = 0; i < semaptr->sem_nsems; i++) {
743 /* XXX could be done in one go... */
744 eval = copyout((caddr_t)&semaptr->sem_base[i].semval,
745 user_arg.array + (i * sizeof(unsigned short)),
746 sizeof(unsigned short));
747 if (eval != 0)
748 break;
749 }
750 break;
751
752 case GETZCNT:
753 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
754 goto semctlout;
755 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
756 eval = EINVAL;
757 goto semctlout;
758 }
759 rval = semaptr->sem_base[semnum].semzcnt;
760 break;
761
762 case SETVAL:
763 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
764 {
765 #ifdef SEM_DEBUG
766 printf("Invalid credentials for write\n");
767 #endif
768 goto semctlout;
769 }
770 if (semnum < 0 || semnum >= semaptr->sem_nsems)
771 {
772 #ifdef SEM_DEBUG
773 printf("Invalid number out of range for set\n");
774 #endif
775 eval = EINVAL;
776 goto semctlout;
777 }
778 /*
779 * Cast down a pointer instead of using 'val' member directly
780 * to avoid introducing endianness and a pad field into the
781 * header file. Ugly, but it works.
782 */
783 semaptr->sem_base[semnum].semval = CAST_DOWN(int,user_arg.buf);
784 semundo_clear(semid, semnum);
785 wakeup((caddr_t)semaptr);
786 break;
787
788 case SETALL:
789 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
790 goto semctlout;
791 /*** XXXXXXXXXXXX TBD ********/
792 for (i = 0; i < semaptr->sem_nsems; i++) {
793 /* XXX could be done in one go... */
794 eval = copyin(user_arg.array + (i * sizeof(unsigned short)),
795 (caddr_t)&semaptr->sem_base[i].semval,
796 sizeof(unsigned short));
797 if (eval != 0)
798 break;
799 }
800 semundo_clear(semid, -1);
801 wakeup((caddr_t)semaptr);
802 break;
803
804 default:
805 eval = EINVAL;
806 goto semctlout;
807 }
808
809 if (eval == 0)
810 *retval = rval;
811 semctlout:
812 SYSV_SEM_SUBSYS_UNLOCK();
813 return(eval);
814 }
815
816 int
817 semget(__unused struct proc *p, struct semget_args *uap, register_t *retval)
818 {
819 int semid, eval;
820 int key = uap->key;
821 int nsems = uap->nsems;
822 int semflg = uap->semflg;
823 kauth_cred_t cred = kauth_cred_get();
824
825 #ifdef SEM_DEBUG
826 if (key != IPC_PRIVATE)
827 printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
828 else
829 printf("semget(IPC_PRIVATE, %d, 0%o)\n", nsems, semflg);
830 #endif
831
832
833 SYSV_SEM_SUBSYS_LOCK();
834
835
836 if (key != IPC_PRIVATE) {
837 for (semid = 0; semid < seminfo.semmni; semid++) {
838 if ((sema[semid].sem_perm.mode & SEM_ALLOC) &&
839 sema[semid].sem_perm.key == key)
840 break;
841 }
842 if (semid < seminfo.semmni) {
843 #ifdef SEM_DEBUG
844 printf("found public key\n");
845 #endif
846 if ((eval = ipcperm(cred, &sema[semid].sem_perm,
847 semflg & 0700)))
848 goto semgetout;
849 if (nsems < 0 || sema[semid].sem_nsems < nsems) {
850 #ifdef SEM_DEBUG
851 printf("too small\n");
852 #endif
853 eval = EINVAL;
854 goto semgetout;
855 }
856 if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
857 #ifdef SEM_DEBUG
858 printf("not exclusive\n");
859 #endif
860 eval = EEXIST;
861 goto semgetout;
862 }
863 goto found;
864 }
865 }
866
867 #ifdef SEM_DEBUG
868 printf("need to allocate an id for the request\n");
869 #endif
870 if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
871 if (nsems <= 0 || nsems > limitseminfo.semmsl) {
872 #ifdef SEM_DEBUG
873 printf("nsems out of range (0<%d<=%d)\n", nsems,
874 seminfo.semmsl);
875 #endif
876 eval = EINVAL;
877 goto semgetout;
878 }
879 if (nsems > seminfo.semmns - semtot) {
880 #ifdef SEM_DEBUG
881 printf("not enough semaphores left (need %d, got %d)\n",
882 nsems, seminfo.semmns - semtot);
883 #endif
884 if (!grow_sem_pool(semtot + nsems)) {
885 #ifdef SEM_DEBUG
886 printf("failed to grow the sem array\n");
887 #endif
888 eval = ENOSPC;
889 goto semgetout;
890 }
891 }
892 for (semid = 0; semid < seminfo.semmni; semid++) {
893 if ((sema[semid].sem_perm.mode & SEM_ALLOC) == 0)
894 break;
895 }
896 if (semid == seminfo.semmni) {
897 #ifdef SEM_DEBUG
898 printf("no more id's available\n");
899 #endif
900 if (!grow_sema_array(seminfo.semmni + 1))
901 {
902 #ifdef SEM_DEBUG
903 printf("failed to grow sema array\n");
904 #endif
905 eval = ENOSPC;
906 goto semgetout;
907 }
908 }
909 #ifdef SEM_DEBUG
910 printf("semid %d is available\n", semid);
911 #endif
912 sema[semid].sem_perm.key = key;
913 sema[semid].sem_perm.cuid = kauth_cred_getuid(cred);
914 sema[semid].sem_perm.uid = kauth_cred_getuid(cred);
915 sema[semid].sem_perm.cgid = cred->cr_gid;
916 sema[semid].sem_perm.gid = cred->cr_gid;
917 sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
918 sema[semid].sem_perm.seq =
919 (sema[semid].sem_perm.seq + 1) & 0x7fff;
920 sema[semid].sem_nsems = nsems;
921 sema[semid].sem_otime = 0;
922 sema[semid].sem_ctime = sysv_semtime();
923 sema[semid].sem_base = &sem_pool[semtot];
924 semtot += nsems;
925 bzero(sema[semid].sem_base,
926 sizeof(sema[semid].sem_base[0])*nsems);
927 #ifdef SEM_DEBUG
928 printf("sembase = 0x%x, next = 0x%x\n", sema[semid].sem_base,
929 &sem_pool[semtot]);
930 #endif
931 } else {
932 #ifdef SEM_DEBUG
933 printf("didn't find it and wasn't asked to create it\n");
934 #endif
935 eval = ENOENT;
936 goto semgetout;
937 }
938
939 found:
940 *retval = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm);
941 AUDIT_ARG(svipc_id, *retval);
942 #ifdef SEM_DEBUG
943 printf("semget is done, returning %d\n", *retval);
944 #endif
945 eval = 0;
946
947 semgetout:
948 SYSV_SEM_SUBSYS_UNLOCK();
949 return(eval);
950 }
951
952 int
953 semop(struct proc *p, struct semop_args *uap, register_t *retval)
954 {
955 int semid = uap->semid;
956 int nsops = uap->nsops;
957 struct sembuf sops[MAX_SOPS];
958 register struct user_semid_ds *semaptr;
959 register struct sembuf *sopptr = NULL; /* protected by 'semptr' */
960 register struct sem *semptr = NULL; /* protected by 'if' */
961 struct sem_undo *suptr = NULL;
962 int i, j, eval;
963 int do_wakeup, do_undos;
964
965 AUDIT_ARG(svipc_id, uap->semid);
966
967 SYSV_SEM_SUBSYS_LOCK();
968
969 #ifdef SEM_DEBUG
970 printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops);
971 #endif
972
973 semid = IPCID_TO_IX(semid); /* Convert back to zero origin */
974
975 if (semid < 0 || semid >= seminfo.semmni) {
976 eval = EINVAL;
977 goto semopout;
978 }
979
980 semaptr = &sema[semid];
981 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0) {
982 eval = EINVAL;
983 goto semopout;
984 }
985 if (semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) {
986 eval = EINVAL;
987 goto semopout;
988 }
989
990 if ((eval = ipcperm(kauth_cred_get(), &semaptr->sem_perm, IPC_W))) {
991 #ifdef SEM_DEBUG
992 printf("eval = %d from ipaccess\n", eval);
993 #endif
994 goto semopout;
995 }
996
997 if (nsops < 0 || nsops > MAX_SOPS) {
998 #ifdef SEM_DEBUG
999 printf("too many sops (max=%d, nsops=%d)\n", MAX_SOPS, nsops);
1000 #endif
1001 eval = E2BIG;
1002 goto semopout;
1003 }
1004
1005 /* OK for LP64, since sizeof(struct sembuf) is currently invariant */
1006 if ((eval = copyin(uap->sops, &sops, nsops * sizeof(struct sembuf))) != 0) {
1007 #ifdef SEM_DEBUG
1008 printf("eval = %d from copyin(%08x, %08x, %ld)\n", eval,
1009 uap->sops, &sops, nsops * sizeof(struct sembuf));
1010 #endif
1011 goto semopout;
1012 }
1013
1014 /*
1015 * Loop trying to satisfy the vector of requests.
1016 * If we reach a point where we must wait, any requests already
1017 * performed are rolled back and we go to sleep until some other
1018 * process wakes us up. At this point, we start all over again.
1019 *
1020 * This ensures that from the perspective of other tasks, a set
1021 * of requests is atomic (never partially satisfied).
1022 */
1023 do_undos = 0;
1024
1025 for (;;) {
1026 do_wakeup = 0;
1027
1028 for (i = 0; i < nsops; i++) {
1029 sopptr = &sops[i];
1030
1031 if (sopptr->sem_num >= semaptr->sem_nsems) {
1032 eval = EFBIG;
1033 goto semopout;
1034 }
1035
1036 semptr = &semaptr->sem_base[sopptr->sem_num];
1037
1038 #ifdef SEM_DEBUG
1039 printf("semop: semaptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n",
1040 semaptr, semaptr->sem_base, semptr,
1041 sopptr->sem_num, semptr->semval, sopptr->sem_op,
1042 (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
1043 #endif
1044
1045 if (sopptr->sem_op < 0) {
1046 if (semptr->semval + sopptr->sem_op < 0) {
1047 #ifdef SEM_DEBUG
1048 printf("semop: can't do it now\n");
1049 #endif
1050 break;
1051 } else {
1052 semptr->semval += sopptr->sem_op;
1053 if (semptr->semval == 0 &&
1054 semptr->semzcnt > 0)
1055 do_wakeup = 1;
1056 }
1057 if (sopptr->sem_flg & SEM_UNDO)
1058 do_undos = 1;
1059 } else if (sopptr->sem_op == 0) {
1060 if (semptr->semval > 0) {
1061 #ifdef SEM_DEBUG
1062 printf("semop: not zero now\n");
1063 #endif
1064 break;
1065 }
1066 } else {
1067 if (semptr->semncnt > 0)
1068 do_wakeup = 1;
1069 semptr->semval += sopptr->sem_op;
1070 if (sopptr->sem_flg & SEM_UNDO)
1071 do_undos = 1;
1072 }
1073 }
1074
1075 /*
1076 * Did we get through the entire vector?
1077 */
1078 if (i >= nsops)
1079 goto done;
1080
1081 /*
1082 * No ... rollback anything that we've already done
1083 */
1084 #ifdef SEM_DEBUG
1085 printf("semop: rollback 0 through %d\n", i-1);
1086 #endif
1087 for (j = 0; j < i; j++)
1088 semaptr->sem_base[sops[j].sem_num].semval -=
1089 sops[j].sem_op;
1090
1091 /*
1092 * If the request that we couldn't satisfy has the
1093 * NOWAIT flag set then return with EAGAIN.
1094 */
1095 if (sopptr->sem_flg & IPC_NOWAIT) {
1096 eval = EAGAIN;
1097 goto semopout;
1098 }
1099
1100 if (sopptr->sem_op == 0)
1101 semptr->semzcnt++;
1102 else
1103 semptr->semncnt++;
1104
1105 #ifdef SEM_DEBUG
1106 printf("semop: good night!\n");
1107 #endif
1108 /* Release our lock on the semaphore subsystem so
1109 * another thread can get at the semaphore we are
1110 * waiting for. We will get the lock back after we
1111 * wake up.
1112 */
1113 eval = msleep((caddr_t)semaptr, &sysv_sem_subsys_mutex , (PZERO - 4) | PCATCH,
1114 "semwait", 0);
1115
1116 #ifdef SEM_DEBUG
1117 printf("semop: good morning (eval=%d)!\n", eval);
1118 #endif
1119 if (eval != 0) {
1120 eval = EINTR;
1121 }
1122
1123 /*
1124 * IMPORTANT: while we were asleep, the semaphore array might
1125 * have been reallocated somewhere else (see grow_sema_array()).
1126 * When we wake up, we have to re-lookup the semaphore
1127 * structures and re-validate them.
1128 */
1129
1130 suptr = NULL; /* sem_undo may have been reallocated */
1131 semaptr = &sema[semid]; /* sema may have been reallocated */
1132
1133 /*
1134 * Make sure that the semaphore still exists
1135 */
1136 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
1137 semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid) ||
1138 sopptr->sem_num >= semaptr->sem_nsems) {
1139 if (eval == EINTR) {
1140 /*
1141 * EINTR takes precedence over the fact that
1142 * the semaphore disappeared while we were
1143 * sleeping...
1144 */
1145 } else {
1146 /*
1147 * The man page says to return EIDRM.
1148 * Unfortunately, BSD doesn't define that code!
1149 */
1150 #ifdef EIDRM
1151 eval = EIDRM;
1152 #else
1153 eval = EINVAL;
1154 #endif
1155 }
1156 goto semopout;
1157 }
1158
1159 /*
1160 * The semaphore is still alive. Readjust the count of
1161 * waiting processes. semptr needs to be recomputed
1162 * because the sem[] may have been reallocated while
1163 * we were sleeping, updating our sem_base pointer.
1164 */
1165 semptr = &semaptr->sem_base[sopptr->sem_num];
1166 if (sopptr->sem_op == 0)
1167 semptr->semzcnt--;
1168 else
1169 semptr->semncnt--;
1170
1171 if (eval != 0) { /* EINTR */
1172 goto semopout;
1173 }
1174 }
1175
1176 done:
1177 /*
1178 * Process any SEM_UNDO requests.
1179 */
1180 if (do_undos) {
1181 for (i = 0; i < nsops; i++) {
1182 /*
1183 * We only need to deal with SEM_UNDO's for non-zero
1184 * op's.
1185 */
1186 int adjval;
1187
1188 if ((sops[i].sem_flg & SEM_UNDO) == 0)
1189 continue;
1190 adjval = sops[i].sem_op;
1191 if (adjval == 0)
1192 continue;
1193 eval = semundo_adjust(p, &suptr, semid,
1194 sops[i].sem_num, -adjval);
1195 if (eval == 0)
1196 continue;
1197
1198 /*
1199 * Oh-Oh! We ran out of either sem_undo's or undo's.
1200 * Roll back the adjustments to this point and then
1201 * roll back the semaphore ups and downs so we can return
1202 * with an error with all structures restored. We
1203 * roll back the undo's in the exact reverse order that
1204 * we applied them. This guarantees that we won't run
1205 * out of space as we roll things back out.
1206 */
1207 for (j = i - 1; j >= 0; j--) {
1208 if ((sops[j].sem_flg & SEM_UNDO) == 0)
1209 continue;
1210 adjval = sops[j].sem_op;
1211 if (adjval == 0)
1212 continue;
1213 if (semundo_adjust(p, &suptr, semid,
1214 sops[j].sem_num, adjval) != 0)
1215 panic("semop - can't undo undos");
1216 }
1217
1218 for (j = 0; j < nsops; j++)
1219 semaptr->sem_base[sops[j].sem_num].semval -=
1220 sops[j].sem_op;
1221
1222 #ifdef SEM_DEBUG
1223 printf("eval = %d from semundo_adjust\n", eval);
1224 #endif
1225 goto semopout;
1226 } /* loop through the sops */
1227 } /* if (do_undos) */
1228
1229 /* We're definitely done - set the sempid's */
1230 for (i = 0; i < nsops; i++) {
1231 sopptr = &sops[i];
1232 semptr = &semaptr->sem_base[sopptr->sem_num];
1233 semptr->sempid = p->p_pid;
1234 }
1235
1236 if (do_wakeup) {
1237 #ifdef SEM_DEBUG
1238 printf("semop: doing wakeup\n");
1239 #ifdef SEM_WAKEUP
1240 sem_wakeup((caddr_t)semaptr);
1241 #else
1242 wakeup((caddr_t)semaptr);
1243 #endif
1244 printf("semop: back from wakeup\n");
1245 #else
1246 wakeup((caddr_t)semaptr);
1247 #endif
1248 }
1249 #ifdef SEM_DEBUG
1250 printf("semop: done\n");
1251 #endif
1252 *retval = 0;
1253 eval = 0;
1254 semopout:
1255 SYSV_SEM_SUBSYS_UNLOCK();
1256 return(eval);
1257 }
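/*
 * Illustrative usage (hypothetical userland code, not part of this file):
 * a single semop() call is applied atomically, and SEM_UNDO records an
 * adjustment that semexit() below will apply if the process exits while
 * still holding the semaphore.
 *
 *	struct sembuf op;
 *
 *	op.sem_num = 0;
 *	op.sem_op  = -1;		// "P": wait for and take one unit
 *	op.sem_flg = SEM_UNDO;
 *	semop(id, &op, 1);
 *	// ... critical section ...
 *	op.sem_op  = 1;			// "V": give the unit back
 *	semop(id, &op, 1);
 */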
1258
1259 /*
1260 * Go through the undo structures for this process and apply the adjustments to
1261 * semaphores.
1262 */
1263 void
1264 semexit(struct proc *p)
1265 {
1266 register struct sem_undo *suptr;
1267 register struct sem_undo **supptr;
1268 int did_something;
1269
1270 /* If we have not allocated our semaphores yet there can't be
1271 * anything to undo, but we need the lock to prevent
1272 * dynamic memory race conditions.
1273 */
1274 SYSV_SEM_SUBSYS_LOCK();
1275
1276 if (!sem_pool)
1277 {
1278 SYSV_SEM_SUBSYS_UNLOCK();
1279 return;
1280 }
1281 did_something = 0;
1282
1283 /*
1284 * Go through the chain of undo vectors looking for one
1285 * associated with this process.
1286 */
1287
1288 for (supptr = &semu_list; (suptr = *supptr) != NULL;
1289 supptr = &suptr->un_next) {
1290 if (suptr->un_proc == p)
1291 break;
1292 }
1293
1294 if (suptr == NULL)
1295 goto unlock;
1296
1297 #ifdef SEM_DEBUG
1298 printf("proc @%08x has undo structure with %d entries\n", p,
1299 suptr->un_cnt);
1300 #endif
1301
1302 /*
1303 * If there are any active undo elements then process them.
1304 */
1305 if (suptr->un_cnt > 0) {
1306 while (suptr->un_ent != NULL) {
1307 struct undo *sueptr;
1308 int semid;
1309 int semnum;
1310 int adjval;
1311 struct user_semid_ds *semaptr;
1312
1313 sueptr = suptr->un_ent;
1314 semid = sueptr->une_id;
1315 semnum = sueptr->une_num;
1316 adjval = sueptr->une_adjval;
1317
1318 semaptr = &sema[semid];
1319 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
1320 panic("semexit - semid not allocated");
1321 if (semnum >= semaptr->sem_nsems)
1322 panic("semexit - semnum out of range");
1323
1324 #ifdef SEM_DEBUG
1325 printf("semexit: %08x id=%d num=%d(adj=%d) ; sem=%d\n",
1326 suptr->un_proc,
1327 semid,
1328 semnum,
1329 adjval,
1330 semaptr->sem_base[semnum].semval);
1331 #endif
1332
1333 if (adjval < 0) {
1334 if (semaptr->sem_base[semnum].semval < -adjval)
1335 semaptr->sem_base[semnum].semval = 0;
1336 else
1337 semaptr->sem_base[semnum].semval +=
1338 adjval;
1339 } else
1340 semaptr->sem_base[semnum].semval += adjval;
1341
1342 /* Maybe we should build a list of semaptr's to wake
1343 * up, finish all access to data structures, release the
1344 * subsystem lock, and wake all the processes. Something
1345 * to think about. It wouldn't buy us anything unless
1346 * wakeup had the potential to block, or the syscall
1347 * funnel state was changed to allow multiple threads
1348 * in the BSD code at once.
1349 */
1350 #ifdef SEM_WAKEUP
1351 sem_wakeup((caddr_t)semaptr);
1352 #else
1353 wakeup((caddr_t)semaptr);
1354 #endif
1355 #ifdef SEM_DEBUG
1356 printf("semexit: back from wakeup\n");
1357 #endif
1358 suptr->un_cnt--;
1359 suptr->un_ent = sueptr->une_next;
1360 FREE(sueptr, M_SYSVSEM);
1361 sueptr = NULL;
1362 }
1363 }
1364
1365 /*
1366 * Deallocate the undo vector.
1367 */
1368 #ifdef SEM_DEBUG
1369 printf("removing vector\n");
1370 #endif
1371 suptr->un_proc = NULL;
1372 *supptr = suptr->un_next;
1373
1374 unlock:
1375 /*
1376 * There is a semaphore leak (i.e. memory leak) in this code.
1377 * We should be deleting the IPC_PRIVATE semaphores when they are
1378 * no longer needed, and we don't. We would have to track which processes
1379 * know about which IPC_PRIVATE semaphores, updating the list after
1380 * every fork. We can't just delete the semaphore when the process
1381 * that created it dies, because that process may well have forked
1382 * some children. So we need to wait until all of its children have
1383 * died, and so on. Maybe we should tag each IPC_PRIVATE semaphore
1384 * with the creating group ID, count the number of processes left in
1385 * that group, and delete the semaphore when the group is gone.
1386 * Until that code gets implemented we will leak IPC_PRIVATE semaphores.
1387 * There is an upper bound on the size of our semaphore array, so
1388 * leaking the semaphores should not enable a DoS attack.
1389 *
1390 * Please note that the original BSD code this file is based on had the
1391 * same leaky semaphore problem.
1392 */
1393
1394 SYSV_SEM_SUBSYS_UNLOCK();
1395 }
1396
1397
1398 /* (struct sysctl_oid *oidp, void *arg1, int arg2, \
1399 struct sysctl_req *req) */
1400 static int
1401 sysctl_seminfo(__unused struct sysctl_oid *oidp, void *arg1,
1402 __unused int arg2, struct sysctl_req *req)
1403 {
1404 int error = 0;
1405
1406 error = SYSCTL_OUT(req, arg1, sizeof(int));
1407 if (error || req->newptr == USER_ADDR_NULL)
1408 return(error);
1409
1410 SYSV_SEM_SUBSYS_LOCK();
1411
1412 /* Set the values only if the semaphore subsystem is not initialised */
1413 if ((sem_pool == NULL) &&
1414 (sema == NULL) &&
1415 (semu == NULL) &&
1416 (semu_list == NULL)) {
1417 if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) {
1418 goto out;
1419 }
1420 } else
1421 error = EINVAL;
1422 out:
1423 SYSV_SEM_SUBSYS_UNLOCK();
1424 return(error);
1425
1426 }
1427
1428 /* SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV"); */
1429 extern struct sysctl_oid_list sysctl__kern_sysv_children;
1430 SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNI, semmni, CTLTYPE_INT | CTLFLAG_RW,
1431 &limitseminfo.semmni, 0, &sysctl_seminfo ,"I","semmni");
1432
1433 SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNS, semmns, CTLTYPE_INT | CTLFLAG_RW,
1434 &limitseminfo.semmns, 0, &sysctl_seminfo ,"I","semmns");
1435
1436 SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNU, semmnu, CTLTYPE_INT | CTLFLAG_RW,
1437 &limitseminfo.semmnu, 0, &sysctl_seminfo ,"I","semmnu");
1438
1439 SYSCTL_PROC(_kern_sysv, KSYSV_SEMMSL, semmsl, CTLTYPE_INT | CTLFLAG_RW,
1440 &limitseminfo.semmsl, 0, &sysctl_seminfo ,"I","semmsl");
1441
1442 SYSCTL_PROC(_kern_sysv, KSYSV_SEMUNE, semume, CTLTYPE_INT | CTLFLAG_RW,
1443 &limitseminfo.semume, 0, &sysctl_seminfo ,"I","semume");
1444
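/*
 * These limits can be tuned from user space, but only before the
 * subsystem allocates its pools: sysctl_seminfo() above returns EINVAL
 * once any of the pools has been allocated.  Illustrative shell usage
 * (assuming the kern.sysv.* names declared above):
 *
 *	sysctl kern.sysv.semmni		# read the identifier limit
 *	sysctl -w kern.sysv.semmns=512	# raise the system-wide semaphore limit
 */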
1445
1446 static int
1447 IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
1448 __unused int arg2, struct sysctl_req *req)
1449 {
1450 int error;
1451 int cursor;
1452 union {
1453 struct IPCS_command u32;
1454 struct user_IPCS_command u64;
1455 } ipcs;
1456 struct semid_ds semid_ds32; /* post conversion, 32 bit version */
1457 void *semid_dsp;
1458 size_t ipcs_sz = sizeof(struct user_IPCS_command);
1459 size_t semid_ds_sz = sizeof(struct user_semid_ds);
1460 struct proc *p = current_proc();
1461
1462 /* Copy in the command structure */
1463 if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
1464 return(error);
1465 }
1466
1467 if (!IS_64BIT_PROCESS(p)) {
1468 ipcs_sz = sizeof(struct IPCS_command);
1469 semid_ds_sz = sizeof(struct semid_ds);
1470 }
1471
1472 /* Let us version this interface... */
1473 if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
1474 return(EINVAL);
1475 }
1476
1477 SYSV_SEM_SUBSYS_LOCK();
1478 switch(ipcs.u64.ipcs_op) {
1479 case IPCS_SEM_CONF: /* Obtain global configuration data */
1480 if (ipcs.u64.ipcs_datalen != sizeof(struct seminfo)) {
1481 error = ERANGE;
1482 break;
1483 }
1484 if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */
1485 error = EINVAL;
1486 break;
1487 }
1488 error = copyout(&seminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
1489 break;
1490
1491 case IPCS_SEM_ITER: /* Iterate over existing semaphore IDs */
1492 cursor = ipcs.u64.ipcs_cursor;
1493 if (cursor < 0 || cursor >= seminfo.semmni) {
1494 error = ERANGE;
1495 break;
1496 }
1497 if (ipcs.u64.ipcs_datalen != (int)semid_ds_sz ) {
1498 error = EINVAL;
1499 break;
1500 }
1501 for( ; cursor < seminfo.semmni; cursor++) {
1502 if (sema[cursor].sem_perm.mode & SEM_ALLOC)
1503 break;
1504 continue;
1505 }
1506 if (cursor == seminfo.semmni) {
1507 error = ENOENT;
1508 break;
1509 }
1510
1511 semid_dsp = &sema[cursor]; /* default: 64 bit */
1512
1513 /*
1514 * If necessary, convert the 64 bit kernel semaphore
1515 * descriptor to a 32 bit user one.
1516 */
1517 if (!IS_64BIT_PROCESS(p)) {
1518 semid_ds_64to32(semid_dsp, &semid_ds32);
1519 semid_dsp = &semid_ds32;
1520 }
1521 error = copyout(semid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
1522 if (!error) {
1523 /* update cursor */
1524 ipcs.u64.ipcs_cursor = cursor + 1;
1525 error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
1526 }
1527 break;
1528
1529 default:
1530 error = EINVAL;
1531 break;
1532 }
1533 SYSV_SEM_SUBSYS_UNLOCK();
1534 return(error);
1535 }
1536
1537 SYSCTL_DECL(_kern_sysv_ipcs);
1538 SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW|CTLFLAG_ANYBODY,
1539 0, 0, IPCS_sem_sysctl,
1540 "S,IPCS_sem_command",
1541 "ipcs sem command interface");