1 /*
2 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * Implementation of SVID semaphores
32 *
33 * Author: Daniel Boulet
34 *
35 * This software is provided ``AS IS'' without any warranties of any kind.
36 */
37 /*
38 * John Bellardo modified the implementation for Darwin. 12/2000
39 */
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/proc_internal.h>
45 #include <sys/kauth.h>
46 #include <sys/sem_internal.h>
47 #include <sys/malloc.h>
48 #include <mach/mach_types.h>
49
50 #include <sys/filedesc.h>
51 #include <sys/file_internal.h>
52 #include <sys/sysctl.h>
53 #include <sys/ipcs.h>
54 #include <sys/sysent.h>
55 #include <sys/sysproto.h>
56
57 #include <bsm/audit_kernel.h>
58
59
60 /* Uncomment this line to see the debugging output */
61 /* #define SEM_DEBUG */
62
63 #define M_SYSVSEM M_TEMP
64
65
66 /* Hard system limits to avoid resource starvation / DOS attacks.
67 * These are not needed if we can make the semaphore pages swappable.
68 */
69 static struct seminfo limitseminfo = {
70 SEMMAP, /* # of entries in semaphore map */
71 SEMMNI, /* # of semaphore identifiers */
72 SEMMNS, /* # of semaphores in system */
73 SEMMNU, /* # of undo structures in system */
74 SEMMSL, /* max # of semaphores per id */
75 SEMOPM, /* max # of operations per semop call */
76 SEMUME, /* max # of undo entries per process */
77 SEMUSZ, /* size in bytes of undo structure */
78 SEMVMX, /* semaphore maximum value */
79 SEMAEM /* adjust on exit max value */
80 };
81
82 /* Current system allocations. We use this structure to track how many
83 * resources we have allocated so far. This way we can set large hard limits
84 * and not allocate the memory for them up front.
85 */
86 struct seminfo seminfo = {
87 SEMMAP, /* Unused, # of entries in semaphore map */
88 0, /* # of semaphore identifiers */
89 0, /* # of semaphores in system */
90 0, /* # of undo entries in system */
91 SEMMSL, /* max # of semaphores per id */
92 SEMOPM, /* max # of operations per semop call */
93 SEMUME, /* max # of undo entries per process */
94 SEMUSZ, /* size in bytes of undo structure */
95 SEMVMX, /* semaphore maximum value */
96 SEMAEM /* adjust on exit max value */
97 };
98
99
100 static struct sem_undo *semu_alloc(struct proc *p);
101 static int semundo_adjust(struct proc *p, struct sem_undo **supptr,
102 int semid, int semnum, int adjval);
103 static void semundo_clear(int semid, int semnum);
104
105 /* XXX casting to (sy_call_t *) is bogus, as usual. */
106 static sy_call_t *semcalls[] = {
107 (sy_call_t *)semctl, (sy_call_t *)semget,
108 (sy_call_t *)semop
109 };
110
111 static int semtot = 0; /* # of used semaphores */
112 struct user_semid_ds *sema = NULL; /* semaphore id pool */
113 struct sem *sem_pool = NULL; /* semaphore pool */
114 static struct sem_undo *semu_list = NULL; /* active undo structures */
115 struct sem_undo *semu = NULL; /* semaphore undo pool */
116
117
118 void sysv_sem_lock_init(void);
119 static lck_grp_t *sysv_sem_subsys_lck_grp;
120 static lck_grp_attr_t *sysv_sem_subsys_lck_grp_attr;
121 static lck_attr_t *sysv_sem_subsys_lck_attr;
122 static lck_mtx_t sysv_sem_subsys_mutex;
123
124 #define SYSV_SEM_SUBSYS_LOCK() lck_mtx_lock(&sysv_sem_subsys_mutex)
125 #define SYSV_SEM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_sem_subsys_mutex)
126
127
128 __private_extern__ void
129 sysv_sem_lock_init( void )
130 {
131
132 sysv_sem_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
133 lck_grp_attr_setstat(sysv_sem_subsys_lck_grp_attr);
134
135 sysv_sem_subsys_lck_grp = lck_grp_alloc_init("sysv_sem_subsys_lock", sysv_sem_subsys_lck_grp_attr);
136
137 sysv_sem_subsys_lck_attr = lck_attr_alloc_init();
138 lck_attr_setdebug(sysv_sem_subsys_lck_attr);
139 lck_mtx_init(&sysv_sem_subsys_mutex, sysv_sem_subsys_lck_grp, sysv_sem_subsys_lck_attr);
140 }
141
142 static __inline__ user_time_t
143 sysv_semtime(void)
144 {
145 struct timeval tv;
146 microtime(&tv);
147 return (tv.tv_sec);
148 }
149
150 /*
151 * XXX conversion of internal user_time_t to external time_t loses
152 * XXX precision; not an issue for us now, since we are only ever
153 * XXX setting 32 bits worth of time into it.
154 *
155 * pad field contents are not moved correspondingly; contents will be lost
156 *
157 * NOTE: Source and target may *NOT* overlap! (target is smaller)
158 */
159 static void
160 semid_ds_64to32(struct user_semid_ds *in, struct semid_ds *out)
161 {
162 out->sem_perm = in->sem_perm;
163 out->sem_base = (__int32_t)in->sem_base;
164 out->sem_nsems = in->sem_nsems;
165 out->sem_otime = in->sem_otime; /* XXX loses precision */
166 out->sem_ctime = in->sem_ctime; /* XXX loses precision */
167 }
168
169 /*
170 * pad field contents are not moved correspondingly; contents will be lost
171 *
172 * NOTE: Source and target are permitted to overlap! (source is smaller);
173 * this works because we copy fields in order from the end of the struct to
174 * the beginning.
175 *
176 * XXX use CAST_USER_ADDR_T() for lack of a CAST_USER_TIME_T(); net effect
177 * XXX is the same.
178 */
179 static void
180 semid_ds_32to64(struct semid_ds *in, struct user_semid_ds *out)
181 {
182 out->sem_ctime = in->sem_ctime;
183 out->sem_otime = in->sem_otime;
184 out->sem_nsems = in->sem_nsems;
185 out->sem_base = (void *)in->sem_base;
186 out->sem_perm = in->sem_perm;
187 }
188
189
190 /*
191 * Entry point for all SEM calls
192 *
193 * In Darwin this is no longer the entry point. It will be removed once
194 * the code has been more thoroughly tested.
195 */
196 /* XXX actually varargs. */
197 int
198 semsys(struct proc *p, struct semsys_args *uap, register_t *retval)
199 {
200
201 /* The individual calls handling the locking now */
202
203 if (uap->which >= sizeof(semcalls)/sizeof(semcalls[0]))
204 return (EINVAL);
205 return ((*semcalls[uap->which])(p, &uap->a2, retval));
206 }
207
208 /*
209 * Expand the semu array to the given capacity. If the expansion fails
210 * return 0, otherwise return 1.
211 *
212 * Assumes we already have the subsystem lock.
213 */
214 static int
215 grow_semu_array(int newSize)
216 {
217 register int i;
218 register struct sem_undo *newSemu;
219
220 if (newSize <= seminfo.semmnu)
221 return 1;
222 if (newSize > limitseminfo.semmnu) /* enforce hard limit */
223 {
224 #ifdef SEM_DEBUG
225 printf("undo structure hard limit of %d reached, requested %d\n",
226 limitseminfo.semmnu, newSize);
227 #endif
228 return 0;
229 }
230 newSize = (newSize/SEMMNU_INC + 1) * SEMMNU_INC;
231 newSize = newSize > limitseminfo.semmnu ? limitseminfo.semmnu : newSize;
232
233 #ifdef SEM_DEBUG
234 printf("growing semu[] from %d to %d\n", seminfo.semmnu, newSize);
235 #endif
236 MALLOC(newSemu, struct sem_undo *, sizeof (struct sem_undo) * newSize,
237 M_SYSVSEM, M_WAITOK | M_ZERO);
238 if (NULL == newSemu)
239 {
240 #ifdef SEM_DEBUG
241 printf("allocation failed. no changes made.\n");
242 #endif
243 return 0;
244 }
245
246 /* copy the old data to the new array */
247 for (i = 0; i < seminfo.semmnu; i++)
248 {
249 newSemu[i] = semu[i];
250 }
251 /*
252 * The new elements (from newSemu[i] to newSemu[newSize-1]) have their
253 * "un_proc" set to 0 (i.e. NULL) by the M_ZERO flag to MALLOC() above,
254 * so they're already marked as "not in use".
255 */
256
257 /* Clean up the old array */
258 if (semu)
259 FREE(semu, M_SYSVSEM);
260
261 semu = newSemu;
262 seminfo.semmnu = newSize;
263 #ifdef SEM_DEBUG
264 printf("expansion successful\n");
265 #endif
266 return 1;
267 }
268
269 /*
270 * Expand the sema array to the given capacity. If the expansion fails
271 * we return 0, otherwise we return 1.
272 *
273 * Assumes we already have the subsystem lock.
274 */
275 static int
276 grow_sema_array(int newSize)
277 {
278 register struct user_semid_ds *newSema;
279 register int i;
280
281 if (newSize <= seminfo.semmni)
282 return 0;
283 if (newSize > limitseminfo.semmni) /* enforce hard limit */
284 {
285 #ifdef SEM_DEBUG
286 printf("identifier hard limit of %d reached, requested %d\n",
287 limitseminfo.semmni, newSize);
288 #endif
289 return 0;
290 }
291 newSize = (newSize/SEMMNI_INC + 1) * SEMMNI_INC;
292 newSize = newSize > limitseminfo.semmni ? limitseminfo.semmni : newSize;
293
294 #ifdef SEM_DEBUG
295 printf("growing sema[] from %d to %d\n", seminfo.semmni, newSize);
296 #endif
297 MALLOC(newSema, struct user_semid_ds *,
298 sizeof (struct user_semid_ds) * newSize,
299 M_SYSVSEM, M_WAITOK | M_ZERO);
300 if (NULL == newSema)
301 {
302 #ifdef SEM_DEBUG
303 printf("allocation failed. no changes made.\n");
304 #endif
305 return 0;
306 }
307
308 /* copy over the old ids */
309 for (i = 0; i < seminfo.semmni; i++)
310 {
311 newSema[i] = sema[i];
312 /* This is a hack. What we really want to be able to
313 * do is change the value a process is waiting on
314 * without waking it up, but I don't know how to do
315 * this with the existing code, so we wake up the
316 * process and let it do a lot of work to determine the
317 * semaphore set is really not available yet, and then
318 * sleep on the correct, reallocated user_semid_ds pointer.
319 */
320 if (sema[i].sem_perm.mode & SEM_ALLOC)
321 wakeup((caddr_t)&sema[i]);
322 }
323 /*
324 * The new elements (from newSema[i] to newSema[newSize-1]) have their
325 * "sem_base" and "sem_perm.mode" set to 0 (i.e. NULL) by the M_ZERO
326 * flag to MALLOC() above, so they're already marked as "not in use".
327 */
328
329 /* Clean up the old array */
330 if (sema)
331 FREE(sema, M_SYSVSEM);
332
333 sema = newSema;
334 seminfo.semmni = newSize;
335 #ifdef SEM_DEBUG
336 printf("expansion successful\n");
337 #endif
338 return 1;
339 }
340
341 /*
342 * Expand the sem_pool array to the given capacity. If the expansion fails
343 * we return 0 (fail), otherwise we return 1 (success).
344 *
345 * Assumes we already hold the subsystem lock.
346 */
347 static int
348 grow_sem_pool(int new_pool_size)
349 {
350 struct sem *new_sem_pool = NULL;
351 struct sem *sem_free;
352 int i;
353
354 if (new_pool_size < semtot)
355 return 0;
356 /* enforce hard limit */
357 if (new_pool_size > limitseminfo.semmns) {
358 #ifdef SEM_DEBUG
359 printf("semaphore hard limit of %d reached, requested %d\n",
360 limitseminfo.semmns, new_pool_size);
361 #endif
362 return 0;
363 }
364
365 new_pool_size = (new_pool_size/SEMMNS_INC + 1) * SEMMNS_INC;
366 new_pool_size = new_pool_size > limitseminfo.semmns ? limitseminfo.semmns : new_pool_size;
367
368 #ifdef SEM_DEBUG
369 printf("growing sem_pool array from %d to %d\n", seminfo.semmns, new_pool_size);
370 #endif
371 MALLOC(new_sem_pool, struct sem *, sizeof (struct sem) * new_pool_size,
372 M_SYSVSEM, M_WAITOK | M_ZERO);
373 if (NULL == new_sem_pool) {
374 #ifdef SEM_DEBUG
375 printf("allocation failed. no changes made.\n");
376 #endif
377 return 0;
378 }
379
380 /* We have our new memory, now copy the old contents over */
381 if (sem_pool)
382 for(i = 0; i < seminfo.semmns; i++)
383 new_sem_pool[i] = sem_pool[i];
384
385 /* Update our id structures to point to the new semaphores */
386 for(i = 0; i < seminfo.semmni; i++) {
387 if (sema[i].sem_perm.mode & SEM_ALLOC) /* ID in use */
388 sema[i].sem_base += (new_sem_pool - sem_pool);
389 }
390
391 sem_free = sem_pool;
392 sem_pool = new_sem_pool;
393
394 /* clean up the old array */
395 if (sem_free != NULL)
396 FREE(sem_free, M_SYSVSEM);
397
398 seminfo.semmns = new_pool_size;
399 #ifdef SEM_DEBUG
400 printf("expansion complete\n");
401 #endif
402 return 1;
403 }
404
405 /*
406 * Allocate a new sem_undo structure for a process
407 * (returns ptr to structure or NULL if no more room)
408 *
409 * Assumes we already hold the subsystem lock.
410 */
411
412 static struct sem_undo *
413 semu_alloc(struct proc *p)
414 {
415 register int i;
416 register struct sem_undo *suptr;
417 register struct sem_undo **supptr;
418 int attempt;
419
420 /*
421 * Try twice to allocate something.
422 * (we'll purge any empty structures after the first pass so
423 * two passes are always enough)
424 */
425
426 for (attempt = 0; attempt < 2; attempt++) {
427 /*
428 * Look for a free structure.
429 * Fill it in and return it if we find one.
430 */
431
432 for (i = 0; i < seminfo.semmnu; i++) {
433 suptr = SEMU(i);
434 if (suptr->un_proc == NULL) {
435 suptr->un_next = semu_list;
436 semu_list = suptr;
437 suptr->un_cnt = 0;
438 suptr->un_ent = NULL;
439 suptr->un_proc = p;
440 return(suptr);
441 }
442 }
443
444 /*
445 * We didn't find a free one; if this is the first attempt,
446 * then try to free some structures.
447 */
448
449 if (attempt == 0) {
450 /* All the structures are in use - try to free some */
451 int did_something = 0;
452
453 supptr = &semu_list;
454 while ((suptr = *supptr) != NULL) {
455 if (suptr->un_cnt == 0) {
456 suptr->un_proc = NULL;
457 *supptr = suptr->un_next;
458 did_something = 1;
459 } else
460 supptr = &(suptr->un_next);
461 }
462
463 /* If we didn't free anything, try expanding
464 * the semu[] array. If that doesn't work,
465 * then fail. We expand last to get the
466 * most reuse out of existing resources.
467 */
468 if (!did_something)
469 if (!grow_semu_array(seminfo.semmnu + 1))
470 return(NULL);
471 } else {
472 /*
473 * The second pass failed even though we freed
474 * something after the first pass!
475 * This is IMPOSSIBLE!
476 */
477 panic("semu_alloc - second attempt failed");
478 }
479 }
480 return (NULL);
481 }
482
483 /*
484 * Adjust a particular entry for a particular proc
485 *
486 * Assumes we already hold the subsystem lock.
487 */
488 static int
489 semundo_adjust(struct proc *p, struct sem_undo **supptr, int semid,
490 int semnum, int adjval)
491 {
492 register struct sem_undo *suptr;
493 register struct undo *sueptr, **suepptr, *new_sueptr;
494 int i;
495
496 /*
497 * Look for and remember the sem_undo if the caller doesn't provide it
498 */
499
500 suptr = *supptr;
501 if (suptr == NULL) {
502 for (suptr = semu_list; suptr != NULL;
503 suptr = suptr->un_next) {
504 if (suptr->un_proc == p) {
505 *supptr = suptr;
506 break;
507 }
508 }
509 if (suptr == NULL) {
510 if (adjval == 0)
511 return(0);
512 suptr = semu_alloc(p);
513 if (suptr == NULL)
514 return(ENOSPC);
515 *supptr = suptr;
516 }
517 }
518
519 /*
520 * Look for the requested entry and adjust it (delete if adjval becomes
521 * 0).
522 */
523 new_sueptr = NULL;
524 for (i = 0, suepptr = &suptr->un_ent, sueptr = suptr->un_ent;
525 i < suptr->un_cnt;
526 i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) {
527 if (sueptr->une_id != semid || sueptr->une_num != semnum)
528 continue;
529 if (adjval == 0)
530 sueptr->une_adjval = 0;
531 else
532 sueptr->une_adjval += adjval;
533 if (sueptr->une_adjval == 0) {
534 suptr->un_cnt--;
535 *suepptr = sueptr->une_next;
536 FREE(sueptr, M_SYSVSEM);
537 sueptr = NULL;
538 }
539 return 0;
540 }
541
542 /* Didn't find the right entry - create it */
543 if (adjval == 0) {
544 /* no adjustment: no need for a new entry */
545 return 0;
546 }
547
548 if (suptr->un_cnt == limitseminfo.semume) {
549 /* reached the limit number of semaphore undo entries */
550 return EINVAL;
551 }
552
553 /* allocate a new semaphore undo entry */
554 MALLOC(new_sueptr, struct undo *, sizeof (struct undo),
555 M_SYSVSEM, M_WAITOK);
556 if (new_sueptr == NULL) {
557 return ENOMEM;
558 }
559
560 /* fill in the new semaphore undo entry */
561 new_sueptr->une_next = suptr->un_ent;
562 suptr->un_ent = new_sueptr;
563 suptr->un_cnt++;
564 new_sueptr->une_adjval = adjval;
565 new_sueptr->une_id = semid;
566 new_sueptr->une_num = semnum;
567
568 return 0;
569 }
570
571 /* Assumes we already hold the subsystem lock.
572 */
573 static void
574 semundo_clear(int semid, int semnum)
575 {
576 struct sem_undo *suptr;
577
578 for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next) {
579 struct undo *sueptr;
580 struct undo **suepptr;
581 int i = 0;
582
583 sueptr = suptr->un_ent;
584 suepptr = &suptr->un_ent;
585 while (i < suptr->un_cnt) {
586 if (sueptr->une_id == semid) {
587 if (semnum == -1 || sueptr->une_num == semnum) {
588 suptr->un_cnt--;
589 *suepptr = sueptr->une_next;
590 FREE(sueptr, M_SYSVSEM);
591 sueptr = *suepptr;
592 continue;
593 }
594 if (semnum != -1)
595 break;
596 }
597 i++;
598 suepptr = &sueptr->une_next;
599 sueptr = sueptr->une_next;
600 }
601 }
602 }
603
604 /*
605 * Note that the user-mode half of this passes a union coerced to a
606 * user_addr_t. The union contains either an int or a pointer, and
607 * so we have to coerce it back, depending on whether the calling
608 * process is 64 bit or not. The coercion works for the 'val' element
609 * because the alignment is the same in user and kernel space.
610 */
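/*
 * For illustration only (a sketch of the user-space side, not code in this
 * file): the caller typically builds the conventional 'union semun' from
 * <sys/sem.h> and passes it by value, which is what arrives here as a
 * user_addr_t.
 *
 *	union semun {
 *		int val;		// value for SETVAL
 *		struct semid_ds *buf;	// buffer for IPC_STAT, IPC_SET
 *		unsigned short *array;	// array for GETALL, SETALL
 *	} arg;
 *
 *	arg.val = 1;
 *	semctl(semid, 0, SETVAL, arg);		// 'val' is recovered via CAST_DOWN below
 *
 *	struct semid_ds ds;
 *	arg.buf = &ds;
 *	semctl(semid, 0, IPC_STAT, arg);	// 'buf' is used as a copyout target
 */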
611 int
612 semctl(struct proc *p, struct semctl_args *uap, register_t *retval)
613 {
614 int semid = uap->semid;
615 int semnum = uap->semnum;
616 int cmd = uap->cmd;
617 user_semun_t user_arg = (user_semun_t)uap->arg;
618 kauth_cred_t cred = kauth_cred_get();
619 int i, rval, eval;
620 struct user_semid_ds sbuf;
621 struct user_semid_ds *semaptr;
622 struct user_semid_ds uds;
623
624
625 AUDIT_ARG(svipc_cmd, cmd);
626 AUDIT_ARG(svipc_id, semid);
627
628 SYSV_SEM_SUBSYS_LOCK();
629
630 #ifdef SEM_DEBUG
631 printf("call to semctl(%d, %d, %d, 0x%qx)\n", semid, semnum, cmd, user_arg);
632 #endif
633
634 semid = IPCID_TO_IX(semid);
635
636 if (semid < 0 || semid >= seminfo.semmni) {
637 #ifdef SEM_DEBUG
638 printf("Invalid semid\n");
639 #endif
640 eval = EINVAL;
641 goto semctlout;
642 }
643
644 semaptr = &sema[semid];
645 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
646 semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) {
647 eval = EINVAL;
648 goto semctlout;
649 }
650
651 eval = 0;
652 rval = 0;
653
654 switch (cmd) {
655 case IPC_RMID:
656 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_M)))
657 goto semctlout;
658
659 semaptr->sem_perm.cuid = kauth_cred_getuid(cred);
660 semaptr->sem_perm.uid = kauth_cred_getuid(cred);
661 semtot -= semaptr->sem_nsems;
662 for (i = semaptr->sem_base - sem_pool; i < semtot; i++)
663 sem_pool[i] = sem_pool[i + semaptr->sem_nsems];
664 for (i = 0; i < seminfo.semmni; i++) {
665 if ((sema[i].sem_perm.mode & SEM_ALLOC) &&
666 sema[i].sem_base > semaptr->sem_base)
667 sema[i].sem_base -= semaptr->sem_nsems;
668 }
669 semaptr->sem_perm.mode = 0;
670 semundo_clear(semid, -1);
671 wakeup((caddr_t)semaptr);
672 break;
673
674 case IPC_SET:
675 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_M)))
676 goto semctlout;
677
678 if (IS_64BIT_PROCESS(p)) {
679 eval = copyin(user_arg.buf, &sbuf, sizeof(struct user_semid_ds));
680 } else {
681 eval = copyin(user_arg.buf, &sbuf, sizeof(struct semid_ds));
682 /* convert in place; ugly, but safe */
683 semid_ds_32to64((struct semid_ds *)&sbuf, &sbuf);
684 }
685
686 if (eval != 0) {
687 goto semctlout;
688 }
689
690 semaptr->sem_perm.uid = sbuf.sem_perm.uid;
691 semaptr->sem_perm.gid = sbuf.sem_perm.gid;
692 semaptr->sem_perm.mode = (semaptr->sem_perm.mode & ~0777) |
693 (sbuf.sem_perm.mode & 0777);
694 semaptr->sem_ctime = sysv_semtime();
695 break;
696
697 case IPC_STAT:
698 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
699 goto semctlout;
700 bcopy(semaptr, &uds, sizeof(struct user_semid_ds));
701 if (IS_64BIT_PROCESS(p)) {
702 eval = copyout(&uds, user_arg.buf, sizeof(struct user_semid_ds));
703 } else {
704 struct semid_ds semid_ds32;
705 semid_ds_64to32(&uds, &semid_ds32);
706 eval = copyout(&semid_ds32, user_arg.buf, sizeof(struct semid_ds));
707 }
708 break;
709
710 case GETNCNT:
711 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
712 goto semctlout;
713 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
714 eval = EINVAL;
715 goto semctlout;
716 }
717 rval = semaptr->sem_base[semnum].semncnt;
718 break;
719
720 case GETPID:
721 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
722 goto semctlout;
723 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
724 eval = EINVAL;
725 goto semctlout;
726 }
727 rval = semaptr->sem_base[semnum].sempid;
728 break;
729
730 case GETVAL:
731 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
732 goto semctlout;
733 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
734 eval = EINVAL;
735 goto semctlout;
736 }
737 rval = semaptr->sem_base[semnum].semval;
738 break;
739
740 case GETALL:
741 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
742 goto semctlout;
743 /* XXXXXXXXXXXXXXXX TBD XXXXXXXXXXXXXXXX */
744 for (i = 0; i < semaptr->sem_nsems; i++) {
745 /* XXX could be done in one go... */
746 eval = copyout((caddr_t)&semaptr->sem_base[i].semval,
747 user_arg.array + (i * sizeof(unsigned short)),
748 sizeof(unsigned short));
749 if (eval != 0)
750 break;
751 }
752 break;
753
754 case GETZCNT:
755 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
756 goto semctlout;
757 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
758 eval = EINVAL;
759 goto semctlout;
760 }
761 rval = semaptr->sem_base[semnum].semzcnt;
762 break;
763
764 case SETVAL:
765 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
766 {
767 #ifdef SEM_DEBUG
768 printf("Invalid credentials for write\n");
769 #endif
770 goto semctlout;
771 }
772 if (semnum < 0 || semnum >= semaptr->sem_nsems)
773 {
774 #ifdef SEM_DEBUG
775 printf("Invalid number out of range for set\n");
776 #endif
777 eval = EINVAL;
778 goto semctlout;
779 }
780 /*
781 * Cast down a pointer instead of using 'val' member directly
782 * to avoid introducing endianness issues and a pad field into the
783 * header file. Ugly, but it works.
784 */
785 semaptr->sem_base[semnum].semval = CAST_DOWN(int,user_arg.buf);
786 semundo_clear(semid, semnum);
787 wakeup((caddr_t)semaptr);
788 break;
789
790 case SETALL:
791 if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
792 goto semctlout;
793 /*** XXXXXXXXXXXX TBD ********/
794 for (i = 0; i < semaptr->sem_nsems; i++) {
795 /* XXX could be done in one go... */
796 eval = copyin(user_arg.array + (i * sizeof(unsigned short)),
797 (caddr_t)&semaptr->sem_base[i].semval,
798 sizeof(unsigned short));
799 if (eval != 0)
800 break;
801 }
802 semundo_clear(semid, -1);
803 wakeup((caddr_t)semaptr);
804 break;
805
806 default:
807 eval = EINVAL;
808 goto semctlout;
809 }
810
811 if (eval == 0)
812 *retval = rval;
813 semctlout:
814 SYSV_SEM_SUBSYS_UNLOCK();
815 return(eval);
816 }
817
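/*
 * Illustrative user-space usage this routine serves (a sketch, not part of
 * the kernel interface; error handling omitted):
 *
 *	// private set of 3 semaphores, owner read/write
 *	int id = semget(IPC_PRIVATE, 3, IPC_CREAT | 0600);
 *
 *	// look up (or create) a set by key; with IPC_EXCL as well,
 *	// an existing set makes this fail with EEXIST, as coded below
 *	int id2 = semget((key_t)0x5350, 1, IPC_CREAT | 0666);
 */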
818 int
819 semget(__unused struct proc *p, struct semget_args *uap, register_t *retval)
820 {
821 int semid, eval;
822 int key = uap->key;
823 int nsems = uap->nsems;
824 int semflg = uap->semflg;
825 kauth_cred_t cred = kauth_cred_get();
826
827 #ifdef SEM_DEBUG
828 if (key != IPC_PRIVATE)
829 printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
830 else
831 printf("semget(IPC_PRIVATE, %d, 0%o)\n", nsems, semflg);
832 #endif
833
834
835 SYSV_SEM_SUBSYS_LOCK();
836
837
838 if (key != IPC_PRIVATE) {
839 for (semid = 0; semid < seminfo.semmni; semid++) {
840 if ((sema[semid].sem_perm.mode & SEM_ALLOC) &&
841 sema[semid].sem_perm.key == key)
842 break;
843 }
844 if (semid < seminfo.semmni) {
845 #ifdef SEM_DEBUG
846 printf("found public key\n");
847 #endif
848 if ((eval = ipcperm(cred, &sema[semid].sem_perm,
849 semflg & 0700)))
850 goto semgetout;
851 if (nsems < 0 || sema[semid].sem_nsems < nsems) {
852 #ifdef SEM_DEBUG
853 printf("too small\n");
854 #endif
855 eval = EINVAL;
856 goto semgetout;
857 }
858 if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
859 #ifdef SEM_DEBUG
860 printf("not exclusive\n");
861 #endif
862 eval = EEXIST;
863 goto semgetout;
864 }
865 goto found;
866 }
867 }
868
869 #ifdef SEM_DEBUG
870 printf("need to allocate an id for the request\n");
871 #endif
872 if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
873 if (nsems <= 0 || nsems > limitseminfo.semmsl) {
874 #ifdef SEM_DEBUG
875 printf("nsems out of range (0<%d<=%d)\n", nsems,
876 limitseminfo.semmsl);
877 #endif
878 eval = EINVAL;
879 goto semgetout;
880 }
881 if (nsems > seminfo.semmns - semtot) {
882 #ifdef SEM_DEBUG
883 printf("not enough semaphores left (need %d, got %d)\n",
884 nsems, seminfo.semmns - semtot);
885 #endif
886 if (!grow_sem_pool(semtot + nsems)) {
887 #ifdef SEM_DEBUG
888 printf("failed to grow the sem array\n");
889 #endif
890 eval = ENOSPC;
891 goto semgetout;
892 }
893 }
894 for (semid = 0; semid < seminfo.semmni; semid++) {
895 if ((sema[semid].sem_perm.mode & SEM_ALLOC) == 0)
896 break;
897 }
898 if (semid == seminfo.semmni) {
899 #ifdef SEM_DEBUG
900 printf("no more id's available\n");
901 #endif
902 if (!grow_sema_array(seminfo.semmni + 1))
903 {
904 #ifdef SEM_DEBUG
905 printf("failed to grow sema array\n");
906 #endif
907 eval = ENOSPC;
908 goto semgetout;
909 }
910 }
911 #ifdef SEM_DEBUG
912 printf("semid %d is available\n", semid);
913 #endif
914 sema[semid].sem_perm.key = key;
915 sema[semid].sem_perm.cuid = kauth_cred_getuid(cred);
916 sema[semid].sem_perm.uid = kauth_cred_getuid(cred);
917 sema[semid].sem_perm.cgid = cred->cr_gid;
918 sema[semid].sem_perm.gid = cred->cr_gid;
919 sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
920 sema[semid].sem_perm.seq =
921 (sema[semid].sem_perm.seq + 1) & 0x7fff;
922 sema[semid].sem_nsems = nsems;
923 sema[semid].sem_otime = 0;
924 sema[semid].sem_ctime = sysv_semtime();
925 sema[semid].sem_base = &sem_pool[semtot];
926 semtot += nsems;
927 bzero(sema[semid].sem_base,
928 sizeof(sema[semid].sem_base[0])*nsems);
929 #ifdef SEM_DEBUG
930 printf("sembase = 0x%x, next = 0x%x\n", sema[semid].sem_base,
931 &sem_pool[semtot]);
932 #endif
933 } else {
934 #ifdef SEM_DEBUG
935 printf("didn't find it and wasn't asked to create it\n");
936 #endif
937 eval = ENOENT;
938 goto semgetout;
939 }
940
941 found:
942 *retval = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm);
943 AUDIT_ARG(svipc_id, *retval);
944 #ifdef SEM_DEBUG
945 printf("semget is done, returning %d\n", *retval);
946 #endif
947 eval = 0;
948
949 semgetout:
950 SYSV_SEM_SUBSYS_UNLOCK();
951 return(eval);
952 }
953
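/*
 * Illustrative user-space usage (a sketch; not part of this file): the
 * vector below either applies completely or the caller sleeps (or gets
 * EAGAIN with IPC_NOWAIT), which is the rollback-and-retry behaviour
 * implemented in the loop further down.
 *
 *	struct sembuf ops[2];
 *	ops[0].sem_num = 0; ops[0].sem_op = -1; ops[0].sem_flg = SEM_UNDO;
 *	ops[1].sem_num = 1; ops[1].sem_op =  1; ops[1].sem_flg = 0;
 *	(void)semop(id, ops, 2);
 */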
954 int
955 semop(struct proc *p, struct semop_args *uap, register_t *retval)
956 {
957 int semid = uap->semid;
958 int nsops = uap->nsops;
959 struct sembuf sops[MAX_SOPS];
960 register struct user_semid_ds *semaptr;
961 register struct sembuf *sopptr = NULL; /* protected by 'semptr' */
962 register struct sem *semptr = NULL; /* protected by 'if' */
963 struct sem_undo *suptr = NULL;
964 int i, j, eval;
965 int do_wakeup, do_undos;
966
967 AUDIT_ARG(svipc_id, uap->semid);
968
969 SYSV_SEM_SUBSYS_LOCK();
970
971 #ifdef SEM_DEBUG
972 printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops);
973 #endif
974
975 semid = IPCID_TO_IX(semid); /* Convert back to zero origin */
976
977 if (semid < 0 || semid >= seminfo.semmni) {
978 eval = EINVAL;
979 goto semopout;
980 }
981
982 semaptr = &sema[semid];
983 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0) {
984 eval = EINVAL;
985 goto semopout;
986 }
987 if (semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) {
988 eval = EINVAL;
989 goto semopout;
990 }
991
992 if ((eval = ipcperm(kauth_cred_get(), &semaptr->sem_perm, IPC_W))) {
993 #ifdef SEM_DEBUG
994 printf("eval = %d from ipaccess\n", eval);
995 #endif
996 goto semopout;
997 }
998
999 if (nsops < 0 || nsops > MAX_SOPS) {
1000 #ifdef SEM_DEBUG
1001 printf("too many sops (max=%d, nsops=%d)\n", MAX_SOPS, nsops);
1002 #endif
1003 eval = E2BIG;
1004 goto semopout;
1005 }
1006
1007 /* OK for LP64, since sizeof(struct sembuf) is currently invariant */
1008 if ((eval = copyin(uap->sops, &sops, nsops * sizeof(struct sembuf))) != 0) {
1009 #ifdef SEM_DEBUG
1010 printf("eval = %d from copyin(%08x, %08x, %ld)\n", eval,
1011 uap->sops, &sops, nsops * sizeof(struct sembuf));
1012 #endif
1013 goto semopout;
1014 }
1015
1016 /*
1017 * Loop trying to satisfy the vector of requests.
1018 * If we reach a point where we must wait, any requests already
1019 * performed are rolled back and we go to sleep until some other
1020 * process wakes us up. At this point, we start all over again.
1021 *
1022 * This ensures that from the perspective of other tasks, a set
1023 * of requests is atomic (never partially satisfied).
1024 */
1025 do_undos = 0;
1026
1027 for (;;) {
1028 do_wakeup = 0;
1029
1030 for (i = 0; i < nsops; i++) {
1031 sopptr = &sops[i];
1032
1033 if (sopptr->sem_num >= semaptr->sem_nsems) {
1034 eval = EFBIG;
1035 goto semopout;
1036 }
1037
1038 semptr = &semaptr->sem_base[sopptr->sem_num];
1039
1040 #ifdef SEM_DEBUG
1041 printf("semop: semaptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n",
1042 semaptr, semaptr->sem_base, semptr,
1043 sopptr->sem_num, semptr->semval, sopptr->sem_op,
1044 (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
1045 #endif
1046
1047 if (sopptr->sem_op < 0) {
1048 if (semptr->semval + sopptr->sem_op < 0) {
1049 #ifdef SEM_DEBUG
1050 printf("semop: can't do it now\n");
1051 #endif
1052 break;
1053 } else {
1054 semptr->semval += sopptr->sem_op;
1055 if (semptr->semval == 0 &&
1056 semptr->semzcnt > 0)
1057 do_wakeup = 1;
1058 }
1059 if (sopptr->sem_flg & SEM_UNDO)
1060 do_undos = 1;
1061 } else if (sopptr->sem_op == 0) {
1062 if (semptr->semval > 0) {
1063 #ifdef SEM_DEBUG
1064 printf("semop: not zero now\n");
1065 #endif
1066 break;
1067 }
1068 } else {
1069 if (semptr->semncnt > 0)
1070 do_wakeup = 1;
1071 semptr->semval += sopptr->sem_op;
1072 if (sopptr->sem_flg & SEM_UNDO)
1073 do_undos = 1;
1074 }
1075 }
1076
1077 /*
1078 * Did we get through the entire vector?
1079 */
1080 if (i >= nsops)
1081 goto done;
1082
1083 /*
1084 * No ... rollback anything that we've already done
1085 */
1086 #ifdef SEM_DEBUG
1087 printf("semop: rollback 0 through %d\n", i-1);
1088 #endif
1089 for (j = 0; j < i; j++)
1090 semaptr->sem_base[sops[j].sem_num].semval -=
1091 sops[j].sem_op;
1092
1093 /*
1094 * If the request that we couldn't satisfy has the
1095 * NOWAIT flag set then return with EAGAIN.
1096 */
1097 if (sopptr->sem_flg & IPC_NOWAIT) {
1098 eval = EAGAIN;
1099 goto semopout;
1100 }
1101
1102 if (sopptr->sem_op == 0)
1103 semptr->semzcnt++;
1104 else
1105 semptr->semncnt++;
1106
1107 #ifdef SEM_DEBUG
1108 printf("semop: good night!\n");
1109 #endif
1110 /* Release our lock on the semaphore subsystem so
1111 * another thread can get at the semaphore we are
1112 * waiting for. We will get the lock back after we
1113 * wake up.
1114 */
1115 eval = msleep((caddr_t)semaptr, &sysv_sem_subsys_mutex , (PZERO - 4) | PCATCH,
1116 "semwait", 0);
1117
1118 #ifdef SEM_DEBUG
1119 printf("semop: good morning (eval=%d)!\n", eval);
1120 #endif
1121 if (eval != 0) {
1122 eval = EINTR;
1123 }
1124
1125 /*
1126 * IMPORTANT: while we were asleep, the semaphore array might
1127 * have been reallocated somewhere else (see grow_sema_array()).
1128 * When we wake up, we have to re-lookup the semaphore
1129 * structures and re-validate them.
1130 */
1131
1132 suptr = NULL; /* sem_undo may have been reallocated */
1133 semaptr = &sema[semid]; /* sema may have been reallocated */
1134
1135 /*
1136 * Make sure that the semaphore still exists
1137 */
1138 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
1139 semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid) ||
1140 sopptr->sem_num >= semaptr->sem_nsems) {
1141 if (eval == EINTR) {
1142 /*
1143 * EINTR takes precedence over the fact that
1144 * the semaphore disappeared while we were
1145 * sleeping...
1146 */
1147 } else {
1148 /*
1149 * The man page says to return EIDRM.
1150 * Unfortunately, BSD doesn't define that code!
1151 */
1152 #ifdef EIDRM
1153 eval = EIDRM;
1154 #else
1155 eval = EINVAL;
1156 #endif
1157 }
1158 goto semopout;
1159 }
1160
1161 /*
1162 * The semaphore is still alive. Readjust the count of
1163 * waiting processes. semptr needs to be recomputed
1164 * because the sem[] may have been reallocated while
1165 * we were sleeping, updating our sem_base pointer.
1166 */
1167 semptr = &semaptr->sem_base[sopptr->sem_num];
1168 if (sopptr->sem_op == 0)
1169 semptr->semzcnt--;
1170 else
1171 semptr->semncnt--;
1172
1173 if (eval != 0) { /* EINTR */
1174 goto semopout;
1175 }
1176 }
1177
1178 done:
1179 /*
1180 * Process any SEM_UNDO requests.
1181 */
1182 if (do_undos) {
1183 for (i = 0; i < nsops; i++) {
1184 /*
1185 * We only need to deal with SEM_UNDO's for non-zero
1186 * op's.
1187 */
1188 int adjval;
1189
1190 if ((sops[i].sem_flg & SEM_UNDO) == 0)
1191 continue;
1192 adjval = sops[i].sem_op;
1193 if (adjval == 0)
1194 continue;
1195 eval = semundo_adjust(p, &suptr, semid,
1196 sops[i].sem_num, -adjval);
1197 if (eval == 0)
1198 continue;
1199
1200 /*
1201 * Uh-oh! We ran out of either sem_undo's or undo's.
1202 * Roll back the adjustments to this point and then
1203 * roll back the semaphore ups and downs so we can return
1204 * an error with all structures restored. We roll back
1205 * the undo's in the exact reverse order that
1206 * we applied them. This guarantees that we won't run
1207 * out of space as we roll things back out.
1208 */
1209 for (j = i - 1; j >= 0; j--) {
1210 if ((sops[j].sem_flg & SEM_UNDO) == 0)
1211 continue;
1212 adjval = sops[j].sem_op;
1213 if (adjval == 0)
1214 continue;
1215 if (semundo_adjust(p, &suptr, semid,
1216 sops[j].sem_num, adjval) != 0)
1217 panic("semop - can't undo undos");
1218 }
1219
1220 for (j = 0; j < nsops; j++)
1221 semaptr->sem_base[sops[j].sem_num].semval -=
1222 sops[j].sem_op;
1223
1224 #ifdef SEM_DEBUG
1225 printf("eval = %d from semundo_adjust\n", eval);
1226 #endif
1227 goto semopout;
1228 } /* loop through the sops */
1229 } /* if (do_undos) */
1230
1231 /* We're definitely done - set the sempid's */
1232 for (i = 0; i < nsops; i++) {
1233 sopptr = &sops[i];
1234 semptr = &semaptr->sem_base[sopptr->sem_num];
1235 semptr->sempid = p->p_pid;
1236 }
1237
1238 if (do_wakeup) {
1239 #ifdef SEM_DEBUG
1240 printf("semop: doing wakeup\n");
1241 #ifdef SEM_WAKEUP
1242 sem_wakeup((caddr_t)semaptr);
1243 #else
1244 wakeup((caddr_t)semaptr);
1245 #endif
1246 printf("semop: back from wakeup\n");
1247 #else
1248 wakeup((caddr_t)semaptr);
1249 #endif
1250 }
1251 #ifdef SEM_DEBUG
1252 printf("semop: done\n");
1253 #endif
1254 *retval = 0;
1255 eval = 0;
1256 semopout:
1257 SYSV_SEM_SUBSYS_UNLOCK();
1258 return(eval);
1259 }
1260
1261 /*
1262 * Go through the undo structures for this process and apply the adjustments to
1263 * semaphores.
1264 */
1265 void
1266 semexit(struct proc *p)
1267 {
1268 register struct sem_undo *suptr;
1269 register struct sem_undo **supptr;
1270 int did_something;
1271
1272 /* If we have not allocated our semaphores yet there can't be
1273 * anything to undo, but we need the lock to prevent
1274 * dynamic memory race conditions.
1275 */
1276 SYSV_SEM_SUBSYS_LOCK();
1277
1278 if (!sem_pool)
1279 {
1280 SYSV_SEM_SUBSYS_UNLOCK();
1281 return;
1282 }
1283 did_something = 0;
1284
1285 /*
1286 * Go through the chain of undo vectors looking for one
1287 * associated with this process.
1288 */
1289
1290 for (supptr = &semu_list; (suptr = *supptr) != NULL;
1291 supptr = &suptr->un_next) {
1292 if (suptr->un_proc == p)
1293 break;
1294 }
1295
1296 if (suptr == NULL)
1297 goto unlock;
1298
1299 #ifdef SEM_DEBUG
1300 printf("proc @%08x has undo structure with %d entries\n", p,
1301 suptr->un_cnt);
1302 #endif
1303
1304 /*
1305 * If there are any active undo elements then process them.
1306 */
1307 if (suptr->un_cnt > 0) {
1308 while (suptr->un_ent != NULL) {
1309 struct undo *sueptr;
1310 int semid;
1311 int semnum;
1312 int adjval;
1313 struct user_semid_ds *semaptr;
1314
1315 sueptr = suptr->un_ent;
1316 semid = sueptr->une_id;
1317 semnum = sueptr->une_num;
1318 adjval = sueptr->une_adjval;
1319
1320 semaptr = &sema[semid];
1321 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
1322 panic("semexit - semid not allocated");
1323 if (semnum >= semaptr->sem_nsems)
1324 panic("semexit - semnum out of range");
1325
1326 #ifdef SEM_DEBUG
1327 printf("semexit: %08x id=%d num=%d(adj=%d) ; sem=%d\n",
1328 suptr->un_proc,
1329 semid,
1330 semnum,
1331 adjval,
1332 semaptr->sem_base[semnum].semval);
1333 #endif
1334
1335 if (adjval < 0) {
1336 if (semaptr->sem_base[semnum].semval < -adjval)
1337 semaptr->sem_base[semnum].semval = 0;
1338 else
1339 semaptr->sem_base[semnum].semval +=
1340 adjval;
1341 } else
1342 semaptr->sem_base[semnum].semval += adjval;
1343
1344 /* Maybe we should build a list of semaptr's to wake
1345 * up, finish all access to data structures, release the
1346 * subsystem lock, and wake all the processes. Something
1347 * to think about. It wouldn't buy us anything unless
1348 * wakeup had the potential to block, or the syscall
1349 * funnel state was changed to allow multiple threads
1350 * in the BSD code at once.
1351 */
1352 #ifdef SEM_WAKEUP
1353 sem_wakeup((caddr_t)semaptr);
1354 #else
1355 wakeup((caddr_t)semaptr);
1356 #endif
1357 #ifdef SEM_DEBUG
1358 printf("semexit: back from wakeup\n");
1359 #endif
1360 suptr->un_cnt--;
1361 suptr->un_ent = sueptr->une_next;
1362 FREE(sueptr, M_SYSVSEM);
1363 sueptr = NULL;
1364 }
1365 }
1366
1367 /*
1368 * Deallocate the undo vector.
1369 */
1370 #ifdef SEM_DEBUG
1371 printf("removing vector\n");
1372 #endif
1373 suptr->un_proc = NULL;
1374 *supptr = suptr->un_next;
1375
1376 unlock:
1377 /*
1378 * There is a semaphore leak (i.e. memory leak) in this code.
1379 * We should be deleting the IPC_PRIVATE semaphores when they are
1380 * no longer needed, and we don't. We would have to track which processes
1381 * know about which IPC_PRIVATE semaphores, updating the list after
1382 * every fork. We can't just delete the semaphore when the process
1383 * that created it dies, because that process may well have forked
1384 * some children. So we need to wait until all of its children have
1385 * died, and so on. Maybe we should tag each IPC_PRIVATE semaphore
1386 * with the creating group ID, count the number of processes left in
1387 * that group, and delete the semaphore when the group is gone.
1388 * Until that code gets implemented we will leak IPC_PRIVATE semaphores.
1389 * There is an upper bound on the size of our semaphore array, so
1390 * leaking the semaphores should not work as a DOS attack.
1391 *
1392 * Please note that the original BSD code this file is based on had the
1393 * same leaky semaphore problem.
1394 */
1395
1396 SYSV_SEM_SUBSYS_UNLOCK();
1397 }
1398
1399
1400 /* (struct sysctl_oid *oidp, void *arg1, int arg2, \
1401 struct sysctl_req *req) */
1402 static int
1403 sysctl_seminfo(__unused struct sysctl_oid *oidp, void *arg1,
1404 __unused int arg2, struct sysctl_req *req)
1405 {
1406 int error = 0;
1407
1408 error = SYSCTL_OUT(req, arg1, sizeof(int));
1409 if (error || req->newptr == USER_ADDR_NULL)
1410 return(error);
1411
1412 SYSV_SEM_SUBSYS_LOCK();
1413
1414 /* Set the values only if the semaphore subsystem is not initialised */
1415 if ((sem_pool == NULL) &&
1416 (sema == NULL) &&
1417 (semu == NULL) &&
1418 (semu_list == NULL)) {
1419 if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) {
1420 goto out;
1421 }
1422 } else
1423 error = EINVAL;
1424 out:
1425 SYSV_SEM_SUBSYS_UNLOCK();
1426 return(error);
1427
1428 }
1429
1430 /* SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV"); */
1431 extern struct sysctl_oid_list sysctl__kern_sysv_children;
1432 SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNI, semmni, CTLTYPE_INT | CTLFLAG_RW,
1433 &limitseminfo.semmni, 0, &sysctl_seminfo ,"I","semmni");
1434
1435 SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNS, semmns, CTLTYPE_INT | CTLFLAG_RW,
1436 &limitseminfo.semmns, 0, &sysctl_seminfo ,"I","semmns");
1437
1438 SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNU, semmnu, CTLTYPE_INT | CTLFLAG_RW,
1439 &limitseminfo.semmnu, 0, &sysctl_seminfo ,"I","semmnu");
1440
1441 SYSCTL_PROC(_kern_sysv, KSYSV_SEMMSL, semmsl, CTLTYPE_INT | CTLFLAG_RW,
1442 &limitseminfo.semmsl, 0, &sysctl_seminfo ,"I","semmsl");
1443
1444 SYSCTL_PROC(_kern_sysv, KSYSV_SEMUNE, semume, CTLTYPE_INT | CTLFLAG_RW,
1445 &limitseminfo.semume, 0, &sysctl_seminfo ,"I","semume");
1446
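/*
 * These limits appear in user space as kern.sysv.sem*; per sysctl_seminfo()
 * above they can only be changed before the subsystem allocates its arrays,
 * e.g. (assuming the usual sysctl(8) syntax):
 *
 *	sysctl -w kern.sysv.semmni=128
 */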
1447
1448 static int
1449 IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
1450 __unused int arg2, struct sysctl_req *req)
1451 {
1452 int error;
1453 int cursor;
1454 union {
1455 struct IPCS_command u32;
1456 struct user_IPCS_command u64;
1457 } ipcs;
1458 struct semid_ds semid_ds32; /* post conversion, 32 bit version */
1459 void *semid_dsp;
1460 size_t ipcs_sz = sizeof(struct user_IPCS_command);
1461 size_t semid_ds_sz = sizeof(struct user_semid_ds);
1462 struct proc *p = current_proc();
1463
1464 /* Copy in the command structure */
1465 if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
1466 return(error);
1467 }
1468
1469 if (!IS_64BIT_PROCESS(p)) {
1470 ipcs_sz = sizeof(struct IPCS_command);
1471 semid_ds_sz = sizeof(struct semid_ds);
1472 }
1473
1474 /* Let us version this interface... */
1475 if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
1476 return(EINVAL);
1477 }
1478
1479 SYSV_SEM_SUBSYS_LOCK();
1480 switch(ipcs.u64.ipcs_op) {
1481 case IPCS_SEM_CONF: /* Obtain global configuration data */
1482 if (ipcs.u64.ipcs_datalen != sizeof(struct seminfo)) {
1483 error = ERANGE;
1484 break;
1485 }
1486 if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */
1487 error = EINVAL;
1488 break;
1489 }
1490 error = copyout(&seminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
1491 break;
1492
1493 case IPCS_SEM_ITER: /* Iterate over existing semaphore sets */
1494 cursor = ipcs.u64.ipcs_cursor;
1495 if (cursor < 0 || cursor >= seminfo.semmni) {
1496 error = ERANGE;
1497 break;
1498 }
1499 if (ipcs.u64.ipcs_datalen != (int)semid_ds_sz ) {
1500 error = EINVAL;
1501 break;
1502 }
1503 for( ; cursor < seminfo.semmni; cursor++) {
1504 if (sema[cursor].sem_perm.mode & SEM_ALLOC)
1505 break;
1506 continue;
1507 }
1508 if (cursor == seminfo.semmni) {
1509 error = ENOENT;
1510 break;
1511 }
1512
1513 semid_dsp = &sema[cursor]; /* default: 64 bit */
1514
1515 /*
1516 * If necessary, convert the 64 bit kernel semaphore id
1517 * structure to a 32 bit user one.
1518 */
1519 if (!IS_64BIT_PROCESS(p)) {
1520 semid_ds_64to32(semid_dsp, &semid_ds32);
1521 semid_dsp = &semid_ds32;
1522 }
1523 error = copyout(semid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
1524 if (!error) {
1525 /* update cursor */
1526 ipcs.u64.ipcs_cursor = cursor + 1;
1527 error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
1528 }
1529 break;
1530
1531 default:
1532 error = EINVAL;
1533 break;
1534 }
1535 SYSV_SEM_SUBSYS_UNLOCK();
1536 return(error);
1537 }
1538
1539 SYSCTL_DECL(_kern_sysv_ipcs);
1540 SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW|CTLFLAG_ANYBODY,
1541 0, 0, IPCS_sem_sysctl,
1542 "S,IPCS_sem_command",
1543 "ipcs sem command interface");