apple/xnu (xnu-7195.101.1): bsd/kern/sysv_sem.c
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Implementation of SVID semaphores
30 *
31 * Author: Daniel Boulet
32 *
33 * This software is provided ``AS IS'' without any warranties of any kind.
34 */
35 /*
36 * John Bellardo modified the implementation for Darwin. 12/2000
37 */
38 /*
39 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
40 * support for mandatory and extensible security protections. This notice
41 * is included in support of clause 2.2 (b) of the Apple Public License,
42 * Version 2.0.
43 * Copyright (c) 2005-2006 SPARTA, Inc.
44 */
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/proc_internal.h>
50 #include <sys/kauth.h>
51 #include <sys/sem_internal.h>
52 #include <sys/malloc.h>
53 #include <mach/mach_types.h>
54
55 #include <sys/filedesc.h>
56 #include <sys/file_internal.h>
57 #include <sys/sysctl.h>
58 #include <sys/ipcs.h>
59 #include <sys/sysent.h>
60 #include <sys/sysproto.h>
61 #if CONFIG_MACF
62 #include <security/mac_framework.h>
63 #endif
64
65 #include <security/audit/audit.h>
66
67 #if SYSV_SEM
68
69
70 /* Uncomment this line to see the debugging output */
71 /* #define SEM_DEBUG */
72
73 /* Uncomment this line to see MAC debugging output. */
74 /* #define MAC_DEBUG */
75 #if CONFIG_MACF_DEBUG
76 #define MPRINTF(a) printf(a)
77 #else
78 #define MPRINTF(a)
79 #endif
80
81 #define KM_SYSVSEM KHEAP_DEFAULT
82
83
84 /* Hard system limits to avoid resource starvation / DOS attacks.
85 * These are not needed if we can make the semaphore pages swappable.
86 */
87 static struct seminfo limitseminfo = {
88 .semmap = SEMMAP, /* # of entries in semaphore map */
89 .semmni = SEMMNI, /* # of semaphore identifiers */
90 .semmns = SEMMNS, /* # of semaphores in system */
91 .semmnu = SEMMNU, /* # of undo structures in system */
92 .semmsl = SEMMSL, /* max # of semaphores per id */
93 .semopm = SEMOPM, /* max # of operations per semop call */
94 .semume = SEMUME, /* max # of undo entries per process */
95 .semusz = SEMUSZ, /* size in bytes of undo structure */
96 .semvmx = SEMVMX, /* semaphore maximum value */
97 .semaem = SEMAEM /* adjust on exit max value */
98 };
99
100 /* Current system allocations. We use this structure to track how many
101 * resources we have allocated so far. This way we can set large hard limits
102 * and not allocate the memory for them up front.
103 */
104 struct seminfo seminfo = {
105 .semmap = SEMMAP, /* Unused, # of entries in semaphore map */
106 .semmni = 0, /* # of semaphore identifiers */
107 .semmns = 0, /* # of semaphores in system */
108 .semmnu = 0, /* # of undo entries in system */
109 .semmsl = SEMMSL, /* max # of semaphores per id */
110 .semopm = SEMOPM, /* max # of operations per semop call */
111 .semume = SEMUME, /* max # of undo entries per process */
112 .semusz = SEMUSZ, /* size in bytes of undo structure */
113 .semvmx = SEMVMX, /* semaphore maximum value */
114 .semaem = SEMAEM /* adjust on exit max value */
115 };
116
117
118 static int semu_alloc(struct proc *p);
119 static int semundo_adjust(struct proc *p, int *supidx,
120 int semid, int semnum, int adjval);
121 static void semundo_clear(int semid, int semnum);
122
123 /* XXX casting to (sy_call_t *) is bogus, as usual. */
124 static sy_call_t* const semcalls[] = {
125 (sy_call_t *)semctl, (sy_call_t *)semget,
126 (sy_call_t *)semop
127 };
128
129 static int semtot = 0; /* # of used semaphores */
130 struct semid_kernel *sema = NULL; /* semaphore id pool */
131 struct sem *sem_pool = NULL; /* semaphore pool */
132 static int semu_list_idx = -1; /* active undo structures */
133 struct sem_undo *semu = NULL; /* semaphore undo pool */
134
135
136 static LCK_GRP_DECLARE(sysv_sem_subsys_lck_grp, "sysv_sem_subsys_lock");
137 static LCK_MTX_DECLARE(sysv_sem_subsys_mutex, &sysv_sem_subsys_lck_grp);
138
139 #define SYSV_SEM_SUBSYS_LOCK() lck_mtx_lock(&sysv_sem_subsys_mutex)
140 #define SYSV_SEM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_sem_subsys_mutex)
141
142 static __inline__ user_time_t
143 sysv_semtime(void)
144 {
145 struct timeval tv;
146 microtime(&tv);
147 return tv.tv_sec;
148 }
149
150 /*
151 * XXX conversion of internal user_time_t to external time_t loses
152 * XXX precision; not an issue for us now, since we are only ever
153 * XXX setting 32 bits worth of time into it.
154 *
155 * pad field contents are not moved correspondingly; contents will be lost
156 *
157 * NOTE: Source and target may *NOT* overlap! (target is smaller)
158 */
159 static void
160 semid_ds_kernelto32(struct user_semid_ds *in, struct user32_semid_ds *out)
161 {
162 out->sem_perm = in->sem_perm;
163 out->sem_base = CAST_DOWN_EXPLICIT(__int32_t, in->sem_base);
164 out->sem_nsems = in->sem_nsems;
165 out->sem_otime = in->sem_otime; /* XXX loses precision */
166 out->sem_ctime = in->sem_ctime; /* XXX loses precision */
167 }
168
169 static void
170 semid_ds_kernelto64(struct user_semid_ds *in, struct user64_semid_ds *out)
171 {
172 out->sem_perm = in->sem_perm;
173 out->sem_base = CAST_DOWN_EXPLICIT(__int32_t, in->sem_base);
174 out->sem_nsems = in->sem_nsems;
175 out->sem_otime = in->sem_otime; /* XXX loses precision */
176 out->sem_ctime = in->sem_ctime; /* XXX loses precision */
177 }
178
179 /*
180 * pad field contents are not moved correspondingly; contents will be lost
181 *
182 * NOTE: Source and target are permitted to overlap! (source is smaller);
183 * this works because we copy fields in order from the end of the struct to
184 * the beginning.
185 *
186 * XXX use CAST_USER_ADDR_T() for lack of a CAST_USER_TIME_T(); net effect
187 * XXX is the same.
188 */
189 static void
190 semid_ds_32tokernel(struct user32_semid_ds *in, struct user_semid_ds *out)
191 {
192 out->sem_ctime = in->sem_ctime;
193 out->sem_otime = in->sem_otime;
194 out->sem_nsems = in->sem_nsems;
195 out->sem_base = (void *)(uintptr_t)in->sem_base;
196 out->sem_perm = in->sem_perm;
197 }
198
199 static void
200 semid_ds_64tokernel(struct user64_semid_ds *in, struct user_semid_ds *out)
201 {
202 out->sem_ctime = in->sem_ctime;
203 out->sem_otime = in->sem_otime;
204 out->sem_nsems = in->sem_nsems;
205 out->sem_base = (void *)(uintptr_t)in->sem_base;
206 out->sem_perm = in->sem_perm;
207 }
208
209
210 /*
211 * semsys
212 *
213 * Entry point for all SEM calls: semctl, semget, semop
214 *
215 * Parameters: p Process requesting the call
216 * uap User argument descriptor (see below)
217 * retval Return value of the selected sem call
218 *
219 * Indirect parameters: uap->which sem call to invoke (index in array of sem calls)
220 * uap->a2 User argument descriptor
221 *
222 * Returns: 0 Success
223 * !0 Not success
224 *
225 * Implicit returns: retval Return value of the selected sem call
226 *
227 * DEPRECATED: This interface should not be used to call the other SEM
228 * functions (semctl, semget, semop). The correct usage is
229 * to call the other SEM functions directly.
230 *
231 */
232 int
233 semsys(struct proc *p, struct semsys_args *uap, int32_t *retval)
234 {
235 /* The individual calls handling the locking now */
236
237 if (uap->which >= sizeof(semcalls) / sizeof(semcalls[0])) {
238 return EINVAL;
239 }
240 return (*semcalls[uap->which])(p, &uap->a2, retval);
241 }
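/*
 * A minimal user-space sketch of the preferred direct calls; the key,
 * permissions and operation values are illustrative only.  The index
 * comments refer to the semcalls[] table above.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	key_t key = ftok("/tmp", 's');                  // any agreed-upon key
 *	int id = semget(key, 1, IPC_CREAT | 0600);      // -> semcalls[1]
 *	struct sembuf op = { 0, -1, SEM_UNDO };         // decrement semaphore 0
 *	(void)semop(id, &op, 1);                        // -> semcalls[2]
 *	(void)semctl(id, 0, IPC_RMID);                  // -> semcalls[0]
 */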
242
243 /*
244 * Expand the semu array to the given capacity. If the expansion fails
245 * return 0, otherwise return 1.
246 *
247 * Assumes we already have the subsystem lock.
248 */
249 static int
250 grow_semu_array(int newSize)
251 {
252 int i;
253 struct sem_undo *newSemu;
254
255 if (newSize <= seminfo.semmnu) {
256 return 1;
257 }
258 if (newSize > limitseminfo.semmnu) { /* enforce hard limit */
259 #ifdef SEM_DEBUG
260 printf("undo structure hard limit of %d reached, requested %d\n",
261 limitseminfo.semmnu, newSize);
262 #endif
263 return 0;
264 }
265 newSize = (newSize / SEMMNU_INC + 1) * SEMMNU_INC;
266 newSize = newSize > limitseminfo.semmnu ? limitseminfo.semmnu : newSize;
267
268 #ifdef SEM_DEBUG
269 printf("growing semu[] from %d to %d\n", seminfo.semmnu, newSize);
270 #endif
271 newSemu = kheap_alloc(KM_SYSVSEM, sizeof(struct sem_undo) * newSize,
272 Z_WAITOK | Z_ZERO);
273 if (NULL == newSemu) {
274 #ifdef SEM_DEBUG
275 printf("allocation failed. no changes made.\n");
276 #endif
277 return 0;
278 }
279
280 /* copy the old data to the new array */
281 for (i = 0; i < seminfo.semmnu; i++) {
282 newSemu[i] = semu[i];
283 }
284 /*
285 * The new elements (from newSemu[i] to newSemu[newSize-1]) have their
286 * "un_proc" set to 0 (i.e. NULL) by the Z_ZERO flag to kheap_alloc
287 * above, so they're already marked as "not in use".
288 */
289
290 /* Clean up the old array */
291 kheap_free(KM_SYSVSEM, semu, sizeof(struct sem_undo) * seminfo.semmnu);
292
293 semu = newSemu;
294 seminfo.semmnu = newSize;
295 #ifdef SEM_DEBUG
296 printf("expansion successful\n");
297 #endif
298 return 1;
299 }
300
301 /*
302 * Expand the sema array to the given capacity. If the expansion fails
303 * we return 0, otherwise we return 1.
304 *
305 * Assumes we already have the subsystem lock.
306 */
307 static int
308 grow_sema_array(int newSize)
309 {
310 struct semid_kernel *newSema;
311 int i;
312
313 if (newSize <= seminfo.semmni) {
314 return 0;
315 }
316 if (newSize > limitseminfo.semmni) { /* enforce hard limit */
317 #ifdef SEM_DEBUG
318 printf("identifier hard limit of %d reached, requested %d\n",
319 limitseminfo.semmni, newSize);
320 #endif
321 return 0;
322 }
323 newSize = (newSize / SEMMNI_INC + 1) * SEMMNI_INC;
324 newSize = newSize > limitseminfo.semmni ? limitseminfo.semmni : newSize;
325
326 #ifdef SEM_DEBUG
327 printf("growing sema[] from %d to %d\n", seminfo.semmni, newSize);
328 #endif
329 newSema = kheap_alloc(KM_SYSVSEM, sizeof(struct semid_kernel) * newSize,
330 Z_WAITOK | Z_ZERO);
331 if (NULL == newSema) {
332 #ifdef SEM_DEBUG
333 printf("allocation failed. no changes made.\n");
334 #endif
335 return 0;
336 }
337
338 /* copy over the old ids */
339 for (i = 0; i < seminfo.semmni; i++) {
340 newSema[i] = sema[i];
341 /* This is a hack. What we really want to be able to
342 * do is change the value a process is waiting on
343 * without waking it up, but I don't know how to do
344 * this with the existing code, so we wake up the
345 * process and let it do a lot of work to determine the
346 * semaphore set is really not available yet, and then
347 * sleep on the correct, reallocated semid_kernel pointer.
348 */
349 if (sema[i].u.sem_perm.mode & SEM_ALLOC) {
350 wakeup((caddr_t)&sema[i]);
351 }
352 }
353
354 #if CONFIG_MACF
355 for (i = seminfo.semmni; i < newSize; i++) {
356 mac_sysvsem_label_init(&newSema[i]);
357 }
358 #endif
359
360 /*
361 * The new elements (from newSema[i] to newSema[newSize-1]) have their
362 * "sem_base" and "sem_perm.mode" set to 0 (i.e. NULL) by the Z_ZERO
363 * flag to kheap_alloc above, so they're already marked as "not in use".
364 */
365
366 /* Clean up the old array */
367 kheap_free(KM_SYSVSEM, sema,
368 sizeof(struct semid_kernel) * seminfo.semmni);
369
370 sema = newSema;
371 seminfo.semmni = newSize;
372 #ifdef SEM_DEBUG
373 printf("expansion successful\n");
374 #endif
375 return 1;
376 }
377
378 /*
379 * Expand the sem_pool array to the given capacity. If the expansion fails
380 * we return 0 (fail), otherwise we return 1 (success).
381 *
382 * Assumes we already hold the subsystem lock.
383 */
384 static int
385 grow_sem_pool(int new_pool_size)
386 {
387 struct sem *new_sem_pool = NULL;
388 struct sem *sem_free;
389 int i;
390
391 if (new_pool_size < semtot) {
392 return 0;
393 }
394 /* enforce hard limit */
395 if (new_pool_size > limitseminfo.semmns) {
396 #ifdef SEM_DEBUG
397 printf("semaphore hard limit of %d reached, requested %d\n",
398 limitseminfo.semmns, new_pool_size);
399 #endif
400 return 0;
401 }
402
403 new_pool_size = (new_pool_size / SEMMNS_INC + 1) * SEMMNS_INC;
404 new_pool_size = new_pool_size > limitseminfo.semmns ? limitseminfo.semmns : new_pool_size;
405
406 #ifdef SEM_DEBUG
407 printf("growing sem_pool array from %d to %d\n", seminfo.semmns, new_pool_size);
408 #endif
409 new_sem_pool = kheap_alloc(KM_SYSVSEM, sizeof(struct sem) * new_pool_size,
410 Z_WAITOK | Z_ZERO);
411 if (NULL == new_sem_pool) {
412 #ifdef SEM_DEBUG
413 printf("allocation failed. no changes made.\n");
414 #endif
415 return 0;
416 }
417
418 /* We have our new memory, now copy the old contents over */
419 if (sem_pool) {
420 for (i = 0; i < seminfo.semmns; i++) {
421 new_sem_pool[i] = sem_pool[i];
422 }
423 }
424
425 /* Update our id structures to point to the new semaphores */
426 for (i = 0; i < seminfo.semmni; i++) {
427 if (sema[i].u.sem_perm.mode & SEM_ALLOC) { /* ID in use */
428 sema[i].u.sem_base = new_sem_pool +
429 (sema[i].u.sem_base - sem_pool);
430 }
431 }
432
433 sem_free = sem_pool;
434 sem_pool = new_sem_pool;
435
436 /* clean up the old array */
437 kheap_free(KM_SYSVSEM, sem_free, sizeof(struct sem) * seminfo.semmns);
438
439 seminfo.semmns = new_pool_size;
440 #ifdef SEM_DEBUG
441 printf("expansion complete\n");
442 #endif
443 return 1;
444 }
445
446 /*
447 * Allocate a new sem_undo structure for a process
448 * (returns ptr to structure or NULL if no more room)
449 *
450 * Assumes we already hold the subsystem lock.
451 */
452
453 static int
454 semu_alloc(struct proc *p)
455 {
456 int i;
457 struct sem_undo *suptr;
458 int *supidx;
459 int attempt;
460
461 /*
462 * Try twice to allocate something.
463 * (we'll purge any empty structures after the first pass so
464 * two passes are always enough)
465 */
466
467 for (attempt = 0; attempt < 2; attempt++) {
468 /*
469 * Look for a free structure.
470 * Fill it in and return it if we find one.
471 */
472
473 for (i = 0; i < seminfo.semmnu; i++) {
474 suptr = SEMU(i);
475 if (suptr->un_proc == NULL) {
476 suptr->un_next_idx = semu_list_idx;
477 semu_list_idx = i;
478 suptr->un_cnt = 0;
479 suptr->un_ent = NULL;
480 suptr->un_proc = p;
481 return i;
482 }
483 }
484
485 /*
486 * We didn't find a free one; if this is the first attempt,
487 * try to free some structures.
488 */
489
490 if (attempt == 0) {
491 /* All the structures are in use - try to free some */
492 int did_something = 0;
493
494 supidx = &semu_list_idx;
495 while (*supidx != -1) {
496 suptr = SEMU(*supidx);
497 if (suptr->un_cnt == 0) {
498 suptr->un_proc = NULL;
499 *supidx = suptr->un_next_idx;
500 did_something = 1;
501 } else {
502 supidx = &(suptr->un_next_idx);
503 }
504 }
505
506 /* If we didn't free anything, try expanding
507 * the semu[] array. If that doesn't work
508 * then fail. We expand last to get the
509 * most reuse out of existing resources.
510 */
511 if (!did_something) {
512 if (!grow_semu_array(seminfo.semmnu + 1)) {
513 return -1;
514 }
515 }
516 } else {
517 /*
518 * The second pass failed even though we freed
519 * something after the first pass!
520 * This is IMPOSSIBLE!
521 */
522 panic("semu_alloc - second attempt failed");
523 }
524 }
525 return -1;
526 }
527
528 /*
529 * Adjust a particular entry for a particular proc
530 *
531 * Assumes we already hold the subsystem lock.
532 */
533 static int
534 semundo_adjust(struct proc *p, int *supidx, int semid,
535 int semnum, int adjval)
536 {
537 struct sem_undo *suptr;
538 int suidx;
539 struct undo *sueptr, **suepptr, *new_sueptr;
540 int i;
541
542 /*
543 * Look for and remember the sem_undo if the caller doesn't provide it
544 */
545
546 suidx = *supidx;
547 if (suidx == -1) {
548 for (suidx = semu_list_idx; suidx != -1;
549 suidx = suptr->un_next_idx) {
550 suptr = SEMU(suidx);
551 if (suptr->un_proc == p) {
552 *supidx = suidx;
553 break;
554 }
555 }
556 if (suidx == -1) {
557 if (adjval == 0) {
558 return 0;
559 }
560 suidx = semu_alloc(p);
561 if (suidx == -1) {
562 return ENOSPC;
563 }
564 *supidx = suidx;
565 }
566 }
567
568 /*
569 * Look for the requested entry and adjust it (delete if adjval becomes
570 * 0).
571 */
572 suptr = SEMU(suidx);
573 new_sueptr = NULL;
574 for (i = 0, suepptr = &suptr->un_ent, sueptr = suptr->un_ent;
575 i < suptr->un_cnt;
576 i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) {
577 if (sueptr->une_id != semid || sueptr->une_num != semnum) {
578 continue;
579 }
580 if (adjval == 0) {
581 sueptr->une_adjval = 0;
582 } else {
583 sueptr->une_adjval += adjval;
584 }
585 if (sueptr->une_adjval == 0) {
586 suptr->un_cnt--;
587 *suepptr = sueptr->une_next;
588 kheap_free(KM_SYSVSEM, sueptr, sizeof(struct undo));
589 }
590 return 0;
591 }
592
593 /* Didn't find the right entry - create it */
594 if (adjval == 0) {
595 /* no adjustment: no need for a new entry */
596 return 0;
597 }
598
599 if (suptr->un_cnt == limitseminfo.semume) {
600 /* reached the limit on the number of semaphore undo entries */
601 return EINVAL;
602 }
603
604 /* allocate a new semaphore undo entry */
605 new_sueptr = kheap_alloc(KM_SYSVSEM, sizeof(struct undo), Z_WAITOK);
606 if (new_sueptr == NULL) {
607 return ENOMEM;
608 }
609
610 /* fill in the new semaphore undo entry */
611 new_sueptr->une_next = suptr->un_ent;
612 suptr->un_ent = new_sueptr;
613 suptr->un_cnt++;
614 new_sueptr->une_adjval = adjval;
615 new_sueptr->une_id = semid;
616 new_sueptr->une_num = semnum;
617
618 return 0;
619 }
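/*
 * Undo bookkeeping sketch (illustrative): a process that performs
 *
 *	struct sembuf op = { 0, -2, SEM_UNDO };
 *	semop(id, &op, 1);                      // semval -= 2
 *
 * causes semop() to call semundo_adjust() with adjval = +2 (the negated
 * operation), so the per-process undo entry accumulates une_adjval = +2.
 * If the process exits without reversing the operation, semexit() applies
 * that +2 back to the semaphore, which is what makes SEM_UNDO operations
 * self-cleaning on process death.
 */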
620
621 /* Assumes we already hold the subsystem lock.
622 */
623 static void
624 semundo_clear(int semid, int semnum)
625 {
626 struct sem_undo *suptr;
627 int suidx;
628
629 for (suidx = semu_list_idx; suidx != -1; suidx = suptr->un_next_idx) {
630 struct undo *sueptr;
631 struct undo **suepptr;
632 int i = 0;
633
634 suptr = SEMU(suidx);
635 sueptr = suptr->un_ent;
636 suepptr = &suptr->un_ent;
637 while (i < suptr->un_cnt) {
638 if (sueptr->une_id == semid) {
639 if (semnum == -1 || sueptr->une_num == semnum) {
640 suptr->un_cnt--;
641 *suepptr = sueptr->une_next;
642 kheap_free(KM_SYSVSEM, sueptr, sizeof(struct undo));
643 sueptr = *suepptr;
644 continue;
645 }
646 if (semnum != -1) {
647 break;
648 }
649 }
650 i++;
651 suepptr = &sueptr->une_next;
652 sueptr = sueptr->une_next;
653 }
654 }
655 }
656
657 /*
658 * Note that the user-mode half of this passes a union coerced to a
659 * user_addr_t. The union contains either an int or a pointer, and
660 * so we have to coerce it back, variant on whether the calling
661 * process is 64 bit or not. The coercion works for the 'val' element
662 * because the alignment is the same in user and kernel space.
663 */
664 int
665 semctl(struct proc *p, struct semctl_args *uap, int32_t *retval)
666 {
667 int semid = uap->semid;
668 int semnum = uap->semnum;
669 int cmd = uap->cmd;
670 user_semun_t user_arg = (user_semun_t)uap->arg;
671 kauth_cred_t cred = kauth_cred_get();
672 int i, rval, eval;
673 struct user_semid_ds sbuf;
674 struct semid_kernel *semakptr;
675
676
677 AUDIT_ARG(svipc_cmd, cmd);
678 AUDIT_ARG(svipc_id, semid);
679
680 SYSV_SEM_SUBSYS_LOCK();
681
682 #ifdef SEM_DEBUG
683 printf("call to semctl(%d, %d, %d, 0x%qx)\n", semid, semnum, cmd, user_arg);
684 #endif
685
686 semid = IPCID_TO_IX(semid);
687
688 if (semid < 0 || semid >= seminfo.semmni) {
689 #ifdef SEM_DEBUG
690 printf("Invalid semid\n");
691 #endif
692 eval = EINVAL;
693 goto semctlout;
694 }
695
696 semakptr = &sema[semid];
697 if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 ||
698 semakptr->u.sem_perm._seq != IPCID_TO_SEQ(uap->semid)) {
699 eval = EINVAL;
700 goto semctlout;
701 }
702 #if CONFIG_MACF
703 eval = mac_sysvsem_check_semctl(cred, semakptr, cmd);
704 if (eval) {
705 goto semctlout;
706 }
707 #endif
708
709 eval = 0;
710 rval = 0;
711
712 switch (cmd) {
713 case IPC_RMID:
714 if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M))) {
715 goto semctlout;
716 }
717
718 semakptr->u.sem_perm.cuid = kauth_cred_getuid(cred);
719 semakptr->u.sem_perm.uid = kauth_cred_getuid(cred);
720 semtot -= semakptr->u.sem_nsems;
721 for (i = semakptr->u.sem_base - sem_pool; i < semtot; i++) {
722 sem_pool[i] = sem_pool[i + semakptr->u.sem_nsems];
723 }
724 for (i = 0; i < seminfo.semmni; i++) {
725 if ((sema[i].u.sem_perm.mode & SEM_ALLOC) &&
726 sema[i].u.sem_base > semakptr->u.sem_base) {
727 sema[i].u.sem_base -= semakptr->u.sem_nsems;
728 }
729 }
730 semakptr->u.sem_perm.mode = 0;
731 #if CONFIG_MACF
732 mac_sysvsem_label_recycle(semakptr);
733 #endif
734 semundo_clear(semid, -1);
735 wakeup((caddr_t)semakptr);
736 break;
737
738 case IPC_SET:
739 if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M))) {
740 goto semctlout;
741 }
742
743 if (IS_64BIT_PROCESS(p)) {
744 struct user64_semid_ds ds64;
745 eval = copyin(user_arg.buf, &ds64, sizeof(ds64));
746 semid_ds_64tokernel(&ds64, &sbuf);
747 } else {
748 struct user32_semid_ds ds32;
749 eval = copyin(user_arg.buf, &ds32, sizeof(ds32));
750 semid_ds_32tokernel(&ds32, &sbuf);
751 }
752
753 if (eval != 0) {
754 goto semctlout;
755 }
756
757 semakptr->u.sem_perm.uid = sbuf.sem_perm.uid;
758 semakptr->u.sem_perm.gid = sbuf.sem_perm.gid;
759 semakptr->u.sem_perm.mode = (semakptr->u.sem_perm.mode &
760 ~0777) | (sbuf.sem_perm.mode & 0777);
761 semakptr->u.sem_ctime = sysv_semtime();
762 break;
763
764 case IPC_STAT:
765 if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
766 goto semctlout;
767 }
768
769 if (IS_64BIT_PROCESS(p)) {
770 struct user64_semid_ds semid_ds64;
771 bzero(&semid_ds64, sizeof(semid_ds64));
772 semid_ds_kernelto64(&semakptr->u, &semid_ds64);
773 eval = copyout(&semid_ds64, user_arg.buf, sizeof(semid_ds64));
774 } else {
775 struct user32_semid_ds semid_ds32;
776 bzero(&semid_ds32, sizeof(semid_ds32));
777 semid_ds_kernelto32(&semakptr->u, &semid_ds32);
778 eval = copyout(&semid_ds32, user_arg.buf, sizeof(semid_ds32));
779 }
780 break;
781
782 case GETNCNT:
783 if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
784 goto semctlout;
785 }
786 if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
787 eval = EINVAL;
788 goto semctlout;
789 }
790 rval = semakptr->u.sem_base[semnum].semncnt;
791 break;
792
793 case GETPID:
794 if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
795 goto semctlout;
796 }
797 if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
798 eval = EINVAL;
799 goto semctlout;
800 }
801 rval = semakptr->u.sem_base[semnum].sempid;
802 break;
803
804 case GETVAL:
805 if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
806 goto semctlout;
807 }
808 if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
809 eval = EINVAL;
810 goto semctlout;
811 }
812 rval = semakptr->u.sem_base[semnum].semval;
813 break;
814
815 case GETALL:
816 if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
817 goto semctlout;
818 }
819 /* XXXXXXXXXXXXXXXX TBD XXXXXXXXXXXXXXXX */
820 for (i = 0; i < semakptr->u.sem_nsems; i++) {
821 /* XXX could be done in one go... */
822 eval = copyout((caddr_t)&semakptr->u.sem_base[i].semval,
823 user_arg.array + (i * sizeof(unsigned short)),
824 sizeof(unsigned short));
825 if (eval != 0) {
826 break;
827 }
828 }
829 break;
830
831 case GETZCNT:
832 if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) {
833 goto semctlout;
834 }
835 if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
836 eval = EINVAL;
837 goto semctlout;
838 }
839 rval = semakptr->u.sem_base[semnum].semzcnt;
840 break;
841
842 case SETVAL:
843 if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W))) {
844 #ifdef SEM_DEBUG
845 printf("Invalid credentials for write\n");
846 #endif
847 goto semctlout;
848 }
849 if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
850 #ifdef SEM_DEBUG
851 printf("Invalid number out of range for set\n");
852 #endif
853 eval = EINVAL;
854 goto semctlout;
855 }
856
857 /*
858 * Cast down a pointer instead of using 'val' member directly
859 * to avoid introducing endianness and a pad field into the
860 * header file. Ugly, but it works.
861 */
862 u_int newsemval = CAST_DOWN_EXPLICIT(u_int, user_arg.buf);
863
864 /*
865 * The check is performed on unsigned values to match the
866 * eventual destination.
867 */
868 if (newsemval > (u_int)seminfo.semvmx) {
869 #ifdef SEM_DEBUG
870 printf("Out of range sem value for set\n");
871 #endif
872 eval = ERANGE;
873 goto semctlout;
874 }
875 semakptr->u.sem_base[semnum].semval = newsemval;
876 semakptr->u.sem_base[semnum].sempid = p->p_pid;
877 /* XXX scottl Should there be a MAC call here? */
878 semundo_clear(semid, semnum);
879 wakeup((caddr_t)semakptr);
880 break;
881
882 case SETALL:
883 if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W))) {
884 goto semctlout;
885 }
886 /*** XXXXXXXXXXXX TBD ********/
887 for (i = 0; i < semakptr->u.sem_nsems; i++) {
888 /* XXX could be done in one go... */
889 eval = copyin(user_arg.array + (i * sizeof(unsigned short)),
890 (caddr_t)&semakptr->u.sem_base[i].semval,
891 sizeof(unsigned short));
892 if (eval != 0) {
893 break;
894 }
895 semakptr->u.sem_base[i].sempid = p->p_pid;
896 }
897 /* XXX scottl Should there be a MAC call here? */
898 semundo_clear(semid, -1);
899 wakeup((caddr_t)semakptr);
900 break;
901
902 default:
903 eval = EINVAL;
904 goto semctlout;
905 }
906
907 if (eval == 0) {
908 *retval = rval;
909 }
910 semctlout:
911 SYSV_SEM_SUBSYS_UNLOCK();
912 return eval;
913 }
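/*
 * User-space sketch of the union hand-off described above: the caller
 * builds a semun and libc passes it through as a single word, which this
 * routine either casts back down (SETVAL) or treats as a pointer
 * (IPC_STAT/IPC_SET/GETALL/SETALL).  Assumes the user-visible
 * union semun from <sys/sem.h>; values are illustrative.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	int id = semget(IPC_PRIVATE, 1, 0600);
 *	union semun arg;
 *	struct semid_ds ds;
 *
 *	arg.val = 1;
 *	semctl(id, 0, SETVAL, arg);             // 'val' travels in the low bits
 *
 *	arg.buf = &ds;
 *	semctl(id, 0, IPC_STAT, arg);           // 'buf' travels as a pointer
 */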
914
915 int
916 semget(__unused struct proc *p, struct semget_args *uap, int32_t *retval)
917 {
918 int semid, eval;
919 int key = uap->key;
920 int nsems = uap->nsems;
921 int semflg = uap->semflg;
922 kauth_cred_t cred = kauth_cred_get();
923
924 #ifdef SEM_DEBUG
925 if (key != IPC_PRIVATE) {
926 printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
927 } else {
928 printf("semget(IPC_PRIVATE, %d, 0%o)\n", nsems, semflg);
929 }
930 #endif
931
932
933 SYSV_SEM_SUBSYS_LOCK();
934
935
936 if (key != IPC_PRIVATE) {
937 for (semid = 0; semid < seminfo.semmni; semid++) {
938 if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) &&
939 sema[semid].u.sem_perm._key == key) {
940 break;
941 }
942 }
943 if (semid < seminfo.semmni) {
944 #ifdef SEM_DEBUG
945 printf("found public key\n");
946 #endif
947 if ((eval = ipcperm(cred, &sema[semid].u.sem_perm,
948 semflg & 0700))) {
949 goto semgetout;
950 }
951 if (nsems < 0 || sema[semid].u.sem_nsems < nsems) {
952 #ifdef SEM_DEBUG
953 printf("too small\n");
954 #endif
955 eval = EINVAL;
956 goto semgetout;
957 }
958 if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
959 #ifdef SEM_DEBUG
960 printf("not exclusive\n");
961 #endif
962 eval = EEXIST;
963 goto semgetout;
964 }
965 #if CONFIG_MACF
966 eval = mac_sysvsem_check_semget(cred, &sema[semid]);
967 if (eval) {
968 goto semgetout;
969 }
970 #endif
971 goto found;
972 }
973 }
974
975 #ifdef SEM_DEBUG
976 printf("need to allocate an id for the request\n");
977 #endif
978 if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
979 if (nsems <= 0 || nsems > limitseminfo.semmsl) {
980 #ifdef SEM_DEBUG
981 printf("nsems out of range (0<%d<=%d)\n", nsems,
982 limitseminfo.semmsl);
983 #endif
984 eval = EINVAL;
985 goto semgetout;
986 }
987 if (nsems > seminfo.semmns - semtot) {
988 #ifdef SEM_DEBUG
989 printf("not enough semaphores left (need %d, got %d)\n",
990 nsems, seminfo.semmns - semtot);
991 #endif
992 if (!grow_sem_pool(semtot + nsems)) {
993 #ifdef SEM_DEBUG
994 printf("failed to grow the sem array\n");
995 #endif
996 eval = ENOSPC;
997 goto semgetout;
998 }
999 }
1000 for (semid = 0; semid < seminfo.semmni; semid++) {
1001 if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) == 0) {
1002 break;
1003 }
1004 }
1005 if (semid == seminfo.semmni) {
1006 #ifdef SEM_DEBUG
1007 printf("no more id's available\n");
1008 #endif
1009 if (!grow_sema_array(seminfo.semmni + 1)) {
1010 #ifdef SEM_DEBUG
1011 printf("failed to grow sema array\n");
1012 #endif
1013 eval = ENOSPC;
1014 goto semgetout;
1015 }
1016 }
1017 #ifdef SEM_DEBUG
1018 printf("semid %d is available\n", semid);
1019 #endif
1020 sema[semid].u.sem_perm._key = key;
1021 sema[semid].u.sem_perm.cuid = kauth_cred_getuid(cred);
1022 sema[semid].u.sem_perm.uid = kauth_cred_getuid(cred);
1023 sema[semid].u.sem_perm.cgid = kauth_cred_getgid(cred);
1024 sema[semid].u.sem_perm.gid = kauth_cred_getgid(cred);
1025 sema[semid].u.sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
1026 sema[semid].u.sem_perm._seq =
1027 (sema[semid].u.sem_perm._seq + 1) & 0x7fff;
1028 sema[semid].u.sem_nsems = nsems;
1029 sema[semid].u.sem_otime = 0;
1030 sema[semid].u.sem_ctime = sysv_semtime();
1031 sema[semid].u.sem_base = &sem_pool[semtot];
1032 semtot += nsems;
1033 bzero(sema[semid].u.sem_base,
1034 sizeof(sema[semid].u.sem_base[0]) * nsems);
1035 #if CONFIG_MACF
1036 mac_sysvsem_label_associate(cred, &sema[semid]);
1037 #endif
1038 #ifdef SEM_DEBUG
1039 printf("sembase = 0x%x, next = 0x%x\n", sema[semid].u.sem_base,
1040 &sem_pool[semtot]);
1041 #endif
1042 } else {
1043 #ifdef SEM_DEBUG
1044 printf("didn't find it and wasn't asked to create it\n");
1045 #endif
1046 eval = ENOENT;
1047 goto semgetout;
1048 }
1049
1050 found:
1051 *retval = IXSEQ_TO_IPCID(semid, sema[semid].u.sem_perm);
1052 AUDIT_ARG(svipc_id, *retval);
1053 #ifdef SEM_DEBUG
1054 printf("semget is done, returning %d\n", *retval);
1055 #endif
1056 eval = 0;
1057
1058 semgetout:
1059 SYSV_SEM_SUBSYS_UNLOCK();
1060 return eval;
1061 }
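/*
 * User-space sketch of the two paths through semget(): creating a new set
 * (allocates an id and nsems semaphores, bumps _seq) versus attaching to
 * an existing key (permission and size checks only).  Key and mode are
 * illustrative.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	// creator: fail with EEXIST if the key is already in use
 *	int id = semget(0x1234, 4, IPC_CREAT | IPC_EXCL | 0660);
 *
 *	// other processes: attach to the existing 4-semaphore set
 *	int id2 = semget(0x1234, 4, 0660);
 */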
1062
1063 int
1064 semop(struct proc *p, struct semop_args *uap, int32_t *retval)
1065 {
1066 int semid = uap->semid;
1067 int nsops = uap->nsops;
1068 struct sembuf sops[seminfo.semopm];
1069 struct semid_kernel *semakptr;
1070 struct sembuf *sopptr = NULL; /* protected by 'semptr' */
1071 struct sem *semptr = NULL; /* protected by 'if' */
1072 int supidx = -1;
1073 int i, j, eval;
1074 int do_wakeup, do_undos;
1075
1076 AUDIT_ARG(svipc_id, uap->semid);
1077
1078 SYSV_SEM_SUBSYS_LOCK();
1079
1080 #ifdef SEM_DEBUG
1081 printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops);
1082 #endif
1083
1084 semid = IPCID_TO_IX(semid); /* Convert back to zero origin */
1085
1086 if (semid < 0 || semid >= seminfo.semmni) {
1087 eval = EINVAL;
1088 goto semopout;
1089 }
1090
1091 semakptr = &sema[semid];
1092 if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0) {
1093 eval = EINVAL;
1094 goto semopout;
1095 }
1096 if (semakptr->u.sem_perm._seq != IPCID_TO_SEQ(uap->semid)) {
1097 eval = EINVAL;
1098 goto semopout;
1099 }
1100
1101 if ((eval = ipcperm(kauth_cred_get(), &semakptr->u.sem_perm, IPC_W))) {
1102 #ifdef SEM_DEBUG
1103 printf("eval = %d from ipaccess\n", eval);
1104 #endif
1105 goto semopout;
1106 }
1107
1108 if (nsops < 0 || nsops > seminfo.semopm) {
1109 #ifdef SEM_DEBUG
1110 printf("too many sops (max=%d, nsops=%d)\n",
1111 seminfo.semopm, nsops);
1112 #endif
1113 eval = E2BIG;
1114 goto semopout;
1115 }
1116
1117 /* OK for LP64, since sizeof(struct sembuf) is currently invariant */
1118 if ((eval = copyin(uap->sops, &sops, nsops * sizeof(struct sembuf))) != 0) {
1119 #ifdef SEM_DEBUG
1120 printf("eval = %d from copyin(%08x, %08x, %ld)\n", eval,
1121 uap->sops, &sops, nsops * sizeof(struct sembuf));
1122 #endif
1123 goto semopout;
1124 }
1125
1126 #if CONFIG_MACF
1127 /*
1128 * Initial pass thru sops to see what permissions are needed.
1129 */
1130 j = 0; /* permission needed */
1131 for (i = 0; i < nsops; i++) {
1132 j |= (sops[i].sem_op == 0) ? SEM_R : SEM_A;
1133 }
1134
1135 /*
1136 * The MAC hook checks whether the thread has read (and possibly
1137 * write) permissions to the semaphore array based on the
1138 * sopptr->sem_op value.
1139 */
1140 eval = mac_sysvsem_check_semop(kauth_cred_get(), semakptr, j);
1141 if (eval) {
1142 goto semopout;
1143 }
1144 #endif
1145
1146 /*
1147 * Loop trying to satisfy the vector of requests.
1148 * If we reach a point where we must wait, any requests already
1149 * performed are rolled back and we go to sleep until some other
1150 * process wakes us up. At this point, we start all over again.
1151 *
1152 * This ensures that from the perspective of other tasks, a set
1153 * of requests is atomic (never partially satisfied).
1154 */
1155 do_undos = 0;
1156
1157 for (;;) {
1158 do_wakeup = 0;
1159
1160 for (i = 0; i < nsops; i++) {
1161 sopptr = &sops[i];
1162
1163 if (sopptr->sem_num >= semakptr->u.sem_nsems) {
1164 eval = EFBIG;
1165 goto semopout;
1166 }
1167
1168 semptr = &semakptr->u.sem_base[sopptr->sem_num];
1169
1170 #ifdef SEM_DEBUG
1171 printf("semop: semakptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n",
1172 semakptr, semakptr->u.sem_base, semptr,
1173 sopptr->sem_num, semptr->semval, sopptr->sem_op,
1174 (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
1175 #endif
1176
1177 if (sopptr->sem_op < 0) {
1178 if (semptr->semval + sopptr->sem_op < 0) {
1179 #ifdef SEM_DEBUG
1180 printf("semop: can't do it now\n");
1181 #endif
1182 break;
1183 } else {
1184 semptr->semval += sopptr->sem_op;
1185 if (semptr->semval == 0 &&
1186 semptr->semzcnt > 0) {
1187 do_wakeup = 1;
1188 }
1189 }
1190 if (sopptr->sem_flg & SEM_UNDO) {
1191 do_undos = 1;
1192 }
1193 } else if (sopptr->sem_op == 0) {
1194 if (semptr->semval > 0) {
1195 #ifdef SEM_DEBUG
1196 printf("semop: not zero now\n");
1197 #endif
1198 break;
1199 }
1200 } else {
1201 if (semptr->semncnt > 0) {
1202 do_wakeup = 1;
1203 }
1204 semptr->semval += sopptr->sem_op;
1205 if (sopptr->sem_flg & SEM_UNDO) {
1206 do_undos = 1;
1207 }
1208 }
1209 }
1210
1211 /*
1212 * Did we get through the entire vector?
1213 */
1214 if (i >= nsops) {
1215 goto done;
1216 }
1217
1218 /*
1219 * No ... rollback anything that we've already done
1220 */
1221 #ifdef SEM_DEBUG
1222 printf("semop: rollback 0 through %d\n", i - 1);
1223 #endif
1224 for (j = 0; j < i; j++) {
1225 semakptr->u.sem_base[sops[j].sem_num].semval -=
1226 sops[j].sem_op;
1227 }
1228
1229 /*
1230 * If the request that we couldn't satisfy has the
1231 * NOWAIT flag set then return with EAGAIN.
1232 */
1233 if (sopptr->sem_flg & IPC_NOWAIT) {
1234 eval = EAGAIN;
1235 goto semopout;
1236 }
1237
1238 if (sopptr->sem_op == 0) {
1239 semptr->semzcnt++;
1240 } else {
1241 semptr->semncnt++;
1242 }
1243
1244 #ifdef SEM_DEBUG
1245 printf("semop: good night!\n");
1246 #endif
1247 /* Release our lock on the semaphore subsystem so
1248 * another thread can get at the semaphore we are
1249 * waiting for. We will get the lock back after we
1250 * wake up.
1251 */
1252 eval = msleep((caddr_t)semakptr, &sysv_sem_subsys_mutex, (PZERO - 4) | PCATCH,
1253 "semwait", 0);
1254
1255 #ifdef SEM_DEBUG
1256 printf("semop: good morning (eval=%d)!\n", eval);
1257 #endif
1258 if (eval != 0) {
1259 eval = EINTR;
1260 }
1261
1262 /*
1263 * IMPORTANT: while we were asleep, the semaphore array might
1264 * have been reallocated somewhere else (see grow_sema_array()).
1265 * When we wake up, we have to re-lookup the semaphore
1266 * structures and re-validate them.
1267 */
1268
1269 semptr = NULL;
1270
1271 /*
1272 * Make sure that the semaphore still exists
1273 *
1274 * XXX POSIX: the third test in this 'if', and the 'EINTR' precedence, may
1275 * fail testing; if so, we will need to revert this code.
1276 */
1277 semakptr = &sema[semid]; /* sema may have been reallocated */
1278 if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 ||
1279 semakptr->u.sem_perm._seq != IPCID_TO_SEQ(uap->semid) ||
1280 sopptr->sem_num >= semakptr->u.sem_nsems) {
1281 /* The man page says to return EIDRM. */
1282 /* Unfortunately, BSD doesn't define that code! */
1283 if (eval == EINTR) {
1284 /*
1285 * EINTR takes precedence over the fact that
1286 * the semaphore disappeared while we were
1287 * sleeping...
1288 */
1289 } else {
1290 #ifdef EIDRM
1291 eval = EIDRM;
1292 #else
1293 eval = EINVAL; /* Ancient past */
1294 #endif
1295 }
1296 goto semopout;
1297 }
1298
1299 /*
1300 * The semaphore is still alive. Readjust the count of
1301 * waiting processes. semptr needs to be recomputed
1302 * because the sem[] may have been reallocated while
1303 * we were sleeping, updating our sem_base pointer.
1304 */
1305 semptr = &semakptr->u.sem_base[sopptr->sem_num];
1306 if (sopptr->sem_op == 0) {
1307 semptr->semzcnt--;
1308 } else {
1309 semptr->semncnt--;
1310 }
1311
1312 if (eval != 0) { /* EINTR */
1313 goto semopout;
1314 }
1315 }
1316
1317 done:
1318 /*
1319 * Process any SEM_UNDO requests.
1320 */
1321 if (do_undos) {
1322 for (i = 0; i < nsops; i++) {
1323 /*
1324 * We only need to deal with SEM_UNDO's for non-zero
1325 * op's.
1326 */
1327 int adjval;
1328
1329 if ((sops[i].sem_flg & SEM_UNDO) == 0) {
1330 continue;
1331 }
1332 adjval = sops[i].sem_op;
1333 if (adjval == 0) {
1334 continue;
1335 }
1336 eval = semundo_adjust(p, &supidx, semid,
1337 sops[i].sem_num, -adjval);
1338 if (eval == 0) {
1339 continue;
1340 }
1341
1342 /*
1343 * Oh-Oh! We ran out of either sem_undo's or undo's.
1344 * Rollback the adjustments to this point and then
1345 * rollback the semaphore ups and down so we can return
1346 * with an error with all structures restored. We
1347 * rollback the undo's in the exact reverse order that
1348 * we applied them. This guarantees that we won't run
1349 * out of space as we roll things back out.
1350 */
1351 for (j = i - 1; j >= 0; j--) {
1352 if ((sops[j].sem_flg & SEM_UNDO) == 0) {
1353 continue;
1354 }
1355 adjval = sops[j].sem_op;
1356 if (adjval == 0) {
1357 continue;
1358 }
1359 if (semundo_adjust(p, &supidx, semid,
1360 sops[j].sem_num, adjval) != 0) {
1361 panic("semop - can't undo undos");
1362 }
1363 }
1364
1365 for (j = 0; j < nsops; j++) {
1366 semakptr->u.sem_base[sops[j].sem_num].semval -=
1367 sops[j].sem_op;
1368 }
1369
1370 #ifdef SEM_DEBUG
1371 printf("eval = %d from semundo_adjust\n", eval);
1372 #endif
1373 goto semopout;
1374 } /* loop through the sops */
1375 } /* if (do_undos) */
1376
1377 /* We're definitely done - set the sempid's */
1378 for (i = 0; i < nsops; i++) {
1379 sopptr = &sops[i];
1380 semptr = &semakptr->u.sem_base[sopptr->sem_num];
1381 semptr->sempid = p->p_pid;
1382 }
1383 semakptr->u.sem_otime = sysv_semtime();
1384
1385 if (do_wakeup) {
1386 #ifdef SEM_DEBUG
1387 printf("semop: doing wakeup\n");
1388 #ifdef SEM_WAKEUP
1389 sem_wakeup((caddr_t)semakptr);
1390 #else
1391 wakeup((caddr_t)semakptr);
1392 #endif
1393 printf("semop: back from wakeup\n");
1394 #else
1395 wakeup((caddr_t)semakptr);
1396 #endif
1397 }
1398 #ifdef SEM_DEBUG
1399 printf("semop: done\n");
1400 #endif
1401 *retval = 0;
1402 eval = 0;
1403 semopout:
1404 SYSV_SEM_SUBSYS_UNLOCK();
1405 return eval;
1406 }
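/*
 * User-space sketch of an atomic multi-operation call, matching the
 * "loop trying to satisfy the vector" logic above: either every operation
 * in the vector is applied or none is, and the caller sleeps unless
 * IPC_NOWAIT is set on the operation that cannot be satisfied.
 * Semaphore numbers are illustrative.
 *
 *	#include <sys/sem.h>
 *
 *	struct sembuf ops[2] = {
 *		{ 0, -1, SEM_UNDO },            // P() on semaphore 0
 *		{ 1,  1, SEM_UNDO }             // V() on semaphore 1
 *	};
 *	if (semop(id, ops, 2) == -1)
 *		perror("semop");                // EAGAIN, EINTR, EIDRM, ...
 */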
1407
1408 /*
1409 * Go through the undo structures for this process and apply the adjustments to
1410 * semaphores.
1411 */
1412 void
1413 semexit(struct proc *p)
1414 {
1415 struct sem_undo *suptr = NULL;
1416 int suidx;
1417 int *supidx;
1418 int did_something;
1419
1420 /* If we have not allocated our semaphores yet there can't be
1421 * anything to undo, but we need the lock to prevent
1422 * dynamic memory race conditions.
1423 */
1424 SYSV_SEM_SUBSYS_LOCK();
1425
1426 if (!sem_pool) {
1427 SYSV_SEM_SUBSYS_UNLOCK();
1428 return;
1429 }
1430 did_something = 0;
1431
1432 /*
1433 * Go through the chain of undo vectors looking for one
1434 * associated with this process.
1435 */
1436
1437 for (supidx = &semu_list_idx; (suidx = *supidx) != -1;
1438 supidx = &suptr->un_next_idx) {
1439 suptr = SEMU(suidx);
1440 if (suptr->un_proc == p) {
1441 break;
1442 }
1443 }
1444
1445 if (suidx == -1) {
1446 goto unlock;
1447 }
1448
1449 #ifdef SEM_DEBUG
1450 printf("proc @%08x has undo structure with %d entries\n", p,
1451 suptr->un_cnt);
1452 #endif
1453
1454 /*
1455 * If there are any active undo elements then process them.
1456 */
1457 if (suptr->un_cnt > 0) {
1458 while (suptr->un_ent != NULL) {
1459 struct undo *sueptr;
1460 int semid;
1461 int semnum;
1462 int adjval;
1463 struct semid_kernel *semakptr;
1464
1465 sueptr = suptr->un_ent;
1466 semid = sueptr->une_id;
1467 semnum = sueptr->une_num;
1468 adjval = sueptr->une_adjval;
1469
1470 semakptr = &sema[semid];
1471 if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0) {
1472 panic("semexit - semid not allocated");
1473 }
1474 if (semnum >= semakptr->u.sem_nsems) {
1475 panic("semexit - semnum out of range");
1476 }
1477
1478 #ifdef SEM_DEBUG
1479 printf("semexit: %08x id=%d num=%d(adj=%d) ; sem=%d\n",
1480 suptr->un_proc,
1481 semid,
1482 semnum,
1483 adjval,
1484 semakptr->u.sem_base[semnum].semval);
1485 #endif
1486
1487 if (adjval < 0) {
1488 if (semakptr->u.sem_base[semnum].semval < -adjval) {
1489 semakptr->u.sem_base[semnum].semval = 0;
1490 } else {
1491 semakptr->u.sem_base[semnum].semval +=
1492 adjval;
1493 }
1494 } else {
1495 semakptr->u.sem_base[semnum].semval += adjval;
1496 }
1497
1498 /* Maybe we should build a list of semakptr's to wake
1499 * up, finish all access to data structures, release the
1500 * subsystem lock, and wake all the processes. Something
1501 * to think about.
1502 */
1503 #ifdef SEM_WAKEUP
1504 sem_wakeup((caddr_t)semakptr);
1505 #else
1506 wakeup((caddr_t)semakptr);
1507 #endif
1508 #ifdef SEM_DEBUG
1509 printf("semexit: back from wakeup\n");
1510 #endif
1511 suptr->un_cnt--;
1512 suptr->un_ent = sueptr->une_next;
1513 kheap_free(KM_SYSVSEM, sueptr, sizeof(struct undo));
1514 }
1515 }
1516
1517 /*
1518 * Deallocate the undo vector.
1519 */
1520 #ifdef SEM_DEBUG
1521 printf("removing vector\n");
1522 #endif
1523 suptr->un_proc = NULL;
1524 *supidx = suptr->un_next_idx;
1525
1526 unlock:
1527 /*
1528 * There is a semaphore leak (i.e. memory leak) in this code.
1529 * We should be deleting the IPC_PRIVATE semaphores when they are
1530 * no longer needed, and we don't. We would have to track which processes
1531 * know about which IPC_PRIVATE semaphores, updating the list after
1532 * every fork. We can't just delete the semaphore when the process
1533 * that created it dies, because that process may well have forked
1534 * some children. So we need to wait until all of its children have
1535 * died, and so on. Maybe we should tag each IPC_PRIVATE semaphore
1536 * with the creating group ID, count the number of processes left in
1537 * that group, and delete the semaphore when the group is gone.
1538 * Until that code gets implemented we will leak IPC_PRIVATE semaphores.
1539 * There is an upper bound on the size of our semaphore array, so
1540 * leaking the semaphores should not work as a DOS attack.
1541 *
1542 * Please note that the original BSD code this file is based on had the
1543 * same leaky semaphore problem.
1544 */
1545
1546 SYSV_SEM_SUBSYS_UNLOCK();
1547 }
1548
1549
1550 /* (struct sysctl_oid *oidp, void *arg1, int arg2, \
1551 * struct sysctl_req *req) */
1552 static int
1553 sysctl_seminfo(__unused struct sysctl_oid *oidp, void *arg1,
1554 __unused int arg2, struct sysctl_req *req)
1555 {
1556 int error = 0;
1557
1558 error = SYSCTL_OUT(req, arg1, sizeof(int));
1559 if (error || req->newptr == USER_ADDR_NULL) {
1560 return error;
1561 }
1562
1563 SYSV_SEM_SUBSYS_LOCK();
1564
1565 /* Set the values only if the semaphore subsystem is not initialised */
1566 if ((sem_pool == NULL) &&
1567 (sema == NULL) &&
1568 (semu == NULL) &&
1569 (semu_list_idx == -1)) {
1570 if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) {
1571 goto out;
1572 }
1573 } else {
1574 error = EINVAL;
1575 }
1576 out:
1577 SYSV_SEM_SUBSYS_UNLOCK();
1578 return error;
1579 }
1580
1581 /* SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV"); */
1582 extern struct sysctl_oid_list sysctl__kern_sysv_children;
1583 SYSCTL_PROC(_kern_sysv, OID_AUTO, semmni, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1584 &limitseminfo.semmni, 0, &sysctl_seminfo, "I", "semmni");
1585
1586 SYSCTL_PROC(_kern_sysv, OID_AUTO, semmns, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1587 &limitseminfo.semmns, 0, &sysctl_seminfo, "I", "semmns");
1588
1589 SYSCTL_PROC(_kern_sysv, OID_AUTO, semmnu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1590 &limitseminfo.semmnu, 0, &sysctl_seminfo, "I", "semmnu");
1591
1592 SYSCTL_PROC(_kern_sysv, OID_AUTO, semmsl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1593 &limitseminfo.semmsl, 0, &sysctl_seminfo, "I", "semmsl");
1594
1595 SYSCTL_PROC(_kern_sysv, OID_AUTO, semume, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1596 &limitseminfo.semume, 0, &sysctl_seminfo, "I", "semume");
1597
1598
1599 static int
1600 IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
1601 __unused int arg2, struct sysctl_req *req)
1602 {
1603 int error;
1604 int cursor;
1605 union {
1606 struct user32_IPCS_command u32;
1607 struct user_IPCS_command u64;
1608 } ipcs = { };
1609 struct user32_semid_ds semid_ds32 = { }; /* post conversion, 32 bit version */
1610 struct user64_semid_ds semid_ds64 = { }; /* post conversion, 64 bit version */
1611 void *semid_dsp;
1612 size_t ipcs_sz;
1613 size_t semid_ds_sz;
1614 struct proc *p = current_proc();
1615
1616 if (IS_64BIT_PROCESS(p)) {
1617 ipcs_sz = sizeof(struct user_IPCS_command);
1618 semid_ds_sz = sizeof(struct user64_semid_ds);
1619 } else {
1620 ipcs_sz = sizeof(struct user32_IPCS_command);
1621 semid_ds_sz = sizeof(struct user32_semid_ds);
1622 }
1623
1624 /* Copy in the command structure */
1625 if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
1626 return error;
1627 }
1628
1629 if (!IS_64BIT_PROCESS(p)) { /* convert in place */
1630 ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);
1631 }
1632
1633 /* Let us version this interface... */
1634 if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
1635 return EINVAL;
1636 }
1637
1638 SYSV_SEM_SUBSYS_LOCK();
1639 switch (ipcs.u64.ipcs_op) {
1640 case IPCS_SEM_CONF: /* Obtain global configuration data */
1641 if (ipcs.u64.ipcs_datalen != sizeof(struct seminfo)) {
1642 error = ERANGE;
1643 break;
1644 }
1645 if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */
1646 error = EINVAL;
1647 break;
1648 }
1649 error = copyout(&seminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
1650 break;
1651
1652 case IPCS_SEM_ITER: /* Iterate over existing semaphore sets */
1653 cursor = ipcs.u64.ipcs_cursor;
1654 if (cursor < 0 || cursor >= seminfo.semmni) {
1655 error = ERANGE;
1656 break;
1657 }
1658 if (ipcs.u64.ipcs_datalen != (int)semid_ds_sz) {
1659 error = EINVAL;
1660 break;
1661 }
1662 for (; cursor < seminfo.semmni; cursor++) {
1663 if (sema[cursor].u.sem_perm.mode & SEM_ALLOC) {
1664 break;
1665 }
1666 continue;
1667 }
1668 if (cursor == seminfo.semmni) {
1669 error = ENOENT;
1670 break;
1671 }
1672
1673 semid_dsp = &sema[cursor].u; /* default: 64 bit */
1674
1675 /*
1676 * Convert the kernel semid_ds to the user structure of the
1677 * appropriate size (32 bit or 64 bit).
1678 */
1679 if (!IS_64BIT_PROCESS(p)) {
1680 bzero(&semid_ds32, sizeof(semid_ds32));
1681 semid_ds_kernelto32(semid_dsp, &semid_ds32);
1682 semid_dsp = &semid_ds32;
1683 } else {
1684 bzero(&semid_ds64, sizeof(semid_ds64));
1685 semid_ds_kernelto64(semid_dsp, &semid_ds64);
1686 semid_dsp = &semid_ds64;
1687 }
1688
1689 error = copyout(semid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
1690 if (!error) {
1691 /* update cursor */
1692 ipcs.u64.ipcs_cursor = cursor + 1;
1693
1694 if (!IS_64BIT_PROCESS(p)) { /* convert in place */
1695 ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t, ipcs.u64.ipcs_data);
1696 }
1697
1698 error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
1699 }
1700 break;
1701
1702 default:
1703 error = EINVAL;
1704 break;
1705 }
1706 SYSV_SEM_SUBSYS_UNLOCK();
1707 return error;
1708 }
1709
1710 SYSCTL_DECL(_kern_sysv_ipcs);
1711 SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
1712 0, 0, IPCS_sem_sysctl,
1713 "S,IPCS_sem_command",
1714 "ipcs sem command interface");
1715
1716 #endif /* SYSV_SEM */