]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/sysv_shm.c
xnu-2782.30.5.tar.gz
[apple/xnu.git] / bsd / kern / sysv_shm.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
29
30 /*
31 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by Adam Glass and Charles
44 * Hannum.
45 * 4. The names of the authors may not be used to endorse or promote products
46 * derived from this software without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
49 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
50 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
51 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
52 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
53 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
54 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
55 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
56 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
57 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 */
59 /*
60 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
61 * support for mandatory and extensible security protections. This notice
62 * is included in support of clause 2.2 (b) of the Apple Public License,
63 * Version 2.0.
64 * Copyright (c) 2005-2006 SPARTA, Inc.
65 */
66
67
68 #include <sys/appleapiopts.h>
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/shm_internal.h>
73 #include <sys/proc_internal.h>
74 #include <sys/kauth.h>
75 #include <sys/malloc.h>
76 #include <sys/mman.h>
77 #include <sys/stat.h>
78 #include <sys/sysctl.h>
79 #include <sys/ipcs.h>
80 #include <sys/sysent.h>
81 #include <sys/sysproto.h>
82 #if CONFIG_MACF
83 #include <security/mac_framework.h>
84 #endif
85
86 #include <security/audit/audit.h>
87
88 #include <mach/mach_types.h>
89 #include <mach/vm_inherit.h>
90 #include <mach/vm_map.h>
91
92 #include <mach/mach_vm.h>
93
94 #include <vm/vm_map.h>
95 #include <vm/vm_protos.h>
96
97 #include <kern/locks.h>
98
99 /* Uncomment this line to see MAC debugging output. */
100 /* #define MAC_DEBUG */
101 #if CONFIG_MACF_DEBUG
102 #define MPRINTF(a) printf a
103 #else
104 #define MPRINTF(a)
105 #endif
106
107 #if SYSV_SHM
108 static void shminit(void *);
109 #if 0
110 SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)
111 #endif
112
113 static lck_grp_t *sysv_shm_subsys_lck_grp;
114 static lck_grp_attr_t *sysv_shm_subsys_lck_grp_attr;
115 static lck_attr_t *sysv_shm_subsys_lck_attr;
116 static lck_mtx_t sysv_shm_subsys_mutex;
117
118 #define SYSV_SHM_SUBSYS_LOCK() lck_mtx_lock(&sysv_shm_subsys_mutex)
119 #define SYSV_SHM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_shm_subsys_mutex)
120
121 static int oshmctl(void *p, void *uap, void *retval);
122 static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, int * retval);
123 static int shmget_existing(struct shmget_args *uap, int mode, int segnum, int * retval);
124 static void shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out);
125 static void shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out);
126
127 /* XXX casting to (sy_call_t *) is bogus, as usual. */
128 static sy_call_t *shmcalls[] = {
129 (sy_call_t *)shmat, (sy_call_t *)oshmctl,
130 (sy_call_t *)shmdt, (sy_call_t *)shmget,
131 (sy_call_t *)shmctl
132 };
133
134 #define SHMSEG_FREE 0x0200
135 #define SHMSEG_REMOVED 0x0400
136 #define SHMSEG_ALLOCATED 0x0800
137 #define SHMSEG_WANTED 0x1000
138
139 static int shm_last_free, shm_nused, shm_committed;
140 struct shmid_kernel *shmsegs; /* 64 bit version */
141 static int shm_inited = 0;
142
143 /*
144 * Since anonymous memory chunks are limited to ANON_MAX_SIZE bytes,
145 * we have to keep a list of chunks when we want to handle a shared memory
146 * segment bigger than ANON_MAX_SIZE.
147 * Each chunk points to a VM named entry of up to ANON_MAX_SIZE bytes
148 * of anonymous memory.
149 */
150 struct shm_handle {
151 void * shm_object; /* named entry for this chunk*/
152 memory_object_size_t shm_handle_size; /* size of this chunk */
153 struct shm_handle *shm_handle_next; /* next chunk */
154 };
155
156 struct shmmap_state {
157 mach_vm_address_t va; /* user address */
158 int shmid; /* segment id */
159 };
160
161 static void shm_deallocate_segment(struct shmid_kernel *);
162 static int shm_find_segment_by_key(key_t);
163 static struct shmid_kernel *shm_find_segment_by_shmid(int);
164 static int shm_delete_mapping(struct proc *, struct shmmap_state *, int);
165
166 #ifdef __APPLE_API_PRIVATE
167 #define DEFAULT_SHMMAX (4 * 1024 * 1024)
168 #define DEFAULT_SHMMIN 1
169 #define DEFAULT_SHMMNI 32
170 #define DEFAULT_SHMSEG 8
171 #define DEFAULT_SHMALL 1024
172 struct shminfo shminfo = {
173 DEFAULT_SHMMAX,
174 DEFAULT_SHMMIN,
175 DEFAULT_SHMMNI,
176 DEFAULT_SHMSEG,
177 DEFAULT_SHMALL
178 };
179 #endif /* __APPLE_API_PRIVATE */
180
181 void sysv_shm_lock_init(void);
182
183 static __inline__ time_t
184 sysv_shmtime(void)
185 {
186 struct timeval tv;
187 microtime(&tv);
188 return (tv.tv_sec);
189 }
190
191 /*
192 * This conversion is safe, since if we are converting for a 32 bit process,
193 * then it's value of (struct shmid_ds)->shm_segsz will never exceed 4G.
194 *
195 * NOTE: Source and target may *NOT* overlap! (target is smaller)
196 */
static void
shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out)
{
	/* Straight field-by-field narrowing copy; see the contract above. */
	out->shm_perm = in->shm_perm;
	out->shm_segsz = in->shm_segsz;
	out->shm_lpid = in->shm_lpid;
	out->shm_cpid = in->shm_cpid;
	out->shm_nattch = in->shm_nattch;
	out->shm_atime = in->shm_atime;
	out->shm_dtime = in->shm_dtime;
	out->shm_ctime = in->shm_ctime;
	/* Kernel-internal handle; explicitly truncated for the 32-bit layout. */
	out->shm_internal = CAST_DOWN_EXPLICIT(int,in->shm_internal);
}
210
211 /*
212 * NOTE: Source and target may are permitted to overlap! (source is smaller);
213 * this works because we copy fields in order from the end of the struct to
214 * the beginning.
215 */
static void
shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out)
{
	/*
	 * Copy from the LAST field to the FIRST: when source and target
	 * overlap (the documented in-place case), each read happens before
	 * the widened store can clobber it.  Do not reorder these.
	 */
	out->shm_internal = in->shm_internal;
	out->shm_ctime = in->shm_ctime;
	out->shm_dtime = in->shm_dtime;
	out->shm_atime = in->shm_atime;
	out->shm_nattch = in->shm_nattch;
	out->shm_cpid = in->shm_cpid;
	out->shm_lpid = in->shm_lpid;
	out->shm_segsz = in->shm_segsz;
	out->shm_perm = in->shm_perm;
}
229
230
231 static int
232 shm_find_segment_by_key(key_t key)
233 {
234 int i;
235
236 for (i = 0; i < shminfo.shmmni; i++)
237 if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
238 shmsegs[i].u.shm_perm._key == key)
239 return i;
240 return -1;
241 }
242
243 static struct shmid_kernel *
244 shm_find_segment_by_shmid(int shmid)
245 {
246 int segnum;
247 struct shmid_kernel *shmseg;
248
249 segnum = IPCID_TO_IX(shmid);
250 if (segnum < 0 || segnum >= shminfo.shmmni)
251 return NULL;
252 shmseg = &shmsegs[segnum];
253 if ((shmseg->u.shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
254 != SHMSEG_ALLOCATED ||
255 shmseg->u.shm_perm._seq != IPCID_TO_SEQ(shmid))
256 return NULL;
257 return shmseg;
258 }
259
/*
 * Release everything backing a segment: walk the chain of shm_handle
 * chunks releasing each chunk's memory-entry port and freeing the handle,
 * then return the slot to the free pool and update global accounting.
 * Callers hold the SysV shm subsystem lock.
 */
static void
shm_deallocate_segment(struct shmid_kernel *shmseg)
{
	struct shm_handle *shm_handle, *shm_handle_next;
	mach_vm_size_t size;

	for (shm_handle = CAST_DOWN(void *,shmseg->u.shm_internal); /* tunnel */
	    shm_handle != NULL;
	    shm_handle = shm_handle_next) {
		/* capture the next link before freeing the current handle */
		shm_handle_next = shm_handle->shm_handle_next;
		mach_memory_entry_port_release(shm_handle->shm_object);
		FREE((caddr_t) shm_handle, M_SHM);
	}
	shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
	/* return this segment's pages to the global shmall pool */
	size = mach_vm_round_page(shmseg->u.shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->u.shm_perm.mode = SHMSEG_FREE;
#if CONFIG_MACF
	/* Reset the MAC label */
	mac_sysvshm_label_recycle(shmseg);
#endif
}
283
/*
 * Detach one per-process mapping of a segment.  If 'deallocate' is set,
 * the address range is removed from the current task's map first (shmdt
 * path); shmexec() passes 0 because the address space is already gone.
 * Drops the attach count and, when the last attach of a REMOVED segment
 * goes away, destroys the segment itself.
 *
 * Returns 0 on success, EINVAL if the VM deallocation fails.
 * Callers hold the SysV shm subsystem lock.
 */
static int
shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s,
	int deallocate)
{
	struct shmid_kernel *shmseg;
	int segnum, result;
	mach_vm_size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = mach_vm_round_page(shmseg->u.shm_segsz); /* XXX done for us? */
	if (deallocate) {
		result = mach_vm_deallocate(current_map(), shmmap_s->va, size);
		if (result != KERN_SUCCESS)
			return EINVAL;
	}
	/* mark the per-process slot free and stamp the detach time */
	shmmap_s->shmid = -1;
	shmseg->u.shm_dtime = sysv_shmtime();
	if ((--shmseg->u.shm_nattch <= 0) &&
	    (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
		/* last detach of an IPC_RMID'd segment: destroy it now */
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}
309
/*
 * shmdt system call: detach the shared memory segment attached at
 * uap->shmaddr from the calling process.
 *
 * Returns 0 on success; EINVAL if the process has no attach table or no
 * attach at that exact address; a MAC error if the policy denies the
 * detach.  All work is done under the SysV shm subsystem lock.
 */
int
shmdt(struct proc *p, struct shmdt_args *uap, int32_t *retval)
{
#if CONFIG_MACF
	struct shmid_kernel *shmsegptr;
#endif
	struct shmmap_state *shmmap_s;
	int i;
	int shmdtret = 0;

	AUDIT_ARG(svipc_addr, uap->shmaddr);

	SYSV_SHM_SUBSYS_LOCK();

	if (!shm_inited) {
		shminit(NULL);
	}
	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL) {
		shmdtret = EINVAL;
		goto shmdt_out;
	}

	/* find the attach slot whose mapped address matches exactly */
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (mach_vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg) {
		shmdtret = EINVAL;
		goto shmdt_out;
	}
#if CONFIG_MACF
	/*
	 * XXX: It might be useful to move this into the shm_delete_mapping
	 * function
	 */
	shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
	shmdtret = mac_sysvshm_check_shmdt(kauth_cred_get(), shmsegptr);
	if (shmdtret)
		goto shmdt_out;
#endif
	i = shm_delete_mapping(p, shmmap_s, 1);

	if (i == 0)
		*retval = 0;
	shmdtret = i;
shmdt_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return shmdtret;
}
360
/*
 * shmat system call: attach segment uap->shmid into the calling process
 * at uap->shmaddr (or anywhere, if shmaddr is 0).
 *
 * The attach is two-phase: first an address range of the full segment
 * size is reserved with VM_PROT_NONE, then each backing chunk (the
 * shm_handle chain built by shmget_allocate_segment) is mapped over the
 * reservation with VM_FLAGS_OVERWRITE.  On any mapping failure the
 * partially-built range is torn down before the error is returned.
 *
 * *retval receives the attach address on success.  Errors: EINVAL (bad
 * id, misaligned address, or VM failure), EACCES (ipcperm/protection),
 * ENOMEM (no table memory or no address space), EMFILE (per-process
 * attach table full).  Runs under the SysV shm subsystem lock.
 */
int
shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval)
{
	int error, i, flags;
	struct shmid_kernel *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	mach_vm_address_t attach_va;	/* attach address in/out */
	mach_vm_size_t map_size;	/* size of map entry */
	mach_vm_size_t mapped_size;
	vm_prot_t prot;
	size_t size;
	kern_return_t rv;
	int shmat_ret;
	int vm_flags;

	shmat_ret = 0;

	AUDIT_ARG(svipc_id, uap->shmid);
	AUDIT_ARG(svipc_addr, uap->shmaddr);

	SYSV_SHM_SUBSYS_LOCK();

	if (!shm_inited) {
		shminit(NULL);
	}

	shmmap_s = (struct shmmap_state *)p->vm_shm;

	if (shmmap_s == NULL) {
		/*
		 * First SysV attach for this process: allocate the
		 * per-process attach table, one slot per allowed segment,
		 * with every slot initially free (shmid == -1).
		 */
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK);
		if (shmmap_s == NULL) {
			shmat_ret = ENOMEM;
			goto shmat_out;
		}
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		shmat_ret = EINVAL;
		goto shmat_out;
	}

	AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
	error = ipcperm(kauth_cred_get(), &shmseg->u.shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error) {
		shmat_ret = error;
		goto shmat_out;
	}

#if CONFIG_MACF
	error = mac_sysvshm_check_shmat(kauth_cred_get(), shmseg, uap->shmflg);
	if (error) {
		shmat_ret = error;
		goto shmat_out;
	}
#endif
	/* claim the first free slot in the per-process attach table */
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		shmat_ret = EMFILE;
		goto shmat_out;
	}

	map_size = mach_vm_round_page(shmseg->u.shm_segsz);
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr)
		flags |= MAP_FIXED;

	/* caller-supplied addresses must be SHMLBA-aligned, or SHM_RND
	 * must be set to round them down for us */
	attach_va = (mach_vm_address_t)uap->shmaddr;
	if (uap->shmflg & SHM_RND)
		attach_va &= ~(SHMLBA-1);
	else if ((attach_va & (SHMLBA-1)) != 0) {
		shmat_ret = EINVAL;
		goto shmat_out;
	}

	if (flags & MAP_FIXED) {
		vm_flags = VM_FLAGS_FIXED;
	} else {
		vm_flags = VM_FLAGS_ANYWHERE;
	}

	mapped_size = 0;

	/* first reserve enough space... */
	rv = mach_vm_map(current_map(),
	    &attach_va,
	    map_size,
	    0,
	    vm_flags,
	    IPC_PORT_NULL,
	    0,
	    FALSE,
	    VM_PROT_NONE,
	    VM_PROT_NONE,
	    VM_INHERIT_NONE);
	if (rv != KERN_SUCCESS) {
		goto out;
	}

	shmmap_s->va = attach_va;

	/* ... then map the shared memory over the reserved space */
	for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal);/* tunnel */
	    shm_handle != NULL;
	    shm_handle = shm_handle->shm_handle_next) {

		rv = vm_map_enter_mem_object(
			current_map(),		/* process map */
			&attach_va,		/* attach address */
			shm_handle->shm_handle_size, /* segment size */
			(mach_vm_offset_t)0,	/* alignment mask */
			VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
			shm_handle->shm_object,
			(mach_vm_offset_t)0,
			FALSE,
			prot,
			prot,
			VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS)
			goto out;

		mapped_size += shm_handle->shm_handle_size;
		attach_va = attach_va + shm_handle->shm_handle_size;
	}

	/* success: record the attach and stamp the segment */
	shmmap_s->shmid = uap->shmid;
	shmseg->u.shm_lpid = p->p_pid;
	shmseg->u.shm_atime = sysv_shmtime();
	shmseg->u.shm_nattch++;
	*retval = shmmap_s->va; /* XXX return -1 on error */
	shmat_ret = 0;
	goto shmat_out;
out:
	/* unwind whatever portion of the reservation was populated */
	if (mapped_size > 0) {
		(void) mach_vm_deallocate(current_map(),
		    shmmap_s->va,
		    mapped_size);
	}
	switch (rv) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		shmat_ret = ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		shmat_ret = EACCES;
		break;
	default:
		shmat_ret = EINVAL;
		break;
	}
shmat_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return shmat_ret;
}
527
528 static int
529 oshmctl(__unused void *p, __unused void *uap, __unused void *retval)
530 {
531 return EINVAL;
532 }
533
534 /*
535 * Returns: 0 Success
536 * EINVAL
537 * copyout:EFAULT
538 * copyin:EFAULT
539 * ipcperm:EPERM
540 * ipcperm:EACCES
541 */
/*
 * shmctl system call: IPC_STAT copies the segment descriptor out (in the
 * caller's 32- or 64-bit layout), IPC_SET updates owner/group/mode from a
 * copied-in descriptor, and IPC_RMID marks the segment removed (it is
 * destroyed immediately if nothing is attached, otherwise on last detach).
 * Runs under the SysV shm subsystem lock.
 */
int
shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval)
{
	int error;
	kauth_cred_t cred = kauth_cred_get();
	struct user_shmid_ds inbuf;
	struct shmid_kernel *shmseg;

	int shmctl_ret = 0;

	AUDIT_ARG(svipc_cmd, uap->cmd);
	AUDIT_ARG(svipc_id, uap->shmid);

	SYSV_SHM_SUBSYS_LOCK();

	if (!shm_inited) {
		shminit(NULL);
	}

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		shmctl_ret = EINVAL;
		goto shmctl_out;
	}

	/* XXAUDIT: This is the perms BEFORE any change by this call. This
	 * may not be what is desired.
	 */
	AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);

#if CONFIG_MACF
	error = mac_sysvshm_check_shmctl(cred, shmseg, uap->cmd);
	if (error) {
		shmctl_ret = error;
		goto shmctl_out;
	}
#endif
	switch (uap->cmd) {
	case IPC_STAT:
		/* reading the descriptor requires read permission */
		error = ipcperm(cred, &shmseg->u.shm_perm, IPC_R);
		if (error) {
			shmctl_ret = error;
			goto shmctl_out;
		}

		if (IS_64BIT_PROCESS(p)) {
			struct user_shmid_ds shmid_ds;
			memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds));

			/* Clear kernel reserved pointer before copying to user space */
			shmid_ds.shm_internal = USER_ADDR_NULL;

			error = copyout(&shmid_ds, uap->buf, sizeof(shmid_ds));
		} else {
			struct user32_shmid_ds shmid_ds32;
			shmid_ds_64to32(&shmseg->u, &shmid_ds32);

			/* Clear kernel reserved pointer before copying to user space */
			shmid_ds32.shm_internal = (user32_addr_t)0;

			error = copyout(&shmid_ds32, uap->buf, sizeof(shmid_ds32));
		}
		if (error) {
			shmctl_ret = error;
			goto shmctl_out;
		}
		break;
	case IPC_SET:
		/* modifying the descriptor requires owner/admin permission */
		error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
		if (error) {
			shmctl_ret = error;
			goto shmctl_out;
		}
		if (IS_64BIT_PROCESS(p)) {
			error = copyin(uap->buf, &inbuf, sizeof(struct user_shmid_ds));
		} else {
			struct user32_shmid_ds shmid_ds32;
			error = copyin(uap->buf, &shmid_ds32, sizeof(shmid_ds32));
			/* convert in place; ugly, but safe */
			shmid_ds_32to64(&shmid_ds32, &inbuf);
		}
		if (error) {
			shmctl_ret = error;
			goto shmctl_out;
		}
		/* only uid/gid and the access bits of mode are settable */
		shmseg->u.shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->u.shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->u.shm_perm.mode =
		    (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->u.shm_ctime = sysv_shmtime();
		break;
	case IPC_RMID:
		error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
		if (error) {
			shmctl_ret = error;
			goto shmctl_out;
		}
		/* hide the key so new shmget()s can't find the segment */
		shmseg->u.shm_perm._key = IPC_PRIVATE;
		shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->u.shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		shmctl_ret = EINVAL;
		goto shmctl_out;
	}
	*retval = 0;
	shmctl_ret = 0;
shmctl_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return shmctl_ret;
}
661
/*
 * shmget() helper for a key that matched an existing segment.  Validates
 * the request against the segment and, on success, stores the segment's
 * shmid in *retval and returns 0.
 *
 * Returns EAGAIN when the segment was mid-allocation and the caller must
 * re-run the key lookup; EACCES, EINVAL, or EEXIST per the checks below;
 * or a MAC policy error.  Callers hold the SysV shm subsystem lock.
 */
static int
shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval)
{
	struct shmid_kernel *shmseg;
	int error = 0;

	shmseg = &shmsegs[segnum];
	if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated. Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}

	/*
	 * The low 9 bits of shmflag are the mode bits being requested, which
	 * are the actual mode bits desired on the segment, and not in IPC_R
	 * form; therefore it would be incorrect to call ipcperm() to validate
	 * them; instead, we AND the existing mode with the requested mode, and
	 * verify that it matches the requested mode; otherwise, we fail with
	 * EACCES (access denied).
	 */
	if ((shmseg->u.shm_perm.mode & mode) != mode)
		return EACCES;

#if CONFIG_MACF
	error = mac_sysvshm_check_shmget(kauth_cred_get(), shmseg, uap->shmflg);
	if (error)
		return (error);
#endif

	/* a nonzero requested size must fit within the existing segment */
	if (uap->size && uap->size > shmseg->u.shm_segsz)
		return EINVAL;

	/* exclusive create of an existing key is an error */
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
	return 0;
}
708
709 static int
710 shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode,
711 int *retval)
712 {
713 int i, segnum, shmid;
714 kauth_cred_t cred = kauth_cred_get();
715 struct shmid_kernel *shmseg;
716 struct shm_handle *shm_handle;
717 kern_return_t kret;
718 mach_vm_size_t total_size, size, alloc_size;
719 void * mem_object;
720 struct shm_handle *shm_handle_next, **shm_handle_next_p;
721
722 if (uap->size < (user_size_t)shminfo.shmmin ||
723 uap->size > (user_size_t)shminfo.shmmax)
724 return EINVAL;
725 if (shm_nused >= shminfo.shmmni) /* any shmids left? */
726 return ENOSPC;
727 total_size = mach_vm_round_page(uap->size);
728 if ((user_ssize_t)(shm_committed + btoc(total_size)) > shminfo.shmall)
729 return ENOMEM;
730 if (shm_last_free < 0) {
731 for (i = 0; i < shminfo.shmmni; i++)
732 if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE)
733 break;
734 if (i == shminfo.shmmni)
735 panic("shmseg free count inconsistent");
736 segnum = i;
737 } else {
738 segnum = shm_last_free;
739 shm_last_free = -1;
740 }
741 shmseg = &shmsegs[segnum];
742
743 /*
744 * In case we sleep in malloc(), mark the segment present but deleted
745 * so that noone else tries to create the same key.
746 * XXX but we don't release the global lock !?
747 */
748 shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
749 shmseg->u.shm_perm._key = uap->key;
750 shmseg->u.shm_perm._seq = (shmseg->u.shm_perm._seq + 1) & 0x7fff;
751
752 shm_handle_next_p = NULL;
753 for (alloc_size = 0;
754 alloc_size < total_size;
755 alloc_size += size) {
756 size = MIN(total_size - alloc_size, ANON_MAX_SIZE);
757 kret = mach_make_memory_entry_64(
758 VM_MAP_NULL,
759 (memory_object_size_t *) &size,
760 (memory_object_offset_t) 0,
761 MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
762 (ipc_port_t *) &mem_object, 0);
763 if (kret != KERN_SUCCESS)
764 goto out;
765
766 MALLOC(shm_handle, struct shm_handle *, sizeof(struct shm_handle), M_SHM, M_WAITOK);
767 if (shm_handle == NULL) {
768 kret = KERN_NO_SPACE;
769 mach_memory_entry_port_release(mem_object);
770 mem_object = NULL;
771 goto out;
772 }
773 shm_handle->shm_object = mem_object;
774 shm_handle->shm_handle_size = size;
775 shm_handle->shm_handle_next = NULL;
776 if (shm_handle_next_p == NULL) {
777 shmseg->u.shm_internal = CAST_USER_ADDR_T(shm_handle);/* tunnel */
778 } else {
779 *shm_handle_next_p = shm_handle;
780 }
781 shm_handle_next_p = &shm_handle->shm_handle_next;
782 }
783
784 shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
785
786 shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = kauth_cred_getuid(cred);
787 shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = kauth_cred_getgid(cred);
788 shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
789 (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
790 shmseg->u.shm_segsz = uap->size;
791 shmseg->u.shm_cpid = p->p_pid;
792 shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
793 shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
794 #if CONFIG_MACF
795 mac_sysvshm_label_associate(cred, shmseg);
796 #endif
797 shmseg->u.shm_ctime = sysv_shmtime();
798 shm_committed += btoc(size);
799 shm_nused++;
800 AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
801 if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
802 /*
803 * Somebody else wanted this key while we were asleep. Wake
804 * them up now.
805 */
806 shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
807 wakeup((caddr_t)shmseg);
808 }
809 *retval = shmid;
810 AUDIT_ARG(svipc_id, shmid);
811 return 0;
812 out:
813 if (kret != KERN_SUCCESS) {
814 for (shm_handle = CAST_DOWN(void *,shmseg->u.shm_internal); /* tunnel */
815 shm_handle != NULL;
816 shm_handle = shm_handle_next) {
817 shm_handle_next = shm_handle->shm_handle_next;
818 mach_memory_entry_port_release(shm_handle->shm_object);
819 FREE((caddr_t) shm_handle, M_SHM);
820 }
821 shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
822 }
823
824 switch (kret) {
825 case KERN_INVALID_ADDRESS:
826 case KERN_NO_SPACE:
827 return (ENOMEM);
828 case KERN_PROTECTION_FAILURE:
829 return (EACCES);
830 default:
831 return (EINVAL);
832 }
833
834 }
835
/*
 * shmget system call: find or create a shared memory segment for
 * uap->key.  IPC_PRIVATE always allocates a fresh segment; otherwise an
 * existing key is handed to shmget_existing(), retrying the lookup when
 * it reports EAGAIN (segment was mid-allocation).  The resulting shmid
 * is returned through *retval.  Runs under the SysV shm subsystem lock.
 */
int
shmget(struct proc *p, struct shmget_args *uap, int32_t *retval)
{
	int segnum, mode, error;
	int shmget_ret = 0;

	/* Auditing is actually done in shmget_allocate_segment() */

	SYSV_SHM_SUBSYS_LOCK();

	if (!shm_inited) {
		shminit(NULL);
	}

	/* low 9 bits of shmflg are the requested access-permission bits */
	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			shmget_ret = error;
			goto shmget_out;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			shmget_ret = ENOENT;
			goto shmget_out;
		}
	}
	shmget_ret = shmget_allocate_segment(p, uap, mode, retval);
shmget_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return shmget_ret;
	/*NOTREACHED*/

}
873
874 /*
875 * shmsys
876 *
877 * Entry point for all SHM calls: shmat, oshmctl, shmdt, shmget, shmctl
878 *
879 * Parameters: p Process requesting the call
880 * uap User argument descriptor (see below)
881 * retval Return value of the selected shm call
882 *
883 * Indirect parameters: uap->which msg call to invoke (index in array of shm calls)
884 * uap->a2 User argument descriptor
885 *
886 * Returns: 0 Success
887 * !0 Not success
888 *
889 * Implicit returns: retval Return value of the selected shm call
890 *
891 * DEPRECATED: This interface should not be used to call the other SHM
892 * functions (shmat, oshmctl, shmdt, shmget, shmctl). The correct
893 * usage is to call the other SHM functions directly.
894 */
895 int
896 shmsys(struct proc *p, struct shmsys_args *uap, int32_t *retval)
897 {
898
899 /* The routine that we are dispatching already does this */
900
901 if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
902 return EINVAL;
903 return ((*shmcalls[uap->which])(p, &uap->a2, retval));
904 }
905
906 /*
907 * Return 0 on success, 1 on failure.
908 */
/*
 * Duplicate the parent's SysV shm attach table into the child at fork
 * time, bumping the attach count of every segment the parent has mapped
 * (the mappings themselves are inherited by the VM fork).
 *
 * NOTE(review): p1->vm_shm is copied unconditionally — assumes the
 * caller only invokes shmfork() when the parent has an attach table;
 * verify against the fork path.
 *
 * Return 0 on success, 1 on failure.
 */
int
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;
	int shmfork_ret = 0;

	SYSV_SHM_SUBSYS_LOCK();

	if (!shm_inited) {
		shminit(NULL);
	}

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK);
	if (shmmap_s != NULL) {
		bcopy((caddr_t)p1->vm_shm, (caddr_t)shmmap_s, size);
		p2->vm_shm = (caddr_t)shmmap_s;
		/* every segment the parent has attached gains one attach */
		for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
			if (shmmap_s->shmid != -1)
				shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
		shmfork_ret = 0;
		goto shmfork_out;
	}

	shmfork_ret = 1; /* failed to copy to child - ENOMEM */
shmfork_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return shmfork_ret;
}
940
/*
 * Process-exit teardown: detach (and VM-deallocate) every segment the
 * process still has attached, then free its attach table.
 *
 * NOTE(review): shmmap_s is dereferenced without a NULL check — assumes
 * callers only invoke shmexit() when p->vm_shm is non-NULL; verify
 * against the exit path.
 */
void
shmexit(struct proc *p)
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->vm_shm;

	SYSV_SHM_SUBSYS_LOCK();
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			/*
			 * XXX: Should the MAC framework enforce
			 * check here as well.
			 */
			shm_delete_mapping(p, shmmap_s, 1);
	FREE((caddr_t)p->vm_shm, M_SHM);
	p->vm_shm = NULL;
	SYSV_SHM_SUBSYS_UNLOCK();
}
961
962 /*
963 * shmexec() is like shmexit(), only it doesn't delete the mappings,
964 * since the old address space has already been destroyed and the new
965 * one instantiated. Instead, it just does the housekeeping work we
966 * need to do to keep the System V shared memory subsystem sane.
967 */
__private_extern__ void
shmexec(struct proc *p)
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->vm_shm;
	SYSV_SHM_SUBSYS_LOCK();
	/* deallocate == 0: the exec already destroyed the address space,
	 * so only the bookkeeping (attach counts, dtime) is updated */
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s, 0);
	FREE((caddr_t)p->vm_shm, M_SHM);
	p->vm_shm = NULL;
	SYSV_SHM_SUBSYS_UNLOCK();
}
983
984 void
985 shminit(__unused void *dummy)
986 {
987 int i;
988 int s;
989
990 if (!shm_inited) {
991 /*
992 * we store internally 64 bit, since if we didn't, we would
993 * be unable to represent a segment size in excess of 32 bits
994 * with the (struct shmid_ds)->shm_segsz field; also, POSIX
995 * dictates this filed be a size_t, which is 64 bits when
996 * running 64 bit binaries.
997 */
998 s = sizeof(struct shmid_kernel) * shminfo.shmmni;
999
1000 MALLOC(shmsegs, struct shmid_kernel *, s, M_SHM, M_WAITOK);
1001 if (shmsegs == NULL) {
1002 /* XXX fail safely: leave shared memory uninited */
1003 return;
1004 }
1005 for (i = 0; i < shminfo.shmmni; i++) {
1006 shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
1007 shmsegs[i].u.shm_perm._seq = 0;
1008 #if CONFIG_MACF
1009 mac_sysvshm_label_init(&shmsegs[i]);
1010 #endif
1011 }
1012 shm_last_free = 0;
1013 shm_nused = 0;
1014 shm_committed = 0;
1015 shm_inited = 1;
1016 }
1017 }
1018 /* Initialize the mutex governing access to the SysV shm subsystem */
/* Initialize the mutex governing access to the SysV shm subsystem */
__private_extern__ void
sysv_shm_lock_init( void )
{
	/* attribute objects must exist before the group/mutex that use them */
	sysv_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	sysv_shm_subsys_lck_grp = lck_grp_alloc_init("sysv_shm_subsys_lock", sysv_shm_subsys_lck_grp_attr);

	sysv_shm_subsys_lck_attr = lck_attr_alloc_init();
	lck_mtx_init(&sysv_shm_subsys_mutex, sysv_shm_subsys_lck_grp, sysv_shm_subsys_lck_attr);
}
1030
1031 /* (struct sysctl_oid *oidp, void *arg1, int arg2, \
1032 struct sysctl_req *req) */
/*
 * sysctl handler for the shminfo tunables (arg1 points at one 64-bit
 * field of the global shminfo).  Reads always succeed; writes are
 * rejected for shmmni once the subsystem is initialized, and shmmax must
 * be page-aligned.
 *
 * NOTE(review): negative values are not rejected here for any field —
 * presumably relied on being caught elsewhere; verify.
 */
/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
	struct sysctl_req *req) */
static int
sysctl_shminfo(__unused struct sysctl_oid *oidp, void *arg1,
	__unused int arg2, struct sysctl_req *req)
{
	int error = 0;
	int sysctl_shminfo_ret = 0;
	uint64_t	saved_shmmax;

	error = SYSCTL_OUT(req, arg1, sizeof(int64_t));
	if (error || req->newptr == USER_ADDR_NULL)
		return(error);

	SYSV_SHM_SUBSYS_LOCK();

	/* shmmni can not be changed after SysV SHM has been initialized */
	if (shm_inited && arg1 == &shminfo.shmmni) {
		sysctl_shminfo_ret = EPERM;
		goto sysctl_shminfo_out;
	}
	/* remember the old value in case the new one must be rolled back */
	saved_shmmax = shminfo.shmmax;

	if ((error = SYSCTL_IN(req, arg1, sizeof(int64_t))) != 0) {
		sysctl_shminfo_ret = error;
		goto sysctl_shminfo_out;
	}

	if (arg1 == &shminfo.shmmax) {
		/* shmmax needs to be page-aligned */
		if (shminfo.shmmax & PAGE_MASK_64) {
			shminfo.shmmax = saved_shmmax;
			sysctl_shminfo_ret = EINVAL;
			goto sysctl_shminfo_out;
		}
	}
	sysctl_shminfo_ret = 0;
sysctl_shminfo_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return sysctl_shminfo_ret;
}
1072
1073 static int
1074 IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
1075 __unused int arg2, struct sysctl_req *req)
1076 {
1077 int error;
1078 int cursor;
1079 union {
1080 struct user32_IPCS_command u32;
1081 struct user_IPCS_command u64;
1082 } ipcs;
1083 struct user32_shmid_ds shmid_ds32; /* post conversion, 32 bit version */
1084 struct user_shmid_ds shmid_ds; /* 64 bit version */
1085 void *shmid_dsp;
1086 size_t ipcs_sz = sizeof(struct user_IPCS_command);
1087 size_t shmid_ds_sz = sizeof(struct user_shmid_ds);
1088 struct proc *p = current_proc();
1089
1090 SYSV_SHM_SUBSYS_LOCK();
1091
1092 if (!shm_inited) {
1093 shminit(NULL);
1094 }
1095
1096 if (!IS_64BIT_PROCESS(p)) {
1097 ipcs_sz = sizeof(struct user32_IPCS_command);
1098 shmid_ds_sz = sizeof(struct user32_shmid_ds);
1099 }
1100
1101 /* Copy in the command structure */
1102 if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
1103 goto ipcs_shm_sysctl_out;
1104 }
1105
1106 if (!IS_64BIT_PROCESS(p)) /* convert in place */
1107 ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);
1108
1109 /* Let us version this interface... */
1110 if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
1111 error = EINVAL;
1112 goto ipcs_shm_sysctl_out;
1113 }
1114
1115 switch(ipcs.u64.ipcs_op) {
1116 case IPCS_SHM_CONF: /* Obtain global configuration data */
1117 if (ipcs.u64.ipcs_datalen != sizeof(struct shminfo)) {
1118 if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */
1119 error = ENOMEM;
1120 break;
1121 }
1122 error = ERANGE;
1123 break;
1124 }
1125 error = copyout(&shminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
1126 break;
1127
1128 case IPCS_SHM_ITER: /* Iterate over existing segments */
1129 cursor = ipcs.u64.ipcs_cursor;
1130 if (cursor < 0 || cursor >= shminfo.shmmni) {
1131 error = ERANGE;
1132 break;
1133 }
1134 if (ipcs.u64.ipcs_datalen != (int)shmid_ds_sz) {
1135 error = EINVAL;
1136 break;
1137 }
1138 for( ; cursor < shminfo.shmmni; cursor++) {
1139 if (shmsegs[cursor].u.shm_perm.mode & SHMSEG_ALLOCATED)
1140 break;
1141 continue;
1142 }
1143 if (cursor == shminfo.shmmni) {
1144 error = ENOENT;
1145 break;
1146 }
1147
1148 shmid_dsp = &shmsegs[cursor]; /* default: 64 bit */
1149
1150 /*
1151 * If necessary, convert the 64 bit kernel segment
1152 * descriptor to a 32 bit user one.
1153 */
1154 if (!IS_64BIT_PROCESS(p)) {
1155 shmid_ds_64to32(shmid_dsp, &shmid_ds32);
1156
1157 /* Clear kernel reserved pointer before copying to user space */
1158 shmid_ds32.shm_internal = (user32_addr_t)0;
1159
1160 shmid_dsp = &shmid_ds32;
1161 } else {
1162 memcpy(&shmid_ds, shmid_dsp, sizeof(shmid_ds));
1163
1164 /* Clear kernel reserved pointer before copying to user space */
1165 shmid_ds.shm_internal = USER_ADDR_NULL;
1166
1167 shmid_dsp = &shmid_ds;
1168 }
1169 error = copyout(shmid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
1170 if (!error) {
1171 /* update cursor */
1172 ipcs.u64.ipcs_cursor = cursor + 1;
1173
1174 if (!IS_64BIT_PROCESS(p)) /* convert in place */
1175 ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t,ipcs.u64.ipcs_data);
1176
1177 error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
1178 }
1179 break;
1180
1181 default:
1182 error = EINVAL;
1183 break;
1184 }
1185 ipcs_shm_sysctl_out:
1186 SYSV_SHM_SUBSYS_UNLOCK();
1187 return(error);
1188 }
1189
/* kern.sysv: parent node for the SysV IPC tunables */
SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSV");

/* Maximum segment size in bytes; handler rejects non-page-aligned values */
SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmax, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
        &shminfo.shmmax, 0, &sysctl_shminfo ,"Q","shmmax");

/* Minimum segment size in bytes */
SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmin, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
        &shminfo.shmmin, 0, &sysctl_shminfo ,"Q","shmmin");

/* Number of segment identifiers; immutable once shm is initialized (EPERM) */
SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmni, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
        &shminfo.shmmni, 0, &sysctl_shminfo ,"Q","shmmni");

/* Maximum number of segments attachable per process */
SYSCTL_PROC(_kern_sysv, OID_AUTO, shmseg, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
        &shminfo.shmseg, 0, &sysctl_shminfo ,"Q","shmseg");

/* System-wide limit on shared memory, in pages */
SYSCTL_PROC(_kern_sysv, OID_AUTO, shmall, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
        &shminfo.shmall, 0, &sysctl_shminfo ,"Q","shmall");

/* kern.sysv.ipcs: node for the ipcs(1) command interface */
SYSCTL_NODE(_kern_sysv, OID_AUTO, ipcs, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSVIPCS");

/* kern.sysv.ipcs.shm: shared memory query interface used by ipcs(1) */
SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, shm, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
        0, 0, IPCS_shm_sysctl,
        "S,IPCS_shm_command",
        "ipcs shm command interface");
1213 #endif /* SYSV_SHM */
1214
1215 /* DSEP Review Done pl-20051108-v02 @2743,@2908,@2913,@3009 */