/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005-2006 SPARTA, Inc.
 */


#include <sys/appleapiopts.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/ipcs.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

#include <security/audit/audit.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <kern/locks.h>
#include <os/overflow.h>

/* Uncomment this line to see MAC debugging output. */
/* #define MAC_DEBUG */
#if CONFIG_MACF_DEBUG
#define MPRINTF(a)      printf a
#else
#define MPRINTF(a)
#endif

#if SYSV_SHM
static int shminit(void);

static LCK_GRP_DECLARE(sysv_shm_subsys_lck_grp, "sysv_shm_subsys_lock");
static LCK_MTX_DECLARE(sysv_shm_subsys_mutex, &sysv_shm_subsys_lck_grp);

#define SYSV_SHM_SUBSYS_LOCK()          lck_mtx_lock(&sysv_shm_subsys_mutex)
#define SYSV_SHM_SUBSYS_UNLOCK()        lck_mtx_unlock(&sysv_shm_subsys_mutex)

static int oshmctl(void *p, void *uap, void *retval);
static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, int *retval);
static int shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval);
static void shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out);
static void shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *const shmcalls[] = {
        (sy_call_t *)shmat, (sy_call_t *)oshmctl,
        (sy_call_t *)shmdt, (sy_call_t *)shmget,
        (sy_call_t *)shmctl
};

#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_kernel *shmsegs;   /* 64 bit version */
static int shm_inited = 0;

/*
 * Since anonymous memory chunks are limited to ANON_MAX_SIZE bytes,
 * we have to keep a list of chunks when we want to handle a shared memory
 * segment bigger than ANON_MAX_SIZE.
 * Each chunk points to a VM named entry of up to ANON_MAX_SIZE bytes
 * of anonymous memory.
 */
struct shm_handle {
        void *shm_object;                       /* named entry for this chunk */
        memory_object_size_t shm_handle_size;   /* size of this chunk */
        struct shm_handle *shm_handle_next;     /* next chunk */
};
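
/*
 * Illustrative layout (added note, not in the original source): a segment
 * of size S hangs off shm_internal as ceil(S / ANON_MAX_SIZE) chunks, e.g.
 *
 *   shm_internal -> [ANON_MAX_SIZE] -> [ANON_MAX_SIZE] -> [remainder] -> NULL
 */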

struct shmmap_state {
        mach_vm_address_t va;   /* user address */
        int shmid;              /* segment id */
};

static void shm_deallocate_segment(struct shmid_kernel *);
static int shm_find_segment_by_key(key_t);
static struct shmid_kernel *shm_find_segment_by_shmid(int);
static int shm_delete_mapping(struct proc *, struct shmmap_state *, int);

#ifdef __APPLE_API_PRIVATE
#define DEFAULT_SHMMAX  (4 * 1024 * 1024)
#define DEFAULT_SHMMIN  1
#define DEFAULT_SHMMNI  32
#define DEFAULT_SHMSEG  8
#define DEFAULT_SHMALL  1024

struct shminfo shminfo = {
        .shmmax = DEFAULT_SHMMAX,
        .shmmin = DEFAULT_SHMMIN,
        .shmmni = DEFAULT_SHMMNI,
        .shmseg = DEFAULT_SHMSEG,
        .shmall = DEFAULT_SHMALL
};

#define SHMID_IS_VALID(x)       ((x) >= 0)
#define SHMID_UNALLOCATED       (-1)
#define SHMID_SENTINEL          (-2)

#endif /* __APPLE_API_PRIVATE */

static __inline__ time_t
sysv_shmtime(void)
{
        struct timeval tv;
        microtime(&tv);
        return tv.tv_sec;
}

/*
 * This conversion is safe: if we are converting for a 32-bit process,
 * then its (struct shmid_ds)->shm_segsz value will never exceed 4GB.
 *
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out)
{
        out->shm_perm = in->shm_perm;
        out->shm_segsz = in->shm_segsz;
        out->shm_lpid = in->shm_lpid;
        out->shm_cpid = in->shm_cpid;
        out->shm_nattch = in->shm_nattch;
        out->shm_atime = in->shm_atime;
        out->shm_dtime = in->shm_dtime;
        out->shm_ctime = in->shm_ctime;
        out->shm_internal = CAST_DOWN_EXPLICIT(int, in->shm_internal);
}

/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out)
{
        out->shm_internal = in->shm_internal;
        out->shm_ctime = in->shm_ctime;
        out->shm_dtime = in->shm_dtime;
        out->shm_atime = in->shm_atime;
        out->shm_nattch = in->shm_nattch;
        out->shm_cpid = in->shm_cpid;
        out->shm_lpid = in->shm_lpid;
        out->shm_segsz = in->shm_segsz;
        out->shm_perm = in->shm_perm;
}


static int
shm_find_segment_by_key(key_t key)
{
        int i;

        for (i = 0; i < shminfo.shmmni; i++) {
                if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].u.shm_perm._key == key) {
                        return i;
                }
        }
        return -1;
}

static struct shmid_kernel *
shm_find_segment_by_shmid(int shmid)
{
        int segnum;
        struct shmid_kernel *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shminfo.shmmni) {
                return NULL;
        }
        shmseg = &shmsegs[segnum];
        if ((shmseg->u.shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
            != SHMSEG_ALLOCATED ||
            shmseg->u.shm_perm._seq != IPCID_TO_SEQ(shmid)) {
                return NULL;
        }
        return shmseg;
}
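
/*
 * Added note (descriptive, following the classic SysV IPC id encoding):
 * a shmid packs the slot index in its low bits and the slot's sequence
 * number above it, so IPCID_TO_IX() recovers the slot and the _seq
 * comparison above rejects stale ids whose slot has been recycled.
 */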

static void
shm_deallocate_segment(struct shmid_kernel *shmseg)
{
        struct shm_handle *shm_handle, *shm_handle_next;
        mach_vm_size_t size;

        for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
            shm_handle != NULL;
            shm_handle = shm_handle_next) {
                shm_handle_next = shm_handle->shm_handle_next;
                mach_memory_entry_port_release(shm_handle->shm_object);
                kheap_free(KM_SHM, shm_handle, sizeof(struct shm_handle));
        }
        shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
        size = vm_map_round_page(shmseg->u.shm_segsz,
            vm_map_page_mask(current_map()));
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->u.shm_perm.mode = SHMSEG_FREE;
#if CONFIG_MACF
        /* Reset the MAC label */
        mac_sysvshm_label_recycle(shmseg);
#endif
}

static int
shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s,
    int deallocate)
{
        struct shmid_kernel *shmseg;
        int segnum, result;
        mach_vm_size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = vm_map_round_page(shmseg->u.shm_segsz,
            vm_map_page_mask(current_map())); /* XXX done for us? */
        if (deallocate) {
                result = mach_vm_deallocate(current_map(), shmmap_s->va, size);
                if (result != KERN_SUCCESS) {
                        return EINVAL;
                }
        }
        shmmap_s->shmid = SHMID_UNALLOCATED;
        shmseg->u.shm_dtime = sysv_shmtime();
        if ((--shmseg->u.shm_nattch <= 0) &&
            (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}

int
shmdt(struct proc *p, struct shmdt_args *uap, int32_t *retval)
{
#if CONFIG_MACF
        struct shmid_kernel *shmsegptr;
#endif
        struct shmmap_state *shmmap_s;
        int i;
        int shmdtret = 0;

        AUDIT_ARG(svipc_addr, uap->shmaddr);

        SYSV_SHM_SUBSYS_LOCK();

        if ((shmdtret = shminit())) {
                goto shmdt_out;
        }

        shmmap_s = (struct shmmap_state *)p->vm_shm;
        if (shmmap_s == NULL) {
                shmdtret = EINVAL;
                goto shmdt_out;
        }

        for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
                if (SHMID_IS_VALID(shmmap_s->shmid) &&
                    shmmap_s->va == (mach_vm_offset_t)uap->shmaddr) {
                        break;
                }
        }

        if (!SHMID_IS_VALID(shmmap_s->shmid)) {
                shmdtret = EINVAL;
                goto shmdt_out;
        }

#if CONFIG_MACF
        /*
         * XXX: It might be useful to move this into the shm_delete_mapping
         * function
         */
        shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
        shmdtret = mac_sysvshm_check_shmdt(kauth_cred_get(), shmsegptr);
        if (shmdtret) {
                goto shmdt_out;
        }
#endif
        i = shm_delete_mapping(p, shmmap_s, 1);

        if (i == 0) {
                *retval = 0;
        }
        shmdtret = i;
shmdt_out:
        SYSV_SHM_SUBSYS_UNLOCK();
        return shmdtret;
}
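
/*
 * Userland view (illustrative sketch, not part of the original source):
 * these handlers back the familiar sequence
 *
 *   int id = shmget(key, size, IPC_CREAT | 0600);
 *   void *p = shmat(id, NULL, 0);
 *   ... use p ...
 *   shmdt(p);
 *   shmctl(id, IPC_RMID, NULL);
 */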

int
shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval)
{
        int error, i, flags;
        struct shmid_kernel *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        mach_vm_address_t attach_va;    /* attach address in/out */
        mach_vm_address_t shmlba;
        mach_vm_size_t map_size;        /* size of map entry */
        mach_vm_size_t mapped_size;
        vm_prot_t prot;
        size_t size;
        kern_return_t rv;
        int shmat_ret;
        int vm_flags;

        shmat_ret = 0;

        AUDIT_ARG(svipc_id, uap->shmid);
        AUDIT_ARG(svipc_addr, uap->shmaddr);

        SYSV_SHM_SUBSYS_LOCK();

        if ((shmat_ret = shminit())) {
                goto shmat_out;
        }

        shmmap_s = (struct shmmap_state *)p->vm_shm;
        if (shmmap_s == NULL) {
                /* lazily allocate the shm map */

                int nsegs = shminfo.shmseg;
                if (nsegs <= 0) {
                        shmat_ret = EMFILE;
                        goto shmat_out;
                }

                /* +1 for the sentinel */
                if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
                        shmat_ret = ENOMEM;
                        goto shmat_out;
                }

                shmmap_s = kheap_alloc(KM_SHM, size, Z_WAITOK);
                if (shmmap_s == NULL) {
                        shmat_ret = ENOMEM;
                        goto shmat_out;
                }

                /* initialize the entries */
                for (i = 0; i < nsegs; i++) {
                        shmmap_s[i].shmid = SHMID_UNALLOCATED;
                }
                shmmap_s[i].shmid = SHMID_SENTINEL;

                p->vm_shm = (caddr_t)shmmap_s;
        }
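
        /*
         * Added note (descriptive): p->vm_shm now holds shminfo.shmseg
         * entries marked SHMID_UNALLOCATED followed by one SHMID_SENTINEL
         * entry, so the scans below can stop without consulting
         * shminfo.shmseg again.
         */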

        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                shmat_ret = EINVAL;
                goto shmat_out;
        }

        AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
        error = ipcperm(kauth_cred_get(), &shmseg->u.shm_perm,
            (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R | IPC_W);
        if (error) {
                shmat_ret = error;
                goto shmat_out;
        }

#if CONFIG_MACF
        error = mac_sysvshm_check_shmat(kauth_cred_get(), shmseg, uap->shmflg);
        if (error) {
                shmat_ret = error;
                goto shmat_out;
        }
#endif

        /* find a free shmid */
        while (SHMID_IS_VALID(shmmap_s->shmid)) {
                shmmap_s++;
        }
        if (shmmap_s->shmid != SHMID_UNALLOCATED) {
                /* no free shmids */
                shmat_ret = EMFILE;
                goto shmat_out;
        }

        map_size = vm_map_round_page(shmseg->u.shm_segsz,
            vm_map_page_mask(current_map()));
        prot = VM_PROT_READ;
        if ((uap->shmflg & SHM_RDONLY) == 0) {
                prot |= VM_PROT_WRITE;
        }
        flags = MAP_ANON | MAP_SHARED;
        if (uap->shmaddr) {
                flags |= MAP_FIXED;
        }

        attach_va = (mach_vm_address_t)uap->shmaddr;
        shmlba = vm_map_page_size(current_map()); /* XXX instead of SHMLBA */
        if (uap->shmflg & SHM_RND) {
                attach_va &= ~(shmlba - 1);
        } else if ((attach_va & (shmlba - 1)) != 0) {
                shmat_ret = EINVAL;
                goto shmat_out;
        }
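
        /*
         * Added example (illustrative, assuming 16KB pages so
         * shmlba == 0x4000): with SHM_RND, shmaddr 0x104321 is truncated
         * to 0x104000; without SHM_RND the same address fails the
         * alignment check above with EINVAL.
         */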

        if (flags & MAP_FIXED) {
                vm_flags = VM_FLAGS_FIXED;
        } else {
                vm_flags = VM_FLAGS_ANYWHERE;
        }

        mapped_size = 0;

        /* first reserve enough space... */
        rv = mach_vm_map_kernel(current_map(),
            &attach_va,
            map_size,
            0,
            vm_flags,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_NONE,
            IPC_PORT_NULL,
            0,
            FALSE,
            VM_PROT_NONE,
            VM_PROT_NONE,
            VM_INHERIT_NONE);
        if (rv != KERN_SUCCESS) {
                goto out;
        }

        shmmap_s->va = attach_va;

        /* ... then map the shared memory over the reserved space */
        for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
            shm_handle != NULL;
            shm_handle = shm_handle->shm_handle_next) {
                vm_map_size_t chunk_size;

                assert(mapped_size < map_size);
                chunk_size = shm_handle->shm_handle_size;
                if (chunk_size > map_size - mapped_size) {
                        /*
                         * Partial mapping of last chunk due to
                         * page size mismatch.
                         */
                        assert(vm_map_page_shift(current_map()) < PAGE_SHIFT);
                        assert(shm_handle->shm_handle_next == NULL);
                        chunk_size = map_size - mapped_size;
                }
                rv = vm_map_enter_mem_object(
                        current_map(),          /* process map */
                        &attach_va,             /* attach address */
                        chunk_size,             /* size to map */
                        (mach_vm_offset_t)0,    /* alignment mask */
                        VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                        VM_MAP_KERNEL_FLAGS_NONE,
                        VM_KERN_MEMORY_NONE,
                        shm_handle->shm_object,
                        (mach_vm_offset_t)0,
                        FALSE,
                        prot,
                        prot,
                        VM_INHERIT_SHARE);
                if (rv != KERN_SUCCESS) {
                        goto out;
                }

                mapped_size += chunk_size;
                attach_va = attach_va + chunk_size;
        }
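
        /*
         * Added note (descriptive): the two-phase approach above -- reserve
         * the whole range with VM_PROT_NONE, then map each chunk with
         * VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE -- keeps the chunks contiguous
         * even when the segment spans several named entries.
         */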

        shmmap_s->shmid = uap->shmid;
        shmseg->u.shm_lpid = p->p_pid;
        shmseg->u.shm_atime = sysv_shmtime();
        shmseg->u.shm_nattch++;
        *retval = shmmap_s->va; /* XXX return -1 on error */
        shmat_ret = 0;
        goto shmat_out;
out:
        if (mapped_size > 0) {
                (void) mach_vm_deallocate(current_map(),
                    shmmap_s->va,
                    mapped_size);
        }
        switch (rv) {
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                shmat_ret = ENOMEM;
                break;
        case KERN_PROTECTION_FAILURE:
                shmat_ret = EACCES;
                break;
        default:
                shmat_ret = EINVAL;
                break;
        }
shmat_out:
        SYSV_SHM_SUBSYS_UNLOCK();
        return shmat_ret;
}

static int
oshmctl(__unused void *p, __unused void *uap, __unused void *retval)
{
        return EINVAL;
}

/*
 * Returns:     0               Success
 *              EINVAL
 *      copyout:EFAULT
 *      copyin:EFAULT
 *      ipcperm:EPERM
 *      ipcperm:EACCES
 */
int
shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval)
{
        int error;
        kauth_cred_t cred = kauth_cred_get();
        struct user_shmid_ds inbuf;
        struct shmid_kernel *shmseg;

        int shmctl_ret = 0;

        AUDIT_ARG(svipc_cmd, uap->cmd);
        AUDIT_ARG(svipc_id, uap->shmid);

        SYSV_SHM_SUBSYS_LOCK();

        if ((shmctl_ret = shminit())) {
                goto shmctl_out;
        }

        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                shmctl_ret = EINVAL;
                goto shmctl_out;
        }

        /* XXAUDIT: These are the perms BEFORE any change by this call.
         * This may not be what is desired.
         */
        AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);

#if CONFIG_MACF
        error = mac_sysvshm_check_shmctl(cred, shmseg, uap->cmd);
        if (error) {
                shmctl_ret = error;
                goto shmctl_out;
        }
#endif
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(cred, &shmseg->u.shm_perm, IPC_R);
                if (error) {
                        shmctl_ret = error;
                        goto shmctl_out;
                }

                if (IS_64BIT_PROCESS(p)) {
                        struct user_shmid_ds shmid_ds = {};
                        memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds));

                        /* Clear kernel reserved pointer before copying to user space */
                        shmid_ds.shm_internal = USER_ADDR_NULL;

                        error = copyout(&shmid_ds, uap->buf, sizeof(shmid_ds));
                } else {
                        struct user32_shmid_ds shmid_ds32 = {};
                        shmid_ds_64to32(&shmseg->u, &shmid_ds32);

                        /* Clear kernel reserved pointer before copying to user space */
                        shmid_ds32.shm_internal = (user32_addr_t)0;

                        error = copyout(&shmid_ds32, uap->buf, sizeof(shmid_ds32));
                }
                if (error) {
                        shmctl_ret = error;
                        goto shmctl_out;
                }
                break;
        case IPC_SET:
                error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
                if (error) {
                        shmctl_ret = error;
                        goto shmctl_out;
                }
                if (IS_64BIT_PROCESS(p)) {
                        error = copyin(uap->buf, &inbuf, sizeof(struct user_shmid_ds));
                } else {
                        struct user32_shmid_ds shmid_ds32;
                        error = copyin(uap->buf, &shmid_ds32, sizeof(shmid_ds32));
                        /* convert in place; ugly, but safe */
                        shmid_ds_32to64(&shmid_ds32, &inbuf);
                }
                if (error) {
                        shmctl_ret = error;
                        goto shmctl_out;
                }
                shmseg->u.shm_perm.uid = inbuf.shm_perm.uid;
                shmseg->u.shm_perm.gid = inbuf.shm_perm.gid;
                shmseg->u.shm_perm.mode =
                    (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
                    (inbuf.shm_perm.mode & ACCESSPERMS);
                shmseg->u.shm_ctime = sysv_shmtime();
                break;
        case IPC_RMID:
                error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
                if (error) {
                        shmctl_ret = error;
                        goto shmctl_out;
                }
                shmseg->u.shm_perm._key = IPC_PRIVATE;
                shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->u.shm_nattch <= 0) {
                        shm_deallocate_segment(shmseg);
                        shm_last_free = IPCID_TO_IX(uap->shmid);
                }
                break;
#if 0
        case SHM_LOCK:
        case SHM_UNLOCK:
#endif
        default:
                shmctl_ret = EINVAL;
                goto shmctl_out;
        }
        *retval = 0;
        shmctl_ret = 0;
shmctl_out:
        SYSV_SHM_SUBSYS_UNLOCK();
        return shmctl_ret;
}
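
/*
 * Userland sketch (illustrative, not part of the original source) of the
 * IPC_STAT path above:
 *
 *   struct shmid_ds ds;
 *   if (shmctl(id, IPC_STAT, &ds) == 0)
 *           printf("segsz=%zu nattch=%d\n",
 *               (size_t)ds.shm_segsz, (int)ds.shm_nattch);
 */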

static int
shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval)
{
        struct shmid_kernel *shmseg;
        int error = 0;

        shmseg = &shmsegs[segnum];
        if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
                if (error) {
                        return error;
                }
                return EAGAIN;
        }

        /*
         * The low 9 bits of shmflg are the mode bits being requested, which
         * are the actual mode bits desired on the segment, not the IPC_R
         * form; it would therefore be incorrect to call ipcperm() to
         * validate them.  Instead, we AND the existing mode with the
         * requested mode and verify that the result matches the requested
         * mode; otherwise, we fail with EACCES (access denied).
         */
        if ((shmseg->u.shm_perm.mode & mode) != mode) {
                return EACCES;
        }
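
        /*
         * Added example (illustrative): requesting mode 0600 against a
         * segment created with 0400 yields (0400 & 0600) == 0400 != 0600,
         * so the request fails with EACCES.
         */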

#if CONFIG_MACF
        error = mac_sysvshm_check_shmget(kauth_cred_get(), shmseg, uap->shmflg);
        if (error) {
                return error;
        }
#endif

        if (uap->size && uap->size > shmseg->u.shm_segsz) {
                return EINVAL;
        }

        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) {
                return EEXIST;
        }

        *retval = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
        return 0;
}

static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode,
    int *retval)
{
        int i, segnum, shmid;
        kauth_cred_t cred = kauth_cred_get();
        struct shmid_kernel *shmseg;
        struct shm_handle *shm_handle;
        kern_return_t kret;
        mach_vm_size_t total_size, size, alloc_size;
        void *mem_object;
        struct shm_handle *shm_handle_next, **shm_handle_next_p;

        if (uap->size <= 0 ||
            uap->size < (user_size_t)shminfo.shmmin ||
            uap->size > (user_size_t)shminfo.shmmax) {
                return EINVAL;
        }
        if (shm_nused >= shminfo.shmmni) { /* any shmids left? */
                return ENOSPC;
        }
        if (mach_vm_round_page_overflow(uap->size, &total_size)) {
                return EINVAL;
        }
        if ((user_ssize_t)(shm_committed + btoc(total_size)) > shminfo.shmall) {
                return ENOMEM;
        }
        if (shm_last_free < 0) {
                for (i = 0; i < shminfo.shmmni; i++) {
                        if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE) {
                                break;
                        }
                }
                if (i == shminfo.shmmni) {
                        panic("shmseg free count inconsistent");
                }
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];

        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         * XXX but we don't release the global lock !?
         */
        shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->u.shm_perm._key = uap->key;
        shmseg->u.shm_perm._seq = (shmseg->u.shm_perm._seq + 1) & 0x7fff;
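
        /*
         * Added note (descriptive): bumping _seq (mod 0x8000) invalidates
         * any shmid previously minted for this slot;
         * shm_find_segment_by_shmid() compares it against
         * IPCID_TO_SEQ(shmid).
         */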

        shm_handle_next_p = NULL;
        for (alloc_size = 0;
            alloc_size < total_size;
            alloc_size += size) {
                size = MIN(total_size - alloc_size, ANON_MAX_SIZE);
                kret = mach_make_memory_entry_64(
                        VM_MAP_NULL,
                        (memory_object_size_t *) &size,
                        (memory_object_offset_t) 0,
                        MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
                        (ipc_port_t *) &mem_object, 0);
                if (kret != KERN_SUCCESS) {
                        goto out;
                }

                shm_handle = kheap_alloc(KM_SHM, sizeof(struct shm_handle), Z_WAITOK);
                if (shm_handle == NULL) {
                        kret = KERN_NO_SPACE;
                        mach_memory_entry_port_release(mem_object);
                        mem_object = NULL;
                        goto out;
                }
                shm_handle->shm_object = mem_object;
                shm_handle->shm_handle_size = size;
                shm_handle->shm_handle_next = NULL;
                if (shm_handle_next_p == NULL) {
                        shmseg->u.shm_internal = CAST_USER_ADDR_T(shm_handle); /* tunnel */
                } else {
                        *shm_handle_next_p = shm_handle;
                }
                shm_handle_next_p = &shm_handle->shm_handle_next;
        }

        shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);

        shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = kauth_cred_getuid(cred);
        shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = kauth_cred_getgid(cred);
        shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->u.shm_segsz = uap->size;
        shmseg->u.shm_cpid = p->p_pid;
        shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
        shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
#if CONFIG_MACF
        mac_sysvshm_label_associate(cred, shmseg);
#endif
        shmseg->u.shm_ctime = sysv_shmtime();
        shm_committed += btoc(size);
        shm_nused++;
        AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
        if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        *retval = shmid;
        AUDIT_ARG(svipc_id, shmid);
        return 0;
out:
        if (kret != KERN_SUCCESS) {
                for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
                    shm_handle != NULL;
                    shm_handle = shm_handle_next) {
                        shm_handle_next = shm_handle->shm_handle_next;
                        mach_memory_entry_port_release(shm_handle->shm_object);
                        kheap_free(KM_SHM, shm_handle, sizeof(struct shm_handle));
                }
                shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
        }

        switch (kret) {
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return ENOMEM;
        case KERN_PROTECTION_FAILURE:
                return EACCES;
        default:
                return EINVAL;
        }
}

int
shmget(struct proc *p, struct shmget_args *uap, int32_t *retval)
{
        int segnum, mode, error;
        int shmget_ret = 0;

        /* Auditing is actually done in shmget_allocate_segment() */

        SYSV_SHM_SUBSYS_LOCK();

        if ((shmget_ret = shminit())) {
                goto shmget_out;
        }

        mode = uap->shmflg & ACCESSPERMS;
        if (uap->key != IPC_PRIVATE) {
again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(uap, mode, segnum, retval);
                        if (error == EAGAIN) {
                                goto again;
                        }
                        shmget_ret = error;
                        goto shmget_out;
                }
                if ((uap->shmflg & IPC_CREAT) == 0) {
                        shmget_ret = ENOENT;
                        goto shmget_out;
                }
        }
        shmget_ret = shmget_allocate_segment(p, uap, mode, retval);
shmget_out:
        SYSV_SHM_SUBSYS_UNLOCK();
        return shmget_ret;
}

/*
 * shmsys
 *
 * Entry point for all SHM calls: shmat, oshmctl, shmdt, shmget, shmctl
 *
 * Parameters:  p       Process requesting the call
 *              uap     User argument descriptor (see below)
 *              retval  Return value of the selected shm call
 *
 * Indirect parameters: uap->which      shm call to invoke (index in array of shm calls)
 *                      uap->a2         User argument descriptor
 *
 * Returns:     0       Success
 *              !0      Not success
 *
 * Implicit returns: retval      Return value of the selected shm call
 *
 * DEPRECATED:  This interface should not be used to call the other SHM
 *              functions (shmat, oshmctl, shmdt, shmget, shmctl). The correct
 *              usage is to call the other SHM functions directly.
 */
int
shmsys(struct proc *p, struct shmsys_args *uap, int32_t *retval)
{
        /* The routine that we are dispatching already does this */

        if (uap->which >= sizeof(shmcalls) / sizeof(shmcalls[0])) {
                return EINVAL;
        }
        return (*shmcalls[uap->which])(p, &uap->a2, retval);
}

/*
 * Return 0 on success, 1 on failure.
 */
int
shmfork(struct proc *p1, struct proc *p2)
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int nsegs = 0;
        int ret = 0;

        SYSV_SHM_SUBSYS_LOCK();

        if (shminit()) {
                ret = 1;
                goto shmfork_out;
        }

        struct shmmap_state *src = (struct shmmap_state *)p1->vm_shm;
        assert(src);

        /* count number of shmid entries in src */
        for (struct shmmap_state *s = src; s->shmid != SHMID_SENTINEL; s++) {
                nsegs++;
        }

        if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
                ret = 1;
                goto shmfork_out;
        }
        shmmap_s = kheap_alloc(KM_SHM, size, Z_WAITOK);
        if (shmmap_s == NULL) {
                ret = 1;
                goto shmfork_out;
        }

        bcopy(src, (caddr_t)shmmap_s, size);
        p2->vm_shm = (caddr_t)shmmap_s;
        for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
                if (SHMID_IS_VALID(shmmap_s->shmid)) {
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
                }
        }

shmfork_out:
        SYSV_SHM_SUBSYS_UNLOCK();
        return ret;
}

static void
shmcleanup(struct proc *p, int deallocate)
{
        struct shmmap_state *shmmap_s;
        size_t size = 0;
        int nsegs = 0;

        SYSV_SHM_SUBSYS_LOCK();

        shmmap_s = (struct shmmap_state *)p->vm_shm;
        for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
                nsegs++;
                if (SHMID_IS_VALID(shmmap_s->shmid)) {
                        /*
                         * XXX: Should the MAC framework enforce a
                         * check here as well?
                         */
                        shm_delete_mapping(p, shmmap_s, deallocate);
                }
        }

        if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
                panic("shmcleanup: p->vm_shm buffer was corrupted\n");
        }
        kheap_free(KM_SHM, p->vm_shm, size);
        SYSV_SHM_SUBSYS_UNLOCK();
}

void
shmexit(struct proc *p)
{
        shmcleanup(p, 1);
}

/*
 * shmexec() is like shmexit(), only it doesn't delete the mappings,
 * since the old address space has already been destroyed and the new
 * one instantiated.  Instead, it just does the housekeeping work we
 * need to do to keep the System V shared memory subsystem sane.
 */
__private_extern__ void
shmexec(struct proc *p)
{
        shmcleanup(p, 0);
}

int
shminit(void)
{
        size_t sz;
        int i;

        if (!shm_inited) {
                /*
                 * We store 64-bit values internally: if we didn't, we would
                 * be unable to represent a segment size in excess of 32 bits
                 * with the (struct shmid_ds)->shm_segsz field; also, POSIX
                 * dictates this field be a size_t, which is 64 bits when
                 * running 64-bit binaries.
                 */
                if (os_mul_overflow(shminfo.shmmni, sizeof(struct shmid_kernel), &sz)) {
                        return ENOMEM;
                }

                shmsegs = zalloc_permanent(sz, ZALIGN_PTR);
                if (shmsegs == NULL) {
                        return ENOMEM;
                }
                for (i = 0; i < shminfo.shmmni; i++) {
                        shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
                        shmsegs[i].u.shm_perm._seq = 0;
#if CONFIG_MACF
                        mac_sysvshm_label_init(&shmsegs[i]);
#endif
                }
                shm_last_free = 0;
                shm_nused = 0;
                shm_committed = 0;
                shm_inited = 1;
        }

        return 0;
}

/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
 *  struct sysctl_req *req) */
static int
sysctl_shminfo(__unused struct sysctl_oid *oidp, void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
        int error = 0;
        int sysctl_shminfo_ret = 0;
        int64_t saved_shmmax;
        int64_t saved_shmmin;
        int64_t saved_shmseg;
        int64_t saved_shmmni;
        int64_t saved_shmall;

        error = SYSCTL_OUT(req, arg1, sizeof(int64_t));
        if (error || req->newptr == USER_ADDR_NULL) {
                return error;
        }

        SYSV_SHM_SUBSYS_LOCK();

        /* shmmni cannot be changed after SysV SHM has been initialized */
        if (shm_inited && arg1 == &shminfo.shmmni) {
                sysctl_shminfo_ret = EPERM;
                goto sysctl_shminfo_out;
        }
        saved_shmmax = shminfo.shmmax;
        saved_shmmin = shminfo.shmmin;
        saved_shmseg = shminfo.shmseg;
        saved_shmmni = shminfo.shmmni;
        saved_shmall = shminfo.shmall;

        if ((error = SYSCTL_IN(req, arg1, sizeof(int64_t))) != 0) {
                sysctl_shminfo_ret = error;
                goto sysctl_shminfo_out;
        }

        if (arg1 == &shminfo.shmmax) {
                /* shmmax needs to be page-aligned */
                if (shminfo.shmmax & PAGE_MASK_64 || shminfo.shmmax < 0) {
                        shminfo.shmmax = saved_shmmax;
                        sysctl_shminfo_ret = EINVAL;
                        goto sysctl_shminfo_out;
                }
        } else if (arg1 == &shminfo.shmmin) {
                if (shminfo.shmmin < 0) {
                        shminfo.shmmin = saved_shmmin;
                        sysctl_shminfo_ret = EINVAL;
                        goto sysctl_shminfo_out;
                }
        } else if (arg1 == &shminfo.shmseg) {
                /* add a sanity check - 20847256 */
                if (shminfo.shmseg > INT32_MAX || shminfo.shmseg < 0) {
                        shminfo.shmseg = saved_shmseg;
                        sysctl_shminfo_ret = EINVAL;
                        goto sysctl_shminfo_out;
                }
        } else if (arg1 == &shminfo.shmmni) {
                /* add a sanity check - 20847256 */
                if (shminfo.shmmni > INT32_MAX || shminfo.shmmni < 0) {
                        shminfo.shmmni = saved_shmmni;
                        sysctl_shminfo_ret = EINVAL;
                        goto sysctl_shminfo_out;
                }
        } else if (arg1 == &shminfo.shmall) {
                /* add a sanity check - 20847256 */
                if (shminfo.shmall > INT32_MAX || shminfo.shmall < 0) {
                        shminfo.shmall = saved_shmall;
                        sysctl_shminfo_ret = EINVAL;
                        goto sysctl_shminfo_out;
                }
        }
        sysctl_shminfo_ret = 0;
sysctl_shminfo_out:
        SYSV_SHM_SUBSYS_UNLOCK();
        return sysctl_shminfo_ret;
}

static int
IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
        int error;
        int cursor;
        union {
                struct user32_IPCS_command u32;
                struct user_IPCS_command u64;
        } ipcs = { };
        struct user32_shmid_ds shmid_ds32 = { };        /* post conversion, 32 bit version */
        struct user_shmid_ds shmid_ds = { };            /* 64 bit version */
        void *shmid_dsp;
        size_t ipcs_sz = sizeof(struct user_IPCS_command);
        size_t shmid_ds_sz = sizeof(struct user_shmid_ds);
        struct proc *p = current_proc();

        SYSV_SHM_SUBSYS_LOCK();

        if ((error = shminit())) {
                goto ipcs_shm_sysctl_out;
        }

        if (!IS_64BIT_PROCESS(p)) {
                ipcs_sz = sizeof(struct user32_IPCS_command);
                shmid_ds_sz = sizeof(struct user32_shmid_ds);
        }

        /* Copy in the command structure */
        if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
                goto ipcs_shm_sysctl_out;
        }

        if (!IS_64BIT_PROCESS(p)) {     /* convert in place */
                ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);
        }

        /* Let us version this interface... */
        if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
                error = EINVAL;
                goto ipcs_shm_sysctl_out;
        }

        switch (ipcs.u64.ipcs_op) {
        case IPCS_SHM_CONF:     /* Obtain global configuration data */
                if (ipcs.u64.ipcs_datalen != sizeof(struct shminfo)) {
                        if (ipcs.u64.ipcs_cursor != 0) {        /* fwd. compat. */
                                error = ENOMEM;
                                break;
                        }
                        error = ERANGE;
                        break;
                }
                error = copyout(&shminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
                break;

        case IPCS_SHM_ITER:     /* Iterate over existing segments */
                cursor = ipcs.u64.ipcs_cursor;
                if (cursor < 0 || cursor >= shminfo.shmmni) {
                        error = ERANGE;
                        break;
                }
                if (ipcs.u64.ipcs_datalen != (int)shmid_ds_sz) {
                        error = EINVAL;
                        break;
                }
                for (; cursor < shminfo.shmmni; cursor++) {
                        if (shmsegs[cursor].u.shm_perm.mode & SHMSEG_ALLOCATED) {
                                break;
                        }
                        continue;
                }
                if (cursor == shminfo.shmmni) {
                        error = ENOENT;
                        break;
                }

                shmid_dsp = &shmsegs[cursor];   /* default: 64 bit */

                /*
                 * If necessary, convert the 64 bit kernel segment
                 * descriptor to a 32 bit user one.
                 */
                if (!IS_64BIT_PROCESS(p)) {
                        shmid_ds_64to32(shmid_dsp, &shmid_ds32);

                        /* Clear kernel reserved pointer before copying to user space */
                        shmid_ds32.shm_internal = (user32_addr_t)0;

                        shmid_dsp = &shmid_ds32;
                } else {
                        memcpy(&shmid_ds, shmid_dsp, sizeof(shmid_ds));

                        /* Clear kernel reserved pointer before copying to user space */
                        shmid_ds.shm_internal = USER_ADDR_NULL;

                        shmid_dsp = &shmid_ds;
                }
                error = copyout(shmid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
                if (!error) {
                        /* update cursor */
                        ipcs.u64.ipcs_cursor = cursor + 1;

                        if (!IS_64BIT_PROCESS(p)) {     /* convert in place */
                                ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t, ipcs.u64.ipcs_data);
                        }

                        error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
                }
                break;

        default:
                error = EINVAL;
                break;
        }
ipcs_shm_sysctl_out:
        SYSV_SHM_SUBSYS_UNLOCK();
        return error;
}

SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSV");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmax, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmax, 0, &sysctl_shminfo, "Q", "shmmax");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmin, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmin, 0, &sysctl_shminfo, "Q", "shmmin");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmni, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmni, 0, &sysctl_shminfo, "Q", "shmmni");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmseg, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmseg, 0, &sysctl_shminfo, "Q", "shmseg");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmall, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmall, 0, &sysctl_shminfo, "Q", "shmall");
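
/*
 * Added note (illustrative): the knobs above surface to userland as, e.g.:
 *
 *   sysctl kern.sysv.shmmax
 *   sysctl -w kern.sysv.shmmax=4194304   # must be page-aligned; see sysctl_shminfo()
 */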

SYSCTL_NODE(_kern_sysv, OID_AUTO, ipcs, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSVIPCS");

SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, shm, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, IPCS_shm_sysctl,
    "S,IPCS_shm_command",
    "ipcs shm command interface");
#endif /* SYSV_SHM */

/* DSEP Review Done pl-20051108-v02 @2743,@2908,@2913,@3009 */