/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005-2006 SPARTA, Inc.
 */


#include <sys/appleapiopts.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/ipcs.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

#include <security/audit/audit.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <kern/locks.h>
#include <os/overflow.h>

/* Uncomment this line to see MAC debugging output. */
/* #define MAC_DEBUG */
#if CONFIG_MACF_DEBUG
#define MPRINTF(a)  printf a
#else
#define MPRINTF(a)
#endif

#if SYSV_SHM
static int shminit(void);

static lck_grp_t *sysv_shm_subsys_lck_grp;
static lck_grp_attr_t *sysv_shm_subsys_lck_grp_attr;
static lck_attr_t *sysv_shm_subsys_lck_attr;
static lck_mtx_t sysv_shm_subsys_mutex;

#define SYSV_SHM_SUBSYS_LOCK() lck_mtx_lock(&sysv_shm_subsys_mutex)
#define SYSV_SHM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_shm_subsys_mutex)

static int oshmctl(void *p, void *uap, void *retval);
static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, int *retval);
static int shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval);
static void shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out);
static void shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t* const shmcalls[] = {
    (sy_call_t *)shmat, (sy_call_t *)oshmctl,
    (sy_call_t *)shmdt, (sy_call_t *)shmget,
    (sy_call_t *)shmctl
};

#define SHMSEG_FREE       0x0200
#define SHMSEG_REMOVED    0x0400
#define SHMSEG_ALLOCATED  0x0800
#define SHMSEG_WANTED     0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_kernel *shmsegs;   /* 64 bit version */
static int shm_inited = 0;

/*
 * Since anonymous memory chunks are limited to ANON_MAX_SIZE bytes,
 * we have to keep a list of chunks when we want to handle a shared memory
 * segment bigger than ANON_MAX_SIZE.
 * Each chunk points to a VM named entry of up to ANON_MAX_SIZE bytes
 * of anonymous memory.
 */
struct shm_handle {
    void * shm_object;                      /* named entry for this chunk */
    memory_object_size_t shm_handle_size;   /* size of this chunk */
    struct shm_handle *shm_handle_next;     /* next chunk */
};
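
/*
 * Illustrative sketch (not part of the build): the total backing size of
 * a segment is the sum of its chunk sizes, recovered by walking the list
 * hanging off (struct shmid_kernel)->u.shm_internal; "seg" here is a
 * hypothetical segment pointer:
 *
 *	memory_object_size_t total = 0;
 *	struct shm_handle *h;
 *	for (h = CAST_DOWN(void *, seg->u.shm_internal);
 *	    h != NULL; h = h->shm_handle_next) {
 *		total += h->shm_handle_size;
 *	}
 *
 * Every chunk except possibly the last is ANON_MAX_SIZE bytes.
 */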

struct shmmap_state {
    mach_vm_address_t va;   /* user address */
    int shmid;              /* segment id */
};

static void shm_deallocate_segment(struct shmid_kernel *);
static int shm_find_segment_by_key(key_t);
static struct shmid_kernel *shm_find_segment_by_shmid(int);
static int shm_delete_mapping(struct proc *, struct shmmap_state *, int);

#ifdef __APPLE_API_PRIVATE
#define DEFAULT_SHMMAX  (4 * 1024 * 1024)
#define DEFAULT_SHMMIN  1
#define DEFAULT_SHMMNI  32
#define DEFAULT_SHMSEG  8
#define DEFAULT_SHMALL  1024

struct shminfo shminfo = {
    .shmmax = DEFAULT_SHMMAX,
    .shmmin = DEFAULT_SHMMIN,
    .shmmni = DEFAULT_SHMMNI,
    .shmseg = DEFAULT_SHMSEG,
    .shmall = DEFAULT_SHMALL
};

#define SHMID_IS_VALID(x) ((x) >= 0)
#define SHMID_UNALLOCATED (-1)
#define SHMID_SENTINEL    (-2)

#endif /* __APPLE_API_PRIVATE */

void sysv_shm_lock_init(void);

static __inline__ time_t
sysv_shmtime(void)
{
    struct timeval tv;
    microtime(&tv);
    return tv.tv_sec;
}

/*
 * This conversion is safe, since if we are converting for a 32 bit process,
 * then the value of (struct shmid_ds)->shm_segsz will never exceed 4G.
 *
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out)
{
    out->shm_perm = in->shm_perm;
    out->shm_segsz = in->shm_segsz;
    out->shm_lpid = in->shm_lpid;
    out->shm_cpid = in->shm_cpid;
    out->shm_nattch = in->shm_nattch;
    out->shm_atime = in->shm_atime;
    out->shm_dtime = in->shm_dtime;
    out->shm_ctime = in->shm_ctime;
    out->shm_internal = CAST_DOWN_EXPLICIT(int, in->shm_internal);
}

/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out)
{
    out->shm_internal = in->shm_internal;
    out->shm_ctime = in->shm_ctime;
    out->shm_dtime = in->shm_dtime;
    out->shm_atime = in->shm_atime;
    out->shm_nattch = in->shm_nattch;
    out->shm_cpid = in->shm_cpid;
    out->shm_lpid = in->shm_lpid;
    out->shm_segsz = in->shm_segsz;
    out->shm_perm = in->shm_perm;
}
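
/*
 * Illustrative sketch (not part of the build): because the fields are
 * copied back-to-front, the 32 bit source may share storage with the
 * 64 bit target; each narrow field is read before the wider,
 * higher-offset field overwrites it, e.g. with both layouts in one
 * buffer:
 *
 *	union {
 *		struct user32_shmid_ds ds32;
 *		struct user_shmid_ds ds64;
 *	} u;
 *	... copyin() fills u.ds32 ...
 *	shmid_ds_32to64(&u.ds32, &u.ds64);	expands in place
 */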

static int
shm_find_segment_by_key(key_t key)
{
    int i;

    for (i = 0; i < shminfo.shmmni; i++) {
        if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
            shmsegs[i].u.shm_perm._key == key) {
            return i;
        }
    }
    return -1;
}

static struct shmid_kernel *
shm_find_segment_by_shmid(int shmid)
{
    int segnum;
    struct shmid_kernel *shmseg;

    segnum = IPCID_TO_IX(shmid);
    if (segnum < 0 || segnum >= shminfo.shmmni) {
        return NULL;
    }
    shmseg = &shmsegs[segnum];
    if ((shmseg->u.shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
        != SHMSEG_ALLOCATED ||
        shmseg->u.shm_perm._seq != IPCID_TO_SEQ(shmid)) {
        return NULL;
    }
    return shmseg;
}
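
/*
 * Illustrative sketch (not part of the build): a shmid packs the slot
 * index and a generation sequence so that stale ids are rejected above.
 * With the <sys/ipc.h> encoding, conceptually:
 *
 *	ix  = IPCID_TO_IX(shmid);	low 16 bits: index into shmsegs[]
 *	seq = IPCID_TO_SEQ(shmid);	high bits: perm._seq at creation
 *
 * shmget_allocate_segment() bumps _seq each time a slot is reused
 * (masked to 0x7fff so ids stay non-negative), so an old id for a
 * recycled slot fails the _seq comparison and maps to NULL here.
 */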

static void
shm_deallocate_segment(struct shmid_kernel *shmseg)
{
    struct shm_handle *shm_handle, *shm_handle_next;
    mach_vm_size_t size;

    for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
        shm_handle != NULL;
        shm_handle = shm_handle_next) {
        shm_handle_next = shm_handle->shm_handle_next;
        mach_memory_entry_port_release(shm_handle->shm_object);
        FREE(shm_handle, M_SHM);
    }
    shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
    size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map()));
    shm_committed -= btoc(size);
    shm_nused--;
    shmseg->u.shm_perm.mode = SHMSEG_FREE;
#if CONFIG_MACF
    /* Reset the MAC label */
    mac_sysvshm_label_recycle(shmseg);
#endif
}

static int
shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s,
    int deallocate)
{
    struct shmid_kernel *shmseg;
    int segnum, result;
    mach_vm_size_t size;

    segnum = IPCID_TO_IX(shmmap_s->shmid);
    shmseg = &shmsegs[segnum];
    size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map())); /* XXX done for us? */
    if (deallocate) {
        result = mach_vm_deallocate(current_map(), shmmap_s->va, size);
        if (result != KERN_SUCCESS) {
            return EINVAL;
        }
    }
    shmmap_s->shmid = SHMID_UNALLOCATED;
    shmseg->u.shm_dtime = sysv_shmtime();
    if ((--shmseg->u.shm_nattch <= 0) &&
        (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
        shm_deallocate_segment(shmseg);
        shm_last_free = segnum;
    }
    return 0;
}

int
shmdt(struct proc *p, struct shmdt_args *uap, int32_t *retval)
{
#if CONFIG_MACF
    struct shmid_kernel *shmsegptr;
#endif
    struct shmmap_state *shmmap_s;
    int i;
    int shmdtret = 0;

    AUDIT_ARG(svipc_addr, uap->shmaddr);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmdtret = shminit())) {
        goto shmdt_out;
    }

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    if (shmmap_s == NULL) {
        shmdtret = EINVAL;
        goto shmdt_out;
    }

    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        if (SHMID_IS_VALID(shmmap_s->shmid) &&
            shmmap_s->va == (mach_vm_offset_t)uap->shmaddr) {
            break;
        }
    }

    if (!SHMID_IS_VALID(shmmap_s->shmid)) {
        shmdtret = EINVAL;
        goto shmdt_out;
    }

#if CONFIG_MACF
    /*
     * XXX: It might be useful to move this into the shm_delete_mapping
     * function
     */
    shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
    shmdtret = mac_sysvshm_check_shmdt(kauth_cred_get(), shmsegptr);
    if (shmdtret) {
        goto shmdt_out;
    }
#endif
    i = shm_delete_mapping(p, shmmap_s, 1);

    if (i == 0) {
        *retval = 0;
    }
    shmdtret = i;
shmdt_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmdtret;
}

int
shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval)
{
    int error, i, flags;
    struct shmid_kernel *shmseg;
    struct shmmap_state *shmmap_s = NULL;
    struct shm_handle *shm_handle;
    mach_vm_address_t attach_va;    /* attach address in/out */
    mach_vm_address_t shmlba;
    mach_vm_size_t map_size;        /* size of map entry */
    mach_vm_size_t mapped_size;
    vm_prot_t prot;
    size_t size;
    kern_return_t rv;
    int shmat_ret;
    int vm_flags;

    shmat_ret = 0;

    AUDIT_ARG(svipc_id, uap->shmid);
    AUDIT_ARG(svipc_addr, uap->shmaddr);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmat_ret = shminit())) {
        goto shmat_out;
    }

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    if (shmmap_s == NULL) {
        /* lazily allocate the shm map */

        int nsegs = shminfo.shmseg;
        if (nsegs <= 0) {
            shmat_ret = EMFILE;
            goto shmat_out;
        }

        /* +1 for the sentinel */
        if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
            shmat_ret = ENOMEM;
            goto shmat_out;
        }

        MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK | M_NULL);
        if (shmmap_s == NULL) {
            shmat_ret = ENOMEM;
            goto shmat_out;
        }

        /* initialize the entries */
        for (i = 0; i < nsegs; i++) {
            shmmap_s[i].shmid = SHMID_UNALLOCATED;
        }
        shmmap_s[i].shmid = SHMID_SENTINEL;

        p->vm_shm = (caddr_t)shmmap_s;
    }

    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {
        shmat_ret = EINVAL;
        goto shmat_out;
    }

    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
    error = ipcperm(kauth_cred_get(), &shmseg->u.shm_perm,
        (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R | IPC_W);
    if (error) {
        shmat_ret = error;
        goto shmat_out;
    }

#if CONFIG_MACF
    error = mac_sysvshm_check_shmat(kauth_cred_get(), shmseg, uap->shmflg);
    if (error) {
        shmat_ret = error;
        goto shmat_out;
    }
#endif

    /* find a free slot in the per-process attach map */
    while (SHMID_IS_VALID(shmmap_s->shmid)) {
        shmmap_s++;
    }
    if (shmmap_s->shmid != SHMID_UNALLOCATED) {
        /* no free slots */
        shmat_ret = EMFILE;
        goto shmat_out;
    }

    map_size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map()));
    prot = VM_PROT_READ;
    if ((uap->shmflg & SHM_RDONLY) == 0) {
        prot |= VM_PROT_WRITE;
    }
    flags = MAP_ANON | MAP_SHARED;
    if (uap->shmaddr) {
        flags |= MAP_FIXED;
    }

    attach_va = (mach_vm_address_t)uap->shmaddr;
    shmlba = vm_map_page_size(current_map()); /* XXX instead of SHMLBA */
    if (uap->shmflg & SHM_RND) {
        attach_va &= ~(shmlba - 1);
    } else if ((attach_va & (shmlba - 1)) != 0) {
        shmat_ret = EINVAL;
        goto shmat_out;
    }
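    /*
     * Worked example (illustrative): with 4 KB map pages, shmlba is
     * 0x1000, so SHM_RND turns shmaddr 0x20000123 into
     * 0x20000123 & ~0xFFF == 0x20000000; without SHM_RND the same
     * unaligned address is rejected with EINVAL above.
     */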

    if (flags & MAP_FIXED) {
        vm_flags = VM_FLAGS_FIXED;
    } else {
        vm_flags = VM_FLAGS_ANYWHERE;
    }

    mapped_size = 0;

    /* first reserve enough space... */
    rv = mach_vm_map_kernel(current_map(),
        &attach_va,
        map_size,
        0,
        vm_flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        VM_KERN_MEMORY_NONE,
        IPC_PORT_NULL,
        0,
        FALSE,
        VM_PROT_NONE,
        VM_PROT_NONE,
        VM_INHERIT_NONE);
    if (rv != KERN_SUCCESS) {
        goto out;
    }

    shmmap_s->va = attach_va;

    /* ... then map the shared memory over the reserved space */
    for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
        shm_handle != NULL;
        shm_handle = shm_handle->shm_handle_next) {
        vm_map_size_t chunk_size;

        assert(mapped_size < map_size);
        chunk_size = shm_handle->shm_handle_size;
        if (chunk_size > map_size - mapped_size) {
            /*
             * Partial mapping of last chunk due to
             * page size mismatch.
             */
            assert(vm_map_page_shift(current_map()) < PAGE_SHIFT);
            assert(shm_handle->shm_handle_next == NULL);
            chunk_size = map_size - mapped_size;
        }
        rv = vm_map_enter_mem_object(
            current_map(),          /* process map */
            &attach_va,             /* attach address */
            chunk_size,             /* size to map */
            (mach_vm_offset_t)0,    /* alignment mask */
            VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_NONE,
            shm_handle->shm_object,
            (mach_vm_offset_t)0,
            FALSE,
            prot,
            prot,
            VM_INHERIT_SHARE);
        if (rv != KERN_SUCCESS) {
            goto out;
        }

        mapped_size += chunk_size;
        attach_va = attach_va + chunk_size;
    }

    shmmap_s->shmid = uap->shmid;
    shmseg->u.shm_lpid = p->p_pid;
    shmseg->u.shm_atime = sysv_shmtime();
    shmseg->u.shm_nattch++;
    *retval = shmmap_s->va; /* XXX return -1 on error */
    shmat_ret = 0;
    goto shmat_out;
out:
    if (mapped_size > 0) {
        (void) mach_vm_deallocate(current_map(),
            shmmap_s->va,
            mapped_size);
    }
    switch (rv) {
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        shmat_ret = ENOMEM;
        break;
    case KERN_PROTECTION_FAILURE:
        shmat_ret = EACCES;
        break;
    default:
        shmat_ret = EINVAL;
        break;
    }
shmat_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmat_ret;
}

static int
oshmctl(__unused void *p, __unused void *uap, __unused void *retval)
{
    return EINVAL;
}

/*
 * Returns:  0              Success
 *           EINVAL
 *           copyout:EFAULT
 *           copyin:EFAULT
 *           ipcperm:EPERM
 *           ipcperm:EACCES
 */
int
shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval)
{
    int error;
    kauth_cred_t cred = kauth_cred_get();
    struct user_shmid_ds inbuf;
    struct shmid_kernel *shmseg;

    int shmctl_ret = 0;

    AUDIT_ARG(svipc_cmd, uap->cmd);
    AUDIT_ARG(svipc_id, uap->shmid);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmctl_ret = shminit())) {
        goto shmctl_out;
    }

    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {
        shmctl_ret = EINVAL;
        goto shmctl_out;
    }

    /* XXAUDIT: This is the perms BEFORE any change by this call. This
     * may not be what is desired.
     */
    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);

#if CONFIG_MACF
    error = mac_sysvshm_check_shmctl(cred, shmseg, uap->cmd);
    if (error) {
        shmctl_ret = error;
        goto shmctl_out;
    }
#endif
    switch (uap->cmd) {
    case IPC_STAT:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_R);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }

        if (IS_64BIT_PROCESS(p)) {
            struct user_shmid_ds shmid_ds = {};
            memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds));

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds.shm_internal = USER_ADDR_NULL;

            error = copyout(&shmid_ds, uap->buf, sizeof(shmid_ds));
        } else {
            struct user32_shmid_ds shmid_ds32 = {};
            shmid_ds_64to32(&shmseg->u, &shmid_ds32);

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds32.shm_internal = (user32_addr_t)0;

            error = copyout(&shmid_ds32, uap->buf, sizeof(shmid_ds32));
        }
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        break;
    case IPC_SET:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        if (IS_64BIT_PROCESS(p)) {
            error = copyin(uap->buf, &inbuf, sizeof(struct user_shmid_ds));
        } else {
            struct user32_shmid_ds shmid_ds32;
            error = copyin(uap->buf, &shmid_ds32, sizeof(shmid_ds32));
            /* convert in place; ugly, but safe */
            shmid_ds_32to64(&shmid_ds32, &inbuf);
        }
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        shmseg->u.shm_perm.uid = inbuf.shm_perm.uid;
        shmseg->u.shm_perm.gid = inbuf.shm_perm.gid;
        shmseg->u.shm_perm.mode =
            (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
            (inbuf.shm_perm.mode & ACCESSPERMS);
        shmseg->u.shm_ctime = sysv_shmtime();
        break;
    case IPC_RMID:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        shmseg->u.shm_perm._key = IPC_PRIVATE;
        shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
        if (shmseg->u.shm_nattch <= 0) {
            shm_deallocate_segment(shmseg);
            shm_last_free = IPCID_TO_IX(uap->shmid);
        }
        break;
#if 0
    case SHM_LOCK:
    case SHM_UNLOCK:
#endif
    default:
        shmctl_ret = EINVAL;
        goto shmctl_out;
    }
    *retval = 0;
    shmctl_ret = 0;
shmctl_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmctl_ret;
}
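
/*
 * Illustrative user-space sketch (not part of the build): querying a
 * segment's metadata through the IPC_STAT path above, assuming an
 * existing id in "shmid":
 *
 *	#include <sys/shm.h>
 *	#include <stdio.h>
 *
 *	struct shmid_ds ds;
 *	if (shmctl(shmid, IPC_STAT, &ds) == 0) {
 *		printf("size %zu, attached %d times\n",
 *		    (size_t)ds.shm_segsz, (int)ds.shm_nattch);
 *	}
 *
 * The kernel clears ds.shm_internal before the copyout, so the chunk
 * list pointer never leaks to user space.
 */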

static int
shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval)
{
    struct shmid_kernel *shmseg;
    int error = 0;

    shmseg = &shmsegs[segnum];
    if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
        /*
         * This segment is in the process of being allocated. Wait
         * until it's done, and look the key up again (in case the
         * allocation failed or it was freed).
         */
        shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
        error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
        if (error) {
            return error;
        }
        return EAGAIN;
    }

    /*
     * The low 9 bits of shmflg are the mode bits being requested, which
     * are the actual mode bits desired on the segment, and not in IPC_R
     * form; therefore it would be incorrect to call ipcperm() to validate
     * them; instead, we AND the existing mode with the requested mode, and
     * verify that it matches the requested mode; otherwise, we fail with
     * EACCES (access denied).
     */
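    /*
     * Worked example (illustrative): an existing segment created with
     * mode 0600 and a request of 0644 gives (0600 & 0644) == 0600,
     * which is not 0644, so the check below fails with EACCES; a
     * request of 0400 gives (0600 & 0400) == 0400 and passes.
     */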
    if ((shmseg->u.shm_perm.mode & mode) != mode) {
        return EACCES;
    }

#if CONFIG_MACF
    error = mac_sysvshm_check_shmget(kauth_cred_get(), shmseg, uap->shmflg);
    if (error) {
        return error;
    }
#endif

    if (uap->size && uap->size > shmseg->u.shm_segsz) {
        return EINVAL;
    }

    if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) {
        return EEXIST;
    }

    *retval = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
    return 0;
}

static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode,
    int *retval)
{
    int i, segnum, shmid;
    kauth_cred_t cred = kauth_cred_get();
    struct shmid_kernel *shmseg;
    struct shm_handle *shm_handle;
    kern_return_t kret;
    mach_vm_size_t total_size, size, alloc_size;
    void * mem_object;
    struct shm_handle *shm_handle_next, **shm_handle_next_p;

    if (uap->size <= 0 ||
        uap->size < (user_size_t)shminfo.shmmin ||
        uap->size > (user_size_t)shminfo.shmmax) {
        return EINVAL;
    }
    if (shm_nused >= shminfo.shmmni) { /* any shmids left? */
        return ENOSPC;
    }
    if (mach_vm_round_page_overflow(uap->size, &total_size)) {
        return EINVAL;
    }
    if ((user_ssize_t)(shm_committed + btoc(total_size)) > shminfo.shmall) {
        return ENOMEM;
    }
    if (shm_last_free < 0) {
        for (i = 0; i < shminfo.shmmni; i++) {
            if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE) {
                break;
            }
        }
        if (i == shminfo.shmmni) {
            panic("shmseg free count inconsistent");
        }
        segnum = i;
    } else {
        segnum = shm_last_free;
        shm_last_free = -1;
    }
    shmseg = &shmsegs[segnum];

    /*
     * In case we sleep in malloc(), mark the segment present but deleted
     * so that no one else tries to create the same key.
     * XXX but we don't release the global lock !?
     */
    shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
    shmseg->u.shm_perm._key = uap->key;
    shmseg->u.shm_perm._seq = (shmseg->u.shm_perm._seq + 1) & 0x7fff;

    shm_handle_next_p = NULL;
    for (alloc_size = 0;
        alloc_size < total_size;
        alloc_size += size) {
        size = MIN(total_size - alloc_size, ANON_MAX_SIZE);
        kret = mach_make_memory_entry_64(
            VM_MAP_NULL,
            (memory_object_size_t *) &size,
            (memory_object_offset_t) 0,
            MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
            (ipc_port_t *) &mem_object, 0);
        if (kret != KERN_SUCCESS) {
            goto out;
        }

        MALLOC(shm_handle, struct shm_handle *, sizeof(struct shm_handle), M_SHM, M_WAITOK);
        if (shm_handle == NULL) {
            kret = KERN_NO_SPACE;
            mach_memory_entry_port_release(mem_object);
            mem_object = NULL;
            goto out;
        }
        shm_handle->shm_object = mem_object;
        shm_handle->shm_handle_size = size;
        shm_handle->shm_handle_next = NULL;
        if (shm_handle_next_p == NULL) {
            shmseg->u.shm_internal = CAST_USER_ADDR_T(shm_handle); /* tunnel */
        } else {
            *shm_handle_next_p = shm_handle;
        }
        shm_handle_next_p = &shm_handle->shm_handle_next;
    }

    shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);

    shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = kauth_cred_getuid(cred);
    shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = kauth_cred_getgid(cred);
    shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
        (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
    shmseg->u.shm_segsz = uap->size;
    shmseg->u.shm_cpid = p->p_pid;
    shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
    shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
#if CONFIG_MACF
    mac_sysvshm_label_associate(cred, shmseg);
#endif
    shmseg->u.shm_ctime = sysv_shmtime();
    shm_committed += btoc(size);
    shm_nused++;
    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
    if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
        /*
         * Somebody else wanted this key while we were asleep. Wake
         * them up now.
         */
        shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
        wakeup((caddr_t)shmseg);
    }
    *retval = shmid;
    AUDIT_ARG(svipc_id, shmid);
    return 0;
out:
    if (kret != KERN_SUCCESS) {
        for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
            shm_handle != NULL;
            shm_handle = shm_handle_next) {
            shm_handle_next = shm_handle->shm_handle_next;
            mach_memory_entry_port_release(shm_handle->shm_object);
            FREE(shm_handle, M_SHM);
        }
        shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
    }

    switch (kret) {
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return ENOMEM;
    case KERN_PROTECTION_FAILURE:
        return EACCES;
    default:
        return EINVAL;
    }
}

int
shmget(struct proc *p, struct shmget_args *uap, int32_t *retval)
{
    int segnum, mode, error;
    int shmget_ret = 0;

    /* Auditing is actually done in shmget_allocate_segment() */

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmget_ret = shminit())) {
        goto shmget_out;
    }

    mode = uap->shmflg & ACCESSPERMS;
    if (uap->key != IPC_PRIVATE) {
again:
        segnum = shm_find_segment_by_key(uap->key);
        if (segnum >= 0) {
            error = shmget_existing(uap, mode, segnum, retval);
            if (error == EAGAIN) {
                goto again;
            }
            shmget_ret = error;
            goto shmget_out;
        }
        if ((uap->shmflg & IPC_CREAT) == 0) {
            shmget_ret = ENOENT;
            goto shmget_out;
        }
    }
    shmget_ret = shmget_allocate_segment(p, uap, mode, retval);
shmget_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmget_ret;
}
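
/*
 * Illustrative user-space sketch (not part of the build): the typical
 * create/attach/detach/remove life cycle served by shmget(), shmat(),
 * shmdt() and shmctl() above; the key 0x1234 and the 4096-byte size are
 * arbitrary:
 *
 *	#include <sys/shm.h>
 *	#include <string.h>
 *
 *	int id = shmget((key_t)0x1234, 4096, IPC_CREAT | 0600);
 *	if (id >= 0) {
 *		void *addr = shmat(id, NULL, 0);
 *		if (addr != (void *)-1) {
 *			memset(addr, 0, 4096);
 *			(void)shmdt(addr);
 *		}
 *		(void)shmctl(id, IPC_RMID, NULL);
 *	}
 */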

/*
 * shmsys
 *
 * Entry point for all SHM calls: shmat, oshmctl, shmdt, shmget, shmctl
 *
 * Parameters:  p       Process requesting the call
 *              uap     User argument descriptor (see below)
 *              retval  Return value of the selected shm call
 *
 * Indirect parameters: uap->which  shm call to invoke (index in array of shm calls)
 *                      uap->a2     User argument descriptor
 *
 * Returns:     0   Success
 *              !0  Not success
 *
 * Implicit returns: retval  Return value of the selected shm call
 *
 * DEPRECATED:  This interface should not be used to call the other SHM
 *              functions (shmat, oshmctl, shmdt, shmget, shmctl). The correct
 *              usage is to call the other SHM functions directly.
 */
int
shmsys(struct proc *p, struct shmsys_args *uap, int32_t *retval)
{
    /* The routine that we are dispatching already does this */

    if (uap->which >= sizeof(shmcalls) / sizeof(shmcalls[0])) {
        return EINVAL;
    }
    return (*shmcalls[uap->which])(p, &uap->a2, retval);
}

/*
 * Return 0 on success, 1 on failure.
 */
int
shmfork(struct proc *p1, struct proc *p2)
{
    struct shmmap_state *shmmap_s;
    size_t size;
    int nsegs = 0;
    int ret = 0;

    SYSV_SHM_SUBSYS_LOCK();

    if (shminit()) {
        ret = 1;
        goto shmfork_out;
    }

    struct shmmap_state *src = (struct shmmap_state *)p1->vm_shm;
    assert(src);

    /* count number of shmid entries in src */
    for (struct shmmap_state *s = src; s->shmid != SHMID_SENTINEL; s++) {
        nsegs++;
    }

    if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
        ret = 1;
        goto shmfork_out;
    }
    MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK);
    if (shmmap_s == NULL) {
        ret = 1;
        goto shmfork_out;
    }

    bcopy(src, (caddr_t)shmmap_s, size);
    p2->vm_shm = (caddr_t)shmmap_s;
    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        if (SHMID_IS_VALID(shmmap_s->shmid)) {
            shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
        }
    }

shmfork_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return ret;
}

static void
shmcleanup(struct proc *p, int deallocate)
{
    struct shmmap_state *shmmap_s;

    SYSV_SHM_SUBSYS_LOCK();

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        if (SHMID_IS_VALID(shmmap_s->shmid)) {
            /*
             * XXX: Should the MAC framework enforce a
             * check here as well?
             */
            shm_delete_mapping(p, shmmap_s, deallocate);
        }
    }

    FREE(p->vm_shm, M_SHM);
    p->vm_shm = NULL;
    SYSV_SHM_SUBSYS_UNLOCK();
}

void
shmexit(struct proc *p)
{
    shmcleanup(p, 1);
}

/*
 * shmexec() is like shmexit(), only it doesn't delete the mappings,
 * since the old address space has already been destroyed and the new
 * one instantiated. Instead, it just does the housekeeping work we
 * need to do to keep the System V shared memory subsystem sane.
 */
__private_extern__ void
shmexec(struct proc *p)
{
    shmcleanup(p, 0);
}

int
shminit(void)
{
    size_t sz;
    int i;

    if (!shm_inited) {
        /*
         * we store internally 64 bit, since if we didn't, we would
         * be unable to represent a segment size in excess of 32 bits
         * with the (struct shmid_ds)->shm_segsz field; also, POSIX
         * dictates this field be a size_t, which is 64 bits when
         * running 64 bit binaries.
         */
        if (os_mul_overflow(shminfo.shmmni, sizeof(struct shmid_kernel), &sz)) {
            return ENOMEM;
        }

        MALLOC(shmsegs, struct shmid_kernel *, sz, M_SHM, M_WAITOK | M_ZERO);
        if (shmsegs == NULL) {
            return ENOMEM;
        }
        for (i = 0; i < shminfo.shmmni; i++) {
            shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
            shmsegs[i].u.shm_perm._seq = 0;
#if CONFIG_MACF
            mac_sysvshm_label_init(&shmsegs[i]);
#endif
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
        shm_inited = 1;
    }

    return 0;
}

/* Initialize the mutex governing access to the SysV shm subsystem */
__private_extern__ void
sysv_shm_lock_init( void )
{
    sysv_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

    sysv_shm_subsys_lck_grp = lck_grp_alloc_init("sysv_shm_subsys_lock", sysv_shm_subsys_lck_grp_attr);

    sysv_shm_subsys_lck_attr = lck_attr_alloc_init();
    lck_mtx_init(&sysv_shm_subsys_mutex, sysv_shm_subsys_lck_grp, sysv_shm_subsys_lck_attr);
}

/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
 *  struct sysctl_req *req) */
static int
sysctl_shminfo(__unused struct sysctl_oid *oidp, void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error = 0;
    int sysctl_shminfo_ret = 0;
    int64_t saved_shmmax;
    int64_t saved_shmmin;
    int64_t saved_shmseg;
    int64_t saved_shmmni;
    int64_t saved_shmall;

    error = SYSCTL_OUT(req, arg1, sizeof(int64_t));
    if (error || req->newptr == USER_ADDR_NULL) {
        return error;
    }

    SYSV_SHM_SUBSYS_LOCK();

    /* shmmni cannot be changed after SysV SHM has been initialized */
    if (shm_inited && arg1 == &shminfo.shmmni) {
        sysctl_shminfo_ret = EPERM;
        goto sysctl_shminfo_out;
    }
    saved_shmmax = shminfo.shmmax;
    saved_shmmin = shminfo.shmmin;
    saved_shmseg = shminfo.shmseg;
    saved_shmmni = shminfo.shmmni;
    saved_shmall = shminfo.shmall;

    if ((error = SYSCTL_IN(req, arg1, sizeof(int64_t))) != 0) {
        sysctl_shminfo_ret = error;
        goto sysctl_shminfo_out;
    }

    if (arg1 == &shminfo.shmmax) {
        /* shmmax needs to be page-aligned */
        if (shminfo.shmmax & PAGE_MASK_64 || shminfo.shmmax < 0) {
            shminfo.shmmax = saved_shmmax;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmmin) {
        if (shminfo.shmmin < 0) {
            shminfo.shmmin = saved_shmmin;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmseg) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmseg > INT32_MAX || shminfo.shmseg < 0) {
            shminfo.shmseg = saved_shmseg;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmmni) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmmni > INT32_MAX || shminfo.shmmni < 0) {
            shminfo.shmmni = saved_shmmni;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmall) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmall > INT32_MAX || shminfo.shmall < 0) {
            shminfo.shmall = saved_shmall;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    }
    sysctl_shminfo_ret = 0;
sysctl_shminfo_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return sysctl_shminfo_ret;
}
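
/*
 * Illustrative user-space sketch (not part of the build): these tunables
 * are exposed as 64 bit sysctls (see the SYSCTL_PROC declarations below),
 * so they can be read or set with sysctlbyname(3):
 *
 *	#include <sys/sysctl.h>
 *
 *	int64_t shmmax;
 *	size_t len = sizeof(shmmax);
 *	if (sysctlbyname("kern.sysv.shmmax", &shmmax, &len, NULL, 0) == 0) {
 *		... shmmax now holds the current limit ...
 *	}
 *
 * Writes must satisfy the checks above, e.g. a new shmmax must be
 * page-aligned or the set fails with EINVAL.
 */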

static int
IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error;
    int cursor;
    union {
        struct user32_IPCS_command u32;
        struct user_IPCS_command u64;
    } ipcs = { };
    struct user32_shmid_ds shmid_ds32 = { }; /* post conversion, 32 bit version */
    struct user_shmid_ds shmid_ds = { };     /* 64 bit version */
    void *shmid_dsp;
    size_t ipcs_sz = sizeof(struct user_IPCS_command);
    size_t shmid_ds_sz = sizeof(struct user_shmid_ds);
    struct proc *p = current_proc();

    SYSV_SHM_SUBSYS_LOCK();

    if ((error = shminit())) {
        goto ipcs_shm_sysctl_out;
    }

    if (!IS_64BIT_PROCESS(p)) {
        ipcs_sz = sizeof(struct user32_IPCS_command);
        shmid_ds_sz = sizeof(struct user32_shmid_ds);
    }

    /* Copy in the command structure */
    if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
        goto ipcs_shm_sysctl_out;
    }

    if (!IS_64BIT_PROCESS(p)) { /* convert in place */
        ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);
    }

    /* Let us version this interface... */
    if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
        error = EINVAL;
        goto ipcs_shm_sysctl_out;
    }

    switch (ipcs.u64.ipcs_op) {
    case IPCS_SHM_CONF: /* Obtain global configuration data */
        if (ipcs.u64.ipcs_datalen != sizeof(struct shminfo)) {
            if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */
                error = ENOMEM;
                break;
            }
            error = ERANGE;
            break;
        }
        error = copyout(&shminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
        break;

    case IPCS_SHM_ITER: /* Iterate over existing segments */
        cursor = ipcs.u64.ipcs_cursor;
        if (cursor < 0 || cursor >= shminfo.shmmni) {
            error = ERANGE;
            break;
        }
        if (ipcs.u64.ipcs_datalen != (int)shmid_ds_sz) {
            error = EINVAL;
            break;
        }
        for (; cursor < shminfo.shmmni; cursor++) {
            if (shmsegs[cursor].u.shm_perm.mode & SHMSEG_ALLOCATED) {
                break;
            }
            continue;
        }
        if (cursor == shminfo.shmmni) {
            error = ENOENT;
            break;
        }

        shmid_dsp = &shmsegs[cursor]; /* default: 64 bit */

        /*
         * If necessary, convert the 64 bit kernel segment
         * descriptor to a 32 bit user one.
         */
        if (!IS_64BIT_PROCESS(p)) {
            shmid_ds_64to32(shmid_dsp, &shmid_ds32);

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds32.shm_internal = (user32_addr_t)0;

            shmid_dsp = &shmid_ds32;
        } else {
            memcpy(&shmid_ds, shmid_dsp, sizeof(shmid_ds));

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds.shm_internal = USER_ADDR_NULL;

            shmid_dsp = &shmid_ds;
        }
        error = copyout(shmid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
        if (!error) {
            /* update cursor */
            ipcs.u64.ipcs_cursor = cursor + 1;

            if (!IS_64BIT_PROCESS(p)) { /* convert in place */
                ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t, ipcs.u64.ipcs_data);
            }

            error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
        }
        break;

    default:
        error = EINVAL;
        break;
    }
ipcs_shm_sysctl_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return error;
}
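
/*
 * Illustrative user-space sketch (not part of the build), assuming the
 * user-visible struct IPCS_command in <sys/ipcs.h> mirrors the fields
 * handled above; this is roughly how ipcs(1) walks the segment table via
 * the kern.sysv.ipcs.shm sysctl, resuming from the returned cursor:
 *
 *	struct shmid_ds ds;
 *	struct IPCS_command ic = {
 *		.ipcs_magic   = IPCS_MAGIC,
 *		.ipcs_op      = IPCS_SHM_ITER,
 *		.ipcs_cursor  = 0,
 *		.ipcs_datalen = sizeof(ds),
 *		.ipcs_data    = &ds,
 *	};
 *	size_t len = sizeof(ic);
 *	while (sysctlbyname("kern.sysv.ipcs.shm", &ic, &len, &ic, len) == 0) {
 *		... one allocated segment is now in ds ...
 *	}
 *
 * The loop ends when the kernel returns ENOENT (no further segments).
 */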

SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSV");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmax, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmax, 0, &sysctl_shminfo, "Q", "shmmax");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmin, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmin, 0, &sysctl_shminfo, "Q", "shmmin");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmni, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmni, 0, &sysctl_shminfo, "Q", "shmmni");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmseg, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmseg, 0, &sysctl_shminfo, "Q", "shmseg");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmall, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmall, 0, &sysctl_shminfo, "Q", "shmall");

SYSCTL_NODE(_kern_sysv, OID_AUTO, ipcs, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSVIPCS");

SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, shm, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, IPCS_shm_sysctl,
    "S,IPCS_shm_command",
    "ipcs shm command interface");
#endif /* SYSV_SHM */

/* DSEP Review Done pl-20051108-v02 @2743,@2908,@2913,@3009 */