/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/appleapiopts.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <vm/vm_map.h>

struct shmat_args;
extern int shmat __P((struct proc *p, struct shmat_args *uap, int *retval));
struct shmctl_args;
extern int shmctl __P((struct proc *p, struct shmctl_args *uap, int *retval));
struct shmdt_args;
extern int shmdt __P((struct proc *p, struct shmdt_args *uap, int *retval));
struct shmget_args;
extern int shmget __P((struct proc *p, struct shmget_args *uap, int *retval));

#if 0
static void shminit __P((void *));
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)
#endif /* 0 */

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap, int *retval));
static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode, int *retval));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum, int *retval));

typedef int sy_call_t __P((struct proc *, void *, int *));

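/*
 * Dispatch table for shmsys(2): its "which" argument indexes this
 * array, so 0 = shmat, 1 = oshmctl, 2 = shmdt, 3 = shmget and
 * 4 = shmctl (see shmsys() near the bottom of the file).
 */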
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
        (sy_call_t *)shmat, (sy_call_t *)oshmctl,
        (sy_call_t *)shmdt, (sy_call_t *)shmget,
        (sy_call_t *)shmctl
};

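/*
 * Segment state flags kept in shm_perm.mode.  They sit just above the
 * nine 0777 permission bits (ACCESSPERMS), so one mode word carries
 * both the access bits and the allocation state.
 */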
#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;
static int shm_inited = 0;

struct shm_handle {
        /* vm_offset_t kva; */
        void *shm_object;
};

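/*
 * Per-process attach table: shmat() hangs an array of shminfo.shmseg
 * of these off p->vm_shm; shmid == -1 marks a free slot.
 */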
struct shmmap_state {
        vm_offset_t va;
        int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

#ifdef __APPLE_API_PRIVATE
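/*
 * Every limit starts out as -1, i.e. "not configured".  The values are
 * supplied at runtime through the kern.sysv sysctls at the bottom of
 * this file, and sysctl_shminfo() defers shminit() until all five are
 * set.
 */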
struct shminfo shminfo = {
        -1,     /* SHMMAX = 4096 * 1024 */
        -1,     /* SHMMIN = 1 */
        -1,     /* SHMMNI = 1 */
        -1,     /* SHMSEG = 8 */
        -1      /* SHMALL = 1024 */
};
#endif /* __APPLE_API_PRIVATE */

static int
shm_find_segment_by_key(key)
        key_t key;
{
        int i;

        for (i = 0; i < shminfo.shmmni; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return i;
        return -1;
}

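/*
 * A shmid is more than a table index: IPCID_TO_IX() recovers the slot
 * number and IPCID_TO_SEQ() a per-slot generation count (in the classic
 * SysV encoding, roughly id == (seq << 16) | index).  The sequence
 * check below rejects stale ids whose slot has since been recycled.
 */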
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
        int shmid;
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shminfo.shmmni)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
            != SHMSEG_ALLOCATED ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
                return NULL;
        return shmseg;
}

static void
shm_deallocate_segment(shmseg)
        struct shmid_ds *shmseg;
{
        struct shm_handle *shm_handle;
        struct shmmap_state *shmmap_s = NULL;
        size_t size;
        char *ptr;

        shm_handle = shmseg->shm_internal;
        size = round_page(shmseg->shm_segsz);
        mach_destroy_memory_entry(shm_handle->shm_object);
        FREE((caddr_t)shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(p, shmmap_s)
        struct proc *p;
        struct shmmap_state *shmmap_s;
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_deallocate(current_map(), shmmap_s->va, size);
        if (result != KERN_SUCCESS)
                return EINVAL;
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}

struct shmdt_args {
        void *shmaddr;
};

int
shmdt(p, uap, retval)
        struct proc *p;
        struct shmdt_args *uap;
        register_t *retval;
{
        struct shmmap_state *shmmap_s;
        int i;

        if (!shm_inited)
                return(EINVAL);
        shmmap_s = (struct shmmap_state *)p->vm_shm;
        if (shmmap_s == NULL)
                return EINVAL;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr)
                        break;
        if (i == shminfo.shmseg)
                return EINVAL;
        return shm_delete_mapping(p, shmmap_s);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
        int shmid;
        void *shmaddr;
        int shmflg;
};
#endif

int
shmat(p, uap, retval)
        struct proc *p;
        struct shmat_args *uap;
        register_t *retval;
{
        int error, i, flags;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        kern_return_t rv;

        if (!shm_inited)
                return(EINVAL);
        shmmap_s = (struct shmmap_state *)p->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                p->vm_shm = (caddr_t)shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        error = ipcperm(cred, &shmseg->shm_perm,
            (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                return error;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg)
                return EMFILE;
        size = round_page(shmseg->shm_segsz);
        prot = VM_PROT_READ;
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (uap->shmaddr) {
                flags |= MAP_FIXED;
                if (uap->shmflg & SHM_RND)
                        attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
                else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
                        attach_va = (vm_offset_t)uap->shmaddr;
                else
                        return EINVAL;
        } else {
                attach_va = round_page((vm_offset_t)uap->shmaddr);
        }

        shm_handle = shmseg->shm_internal;
        rv = vm_map(current_map(), &attach_va, size, 0,
            (flags & MAP_FIXED) ? FALSE : TRUE,
            shm_handle->shm_object, 0, FALSE, prot, prot, VM_INHERIT_DEFAULT);
        if (rv != KERN_SUCCESS)
                goto out;
        rv = vm_inherit(current_map(), attach_va, size, VM_INHERIT_SHARE);
        if (rv != KERN_SUCCESS) {
                (void) vm_deallocate(current_map(), attach_va, size);
                goto out;
        }

        shmmap_s->va = attach_va;
        shmmap_s->shmid = uap->shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        *retval = attach_va;
        return (0);
out:
        switch (rv) {
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return (ENOMEM);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}
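
/*
 * Illustrative userspace sketch (not compiled here): a typical
 * attach/detach cycle driving the two routines above.  Passing a NULL
 * shmaddr lets vm_map() pick the attach address.
 *
 *      int id = shmget(key, len, IPC_CREAT | 0600);
 *      void *base = shmat(id, NULL, 0);
 *      ...
 *      shmdt(base);
 */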

struct oshmid_ds {
        struct ipc_perm shm_perm;       /* operation perms */
        int     shm_segsz;              /* size of segment (bytes) */
        ushort  shm_cpid;               /* pid, creator */
        ushort  shm_lpid;               /* pid, last operation */
        short   shm_nattch;             /* no. of current attaches */
        time_t  shm_atime;              /* last attach time */
        time_t  shm_dtime;              /* last detach time */
        time_t  shm_ctime;              /* last change time */
        void    *shm_handle;            /* internal handle for shm segment */
};

struct oshmctl_args {
        int shmid;
        int cmd;
        struct oshmid_ds *ubuf;
};

static int
oshmctl(p, uap, retval)
        struct proc *p;
        struct oshmctl_args *uap;
        register_t *retval;
{
#ifdef COMPAT_43
        int error;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct oshmid_ds outbuf;

        if (!shm_inited)
                return(EINVAL);
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
                if (error)
                        return error;
                outbuf.shm_perm = shmseg->shm_perm;
                outbuf.shm_segsz = shmseg->shm_segsz;
                outbuf.shm_cpid = shmseg->shm_cpid;
                outbuf.shm_lpid = shmseg->shm_lpid;
                outbuf.shm_nattch = shmseg->shm_nattch;
                outbuf.shm_atime = shmseg->shm_atime;
                outbuf.shm_dtime = shmseg->shm_dtime;
                outbuf.shm_ctime = shmseg->shm_ctime;
                outbuf.shm_handle = shmseg->shm_internal;
                error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
                if (error)
                        return error;
                break;
        default:
                /* XXX casting to (sy_call_t *) is bogus, as usual. */
                return ((sy_call_t *)shmctl)(p, uap, retval);
        }
        return 0;
#else
        return EINVAL;
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
        int shmid;
        int cmd;
        struct shmid_ds *buf;
};
#endif

int
shmctl(p, uap, retval)
        struct proc *p;
        struct shmctl_args *uap;
        register_t *retval;
{
        int error;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds inbuf;
        struct shmid_ds *shmseg;

        if (!shm_inited)
                return(EINVAL);
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
                if (error)
                        return error;
                error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
                if (error)
                        return error;
                break;
        case IPC_SET:
                error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
                if (error)
                        return error;
                error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
                if (error)
                        return error;
                shmseg->shm_perm.uid = inbuf.shm_perm.uid;
                shmseg->shm_perm.gid = inbuf.shm_perm.gid;
                shmseg->shm_perm.mode =
                    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                    (inbuf.shm_perm.mode & ACCESSPERMS);
                shmseg->shm_ctime = time_second;
                break;
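        /*
         * Removal is deferred: retire the key, mark the segment
         * SHMSEG_REMOVED, and reclaim the memory (here, or later in
         * shm_delete_mapping()) only once the attach count reaches zero.
         */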
        case IPC_RMID:
                error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
                if (error)
                        return error;
                shmseg->shm_perm.key = IPC_PRIVATE;
                shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->shm_nattch <= 0) {
                        shm_deallocate_segment(shmseg);
                        shm_last_free = IPCID_TO_IX(uap->shmid);
                }
                break;
#if 0
        case SHM_LOCK:
        case SHM_UNLOCK:
#endif
        default:
                return EINVAL;
        }
        return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
        key_t key;
        size_t size;
        int shmflg;
};
#endif

static int
shmget_existing(p, uap, mode, segnum, retval)
        struct proc *p;
        struct shmget_args *uap;
        int mode;
        int segnum;
        int *retval;
{
        struct shmid_ds *shmseg;
        struct ucred *cred = p->p_ucred;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        error = ipcperm(cred, &shmseg->shm_perm, mode);
        if (error)
                return error;
        if (uap->size && uap->size > shmseg->shm_segsz)
                return EINVAL;
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}

static int
shmget_allocate_segment(p, uap, mode, retval)
        struct proc *p;
        struct shmget_args *uap;
        int mode;
        int *retval;
{
        int i, segnum, shmid, size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;
        kern_return_t kret;
        vm_offset_t user_addr;
        void *mem_object;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                for (i = 0; i < shminfo.shmmni; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shminfo.shmmni)
                        panic("shmseg free count inconsistent");
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        kret = vm_allocate(current_map(), &user_addr, size, TRUE);
        if (kret != KERN_SUCCESS)
                goto out;

        kret = mach_make_memory_entry(current_map(), &size, user_addr,
            VM_PROT_DEFAULT, &mem_object, 0);

        if (kret != KERN_SUCCESS)
                goto out;
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = (struct shm_handle *)
            _MALLOC(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shm_handle->shm_object = mem_object;
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        *retval = shmid;
        return 0;
out:
        switch (kret) {
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return (ENOMEM);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}

int
shmget(p, uap, retval)
        struct proc *p;
        struct shmget_args *uap;
        register_t *retval;
{
        int segnum, mode, error;

        if (!shm_inited)
                return(EINVAL);

        mode = uap->shmflg & ACCESSPERMS;
        if (uap->key != IPC_PRIVATE) {
again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum, retval);
                        if (error == EAGAIN)
                                goto again;
                        return(error);
                }
                if ((uap->shmflg & IPC_CREAT) == 0)
                        return ENOENT;
        }
        return (shmget_allocate_segment(p, uap, mode, retval));
        /*NOTREACHED*/
}
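
/*
 * Illustrative userspace sketch (not compiled here): IPC_CREAT | IPC_EXCL
 * makes creation exclusive, so a losing creator sees EEXIST from
 * shmget_existing() and can fall back to attaching the existing segment
 * (a size of 0 skips the size check there).
 *
 *      int id = shmget(key, len, IPC_CREAT | IPC_EXCL | 0600);
 *      if (id == -1 && errno == EEXIST)
 *              id = shmget(key, 0, 0600);
 */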

struct shmsys_args {
        u_int which;
        int a2;
        int a3;
        int a4;
};

int
shmsys(p, uap, retval)
        struct proc *p;
        /* XXX actually varargs. */
        struct shmsys_args *uap;
        register_t *retval;
{

        if (!shm_inited)
                return(EINVAL);

        if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return EINVAL;
        return ((*shmcalls[uap->which])(p, &uap->a2, retval));
}

void
shmfork(p1, p2)
        struct proc *p1, *p2;
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        if (!shm_inited)
                return;
        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->vm_shm, (caddr_t)shmmap_s, size);
        p2->vm_shm = (caddr_t)shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(p)
        struct proc *p;
{
        struct shmmap_state *shmmap_s;
        int i;

        shmmap_s = (struct shmmap_state *)p->vm_shm;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shm_delete_mapping(p, shmmap_s);
        FREE((caddr_t)p->vm_shm, M_SHM);
        p->vm_shm = NULL;
}
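
/*
 * Note that shminit() is not run at boot: the SYSINIT hook near the top
 * of the file is compiled out, so initialization happens lazily via
 * sysctl_shminfo() below, once all five limits have been configured.
 */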
void
shminit(dummy)
        void *dummy;
{
        int i;
        int s;

        if (!shm_inited) {
                s = sizeof(struct shmid_ds) * shminfo.shmmni;

                MALLOC(shmsegs, struct shmid_ds *, s, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmmni; i++) {
                        shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                        shmsegs[i].shm_perm.seq = 0;
                }
                shm_last_free = 0;
                shm_nused = 0;
                shm_committed = 0;
                shm_inited = 1;
        }
}

/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
        struct sysctl_req *req) */
static int
sysctl_shminfo SYSCTL_HANDLER_ARGS
{
        int error = 0;

        error = SYSCTL_OUT(req, arg1, sizeof(int));
        if (error || !req->newptr)
                return(error);

        /* Set the values only if shared memory is not initialized */
        if (!shm_inited) {
                if ((error = SYSCTL_IN(req, arg1, sizeof(int))))
                        return(error);
                if (arg1 == &shminfo.shmmax) {
                        if (shminfo.shmmax & PAGE_MASK) {
                                shminfo.shmmax = -1;
                                return(EINVAL);
                        }
                }

                /* Initialize only when all values are set */
                if ((shminfo.shmmax != -1) &&
                    (shminfo.shmmin != -1) &&
                    (shminfo.shmmni != -1) &&
                    (shminfo.shmseg != -1) &&
                    (shminfo.shmall != -1)) {
                        shminit(NULL);
                }
        }
        return(0);
}

SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMAX, shmmax, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmax, 0, &sysctl_shminfo, "I", "shmmax");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMIN, shmmin, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmin, 0, &sysctl_shminfo, "I", "shmmin");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMNI, shmmni, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmni, 0, &sysctl_shminfo, "I", "shmmni");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMSEG, shmseg, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmseg, 0, &sysctl_shminfo, "I", "shmseg");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMALL, shmall, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmall, 0, &sysctl_shminfo, "I", "shmall");
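
/*
 * Illustrative usage (userspace, assuming the standard sysctl(8) tool;
 * values here are only examples): all five limits must be seeded before
 * the subsystem initializes, and shmmax must be a multiple of the page
 * size or sysctl_shminfo() rejects it with EINVAL.
 *
 *      sysctl -w kern.sysv.shmmax=4194304
 *      sysctl -w kern.sysv.shmmin=1
 *      sysctl -w kern.sysv.shmmni=32
 *      sysctl -w kern.sysv.shmseg=8
 *      sysctl -w kern.sysv.shmall=1024
 */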