/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/appleapiopts.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <vm/vm_map.h>

struct shmat_args;
extern int shmat __P((struct proc *p, struct shmat_args *uap, int *retval));
struct shmctl_args;
extern int shmctl __P((struct proc *p, struct shmctl_args *uap, int *retval));
struct shmdt_args;
extern int shmdt __P((struct proc *p, struct shmdt_args *uap, int *retval));
struct shmget_args;
extern int shmget __P((struct proc *p, struct shmget_args *uap, int *retval));

#if 0
static void shminit __P((void *));
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)
#endif /* 0 */

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap, int *retval));
static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode, int *retval));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum, int *retval));

typedef int sy_call_t __P((struct proc *, void *, int *));

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;
static int shm_inited = 0;

struct shm_handle {
	/* vm_offset_t kva; */
	void *shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

#ifdef __APPLE_API_PRIVATE
struct shminfo shminfo = {
	-1,	/* SHMMAX = 4096 * 1024 */
	-1,	/* SHMMIN = 1 */
	-1,	/* SHMMNI = 1 */
	-1,	/* SHMSEG = 8 */
	-1	/* SHMALL = 1024 */
};
#endif /* __APPLE_API_PRIVATE */

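/*
 * Look up an allocated segment by its IPC key; returns the index into
 * shmsegs[], or -1 if the key is not in use.
 */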
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

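/*
 * Translate a user-visible shmid into a segment pointer, checking the
 * index range, the allocated/removed bits, and the generation sequence;
 * returns NULL if the id is stale or invalid.
 */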
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

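/*
 * Free the resources behind a segment: destroy the backing Mach memory
 * entry, release the handle, and return the slot to SHMSEG_FREE while
 * updating the global usage counters.
 */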
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	struct shmmap_state *shmmap_s = NULL;
	size_t size;
	char *ptr;

	shm_handle = shmseg->shm_internal;
	size = round_page(shmseg->shm_segsz);
	mach_destroy_memory_entry(shm_handle->shm_object);
	FREE((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

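/*
 * Detach one mapping from the current task, stamp the detach time, and
 * deallocate the segment if this was the last attachment of a segment
 * already marked SHMSEG_REMOVED.
 */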
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_deallocate(current_map(), shmmap_s->va, size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

struct shmdt_args {
	void *shmaddr;
};

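/*
 * shmdt system call: find the attachment whose base address matches
 * uap->shmaddr and remove it.
 */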
int
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	register_t *retval;
{
	struct shmmap_state *shmmap_s;
	int i;

	if (!shm_inited)
		return(EINVAL);
	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

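/*
 * shmat system call: map the segment into the caller's address space,
 * read-only under SHM_RDONLY, at the given address (rounded down when
 * SHM_RND is set) or at a kernel-chosen one when shmaddr is NULL.
 */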
int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	register_t *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	kern_return_t rv;

	if (!shm_inited)
		return(EINVAL);
	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		attach_va = round_page((vm_offset_t)uap->shmaddr);
	}

	shm_handle = shmseg->shm_internal;
	rv = vm_map(current_map(), &attach_va, size, 0,
	    (flags & MAP_FIXED) ? FALSE : TRUE,
	    shm_handle->shm_object, 0, FALSE, prot, prot, VM_INHERIT_DEFAULT);
	if (rv != KERN_SUCCESS)
		goto out;
	rv = vm_inherit(current_map(), attach_va, size, VM_INHERIT_SHARE);
	if (rv != KERN_SUCCESS) {
		(void) vm_deallocate(current_map(), attach_va, size);
		goto out;
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return (0);
out:
	switch (rv) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

struct oshmid_ds {
	struct ipc_perm shm_perm;	/* operation perms */
	int shm_segsz;			/* size of segment (bytes) */
	ushort shm_cpid;		/* pid, creator */
	ushort shm_lpid;		/* pid, last operation */
	short shm_nattch;		/* no. of current attaches */
	time_t shm_atime;		/* last attach time */
	time_t shm_dtime;		/* last detach time */
	time_t shm_ctime;		/* last change time */
	void *shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

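/*
 * Old-style (4.3BSD, COMPAT_43) shmctl: services IPC_STAT with the old
 * oshmid_ds layout and forwards every other command to shmctl().
 */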
static int
oshmctl(p, uap, retval)
	struct proc *p;
	struct oshmctl_args *uap;
	register_t *retval;
{
#ifdef COMPAT_43
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!shm_inited)
		return(EINVAL);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(p, uap, retval);
	}
	return 0;
#else
	return EINVAL;
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

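/*
 * shmctl system call: IPC_STAT copies the descriptor out, IPC_SET updates
 * owner, group, and mode bits, and IPC_RMID marks the segment removed so
 * it is destroyed when the last attachment goes away.
 */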
int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	register_t *retval;
{
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!shm_inited)
		return(EINVAL);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

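/*
 * shmget() found a segment with the requested key: wait for one still
 * being created, then verify permissions and size, failing with EEXIST
 * if IPC_CREAT|IPC_EXCL was specified.
 */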
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
	int *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	error = ipcperm(cred, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

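/*
 * shmget() must create a new segment: pick a free slot, back it with a
 * Mach memory entry sized to whole pages, and initialize the shmid_ds
 * bookkeeping before waking any waiters on the key.
 */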
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	kern_return_t kret;
	vm_offset_t user_addr;
	void *mem_object;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	kret = vm_allocate(current_map(), &user_addr, size, TRUE);
	if (kret != KERN_SUCCESS)
		goto out;

	kret = mach_make_memory_entry(current_map(), &size,
	    user_addr, VM_PROT_DEFAULT, &mem_object, 0);

	if (kret != KERN_SUCCESS)
		goto out;
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    _MALLOC(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shm_handle->shm_object = mem_object;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	return 0;
out:
	switch (kret) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

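/*
 * shmget system call: return the segment matching the key, or allocate a
 * new one for IPC_PRIVATE or when IPC_CREAT is set and the key is unused.
 */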
int
shmget(p, uap, retval)
	struct proc *p;
	struct shmget_args *uap;
	register_t *retval;
{
	int segnum, mode, error;

	if (!shm_inited)
		return(EINVAL);

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return(error);
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return(shmget_allocate_segment(p, uap, mode, retval));
	/*NOTREACHED*/
}

struct shmsys_args {
	u_int which;
	int a2;
	int a3;
	int a4;
};

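/*
 * shmsys system call: historical entry point that multiplexes the shm*
 * calls through the shmcalls[] dispatch table.
 */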
int
shmsys(p, uap, retval)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args *uap;
	register_t *retval;
{

	if (!shm_inited)
		return(EINVAL);

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2, retval));
}

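/*
 * Called at fork: give the child a copy of the parent's attachment table
 * and bump the attach count of every segment that is mapped.
 */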
void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	if (!shm_inited)
		return;
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->vm_shm, (caddr_t)shmmap_s, size);
	p2->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

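/*
 * Called at process exit: detach every remaining segment and free the
 * per-process attachment table.
 */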
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	FREE((caddr_t)p->vm_shm, M_SHM);
	p->vm_shm = NULL;
}

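/*
 * One-time setup: allocate the shmsegs[] table according to shmmni and
 * mark every slot free.  A no-op once shm_inited is set.
 */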
void
shminit(dummy)
	void *dummy;
{
	int i;
	int s;

	if (!shm_inited) {
		s = sizeof(struct shmid_ds) * shminfo.shmmni;

		MALLOC(shmsegs, struct shmid_ds *, s,
		    M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmmni; i++) {
			shmsegs[i].shm_perm.mode = SHMSEG_FREE;
			shmsegs[i].shm_perm.seq = 0;
		}
		shm_last_free = 0;
		shm_nused = 0;
		shm_committed = 0;
		shm_inited = 1;
	}
}

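/*
 * sysctl handler shared by the kern.sysv.shm* tunables: reads always
 * succeed, writes are honored only before initialization, and shminit()
 * is invoked automatically once every tunable has been given a value.
 */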
/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
	struct sysctl_req *req) */
static int
sysctl_shminfo SYSCTL_HANDLER_ARGS
{
	int error = 0;

	error = SYSCTL_OUT(req, arg1, sizeof(int));
	if (error || !req->newptr)
		return(error);

	/* Set the values only if shared memory is not initialized */
	if (!shm_inited) {
		if ((error = SYSCTL_IN(req, arg1, sizeof(int))) != 0)
			return(error);
		if (arg1 == &shminfo.shmmax) {
			if (shminfo.shmmax & PAGE_MASK) {
				shminfo.shmmax = -1;
				return(EINVAL);
			}
		}

		/* Initialize only when all values are set */
		if ((shminfo.shmmax != -1) &&
		    (shminfo.shmmin != -1) &&
		    (shminfo.shmmni != -1) &&
		    (shminfo.shmseg != -1) &&
		    (shminfo.shmall != -1)) {
			shminit(NULL);
		}
	}
	return(0);
}

SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMAX, shmmax, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmax, 0, &sysctl_shminfo, "I", "shmmax");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMIN, shmmin, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmin, 0, &sysctl_shminfo, "I", "shmmin");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMMNI, shmmni, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmmni, 0, &sysctl_shminfo, "I", "shmmni");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMSEG, shmseg, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmseg, 0, &sysctl_shminfo, "I", "shmseg");

SYSCTL_PROC(_kern_sysv, KSYSV_SHMALL, shmall, CTLTYPE_INT | CTLFLAG_RW,
    &shminfo.shmall, 0, &sysctl_shminfo, "I", "shmall");