/* bsd/kern/sysv_shm.c — System V shared memory (Apple XNU) */
/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
23
24 /*
25 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by Adam Glass and Charles
38 * Hannum.
39 * 4. The names of the authors may not be used to endorse or promote products
40 * derived from this software without specific prior written permission.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
43 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
44 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
45 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
46 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
47 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
51 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <vm/vm_map.h>

68 struct shmat_args;
69 extern int shmat __P((struct proc *p, struct shmat_args *uap, int *retval));
70 struct shmctl_args;
71 extern int shmctl __P((struct proc *p, struct shmctl_args *uap, int *retval));
72 struct shmdt_args;
73 extern int shmdt __P((struct proc *p, struct shmdt_args *uap, int *retval));
74 struct shmget_args;
75 extern int shmget __P((struct proc *p, struct shmget_args *uap, int *retval));
76
#if 0
/* Disabled: shminit() is called explicitly by this kernel, not via SYSINIT. */
static void shminit __P((void *));
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)
#endif /* 0 */

82 struct oshmctl_args;
83 static int oshmctl __P((struct proc *p, struct oshmctl_args *uap, int * retval));
84 static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode, int * retval));
85 static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum, int * retval));
86
87 typedef int sy_call_t __P((struct proc *, void *, int *));
88
89 /* XXX casting to (sy_call_t *) is bogus, as usual. */
90 static sy_call_t *shmcalls[] = {
91 (sy_call_t *)shmat, (sy_call_t *)oshmctl,
92 (sy_call_t *)shmdt, (sy_call_t *)shmget,
93 (sy_call_t *)shmctl
94 };
95
/* shm_perm.mode state bits, kept above the ACCESSPERMS (0777) range. */
#define SHMSEG_FREE		0x0200	/* slot unused */
#define SHMSEG_REMOVED		0x0400	/* IPC_RMID'd; destroyed at last detach */
#define SHMSEG_ALLOCATED	0x0800	/* slot holds a live segment */
#define SHMSEG_WANTED		0x1000	/* a shmget() sleeper awaits this slot */

static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;	/* table of shminfo.shmmni segments */

104 struct shm_handle {
105 /* vm_offset_t kva; */
106 void * shm_object;
107 };
108
109 struct shmmap_state {
110 vm_offset_t va;
111 int shmid;
112 };
113
114 static void shm_deallocate_segment __P((struct shmid_ds *));
115 static int shm_find_segment_by_key __P((key_t));
116 static struct shmid_ds *shm_find_segment_by_shmid __P((int));
117 static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
118
119 static int
120 shm_find_segment_by_key(key)
121 key_t key;
122 {
123 int i;
124
125 for (i = 0; i < shminfo.shmmni; i++)
126 if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
127 shmsegs[i].shm_perm.key == key)
128 return i;
129 return -1;
130 }
131
132 static struct shmid_ds *
133 shm_find_segment_by_shmid(shmid)
134 int shmid;
135 {
136 int segnum;
137 struct shmid_ds *shmseg;
138
139 segnum = IPCID_TO_IX(shmid);
140 if (segnum < 0 || segnum >= shminfo.shmmni)
141 return NULL;
142 shmseg = &shmsegs[segnum];
143 if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
144 != SHMSEG_ALLOCATED ||
145 shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
146 return NULL;
147 return shmseg;
148 }
149
150 static void
151 shm_deallocate_segment(shmseg)
152 struct shmid_ds *shmseg;
153 {
154 struct shm_handle *shm_handle;
155 struct shmmap_state *shmmap_s=NULL;
156 size_t size;
157 char * ptr;
158
159 shm_handle = shmseg->shm_internal;
160 size = round_page(shmseg->shm_segsz);
161 mach_destroy_memory_entry(shm_handle->shm_object);
162 FREE((caddr_t)shm_handle, M_SHM);
163 shmseg->shm_internal = NULL;
164 shm_committed -= btoc(size);
165 shm_nused--;
166 shmseg->shm_perm.mode = SHMSEG_FREE;
167 }
168
169 static int
170 shm_delete_mapping(p, shmmap_s)
171 struct proc *p;
172 struct shmmap_state *shmmap_s;
173 {
174 struct shmid_ds *shmseg;
175 int segnum, result;
176 size_t size;
177
178 segnum = IPCID_TO_IX(shmmap_s->shmid);
179 shmseg = &shmsegs[segnum];
180 size = round_page(shmseg->shm_segsz);
181 result = vm_deallocate(current_map(), shmmap_s->va, size);
182 if (result != KERN_SUCCESS)
183 return EINVAL;
184 shmmap_s->shmid = -1;
185 shmseg->shm_dtime = time_second;
186 if ((--shmseg->shm_nattch <= 0) &&
187 (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
188 shm_deallocate_segment(shmseg);
189 shm_last_free = segnum;
190 }
191 return 0;
192 }
193
194 struct shmdt_args {
195 void *shmaddr;
196 };
197
198 int
199 shmdt(p, uap, retval)
200 struct proc *p;
201 struct shmdt_args *uap;
202 register_t *retval;
203 {
204 struct shmmap_state *shmmap_s;
205 int i;
206
207 shmmap_s = (struct shmmap_state *)p->vm_shm;
208 if (shmmap_s == NULL)
209 return EINVAL;
210 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
211 if (shmmap_s->shmid != -1 &&
212 shmmap_s->va == (vm_offset_t)uap->shmaddr)
213 break;
214 if (i == shminfo.shmseg)
215 return EINVAL;
216 return shm_delete_mapping(p, shmmap_s);
217 }
218
219 #ifndef _SYS_SYSPROTO_H_
220 struct shmat_args {
221 int shmid;
222 void *shmaddr;
223 int shmflg;
224 };
225 #endif
226
227 int
228 shmat(p, uap, retval)
229 struct proc *p;
230 struct shmat_args *uap;
231 register_t *retval;
232 {
233 int error, i, flags;
234 struct ucred *cred = p->p_ucred;
235 struct shmid_ds *shmseg;
236 struct shmmap_state *shmmap_s = NULL;
237 struct shm_handle *shm_handle;
238 vm_offset_t attach_va;
239 vm_prot_t prot;
240 vm_size_t size;
241 kern_return_t rv;
242
243 shmmap_s = (struct shmmap_state *)p->vm_shm;
244 if (shmmap_s == NULL) {
245 size = shminfo.shmseg * sizeof(struct shmmap_state);
246 shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
247 for (i = 0; i < shminfo.shmseg; i++)
248 shmmap_s[i].shmid = -1;
249 p->vm_shm = (caddr_t)shmmap_s;
250 }
251 shmseg = shm_find_segment_by_shmid(uap->shmid);
252 if (shmseg == NULL)
253 return EINVAL;
254 error = ipcperm(cred, &shmseg->shm_perm,
255 (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
256 if (error)
257 return error;
258 for (i = 0; i < shminfo.shmseg; i++) {
259 if (shmmap_s->shmid == -1)
260 break;
261 shmmap_s++;
262 }
263 if (i >= shminfo.shmseg)
264 return EMFILE;
265 size = round_page(shmseg->shm_segsz);
266 prot = VM_PROT_READ;
267 if ((uap->shmflg & SHM_RDONLY) == 0)
268 prot |= VM_PROT_WRITE;
269 flags = MAP_ANON | MAP_SHARED;
270 if (uap->shmaddr) {
271 flags |= MAP_FIXED;
272 if (uap->shmflg & SHM_RND)
273 attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
274 else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
275 attach_va = (vm_offset_t)uap->shmaddr;
276 else
277 return EINVAL;
278 } else {
279 attach_va = round_page(uap->shmaddr);
280 }
281
282 shm_handle = shmseg->shm_internal;
283 rv = vm_map(current_map(), &attach_va, size, 0, (flags & MAP_FIXED)? FALSE: TRUE,
284 shm_handle->shm_object, 0, FALSE, prot, prot, VM_INHERIT_DEFAULT);
285 if (rv != KERN_SUCCESS)
286 goto out;
287 rv = vm_inherit(current_map(), attach_va, size,
288 VM_INHERIT_SHARE);
289 if (rv != KERN_SUCCESS) {
290 (void) vm_deallocate(current_map(), attach_va, size);
291 goto out;
292 }
293
294 shmmap_s->va = attach_va;
295 shmmap_s->shmid = uap->shmid;
296 shmseg->shm_lpid = p->p_pid;
297 shmseg->shm_atime = time_second;
298 shmseg->shm_nattch++;
299 *retval = attach_va;
300 return( 0);
301 out:
302 switch (rv) {
303 case KERN_INVALID_ADDRESS:
304 case KERN_NO_SPACE:
305 return (ENOMEM);
306 case KERN_PROTECTION_FAILURE:
307 return (EACCES);
308 default:
309 return (EINVAL);
310 }
311
312 }
313
314 struct oshmid_ds {
315 struct ipc_perm shm_perm; /* operation perms */
316 int shm_segsz; /* size of segment (bytes) */
317 ushort shm_cpid; /* pid, creator */
318 ushort shm_lpid; /* pid, last operation */
319 short shm_nattch; /* no. of current attaches */
320 time_t shm_atime; /* last attach time */
321 time_t shm_dtime; /* last detach time */
322 time_t shm_ctime; /* last change time */
323 void *shm_handle; /* internal handle for shm segment */
324 };
325
326 struct oshmctl_args {
327 int shmid;
328 int cmd;
329 struct oshmid_ds *ubuf;
330 };
331
332 static int
333 oshmctl(p, uap, retval)
334 struct proc *p;
335 struct oshmctl_args *uap;
336 register_t *retval;
337 {
338 #ifdef COMPAT_43
339 int error;
340 struct ucred *cred = p->p_ucred;
341 struct shmid_ds *shmseg;
342 struct oshmid_ds outbuf;
343
344 shmseg = shm_find_segment_by_shmid(uap->shmid);
345 if (shmseg == NULL)
346 return EINVAL;
347 switch (uap->cmd) {
348 case IPC_STAT:
349 error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
350 if (error)
351 return error;
352 outbuf.shm_perm = shmseg->shm_perm;
353 outbuf.shm_segsz = shmseg->shm_segsz;
354 outbuf.shm_cpid = shmseg->shm_cpid;
355 outbuf.shm_lpid = shmseg->shm_lpid;
356 outbuf.shm_nattch = shmseg->shm_nattch;
357 outbuf.shm_atime = shmseg->shm_atime;
358 outbuf.shm_dtime = shmseg->shm_dtime;
359 outbuf.shm_ctime = shmseg->shm_ctime;
360 outbuf.shm_handle = shmseg->shm_internal;
361 error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
362 if (error)
363 return error;
364 break;
365 default:
366 /* XXX casting to (sy_call_t *) is bogus, as usual. */
367 return ((sy_call_t *)shmctl)(p, uap, retval);
368 }
369 return 0;
370 #else
371 return EINVAL;
372 #endif
373 }
374
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

383 int
384 shmctl(p, uap, retval)
385 struct proc *p;
386 struct shmctl_args *uap;
387 register_t *retval;
388 {
389 int error;
390 struct ucred *cred = p->p_ucred;
391 struct shmid_ds inbuf;
392 struct shmid_ds *shmseg;
393
394 shmseg = shm_find_segment_by_shmid(uap->shmid);
395 if (shmseg == NULL)
396 return EINVAL;
397 switch (uap->cmd) {
398 case IPC_STAT:
399 error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
400 if (error)
401 return error;
402 error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
403 if (error)
404 return error;
405 break;
406 case IPC_SET:
407 error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
408 if (error)
409 return error;
410 error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
411 if (error)
412 return error;
413 shmseg->shm_perm.uid = inbuf.shm_perm.uid;
414 shmseg->shm_perm.gid = inbuf.shm_perm.gid;
415 shmseg->shm_perm.mode =
416 (shmseg->shm_perm.mode & ~ACCESSPERMS) |
417 (inbuf.shm_perm.mode & ACCESSPERMS);
418 shmseg->shm_ctime = time_second;
419 break;
420 case IPC_RMID:
421 error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
422 if (error)
423 return error;
424 shmseg->shm_perm.key = IPC_PRIVATE;
425 shmseg->shm_perm.mode |= SHMSEG_REMOVED;
426 if (shmseg->shm_nattch <= 0) {
427 shm_deallocate_segment(shmseg);
428 shm_last_free = IPCID_TO_IX(uap->shmid);
429 }
430 break;
431 #if 0
432 case SHM_LOCK:
433 case SHM_UNLOCK:
434 #endif
435 default:
436 return EINVAL;
437 }
438 return 0;
439 }
440
441 #ifndef _SYS_SYSPROTO_H_
442 struct shmget_args {
443 key_t key;
444 size_t size;
445 int shmflg;
446 };
447 #endif
448
449 static int
450 shmget_existing(p, uap, mode, segnum, retval)
451 struct proc *p;
452 struct shmget_args *uap;
453 int mode;
454 int segnum;
455 int *retval;
456 {
457 struct shmid_ds *shmseg;
458 struct ucred *cred = p->p_ucred;
459 int error;
460
461 shmseg = &shmsegs[segnum];
462 if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
463 /*
464 * This segment is in the process of being allocated. Wait
465 * until it's done, and look the key up again (in case the
466 * allocation failed or it was freed).
467 */
468 shmseg->shm_perm.mode |= SHMSEG_WANTED;
469 error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
470 if (error)
471 return error;
472 return EAGAIN;
473 }
474 error = ipcperm(cred, &shmseg->shm_perm, mode);
475 if (error)
476 return error;
477 if (uap->size && uap->size > shmseg->shm_segsz)
478 return EINVAL;
479 if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
480 return EEXIST;
481 *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
482 return 0;
483 }
484
485 static int
486 shmget_allocate_segment(p, uap, mode, retval)
487 struct proc *p;
488 struct shmget_args *uap;
489 int mode;
490 int * retval;
491 {
492 int i, segnum, shmid, size;
493 struct ucred *cred = p->p_ucred;
494 struct shmid_ds *shmseg;
495 struct shm_handle *shm_handle;
496 kern_return_t kret;
497 vm_offset_t user_addr;
498 void * mem_object;
499
500 if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
501 return EINVAL;
502 if (shm_nused >= shminfo.shmmni) /* any shmids left? */
503 return ENOSPC;
504 size = round_page(uap->size);
505 if (shm_committed + btoc(size) > shminfo.shmall)
506 return ENOMEM;
507 if (shm_last_free < 0) {
508 for (i = 0; i < shminfo.shmmni; i++)
509 if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
510 break;
511 if (i == shminfo.shmmni)
512 panic("shmseg free count inconsistent");
513 segnum = i;
514 } else {
515 segnum = shm_last_free;
516 shm_last_free = -1;
517 }
518 shmseg = &shmsegs[segnum];
519 /*
520 * In case we sleep in malloc(), mark the segment present but deleted
521 * so that noone else tries to create the same key.
522 */
523 kret = vm_allocate(current_map(), &user_addr, size, TRUE);
524 if (kret != KERN_SUCCESS)
525 goto out;
526
527 kret = mach_make_memory_entry (current_map(), &size,
528 user_addr, VM_PROT_DEFAULT, &mem_object, 0);
529
530 if (kret != KERN_SUCCESS)
531 goto out;
532 shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
533 shmseg->shm_perm.key = uap->key;
534 shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
535 shm_handle = (struct shm_handle *)
536 _MALLOC(sizeof(struct shm_handle), M_SHM, M_WAITOK);
537 shm_handle->shm_object = mem_object;
538 shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
539
540 shmseg->shm_internal = shm_handle;
541 shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
542 shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
543 shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
544 (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
545 shmseg->shm_segsz = uap->size;
546 shmseg->shm_cpid = p->p_pid;
547 shmseg->shm_lpid = shmseg->shm_nattch = 0;
548 shmseg->shm_atime = shmseg->shm_dtime = 0;
549 shmseg->shm_ctime = time_second;
550 shm_committed += btoc(size);
551 shm_nused++;
552 if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
553 /*
554 * Somebody else wanted this key while we were asleep. Wake
555 * them up now.
556 */
557 shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
558 wakeup((caddr_t)shmseg);
559 }
560 *retval = shmid;
561 return 0;
562 out:
563 switch (kret) {
564 case KERN_INVALID_ADDRESS:
565 case KERN_NO_SPACE:
566 return (ENOMEM);
567 case KERN_PROTECTION_FAILURE:
568 return (EACCES);
569 default:
570 return (EINVAL);
571 }
572
573 }
574
575 int
576 shmget(p, uap, retval)
577 struct proc *p;
578 struct shmget_args *uap;
579 register_t *retval;
580 {
581 int segnum, mode, error;
582
583 mode = uap->shmflg & ACCESSPERMS;
584 if (uap->key != IPC_PRIVATE) {
585 again:
586 segnum = shm_find_segment_by_key(uap->key);
587 if (segnum >= 0) {
588 error = shmget_existing(p, uap, mode, segnum, retval);
589 if (error == EAGAIN)
590 goto again;
591 return(error);
592 }
593 if ((uap->shmflg & IPC_CREAT) == 0)
594 return ENOENT;
595 }
596 return( shmget_allocate_segment(p, uap, mode, retval));;
597 /*NOTREACHED*/
598
599 }
600
601 struct shmsys_args {
602 u_int which;
603 int a2;
604 int a3;
605 int a4;
606 };
607 int
608 shmsys(p, uap, retval)
609 struct proc *p;
610 /* XXX actually varargs. */
611 struct shmsys_args *uap;
612 register_t *retval;
613 {
614
615 if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
616 return EINVAL;
617 return ((*shmcalls[uap->which])(p, &uap->a2, retval));
618 }
619
620 void
621 shmfork(p1, p2)
622 struct proc *p1, *p2;
623 {
624 struct shmmap_state *shmmap_s;
625 size_t size;
626 int i;
627
628 size = shminfo.shmseg * sizeof(struct shmmap_state);
629 shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
630 bcopy((caddr_t)p1->vm_shm, (caddr_t)shmmap_s, size);
631 p2->vm_shm = (caddr_t)shmmap_s;
632 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
633 if (shmmap_s->shmid != -1)
634 shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
635 }
636
637 void
638 shmexit(p)
639 struct proc *p;
640 {
641 struct shmmap_state *shmmap_s;
642 int i;
643
644 shmmap_s = (struct shmmap_state *)p->vm_shm;
645 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
646 if (shmmap_s->shmid != -1)
647 shm_delete_mapping(p, shmmap_s);
648 FREE((caddr_t)p->vm_shm, M_SHM);
649 p->vm_shm = NULL;
650 }
651
652 void
653 shminit(dummy)
654 void *dummy;
655 {
656 int i;
657 int s;
658
659 s = sizeof(struct shmid_ds) * shminfo.shmmni;
660
661 MALLOC(shmsegs, struct shmid_ds *, s,
662 M_SHM, M_WAITOK);
663 for (i = 0; i < shminfo.shmmni; i++) {
664 shmsegs[i].shm_perm.mode = SHMSEG_FREE;
665 shmsegs[i].shm_perm.seq = 0;
666 }
667 shm_last_free = 0;
668 shm_nused = 0;
669 shm_committed = 0;
670 }