/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <vm/vm_map.h>
extern int shmat __P((struct proc *p, struct shmat_args *uap, int *retval));
extern int shmctl __P((struct proc *p, struct shmctl_args *uap, int *retval));
extern int shmdt __P((struct proc *p, struct shmdt_args *uap, int *retval));
extern int shmget __P((struct proc *p, struct shmget_args *uap, int *retval));
static void shminit __P((void *));
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap, int *retval));
static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap,
    int mode, int *retval));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap,
    int mode, int segnum, int *retval));
typedef int sy_call_t __P((struct proc *, void *, int *));
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
        (sy_call_t *)shmat, (sy_call_t *)oshmctl,
        (sy_call_t *)shmdt, (sy_call_t *)shmget,
};
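/*
 * shmcalls[] is indexed by uap->which in shmsys() below, so the order
 * above must line up with the userland shmsys() call numbers.
 */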
#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000
static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;
struct shm_handle {
        /* vm_offset_t kva; */
        void *shm_object;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
};
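/*
 * Each process keeps an array of shminfo.shmseg shmmap_state slots
 * hanging off p->vm_shm; a shmid of -1 marks a free slot.
 */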
static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
static int
shm_find_segment_by_key(key)
        key_t key;
{
        int i;

        for (i = 0; i < shminfo.shmmni; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return i;
        return -1;
}
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
        int shmid;
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shminfo.shmmni)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
            != SHMSEG_ALLOCATED ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
                return NULL;
        return shmseg;
}
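/*
 * Release a segment's backing memory entry and return its slot to the
 * free pool; callers have already checked that no attaches remain.
 */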
static void
shm_deallocate_segment(shmseg)
        struct shmid_ds *shmseg;
{
        struct shm_handle *shm_handle;
        struct shmmap_state *shmmap_s = NULL;
        size_t size;

        shm_handle = shmseg->shm_internal;
        size = round_page(shmseg->shm_segsz);
        mach_destroy_memory_entry(shm_handle->shm_object);
        FREE((caddr_t)shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}
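/*
 * Unmap one attach from the calling process; if the segment is marked
 * removed and this was the last attach, free it for reuse.
 */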
static int
shm_delete_mapping(p, shmmap_s)
        struct proc *p;
        struct shmmap_state *shmmap_s;
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_deallocate(current_map(), shmmap_s->va, size);
        if (result != KERN_SUCCESS)
                return EINVAL;
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}
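/* shmdt(2): find the attach record matching shmaddr and remove it. */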
#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
        void *shmaddr;
};
#endif

int
shmdt(p, uap, retval)
        struct proc *p;
        struct shmdt_args *uap;
        int *retval;
{
        struct shmmap_state *shmmap_s;
        int i;

        shmmap_s = (struct shmmap_state *)p->vm_shm;
        if (shmmap_s == NULL)
                return EINVAL;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr)
                        break;
        if (i == shminfo.shmseg)
                return EINVAL;
        return shm_delete_mapping(p, shmmap_s);
}
#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
        int shmid;
        void *shmaddr;
        int shmflg;
};
#endif
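/*
 * shmat(2): map the segment's memory object into the caller's address
 * space, honoring SHM_RDONLY and SHM_RND, and record the attach.
 */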
int
shmat(p, uap, retval)
        struct proc *p;
        struct shmat_args *uap;
        int *retval;
{
        int error, i, flags;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        kern_return_t rv;

        shmmap_s = (struct shmmap_state *)p->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                p->vm_shm = (caddr_t)shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        error = ipcperm(cred, &shmseg->shm_perm,
            (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                return error;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg)
                return EMFILE;
        size = round_page(shmseg->shm_segsz);
        prot = VM_PROT_READ;
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (uap->shmaddr) {
                flags |= MAP_FIXED;
                if (uap->shmflg & SHM_RND)
                        attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
                else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
                        attach_va = (vm_offset_t)uap->shmaddr;
                else
                        return EINVAL;
        } else {
                attach_va = round_page(uap->shmaddr);
        }

        shm_handle = shmseg->shm_internal;
        rv = vm_map(current_map(), &attach_va, size, 0, (flags & MAP_FIXED)? FALSE: TRUE,
                shm_handle->shm_object, 0, FALSE, prot, prot, VM_INHERIT_DEFAULT);
        if (rv != KERN_SUCCESS)
                goto out;
        rv = vm_inherit(current_map(), attach_va, size,
                VM_INHERIT_SHARE);
        if (rv != KERN_SUCCESS) {
                (void) vm_deallocate(current_map(), attach_va, size);
                goto out;
        }

        shmmap_s->va = attach_va;
        shmmap_s->shmid = uap->shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        *retval = attach_va;
        return 0;
out:
        switch (rv) {
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return (ENOMEM);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}
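/*
 * Old-style shmid_ds layout, preserved for the historical shmctl()
 * interface handled by oshmctl() below.
 */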
struct oshmid_ds {
        struct  ipc_perm shm_perm;      /* operation perms */
        int     shm_segsz;              /* size of segment (bytes) */
        ushort  shm_cpid;               /* pid, creator */
        ushort  shm_lpid;               /* pid, last operation */
        short   shm_nattch;             /* no. of current attaches */
        time_t  shm_atime;              /* last attach time */
        time_t  shm_dtime;              /* last detach time */
        time_t  shm_ctime;              /* last change time */
        void    *shm_handle;            /* internal handle for shm segment */
};
struct oshmctl_args {
        int shmid;
        int cmd;
        struct oshmid_ds *ubuf;
};
static int
oshmctl(p, uap, retval)
        struct proc *p;
        struct oshmctl_args *uap;
        int *retval;
{
        int error;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct oshmid_ds outbuf;

        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
                if (error)
                        return error;
                outbuf.shm_perm = shmseg->shm_perm;
                outbuf.shm_segsz = shmseg->shm_segsz;
                outbuf.shm_cpid = shmseg->shm_cpid;
                outbuf.shm_lpid = shmseg->shm_lpid;
                outbuf.shm_nattch = shmseg->shm_nattch;
                outbuf.shm_atime = shmseg->shm_atime;
                outbuf.shm_dtime = shmseg->shm_dtime;
                outbuf.shm_ctime = shmseg->shm_ctime;
                outbuf.shm_handle = shmseg->shm_internal;
                error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
                if (error)
                        return error;
                break;
        default:
                /* XXX casting to (sy_call_t *) is bogus, as usual. */
                return ((sy_call_t *)shmctl)(p, uap, retval);
        }
        return 0;
}
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
        int shmid;
        int cmd;
        struct shmid_ds *buf;
};
#endif
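/* shmctl(2): IPC_STAT, IPC_SET and IPC_RMID on an existing segment. */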
int
shmctl(p, uap, retval)
        struct proc *p;
        struct shmctl_args *uap;
        int *retval;
{
        int error;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds inbuf;
        struct shmid_ds *shmseg;

        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
                if (error)
                        return error;
                error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
                if (error)
                        return error;
                break;
        case IPC_SET:
                error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
                if (error)
                        return error;
                error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
                if (error)
                        return error;
                shmseg->shm_perm.uid = inbuf.shm_perm.uid;
                shmseg->shm_perm.gid = inbuf.shm_perm.gid;
                shmseg->shm_perm.mode =
                    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                    (inbuf.shm_perm.mode & ACCESSPERMS);
                shmseg->shm_ctime = time_second;
                break;
        case IPC_RMID:
                error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
                if (error)
                        return error;
                shmseg->shm_perm.key = IPC_PRIVATE;
                shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->shm_nattch <= 0) {
                        shm_deallocate_segment(shmseg);
                        shm_last_free = IPCID_TO_IX(uap->shmid);
                }
                break;
        default:
                return EINVAL;
        }
        return 0;
}
#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
        key_t key;
        size_t size;
        int shmflg;
};
#endif
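/*
 * shmget() on an existing key: wait out a segment still being set up
 * or torn down, then check permissions, size and IPC_CREAT|IPC_EXCL.
 */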
static int
shmget_existing(p, uap, mode, segnum, retval)
        struct proc *p;
        struct shmget_args *uap;
        int mode;
        int segnum;
        int *retval;
{
        struct shmid_ds *shmseg;
        struct ucred *cred = p->p_ucred;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        error = ipcperm(cred, &shmseg->shm_perm, mode);
        if (error)
                return error;
        if (uap->size && uap->size > shmseg->shm_segsz)
                return EINVAL;
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}
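/*
 * Create a new segment: pick a free slot, allocate pageable memory in
 * the caller's map and wrap it in a Mach memory entry for attaches.
 */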
static int
shmget_allocate_segment(p, uap, mode, retval)
        struct proc *p;
        struct shmget_args *uap;
        int mode;
        int *retval;
{
        int i, segnum, shmid, size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;
        kern_return_t kret;
        vm_offset_t user_addr;
        void *mem_object;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                for (i = 0; i < shminfo.shmmni; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shminfo.shmmni)
                        panic("shmseg free count inconsistent");
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that noone else tries to create the same key.
         */
        kret = vm_allocate(current_map(), &user_addr, size, TRUE);
        if (kret != KERN_SUCCESS)
                goto out;

        kret = mach_make_memory_entry(current_map(), &size,
                        user_addr, VM_PROT_DEFAULT, &mem_object, 0);

        if (kret != KERN_SUCCESS)
                goto out;
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = (struct shm_handle *)
            _MALLOC(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shm_handle->shm_object = mem_object;
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        *retval = shmid;
        return 0;
out:
        switch (kret) {
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return (ENOMEM);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}
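/* shmget(2): look the key up (unless IPC_PRIVATE) or allocate anew. */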
int
shmget(p, uap, retval)
        struct proc *p;
        struct shmget_args *uap;
        int *retval;
{
        int segnum, mode, error;

        mode = uap->shmflg & ACCESSPERMS;
        if (uap->key != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum, retval);
                        if (error == EAGAIN)
                                goto again;
                        return error;
                }
                if ((uap->shmflg & IPC_CREAT) == 0)
                        return ENOENT;
        }
        return (shmget_allocate_segment(p, uap, mode, retval));
}
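/*
 * The historical shmsys(2) multiplexer: uap->which picks the real call
 * out of shmcalls[] and the remaining argument words pass through.
 */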
#ifndef _SYS_SYSPROTO_H_
struct shmsys_args {
        u_int which;
        int a2;
        int a3;
        int a4;
};
#endif

int
shmsys(p, uap, retval)
        struct proc *p;
        /* XXX actually varargs. */
        struct shmsys_args *uap;
        int *retval;
{

        if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return EINVAL;
        return ((*shmcalls[uap->which])(p, &uap->a2, retval));
}
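/*
 * Called at fork: give the child its own copy of the parent's attach
 * table and bump the attach count on every mapped segment.
 */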
void
shmfork(p1, p2)
        struct proc *p1, *p2;
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->vm_shm, (caddr_t)shmmap_s, size);
        p2->vm_shm = (caddr_t)shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}
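/* Called at exit: detach every segment, then free the attach table. */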
void
shmexit(p)
        struct proc *p;
{
        struct shmmap_state *shmmap_s;
        int i;

        shmmap_s = (struct shmmap_state *)p->vm_shm;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shm_delete_mapping(p, shmmap_s);
        FREE((caddr_t)p->vm_shm, M_SHM);
        p->vm_shm = NULL;
}
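/* Bootstrap: allocate shminfo.shmmni segment slots, all initially free. */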
static void
shminit(dummy)
        void *dummy;
{
        int i;
        int s;

        s = sizeof(struct shmid_ds) * shminfo.shmmni;

        MALLOC(shmsegs, struct shmid_ds *, s,
            M_SHM, M_WAITOK);
        for (i = 0; i < shminfo.shmmni; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
}
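/*
 * Userland usage sketch (illustration only, not part of this kernel
 * file): the syscalls above implement the standard SysV shm API, so a
 * minimal client built against <sys/ipc.h> and <sys/shm.h> might do:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = (char *)shmat(id, NULL, 0);
 *	p[0] = 'x';
 *	(void)shmdt(p);
 *	(void)shmctl(id, IPC_RMID, NULL);
 */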