1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1990, 1996-1998 Apple Computer, Inc.
30 * All Rights Reserved.
31 */
32 /*
33 * posix_shm.c : Support for POSIX shared memory APIs
34 *
35 * File: posix_shm.c
36 * Author: Ananthakrishna Ramesh
37 *
38 * HISTORY
39 * 2-Sep-1999 A.Ramesh
40 * Created for MacOSX
41 *
42 */
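/*
 * For orientation, a minimal userland usage sketch of the interface
 * implemented below (illustrative only; error handling omitted).
 * shm_open() and shm_unlink() enter this file directly; ftruncate()
 * and mmap() on the returned descriptor reach pshm_truncate() and
 * pshm_mmap() respectively.
 *
 *	int fd = shm_open("/myregion", O_CREAT | O_RDWR, 0600);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	shm_unlink("/myregion");
 */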
43 /*
44 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
45 * support for mandatory and extensible security protections. This notice
46 * is included in support of clause 2.2 (b) of the Apple Public License,
47 * Version 2.0.
48 */
49
50 #include <sys/cdefs.h>
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/file_internal.h>
55 #include <sys/filedesc.h>
56 #include <sys/stat.h>
57 #include <sys/proc_internal.h>
58 #include <sys/kauth.h>
59 #include <sys/mount.h>
60 #include <sys/namei.h>
61 #include <sys/vnode.h>
62 #include <sys/vnode_internal.h>
63 #include <sys/ioctl.h>
64 #include <sys/tty.h>
65 #include <sys/malloc.h>
66 #include <sys/mman.h>
67 #include <sys/stat.h>
68 #include <sys/sysproto.h>
69 #include <sys/proc_info.h>
70 #include <security/audit/audit.h>
71
72 #if CONFIG_MACF
73 #include <security/mac_framework.h>
74 #endif
75
76 #include <mach/mach_types.h>
77 #include <mach/mach_vm.h>
78 #include <mach/vm_map.h>
79 #include <mach/vm_prot.h>
80 #include <mach/vm_inherit.h>
81 #include <mach/kern_return.h>
82 #include <mach/memory_object_control.h>
83
84 #include <vm/vm_map.h>
85 #include <vm/vm_protos.h>
86
87 #define f_flag f_fglob->fg_flag
88 #define f_type f_fglob->fg_ops->fo_type
89 #define f_msgcount f_fglob->fg_msgcount
90 #define f_cred f_fglob->fg_cred
91 #define f_ops f_fglob->fg_ops
92 #define f_offset f_fglob->fg_offset
93 #define f_data f_fglob->fg_data
94 #define PSHMNAMLEN 31 /* maximum name segment length we bother with */
95
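/*
 * Each pshmobj wraps one Mach memory entry backing (part of) a shared
 * memory object.  Objects larger than ANON_MAX_SIZE are backed by a
 * chain of memory entries, linked through pshmo_next and headed by
 * pshminfo.pshm_memobjects (see pshm_truncate() below).
 */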
96 struct pshmobj {
97 void * pshmo_memobject;
98 memory_object_size_t pshmo_size;
99 struct pshmobj * pshmo_next;
100 };
101
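/*
 * One pshminfo exists per named shared memory object.  It is created
 * by shm_open(), found again through the small name cache below, and
 * freed once its last reference goes away; the name itself holds an
 * "existence" reference that is dropped by shm_unlink().
 */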
102 struct pshminfo {
103 unsigned int pshm_flags;
104 unsigned int pshm_usecount;
105 off_t pshm_length;
106 mode_t pshm_mode;
107 uid_t pshm_uid;
108 gid_t pshm_gid;
109 char pshm_name[PSHMNAMLEN + 1]; /* segment name */
110 struct pshmobj *pshm_memobjects;
111 #if DIAGNOSTIC
112 unsigned int pshm_readcount;
113 unsigned int pshm_writecount;
114 proc_t pshm_proc;
115 #endif /* DIAGNOSTIC */
116 struct label* pshm_label;
117 };
118 #define PSHMINFO_NULL (struct pshminfo *)0
119
120 #define PSHM_NONE 0x001
121 #define PSHM_DEFINED 0x002
122 #define PSHM_ALLOCATED 0x004
123 #define PSHM_MAPPED 0x008
124 #define PSHM_INUSE 0x010
125 #define PSHM_REMOVED 0x020
126 #define PSHM_INCREATE 0x040
127 #define PSHM_INDELETE 0x080
128 #define PSHM_ALLOCATING 0x100
129
130 struct pshmcache {
131 LIST_ENTRY(pshmcache) pshm_hash; /* hash chain */
132 struct pshminfo *pshminfo; /* pshm entry the name refers to */
133 int pshm_nlen; /* length of name */
134 char pshm_name[PSHMNAMLEN + 1]; /* segment name */
135 };
136 #define PSHMCACHE_NULL (struct pshmcache *)0
137
138 struct pshmstats {
139 long goodhits; /* hits that we can really use */
140 long neghits; /* negative hits that we can use */
141 long badhits; /* hits we must drop */
142 long falsehits; /* hits with id mismatch */
143 long miss; /* misses */
144 long longnames; /* long names that ignore cache */
145 };
146
147 struct pshmname {
148 char *pshm_nameptr; /* pointer to looked up name */
149 long pshm_namelen; /* length of looked up component */
150 u_long pshm_hash; /* hash value of looked up name */
151 };
152
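/*
 * Per-open state for a POSIX shm descriptor: the fg_data of a
 * DTYPE_PSXSHM fileglob points to a pshmnode, which in turn points
 * to the shared pshminfo and records the mapping established by
 * pshm_mmap().
 */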
153 struct pshmnode {
154 off_t mapp_addr;
155 user_size_t map_size; /* XXX unused ? */
156 struct pshminfo *pinfo;
157 unsigned int pshm_usecount;
158 #if DIAGNOSTIC
159 unsigned int readcnt;
160 unsigned int writecnt;
161 #endif
162 };
163 #define PSHMNODE_NULL (struct pshmnode *)0
164
165
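/*
 * Name cache hash table.  pshm_hash is computed by shm_open() and
 * shm_unlink() as a position-weighted sum of the name's bytes and is
 * masked down to a bucket index here.
 */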
166 #define PSHMHASH(pnp) \
167 (&pshmhashtbl[(pnp)->pshm_hash & pshmhash])
168
169 LIST_HEAD(pshmhashhead, pshmcache) *pshmhashtbl; /* Hash Table */
170 u_long pshmhash; /* size of hash table - 1 */
171 long pshmnument; /* number of cache entries allocated */
172 struct pshmstats pshmstats; /* cache effectiveness statistics */
173
174 static int pshm_read (struct fileproc *fp, struct uio *uio,
175 int flags, vfs_context_t ctx);
176 static int pshm_write (struct fileproc *fp, struct uio *uio,
177 int flags, vfs_context_t ctx);
178 static int pshm_ioctl (struct fileproc *fp, u_long com,
179 caddr_t data, vfs_context_t ctx);
180 static int pshm_select (struct fileproc *fp, int which, void *wql, vfs_context_t ctx);
181 static int pshm_close(struct pshminfo *pinfo, int dropref);
182 static int pshm_closefile (struct fileglob *fg, vfs_context_t ctx);
183
184 static int pshm_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx);
185
186 int pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, proc_t p);
187 static int pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp);
188 static void pshm_cache_delete(struct pshmcache *pcp);
189 #if NOT_USED
190 static void pshm_cache_purge(void);
191 #endif /* NOT_USED */
192 static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
193 struct pshmcache **pcache, int addref);
194
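/*
 * File operations for DTYPE_PSXSHM descriptors.  read/write/ioctl/
 * select/kqfilter are not supported and simply return ENOTSUP; the
 * interesting paths are close (pshm_closefile) plus the mmap and
 * ftruncate hooks (pshm_mmap and pshm_truncate) reached from the
 * generic syscall code.
 */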
195 static const struct fileops pshmops = {
196 DTYPE_PSXSHM,
197 pshm_read,
198 pshm_write,
199 pshm_ioctl,
200 pshm_select,
201 pshm_closefile,
202 pshm_kqfilter,
203 0
204 };
205
206 static lck_grp_t *psx_shm_subsys_lck_grp;
207 static lck_grp_attr_t *psx_shm_subsys_lck_grp_attr;
208 static lck_attr_t *psx_shm_subsys_lck_attr;
209 static lck_mtx_t psx_shm_subsys_mutex;
210
211 #define PSHM_SUBSYS_LOCK() lck_mtx_lock(& psx_shm_subsys_mutex)
212 #define PSHM_SUBSYS_UNLOCK() lck_mtx_unlock(& psx_shm_subsys_mutex)
213
214
215 /* Initialize the mutex governing access to the posix shm subsystem */
216 __private_extern__ void
217 pshm_lock_init( void )
218 {
219
220 psx_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
221
222 psx_shm_subsys_lck_grp = lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr);
223
224 psx_shm_subsys_lck_attr = lck_attr_alloc_init();
225 lck_mtx_init(& psx_shm_subsys_mutex, psx_shm_subsys_lck_grp, psx_shm_subsys_lck_attr);
226 }
227
228 /*
229 * Lookup an entry in the cache
230 *
231 *
232 * A status of -1 is returned if a match is found.
233 * If the lookup determines that the name does not exist
234 * (negative caching), a status of ENOENT is returned. If the lookup
235 * fails, a status of zero is returned.
236 */
237
238 static int
239 pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
240 struct pshmcache **pcache, int addref)
241 {
242 struct pshmcache *pcp, *nnp;
243 struct pshmhashhead *pcpp;
244
245 if (pnp->pshm_namelen > PSHMNAMLEN) {
246 pshmstats.longnames++;
247 return (0);
248 }
249
250 pcpp = PSHMHASH(pnp);
251 for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) {
252 nnp = pcp->pshm_hash.le_next;
253 if (pcp->pshm_nlen == pnp->pshm_namelen &&
254 !bcmp(pcp->pshm_name, pnp->pshm_nameptr, (u_int)pcp->pshm_nlen))
255 break;
256 }
257
258 if (pcp == 0) {
259 pshmstats.miss++;
260 return (0);
261 }
262
263 /* We found a "positive" match, return the pshminfo */
264 if (pcp->pshminfo) {
265 pshmstats.goodhits++;
266 /* TOUCH(ncp); */
267 *pshmp = pcp->pshminfo;
268 *pcache = pcp;
269 if (addref)
270 pcp->pshminfo->pshm_usecount++;
271 return (-1);
272 }
273
274 /*
275 * We found a "negative" match, ENOENT notifies client of this match.
276 */
277 pshmstats.neghits++;
278 return (ENOENT);
279 }
280
281 /*
282 * Add an entry to the cache.
283 * XXX should be static?
284 */
285 static int
286 pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp)
287 {
288 struct pshmhashhead *pcpp;
289 struct pshminfo *dpinfo;
290 struct pshmcache *dpcp;
291
292 #if DIAGNOSTIC
293 if (pnp->pshm_namelen > PSHMNAMLEN)
294 panic("cache_enter: name too long");
295 #endif
296
297
298 /* if the entry has already been added by someone else, return */
299 if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == -1) {
300 return(EEXIST);
301 }
302 pshmnument++;
303
304 /*
305 * Fill in cache info; if the pshminfo is NULL this is a "negative" cache entry.
306 */
307 pcp->pshminfo = pshmp;
308 pcp->pshm_nlen = pnp->pshm_namelen;
309 bcopy(pnp->pshm_nameptr, pcp->pshm_name, (unsigned)pcp->pshm_nlen);
310 pcpp = PSHMHASH(pnp);
311 #if DIAGNOSTIC
312 {
313 struct pshmcache *p;
314
315 for (p = pcpp->lh_first; p != 0; p = p->pshm_hash.le_next)
316 if (p == pcp)
317 panic("cache_enter: duplicate");
318 }
319 #endif
320 LIST_INSERT_HEAD(pcpp, pcp, pshm_hash);
321 return(0);
322 }
323
324 /*
325 * Name cache initialization, from vfs_init() when we are booting
326 */
327 void
328 pshm_cache_init(void)
329 {
330 pshmhashtbl = hashinit(desiredvnodes / 8, M_SHM, &pshmhash);
331 }
332
333 #if NOT_USED
334 /*
335 * Purge all entries from the cache.
336 *
337 * There are no vnodes or v_id generation counts to manage here; we
338 * simply walk every hash chain and delete each entry outright, so
339 * nothing survives a purge. This routine is currently compiled out
340 * (NOT_USED).
341 */
342 static void
343 pshm_cache_purge(void)
344 {
345 struct pshmcache *pcp;
346 struct pshmhashhead *pcpp;
347
348 for (pcpp = &pshmhashtbl[pshmhash]; pcpp >= pshmhashtbl; pcpp--) {
349 while ( (pcp = pcpp->lh_first) )
350 pshm_cache_delete(pcp);
351 }
352 }
353 #endif /* NOT_USED */
354
355 static void
356 pshm_cache_delete(struct pshmcache *pcp)
357 {
358 #if DIAGNOSTIC
359 if (pcp->pshm_hash.le_prev == 0)
360 panic("namecache purge le_prev");
361 if (pcp->pshm_hash.le_next == pcp)
362 panic("namecache purge le_next");
363 #endif /* DIAGNOSTIC */
364 LIST_REMOVE(pcp, pshm_hash);
365 pcp->pshm_hash.le_prev = 0;
366 pshmnument--;
367 }
368
369
370 int
371 shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval)
372 {
373 size_t i;
374 int indx, error;
375 struct pshmname nd;
376 struct pshminfo *pinfo;
377 struct fileproc *fp = NULL;
378 char *pnbuf = NULL;
379 struct pshminfo *new_pinfo = PSHMINFO_NULL;
380 struct pshmnode *new_pnode = PSHMNODE_NULL;
381 struct pshmcache *pcache = PSHMCACHE_NULL; /* ignored on return */
382 char * nameptr;
383 char * cp;
384 size_t pathlen, plen;
385 int fmode ;
386 int cmode = uap->mode;
387 int incache = 0;
388 struct pshmcache *pcp = NULL;
389
390 AUDIT_ARG(fflags, uap->oflag);
391 AUDIT_ARG(mode, uap->mode);
392
393 pinfo = PSHMINFO_NULL;
394
395 /*
396 * Preallocate everything we might need up front to avoid taking
397 * and dropping the lock, opening us up to race conditions.
398 */
399 MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
400 if (pnbuf == NULL) {
401 error = ENOSPC;
402 goto bad;
403 }
404
405 pathlen = MAXPATHLEN;
406 error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);
407 if (error) {
408 goto bad;
409 }
410 AUDIT_ARG(text, pnbuf);
411 if (pathlen > PSHMNAMLEN) {
412 error = ENAMETOOLONG;
413 goto bad;
414 }
415 #ifdef PSXSHM_NAME_RESTRICT
416 nameptr = pnbuf;
417 if (*nameptr == '/') {
418 while (*(nameptr++) == '/') {
419 plen--;
420 error = EINVAL;
421 goto bad;
422 }
423 } else {
424 error = EINVAL;
425 goto bad;
426 }
427 #endif /* PSXSHM_NAME_RESTRICT */
428
429 plen = pathlen;
430 nameptr = pnbuf;
431 nd.pshm_nameptr = nameptr;
432 nd.pshm_namelen = plen;
433 nd.pshm_hash = 0;
434
435 for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) {
436 nd.pshm_hash += (unsigned char)*cp * i;
437 }
438
439 /*
440 * attempt to allocate a new fp; if unsuccessful, the fp will be
441 * left unmodified (NULL).
442 */
443 error = falloc(p, &fp, &indx, vfs_context_current());
444 if (error)
445 goto bad;
446
447 cmode &= ALLPERMS;
448
449 fmode = FFLAGS(uap->oflag);
450 if ((fmode & (FREAD | FWRITE)) == 0) {
451 error = EINVAL;
452 goto bad;
453 }
454
455 /*
456 * Preallocate the cache entry now; if the object turns out to
457 * already be in the cache, this entry is freed again before
458 * returning.
459 */
460 MALLOC(pcp, struct pshmcache *, sizeof(struct pshmcache), M_SHM, M_WAITOK|M_ZERO);
461 if (pcp == NULL) {
462 error = ENOSPC;
463 goto bad;
464 }
465
466 MALLOC(new_pinfo, struct pshminfo *, sizeof(struct pshminfo), M_SHM, M_WAITOK|M_ZERO);
467 if (new_pinfo == PSHMINFO_NULL) {
468 error = ENOSPC;
469 goto bad;
470 }
471 #if CONFIG_MACF
472 mac_posixshm_label_init(new_pinfo);
473 #endif
474
475 MALLOC(new_pnode, struct pshmnode *, sizeof(struct pshmnode), M_SHM, M_WAITOK|M_ZERO);
476 if (new_pnode == PSHMNODE_NULL) {
477 error = ENOSPC;
478 goto bad;
479 }
480
481 PSHM_SUBSYS_LOCK();
482
483 /*
484 * If we find the entry in the cache, this will take a reference,
485 * allowing us to unlock it for the permissions check.
486 */
487 error = pshm_cache_search(&pinfo, &nd, &pcache, 1);
488
489 PSHM_SUBSYS_UNLOCK();
490
491 if (error == ENOENT) {
492 error = EINVAL;
493 goto bad;
494 }
495
496 if (!error) {
497 incache = 0;
498 if (fmode & O_CREAT) {
499 /* create a new one (commit the allocation) */
500 pinfo = new_pinfo;
501 pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE;
502 pinfo->pshm_usecount = 1; /* existence reference */
503 pinfo->pshm_mode = cmode;
504 pinfo->pshm_uid = kauth_getuid();
505 pinfo->pshm_gid = kauth_getgid();
506 bcopy(pnbuf, &pinfo->pshm_name[0], pathlen);
507 pinfo->pshm_name[pathlen]=0;
508 #if CONFIG_MACF
509 error = mac_posixshm_check_create(kauth_cred_get(), nameptr);
510 if (error) {
511 goto bad;
512 }
513 mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr);
514 #endif
515 }
516 } else {
517 incache = 1;
518 if (fmode & O_CREAT) {
519 /* already exists */
520 if ((fmode & O_EXCL)) {
521 AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
522 pinfo->pshm_gid,
523 pinfo->pshm_mode);
524
525 /* shm obj exists and opened O_EXCL */
526 error = EEXIST;
527 goto bad;
528 }
529
530 if( pinfo->pshm_flags & PSHM_INDELETE) {
531 error = ENOENT;
532 goto bad;
533 }
534 AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
535 pinfo->pshm_gid, pinfo->pshm_mode);
536 #if CONFIG_MACF
537 if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
538 goto bad;
539 }
540 #endif
541 if ( (error = pshm_access(pinfo, fmode, kauth_cred_get(), p)) ) {
542 goto bad;
543 }
544 }
545 }
546 if (!(fmode & O_CREAT)) {
547 if (!incache) {
548 /* O_CREAT is not set and the object does not exist */
549 error = ENOENT;
550 goto bad;
551 }
552 if( pinfo->pshm_flags & PSHM_INDELETE) {
553 error = ENOENT;
554 goto bad;
555 }
556 #if CONFIG_MACF
557 if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
558 goto bad;
559 }
560 #endif
561
562 if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {
563 goto bad;
564 }
565 }
566 if (fmode & O_TRUNC) {
567 error = EINVAL;
568 goto bad;
569 }
570
571
572 PSHM_SUBSYS_LOCK();
573
574 #if DIAGNOSTIC
575 if (fmode & FWRITE)
576 pinfo->pshm_writecount++;
577 if (fmode & FREAD)
578 pinfo->pshm_readcount++;
579 #endif
580 if (!incache) {
581 /* if successful, this will consume the pcp */
582 if ( (error = pshm_cache_add(pinfo, &nd, pcp)) ) {
583 goto bad_locked;
584 }
585 /*
586 * add reference for the new entry; otherwise, we obtained
587 * one from the cache hit earlier.
588 */
589 pinfo->pshm_usecount++;
590 }
591 pinfo->pshm_flags &= ~PSHM_INCREATE;
592 new_pnode->pinfo = pinfo;
593
594 PSHM_SUBSYS_UNLOCK();
595
596 /*
597 * if incache, we did not use the new pcp or new_pinfo and must
598 * free them
599 */
600 if (incache) {
601 FREE(pcp, M_SHM);
602
603 if (new_pinfo != PSHMINFO_NULL) {
604 #if CONFIG_MACF
605 mac_posixshm_label_destroy(new_pinfo);
606 #endif
607 FREE(new_pinfo, M_SHM);
608 }
609 }
610
611 proc_fdlock(p);
612 fp->f_flag = fmode & FMASK;
613 fp->f_ops = &pshmops;
614 fp->f_data = (caddr_t)new_pnode;
615 *fdflags(p, indx) |= UF_EXCLOSE;
616 procfdtbl_releasefd(p, indx, NULL);
617 fp_drop(p, indx, fp, 1);
618 proc_fdunlock(p);
619
620 *retval = indx;
621 FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
622 return (0);
623
624 bad_locked:
625 PSHM_SUBSYS_UNLOCK();
626 bad:
627 /*
628 * If we obtained the entry from the cache, we need to drop the
629 * reference; holding the reference may have prevented unlinking,
630 * so we need to call pshm_close() to get the full effect.
631 */
632 if (incache) {
633 PSHM_SUBSYS_LOCK();
634 pshm_close(pinfo, 1);
635 PSHM_SUBSYS_UNLOCK();
636 }
637
638 if (pcp != NULL)
639 FREE(pcp, M_SHM);
640
641 if (new_pnode != PSHMNODE_NULL)
642 FREE(new_pnode, M_SHM);
643
644 if (fp != NULL)
645 fp_free(p, indx, fp);
646
647 if (new_pinfo != PSHMINFO_NULL) {
648 #if CONFIG_MACF
649 mac_posixshm_label_destroy(new_pinfo);
650 #endif
651 FREE(new_pinfo, M_SHM);
652 }
653 if (pnbuf != NULL)
654 FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
655 return (error);
656 }
657
658
659 int
660 pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd,
661 off_t length, __unused int32_t *retval)
662 {
663 struct pshminfo * pinfo;
664 struct pshmnode * pnode ;
665 kern_return_t kret;
666 mem_entry_name_port_t mem_object;
667 mach_vm_size_t total_size, alloc_size;
668 memory_object_size_t mosize;
669 struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p;
670 vm_map_t user_map;
671 #if CONFIG_MACF
672 int error;
673 #endif
674
675 user_map = current_map();
676
677 if (fp->f_type != DTYPE_PSXSHM) {
678 return(EINVAL);
679 }
680
681
682 if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL )
683 return(EINVAL);
684
685 PSHM_SUBSYS_LOCK();
686 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
687 PSHM_SUBSYS_UNLOCK();
688 return(EINVAL);
689 }
690 if ((pinfo->pshm_flags & (PSHM_DEFINED|PSHM_ALLOCATING|PSHM_ALLOCATED))
691 != PSHM_DEFINED) {
692 PSHM_SUBSYS_UNLOCK();
693 return(EINVAL);
694 }
695 #if CONFIG_MACF
696 error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, length);
697 if (error) {
698 PSHM_SUBSYS_UNLOCK();
699 return(error);
700 }
701 #endif
702
703 pinfo->pshm_flags |= PSHM_ALLOCATING;
704 total_size = vm_map_round_page(length,
705 vm_map_page_mask(user_map));
706 pshmobj_next_p = &pinfo->pshm_memobjects;
707
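/*
 * Back the object with one or more Mach memory entries of at most
 * ANON_MAX_SIZE bytes each.  The subsystem lock is dropped around each
 * allocation; PSHM_ALLOCATING is set so that concurrent truncate or
 * unlink attempts fail (EINVAL/EAGAIN) rather than racing with us.
 */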
708 for (alloc_size = 0;
709 alloc_size < total_size;
710 alloc_size += mosize) {
711
712 PSHM_SUBSYS_UNLOCK();
713
714 mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE);
715 kret = mach_make_memory_entry_64(
716 VM_MAP_NULL,
717 &mosize,
718 0,
719 MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
720 &mem_object,
721 0);
722
723 if (kret != KERN_SUCCESS)
724 goto out;
725
726 MALLOC(pshmobj, struct pshmobj *, sizeof (struct pshmobj),
727 M_SHM, M_WAITOK);
728 if (pshmobj == NULL) {
729 kret = KERN_NO_SPACE;
730 mach_memory_entry_port_release(mem_object);
731 mem_object = NULL;
732 goto out;
733 }
734
735 PSHM_SUBSYS_LOCK();
736
737 pshmobj->pshmo_memobject = (void *) mem_object;
738 pshmobj->pshmo_size = mosize;
739 pshmobj->pshmo_next = NULL;
740
741 *pshmobj_next_p = pshmobj;
742 pshmobj_next_p = &pshmobj->pshmo_next;
743 }
744
745 pinfo->pshm_flags = PSHM_ALLOCATED;
746 pinfo->pshm_length = total_size;
747 PSHM_SUBSYS_UNLOCK();
748 return(0);
749
750 out:
751 PSHM_SUBSYS_LOCK();
752 for (pshmobj = pinfo->pshm_memobjects;
753 pshmobj != NULL;
754 pshmobj = pshmobj_next) {
755 pshmobj_next = pshmobj->pshmo_next;
756 mach_memory_entry_port_release(pshmobj->pshmo_memobject);
757 FREE(pshmobj, M_SHM);
758 }
759 pinfo->pshm_memobjects = NULL;
760 pinfo->pshm_flags &= ~PSHM_ALLOCATING;
761 PSHM_SUBSYS_UNLOCK();
762
763 switch (kret) {
764 case KERN_INVALID_ADDRESS:
765 case KERN_NO_SPACE:
766 return (ENOMEM);
767 case KERN_PROTECTION_FAILURE:
768 return (EACCES);
769 default:
770 return (EINVAL);
771
772 }
773 }
774
775 int
776 pshm_stat(struct pshmnode *pnode, void *ub, int isstat64)
777 {
778 struct stat *sb = (struct stat *)0; /* warning avoidance; protected by isstat64 */
779 struct stat64 *sb64 = (struct stat64 *)0; /* warning avoidance; protected by isstat64 */
780 struct pshminfo *pinfo;
781 #if CONFIG_MACF
782 int error;
783 #endif
784
785 PSHM_SUBSYS_LOCK();
786 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL){
787 PSHM_SUBSYS_UNLOCK();
788 return(EINVAL);
789 }
790
791 #if CONFIG_MACF
792 error = mac_posixshm_check_stat(kauth_cred_get(), pinfo);
793 if (error) {
794 PSHM_SUBSYS_UNLOCK();
795 return(error);
796 }
797 #endif
798
799 if (isstat64 != 0) {
800 sb64 = (struct stat64 *)ub;
801 bzero(sb64, sizeof(struct stat64));
802 sb64->st_mode = pinfo->pshm_mode;
803 sb64->st_uid = pinfo->pshm_uid;
804 sb64->st_gid = pinfo->pshm_gid;
805 sb64->st_size = pinfo->pshm_length;
806 } else {
807 sb = (struct stat *)ub;
808 bzero(sb, sizeof(struct stat));
809 sb->st_mode = pinfo->pshm_mode;
810 sb->st_uid = pinfo->pshm_uid;
811 sb->st_gid = pinfo->pshm_gid;
812 sb->st_size = pinfo->pshm_length;
813 }
814 PSHM_SUBSYS_UNLOCK();
815
816 return(0);
817 }
818
819 /*
820 * This is called from shm_open() and shm_unlink().
821 * XXX This code is repeated many times
822 */
823 int
824 pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, __unused proc_t p)
825 {
826 int mode_req = ((mode & FREAD) ? S_IRUSR : 0) |
827 ((mode & FWRITE) ? S_IWUSR : 0);
828
829 /* User id 0 always gets access. */
830 if (!suser(cred, NULL))
831 return (0);
832
833 return(posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req));
834 }
835
836 int
837 pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct fileproc *fp, off_t pageoff)
838 {
839 vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr;
840 vm_map_size_t user_size = (vm_map_size_t)uap->len ;
841 vm_map_offset_t user_start_addr;
842 vm_map_size_t map_size, mapped_size;
843 int prot = uap->prot;
844 int flags = uap->flags;
845 vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
846 vm_object_offset_t map_pos;
847 vm_map_t user_map;
848 int alloc_flags;
849 boolean_t docow;
850 kern_return_t kret;
851 struct pshminfo * pinfo;
852 struct pshmnode * pnode;
853 struct pshmobj * pshmobj;
854 #if CONFIG_MACF
855 int error;
856 #endif
857
858 if (user_size == 0)
859 return(0);
860
861 if ((flags & MAP_SHARED) == 0)
862 return(EINVAL);
863
864
865 if ((prot & PROT_WRITE) && ((fp->f_flag & FWRITE) == 0)) {
866 return(EPERM);
867 }
868
869 if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL )
870 return(EINVAL);
871
872 PSHM_SUBSYS_LOCK();
873 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
874 PSHM_SUBSYS_UNLOCK();
875 return(EINVAL);
876 }
877
878 if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) {
879 PSHM_SUBSYS_UNLOCK();
880 return(EINVAL);
881 }
882 if ((off_t)user_size > pinfo->pshm_length) {
883 PSHM_SUBSYS_UNLOCK();
884 return(EINVAL);
885 }
886 if ((off_t)(user_size + file_pos) > pinfo->pshm_length) {
887 PSHM_SUBSYS_UNLOCK();
888 return(EINVAL);
889 }
890 if ((pshmobj = pinfo->pshm_memobjects) == NULL) {
891 PSHM_SUBSYS_UNLOCK();
892 return(EINVAL);
893 }
894
895 #if CONFIG_MACF
896 error = mac_posixshm_check_mmap(kauth_cred_get(), pinfo, prot, flags);
897 if (error) {
898 PSHM_SUBSYS_UNLOCK();
899 return(error);
900 }
901 #endif
902
903 PSHM_SUBSYS_UNLOCK();
904 user_map = current_map();
905
906 if ((flags & MAP_FIXED) == 0) {
907 alloc_flags = VM_FLAGS_ANYWHERE;
908 user_addr = vm_map_round_page(user_addr,
909 vm_map_page_mask(user_map));
910 } else {
911 if (user_addr != vm_map_round_page(user_addr,
912 vm_map_page_mask(user_map)))
913 return (EINVAL);
914 /*
915 * We do not get rid of the existing mappings here because
916 * it wouldn't be atomic (see comment in mmap()). We let
917 * Mach VM know that we want it to replace any existing
918 * mapping with the new one.
919 */
920 alloc_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
921 }
922 docow = FALSE;
923
924 mapped_size = 0;
925
926 /* reserve the entire space first... */
927 kret = vm_map_enter_mem_object(user_map,
928 &user_addr,
929 user_size,
930 0,
931 alloc_flags,
932 IPC_PORT_NULL,
933 0,
934 FALSE,
935 VM_PROT_NONE,
936 VM_PROT_NONE,
937 VM_INHERIT_NONE);
938 user_start_addr = user_addr;
939 if (kret != KERN_SUCCESS) {
940 goto out;
941 }
942
943 /* ... and overwrite with the real mappings */
944 for (map_pos = 0, pshmobj = pinfo->pshm_memobjects;
945 user_size != 0;
946 map_pos += pshmobj->pshmo_size, pshmobj = pshmobj->pshmo_next) {
947 if (pshmobj == NULL) {
948 /* nothing there to map !? */
949 goto out;
950 }
951 if (file_pos >= map_pos + pshmobj->pshmo_size) {
952 continue;
953 }
954 map_size = pshmobj->pshmo_size - (file_pos - map_pos);
955 if (map_size > user_size) {
956 map_size = user_size;
957 }
958 kret = vm_map_enter_mem_object(
959 user_map,
960 &user_addr,
961 map_size,
962 0,
963 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
964 pshmobj->pshmo_memobject,
965 file_pos - map_pos,
966 docow,
967 prot,
968 VM_PROT_DEFAULT,
969 VM_INHERIT_SHARE);
970 if (kret != KERN_SUCCESS)
971 goto out;
972
973 user_addr += map_size;
974 user_size -= map_size;
975 mapped_size += map_size;
976 file_pos += map_size;
977 }
978
979 PSHM_SUBSYS_LOCK();
980 pnode->mapp_addr = user_start_addr;
981 pnode->map_size = mapped_size;
982 pinfo->pshm_flags |= (PSHM_MAPPED | PSHM_INUSE);
983 PSHM_SUBSYS_UNLOCK();
984 out:
985 if (kret != KERN_SUCCESS) {
986 if (mapped_size != 0) {
987 (void) mach_vm_deallocate(current_map(),
988 user_start_addr,
989 mapped_size);
990 }
991 }
992
993 switch (kret) {
994 case KERN_SUCCESS:
995 *retval = (user_start_addr + pageoff);
996 return (0);
997 case KERN_INVALID_ADDRESS:
998 case KERN_NO_SPACE:
999 return (ENOMEM);
1000 case KERN_PROTECTION_FAILURE:
1001 return (EACCES);
1002 default:
1003 return (EINVAL);
1004 }
1005
1006 }
1007
1008 int
1009 shm_unlink(__unused proc_t p, struct shm_unlink_args *uap,
1010 __unused int32_t *retval)
1011 {
1012 size_t i;
1013 int error=0;
1014 struct pshmname nd;
1015 struct pshminfo *pinfo;
1016 char * pnbuf;
1017 char * nameptr;
1018 char * cp;
1019 size_t pathlen, plen;
1020 int incache = 0;
1021 struct pshmcache *pcache = PSHMCACHE_NULL;
1022 struct pshmobj *pshmobj, *pshmobj_next;
1023
1024 pinfo = PSHMINFO_NULL;
1025
1026 MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
1027 if (pnbuf == NULL) {
1028 return(ENOSPC); /* XXX non-standard */
1029 }
1030 pathlen = MAXPATHLEN;
1031 error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);
1032 if (error) {
1033 goto bad;
1034 }
1035 AUDIT_ARG(text, pnbuf);
1036 if (pathlen > PSHMNAMLEN) {
1037 error = ENAMETOOLONG;
1038 goto bad;
1039 }
1040
1041
1042 #ifdef PSXSHM_NAME_RESTRICT
1043 nameptr = pnbuf;
1044 if (*nameptr == '/') {
1045 while (*(nameptr++) == '/') {
1046 plen--;
1047 error = EINVAL;
1048 goto bad;
1049 }
1050 } else {
1051 error = EINVAL;
1052 goto bad;
1053 }
1054 #endif /* PSXSHM_NAME_RESTRICT */
1055
1056 plen = pathlen;
1057 nameptr = pnbuf;
1058 nd.pshm_nameptr = nameptr;
1059 nd.pshm_namelen = plen;
1060 nd.pshm_hash = 0;
1061
1062 for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) {
1063 nd.pshm_hash += (unsigned char)*cp * i;
1064 }
1065
1066 PSHM_SUBSYS_LOCK();
1067 error = pshm_cache_search(&pinfo, &nd, &pcache, 0);
1068
1069 if (error == ENOENT) {
1070 PSHM_SUBSYS_UNLOCK();
1071 goto bad;
1072
1073 }
1074 /* During unlink, a lookup failure also implies ENOENT */
1075 if (!error) {
1076 PSHM_SUBSYS_UNLOCK();
1077 error = ENOENT;
1078 goto bad;
1079 } else
1080 incache = 1;
1081
1082 if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED))==0) {
1083 PSHM_SUBSYS_UNLOCK();
1084 error = EINVAL;
1085 goto bad;
1086 }
1087
1088 if (pinfo->pshm_flags & PSHM_ALLOCATING) {
1089 /* XXX should we wait for flag to clear and then proceed ? */
1090 PSHM_SUBSYS_UNLOCK();
1091 error = EAGAIN;
1092 goto bad;
1093 }
1094
1095 if (pinfo->pshm_flags & PSHM_INDELETE) {
1096 PSHM_SUBSYS_UNLOCK();
1097 error = 0;
1098 goto bad;
1099 }
1100 #if CONFIG_MACF
1101 error = mac_posixshm_check_unlink(kauth_cred_get(), pinfo, nameptr);
1102 if (error) {
1103 PSHM_SUBSYS_UNLOCK();
1104 goto bad;
1105 }
1106 #endif
1107
1108 AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid,
1109 pinfo->pshm_mode);
1110
1111 /*
1112 * Following file semantics, unlink is allowed only for
1113 * users with write permission.
1114 */
1115 if ( (error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p)) ) {
1116 PSHM_SUBSYS_UNLOCK();
1117 goto bad;
1118 }
1119
1120 pinfo->pshm_flags |= PSHM_INDELETE;
1121 pshm_cache_delete(pcache);
1122 pinfo->pshm_flags |= PSHM_REMOVED;
1123 /* release the existence reference */
1124 if (!--pinfo->pshm_usecount) {
1125 #if CONFIG_MACF
1126 mac_posixshm_label_destroy(pinfo);
1127 #endif
1128 PSHM_SUBSYS_UNLOCK();
1129 /*
1130 * If this is the last reference going away on the object,
1131 * then we need to destroy the backing object. The name
1132 * has an implied but uncounted reference on the object,
1133 * once it's created, since it's used as a rendezvous, and
1134 * therefore may be subsequently reopened.
1135 */
1136 for (pshmobj = pinfo->pshm_memobjects;
1137 pshmobj != NULL;
1138 pshmobj = pshmobj_next) {
1139 mach_memory_entry_port_release(pshmobj->pshmo_memobject);
1140 pshmobj_next = pshmobj->pshmo_next;
1141 FREE(pshmobj, M_SHM);
1142 }
1143 FREE(pinfo,M_SHM);
1144 } else {
1145 PSHM_SUBSYS_UNLOCK();
1146 }
1147 FREE(pcache, M_SHM);
1148 error = 0;
1149 bad:
1150 FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
1151 return (error);
1152 }
1153
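/*
 * Reference counting: pshm_usecount holds one reference per open file
 * description plus one "existence" reference for the name.  shm_unlink()
 * drops the existence reference; pshm_close() drops one reference
 * (normally a descriptor's) and, once the object is both removed and
 * unreferenced, releases the backing memory entries and frees the
 * pshminfo.
 */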
1154 /* already called locked */
1155 static int
1156 pshm_close(struct pshminfo *pinfo, int dropref)
1157 {
1158 int error = 0;
1159 struct pshmobj *pshmobj, *pshmobj_next;
1160
1161 /*
1162 * If we are dropping the reference we took on the cache object, don't
1163 * enforce the allocation requirement.
1164 */
1165 if ( !dropref && ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED)) {
1166 return(EINVAL);
1167 }
1168 #if DIAGNOSTIC
1169 if(!pinfo->pshm_usecount) {
1170 kprintf("negative usecount in pshm_close\n");
1171 }
1172 #endif /* DIAGNOSTIC */
1173 pinfo->pshm_usecount--; /* release this fd's reference */
1174
1175 if ((pinfo->pshm_flags & PSHM_REMOVED) && !pinfo->pshm_usecount) {
1176 #if CONFIG_MACF
1177 mac_posixshm_label_destroy(pinfo);
1178 #endif
1179 PSHM_SUBSYS_UNLOCK();
1180 /*
1181 * If this is the last reference going away on the object,
1182 * then we need to destroy the backing object.
1183 */
1184 for (pshmobj = pinfo->pshm_memobjects;
1185 pshmobj != NULL;
1186 pshmobj = pshmobj_next) {
1187 mach_memory_entry_port_release(pshmobj->pshmo_memobject);
1188 pshmobj_next = pshmobj->pshmo_next;
1189 FREE(pshmobj, M_SHM);
1190 }
1191 PSHM_SUBSYS_LOCK();
1192 FREE(pinfo,M_SHM);
1193 }
1194 return (error);
1195 }
1196
1197 /* vfs_context_t passed to match prototype for struct fileops */
1198 static int
1199 pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx)
1200 {
1201 int error = EINVAL;
1202 struct pshmnode *pnode;
1203
1204 PSHM_SUBSYS_LOCK();
1205
1206 if ((pnode = (struct pshmnode *)fg->fg_data) != NULL) {
1207 if (pnode->pinfo != PSHMINFO_NULL) {
1208 error = pshm_close(pnode->pinfo, 0);
1209 }
1210 FREE(pnode, M_SHM);
1211 }
1212
1213 PSHM_SUBSYS_UNLOCK();
1214
1215 return(error);
1216 }
1217
1218 static int
1219 pshm_read(__unused struct fileproc *fp, __unused struct uio *uio,
1220 __unused int flags, __unused vfs_context_t ctx)
1221 {
1222 return(ENOTSUP);
1223 }
1224
1225 static int
1226 pshm_write(__unused struct fileproc *fp, __unused struct uio *uio,
1227 __unused int flags, __unused vfs_context_t ctx)
1228 {
1229 return(ENOTSUP);
1230 }
1231
1232 static int
1233 pshm_ioctl(__unused struct fileproc *fp, __unused u_long com,
1234 __unused caddr_t data, __unused vfs_context_t ctx)
1235 {
1236 return(ENOTSUP);
1237 }
1238
1239 static int
1240 pshm_select(__unused struct fileproc *fp, __unused int which, __unused void *wql,
1241 __unused vfs_context_t ctx)
1242 {
1243 return(ENOTSUP);
1244 }
1245
1246 static int
1247 pshm_kqfilter(__unused struct fileproc *fp, __unused struct knote *kn,
1248 __unused vfs_context_t ctx)
1249 {
1250 return(ENOTSUP);
1251 }
1252
1253 int
1254 fill_pshminfo(struct pshmnode * pshm, struct pshm_info * info)
1255 {
1256 struct pshminfo *pinfo;
1257 struct vinfo_stat *sb;
1258
1259 PSHM_SUBSYS_LOCK();
1260 if ((pinfo = pshm->pinfo) == PSHMINFO_NULL){
1261 PSHM_SUBSYS_UNLOCK();
1262 return(EINVAL);
1263 }
1264
1265 sb = &info->pshm_stat;
1266
1267 bzero(sb, sizeof(struct vinfo_stat));
1268 sb->vst_mode = pinfo->pshm_mode;
1269 sb->vst_uid = pinfo->pshm_uid;
1270 sb->vst_gid = pinfo->pshm_gid;
1271 sb->vst_size = pinfo->pshm_length;
1272
1273 info->pshm_mappaddr = pshm->mapp_addr;
1274 bcopy(&pinfo->pshm_name[0], &info->pshm_name[0], PSHMNAMLEN+1);
1275
1276 PSHM_SUBSYS_UNLOCK();
1277 return(0);
1278 }
1279
1280 #if CONFIG_MACF
1281 void
1282 pshm_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx)
1283 {
1284 struct pshmnode *pnode;
1285 struct pshminfo *pshm;
1286
1287 PSHM_SUBSYS_LOCK();
1288 pnode = (struct pshmnode *)fp->f_fglob->fg_data;
1289 if (pnode != NULL) {
1290 pshm = pnode->pinfo;
1291 if (pshm != NULL)
1292 mac_posixshm_vnode_label_associate(
1293 vfs_context_ucred(ctx), pshm, pshm->pshm_label,
1294 vp, vp->v_label);
1295 }
1296 PSHM_SUBSYS_UNLOCK();
1297 }
1298 #endif