[apple/xnu.git] bsd/kern/posix_shm.c (xnu-3248.30.4)
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1990, 1996-1998 Apple Computer, Inc.
30 * All Rights Reserved.
31 */
32 /*
33 * posix_shm.c : Support for POSIX shared memory APIs
34 *
35 * File: posix_shm.c
36 * Author: Ananthakrishna Ramesh
37 *
38 * HISTORY
39 * 2-Sep-1999 A.Ramesh
40 * Created for MacOSX
41 *
42 */
43 /*
44 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
45 * support for mandatory and extensible security protections. This notice
46 * is included in support of clause 2.2 (b) of the Apple Public License,
47 * Version 2.0.
48 */
49
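/*
 * Usage sketch (illustrative, added; not part of the original source):
 * this file implements the kernel side of the POSIX shared memory API.
 * A typical userspace sequence exercising these paths looks like:
 *
 *	int fd = shm_open("/example", O_CREAT | O_RDWR, 0600);   -> shm_open() below
 *	ftruncate(fd, 4096);                                      -> pshm_truncate()
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);                                   -> pshm_mmap()
 *	...
 *	shm_unlink("/example");                                   -> shm_unlink() below
 *
 * The name "/example" and the 4096-byte size are arbitrary examples.
 */
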
50 #include <sys/cdefs.h>
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/file_internal.h>
55 #include <sys/filedesc.h>
56 #include <sys/stat.h>
57 #include <sys/proc_internal.h>
58 #include <sys/kauth.h>
59 #include <sys/mount.h>
60 #include <sys/namei.h>
61 #include <sys/vnode.h>
62 #include <sys/vnode_internal.h>
63 #include <sys/ioctl.h>
64 #include <sys/tty.h>
65 #include <sys/malloc.h>
66 #include <sys/mman.h>
67 #include <sys/stat.h>
68 #include <sys/sysproto.h>
69 #include <sys/proc_info.h>
70 #include <security/audit/audit.h>
71
72 #if CONFIG_MACF
73 #include <security/mac_framework.h>
74 #endif
75
76 #include <mach/mach_types.h>
77 #include <mach/mach_vm.h>
78 #include <mach/vm_map.h>
79 #include <mach/vm_prot.h>
80 #include <mach/vm_inherit.h>
81 #include <mach/kern_return.h>
82 #include <mach/memory_object_control.h>
83
84 #include <vm/vm_map.h>
85 #include <vm/vm_protos.h>
86
87 #define f_flag f_fglob->fg_flag
88 #define f_type f_fglob->fg_ops->fo_type
89 #define f_msgcount f_fglob->fg_msgcount
90 #define f_cred f_fglob->fg_cred
91 #define f_ops f_fglob->fg_ops
92 #define f_offset f_fglob->fg_offset
93 #define f_data f_fglob->fg_data
94 #define PSHMNAMLEN 31 /* maximum name segment length we bother with */
95
96 struct pshmobj {
97 void * pshmo_memobject;
98 memory_object_size_t pshmo_size;
99 struct pshmobj * pshmo_next;
100 };
101
102 struct pshminfo {
103 unsigned int pshm_flags;
104 unsigned int pshm_usecount;
105 off_t pshm_length;
106 mode_t pshm_mode;
107 uid_t pshm_uid;
108 gid_t pshm_gid;
109 char pshm_name[PSHMNAMLEN + 1]; /* segment name */
110 struct pshmobj *pshm_memobjects;
111 #if DIAGNOSTIC
112 unsigned int pshm_readcount;
113 unsigned int pshm_writecount;
114 proc_t pshm_proc;
115 #endif /* DIAGNOSTIC */
116 struct label* pshm_label;
117 };
118 #define PSHMINFO_NULL (struct pshminfo *)0
119
120 #define PSHM_NONE 0x001
121 #define PSHM_DEFINED 0x002
122 #define PSHM_ALLOCATED 0x004
123 #define PSHM_MAPPED 0x008
124 #define PSHM_INUSE 0x010
125 #define PSHM_REMOVED 0x020
126 #define PSHM_INCREATE 0x040
127 #define PSHM_INDELETE 0x080
128 #define PSHM_ALLOCATING 0x100
129
130 struct pshmcache {
131 LIST_ENTRY(pshmcache) pshm_hash; /* hash chain */
132 struct pshminfo *pshminfo; /* shm object the name refers to */
133 int pshm_nlen; /* length of name */
134 char pshm_name[PSHMNAMLEN + 1]; /* segment name */
135 };
136 #define PSHMCACHE_NULL (struct pshmcache *)0
137
138 struct pshmstats {
139 long goodhits; /* hits that we can really use */
140 long neghits; /* negative hits that we can use */
141 long badhits; /* hits we must drop */
142 long falsehits; /* hits with id mismatch */
143 long miss; /* misses */
144 long longnames; /* long names that ignore cache */
145 };
146
147 struct pshmname {
148 char *pshm_nameptr; /* pointer to looked up name */
149 long pshm_namelen; /* length of looked up component */
150 u_long pshm_hash; /* hash value of looked up name */
151 };
152
153 struct pshmnode {
154 off_t mapp_addr;
155 user_size_t map_size; /* XXX unused ? */
156 struct pshminfo *pinfo;
157 unsigned int pshm_usecount;
158 #if DIAGNOSTIC
159 unsigned int readcnt;
160 unsigned int writecnt;
161 #endif
162 };
163 #define PSHMNODE_NULL (struct pshmnode *)0
164
165
166 #define PSHMHASH(pnp) \
167 (&pshmhashtbl[(pnp)->pshm_hash & pshmhash])
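
/*
 * Note (added): callers compute pnp->pshm_hash as the sum of each name
 * byte multiplied by its 1-based position (see shm_open() and
 * shm_unlink()); PSHMHASH() then masks that value into the table, whose
 * size is a power of two, so pshmhash is the table size minus one.
 */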
168
169 LIST_HEAD(pshmhashhead, pshmcache) *pshmhashtbl; /* Hash Table */
170 u_long pshmhash; /* size of hash table - 1 */
171 long pshmnument; /* number of cache entries allocated */
172 struct pshmstats pshmstats; /* cache effectiveness statistics */
173
174 static int pshm_read (struct fileproc *fp, struct uio *uio,
175 int flags, vfs_context_t ctx);
176 static int pshm_write (struct fileproc *fp, struct uio *uio,
177 int flags, vfs_context_t ctx);
178 static int pshm_ioctl (struct fileproc *fp, u_long com,
179 caddr_t data, vfs_context_t ctx);
180 static int pshm_select (struct fileproc *fp, int which, void *wql, vfs_context_t ctx);
181 static int pshm_close(struct pshminfo *pinfo, int dropref);
182 static int pshm_closefile (struct fileglob *fg, vfs_context_t ctx);
183
184 static int pshm_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx);
185
186 int pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, proc_t p);
187 static int pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp);
188 static void pshm_cache_delete(struct pshmcache *pcp);
189 #if NOT_USED
190 static void pshm_cache_purge(void);
191 #endif /* NOT_USED */
192 static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
193 struct pshmcache **pcache, int addref);
194
195 static const struct fileops pshmops = {
196 DTYPE_PSXSHM,
197 pshm_read,
198 pshm_write,
199 pshm_ioctl,
200 pshm_select,
201 pshm_closefile,
202 pshm_kqfilter,
203 0
204 };
205
206 static lck_grp_t *psx_shm_subsys_lck_grp;
207 static lck_grp_attr_t *psx_shm_subsys_lck_grp_attr;
208 static lck_attr_t *psx_shm_subsys_lck_attr;
209 static lck_mtx_t psx_shm_subsys_mutex;
210
211 #define PSHM_SUBSYS_LOCK() lck_mtx_lock(& psx_shm_subsys_mutex)
212 #define PSHM_SUBSYS_UNLOCK() lck_mtx_unlock(& psx_shm_subsys_mutex)
213
214
215 /* Initialize the mutex governing access to the posix shm subsystem */
216 __private_extern__ void
217 pshm_lock_init( void )
218 {
219
220 psx_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
221
222 psx_shm_subsys_lck_grp = lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr);
223
224 psx_shm_subsys_lck_attr = lck_attr_alloc_init();
225 lck_mtx_init(& psx_shm_subsys_mutex, psx_shm_subsys_lck_grp, psx_shm_subsys_lck_attr);
226 }
227
228 /*
229 * Lookup an entry in the cache
230 *
231 * If a match is found, a status of -1 is returned and the entry is
232 * handed back through *pshmp and *pcache, taking a reference on the
233 * object when addref is set. If the lookup determines that the name
234 * does not exist (negative caching), a status of ENOENT is returned.
235 * If the lookup fails, a status of zero is returned.
236 */
237
238 static int
239 pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
240 struct pshmcache **pcache, int addref)
241 {
242 struct pshmcache *pcp, *nnp;
243 struct pshmhashhead *pcpp;
244
245 if (pnp->pshm_namelen > PSHMNAMLEN) {
246 pshmstats.longnames++;
247 return (0);
248 }
249
250 pcpp = PSHMHASH(pnp);
251 for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) {
252 nnp = pcp->pshm_hash.le_next;
253 if (pcp->pshm_nlen == pnp->pshm_namelen &&
254 !bcmp(pcp->pshm_name, pnp->pshm_nameptr, (u_int)pcp->pshm_nlen))
255 break;
256 }
257
258 if (pcp == 0) {
259 pshmstats.miss++;
260 return (0);
261 }
262
263 /* We found a "positive" match, return the pshminfo */
264 if (pcp->pshminfo) {
265 pshmstats.goodhits++;
266 /* TOUCH(ncp); */
267 *pshmp = pcp->pshminfo;
268 *pcache = pcp;
269 if (addref)
270 pcp->pshminfo->pshm_usecount++;
271 return (-1);
272 }
273
274 /*
275 * We found a "negative" match; return ENOENT so the caller knows the name does not exist.
276 */
277 pshmstats.neghits++;
278 return (ENOENT);
279 }
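
/*
 * Illustrative caller sketch (added; not in the original source):
 * callers such as shm_open() dispatch on the three return values of
 * pshm_cache_search() roughly as follows:
 *
 *	error = pshm_cache_search(&pinfo, &nd, &pcache, 1);
 *	if (error == ENOENT)		// negative entry: name known absent
 *		fail;
 *	else if (error == -1)		// positive hit: pinfo and pcache valid
 *		use the existing object;
 *	else				// zero: cache miss, nothing cached
 *		create a new object;
 */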
280
281 /*
282 * Add an entry to the cache; the caller preallocates the pshmcache
283 * entry and, on success, we consume it here.
284 */
285 static int
286 pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp)
287 {
288 struct pshmhashhead *pcpp;
289 struct pshminfo *dpinfo;
290 struct pshmcache *dpcp;
291
292 #if DIAGNOSTIC
293 if (pnp->pshm_namelen > PSHMNAMLEN)
294 panic("cache_enter: name too long");
295 #endif
296
297
298 /* if the entry has already been added by someone else, return EEXIST */
299 if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == -1) {
300 return(EEXIST);
301 }
302 pshmnument++;
303
304 /*
305 * Fill in cache info; if pshmp is NULL this is a "negative" cache entry.
306 */
307 pcp->pshminfo = pshmp;
308 pcp->pshm_nlen = pnp->pshm_namelen;
309 bcopy(pnp->pshm_nameptr, pcp->pshm_name, (unsigned)pcp->pshm_nlen);
310 pcpp = PSHMHASH(pnp);
311 #if DIAGNOSTIC
312 {
313 struct pshmcache *p;
314
315 for (p = pcpp->lh_first; p != 0; p = p->pshm_hash.le_next)
316 if (p == pcp)
317 panic("cache_enter: duplicate");
318 }
319 #endif
320 LIST_INSERT_HEAD(pcpp, pcp, pshm_hash);
321 return(0);
322 }
323
324 /*
325 * Name cache initialization, called during system bootstrap
326 */
327 void
328 pshm_cache_init(void)
329 {
330 pshmhashtbl = hashinit(desiredvnodes / 8, M_SHM, &pshmhash);
331 }
332
333 #if NOT_USED
334 /*
335 * Invalidate all entries in the cache.
336 *
337 * We simply walk every hash chain and delete each entry we find;
338 * unlike the VFS name cache there is no v_id generation scheme here,
339 * so a purge removes everything outright. This routine is currently
340 * compiled out (NOT_USED).
341 */
342 static void
343 pshm_cache_purge(void)
344 {
345 struct pshmcache *pcp;
346 struct pshmhashhead *pcpp;
347
348 for (pcpp = &pshmhashtbl[pshmhash]; pcpp >= pshmhashtbl; pcpp--) {
349 while ( (pcp = pcpp->lh_first) )
350 pshm_cache_delete(pcp);
351 }
352 }
353 #endif /* NOT_USED */
354
355 static void
356 pshm_cache_delete(struct pshmcache *pcp)
357 {
358 #if DIAGNOSTIC
359 if (pcp->pshm_hash.le_prev == 0)
360 panic("namecache purge le_prev");
361 if (pcp->pshm_hash.le_next == pcp)
362 panic("namecache purge le_next");
363 #endif /* DIAGNOSTIC */
364 LIST_REMOVE(pcp, pshm_hash);
365 pcp->pshm_hash.le_prev = 0;
366 pshmnument--;
367 }
368
369
370 int
371 shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval)
372 {
373 size_t i;
374 int indx, error;
375 struct pshmname nd;
376 struct pshminfo *pinfo;
377 struct fileproc *fp = NULL;
378 char *pnbuf = NULL;
379 struct pshminfo *new_pinfo = PSHMINFO_NULL;
380 struct pshmnode *new_pnode = PSHMNODE_NULL;
381 struct pshmcache *pcache = PSHMCACHE_NULL; /* ignored on return */
382 char * nameptr;
383 char * cp;
384 size_t pathlen, plen;
385 int fmode ;
386 int cmode = uap->mode;
387 int incache = 0;
388 struct pshmcache *pcp = NULL;
389
390 AUDIT_ARG(fflags, uap->oflag);
391 AUDIT_ARG(mode, uap->mode);
392
393 pinfo = PSHMINFO_NULL;
394
395 /*
396 * Preallocate everything we might need up front so that we do not have
397 * to drop and retake the lock later, which would open us up to races.
398 */
399 MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
400 if (pnbuf == NULL) {
401 error = ENOSPC;
402 goto bad;
403 }
404
405 pathlen = MAXPATHLEN;
406 error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);
407 if (error) {
408 goto bad;
409 }
410 AUDIT_ARG(text, pnbuf);
411 if (pathlen > PSHMNAMLEN) {
412 error = ENAMETOOLONG;
413 goto bad;
414 }
415 #ifdef PSXSHM_NAME_RESTRICT
416 nameptr = pnbuf;
417 if (*nameptr == '/') {
418 while (*(nameptr++) == '/') {
419 plen--;
420 error = EINVAL;
421 goto bad;
422 }
423 } else {
424 error = EINVAL;
425 goto bad;
426 }
427 #endif /* PSXSHM_NAME_RESTRICT */
428
429 plen = pathlen;
430 nameptr = pnbuf;
431 nd.pshm_nameptr = nameptr;
432 nd.pshm_namelen = plen;
433 nd.pshm_hash = 0;
434
435 for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) {
436 nd.pshm_hash += (unsigned char)*cp * i;
437 }
438
439 /*
440 * attempt to allocate a new fp; if unsuccessful, the fp will be
441 * left unmodified (NULL).
442 */
443 error = falloc(p, &fp, &indx, vfs_context_current());
444 if (error)
445 goto bad;
446
447 cmode &= ALLPERMS;
448
449 fmode = FFLAGS(uap->oflag);
450 if ((fmode & (FREAD | FWRITE)) == 0) {
451 error = EINVAL;
452 goto bad;
453 }
454
455 /*
456 * Preallocate a cache entry in case we end up creating a new object;
457 * if the lookup below finds the name already cached, the preallocated
458 * entry is freed again before we return.
459 */
460 MALLOC(pcp, struct pshmcache *, sizeof(struct pshmcache), M_SHM, M_WAITOK|M_ZERO);
461 if (pcp == NULL) {
462 error = ENOSPC;
463 goto bad;
464 }
465
466 MALLOC(new_pinfo, struct pshminfo *, sizeof(struct pshminfo), M_SHM, M_WAITOK|M_ZERO);
467 if (new_pinfo == PSHMINFO_NULL) {
468 error = ENOSPC;
469 goto bad;
470 }
471 #if CONFIG_MACF
472 mac_posixshm_label_init(new_pinfo);
473 #endif
474
475 MALLOC(new_pnode, struct pshmnode *, sizeof(struct pshmnode), M_SHM, M_WAITOK|M_ZERO);
476 if (new_pnode == PSHMNODE_NULL) {
477 error = ENOSPC;
478 goto bad;
479 }
480
481 PSHM_SUBSYS_LOCK();
482
483 /*
484 * If we find the entry in the cache, this will take a reference,
485 * allowing us to unlock it for the permissions check.
486 */
487 error = pshm_cache_search(&pinfo, &nd, &pcache, 1);
488
489 PSHM_SUBSYS_UNLOCK();
490
491 if (error == ENOENT) {
492 error = EINVAL;
493 goto bad;
494 }
495
496 if (!error) {
497 incache = 0;
498 if (fmode & O_CREAT) {
499 /* create a new one (commit the allocation) */
500 pinfo = new_pinfo;
501 pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE;
502 pinfo->pshm_usecount = 1; /* existence reference */
503 pinfo->pshm_mode = cmode;
504 pinfo->pshm_uid = kauth_getuid();
505 pinfo->pshm_gid = kauth_getgid();
506 bcopy(pnbuf, &pinfo->pshm_name[0], pathlen);
507 pinfo->pshm_name[pathlen]=0;
508 #if CONFIG_MACF
509 error = mac_posixshm_check_create(kauth_cred_get(), nameptr);
510 if (error) {
511 goto bad;
512 }
513 mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr);
514 #endif
515 }
516 } else {
517 incache = 1;
518 if (fmode & O_CREAT) {
519 /* already exists */
520 if ((fmode & O_EXCL)) {
521 AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
522 pinfo->pshm_gid,
523 pinfo->pshm_mode);
524
525 /* shm obj exists and opened O_EXCL */
526 error = EEXIST;
527 goto bad;
528 }
529
530 if( pinfo->pshm_flags & PSHM_INDELETE) {
531 error = ENOENT;
532 goto bad;
533 }
534 AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
535 pinfo->pshm_gid, pinfo->pshm_mode);
536 #if CONFIG_MACF
537 if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
538 goto bad;
539 }
540 #endif
541 if ( (error = pshm_access(pinfo, fmode, kauth_cred_get(), p)) ) {
542 goto bad;
543 }
544 }
545 }
546 if (!(fmode & O_CREAT)) {
547 if (!incache) {
548 /* O_CREAT is not set and the object does not exist */
549 error = ENOENT;
550 goto bad;
551 }
552 if( pinfo->pshm_flags & PSHM_INDELETE) {
553 error = ENOENT;
554 goto bad;
555 }
556 #if CONFIG_MACF
557 if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
558 goto bad;
559 }
560 #endif
561
562 if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {
563 goto bad;
564 }
565 }
566 if (fmode & O_TRUNC) {
567 error = EINVAL;
568 goto bad;
569 }
570
571
572 PSHM_SUBSYS_LOCK();
573
574 #if DIAGNOSTIC
575 if (fmode & FWRITE)
576 pinfo->pshm_writecount++;
577 if (fmode & FREAD)
578 pinfo->pshm_readcount++;
579 #endif
580 if (!incache) {
581 /* if successful, this will consume the pcp */
582 if ( (error = pshm_cache_add(pinfo, &nd, pcp)) ) {
583 goto bad_locked;
584 }
585 /*
586 * add reference for the new entry; otherwise, we obtained
587 * one from the cache hit earlier.
588 */
589 pinfo->pshm_usecount++;
590 }
591 pinfo->pshm_flags &= ~PSHM_INCREATE;
592 new_pnode->pinfo = pinfo;
593
594 PSHM_SUBSYS_UNLOCK();
595
596 /*
597 * if incache, we did not use the new pcp or new_pinfo and must
598 * free them
599 */
600 if (incache) {
601 FREE(pcp, M_SHM);
602
603 if (new_pinfo != PSHMINFO_NULL) {
604 #if CONFIG_MACF
605 mac_posixshm_label_destroy(new_pinfo);
606 #endif
607 FREE(new_pinfo, M_SHM);
608 }
609 }
610
611 proc_fdlock(p);
612 fp->f_flag = fmode & FMASK;
613 fp->f_ops = &pshmops;
614 fp->f_data = (caddr_t)new_pnode;
615 *fdflags(p, indx) |= UF_EXCLOSE;
616 procfdtbl_releasefd(p, indx, NULL);
617 fp_drop(p, indx, fp, 1);
618 proc_fdunlock(p);
619
620 *retval = indx;
621 FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
622 return (0);
623
624 bad_locked:
625 PSHM_SUBSYS_UNLOCK();
626 bad:
627 /*
628 * If we obtained the entry from the cache, we need to drop the
629 * reference; holding the reference may have prevented unlinking,
630 * so we need to call pshm_close() to get the full effect.
631 */
632 if (incache) {
633 PSHM_SUBSYS_LOCK();
634 pshm_close(pinfo, 1);
635 PSHM_SUBSYS_UNLOCK();
636 }
637
638 if (pcp != NULL)
639 FREE(pcp, M_SHM);
640
641 if (new_pnode != PSHMNODE_NULL)
642 FREE(new_pnode, M_SHM);
643
644 if (fp != NULL)
645 fp_free(p, indx, fp);
646
647 if (new_pinfo != PSHMINFO_NULL) {
648 #if CONFIG_MACF
649 mac_posixshm_label_destroy(new_pinfo);
650 #endif
651 FREE(new_pinfo, M_SHM);
652 }
653 if (pnbuf != NULL)
654 FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
655 return (error);
656 }
657
658
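/*
 * pshm_truncate() sets the size of a POSIX shm object and allocates its
 * backing store; it is reached via ftruncate(2) on a shm descriptor.
 * The object must still be in the PSHM_DEFINED state, so an object can
 * be sized only once and cannot later be grown or shrunk.
 */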
659 int
660 pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd,
661 off_t length, __unused int32_t *retval)
662 {
663 struct pshminfo * pinfo;
664 struct pshmnode * pnode ;
665 kern_return_t kret;
666 mem_entry_name_port_t mem_object;
667 mach_vm_size_t total_size, alloc_size;
668 memory_object_size_t mosize;
669 struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p;
670 vm_map_t user_map;
671 #if CONFIG_MACF
672 int error;
673 #endif
674
675 user_map = current_map();
676
677 if (fp->f_type != DTYPE_PSXSHM) {
678 return(EINVAL);
679 }
680
681
682 if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL )
683 return(EINVAL);
684
685 PSHM_SUBSYS_LOCK();
686 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
687 PSHM_SUBSYS_UNLOCK();
688 return(EINVAL);
689 }
690 if ((pinfo->pshm_flags & (PSHM_DEFINED|PSHM_ALLOCATING|PSHM_ALLOCATED))
691 != PSHM_DEFINED) {
692 PSHM_SUBSYS_UNLOCK();
693 return(EINVAL);
694 }
695 #if CONFIG_MACF
696 error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, length);
697 if (error) {
698 PSHM_SUBSYS_UNLOCK();
699 return(error);
700 }
701 #endif
702
703 pinfo->pshm_flags |= PSHM_ALLOCATING;
704 total_size = vm_map_round_page(length,
705 vm_map_page_mask(user_map));
706 pshmobj_next_p = &pinfo->pshm_memobjects;
707
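/*
 * Descriptive note (added): the backing store is built as a chain of
 * named memory entries, each at most ANON_MAX_SIZE bytes, linked
 * through pshm_memobjects. The subsystem lock is dropped around each
 * mach_make_memory_entry_64() call; the PSHM_ALLOCATING flag keeps
 * other operations from racing us in the meantime.
 */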
708 for (alloc_size = 0;
709 alloc_size < total_size;
710 alloc_size += mosize) {
711
712 PSHM_SUBSYS_UNLOCK();
713
714 mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE);
715 kret = mach_make_memory_entry_64(
716 VM_MAP_NULL,
717 &mosize,
718 0,
719 MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
720 &mem_object,
721 0);
722
723 if (kret != KERN_SUCCESS)
724 goto out;
725
726 MALLOC(pshmobj, struct pshmobj *, sizeof (struct pshmobj),
727 M_SHM, M_WAITOK);
728 if (pshmobj == NULL) {
729 kret = KERN_NO_SPACE;
730 mach_memory_entry_port_release(mem_object);
731 mem_object = NULL;
732 goto out;
733 }
734
735 PSHM_SUBSYS_LOCK();
736
737 pshmobj->pshmo_memobject = (void *) mem_object;
738 pshmobj->pshmo_size = mosize;
739 pshmobj->pshmo_next = NULL;
740
741 *pshmobj_next_p = pshmobj;
742 pshmobj_next_p = &pshmobj->pshmo_next;
743 }
744
745 pinfo->pshm_flags |= PSHM_ALLOCATED;
746 pinfo->pshm_flags &= ~(PSHM_ALLOCATING);
747 pinfo->pshm_length = total_size;
748 PSHM_SUBSYS_UNLOCK();
749 return(0);
750
751 out:
752 PSHM_SUBSYS_LOCK();
753 for (pshmobj = pinfo->pshm_memobjects;
754 pshmobj != NULL;
755 pshmobj = pshmobj_next) {
756 pshmobj_next = pshmobj->pshmo_next;
757 mach_memory_entry_port_release(pshmobj->pshmo_memobject);
758 FREE(pshmobj, M_SHM);
759 }
760 pinfo->pshm_memobjects = NULL;
761 pinfo->pshm_flags &= ~PSHM_ALLOCATING;
762 PSHM_SUBSYS_UNLOCK();
763
764 switch (kret) {
765 case KERN_INVALID_ADDRESS:
766 case KERN_NO_SPACE:
767 return (ENOMEM);
768 case KERN_PROTECTION_FAILURE:
769 return (EACCES);
770 default:
771 return (EINVAL);
772
773 }
774 }
775
776 int
777 pshm_stat(struct pshmnode *pnode, void *ub, int isstat64)
778 {
779 struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */
780 struct stat64 * sb64 = (struct stat64 *)0; /* warning avoidance ; protected by isstat64 */
781 struct pshminfo *pinfo;
782 #if CONFIG_MACF
783 int error;
784 #endif
785
786 PSHM_SUBSYS_LOCK();
787 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL){
788 PSHM_SUBSYS_UNLOCK();
789 return(EINVAL);
790 }
791
792 #if CONFIG_MACF
793 error = mac_posixshm_check_stat(kauth_cred_get(), pinfo);
794 if (error) {
795 PSHM_SUBSYS_UNLOCK();
796 return(error);
797 }
798 #endif
799
800 if (isstat64 != 0) {
801 sb64 = (struct stat64 *)ub;
802 bzero(sb64, sizeof(struct stat64));
803 sb64->st_mode = pinfo->pshm_mode;
804 sb64->st_uid = pinfo->pshm_uid;
805 sb64->st_gid = pinfo->pshm_gid;
806 sb64->st_size = pinfo->pshm_length;
807 } else {
808 sb = (struct stat *)ub;
809 bzero(sb, sizeof(struct stat));
810 sb->st_mode = pinfo->pshm_mode;
811 sb->st_uid = pinfo->pshm_uid;
812 sb->st_gid = pinfo->pshm_gid;
813 sb->st_size = pinfo->pshm_length;
814 }
815 PSHM_SUBSYS_UNLOCK();
816
817 return(0);
818 }
819
820 /*
821 * Access check for a shm object; called from shm_open() and shm_unlink().
822 * XXX This code is repeated many times
823 */
824 int
825 pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, __unused proc_t p)
826 {
827 int mode_req = ((mode & FREAD) ? S_IRUSR : 0) |
828 ((mode & FWRITE) ? S_IWUSR : 0);
829
830 /* User id 0 always gets access. */
831 if (!suser(cred, NULL))
832 return (0);
833
834 return(posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req));
835 }
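
/*
 * Illustrative example (added): for a non-root caller opening with
 * O_RDWR, mode_req is S_IRUSR|S_IWUSR. posix_cred_access() shifts the
 * requested bits into the owner, group, or other class according to how
 * the caller's credentials match pshm_uid/pshm_gid; e.g. a non-owner
 * group member opening an object whose pshm_mode is 0640 for O_RDWR
 * fails with EACCES because the group class lacks write permission.
 */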
836
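/*
 * pshm_mmap() maps a previously sized object into the calling task. It
 * first reserves the entire request with a VM_PROT_NONE placeholder
 * mapping and then overwrites it chunk by chunk with the memory entries
 * that pshm_truncate() chained onto pshm_memobjects, applying the
 * requested protections. Only MAP_SHARED mappings are supported.
 */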
837 int
838 pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct fileproc *fp, off_t pageoff)
839 {
840 vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr;
841 vm_map_size_t user_size = (vm_map_size_t)uap->len ;
842 vm_map_offset_t user_start_addr;
843 vm_map_size_t map_size, mapped_size;
844 int prot = uap->prot;
845 int flags = uap->flags;
846 vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
847 vm_object_offset_t map_pos;
848 vm_map_t user_map;
849 int alloc_flags;
850 boolean_t docow;
851 kern_return_t kret;
852 struct pshminfo * pinfo;
853 struct pshmnode * pnode;
854 struct pshmobj * pshmobj;
855 #if CONFIG_MACF
856 int error;
857 #endif
858
859 if (user_size == 0)
860 return(0);
861
862 if ((flags & MAP_SHARED) == 0)
863 return(EINVAL);
864
865
866 if ((prot & PROT_WRITE) && ((fp->f_flag & FWRITE) == 0)) {
867 return(EPERM);
868 }
869
870 if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL )
871 return(EINVAL);
872
873 PSHM_SUBSYS_LOCK();
874 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
875 PSHM_SUBSYS_UNLOCK();
876 return(EINVAL);
877 }
878
879 if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) {
880 PSHM_SUBSYS_UNLOCK();
881 return(EINVAL);
882 }
883 if ((off_t)user_size > pinfo->pshm_length) {
884 PSHM_SUBSYS_UNLOCK();
885 return(EINVAL);
886 }
887 if ((off_t)(user_size + file_pos) > pinfo->pshm_length) {
888 PSHM_SUBSYS_UNLOCK();
889 return(EINVAL);
890 }
891 if ((pshmobj = pinfo->pshm_memobjects) == NULL) {
892 PSHM_SUBSYS_UNLOCK();
893 return(EINVAL);
894 }
895
896 #if CONFIG_MACF
897 error = mac_posixshm_check_mmap(kauth_cred_get(), pinfo, prot, flags);
898 if (error) {
899 PSHM_SUBSYS_UNLOCK();
900 return(error);
901 }
902 #endif
903
904 PSHM_SUBSYS_UNLOCK();
905 user_map = current_map();
906
907 if ((flags & MAP_FIXED) == 0) {
908 alloc_flags = VM_FLAGS_ANYWHERE;
909 user_addr = vm_map_round_page(user_addr,
910 vm_map_page_mask(user_map));
911 } else {
912 if (user_addr != vm_map_round_page(user_addr,
913 vm_map_page_mask(user_map)))
914 return (EINVAL);
915 /*
916 * We do not get rid of the existing mappings here because
917 * it wouldn't be atomic (see comment in mmap()). We let
918 * Mach VM know that we want it to replace any existing
919 * mapping with the new one.
920 */
921 alloc_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
922 }
923 docow = FALSE;
924
925 mapped_size = 0;
926
927 /* reserve the entire space first... */
928 kret = vm_map_enter_mem_object(user_map,
929 &user_addr,
930 user_size,
931 0,
932 alloc_flags,
933 IPC_PORT_NULL,
934 0,
935 FALSE,
936 VM_PROT_NONE,
937 VM_PROT_NONE,
938 VM_INHERIT_NONE);
939 user_start_addr = user_addr;
940 if (kret != KERN_SUCCESS) {
941 goto out;
942 }
943
944 /* ... and overwrite with the real mappings */
945 for (map_pos = 0, pshmobj = pinfo->pshm_memobjects;
946 user_size != 0;
947 map_pos += pshmobj->pshmo_size, pshmobj = pshmobj->pshmo_next) {
948 if (pshmobj == NULL) {
949 /* nothing there to map !? */
950 goto out;
951 }
952 if (file_pos >= map_pos + pshmobj->pshmo_size) {
953 continue;
954 }
955 map_size = pshmobj->pshmo_size - (file_pos - map_pos);
956 if (map_size > user_size) {
957 map_size = user_size;
958 }
959 kret = vm_map_enter_mem_object(
960 user_map,
961 &user_addr,
962 map_size,
963 0,
964 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
965 pshmobj->pshmo_memobject,
966 file_pos - map_pos,
967 docow,
968 prot,
969 VM_PROT_DEFAULT,
970 VM_INHERIT_SHARE);
971 if (kret != KERN_SUCCESS)
972 goto out;
973
974 user_addr += map_size;
975 user_size -= map_size;
976 mapped_size += map_size;
977 file_pos += map_size;
978 }
979
980 PSHM_SUBSYS_LOCK();
981 pnode->mapp_addr = user_start_addr;
982 pnode->map_size = mapped_size;
983 pinfo->pshm_flags |= (PSHM_MAPPED | PSHM_INUSE);
984 PSHM_SUBSYS_UNLOCK();
985 out:
986 if (kret != KERN_SUCCESS) {
987 if (mapped_size != 0) {
988 (void) mach_vm_deallocate(current_map(),
989 user_start_addr,
990 mapped_size);
991 }
992 }
993
994 switch (kret) {
995 case KERN_SUCCESS:
996 *retval = (user_start_addr + pageoff);
997 return (0);
998 case KERN_INVALID_ADDRESS:
999 case KERN_NO_SPACE:
1000 return (ENOMEM);
1001 case KERN_PROTECTION_FAILURE:
1002 return (EACCES);
1003 default:
1004 return (EINVAL);
1005 }
1006
1007 }
1008
1009 int
1010 shm_unlink(__unused proc_t p, struct shm_unlink_args *uap,
1011 __unused int32_t *retval)
1012 {
1013 size_t i;
1014 int error=0;
1015 struct pshmname nd;
1016 struct pshminfo *pinfo;
1017 char * pnbuf;
1018 char * nameptr;
1019 char * cp;
1020 size_t pathlen, plen;
1021 int incache = 0;
1022 struct pshmcache *pcache = PSHMCACHE_NULL;
1023 struct pshmobj *pshmobj, *pshmobj_next;
1024
1025 pinfo = PSHMINFO_NULL;
1026
1027 MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
1028 if (pnbuf == NULL) {
1029 return(ENOSPC); /* XXX non-standard */
1030 }
1031 pathlen = MAXPATHLEN;
1032 error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);
1033 if (error) {
1034 goto bad;
1035 }
1036 AUDIT_ARG(text, pnbuf);
1037 if (pathlen > PSHMNAMLEN) {
1038 error = ENAMETOOLONG;
1039 goto bad;
1040 }
1041
1042
1043 #ifdef PSXSHM_NAME_RESTRICT
1044 nameptr = pnbuf;
1045 if (*nameptr == '/') {
1046 while (*(nameptr++) == '/') {
1047 plen--;
1048 error = EINVAL;
1049 goto bad;
1050 }
1051 } else {
1052 error = EINVAL;
1053 goto bad;
1054 }
1055 #endif /* PSXSHM_NAME_RESTRICT */
1056
1057 plen = pathlen;
1058 nameptr = pnbuf;
1059 nd.pshm_nameptr = nameptr;
1060 nd.pshm_namelen = plen;
1061 nd.pshm_hash = 0;
1062
1063 for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) {
1064 nd.pshm_hash += (unsigned char)*cp * i;
1065 }
1066
1067 PSHM_SUBSYS_LOCK();
1068 error = pshm_cache_search(&pinfo, &nd, &pcache, 0);
1069
1070 if (error == ENOENT) {
1071 PSHM_SUBSYS_UNLOCK();
1072 goto bad;
1073
1074 }
1075 /* During unlink, a cache lookup miss also maps to ENOENT */
1076 if (!error) {
1077 PSHM_SUBSYS_UNLOCK();
1078 error = ENOENT;
1079 goto bad;
1080 } else
1081 incache = 1;
1082
1083 if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED))==0) {
1084 PSHM_SUBSYS_UNLOCK();
1085 error = EINVAL;
1086 goto bad;
1087 }
1088
1089 if (pinfo->pshm_flags & PSHM_ALLOCATING) {
1090 /* XXX should we wait for flag to clear and then proceed ? */
1091 PSHM_SUBSYS_UNLOCK();
1092 error = EAGAIN;
1093 goto bad;
1094 }
1095
1096 if (pinfo->pshm_flags & PSHM_INDELETE) {
1097 PSHM_SUBSYS_UNLOCK();
1098 error = 0;
1099 goto bad;
1100 }
1101 #if CONFIG_MACF
1102 error = mac_posixshm_check_unlink(kauth_cred_get(), pinfo, nameptr);
1103 if (error) {
1104 PSHM_SUBSYS_UNLOCK();
1105 goto bad;
1106 }
1107 #endif
1108
1109 AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid,
1110 pinfo->pshm_mode);
1111
1112 /*
1113 * Following file semantics, unlink is allowed only for
1114 * users with write permission on the object.
1115 */
1116 if ( (error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p)) ) {
1117 PSHM_SUBSYS_UNLOCK();
1118 goto bad;
1119 }
1120
1121 pinfo->pshm_flags |= PSHM_INDELETE;
1122 pshm_cache_delete(pcache);
1123 pinfo->pshm_flags |= PSHM_REMOVED;
1124 /* release the existence reference */
1125 if (!--pinfo->pshm_usecount) {
1126 #if CONFIG_MACF
1127 mac_posixshm_label_destroy(pinfo);
1128 #endif
1129 PSHM_SUBSYS_UNLOCK();
1130 /*
1131 * If this is the last reference going away on the object,
1132 * then we need to destroy the backing object. The name
1133 * has an implied but uncounted reference on the object,
1134 * once it's created, since it's used as a rendezvous, and
1135 * therefore may be subsequently reopened.
1136 */
1137 for (pshmobj = pinfo->pshm_memobjects;
1138 pshmobj != NULL;
1139 pshmobj = pshmobj_next) {
1140 mach_memory_entry_port_release(pshmobj->pshmo_memobject);
1141 pshmobj_next = pshmobj->pshmo_next;
1142 FREE(pshmobj, M_SHM);
1143 }
1144 FREE(pinfo,M_SHM);
1145 } else {
1146 PSHM_SUBSYS_UNLOCK();
1147 }
1148 FREE(pcache, M_SHM);
1149 error = 0;
1150 bad:
1151 FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
1152 return (error);
1153 }
1154
1155 /* called with the PSHM subsystem lock already held */
1156 static int
1157 pshm_close(struct pshminfo *pinfo, int dropref)
1158 {
1159 int error = 0;
1160 struct pshmobj *pshmobj, *pshmobj_next;
1161
1162 /*
1163 * If we are dropping the reference we took on the cache object, don't
1164 * enforce the allocation requirement.
1165 */
1166 if ( !dropref && ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED)) {
1167 return(EINVAL);
1168 }
1169 #if DIAGNOSTIC
1170 if(!pinfo->pshm_usecount) {
1171 kprintf("negative usecount in pshm_close\n");
1172 }
1173 #endif /* DIAGNOSTIC */
1174 pinfo->pshm_usecount--; /* release this fd's reference */
1175
1176 if ((pinfo->pshm_flags & PSHM_REMOVED) && !pinfo->pshm_usecount) {
1177 #if CONFIG_MACF
1178 mac_posixshm_label_destroy(pinfo);
1179 #endif
1180 PSHM_SUBSYS_UNLOCK();
1181 /*
1182 * If this is the last reference going away on the object,
1183 * then we need to destroy the backing object.
1184 */
1185 for (pshmobj = pinfo->pshm_memobjects;
1186 pshmobj != NULL;
1187 pshmobj = pshmobj_next) {
1188 mach_memory_entry_port_release(pshmobj->pshmo_memobject);
1189 pshmobj_next = pshmobj->pshmo_next;
1190 FREE(pshmobj, M_SHM);
1191 }
1192 PSHM_SUBSYS_LOCK();
1193 FREE(pinfo,M_SHM);
1194 }
1195 return (error);
1196 }
1197
1198 /* vfs_context_t passed to match prototype for struct fileops */
1199 static int
1200 pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx)
1201 {
1202 int error = EINVAL;
1203 struct pshmnode *pnode;
1204
1205 PSHM_SUBSYS_LOCK();
1206
1207 if ((pnode = (struct pshmnode *)fg->fg_data) != NULL) {
1208 if (pnode->pinfo != PSHMINFO_NULL) {
1209 error = pshm_close(pnode->pinfo, 0);
1210 }
1211 FREE(pnode, M_SHM);
1212 }
1213
1214 PSHM_SUBSYS_UNLOCK();
1215
1216 return(error);
1217 }
1218
1219 static int
1220 pshm_read(__unused struct fileproc *fp, __unused struct uio *uio,
1221 __unused int flags, __unused vfs_context_t ctx)
1222 {
1223 return(ENOTSUP);
1224 }
1225
1226 static int
1227 pshm_write(__unused struct fileproc *fp, __unused struct uio *uio,
1228 __unused int flags, __unused vfs_context_t ctx)
1229 {
1230 return(ENOTSUP);
1231 }
1232
1233 static int
1234 pshm_ioctl(__unused struct fileproc *fp, __unused u_long com,
1235 __unused caddr_t data, __unused vfs_context_t ctx)
1236 {
1237 return(ENOTSUP);
1238 }
1239
1240 static int
1241 pshm_select(__unused struct fileproc *fp, __unused int which, __unused void *wql,
1242 __unused vfs_context_t ctx)
1243 {
1244 return(ENOTSUP);
1245 }
1246
1247 static int
1248 pshm_kqfilter(__unused struct fileproc *fp, __unused struct knote *kn,
1249 __unused vfs_context_t ctx)
1250 {
1251 return(ENOTSUP);
1252 }
1253
1254 int
1255 fill_pshminfo(struct pshmnode * pshm, struct pshm_info * info)
1256 {
1257 struct pshminfo *pinfo;
1258 struct vinfo_stat *sb;
1259
1260 PSHM_SUBSYS_LOCK();
1261 if ((pinfo = pshm->pinfo) == PSHMINFO_NULL){
1262 PSHM_SUBSYS_UNLOCK();
1263 return(EINVAL);
1264 }
1265
1266 sb = &info->pshm_stat;
1267
1268 bzero(sb, sizeof(struct vinfo_stat));
1269 sb->vst_mode = pinfo->pshm_mode;
1270 sb->vst_uid = pinfo->pshm_uid;
1271 sb->vst_gid = pinfo->pshm_gid;
1272 sb->vst_size = pinfo->pshm_length;
1273
1274 info->pshm_mappaddr = pshm->mapp_addr;
1275 bcopy(&pinfo->pshm_name[0], &info->pshm_name[0], PSHMNAMLEN+1);
1276
1277 PSHM_SUBSYS_UNLOCK();
1278 return(0);
1279 }
1280
1281 #if CONFIG_MACF
1282 void
1283 pshm_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx)
1284 {
1285 struct pshmnode *pnode;
1286 struct pshminfo *pshm;
1287
1288 PSHM_SUBSYS_LOCK();
1289 pnode = (struct pshmnode *)fp->f_fglob->fg_data;
1290 if (pnode != NULL) {
1291 pshm = pnode->pinfo;
1292 if (pshm != NULL)
1293 mac_posixshm_vnode_label_associate(
1294 vfs_context_ucred(ctx), pshm, pshm->pshm_label,
1295 vp, vp->v_label);
1296 }
1297 PSHM_SUBSYS_UNLOCK();
1298 }
1299 #endif