apple/xnu (tag xnu-3789.1.32): bsd/kern/posix_shm.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1990, 1996-1998 Apple Computer, Inc.
30 * All Rights Reserved.
31 */
32 /*
33 * posix_shm.c : Support for POSIX shared memory APIs
34 *
35 * File: posix_shm.c
36 * Author: Ananthakrishna Ramesh
37 *
38 * HISTORY
39 * 2-Sep-1999 A.Ramesh
40 * Created for MacOSX
41 *
42 */
43 /*
44 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
45 * support for mandatory and extensible security protections. This notice
46 * is included in support of clause 2.2 (b) of the Apple Public License,
47 * Version 2.0.
48 */
49
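/*
 * This file implements the kernel side of the POSIX shared memory API
 * (shm_open(2), shm_unlink(2)) plus the ftruncate() and mmap() paths for
 * descriptors of type DTYPE_PSXSHM. As an illustrative userspace sketch
 * (not part of this file; the name "/example" and the 4096-byte size are
 * arbitrary, and error handling is omitted):
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600); // shm_open() below
 *	ftruncate(fd, 4096);                                    // pshm_truncate()
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);                                 // pshm_mmap()
 *	munmap(p, 4096);
 *	close(fd);                                              // pshm_closefile()
 *	shm_unlink("/example");                                 // shm_unlink() below
 */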
50 #include <sys/cdefs.h>
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/file_internal.h>
55 #include <sys/filedesc.h>
56 #include <sys/stat.h>
57 #include <sys/proc_internal.h>
58 #include <sys/kauth.h>
59 #include <sys/mount.h>
60 #include <sys/namei.h>
61 #include <sys/vnode.h>
62 #include <sys/vnode_internal.h>
63 #include <sys/ioctl.h>
64 #include <sys/tty.h>
65 #include <sys/malloc.h>
66 #include <sys/mman.h>
67 #include <sys/stat.h>
68 #include <sys/sysproto.h>
69 #include <sys/proc_info.h>
70 #include <security/audit/audit.h>
71
72 #if CONFIG_MACF
73 #include <security/mac_framework.h>
74 #endif
75
76 #include <mach/mach_types.h>
77 #include <mach/mach_vm.h>
78 #include <mach/vm_map.h>
79 #include <mach/vm_prot.h>
80 #include <mach/vm_inherit.h>
81 #include <mach/kern_return.h>
82 #include <mach/memory_object_control.h>
83
84 #include <vm/vm_map.h>
85 #include <vm/vm_protos.h>
86
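/* Convenience accessors for fileproc fields that live in the shared fileglob */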
87 #define f_flag f_fglob->fg_flag
88 #define f_type f_fglob->fg_ops->fo_type
89 #define f_msgcount f_fglob->fg_msgcount
90 #define f_cred f_fglob->fg_cred
91 #define f_ops f_fglob->fg_ops
92 #define f_offset f_fglob->fg_offset
93 #define f_data f_fglob->fg_data
94 #define PSHMNAMLEN 31 /* maximum name segment length we bother with */
95
96 struct pshmobj {
97 void * pshmo_memobject;
98 memory_object_size_t pshmo_size;
99 struct pshmobj * pshmo_next;
100 };
101
102 struct pshminfo {
103 unsigned int pshm_flags;
104 unsigned int pshm_usecount;
105 off_t pshm_length;
106 mode_t pshm_mode;
107 uid_t pshm_uid;
108 gid_t pshm_gid;
109 char pshm_name[PSHMNAMLEN + 1]; /* segment name */
110 struct pshmobj *pshm_memobjects;
111 #if DIAGNOSTIC
112 unsigned int pshm_readcount;
113 unsigned int pshm_writecount;
114 proc_t pshm_proc;
115 #endif /* DIAGNOSTIC */
116 struct label* pshm_label;
117 };
118 #define PSHMINFO_NULL (struct pshminfo *)0
119
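/* Values for pshm_flags: lifecycle state of a shared memory region */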
120 #define PSHM_NONE 0x001
121 #define PSHM_DEFINED 0x002
122 #define PSHM_ALLOCATED 0x004
123 #define PSHM_MAPPED 0x008
124 #define PSHM_INUSE 0x010
125 #define PSHM_REMOVED 0x020
126 #define PSHM_INCREATE 0x040
127 #define PSHM_INDELETE 0x080
128 #define PSHM_ALLOCATING 0x100
129
130 struct pshmcache {
131 LIST_ENTRY(pshmcache) pshm_hash; /* hash chain */
132 struct pshminfo *pshminfo; /* pshminfo the name refers to */
133 int pshm_nlen; /* length of name */
134 char pshm_name[PSHMNAMLEN + 1]; /* segment name */
135 };
136 #define PSHMCACHE_NULL (struct pshmcache *)0
137
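/* Return values of pshm_cache_search() */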
138 #define PSHMCACHE_NOTFOUND (0)
139 #define PSHMCACHE_FOUND (-1)
140 #define PSHMCACHE_NEGATIVE (ENOENT)
141
142 struct pshmstats {
143 long goodhits; /* hits that we can really use */
144 long neghits; /* negative hits that we can use */
145 long badhits; /* hits we must drop */
146 long falsehits; /* hits with id mismatch */
147 long miss; /* misses */
148 long longnames; /* long names that ignore cache */
149 };
150
151 struct pshmname {
152 char *pshm_nameptr; /* pointer to looked up name */
153 long pshm_namelen; /* length of looked up component */
154 u_long pshm_hash; /* hash value of looked up name */
155 };
156
157 struct pshmnode {
158 off_t mapp_addr;
159 user_size_t map_size; /* XXX unused ? */
160 struct pshminfo *pinfo;
161 unsigned int pshm_usecount;
162 #if DIAGNOSTIC
163 unsigned int readcnt;
164 unsigned int writecnt;
165 #endif
166 };
167 #define PSHMNODE_NULL (struct pshmnode *)0
168
169
170 #define PSHMHASH(pnp) \
171 (&pshmhashtbl[(pnp)->pshm_hash & pshmhash])
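/*
 * A segment name hashes to the sum of its characters, each weighted by its
 * 1-based position (see the hash loops in shm_open() and shm_unlink());
 * PSHMHASH() then masks that hash into the table.
 */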
172
173 LIST_HEAD(pshmhashhead, pshmcache) *pshmhashtbl; /* Hash Table */
174 u_long pshmhash; /* size of hash table - 1 */
175 long pshmnument; /* number of cache entries allocated */
176 struct pshmstats pshmstats; /* cache effectiveness statistics */
177
178 static int pshm_read (struct fileproc *fp, struct uio *uio,
179 int flags, vfs_context_t ctx);
180 static int pshm_write (struct fileproc *fp, struct uio *uio,
181 int flags, vfs_context_t ctx);
182 static int pshm_ioctl (struct fileproc *fp, u_long com,
183 caddr_t data, vfs_context_t ctx);
184 static int pshm_select (struct fileproc *fp, int which, void *wql, vfs_context_t ctx);
185 static int pshm_close(struct pshminfo *pinfo, int dropref);
186 static int pshm_closefile (struct fileglob *fg, vfs_context_t ctx);
187
188 static int pshm_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx);
189
190 int pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, proc_t p);
191 int pshm_cache_purge_all(proc_t p);
192
193 static int pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp);
194 static void pshm_cache_delete(struct pshmcache *pcp);
195 static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
196 struct pshmcache **pcache, int addref);
197 static int pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache);
198
199 static const struct fileops pshmops = {
200 .fo_type = DTYPE_PSXSHM,
201 .fo_read = pshm_read,
202 .fo_write = pshm_write,
203 .fo_ioctl = pshm_ioctl,
204 .fo_select = pshm_select,
205 .fo_close = pshm_closefile,
206 .fo_kqfilter = pshm_kqfilter,
207 .fo_drain = NULL,
208 };
209
210 static lck_grp_t *psx_shm_subsys_lck_grp;
211 static lck_grp_attr_t *psx_shm_subsys_lck_grp_attr;
212 static lck_attr_t *psx_shm_subsys_lck_attr;
213 static lck_mtx_t psx_shm_subsys_mutex;
214
215 #define PSHM_SUBSYS_LOCK() lck_mtx_lock(& psx_shm_subsys_mutex)
216 #define PSHM_SUBSYS_UNLOCK() lck_mtx_unlock(& psx_shm_subsys_mutex)
217 #define PSHM_SUBSYS_ASSERT_HELD() LCK_MTX_ASSERT(&psx_shm_subsys_mutex, LCK_MTX_ASSERT_OWNED)
218
219
220 /* Initialize the mutex governing access to the posix shm subsystem */
221 __private_extern__ void
222 pshm_lock_init( void )
223 {
224
225 psx_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
226
227 psx_shm_subsys_lck_grp = lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr);
228
229 psx_shm_subsys_lck_attr = lck_attr_alloc_init();
230 lck_mtx_init(& psx_shm_subsys_mutex, psx_shm_subsys_lck_grp, psx_shm_subsys_lck_attr);
231 }
232
233 /*
234  * Lookup an entry in the cache.
235  *
236  * If a matching entry is found, a status of PSHMCACHE_FOUND (-1) is
237  * returned and *pshmp and *pcache are filled in. If the lookup
238  * determines that the name does not exist (negative caching), a status
239  * of PSHMCACHE_NEGATIVE (ENOENT) is returned. If the lookup fails, a
240  * status of PSHMCACHE_NOTFOUND (zero) is returned.
241  */
242
243 static int
244 pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
245 struct pshmcache **pcache, int addref)
246 {
247 struct pshmcache *pcp, *nnp;
248 struct pshmhashhead *pcpp;
249
250 if (pnp->pshm_namelen > PSHMNAMLEN) {
251 pshmstats.longnames++;
252 return PSHMCACHE_NOTFOUND;
253 }
254
255 pcpp = PSHMHASH(pnp);
256 for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) {
257 nnp = pcp->pshm_hash.le_next;
258 if (pcp->pshm_nlen == pnp->pshm_namelen &&
259 !bcmp(pcp->pshm_name, pnp->pshm_nameptr, (u_int)pcp->pshm_nlen))
260 break;
261 }
262
263 if (pcp == 0) {
264 pshmstats.miss++;
265 return PSHMCACHE_NOTFOUND;
266 }
267
268 /* We found a "positive" match, return the pshminfo */
269 if (pcp->pshminfo) {
270 pshmstats.goodhits++;
271 /* TOUCH(ncp); */
272 *pshmp = pcp->pshminfo;
273 *pcache = pcp;
274 if (addref)
275 pcp->pshminfo->pshm_usecount++;
276 return PSHMCACHE_FOUND;
277 }
278
279 /*
280  * We found a "negative" match; PSHMCACHE_NEGATIVE (ENOENT) notifies the caller.
281  */
282 pshmstats.neghits++;
283 return PSHMCACHE_NEGATIVE;
284 }
285
286 /*
287  * Add an entry to the cache. Returns EEXIST if an entry for this name
288  * has already been added, 0 on success.
289  */
290 static int
291 pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp)
292 {
293 struct pshmhashhead *pcpp;
294 struct pshminfo *dpinfo;
295 struct pshmcache *dpcp;
296
297 #if DIAGNOSTIC
298 if (pnp->pshm_namelen > PSHMNAMLEN)
299 panic("cache_enter: name too long");
300 #endif
301
302
303 /* if the entry has already been added by someone else, return */
304 if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == PSHMCACHE_FOUND) {
305 return EEXIST;
306 }
307 pshmnument++;
308
309 /*
310  * Fill in cache info. If pshmp is NULL this is a "negative" cache entry.
311  */
312 pcp->pshminfo = pshmp;
313 pcp->pshm_nlen = pnp->pshm_namelen;
314 bcopy(pnp->pshm_nameptr, pcp->pshm_name, (unsigned)pcp->pshm_nlen);
315 pcpp = PSHMHASH(pnp);
316 #if DIAGNOSTIC
317 {
318 struct pshmcache *p;
319
320 for (p = pcpp->lh_first; p != 0; p = p->pshm_hash.le_next)
321 if (p == pcp)
322 panic("cache_enter: duplicate");
323 }
324 #endif
325 LIST_INSERT_HEAD(pcpp, pcp, pshm_hash);
326 return 0;
327 }
328
329 /*
330 * Name cache initialization, from vfs_init() when we are booting
331 */
332 void
333 pshm_cache_init(void)
334 {
335 pshmhashtbl = hashinit(desiredvnodes / 8, M_SHM, &pshmhash);
336 }
337
338 /*
339  * Invalidate all entries and delete all objects associated with them;
340  * i.e. unlink every POSIX shared memory region in the cache.
341  *
342  * This walks the entire hash table and unlinks each cached entry in
343  * turn, releasing the existence reference (and, with it, the backing
344  * memory objects) as it goes. Only the superuser may purge the cache;
345  * on success the cache is left empty.
346  */
347 int
348 pshm_cache_purge_all(__unused proc_t p)
349 {
350 struct pshmcache *pcp, *tmppcp;
351 struct pshmhashhead *pcpp;
352 int error = 0;
353
354 if (kauth_cred_issuser(kauth_cred_get()) == 0)
355 return EPERM;
356
357 PSHM_SUBSYS_LOCK();
358 for (pcpp = &pshmhashtbl[pshmhash]; pcpp >= pshmhashtbl; pcpp--) {
359 LIST_FOREACH_SAFE(pcp, pcpp, pshm_hash, tmppcp) {
360 assert(pcp->pshm_nlen);
361 error = pshm_unlink_internal(pcp->pshminfo, pcp);
362 if (error)
363 goto out;
364 }
365 }
366 assert(pshmnument == 0);
367
368 out:
369 PSHM_SUBSYS_UNLOCK();
370
371 if (error)
372 printf("%s: Error %d removing shm cache: %ld remain!\n",
373 __func__, error, pshmnument);
374 return error;
375 }
376
377 static void
378 pshm_cache_delete(struct pshmcache *pcp)
379 {
380 #if DIAGNOSTIC
381 if (pcp->pshm_hash.le_prev == 0)
382 panic("namecache purge le_prev");
383 if (pcp->pshm_hash.le_next == pcp)
384 panic("namecache purge le_next");
385 #endif /* DIAGNOSTIC */
386 LIST_REMOVE(pcp, pshm_hash);
387 pcp->pshm_hash.le_prev = 0;
388 pshmnument--;
389 }
390
391
392 int
393 shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval)
394 {
395 size_t i;
396 int indx, error;
397 struct pshmname nd;
398 struct pshminfo *pinfo;
399 struct fileproc *fp = NULL;
400 char *pnbuf = NULL;
401 struct pshminfo *new_pinfo = PSHMINFO_NULL;
402 struct pshmnode *new_pnode = PSHMNODE_NULL;
403 struct pshmcache *pcache = PSHMCACHE_NULL; /* ignored on return */
404 char * nameptr;
405 char * cp;
406 size_t pathlen, plen;
407 int fmode ;
408 int cmode = uap->mode;
409 int incache = 0;
410 struct pshmcache *pcp = NULL;
411
412 AUDIT_ARG(fflags, uap->oflag);
413 AUDIT_ARG(mode, uap->mode);
414
415 pinfo = PSHMINFO_NULL;
416
417 /*
418  * Preallocate everything we might need up front, so we never have to
419  * drop and retake the lock mid-operation and open ourselves up to races.
420  */
421 MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
422 if (pnbuf == NULL) {
423 error = ENOSPC;
424 goto bad;
425 }
426
427 pathlen = MAXPATHLEN;
428 error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);
429 if (error) {
430 goto bad;
431 }
432 AUDIT_ARG(text, pnbuf);
433 if (pathlen > PSHMNAMLEN) {
434 error = ENAMETOOLONG;
435 goto bad;
436 }
437 #ifdef PSXSHM_NAME_RESTRICT
438 nameptr = pnbuf;
439 if (*nameptr == '/') {
440 while (*(nameptr++) == '/') {
441 plen--;
442 error = EINVAL;
443 goto bad;
444 }
445 } else {
446 error = EINVAL;
447 goto bad;
448 }
449 #endif /* PSXSHM_NAME_RESTRICT */
450
451 plen = pathlen;
452 nameptr = pnbuf;
453 nd.pshm_nameptr = nameptr;
454 nd.pshm_namelen = plen;
455 nd.pshm_hash = 0;
456
457 for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) {
458 nd.pshm_hash += (unsigned char)*cp * i;
459 }
460
461 /*
462 * attempt to allocate a new fp; if unsuccessful, the fp will be
463 * left unmodified (NULL).
464 */
465 error = falloc(p, &fp, &indx, vfs_context_current());
466 if (error)
467 goto bad;
468
469 cmode &= ALLPERMS;
470
471 fmode = FFLAGS(uap->oflag);
472 if ((fmode & (FREAD | FWRITE)) == 0) {
473 error = EINVAL;
474 goto bad;
475 }
476
477 /*
478  * Preallocate a cache entry for this name. If the name turns out to be
479  * in the cache already, the entry is simply freed again below;
480  * otherwise pshm_cache_add() consumes it.
481  */
482 MALLOC(pcp, struct pshmcache *, sizeof(struct pshmcache), M_SHM, M_WAITOK|M_ZERO);
483 if (pcp == NULL) {
484 error = ENOSPC;
485 goto bad;
486 }
487
488 MALLOC(new_pinfo, struct pshminfo *, sizeof(struct pshminfo), M_SHM, M_WAITOK|M_ZERO);
489 if (new_pinfo == PSHMINFO_NULL) {
490 error = ENOSPC;
491 goto bad;
492 }
493 #if CONFIG_MACF
494 mac_posixshm_label_init(new_pinfo);
495 #endif
496
497 MALLOC(new_pnode, struct pshmnode *, sizeof(struct pshmnode), M_SHM, M_WAITOK|M_ZERO);
498 if (new_pnode == PSHMNODE_NULL) {
499 error = ENOSPC;
500 goto bad;
501 }
502
503 PSHM_SUBSYS_LOCK();
504
505 /*
506 * If we find the entry in the cache, this will take a reference,
507 * allowing us to unlock it for the permissions check.
508 */
509 error = pshm_cache_search(&pinfo, &nd, &pcache, 1);
510
511 PSHM_SUBSYS_UNLOCK();
512
513 if (error == PSHMCACHE_NEGATIVE) {
514 error = EINVAL;
515 goto bad;
516 }
517
518 if (error == PSHMCACHE_NOTFOUND) {
519 incache = 0;
520 if (fmode & O_CREAT) {
521 /* create a new one (commit the allocation) */
522 pinfo = new_pinfo;
523 pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE;
524 pinfo->pshm_usecount = 1; /* existence reference */
525 pinfo->pshm_mode = cmode;
526 pinfo->pshm_uid = kauth_getuid();
527 pinfo->pshm_gid = kauth_getgid();
528 bcopy(pnbuf, &pinfo->pshm_name[0], pathlen);
529 pinfo->pshm_name[pathlen]=0;
530 #if CONFIG_MACF
531 error = mac_posixshm_check_create(kauth_cred_get(), nameptr);
532 if (error) {
533 goto bad;
534 }
535 mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr);
536 #endif
537 }
538 } else {
539 incache = 1;
540 if (fmode & O_CREAT) {
541 /* already exists */
542 if ((fmode & O_EXCL)) {
543 AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
544 pinfo->pshm_gid,
545 pinfo->pshm_mode);
546
547 /* shm obj exists and opened O_EXCL */
548 error = EEXIST;
549 goto bad;
550 }
551
552 if( pinfo->pshm_flags & PSHM_INDELETE) {
553 error = ENOENT;
554 goto bad;
555 }
556 AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
557 pinfo->pshm_gid, pinfo->pshm_mode);
558 #if CONFIG_MACF
559 if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
560 goto bad;
561 }
562 #endif
563 if ( (error = pshm_access(pinfo, fmode, kauth_cred_get(), p)) ) {
564 goto bad;
565 }
566 }
567 }
568 if (!(fmode & O_CREAT)) {
569 if (!incache) {
570 /* O_CREAT is not set and the object does not exist */
571 error = ENOENT;
572 goto bad;
573 }
574 if( pinfo->pshm_flags & PSHM_INDELETE) {
575 error = ENOENT;
576 goto bad;
577 }
578 #if CONFIG_MACF
579 if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
580 goto bad;
581 }
582 #endif
583
584 if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {
585 goto bad;
586 }
587 }
588 if (fmode & O_TRUNC) {
589 error = EINVAL;
590 goto bad;
591 }
592
593
594 PSHM_SUBSYS_LOCK();
595
596 #if DIAGNOSTIC
597 if (fmode & FWRITE)
598 pinfo->pshm_writecount++;
599 if (fmode & FREAD)
600 pinfo->pshm_readcount++;
601 #endif
602 if (!incache) {
603 /* if successful, this will consume the pcp */
604 if ( (error = pshm_cache_add(pinfo, &nd, pcp)) ) {
605 goto bad_locked;
606 }
607 /*
608 * add reference for the new entry; otherwise, we obtained
609 * one from the cache hit earlier.
610 */
611 pinfo->pshm_usecount++;
612 }
613 pinfo->pshm_flags &= ~PSHM_INCREATE;
614 new_pnode->pinfo = pinfo;
615
616 PSHM_SUBSYS_UNLOCK();
617
618 /*
619 * if incache, we did not use the new pcp or new_pinfo and must
620 * free them
621 */
622 if (incache) {
623 FREE(pcp, M_SHM);
624
625 if (new_pinfo != PSHMINFO_NULL) {
626 #if CONFIG_MACF
627 mac_posixshm_label_destroy(new_pinfo);
628 #endif
629 FREE(new_pinfo, M_SHM);
630 }
631 }
632
633 proc_fdlock(p);
634 fp->f_flag = fmode & FMASK;
635 fp->f_ops = &pshmops;
636 fp->f_data = (caddr_t)new_pnode;
637 *fdflags(p, indx) |= UF_EXCLOSE;
638 procfdtbl_releasefd(p, indx, NULL);
639 fp_drop(p, indx, fp, 1);
640 proc_fdunlock(p);
641
642 *retval = indx;
643 FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
644 return (0);
645
646 bad_locked:
647 PSHM_SUBSYS_UNLOCK();
648 bad:
649 /*
650 * If we obtained the entry from the cache, we need to drop the
651 * reference; holding the reference may have prevented unlinking,
652 * so we need to call pshm_close() to get the full effect.
653 */
654 if (incache) {
655 PSHM_SUBSYS_LOCK();
656 pshm_close(pinfo, 1);
657 PSHM_SUBSYS_UNLOCK();
658 }
659
660 if (pcp != NULL)
661 FREE(pcp, M_SHM);
662
663 if (new_pnode != PSHMNODE_NULL)
664 FREE(new_pnode, M_SHM);
665
666 if (fp != NULL)
667 fp_free(p, indx, fp);
668
669 if (new_pinfo != PSHMINFO_NULL) {
670 #if CONFIG_MACF
671 mac_posixshm_label_destroy(new_pinfo);
672 #endif
673 FREE(new_pinfo, M_SHM);
674 }
675 if (pnbuf != NULL)
676 FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
677 return (error);
678 }
679
680
681 int
682 pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd,
683 off_t length, __unused int32_t *retval)
684 {
685 struct pshminfo * pinfo;
686 struct pshmnode * pnode ;
687 kern_return_t kret;
688 mem_entry_name_port_t mem_object;
689 mach_vm_size_t total_size, alloc_size;
690 memory_object_size_t mosize;
691 struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p;
692 vm_map_t user_map;
693 #if CONFIG_MACF
694 int error;
695 #endif
696
697 user_map = current_map();
698
699 if (fp->f_type != DTYPE_PSXSHM) {
700 return(EINVAL);
701 }
702
703
704 if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL )
705 return(EINVAL);
706
707 PSHM_SUBSYS_LOCK();
708 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
709 PSHM_SUBSYS_UNLOCK();
710 return(EINVAL);
711 }
712 if ((pinfo->pshm_flags & (PSHM_DEFINED|PSHM_ALLOCATING|PSHM_ALLOCATED))
713 != PSHM_DEFINED) {
714 PSHM_SUBSYS_UNLOCK();
715 return(EINVAL);
716 }
717 #if CONFIG_MACF
718 error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, length);
719 if (error) {
720 PSHM_SUBSYS_UNLOCK();
721 return(error);
722 }
723 #endif
724
725 pinfo->pshm_flags |= PSHM_ALLOCATING;
726 total_size = vm_map_round_page(length,
727 vm_map_page_mask(user_map));
728 pshmobj_next_p = &pinfo->pshm_memobjects;
729
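/*
 * Back the region with a chain of named memory entries, each at most
 * ANON_MAX_SIZE bytes, until the page-rounded length is covered. The
 * subsystem lock is dropped around each allocation, with PSHM_ALLOCATING
 * set so that concurrent truncate or unlink attempts fail rather than race.
 */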
730 for (alloc_size = 0;
731 alloc_size < total_size;
732 alloc_size += mosize) {
733
734 PSHM_SUBSYS_UNLOCK();
735
736 mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE);
737 kret = mach_make_memory_entry_64(
738 VM_MAP_NULL,
739 &mosize,
740 0,
741 MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
742 &mem_object,
743 0);
744
745 if (kret != KERN_SUCCESS)
746 goto out;
747
748 MALLOC(pshmobj, struct pshmobj *, sizeof (struct pshmobj),
749 M_SHM, M_WAITOK);
750 if (pshmobj == NULL) {
751 kret = KERN_NO_SPACE;
752 mach_memory_entry_port_release(mem_object);
753 mem_object = NULL;
754 goto out;
755 }
756
757 PSHM_SUBSYS_LOCK();
758
759 pshmobj->pshmo_memobject = (void *) mem_object;
760 pshmobj->pshmo_size = mosize;
761 pshmobj->pshmo_next = NULL;
762
763 *pshmobj_next_p = pshmobj;
764 pshmobj_next_p = &pshmobj->pshmo_next;
765 }
766
767 pinfo->pshm_flags |= PSHM_ALLOCATED;
768 pinfo->pshm_flags &= ~(PSHM_ALLOCATING);
769 pinfo->pshm_length = total_size;
770 PSHM_SUBSYS_UNLOCK();
771 return(0);
772
773 out:
774 PSHM_SUBSYS_LOCK();
775 for (pshmobj = pinfo->pshm_memobjects;
776 pshmobj != NULL;
777 pshmobj = pshmobj_next) {
778 pshmobj_next = pshmobj->pshmo_next;
779 mach_memory_entry_port_release(pshmobj->pshmo_memobject);
780 FREE(pshmobj, M_SHM);
781 }
782 pinfo->pshm_memobjects = NULL;
783 pinfo->pshm_flags &= ~PSHM_ALLOCATING;
784 PSHM_SUBSYS_UNLOCK();
785
786 switch (kret) {
787 case KERN_INVALID_ADDRESS:
788 case KERN_NO_SPACE:
789 return (ENOMEM);
790 case KERN_PROTECTION_FAILURE:
791 return (EACCES);
792 default:
793 return (EINVAL);
794
795 }
796 }
797
798 int
799 pshm_stat(struct pshmnode *pnode, void *ub, int isstat64)
800 {
801 struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */
802 struct stat64 * sb64 = (struct stat64 *)0; /* warning avoidance ; protected by isstat64 */
803 struct pshminfo *pinfo;
804 #if CONFIG_MACF
805 int error;
806 #endif
807
808 PSHM_SUBSYS_LOCK();
809 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL){
810 PSHM_SUBSYS_UNLOCK();
811 return(EINVAL);
812 }
813
814 #if CONFIG_MACF
815 error = mac_posixshm_check_stat(kauth_cred_get(), pinfo);
816 if (error) {
817 PSHM_SUBSYS_UNLOCK();
818 return(error);
819 }
820 #endif
821
822 if (isstat64 != 0) {
823 sb64 = (struct stat64 *)ub;
824 bzero(sb64, sizeof(struct stat64));
825 sb64->st_mode = pinfo->pshm_mode;
826 sb64->st_uid = pinfo->pshm_uid;
827 sb64->st_gid = pinfo->pshm_gid;
828 sb64->st_size = pinfo->pshm_length;
829 } else {
830 sb = (struct stat *)ub;
831 bzero(sb, sizeof(struct stat));
832 sb->st_mode = pinfo->pshm_mode;
833 sb->st_uid = pinfo->pshm_uid;
834 sb->st_gid = pinfo->pshm_gid;
835 sb->st_size = pinfo->pshm_length;
836 }
837 PSHM_SUBSYS_UNLOCK();
838
839 return(0);
840 }
841
842 /*
843  * Check access permissions against the requested open mode; called from
844  * shm_open() and shm_unlink(). XXX This code is repeated many times.
845  */
846 int
847 pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, __unused proc_t p)
848 {
849 int mode_req = ((mode & FREAD) ? S_IRUSR : 0) |
850 ((mode & FWRITE) ? S_IWUSR : 0);
851
852 /* User id 0 always gets access. */
853 if (!suser(cred, NULL))
854 return (0);
855
856 return(posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req));
857 }
858
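/*
 * Map a region into the calling task's address map. The region may be
 * backed by several memory objects (see pshm_truncate()), so the full
 * range is first reserved in a single atomic VM_PROT_NONE mapping and
 * then overwritten piecewise with one mapping per backing object.
 */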
859 int
860 pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct fileproc *fp, off_t pageoff)
861 {
862 vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr;
863 vm_map_size_t user_size = (vm_map_size_t)uap->len ;
864 vm_map_offset_t user_start_addr;
865 vm_map_size_t map_size, mapped_size;
866 int prot = uap->prot;
867 int flags = uap->flags;
868 vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
869 vm_object_offset_t map_pos;
870 vm_map_t user_map;
871 int alloc_flags;
872 boolean_t docow;
873 kern_return_t kret;
874 struct pshminfo * pinfo;
875 struct pshmnode * pnode;
876 struct pshmobj * pshmobj;
877 #if CONFIG_MACF
878 int error;
879 #endif
880
881 if (user_size == 0)
882 return(0);
883
884 if ((flags & MAP_SHARED) == 0)
885 return(EINVAL);
886
887
888 if ((prot & PROT_WRITE) && ((fp->f_flag & FWRITE) == 0)) {
889 return(EPERM);
890 }
891
892 if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL )
893 return(EINVAL);
894
895 PSHM_SUBSYS_LOCK();
896 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
897 PSHM_SUBSYS_UNLOCK();
898 return(EINVAL);
899 }
900
901 if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) {
902 PSHM_SUBSYS_UNLOCK();
903 return(EINVAL);
904 }
905 if ((off_t)user_size > pinfo->pshm_length) {
906 PSHM_SUBSYS_UNLOCK();
907 return(EINVAL);
908 }
909 if ((off_t)(user_size + file_pos) > pinfo->pshm_length) {
910 PSHM_SUBSYS_UNLOCK();
911 return(EINVAL);
912 }
913 if ((pshmobj = pinfo->pshm_memobjects) == NULL) {
914 PSHM_SUBSYS_UNLOCK();
915 return(EINVAL);
916 }
917
918 #if CONFIG_MACF
919 error = mac_posixshm_check_mmap(kauth_cred_get(), pinfo, prot, flags);
920 if (error) {
921 PSHM_SUBSYS_UNLOCK();
922 return(error);
923 }
924 #endif
925
926 PSHM_SUBSYS_UNLOCK();
927 user_map = current_map();
928
929 if ((flags & MAP_FIXED) == 0) {
930 alloc_flags = VM_FLAGS_ANYWHERE;
931 user_addr = vm_map_round_page(user_addr,
932 vm_map_page_mask(user_map));
933 } else {
934 if (user_addr != vm_map_round_page(user_addr,
935 vm_map_page_mask(user_map)))
936 return (EINVAL);
937 /*
938 * We do not get rid of the existing mappings here because
939 * it wouldn't be atomic (see comment in mmap()). We let
940 * Mach VM know that we want it to replace any existing
941 * mapping with the new one.
942 */
943 alloc_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
944 }
945 docow = FALSE;
946
947 mapped_size = 0;
948
949 /* reserve the entire space first... */
950 kret = vm_map_enter_mem_object(user_map,
951 &user_addr,
952 user_size,
953 0,
954 alloc_flags,
955 IPC_PORT_NULL,
956 0,
957 FALSE,
958 VM_PROT_NONE,
959 VM_PROT_NONE,
960 VM_INHERIT_NONE);
961 user_start_addr = user_addr;
962 if (kret != KERN_SUCCESS) {
963 goto out;
964 }
965
966 /* ... and overwrite with the real mappings */
967 for (map_pos = 0, pshmobj = pinfo->pshm_memobjects;
968 user_size != 0;
969 map_pos += pshmobj->pshmo_size, pshmobj = pshmobj->pshmo_next) {
970 if (pshmobj == NULL) {
971 /* nothing there to map !? */
972 goto out;
973 }
974 if (file_pos >= map_pos + pshmobj->pshmo_size) {
975 continue;
976 }
977 map_size = pshmobj->pshmo_size - (file_pos - map_pos);
978 if (map_size > user_size) {
979 map_size = user_size;
980 }
981 kret = vm_map_enter_mem_object(
982 user_map,
983 &user_addr,
984 map_size,
985 0,
986 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
987 pshmobj->pshmo_memobject,
988 file_pos - map_pos,
989 docow,
990 prot,
991 VM_PROT_DEFAULT,
992 VM_INHERIT_SHARE);
993 if (kret != KERN_SUCCESS)
994 goto out;
995
996 user_addr += map_size;
997 user_size -= map_size;
998 mapped_size += map_size;
999 file_pos += map_size;
1000 }
1001
1002 PSHM_SUBSYS_LOCK();
1003 pnode->mapp_addr = user_start_addr;
1004 pnode->map_size = mapped_size;
1005 pinfo->pshm_flags |= (PSHM_MAPPED | PSHM_INUSE);
1006 PSHM_SUBSYS_UNLOCK();
1007 out:
1008 if (kret != KERN_SUCCESS) {
1009 if (mapped_size != 0) {
1010 (void) mach_vm_deallocate(current_map(),
1011 user_start_addr,
1012 mapped_size);
1013 }
1014 }
1015
1016 switch (kret) {
1017 case KERN_SUCCESS:
1018 *retval = (user_start_addr + pageoff);
1019 return (0);
1020 case KERN_INVALID_ADDRESS:
1021 case KERN_NO_SPACE:
1022 return (ENOMEM);
1023 case KERN_PROTECTION_FAILURE:
1024 return (EACCES);
1025 default:
1026 return (EINVAL);
1027 }
1028
1029 }
1030
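/*
 * Remove a region's name from the cache and drop the existence reference.
 * The backing memory objects and the pshminfo are destroyed here only if
 * no file references remain; otherwise pshm_close() destroys them when
 * the last descriptor goes away. Called with the subsystem lock held.
 */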
1031 static int
1032 pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache)
1033 {
1034 struct pshmobj *pshmobj, *pshmobj_next;
1035
1036 PSHM_SUBSYS_ASSERT_HELD();
1037
1038 if (!pinfo || !pcache)
1039 return EINVAL;
1040
1041 if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) == 0)
1042 return EINVAL;
1043
1044 if (pinfo->pshm_flags & PSHM_INDELETE)
1045 return 0;
1046
1047 pinfo->pshm_flags |= PSHM_INDELETE;
1048 pinfo->pshm_usecount--;
1049
1050 pshm_cache_delete(pcache);
1051 pinfo->pshm_flags |= PSHM_REMOVED;
1052
1053 /* release the existence reference */
1054 if (!pinfo->pshm_usecount) {
1055 #if CONFIG_MACF
1056 mac_posixshm_label_destroy(pinfo);
1057 #endif
1058 /*
1059 * If this is the last reference going away on the object,
1060 * then we need to destroy the backing object. The name
1061 * has an implied but uncounted reference on the object,
1062 * once it's created, since it's used as a rendezvous, and
1063 * therefore may be subsequently reopened.
1064 */
1065 for (pshmobj = pinfo->pshm_memobjects;
1066 pshmobj != NULL;
1067 pshmobj = pshmobj_next) {
1068 mach_memory_entry_port_release(pshmobj->pshmo_memobject);
1069 pshmobj_next = pshmobj->pshmo_next;
1070 FREE(pshmobj, M_SHM);
1071 }
1072 FREE(pinfo,M_SHM);
1073 }
1074
1075 FREE(pcache, M_SHM);
1076
1077 return 0;
1078 }
1079
1080 int
1081 shm_unlink(proc_t p, struct shm_unlink_args *uap, __unused int32_t *retval)
1082 {
1083 size_t i;
1084 char * pnbuf;
1085 size_t pathlen;
1086 int error = 0;
1087
1088 struct pshmname nd;
1089 struct pshminfo *pinfo;
1090 char * nameptr;
1091 char * cp;
1092 struct pshmcache *pcache = PSHMCACHE_NULL;
1093
1094 pinfo = PSHMINFO_NULL;
1095
1096
1097 MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
1098 if (pnbuf == NULL) {
1099 return(ENOSPC); /* XXX non-standard */
1100 }
1101 pathlen = MAXPATHLEN;
1102 error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);
1103 if (error) {
1104 goto bad;
1105 }
1106 AUDIT_ARG(text, pnbuf);
1107 if (pathlen > PSHMNAMLEN) {
1108 error = ENAMETOOLONG;
1109 goto bad;
1110 }
1111
1112 nameptr = pnbuf;
1113
1114 #ifdef PSXSHM_NAME_RESTRICT
1115 if (*nameptr == '/') {
1116 while (*(nameptr++) == '/') {
1117 pathlen--;
1118 error = EINVAL;
1119 goto bad;
1120 }
1121 } else {
1122 error = EINVAL;
1123 goto bad;
1124 }
1125 #endif /* PSXSHM_NAME_RESTRICT */
1126
1127 nd.pshm_nameptr = nameptr;
1128 nd.pshm_namelen = pathlen;
1129 nd.pshm_hash = 0;
1130
1131 for (cp = nameptr, i=1; *cp != 0 && i <= pathlen; i++, cp++) {
1132 nd.pshm_hash += (unsigned char)*cp * i;
1133 }
1134
1135 PSHM_SUBSYS_LOCK();
1136 error = pshm_cache_search(&pinfo, &nd, &pcache, 0);
1137
1138 /* During unlink, a lookup failure also implies ENOENT */
1139 if (error != PSHMCACHE_FOUND) {
1140 PSHM_SUBSYS_UNLOCK();
1141 error = ENOENT;
1142 goto bad;
1143
1144 }
1145
1146 if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED))==0) {
1147 PSHM_SUBSYS_UNLOCK();
1148 error = EINVAL;
1149 goto bad;
1150 }
1151
1152 if (pinfo->pshm_flags & PSHM_ALLOCATING) {
1153 /* XXX should we wait for flag to clear and then proceed ? */
1154 PSHM_SUBSYS_UNLOCK();
1155 error = EAGAIN;
1156 goto bad;
1157 }
1158
1159 if (pinfo->pshm_flags & PSHM_INDELETE) {
1160 PSHM_SUBSYS_UNLOCK();
1161 error = 0;
1162 goto bad;
1163 }
1164
1165 #if CONFIG_MACF
1166 error = mac_posixshm_check_unlink(kauth_cred_get(), pinfo, nameptr);
1167 if (error) {
1168 PSHM_SUBSYS_UNLOCK();
1169 goto bad;
1170 }
1171 #endif
1172
1173 AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid,
1174 pinfo->pshm_mode);
1175
1176 /*
1177  * Following file semantics, unlink is allowed only for users with
1178  * write permission.
1179  */
1180 if ( (error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p)) ) {
1181 PSHM_SUBSYS_UNLOCK();
1182 goto bad;
1183 }
1184
1185 error = pshm_unlink_internal(pinfo, pcache);
1186 PSHM_SUBSYS_UNLOCK();
1187
1188 bad:
1189 FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
1190 return error;
1191 }
1192
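/*
 * Drop one file reference on a pshminfo. If the region has already been
 * unlinked (PSHM_REMOVED) and this was the last reference, the backing
 * memory objects and the pshminfo itself are destroyed.
 */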
1193 /* already called locked */
1194 static int
1195 pshm_close(struct pshminfo *pinfo, int dropref)
1196 {
1197 int error = 0;
1198 struct pshmobj *pshmobj, *pshmobj_next;
1199
1200 /*
1201 * If we are dropping the reference we took on the cache object, don't
1202 * enforce the allocation requirement.
1203 */
1204 if ( !dropref && ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED)) {
1205 return(EINVAL);
1206 }
1207 #if DIAGNOSTIC
1208 if(!pinfo->pshm_usecount) {
1209 kprintf("negative usecount in pshm_close\n");
1210 }
1211 #endif /* DIAGNOSTIC */
1212 pinfo->pshm_usecount--; /* release this fd's reference */
1213
1214 if ((pinfo->pshm_flags & PSHM_REMOVED) && !pinfo->pshm_usecount) {
1215 #if CONFIG_MACF
1216 mac_posixshm_label_destroy(pinfo);
1217 #endif
1218 PSHM_SUBSYS_UNLOCK();
1219 /*
1220 * If this is the last reference going away on the object,
1221 * then we need to destroy the backing object.
1222 */
1223 for (pshmobj = pinfo->pshm_memobjects;
1224 pshmobj != NULL;
1225 pshmobj = pshmobj_next) {
1226 mach_memory_entry_port_release(pshmobj->pshmo_memobject);
1227 pshmobj_next = pshmobj->pshmo_next;
1228 FREE(pshmobj, M_SHM);
1229 }
1230 PSHM_SUBSYS_LOCK();
1231 FREE(pinfo,M_SHM);
1232 }
1233 return (error);
1234 }
1235
1236 /* vfs_context_t passed to match prototype for struct fileops */
1237 static int
1238 pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx)
1239 {
1240 int error = EINVAL;
1241 struct pshmnode *pnode;
1242
1243 PSHM_SUBSYS_LOCK();
1244
1245 if ((pnode = (struct pshmnode *)fg->fg_data) != NULL) {
1246 if (pnode->pinfo != PSHMINFO_NULL) {
1247 error = pshm_close(pnode->pinfo, 0);
1248 }
1249 FREE(pnode, M_SHM);
1250 }
1251
1252 PSHM_SUBSYS_UNLOCK();
1253
1254 return(error);
1255 }
1256
1257 static int
1258 pshm_read(__unused struct fileproc *fp, __unused struct uio *uio,
1259 __unused int flags, __unused vfs_context_t ctx)
1260 {
1261 return(ENOTSUP);
1262 }
1263
1264 static int
1265 pshm_write(__unused struct fileproc *fp, __unused struct uio *uio,
1266 __unused int flags, __unused vfs_context_t ctx)
1267 {
1268 return(ENOTSUP);
1269 }
1270
1271 static int
1272 pshm_ioctl(__unused struct fileproc *fp, __unused u_long com,
1273 __unused caddr_t data, __unused vfs_context_t ctx)
1274 {
1275 return(ENOTSUP);
1276 }
1277
1278 static int
1279 pshm_select(__unused struct fileproc *fp, __unused int which, __unused void *wql,
1280 __unused vfs_context_t ctx)
1281 {
1282 return(ENOTSUP);
1283 }
1284
1285 static int
1286 pshm_kqfilter(__unused struct fileproc *fp, struct knote *kn,
1287 __unused vfs_context_t ctx)
1288 {
1289 kn->kn_flags = EV_ERROR;
1290 kn->kn_data = ENOTSUP;
1291 return 0;
1292 }
1293
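/*
 * Fill in a pshm_info record for the proc_info interface; the caller
 * (outside this file, presumably the PROC_PIDFDPSHMINFO path) supplies
 * the destination buffer.
 */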
1294 int
1295 fill_pshminfo(struct pshmnode * pshm, struct pshm_info * info)
1296 {
1297 struct pshminfo *pinfo;
1298 struct vinfo_stat *sb;
1299
1300 PSHM_SUBSYS_LOCK();
1301 if ((pinfo = pshm->pinfo) == PSHMINFO_NULL){
1302 PSHM_SUBSYS_UNLOCK();
1303 return(EINVAL);
1304 }
1305
1306 sb = &info->pshm_stat;
1307
1308 bzero(sb, sizeof(struct vinfo_stat));
1309 sb->vst_mode = pinfo->pshm_mode;
1310 sb->vst_uid = pinfo->pshm_uid;
1311 sb->vst_gid = pinfo->pshm_gid;
1312 sb->vst_size = pinfo->pshm_length;
1313
1314 info->pshm_mappaddr = pshm->mapp_addr;
1315 bcopy(&pinfo->pshm_name[0], &info->pshm_name[0], PSHMNAMLEN+1);
1316
1317 PSHM_SUBSYS_UNLOCK();
1318 return(0);
1319 }
1320
1321 #if CONFIG_MACF
1322 void
1323 pshm_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx)
1324 {
1325 struct pshmnode *pnode;
1326 struct pshminfo *pshm;
1327
1328 PSHM_SUBSYS_LOCK();
1329 pnode = (struct pshmnode *)fp->f_fglob->fg_data;
1330 if (pnode != NULL) {
1331 pshm = pnode->pinfo;
1332 if (pshm != NULL)
1333 mac_posixshm_vnode_label_associate(
1334 vfs_context_ucred(ctx), pshm, pshm->pshm_label,
1335 vp, vp->v_label);
1336 }
1337 PSHM_SUBSYS_UNLOCK();
1338 }
1339 #endif