]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/posix_shm.c
xnu-4903.241.1.tar.gz
[apple/xnu.git] / bsd / kern / posix_shm.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1990, 1996-1998 Apple Computer, Inc.
30 * All Rights Reserved.
31 */
32 /*
33 * posix_shm.c : Support for POSIX shared memory APIs
34 *
35 * File: posix_shm.c
36 * Author: Ananthakrishna Ramesh
37 *
38 * HISTORY
39 * 2-Sep-1999 A.Ramesh
40 * Created for MacOSX
41 *
42 */
43 /*
44 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
45 * support for mandatory and extensible security protections. This notice
46 * is included in support of clause 2.2 (b) of the Apple Public License,
47 * Version 2.0.
48 */
49
50 #include <sys/cdefs.h>
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/file_internal.h>
55 #include <sys/filedesc.h>
56 #include <sys/stat.h>
57 #include <sys/proc_internal.h>
58 #include <sys/kauth.h>
59 #include <sys/mount.h>
60 #include <sys/namei.h>
61 #include <sys/vnode.h>
62 #include <sys/vnode_internal.h>
63 #include <sys/ioctl.h>
64 #include <sys/tty.h>
65 #include <sys/malloc.h>
66 #include <sys/mman.h>
67 #include <sys/stat.h>
68 #include <sys/sysproto.h>
69 #include <sys/proc_info.h>
70 #include <security/audit/audit.h>
71
72 #if CONFIG_MACF
73 #include <security/mac_framework.h>
74 #endif
75
76 #include <mach/mach_types.h>
77 #include <mach/mach_vm.h>
78 #include <mach/vm_map.h>
79 #include <mach/vm_prot.h>
80 #include <mach/vm_inherit.h>
81 #include <mach/kern_return.h>
82 #include <mach/memory_object_control.h>
83
84 #include <vm/vm_map.h>
85 #include <vm/vm_protos.h>
86
87 #define f_flag f_fglob->fg_flag
88 #define f_type f_fglob->fg_ops->fo_type
89 #define f_msgcount f_fglob->fg_msgcount
90 #define f_cred f_fglob->fg_cred
91 #define f_ops f_fglob->fg_ops
92 #define f_offset f_fglob->fg_offset
93 #define f_data f_fglob->fg_data
94 #define PSHMNAMLEN 31 /* maximum name segment length we bother with */
95
/*
 * One backing Mach memory entry for a shared memory segment.  Segments
 * larger than ANON_MAX_SIZE are backed by a chain of these (see
 * pshm_truncate()).
 */
struct pshmobj {
	void *			pshmo_memobject;	/* Mach memory entry port */
	memory_object_size_t	pshmo_size;		/* size of this chunk */
	struct pshmobj *	pshmo_next;		/* next chunk, or NULL */
};
101
/*
 * In-kernel state for one POSIX shared memory segment.  pshm_usecount
 * holds one "existence" reference taken at create time plus one per
 * open file / per addref cache lookup; the object is freed when it
 * drops to zero (see pshm_close() and pshm_unlink_internal()).
 */
struct pshminfo {
	unsigned int	pshm_flags;	/* PSHM_* state bits below */
	unsigned int	pshm_usecount;	/* references; freed when 0 */
	off_t		pshm_length;	/* page-rounded size in bytes */
	mode_t		pshm_mode;	/* permission bits */
	uid_t		pshm_uid;	/* owner uid at creation */
	gid_t		pshm_gid;	/* owner gid at creation */
	char		pshm_name[PSHMNAMLEN + 1];	/* segment name */
	struct pshmobj *pshm_memobjects;	/* chain of backing memory entries */
#if DIAGNOSTIC
	unsigned int	pshm_readcount;
	unsigned int	pshm_writecount;
	proc_t		pshm_proc;
#endif /* DIAGNOSTIC */
	struct label*	pshm_label;	/* MAC framework label */
};
118 #define PSHMINFO_NULL (struct pshminfo *)0
119
120 #define PSHM_NONE 0x001
121 #define PSHM_DEFINED 0x002
122 #define PSHM_ALLOCATED 0x004
123 #define PSHM_MAPPED 0x008
124 #define PSHM_INUSE 0x010
125 #define PSHM_REMOVED 0x020
126 #define PSHM_INCREATE 0x040
127 #define PSHM_INDELETE 0x080
128 #define PSHM_ALLOCATING 0x100
129
/*
 * One name-cache entry, hashed via PSHMHASH().  A NULL pshminfo marks a
 * "negative" entry (name known not to exist) — see pshm_cache_search().
 */
struct pshmcache {
	LIST_ENTRY(pshmcache) pshm_hash;	/* hash chain */
	struct pshminfo *pshminfo;	/* segment the name refers to */
	int pshm_nlen;		/* length of name */
	char pshm_name[PSHMNAMLEN + 1];	/* segment name */
};
136 #define PSHMCACHE_NULL (struct pshmcache *)0
137
138 #define PSHMCACHE_NOTFOUND (0)
139 #define PSHMCACHE_FOUND (-1)
140 #define PSHMCACHE_NEGATIVE (ENOENT)
141
/* Name-cache effectiveness counters, updated under the subsystem lock. */
struct pshmstats {
	long	goodhits;	/* hits that we can really use */
	long	neghits;	/* negative hits that we can use */
	long	badhits;	/* hits we must drop */
	long	falsehits;	/* hits with id mismatch */
	long	miss;		/* misses */
	long	longnames;	/* long names that ignore cache */
};
150
/* A looked-up segment name plus its precomputed hash (see shm_open()). */
struct pshmname {
	char	*pshm_nameptr;	/* pointer to looked up name */
	long	pshm_namelen;	/* length of looked up component */
	u_long	pshm_hash;	/* hash value of looked up name */
};
156
/*
 * Per-open-file state; hangs off fileglob fg_data.  Points at the shared
 * pshminfo and remembers the last mmap() placement.
 */
struct pshmnode {
	off_t		mapp_addr;	/* user address of last mapping */
	user_size_t	map_size;	/* XXX unused ? */
	struct pshminfo	*pinfo;		/* underlying segment */
	unsigned int	pshm_usecount;
#if DIAGNOSTIC
	unsigned int readcnt;
	unsigned int writecnt;
#endif
};
167 #define PSHMNODE_NULL (struct pshmnode *)0
168
169
170 #define PSHMHASH(pnp) \
171 (&pshmhashtbl[(pnp)->pshm_hash & pshmhash])
172
173 LIST_HEAD(pshmhashhead, pshmcache) *pshmhashtbl; /* Hash Table */
174 u_long pshmhash; /* size of hash table - 1 */
175 long pshmnument; /* number of cache entries allocated */
176 struct pshmstats pshmstats; /* cache effectiveness statistics */
177
178 static int pshm_read (struct fileproc *fp, struct uio *uio,
179 int flags, vfs_context_t ctx);
180 static int pshm_write (struct fileproc *fp, struct uio *uio,
181 int flags, vfs_context_t ctx);
182 static int pshm_ioctl (struct fileproc *fp, u_long com,
183 caddr_t data, vfs_context_t ctx);
184 static int pshm_select (struct fileproc *fp, int which, void *wql, vfs_context_t ctx);
185 static int pshm_close(struct pshminfo *pinfo, int dropref);
186 static int pshm_closefile (struct fileglob *fg, vfs_context_t ctx);
187
188 static int pshm_kqfilter(struct fileproc *fp, struct knote *kn,
189 struct kevent_internal_s *kev, vfs_context_t ctx);
190
191 int pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, proc_t p);
192 int pshm_cache_purge_all(proc_t p);
193
194 static int pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp);
195 static void pshm_cache_delete(struct pshmcache *pcp);
196 static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
197 struct pshmcache **pcache, int addref);
198 static int pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache);
199
200 static const struct fileops pshmops = {
201 .fo_type = DTYPE_PSXSHM,
202 .fo_read = pshm_read,
203 .fo_write = pshm_write,
204 .fo_ioctl = pshm_ioctl,
205 .fo_select = pshm_select,
206 .fo_close = pshm_closefile,
207 .fo_kqfilter = pshm_kqfilter,
208 .fo_drain = NULL,
209 };
210
211 static lck_grp_t *psx_shm_subsys_lck_grp;
212 static lck_grp_attr_t *psx_shm_subsys_lck_grp_attr;
213 static lck_attr_t *psx_shm_subsys_lck_attr;
214 static lck_mtx_t psx_shm_subsys_mutex;
215
216 #define PSHM_SUBSYS_LOCK() lck_mtx_lock(& psx_shm_subsys_mutex)
217 #define PSHM_SUBSYS_UNLOCK() lck_mtx_unlock(& psx_shm_subsys_mutex)
218 #define PSHM_SUBSYS_ASSERT_HELD() LCK_MTX_ASSERT(&psx_shm_subsys_mutex, LCK_MTX_ASSERT_OWNED)
219
220
221 /* Initialize the mutex governing access to the posix shm subsystem */
222 __private_extern__ void
223 pshm_lock_init( void )
224 {
225
226 psx_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
227
228 psx_shm_subsys_lck_grp = lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr);
229
230 psx_shm_subsys_lck_attr = lck_attr_alloc_init();
231 lck_mtx_init(& psx_shm_subsys_mutex, psx_shm_subsys_lck_grp, psx_shm_subsys_lck_attr);
232 }
233
/*
 * Look up an entry in the cache.
 *
 * Returns PSHMCACHE_FOUND (-1) on a positive hit, PSHMCACHE_NEGATIVE
 * (ENOENT) when the cache records that the name does not exist, and
 * PSHMCACHE_NOTFOUND (0) when the name is not in the cache at all.
 */
243
244 static int
245 pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp,
246 struct pshmcache **pcache, int addref)
247 {
248 struct pshmcache *pcp, *nnp;
249 struct pshmhashhead *pcpp;
250
251 if (pnp->pshm_namelen > PSHMNAMLEN) {
252 pshmstats.longnames++;
253 return PSHMCACHE_NOTFOUND;
254 }
255
256 pcpp = PSHMHASH(pnp);
257 for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) {
258 nnp = pcp->pshm_hash.le_next;
259 if (pcp->pshm_nlen == pnp->pshm_namelen &&
260 !bcmp(pcp->pshm_name, pnp->pshm_nameptr, (u_int)pcp-> pshm_nlen))
261 break;
262 }
263
264 if (pcp == 0) {
265 pshmstats.miss++;
266 return PSHMCACHE_NOTFOUND;
267 }
268
269 /* We found a "positive" match, return the vnode */
270 if (pcp->pshminfo) {
271 pshmstats.goodhits++;
272 /* TOUCH(ncp); */
273 *pshmp = pcp->pshminfo;
274 *pcache = pcp;
275 if (addref)
276 pcp->pshminfo->pshm_usecount++;
277 return PSHMCACHE_FOUND;
278 }
279
280 /*
281 * We found a "negative" match, ENOENT notifies client of this match.
282 */
283 pshmstats.neghits++;
284 return PSHMCACHE_NEGATIVE;
285 }
286
287 /*
288 * Add an entry to the cache.
289 * XXX should be static?
290 */
291 static int
292 pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp)
293 {
294 struct pshmhashhead *pcpp;
295 struct pshminfo *dpinfo;
296 struct pshmcache *dpcp;
297
298 #if DIAGNOSTIC
299 if (pnp->pshm_namelen > PSHMNAMLEN)
300 panic("cache_enter: name too long");
301 #endif
302
303
304 /* if the entry has already been added by some one else return */
305 if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == PSHMCACHE_FOUND) {
306 return EEXIST;
307 }
308 pshmnument++;
309
310 /*
311 * Fill in cache info, if vp is NULL this is a "negative" cache entry.
312 */
313 pcp->pshminfo = pshmp;
314 pcp->pshm_nlen = pnp->pshm_namelen;
315 bcopy(pnp->pshm_nameptr, pcp->pshm_name, (unsigned)pcp->pshm_nlen);
316 pcpp = PSHMHASH(pnp);
317 #if DIAGNOSTIC
318 {
319 struct pshmcache *p;
320
321 for (p = pcpp->lh_first; p != 0; p = p->pshm_hash.le_next)
322 if (p == pcp)
323 panic("cache_enter: duplicate");
324 }
325 #endif
326 LIST_INSERT_HEAD(pcpp, pcp, pshm_hash);
327 return 0;
328 }
329
330 /*
331 * Name cache initialization, from vfs_init() when we are booting
332 */
333 void
334 pshm_cache_init(void)
335 {
336 pshmhashtbl = hashinit(desiredvnodes / 8, M_SHM, &pshmhash);
337 }
338
/*
 * Invalidate every cache entry and unlink all of the segments they
 * name; used to tear down all user-created POSIX shm state at once.
 * Caller must be superuser.  (An earlier version of this comment
 * discussed vnode v_id recycling; it was copied from the vnode name
 * cache and never applied to this code, which simply removes and
 * frees each entry.)
 */
int
pshm_cache_purge_all(__unused proc_t p)
{
	struct pshmcache *pcp, *tmppcp;
	struct pshmhashhead *pcpp;
	int error = 0;

	/* only the superuser may tear down every segment in the system */
	if (kauth_cred_issuser(kauth_cred_get()) == 0)
		return EPERM;

	PSHM_SUBSYS_LOCK();
	/*
	 * Walk every hash bucket, unlinking each entry; the SAFE variant
	 * is required because pshm_unlink_internal() removes and frees
	 * the entry under us.
	 */
	for (pcpp = &pshmhashtbl[pshmhash]; pcpp >= pshmhashtbl; pcpp--) {
		LIST_FOREACH_SAFE(pcp, pcpp, pshm_hash, tmppcp) {
			assert(pcp->pshm_nlen);
			error = pshm_unlink_internal(pcp->pshminfo, pcp);
			if (error)
				goto out;
		}
	}
	/* on success every cache entry must have been removed */
	assert(pshmnument == 0);

out:
	PSHM_SUBSYS_UNLOCK();

	/* on error some entries remain cached; report how many */
	if (error)
		printf("%s: Error %d removing shm cache: %ld remain!\n",
		    __func__, error, pshmnument);
	return error;
}
377
/*
 * Remove one entry from the hash chain.  Caller holds the subsystem
 * lock and is responsible for freeing the entry afterwards.
 */
static void
pshm_cache_delete(struct pshmcache *pcp)
{
#if DIAGNOSTIC
	if (pcp->pshm_hash.le_prev == 0)
		panic("namecache purge le_prev");
	if (pcp->pshm_hash.le_next == pcp)
		panic("namecache purge le_next");
#endif /* DIAGNOSTIC */
	LIST_REMOVE(pcp, pshm_hash);
	/* poison le_prev so a double delete trips the check above */
	pcp->pshm_hash.le_prev = 0;
	pshmnument--;
}
391
392
393 int
394 shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval)
395 {
396 size_t i;
397 int indx, error;
398 struct pshmname nd;
399 struct pshminfo *pinfo;
400 struct fileproc *fp = NULL;
401 char *pnbuf = NULL;
402 struct pshminfo *new_pinfo = PSHMINFO_NULL;
403 struct pshmnode *new_pnode = PSHMNODE_NULL;
404 struct pshmcache *pcache = PSHMCACHE_NULL; /* ignored on return */
405 char * nameptr;
406 char * cp;
407 size_t pathlen, plen;
408 int fmode ;
409 int cmode = uap->mode;
410 int incache = 0;
411 struct pshmcache *pcp = NULL;
412
413 AUDIT_ARG(fflags, uap->oflag);
414 AUDIT_ARG(mode, uap->mode);
415
416 pinfo = PSHMINFO_NULL;
417
418 /*
419 * Preallocate everything we might need up front to avoid taking
420 * and dropping the lock, opening us up to race conditions.
421 */
422 MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
423 if (pnbuf == NULL) {
424 error = ENOSPC;
425 goto bad;
426 }
427
428 pathlen = MAXPATHLEN;
429 error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);
430 if (error) {
431 goto bad;
432 }
433 AUDIT_ARG(text, pnbuf);
434 if (pathlen > PSHMNAMLEN) {
435 error = ENAMETOOLONG;
436 goto bad;
437 }
438 #ifdef PSXSHM_NAME_RESTRICT
439 nameptr = pnbuf;
440 if (*nameptr == '/') {
441 while (*(nameptr++) == '/') {
442 plen--;
443 error = EINVAL;
444 goto bad;
445 }
446 } else {
447 error = EINVAL;
448 goto bad;
449 }
450 #endif /* PSXSHM_NAME_RESTRICT */
451
452 plen = pathlen;
453 nameptr = pnbuf;
454 nd.pshm_nameptr = nameptr;
455 nd.pshm_namelen = plen;
456 nd. pshm_hash =0;
457
458 for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) {
459 nd.pshm_hash += (unsigned char)*cp * i;
460 }
461
462 /*
463 * attempt to allocate a new fp; if unsuccessful, the fp will be
464 * left unmodified (NULL).
465 */
466 error = falloc(p, &fp, &indx, vfs_context_current());
467 if (error)
468 goto bad;
469
470 cmode &= ALLPERMS;
471
472 fmode = FFLAGS(uap->oflag);
473 if ((fmode & (FREAD | FWRITE)) == 0) {
474 error = EINVAL;
475 goto bad;
476 }
477
478 /*
479 * We allocate a new entry if we are less than the maximum
480 * allowed and the one at the front of the LRU list is in use.
481 * Otherwise we use the one at the front of the LRU list.
482 */
483 MALLOC(pcp, struct pshmcache *, sizeof(struct pshmcache), M_SHM, M_WAITOK|M_ZERO);
484 if (pcp == NULL) {
485 error = ENOSPC;
486 goto bad;
487 }
488
489 MALLOC(new_pinfo, struct pshminfo *, sizeof(struct pshminfo), M_SHM, M_WAITOK|M_ZERO);
490 if (new_pinfo == PSHMINFO_NULL) {
491 error = ENOSPC;
492 goto bad;
493 }
494 #if CONFIG_MACF
495 mac_posixshm_label_init(new_pinfo);
496 #endif
497
498 MALLOC(new_pnode, struct pshmnode *, sizeof(struct pshmnode), M_SHM, M_WAITOK|M_ZERO);
499 if (new_pnode == PSHMNODE_NULL) {
500 error = ENOSPC;
501 goto bad;
502 }
503
504 PSHM_SUBSYS_LOCK();
505
506 /*
507 * If we find the entry in the cache, this will take a reference,
508 * allowing us to unlock it for the permissions check.
509 */
510 error = pshm_cache_search(&pinfo, &nd, &pcache, 1);
511
512 PSHM_SUBSYS_UNLOCK();
513
514 if (error == PSHMCACHE_NEGATIVE) {
515 error = EINVAL;
516 goto bad;
517 }
518
519 if (error == PSHMCACHE_NOTFOUND) {
520 incache = 0;
521 if (fmode & O_CREAT) {
522 /* create a new one (commit the allocation) */
523 pinfo = new_pinfo;
524 pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE;
525 pinfo->pshm_usecount = 1; /* existence reference */
526 pinfo->pshm_mode = cmode;
527 pinfo->pshm_uid = kauth_getuid();
528 pinfo->pshm_gid = kauth_getgid();
529 bcopy(pnbuf, &pinfo->pshm_name[0], pathlen);
530 pinfo->pshm_name[pathlen]=0;
531 #if CONFIG_MACF
532 error = mac_posixshm_check_create(kauth_cred_get(), nameptr);
533 if (error) {
534 goto bad;
535 }
536 mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr);
537 #endif
538 }
539 } else {
540 incache = 1;
541 if (fmode & O_CREAT) {
542 /* already exists */
543 if ((fmode & O_EXCL)) {
544 AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
545 pinfo->pshm_gid,
546 pinfo->pshm_mode);
547
548 /* shm obj exists and opened O_EXCL */
549 error = EEXIST;
550 goto bad;
551 }
552
553 if( pinfo->pshm_flags & PSHM_INDELETE) {
554 error = ENOENT;
555 goto bad;
556 }
557 AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
558 pinfo->pshm_gid, pinfo->pshm_mode);
559 #if CONFIG_MACF
560 if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
561 goto bad;
562 }
563 #endif
564 if ( (error = pshm_access(pinfo, fmode, kauth_cred_get(), p)) ) {
565 goto bad;
566 }
567 }
568 }
569 if (!(fmode & O_CREAT)) {
570 if (!incache) {
571 /* O_CREAT is not set and the object does not exist */
572 error = ENOENT;
573 goto bad;
574 }
575 if( pinfo->pshm_flags & PSHM_INDELETE) {
576 error = ENOENT;
577 goto bad;
578 }
579 #if CONFIG_MACF
580 if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) {
581 goto bad;
582 }
583 #endif
584
585 if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {
586 goto bad;
587 }
588 }
589 if (fmode & O_TRUNC) {
590 error = EINVAL;
591 goto bad;
592 }
593
594
595 PSHM_SUBSYS_LOCK();
596
597 #if DIAGNOSTIC
598 if (fmode & FWRITE)
599 pinfo->pshm_writecount++;
600 if (fmode & FREAD)
601 pinfo->pshm_readcount++;
602 #endif
603 if (!incache) {
604 /* if successful, this will consume the pcp */
605 if ( (error = pshm_cache_add(pinfo, &nd, pcp)) ) {
606 goto bad_locked;
607 }
608 /*
609 * add reference for the new entry; otherwise, we obtained
610 * one from the cache hit earlier.
611 */
612 pinfo->pshm_usecount++;
613 }
614 pinfo->pshm_flags &= ~PSHM_INCREATE;
615 new_pnode->pinfo = pinfo;
616
617 PSHM_SUBSYS_UNLOCK();
618
619 /*
620 * if incache, we did not use the new pcp or new_pinfo and must
621 * free them
622 */
623 if (incache) {
624 FREE(pcp, M_SHM);
625
626 if (new_pinfo != PSHMINFO_NULL) {
627 #if CONFIG_MACF
628 mac_posixshm_label_destroy(new_pinfo);
629 #endif
630 FREE(new_pinfo, M_SHM);
631 }
632 }
633
634 proc_fdlock(p);
635 fp->f_flag = fmode & FMASK;
636 fp->f_ops = &pshmops;
637 fp->f_data = (caddr_t)new_pnode;
638 *fdflags(p, indx) |= UF_EXCLOSE;
639 procfdtbl_releasefd(p, indx, NULL);
640 fp_drop(p, indx, fp, 1);
641 proc_fdunlock(p);
642
643 *retval = indx;
644 FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
645 return (0);
646
647 bad_locked:
648 PSHM_SUBSYS_UNLOCK();
649 bad:
650 /*
651 * If we obtained the entry from the cache, we need to drop the
652 * reference; holding the reference may have prevented unlinking,
653 * so we need to call pshm_close() to get the full effect.
654 */
655 if (incache) {
656 PSHM_SUBSYS_LOCK();
657 pshm_close(pinfo, 1);
658 PSHM_SUBSYS_UNLOCK();
659 }
660
661 if (pcp != NULL)
662 FREE(pcp, M_SHM);
663
664 if (new_pnode != PSHMNODE_NULL)
665 FREE(new_pnode, M_SHM);
666
667 if (fp != NULL)
668 fp_free(p, indx, fp);
669
670 if (new_pinfo != PSHMINFO_NULL) {
671 #if CONFIG_MACF
672 mac_posixshm_label_destroy(new_pinfo);
673 #endif
674 FREE(new_pinfo, M_SHM);
675 }
676 if (pnbuf != NULL)
677 FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
678 return (error);
679 }
680
681
682 int
683 pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd,
684 off_t length, __unused int32_t *retval)
685 {
686 struct pshminfo * pinfo;
687 struct pshmnode * pnode ;
688 kern_return_t kret;
689 mem_entry_name_port_t mem_object;
690 mach_vm_size_t total_size, alloc_size;
691 memory_object_size_t mosize;
692 struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p;
693 vm_map_t user_map;
694 #if CONFIG_MACF
695 int error;
696 #endif
697
698 user_map = current_map();
699
700 if (fp->f_type != DTYPE_PSXSHM) {
701 return(EINVAL);
702 }
703
704
705 if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL )
706 return(EINVAL);
707
708 PSHM_SUBSYS_LOCK();
709 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
710 PSHM_SUBSYS_UNLOCK();
711 return(EINVAL);
712 }
713 if ((pinfo->pshm_flags & (PSHM_DEFINED|PSHM_ALLOCATING|PSHM_ALLOCATED))
714 != PSHM_DEFINED) {
715 PSHM_SUBSYS_UNLOCK();
716 return(EINVAL);
717 }
718 #if CONFIG_MACF
719 error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, length);
720 if (error) {
721 PSHM_SUBSYS_UNLOCK();
722 return(error);
723 }
724 #endif
725
726 pinfo->pshm_flags |= PSHM_ALLOCATING;
727 total_size = vm_map_round_page(length,
728 vm_map_page_mask(user_map));
729 pshmobj_next_p = &pinfo->pshm_memobjects;
730
731 for (alloc_size = 0;
732 alloc_size < total_size;
733 alloc_size += mosize) {
734
735 PSHM_SUBSYS_UNLOCK();
736
737 mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE);
738 kret = mach_make_memory_entry_64(
739 VM_MAP_NULL,
740 &mosize,
741 0,
742 MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
743 &mem_object,
744 0);
745
746 if (kret != KERN_SUCCESS)
747 goto out;
748
749 MALLOC(pshmobj, struct pshmobj *, sizeof (struct pshmobj),
750 M_SHM, M_WAITOK);
751 if (pshmobj == NULL) {
752 kret = KERN_NO_SPACE;
753 mach_memory_entry_port_release(mem_object);
754 mem_object = NULL;
755 goto out;
756 }
757
758 PSHM_SUBSYS_LOCK();
759
760 pshmobj->pshmo_memobject = (void *) mem_object;
761 pshmobj->pshmo_size = mosize;
762 pshmobj->pshmo_next = NULL;
763
764 *pshmobj_next_p = pshmobj;
765 pshmobj_next_p = &pshmobj->pshmo_next;
766 }
767
768 pinfo->pshm_flags |= PSHM_ALLOCATED;
769 pinfo->pshm_flags &= ~(PSHM_ALLOCATING);
770 pinfo->pshm_length = total_size;
771 PSHM_SUBSYS_UNLOCK();
772 return(0);
773
774 out:
775 PSHM_SUBSYS_LOCK();
776 for (pshmobj = pinfo->pshm_memobjects;
777 pshmobj != NULL;
778 pshmobj = pshmobj_next) {
779 pshmobj_next = pshmobj->pshmo_next;
780 mach_memory_entry_port_release(pshmobj->pshmo_memobject);
781 FREE(pshmobj, M_SHM);
782 }
783 pinfo->pshm_memobjects = NULL;
784 pinfo->pshm_flags &= ~PSHM_ALLOCATING;
785 PSHM_SUBSYS_UNLOCK();
786
787 switch (kret) {
788 case KERN_INVALID_ADDRESS:
789 case KERN_NO_SPACE:
790 return (ENOMEM);
791 case KERN_PROTECTION_FAILURE:
792 return (EACCES);
793 default:
794 return (EINVAL);
795
796 }
797 }
798
799 int
800 pshm_stat(struct pshmnode *pnode, void *ub, int isstat64)
801 {
802 struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */
803 struct stat64 * sb64 = (struct stat64 *)0; /* warning avoidance ; protected by isstat64 */
804 struct pshminfo *pinfo;
805 #if CONFIG_MACF
806 int error;
807 #endif
808
809 PSHM_SUBSYS_LOCK();
810 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL){
811 PSHM_SUBSYS_UNLOCK();
812 return(EINVAL);
813 }
814
815 #if CONFIG_MACF
816 error = mac_posixshm_check_stat(kauth_cred_get(), pinfo);
817 if (error) {
818 PSHM_SUBSYS_UNLOCK();
819 return(error);
820 }
821 #endif
822
823 if (isstat64 != 0) {
824 sb64 = (struct stat64 *)ub;
825 bzero(sb64, sizeof(struct stat64));
826 sb64->st_mode = pinfo->pshm_mode;
827 sb64->st_uid = pinfo->pshm_uid;
828 sb64->st_gid = pinfo->pshm_gid;
829 sb64->st_size = pinfo->pshm_length;
830 } else {
831 sb = (struct stat *)ub;
832 bzero(sb, sizeof(struct stat));
833 sb->st_mode = pinfo->pshm_mode;
834 sb->st_uid = pinfo->pshm_uid;
835 sb->st_gid = pinfo->pshm_gid;
836 sb->st_size = pinfo->pshm_length;
837 }
838 PSHM_SUBSYS_UNLOCK();
839
840 return(0);
841 }
842
843 /*
844 * This is called only from shm_open which holds pshm_lock();
845 * XXX This code is repeated many times
846 */
847 int
848 pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, __unused proc_t p)
849 {
850 int mode_req = ((mode & FREAD) ? S_IRUSR : 0) |
851 ((mode & FWRITE) ? S_IWUSR : 0);
852
853 /* Otherwise, user id 0 always gets access. */
854 if (!suser(cred, NULL))
855 return (0);
856
857 return(posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req));
858 }
859
860 int
861 pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct fileproc *fp, off_t pageoff)
862 {
863 vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr;
864 vm_map_size_t user_size = (vm_map_size_t)uap->len ;
865 vm_map_offset_t user_start_addr;
866 vm_map_size_t map_size, mapped_size;
867 int prot = uap->prot;
868 int max_prot = VM_PROT_DEFAULT;
869 int flags = uap->flags;
870 vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
871 vm_object_offset_t map_pos;
872 vm_map_t user_map;
873 int alloc_flags;
874 vm_map_kernel_flags_t vmk_flags;
875 boolean_t docow;
876 kern_return_t kret;
877 struct pshminfo * pinfo;
878 struct pshmnode * pnode;
879 struct pshmobj * pshmobj;
880 #if CONFIG_MACF
881 int error;
882 #endif
883
884 if (user_size == 0)
885 return(0);
886
887 if ((flags & MAP_SHARED) == 0)
888 return(EINVAL);
889
890
891 /* Can't allow write permission if the shm_open() didn't */
892 if (!(fp->f_flag & FWRITE)) {
893 if (prot & VM_PROT_WRITE) {
894 return EPERM;
895 }
896 max_prot &= ~VM_PROT_WRITE;
897 }
898
899 if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL )
900 return(EINVAL);
901
902 PSHM_SUBSYS_LOCK();
903 if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) {
904 PSHM_SUBSYS_UNLOCK();
905 return(EINVAL);
906 }
907
908 if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) {
909 PSHM_SUBSYS_UNLOCK();
910 return(EINVAL);
911 }
912 if (user_size > (vm_map_size_t)pinfo->pshm_length) {
913 PSHM_SUBSYS_UNLOCK();
914 return(EINVAL);
915 }
916 vm_map_size_t end_pos = 0;
917 if (os_add_overflow(user_size, file_pos, &end_pos)) {
918 PSHM_SUBSYS_UNLOCK();
919 return(EINVAL);
920 }
921 if (end_pos > (vm_map_size_t)pinfo->pshm_length) {
922 PSHM_SUBSYS_UNLOCK();
923 return(EINVAL);
924 }
925 if ((pshmobj = pinfo->pshm_memobjects) == NULL) {
926 PSHM_SUBSYS_UNLOCK();
927 return(EINVAL);
928 }
929
930 #if CONFIG_MACF
931 error = mac_posixshm_check_mmap(kauth_cred_get(), pinfo, prot, flags);
932 if (error) {
933 PSHM_SUBSYS_UNLOCK();
934 return(error);
935 }
936 #endif
937
938 PSHM_SUBSYS_UNLOCK();
939 user_map = current_map();
940
941 if ((flags & MAP_FIXED) == 0) {
942 alloc_flags = VM_FLAGS_ANYWHERE;
943 user_addr = vm_map_round_page(user_addr,
944 vm_map_page_mask(user_map));
945 } else {
946 if (user_addr != vm_map_round_page(user_addr,
947 vm_map_page_mask(user_map)))
948 return (EINVAL);
949 /*
950 * We do not get rid of the existing mappings here because
951 * it wouldn't be atomic (see comment in mmap()). We let
952 * Mach VM know that we want it to replace any existing
953 * mapping with the new one.
954 */
955 alloc_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
956 }
957 docow = FALSE;
958
959 mapped_size = 0;
960 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
961 /* reserve the entire space first... */
962 kret = vm_map_enter_mem_object(user_map,
963 &user_addr,
964 user_size,
965 0,
966 alloc_flags,
967 vmk_flags,
968 VM_KERN_MEMORY_NONE,
969 IPC_PORT_NULL,
970 0,
971 FALSE,
972 VM_PROT_NONE,
973 VM_PROT_NONE,
974 VM_INHERIT_NONE);
975 user_start_addr = user_addr;
976 if (kret != KERN_SUCCESS) {
977 goto out;
978 }
979
980 /* ... and overwrite with the real mappings */
981 for (map_pos = 0, pshmobj = pinfo->pshm_memobjects;
982 user_size != 0;
983 map_pos += pshmobj->pshmo_size, pshmobj = pshmobj->pshmo_next) {
984 if (pshmobj == NULL) {
985 /* nothing there to map !? */
986 goto out;
987 }
988 if (file_pos >= map_pos + pshmobj->pshmo_size) {
989 continue;
990 }
991 map_size = pshmobj->pshmo_size - (file_pos - map_pos);
992 if (map_size > user_size) {
993 map_size = user_size;
994 }
995 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
996 kret = vm_map_enter_mem_object(
997 user_map,
998 &user_addr,
999 map_size,
1000 0,
1001 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1002 vmk_flags,
1003 VM_KERN_MEMORY_NONE,
1004 pshmobj->pshmo_memobject,
1005 file_pos - map_pos,
1006 docow,
1007 prot,
1008 max_prot,
1009 VM_INHERIT_SHARE);
1010 if (kret != KERN_SUCCESS)
1011 goto out;
1012
1013 user_addr += map_size;
1014 user_size -= map_size;
1015 mapped_size += map_size;
1016 file_pos += map_size;
1017 }
1018
1019 PSHM_SUBSYS_LOCK();
1020 pnode->mapp_addr = user_start_addr;
1021 pnode->map_size = mapped_size;
1022 pinfo->pshm_flags |= (PSHM_MAPPED | PSHM_INUSE);
1023 PSHM_SUBSYS_UNLOCK();
1024 out:
1025 if (kret != KERN_SUCCESS) {
1026 if (mapped_size != 0) {
1027 (void) mach_vm_deallocate(current_map(),
1028 user_start_addr,
1029 mapped_size);
1030 }
1031 }
1032
1033 switch (kret) {
1034 case KERN_SUCCESS:
1035 *retval = (user_start_addr + pageoff);
1036 return (0);
1037 case KERN_INVALID_ADDRESS:
1038 case KERN_NO_SPACE:
1039 return (ENOMEM);
1040 case KERN_PROTECTION_FAILURE:
1041 return (EACCES);
1042 default:
1043 return (EINVAL);
1044 }
1045
1046 }
1047
1048 static int
1049 pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache)
1050 {
1051 struct pshmobj *pshmobj, *pshmobj_next;
1052
1053 PSHM_SUBSYS_ASSERT_HELD();
1054
1055 if (!pinfo || !pcache)
1056 return EINVAL;
1057
1058 if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) == 0)
1059 return EINVAL;
1060
1061 if (pinfo->pshm_flags & PSHM_INDELETE)
1062 return 0;
1063
1064 pinfo->pshm_flags |= PSHM_INDELETE;
1065 pinfo->pshm_usecount--;
1066
1067 pshm_cache_delete(pcache);
1068 pinfo->pshm_flags |= PSHM_REMOVED;
1069
1070 /* release the existence reference */
1071 if (!pinfo->pshm_usecount) {
1072 #if CONFIG_MACF
1073 mac_posixshm_label_destroy(pinfo);
1074 #endif
1075 /*
1076 * If this is the last reference going away on the object,
1077 * then we need to destroy the backing object. The name
1078 * has an implied but uncounted reference on the object,
1079 * once it's created, since it's used as a rendezvous, and
1080 * therefore may be subsequently reopened.
1081 */
1082 for (pshmobj = pinfo->pshm_memobjects;
1083 pshmobj != NULL;
1084 pshmobj = pshmobj_next) {
1085 mach_memory_entry_port_release(pshmobj->pshmo_memobject);
1086 pshmobj_next = pshmobj->pshmo_next;
1087 FREE(pshmobj, M_SHM);
1088 }
1089 FREE(pinfo,M_SHM);
1090 }
1091
1092 FREE(pcache, M_SHM);
1093
1094 return 0;
1095 }
1096
/*
 * shm_unlink() system call: remove a named segment.  The name is
 * removed immediately; the segment itself persists until the last
 * open descriptor is closed (see pshm_unlink_internal()).  Following
 * file semantics, write permission on the segment is required.
 */
int
shm_unlink(proc_t p, struct shm_unlink_args *uap, __unused int32_t *retval)
{
	size_t i;
	char * pnbuf;
	size_t pathlen;
	int error = 0;

	struct pshmname nd;
	struct pshminfo *pinfo;
	char * nameptr;
	char * cp;
	struct pshmcache *pcache = PSHMCACHE_NULL;

	pinfo = PSHMINFO_NULL;

	/* copy in and validate the user-supplied name */
	MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
	if (pnbuf == NULL) {
		return(ENOSPC);		/* XXX non-standard */
	}
	pathlen = MAXPATHLEN;
	error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);
	if (error) {
		goto bad;
	}
	AUDIT_ARG(text, pnbuf);
	if (pathlen > PSHMNAMLEN) {
		error = ENAMETOOLONG;
		goto bad;
	}

	nameptr = pnbuf;

#ifdef PSXSHM_NAME_RESTRICT
	if (*nameptr == '/') {
		while (*(nameptr++) == '/') {
			pathlen--;
			error = EINVAL;
			goto bad;
		}
	} else {
		error = EINVAL;
		goto bad;
	}
#endif /* PSXSHM_NAME_RESTRICT */

	nd.pshm_nameptr = nameptr;
	nd.pshm_namelen = pathlen;
	nd.pshm_hash = 0;

	/* same position-weighted hash used by shm_open() */
	for (cp = nameptr, i = 1; *cp != 0 && i <= pathlen; i++, cp++) {
		nd.pshm_hash += (unsigned char)*cp * i;
	}

	PSHM_SUBSYS_LOCK();
	error = pshm_cache_search(&pinfo, &nd, &pcache, 0);

	/* During unlink lookup failure also implies ENOENT */
	if (error != PSHMCACHE_FOUND) {
		PSHM_SUBSYS_UNLOCK();
		error = ENOENT;
		goto bad;

	}

	if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) == 0) {
		PSHM_SUBSYS_UNLOCK();
		error = EINVAL;
		goto bad;
	}

	if (pinfo->pshm_flags & PSHM_ALLOCATING) {
		/* XXX should we wait for flag to clear and then proceed ? */
		PSHM_SUBSYS_UNLOCK();
		error = EAGAIN;
		goto bad;
	}

	/* an unlink is already in flight; report success */
	if (pinfo->pshm_flags & PSHM_INDELETE) {
		PSHM_SUBSYS_UNLOCK();
		error = 0;
		goto bad;
	}

#if CONFIG_MACF
	error = mac_posixshm_check_unlink(kauth_cred_get(), pinfo, nameptr);
	if (error) {
		PSHM_SUBSYS_UNLOCK();
		goto bad;
	}
#endif

	AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid,
	    pinfo->pshm_mode);

	/*
	 * following file semantics, unlink should be allowed
	 * for users with write permission only.
	 */
	if ((error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p))) {
		PSHM_SUBSYS_UNLOCK();
		goto bad;
	}

	error = pshm_unlink_internal(pinfo, pcache);
	PSHM_SUBSYS_UNLOCK();

bad:
	FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
	return error;
}
1209
1210 /* already called locked */
/*
 * pshm_close: drop one file-descriptor reference on a pshminfo.
 *
 * Called with the PSHM subsystem lock held (see comment above); may
 * temporarily drop and retake that lock while releasing the backing
 * memory-entry ports.
 *
 * dropref != 0 means the caller is dropping the extra reference taken
 * on the cache object, in which case the PSHM_ALLOCATED check is
 * skipped.
 *
 * If the object has been unlinked (PSHM_REMOVED) and this was the last
 * reference, the backing memory objects and the pshminfo are freed.
 */
static int
pshm_close(struct pshminfo *pinfo, int dropref)
{
	int error = 0;
	struct pshmobj *pshmobj, *pshmobj_next;

	/*
	 * If we are dropping the reference we took on the cache object, don't
	 * enforce the allocation requirement.
	 */
	if ( !dropref && ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED)) {
		return(EINVAL);
	}
#if DIAGNOSTIC
	if(!pinfo->pshm_usecount) {
		kprintf("negative usecount in pshm_close\n");
	}
#endif /* DIAGNOSTIC */
	pinfo->pshm_usecount--; /* release this fd's reference */

	if ((pinfo->pshm_flags & PSHM_REMOVED) && !pinfo->pshm_usecount) {
#if CONFIG_MACF
		mac_posixshm_label_destroy(pinfo);
#endif
		PSHM_SUBSYS_UNLOCK();
		/*
		 * If this is the last reference going away on the object,
		 * then we need to destroy the backing object.
		 */
		/*
		 * NOTE(review): pinfo is walked here with the subsystem lock
		 * dropped; presumably safe because usecount just reached zero
		 * and PSHM_REMOVED keeps new lookups away — confirm.
		 */
		for (pshmobj = pinfo->pshm_memobjects;
		     pshmobj != NULL;
		     pshmobj = pshmobj_next) {
			mach_memory_entry_port_release(pshmobj->pshmo_memobject);
			pshmobj_next = pshmobj->pshmo_next;
			FREE(pshmobj, M_SHM);
		}
		PSHM_SUBSYS_LOCK();
		FREE(pinfo,M_SHM);
	}
	return (error);
}
1252
1253 /* vfs_context_t passed to match prototype for struct fileops */
1254 static int
1255 pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx)
1256 {
1257 int error = EINVAL;
1258 struct pshmnode *pnode;
1259
1260 PSHM_SUBSYS_LOCK();
1261
1262 if ((pnode = (struct pshmnode *)fg->fg_data) != NULL) {
1263 if (pnode->pinfo != PSHMINFO_NULL) {
1264 error = pshm_close(pnode->pinfo, 0);
1265 }
1266 FREE(pnode, M_SHM);
1267 }
1268
1269 PSHM_SUBSYS_UNLOCK();
1270
1271 return(error);
1272 }
1273
1274 static int
1275 pshm_read(__unused struct fileproc *fp, __unused struct uio *uio,
1276 __unused int flags, __unused vfs_context_t ctx)
1277 {
1278 return(ENOTSUP);
1279 }
1280
1281 static int
1282 pshm_write(__unused struct fileproc *fp, __unused struct uio *uio,
1283 __unused int flags, __unused vfs_context_t ctx)
1284 {
1285 return(ENOTSUP);
1286 }
1287
1288 static int
1289 pshm_ioctl(__unused struct fileproc *fp, __unused u_long com,
1290 __unused caddr_t data, __unused vfs_context_t ctx)
1291 {
1292 return(ENOTSUP);
1293 }
1294
1295 static int
1296 pshm_select(__unused struct fileproc *fp, __unused int which, __unused void *wql,
1297 __unused vfs_context_t ctx)
1298 {
1299 return(ENOTSUP);
1300 }
1301
1302 static int
1303 pshm_kqfilter(__unused struct fileproc *fp, struct knote *kn,
1304 __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx)
1305 {
1306 kn->kn_flags = EV_ERROR;
1307 kn->kn_data = ENOTSUP;
1308 return 0;
1309 }
1310
1311 int
1312 fill_pshminfo(struct pshmnode * pshm, struct pshm_info * info)
1313 {
1314 struct pshminfo *pinfo;
1315 struct vinfo_stat *sb;
1316
1317 PSHM_SUBSYS_LOCK();
1318 if ((pinfo = pshm->pinfo) == PSHMINFO_NULL){
1319 PSHM_SUBSYS_UNLOCK();
1320 return(EINVAL);
1321 }
1322
1323 sb = &info->pshm_stat;
1324
1325 bzero(sb, sizeof(struct vinfo_stat));
1326 sb->vst_mode = pinfo->pshm_mode;
1327 sb->vst_uid = pinfo->pshm_uid;
1328 sb->vst_gid = pinfo->pshm_gid;
1329 sb->vst_size = pinfo->pshm_length;
1330
1331 info->pshm_mappaddr = pshm->mapp_addr;
1332 bcopy(&pinfo->pshm_name[0], &info->pshm_name[0], PSHMNAMLEN+1);
1333
1334 PSHM_SUBSYS_UNLOCK();
1335 return(0);
1336 }
1337
1338 #if CONFIG_MACF
1339 void
1340 pshm_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx)
1341 {
1342 struct pshmnode *pnode;
1343 struct pshminfo *pshm;
1344
1345 PSHM_SUBSYS_LOCK();
1346 pnode = (struct pshmnode *)fp->f_fglob->fg_data;
1347 if (pnode != NULL) {
1348 pshm = pnode->pinfo;
1349 if (pshm != NULL)
1350 mac_posixshm_vnode_label_associate(
1351 vfs_context_ucred(ctx), pshm, pshm->pshm_label,
1352 vp, vp->v_label);
1353 }
1354 PSHM_SUBSYS_UNLOCK();
1355 }
1356 #endif