[apple/xnu.git] xnu-2782.30.5: bsd/vfs/kpi_vfs.c
1 /*
2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/vnode_internal.h>
88 #include <sys/stat.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
91 #include <sys/buf.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
95 #include <sys/mbuf.h>
96 #include <sys/syslog.h>
97 #include <sys/ubc.h>
98 #include <sys/vm.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/event.h>
102 #include <sys/fsevents.h>
103 #include <sys/user.h>
104 #include <sys/lockf.h>
105 #include <sys/xattr.h>
106
107 #include <kern/assert.h>
108 #include <kern/kalloc.h>
109 #include <kern/task.h>
110
111 #include <libkern/OSByteOrder.h>
112
113 #include <miscfs/specfs/specdev.h>
114
115 #include <mach/mach_types.h>
116 #include <mach/memory_object_types.h>
117 #include <mach/task.h>
118
119 #if CONFIG_MACF
120 #include <security/mac_framework.h>
121 #endif
122
123 #include <sys/sdt.h>
124
125 #define ESUCCESS 0
126 #undef mount_t
127 #undef vnode_t
128
129 #define COMPAT_ONLY
130
131 #define NATIVE_XATTR(VP) \
132 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
133
134 #if CONFIG_APPLEDOUBLE
135 static void xattrfile_remove(vnode_t dvp, const char *basename,
136 vfs_context_t ctx, int force);
137 static void xattrfile_setattr(vnode_t dvp, const char * basename,
138 struct vnode_attr * vap, vfs_context_t ctx);
139 #endif /* CONFIG_APPLEDOUBLE */
140
141 /*
142 * vnode_setneedinactive
143 *
144 * Description: Indicate that when the last iocount on this vnode goes away,
145 * and the usecount is also zero, we should inform the filesystem
146 * via VNOP_INACTIVE.
147 *
148 * Parameters: vnode_t vnode to mark
149 *
150 * Returns: Nothing
151 *
152 * Notes: Notably used when we're deleting a file--we need not have a
153 * usecount, so VNOP_INACTIVE may not get called by anyone. We
154 * want it called when we drop our iocount.
155 */
156 void
157 vnode_setneedinactive(vnode_t vp)
158 {
159 cache_purge(vp);
160
161 vnode_lock_spin(vp);
162 vp->v_lflag |= VL_NEEDINACTIVE;
163 vnode_unlock(vp);
164 }
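
/*
 * Illustrative sketch (not part of the original file): a filesystem's remove
 * path might mark the vnode so that VNOP_INACTIVE fires once the last iocount
 * is dropped.  The function name below is hypothetical.
 */
#if 0	/* example only */
static int
examplefs_post_remove(vnode_t vp)
{
	vnode_setneedinactive(vp);	/* request VNOP_INACTIVE at the final vnode_put() */
	return 0;
}
#endif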
165
166
167 /* ====================================================================== */
168 /* ************ EXTERNAL KERNEL APIS ********************************** */
169 /* ====================================================================== */
170
171 /*
172 * implementations of exported VFS operations
173 */
174 int
175 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
176 {
177 int error;
178
179 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
180 return(ENOTSUP);
181
182 if (vfs_context_is64bit(ctx)) {
183 if (vfs_64bitready(mp)) {
184 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
185 }
186 else {
187 error = ENOTSUP;
188 }
189 }
190 else {
191 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
192 }
193
194 return (error);
195 }
196
197 int
198 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
199 {
200 int error;
201
202 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
203 return(ENOTSUP);
204
205 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
206
207 return (error);
208 }
209
210 int
211 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
212 {
213 int error;
214
215 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
216 return(ENOTSUP);
217
218 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
219
220 return (error);
221 }
222
223 /*
224 * Returns: 0 Success
225 * ENOTSUP Not supported
226 * <vfs_root>:ENOENT
227 * <vfs_root>:???
228 *
229 * Note: The return codes from the underlying VFS's root routine can't
230 * be fully enumerated here, since third party VFS authors may not
231 * limit their error returns to the ones documented here, even
232 * though this may result in some programs functioning incorrectly.
233 *
234 * The return codes documented above are those which may currently
235 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
236  *              for a call to hfs_vget on the volume mount point, not including
237 * additional error codes which may be propagated from underlying
238 * routines called by hfs_vget.
239 */
240 int
241 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
242 {
243 int error;
244
245 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
246 return(ENOTSUP);
247
248 if (ctx == NULL) {
249 ctx = vfs_context_current();
250 }
251
252 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
253
254 return (error);
255 }
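
/*
 * Illustrative sketch (not from the original file): a typical caller obtains
 * the root vnode, uses it, and releases the iocount with vnode_put().
 * 'mp' is assumed to be a valid mount_t held by the caller.
 */
#if 0	/* example only */
	vnode_t rvp;

	if (VFS_ROOT(mp, &rvp, vfs_context_current()) == 0) {
		/* ... use the root vnode ... */
		vnode_put(rvp);
	}
#endif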
256
257 int
258 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
259 {
260 int error;
261
262 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
263 return(ENOTSUP);
264
265 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
266
267 return (error);
268 }
269
270 int
271 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
272 {
273 int error;
274
275 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
276 return(ENOTSUP);
277
278 if (ctx == NULL) {
279 ctx = vfs_context_current();
280 }
281
282 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
283
284 return(error);
285 }
286
287 int
288 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
289 {
290 int error;
291
292 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
293 return(ENOTSUP);
294
295 if (ctx == NULL) {
296 ctx = vfs_context_current();
297 }
298
299 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
300
301 return(error);
302 }
303
304 int
305 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
306 {
307 int error;
308
309 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
310 return(ENOTSUP);
311
312 if (ctx == NULL) {
313 ctx = vfs_context_current();
314 }
315
316 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
317
318 return(error);
319 }
320
321 int
322 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
323 {
324 int error;
325
326 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
327 return(ENOTSUP);
328
329 if (ctx == NULL) {
330 ctx = vfs_context_current();
331 }
332
333 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
334
335 return(error);
336 }
337
338 int
339 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t ctx)
340 {
341 int error;
342
343 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
344 return(ENOTSUP);
345
346 if (ctx == NULL) {
347 ctx = vfs_context_current();
348 }
349
350 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
351
352 return(error);
353 }
354
355 int
356 VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ctx)
357 {
358 int error;
359
360 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
361 return(ENOTSUP);
362
363 if (ctx == NULL) {
364 ctx = vfs_context_current();
365 }
366
367 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
368
369 return(error);
370 }
371
372
373 /* returns the cached throttle mask for the mount_t */
374 uint64_t
375 vfs_throttle_mask(mount_t mp)
376 {
377 return(mp->mnt_throttle_mask);
378 }
379
380 /* returns a copy of vfs type name for the mount_t */
381 void
382 vfs_name(mount_t mp, char * buffer)
383 {
384 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
385 }
386
387 /* returns vfs type number for the mount_t */
388 int
389 vfs_typenum(mount_t mp)
390 {
391 return(mp->mnt_vtable->vfc_typenum);
392 }
393
394 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
395 void*
396 vfs_mntlabel(mount_t mp)
397 {
398 return (void*)mp->mnt_mntlabel;
399 }
400
401 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
402 uint64_t
403 vfs_flags(mount_t mp)
404 {
405 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
406 }
407
408 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
409 void
410 vfs_setflags(mount_t mp, uint64_t flags)
411 {
412 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
413
414 mount_lock(mp);
415 mp->mnt_flag |= lflags;
416 mount_unlock(mp);
417 }
418
419 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
420 void
421 vfs_clearflags(mount_t mp , uint64_t flags)
422 {
423 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
424
425 mount_lock(mp);
426 mp->mnt_flag &= ~lflags;
427 mount_unlock(mp);
428 }
429
430 /* Is the mount_t ronly and upgrade read/write requested? */
431 int
432 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
433 {
434 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
435 }
436
437
438 /* Is the mount_t mounted ronly */
439 int
440 vfs_isrdonly(mount_t mp)
441 {
442 return (mp->mnt_flag & MNT_RDONLY);
443 }
444
445 /* Is the mount_t mounted for filesystem synchronous writes? */
446 int
447 vfs_issynchronous(mount_t mp)
448 {
449 return (mp->mnt_flag & MNT_SYNCHRONOUS);
450 }
451
452 /* Is the mount_t mounted read/write? */
453 int
454 vfs_isrdwr(mount_t mp)
455 {
456 return ((mp->mnt_flag & MNT_RDONLY) == 0);
457 }
458
459
460 /* Is mount_t marked for update (ie MNT_UPDATE) */
461 int
462 vfs_isupdate(mount_t mp)
463 {
464 return (mp->mnt_flag & MNT_UPDATE);
465 }
466
467
468 /* Is mount_t marked for reload (ie MNT_RELOAD) */
469 int
470 vfs_isreload(mount_t mp)
471 {
472 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
473 }
474
475 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
476 int
477 vfs_isforce(mount_t mp)
478 {
479 if (mp->mnt_lflag & MNT_LFORCE)
480 return(1);
481 else
482 return(0);
483 }
484
485 int
486 vfs_isunmount(mount_t mp)
487 {
488 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
489 return 1;
490 } else {
491 return 0;
492 }
493 }
494
495 int
496 vfs_64bitready(mount_t mp)
497 {
498 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
499 return(1);
500 else
501 return(0);
502 }
503
504
505 int
506 vfs_authcache_ttl(mount_t mp)
507 {
508 if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) )
509 return (mp->mnt_authcache_ttl);
510 else
511 return (CACHED_RIGHT_INFINITE_TTL);
512 }
513
514 void
515 vfs_setauthcache_ttl(mount_t mp, int ttl)
516 {
517 mount_lock(mp);
518 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
519 mp->mnt_authcache_ttl = ttl;
520 mount_unlock(mp);
521 }
522
523 void
524 vfs_clearauthcache_ttl(mount_t mp)
525 {
526 mount_lock(mp);
527 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
528 /*
529 * back to the default TTL value in case
530 * MNTK_AUTH_OPAQUE is set on this mount
531 */
532 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
533 mount_unlock(mp);
534 }
535
536 int
537 vfs_authopaque(mount_t mp)
538 {
539 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
540 return(1);
541 else
542 return(0);
543 }
544
545 int
546 vfs_authopaqueaccess(mount_t mp)
547 {
548 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
549 return(1);
550 else
551 return(0);
552 }
553
554 void
555 vfs_setauthopaque(mount_t mp)
556 {
557 mount_lock(mp);
558 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
559 mount_unlock(mp);
560 }
561
562 void
563 vfs_setauthopaqueaccess(mount_t mp)
564 {
565 mount_lock(mp);
566 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
567 mount_unlock(mp);
568 }
569
570 void
571 vfs_clearauthopaque(mount_t mp)
572 {
573 mount_lock(mp);
574 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
575 mount_unlock(mp);
576 }
577
578 void
579 vfs_clearauthopaqueaccess(mount_t mp)
580 {
581 mount_lock(mp);
582 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
583 mount_unlock(mp);
584 }
585
586 void
587 vfs_setextendedsecurity(mount_t mp)
588 {
589 mount_lock(mp);
590 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
591 mount_unlock(mp);
592 }
593
594 void
595 vfs_clearextendedsecurity(mount_t mp)
596 {
597 mount_lock(mp);
598 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
599 mount_unlock(mp);
600 }
601
602 int
603 vfs_extendedsecurity(mount_t mp)
604 {
605 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
606 }
607
608 /* returns the max size of short symlink in this mount_t */
609 uint32_t
610 vfs_maxsymlen(mount_t mp)
611 {
612 return(mp->mnt_maxsymlinklen);
613 }
614
615 /* set max size of short symlink on mount_t */
616 void
617 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
618 {
619 mp->mnt_maxsymlinklen = symlen;
620 }
621
622 /* return a pointer to the RO vfs_statfs associated with mount_t */
623 struct vfsstatfs *
624 vfs_statfs(mount_t mp)
625 {
626 return(&mp->mnt_vfsstat);
627 }
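
/*
 * Illustrative sketch (not from the original file): read-only access to the
 * cached statfs information for a mount.  'mp' is assumed valid.
 */
#if 0	/* example only */
	struct vfsstatfs *sp = vfs_statfs(mp);

	printf("%s on %s: %llu blocks of %u bytes\n",
	    sp->f_fstypename, sp->f_mntonname,
	    (unsigned long long)sp->f_blocks, sp->f_bsize);
#endif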
628
629 int
630 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
631 {
632 int error;
633
634 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
635 return(error);
636
637 /*
638 * If we have a filesystem create time, use it to default some others.
639 */
640 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
641 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
642 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
643 }
644
645 return(0);
646 }
647
648 int
649 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
650 {
651 int error;
652
653 if (vfs_isrdonly(mp))
654 return EROFS;
655
656 error = VFS_SETATTR(mp, vfa, ctx);
657
658 /*
659 * If we had alternate ways of setting vfs attributes, we'd
660 * fall back here.
661 */
662
663 return error;
664 }
665
666 /* return the private data handle stored in mount_t */
667 void *
668 vfs_fsprivate(mount_t mp)
669 {
670 return(mp->mnt_data);
671 }
672
673 /* set the private data handle in mount_t */
674 void
675 vfs_setfsprivate(mount_t mp, void *mntdata)
676 {
677 mount_lock(mp);
678 mp->mnt_data = mntdata;
679 mount_unlock(mp);
680 }
681
682 /* query whether the mount point supports native EAs */
683 int
684 vfs_nativexattrs(mount_t mp) {
685 return (mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS);
686 }
687
688 /*
689 * return the block size of the underlying
690 * device associated with mount_t
691 */
692 int
693 vfs_devblocksize(mount_t mp) {
694
695 return(mp->mnt_devblocksize);
696 }
697
698 /*
699 * Returns vnode with an iocount that must be released with vnode_put()
700 */
701 vnode_t
702 vfs_vnodecovered(mount_t mp)
703 {
704 vnode_t vp = mp->mnt_vnodecovered;
705 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
706 return NULL;
707 } else {
708 return vp;
709 }
710 }
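
/*
 * Illustrative sketch (not from the original file): the iocount returned by
 * vfs_vnodecovered() must be dropped with vnode_put() when the caller is done.
 */
#if 0	/* example only */
	vnode_t cvp = vfs_vnodecovered(mp);

	if (cvp != NULL) {
		/* ... inspect the covered vnode ... */
		vnode_put(cvp);
	}
#endif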
711
712 /*
713 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
714 * The iocount must be released with vnode_put(). Note that this KPI is subtle
715 * with respect to the validity of using this device vnode for anything substantial
716 * (which is discouraged). If commands are sent to the device driver without
717 * taking proper steps to ensure that the device is still open, chaos may ensue.
718 * Similarly, this routine should only be called if there is some guarantee that
719 * the mount itself is still valid.
720 */
721 vnode_t
722 vfs_devvp(mount_t mp)
723 {
724 vnode_t vp = mp->mnt_devvp;
725
726 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
727 return vp;
728 }
729
730 return NULLVP;
731 }
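
/*
 * Illustrative sketch (not from the original file): per the caution above, any
 * use of the device vnode should be brief, done while the mount is known to be
 * valid, and followed promptly by vnode_put().
 */
#if 0	/* example only */
	vnode_t dvp = vfs_devvp(mp);

	if (dvp != NULLVP) {
		dev_t dev = vnode_specrdev(dvp);	/* e.g. read the backing dev_t */

		/* ... */
		vnode_put(dvp);
	}
#endif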
732
733 /*
734 * return the io attributes associated with mount_t
735 */
736 void
737 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
738 {
739 if (mp == NULL) {
740 ioattrp->io_maxreadcnt = MAXPHYS;
741 ioattrp->io_maxwritecnt = MAXPHYS;
742 ioattrp->io_segreadcnt = 32;
743 ioattrp->io_segwritecnt = 32;
744 ioattrp->io_maxsegreadsize = MAXPHYS;
745 ioattrp->io_maxsegwritesize = MAXPHYS;
746 ioattrp->io_devblocksize = DEV_BSIZE;
747 ioattrp->io_flags = 0;
748 } else {
749 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
750 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
751 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
752 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
753 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
754 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
755 ioattrp->io_devblocksize = mp->mnt_devblocksize;
756 ioattrp->io_flags = mp->mnt_ioflags;
757 }
758 ioattrp->io_reserved[0] = NULL;
759 ioattrp->io_reserved[1] = NULL;
760 }
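
/*
 * Illustrative sketch (not from the original file): querying the per-mount I/O
 * limits before issuing a large read.  'mp' and 'length' are assumed to be
 * supplied by the caller.
 */
#if 0	/* example only */
	struct vfsioattr ioattr;

	vfs_ioattr(mp, &ioattr);
	if (length > ioattr.io_maxreadcnt) {
		/* split the request into chunks of at most io_maxreadcnt bytes */
	}
#endif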
761
762
763 /*
764 * set the IO attributes associated with mount_t
765 */
766 void
767 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
768 {
769 if (mp == NULL)
770 return;
771 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
772 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
773 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
774 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
775 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
776 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
777 mp->mnt_devblocksize = ioattrp->io_devblocksize;
778 mp->mnt_ioflags = ioattrp->io_flags;
779 }
780
781 /*
782  * Add a new filesystem into the kernel, as described by the passed-in
783  * vfs_fsentry structure. It fills in the vnode dispatch vectors that are
784  * to be passed to vnode_create() when vnodes are created.
785  * It returns a handle which is to be used when the FS is to be removed.
786 */
787 typedef int (*PFI)(void *);
788 extern int vfs_opv_numops;
789 errno_t
790 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
791 {
792 struct vfstable *newvfstbl = NULL;
793 int i,j;
794 int (***opv_desc_vector_p)(void *);
795 int (**opv_desc_vector)(void *);
796 struct vnodeopv_entry_desc *opve_descp;
797 int desccount;
798 int descsize;
799 PFI *descptr;
800
801 /*
802 * This routine is responsible for all the initialization that would
803 * ordinarily be done as part of the system startup;
804 */
805
806 if (vfe == (struct vfs_fsentry *)0)
807 return(EINVAL);
808
809 desccount = vfe->vfe_vopcnt;
810 if ((desccount <=0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
811 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
812 return(EINVAL);
813
814 /* Non-threadsafe filesystems are not supported */
815 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
816 return (EINVAL);
817 }
818
819 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
820 M_WAITOK);
821 bzero(newvfstbl, sizeof(struct vfstable));
822 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
823 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
824 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
825 newvfstbl->vfc_typenum = maxvfstypenum++;
826 else
827 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
828
829 newvfstbl->vfc_refcount = 0;
830 newvfstbl->vfc_flags = 0;
831 newvfstbl->vfc_mountroot = NULL;
832 newvfstbl->vfc_next = NULL;
833 newvfstbl->vfc_vfsflags = 0;
834 if (vfe->vfe_flags & VFS_TBL64BITREADY)
835 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
836 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2)
837 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
838 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2)
839 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
840 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
841 newvfstbl->vfc_flags |= MNT_LOCAL;
842 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0)
843 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
844 else
845 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
846
847 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR)
848 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
849 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT)
850 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
851 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED)
852 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
853 if (vfe->vfe_flags & VFS_TBLNOMACLABEL)
854 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
855 if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME)
856 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
857
858 /*
859 * Allocate and init the vectors.
860 * Also handle backwards compatibility.
861 *
862 * We allocate one large block to hold all <desccount>
863 * vnode operation vectors stored contiguously.
864 */
865 /* XXX - shouldn't be M_TEMP */
866
867 descsize = desccount * vfs_opv_numops * sizeof(PFI);
868 MALLOC(descptr, PFI *, descsize,
869 M_TEMP, M_WAITOK);
870 bzero(descptr, descsize);
871
872 newvfstbl->vfc_descptr = descptr;
873 newvfstbl->vfc_descsize = descsize;
874
875 newvfstbl->vfc_sysctl = NULL;
876
877 for (i= 0; i< desccount; i++ ) {
878 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
879 /*
880 * Fill in the caller's pointer to the start of the i'th vector.
881 * They'll need to supply it when calling vnode_create.
882 */
883 opv_desc_vector = descptr + i * vfs_opv_numops;
884 *opv_desc_vector_p = opv_desc_vector;
885
886 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
887 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
888
889 /*
890 * Sanity check: is this operation listed
891 * in the list of operations? We check this
892 * by seeing if its offset is zero. Since
893 * the default routine should always be listed
894 * first, it should be the only one with a zero
895 * offset. Any other operation with a zero
896 * offset is probably not listed in
897 * vfs_op_descs, and so is probably an error.
898 *
899 * A panic here means the layer programmer
900 * has committed the all-too common bug
901 * of adding a new operation to the layer's
902 * list of vnode operations but
903 * not adding the operation to the system-wide
904 * list of supported operations.
905 */
906 if (opve_descp->opve_op->vdesc_offset == 0 &&
907 opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
908 printf("vfs_fsadd: operation %s not listed in %s.\n",
909 opve_descp->opve_op->vdesc_name,
910 "vfs_op_descs");
911 panic("vfs_fsadd: bad operation");
912 }
913 /*
914 * Fill in this entry.
915 */
916 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
917 opve_descp->opve_impl;
918 }
919
920
921 /*
922 * Finally, go back and replace unfilled routines
923 * with their default. (Sigh, an O(n^3) algorithm. I
924 * could make it better, but that'd be work, and n is small.)
925 */
926 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
927
928 /*
929 * Force every operations vector to have a default routine.
930 */
931 opv_desc_vector = *opv_desc_vector_p;
932 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
933 panic("vfs_fsadd: operation vector without default routine.");
934 for (j = 0; j < vfs_opv_numops; j++)
935 if (opv_desc_vector[j] == NULL)
936 opv_desc_vector[j] =
937 opv_desc_vector[VOFFSET(vnop_default)];
938
939 } /* end of each vnodeopv_desc parsing */
940
941
942
943 *handle = vfstable_add(newvfstbl);
944
945 if (newvfstbl->vfc_typenum <= maxvfstypenum )
946 maxvfstypenum = newvfstbl->vfc_typenum + 1;
947
948 if (newvfstbl->vfc_vfsops->vfs_init) {
949 struct vfsconf vfsc;
950 bzero(&vfsc, sizeof(struct vfsconf));
951 vfsc.vfc_reserved1 = 0;
952 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
953 vfsc.vfc_typenum = (*handle)->vfc_typenum;
954 vfsc.vfc_refcount = (*handle)->vfc_refcount;
955 vfsc.vfc_flags = (*handle)->vfc_flags;
956 vfsc.vfc_reserved2 = 0;
957 vfsc.vfc_reserved3 = 0;
958
959 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
960 }
961
962 FREE(newvfstbl, M_TEMP);
963
964 return(0);
965 }
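
/*
 * Illustrative sketch (not from the original file): a loadable filesystem
 * "examplefs" might register itself roughly as follows.  The vfsops table,
 * vnodeopv_desc, and function names are hypothetical; only the vfs_fsentry
 * fields and VFS_TBL* flags shown here come from this KPI.
 */
#if 0	/* example only */
extern struct vfsops examplefs_vfsops;				/* hypothetical */
extern struct vnodeopv_desc examplefs_vnodeop_opv_desc;		/* hypothetical */

static vfstable_t examplefs_handle;

static errno_t
examplefs_register(void)
{
	struct vnodeopv_desc *opv_list[1] = { &examplefs_vnodeop_opv_desc };
	struct vfs_fsentry vfe;

	bzero(&vfe, sizeof(vfe));
	vfe.vfe_vfsops = &examplefs_vfsops;
	vfe.vfe_vopcnt = 1;				/* one vnode operation vector */
	vfe.vfe_opvdescs = opv_list;
	strlcpy(vfe.vfe_fsname, "examplefs", sizeof(vfe.vfe_fsname));
	vfe.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK |
	    VFS_TBL64BITREADY | VFS_TBLNOTYPENUM;	/* let the kernel pick a typenum */

	return vfs_fsadd(&vfe, &examplefs_handle);
}
#endif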
966
967 /*
968  * Removes the filesystem from the kernel.
969  * The argument passed in is the handle that was given when the
970  * file system was added.
971 */
972 errno_t
973 vfs_fsremove(vfstable_t handle)
974 {
975 struct vfstable * vfstbl = (struct vfstable *)handle;
976 void *old_desc = NULL;
977 errno_t err;
978
979 /* Preflight check for any mounts */
980 mount_list_lock();
981 if ( vfstbl->vfc_refcount != 0 ) {
982 mount_list_unlock();
983 return EBUSY;
984 }
985
986 /*
987 * save the old descriptor; the free cannot occur unconditionally,
988 * since vfstable_del() may fail.
989 */
990 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
991 old_desc = vfstbl->vfc_descptr;
992 }
993 err = vfstable_del(vfstbl);
994
995 mount_list_unlock();
996
997 /* free the descriptor if the delete was successful */
998 if (err == 0 && old_desc) {
999 FREE(old_desc, M_TEMP);
1000 }
1001
1002 return(err);
1003 }
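
/*
 * Illustrative sketch (not from the original file): unregistering with the
 * handle obtained from vfs_fsadd().  EBUSY is returned while any instances of
 * the filesystem are still mounted.
 */
#if 0	/* example only */
	errno_t err = vfs_fsremove(examplefs_handle);	/* 'examplefs_handle' from the sketch above */

	if (err == EBUSY) {
		/* still mounted somewhere; try again later */
	}
#endif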
1004
1005 int
1006 vfs_context_pid(vfs_context_t ctx)
1007 {
1008 return (proc_pid(vfs_context_proc(ctx)));
1009 }
1010
1011 int
1012 vfs_context_suser(vfs_context_t ctx)
1013 {
1014 return (suser(ctx->vc_ucred, NULL));
1015 }
1016
1017 /*
1018 * Return bit field of signals posted to all threads in the context's process.
1019 *
1020 * XXX Signals should be tied to threads, not processes, for most uses of this
1021 * XXX call.
1022 */
1023 int
1024 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1025 {
1026 proc_t p = vfs_context_proc(ctx);
1027 if (p)
1028 return(proc_pendingsignals(p, mask));
1029 return(0);
1030 }
1031
1032 int
1033 vfs_context_is64bit(vfs_context_t ctx)
1034 {
1035 proc_t proc = vfs_context_proc(ctx);
1036
1037 if (proc)
1038 return(proc_is64bit(proc));
1039 return(0);
1040 }
1041
1042
1043 /*
1044 * vfs_context_proc
1045 *
1046 * Description: Given a vfs_context_t, return the proc_t associated with it.
1047 *
1048 * Parameters: vfs_context_t The context to use
1049 *
1050 * Returns: proc_t The process for this context
1051 *
1052 * Notes: This function will return the current_proc() if any of the
1053 * following conditions are true:
1054 *
1055 * o The supplied context pointer is NULL
1056 * o There is no Mach thread associated with the context
1057 * o There is no Mach task associated with the Mach thread
1058 * o There is no proc_t associated with the Mach task
1059 * o The proc_t has no per process open file table
1060 * o The proc_t is post-vfork()
1061 *
1062 * This causes this function to return a value matching as
1063 * closely as possible the previous behaviour, while at the
1064 * same time avoiding the task lending that results from vfork()
1065 */
1066 proc_t
1067 vfs_context_proc(vfs_context_t ctx)
1068 {
1069 proc_t proc = NULL;
1070
1071 if (ctx != NULL && ctx->vc_thread != NULL)
1072 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1073 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
1074 proc = NULL;
1075
1076 return(proc == NULL ? current_proc() : proc);
1077 }
1078
1079 /*
1080 * vfs_context_get_special_port
1081 *
1082 * Description: Return the requested special port from the task associated
1083 * with the given context.
1084 *
1085 * Parameters: vfs_context_t The context to use
1086 * int Index of special port
1087 * ipc_port_t * Pointer to returned port
1088 *
1089 * Returns: kern_return_t see task_get_special_port()
1090 */
1091 kern_return_t
1092 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1093 {
1094 task_t task = NULL;
1095
1096 if (ctx != NULL && ctx->vc_thread != NULL)
1097 task = get_threadtask(ctx->vc_thread);
1098
1099 return task_get_special_port(task, which, portp);
1100 }
1101
1102 /*
1103 * vfs_context_set_special_port
1104 *
1105 * Description: Set the requested special port in the task associated
1106 * with the given context.
1107 *
1108 * Parameters: vfs_context_t The context to use
1109 * int Index of special port
1110 * ipc_port_t New special port
1111 *
1112 * Returns: kern_return_t see task_set_special_port()
1113 */
1114 kern_return_t
1115 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1116 {
1117 task_t task = NULL;
1118
1119 if (ctx != NULL && ctx->vc_thread != NULL)
1120 task = get_threadtask(ctx->vc_thread);
1121
1122 return task_set_special_port(task, which, port);
1123 }
1124
1125 /*
1126 * vfs_context_thread
1127 *
1128 * Description: Return the Mach thread associated with a vfs_context_t
1129 *
1130 * Parameters: vfs_context_t The context to use
1131 *
1132 * Returns: thread_t The thread for this context, or
1133 * NULL, if there is not one.
1134 *
1135 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1136 * as a result of a static vfs_context_t declaration in a function
1137 * and will result in this function returning NULL.
1138 *
1139 * This is intentional; this function should NOT return the
1140 * current_thread() in this case.
1141 */
1142 thread_t
1143 vfs_context_thread(vfs_context_t ctx)
1144 {
1145 return(ctx->vc_thread);
1146 }
1147
1148
1149 /*
1150 * vfs_context_cwd
1151 *
1152 * Description: Returns a reference on the vnode for the current working
1153 * directory for the supplied context
1154 *
1155 * Parameters: vfs_context_t The context to use
1156 *
1157 * Returns: vnode_t The current working directory
1158 * for this context
1159 *
1160 * Notes: The function first attempts to obtain the current directory
1161 * from the thread, and if it is not present there, falls back
1162 * to obtaining it from the process instead. If it can't be
1163 * obtained from either place, we return NULLVP.
1164 */
1165 vnode_t
1166 vfs_context_cwd(vfs_context_t ctx)
1167 {
1168 vnode_t cwd = NULLVP;
1169
1170 if(ctx != NULL && ctx->vc_thread != NULL) {
1171 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1172 proc_t proc;
1173
1174 /*
1175 * Get the cwd from the thread; if there isn't one, get it
1176 * from the process, instead.
1177 */
1178 if ((cwd = uth->uu_cdir) == NULLVP &&
1179 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1180 proc->p_fd != NULL)
1181 cwd = proc->p_fd->fd_cdir;
1182 }
1183
1184 return(cwd);
1185 }
1186
1187 /*
1188 * vfs_context_create
1189 *
1190 * Description: Allocate and initialize a new context.
1191 *
1192 * Parameters: vfs_context_t: Context to copy, or NULL for new
1193 *
1194 * Returns: Pointer to new context
1195 *
1196 * Notes: Copy cred and thread from argument, if available; else
1197 * initialize with current thread and new cred. Returns
1198 * with a reference held on the credential.
1199 */
1200 vfs_context_t
1201 vfs_context_create(vfs_context_t ctx)
1202 {
1203 vfs_context_t newcontext;
1204
1205 newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));
1206
1207 if (newcontext) {
1208 kauth_cred_t safecred;
1209 if (ctx) {
1210 newcontext->vc_thread = ctx->vc_thread;
1211 safecred = ctx->vc_ucred;
1212 } else {
1213 newcontext->vc_thread = current_thread();
1214 safecred = kauth_cred_get();
1215 }
1216 if (IS_VALID_CRED(safecred))
1217 kauth_cred_ref(safecred);
1218 newcontext->vc_ucred = safecred;
1219 return(newcontext);
1220 }
1221 return(NULL);
1222 }
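
/*
 * Illustrative sketch (not from the original file): typical lifecycle of a
 * caller-owned context.
 */
#if 0	/* example only */
	vfs_context_t ctx = vfs_context_create(NULL);	/* current thread + current cred */

	if (ctx != NULL) {
		/* ... pass ctx to VFS_*()/VNOP_*() calls ... */
		vfs_context_rele(ctx);			/* drops the credential reference */
	}
#endif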
1223
1224
1225 vfs_context_t
1226 vfs_context_current(void)
1227 {
1228 vfs_context_t ctx = NULL;
1229 volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1230
1231 if (ut != NULL ) {
1232 if (ut->uu_context.vc_ucred != NULL) {
1233 ctx = &ut->uu_context;
1234 }
1235 }
1236
1237 return(ctx == NULL ? vfs_context_kernel() : ctx);
1238 }
1239
1240
1241 /*
1242 * XXX Do not ask
1243 *
1244 * Dangerous hack - adopt the first kernel thread as the current thread, to
1245 * get to the vfs_context_t in the uthread associated with a kernel thread.
1246 * This is used by UDF to make the call into IOCDMediaBSDClient,
1247 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1248 * ioctl() is being called from kernel or user space (and all this because
1249 * we do not pass threads into our ioctl()'s, instead of processes).
1250 *
1251 * This is also used by imageboot_setup(), called early from bsd_init() after
1252 * kernproc has been given a credential.
1253 *
1254 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1255 * of many Mach headers to do the reference directly rather than indirectly;
1256  * we will need to forgo this convenience when we retire proc_thread().
1257 */
1258 static struct vfs_context kerncontext;
1259 vfs_context_t
1260 vfs_context_kernel(void)
1261 {
1262 if (kerncontext.vc_ucred == NOCRED)
1263 kerncontext.vc_ucred = kernproc->p_ucred;
1264 if (kerncontext.vc_thread == NULL)
1265 kerncontext.vc_thread = proc_thread(kernproc);
1266
1267 return(&kerncontext);
1268 }
1269
1270
1271 int
1272 vfs_context_rele(vfs_context_t ctx)
1273 {
1274 if (ctx) {
1275 if (IS_VALID_CRED(ctx->vc_ucred))
1276 kauth_cred_unref(&ctx->vc_ucred);
1277 kfree(ctx, sizeof(struct vfs_context));
1278 }
1279 return(0);
1280 }
1281
1282
1283 kauth_cred_t
1284 vfs_context_ucred(vfs_context_t ctx)
1285 {
1286 return (ctx->vc_ucred);
1287 }
1288
1289 /*
1290 * Return true if the context is owned by the superuser.
1291 */
1292 int
1293 vfs_context_issuser(vfs_context_t ctx)
1294 {
1295 return(kauth_cred_issuser(vfs_context_ucred(ctx)));
1296 }
1297
1298 /*
1299 * Given a context, for all fields of vfs_context_t which
1300 * are not held with a reference, set those fields to the
1301 * values for the current execution context. Currently, this
1302 * just means the vc_thread.
1303 *
1304 * Returns: 0 for success, nonzero for failure
1305 *
1306 * The intended use is:
1307 * 1. vfs_context_create() gets the caller a context
1308 * 2. vfs_context_bind() sets the unrefcounted data
1309 * 3. vfs_context_rele() releases the context
1310 *
1311 */
1312 int
1313 vfs_context_bind(vfs_context_t ctx)
1314 {
1315 ctx->vc_thread = current_thread();
1316 return 0;
1317 }
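
/*
 * Illustrative sketch (not from the original file) of the intended sequence
 * described above: create, bind to the current thread, use, release.
 */
#if 0	/* example only */
	vfs_context_t ctx = vfs_context_create(NULL);

	vfs_context_bind(ctx);		/* refresh the unrefcounted vc_thread field */
	/* ... use ctx ... */
	vfs_context_rele(ctx);
#endif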
1318
1319 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1320
1321
1322 /*
1323 * Convert between vnode types and inode formats (since POSIX.1
1324 * defines mode word of stat structure in terms of inode formats).
1325 */
1326 enum vtype
1327 vnode_iftovt(int mode)
1328 {
1329 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1330 }
1331
1332 int
1333 vnode_vttoif(enum vtype indx)
1334 {
1335 return(vttoif_tab[(int)(indx)]);
1336 }
1337
1338 int
1339 vnode_makeimode(int indx, int mode)
1340 {
1341 return (int)(VTTOIF(indx) | (mode));
1342 }
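
/*
 * Illustrative sketch (not from the original file): round-tripping between
 * vnode types and inode format bits.
 */
#if 0	/* example only */
	enum vtype vt = vnode_iftovt(S_IFDIR);		/* VDIR */
	int fmt = vnode_vttoif(VREG);			/* S_IFREG */
	int imode = vnode_makeimode(VREG, 0644);	/* S_IFREG | 0644 */
#endif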
1343
1344
1345 /*
1346 * vnode manipulation functions.
1347 */
1348
1349 /* returns the system root vnode with an iocount; it should be released using vnode_put() */
1350 vnode_t
1351 vfs_rootvnode(void)
1352 {
1353 int error;
1354
1355 error = vnode_get(rootvnode);
1356 if (error)
1357 return ((vnode_t)0);
1358 else
1359 return rootvnode;
1360 }
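
/*
 * Illustrative sketch (not from the original file): the returned iocount must
 * be dropped with vnode_put() once the caller is finished with the root vnode.
 */
#if 0	/* example only */
	vnode_t rootvp = vfs_rootvnode();

	if (rootvp != NULLVP) {
		/* ... */
		vnode_put(rootvp);
	}
#endif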
1361
1362
1363 uint32_t
1364 vnode_vid(vnode_t vp)
1365 {
1366 return ((uint32_t)(vp->v_id));
1367 }
1368
1369 mount_t
1370 vnode_mount(vnode_t vp)
1371 {
1372 return (vp->v_mount);
1373 }
1374
1375 #if CONFIG_IOSCHED
1376 vnode_t
1377 vnode_mountdevvp(vnode_t vp)
1378 {
1379 if (vp->v_mount)
1380 return (vp->v_mount->mnt_devvp);
1381 else
1382 return ((vnode_t)0);
1383 }
1384 #endif
1385
1386 mount_t
1387 vnode_mountedhere(vnode_t vp)
1388 {
1389 mount_t mp;
1390
1391 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1392 (mp->mnt_vnodecovered == vp))
1393 return (mp);
1394 else
1395 return (mount_t)NULL;
1396 }
1397
1398 /* returns vnode type of vnode_t */
1399 enum vtype
1400 vnode_vtype(vnode_t vp)
1401 {
1402 return (vp->v_type);
1403 }
1404
1405 /* returns FS specific node saved in vnode */
1406 void *
1407 vnode_fsnode(vnode_t vp)
1408 {
1409 return (vp->v_data);
1410 }
1411
1412 void
1413 vnode_clearfsnode(vnode_t vp)
1414 {
1415 vp->v_data = NULL;
1416 }
1417
1418 dev_t
1419 vnode_specrdev(vnode_t vp)
1420 {
1421 return(vp->v_rdev);
1422 }
1423
1424
1425 /* Accessor functions */
1426 /* is vnode_t a root vnode */
1427 int
1428 vnode_isvroot(vnode_t vp)
1429 {
1430 return ((vp->v_flag & VROOT)? 1 : 0);
1431 }
1432
1433 /* is vnode_t a system vnode */
1434 int
1435 vnode_issystem(vnode_t vp)
1436 {
1437 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1438 }
1439
1440 /* is vnode_t a swap file vnode */
1441 int
1442 vnode_isswap(vnode_t vp)
1443 {
1444 return ((vp->v_flag & VSWAP)? 1 : 0);
1445 }
1446
1447 /* is vnode_t a tty */
1448 int
1449 vnode_istty(vnode_t vp)
1450 {
1451 return ((vp->v_flag & VISTTY) ? 1 : 0);
1452 }
1453
1454 /* is a mount operation in progress on this vnode_t? */
1455 int
1456 vnode_ismount(vnode_t vp)
1457 {
1458 return ((vp->v_flag & VMOUNT)? 1 : 0);
1459 }
1460
1461 /* is this vnode being recycled now? */
1462 int
1463 vnode_isrecycled(vnode_t vp)
1464 {
1465 int ret;
1466
1467 vnode_lock_spin(vp);
1468 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1469 vnode_unlock(vp);
1470 return(ret);
1471 }
1472
1473 /* vnode was created by background task requesting rapid aging
1474 and has not since been referenced by a normal task */
1475 int
1476 vnode_israge(vnode_t vp)
1477 {
1478 return ((vp->v_flag & VRAGE)? 1 : 0);
1479 }
1480
1481 int
1482 vnode_needssnapshots(vnode_t vp)
1483 {
1484 return ((vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0);
1485 }
1486
1487
1488 /* Check the process/thread to see if we should skip atime updates */
1489 int
1490 vfs_ctx_skipatime (vfs_context_t ctx) {
1491 struct uthread *ut;
1492 proc_t proc;
1493 thread_t thr;
1494
1495 proc = vfs_context_proc(ctx);
1496 thr = vfs_context_thread (ctx);
1497
1498 /* Validate pointers in case we were invoked via a kernel context */
1499 if (thr && proc) {
1500 ut = get_bsdthread_info (thr);
1501
1502 if (proc->p_lflag & P_LRAGE_VNODES) {
1503 return 1;
1504 }
1505
1506 if (ut) {
1507 if (ut->uu_flag & UT_RAGE_VNODES) {
1508 return 1;
1509 }
1510 }
1511 }
1512 return 0;
1513 }
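
/*
 * Illustrative sketch (not from the original file): a filesystem read path
 * could consult this before scheduling an access-time update.
 */
#if 0	/* example only */
	if (!vfs_ctx_skipatime(ctx)) {
		/* mark the node so its atime gets updated */
	}
#endif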
1514
1515 /* is vnode_t marked to not keep data cached once it's been consumed */
1516 int
1517 vnode_isnocache(vnode_t vp)
1518 {
1519 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1520 }
1521
1522 /*
1523 * has sequential readahead been disabled on this vnode
1524 */
1525 int
1526 vnode_isnoreadahead(vnode_t vp)
1527 {
1528 return ((vp->v_flag & VRAOFF)? 1 : 0);
1529 }
1530
1531 int
1532 vnode_is_openevt(vnode_t vp)
1533 {
1534 return ((vp->v_flag & VOPENEVT)? 1 : 0);
1535 }
1536
1537 /* is vnode_t a standard one? */
1538 int
1539 vnode_isstandard(vnode_t vp)
1540 {
1541 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1542 }
1543
1544 /* don't vflush() if SKIPSYSTEM */
1545 int
1546 vnode_isnoflush(vnode_t vp)
1547 {
1548 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1549 }
1550
1551 /* is vnode_t a regular file */
1552 int
1553 vnode_isreg(vnode_t vp)
1554 {
1555 return ((vp->v_type == VREG)? 1 : 0);
1556 }
1557
1558 /* is vnode_t a directory? */
1559 int
1560 vnode_isdir(vnode_t vp)
1561 {
1562 return ((vp->v_type == VDIR)? 1 : 0);
1563 }
1564
1565 /* is vnode_t a symbolic link ? */
1566 int
1567 vnode_islnk(vnode_t vp)
1568 {
1569 return ((vp->v_type == VLNK)? 1 : 0);
1570 }
1571
1572 int
1573 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
1574 {
1575 struct nameidata *ndp = cnp->cn_ndp;
1576
1577 if (ndp == NULL) {
1578 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1579 }
1580
1581 if (vnode_isdir(vp)) {
1582 if (vp->v_mountedhere != NULL) {
1583 goto yes;
1584 }
1585
1586 #if CONFIG_TRIGGERS
1587 if (vp->v_resolve) {
1588 goto yes;
1589 }
1590 #endif /* CONFIG_TRIGGERS */
1591
1592 }
1593
1594
1595 if (vnode_islnk(vp)) {
1596 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1597 if (cnp->cn_flags & FOLLOW) {
1598 goto yes;
1599 }
1600 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
1601 goto yes;
1602 }
1603 }
1604
1605 return 0;
1606
1607 yes:
1608 ndp->ni_flag |= NAMEI_CONTLOOKUP;
1609 return EKEEPLOOKING;
1610 }
1611
1612 /* is vnode_t a fifo ? */
1613 int
1614 vnode_isfifo(vnode_t vp)
1615 {
1616 return ((vp->v_type == VFIFO)? 1 : 0);
1617 }
1618
1619 /* is vnode_t a block device? */
1620 int
1621 vnode_isblk(vnode_t vp)
1622 {
1623 return ((vp->v_type == VBLK)? 1 : 0);
1624 }
1625
1626 int
1627 vnode_isspec(vnode_t vp)
1628 {
1629 return (((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0);
1630 }
1631
1632 /* is vnode_t a char device? */
1633 int
1634 vnode_ischr(vnode_t vp)
1635 {
1636 return ((vp->v_type == VCHR)? 1 : 0);
1637 }
1638
1639 /* is vnode_t a socket? */
1640 int
1641 vnode_issock(vnode_t vp)
1642 {
1643 return ((vp->v_type == VSOCK)? 1 : 0);
1644 }
1645
1646 /* is vnode_t a device with multiple active vnodes referring to it? */
1647 int
1648 vnode_isaliased(vnode_t vp)
1649 {
1650 enum vtype vt = vp->v_type;
1651 if (!((vt == VCHR) || (vt == VBLK))) {
1652 return 0;
1653 } else {
1654 return (vp->v_specflags & SI_ALIASED);
1655 }
1656 }
1657
1658 /* is vnode_t a named stream? */
1659 int
1660 vnode_isnamedstream(
1661 #if NAMEDSTREAMS
1662 vnode_t vp
1663 #else
1664 __unused vnode_t vp
1665 #endif
1666 )
1667 {
1668 #if NAMEDSTREAMS
1669 return ((vp->v_flag & VISNAMEDSTREAM) ? 1 : 0);
1670 #else
1671 return (0);
1672 #endif
1673 }
1674
1675 int
1676 vnode_isshadow(
1677 #if NAMEDSTREAMS
1678 vnode_t vp
1679 #else
1680 __unused vnode_t vp
1681 #endif
1682 )
1683 {
1684 #if NAMEDSTREAMS
1685 return ((vp->v_flag & VISSHADOW) ? 1 : 0);
1686 #else
1687 return (0);
1688 #endif
1689 }
1690
1691 /* does vnode have associated named stream vnodes ? */
1692 int
1693 vnode_hasnamedstreams(
1694 #if NAMEDSTREAMS
1695 vnode_t vp
1696 #else
1697 __unused vnode_t vp
1698 #endif
1699 )
1700 {
1701 #if NAMEDSTREAMS
1702 return ((vp->v_lflag & VL_HASSTREAMS) ? 1 : 0);
1703 #else
1704 return (0);
1705 #endif
1706 }
1707 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1708 void
1709 vnode_setnocache(vnode_t vp)
1710 {
1711 vnode_lock_spin(vp);
1712 vp->v_flag |= VNOCACHE_DATA;
1713 vnode_unlock(vp);
1714 }
1715
1716 void
1717 vnode_clearnocache(vnode_t vp)
1718 {
1719 vnode_lock_spin(vp);
1720 vp->v_flag &= ~VNOCACHE_DATA;
1721 vnode_unlock(vp);
1722 }
1723
1724 void
1725 vnode_set_openevt(vnode_t vp)
1726 {
1727 vnode_lock_spin(vp);
1728 vp->v_flag |= VOPENEVT;
1729 vnode_unlock(vp);
1730 }
1731
1732 void
1733 vnode_clear_openevt(vnode_t vp)
1734 {
1735 vnode_lock_spin(vp);
1736 vp->v_flag &= ~VOPENEVT;
1737 vnode_unlock(vp);
1738 }
1739
1740
1741 void
1742 vnode_setnoreadahead(vnode_t vp)
1743 {
1744 vnode_lock_spin(vp);
1745 vp->v_flag |= VRAOFF;
1746 vnode_unlock(vp);
1747 }
1748
1749 void
1750 vnode_clearnoreadahead(vnode_t vp)
1751 {
1752 vnode_lock_spin(vp);
1753 vp->v_flag &= ~VRAOFF;
1754 vnode_unlock(vp);
1755 }
1756
1757
1758 /* mark vnode_t so that vflush() skips it if SKIPSYSTEM is requested */
1759 void
1760 vnode_setnoflush(vnode_t vp)
1761 {
1762 vnode_lock_spin(vp);
1763 vp->v_flag |= VNOFLUSH;
1764 vnode_unlock(vp);
1765 }
1766
1767 void
1768 vnode_clearnoflush(vnode_t vp)
1769 {
1770 vnode_lock_spin(vp);
1771 vp->v_flag &= ~VNOFLUSH;
1772 vnode_unlock(vp);
1773 }
1774
1775
1776 /* is vnode_t a blkdevice and has a FS mounted on it */
1777 int
1778 vnode_ismountedon(vnode_t vp)
1779 {
1780 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
1781 }
1782
1783 void
1784 vnode_setmountedon(vnode_t vp)
1785 {
1786 vnode_lock_spin(vp);
1787 vp->v_specflags |= SI_MOUNTEDON;
1788 vnode_unlock(vp);
1789 }
1790
1791 void
1792 vnode_clearmountedon(vnode_t vp)
1793 {
1794 vnode_lock_spin(vp);
1795 vp->v_specflags &= ~SI_MOUNTEDON;
1796 vnode_unlock(vp);
1797 }
1798
1799
1800 void
1801 vnode_settag(vnode_t vp, int tag)
1802 {
1803 vp->v_tag = tag;
1804
1805 }
1806
1807 int
1808 vnode_tag(vnode_t vp)
1809 {
1810 return(vp->v_tag);
1811 }
1812
1813 vnode_t
1814 vnode_parent(vnode_t vp)
1815 {
1816
1817 return(vp->v_parent);
1818 }
1819
1820 void
1821 vnode_setparent(vnode_t vp, vnode_t dvp)
1822 {
1823 vp->v_parent = dvp;
1824 }
1825
1826 void
1827 vnode_setname(vnode_t vp, char * name)
1828 {
1829 vp->v_name = name;
1830 }
1831
1832 /* return the registered FS name when adding the FS to kernel */
1833 void
1834 vnode_vfsname(vnode_t vp, char * buf)
1835 {
1836 strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
1837 }
1838
1839 /* return the FS type number */
1840 int
1841 vnode_vfstypenum(vnode_t vp)
1842 {
1843 return(vp->v_mount->mnt_vtable->vfc_typenum);
1844 }
1845
1846 int
1847 vnode_vfs64bitready(vnode_t vp)
1848 {
1849
1850 /*
1851 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
1852 */
1853 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
1854 return(1);
1855 else
1856 return(0);
1857 }
1858
1859
1860
1861 /* return the visible flags on associated mount point of vnode_t */
1862 uint32_t
1863 vnode_vfsvisflags(vnode_t vp)
1864 {
1865 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
1866 }
1867
1868 /* return the command modifier flags on associated mount point of vnode_t */
1869 uint32_t
1870 vnode_vfscmdflags(vnode_t vp)
1871 {
1872 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
1873 }
1874
1875 /* return the max short-symlink length for vnode_t's mount point */
1876 uint32_t
1877 vnode_vfsmaxsymlen(vnode_t vp)
1878 {
1879 return(vp->v_mount->mnt_maxsymlinklen);
1880 }
1881
1882 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1883 struct vfsstatfs *
1884 vnode_vfsstatfs(vnode_t vp)
1885 {
1886 return(&vp->v_mount->mnt_vfsstat);
1887 }
1888
1889 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1890 void *
1891 vnode_vfsfsprivate(vnode_t vp)
1892 {
1893 return(vp->v_mount->mnt_data);
1894 }
1895
1896 /* is vnode_t in a rdonly mounted FS */
1897 int
1898 vnode_vfsisrdonly(vnode_t vp)
1899 {
1900 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
1901 }
1902
1903 int
1904 vnode_compound_rename_available(vnode_t vp)
1905 {
1906 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
1907 }
1908 int
1909 vnode_compound_rmdir_available(vnode_t vp)
1910 {
1911 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
1912 }
1913 int
1914 vnode_compound_mkdir_available(vnode_t vp)
1915 {
1916 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
1917 }
1918 int
1919 vnode_compound_remove_available(vnode_t vp)
1920 {
1921 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
1922 }
1923 int
1924 vnode_compound_open_available(vnode_t vp)
1925 {
1926 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
1927 }
1928
1929 int
1930 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
1931 {
1932 return ((vp->v_mount->mnt_compound_ops & opid) != 0);
1933 }
1934
1935 /*
1936 * Returns vnode ref to current working directory; if a per-thread current
1937 * working directory is in effect, return that instead of the per process one.
1938 *
1939 * XXX Published, but not used.
1940 */
1941 vnode_t
1942 current_workingdir(void)
1943 {
1944 return vfs_context_cwd(vfs_context_current());
1945 }
1946
1947 /* returns vnode ref to current root(chroot) directory */
1948 vnode_t
1949 current_rootdir(void)
1950 {
1951 proc_t proc = current_proc();
1952 struct vnode * vp ;
1953
1954 if ( (vp = proc->p_fd->fd_rdir) ) {
1955 if ( (vnode_getwithref(vp)) )
1956 return (NULL);
1957 }
1958 return vp;
1959 }
1960
1961 /*
1962 * Get a filesec and optional acl contents from an extended attribute.
1963  * Function will attempt to retrieve ACL, UUID, and GUID information using a
1964 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
1965 *
1966 * Parameters: vp The vnode on which to operate.
1967 * fsecp The filesec (and ACL, if any) being
1968 * retrieved.
1969 * ctx The vnode context in which the
1970 * operation is to be attempted.
1971 *
1972 * Returns: 0 Success
1973 * !0 errno value
1974 *
1975 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
1976 * host byte order, as will be the ACL contents, if any.
1977  *              Internally, we will canonicalize these values from network (PPC)
1978 * byte order after we retrieve them so that the on-disk contents
1979 * of the extended attribute are identical for both PPC and Intel
1980 * (if we were not being required to provide this service via
1981 * fallback, this would be the job of the filesystem
1982 * 'VNOP_GETATTR' call).
1983 *
1984 * We use ntohl() because it has a transitive property on Intel
1985  *              machines and no effect on PPC machines; this guarantees a consistent host-order result on both.
1986 *
1987  * XXX:         Deleting rather than ignoring a corrupt security structure is
1988  *              probably the only way to reset it without assistance from a
1989 * file system integrity checking tool. Right now we ignore it.
1990 *
1991  * XXX:         We should enumerate the possible errno values here, and where
1992 * in the code they originated.
1993 */
1994 static int
1995 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
1996 {
1997 kauth_filesec_t fsec;
1998 uio_t fsec_uio;
1999 size_t fsec_size;
2000 size_t xsize, rsize;
2001 int error;
2002 uint32_t host_fsec_magic;
2003 uint32_t host_acl_entrycount;
2004
2005 fsec = NULL;
2006 fsec_uio = NULL;
2007 error = 0;
2008
2009 /* find out how big the EA is */
2010 if (vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx) != 0) {
2011 /* no EA, no filesec */
2012 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2013 error = 0;
2014 /* either way, we are done */
2015 goto out;
2016 }
2017
2018 /*
2019 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2020  * ACE entry ACL, and if it's larger than that, it must have the right
2021 * number of bytes such that it contains an atomic number of ACEs,
2022 * rather than partial entries. Otherwise, we ignore it.
2023 */
2024 if (!KAUTH_FILESEC_VALID(xsize)) {
2025  KAUTH_DEBUG(" ERROR - Bogus kauth_filesec_t: %ld bytes", xsize);
2026 error = 0;
2027 goto out;
2028 }
2029
2030 /* how many entries would fit? */
2031 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2032
2033 /* get buffer and uio */
2034 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
2035 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2036 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2037 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2038 error = ENOMEM;
2039 goto out;
2040 }
2041
2042 /* read security attribute */
2043 rsize = xsize;
2044 if ((error = vn_getxattr(vp,
2045 KAUTH_FILESEC_XATTR,
2046 fsec_uio,
2047 &rsize,
2048 XATTR_NOSECURITY,
2049 ctx)) != 0) {
2050
2051 /* no attribute - no security data */
2052 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2053 error = 0;
2054 /* either way, we are done */
2055 goto out;
2056 }
2057
2058 /*
2059 * Validate security structure; the validation must take place in host
2060 * byte order. If it's corrupt, we will just ignore it.
2061 */
2062
2063 /* Validate the size before trying to convert it */
2064 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2065 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2066 goto out;
2067 }
2068
2069 /* Validate the magic number before trying to convert it */
2070 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2071 if (fsec->fsec_magic != host_fsec_magic) {
2072 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2073 goto out;
2074 }
2075
2076 /* Validate the entry count before trying to convert it. */
2077 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2078 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2079 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2080 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2081 goto out;
2082 }
2083 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2084 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2085 goto out;
2086 }
2087 }
2088
2089 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2090
2091 *fsecp = fsec;
2092 fsec = NULL;
2093 error = 0;
2094 out:
2095 if (fsec != NULL)
2096 kauth_filesec_free(fsec);
2097 if (fsec_uio != NULL)
2098 uio_free(fsec_uio);
2099 if (error)
2100 *fsecp = NULL;
2101 return(error);
2102 }
2103
2104 /*
2105 * Set a filesec and optional acl contents into an extended attribute.
2106 * function will attempt to store ACL, UUID, and GUID information using a
2107 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2108 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2109 * original caller supplied an acl.
2110 *
2111 * Parameters: vp The vnode on which to operate.
2112 * fsec The filesec being set.
2113 * acl The acl to be associated with 'fsec'.
2114 * ctx The vnode context in which the
2115 * operation is to be attempted.
2116 *
2117 * Returns: 0 Success
2118 * !0 errno value
2119 *
2120 * Notes: Both the fsec and the acl are always valid.
2121 *
2122 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2123 * as are the acl contents, if they are used. Internally, we will
2124  *              canonicalize these values into network (PPC) byte order before we
2125 * attempt to write them so that the on-disk contents of the
2126 * extended attribute are identical for both PPC and Intel (if we
2127 * were not being required to provide this service via fallback,
2128 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2129 * We reverse this process on the way out, so we leave with the
2130 * same byte order we started with.
2131 *
2132  * XXX:         We should enumerate the possible errno values here, and where
2133 * in the code they originated.
2134 */
2135 static int
2136 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2137 {
2138 uio_t fsec_uio;
2139 int error;
2140 uint32_t saved_acl_copysize;
2141
2142 fsec_uio = NULL;
2143
2144 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2145 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2146 error = ENOMEM;
2147 goto out;
2148 }
2149 /*
2150 * Save the pre-converted ACL copysize, because it gets swapped too
2151 * if we are running with the wrong endianness.
2152 */
2153 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2154
2155 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2156
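	/*
	 * Illustrative note (inferred from the iovec setup below): the xattr
	 * payload written here is the fixed portion of the filesec (magic,
	 * owner and group GUIDs) followed immediately by
	 * KAUTH_ACL_COPYSIZE(acl) bytes of ACL image, all held in disk byte
	 * order for the duration of the write and restored afterwards.
	 */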
2157 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2158 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2159 error = vn_setxattr(vp,
2160 KAUTH_FILESEC_XATTR,
2161 fsec_uio,
2162 XATTR_NOSECURITY, /* we have auth'ed already */
2163 ctx);
2164 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2165
2166 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2167
2168 out:
2169 if (fsec_uio != NULL)
2170 uio_free(fsec_uio);
2171 return(error);
2172 }
2173
2174
2175 /*
2176 * Returns: 0 Success
2177 * ENOMEM Not enough space [only if has filesec]
2178 * VNOP_GETATTR: ???
2179 * vnode_get_filesec: ???
2180 * kauth_cred_guid2uid: ???
2181 * kauth_cred_guid2gid: ???
2182 * vfs_update_vfsstat: ???
2183 */
2184 int
2185 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2186 {
2187 kauth_filesec_t fsec;
2188 kauth_acl_t facl;
2189 int error;
2190 uid_t nuid;
2191 gid_t ngid;
2192
2193 /* don't ask for extended security data if the filesystem doesn't support it */
2194 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2195 VATTR_CLEAR_ACTIVE(vap, va_acl);
2196 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2197 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2198 }
2199
2200 /*
2201 * If the caller wants size values we might have to synthesise, give the
2202 * filesystem the opportunity to supply better intermediate results.
2203 */
2204 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2205 VATTR_IS_ACTIVE(vap, va_total_size) ||
2206 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2207 VATTR_SET_ACTIVE(vap, va_data_size);
2208 VATTR_SET_ACTIVE(vap, va_data_alloc);
2209 VATTR_SET_ACTIVE(vap, va_total_size);
2210 VATTR_SET_ACTIVE(vap, va_total_alloc);
2211 }
2212
2213 error = VNOP_GETATTR(vp, vap, ctx);
2214 if (error) {
2215 KAUTH_DEBUG("ERROR - returning %d", error);
2216 goto out;
2217 }
2218
2219 /*
2220 * If extended security data was requested but not returned, try the fallback
2221 * path.
2222 */
2223 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2224 fsec = NULL;
2225
2226 if (XATTR_VNODE_SUPPORTED(vp)) {
2227 /* try to get the filesec */
2228 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
2229 goto out;
2230 }
2231 /* if no filesec, no attributes */
2232 if (fsec == NULL) {
2233 VATTR_RETURN(vap, va_acl, NULL);
2234 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2235 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2236 } else {
2237
2238 /* looks good, try to return what we were asked for */
2239 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2240 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2241
2242 /* only return the ACL if we were actually asked for it */
2243 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2244 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2245 VATTR_RETURN(vap, va_acl, NULL);
2246 } else {
2247 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2248 if (facl == NULL) {
2249 kauth_filesec_free(fsec);
2250 error = ENOMEM;
2251 goto out;
2252 }
2253 bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2254 VATTR_RETURN(vap, va_acl, facl);
2255 }
2256 }
2257 kauth_filesec_free(fsec);
2258 }
2259 }
2260 /*
2261 * If someone gave us an unsolicited filesec, toss it. We promise that
2262 * we're OK with a filesystem giving us anything back, but our callers
2263 * only expect what they asked for.
2264 */
2265 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2266 if (vap->va_acl != NULL)
2267 kauth_acl_free(vap->va_acl);
2268 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2269 }
2270
2271 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2272 /*
2273 * Handle the case where we need a UID/GID, but only have extended
2274 * security information.
2275 */
2276 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2277 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2278 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2279 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
2280 VATTR_RETURN(vap, va_uid, nuid);
2281 }
2282 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2283 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2284 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2285 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
2286 VATTR_RETURN(vap, va_gid, ngid);
2287 }
2288 #endif
2289
2290 /*
2291 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2292 */
2293 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2294 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2295 nuid = vap->va_uid;
2296 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2297 nuid = vp->v_mount->mnt_fsowner;
2298 if (nuid == KAUTH_UID_NONE)
2299 nuid = 99;
2300 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2301 nuid = vap->va_uid;
2302 } else {
2303 /* this will always be something sensible */
2304 nuid = vp->v_mount->mnt_fsowner;
2305 }
2306 if ((nuid == 99) && !vfs_context_issuser(ctx))
2307 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2308 VATTR_RETURN(vap, va_uid, nuid);
2309 }
2310 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2311 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2312 ngid = vap->va_gid;
2313 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2314 ngid = vp->v_mount->mnt_fsgroup;
2315 if (ngid == KAUTH_GID_NONE)
2316 ngid = 99;
2317 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2318 ngid = vap->va_gid;
2319 } else {
2320 /* this will always be something sensible */
2321 ngid = vp->v_mount->mnt_fsgroup;
2322 }
2323 if ((ngid == 99) && !vfs_context_issuser(ctx))
2324 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2325 VATTR_RETURN(vap, va_gid, ngid);
2326 }
2327
2328 /*
2329 * Synthesise some values that can be reasonably guessed.
2330 */
2331 if (!VATTR_IS_SUPPORTED(vap, va_iosize))
2332 VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
2333
2334 if (!VATTR_IS_SUPPORTED(vap, va_flags))
2335 VATTR_RETURN(vap, va_flags, 0);
2336
2337 if (!VATTR_IS_SUPPORTED(vap, va_filerev))
2338 VATTR_RETURN(vap, va_filerev, 0);
2339
2340 if (!VATTR_IS_SUPPORTED(vap, va_gen))
2341 VATTR_RETURN(vap, va_gen, 0);
2342
2343 /*
2344 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2345 */
2346 if (!VATTR_IS_SUPPORTED(vap, va_data_size))
2347 VATTR_RETURN(vap, va_data_size, 0);
2348
2349 /* do we want any of the possibly-computed values? */
2350 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2351 VATTR_IS_ACTIVE(vap, va_total_size) ||
2352 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2353 /* make sure f_bsize is valid */
2354 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2355 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0)
2356 goto out;
2357 }
2358
2359 /* default va_data_alloc from va_data_size */
2360 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
2361 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2362
2363 /* default va_total_size from va_data_size */
2364 if (!VATTR_IS_SUPPORTED(vap, va_total_size))
2365 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2366
2367 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2368 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
2369 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2370 }
2371
2372 /*
2373 * If we don't have a change time, pull it from the modtime.
2374 */
2375 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
2376 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2377
2378 /*
2379 * This is really only supported for the creation VNOPs, but since the field is there
2380 * we should populate it correctly.
2381 */
2382 VATTR_RETURN(vap, va_type, vp->v_type);
2383
2384 /*
2385 * The fsid can be obtained from the mountpoint directly.
2386 */
2387 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2388
2389 out:
2390
2391 return(error);
2392 }
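
#if 0
/*
 * Hedged usage sketch (not compiled): a typical in-kernel caller of
 * vnode_getattr() initialises a vnode_attr, marks the attributes it wants,
 * and checks VATTR_IS_SUPPORTED() on return.  The helper name and the
 * assumption that 'vp' carries an iocount are illustrative only.
 */
static int
example_get_data_size(vnode_t vp, vfs_context_t ctx, uint64_t *sizep)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);

	if ((error = vnode_getattr(vp, &va, ctx)) != 0)
		return(error);

	/* vnode_getattr() defaults va_data_size, but check support anyway */
	*sizep = VATTR_IS_SUPPORTED(&va, va_data_size) ? va.va_data_size : 0;
	return(0);
}
#endif /* 0 */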
2393
2394 /*
2395 * Set the attributes on a vnode in a vnode context.
2396 *
2397 * Parameters: vp The vnode whose attributes to set.
2398 * vap A pointer to the attributes to set.
2399 * ctx The vnode context in which the
2400 * operation is to be attempted.
2401 *
2402 * Returns: 0 Success
2403 * !0 errno value
2404 *
2405 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2406 *
2407 * The contents of the data area pointed to by 'vap' may be
2408 * modified if the vnode is on a filesystem which has been
2409 * mounted with the ignore-ownership flag, or by the underlying
2410 * VFS itself, or by the fallback code, if the underlying VFS
2411 * does not support ACL, UUID, or GUUID attributes directly.
2412 *
2413 * XXX: We should enumerate the possible errno values here, and where
2414 * in the code they originated.
2415 */
2416 int
2417 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2418 {
2419 int error, is_perm_change=0;
2420
2421 /*
2422 * Make sure the filesystem is mounted R/W.
2423 * If not, return an error.
2424 */
2425 if (vfs_isrdonly(vp->v_mount)) {
2426 error = EROFS;
2427 goto out;
2428 }
2429 #if NAMEDSTREAMS
2430 /* For streams, va_data_size is the only settable attribute. */
2431 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2432 error = EPERM;
2433 goto out;
2434 }
2435 #endif
2436
2437 /*
2438 * If ownership is being ignored on this volume, we silently discard
2439 * ownership changes.
2440 */
2441 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2442 VATTR_CLEAR_ACTIVE(vap, va_uid);
2443 VATTR_CLEAR_ACTIVE(vap, va_gid);
2444 }
2445
2446 if ( VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)
2447 || VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_acl)) {
2448 is_perm_change = 1;
2449 }
2450
2451 /*
2452 * Make sure that extended security is enabled if we're going to try
2453 * to set any.
2454 */
2455 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2456 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2457 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2458 error = ENOTSUP;
2459 goto out;
2460 }
2461
2462 error = VNOP_SETATTR(vp, vap, ctx);
2463
2464 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
2465 error = vnode_setattr_fallback(vp, vap, ctx);
2466
2467 #if CONFIG_FSE
2468 // only send a stat_changed event if this is more than
2469 // just an access or backup time update
2470 if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time)) && (vap->va_active != VNODE_ATTR_BIT(va_backup_time))) {
2471 if (is_perm_change) {
2472 if (need_fsevent(FSE_CHOWN, vp)) {
2473 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2474 }
2475 } else if (need_fsevent(FSE_STAT_CHANGED, vp)) {
2476 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2477 }
2478 }
2479 #endif
2480
2481 out:
2482 return(error);
2483 }
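
#if 0
/*
 * Hedged usage sketch (not compiled): attributes are set by marking them
 * with VATTR_SET() and calling vnode_setattr(); anything the filesystem
 * does not handle directly is handed to vnode_setattr_fallback() below.
 * The helper name is illustrative only.
 */
static int
example_chmod(vnode_t vp, mode_t mode, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, mode);
	return(vnode_setattr(vp, &va, ctx));
}
#endif /* 0 */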
2484
2485 /*
2486 * Fallback for setting the attributes on a vnode in a vnode context. This
2487 * function will attempt to store ACL, UUID, and GUID information utilizing
2488 * a read/modify/write operation against an EA used as a backing store for
2489 * the object.
2490 *
2491 * Parameters: vp The vnode whose attributes to set.
2492 * vap A pointer to the attributes to set.
2493 * ctx The vnode context in which the
2494 * operation is to be attempted.
2495 *
2496 * Returns: 0 Success
2497 * !0 errno value
2498 *
2499 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2500 * as are the fsec and lfsec, if they are used.
2501 *
2502 * The contents of the data area pointed to by 'vap' may be
2503 * modified to indicate which of the requested attributes are
2504 * supported.
2505 *
2506 * XXX: We should enumerate the possible errno values here, and where
2507 * in the code they originated.
2508 */
2509 int
2510 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2511 {
2512 kauth_filesec_t fsec;
2513 kauth_acl_t facl;
2514 struct kauth_filesec lfsec;
2515 int error;
2516
2517 error = 0;
2518
2519 /*
2520 * Extended security fallback via extended attributes.
2521 *
2522 * Note that we do not free the filesec; the caller is expected to
2523 * do this.
2524 */
2525 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2526 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2527 VATTR_NOT_RETURNED(vap, va_guuid)) {
2528 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2529
2530 /*
2531 * Fail for file types that we don't permit extended security
2532 * to be set on.
2533 */
2534 if (!XATTR_VNODE_SUPPORTED(vp)) {
2535 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2536 error = EINVAL;
2537 goto out;
2538 }
2539
2540 /*
2541 * If we don't have all the extended security items, we need
2542 * to fetch the existing data to perform a read-modify-write
2543 * operation.
2544 */
2545 fsec = NULL;
2546 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2547 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2548 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2549 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2550 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2551 goto out;
2552 }
2553 }
2554 /* if we didn't get a filesec, use our local one */
2555 if (fsec == NULL) {
2556 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2557 fsec = &lfsec;
2558 } else {
2559 KAUTH_DEBUG("SETATTR - updating existing filesec");
2560 }
2561 /* find the ACL */
2562 facl = &fsec->fsec_acl;
2563
2564 /* if we're using the local filesec, we need to initialise it */
2565 if (fsec == &lfsec) {
2566 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2567 fsec->fsec_owner = kauth_null_guid;
2568 fsec->fsec_group = kauth_null_guid;
2569 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2570 facl->acl_flags = 0;
2571 }
2572
2573 /*
2574 * Update with the supplied attributes.
2575 */
2576 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2577 KAUTH_DEBUG("SETATTR - updating owner UUID");
2578 fsec->fsec_owner = vap->va_uuuid;
2579 VATTR_SET_SUPPORTED(vap, va_uuuid);
2580 }
2581 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2582 KAUTH_DEBUG("SETATTR - updating group UUID");
2583 fsec->fsec_group = vap->va_guuid;
2584 VATTR_SET_SUPPORTED(vap, va_guuid);
2585 }
2586 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2587 if (vap->va_acl == NULL) {
2588 KAUTH_DEBUG("SETATTR - removing ACL");
2589 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2590 } else {
2591 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2592 facl = vap->va_acl;
2593 }
2594 VATTR_SET_SUPPORTED(vap, va_acl);
2595 }
2596
2597 /*
2598 * If the filesec data is all invalid, we can just remove
2599 * the EA completely.
2600 */
2601 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2602 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2603 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2604 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2605 /* no attribute is ok, nothing to delete */
2606 if (error == ENOATTR)
2607 error = 0;
2608 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2609 } else {
2610 /* write the EA */
2611 error = vnode_set_filesec(vp, fsec, facl, ctx);
2612 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2613 }
2614
2615 /* if we fetched a filesec, dispose of the buffer */
2616 if (fsec != &lfsec)
2617 kauth_filesec_free(fsec);
2618 }
2619 out:
2620
2621 return(error);
2622 }
2623
2624 /*
2625 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
2626 * event on a vnode.
2627 */
2628 int
2629 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
2630 {
2631 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
2632 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
2633 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
2634 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
2635 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
2636 uint32_t knote_events = (events & knote_mask);
2637
2638 /* Permissions are not explicitly part of the kqueue model */
2639 if (events & VNODE_EVENT_PERMS) {
2640 knote_events |= NOTE_ATTRIB;
2641 }
2642
2643 /* Directory contents information just becomes NOTE_WRITE */
2644 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
2645 knote_events |= NOTE_WRITE;
2646 }
2647
2648 if (knote_events) {
2649 lock_vnode_and_post(vp, knote_events);
2650 #if CONFIG_FSE
2651 if (vap != NULL) {
2652 create_fsevent_from_kevent(vp, events, vap);
2653 }
2654 #else
2655 (void)vap;
2656 #endif
2657 }
2658
2659 return 0;
2660 }
2661
2662
2663
2664 int
2665 vnode_isdyldsharedcache(vnode_t vp)
2666 {
2667 return ((vp->v_flag & VSHARED_DYLD) ? 1 : 0);
2668 }
2669
2670
2671 /*
2672 * For a filesystem that isn't tracking its own vnode watchers:
2673 * check whether a vnode is being monitored.
2674 */
2675 int
2676 vnode_ismonitored(vnode_t vp) {
2677 return (vp->v_knotes.slh_first != NULL);
2678 }
2679
2680 /*
2681 * Initialize a struct vnode_attr and activate the attributes required
2682 * by the vnode_notify() call.
2683 */
2684 int
2685 vfs_get_notify_attributes(struct vnode_attr *vap)
2686 {
2687 VATTR_INIT(vap);
2688 vap->va_active = VNODE_NOTIFY_ATTRS;
2689 return 0;
2690 }
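
#if 0
/*
 * Hedged usage sketch (not compiled): a filesystem that relies on VFS to
 * track watchers would typically check vnode_ismonitored(), gather the
 * attributes named by VNODE_NOTIFY_ATTRS, and then call vnode_notify().
 * The helper name is illustrative only.
 */
static void
example_notify_write(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;

	if (!vnode_ismonitored(vp))
		return;

	vfs_get_notify_attributes(&va);
	if (vnode_getattr(vp, &va, ctx) != 0)
		return;

	(void)vnode_notify(vp, VNODE_EVENT_WRITE, &va);
}
#endif /* 0 */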
2691
2692 #if CONFIG_TRIGGERS
2693 int
2694 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
2695 {
2696 int error;
2697 mount_t mp;
2698
2699 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
2700 if (mp == NULL) {
2701 return ENOENT;
2702 }
2703
2704 error = vfs_busy(mp, LK_NOWAIT);
2705 mount_iterdrop(mp);
2706
2707 if (error != 0) {
2708 return ENOENT;
2709 }
2710
2711 mount_lock(mp);
2712 if (mp->mnt_triggercallback != NULL) {
2713 error = EBUSY;
2714 mount_unlock(mp);
2715 goto out;
2716 }
2717
2718 mp->mnt_triggercallback = vtc;
2719 mp->mnt_triggerdata = data;
2720 mount_unlock(mp);
2721
2722 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
2723
2724 out:
2725 vfs_unbusy(mp);
2726 return error;
2727 }
2728 #endif /* CONFIG_TRIGGERS */
2729
2730 /*
2731 * Definition of vnode operations.
2732 */
2733
2734 #if 0
2735 /*
2736 *#
2737 *#% lookup dvp L ? ?
2738 *#% lookup vpp - L -
2739 */
2740 struct vnop_lookup_args {
2741 struct vnodeop_desc *a_desc;
2742 vnode_t a_dvp;
2743 vnode_t *a_vpp;
2744 struct componentname *a_cnp;
2745 vfs_context_t a_context;
2746 };
2747 #endif /* 0*/
2748
2749 /*
2750 * Returns: 0 Success
2751 * lock_fsnode:ENOENT No such file or directory [only for VFS
2752 * that is not thread safe & vnode is
2753 * currently being/has been terminated]
2754 * <vfs_lookup>:ENAMETOOLONG
2755 * <vfs_lookup>:ENOENT
2756 * <vfs_lookup>:EJUSTRETURN
2757 * <vfs_lookup>:EPERM
2758 * <vfs_lookup>:EISDIR
2759 * <vfs_lookup>:ENOTDIR
2760 * <vfs_lookup>:???
2761 *
2762 * Note: The return codes from the underlying VFS's lookup routine can't
2763 * be fully enumerated here, since third party VFS authors may not
2764 * limit their error returns to the ones documented here, even
2765 * though this may result in some programs functioning incorrectly.
2766 *
2767 * The return codes documented above are those which may currently
2768 * be returned by HFS from hfs_lookup, not including additional
2769 * error codes which may be propagated from underlying routines.
2770 */
2771 errno_t
2772 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
2773 {
2774 int _err;
2775 struct vnop_lookup_args a;
2776
2777 a.a_desc = &vnop_lookup_desc;
2778 a.a_dvp = dvp;
2779 a.a_vpp = vpp;
2780 a.a_cnp = cnp;
2781 a.a_context = ctx;
2782
2783 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
2784 if (_err == 0 && *vpp) {
2785 DTRACE_FSINFO(lookup, vnode_t, *vpp);
2786 }
2787
2788 return (_err);
2789 }
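
#if 0
/*
 * Hedged sketch (not compiled): the indirect call above dispatches through
 * the v_op table that a filesystem builds from a vnodeopv_entry_desc array
 * at registration time.  The 'examplefs_*' handlers are hypothetical names.
 */
extern int examplefs_vnop_lookup(struct vnop_lookup_args *);
extern int examplefs_vnop_open(struct vnop_open_args *);

static struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vnop_default_desc, (int (*)(void *))vn_default_error },
	{ &vnop_lookup_desc, (int (*)(void *))examplefs_vnop_lookup },
	{ &vnop_open_desc, (int (*)(void *))examplefs_vnop_open },
	{ NULL, (int (*)(void *))NULL }
};
#endif /* 0 */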
2790
2791 #if 0
2792 struct vnop_compound_open_args {
2793 struct vnodeop_desc *a_desc;
2794 vnode_t a_dvp;
2795 vnode_t *a_vpp;
2796 struct componentname *a_cnp;
2797 int32_t a_flags;
2798 int32_t a_fmode;
2799 struct vnode_attr *a_vap;
2800 vfs_context_t a_context;
2801 void *a_reserved;
2802 };
2803 #endif /* 0 */
2804
2805 int
2806 VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
2807 {
2808 int _err;
2809 struct vnop_compound_open_args a;
2810 int did_create = 0;
2811 int want_create;
2812 uint32_t tmp_status = 0;
2813 struct componentname *cnp = &ndp->ni_cnd;
2814
2815 want_create = (flags & O_CREAT);
2816
2817 a.a_desc = &vnop_compound_open_desc;
2818 a.a_dvp = dvp;
2819 a.a_vpp = vpp; /* Could be NULL */
2820 a.a_cnp = cnp;
2821 a.a_flags = flags;
2822 a.a_fmode = fmode;
2823 a.a_status = (statusp != NULL) ? statusp : &tmp_status;
2824 a.a_vap = vap;
2825 a.a_context = ctx;
2826 a.a_open_create_authorizer = vn_authorize_create;
2827 a.a_open_existing_authorizer = vn_authorize_open_existing;
2828 a.a_reserved = NULL;
2829
2830 if (dvp == NULLVP) {
2831 panic("No dvp?");
2832 }
2833 if (want_create && !vap) {
2834 panic("Want create, but no vap?");
2835 }
2836 if (!want_create && vap) {
2837 panic("Don't want create, but have a vap?");
2838 }
2839
2840 _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
2841 if (want_create) {
2842 if (_err == 0 && *vpp) {
2843 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
2844 } else {
2845 DTRACE_FSINFO(compound_open, vnode_t, dvp);
2846 }
2847 } else {
2848 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
2849 }
2850
2851 did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
2852
2853 if (did_create && !want_create) {
2854 panic("Filesystem did a create, even though none was requested?");
2855 }
2856
2857 if (did_create) {
2858 #if CONFIG_APPLEDOUBLE
2859 if (!NATIVE_XATTR(dvp)) {
2860 /*
2861 * Remove stale Apple Double file (if any).
2862 */
2863 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
2864 }
2865 #endif /* CONFIG_APPLEDOUBLE */
2866 /* On create, provide kqueue notification */
2867 post_event_if_success(dvp, _err, NOTE_WRITE);
2868 }
2869
2870 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
2871 #if 0 /* FSEvents... */
2872 if (*vpp && _err && _err != EKEEPLOOKING) {
2873 vnode_put(*vpp);
2874 *vpp = NULLVP;
2875 }
2876 #endif /* 0 */
2877
2878 return (_err);
2879
2880 }
2881
2882 #if 0
2883 struct vnop_create_args {
2884 struct vnodeop_desc *a_desc;
2885 vnode_t a_dvp;
2886 vnode_t *a_vpp;
2887 struct componentname *a_cnp;
2888 struct vnode_attr *a_vap;
2889 vfs_context_t a_context;
2890 };
2891 #endif /* 0*/
2892 errno_t
2893 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
2894 {
2895 int _err;
2896 struct vnop_create_args a;
2897
2898 a.a_desc = &vnop_create_desc;
2899 a.a_dvp = dvp;
2900 a.a_vpp = vpp;
2901 a.a_cnp = cnp;
2902 a.a_vap = vap;
2903 a.a_context = ctx;
2904
2905 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
2906 if (_err == 0 && *vpp) {
2907 DTRACE_FSINFO(create, vnode_t, *vpp);
2908 }
2909
2910 #if CONFIG_APPLEDOUBLE
2911 if (_err == 0 && !NATIVE_XATTR(dvp)) {
2912 /*
2913 * Remove stale Apple Double file (if any).
2914 */
2915 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
2916 }
2917 #endif /* CONFIG_APPLEDOUBLE */
2918
2919 post_event_if_success(dvp, _err, NOTE_WRITE);
2920
2921 return (_err);
2922 }
2923
2924 #if 0
2925 /*
2926 *#
2927 *#% whiteout dvp L L L
2928 *#% whiteout cnp - - -
2929 *#% whiteout flag - - -
2930 *#
2931 */
2932 struct vnop_whiteout_args {
2933 struct vnodeop_desc *a_desc;
2934 vnode_t a_dvp;
2935 struct componentname *a_cnp;
2936 int a_flags;
2937 vfs_context_t a_context;
2938 };
2939 #endif /* 0*/
2940 errno_t
2941 VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
2942 __unused int flags, __unused vfs_context_t ctx)
2943 {
2944 return (ENOTSUP); // XXX OBSOLETE
2945 }
2946
2947 #if 0
2948 /*
2949 *#
2950 *#% mknod dvp L U U
2951 *#% mknod vpp - X -
2952 *#
2953 */
2954 struct vnop_mknod_args {
2955 struct vnodeop_desc *a_desc;
2956 vnode_t a_dvp;
2957 vnode_t *a_vpp;
2958 struct componentname *a_cnp;
2959 struct vnode_attr *a_vap;
2960 vfs_context_t a_context;
2961 };
2962 #endif /* 0*/
2963 errno_t
2964 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
2965 {
2966
2967 int _err;
2968 struct vnop_mknod_args a;
2969
2970 a.a_desc = &vnop_mknod_desc;
2971 a.a_dvp = dvp;
2972 a.a_vpp = vpp;
2973 a.a_cnp = cnp;
2974 a.a_vap = vap;
2975 a.a_context = ctx;
2976
2977 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
2978 if (_err == 0 && *vpp) {
2979 DTRACE_FSINFO(mknod, vnode_t, *vpp);
2980 }
2981
2982 post_event_if_success(dvp, _err, NOTE_WRITE);
2983
2984 return (_err);
2985 }
2986
2987 #if 0
2988 /*
2989 *#
2990 *#% open vp L L L
2991 *#
2992 */
2993 struct vnop_open_args {
2994 struct vnodeop_desc *a_desc;
2995 vnode_t a_vp;
2996 int a_mode;
2997 vfs_context_t a_context;
2998 };
2999 #endif /* 0*/
3000 errno_t
3001 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3002 {
3003 int _err;
3004 struct vnop_open_args a;
3005
3006 if (ctx == NULL) {
3007 ctx = vfs_context_current();
3008 }
3009 a.a_desc = &vnop_open_desc;
3010 a.a_vp = vp;
3011 a.a_mode = mode;
3012 a.a_context = ctx;
3013
3014 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3015 DTRACE_FSINFO(open, vnode_t, vp);
3016
3017 return (_err);
3018 }
3019
3020 #if 0
3021 /*
3022 *#
3023 *#% close vp U U U
3024 *#
3025 */
3026 struct vnop_close_args {
3027 struct vnodeop_desc *a_desc;
3028 vnode_t a_vp;
3029 int a_fflag;
3030 vfs_context_t a_context;
3031 };
3032 #endif /* 0*/
3033 errno_t
3034 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3035 {
3036 int _err;
3037 struct vnop_close_args a;
3038
3039 if (ctx == NULL) {
3040 ctx = vfs_context_current();
3041 }
3042 a.a_desc = &vnop_close_desc;
3043 a.a_vp = vp;
3044 a.a_fflag = fflag;
3045 a.a_context = ctx;
3046
3047 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3048 DTRACE_FSINFO(close, vnode_t, vp);
3049
3050 return (_err);
3051 }
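
#if 0
/*
 * Hedged usage sketch (not compiled): both wrappers substitute
 * vfs_context_current() when ctx is NULL, so the minimal correct pairing
 * looks like this.  FREAD as the access mode is an assumption about the
 * caller; the helper name is illustrative only.
 */
static int
example_open_close(vnode_t vp)
{
	int error;

	if ((error = VNOP_OPEN(vp, FREAD, NULL)) != 0)
		return(error);
	/* ... issue VNOP_READ/VNOP_IOCTL etc. against vp here ... */
	return(VNOP_CLOSE(vp, FREAD, NULL));
}
#endif /* 0 */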
3052
3053 #if 0
3054 /*
3055 *#
3056 *#% access vp L L L
3057 *#
3058 */
3059 struct vnop_access_args {
3060 struct vnodeop_desc *a_desc;
3061 vnode_t a_vp;
3062 int a_action;
3063 vfs_context_t a_context;
3064 };
3065 #endif /* 0*/
3066 errno_t
3067 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3068 {
3069 int _err;
3070 struct vnop_access_args a;
3071
3072 if (ctx == NULL) {
3073 ctx = vfs_context_current();
3074 }
3075 a.a_desc = &vnop_access_desc;
3076 a.a_vp = vp;
3077 a.a_action = action;
3078 a.a_context = ctx;
3079
3080 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3081 DTRACE_FSINFO(access, vnode_t, vp);
3082
3083 return (_err);
3084 }
3085
3086 #if 0
3087 /*
3088 *#
3089 *#% getattr vp = = =
3090 *#
3091 */
3092 struct vnop_getattr_args {
3093 struct vnodeop_desc *a_desc;
3094 vnode_t a_vp;
3095 struct vnode_attr *a_vap;
3096 vfs_context_t a_context;
3097 };
3098 #endif /* 0*/
3099 errno_t
3100 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3101 {
3102 int _err;
3103 struct vnop_getattr_args a;
3104
3105 a.a_desc = &vnop_getattr_desc;
3106 a.a_vp = vp;
3107 a.a_vap = vap;
3108 a.a_context = ctx;
3109
3110 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3111 DTRACE_FSINFO(getattr, vnode_t, vp);
3112
3113 return (_err);
3114 }
3115
3116 #if 0
3117 /*
3118 *#
3119 *#% setattr vp L L L
3120 *#
3121 */
3122 struct vnop_setattr_args {
3123 struct vnodeop_desc *a_desc;
3124 vnode_t a_vp;
3125 struct vnode_attr *a_vap;
3126 vfs_context_t a_context;
3127 };
3128 #endif /* 0*/
3129 errno_t
3130 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3131 {
3132 int _err;
3133 struct vnop_setattr_args a;
3134
3135 a.a_desc = &vnop_setattr_desc;
3136 a.a_vp = vp;
3137 a.a_vap = vap;
3138 a.a_context = ctx;
3139
3140 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3141 DTRACE_FSINFO(setattr, vnode_t, vp);
3142
3143 #if CONFIG_APPLEDOUBLE
3144 /*
3145 * Shadow uid/gid/mod change to extended attribute file.
3146 */
3147 if (_err == 0 && !NATIVE_XATTR(vp)) {
3148 struct vnode_attr va;
3149 int change = 0;
3150
3151 VATTR_INIT(&va);
3152 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3153 VATTR_SET(&va, va_uid, vap->va_uid);
3154 change = 1;
3155 }
3156 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3157 VATTR_SET(&va, va_gid, vap->va_gid);
3158 change = 1;
3159 }
3160 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3161 VATTR_SET(&va, va_mode, vap->va_mode);
3162 change = 1;
3163 }
3164 if (change) {
3165 vnode_t dvp;
3166 const char *vname;
3167
3168 dvp = vnode_getparent(vp);
3169 vname = vnode_getname(vp);
3170
3171 xattrfile_setattr(dvp, vname, &va, ctx);
3172 if (dvp != NULLVP)
3173 vnode_put(dvp);
3174 if (vname != NULL)
3175 vnode_putname(vname);
3176 }
3177 }
3178 #endif /* CONFIG_APPLEDOUBLE */
3179
3180 /*
3181 * If we have changed any of the things about the file that are likely
3182 * to result in changes to authorization results, blow the vnode auth
3183 * cache
3184 */
3185 if (_err == 0 && (
3186 VATTR_IS_SUPPORTED(vap, va_mode) ||
3187 VATTR_IS_SUPPORTED(vap, va_uid) ||
3188 VATTR_IS_SUPPORTED(vap, va_gid) ||
3189 VATTR_IS_SUPPORTED(vap, va_flags) ||
3190 VATTR_IS_SUPPORTED(vap, va_acl) ||
3191 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
3192 VATTR_IS_SUPPORTED(vap, va_guuid))) {
3193 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3194
3195 #if NAMEDSTREAMS
3196 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3197 vnode_t svp;
3198 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3199 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3200 vnode_put(svp);
3201 }
3202 }
3203 #endif /* NAMEDSTREAMS */
3204 }
3205
3206
3207 post_event_if_success(vp, _err, NOTE_ATTRIB);
3208
3209 return (_err);
3210 }
3211
3212
3213 #if 0
3214 /*
3215 *#
3216 *#% read vp L L L
3217 *#
3218 */
3219 struct vnop_read_args {
3220 struct vnodeop_desc *a_desc;
3221 vnode_t a_vp;
3222 struct uio *a_uio;
3223 int a_ioflag;
3224 vfs_context_t a_context;
3225 };
3226 #endif /* 0*/
3227 errno_t
3228 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3229 {
3230 int _err;
3231 struct vnop_read_args a;
3232 #if CONFIG_DTRACE
3233 user_ssize_t resid = uio_resid(uio);
3234 #endif
3235
3236 if (ctx == NULL) {
3237 return EINVAL;
3238 }
3239
3240 a.a_desc = &vnop_read_desc;
3241 a.a_vp = vp;
3242 a.a_uio = uio;
3243 a.a_ioflag = ioflag;
3244 a.a_context = ctx;
3245
3246 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3247 DTRACE_FSINFO_IO(read,
3248 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3249
3250 return (_err);
3251 }
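
#if 0
/*
 * Hedged usage sketch (not compiled): unlike most wrappers above, VNOP_READ()
 * requires an explicit context and a caller-prepared uio.  The buffer,
 * length and helper name are assumptions for illustration; a real caller
 * would also inspect uio_resid() for a short read.
 */
static int
example_read_at(vnode_t vp, void *buf, size_t len, off_t off, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	if ((auio = uio_create(1, off, UIO_SYSSPACE, UIO_READ)) == NULL)
		return(ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), len);

	error = VNOP_READ(vp, auio, 0, ctx);

	uio_free(auio);
	return(error);
}
#endif /* 0 */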
3252
3253
3254 #if 0
3255 /*
3256 *#
3257 *#% write vp L L L
3258 *#
3259 */
3260 struct vnop_write_args {
3261 struct vnodeop_desc *a_desc;
3262 vnode_t a_vp;
3263 struct uio *a_uio;
3264 int a_ioflag;
3265 vfs_context_t a_context;
3266 };
3267 #endif /* 0*/
3268 errno_t
3269 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3270 {
3271 struct vnop_write_args a;
3272 int _err;
3273 #if CONFIG_DTRACE
3274 user_ssize_t resid = uio_resid(uio);
3275 #endif
3276
3277 if (ctx == NULL) {
3278 return EINVAL;
3279 }
3280
3281 a.a_desc = &vnop_write_desc;
3282 a.a_vp = vp;
3283 a.a_uio = uio;
3284 a.a_ioflag = ioflag;
3285 a.a_context = ctx;
3286
3287 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3288 DTRACE_FSINFO_IO(write,
3289 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3290
3291 post_event_if_success(vp, _err, NOTE_WRITE);
3292
3293 return (_err);
3294 }
3295
3296
3297 #if 0
3298 /*
3299 *#
3300 *#% ioctl vp U U U
3301 *#
3302 */
3303 struct vnop_ioctl_args {
3304 struct vnodeop_desc *a_desc;
3305 vnode_t a_vp;
3306 u_long a_command;
3307 caddr_t a_data;
3308 int a_fflag;
3309 vfs_context_t a_context;
3310 };
3311 #endif /* 0*/
3312 errno_t
3313 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3314 {
3315 int _err;
3316 struct vnop_ioctl_args a;
3317
3318 if (ctx == NULL) {
3319 ctx = vfs_context_current();
3320 }
3321
3322 /*
3323 * This check should probably have been put in the TTY code instead...
3324 *
3325 * We have to be careful about what we assume during startup and shutdown.
3326 * We have to be able to use the root filesystem's device vnode even when
3327 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3328 * structure. If there is no data pointer, it doesn't matter whether
3329 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZECACHE)
3330 * which passes NULL for its data pointer can therefore be used during
3331 * mount or unmount of the root filesystem.
3332 *
3333 * Depending on what root filesystems need to do during mount/unmount, we
3334 * may need to loosen this check again in the future.
3335 */
3336 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3337 if (data != NULL && !vnode_vfs64bitready(vp)) {
3338 return(ENOTTY);
3339 }
3340 }
3341
3342 a.a_desc = &vnop_ioctl_desc;
3343 a.a_vp = vp;
3344 a.a_command = command;
3345 a.a_data = data;
3346 a.a_fflag = fflag;
3347 a.a_context = ctx;
3348
3349 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
3350 DTRACE_FSINFO(ioctl, vnode_t, vp);
3351
3352 return (_err);
3353 }
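
#if 0
/*
 * Hedged usage sketch (not compiled): as the comment above notes, a command
 * that carries no data pointer, such as DKIOCSYNCHRONIZECACHE from
 * <sys/disk.h>, can be issued against the root device vnode even while devfs
 * is not mounted.  The helper name is illustrative only.
 */
static int
example_flush_device(vnode_t devvp, vfs_context_t ctx)
{
	return(VNOP_IOCTL(devvp, DKIOCSYNCHRONIZECACHE, (caddr_t)NULL, FWRITE, ctx));
}
#endif /* 0 */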
3354
3355
3356 #if 0
3357 /*
3358 *#
3359 *#% select vp U U U
3360 *#
3361 */
3362 struct vnop_select_args {
3363 struct vnodeop_desc *a_desc;
3364 vnode_t a_vp;
3365 int a_which;
3366 int a_fflags;
3367 void *a_wql;
3368 vfs_context_t a_context;
3369 };
3370 #endif /* 0*/
3371 errno_t
3372 VNOP_SELECT(vnode_t vp, int which, int fflags, void *wql, vfs_context_t ctx)
3373 {
3374 int _err;
3375 struct vnop_select_args a;
3376
3377 if (ctx == NULL) {
3378 ctx = vfs_context_current();
3379 }
3380 a.a_desc = &vnop_select_desc;
3381 a.a_vp = vp;
3382 a.a_which = which;
3383 a.a_fflags = fflags;
3384 a.a_context = ctx;
3385 a.a_wql = wql;
3386
3387 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3388 DTRACE_FSINFO(select, vnode_t, vp);
3389
3390 return (_err);
3391 }
3392
3393
3394 #if 0
3395 /*
3396 *#
3397 *#% exchange fvp L L L
3398 *#% exchange tvp L L L
3399 *#
3400 */
3401 struct vnop_exchange_args {
3402 struct vnodeop_desc *a_desc;
3403 vnode_t a_fvp;
3404 vnode_t a_tvp;
3405 int a_options;
3406 vfs_context_t a_context;
3407 };
3408 #endif /* 0*/
3409 errno_t
3410 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3411 {
3412 int _err;
3413 struct vnop_exchange_args a;
3414
3415 a.a_desc = &vnop_exchange_desc;
3416 a.a_fvp = fvp;
3417 a.a_tvp = tvp;
3418 a.a_options = options;
3419 a.a_context = ctx;
3420
3421 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3422 DTRACE_FSINFO(exchange, vnode_t, fvp);
3423
3424 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3425 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3426 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3427
3428 return (_err);
3429 }
3430
3431
3432 #if 0
3433 /*
3434 *#
3435 *#% revoke vp U U U
3436 *#
3437 */
3438 struct vnop_revoke_args {
3439 struct vnodeop_desc *a_desc;
3440 vnode_t a_vp;
3441 int a_flags;
3442 vfs_context_t a_context;
3443 };
3444 #endif /* 0*/
3445 errno_t
3446 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
3447 {
3448 struct vnop_revoke_args a;
3449 int _err;
3450
3451 a.a_desc = &vnop_revoke_desc;
3452 a.a_vp = vp;
3453 a.a_flags = flags;
3454 a.a_context = ctx;
3455
3456 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3457 DTRACE_FSINFO(revoke, vnode_t, vp);
3458
3459 return (_err);
3460 }
3461
3462
3463 #if 0
3464 /*
3465 *#
3466 *# mmap - vp U U U
3467 *#
3468 */
3469 struct vnop_mmap_args {
3470 struct vnodeop_desc *a_desc;
3471 vnode_t a_vp;
3472 int a_fflags;
3473 vfs_context_t a_context;
3474 };
3475 #endif /* 0*/
3476 errno_t
3477 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
3478 {
3479 int _err;
3480 struct vnop_mmap_args a;
3481
3482 a.a_desc = &vnop_mmap_desc;
3483 a.a_vp = vp;
3484 a.a_fflags = fflags;
3485 a.a_context = ctx;
3486
3487 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3488 DTRACE_FSINFO(mmap, vnode_t, vp);
3489
3490 return (_err);
3491 }
3492
3493
3494 #if 0
3495 /*
3496 *#
3497 *# mnomap - vp U U U
3498 *#
3499 */
3500 struct vnop_mnomap_args {
3501 struct vnodeop_desc *a_desc;
3502 vnode_t a_vp;
3503 vfs_context_t a_context;
3504 };
3505 #endif /* 0*/
3506 errno_t
3507 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
3508 {
3509 int _err;
3510 struct vnop_mnomap_args a;
3511
3512 a.a_desc = &vnop_mnomap_desc;
3513 a.a_vp = vp;
3514 a.a_context = ctx;
3515
3516 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3517 DTRACE_FSINFO(mnomap, vnode_t, vp);
3518
3519 return (_err);
3520 }
3521
3522
3523 #if 0
3524 /*
3525 *#
3526 *#% fsync vp L L L
3527 *#
3528 */
3529 struct vnop_fsync_args {
3530 struct vnodeop_desc *a_desc;
3531 vnode_t a_vp;
3532 int a_waitfor;
3533 vfs_context_t a_context;
3534 };
3535 #endif /* 0*/
3536 errno_t
3537 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
3538 {
3539 struct vnop_fsync_args a;
3540 int _err;
3541
3542 a.a_desc = &vnop_fsync_desc;
3543 a.a_vp = vp;
3544 a.a_waitfor = waitfor;
3545 a.a_context = ctx;
3546
3547 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3548 DTRACE_FSINFO(fsync, vnode_t, vp);
3549
3550 return (_err);
3551 }
3552
3553
3554 #if 0
3555 /*
3556 *#
3557 *#% remove dvp L U U
3558 *#% remove vp L U U
3559 *#
3560 */
3561 struct vnop_remove_args {
3562 struct vnodeop_desc *a_desc;
3563 vnode_t a_dvp;
3564 vnode_t a_vp;
3565 struct componentname *a_cnp;
3566 int a_flags;
3567 vfs_context_t a_context;
3568 };
3569 #endif /* 0*/
3570 errno_t
3571 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
3572 {
3573 int _err;
3574 struct vnop_remove_args a;
3575
3576 a.a_desc = &vnop_remove_desc;
3577 a.a_dvp = dvp;
3578 a.a_vp = vp;
3579 a.a_cnp = cnp;
3580 a.a_flags = flags;
3581 a.a_context = ctx;
3582
3583 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3584 DTRACE_FSINFO(remove, vnode_t, vp);
3585
3586 if (_err == 0) {
3587 vnode_setneedinactive(vp);
3588 #if CONFIG_APPLEDOUBLE
3589 if ( !(NATIVE_XATTR(dvp)) ) {
3590 /*
3591 * Remove any associated extended attribute file (._ AppleDouble file).
3592 */
3593 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
3594 }
3595 #endif /* CONFIG_APPLEDOUBLE */
3596 }
3597
3598 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
3599 post_event_if_success(dvp, _err, NOTE_WRITE);
3600
3601 return (_err);
3602 }
3603
3604 int
3605 VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
3606 {
3607 int _err;
3608 struct vnop_compound_remove_args a;
3609 int no_vp = (*vpp == NULLVP);
3610
3611 a.a_desc = &vnop_compound_remove_desc;
3612 a.a_dvp = dvp;
3613 a.a_vpp = vpp;
3614 a.a_cnp = &ndp->ni_cnd;
3615 a.a_flags = flags;
3616 a.a_vap = vap;
3617 a.a_context = ctx;
3618 a.a_remove_authorizer = vn_authorize_unlink;
3619
3620 _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
3621 if (_err == 0 && *vpp) {
3622 DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
3623 } else {
3624 DTRACE_FSINFO(compound_remove, vnode_t, dvp);
3625 }
3626 if (_err == 0) {
3627 vnode_setneedinactive(*vpp);
3628 #if CONFIG_APPLEDOUBLE
3629 if ( !(NATIVE_XATTR(dvp)) ) {
3630 /*
3631 * Remove any associated extended attribute file (._ AppleDouble file).
3632 */
3633 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
3634 }
3635 #endif /* CONFIG_APPLEDOUBLE */
3636 }
3637
3638 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
3639 post_event_if_success(dvp, _err, NOTE_WRITE);
3640
3641 if (no_vp) {
3642 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
3643 if (*vpp && _err && _err != EKEEPLOOKING) {
3644 vnode_put(*vpp);
3645 *vpp = NULLVP;
3646 }
3647 }
3648
3649 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
3650
3651 return (_err);
3652 }
3653
3654 #if 0
3655 /*
3656 *#
3657 *#% link vp U U U
3658 *#% link tdvp L U U
3659 *#
3660 */
3661 struct vnop_link_args {
3662 struct vnodeop_desc *a_desc;
3663 vnode_t a_vp;
3664 vnode_t a_tdvp;
3665 struct componentname *a_cnp;
3666 vfs_context_t a_context;
3667 };
3668 #endif /* 0*/
3669 errno_t
3670 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
3671 {
3672 int _err;
3673 struct vnop_link_args a;
3674
3675 #if CONFIG_APPLEDOUBLE
3676 /*
3677 * For file systems with non-native extended attributes,
3678 * disallow linking to an existing "._" Apple Double file.
3679 */
3680 if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
3681 const char *vname;
3682
3683 vname = vnode_getname(vp);
3684 if (vname != NULL) {
3685 _err = 0;
3686 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
3687 _err = EPERM;
3688 }
3689 vnode_putname(vname);
3690 if (_err)
3691 return (_err);
3692 }
3693 }
3694 #endif /* CONFIG_APPLEDOUBLE */
3695
3696 a.a_desc = &vnop_link_desc;
3697 a.a_vp = vp;
3698 a.a_tdvp = tdvp;
3699 a.a_cnp = cnp;
3700 a.a_context = ctx;
3701
3702 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
3703 DTRACE_FSINFO(link, vnode_t, vp);
3704
3705 post_event_if_success(vp, _err, NOTE_LINK);
3706 post_event_if_success(tdvp, _err, NOTE_WRITE);
3707
3708 return (_err);
3709 }
3710
3711 errno_t
3712 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
3713 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
3714 uint32_t flags, vfs_context_t ctx)
3715 {
3716 int _err;
3717 struct nameidata *fromnd = NULL;
3718 struct nameidata *tond = NULL;
3719 #if CONFIG_APPLEDOUBLE
3720 vnode_t src_attr_vp = NULLVP;
3721 vnode_t dst_attr_vp = NULLVP;
3722 char smallname1[48];
3723 char smallname2[48];
3724 char *xfromname = NULL;
3725 char *xtoname = NULL;
3726 #endif /* CONFIG_APPLEDOUBLE */
3727 int batched;
3728 uint32_t tdfflags; // Target directory file flags
3729
3730 batched = vnode_compound_rename_available(fdvp);
3731
3732 if (!batched) {
3733 if (*fvpp == NULLVP)
3734 panic("Not batched, and no fvp?");
3735 }
3736
3737 #if CONFIG_SECLUDED_RENAME
3738 if ((fcnp->cn_flags & CN_SECLUDE_RENAME) &&
3739 (((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) == 0)) {
3740 return ENOTSUP;
3741 }
3742 #endif
3743
3744 #if CONFIG_APPLEDOUBLE
3745 /*
3746 * We need to preflight any potential AppleDouble file for the source file
3747 * before doing the rename operation, since we could potentially be doing
3748 * this operation on a network filesystem, and would end up duplicating
3749 * the work. Also, save the source and destination names. Skip it if the
3750 * source has a "._" prefix.
3751 */
3752
3753 if (!NATIVE_XATTR(fdvp) &&
3754 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
3755 size_t len;
3756 int error;
3757
3758 /* Get source attribute file name. */
3759 len = fcnp->cn_namelen + 3;
3760 if (len > sizeof(smallname1)) {
3761 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
3762 } else {
3763 xfromname = &smallname1[0];
3764 }
3765 strlcpy(xfromname, "._", min(sizeof smallname1, len));
3766 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
3767 xfromname[len-1] = '\0';
3768
3769 /* Get destination attribute file name. */
3770 len = tcnp->cn_namelen + 3;
3771 if (len > sizeof(smallname2)) {
3772 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
3773 } else {
3774 xtoname = &smallname2[0];
3775 }
3776 strlcpy(xtoname, "._", min(sizeof smallname2, len));
3777 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
3778 xtoname[len-1] = '\0';
3779
3780 /*
3781 * Look up the source attribute file and keep a reference on it if it exists.
3782 * Note that we do the namei with the nameiop of RENAME, which is different than
3783 * in the rename syscall. It's OK if the source file does not exist, since this
3784 * is only for AppleDouble files.
3785 */
3786 if (xfromname != NULL) {
3787 MALLOC(fromnd, struct nameidata *, sizeof (struct nameidata), M_TEMP, M_WAITOK);
3788 NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
3789 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
3790 fromnd->ni_dvp = fdvp;
3791 error = namei(fromnd);
3792
3793 /*
3794 * If there was an error looking up source attribute file,
3795 * we'll behave as if it didn't exist.
3796 */
3797
3798 if (error == 0) {
3799 if (fromnd->ni_vp) {
3800 /* src_attr_vp indicates need to call vnode_put / nameidone later */
3801 src_attr_vp = fromnd->ni_vp;
3802
3803 if (fromnd->ni_vp->v_type != VREG) {
3804 src_attr_vp = NULLVP;
3805 vnode_put(fromnd->ni_vp);
3806 }
3807 }
3808 /*
3809 * Either we got an invalid vnode type (not a regular file) or the namei lookup
3810 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
3811 * have a vnode here, so we drop our namei buffer for the source attribute file
3812 */
3813 if (src_attr_vp == NULLVP) {
3814 nameidone(fromnd);
3815 }
3816 }
3817 }
3818 }
3819 #endif /* CONFIG_APPLEDOUBLE */
3820
3821 if (batched) {
3822 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
3823 if (_err != 0) {
3824 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
3825 }
3826 } else {
3827 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
3828 }
3829 #if CONFIG_MACF
3830 if (_err == 0) {
3831 mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
3832 }
3833 #endif
3834
3835 /*
3836 * If moved to a new directory that is restricted,
3837 * set the restricted flag on the item moved.
3838 */
3839 if (_err == 0) {
3840 _err = vnode_flags(tdvp, &tdfflags, ctx);
3841 if (_err == 0 && (tdfflags & SF_RESTRICTED)) {
3842 uint32_t fflags;
3843 _err = vnode_flags(*fvpp, &fflags, ctx);
3844 if (_err == 0 && !(fflags & SF_RESTRICTED)) {
3845 struct vnode_attr va;
3846 VATTR_INIT(&va);
3847 VATTR_SET(&va, va_flags, fflags | SF_RESTRICTED);
3848 _err = vnode_setattr(*fvpp, &va, ctx);
3849 }
3850 }
3851 }
3852
3853 #if CONFIG_APPLEDOUBLE
3854 /*
3855 * Rename any associated extended attribute file (._ AppleDouble file).
3856 */
3857 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
3858 int error = 0;
3859
3860 /*
3861 * Get destination attribute file vnode.
3862 * Note that tdvp already has an iocount reference. Make sure to check that we
3863 * get a valid vnode from namei.
3864 */
3865 MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK);
3866 NDINIT(tond, RENAME, OP_RENAME,
3867 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
3868 CAST_USER_ADDR_T(xtoname), ctx);
3869 tond->ni_dvp = tdvp;
3870 error = namei(tond);
3871
3872 if (error)
3873 goto ad_error;
3874
3875 if (tond->ni_vp) {
3876 dst_attr_vp = tond->ni_vp;
3877 }
3878
3879 if (src_attr_vp) {
3880 const char *old_name = src_attr_vp->v_name;
3881 vnode_t old_parent = src_attr_vp->v_parent;
3882
3883 if (batched) {
3884 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
3885 tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
3886 0, ctx);
3887 } else {
3888 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
3889 tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
3890 }
3891
3892 if (error == 0 && old_name == src_attr_vp->v_name &&
3893 old_parent == src_attr_vp->v_parent) {
3894 int update_flags = VNODE_UPDATE_NAME;
3895
3896 if (fdvp != tdvp)
3897 update_flags |= VNODE_UPDATE_PARENT;
3898
3899 if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
3900 vnode_update_identity(src_attr_vp, tdvp,
3901 tond->ni_cnd.cn_nameptr,
3902 tond->ni_cnd.cn_namelen,
3903 tond->ni_cnd.cn_hash,
3904 update_flags);
3905 }
3906 }
3907
3908 /* kevent notifications for moving resource files
3909 * _err is zero if we're here, so no need to notify directories, code
3910 * below will do that. only need to post the rename on the source and
3911 * possibly a delete on the dest
3912 */
3913 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
3914 if (dst_attr_vp) {
3915 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
3916 }
3917
3918 } else if (dst_attr_vp) {
3919 /*
3920 * Just delete destination attribute file vnode if it exists, since
3921 * we didn't have a source attribute file.
3922 * Note that tdvp already has an iocount reference.
3923 */
3924
3925 struct vnop_remove_args args;
3926
3927 args.a_desc = &vnop_remove_desc;
3928 args.a_dvp = tdvp;
3929 args.a_vp = dst_attr_vp;
3930 args.a_cnp = &tond->ni_cnd;
3931 args.a_context = ctx;
3932
3933 if (error == 0) {
3934 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
3935
3936 if (error == 0)
3937 vnode_setneedinactive(dst_attr_vp);
3938 }
3939
3940 /* kevent notification for deleting the destination's attribute file
3941 * if it existed. Only need to post the delete on the destination, since
3942 * the code below will handle the directories.
3943 */
3944 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
3945 }
3946 }
3947 ad_error:
3948 if (src_attr_vp) {
3949 vnode_put(src_attr_vp);
3950 nameidone(fromnd);
3951 }
3952 if (dst_attr_vp) {
3953 vnode_put(dst_attr_vp);
3954 nameidone(tond);
3955 }
3956 if (xfromname && xfromname != &smallname1[0]) {
3957 FREE(xfromname, M_TEMP);
3958 }
3959 if (xtoname && xtoname != &smallname2[0]) {
3960 FREE(xtoname, M_TEMP);
3961 }
3962 #endif /* CONFIG_APPLEDOUBLE */
3963 if (fromnd) {
3964 FREE(fromnd, M_TEMP);
3965 }
3966 if (tond) {
3967 FREE(tond, M_TEMP);
3968 }
3969 return _err;
3970 }
3971
3972
3973 #if 0
3974 /*
3975 *#
3976 *#% rename fdvp U U U
3977 *#% rename fvp U U U
3978 *#% rename tdvp L U U
3979 *#% rename tvp X U U
3980 *#
3981 */
3982 struct vnop_rename_args {
3983 struct vnodeop_desc *a_desc;
3984 vnode_t a_fdvp;
3985 vnode_t a_fvp;
3986 struct componentname *a_fcnp;
3987 vnode_t a_tdvp;
3988 vnode_t a_tvp;
3989 struct componentname *a_tcnp;
3990 vfs_context_t a_context;
3991 };
3992 #endif /* 0*/
3993 errno_t
3994 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
3995 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
3996 vfs_context_t ctx)
3997 {
3998 int _err = 0;
3999 int events;
4000 struct vnop_rename_args a;
4001
4002 a.a_desc = &vnop_rename_desc;
4003 a.a_fdvp = fdvp;
4004 a.a_fvp = fvp;
4005 a.a_fcnp = fcnp;
4006 a.a_tdvp = tdvp;
4007 a.a_tvp = tvp;
4008 a.a_tcnp = tcnp;
4009 a.a_context = ctx;
4010
4011 /* do the rename of the main file. */
4012 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4013 DTRACE_FSINFO(rename, vnode_t, fdvp);
4014
4015 if (_err == 0) {
4016 if (tvp && tvp != fvp)
4017 vnode_setneedinactive(tvp);
4018 }
4019
4020 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4021 if (_err == 0) {
4022 events = NOTE_WRITE;
4023 if (vnode_isdir(fvp)) {
4024 /* Link count on dir changed only if we are moving a dir and...
4025 * --Moved to new dir, not overwriting there
4026 * --Kept in same dir and DID overwrite
4027 */
4028 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4029 events |= NOTE_LINK;
4030 }
4031 }
4032
4033 lock_vnode_and_post(fdvp, events);
4034 if (fdvp != tdvp) {
4035 lock_vnode_and_post(tdvp, events);
4036 }
4037
4038 /* If you're replacing the target, post a deletion for it */
4039 if (tvp)
4040 {
4041 lock_vnode_and_post(tvp, NOTE_DELETE);
4042 }
4043
4044 lock_vnode_and_post(fvp, NOTE_RENAME);
4045 }
4046
4047 return (_err);
4048 }
4049
4050 int
4051 VNOP_COMPOUND_RENAME(
4052 struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4053 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4054 uint32_t flags, vfs_context_t ctx)
4055 {
4056 int _err = 0;
4057 int events;
4058 struct vnop_compound_rename_args a;
4059 int no_fvp, no_tvp;
4060
4061 no_fvp = (*fvpp) == NULLVP;
4062 no_tvp = (*tvpp) == NULLVP;
4063
4064 a.a_desc = &vnop_compound_rename_desc;
4065
4066 a.a_fdvp = fdvp;
4067 a.a_fvpp = fvpp;
4068 a.a_fcnp = fcnp;
4069 a.a_fvap = fvap;
4070
4071 a.a_tdvp = tdvp;
4072 a.a_tvpp = tvpp;
4073 a.a_tcnp = tcnp;
4074 a.a_tvap = tvap;
4075
4076 a.a_flags = flags;
4077 a.a_context = ctx;
4078 a.a_rename_authorizer = vn_authorize_rename;
4079 a.a_reserved = NULL;
4080
4081 /* do the rename of the main file. */
4082 _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
4083 DTRACE_FSINFO(compound_rename, vnode_t, fdvp);
4084
4085 if (_err == 0) {
4086 if (*tvpp && *tvpp != *fvpp)
4087 vnode_setneedinactive(*tvpp);
4088 }
4089
4090 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4091 if (_err == 0 && *fvpp != *tvpp) {
4092 if (!*fvpp) {
4093 panic("No fvpp after compound rename?");
4094 }
4095
4096 events = NOTE_WRITE;
4097 if (vnode_isdir(*fvpp)) {
4098 /* Link count on dir changed only if we are moving a dir and...
4099 * --Moved to new dir, not overwriting there
4100 * --Kept in same dir and DID overwrite
4101 */
4102 if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
4103 events |= NOTE_LINK;
4104 }
4105 }
4106
4107 lock_vnode_and_post(fdvp, events);
4108 if (fdvp != tdvp) {
4109 lock_vnode_and_post(tdvp, events);
4110 }
4111
4112 /* If you're replacing the target, post a deletion for it */
4113 if (*tvpp)
4114 {
4115 lock_vnode_and_post(*tvpp, NOTE_DELETE);
4116 }
4117
4118 lock_vnode_and_post(*fvpp, NOTE_RENAME);
4119 }
4120
4121 if (no_fvp) {
4122 lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
4123 }
4124 if (no_tvp && *tvpp != NULLVP) {
4125 lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
4126 }
4127
4128 if (_err && _err != EKEEPLOOKING) {
4129 if (*fvpp) {
4130 vnode_put(*fvpp);
4131 *fvpp = NULLVP;
4132 }
4133 if (*tvpp) {
4134 vnode_put(*tvpp);
4135 *tvpp = NULLVP;
4136 }
4137 }
4138
4139 return (_err);
4140 }
4141
4142 int
4143 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4144 struct vnode_attr *vap, vfs_context_t ctx)
4145 {
4146 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4147 panic("Non-CREATE nameiop in vn_mkdir()?");
4148 }
4149
4150 if (vnode_compound_mkdir_available(dvp)) {
4151 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4152 } else {
4153 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4154 }
4155 }
4156
4157 #if 0
4158 /*
4159 *#
4160 *#% mkdir dvp L U U
4161 *#% mkdir vpp - L -
4162 *#
4163 */
4164 struct vnop_mkdir_args {
4165 struct vnodeop_desc *a_desc;
4166 vnode_t a_dvp;
4167 vnode_t *a_vpp;
4168 struct componentname *a_cnp;
4169 struct vnode_attr *a_vap;
4170 vfs_context_t a_context;
4171 };
4172 #endif /* 0*/
4173 errno_t
4174 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4175 struct vnode_attr *vap, vfs_context_t ctx)
4176 {
4177 int _err;
4178 struct vnop_mkdir_args a;
4179
4180 a.a_desc = &vnop_mkdir_desc;
4181 a.a_dvp = dvp;
4182 a.a_vpp = vpp;
4183 a.a_cnp = cnp;
4184 a.a_vap = vap;
4185 a.a_context = ctx;
4186
4187 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4188 if (_err == 0 && *vpp) {
4189 DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4190 }
4191 #if CONFIG_APPLEDOUBLE
4192 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4193 /*
4194 * Remove stale Apple Double file (if any).
4195 */
4196 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4197 }
4198 #endif /* CONFIG_APPLEDOUBLE */
4199
4200 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4201
4202 return (_err);
4203 }
4204
4205 int
4206 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4207 struct vnode_attr *vap, vfs_context_t ctx)
4208 {
4209 int _err;
4210 struct vnop_compound_mkdir_args a;
4211
4212 a.a_desc = &vnop_compound_mkdir_desc;
4213 a.a_dvp = dvp;
4214 a.a_vpp = vpp;
4215 a.a_cnp = &ndp->ni_cnd;
4216 a.a_vap = vap;
4217 a.a_flags = 0;
4218 a.a_context = ctx;
4219 #if 0
4220 a.a_mkdir_authorizer = vn_authorize_mkdir;
4221 #endif /* 0 */
4222 a.a_reserved = NULL;
4223
4224 _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
4225 if (_err == 0 && *vpp) {
4226 DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
4227 }
4228 #if CONFIG_APPLEDOUBLE
4229 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4230 /*
4231 * Remove stale Apple Double file (if any).
4232 */
4233 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4234 }
4235 #endif /* CONFIG_APPLEDOUBLE */
4236
4237 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4238
4239 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
4240 if (*vpp && _err && _err != EKEEPLOOKING) {
4241 vnode_put(*vpp);
4242 *vpp = NULLVP;
4243 }
4244
4245 return (_err);
4246 }
4247
4248 int
4249 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4250 {
4251 if (vnode_compound_rmdir_available(dvp)) {
4252 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4253 } else {
4254 if (*vpp == NULLVP) {
4255 panic("NULL vp, but not a compound VNOP?");
4256 }
4257 if (vap != NULL) {
4258 panic("Non-NULL vap, but not a compound VNOP?");
4259 }
4260 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4261 }
4262 }
4263
4264 #if 0
4265 /*
4266 *#
4267 *#% rmdir dvp L U U
4268 *#% rmdir vp L U U
4269 *#
4270 */
4271 struct vnop_rmdir_args {
4272 struct vnodeop_desc *a_desc;
4273 vnode_t a_dvp;
4274 vnode_t a_vp;
4275 struct componentname *a_cnp;
4276 vfs_context_t a_context;
4277 };
4278
4279 #endif /* 0*/
4280 errno_t
4281 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4282 {
4283 int _err;
4284 struct vnop_rmdir_args a;
4285
4286 a.a_desc = &vnop_rmdir_desc;
4287 a.a_dvp = dvp;
4288 a.a_vp = vp;
4289 a.a_cnp = cnp;
4290 a.a_context = ctx;
4291
4292 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4293 DTRACE_FSINFO(rmdir, vnode_t, vp);
4294
4295 if (_err == 0) {
4296 vnode_setneedinactive(vp);
4297 #if CONFIG_APPLEDOUBLE
4298 if ( !(NATIVE_XATTR(dvp)) ) {
4299 /*
4300 * Remove any associated extended attribute file (._ AppleDouble file).
4301 */
4302 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4303 }
4304 #endif
4305 }
4306
4307 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4308 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4309 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4310
4311 return (_err);
4312 }
4313
4314 int
4315 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4316 struct vnode_attr *vap, vfs_context_t ctx)
4317 {
4318 int _err;
4319 struct vnop_compound_rmdir_args a;
4320 int no_vp;
4321
4322 a.a_desc = &vnop_compound_rmdir_desc;
4323 a.a_dvp = dvp;
4324 a.a_vpp = vpp;
4325 a.a_cnp = &ndp->ni_cnd;
4326 a.a_vap = vap;
4327 a.a_flags = 0;
4328 a.a_context = ctx;
4329 a.a_rmdir_authorizer = vn_authorize_rmdir;
4330 a.a_reserved = NULL;
4331
4332 no_vp = (*vpp == NULLVP);
4333
4334 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
4335 if (_err == 0 && *vpp) {
4336 DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
4337 }
4338 #if CONFIG_APPLEDOUBLE
4339 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4340 /*
4341 * Remove stale Apple Double file (if any).
4342 */
4343 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4344 }
4345 #endif
4346
4347 if (*vpp) {
4348 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4349 }
4350 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4351
4352 if (no_vp) {
4353 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4354
4355 #if 0 /* Removing orphaned ._ files requires a vp.... */
4356 if (*vpp && _err && _err != EKEEPLOOKING) {
4357 vnode_put(*vpp);
4358 *vpp = NULLVP;
4359 }
4360 #endif /* 0 */
4361 }
4362
4363 return (_err);
4364 }
4365
4366 #if CONFIG_APPLEDOUBLE
4367 /*
4368 * Remove a ._ AppleDouble file
4369 */
4370 #define AD_STALE_SECS (180)
4371 static void
4372 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
4373 {
4374 vnode_t xvp;
4375 struct nameidata nd;
4376 char smallname[64];
4377 char *filename = NULL;
4378 size_t len;
4379
4380 if ((basename == NULL) || (basename[0] == '\0') ||
4381 (basename[0] == '.' && basename[1] == '_')) {
4382 return;
4383 }
4384 filename = &smallname[0];
4385 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4386 if (len >= sizeof(smallname)) {
4387 len++; /* snprintf result doesn't include '\0' */
4388 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4389 len = snprintf(filename, len, "._%s", basename);
4390 }
4391 NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
4392 CAST_USER_ADDR_T(filename), ctx);
4393 nd.ni_dvp = dvp;
4394 if (namei(&nd) != 0)
4395 goto out2;
4396
4397 xvp = nd.ni_vp;
4398 nameidone(&nd);
4399 if (xvp->v_type != VREG)
4400 goto out1;
4401
4402 /*
4403 * When creating a new object and a "._" file already
4404 * exists, check to see if it's a stale "._" file.
4405 *
4406 */
4407 if (!force) {
4408 struct vnode_attr va;
4409
4410 VATTR_INIT(&va);
4411 VATTR_WANTED(&va, va_data_size);
4412 VATTR_WANTED(&va, va_modify_time);
4413 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
4414 VATTR_IS_SUPPORTED(&va, va_data_size) &&
4415 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
4416 va.va_data_size != 0) {
4417 struct timeval tv;
4418
4419 microtime(&tv);
4420 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
4421 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
4422 force = 1; /* must be stale */
4423 }
4424 }
4425 }
4426 if (force) {
4427 int error;
4428
4429 error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
4430 if (error == 0)
4431 vnode_setneedinactive(xvp);
4432
4433 post_event_if_success(xvp, error, NOTE_DELETE);
4434 post_event_if_success(dvp, error, NOTE_WRITE);
4435 }
4436
4437 out1:
4438 vnode_put(dvp);
4439 vnode_put(xvp);
4440 out2:
4441 if (filename && filename != &smallname[0]) {
4442 FREE(filename, M_TEMP);
4443 }
4444 }
4445
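/*
 * The staleness rule used by xattrfile_remove() above, pulled out as a
 * standalone predicate for clarity.  Illustrative sketch only;
 * "ad_file_is_stale" is a hypothetical helper.  A non-empty "._" file whose
 * modification time is more than AD_STALE_SECS in the past is treated as
 * stale and removed even when 'force' was not requested.
 */
#if 0
static int
ad_file_is_stale(const struct vnode_attr *va)
{
	struct timeval tv;

	microtime(&tv);
	return ((va->va_data_size != 0) &&
	    (tv.tv_sec > va->va_modify_time.tv_sec) &&
	    ((tv.tv_sec - va->va_modify_time.tv_sec) > AD_STALE_SECS));
}
#endif /* 0 */
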
4446 /*
4447 * Shadow uid/gid/mode to a ._ AppleDouble file
4448 */
4449 static void
4450 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
4451 vfs_context_t ctx)
4452 {
4453 vnode_t xvp;
4454 struct nameidata nd;
4455 char smallname[64];
4456 char *filename = NULL;
4457 size_t len;
4458
4459 if ((dvp == NULLVP) ||
4460 (basename == NULL) || (basename[0] == '\0') ||
4461 (basename[0] == '.' && basename[1] == '_')) {
4462 return;
4463 }
4464 filename = &smallname[0];
4465 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4466 if (len >= sizeof(smallname)) {
4467 len++; /* snprintf result doesn't include '\0' */
4468 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4469 len = snprintf(filename, len, "._%s", basename);
4470 }
4471 NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
4472 CAST_USER_ADDR_T(filename), ctx);
4473 nd.ni_dvp = dvp;
4474 if (namei(&nd) != 0)
4475 goto out2;
4476
4477 xvp = nd.ni_vp;
4478 nameidone(&nd);
4479
4480 if (xvp->v_type == VREG) {
4481 struct vnop_setattr_args a;
4482
4483 a.a_desc = &vnop_setattr_desc;
4484 a.a_vp = xvp;
4485 a.a_vap = vap;
4486 a.a_context = ctx;
4487
4488 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
4489 }
4490
4491 vnode_put(xvp);
4492 out2:
4493 if (filename && filename != &smallname[0]) {
4494 FREE(filename, M_TEMP);
4495 }
4496 }
4497 #endif /* CONFIG_APPLEDOUBLE */
4498
4499 #if 0
4500 /*
4501 *#
4502 *#% symlink dvp L U U
4503 *#% symlink vpp - U -
4504 *#
4505 */
4506 struct vnop_symlink_args {
4507 struct vnodeop_desc *a_desc;
4508 vnode_t a_dvp;
4509 vnode_t *a_vpp;
4510 struct componentname *a_cnp;
4511 struct vnode_attr *a_vap;
4512 char *a_target;
4513 vfs_context_t a_context;
4514 };
4515
4516 #endif /* 0*/
4517 errno_t
4518 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4519 struct vnode_attr *vap, char *target, vfs_context_t ctx)
4520 {
4521 int _err;
4522 struct vnop_symlink_args a;
4523
4524 a.a_desc = &vnop_symlink_desc;
4525 a.a_dvp = dvp;
4526 a.a_vpp = vpp;
4527 a.a_cnp = cnp;
4528 a.a_vap = vap;
4529 a.a_target = target;
4530 a.a_context = ctx;
4531
4532 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
4533 DTRACE_FSINFO(symlink, vnode_t, dvp);
4534 #if CONFIG_APPLEDOUBLE
4535 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4536 /*
4537 * Remove stale Apple Double file (if any). Posts its own knotes
4538 */
4539 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4540 }
4541 #endif /* CONFIG_APPLEDOUBLE */
4542
4543 post_event_if_success(dvp, _err, NOTE_WRITE);
4544
4545 return (_err);
4546 }
4547
4548 #if 0
4549 /*
4550 *#
4551 *#% readdir vp L L L
4552 *#
4553 */
4554 struct vnop_readdir_args {
4555 struct vnodeop_desc *a_desc;
4556 vnode_t a_vp;
4557 struct uio *a_uio;
4558 int a_flags;
4559 int *a_eofflag;
4560 int *a_numdirent;
4561 vfs_context_t a_context;
4562 };
4563
4564 #endif /* 0*/
4565 errno_t
4566 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
4567 int *numdirent, vfs_context_t ctx)
4568 {
4569 int _err;
4570 struct vnop_readdir_args a;
4571 #if CONFIG_DTRACE
4572 user_ssize_t resid = uio_resid(uio);
4573 #endif
4574
4575 a.a_desc = &vnop_readdir_desc;
4576 a.a_vp = vp;
4577 a.a_uio = uio;
4578 a.a_flags = flags;
4579 a.a_eofflag = eofflag;
4580 a.a_numdirent = numdirent;
4581 a.a_context = ctx;
4582
4583 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
4584 DTRACE_FSINFO_IO(readdir,
4585 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4586
4587 return (_err);
4588 }
4589
4590 #if 0
4591 /*
4592 *#
4593 *#% readdirattr vp L L L
4594 *#
4595 */
4596 struct vnop_readdirattr_args {
4597 struct vnodeop_desc *a_desc;
4598 vnode_t a_vp;
4599 struct attrlist *a_alist;
4600 struct uio *a_uio;
4601 uint32_t a_maxcount;
4602 uint32_t a_options;
4603 uint32_t *a_newstate;
4604 int *a_eofflag;
4605 uint32_t *a_actualcount;
4606 vfs_context_t a_context;
4607 };
4608
4609 #endif /* 0*/
4610 errno_t
4611 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
4612 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
4613 {
4614 int _err;
4615 struct vnop_readdirattr_args a;
4616 #if CONFIG_DTRACE
4617 user_ssize_t resid = uio_resid(uio);
4618 #endif
4619
4620 a.a_desc = &vnop_readdirattr_desc;
4621 a.a_vp = vp;
4622 a.a_alist = alist;
4623 a.a_uio = uio;
4624 a.a_maxcount = maxcount;
4625 a.a_options = options;
4626 a.a_newstate = newstate;
4627 a.a_eofflag = eofflag;
4628 a.a_actualcount = actualcount;
4629 a.a_context = ctx;
4630
4631 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
4632 DTRACE_FSINFO_IO(readdirattr,
4633 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4634
4635 return (_err);
4636 }
4637
4638 #if 0
4639 struct vnop_getattrlistbulk_args {
4640 struct vnodeop_desc *a_desc;
4641 vnode_t a_vp;
4642 struct attrlist *a_alist;
4643 struct vnode_attr *a_vap;
4644 struct uio *a_uio;
4645 void *a_private;
4646 uint64_t a_options;
4647 int *a_eofflag;
4648 uint32_t *a_actualcount;
4649 vfs_context_t a_context;
4650 };
4651 #endif /* 0*/
4652 errno_t
4653 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
4654 struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
4655 int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
4656 {
4657 int _err;
4658 struct vnop_getattrlistbulk_args a;
4659 #if CONFIG_DTRACE
4660 user_ssize_t resid = uio_resid(uio);
4661 #endif
4662
4663 a.a_desc = &vnop_getattrlistbulk_desc;
4664 a.a_vp = vp;
4665 a.a_alist = alist;
4666 a.a_vap = vap;
4667 a.a_uio = uio;
4668 a.a_private = private;
4669 a.a_options = options;
4670 a.a_eofflag = eofflag;
4671 a.a_actualcount = actualcount;
4672 a.a_context = ctx;
4673
4674 _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
4675 DTRACE_FSINFO_IO(getattrlistbulk,
4676 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4677
4678 return (_err);
4679 }
4680
4681 #if 0
4682 /*
4683 *#
4684 *#% readlink vp L L L
4685 *#
4686 */
4687 struct vnop_readlink_args {
4688 struct vnodeop_desc *a_desc;
4689 vnode_t a_vp;
4690 struct uio *a_uio;
4691 vfs_context_t a_context;
4692 };
4693 #endif /* 0 */
4694
4695 /*
4696 * Returns: 0 Success
4697 * lock_fsnode:ENOENT No such file or directory [only for VFS
4698 * that is not thread safe & vnode is
4699 * currently being/has been terminated]
4700 * <vfs_readlink>:EINVAL
4701 * <vfs_readlink>:???
4702 *
4703 * Note: The return codes from the underlying VFS's readlink routine
4704 * can't be fully enumerated here, since third party VFS authors
4705 * may not limit their error returns to the ones documented here,
4706 * even though this may result in some programs functioning
4707 * incorrectly.
4708 *
4709 * The return codes documented above are those which may currently
4710 * be returned by HFS from hfs_vnop_readlink, not including
4711 * additional error codes which may be propagated from underlying
4712 * routines.
4713 */
4714 errno_t
4715 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
4716 {
4717 int _err;
4718 struct vnop_readlink_args a;
4719 #if CONFIG_DTRACE
4720 user_ssize_t resid = uio_resid(uio);
4721 #endif
4722 a.a_desc = &vnop_readlink_desc;
4723 a.a_vp = vp;
4724 a.a_uio = uio;
4725 a.a_context = ctx;
4726
4727 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
4728 DTRACE_FSINFO_IO(readlink,
4729 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4730
4731 return (_err);
4732 }
4733
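/*
 * Illustrative caller sketch (hypothetical, not part of the KPI): reading a
 * symlink target into a kernel buffer through VNOP_READLINK using the uio
 * KPI.  On success, buflen - uio_resid() bytes of the (unterminated) target
 * string have been copied into 'buf'.
 */
#if 0
static int
example_read_link(vnode_t vp, char *buf, int buflen, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);
	error = VNOP_READLINK(vp, auio, ctx);
	uio_free(auio);
	return (error);
}
#endif /* 0 */
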
4734 #if 0
4735 /*
4736 *#
4737 *#% inactive vp L U U
4738 *#
4739 */
4740 struct vnop_inactive_args {
4741 struct vnodeop_desc *a_desc;
4742 vnode_t a_vp;
4743 vfs_context_t a_context;
4744 };
4745 #endif /* 0*/
4746 errno_t
4747 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
4748 {
4749 int _err;
4750 struct vnop_inactive_args a;
4751
4752 a.a_desc = &vnop_inactive_desc;
4753 a.a_vp = vp;
4754 a.a_context = ctx;
4755
4756 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
4757 DTRACE_FSINFO(inactive, vnode_t, vp);
4758
4759 #if NAMEDSTREAMS
4760 /* For file systems that do not support named streams natively, mark
4761 * the shadow stream file vnode to be recycled as soon as the last
4762 * reference goes away. To avoid re-entering reclaim code, do not
4763 * call recycle on terminating namedstream vnodes.
4764 */
4765 if (vnode_isnamedstream(vp) &&
4766 (vp->v_parent != NULLVP) &&
4767 vnode_isshadow(vp) &&
4768 ((vp->v_lflag & VL_TERMINATE) == 0)) {
4769 vnode_recycle(vp);
4770 }
4771 #endif
4772
4773 return (_err);
4774 }
4775
4776
4777 #if 0
4778 /*
4779 *#
4780 *#% reclaim vp U U U
4781 *#
4782 */
4783 struct vnop_reclaim_args {
4784 struct vnodeop_desc *a_desc;
4785 vnode_t a_vp;
4786 vfs_context_t a_context;
4787 };
4788 #endif /* 0*/
4789 errno_t
4790 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
4791 {
4792 int _err;
4793 struct vnop_reclaim_args a;
4794
4795 a.a_desc = &vnop_reclaim_desc;
4796 a.a_vp = vp;
4797 a.a_context = ctx;
4798
4799 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
4800 DTRACE_FSINFO(reclaim, vnode_t, vp);
4801
4802 return (_err);
4803 }
4804
4805
4806 /*
4807 * Returns: 0 Success
4808 * lock_fsnode:ENOENT No such file or directory [only for VFS
4809 * that is not thread safe & vnode is
4810 * currently being/has been terminated]
4811 * <vnop_pathconf_desc>:??? [per FS implementation specific]
4812 */
4813 #if 0
4814 /*
4815 *#
4816 *#% pathconf vp L L L
4817 *#
4818 */
4819 struct vnop_pathconf_args {
4820 struct vnodeop_desc *a_desc;
4821 vnode_t a_vp;
4822 int a_name;
4823 int32_t *a_retval;
4824 vfs_context_t a_context;
4825 };
4826 #endif /* 0*/
4827 errno_t
4828 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
4829 {
4830 int _err;
4831 struct vnop_pathconf_args a;
4832
4833 a.a_desc = &vnop_pathconf_desc;
4834 a.a_vp = vp;
4835 a.a_name = name;
4836 a.a_retval = retval;
4837 a.a_context = ctx;
4838
4839 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
4840 DTRACE_FSINFO(pathconf, vnode_t, vp);
4841
4842 return (_err);
4843 }
4844
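/*
 * Minimal caller sketch (hypothetical): querying the maximum filename length
 * supported beneath 'vp' through the pathconf entry point above.
 */
#if 0
static int
example_name_max(vnode_t vp, int32_t *name_max, vfs_context_t ctx)
{
	return VNOP_PATHCONF(vp, _PC_NAME_MAX, name_max, ctx);
}
#endif /* 0 */
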
4845 /*
4846 * Returns: 0 Success
4847 * err_advlock:ENOTSUP
4848 * lf_advlock:???
4849 * <vnop_advlock_desc>:???
4850 *
4851 * Notes: VFS implementations that are called through <vnop_advlock_desc>
4852 * (i.e. those that enforce advisory locks themselves, because lock
4853 * enforcement does not occur locally) should try to limit themselves
4854 * to the return codes documented above for lf_advlock and err_advlock.
4855 */
4856 #if 0
4857 /*
4858 *#
4859 *#% advlock vp U U U
4860 *#
4861 */
4862 struct vnop_advlock_args {
4863 struct vnodeop_desc *a_desc;
4864 vnode_t a_vp;
4865 caddr_t a_id;
4866 int a_op;
4867 struct flock *a_fl;
4868 int a_flags;
4869 vfs_context_t a_context;
4870 };
4871 #endif /* 0*/
4872 errno_t
4873 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
4874 {
4875 int _err;
4876 struct vnop_advlock_args a;
4877
4878 a.a_desc = &vnop_advlock_desc;
4879 a.a_vp = vp;
4880 a.a_id = id;
4881 a.a_op = op;
4882 a.a_fl = fl;
4883 a.a_flags = flags;
4884 a.a_context = ctx;
4885 a.a_timeout = timeout;
4886
4887 /* Disallow advisory locking on non-seekable vnodes */
4888 if (vnode_isfifo(vp)) {
4889 _err = err_advlock(&a);
4890 } else {
4891 if ((vp->v_flag & VLOCKLOCAL)) {
4892 /* Advisory locking done at this layer */
4893 _err = lf_advlock(&a);
4894 } else {
4895 /* Advisory locking done by underlying filesystem */
4896 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
4897 }
4898 DTRACE_FSINFO(advlock, vnode_t, vp);
4899 }
4900
4901 return (_err);
4902 }
4903
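/*
 * Illustrative caller sketch (hypothetical): taking a whole-file exclusive
 * advisory lock in the style of the flock() path, using 'owner_id' as the
 * lock owner.  As shown above, the request is serviced by lf_advlock() when
 * VLOCKLOCAL is set, or by the filesystem otherwise.
 */
#if 0
static int
example_whole_file_lock(vnode_t vp, void *owner_id, vfs_context_t ctx)
{
	struct flock lf;

	bzero(&lf, sizeof(lf));
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;			/* zero length locks to EOF */
	lf.l_type = F_WRLCK;

	return VNOP_ADVLOCK(vp, (caddr_t)owner_id, F_SETLK, &lf,
	    F_FLOCK | F_WAIT, ctx, NULL);
}
#endif /* 0 */
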
4904
4905
4906 #if 0
4907 /*
4908 *#
4909 *#% allocate vp L L L
4910 *#
4911 */
4912 struct vnop_allocate_args {
4913 struct vnodeop_desc *a_desc;
4914 vnode_t a_vp;
4915 off_t a_length;
4916 u_int32_t a_flags;
4917 off_t *a_bytesallocated;
4918 off_t a_offset;
4919 vfs_context_t a_context;
4920 };
4921
4922 #endif /* 0*/
4923 errno_t
4924 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
4925 {
4926 int _err;
4927 struct vnop_allocate_args a;
4928
4929 a.a_desc = &vnop_allocate_desc;
4930 a.a_vp = vp;
4931 a.a_length = length;
4932 a.a_flags = flags;
4933 a.a_bytesallocated = bytesallocated;
4934 a.a_offset = offset;
4935 a.a_context = ctx;
4936
4937 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
4938 DTRACE_FSINFO(allocate, vnode_t, vp);
4939 #if CONFIG_FSE
4940 if (_err == 0) {
4941 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
4942 }
4943 #endif
4944
4945 return (_err);
4946 }
4947
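/*
 * Illustrative caller sketch (hypothetical): asking the filesystem to
 * preallocate space for a file.  ALLOCATEALL and ALLOCATEFROMPEOF are
 * existing allocation flags (used, for example, by the F_PREALLOCATE fcntl
 * path); the filesystem reports what it actually reserved via 'allocated'.
 */
#if 0
static int
example_preallocate(vnode_t vp, off_t length, off_t *allocated, vfs_context_t ctx)
{
	return VNOP_ALLOCATE(vp, length, ALLOCATEALL | ALLOCATEFROMPEOF,
	    allocated, 0, ctx);
}
#endif /* 0 */
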
4948 #if 0
4949 /*
4950 *#
4951 *#% pagein vp = = =
4952 *#
4953 */
4954 struct vnop_pagein_args {
4955 struct vnodeop_desc *a_desc;
4956 vnode_t a_vp;
4957 upl_t a_pl;
4958 upl_offset_t a_pl_offset;
4959 off_t a_f_offset;
4960 size_t a_size;
4961 int a_flags;
4962 vfs_context_t a_context;
4963 };
4964 #endif /* 0*/
4965 errno_t
4966 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
4967 {
4968 int _err;
4969 struct vnop_pagein_args a;
4970
4971 a.a_desc = &vnop_pagein_desc;
4972 a.a_vp = vp;
4973 a.a_pl = pl;
4974 a.a_pl_offset = pl_offset;
4975 a.a_f_offset = f_offset;
4976 a.a_size = size;
4977 a.a_flags = flags;
4978 a.a_context = ctx;
4979
4980 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
4981 DTRACE_FSINFO(pagein, vnode_t, vp);
4982
4983 return (_err);
4984 }
4985
4986 #if 0
4987 /*
4988 *#
4989 *#% pageout vp = = =
4990 *#
4991 */
4992 struct vnop_pageout_args {
4993 struct vnodeop_desc *a_desc;
4994 vnode_t a_vp;
4995 upl_t a_pl;
4996 upl_offset_t a_pl_offset;
4997 off_t a_f_offset;
4998 size_t a_size;
4999 int a_flags;
5000 vfs_context_t a_context;
5001 };
5002
5003 #endif /* 0*/
5004 errno_t
5005 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5006 {
5007 int _err;
5008 struct vnop_pageout_args a;
5009
5010 a.a_desc = &vnop_pageout_desc;
5011 a.a_vp = vp;
5012 a.a_pl = pl;
5013 a.a_pl_offset = pl_offset;
5014 a.a_f_offset = f_offset;
5015 a.a_size = size;
5016 a.a_flags = flags;
5017 a.a_context = ctx;
5018
5019 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5020 DTRACE_FSINFO(pageout, vnode_t, vp);
5021
5022 post_event_if_success(vp, _err, NOTE_WRITE);
5023
5024 return (_err);
5025 }
5026
5027 int
5028 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5029 {
5030 if (vnode_compound_remove_available(dvp)) {
5031 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5032 } else {
5033 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5034 }
5035 }
5036
5037 #if CONFIG_SEARCHFS
5038
5039 #if 0
5040 /*
5041 *#
5042 *#% searchfs vp L L L
5043 *#
5044 */
5045 struct vnop_searchfs_args {
5046 struct vnodeop_desc *a_desc;
5047 vnode_t a_vp;
5048 void *a_searchparams1;
5049 void *a_searchparams2;
5050 struct attrlist *a_searchattrs;
5051 uint32_t a_maxmatches;
5052 struct timeval *a_timelimit;
5053 struct attrlist *a_returnattrs;
5054 uint32_t *a_nummatches;
5055 uint32_t a_scriptcode;
5056 uint32_t a_options;
5057 struct uio *a_uio;
5058 struct searchstate *a_searchstate;
5059 vfs_context_t a_context;
5060 };
5061
5062 #endif /* 0*/
5063 errno_t
5064 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5065 {
5066 int _err;
5067 struct vnop_searchfs_args a;
5068
5069 a.a_desc = &vnop_searchfs_desc;
5070 a.a_vp = vp;
5071 a.a_searchparams1 = searchparams1;
5072 a.a_searchparams2 = searchparams2;
5073 a.a_searchattrs = searchattrs;
5074 a.a_maxmatches = maxmatches;
5075 a.a_timelimit = timelimit;
5076 a.a_returnattrs = returnattrs;
5077 a.a_nummatches = nummatches;
5078 a.a_scriptcode = scriptcode;
5079 a.a_options = options;
5080 a.a_uio = uio;
5081 a.a_searchstate = searchstate;
5082 a.a_context = ctx;
5083
5084 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5085 DTRACE_FSINFO(searchfs, vnode_t, vp);
5086
5087 return (_err);
5088 }
5089 #endif /* CONFIG_SEARCHFS */
5090
5091 #if 0
5092 /*
5093 *#
5094 *#% copyfile fvp U U U
5095 *#% copyfile tdvp L U U
5096 *#% copyfile tvp X U U
5097 *#
5098 */
5099 struct vnop_copyfile_args {
5100 struct vnodeop_desc *a_desc;
5101 vnode_t a_fvp;
5102 vnode_t a_tdvp;
5103 vnode_t a_tvp;
5104 struct componentname *a_tcnp;
5105 int a_mode;
5106 int a_flags;
5107 vfs_context_t a_context;
5108 };
5109 #endif /* 0*/
5110 errno_t
5111 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5112 int mode, int flags, vfs_context_t ctx)
5113 {
5114 int _err;
5115 struct vnop_copyfile_args a;
5116 a.a_desc = &vnop_copyfile_desc;
5117 a.a_fvp = fvp;
5118 a.a_tdvp = tdvp;
5119 a.a_tvp = tvp;
5120 a.a_tcnp = tcnp;
5121 a.a_mode = mode;
5122 a.a_flags = flags;
5123 a.a_context = ctx;
5124 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5125 DTRACE_FSINFO(copyfile, vnode_t, fvp);
5126 return (_err);
5127 }
5128
5129 errno_t
5130 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5131 {
5132 struct vnop_getxattr_args a;
5133 int error;
5134
5135 a.a_desc = &vnop_getxattr_desc;
5136 a.a_vp = vp;
5137 a.a_name = name;
5138 a.a_uio = uio;
5139 a.a_size = size;
5140 a.a_options = options;
5141 a.a_context = ctx;
5142
5143 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
5144 DTRACE_FSINFO(getxattr, vnode_t, vp);
5145
5146 return (error);
5147 }
5148
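/*
 * Illustrative caller sketch (hypothetical): fetching an extended attribute
 * into a kernel buffer.  A NULL uio with a non-NULL 'size' is conventionally
 * used (e.g. by vn_getxattr) to query just the attribute's length; here an
 * actual read is shown, with the amount transferred derived from the uio
 * residual.
 */
#if 0
static int
example_get_xattr(vnode_t vp, const char *name, void *buf, size_t buflen,
    size_t *actual, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);
	error = VNOP_GETXATTR(vp, name, auio, NULL, 0, ctx);
	if (error == 0 && actual != NULL)
		*actual = buflen - uio_resid(auio);
	uio_free(auio);
	return (error);
}
#endif /* 0 */
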
5149 errno_t
5150 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
5151 {
5152 struct vnop_setxattr_args a;
5153 int error;
5154
5155 a.a_desc = &vnop_setxattr_desc;
5156 a.a_vp = vp;
5157 a.a_name = name;
5158 a.a_uio = uio;
5159 a.a_options = options;
5160 a.a_context = ctx;
5161
5162 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
5163 DTRACE_FSINFO(setxattr, vnode_t, vp);
5164
5165 if (error == 0)
5166 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
5167
5168 post_event_if_success(vp, error, NOTE_ATTRIB);
5169
5170 return (error);
5171 }
5172
5173 errno_t
5174 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
5175 {
5176 struct vnop_removexattr_args a;
5177 int error;
5178
5179 a.a_desc = &vnop_removexattr_desc;
5180 a.a_vp = vp;
5181 a.a_name = name;
5182 a.a_options = options;
5183 a.a_context = ctx;
5184
5185 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
5186 DTRACE_FSINFO(removexattr, vnode_t, vp);
5187
5188 post_event_if_success(vp, error, NOTE_ATTRIB);
5189
5190 return (error);
5191 }
5192
5193 errno_t
5194 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5195 {
5196 struct vnop_listxattr_args a;
5197 int error;
5198
5199 a.a_desc = &vnop_listxattr_desc;
5200 a.a_vp = vp;
5201 a.a_uio = uio;
5202 a.a_size = size;
5203 a.a_options = options;
5204 a.a_context = ctx;
5205
5206 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
5207 DTRACE_FSINFO(listxattr, vnode_t, vp);
5208
5209 return (error);
5210 }
5211
5212
5213 #if 0
5214 /*
5215 *#
5216 *#% blktooff vp = = =
5217 *#
5218 */
5219 struct vnop_blktooff_args {
5220 struct vnodeop_desc *a_desc;
5221 vnode_t a_vp;
5222 daddr64_t a_lblkno;
5223 off_t *a_offset;
5224 };
5225 #endif /* 0*/
5226 errno_t
5227 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
5228 {
5229 int _err;
5230 struct vnop_blktooff_args a;
5231
5232 a.a_desc = &vnop_blktooff_desc;
5233 a.a_vp = vp;
5234 a.a_lblkno = lblkno;
5235 a.a_offset = offset;
5236
5237 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
5238 DTRACE_FSINFO(blktooff, vnode_t, vp);
5239
5240 return (_err);
5241 }
5242
5243 #if 0
5244 /*
5245 *#
5246 *#% offtoblk vp = = =
5247 *#
5248 */
5249 struct vnop_offtoblk_args {
5250 struct vnodeop_desc *a_desc;
5251 vnode_t a_vp;
5252 off_t a_offset;
5253 daddr64_t *a_lblkno;
5254 };
5255 #endif /* 0*/
5256 errno_t
5257 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
5258 {
5259 int _err;
5260 struct vnop_offtoblk_args a;
5261
5262 a.a_desc = &vnop_offtoblk_desc;
5263 a.a_vp = vp;
5264 a.a_offset = offset;
5265 a.a_lblkno = lblkno;
5266
5267 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
5268 DTRACE_FSINFO(offtoblk, vnode_t, vp);
5269
5270 return (_err);
5271 }
5272
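/*
 * Illustrative caller sketch (hypothetical): mapping a file offset to its
 * logical block with VNOP_OFFTOBLK and back to the block's starting offset
 * with VNOP_BLKTOOFF.  On success, blk_start <= offset, and the difference
 * is the offset within that block.
 */
#if 0
static int
example_offset_block_roundtrip(vnode_t vp, off_t offset)
{
	daddr64_t lblkno = 0;
	off_t blk_start = 0;
	int error;

	error = VNOP_OFFTOBLK(vp, offset, &lblkno);
	if (error == 0)
		error = VNOP_BLKTOOFF(vp, lblkno, &blk_start);
	return (error);
}
#endif /* 0 */
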
5273 #if 0
5274 /*
5275 *#
5276 *#% blockmap vp L L L
5277 *#
5278 */
5279 struct vnop_blockmap_args {
5280 struct vnodeop_desc *a_desc;
5281 vnode_t a_vp;
5282 off_t a_foffset;
5283 size_t a_size;
5284 daddr64_t *a_bpn;
5285 size_t *a_run;
5286 void *a_poff;
5287 int a_flags;
5288 vfs_context_t a_context;
5289 };
5290 #endif /* 0*/
5291 errno_t
5292 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
5293 {
5294 int _err;
5295 struct vnop_blockmap_args a;
5296 size_t localrun = 0;
5297
5298 if (ctx == NULL) {
5299 ctx = vfs_context_current();
5300 }
5301 a.a_desc = &vnop_blockmap_desc;
5302 a.a_vp = vp;
5303 a.a_foffset = foffset;
5304 a.a_size = size;
5305 a.a_bpn = bpn;
5306 a.a_run = &localrun;
5307 a.a_poff = poff;
5308 a.a_flags = flags;
5309 a.a_context = ctx;
5310
5311 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
5312 DTRACE_FSINFO(blockmap, vnode_t, vp);
5313
5314 /*
5315 * We used a local variable to request information from the underlying
5316 * filesystem about the length of the I/O run in question. If
5317 * the filesystem returns a malformed run length, we cap it at the
5318 * length requested. Update 'run' on the way out.
5319 */
5320 if (_err == 0) {
5321 if (localrun > size) {
5322 localrun = size;
5323 }
5324
5325 if (run) {
5326 *run = localrun;
5327 }
5328 }
5329
5330 return (_err);
5331 }
5332
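/*
 * Illustrative caller sketch (hypothetical): translating a file offset into
 * a physical block number plus a contiguous run length for a read, in the
 * way the cluster layer consumes this entry point.  Because of the capping
 * above, 'run' never exceeds the 'size' that was passed in.
 */
#if 0
static int
example_map_extent(vnode_t vp, off_t f_offset, size_t size, vfs_context_t ctx)
{
	daddr64_t blkno = 0;
	size_t run = 0;
	int error;

	error = VNOP_BLOCKMAP(vp, f_offset, size, &blkno, &run, NULL,
	    VNODE_READ, ctx);
	/* On success, bytes [f_offset, f_offset + run) live at block 'blkno'. */
	return (error);
}
#endif /* 0 */
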
5333 #if 0
5334 struct vnop_strategy_args {
5335 struct vnodeop_desc *a_desc;
5336 struct buf *a_bp;
5337 };
5338
5339 #endif /* 0*/
5340 errno_t
5341 VNOP_STRATEGY(struct buf *bp)
5342 {
5343 int _err;
5344 struct vnop_strategy_args a;
5345 vnode_t vp = buf_vnode(bp);
5346 a.a_desc = &vnop_strategy_desc;
5347 a.a_bp = bp;
5348 _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
5349 DTRACE_FSINFO(strategy, vnode_t, vp);
5350 return (_err);
5351 }
5352
5353 #if 0
5354 struct vnop_bwrite_args {
5355 struct vnodeop_desc *a_desc;
5356 buf_t a_bp;
5357 };
5358 #endif /* 0*/
5359 errno_t
5360 VNOP_BWRITE(struct buf *bp)
5361 {
5362 int _err;
5363 struct vnop_bwrite_args a;
5364 vnode_t vp = buf_vnode(bp);
5365 a.a_desc = &vnop_bwrite_desc;
5366 a.a_bp = bp;
5367 _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
5368 DTRACE_FSINFO(bwrite, vnode_t, vp);
5369 return (_err);
5370 }
5371
5372 #if 0
5373 struct vnop_kqfilt_add_args {
5374 struct vnodeop_desc *a_desc;
5375 struct vnode *a_vp;
5376 struct knote *a_kn;
5377 vfs_context_t a_context;
5378 };
5379 #endif
5380 errno_t
5381 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
5382 {
5383 int _err;
5384 struct vnop_kqfilt_add_args a;
5385
5386 a.a_desc = VDESC(vnop_kqfilt_add);
5387 a.a_vp = vp;
5388 a.a_kn = kn;
5389 a.a_context = ctx;
5390
5391 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
5392 DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
5393
5394 return(_err);
5395 }
5396
5397 #if 0
5398 struct vnop_kqfilt_remove_args {
5399 struct vnodeop_desc *a_desc;
5400 struct vnode *a_vp;
5401 uintptr_t a_ident;
5402 vfs_context_t a_context;
5403 };
5404 #endif
5405 errno_t
5406 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
5407 {
5408 int _err;
5409 struct vnop_kqfilt_remove_args a;
5410
5411 a.a_desc = VDESC(vnop_kqfilt_remove);
5412 a.a_vp = vp;
5413 a.a_ident = ident;
5414 a.a_context = ctx;
5415
5416 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
5417 DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
5418
5419 return(_err);
5420 }
5421
5422 errno_t
5423 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
5424 {
5425 int _err;
5426 struct vnop_monitor_args a;
5427
5428 a.a_desc = VDESC(vnop_monitor);
5429 a.a_vp = vp;
5430 a.a_events = events;
5431 a.a_flags = flags;
5432 a.a_handle = handle;
5433 a.a_context = ctx;
5434
5435 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
5436 DTRACE_FSINFO(monitor, vnode_t, vp);
5437
5438 return(_err);
5439 }
5440
5441 #if 0
5442 struct vnop_setlabel_args {
5443 struct vnodeop_desc *a_desc;
5444 struct vnode *a_vp;
5445 struct label *a_vl;
5446 vfs_context_t a_context;
5447 };
5448 #endif
5449 errno_t
5450 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
5451 {
5452 int _err;
5453 struct vnop_setlabel_args a;
5454
5455 a.a_desc = VDESC(vnop_setlabel);
5456 a.a_vp = vp;
5457 a.a_vl = label;
5458 a.a_context = ctx;
5459
5460 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
5461 DTRACE_FSINFO(setlabel, vnode_t, vp);
5462
5463 return(_err);
5464 }
5465
5466
5467 #if NAMEDSTREAMS
5468 /*
5469 * Get a named stream
5470 */
5471 errno_t
5472 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
5473 {
5474 int _err;
5475 struct vnop_getnamedstream_args a;
5476
5477 a.a_desc = &vnop_getnamedstream_desc;
5478 a.a_vp = vp;
5479 a.a_svpp = svpp;
5480 a.a_name = name;
5481 a.a_operation = operation;
5482 a.a_flags = flags;
5483 a.a_context = ctx;
5484
5485 _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
5486 DTRACE_FSINFO(getnamedstream, vnode_t, vp);
5487 return (_err);
5488 }
5489
5490 /*
5491 * Create a named stream
5492 */
5493 errno_t
5494 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
5495 {
5496 int _err;
5497 struct vnop_makenamedstream_args a;
5498
5499 a.a_desc = &vnop_makenamedstream_desc;
5500 a.a_vp = vp;
5501 a.a_svpp = svpp;
5502 a.a_name = name;
5503 a.a_flags = flags;
5504 a.a_context = ctx;
5505
5506 _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
5507 DTRACE_FSINFO(makenamedstream, vnode_t, vp);
5508 return (_err);
5509 }
5510
5511
5512 /*
5513 * Remove a named stream
5514 */
5515 errno_t
5516 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
5517 {
5518 int _err;
5519 struct vnop_removenamedstream_args a;
5520
5521 a.a_desc = &vnop_removenamedstream_desc;
5522 a.a_vp = vp;
5523 a.a_svp = svp;
5524 a.a_name = name;
5525 a.a_flags = flags;
5526 a.a_context = ctx;
5527
5528 _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
5529 DTRACE_FSINFO(removenamedstream, vnode_t, vp);
5530 return (_err);
5531 }
5532 #endif