apple/xnu.git (xnu-3248.30.4): bsd/vfs/kpi_vfs.c
1 /*
2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/vnode_internal.h>
88 #include <sys/stat.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
91 #include <sys/buf.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
95 #include <sys/mbuf.h>
96 #include <sys/syslog.h>
97 #include <sys/ubc.h>
98 #include <sys/vm.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/event.h>
102 #include <sys/fsevents.h>
103 #include <sys/user.h>
104 #include <sys/lockf.h>
105 #include <sys/xattr.h>
106
107 #include <kern/assert.h>
108 #include <kern/kalloc.h>
109 #include <kern/task.h>
110
111 #include <libkern/OSByteOrder.h>
112
113 #include <miscfs/specfs/specdev.h>
114
115 #include <mach/mach_types.h>
116 #include <mach/memory_object_types.h>
117 #include <mach/task.h>
118
119 #if CONFIG_MACF
120 #include <security/mac_framework.h>
121 #endif
122
123 #include <sys/sdt.h>
124
125 #define ESUCCESS 0
126 #undef mount_t
127 #undef vnode_t
128
129 #define COMPAT_ONLY
130
131 #define NATIVE_XATTR(VP) \
132 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
133
134 #if CONFIG_APPLEDOUBLE
135 static void xattrfile_remove(vnode_t dvp, const char *basename,
136 vfs_context_t ctx, int force);
137 static void xattrfile_setattr(vnode_t dvp, const char * basename,
138 struct vnode_attr * vap, vfs_context_t ctx);
139 #endif /* CONFIG_APPLEDOUBLE */
140
141 /*
142 * vnode_setneedinactive
143 *
144 * Description: Indicate that when the last iocount on this vnode goes away,
145 * and the usecount is also zero, we should inform the filesystem
146 * via VNOP_INACTIVE.
147 *
148 * Parameters: vnode_t vnode to mark
149 *
150 * Returns: Nothing
151 *
152 * Notes: Notably used when we're deleting a file--we need not have a
153 * usecount, so VNOP_INACTIVE may not get called by anyone. We
154 * want it called when we drop our iocount.
155 */
156 void
157 vnode_setneedinactive(vnode_t vp)
158 {
159 cache_purge(vp);
160
161 vnode_lock_spin(vp);
162 vp->v_lflag |= VL_NEEDINACTIVE;
163 vnode_unlock(vp);
164 }
165
166
167 /* ====================================================================== */
168 /* ************ EXTERNAL KERNEL APIS ********************************** */
169 /* ====================================================================== */
170
171 /*
172 * implementations of exported VFS operations
173 */
174 int
175 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
176 {
177 int error;
178
179 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
180 return(ENOTSUP);
181
182 if (vfs_context_is64bit(ctx)) {
183 if (vfs_64bitready(mp)) {
184 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
185 }
186 else {
187 error = ENOTSUP;
188 }
189 }
190 else {
191 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
192 }
193
194 return (error);
195 }
196
197 int
198 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
199 {
200 int error;
201
202 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
203 return(ENOTSUP);
204
205 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
206
207 return (error);
208 }
209
210 int
211 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
212 {
213 int error;
214
215 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
216 return(ENOTSUP);
217
218 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
219
220 return (error);
221 }
222
223 /*
224 * Returns: 0 Success
225 * ENOTSUP Not supported
226 * <vfs_root>:ENOENT
227 * <vfs_root>:???
228 *
229 * Note: The return codes from the underlying VFS's root routine can't
230 * be fully enumerated here, since third party VFS authors may not
231 * limit their error returns to the ones documented here, even
232 * though this may result in some programs functioning incorrectly.
233 *
234 * The return codes documented above are those which may currently
235 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
236 * for a call to hfs_vget on the volume mount point, not including
237 * additional error codes which may be propagated from underlying
238 * routines called by hfs_vget.
239 */
240 int
241 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
242 {
243 int error;
244
245 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
246 return(ENOTSUP);
247
248 if (ctx == NULL) {
249 ctx = vfs_context_current();
250 }
251
252 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
253
254 return (error);
255 }
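/*
 * Illustrative sketch (not built): typical use of the VFS_ROOT() wrapper
 * above.  A NULL context is filled in with vfs_context_current(), and the
 * returned vnode carries an iocount that the caller must drop with
 * vnode_put().  The mount_t "mp" and the function name are hypothetical.
 */
#if 0
static int
example_use_root_vnode(mount_t mp, vfs_context_t ctx)
{
	vnode_t rvp = NULLVP;
	int error;

	/* dispatches to mp->mnt_op->vfs_root; ENOTSUP if the FS has none */
	error = VFS_ROOT(mp, &rvp, ctx);
	if (error)
		return (error);

	/* ... use rvp while the iocount is held ... */

	vnode_put(rvp);		/* release the iocount taken by VFS_ROOT */
	return (0);
}
#endif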
256
257 int
258 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
259 {
260 int error;
261
262 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
263 return(ENOTSUP);
264
265 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
266
267 return (error);
268 }
269
270 int
271 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
272 {
273 int error;
274
275 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
276 return(ENOTSUP);
277
278 if (ctx == NULL) {
279 ctx = vfs_context_current();
280 }
281
282 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
283
284 return(error);
285 }
286
287 int
288 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
289 {
290 int error;
291
292 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
293 return(ENOTSUP);
294
295 if (ctx == NULL) {
296 ctx = vfs_context_current();
297 }
298
299 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
300
301 return(error);
302 }
303
304 int
305 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
306 {
307 int error;
308
309 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
310 return(ENOTSUP);
311
312 if (ctx == NULL) {
313 ctx = vfs_context_current();
314 }
315
316 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
317
318 return(error);
319 }
320
321 int
322 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
323 {
324 int error;
325
326 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
327 return(ENOTSUP);
328
329 if (ctx == NULL) {
330 ctx = vfs_context_current();
331 }
332
333 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
334
335 return(error);
336 }
337
338 int
339 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t ctx)
340 {
341 int error;
342
343 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
344 return(ENOTSUP);
345
346 if (ctx == NULL) {
347 ctx = vfs_context_current();
348 }
349
350 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
351
352 return(error);
353 }
354
355 int
356 VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ctx)
357 {
358 int error;
359
360 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
361 return(ENOTSUP);
362
363 if (ctx == NULL) {
364 ctx = vfs_context_current();
365 }
366
367 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
368
369 return(error);
370 }
371
372
373 /* returns the cached throttle mask for the mount_t */
374 uint64_t
375 vfs_throttle_mask(mount_t mp)
376 {
377 return(mp->mnt_throttle_mask);
378 }
379
380 /* returns a copy of vfs type name for the mount_t */
381 void
382 vfs_name(mount_t mp, char * buffer)
383 {
384 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
385 }
386
387 /* returns vfs type number for the mount_t */
388 int
389 vfs_typenum(mount_t mp)
390 {
391 return(mp->mnt_vtable->vfc_typenum);
392 }
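/*
 * Illustrative sketch (not built): reading basic mount identity with the
 * accessors above.  vfs_name() copies at most MFSNAMELEN bytes, so the
 * caller supplies a buffer of that size; the function name is hypothetical.
 */
#if 0
static void
example_log_mount_identity(mount_t mp)
{
	char fsname[MFSNAMELEN];

	vfs_name(mp, fsname);		/* vfs type name, e.g. "hfs" */
	printf("mount %s: typenum %d throttle mask 0x%llx\n",
	    fsname, vfs_typenum(mp),
	    (unsigned long long)vfs_throttle_mask(mp));
}
#endif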
393
394 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
395 void*
396 vfs_mntlabel(mount_t mp)
397 {
398 return (void*)mp->mnt_mntlabel;
399 }
400
401 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
402 uint64_t
403 vfs_flags(mount_t mp)
404 {
405 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
406 }
407
408 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
409 void
410 vfs_setflags(mount_t mp, uint64_t flags)
411 {
412 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
413
414 mount_lock(mp);
415 mp->mnt_flag |= lflags;
416 mount_unlock(mp);
417 }
418
419 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
420 void
421 vfs_clearflags(mount_t mp , uint64_t flags)
422 {
423 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
424
425 mount_lock(mp);
426 mp->mnt_flag &= ~lflags;
427 mount_unlock(mp);
428 }
429
430 /* Is the mount_t ronly and upgrade read/write requested? */
431 int
432 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
433 {
434 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
435 }
436
437
438 /* Is the mount_t mounted ronly */
439 int
440 vfs_isrdonly(mount_t mp)
441 {
442 return (mp->mnt_flag & MNT_RDONLY);
443 }
444
445 /* Is the mount_t mounted for filesystem synchronous writes? */
446 int
447 vfs_issynchronous(mount_t mp)
448 {
449 return (mp->mnt_flag & MNT_SYNCHRONOUS);
450 }
451
452 /* Is the mount_t mounted read/write? */
453 int
454 vfs_isrdwr(mount_t mp)
455 {
456 return ((mp->mnt_flag & MNT_RDONLY) == 0);
457 }
458
459
460 /* Is mount_t marked for update (ie MNT_UPDATE) */
461 int
462 vfs_isupdate(mount_t mp)
463 {
464 return (mp->mnt_flag & MNT_UPDATE);
465 }
466
467
468 /* Is mount_t marked for reload (ie MNT_RELOAD) */
469 int
470 vfs_isreload(mount_t mp)
471 {
472 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
473 }
474
475 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
476 int
477 vfs_isforce(mount_t mp)
478 {
479 if (mp->mnt_lflag & MNT_LFORCE)
480 return(1);
481 else
482 return(0);
483 }
484
485 int
486 vfs_isunmount(mount_t mp)
487 {
488 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
489 return 1;
490 } else {
491 return 0;
492 }
493 }
494
495 int
496 vfs_64bitready(mount_t mp)
497 {
498 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
499 return(1);
500 else
501 return(0);
502 }
503
504
505 int
506 vfs_authcache_ttl(mount_t mp)
507 {
508 if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) )
509 return (mp->mnt_authcache_ttl);
510 else
511 return (CACHED_RIGHT_INFINITE_TTL);
512 }
513
514 void
515 vfs_setauthcache_ttl(mount_t mp, int ttl)
516 {
517 mount_lock(mp);
518 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
519 mp->mnt_authcache_ttl = ttl;
520 mount_unlock(mp);
521 }
522
523 void
524 vfs_clearauthcache_ttl(mount_t mp)
525 {
526 mount_lock(mp);
527 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
528 /*
529 * back to the default TTL value in case
530 * MNTK_AUTH_OPAQUE is set on this mount
531 */
532 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
533 mount_unlock(mp);
534 }
535
536 int
537 vfs_authopaque(mount_t mp)
538 {
539 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
540 return(1);
541 else
542 return(0);
543 }
544
545 int
546 vfs_authopaqueaccess(mount_t mp)
547 {
548 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
549 return(1);
550 else
551 return(0);
552 }
553
554 void
555 vfs_setauthopaque(mount_t mp)
556 {
557 mount_lock(mp);
558 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
559 mount_unlock(mp);
560 }
561
562 void
563 vfs_setauthopaqueaccess(mount_t mp)
564 {
565 mount_lock(mp);
566 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
567 mount_unlock(mp);
568 }
569
570 void
571 vfs_clearauthopaque(mount_t mp)
572 {
573 mount_lock(mp);
574 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
575 mount_unlock(mp);
576 }
577
578 void
579 vfs_clearauthopaqueaccess(mount_t mp)
580 {
581 mount_lock(mp);
582 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
583 mount_unlock(mp);
584 }
585
586 void
587 vfs_setextendedsecurity(mount_t mp)
588 {
589 mount_lock(mp);
590 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
591 mount_unlock(mp);
592 }
593
594 void
595 vfs_clearextendedsecurity(mount_t mp)
596 {
597 mount_lock(mp);
598 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
599 mount_unlock(mp);
600 }
601
602 int
603 vfs_extendedsecurity(mount_t mp)
604 {
605 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
606 }
607
608 /* returns the max size of short symlink in this mount_t */
609 uint32_t
610 vfs_maxsymlen(mount_t mp)
611 {
612 return(mp->mnt_maxsymlinklen);
613 }
614
615 /* set max size of short symlink on mount_t */
616 void
617 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
618 {
619 mp->mnt_maxsymlinklen = symlen;
620 }
621
622 /* return a pointer to the RO vfs_statfs associated with mount_t */
623 struct vfsstatfs *
624 vfs_statfs(mount_t mp)
625 {
626 return(&mp->mnt_vfsstat);
627 }
628
629 int
630 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
631 {
632 int error;
633
634 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
635 return(error);
636
637 /*
638 * If we have a filesystem create time, use it to default some others.
639 */
640 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
641 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
642 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
643 }
644
645 return(0);
646 }
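/*
 * Illustrative sketch (not built): querying mount attributes through
 * vfs_getattr() above.  VFSATTR_INIT()/VFSATTR_WANTED() mark which fields
 * are requested, and VFSATTR_IS_SUPPORTED() tells whether the filesystem
 * (or the create-time fallback above) filled them in.  The function name
 * is hypothetical.
 */
#if 0
static int
example_query_block_counts(mount_t mp, vfs_context_t ctx)
{
	struct vfs_attr va;
	int error;

	VFSATTR_INIT(&va);
	VFSATTR_WANTED(&va, f_blocks);
	VFSATTR_WANTED(&va, f_bfree);

	error = vfs_getattr(mp, &va, ctx);
	if (error)
		return (error);

	if (VFSATTR_IS_SUPPORTED(&va, f_blocks) &&
	    VFSATTR_IS_SUPPORTED(&va, f_bfree)) {
		/* ... use va.f_blocks / va.f_bfree ... */
	}
	return (0);
}
#endif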
647
648 int
649 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
650 {
651 int error;
652
653 if (vfs_isrdonly(mp))
654 return EROFS;
655
656 error = VFS_SETATTR(mp, vfa, ctx);
657
658 /*
659 * If we had alternate ways of setting vfs attributes, we'd
660 * fall back here.
661 */
662
663 return error;
664 }
665
666 /* return the private data handle stored in mount_t */
667 void *
668 vfs_fsprivate(mount_t mp)
669 {
670 return(mp->mnt_data);
671 }
672
673 /* set the private data handle in mount_t */
674 void
675 vfs_setfsprivate(mount_t mp, void *mntdata)
676 {
677 mount_lock(mp);
678 mp->mnt_data = mntdata;
679 mount_unlock(mp);
680 }
681
682 /* query whether the mount point supports native EAs */
683 int
684 vfs_nativexattrs(mount_t mp) {
685 return (mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS);
686 }
687
688 /*
689 * return the block size of the underlying
690 * device associated with mount_t
691 */
692 int
693 vfs_devblocksize(mount_t mp) {
694
695 return(mp->mnt_devblocksize);
696 }
697
698 /*
699 * Returns vnode with an iocount that must be released with vnode_put()
700 */
701 vnode_t
702 vfs_vnodecovered(mount_t mp)
703 {
704 vnode_t vp = mp->mnt_vnodecovered;
705 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
706 return NULL;
707 } else {
708 return vp;
709 }
710 }
711
712 /*
713 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
714 * The iocount must be released with vnode_put(). Note that this KPI is subtle
715 * with respect to the validity of using this device vnode for anything substantial
716 * (which is discouraged). If commands are sent to the device driver without
717 * taking proper steps to ensure that the device is still open, chaos may ensue.
718 * Similarly, this routine should only be called if there is some guarantee that
719 * the mount itself is still valid.
720 */
721 vnode_t
722 vfs_devvp(mount_t mp)
723 {
724 vnode_t vp = mp->mnt_devvp;
725
726 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
727 return vp;
728 }
729
730 return NULLVP;
731 }
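/*
 * Illustrative sketch (not built): iocount discipline for the two accessors
 * above.  Both return either NULLVP or a vnode holding an iocount that must
 * be dropped with vnode_put() when the caller is done.  The function name
 * is hypothetical.
 */
#if 0
static void
example_touch_covered_and_dev(mount_t mp)
{
	vnode_t cvp, dvp;

	if ((cvp = vfs_vnodecovered(mp)) != NULLVP) {
		/* ... inspect the vnode this mount covers ... */
		vnode_put(cvp);
	}

	if ((dvp = vfs_devvp(mp)) != NULLVP) {
		/* ... inspect the backing device vnode (see caveats above) ... */
		vnode_put(dvp);
	}
}
#endif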
732
733 /*
734 * return the io attributes associated with mount_t
735 */
736 void
737 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
738 {
739 if (mp == NULL) {
740 ioattrp->io_maxreadcnt = MAXPHYS;
741 ioattrp->io_maxwritecnt = MAXPHYS;
742 ioattrp->io_segreadcnt = 32;
743 ioattrp->io_segwritecnt = 32;
744 ioattrp->io_maxsegreadsize = MAXPHYS;
745 ioattrp->io_maxsegwritesize = MAXPHYS;
746 ioattrp->io_devblocksize = DEV_BSIZE;
747 ioattrp->io_flags = 0;
748 } else {
749 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
750 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
751 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
752 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
753 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
754 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
755 ioattrp->io_devblocksize = mp->mnt_devblocksize;
756 ioattrp->io_flags = mp->mnt_ioflags;
757 }
758 ioattrp->io_reserved[0] = NULL;
759 ioattrp->io_reserved[1] = NULL;
760 }
761
762
763 /*
764 * set the IO attributes associated with mount_t
765 */
766 void
767 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
768 {
769 if (mp == NULL)
770 return;
771 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
772 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
773 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
774 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
775 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
776 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
777 mp->mnt_devblocksize = ioattrp->io_devblocksize;
778 mp->mnt_ioflags = ioattrp->io_flags;
779 }
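/*
 * Illustrative sketch (not built): a filesystem tuning its mount's I/O
 * limits with the accessors above, typically from its vfs_mount handler.
 * The read-modify-write goes through a local struct vfsioattr; the 512 KB
 * cap and the function name are example values only.
 */
#if 0
static void
example_cap_io_sizes(mount_t mp)
{
	struct vfsioattr ioattr;

	vfs_ioattr(mp, &ioattr);		/* current (or default) limits */

	if (ioattr.io_maxreadcnt > 512 * 1024)
		ioattr.io_maxreadcnt = 512 * 1024;
	if (ioattr.io_maxwritecnt > 512 * 1024)
		ioattr.io_maxwritecnt = 512 * 1024;

	vfs_setioattr(mp, &ioattr);		/* push the new limits back */
}
#endif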
780
781 /*
782 * Add a new filesystem into the kernel, as described by the passed-in
783 * vfs_fsentry structure. It fills in the vnode
784 * dispatch vector that is to be passed in when vnodes are created.
785 * It returns a handle which is to be used when the FS is to be removed.
786 */
787 typedef int (*PFI)(void *);
788 extern int vfs_opv_numops;
789 errno_t
790 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
791 {
792 struct vfstable *newvfstbl = NULL;
793 int i,j;
794 int (***opv_desc_vector_p)(void *);
795 int (**opv_desc_vector)(void *);
796 struct vnodeopv_entry_desc *opve_descp;
797 int desccount;
798 int descsize;
799 PFI *descptr;
800
801 /*
802 * This routine is responsible for all the initialization that would
803 * ordinarily be done as part of the system startup;
804 */
805
806 if (vfe == (struct vfs_fsentry *)0)
807 return(EINVAL);
808
809 desccount = vfe->vfe_vopcnt;
810 if ((desccount <=0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
811 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
812 return(EINVAL);
813
814 /* Non-threadsafe filesystems are not supported */
815 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
816 return (EINVAL);
817 }
818
819 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
820 M_WAITOK);
821 bzero(newvfstbl, sizeof(struct vfstable));
822 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
823 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
824 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
825 newvfstbl->vfc_typenum = maxvfstypenum++;
826 else
827 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
828
829 newvfstbl->vfc_refcount = 0;
830 newvfstbl->vfc_flags = 0;
831 newvfstbl->vfc_mountroot = NULL;
832 newvfstbl->vfc_next = NULL;
833 newvfstbl->vfc_vfsflags = 0;
834 if (vfe->vfe_flags & VFS_TBL64BITREADY)
835 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
836 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2)
837 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
838 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2)
839 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
840 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
841 newvfstbl->vfc_flags |= MNT_LOCAL;
842 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0)
843 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
844 else
845 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
846
847 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR)
848 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
849 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT)
850 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
851 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED)
852 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
853 if (vfe->vfe_flags & VFS_TBLNOMACLABEL)
854 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
855 if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME)
856 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
857
858 /*
859 * Allocate and init the vectors.
860 * Also handle backwards compatibility.
861 *
862 * We allocate one large block to hold all <desccount>
863 * vnode operation vectors stored contiguously.
864 */
865 /* XXX - shouldn't be M_TEMP */
866
867 descsize = desccount * vfs_opv_numops * sizeof(PFI);
868 MALLOC(descptr, PFI *, descsize,
869 M_TEMP, M_WAITOK);
870 bzero(descptr, descsize);
871
872 newvfstbl->vfc_descptr = descptr;
873 newvfstbl->vfc_descsize = descsize;
874
875 newvfstbl->vfc_sysctl = NULL;
876
877 for (i= 0; i< desccount; i++ ) {
878 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
879 /*
880 * Fill in the caller's pointer to the start of the i'th vector.
881 * They'll need to supply it when calling vnode_create.
882 */
883 opv_desc_vector = descptr + i * vfs_opv_numops;
884 *opv_desc_vector_p = opv_desc_vector;
885
886 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
887 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
888
889 /*
890 * Sanity check: is this operation listed
891 * in the list of operations? We check this
892 * by seeing if its offset is zero. Since
893 * the default routine should always be listed
894 * first, it should be the only one with a zero
895 * offset. Any other operation with a zero
896 * offset is probably not listed in
897 * vfs_op_descs, and so is probably an error.
898 *
899 * A panic here means the layer programmer
900 * has committed the all-too common bug
901 * of adding a new operation to the layer's
902 * list of vnode operations but
903 * not adding the operation to the system-wide
904 * list of supported operations.
905 */
906 if (opve_descp->opve_op->vdesc_offset == 0 &&
907 opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
908 printf("vfs_fsadd: operation %s not listed in %s.\n",
909 opve_descp->opve_op->vdesc_name,
910 "vfs_op_descs");
911 panic("vfs_fsadd: bad operation");
912 }
913 /*
914 * Fill in this entry.
915 */
916 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
917 opve_descp->opve_impl;
918 }
919
920
921 /*
922 * Finally, go back and replace unfilled routines
923 * with their default. (Sigh, an O(n^3) algorithm. I
924 * could make it better, but that'd be work, and n is small.)
925 */
926 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
927
928 /*
929 * Force every operations vector to have a default routine.
930 */
931 opv_desc_vector = *opv_desc_vector_p;
932 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
933 panic("vfs_fsadd: operation vector without default routine.");
934 for (j = 0; j < vfs_opv_numops; j++)
935 if (opv_desc_vector[j] == NULL)
936 opv_desc_vector[j] =
937 opv_desc_vector[VOFFSET(vnop_default)];
938
939 } /* end of each vnodeopv_desc parsing */
940
941
942
943 *handle = vfstable_add(newvfstbl);
944
945 if (newvfstbl->vfc_typenum <= maxvfstypenum )
946 maxvfstypenum = newvfstbl->vfc_typenum + 1;
947
948 if (newvfstbl->vfc_vfsops->vfs_init) {
949 struct vfsconf vfsc;
950 bzero(&vfsc, sizeof(struct vfsconf));
951 vfsc.vfc_reserved1 = 0;
952 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
953 vfsc.vfc_typenum = (*handle)->vfc_typenum;
954 vfsc.vfc_refcount = (*handle)->vfc_refcount;
955 vfsc.vfc_flags = (*handle)->vfc_flags;
956 vfsc.vfc_reserved2 = 0;
957 vfsc.vfc_reserved3 = 0;
958
959 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
960 }
961
962 FREE(newvfstbl, M_TEMP);
963
964 return(0);
965 }
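/*
 * Illustrative sketch (not built): how a loadable filesystem might register
 * itself with vfs_fsadd() above and later remove itself with vfs_fsremove()
 * (defined below).  All "examplefs_*" names and the vnop implementations are
 * hypothetical; the vfs_fsentry fields, the VFS_TBL* flags, and the
 * vnop_default_desc/vnop_lookup_desc descriptors are the real KPI.
 */
#if 0
extern int examplefs_vnop_lookup(void *);	/* hypothetical */
extern int examplefs_vnop_default(void *);	/* hypothetical */
extern struct vfsops examplefs_vfsops;		/* hypothetical */

static int (**examplefs_vnodeop_p)(void *);

static struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vnop_default_desc, examplefs_vnop_default },
	{ &vnop_lookup_desc,  examplefs_vnop_lookup },
	{ NULL, NULL }
};

static struct vnodeopv_desc examplefs_vnodeop_opv_desc = {
	&examplefs_vnodeop_p, examplefs_vnodeop_entries
};

static struct vnodeopv_desc *examplefs_opv_descs[] = {
	&examplefs_vnodeop_opv_desc
};

static vfstable_t examplefs_handle;

static errno_t
examplefs_register(void)
{
	struct vfs_fsentry vfe;

	bzero(&vfe, sizeof(vfe));
	vfe.vfe_vfsops = &examplefs_vfsops;
	vfe.vfe_vopcnt = 1;			/* one vnode operation vector */
	vfe.vfe_opvdescs = examplefs_opv_descs;
	vfe.vfe_fstypenum = 0;			/* with VFS_TBLNOTYPENUM, assigned for us */
	strlcpy(vfe.vfe_fsname, "examplefs", sizeof(vfe.vfe_fsname));
	vfe.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM;

	return (vfs_fsadd(&vfe, &examplefs_handle));
}

static errno_t
examplefs_unregister(void)
{
	return (vfs_fsremove(examplefs_handle));	/* EBUSY while still mounted */
}
#endif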
966
967 /*
968 * Removes the filesystem from kernel.
969 * The argument passed in is the handle that was given when
970 * file system was added
971 */
972 errno_t
973 vfs_fsremove(vfstable_t handle)
974 {
975 struct vfstable * vfstbl = (struct vfstable *)handle;
976 void *old_desc = NULL;
977 errno_t err;
978
979 /* Preflight check for any mounts */
980 mount_list_lock();
981 if ( vfstbl->vfc_refcount != 0 ) {
982 mount_list_unlock();
983 return EBUSY;
984 }
985
986 /*
987 * save the old descriptor; the free cannot occur unconditionally,
988 * since vfstable_del() may fail.
989 */
990 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
991 old_desc = vfstbl->vfc_descptr;
992 }
993 err = vfstable_del(vfstbl);
994
995 mount_list_unlock();
996
997 /* free the descriptor if the delete was successful */
998 if (err == 0 && old_desc) {
999 FREE(old_desc, M_TEMP);
1000 }
1001
1002 return(err);
1003 }
1004
1005 int
1006 vfs_context_pid(vfs_context_t ctx)
1007 {
1008 return (proc_pid(vfs_context_proc(ctx)));
1009 }
1010
1011 int
1012 vfs_context_suser(vfs_context_t ctx)
1013 {
1014 return (suser(ctx->vc_ucred, NULL));
1015 }
1016
1017 /*
1018 * Return bit field of signals posted to all threads in the context's process.
1019 *
1020 * XXX Signals should be tied to threads, not processes, for most uses of this
1021 * XXX call.
1022 */
1023 int
1024 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1025 {
1026 proc_t p = vfs_context_proc(ctx);
1027 if (p)
1028 return(proc_pendingsignals(p, mask));
1029 return(0);
1030 }
1031
1032 int
1033 vfs_context_is64bit(vfs_context_t ctx)
1034 {
1035 proc_t proc = vfs_context_proc(ctx);
1036
1037 if (proc)
1038 return(proc_is64bit(proc));
1039 return(0);
1040 }
1041
1042
1043 /*
1044 * vfs_context_proc
1045 *
1046 * Description: Given a vfs_context_t, return the proc_t associated with it.
1047 *
1048 * Parameters: vfs_context_t The context to use
1049 *
1050 * Returns: proc_t The process for this context
1051 *
1052 * Notes: This function will return the current_proc() if any of the
1053 * following conditions are true:
1054 *
1055 * o The supplied context pointer is NULL
1056 * o There is no Mach thread associated with the context
1057 * o There is no Mach task associated with the Mach thread
1058 * o There is no proc_t associated with the Mach task
1059 * o The proc_t has no per process open file table
1060 * o The proc_t is post-vfork()
1061 *
1062 * This causes this function to return a value matching as
1063 * closely as possible the previous behaviour, while at the
1064 * same time avoiding the task lending that results from vfork()
1065 */
1066 proc_t
1067 vfs_context_proc(vfs_context_t ctx)
1068 {
1069 proc_t proc = NULL;
1070
1071 if (ctx != NULL && ctx->vc_thread != NULL)
1072 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1073 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
1074 proc = NULL;
1075
1076 return(proc == NULL ? current_proc() : proc);
1077 }
1078
1079 /*
1080 * vfs_context_get_special_port
1081 *
1082 * Description: Return the requested special port from the task associated
1083 * with the given context.
1084 *
1085 * Parameters: vfs_context_t The context to use
1086 * int Index of special port
1087 * ipc_port_t * Pointer to returned port
1088 *
1089 * Returns: kern_return_t see task_get_special_port()
1090 */
1091 kern_return_t
1092 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1093 {
1094 task_t task = NULL;
1095
1096 if (ctx != NULL && ctx->vc_thread != NULL)
1097 task = get_threadtask(ctx->vc_thread);
1098
1099 return task_get_special_port(task, which, portp);
1100 }
1101
1102 /*
1103 * vfs_context_set_special_port
1104 *
1105 * Description: Set the requested special port in the task associated
1106 * with the given context.
1107 *
1108 * Parameters: vfs_context_t The context to use
1109 * int Index of special port
1110 * ipc_port_t New special port
1111 *
1112 * Returns: kern_return_t see task_set_special_port()
1113 */
1114 kern_return_t
1115 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1116 {
1117 task_t task = NULL;
1118
1119 if (ctx != NULL && ctx->vc_thread != NULL)
1120 task = get_threadtask(ctx->vc_thread);
1121
1122 return task_set_special_port(task, which, port);
1123 }
1124
1125 /*
1126 * vfs_context_thread
1127 *
1128 * Description: Return the Mach thread associated with a vfs_context_t
1129 *
1130 * Parameters: vfs_context_t The context to use
1131 *
1132 * Returns: thread_t The thread for this context, or
1133 * NULL, if there is not one.
1134 *
1135 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1136 * as a result of a static vfs_context_t declaration in a function
1137 * and will result in this function returning NULL.
1138 *
1139 * This is intentional; this function should NOT return the
1140 * current_thread() in this case.
1141 */
1142 thread_t
1143 vfs_context_thread(vfs_context_t ctx)
1144 {
1145 return(ctx->vc_thread);
1146 }
1147
1148
1149 /*
1150 * vfs_context_cwd
1151 *
1152 * Description: Returns a reference on the vnode for the current working
1153 * directory for the supplied context
1154 *
1155 * Parameters: vfs_context_t The context to use
1156 *
1157 * Returns: vnode_t The current working directory
1158 * for this context
1159 *
1160 * Notes: The function first attempts to obtain the current directory
1161 * from the thread, and if it is not present there, falls back
1162 * to obtaining it from the process instead. If it can't be
1163 * obtained from either place, we return NULLVP.
1164 */
1165 vnode_t
1166 vfs_context_cwd(vfs_context_t ctx)
1167 {
1168 vnode_t cwd = NULLVP;
1169
1170 if(ctx != NULL && ctx->vc_thread != NULL) {
1171 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1172 proc_t proc;
1173
1174 /*
1175 * Get the cwd from the thread; if there isn't one, get it
1176 * from the process, instead.
1177 */
1178 if ((cwd = uth->uu_cdir) == NULLVP &&
1179 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1180 proc->p_fd != NULL)
1181 cwd = proc->p_fd->fd_cdir;
1182 }
1183
1184 return(cwd);
1185 }
1186
1187 /*
1188 * vfs_context_create
1189 *
1190 * Description: Allocate and initialize a new context.
1191 *
1192 * Parameters: vfs_context_t: Context to copy, or NULL for new
1193 *
1194 * Returns: Pointer to new context
1195 *
1196 * Notes: Copy cred and thread from argument, if available; else
1197 * initialize with current thread and new cred. Returns
1198 * with a reference held on the credential.
1199 */
1200 vfs_context_t
1201 vfs_context_create(vfs_context_t ctx)
1202 {
1203 vfs_context_t newcontext;
1204
1205 newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));
1206
1207 if (newcontext) {
1208 kauth_cred_t safecred;
1209 if (ctx) {
1210 newcontext->vc_thread = ctx->vc_thread;
1211 safecred = ctx->vc_ucred;
1212 } else {
1213 newcontext->vc_thread = current_thread();
1214 safecred = kauth_cred_get();
1215 }
1216 if (IS_VALID_CRED(safecred))
1217 kauth_cred_ref(safecred);
1218 newcontext->vc_ucred = safecred;
1219 return(newcontext);
1220 }
1221 return(NULL);
1222 }
1223
1224
1225 vfs_context_t
1226 vfs_context_current(void)
1227 {
1228 vfs_context_t ctx = NULL;
1229 volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1230
1231 if (ut != NULL ) {
1232 if (ut->uu_context.vc_ucred != NULL) {
1233 ctx = &ut->uu_context;
1234 }
1235 }
1236
1237 return(ctx == NULL ? vfs_context_kernel() : ctx);
1238 }
1239
1240
1241 /*
1242 * XXX Do not ask
1243 *
1244 * Dangerous hack - adopt the first kernel thread as the current thread, to
1245 * get to the vfs_context_t in the uthread associated with a kernel thread.
1246 * This is used by UDF to make the call into IOCDMediaBSDClient,
1247 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1248 * ioctl() is being called from kernel or user space (and all this because
1249 * we do not pass threads into our ioctl()'s, instead of processes).
1250 *
1251 * This is also used by imageboot_setup(), called early from bsd_init() after
1252 * kernproc has been given a credential.
1253 *
1254 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1255 * of many Mach headers to do the reference directly rather than indirectly;
1256 * we will need to forgo this convenience when we retire proc_thread().
1257 */
1258 static struct vfs_context kerncontext;
1259 vfs_context_t
1260 vfs_context_kernel(void)
1261 {
1262 if (kerncontext.vc_ucred == NOCRED)
1263 kerncontext.vc_ucred = kernproc->p_ucred;
1264 if (kerncontext.vc_thread == NULL)
1265 kerncontext.vc_thread = proc_thread(kernproc);
1266
1267 return(&kerncontext);
1268 }
1269
1270
1271 int
1272 vfs_context_rele(vfs_context_t ctx)
1273 {
1274 if (ctx) {
1275 if (IS_VALID_CRED(ctx->vc_ucred))
1276 kauth_cred_unref(&ctx->vc_ucred);
1277 kfree(ctx, sizeof(struct vfs_context));
1278 }
1279 return(0);
1280 }
1281
1282
1283 kauth_cred_t
1284 vfs_context_ucred(vfs_context_t ctx)
1285 {
1286 return (ctx->vc_ucred);
1287 }
1288
1289 /*
1290 * Return true if the context is owned by the superuser.
1291 */
1292 int
1293 vfs_context_issuser(vfs_context_t ctx)
1294 {
1295 return(kauth_cred_issuser(vfs_context_ucred(ctx)));
1296 }
1297
1298 /*
1299 * Given a context, for all fields of vfs_context_t which
1300 * are not held with a reference, set those fields to the
1301 * values for the current execution context. Currently, this
1302 * just means the vc_thread.
1303 *
1304 * Returns: 0 for success, nonzero for failure
1305 *
1306 * The intended use is:
1307 * 1. vfs_context_create() gets the caller a context
1308 * 2. vfs_context_bind() sets the unrefcounted data
1309 * 3. vfs_context_rele() releases the context
1310 *
1311 */
1312 int
1313 vfs_context_bind(vfs_context_t ctx)
1314 {
1315 ctx->vc_thread = current_thread();
1316 return 0;
1317 }
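/*
 * Illustrative sketch (not built): the create/bind/rele lifecycle described
 * above, as a caller that captures its creating context's credential but
 * binds to whichever thread later performs the work.  The function name is
 * hypothetical.
 */
#if 0
static int
example_deferred_getattr(vnode_t vp)
{
	struct vnode_attr va;
	vfs_context_t ctx;
	int error;

	ctx = vfs_context_create(NULL);		/* current thread + cred, cred ref held */
	if (ctx == NULL)
		return (ENOMEM);

	/* ... possibly hand ctx to another thread ... */

	(void) vfs_context_bind(ctx);		/* rebind vc_thread to the running thread */

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);
	error = vnode_getattr(vp, &va, ctx);	/* any KPI that takes a context */

	vfs_context_rele(ctx);			/* drops the cred reference, frees ctx */
	return (error);
}
#endif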
1318
1319 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1320
1321
1322 /*
1323 * Convert between vnode types and inode formats (since POSIX.1
1324 * defines mode word of stat structure in terms of inode formats).
1325 */
1326 enum vtype
1327 vnode_iftovt(int mode)
1328 {
1329 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1330 }
1331
1332 int
1333 vnode_vttoif(enum vtype indx)
1334 {
1335 return(vttoif_tab[(int)(indx)]);
1336 }
1337
1338 int
1339 vnode_makeimode(int indx, int mode)
1340 {
1341 return (int)(VTTOIF(indx) | (mode));
1342 }
1343
1344
1345 /*
1346 * vnode manipulation functions.
1347 */
1348
1349 /* returns the system root vnode with an iocount; it should be released using vnode_put() */
1350 vnode_t
1351 vfs_rootvnode(void)
1352 {
1353 int error;
1354
1355 error = vnode_get(rootvnode);
1356 if (error)
1357 return ((vnode_t)0);
1358 else
1359 return rootvnode;
1360 }
1361
1362
1363 uint32_t
1364 vnode_vid(vnode_t vp)
1365 {
1366 return ((uint32_t)(vp->v_id));
1367 }
1368
1369 mount_t
1370 vnode_mount(vnode_t vp)
1371 {
1372 return (vp->v_mount);
1373 }
1374
1375 #if CONFIG_IOSCHED
1376 vnode_t
1377 vnode_mountdevvp(vnode_t vp)
1378 {
1379 if (vp->v_mount)
1380 return (vp->v_mount->mnt_devvp);
1381 else
1382 return ((vnode_t)0);
1383 }
1384 #endif
1385
1386 mount_t
1387 vnode_mountedhere(vnode_t vp)
1388 {
1389 mount_t mp;
1390
1391 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1392 (mp->mnt_vnodecovered == vp))
1393 return (mp);
1394 else
1395 return (mount_t)NULL;
1396 }
1397
1398 /* returns vnode type of vnode_t */
1399 enum vtype
1400 vnode_vtype(vnode_t vp)
1401 {
1402 return (vp->v_type);
1403 }
1404
1405 /* returns FS specific node saved in vnode */
1406 void *
1407 vnode_fsnode(vnode_t vp)
1408 {
1409 return (vp->v_data);
1410 }
1411
1412 void
1413 vnode_clearfsnode(vnode_t vp)
1414 {
1415 vp->v_data = NULL;
1416 }
1417
1418 dev_t
1419 vnode_specrdev(vnode_t vp)
1420 {
1421 return(vp->v_rdev);
1422 }
1423
1424
1425 /* Accessor functions */
1426 /* is vnode_t a root vnode */
1427 int
1428 vnode_isvroot(vnode_t vp)
1429 {
1430 return ((vp->v_flag & VROOT)? 1 : 0);
1431 }
1432
1433 /* is vnode_t a system vnode */
1434 int
1435 vnode_issystem(vnode_t vp)
1436 {
1437 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1438 }
1439
1440 /* is vnode_t a swap file vnode */
1441 int
1442 vnode_isswap(vnode_t vp)
1443 {
1444 return ((vp->v_flag & VSWAP)? 1 : 0);
1445 }
1446
1447 /* is vnode_t a tty */
1448 int
1449 vnode_istty(vnode_t vp)
1450 {
1451 return ((vp->v_flag & VISTTY) ? 1 : 0);
1452 }
1453
1454 /* is a mount operation in progress on the vnode_t? */
1455 int
1456 vnode_ismount(vnode_t vp)
1457 {
1458 return ((vp->v_flag & VMOUNT)? 1 : 0);
1459 }
1460
1461 /* is this vnode under recycle now */
1462 int
1463 vnode_isrecycled(vnode_t vp)
1464 {
1465 int ret;
1466
1467 vnode_lock_spin(vp);
1468 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1469 vnode_unlock(vp);
1470 return(ret);
1471 }
1472
1473 /* vnode was created by background task requesting rapid aging
1474 and has not since been referenced by a normal task */
1475 int
1476 vnode_israge(vnode_t vp)
1477 {
1478 return ((vp->v_flag & VRAGE)? 1 : 0);
1479 }
1480
1481 int
1482 vnode_needssnapshots(vnode_t vp)
1483 {
1484 return ((vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0);
1485 }
1486
1487
1488 /* Check the process/thread to see if we should skip atime updates */
1489 int
1490 vfs_ctx_skipatime (vfs_context_t ctx) {
1491 struct uthread *ut;
1492 proc_t proc;
1493 thread_t thr;
1494
1495 proc = vfs_context_proc(ctx);
1496 thr = vfs_context_thread (ctx);
1497
1498 /* Validate pointers in case we were invoked via a kernel context */
1499 if (thr && proc) {
1500 ut = get_bsdthread_info (thr);
1501
1502 if (proc->p_lflag & P_LRAGE_VNODES) {
1503 return 1;
1504 }
1505
1506 if (ut) {
1507 if (ut->uu_flag & UT_RAGE_VNODES) {
1508 return 1;
1509 }
1510 }
1511 }
1512 return 0;
1513 }
1514
1515 /* is vnode_t marked to not keep data cached once it's been consumed */
1516 int
1517 vnode_isnocache(vnode_t vp)
1518 {
1519 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1520 }
1521
1522 /*
1523 * has sequential readahead been disabled on this vnode
1524 */
1525 int
1526 vnode_isnoreadahead(vnode_t vp)
1527 {
1528 return ((vp->v_flag & VRAOFF)? 1 : 0);
1529 }
1530
1531 int
1532 vnode_is_openevt(vnode_t vp)
1533 {
1534 return ((vp->v_flag & VOPENEVT)? 1 : 0);
1535 }
1536
1537 /* is vnode_t a standard one? */
1538 int
1539 vnode_isstandard(vnode_t vp)
1540 {
1541 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1542 }
1543
1544 /* don't vflush() if SKIPSYSTEM */
1545 int
1546 vnode_isnoflush(vnode_t vp)
1547 {
1548 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1549 }
1550
1551 /* is vnode_t a regular file */
1552 int
1553 vnode_isreg(vnode_t vp)
1554 {
1555 return ((vp->v_type == VREG)? 1 : 0);
1556 }
1557
1558 /* is vnode_t a directory? */
1559 int
1560 vnode_isdir(vnode_t vp)
1561 {
1562 return ((vp->v_type == VDIR)? 1 : 0);
1563 }
1564
1565 /* is vnode_t a symbolic link ? */
1566 int
1567 vnode_islnk(vnode_t vp)
1568 {
1569 return ((vp->v_type == VLNK)? 1 : 0);
1570 }
1571
1572 int
1573 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
1574 {
1575 struct nameidata *ndp = cnp->cn_ndp;
1576
1577 if (ndp == NULL) {
1578 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1579 }
1580
1581 if (vnode_isdir(vp)) {
1582 if (vp->v_mountedhere != NULL) {
1583 goto yes;
1584 }
1585
1586 #if CONFIG_TRIGGERS
1587 if (vp->v_resolve) {
1588 goto yes;
1589 }
1590 #endif /* CONFIG_TRIGGERS */
1591
1592 }
1593
1594
1595 if (vnode_islnk(vp)) {
1596 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1597 if (cnp->cn_flags & FOLLOW) {
1598 goto yes;
1599 }
1600 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
1601 goto yes;
1602 }
1603 }
1604
1605 return 0;
1606
1607 yes:
1608 ndp->ni_flag |= NAMEI_CONTLOOKUP;
1609 return EKEEPLOOKING;
1610 }
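/*
 * Illustrative sketch (not built): how a filesystem's compound VNOP might
 * use vnode_lookup_continue_needed() above.  If the vnode found for the
 * last path component needs further handling by the VFS layer (a mount
 * point, a trigger, or a symlink to follow), the helper sets
 * NAMEI_CONTLOOKUP and the VNOP returns EKEEPLOOKING so lookup resumes.
 * The surrounding function is hypothetical.
 */
#if 0
static int
examplefs_compound_found(vnode_t vp, struct componentname *cnp)
{
	int error;

	error = vnode_lookup_continue_needed(vp, cnp);
	if (error) {
		/* EKEEPLOOKING: hand the vnode back and let lookup continue */
		return (error);
	}

	/* ... otherwise proceed with the operation against vp ... */
	return (0);
}
#endif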
1611
1612 /* is vnode_t a fifo ? */
1613 int
1614 vnode_isfifo(vnode_t vp)
1615 {
1616 return ((vp->v_type == VFIFO)? 1 : 0);
1617 }
1618
1619 /* is vnode_t a block device? */
1620 int
1621 vnode_isblk(vnode_t vp)
1622 {
1623 return ((vp->v_type == VBLK)? 1 : 0);
1624 }
1625
1626 int
1627 vnode_isspec(vnode_t vp)
1628 {
1629 return (((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0);
1630 }
1631
1632 /* is vnode_t a char device? */
1633 int
1634 vnode_ischr(vnode_t vp)
1635 {
1636 return ((vp->v_type == VCHR)? 1 : 0);
1637 }
1638
1639 /* is vnode_t a socket? */
1640 int
1641 vnode_issock(vnode_t vp)
1642 {
1643 return ((vp->v_type == VSOCK)? 1 : 0);
1644 }
1645
1646 /* is vnode_t a device with multiple active vnodes referring to it? */
1647 int
1648 vnode_isaliased(vnode_t vp)
1649 {
1650 enum vtype vt = vp->v_type;
1651 if (!((vt == VCHR) || (vt == VBLK))) {
1652 return 0;
1653 } else {
1654 return (vp->v_specflags & SI_ALIASED);
1655 }
1656 }
1657
1658 /* is vnode_t a named stream? */
1659 int
1660 vnode_isnamedstream(
1661 #if NAMEDSTREAMS
1662 vnode_t vp
1663 #else
1664 __unused vnode_t vp
1665 #endif
1666 )
1667 {
1668 #if NAMEDSTREAMS
1669 return ((vp->v_flag & VISNAMEDSTREAM) ? 1 : 0);
1670 #else
1671 return (0);
1672 #endif
1673 }
1674
1675 int
1676 vnode_isshadow(
1677 #if NAMEDSTREAMS
1678 vnode_t vp
1679 #else
1680 __unused vnode_t vp
1681 #endif
1682 )
1683 {
1684 #if NAMEDSTREAMS
1685 return ((vp->v_flag & VISSHADOW) ? 1 : 0);
1686 #else
1687 return (0);
1688 #endif
1689 }
1690
1691 /* does vnode have associated named stream vnodes ? */
1692 int
1693 vnode_hasnamedstreams(
1694 #if NAMEDSTREAMS
1695 vnode_t vp
1696 #else
1697 __unused vnode_t vp
1698 #endif
1699 )
1700 {
1701 #if NAMEDSTREAMS
1702 return ((vp->v_lflag & VL_HASSTREAMS) ? 1 : 0);
1703 #else
1704 return (0);
1705 #endif
1706 }
1707 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1708 void
1709 vnode_setnocache(vnode_t vp)
1710 {
1711 vnode_lock_spin(vp);
1712 vp->v_flag |= VNOCACHE_DATA;
1713 vnode_unlock(vp);
1714 }
1715
1716 void
1717 vnode_clearnocache(vnode_t vp)
1718 {
1719 vnode_lock_spin(vp);
1720 vp->v_flag &= ~VNOCACHE_DATA;
1721 vnode_unlock(vp);
1722 }
1723
1724 void
1725 vnode_set_openevt(vnode_t vp)
1726 {
1727 vnode_lock_spin(vp);
1728 vp->v_flag |= VOPENEVT;
1729 vnode_unlock(vp);
1730 }
1731
1732 void
1733 vnode_clear_openevt(vnode_t vp)
1734 {
1735 vnode_lock_spin(vp);
1736 vp->v_flag &= ~VOPENEVT;
1737 vnode_unlock(vp);
1738 }
1739
1740
1741 void
1742 vnode_setnoreadahead(vnode_t vp)
1743 {
1744 vnode_lock_spin(vp);
1745 vp->v_flag |= VRAOFF;
1746 vnode_unlock(vp);
1747 }
1748
1749 void
1750 vnode_clearnoreadahead(vnode_t vp)
1751 {
1752 vnode_lock_spin(vp);
1753 vp->v_flag &= ~VRAOFF;
1754 vnode_unlock(vp);
1755 }
1756
1757 int
1758 vnode_isfastdevicecandidate(vnode_t vp)
1759 {
1760 return ((vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0);
1761 }
1762
1763 void
1764 vnode_setfastdevicecandidate(vnode_t vp)
1765 {
1766 vnode_lock_spin(vp);
1767 vp->v_flag |= VFASTDEVCANDIDATE;
1768 vnode_unlock(vp);
1769 }
1770
1771 void
1772 vnode_clearfastdevicecandidate(vnode_t vp)
1773 {
1774 vnode_lock_spin(vp);
1775 vp->v_flag &= ~VFASTDEVCANDIDATE;
1776 vnode_unlock(vp);
1777 }
1778
1779 int
1780 vnode_isautocandidate(vnode_t vp)
1781 {
1782 return ((vp->v_flag & VAUTOCANDIDATE)? 1 : 0);
1783 }
1784
1785 void
1786 vnode_setautocandidate(vnode_t vp)
1787 {
1788 vnode_lock_spin(vp);
1789 vp->v_flag |= VAUTOCANDIDATE;
1790 vnode_unlock(vp);
1791 }
1792
1793 void
1794 vnode_clearautocandidate(vnode_t vp)
1795 {
1796 vnode_lock_spin(vp);
1797 vp->v_flag &= ~VAUTOCANDIDATE;
1798 vnode_unlock(vp);
1799 }
1800
1801
1802
1803
1804 /* mark vnode_t to skip vflush() if SKIPSYSTEM */
1805 void
1806 vnode_setnoflush(vnode_t vp)
1807 {
1808 vnode_lock_spin(vp);
1809 vp->v_flag |= VNOFLUSH;
1810 vnode_unlock(vp);
1811 }
1812
1813 void
1814 vnode_clearnoflush(vnode_t vp)
1815 {
1816 vnode_lock_spin(vp);
1817 vp->v_flag &= ~VNOFLUSH;
1818 vnode_unlock(vp);
1819 }
1820
1821
1822 /* is vnode_t a blkdevice and has a FS mounted on it */
1823 int
1824 vnode_ismountedon(vnode_t vp)
1825 {
1826 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
1827 }
1828
1829 void
1830 vnode_setmountedon(vnode_t vp)
1831 {
1832 vnode_lock_spin(vp);
1833 vp->v_specflags |= SI_MOUNTEDON;
1834 vnode_unlock(vp);
1835 }
1836
1837 void
1838 vnode_clearmountedon(vnode_t vp)
1839 {
1840 vnode_lock_spin(vp);
1841 vp->v_specflags &= ~SI_MOUNTEDON;
1842 vnode_unlock(vp);
1843 }
1844
1845
1846 void
1847 vnode_settag(vnode_t vp, int tag)
1848 {
1849 vp->v_tag = tag;
1850
1851 }
1852
1853 int
1854 vnode_tag(vnode_t vp)
1855 {
1856 return(vp->v_tag);
1857 }
1858
1859 vnode_t
1860 vnode_parent(vnode_t vp)
1861 {
1862
1863 return(vp->v_parent);
1864 }
1865
1866 void
1867 vnode_setparent(vnode_t vp, vnode_t dvp)
1868 {
1869 vp->v_parent = dvp;
1870 }
1871
1872 void
1873 vnode_setname(vnode_t vp, char * name)
1874 {
1875 vp->v_name = name;
1876 }
1877
1878 /* return the FS name registered when the FS was added to the kernel */
1879 void
1880 vnode_vfsname(vnode_t vp, char * buf)
1881 {
1882 strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
1883 }
1884
1885 /* return the FS type number */
1886 int
1887 vnode_vfstypenum(vnode_t vp)
1888 {
1889 return(vp->v_mount->mnt_vtable->vfc_typenum);
1890 }
1891
1892 int
1893 vnode_vfs64bitready(vnode_t vp)
1894 {
1895
1896 /*
1897 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
1898 */
1899 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
1900 return(1);
1901 else
1902 return(0);
1903 }
1904
1905
1906
1907 /* return the visible flags on associated mount point of vnode_t */
1908 uint32_t
1909 vnode_vfsvisflags(vnode_t vp)
1910 {
1911 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
1912 }
1913
1914 /* return the command modifier flags on associated mount point of vnode_t */
1915 uint32_t
1916 vnode_vfscmdflags(vnode_t vp)
1917 {
1918 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
1919 }
1920
1921 /* return the max length of a short symlink for vnode_t's mount point */
1922 uint32_t
1923 vnode_vfsmaxsymlen(vnode_t vp)
1924 {
1925 return(vp->v_mount->mnt_maxsymlinklen);
1926 }
1927
1928 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1929 struct vfsstatfs *
1930 vnode_vfsstatfs(vnode_t vp)
1931 {
1932 return(&vp->v_mount->mnt_vfsstat);
1933 }
1934
1935 /* return the FS-specific private data handle associated with vnode_t's mount point */
1936 void *
1937 vnode_vfsfsprivate(vnode_t vp)
1938 {
1939 return(vp->v_mount->mnt_data);
1940 }
1941
1942 /* is vnode_t in a rdonly mounted FS */
1943 int
1944 vnode_vfsisrdonly(vnode_t vp)
1945 {
1946 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
1947 }
1948
1949 int
1950 vnode_compound_rename_available(vnode_t vp)
1951 {
1952 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
1953 }
1954 int
1955 vnode_compound_rmdir_available(vnode_t vp)
1956 {
1957 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
1958 }
1959 int
1960 vnode_compound_mkdir_available(vnode_t vp)
1961 {
1962 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
1963 }
1964 int
1965 vnode_compound_remove_available(vnode_t vp)
1966 {
1967 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
1968 }
1969 int
1970 vnode_compound_open_available(vnode_t vp)
1971 {
1972 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
1973 }
1974
1975 int
1976 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
1977 {
1978 return ((vp->v_mount->mnt_compound_ops & opid) != 0);
1979 }
1980
1981 /*
1982 * Returns vnode ref to current working directory; if a per-thread current
1983 * working directory is in effect, return that instead of the per process one.
1984 *
1985 * XXX Published, but not used.
1986 */
1987 vnode_t
1988 current_workingdir(void)
1989 {
1990 return vfs_context_cwd(vfs_context_current());
1991 }
1992
1993 /* returns vnode ref to current root(chroot) directory */
1994 vnode_t
1995 current_rootdir(void)
1996 {
1997 proc_t proc = current_proc();
1998 struct vnode * vp ;
1999
2000 if ( (vp = proc->p_fd->fd_rdir) ) {
2001 if ( (vnode_getwithref(vp)) )
2002 return (NULL);
2003 }
2004 return vp;
2005 }
2006
2007 /*
2008 * Get a filesec and optional acl contents from an extended attribute.
2009 * Function will attempt to retrieve ACL, UUID, and GUID information using a
2010 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2011 *
2012 * Parameters: vp The vnode on which to operate.
2013 * fsecp The filesec (and ACL, if any) being
2014 * retrieved.
2015 * ctx The vnode context in which the
2016 * operation is to be attempted.
2017 *
2018 * Returns: 0 Success
2019 * !0 errno value
2020 *
2021 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2022 * host byte order, as will be the ACL contents, if any.
2023 * Internally, we will canonicalize these values from network (PPC)
2024 * byte order after we retrieve them so that the on-disk contents
2025 * of the extended attribute are identical for both PPC and Intel
2026 * (if we were not being required to provide this service via
2027 * fallback, this would be the job of the filesystem
2028 * 'VNOP_GETATTR' call).
2029 *
2030 * We use ntohl() because it has a transitive property on Intel
2031 * machines and no effect on PPC machines. This guarantees us
2032 *
2033 * XXX: Deleting rather than ignoring a corrupt security structure is
2034 * probably the only way to reset it without assistance from a
2035 * file system integrity checking tool. Right now we ignore it.
2036 *
2037 * XXX: We should enumerate the possible errno values here, and where
2038 * in the code they originated.
2039 */
2040 static int
2041 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2042 {
2043 kauth_filesec_t fsec;
2044 uio_t fsec_uio;
2045 size_t fsec_size;
2046 size_t xsize, rsize;
2047 int error;
2048 uint32_t host_fsec_magic;
2049 uint32_t host_acl_entrycount;
2050
2051 fsec = NULL;
2052 fsec_uio = NULL;
2053 error = 0;
2054
2055 /* find out how big the EA is */
2056 if ((error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx)) != 0) {
2057 /* no EA, no filesec */
2058 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2059 error = 0;
2060 /* either way, we are done */
2061 goto out;
2062 }
2063
2064 /*
2065 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2066 * ACE entry ACL, and if it's larger than that, it must have the right
2067 * number of bytes such that it contains an atomic number of ACEs,
2068 * rather than partial entries. Otherwise, we ignore it.
2069 */
2070 if (!KAUTH_FILESEC_VALID(xsize)) {
2071 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
2072 error = 0;
2073 goto out;
2074 }
2075
2076 /* how many entries would fit? */
2077 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2078
2079 /* get buffer and uio */
2080 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
2081 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2082 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2083 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2084 error = ENOMEM;
2085 goto out;
2086 }
2087
2088 /* read security attribute */
2089 rsize = xsize;
2090 if ((error = vn_getxattr(vp,
2091 KAUTH_FILESEC_XATTR,
2092 fsec_uio,
2093 &rsize,
2094 XATTR_NOSECURITY,
2095 ctx)) != 0) {
2096
2097 /* no attribute - no security data */
2098 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2099 error = 0;
2100 /* either way, we are done */
2101 goto out;
2102 }
2103
2104 /*
2105 * Validate security structure; the validation must take place in host
2106 * byte order. If it's corrupt, we will just ignore it.
2107 */
2108
2109 /* Validate the size before trying to convert it */
2110 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2111 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2112 goto out;
2113 }
2114
2115 /* Validate the magic number before trying to convert it */
2116 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2117 if (fsec->fsec_magic != host_fsec_magic) {
2118 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2119 goto out;
2120 }
2121
2122 /* Validate the entry count before trying to convert it. */
2123 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2124 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2125 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2126 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2127 goto out;
2128 }
2129 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2130 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2131 goto out;
2132 }
2133 }
2134
2135 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2136
2137 *fsecp = fsec;
2138 fsec = NULL;
2139 error = 0;
2140 out:
2141 if (fsec != NULL)
2142 kauth_filesec_free(fsec);
2143 if (fsec_uio != NULL)
2144 uio_free(fsec_uio);
2145 if (error)
2146 *fsecp = NULL;
2147 return(error);
2148 }
2149
2150 /*
2151 * Set a filesec and optional acl contents into an extended attribute.
2152 * Function will attempt to store ACL, UUID, and GUID information using a
2153 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2154 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2155 * original caller supplied an acl.
2156 *
2157 * Parameters: vp The vnode on which to operate.
2158 * fsec The filesec being set.
2159 * acl The acl to be associated with 'fsec'.
2160 * ctx The vnode context in which the
2161 * operation is to be attempted.
2162 *
2163 * Returns: 0 Success
2164 * !0 errno value
2165 *
2166 * Notes: Both the fsec and the acl are always valid.
2167 *
2168 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2169 * as are the acl contents, if they are used. Internally, we will
2170 * canonicalize these values into network (PPC) byte order before we
2171 * attempt to write them so that the on-disk contents of the
2172 * extended attribute are identical for both PPC and Intel (if we
2173 * were not being required to provide this service via fallback,
2174 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2175 * We reverse this process on the way out, so we leave with the
2176 * same byte order we started with.
2177 *
2178 * XXX: We should enumerate the possible errno values here, and where
2179 * in the code they originated.
2180 */
2181 static int
2182 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2183 {
2184 uio_t fsec_uio;
2185 int error;
2186 uint32_t saved_acl_copysize;
2187
2188 fsec_uio = NULL;
2189
2190 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2191 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2192 error = ENOMEM;
2193 goto out;
2194 }
2195 /*
2196 * Save the ACL copysize before the endianness conversion, because the
2197 * entry count it is derived from gets byte-swapped below.
2198 */
2199 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2200
2201 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2202
2203 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2204 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2205 error = vn_setxattr(vp,
2206 KAUTH_FILESEC_XATTR,
2207 fsec_uio,
2208 XATTR_NOSECURITY, /* we have auth'ed already */
2209 ctx);
2210 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2211
2212 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2213
2214 out:
2215 if (fsec_uio != NULL)
2216 uio_free(fsec_uio);
2217 return(error);
2218 }
2219
2220
2221 /*
2222 * Returns: 0 Success
2223 * ENOMEM Not enough space [only if has filesec]
2224 * VNOP_GETATTR: ???
2225 * vnode_get_filesec: ???
2226 * kauth_cred_guid2uid: ???
2227 * kauth_cred_guid2gid: ???
2228 * vfs_update_vfsstat: ???
2229 */
2230 int
2231 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2232 {
2233 kauth_filesec_t fsec;
2234 kauth_acl_t facl;
2235 int error;
2236 uid_t nuid;
2237 gid_t ngid;
2238
2239 /* don't ask for extended security data if the filesystem doesn't support it */
2240 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2241 VATTR_CLEAR_ACTIVE(vap, va_acl);
2242 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2243 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2244 }
2245
2246 /*
2247 * If the caller wants size values we might have to synthesise, give the
2248 * filesystem the opportunity to supply better intermediate results.
2249 */
2250 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2251 VATTR_IS_ACTIVE(vap, va_total_size) ||
2252 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2253 VATTR_SET_ACTIVE(vap, va_data_size);
2254 VATTR_SET_ACTIVE(vap, va_data_alloc);
2255 VATTR_SET_ACTIVE(vap, va_total_size);
2256 VATTR_SET_ACTIVE(vap, va_total_alloc);
2257 }
2258
2259 error = VNOP_GETATTR(vp, vap, ctx);
2260 if (error) {
2261 KAUTH_DEBUG("ERROR - returning %d", error);
2262 goto out;
2263 }
2264
2265 /*
2266 * If extended security data was requested but not returned, try the fallback
2267 * path.
2268 */
2269 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2270 fsec = NULL;
2271
2272 if (XATTR_VNODE_SUPPORTED(vp)) {
2273 /* try to get the filesec */
2274 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
2275 goto out;
2276 }
2277 /* if no filesec, no attributes */
2278 if (fsec == NULL) {
2279 VATTR_RETURN(vap, va_acl, NULL);
2280 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2281 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2282 } else {
2283
2284 /* looks good, try to return what we were asked for */
2285 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2286 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2287
2288 /* only return the ACL if we were actually asked for it */
2289 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2290 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2291 VATTR_RETURN(vap, va_acl, NULL);
2292 } else {
2293 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2294 if (facl == NULL) {
2295 kauth_filesec_free(fsec);
2296 error = ENOMEM;
2297 goto out;
2298 }
2299 bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2300 VATTR_RETURN(vap, va_acl, facl);
2301 }
2302 }
2303 kauth_filesec_free(fsec);
2304 }
2305 }
2306 /*
2307 * If someone gave us an unsolicited filesec, toss it. We promise that
2308 * we're OK with a filesystem giving us anything back, but our callers
2309 * only expect what they asked for.
2310 */
2311 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2312 if (vap->va_acl != NULL)
2313 kauth_acl_free(vap->va_acl);
2314 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2315 }
2316
2317 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2318 /*
2319 * Handle the case where we need a UID/GID, but only have extended
2320 * security information.
2321 */
2322 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2323 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2324 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2325 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
2326 VATTR_RETURN(vap, va_uid, nuid);
2327 }
2328 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2329 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2330 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2331 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
2332 VATTR_RETURN(vap, va_gid, ngid);
2333 }
2334 #endif
2335
2336 /*
2337 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2338 */
2339 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2340 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2341 nuid = vap->va_uid;
2342 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2343 nuid = vp->v_mount->mnt_fsowner;
2344 if (nuid == KAUTH_UID_NONE)
2345 nuid = 99;
2346 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2347 nuid = vap->va_uid;
2348 } else {
2349 /* this will always be something sensible */
2350 nuid = vp->v_mount->mnt_fsowner;
2351 }
2352 if ((nuid == 99) && !vfs_context_issuser(ctx))
2353 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2354 VATTR_RETURN(vap, va_uid, nuid);
2355 }
2356 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2357 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2358 ngid = vap->va_gid;
2359 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2360 ngid = vp->v_mount->mnt_fsgroup;
2361 if (ngid == KAUTH_GID_NONE)
2362 ngid = 99;
2363 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2364 ngid = vap->va_gid;
2365 } else {
2366 /* this will always be something sensible */
2367 ngid = vp->v_mount->mnt_fsgroup;
2368 }
2369 if ((ngid == 99) && !vfs_context_issuser(ctx))
2370 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2371 VATTR_RETURN(vap, va_gid, ngid);
2372 }
2373
2374 /*
2375 * Synthesise some values that can be reasonably guessed.
2376 */
2377 if (!VATTR_IS_SUPPORTED(vap, va_iosize))
2378 VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
2379
2380 if (!VATTR_IS_SUPPORTED(vap, va_flags))
2381 VATTR_RETURN(vap, va_flags, 0);
2382
2383 if (!VATTR_IS_SUPPORTED(vap, va_filerev))
2384 VATTR_RETURN(vap, va_filerev, 0);
2385
2386 if (!VATTR_IS_SUPPORTED(vap, va_gen))
2387 VATTR_RETURN(vap, va_gen, 0);
2388
2389 /*
2390 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2391 */
2392 if (!VATTR_IS_SUPPORTED(vap, va_data_size))
2393 VATTR_RETURN(vap, va_data_size, 0);
2394
2395 /* do we want any of the possibly-computed values? */
2396 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2397 VATTR_IS_ACTIVE(vap, va_total_size) ||
2398 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2399 /* make sure f_bsize is valid */
2400 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2401 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0)
2402 goto out;
2403 }
2404
2405 /* default va_data_alloc from va_data_size */
2406 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
2407 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2408
2409 /* default va_total_size from va_data_size */
2410 if (!VATTR_IS_SUPPORTED(vap, va_total_size))
2411 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2412
2413 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2414 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
2415 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2416 }
2417
2418 /*
2419 * If we don't have a change time, pull it from the modtime.
2420 */
2421 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
2422 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2423
2424 /*
2425 * This is really only supported for the creation VNOPs, but since the field is there
2426 * we should populate it correctly.
2427 */
2428 VATTR_RETURN(vap, va_type, vp->v_type);
2429
2430 /*
2431 * The fsid can be obtained from the mountpoint directly.
2432 */
2433 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2434
2435 out:
2436
2437 return(error);
2438 }
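
/*
 * A minimal caller-side sketch (not part of the original file) of the
 * vnode_getattr() pattern described above: mark the attributes wanted,
 * issue the call, then check what the filesystem (or the fallback code)
 * actually supplied before using the values.
 */
#if 0
static int
example_get_data_size(vnode_t vp, vfs_context_t ctx, uint64_t *sizep)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);

	if ((error = vnode_getattr(vp, &va, ctx)) != 0)
		return (error);

	/* only trust attributes the call reported as supported */
	if (!VATTR_IS_SUPPORTED(&va, va_data_size))
		return (ENOTSUP);

	*sizep = va.va_data_size;
	return (0);
}
#endif /* 0 */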
2439
2440 /*
2441 * Set the attributes on a vnode in a vnode context.
2442 *
2443 * Parameters: vp The vnode whose attributes to set.
2444 * vap A pointer to the attributes to set.
2445 * ctx The vnode context in which the
2446 * operation is to be attempted.
2447 *
2448 * Returns: 0 Success
2449 * !0 errno value
2450 *
2451 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2452 *
2453 * The contents of the data area pointed to by 'vap' may be
2454 * modified if the vnode is on a filesystem which has been
2455 * mounted with ignore ownership flags, or by the underlying
2456 * VFS itself, or by the fallback code, if the underlying VFS
2457 * does not support ACL, UUID, or GUUID attributes directly.
2458 *
2459 * XXX: We should enumerate the possible errno values here, and where
2460 * in the code they originated.
2461 */
2462 int
2463 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2464 {
2465 int error, is_perm_change=0;
2466
2467 /*
2468 * Make sure the filesystem is mounted R/W.
2469 * If not, return an error.
2470 */
2471 if (vfs_isrdonly(vp->v_mount)) {
2472 error = EROFS;
2473 goto out;
2474 }
2475 #if NAMEDSTREAMS
2476 /* For streams, va_data_size is the only settable attribute. */
2477 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2478 error = EPERM;
2479 goto out;
2480 }
2481 #endif
2482
2483 /*
2484 * If ownership is being ignored on this volume, we silently discard
2485 * ownership changes.
2486 */
2487 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2488 VATTR_CLEAR_ACTIVE(vap, va_uid);
2489 VATTR_CLEAR_ACTIVE(vap, va_gid);
2490 }
2491
2492 if ( VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)
2493 || VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_acl)) {
2494 is_perm_change = 1;
2495 }
2496
2497 /*
2498 * Make sure that extended security is enabled if we're going to try
2499 * to set any.
2500 */
2501 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2502 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2503 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2504 error = ENOTSUP;
2505 goto out;
2506 }
2507
2508 /* Never allow the setting of any unsupported superuser flags. */
2509 if (VATTR_IS_ACTIVE(vap, va_flags)) {
2510 vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
2511 }
2512
2513 error = VNOP_SETATTR(vp, vap, ctx);
2514
2515 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
2516 error = vnode_setattr_fallback(vp, vap, ctx);
2517
2518 #if CONFIG_FSE
2519 // only send a stat_changed event if this is more than
2520 // just an access or backup time update
2521 if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time)) && (vap->va_active != VNODE_ATTR_BIT(va_backup_time))) {
2522 if (is_perm_change) {
2523 if (need_fsevent(FSE_CHOWN, vp)) {
2524 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2525 }
2526 } else if(need_fsevent(FSE_STAT_CHANGED, vp)) {
2527 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2528 }
2529 }
2530 #endif
2531
2532 out:
2533 return(error);
2534 }
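
/*
 * A minimal caller-side sketch (not part of the original file) of
 * vnode_setattr(): initialise a vnode_attr, set only the attributes to be
 * changed, and let the call (plus vnode_setattr_fallback() below) handle
 * anything the filesystem does not support natively.
 */
#if 0
static int
example_chmod(vnode_t vp, mode_t new_mode, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, new_mode);

	return (vnode_setattr(vp, &va, ctx));
}
#endif /* 0 */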
2535
2536 /*
2537 * Fallback for setting the attributes on a vnode in a vnode context. This
2538 * function will attempt to store ACL, UUID, and GUID information using
2539 * a read/modify/write operation against an EA used as a backing store for
2540 * the object.
2541 *
2542 * Parameters: vp The vnode whose attributes to set.
2543 * vap A pointer to the attributes to set.
2544 * ctx The vnode context in which the
2545 * operation is to be attempted.
2546 *
2547 * Returns: 0 Success
2548 * !0 errno value
2549 *
2550 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2551 * as are the fsec and lfsec, if they are used.
2552 *
2553 * The contents of the data area pointed to by 'vap' may be
2554 * modified to indicate which of the requested attributes are
2555 * supported by the fallback.
2556 *
2557 * XXX: We should enumerate the possible errno values here, and where
2558 * in the code they originated.
2559 */
2560 int
2561 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2562 {
2563 kauth_filesec_t fsec;
2564 kauth_acl_t facl;
2565 struct kauth_filesec lfsec;
2566 int error;
2567
2568 error = 0;
2569
2570 /*
2571 * Extended security fallback via extended attributes.
2572 *
2573 * Note that we do not free the filesec; the caller is expected to
2574 * do this.
2575 */
2576 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2577 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2578 VATTR_NOT_RETURNED(vap, va_guuid)) {
2579 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2580
2581 /*
2582 * Fail for file types that we don't permit extended security
2583 * to be set on.
2584 */
2585 if (!XATTR_VNODE_SUPPORTED(vp)) {
2586 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2587 error = EINVAL;
2588 goto out;
2589 }
2590
2591 /*
2592 * If we don't have all the extended security items, we need
2593 * to fetch the existing data to perform a read-modify-write
2594 * operation.
2595 */
2596 fsec = NULL;
2597 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2598 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2599 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2600 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2601 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2602 goto out;
2603 }
2604 }
2605 /* if we didn't get a filesec, use our local one */
2606 if (fsec == NULL) {
2607 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2608 fsec = &lfsec;
2609 } else {
2610 KAUTH_DEBUG("SETATTR - updating existing filesec");
2611 }
2612 /* find the ACL */
2613 facl = &fsec->fsec_acl;
2614
2615 /* if we're using the local filesec, we need to initialise it */
2616 if (fsec == &lfsec) {
2617 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2618 fsec->fsec_owner = kauth_null_guid;
2619 fsec->fsec_group = kauth_null_guid;
2620 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2621 facl->acl_flags = 0;
2622 }
2623
2624 /*
2625 * Update with the supplied attributes.
2626 */
2627 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2628 KAUTH_DEBUG("SETATTR - updating owner UUID");
2629 fsec->fsec_owner = vap->va_uuuid;
2630 VATTR_SET_SUPPORTED(vap, va_uuuid);
2631 }
2632 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2633 KAUTH_DEBUG("SETATTR - updating group UUID");
2634 fsec->fsec_group = vap->va_guuid;
2635 VATTR_SET_SUPPORTED(vap, va_guuid);
2636 }
2637 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2638 if (vap->va_acl == NULL) {
2639 KAUTH_DEBUG("SETATTR - removing ACL");
2640 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2641 } else {
2642 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2643 facl = vap->va_acl;
2644 }
2645 VATTR_SET_SUPPORTED(vap, va_acl);
2646 }
2647
2648 /*
2649 * If the filesec data is all invalid, we can just remove
2650 * the EA completely.
2651 */
2652 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2653 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2654 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2655 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2656 /* no attribute is ok, nothing to delete */
2657 if (error == ENOATTR)
2658 error = 0;
2659 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2660 } else {
2661 /* write the EA */
2662 error = vnode_set_filesec(vp, fsec, facl, ctx);
2663 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2664 }
2665
2666 /* if we fetched a filesec, dispose of the buffer */
2667 if (fsec != &lfsec)
2668 kauth_filesec_free(fsec);
2669 }
2670 out:
2671
2672 return(error);
2673 }
2674
2675 /*
2676 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
2677 * event on a vnode.
2678 */
2679 int
2680 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
2681 {
2682 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
2683 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
2684 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
2685 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
2686 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
2687 uint32_t knote_events = (events & knote_mask);
2688
2689 /* Permissions are not explicitly part of the kqueue model */
2690 if (events & VNODE_EVENT_PERMS) {
2691 knote_events |= NOTE_ATTRIB;
2692 }
2693
2694 /* Directory contents information just becomes NOTE_WRITE */
2695 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
2696 knote_events |= NOTE_WRITE;
2697 }
2698
2699 if (knote_events) {
2700 lock_vnode_and_post(vp, knote_events);
2701 #if CONFIG_FSE
2702 if (vap != NULL) {
2703 create_fsevent_from_kevent(vp, events, vap);
2704 }
2705 #else
2706 (void)vap;
2707 #endif
2708 }
2709
2710 return 0;
2711 }
2712
2713
2714
2715 int
2716 vnode_isdyldsharedcache(vnode_t vp)
2717 {
2718 return ((vp->v_flag & VSHARED_DYLD) ? 1 : 0);
2719 }
2720
2721
2722 /*
2723 * For a filesystem that isn't tracking its own vnode watchers:
2724 * check whether a vnode is being monitored.
2725 */
2726 int
2727 vnode_ismonitored(vnode_t vp) {
2728 return (vp->v_knotes.slh_first != NULL);
2729 }
2730
2731 /*
2732 * Initialize a struct vnode_attr and activate the attributes required
2733 * by the vnode_notify() call.
2734 */
2735 int
2736 vfs_get_notify_attributes(struct vnode_attr *vap)
2737 {
2738 VATTR_INIT(vap);
2739 vap->va_active = VNODE_NOTIFY_ATTRS;
2740 return 0;
2741 }
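
/*
 * A minimal filesystem-side sketch (not part of the original file) of the
 * notification upcall: gather attributes only if somebody is watching the
 * vnode, then report the event through vnode_notify().  The
 * 'myfs_fill_notify_attrs' helper is hypothetical.
 */
#if 0
static void
example_post_attrib_event(vnode_t vp)
{
	struct vnode_attr va;

	if (!vnode_ismonitored(vp))
		return;

	vfs_get_notify_attributes(&va);
	/* hypothetical helper: populate the attributes active in va.va_active */
	myfs_fill_notify_attrs(vp, &va);

	(void) vnode_notify(vp, VNODE_EVENT_ATTRIB, &va);
}
#endif /* 0 */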
2742
2743 #if CONFIG_TRIGGERS
2744 int
2745 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
2746 {
2747 int error;
2748 mount_t mp;
2749
2750 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
2751 if (mp == NULL) {
2752 return ENOENT;
2753 }
2754
2755 error = vfs_busy(mp, LK_NOWAIT);
2756 mount_iterdrop(mp);
2757
2758 if (error != 0) {
2759 return ENOENT;
2760 }
2761
2762 mount_lock(mp);
2763 if (mp->mnt_triggercallback != NULL) {
2764 error = EBUSY;
2765 mount_unlock(mp);
2766 goto out;
2767 }
2768
2769 mp->mnt_triggercallback = vtc;
2770 mp->mnt_triggerdata = data;
2771 mount_unlock(mp);
2772
2773 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
2774
2775 out:
2776 vfs_unbusy(mp);
2777 return error;
2778 }
2779 #endif /* CONFIG_TRIGGERS */
2780
2781 /*
2782 * Definition of vnode operations.
2783 */
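
/*
 * The VNOP_* wrappers below dispatch through vp->v_op, the per-filesystem
 * operation vector built from vnodeopv_entry_desc tables at registration
 * time.  A minimal sketch (not part of the original file) of how a
 * filesystem typically declares such a table; the 'myfs_vnop_*' handlers
 * are hypothetical.
 */
#if 0
int (**myfs_vnodeop_p)(void *);

static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
	{ &vnop_default_desc, (int (*)(void *))vn_default_error },
	{ &vnop_lookup_desc,  (int (*)(void *))myfs_vnop_lookup },
	{ &vnop_open_desc,    (int (*)(void *))myfs_vnop_open },
	{ &vnop_close_desc,   (int (*)(void *))myfs_vnop_close },
	{ NULL, NULL }
};

struct vnodeopv_desc myfs_vnodeop_opv_desc =
	{ &myfs_vnodeop_p, myfs_vnodeop_entries };
#endif /* 0 */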
2784
2785 #if 0
2786 /*
2787 *#
2788 *#% lookup dvp L ? ?
2789 *#% lookup vpp - L -
2790 */
2791 struct vnop_lookup_args {
2792 struct vnodeop_desc *a_desc;
2793 vnode_t a_dvp;
2794 vnode_t *a_vpp;
2795 struct componentname *a_cnp;
2796 vfs_context_t a_context;
2797 };
2798 #endif /* 0*/
2799
2800 /*
2801 * Returns: 0 Success
2802 * lock_fsnode:ENOENT No such file or directory [only for VFS
2803 * that is not thread safe & vnode is
2804 * currently being/has been terminated]
2805 * <vfs_lookup>:ENAMETOOLONG
2806 * <vfs_lookup>:ENOENT
2807 * <vfs_lookup>:EJUSTRETURN
2808 * <vfs_lookup>:EPERM
2809 * <vfs_lookup>:EISDIR
2810 * <vfs_lookup>:ENOTDIR
2811 * <vfs_lookup>:???
2812 *
2813 * Note: The return codes from the underlying VFS's lookup routine can't
2814 * be fully enumerated here, since third party VFS authors may not
2815 * limit their error returns to the ones documented here, even
2816 * though this may result in some programs functioning incorrectly.
2817 *
2818 * The return codes documented above are those which may currently
2819 * be returned by HFS from hfs_lookup, not including additional
2820 * error code which may be propagated from underlying routines.
2821 */
2822 errno_t
2823 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
2824 {
2825 int _err;
2826 struct vnop_lookup_args a;
2827
2828 a.a_desc = &vnop_lookup_desc;
2829 a.a_dvp = dvp;
2830 a.a_vpp = vpp;
2831 a.a_cnp = cnp;
2832 a.a_context = ctx;
2833
2834 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
2835 if (_err == 0 && *vpp) {
2836 DTRACE_FSINFO(lookup, vnode_t, *vpp);
2837 }
2838
2839 return (_err);
2840 }
2841
2842 #if 0
2843 struct vnop_compound_open_args {
2844 struct vnodeop_desc *a_desc;
2845 vnode_t a_dvp;
2846 vnode_t *a_vpp;
2847 struct componentname *a_cnp;
2848 int32_t a_flags;
2849 int32_t a_fmode;
2850 struct vnode_attr *a_vap;
2851 vfs_context_t a_context;
2852 void *a_reserved;
2853 };
2854 #endif /* 0 */
2855
2856 int
2857 VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
2858 {
2859 int _err;
2860 struct vnop_compound_open_args a;
2861 int did_create = 0;
2862 int want_create;
2863 uint32_t tmp_status = 0;
2864 struct componentname *cnp = &ndp->ni_cnd;
2865
2866 want_create = (flags & O_CREAT);
2867
2868 a.a_desc = &vnop_compound_open_desc;
2869 a.a_dvp = dvp;
2870 a.a_vpp = vpp; /* Could be NULL */
2871 a.a_cnp = cnp;
2872 a.a_flags = flags;
2873 a.a_fmode = fmode;
2874 a.a_status = (statusp != NULL) ? statusp : &tmp_status;
2875 a.a_vap = vap;
2876 a.a_context = ctx;
2877 a.a_open_create_authorizer = vn_authorize_create;
2878 a.a_open_existing_authorizer = vn_authorize_open_existing;
2879 a.a_reserved = NULL;
2880
2881 if (dvp == NULLVP) {
2882 panic("No dvp?");
2883 }
2884 if (want_create && !vap) {
2885 panic("Want create, but no vap?");
2886 }
2887 if (!want_create && vap) {
2888 panic("Don't want create, but have a vap?");
2889 }
2890
2891 _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
2892 if (want_create) {
2893 if (_err == 0 && *vpp) {
2894 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
2895 } else {
2896 DTRACE_FSINFO(compound_open, vnode_t, dvp);
2897 }
2898 } else {
2899 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
2900 }
2901
2902 did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
2903
2904 if (did_create && !want_create) {
2905 panic("Filesystem did a create, even though none was requested?");
2906 }
2907
2908 if (did_create) {
2909 #if CONFIG_APPLEDOUBLE
2910 if (!NATIVE_XATTR(dvp)) {
2911 /*
2912 * Remove stale Apple Double file (if any).
2913 */
2914 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
2915 }
2916 #endif /* CONFIG_APPLEDOUBLE */
2917 /* On create, provide kqueue notification */
2918 post_event_if_success(dvp, _err, NOTE_WRITE);
2919 }
2920
2921 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
2922 #if 0 /* FSEvents... */
2923 if (*vpp && _err && _err != EKEEPLOOKING) {
2924 vnode_put(*vpp);
2925 *vpp = NULLVP;
2926 }
2927 #endif /* 0 */
2928
2929 return (_err);
2930
2931 }
2932
2933 #if 0
2934 struct vnop_create_args {
2935 struct vnodeop_desc *a_desc;
2936 vnode_t a_dvp;
2937 vnode_t *a_vpp;
2938 struct componentname *a_cnp;
2939 struct vnode_attr *a_vap;
2940 vfs_context_t a_context;
2941 };
2942 #endif /* 0*/
2943 errno_t
2944 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
2945 {
2946 int _err;
2947 struct vnop_create_args a;
2948
2949 a.a_desc = &vnop_create_desc;
2950 a.a_dvp = dvp;
2951 a.a_vpp = vpp;
2952 a.a_cnp = cnp;
2953 a.a_vap = vap;
2954 a.a_context = ctx;
2955
2956 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
2957 if (_err == 0 && *vpp) {
2958 DTRACE_FSINFO(create, vnode_t, *vpp);
2959 }
2960
2961 #if CONFIG_APPLEDOUBLE
2962 if (_err == 0 && !NATIVE_XATTR(dvp)) {
2963 /*
2964 * Remove stale Apple Double file (if any).
2965 */
2966 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
2967 }
2968 #endif /* CONFIG_APPLEDOUBLE */
2969
2970 post_event_if_success(dvp, _err, NOTE_WRITE);
2971
2972 return (_err);
2973 }
2974
2975 #if 0
2976 /*
2977 *#
2978 *#% whiteout dvp L L L
2979 *#% whiteout cnp - - -
2980 *#% whiteout flag - - -
2981 *#
2982 */
2983 struct vnop_whiteout_args {
2984 struct vnodeop_desc *a_desc;
2985 vnode_t a_dvp;
2986 struct componentname *a_cnp;
2987 int a_flags;
2988 vfs_context_t a_context;
2989 };
2990 #endif /* 0*/
2991 errno_t
2992 VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
2993 __unused int flags, __unused vfs_context_t ctx)
2994 {
2995 return (ENOTSUP); // XXX OBSOLETE
2996 }
2997
2998 #if 0
2999 /*
3000 *#
3001 *#% mknod dvp L U U
3002 *#% mknod vpp - X -
3003 *#
3004 */
3005 struct vnop_mknod_args {
3006 struct vnodeop_desc *a_desc;
3007 vnode_t a_dvp;
3008 vnode_t *a_vpp;
3009 struct componentname *a_cnp;
3010 struct vnode_attr *a_vap;
3011 vfs_context_t a_context;
3012 };
3013 #endif /* 0*/
3014 errno_t
3015 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3016 {
3017
3018 int _err;
3019 struct vnop_mknod_args a;
3020
3021 a.a_desc = &vnop_mknod_desc;
3022 a.a_dvp = dvp;
3023 a.a_vpp = vpp;
3024 a.a_cnp = cnp;
3025 a.a_vap = vap;
3026 a.a_context = ctx;
3027
3028 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3029 if (_err == 0 && *vpp) {
3030 DTRACE_FSINFO(mknod, vnode_t, *vpp);
3031 }
3032
3033 post_event_if_success(dvp, _err, NOTE_WRITE);
3034
3035 return (_err);
3036 }
3037
3038 #if 0
3039 /*
3040 *#
3041 *#% open vp L L L
3042 *#
3043 */
3044 struct vnop_open_args {
3045 struct vnodeop_desc *a_desc;
3046 vnode_t a_vp;
3047 int a_mode;
3048 vfs_context_t a_context;
3049 };
3050 #endif /* 0*/
3051 errno_t
3052 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3053 {
3054 int _err;
3055 struct vnop_open_args a;
3056
3057 if (ctx == NULL) {
3058 ctx = vfs_context_current();
3059 }
3060 a.a_desc = &vnop_open_desc;
3061 a.a_vp = vp;
3062 a.a_mode = mode;
3063 a.a_context = ctx;
3064
3065 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3066 DTRACE_FSINFO(open, vnode_t, vp);
3067
3068 return (_err);
3069 }
3070
3071 #if 0
3072 /*
3073 *#
3074 *#% close vp U U U
3075 *#
3076 */
3077 struct vnop_close_args {
3078 struct vnodeop_desc *a_desc;
3079 vnode_t a_vp;
3080 int a_fflag;
3081 vfs_context_t a_context;
3082 };
3083 #endif /* 0*/
3084 errno_t
3085 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3086 {
3087 int _err;
3088 struct vnop_close_args a;
3089
3090 if (ctx == NULL) {
3091 ctx = vfs_context_current();
3092 }
3093 a.a_desc = &vnop_close_desc;
3094 a.a_vp = vp;
3095 a.a_fflag = fflag;
3096 a.a_context = ctx;
3097
3098 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3099 DTRACE_FSINFO(close, vnode_t, vp);
3100
3101 return (_err);
3102 }
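
/*
 * VNOP_OPEN and VNOP_CLOSE are normally reached through higher-level KPIs
 * rather than called directly.  A minimal sketch (not part of the original
 * file) of the usual kext-side pairing, assuming 'path' names an existing
 * file readable by the caller.
 */
#if 0
static int
example_open_and_close(const char *path, vfs_context_t ctx)
{
	vnode_t vp = NULLVP;
	int error;

	if ((error = vnode_open(path, FREAD, 0, 0, &vp, ctx)) != 0)
		return (error);

	/* ... read from or inspect the vnode here ... */

	return (vnode_close(vp, FREAD, ctx));
}
#endif /* 0 */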
3103
3104 #if 0
3105 /*
3106 *#
3107 *#% access vp L L L
3108 *#
3109 */
3110 struct vnop_access_args {
3111 struct vnodeop_desc *a_desc;
3112 vnode_t a_vp;
3113 int a_action;
3114 vfs_context_t a_context;
3115 };
3116 #endif /* 0*/
3117 errno_t
3118 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3119 {
3120 int _err;
3121 struct vnop_access_args a;
3122
3123 if (ctx == NULL) {
3124 ctx = vfs_context_current();
3125 }
3126 a.a_desc = &vnop_access_desc;
3127 a.a_vp = vp;
3128 a.a_action = action;
3129 a.a_context = ctx;
3130
3131 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3132 DTRACE_FSINFO(access, vnode_t, vp);
3133
3134 return (_err);
3135 }
3136
3137 #if 0
3138 /*
3139 *#
3140 *#% getattr vp = = =
3141 *#
3142 */
3143 struct vnop_getattr_args {
3144 struct vnodeop_desc *a_desc;
3145 vnode_t a_vp;
3146 struct vnode_attr *a_vap;
3147 vfs_context_t a_context;
3148 };
3149 #endif /* 0*/
3150 errno_t
3151 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3152 {
3153 int _err;
3154 struct vnop_getattr_args a;
3155
3156 a.a_desc = &vnop_getattr_desc;
3157 a.a_vp = vp;
3158 a.a_vap = vap;
3159 a.a_context = ctx;
3160
3161 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3162 DTRACE_FSINFO(getattr, vnode_t, vp);
3163
3164 return (_err);
3165 }
3166
3167 #if 0
3168 /*
3169 *#
3170 *#% setattr vp L L L
3171 *#
3172 */
3173 struct vnop_setattr_args {
3174 struct vnodeop_desc *a_desc;
3175 vnode_t a_vp;
3176 struct vnode_attr *a_vap;
3177 vfs_context_t a_context;
3178 };
3179 #endif /* 0*/
3180 errno_t
3181 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3182 {
3183 int _err;
3184 struct vnop_setattr_args a;
3185
3186 a.a_desc = &vnop_setattr_desc;
3187 a.a_vp = vp;
3188 a.a_vap = vap;
3189 a.a_context = ctx;
3190
3191 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3192 DTRACE_FSINFO(setattr, vnode_t, vp);
3193
3194 #if CONFIG_APPLEDOUBLE
3195 /*
3196 * Shadow uid/gid/mod change to extended attribute file.
3197 */
3198 if (_err == 0 && !NATIVE_XATTR(vp)) {
3199 struct vnode_attr va;
3200 int change = 0;
3201
3202 VATTR_INIT(&va);
3203 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3204 VATTR_SET(&va, va_uid, vap->va_uid);
3205 change = 1;
3206 }
3207 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3208 VATTR_SET(&va, va_gid, vap->va_gid);
3209 change = 1;
3210 }
3211 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3212 VATTR_SET(&va, va_mode, vap->va_mode);
3213 change = 1;
3214 }
3215 if (change) {
3216 vnode_t dvp;
3217 const char *vname;
3218
3219 dvp = vnode_getparent(vp);
3220 vname = vnode_getname(vp);
3221
3222 xattrfile_setattr(dvp, vname, &va, ctx);
3223 if (dvp != NULLVP)
3224 vnode_put(dvp);
3225 if (vname != NULL)
3226 vnode_putname(vname);
3227 }
3228 }
3229 #endif /* CONFIG_APPLEDOUBLE */
3230
3231 /*
3232 * If we have changed any of the things about the file that are likely
3233 * to result in changes to authorization results, blow the vnode auth
3234 * cache
3235 */
3236 if (_err == 0 && (
3237 VATTR_IS_SUPPORTED(vap, va_mode) ||
3238 VATTR_IS_SUPPORTED(vap, va_uid) ||
3239 VATTR_IS_SUPPORTED(vap, va_gid) ||
3240 VATTR_IS_SUPPORTED(vap, va_flags) ||
3241 VATTR_IS_SUPPORTED(vap, va_acl) ||
3242 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
3243 VATTR_IS_SUPPORTED(vap, va_guuid))) {
3244 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3245
3246 #if NAMEDSTREAMS
3247 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3248 vnode_t svp;
3249 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3250 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3251 vnode_put(svp);
3252 }
3253 }
3254 #endif /* NAMEDSTREAMS */
3255 }
3256
3257
3258 post_event_if_success(vp, _err, NOTE_ATTRIB);
3259
3260 return (_err);
3261 }
3262
3263
3264 #if 0
3265 /*
3266 *#
3267 *#% read vp L L L
3268 *#
3269 */
3270 struct vnop_read_args {
3271 struct vnodeop_desc *a_desc;
3272 vnode_t a_vp;
3273 struct uio *a_uio;
3274 int a_ioflag;
3275 vfs_context_t a_context;
3276 };
3277 #endif /* 0*/
3278 errno_t
3279 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3280 {
3281 int _err;
3282 struct vnop_read_args a;
3283 #if CONFIG_DTRACE
3284 user_ssize_t resid = uio_resid(uio);
3285 #endif
3286
3287 if (ctx == NULL) {
3288 return EINVAL;
3289 }
3290
3291 a.a_desc = &vnop_read_desc;
3292 a.a_vp = vp;
3293 a.a_uio = uio;
3294 a.a_ioflag = ioflag;
3295 a.a_context = ctx;
3296
3297 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3298 DTRACE_FSINFO_IO(read,
3299 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3300
3301 return (_err);
3302 }
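
/*
 * VNOP_READ requires a prepared uio and an explicit context.  A minimal
 * sketch (not part of the original file) of driving it with a kernel
 * buffer, assuming the caller already holds an iocount on 'vp'.
 */
#if 0
static int
example_read_into_kernel_buffer(vnode_t vp, off_t offset, void *buf,
    size_t len, vfs_context_t ctx)
{
	uio_t uio;
	int error;

	if ((uio = uio_create(1, offset, UIO_SYSSPACE, UIO_READ)) == NULL)
		return (ENOMEM);

	if ((error = uio_addiov(uio, CAST_USER_ADDR_T(buf), len)) == 0)
		error = VNOP_READ(vp, uio, 0, ctx);

	/* uio_resid(uio) now reflects any bytes that were not transferred */
	uio_free(uio);
	return (error);
}
#endif /* 0 */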
3303
3304
3305 #if 0
3306 /*
3307 *#
3308 *#% write vp L L L
3309 *#
3310 */
3311 struct vnop_write_args {
3312 struct vnodeop_desc *a_desc;
3313 vnode_t a_vp;
3314 struct uio *a_uio;
3315 int a_ioflag;
3316 vfs_context_t a_context;
3317 };
3318 #endif /* 0*/
3319 errno_t
3320 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3321 {
3322 struct vnop_write_args a;
3323 int _err;
3324 #if CONFIG_DTRACE
3325 user_ssize_t resid = uio_resid(uio);
3326 #endif
3327
3328 if (ctx == NULL) {
3329 return EINVAL;
3330 }
3331
3332 a.a_desc = &vnop_write_desc;
3333 a.a_vp = vp;
3334 a.a_uio = uio;
3335 a.a_ioflag = ioflag;
3336 a.a_context = ctx;
3337
3338 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3339 DTRACE_FSINFO_IO(write,
3340 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3341
3342 post_event_if_success(vp, _err, NOTE_WRITE);
3343
3344 return (_err);
3345 }
3346
3347
3348 #if 0
3349 /*
3350 *#
3351 *#% ioctl vp U U U
3352 *#
3353 */
3354 struct vnop_ioctl_args {
3355 struct vnodeop_desc *a_desc;
3356 vnode_t a_vp;
3357 u_long a_command;
3358 caddr_t a_data;
3359 int a_fflag;
3360 vfs_context_t a_context;
3361 };
3362 #endif /* 0*/
3363 errno_t
3364 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3365 {
3366 int _err;
3367 struct vnop_ioctl_args a;
3368
3369 if (ctx == NULL) {
3370 ctx = vfs_context_current();
3371 }
3372
3373 /*
3374 * This check should probably have been put in the TTY code instead...
3375 *
3376 * We have to be careful about what we assume during startup and shutdown.
3377 * We have to be able to use the root filesystem's device vnode even when
3378 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3379 * structure. If there is no data pointer, it doesn't matter whether
3380 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZE)
3381 * which passes NULL for its data pointer can therefore be used during
3382 * mount or unmount of the root filesystem.
3383 *
3384 * Depending on what root filesystems need to do during mount/unmount, we
3385 * may need to loosen this check again in the future.
3386 */
3387 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3388 if (data != NULL && !vnode_vfs64bitready(vp)) {
3389 return(ENOTTY);
3390 }
3391 }
3392
3393 a.a_desc = &vnop_ioctl_desc;
3394 a.a_vp = vp;
3395 a.a_command = command;
3396 a.a_data = data;
3397 a.a_fflag = fflag;
3398 a.a_context= ctx;
3399
3400 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
3401 DTRACE_FSINFO(ioctl, vnode_t, vp);
3402
3403 return (_err);
3404 }
3405
3406
3407 #if 0
3408 /*
3409 *#
3410 *#% select vp U U U
3411 *#
3412 */
3413 struct vnop_select_args {
3414 struct vnodeop_desc *a_desc;
3415 vnode_t a_vp;
3416 int a_which;
3417 int a_fflags;
3418 void *a_wql;
3419 vfs_context_t a_context;
3420 };
3421 #endif /* 0*/
3422 errno_t
3423 VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx)
3424 {
3425 int _err;
3426 struct vnop_select_args a;
3427
3428 if (ctx == NULL) {
3429 ctx = vfs_context_current();
3430 }
3431 a.a_desc = &vnop_select_desc;
3432 a.a_vp = vp;
3433 a.a_which = which;
3434 a.a_fflags = fflags;
3435 a.a_context = ctx;
3436 a.a_wql = wql;
3437
3438 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3439 DTRACE_FSINFO(select, vnode_t, vp);
3440
3441 return (_err);
3442 }
3443
3444
3445 #if 0
3446 /*
3447 *#
3448 *#% exchange fvp L L L
3449 *#% exchange tvp L L L
3450 *#
3451 */
3452 struct vnop_exchange_args {
3453 struct vnodeop_desc *a_desc;
3454 vnode_t a_fvp;
3455 vnode_t a_tvp;
3456 int a_options;
3457 vfs_context_t a_context;
3458 };
3459 #endif /* 0*/
3460 errno_t
3461 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3462 {
3463 int _err;
3464 struct vnop_exchange_args a;
3465
3466 a.a_desc = &vnop_exchange_desc;
3467 a.a_fvp = fvp;
3468 a.a_tvp = tvp;
3469 a.a_options = options;
3470 a.a_context = ctx;
3471
3472 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3473 DTRACE_FSINFO(exchange, vnode_t, fvp);
3474
3475 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3476 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3477 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3478
3479 return (_err);
3480 }
3481
3482
3483 #if 0
3484 /*
3485 *#
3486 *#% revoke vp U U U
3487 *#
3488 */
3489 struct vnop_revoke_args {
3490 struct vnodeop_desc *a_desc;
3491 vnode_t a_vp;
3492 int a_flags;
3493 vfs_context_t a_context;
3494 };
3495 #endif /* 0*/
3496 errno_t
3497 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
3498 {
3499 struct vnop_revoke_args a;
3500 int _err;
3501
3502 a.a_desc = &vnop_revoke_desc;
3503 a.a_vp = vp;
3504 a.a_flags = flags;
3505 a.a_context = ctx;
3506
3507 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3508 DTRACE_FSINFO(revoke, vnode_t, vp);
3509
3510 return (_err);
3511 }
3512
3513
3514 #if 0
3515 /*
3516 *#
3517 *# mmap - vp U U U
3518 *#
3519 */
3520 struct vnop_mmap_args {
3521 struct vnodeop_desc *a_desc;
3522 vnode_t a_vp;
3523 int a_fflags;
3524 vfs_context_t a_context;
3525 };
3526 #endif /* 0*/
3527 errno_t
3528 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
3529 {
3530 int _err;
3531 struct vnop_mmap_args a;
3532
3533 a.a_desc = &vnop_mmap_desc;
3534 a.a_vp = vp;
3535 a.a_fflags = fflags;
3536 a.a_context = ctx;
3537
3538 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3539 DTRACE_FSINFO(mmap, vnode_t, vp);
3540
3541 return (_err);
3542 }
3543
3544
3545 #if 0
3546 /*
3547 *#
3548 *# mnomap - vp U U U
3549 *#
3550 */
3551 struct vnop_mnomap_args {
3552 struct vnodeop_desc *a_desc;
3553 vnode_t a_vp;
3554 vfs_context_t a_context;
3555 };
3556 #endif /* 0*/
3557 errno_t
3558 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
3559 {
3560 int _err;
3561 struct vnop_mnomap_args a;
3562
3563 a.a_desc = &vnop_mnomap_desc;
3564 a.a_vp = vp;
3565 a.a_context = ctx;
3566
3567 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3568 DTRACE_FSINFO(mnomap, vnode_t, vp);
3569
3570 return (_err);
3571 }
3572
3573
3574 #if 0
3575 /*
3576 *#
3577 *#% fsync vp L L L
3578 *#
3579 */
3580 struct vnop_fsync_args {
3581 struct vnodeop_desc *a_desc;
3582 vnode_t a_vp;
3583 int a_waitfor;
3584 vfs_context_t a_context;
3585 };
3586 #endif /* 0*/
3587 errno_t
3588 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
3589 {
3590 struct vnop_fsync_args a;
3591 int _err;
3592
3593 a.a_desc = &vnop_fsync_desc;
3594 a.a_vp = vp;
3595 a.a_waitfor = waitfor;
3596 a.a_context = ctx;
3597
3598 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3599 DTRACE_FSINFO(fsync, vnode_t, vp);
3600
3601 return (_err);
3602 }
3603
3604
3605 #if 0
3606 /*
3607 *#
3608 *#% remove dvp L U U
3609 *#% remove vp L U U
3610 *#
3611 */
3612 struct vnop_remove_args {
3613 struct vnodeop_desc *a_desc;
3614 vnode_t a_dvp;
3615 vnode_t a_vp;
3616 struct componentname *a_cnp;
3617 int a_flags;
3618 vfs_context_t a_context;
3619 };
3620 #endif /* 0*/
3621 errno_t
3622 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
3623 {
3624 int _err;
3625 struct vnop_remove_args a;
3626
3627 a.a_desc = &vnop_remove_desc;
3628 a.a_dvp = dvp;
3629 a.a_vp = vp;
3630 a.a_cnp = cnp;
3631 a.a_flags = flags;
3632 a.a_context = ctx;
3633
3634 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3635 DTRACE_FSINFO(remove, vnode_t, vp);
3636
3637 if (_err == 0) {
3638 vnode_setneedinactive(vp);
3639 #if CONFIG_APPLEDOUBLE
3640 if ( !(NATIVE_XATTR(dvp)) ) {
3641 /*
3642 * Remove any associated extended attribute file (._ AppleDouble file).
3643 */
3644 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
3645 }
3646 #endif /* CONFIG_APPLEDOUBLE */
3647 }
3648
3649 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
3650 post_event_if_success(dvp, _err, NOTE_WRITE);
3651
3652 return (_err);
3653 }
3654
3655 int
3656 VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
3657 {
3658 int _err;
3659 struct vnop_compound_remove_args a;
3660 int no_vp = (*vpp == NULLVP);
3661
3662 a.a_desc = &vnop_compound_remove_desc;
3663 a.a_dvp = dvp;
3664 a.a_vpp = vpp;
3665 a.a_cnp = &ndp->ni_cnd;
3666 a.a_flags = flags;
3667 a.a_vap = vap;
3668 a.a_context = ctx;
3669 a.a_remove_authorizer = vn_authorize_unlink;
3670
3671 _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
3672 if (_err == 0 && *vpp) {
3673 DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
3674 } else {
3675 DTRACE_FSINFO(compound_remove, vnode_t, dvp);
3676 }
3677 if (_err == 0) {
3678 vnode_setneedinactive(*vpp);
3679 #if CONFIG_APPLEDOUBLE
3680 if ( !(NATIVE_XATTR(dvp)) ) {
3681 /*
3682 * Remove any associated extended attribute file (._ AppleDouble file).
3683 */
3684 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
3685 }
3686 #endif /* CONFIG_APPLEDOUBLE */
3687 }
3688
3689 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
3690 post_event_if_success(dvp, _err, NOTE_WRITE);
3691
3692 if (no_vp) {
3693 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
3694 if (*vpp && _err && _err != EKEEPLOOKING) {
3695 vnode_put(*vpp);
3696 *vpp = NULLVP;
3697 }
3698 }
3699
3700 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
3701
3702 return (_err);
3703 }
3704
3705 #if 0
3706 /*
3707 *#
3708 *#% link vp U U U
3709 *#% link tdvp L U U
3710 *#
3711 */
3712 struct vnop_link_args {
3713 struct vnodeop_desc *a_desc;
3714 vnode_t a_vp;
3715 vnode_t a_tdvp;
3716 struct componentname *a_cnp;
3717 vfs_context_t a_context;
3718 };
3719 #endif /* 0*/
3720 errno_t
3721 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
3722 {
3723 int _err;
3724 struct vnop_link_args a;
3725
3726 #if CONFIG_APPLEDOUBLE
3727 /*
3728 * For file systems with non-native extended attributes,
3729 * disallow linking to an existing "._" Apple Double file.
3730 */
3731 if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
3732 const char *vname;
3733
3734 vname = vnode_getname(vp);
3735 if (vname != NULL) {
3736 _err = 0;
3737 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
3738 _err = EPERM;
3739 }
3740 vnode_putname(vname);
3741 if (_err)
3742 return (_err);
3743 }
3744 }
3745 #endif /* CONFIG_APPLEDOUBLE */
3746
3747 a.a_desc = &vnop_link_desc;
3748 a.a_vp = vp;
3749 a.a_tdvp = tdvp;
3750 a.a_cnp = cnp;
3751 a.a_context = ctx;
3752
3753 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
3754 DTRACE_FSINFO(link, vnode_t, vp);
3755
3756 post_event_if_success(vp, _err, NOTE_LINK);
3757 post_event_if_success(tdvp, _err, NOTE_WRITE);
3758
3759 return (_err);
3760 }
3761
3762 errno_t
3763 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
3764 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
3765 uint32_t flags, vfs_context_t ctx)
3766 {
3767 int _err;
3768 struct nameidata *fromnd = NULL;
3769 struct nameidata *tond = NULL;
3770 #if CONFIG_APPLEDOUBLE
3771 vnode_t src_attr_vp = NULLVP;
3772 vnode_t dst_attr_vp = NULLVP;
3773 char smallname1[48];
3774 char smallname2[48];
3775 char *xfromname = NULL;
3776 char *xtoname = NULL;
3777 #endif /* CONFIG_APPLEDOUBLE */
3778 int batched;
3779 uint32_t tdfflags; // Target directory file flags
3780
3781 batched = vnode_compound_rename_available(fdvp);
3782
3783 if (!batched) {
3784 if (*fvpp == NULLVP)
3785 panic("Not batched, and no fvp?");
3786 }
3787
3788 #if CONFIG_SECLUDED_RENAME
3789 if ((fcnp->cn_flags & CN_SECLUDE_RENAME) &&
3790 (((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) == 0)) {
3791 return ENOTSUP;
3792 }
3793 #endif
3794
3795 #if CONFIG_APPLEDOUBLE
3796 /*
3797 * We need to preflight any potential AppleDouble file for the source file
3798 * before doing the rename operation, since we could potentially be doing
3799 * this operation on a network filesystem, and would end up duplicating
3800 * the work. Also, save the source and destination names. Skip it if the
3801 * source has a "._" prefix.
3802 */
3803
3804 if (!NATIVE_XATTR(fdvp) &&
3805 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
3806 size_t len;
3807 int error;
3808
3809 /* Get source attribute file name. */
3810 len = fcnp->cn_namelen + 3;
3811 if (len > sizeof(smallname1)) {
3812 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
3813 } else {
3814 xfromname = &smallname1[0];
3815 }
3816 strlcpy(xfromname, "._", min(sizeof smallname1, len));
3817 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
3818 xfromname[len-1] = '\0';
3819
3820 /* Get destination attribute file name. */
3821 len = tcnp->cn_namelen + 3;
3822 if (len > sizeof(smallname2)) {
3823 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
3824 } else {
3825 xtoname = &smallname2[0];
3826 }
3827 strlcpy(xtoname, "._", min(sizeof smallname2, len));
3828 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
3829 xtoname[len-1] = '\0';
3830
3831 /*
3832 * Look up source attribute file, keep reference on it if exists.
3833 * Note that we do the namei with the nameiop of RENAME, which is different than
3834 * in the rename syscall. It's OK if the source file does not exist, since this
3835 * is only for AppleDouble files.
3836 */
3837 if (xfromname != NULL) {
3838 MALLOC(fromnd, struct nameidata *, sizeof (struct nameidata), M_TEMP, M_WAITOK);
3839 NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
3840 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
3841 fromnd->ni_dvp = fdvp;
3842 error = namei(fromnd);
3843
3844 /*
3845 * If there was an error looking up source attribute file,
3846 * we'll behave as if it didn't exist.
3847 */
3848
3849 if (error == 0) {
3850 if (fromnd->ni_vp) {
3851 /* src_attr_vp indicates need to call vnode_put / nameidone later */
3852 src_attr_vp = fromnd->ni_vp;
3853
3854 if (fromnd->ni_vp->v_type != VREG) {
3855 src_attr_vp = NULLVP;
3856 vnode_put(fromnd->ni_vp);
3857 }
3858 }
3859 /*
3860 * Either we got an invalid vnode type (not a regular file) or the namei lookup
3861 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
3862 * have a vnode here, so we drop our namei buffer for the source attribute file
3863 */
3864 if (src_attr_vp == NULLVP) {
3865 nameidone(fromnd);
3866 }
3867 }
3868 }
3869 }
3870 #endif /* CONFIG_APPLEDOUBLE */
3871
3872 if (batched) {
3873 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
3874 if (_err != 0) {
3875 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
3876 }
3877 } else {
3878 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
3879 }
3880
3881 /*
3882 * If moved to a new directory that is restricted,
3883 * set the restricted flag on the item moved.
3884 */
3885 if (_err == 0) {
3886 _err = vnode_flags(tdvp, &tdfflags, ctx);
3887 if (_err == 0 && (tdfflags & SF_RESTRICTED)) {
3888 uint32_t fflags;
3889 _err = vnode_flags(*fvpp, &fflags, ctx);
3890 if (_err == 0 && !(fflags & SF_RESTRICTED)) {
3891 struct vnode_attr va;
3892 VATTR_INIT(&va);
3893 VATTR_SET(&va, va_flags, fflags | SF_RESTRICTED);
3894 _err = vnode_setattr(*fvpp, &va, ctx);
3895 }
3896 }
3897 }
3898
3899 #if CONFIG_MACF
3900 if (_err == 0) {
3901 mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
3902 }
3903 #endif
3904
3905 #if CONFIG_APPLEDOUBLE
3906 /*
3907 * Rename any associated extended attribute file (._ AppleDouble file).
3908 */
3909 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
3910 int error = 0;
3911
3912 /*
3913 * Get destination attribute file vnode.
3914 * Note that tdvp already has an iocount reference. Make sure to check that we
3915 * get a valid vnode from namei.
3916 */
3917 MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK);
3918 NDINIT(tond, RENAME, OP_RENAME,
3919 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
3920 CAST_USER_ADDR_T(xtoname), ctx);
3921 tond->ni_dvp = tdvp;
3922 error = namei(tond);
3923
3924 if (error)
3925 goto ad_error;
3926
3927 if (tond->ni_vp) {
3928 dst_attr_vp = tond->ni_vp;
3929 }
3930
3931 if (src_attr_vp) {
3932 const char *old_name = src_attr_vp->v_name;
3933 vnode_t old_parent = src_attr_vp->v_parent;
3934
3935 if (batched) {
3936 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
3937 tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
3938 0, ctx);
3939 } else {
3940 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
3941 tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
3942 }
3943
3944 if (error == 0 && old_name == src_attr_vp->v_name &&
3945 old_parent == src_attr_vp->v_parent) {
3946 int update_flags = VNODE_UPDATE_NAME;
3947
3948 if (fdvp != tdvp)
3949 update_flags |= VNODE_UPDATE_PARENT;
3950
3951 if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
3952 vnode_update_identity(src_attr_vp, tdvp,
3953 tond->ni_cnd.cn_nameptr,
3954 tond->ni_cnd.cn_namelen,
3955 tond->ni_cnd.cn_hash,
3956 update_flags);
3957 }
3958 }
3959
3960 /* kevent notifications for moving resource files
3961 * _err is zero if we're here, so no need to notify directories, code
3962 * below will do that. only need to post the rename on the source and
3963 * possibly a delete on the dest
3964 */
3965 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
3966 if (dst_attr_vp) {
3967 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
3968 }
3969
3970 } else if (dst_attr_vp) {
3971 /*
3972 * Just delete destination attribute file vnode if it exists, since
3973 * we didn't have a source attribute file.
3974 * Note that tdvp already has an iocount reference.
3975 */
3976
3977 struct vnop_remove_args args;
3978
3979 args.a_desc = &vnop_remove_desc;
3980 args.a_dvp = tdvp;
3981 args.a_vp = dst_attr_vp;
3982 args.a_cnp = &tond->ni_cnd;
3983 args.a_context = ctx;
3984
3985 if (error == 0) {
3986 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
3987
3988 if (error == 0)
3989 vnode_setneedinactive(dst_attr_vp);
3990 }
3991
3992 /* kevent notification for deleting the destination's attribute file
3993 * if it existed. Only need to post the delete on the destination, since
3994 * the code below will handle the directories.
3995 */
3996 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
3997 }
3998 }
3999 ad_error:
4000 if (src_attr_vp) {
4001 vnode_put(src_attr_vp);
4002 nameidone(fromnd);
4003 }
4004 if (dst_attr_vp) {
4005 vnode_put(dst_attr_vp);
4006 nameidone(tond);
4007 }
4008 if (xfromname && xfromname != &smallname1[0]) {
4009 FREE(xfromname, M_TEMP);
4010 }
4011 if (xtoname && xtoname != &smallname2[0]) {
4012 FREE(xtoname, M_TEMP);
4013 }
4014 #endif /* CONFIG_APPLEDOUBLE */
4015 if (fromnd) {
4016 FREE(fromnd, M_TEMP);
4017 }
4018 if (tond) {
4019 FREE(tond, M_TEMP);
4020 }
4021 return _err;
4022 }
4023
4024
4025 #if 0
4026 /*
4027 *#
4028 *#% rename fdvp U U U
4029 *#% rename fvp U U U
4030 *#% rename tdvp L U U
4031 *#% rename tvp X U U
4032 *#
4033 */
4034 struct vnop_rename_args {
4035 struct vnodeop_desc *a_desc;
4036 vnode_t a_fdvp;
4037 vnode_t a_fvp;
4038 struct componentname *a_fcnp;
4039 vnode_t a_tdvp;
4040 vnode_t a_tvp;
4041 struct componentname *a_tcnp;
4042 vfs_context_t a_context;
4043 };
4044 #endif /* 0*/
4045 errno_t
4046 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4047 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4048 vfs_context_t ctx)
4049 {
4050 int _err = 0;
4051 int events;
4052 struct vnop_rename_args a;
4053
4054 a.a_desc = &vnop_rename_desc;
4055 a.a_fdvp = fdvp;
4056 a.a_fvp = fvp;
4057 a.a_fcnp = fcnp;
4058 a.a_tdvp = tdvp;
4059 a.a_tvp = tvp;
4060 a.a_tcnp = tcnp;
4061 a.a_context = ctx;
4062
4063 /* do the rename of the main file. */
4064 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4065 DTRACE_FSINFO(rename, vnode_t, fdvp);
4066
4067 if (_err == 0) {
4068 if (tvp && tvp != fvp)
4069 vnode_setneedinactive(tvp);
4070 }
4071
4072 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4073 if (_err == 0) {
4074 events = NOTE_WRITE;
4075 if (vnode_isdir(fvp)) {
4076 /* Link count on dir changed only if we are moving a dir and...
4077 * --Moved to new dir, not overwriting there
4078 * --Kept in same dir and DID overwrite
4079 */
4080 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4081 events |= NOTE_LINK;
4082 }
4083 }
4084
4085 lock_vnode_and_post(fdvp, events);
4086 if (fdvp != tdvp) {
4087 lock_vnode_and_post(tdvp, events);
4088 }
4089
4090 /* If you're replacing the target, post a deletion for it */
4091 if (tvp)
4092 {
4093 lock_vnode_and_post(tvp, NOTE_DELETE);
4094 }
4095
4096 lock_vnode_and_post(fvp, NOTE_RENAME);
4097 }
4098
4099 return (_err);
4100 }
4101
4102 int
4103 VNOP_COMPOUND_RENAME(
4104 struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4105 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4106 uint32_t flags, vfs_context_t ctx)
4107 {
4108 int _err = 0;
4109 int events;
4110 struct vnop_compound_rename_args a;
4111 int no_fvp, no_tvp;
4112
4113 no_fvp = (*fvpp) == NULLVP;
4114 no_tvp = (*tvpp) == NULLVP;
4115
4116 a.a_desc = &vnop_compound_rename_desc;
4117
4118 a.a_fdvp = fdvp;
4119 a.a_fvpp = fvpp;
4120 a.a_fcnp = fcnp;
4121 a.a_fvap = fvap;
4122
4123 a.a_tdvp = tdvp;
4124 a.a_tvpp = tvpp;
4125 a.a_tcnp = tcnp;
4126 a.a_tvap = tvap;
4127
4128 a.a_flags = flags;
4129 a.a_context = ctx;
4130 a.a_rename_authorizer = vn_authorize_rename;
4131 a.a_reserved = NULL;
4132
4133 /* do the rename of the main file. */
4134 _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
4135 DTRACE_FSINFO(compound_rename, vnode_t, fdvp);
4136
4137 if (_err == 0) {
4138 if (*tvpp && *tvpp != *fvpp)
4139 vnode_setneedinactive(*tvpp);
4140 }
4141
4142 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4143 if (_err == 0 && *fvpp != *tvpp) {
4144 if (!*fvpp) {
4145 panic("No fvpp after compound rename?");
4146 }
4147
4148 events = NOTE_WRITE;
4149 if (vnode_isdir(*fvpp)) {
4150 /* Link count on dir changed only if we are moving a dir and...
4151 * --Moved to new dir, not overwriting there
4152 * --Kept in same dir and DID overwrite
4153 */
4154 if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
4155 events |= NOTE_LINK;
4156 }
4157 }
4158
4159 lock_vnode_and_post(fdvp, events);
4160 if (fdvp != tdvp) {
4161 lock_vnode_and_post(tdvp, events);
4162 }
4163
4164 /* If you're replacing the target, post a deletion for it */
4165 if (*tvpp)
4166 {
4167 lock_vnode_and_post(*tvpp, NOTE_DELETE);
4168 }
4169
4170 lock_vnode_and_post(*fvpp, NOTE_RENAME);
4171 }
4172
4173 if (no_fvp) {
4174 lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
4175 }
4176 if (no_tvp && *tvpp != NULLVP) {
4177 lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
4178 }
4179
4180 if (_err && _err != EKEEPLOOKING) {
4181 if (*fvpp) {
4182 vnode_put(*fvpp);
4183 *fvpp = NULLVP;
4184 }
4185 if (*tvpp) {
4186 vnode_put(*tvpp);
4187 *tvpp = NULLVP;
4188 }
4189 }
4190
4191 return (_err);
4192 }
4193
4194 int
4195 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4196 struct vnode_attr *vap, vfs_context_t ctx)
4197 {
4198 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4199 panic("Non-CREATE nameiop in vn_mkdir()?");
4200 }
4201
4202 if (vnode_compound_mkdir_available(dvp)) {
4203 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4204 } else {
4205 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4206 }
4207 }
4208
4209 #if 0
4210 /*
4211 *#
4212 *#% mkdir dvp L U U
4213 *#% mkdir vpp - L -
4214 *#
4215 */
4216 struct vnop_mkdir_args {
4217 struct vnodeop_desc *a_desc;
4218 vnode_t a_dvp;
4219 vnode_t *a_vpp;
4220 struct componentname *a_cnp;
4221 struct vnode_attr *a_vap;
4222 vfs_context_t a_context;
4223 };
4224 #endif /* 0*/
4225 errno_t
4226 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4227 struct vnode_attr *vap, vfs_context_t ctx)
4228 {
4229 int _err;
4230 struct vnop_mkdir_args a;
4231
4232 a.a_desc = &vnop_mkdir_desc;
4233 a.a_dvp = dvp;
4234 a.a_vpp = vpp;
4235 a.a_cnp = cnp;
4236 a.a_vap = vap;
4237 a.a_context = ctx;
4238
4239 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4240 if (_err == 0 && *vpp) {
4241 DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4242 }
4243 #if CONFIG_APPLEDOUBLE
4244 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4245 /*
4246 * Remove stale Apple Double file (if any).
4247 */
4248 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4249 }
4250 #endif /* CONFIG_APPLEDOUBLE */
4251
4252 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4253
4254 return (_err);
4255 }
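/*
 * Editorial sketch (not part of kpi_vfs.c): the wrapper above dispatches
 * through the vnode's v_op vector, which a file system populates when it
 * registers its vnode operations. The handler and table below show the
 * usual shape of that registration; all myfs_* identifiers and the
 * myfs_makenode() helper are hypothetical, while the descriptor and table
 * types are the standard <sys/vnode.h> KPI and vn_default_error is the
 * default error routine used by in-tree file systems.
 */
static int
myfs_vnop_mkdir(struct vnop_mkdir_args *ap)
{
    /*
     * On entry the caller holds an iocount on ap->a_dvp.  On success the
     * handler must return the new directory in *ap->a_vpp with an iocount
     * held for the caller; on failure *ap->a_vpp should be left NULL.
     */
    return myfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap,
        VDIR, ap->a_context);                  /* hypothetical helper */
}

static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
    { &vnop_default_desc, (int (*)(void *))vn_default_error },
    { &vnop_mkdir_desc,   (int (*)(void *))myfs_vnop_mkdir },
    { NULL, NULL }
};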
4256
4257 int
4258 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4259 struct vnode_attr *vap, vfs_context_t ctx)
4260 {
4261 int _err;
4262 struct vnop_compound_mkdir_args a;
4263
4264 a.a_desc = &vnop_compound_mkdir_desc;
4265 a.a_dvp = dvp;
4266 a.a_vpp = vpp;
4267 a.a_cnp = &ndp->ni_cnd;
4268 a.a_vap = vap;
4269 a.a_flags = 0;
4270 a.a_context = ctx;
4271 #if 0
4272 a.a_mkdir_authorizer = vn_authorize_mkdir;
4273 #endif /* 0 */
4274 a.a_reserved = NULL;
4275
4276 _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
4277 if (_err == 0 && *vpp) {
4278 DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
4279 }
4280 #if CONFIG_APPLEDOUBLE
4281 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4282 /*
4283 * Remove stale Apple Double file (if any).
4284 */
4285 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4286 }
4287 #endif /* CONFIG_APPLEDOUBLE */
4288
4289 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4290
4291 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
4292 if (*vpp && _err && _err != EKEEPLOOKING) {
4293 vnode_put(*vpp);
4294 *vpp = NULLVP;
4295 }
4296
4297 return (_err);
4298 }
4299
4300 int
4301 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4302 {
4303 if (vnode_compound_rmdir_available(dvp)) {
4304 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4305 } else {
4306 if (*vpp == NULLVP) {
4307 panic("NULL vp, but not a compound VNOP?");
4308 }
4309 if (vap != NULL) {
4310 panic("Non-NULL vap, but not a compound VNOP?");
4311 }
4312 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4313 }
4314 }
4315
4316 #if 0
4317 /*
4318 *#
4319 *#% rmdir dvp L U U
4320 *#% rmdir vp L U U
4321 *#
4322 */
4323 struct vnop_rmdir_args {
4324 struct vnodeop_desc *a_desc;
4325 vnode_t a_dvp;
4326 vnode_t a_vp;
4327 struct componentname *a_cnp;
4328 vfs_context_t a_context;
4329 };
4330
4331 #endif /* 0*/
4332 errno_t
4333 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4334 {
4335 int _err;
4336 struct vnop_rmdir_args a;
4337
4338 a.a_desc = &vnop_rmdir_desc;
4339 a.a_dvp = dvp;
4340 a.a_vp = vp;
4341 a.a_cnp = cnp;
4342 a.a_context = ctx;
4343
4344 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4345 DTRACE_FSINFO(rmdir, vnode_t, vp);
4346
4347 if (_err == 0) {
4348 vnode_setneedinactive(vp);
4349 #if CONFIG_APPLEDOUBLE
4350 if ( !(NATIVE_XATTR(dvp)) ) {
4351 /*
4352 * Remove any associated extended attribute file (._ AppleDouble file).
4353 */
4354 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4355 }
4356 #endif
4357 }
4358
4359 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4360 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4361 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4362
4363 return (_err);
4364 }
4365
4366 int
4367 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4368 struct vnode_attr *vap, vfs_context_t ctx)
4369 {
4370 int _err;
4371 struct vnop_compound_rmdir_args a;
4372 int no_vp;
4373
4374 a.a_desc = &vnop_compound_rmdir_desc;
4375 a.a_dvp = dvp;
4376 a.a_vpp = vpp;
4377 a.a_cnp = &ndp->ni_cnd;
4378 a.a_vap = vap;
4379 a.a_flags = 0;
4380 a.a_context = ctx;
4381 a.a_rmdir_authorizer = vn_authorize_rmdir;
4382 a.a_reserved = NULL;
4383
4384 no_vp = (*vpp == NULLVP);
4385
4386 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
4387 if (_err == 0 && *vpp) {
4388 DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
4389 }
4390 #if CONFIG_APPLEDOUBLE
4391 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4392 /*
4393 * Remove stale Apple Double file (if any).
4394 */
4395 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4396 }
4397 #endif
4398
4399 if (*vpp) {
4400 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4401 }
4402 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4403
4404 if (no_vp) {
4405 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4406
4407 #if 0 /* Removing orphaned ._ files requires a vp.... */
4408 if (*vpp && _err && _err != EKEEPLOOKING) {
4409 vnode_put(*vpp);
4410 *vpp = NULLVP;
4411 }
4412 #endif /* 0 */
4413 }
4414
4415 return (_err);
4416 }
4417
4418 #if CONFIG_APPLEDOUBLE
4419 /*
4420 * Remove a ._ AppleDouble file
4421 */
4422 #define AD_STALE_SECS (180)
4423 static void
4424 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
4425 {
4426 vnode_t xvp;
4427 struct nameidata nd;
4428 char smallname[64];
4429 char *filename = NULL;
4430 size_t len;
4431
4432 if ((basename == NULL) || (basename[0] == '\0') ||
4433 (basename[0] == '.' && basename[1] == '_')) {
4434 return;
4435 }
4436 filename = &smallname[0];
4437 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4438 if (len >= sizeof(smallname)) {
4439 len++; /* snprintf result doesn't include '\0' */
4440 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4441 len = snprintf(filename, len, "._%s", basename);
4442 }
4443 NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
4444 CAST_USER_ADDR_T(filename), ctx);
4445 nd.ni_dvp = dvp;
4446 if (namei(&nd) != 0)
4447 goto out2;
4448
4449 xvp = nd.ni_vp;
4450 nameidone(&nd);
4451 if (xvp->v_type != VREG)
4452 goto out1;
4453
4454 /*
4455 * When creating a new object and a "._" file already
4456 * exists, check to see if it's a stale "._" file.
4457 *
4458 */
4459 if (!force) {
4460 struct vnode_attr va;
4461
4462 VATTR_INIT(&va);
4463 VATTR_WANTED(&va, va_data_size);
4464 VATTR_WANTED(&va, va_modify_time);
4465 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
4466 VATTR_IS_SUPPORTED(&va, va_data_size) &&
4467 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
4468 va.va_data_size != 0) {
4469 struct timeval tv;
4470
4471 microtime(&tv);
4472 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
4473 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
4474 force = 1; /* must be stale */
4475 }
4476 }
4477 }
4478 if (force) {
4479 int error;
4480
4481 error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
4482 if (error == 0)
4483 vnode_setneedinactive(xvp);
4484
4485 post_event_if_success(xvp, error, NOTE_DELETE);
4486 post_event_if_success(dvp, error, NOTE_WRITE);
4487 }
4488
4489 out1:
4490 vnode_put(dvp);
4491 vnode_put(xvp);
4492 out2:
4493 if (filename && filename != &smallname[0]) {
4494 FREE(filename, M_TEMP);
4495 }
4496 }
4497
4498 /*
4499 * Shadow uid/gid/mode to a ._ AppleDouble file
4500 */
4501 static void
4502 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
4503 vfs_context_t ctx)
4504 {
4505 vnode_t xvp;
4506 struct nameidata nd;
4507 char smallname[64];
4508 char *filename = NULL;
4509 size_t len;
4510
4511 if ((dvp == NULLVP) ||
4512 (basename == NULL) || (basename[0] == '\0') ||
4513 (basename[0] == '.' && basename[1] == '_')) {
4514 return;
4515 }
4516 filename = &smallname[0];
4517 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4518 if (len >= sizeof(smallname)) {
4519 len++; /* snprintf result doesn't include '\0' */
4520 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4521 len = snprintf(filename, len, "._%s", basename);
4522 }
4523 NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
4524 CAST_USER_ADDR_T(filename), ctx);
4525 nd.ni_dvp = dvp;
4526 if (namei(&nd) != 0)
4527 goto out2;
4528
4529 xvp = nd.ni_vp;
4530 nameidone(&nd);
4531
4532 if (xvp->v_type == VREG) {
4533 struct vnop_setattr_args a;
4534
4535 a.a_desc = &vnop_setattr_desc;
4536 a.a_vp = xvp;
4537 a.a_vap = vap;
4538 a.a_context = ctx;
4539
4540 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
4541 }
4542
4543 vnode_put(xvp);
4544 out2:
4545 if (filename && filename != &smallname[0]) {
4546 FREE(filename, M_TEMP);
4547 }
4548 }
4549 #endif /* CONFIG_APPLEDOUBLE */
4550
4551 #if 0
4552 /*
4553 *#
4554 *#% symlink dvp L U U
4555 *#% symlink vpp - U -
4556 *#
4557 */
4558 struct vnop_symlink_args {
4559 struct vnodeop_desc *a_desc;
4560 vnode_t a_dvp;
4561 vnode_t *a_vpp;
4562 struct componentname *a_cnp;
4563 struct vnode_attr *a_vap;
4564 char *a_target;
4565 vfs_context_t a_context;
4566 };
4567
4568 #endif /* 0*/
4569 errno_t
4570 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4571 struct vnode_attr *vap, char *target, vfs_context_t ctx)
4572 {
4573 int _err;
4574 struct vnop_symlink_args a;
4575
4576 a.a_desc = &vnop_symlink_desc;
4577 a.a_dvp = dvp;
4578 a.a_vpp = vpp;
4579 a.a_cnp = cnp;
4580 a.a_vap = vap;
4581 a.a_target = target;
4582 a.a_context = ctx;
4583
4584 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
4585 DTRACE_FSINFO(symlink, vnode_t, dvp);
4586 #if CONFIG_APPLEDOUBLE
4587 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4588 /*
4589 * Remove stale Apple Double file (if any). Posts its own knotes
4590 */
4591 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4592 }
4593 #endif /* CONFIG_APPLEDOUBLE */
4594
4595 post_event_if_success(dvp, _err, NOTE_WRITE);
4596
4597 return (_err);
4598 }
4599
4600 #if 0
4601 /*
4602 *#
4603 *#% readdir vp L L L
4604 *#
4605 */
4606 struct vnop_readdir_args {
4607 struct vnodeop_desc *a_desc;
4608 vnode_t a_vp;
4609 struct uio *a_uio;
4610 int a_flags;
4611 int *a_eofflag;
4612 int *a_numdirent;
4613 vfs_context_t a_context;
4614 };
4615
4616 #endif /* 0*/
4617 errno_t
4618 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
4619 int *numdirent, vfs_context_t ctx)
4620 {
4621 int _err;
4622 struct vnop_readdir_args a;
4623 #if CONFIG_DTRACE
4624 user_ssize_t resid = uio_resid(uio);
4625 #endif
4626
4627 a.a_desc = &vnop_readdir_desc;
4628 a.a_vp = vp;
4629 a.a_uio = uio;
4630 a.a_flags = flags;
4631 a.a_eofflag = eofflag;
4632 a.a_numdirent = numdirent;
4633 a.a_context = ctx;
4634
4635 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
4636 DTRACE_FSINFO_IO(readdir,
4637 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4638
4639 return (_err);
4640 }
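/*
 * Editorial sketch (not part of kpi_vfs.c): the rough shape of a minimal
 * vnop_readdir handler that the wrapper above dispatches to.  The myfs_*
 * names are hypothetical, the fixed-size dirent is a simplification (a
 * real handler computes d_reclen from the name length and honors the
 * VNODE_READDIR_EXTENDED flag), and offset/cookie management is elided.
 */
static int
myfs_vnop_readdir(struct vnop_readdir_args *ap)
{
    struct uio *uio = ap->a_uio;
    struct dirent de;
    int count = 0;
    int error = 0;

    /* Emit "." as the first entry when starting at offset 0. */
    if (uio_offset(uio) == 0 && uio_resid(uio) >= (user_ssize_t)sizeof(de)) {
        bzero(&de, sizeof(de));
        de.d_fileno = 2;                /* hypothetical root inode number */
        de.d_type = DT_DIR;
        de.d_namlen = 1;
        de.d_name[0] = '.';
        de.d_reclen = sizeof(de);
        error = uiomove((caddr_t)&de, (int)sizeof(de), uio);
        if (error == 0) {
            count++;
        }
    }

    /* A real handler would continue with ".." and its on-disk entries. */
    if (ap->a_numdirent) {
        *ap->a_numdirent = count;
    }
    if (ap->a_eofflag) {
        *ap->a_eofflag = 1;             /* nothing further in this sketch */
    }
    return error;
}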
4641
4642 #if 0
4643 /*
4644 *#
4645 *#% readdirattr vp L L L
4646 *#
4647 */
4648 struct vnop_readdirattr_args {
4649 struct vnodeop_desc *a_desc;
4650 vnode_t a_vp;
4651 struct attrlist *a_alist;
4652 struct uio *a_uio;
4653 uint32_t a_maxcount;
4654 uint32_t a_options;
4655 uint32_t *a_newstate;
4656 int *a_eofflag;
4657 uint32_t *a_actualcount;
4658 vfs_context_t a_context;
4659 };
4660
4661 #endif /* 0*/
4662 errno_t
4663 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
4664 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
4665 {
4666 int _err;
4667 struct vnop_readdirattr_args a;
4668 #if CONFIG_DTRACE
4669 user_ssize_t resid = uio_resid(uio);
4670 #endif
4671
4672 a.a_desc = &vnop_readdirattr_desc;
4673 a.a_vp = vp;
4674 a.a_alist = alist;
4675 a.a_uio = uio;
4676 a.a_maxcount = maxcount;
4677 a.a_options = options;
4678 a.a_newstate = newstate;
4679 a.a_eofflag = eofflag;
4680 a.a_actualcount = actualcount;
4681 a.a_context = ctx;
4682
4683 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
4684 DTRACE_FSINFO_IO(readdirattr,
4685 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4686
4687 return (_err);
4688 }
4689
4690 #if 0
4691 struct vnop_getattrlistbulk_args {
4692 struct vnodeop_desc *a_desc;
4693 vnode_t a_vp;
4694 struct attrlist *a_alist;
4695 struct vnode_attr *a_vap;
4696 struct uio *a_uio;
4697 void *a_private;
4698 uint64_t a_options;
4699 int *a_eofflag;
4700 uint32_t *a_actualcount;
4701 vfs_context_t a_context;
4702 };
4703 #endif /* 0*/
4704 errno_t
4705 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
4706 struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
4707 int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
4708 {
4709 int _err;
4710 struct vnop_getattrlistbulk_args a;
4711 #if CONFIG_DTRACE
4712 user_ssize_t resid = uio_resid(uio);
4713 #endif
4714
4715 a.a_desc = &vnop_getattrlistbulk_desc;
4716 a.a_vp = vp;
4717 a.a_alist = alist;
4718 a.a_vap = vap;
4719 a.a_uio = uio;
4720 a.a_private = private;
4721 a.a_options = options;
4722 a.a_eofflag = eofflag;
4723 a.a_actualcount = actualcount;
4724 a.a_context = ctx;
4725
4726 _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
4727 DTRACE_FSINFO_IO(getattrlistbulk,
4728 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4729
4730 return (_err);
4731 }
4732
4733 #if 0
4734 /*
4735 *#
4736 *#% readlink vp L L L
4737 *#
4738 */
4739 struct vnop_readlink_args {
4740 struct vnodeop_desc *a_desc;
4741 vnode_t a_vp;
4742 struct uio *a_uio;
4743 vfs_context_t a_context;
4744 };
4745 #endif /* 0 */
4746
4747 /*
4748 * Returns: 0 Success
4749 * lock_fsnode:ENOENT No such file or directory [only for VFS
4750 * that is not thread safe & vnode is
4751 * currently being/has been terminated]
4752 * <vfs_readlink>:EINVAL
4753 * <vfs_readlink>:???
4754 *
4755 * Note: The return codes from the underlying VFS's readlink routine
4756 * can't be fully enumerated here, since third party VFS authors
4757 * may not limit their error returns to the ones documented here,
4758 * even though this may result in some programs functioning
4759 * incorrectly.
4760 *
4761 * The return codes documented above are those which may currently
4762 * be returned by HFS from hfs_vnop_readlink, not including
4763 * additional error code which may be propagated from underlying
4764 * routines.
4765 */
4766 errno_t
4767 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
4768 {
4769 int _err;
4770 struct vnop_readlink_args a;
4771 #if CONFIG_DTRACE
4772 user_ssize_t resid = uio_resid(uio);
4773 #endif
4774 a.a_desc = &vnop_readlink_desc;
4775 a.a_vp = vp;
4776 a.a_uio = uio;
4777 a.a_context = ctx;
4778
4779 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
4780 DTRACE_FSINFO_IO(readlink,
4781 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4782
4783 return (_err);
4784 }
4785
4786 #if 0
4787 /*
4788 *#
4789 *#% inactive vp L U U
4790 *#
4791 */
4792 struct vnop_inactive_args {
4793 struct vnodeop_desc *a_desc;
4794 vnode_t a_vp;
4795 vfs_context_t a_context;
4796 };
4797 #endif /* 0*/
4798 errno_t
4799 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
4800 {
4801 int _err;
4802 struct vnop_inactive_args a;
4803
4804 a.a_desc = &vnop_inactive_desc;
4805 a.a_vp = vp;
4806 a.a_context = ctx;
4807
4808 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
4809 DTRACE_FSINFO(inactive, vnode_t, vp);
4810
4811 #if NAMEDSTREAMS
4812 /* For file systems that do not support named streams natively, mark
4813 * the shadow stream file vnode to be recycled as soon as the last
4814 * reference goes away. To avoid re-entering the reclaim code, do not
4815 * call recycle on terminating named stream vnodes.
4816 */
4817 if (vnode_isnamedstream(vp) &&
4818 (vp->v_parent != NULLVP) &&
4819 vnode_isshadow(vp) &&
4820 ((vp->v_lflag & VL_TERMINATE) == 0)) {
4821 vnode_recycle(vp);
4822 }
4823 #endif
4824
4825 return (_err);
4826 }
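/*
 * Editorial sketch (not part of kpi_vfs.c): a typical vnop_inactive
 * handler reached through the wrapper above.  Its usual job is to notice
 * that an unlinked file has just lost its last use and release its
 * storage.  The myfs_* names, the n_linkcount field, and the
 * myfs_release_storage() helper are hypothetical.
 */
static int
myfs_vnop_inactive(struct vnop_inactive_args *ap)
{
    vnode_t vp = ap->a_vp;
    struct myfs_node *np = vnode_fsnode(vp);    /* per-vnode private data */

    if (np != NULL && np->n_linkcount == 0) {
        /* Last name and last iocount are gone: free on-disk blocks and
         * ask the VFS to move the vnode toward reclaim promptly. */
        myfs_release_storage(np);               /* hypothetical helper */
        vnode_recycle(vp);
    }
    return 0;
}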
4827
4828
4829 #if 0
4830 /*
4831 *#
4832 *#% reclaim vp U U U
4833 *#
4834 */
4835 struct vnop_reclaim_args {
4836 struct vnodeop_desc *a_desc;
4837 vnode_t a_vp;
4838 vfs_context_t a_context;
4839 };
4840 #endif /* 0*/
4841 errno_t
4842 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
4843 {
4844 int _err;
4845 struct vnop_reclaim_args a;
4846
4847 a.a_desc = &vnop_reclaim_desc;
4848 a.a_vp = vp;
4849 a.a_context = ctx;
4850
4851 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
4852 DTRACE_FSINFO(reclaim, vnode_t, vp);
4853
4854 return (_err);
4855 }
4856
4857
4858 /*
4859 * Returns: 0 Success
4860 * lock_fsnode:ENOENT No such file or directory [only for VFS
4861 * that is not thread safe & vnode is
4862 * currently being/has been terminated]
4863 * <vnop_pathconf_desc>:??? [per FS implementation specific]
4864 */
4865 #if 0
4866 /*
4867 *#
4868 *#% pathconf vp L L L
4869 *#
4870 */
4871 struct vnop_pathconf_args {
4872 struct vnodeop_desc *a_desc;
4873 vnode_t a_vp;
4874 int a_name;
4875 int32_t *a_retval;
4876 vfs_context_t a_context;
4877 };
4878 #endif /* 0*/
4879 errno_t
4880 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
4881 {
4882 int _err;
4883 struct vnop_pathconf_args a;
4884
4885 a.a_desc = &vnop_pathconf_desc;
4886 a.a_vp = vp;
4887 a.a_name = name;
4888 a.a_retval = retval;
4889 a.a_context = ctx;
4890
4891 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
4892 DTRACE_FSINFO(pathconf, vnode_t, vp);
4893
4894 return (_err);
4895 }
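/*
 * Editorial sketch (not part of kpi_vfs.c): vnop_pathconf handlers are
 * usually a simple switch over the _PC_* selectors, returning EINVAL for
 * anything the file system does not implement.  myfs_* and the specific
 * limits are illustrative.
 */
static int
myfs_vnop_pathconf(struct vnop_pathconf_args *ap)
{
    switch (ap->a_name) {
    case _PC_LINK_MAX:
        *ap->a_retval = 32767;          /* illustrative hard-link limit */
        return 0;
    case _PC_NAME_MAX:
        *ap->a_retval = NAME_MAX;
        return 0;
    case _PC_CASE_SENSITIVE:
        *ap->a_retval = 1;
        return 0;
    default:
        return EINVAL;
    }
}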
4896
4897 /*
4898 * Returns: 0 Success
4899 * err_advlock:ENOTSUP
4900 * lf_advlock:???
4901 * <vnop_advlock_desc>:???
4902 *
4903 * Notes: VFS implementations that provide advisory locking through
4904 * <vnop_advlock_desc> (because lock enforcement does not occur
4905 * locally) should try to limit themselves to the return codes
4906 * documented above for lf_advlock and err_advlock.
4907 */
4908 #if 0
4909 /*
4910 *#
4911 *#% advlock vp U U U
4912 *#
4913 */
4914 struct vnop_advlock_args {
4915 struct vnodeop_desc *a_desc;
4916 vnode_t a_vp;
4917 caddr_t a_id;
4918 int a_op;
4919 struct flock *a_fl;
4920 int a_flags;
4921 vfs_context_t a_context;
4922 };
4923 #endif /* 0*/
4924 errno_t
4925 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
4926 {
4927 int _err;
4928 struct vnop_advlock_args a;
4929
4930 a.a_desc = &vnop_advlock_desc;
4931 a.a_vp = vp;
4932 a.a_id = id;
4933 a.a_op = op;
4934 a.a_fl = fl;
4935 a.a_flags = flags;
4936 a.a_context = ctx;
4937 a.a_timeout = timeout;
4938
4939 /* Disallow advisory locking on non-seekable vnodes */
4940 if (vnode_isfifo(vp)) {
4941 _err = err_advlock(&a);
4942 } else {
4943 if ((vp->v_flag & VLOCKLOCAL)) {
4944 /* Advisory locking done at this layer */
4945 _err = lf_advlock(&a);
4946 } else if (flags & F_OFD_LOCK) {
4947 /* Non-local locking doesn't work for OFD locks */
4948 _err = err_advlock(&a);
4949 } else {
4950 /* Advisory locking done by underlying filesystem */
4951 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
4952 }
4953 DTRACE_FSINFO(advlock, vnode_t, vp);
4954 }
4955
4956 return (_err);
4957 }
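/*
 * Editorial sketch (not part of kpi_vfs.c): a file system that wants the
 * VFS layer (lf_advlock, above) to enforce advisory locks on its behalf
 * typically calls vfs_setlocklocal() while mounting, which causes its
 * vnodes to carry VLOCKLOCAL.  myfs_vfs_mount and the elided setup are
 * hypothetical.
 */
static int
myfs_vfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
    /* ... read the superblock and set up per-mount state ... */

    vfs_setlocklocal(mp);   /* advisory locks handled locally by lf_advlock() */
    return 0;
}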
4958
4959
4960
4961 #if 0
4962 /*
4963 *#
4964 *#% allocate vp L L L
4965 *#
4966 */
4967 struct vnop_allocate_args {
4968 struct vnodeop_desc *a_desc;
4969 vnode_t a_vp;
4970 off_t a_length;
4971 u_int32_t a_flags;
4972 off_t *a_bytesallocated;
4973 off_t a_offset;
4974 vfs_context_t a_context;
4975 };
4976
4977 #endif /* 0*/
4978 errno_t
4979 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
4980 {
4981 int _err;
4982 struct vnop_allocate_args a;
4983
4984 a.a_desc = &vnop_allocate_desc;
4985 a.a_vp = vp;
4986 a.a_length = length;
4987 a.a_flags = flags;
4988 a.a_bytesallocated = bytesallocated;
4989 a.a_offset = offset;
4990 a.a_context = ctx;
4991
4992 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
4993 DTRACE_FSINFO(allocate, vnode_t, vp);
4994 #if CONFIG_FSE
4995 if (_err == 0) {
4996 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
4997 }
4998 #endif
4999
5000 return (_err);
5001 }
5002
5003 #if 0
5004 /*
5005 *#
5006 *#% pagein vp = = =
5007 *#
5008 */
5009 struct vnop_pagein_args {
5010 struct vnodeop_desc *a_desc;
5011 vnode_t a_vp;
5012 upl_t a_pl;
5013 upl_offset_t a_pl_offset;
5014 off_t a_f_offset;
5015 size_t a_size;
5016 int a_flags;
5017 vfs_context_t a_context;
5018 };
5019 #endif /* 0*/
5020 errno_t
5021 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5022 {
5023 int _err;
5024 struct vnop_pagein_args a;
5025
5026 a.a_desc = &vnop_pagein_desc;
5027 a.a_vp = vp;
5028 a.a_pl = pl;
5029 a.a_pl_offset = pl_offset;
5030 a.a_f_offset = f_offset;
5031 a.a_size = size;
5032 a.a_flags = flags;
5033 a.a_context = ctx;
5034
5035 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5036 DTRACE_FSINFO(pagein, vnode_t, vp);
5037
5038 return (_err);
5039 }
5040
5041 #if 0
5042 /*
5043 *#
5044 *#% pageout vp = = =
5045 *#
5046 */
5047 struct vnop_pageout_args {
5048 struct vnodeop_desc *a_desc;
5049 vnode_t a_vp;
5050 upl_t a_pl;
5051 upl_offset_t a_pl_offset;
5052 off_t a_f_offset;
5053 size_t a_size;
5054 int a_flags;
5055 vfs_context_t a_context;
5056 };
5057
5058 #endif /* 0*/
5059 errno_t
5060 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5061 {
5062 int _err;
5063 struct vnop_pageout_args a;
5064
5065 a.a_desc = &vnop_pageout_desc;
5066 a.a_vp = vp;
5067 a.a_pl = pl;
5068 a.a_pl_offset = pl_offset;
5069 a.a_f_offset = f_offset;
5070 a.a_size = size;
5071 a.a_flags = flags;
5072 a.a_context = ctx;
5073
5074 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5075 DTRACE_FSINFO(pageout, vnode_t, vp);
5076
5077 post_event_if_success(vp, _err, NOTE_WRITE);
5078
5079 return (_err);
5080 }
5081
5082 int
5083 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5084 {
5085 if (vnode_compound_remove_available(dvp)) {
5086 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5087 } else {
5088 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5089 }
5090 }
5091
5092 #if CONFIG_SEARCHFS
5093
5094 #if 0
5095 /*
5096 *#
5097 *#% searchfs vp L L L
5098 *#
5099 */
5100 struct vnop_searchfs_args {
5101 struct vnodeop_desc *a_desc;
5102 vnode_t a_vp;
5103 void *a_searchparams1;
5104 void *a_searchparams2;
5105 struct attrlist *a_searchattrs;
5106 uint32_t a_maxmatches;
5107 struct timeval *a_timelimit;
5108 struct attrlist *a_returnattrs;
5109 uint32_t *a_nummatches;
5110 uint32_t a_scriptcode;
5111 uint32_t a_options;
5112 struct uio *a_uio;
5113 struct searchstate *a_searchstate;
5114 vfs_context_t a_context;
5115 };
5116
5117 #endif /* 0*/
5118 errno_t
5119 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5120 {
5121 int _err;
5122 struct vnop_searchfs_args a;
5123
5124 a.a_desc = &vnop_searchfs_desc;
5125 a.a_vp = vp;
5126 a.a_searchparams1 = searchparams1;
5127 a.a_searchparams2 = searchparams2;
5128 a.a_searchattrs = searchattrs;
5129 a.a_maxmatches = maxmatches;
5130 a.a_timelimit = timelimit;
5131 a.a_returnattrs = returnattrs;
5132 a.a_nummatches = nummatches;
5133 a.a_scriptcode = scriptcode;
5134 a.a_options = options;
5135 a.a_uio = uio;
5136 a.a_searchstate = searchstate;
5137 a.a_context = ctx;
5138
5139 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5140 DTRACE_FSINFO(searchfs, vnode_t, vp);
5141
5142 return (_err);
5143 }
5144 #endif /* CONFIG_SEARCHFS */
5145
5146 #if 0
5147 /*
5148 *#
5149 *#% copyfile fvp U U U
5150 *#% copyfile tdvp L U U
5151 *#% copyfile tvp X U U
5152 *#
5153 */
5154 struct vnop_copyfile_args {
5155 struct vnodeop_desc *a_desc;
5156 vnode_t a_fvp;
5157 vnode_t a_tdvp;
5158 vnode_t a_tvp;
5159 struct componentname *a_tcnp;
5160 int a_mode;
5161 int a_flags;
5162 vfs_context_t a_context;
5163 };
5164 #endif /* 0*/
5165 errno_t
5166 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5167 int mode, int flags, vfs_context_t ctx)
5168 {
5169 int _err;
5170 struct vnop_copyfile_args a;
5171 a.a_desc = &vnop_copyfile_desc;
5172 a.a_fvp = fvp;
5173 a.a_tdvp = tdvp;
5174 a.a_tvp = tvp;
5175 a.a_tcnp = tcnp;
5176 a.a_mode = mode;
5177 a.a_flags = flags;
5178 a.a_context = ctx;
5179 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5180 DTRACE_FSINFO(copyfile, vnode_t, fvp);
5181 return (_err);
5182 }
5183
5184 errno_t
5185 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5186 {
5187 struct vnop_getxattr_args a;
5188 int error;
5189
5190 a.a_desc = &vnop_getxattr_desc;
5191 a.a_vp = vp;
5192 a.a_name = name;
5193 a.a_uio = uio;
5194 a.a_size = size;
5195 a.a_options = options;
5196 a.a_context = ctx;
5197
5198 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
5199 DTRACE_FSINFO(getxattr, vnode_t, vp);
5200
5201 return (error);
5202 }
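/*
 * Editorial sketch (not part of kpi_vfs.c): a vnop_getxattr handler as
 * dispatched by the wrapper above.  By convention a NULL uio is a size
 * probe: the handler reports the attribute's length through *a_size
 * instead of copying data.  myfs_lookup_xattr() and the other myfs_*
 * names are hypothetical.
 */
static int
myfs_vnop_getxattr(struct vnop_getxattr_args *ap)
{
    const void *attr_data;
    size_t attr_len;
    int error;

    error = myfs_lookup_xattr(ap->a_vp, ap->a_name, &attr_data, &attr_len); /* hypothetical */
    if (error) {
        return error;               /* e.g. ENOATTR when the attribute does not exist */
    }
    if (ap->a_uio == NULL) {
        *ap->a_size = attr_len;     /* size probe */
        return 0;
    }
    if ((size_t)uio_resid(ap->a_uio) < attr_len) {
        return ERANGE;
    }
    return uiomove((const char *)attr_data, (int)attr_len, ap->a_uio);
}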
5203
5204 errno_t
5205 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
5206 {
5207 struct vnop_setxattr_args a;
5208 int error;
5209
5210 a.a_desc = &vnop_setxattr_desc;
5211 a.a_vp = vp;
5212 a.a_name = name;
5213 a.a_uio = uio;
5214 a.a_options = options;
5215 a.a_context = ctx;
5216
5217 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
5218 DTRACE_FSINFO(setxattr, vnode_t, vp);
5219
5220 if (error == 0)
5221 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
5222
5223 post_event_if_success(vp, error, NOTE_ATTRIB);
5224
5225 return (error);
5226 }
5227
5228 errno_t
5229 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
5230 {
5231 struct vnop_removexattr_args a;
5232 int error;
5233
5234 a.a_desc = &vnop_removexattr_desc;
5235 a.a_vp = vp;
5236 a.a_name = name;
5237 a.a_options = options;
5238 a.a_context = ctx;
5239
5240 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
5241 DTRACE_FSINFO(removexattr, vnode_t, vp);
5242
5243 post_event_if_success(vp, error, NOTE_ATTRIB);
5244
5245 return (error);
5246 }
5247
5248 errno_t
5249 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5250 {
5251 struct vnop_listxattr_args a;
5252 int error;
5253
5254 a.a_desc = &vnop_listxattr_desc;
5255 a.a_vp = vp;
5256 a.a_uio = uio;
5257 a.a_size = size;
5258 a.a_options = options;
5259 a.a_context = ctx;
5260
5261 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
5262 DTRACE_FSINFO(listxattr, vnode_t, vp);
5263
5264 return (error);
5265 }
5266
5267
5268 #if 0
5269 /*
5270 *#
5271 *#% blktooff vp = = =
5272 *#
5273 */
5274 struct vnop_blktooff_args {
5275 struct vnodeop_desc *a_desc;
5276 vnode_t a_vp;
5277 daddr64_t a_lblkno;
5278 off_t *a_offset;
5279 };
5280 #endif /* 0*/
5281 errno_t
5282 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
5283 {
5284 int _err;
5285 struct vnop_blktooff_args a;
5286
5287 a.a_desc = &vnop_blktooff_desc;
5288 a.a_vp = vp;
5289 a.a_lblkno = lblkno;
5290 a.a_offset = offset;
5291
5292 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
5293 DTRACE_FSINFO(blktooff, vnode_t, vp);
5294
5295 return (_err);
5296 }
5297
5298 #if 0
5299 /*
5300 *#
5301 *#% offtoblk vp = = =
5302 *#
5303 */
5304 struct vnop_offtoblk_args {
5305 struct vnodeop_desc *a_desc;
5306 vnode_t a_vp;
5307 off_t a_offset;
5308 daddr64_t *a_lblkno;
5309 };
5310 #endif /* 0*/
5311 errno_t
5312 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
5313 {
5314 int _err;
5315 struct vnop_offtoblk_args a;
5316
5317 a.a_desc = &vnop_offtoblk_desc;
5318 a.a_vp = vp;
5319 a.a_offset = offset;
5320 a.a_lblkno = lblkno;
5321
5322 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
5323 DTRACE_FSINFO(offtoblk, vnode_t, vp);
5324
5325 return (_err);
5326 }
5327
5328 #if 0
5329 /*
5330 *#
5331 *#% blockmap vp L L L
5332 *#
5333 */
5334 struct vnop_blockmap_args {
5335 struct vnodeop_desc *a_desc;
5336 vnode_t a_vp;
5337 off_t a_foffset;
5338 size_t a_size;
5339 daddr64_t *a_bpn;
5340 size_t *a_run;
5341 void *a_poff;
5342 int a_flags;
5343 vfs_context_t a_context;
5344 };
5345 #endif /* 0*/
5346 errno_t
5347 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
5348 {
5349 int _err;
5350 struct vnop_blockmap_args a;
5351 size_t localrun = 0;
5352
5353 if (ctx == NULL) {
5354 ctx = vfs_context_current();
5355 }
5356 a.a_desc = &vnop_blockmap_desc;
5357 a.a_vp = vp;
5358 a.a_foffset = foffset;
5359 a.a_size = size;
5360 a.a_bpn = bpn;
5361 a.a_run = &localrun;
5362 a.a_poff = poff;
5363 a.a_flags = flags;
5364 a.a_context = ctx;
5365
5366 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
5367 DTRACE_FSINFO(blockmap, vnode_t, vp);
5368
5369 /*
5370 * We used a local variable to request information from the underlying
5371 * filesystem about the length of the I/O run in question. If
5372 * we get malformed output from the filesystem, we cap it to the length
5373 * requested, at most. Update 'run' on the way out.
5374 */
5375 if (_err == 0) {
5376 if (localrun > size) {
5377 localrun = size;
5378 }
5379
5380 if (run) {
5381 *run = localrun;
5382 }
5383 }
5384
5385 return (_err);
5386 }
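/*
 * Editorial sketch (not part of kpi_vfs.c): how a caller typically uses
 * VNOP_BLOCKMAP() above to translate a file offset into a device block
 * and to bound the size of a contiguous transfer.  Everything other than
 * the KPI calls and flags is hypothetical.
 */
static int
myfs_map_and_size_io(vnode_t vp, off_t f_offset, size_t io_size, vfs_context_t ctx)
{
    daddr64_t blkno;
    size_t run = 0;
    int error;

    error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &run, NULL,
        VNODE_READ, ctx);
    if (error) {
        return error;
    }
    /*
     * The wrapper above has already clamped 'run' to io_size, so the
     * caller can safely issue a device read of 'run' bytes starting at
     * physical block 'blkno'.
     */
    return 0;
}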
5387
5388 #if 0
5389 struct vnop_strategy_args {
5390 struct vnodeop_desc *a_desc;
5391 struct buf *a_bp;
5392 };
5393
5394 #endif /* 0*/
5395 errno_t
5396 VNOP_STRATEGY(struct buf *bp)
5397 {
5398 int _err;
5399 struct vnop_strategy_args a;
5400 vnode_t vp = buf_vnode(bp);
5401 a.a_desc = &vnop_strategy_desc;
5402 a.a_bp = bp;
5403 _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
5404 DTRACE_FSINFO(strategy, vnode_t, vp);
5405 return (_err);
5406 }
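/*
 * Editorial sketch (not part of kpi_vfs.c): a vnop_strategy handler for a
 * disk-backed file system commonly just redirects the buffer to the
 * underlying block device with buf_strategy().  The myfs_* names and the
 * mm_devvp field are hypothetical.
 */
static int
myfs_vnop_strategy(struct vnop_strategy_args *ap)
{
    buf_t bp = ap->a_bp;
    vnode_t vp = buf_vnode(bp);
    struct myfs_mount *mmp = vfs_fsprivate(vnode_mount(vp)); /* per-mount private data */

    /*
     * buf_strategy() resolves the buffer's physical block (calling back
     * into VNOP_BLOCKMAP() if needed) and then issues the I/O against the
     * device vnode.
     */
    return buf_strategy(mmp->mm_devvp, ap);
}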
5407
5408 #if 0
5409 struct vnop_bwrite_args {
5410 struct vnodeop_desc *a_desc;
5411 buf_t a_bp;
5412 };
5413 #endif /* 0*/
5414 errno_t
5415 VNOP_BWRITE(struct buf *bp)
5416 {
5417 int _err;
5418 struct vnop_bwrite_args a;
5419 vnode_t vp = buf_vnode(bp);
5420 a.a_desc = &vnop_bwrite_desc;
5421 a.a_bp = bp;
5422 _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
5423 DTRACE_FSINFO(bwrite, vnode_t, vp);
5424 return (_err);
5425 }
5426
5427 #if 0
5428 struct vnop_kqfilt_add_args {
5429 struct vnodeop_desc *a_desc;
5430 struct vnode *a_vp;
5431 struct knote *a_kn;
5432 vfs_context_t a_context;
5433 };
5434 #endif
5435 errno_t
5436 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
5437 {
5438 int _err;
5439 struct vnop_kqfilt_add_args a;
5440
5441 a.a_desc = VDESC(vnop_kqfilt_add);
5442 a.a_vp = vp;
5443 a.a_kn = kn;
5444 a.a_context = ctx;
5445
5446 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
5447 DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
5448
5449 return(_err);
5450 }
5451
5452 #if 0
5453 struct vnop_kqfilt_remove_args {
5454 struct vnodeop_desc *a_desc;
5455 struct vnode *a_vp;
5456 uintptr_t a_ident;
5457 vfs_context_t a_context;
5458 };
5459 #endif
5460 errno_t
5461 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
5462 {
5463 int _err;
5464 struct vnop_kqfilt_remove_args a;
5465
5466 a.a_desc = VDESC(vnop_kqfilt_remove);
5467 a.a_vp = vp;
5468 a.a_ident = ident;
5469 a.a_context = ctx;
5470
5471 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
5472 DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
5473
5474 return(_err);
5475 }
5476
5477 errno_t
5478 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
5479 {
5480 int _err;
5481 struct vnop_monitor_args a;
5482
5483 a.a_desc = VDESC(vnop_monitor);
5484 a.a_vp = vp;
5485 a.a_events = events;
5486 a.a_flags = flags;
5487 a.a_handle = handle;
5488 a.a_context = ctx;
5489
5490 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
5491 DTRACE_FSINFO(monitor, vnode_t, vp);
5492
5493 return(_err);
5494 }
5495
5496 #if 0
5497 struct vnop_setlabel_args {
5498 struct vnodeop_desc *a_desc;
5499 struct vnode *a_vp;
5500 struct label *a_vl;
5501 vfs_context_t a_context;
5502 };
5503 #endif
5504 errno_t
5505 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
5506 {
5507 int _err;
5508 struct vnop_setlabel_args a;
5509
5510 a.a_desc = VDESC(vnop_setlabel);
5511 a.a_vp = vp;
5512 a.a_vl = label;
5513 a.a_context = ctx;
5514
5515 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
5516 DTRACE_FSINFO(setlabel, vnode_t, vp);
5517
5518 return(_err);
5519 }
5520
5521
5522 #if NAMEDSTREAMS
5523 /*
5524 * Get a named stream
5525 */
5526 errno_t
5527 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
5528 {
5529 int _err;
5530 struct vnop_getnamedstream_args a;
5531
5532 a.a_desc = &vnop_getnamedstream_desc;
5533 a.a_vp = vp;
5534 a.a_svpp = svpp;
5535 a.a_name = name;
5536 a.a_operation = operation;
5537 a.a_flags = flags;
5538 a.a_context = ctx;
5539
5540 _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
5541 DTRACE_FSINFO(getnamedstream, vnode_t, vp);
5542 return (_err);
5543 }
5544
5545 /*
5546 * Create a named stream
5547 */
5548 errno_t
5549 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
5550 {
5551 int _err;
5552 struct vnop_makenamedstream_args a;
5553
5554 a.a_desc = &vnop_makenamedstream_desc;
5555 a.a_vp = vp;
5556 a.a_svpp = svpp;
5557 a.a_name = name;
5558 a.a_flags = flags;
5559 a.a_context = ctx;
5560
5561 _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
5562 DTRACE_FSINFO(makenamedstream, vnode_t, vp);
5563 return (_err);
5564 }
5565
5566
5567 /*
5568 * Remove a named stream
5569 */
5570 errno_t
5571 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
5572 {
5573 int _err;
5574 struct vnop_removenamedstream_args a;
5575
5576 a.a_desc = &vnop_removenamedstream_desc;
5577 a.a_vp = vp;
5578 a.a_svp = svp;
5579 a.a_name = name;
5580 a.a_flags = flags;
5581 a.a_context = ctx;
5582
5583 _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
5584 DTRACE_FSINFO(removenamedstream, vnode_t, vp);
5585 return (_err);
5586 }
5587 #endif