1 /*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/vnode_internal.h>
88 #include <sys/stat.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
91 #include <sys/buf.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
95 #include <sys/mbuf.h>
96 #include <sys/syslog.h>
97 #include <sys/ubc.h>
98 #include <sys/vm.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/event.h>
102 #include <sys/fsevents.h>
103 #include <sys/user.h>
104 #include <sys/lockf.h>
105 #include <sys/xattr.h>
106 #include <sys/kdebug.h>
107
108 #include <kern/assert.h>
109 #include <kern/kalloc.h>
110 #include <kern/task.h>
111 #include <kern/policy_internal.h>
112
113 #include <libkern/OSByteOrder.h>
114
115 #include <miscfs/specfs/specdev.h>
116
117 #include <mach/mach_types.h>
118 #include <mach/memory_object_types.h>
119 #include <mach/task.h>
120
121 #if CONFIG_MACF
122 #include <security/mac_framework.h>
123 #endif
124
125 #if NULLFS
126 #include <miscfs/nullfs/nullfs.h>
127 #endif
128
129 #include <sys/sdt.h>
130
131 #define ESUCCESS 0
132 #undef mount_t
133 #undef vnode_t
134
135 #define COMPAT_ONLY
136
137 #define NATIVE_XATTR(VP) \
138 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
139
140 #if CONFIG_APPLEDOUBLE
141 static void xattrfile_remove(vnode_t dvp, const char *basename,
142 vfs_context_t ctx, int force);
143 static void xattrfile_setattr(vnode_t dvp, const char * basename,
144 struct vnode_attr * vap, vfs_context_t ctx);
145 #endif /* CONFIG_APPLEDOUBLE */
146
147 static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
148
149 /*
150 * vnode_setneedinactive
151 *
152 * Description: Indicate that when the last iocount on this vnode goes away,
153 * and the usecount is also zero, we should inform the filesystem
154 * via VNOP_INACTIVE.
155 *
156 * Parameters: vnode_t vnode to mark
157 *
158 * Returns: Nothing
159 *
160 * Notes: Notably used when we're deleting a file--we need not have a
161 * usecount, so VNOP_INACTIVE may not get called by anyone. We
162 * want it called when we drop our iocount.
163 */
164 void
165 vnode_setneedinactive(vnode_t vp)
166 {
167 cache_purge(vp);
168
169 vnode_lock_spin(vp);
170 vp->v_lflag |= VL_NEEDINACTIVE;
171 vnode_unlock(vp);
172 }
173
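/*
 * Illustrative sketch (editor's addition, not compiled): a filesystem's remove
 * path can mark the doomed vnode so that VNOP_INACTIVE fires when the caller's
 * iocount is dropped, even though no usecount is held.  "myfs_unlink_internal"
 * is a hypothetical FS-specific helper.
 */
#if 0
static int
myfs_remove_example(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx)
{
	int error;

	error = myfs_unlink_internal(dvp, vp, cnp, ctx);	/* hypothetical unlink */
	if (error == 0) {
		vnode_setneedinactive(vp);	/* request VNOP_INACTIVE at last iocount drop */
	}
	return error;
}
#endif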
174
175 /* ====================================================================== */
176 /* ************ EXTERNAL KERNEL APIS ********************************** */
177 /* ====================================================================== */
178
179 /*
180 * implementations of exported VFS operations
181 */
182 int
183 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
184 {
185 int error;
186
187 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) {
188 return ENOTSUP;
189 }
190
191 if (vfs_context_is64bit(ctx)) {
192 if (vfs_64bitready(mp)) {
193 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
194 } else {
195 error = ENOTSUP;
196 }
197 } else {
198 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
199 }
200
201 return error;
202 }
203
204 int
205 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
206 {
207 int error;
208
209 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) {
210 return ENOTSUP;
211 }
212
213 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
214
215 return error;
216 }
217
218 int
219 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
220 {
221 int error;
222
223 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) {
224 return ENOTSUP;
225 }
226
227 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
228
229 return error;
230 }
231
232 /*
233 * Returns: 0 Success
234 * ENOTSUP Not supported
235 * <vfs_root>:ENOENT
236 * <vfs_root>:???
237 *
238 * Note: The return codes from the underlying VFS's root routine can't
239 * be fully enumerated here, since third party VFS authors may not
240 * limit their error returns to the ones documented here, even
241 * though this may result in some programs functioning incorrectly.
242 *
243 * The return codes documented above are those which may currently
244 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
245 * for a call to hfs_vget on the volume mount point, not including
246 * additional error codes which may be propagated from underlying
247 * routines called by hfs_vget.
248 */
249 int
250 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
251 {
252 int error;
253
254 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) {
255 return ENOTSUP;
256 }
257
258 if (ctx == NULL) {
259 ctx = vfs_context_current();
260 }
261
262 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
263
264 return error;
265 }
266
267 int
268 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
269 {
270 int error;
271
272 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) {
273 return ENOTSUP;
274 }
275
276 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
277
278 return error;
279 }
280
281 int
282 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
283 {
284 int error;
285
286 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) {
287 return ENOTSUP;
288 }
289
290 if (ctx == NULL) {
291 ctx = vfs_context_current();
292 }
293
294 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
295
296 return error;
297 }
298
299 int
300 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
301 {
302 int error;
303
304 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) {
305 return ENOTSUP;
306 }
307
308 if (ctx == NULL) {
309 ctx = vfs_context_current();
310 }
311
312 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
313
314 return error;
315 }
316
317 int
318 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
319 {
320 int error;
321
322 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) {
323 return ENOTSUP;
324 }
325
326 if (ctx == NULL) {
327 ctx = vfs_context_current();
328 }
329
330 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
331
332 return error;
333 }
334
335 int
336 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
337 {
338 int error;
339
340 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) {
341 return ENOTSUP;
342 }
343
344 if (ctx == NULL) {
345 ctx = vfs_context_current();
346 }
347
348 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
349
350 return error;
351 }
352
353 int
354 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
355 {
356 int error;
357
358 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) {
359 return ENOTSUP;
360 }
361
362 if (ctx == NULL) {
363 ctx = vfs_context_current();
364 }
365
366 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
367
368 return error;
369 }
370
371 int
372 VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
373 {
374 int error;
375
376 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) {
377 return ENOTSUP;
378 }
379
380 if (ctx == NULL) {
381 ctx = vfs_context_current();
382 }
383
384 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
385
386 return error;
387 }
388
389 int
390 VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
391 int flags, vfs_context_t context)
392 {
393 if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl) {
394 return ENOTSUP;
395 }
396
397 return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
398 context ?: vfs_context_current());
399 }
400
401 int
402 VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
403 {
404 int error;
405
406 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0)) {
407 return ENOTSUP;
408 }
409
410 if (ctx == NULL) {
411 ctx = vfs_context_current();
412 }
413
414 error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
415
416 return error;
417 }
418
419 /* returns the cached throttle mask for the mount_t */
420 uint64_t
421 vfs_throttle_mask(mount_t mp)
422 {
423 return mp->mnt_throttle_mask;
424 }
425
426 /* returns a copy of vfs type name for the mount_t */
427 void
428 vfs_name(mount_t mp, char *buffer)
429 {
430 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
431 }
432
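/*
 * Illustrative sketch (editor's addition, not compiled): vfs_name() copies up
 * to MFSNAMELEN bytes, so the caller must supply a buffer at least that large.
 */
#if 0
static void
example_log_fsname(mount_t mp)
{
	char fsname[MFSNAMELEN];

	vfs_name(mp, fsname);
	printf("filesystem type for mount: %s\n", fsname);
}
#endif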
433 /* returns vfs type number for the mount_t */
434 int
435 vfs_typenum(mount_t mp)
436 {
437 return mp->mnt_vtable->vfc_typenum;
438 }
439
440 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
441 void*
442 vfs_mntlabel(mount_t mp)
443 {
444 return (void*)mp->mnt_mntlabel;
445 }
446
447 /* returns the command modifier and visible flags of the mount_t (MNT_CMDFLAGS | MNT_VISFLAGMASK) */
448 uint64_t
449 vfs_flags(mount_t mp)
450 {
451 return (uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
452 }
453
454 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
455 void
456 vfs_setflags(mount_t mp, uint64_t flags)
457 {
458 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
459
460 mount_lock(mp);
461 mp->mnt_flag |= lflags;
462 mount_unlock(mp);
463 }
464
465 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
466 void
467 vfs_clearflags(mount_t mp, uint64_t flags)
468 {
469 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
470
471 mount_lock(mp);
472 mp->mnt_flag &= ~lflags;
473 mount_unlock(mp);
474 }
475
476 /* Is the mount_t ronly and upgrade read/write requested? */
477 int
478 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
479 {
480 return (mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR);
481 }
482
483
484 /* Is the mount_t mounted ronly */
485 int
486 vfs_isrdonly(mount_t mp)
487 {
488 return mp->mnt_flag & MNT_RDONLY;
489 }
490
491 /* Is the mount_t mounted for filesystem synchronous writes? */
492 int
493 vfs_issynchronous(mount_t mp)
494 {
495 return mp->mnt_flag & MNT_SYNCHRONOUS;
496 }
497
498 /* Is the mount_t mounted read/write? */
499 int
500 vfs_isrdwr(mount_t mp)
501 {
502 return (mp->mnt_flag & MNT_RDONLY) == 0;
503 }
504
505
506 /* Is mount_t marked for update (ie MNT_UPDATE) */
507 int
508 vfs_isupdate(mount_t mp)
509 {
510 return mp->mnt_flag & MNT_UPDATE;
511 }
512
513
514 /* Is mount_t marked for reload (ie MNT_RELOAD) */
515 int
516 vfs_isreload(mount_t mp)
517 {
518 return (mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD);
519 }
520
521 /* Is mount_t marked for forced unmount (i.e. MNT_LFORCE is set) */
522 int
523 vfs_isforce(mount_t mp)
524 {
525 if (mp->mnt_lflag & MNT_LFORCE) {
526 return 1;
527 } else {
528 return 0;
529 }
530 }
531
532 int
533 vfs_isunmount(mount_t mp)
534 {
535 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
536 return 1;
537 } else {
538 return 0;
539 }
540 }
541
542 int
543 vfs_64bitready(mount_t mp)
544 {
545 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
546 return 1;
547 } else {
548 return 0;
549 }
550 }
551
552
553 int
554 vfs_authcache_ttl(mount_t mp)
555 {
556 if ((mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
557 return mp->mnt_authcache_ttl;
558 } else {
559 return CACHED_RIGHT_INFINITE_TTL;
560 }
561 }
562
563 void
564 vfs_setauthcache_ttl(mount_t mp, int ttl)
565 {
566 mount_lock(mp);
567 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
568 mp->mnt_authcache_ttl = ttl;
569 mount_unlock(mp);
570 }
571
572 void
573 vfs_clearauthcache_ttl(mount_t mp)
574 {
575 mount_lock(mp);
576 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
577 /*
578 * back to the default TTL value in case
579 * MNTK_AUTH_OPAQUE is set on this mount
580 */
581 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
582 mount_unlock(mp);
583 }
584
585 int
586 vfs_authopaque(mount_t mp)
587 {
588 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE)) {
589 return 1;
590 } else {
591 return 0;
592 }
593 }
594
595 int
596 vfs_authopaqueaccess(mount_t mp)
597 {
598 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS)) {
599 return 1;
600 } else {
601 return 0;
602 }
603 }
604
605 void
606 vfs_setauthopaque(mount_t mp)
607 {
608 mount_lock(mp);
609 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
610 mount_unlock(mp);
611 }
612
613 void
614 vfs_setauthopaqueaccess(mount_t mp)
615 {
616 mount_lock(mp);
617 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
618 mount_unlock(mp);
619 }
620
621 void
622 vfs_clearauthopaque(mount_t mp)
623 {
624 mount_lock(mp);
625 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
626 mount_unlock(mp);
627 }
628
629 void
630 vfs_clearauthopaqueaccess(mount_t mp)
631 {
632 mount_lock(mp);
633 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
634 mount_unlock(mp);
635 }
636
637 void
638 vfs_setextendedsecurity(mount_t mp)
639 {
640 mount_lock(mp);
641 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
642 mount_unlock(mp);
643 }
644
645 void
646 vfs_clearextendedsecurity(mount_t mp)
647 {
648 mount_lock(mp);
649 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
650 mount_unlock(mp);
651 }
652
653 void
654 vfs_setnoswap(mount_t mp)
655 {
656 mount_lock(mp);
657 mp->mnt_kern_flag |= MNTK_NOSWAP;
658 mount_unlock(mp);
659 }
660
661 void
662 vfs_clearnoswap(mount_t mp)
663 {
664 mount_lock(mp);
665 mp->mnt_kern_flag &= ~MNTK_NOSWAP;
666 mount_unlock(mp);
667 }
668
669 int
670 vfs_extendedsecurity(mount_t mp)
671 {
672 return mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY;
673 }
674
675 /* returns the max size of short symlink in this mount_t */
676 uint32_t
677 vfs_maxsymlen(mount_t mp)
678 {
679 return mp->mnt_maxsymlinklen;
680 }
681
682 /* set max size of short symlink on mount_t */
683 void
684 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
685 {
686 mp->mnt_maxsymlinklen = symlen;
687 }
688
689 /* return a pointer to the RO vfs_statfs associated with mount_t */
690 struct vfsstatfs *
691 vfs_statfs(mount_t mp)
692 {
693 return &mp->mnt_vfsstat;
694 }
695
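/*
 * Illustrative sketch (editor's addition, not compiled): the vfsstatfs
 * returned by vfs_statfs() is owned by the mount and should be treated as
 * read-only by KPI callers.
 */
#if 0
static uint64_t
example_free_blocks(mount_t mp)
{
	struct vfsstatfs *sp = vfs_statfs(mp);

	return sp->f_bfree;
}
#endif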
696 int
697 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
698 {
699 int error;
700
701 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) {
702 return error;
703 }
704
705 /*
706 * If we have a filesystem create time, use it to default some others.
707 */
708 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
709 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time)) {
710 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
711 }
712 }
713
714 return 0;
715 }
716
717 int
718 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
719 {
720 int error;
721
722 if (vfs_isrdonly(mp)) {
723 return EROFS;
724 }
725
726 error = VFS_SETATTR(mp, vfa, ctx);
727
728 /*
729 * If we had alternate ways of setting vfs attributes, we'd
730 * fall back here.
731 */
732
733 return error;
734 }
735
736 /* return the private data handle stored in mount_t */
737 void *
738 vfs_fsprivate(mount_t mp)
739 {
740 return mp->mnt_data;
741 }
742
743 /* set the private data handle in mount_t */
744 void
745 vfs_setfsprivate(mount_t mp, void *mntdata)
746 {
747 mount_lock(mp);
748 mp->mnt_data = mntdata;
749 mount_unlock(mp);
750 }
751
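/*
 * Illustrative sketch (editor's addition, not compiled): a filesystem
 * typically stashes its per-mount state in VFS_MOUNT and fetches it back in
 * later VFS/VNOP calls.  "struct myfs_mount" is a hypothetical structure.
 */
#if 0
static void
example_stash_mount_data(mount_t mp, struct myfs_mount *mmp)
{
	vfs_setfsprivate(mp, mmp);
}

static struct myfs_mount *
example_fetch_mount_data(mount_t mp)
{
	return (struct myfs_mount *)vfs_fsprivate(mp);
}
#endif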
752 /* query whether the mount point supports native EAs */
753 int
754 vfs_nativexattrs(mount_t mp)
755 {
756 return mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS;
757 }
758
759 /*
760 * return the block size of the underlying
761 * device associated with mount_t
762 */
763 int
764 vfs_devblocksize(mount_t mp)
765 {
766 return mp->mnt_devblocksize;
767 }
768
769 /*
770 * Returns vnode with an iocount that must be released with vnode_put()
771 */
772 vnode_t
773 vfs_vnodecovered(mount_t mp)
774 {
775 vnode_t vp = mp->mnt_vnodecovered;
776 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
777 return NULL;
778 } else {
779 return vp;
780 }
781 }
782
783 /*
784 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
785 * The iocount must be released with vnode_put(). Note that this KPI is subtle
786 * with respect to the validity of using this device vnode for anything substantial
787 * (which is discouraged). If commands are sent to the device driver without
788 * taking proper steps to ensure that the device is still open, chaos may ensue.
789 * Similarly, this routine should only be called if there is some guarantee that
790 * the mount itself is still valid.
791 */
792 vnode_t
793 vfs_devvp(mount_t mp)
794 {
795 vnode_t vp = mp->mnt_devvp;
796
797 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
798 return vp;
799 }
800
801 return NULLVP;
802 }
803
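/*
 * Illustrative sketch (editor's addition, not compiled): both
 * vfs_vnodecovered() and vfs_devvp() hand back a vnode with an iocount that
 * the caller must drop with vnode_put() when done.
 */
#if 0
static void
example_inspect_backing_vnodes(mount_t mp)
{
	vnode_t cvp = vfs_vnodecovered(mp);
	vnode_t dvp = vfs_devvp(mp);

	if (cvp != NULLVP) {
		/* ... look at the covered vnode while the iocount is held ... */
		vnode_put(cvp);
	}
	if (dvp != NULLVP) {
		/* ... use with care, per the warning above ... */
		vnode_put(dvp);
	}
}
#endif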
804 /*
805 * return the io attributes associated with mount_t
806 */
807 void
808 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
809 {
810 ioattrp->io_reserved[0] = NULL;
811 ioattrp->io_reserved[1] = NULL;
812 if (mp == NULL) {
813 ioattrp->io_maxreadcnt = MAXPHYS;
814 ioattrp->io_maxwritecnt = MAXPHYS;
815 ioattrp->io_segreadcnt = 32;
816 ioattrp->io_segwritecnt = 32;
817 ioattrp->io_maxsegreadsize = MAXPHYS;
818 ioattrp->io_maxsegwritesize = MAXPHYS;
819 ioattrp->io_devblocksize = DEV_BSIZE;
820 ioattrp->io_flags = 0;
821 ioattrp->io_max_swappin_available = 0;
822 } else {
823 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
824 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
825 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
826 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
827 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
828 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
829 ioattrp->io_devblocksize = mp->mnt_devblocksize;
830 ioattrp->io_flags = mp->mnt_ioflags;
831 ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
832 }
833 }
834
835
836 /*
837 * set the IO attributes associated with mount_t
838 */
839 void
840 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
841 {
842 if (mp == NULL) {
843 return;
844 }
845 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
846 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
847 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
848 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
849 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
850 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
851 mp->mnt_devblocksize = ioattrp->io_devblocksize;
852 mp->mnt_ioflags = ioattrp->io_flags;
853 mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
854 }
855
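/*
 * Illustrative sketch (editor's addition, not compiled): the usual pattern is
 * read-modify-write, so fields you do not care about keep their current
 * values.  The 128 KB cap is an arbitrary example value.
 */
#if 0
static void
example_cap_read_size(mount_t mp)
{
	struct vfsioattr ioattr;

	vfs_ioattr(mp, &ioattr);
	if (ioattr.io_maxreadcnt > 128 * 1024) {
		ioattr.io_maxreadcnt = 128 * 1024;
	}
	vfs_setioattr(mp, &ioattr);
}
#endif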
856 /*
857  * Add a new filesystem into the kernel, as described by the passed-in
858  * vfs_fsentry structure. It fills in the vnode dispatch vector that is
859  * to be handed to vnode_create() when vnodes are created for the FS.
860  * It returns a handle which is to be used when the FS is to be removed.
861 */
862 typedef int (*PFI)(void *);
863 extern int vfs_opv_numops;
864 errno_t
865 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
866 {
867 struct vfstable *newvfstbl = NULL;
868 int i, j;
869 int(***opv_desc_vector_p)(void *);
870 int(**opv_desc_vector)(void *);
871 struct vnodeopv_entry_desc *opve_descp;
872 int desccount;
873 int descsize;
874 PFI *descptr;
875
876 /*
877 * This routine is responsible for all the initialization that would
878 * ordinarily be done as part of the system startup;
879 */
880
881 if (vfe == (struct vfs_fsentry *)0) {
882 return EINVAL;
883 }
884
885 desccount = vfe->vfe_vopcnt;
886 if ((desccount <= 0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
887 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) {
888 return EINVAL;
889 }
890
891 /* Non-threadsafe filesystems are not supported */
892 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
893 return EINVAL;
894 }
895
896 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
897 M_WAITOK);
898 bzero(newvfstbl, sizeof(struct vfstable));
899 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
900 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
901 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) {
902 newvfstbl->vfc_typenum = maxvfstypenum++;
903 } else {
904 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
905 }
906
907 newvfstbl->vfc_refcount = 0;
908 newvfstbl->vfc_flags = 0;
909 newvfstbl->vfc_mountroot = NULL;
910 newvfstbl->vfc_next = NULL;
911 newvfstbl->vfc_vfsflags = 0;
912 if (vfe->vfe_flags & VFS_TBL64BITREADY) {
913 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
914 }
915 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2) {
916 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
917 }
918 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2) {
919 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
920 }
921 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) {
922 newvfstbl->vfc_flags |= MNT_LOCAL;
923 }
924 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) {
925 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
926 } else {
927 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
928 }
929
930 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR) {
931 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
932 }
933 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT) {
934 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
935 }
936 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED) {
937 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
938 }
939 if (vfe->vfe_flags & VFS_TBLNOMACLABEL) {
940 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
941 }
942 if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) {
943 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
944 }
945 if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME) {
946 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
947 }
948 if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT) {
949 newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
950 }
951
952 /*
953 * Allocate and init the vectors.
954 * Also handle backwards compatibility.
955 *
956 * We allocate one large block to hold all <desccount>
957 * vnode operation vectors stored contiguously.
958 */
959 /* XXX - shouldn't be M_TEMP */
960
961 descsize = desccount * vfs_opv_numops * sizeof(PFI);
962 MALLOC(descptr, PFI *, descsize,
963 M_TEMP, M_WAITOK);
964 bzero(descptr, descsize);
965
966 newvfstbl->vfc_descptr = descptr;
967 newvfstbl->vfc_descsize = descsize;
968
969 newvfstbl->vfc_sysctl = NULL;
970
971 for (i = 0; i < desccount; i++) {
972 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
973 /*
974 * Fill in the caller's pointer to the start of the i'th vector.
975 * They'll need to supply it when calling vnode_create.
976 */
977 opv_desc_vector = descptr + i * vfs_opv_numops;
978 *opv_desc_vector_p = opv_desc_vector;
979
980 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
981 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
982
983 /* Silently skip known-disabled operations */
984 if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
985 printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
986 vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
987 continue;
988 }
989
990 /*
991 * Sanity check: is this operation listed
992 * in the list of operations? We check this
993 * by seeing if its offset is zero. Since
994 * the default routine should always be listed
995 * first, it should be the only one with a zero
996 * offset. Any other operation with a zero
997 * offset is probably not listed in
998 * vfs_op_descs, and so is probably an error.
999 *
1000 * A panic here means the layer programmer
1001 * has committed the all-too common bug
1002 * of adding a new operation to the layer's
1003 * list of vnode operations but
1004 * not adding the operation to the system-wide
1005 * list of supported operations.
1006 */
1007 if (opve_descp->opve_op->vdesc_offset == 0 &&
1008 opve_descp->opve_op != VDESC(vnop_default)) {
1009 printf("vfs_fsadd: operation %s not listed in %s.\n",
1010 opve_descp->opve_op->vdesc_name,
1011 "vfs_op_descs");
1012 panic("vfs_fsadd: bad operation");
1013 }
1014 /*
1015 * Fill in this entry.
1016 */
1017 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1018 opve_descp->opve_impl;
1019 }
1020
1021
1022 /*
1023 * Finally, go back and replace unfilled routines
1024 * with their default. (Sigh, an O(n^3) algorithm. I
1025 * could make it better, but that'd be work, and n is small.)
1026 */
1027 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1028
1029 /*
1030 * Force every operations vector to have a default routine.
1031 */
1032 opv_desc_vector = *opv_desc_vector_p;
1033 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
1034 panic("vfs_fsadd: operation vector without default routine.");
1035 }
1036 for (j = 0; j < vfs_opv_numops; j++) {
1037 if (opv_desc_vector[j] == NULL) {
1038 opv_desc_vector[j] =
1039 opv_desc_vector[VOFFSET(vnop_default)];
1040 }
1041 }
1042 } /* end of each vnodeopv_desc parsing */
1043
1044
1045
1046 *handle = vfstable_add(newvfstbl);
1047
1048 if (newvfstbl->vfc_typenum <= maxvfstypenum) {
1049 maxvfstypenum = newvfstbl->vfc_typenum + 1;
1050 }
1051
1052 if (newvfstbl->vfc_vfsops->vfs_init) {
1053 struct vfsconf vfsc;
1054 bzero(&vfsc, sizeof(struct vfsconf));
1055 vfsc.vfc_reserved1 = 0;
1056 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1057 vfsc.vfc_typenum = (*handle)->vfc_typenum;
1058 vfsc.vfc_refcount = (*handle)->vfc_refcount;
1059 vfsc.vfc_flags = (*handle)->vfc_flags;
1060 vfsc.vfc_reserved2 = 0;
1061 vfsc.vfc_reserved3 = 0;
1062
1063 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1064 }
1065
1066 FREE(newvfstbl, M_TEMP);
1067
1068 return 0;
1069 }
1070
1071 /*
1072 * Removes the filesystem from kernel.
1073 * The argument passed in is the handle that was given when
1074 * file system was added
1075 */
1076 errno_t
1077 vfs_fsremove(vfstable_t handle)
1078 {
1079 struct vfstable * vfstbl = (struct vfstable *)handle;
1080 void *old_desc = NULL;
1081 errno_t err;
1082
1083 /* Preflight check for any mounts */
1084 mount_list_lock();
1085 if (vfstbl->vfc_refcount != 0) {
1086 mount_list_unlock();
1087 return EBUSY;
1088 }
1089
1090 /*
1091 * save the old descriptor; the free cannot occur unconditionally,
1092 * since vfstable_del() may fail.
1093 */
1094 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1095 old_desc = vfstbl->vfc_descptr;
1096 }
1097 err = vfstable_del(vfstbl);
1098
1099 mount_list_unlock();
1100
1101 /* free the descriptor if the delete was successful */
1102 if (err == 0 && old_desc) {
1103 FREE(old_desc, M_TEMP);
1104 }
1105
1106 return err;
1107 }
1108
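/*
 * Illustrative registration sketch (editor's addition, not compiled).
 * "myfs_vfsops", "myfs_vnodeop_opv_desc" and the "myfs" name are hypothetical;
 * a real filesystem supplies its own vfsops and vnodeopv_desc tables.
 */
#if 0
static vfstable_t myfs_vfstable;

static struct vnodeopv_desc *myfs_opvdescs[] = {
	&myfs_vnodeop_opv_desc,
};

static errno_t
myfs_register(void)
{
	struct vfs_fsentry vfe;

	bzero(&vfe, sizeof(vfe));
	vfe.vfe_vfsops = &myfs_vfsops;
	vfe.vfe_vopcnt = 1;				/* one entry in myfs_opvdescs */
	vfe.vfe_opvdescs = myfs_opvdescs;
	strlcpy(vfe.vfe_fsname, "myfs", sizeof(vfe.vfe_fsname));
	vfe.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM;

	return vfs_fsadd(&vfe, &myfs_vfstable);
}

static errno_t
myfs_unregister(void)
{
	return vfs_fsremove(myfs_vfstable);	/* EBUSY while still mounted */
}
#endif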
1109 void
1110 vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
1111 {
1112 mp->mnt_fsowner = uid;
1113 mp->mnt_fsgroup = gid;
1114 }
1115
1116 /*
1117 * Callers should be careful how they use this; accessing
1118 * mnt_last_write_completed_timestamp is not thread-safe. Writing to
1119 * it isn't either. Point is: be prepared to deal with strange values
1120 * being returned.
1121 */
1122 uint64_t
1123 vfs_idle_time(mount_t mp)
1124 {
1125 if (mp->mnt_pending_write_size) {
1126 return 0;
1127 }
1128
1129 struct timeval now;
1130
1131 microuptime(&now);
1132
1133 return (now.tv_sec
1134 - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
1135 + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec;
1136 }
1137
1138 int
1139 vfs_context_pid(vfs_context_t ctx)
1140 {
1141 return proc_pid(vfs_context_proc(ctx));
1142 }
1143
1144 int
1145 vfs_context_suser(vfs_context_t ctx)
1146 {
1147 return suser(ctx->vc_ucred, NULL);
1148 }
1149
1150 /*
1151 * Return bit field of signals posted to all threads in the context's process.
1152 *
1153 * XXX Signals should be tied to threads, not processes, for most uses of this
1154 * XXX call.
1155 */
1156 int
1157 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1158 {
1159 proc_t p = vfs_context_proc(ctx);
1160 if (p) {
1161 return proc_pendingsignals(p, mask);
1162 }
1163 return 0;
1164 }
1165
1166 int
1167 vfs_context_is64bit(vfs_context_t ctx)
1168 {
1169 proc_t proc = vfs_context_proc(ctx);
1170
1171 if (proc) {
1172 return proc_is64bit(proc);
1173 }
1174 return 0;
1175 }
1176
1177
1178 /*
1179 * vfs_context_proc
1180 *
1181 * Description: Given a vfs_context_t, return the proc_t associated with it.
1182 *
1183 * Parameters: vfs_context_t The context to use
1184 *
1185 * Returns: proc_t The process for this context
1186 *
1187 * Notes: This function will return the current_proc() if any of the
1188 * following conditions are true:
1189 *
1190 * o The supplied context pointer is NULL
1191 * o There is no Mach thread associated with the context
1192 * o There is no Mach task associated with the Mach thread
1193 * o There is no proc_t associated with the Mach task
1194 * o The proc_t has no per process open file table
1195 * o The proc_t is post-vfork()
1196 *
1197 * This causes this function to return a value matching as
1198 * closely as possible the previous behaviour, while at the
1199 * same time avoiding the task lending that results from vfork()
1200 */
1201 proc_t
1202 vfs_context_proc(vfs_context_t ctx)
1203 {
1204 proc_t proc = NULL;
1205
1206 if (ctx != NULL && ctx->vc_thread != NULL) {
1207 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1208 }
1209 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK))) {
1210 proc = NULL;
1211 }
1212
1213 return proc == NULL ? current_proc() : proc;
1214 }
1215
1216 /*
1217 * vfs_context_get_special_port
1218 *
1219 * Description: Return the requested special port from the task associated
1220 * with the given context.
1221 *
1222 * Parameters: vfs_context_t The context to use
1223 * int Index of special port
1224 * ipc_port_t * Pointer to returned port
1225 *
1226 * Returns: kern_return_t see task_get_special_port()
1227 */
1228 kern_return_t
1229 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1230 {
1231 task_t task = NULL;
1232
1233 if (ctx != NULL && ctx->vc_thread != NULL) {
1234 task = get_threadtask(ctx->vc_thread);
1235 }
1236
1237 return task_get_special_port(task, which, portp);
1238 }
1239
1240 /*
1241 * vfs_context_set_special_port
1242 *
1243 * Description: Set the requested special port in the task associated
1244 * with the given context.
1245 *
1246 * Parameters: vfs_context_t The context to use
1247 * int Index of special port
1248 * ipc_port_t New special port
1249 *
1250 * Returns: kern_return_t see task_set_special_port()
1251 */
1252 kern_return_t
1253 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1254 {
1255 task_t task = NULL;
1256
1257 if (ctx != NULL && ctx->vc_thread != NULL) {
1258 task = get_threadtask(ctx->vc_thread);
1259 }
1260
1261 return task_set_special_port(task, which, port);
1262 }
1263
1264 /*
1265 * vfs_context_thread
1266 *
1267 * Description: Return the Mach thread associated with a vfs_context_t
1268 *
1269 * Parameters: vfs_context_t The context to use
1270 *
1271 * Returns: thread_t The thread for this context, or
1272 * NULL, if there is not one.
1273 *
1274 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1275 * as a result of a static vfs_context_t declaration in a function
1276 * and will result in this function returning NULL.
1277 *
1278 * This is intentional; this function should NOT return the
1279 * current_thread() in this case.
1280 */
1281 thread_t
1282 vfs_context_thread(vfs_context_t ctx)
1283 {
1284 return ctx->vc_thread;
1285 }
1286
1287
1288 /*
1289 * vfs_context_cwd
1290 *
1291 * Description: Returns a reference on the vnode for the current working
1292 * directory for the supplied context
1293 *
1294 * Parameters: vfs_context_t The context to use
1295 *
1296 * Returns: vnode_t The current working directory
1297 * for this context
1298 *
1299 * Notes: The function first attempts to obtain the current directory
1300 * from the thread, and if it is not present there, falls back
1301 * to obtaining it from the process instead. If it can't be
1302 * obtained from either place, we return NULLVP.
1303 */
1304 vnode_t
1305 vfs_context_cwd(vfs_context_t ctx)
1306 {
1307 vnode_t cwd = NULLVP;
1308
1309 if (ctx != NULL && ctx->vc_thread != NULL) {
1310 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1311 proc_t proc;
1312
1313 /*
1314 * Get the cwd from the thread; if there isn't one, get it
1315 * from the process, instead.
1316 */
1317 if ((cwd = uth->uu_cdir) == NULLVP &&
1318 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1319 proc->p_fd != NULL) {
1320 cwd = proc->p_fd->fd_cdir;
1321 }
1322 }
1323
1324 return cwd;
1325 }
1326
1327 /*
1328 * vfs_context_create
1329 *
1330 * Description: Allocate and initialize a new context.
1331 *
1332 * Parameters: vfs_context_t: Context to copy, or NULL for new
1333 *
1334 * Returns: Pointer to new context
1335 *
1336 * Notes: Copy cred and thread from argument, if available; else
1337 * initialize with current thread and new cred. Returns
1338 * with a reference held on the credential.
1339 */
1340 vfs_context_t
1341 vfs_context_create(vfs_context_t ctx)
1342 {
1343 vfs_context_t newcontext;
1344
1345 newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));
1346
1347 if (newcontext) {
1348 kauth_cred_t safecred;
1349 if (ctx) {
1350 newcontext->vc_thread = ctx->vc_thread;
1351 safecred = ctx->vc_ucred;
1352 } else {
1353 newcontext->vc_thread = current_thread();
1354 safecred = kauth_cred_get();
1355 }
1356 if (IS_VALID_CRED(safecred)) {
1357 kauth_cred_ref(safecred);
1358 }
1359 newcontext->vc_ucred = safecred;
1360 return newcontext;
1361 }
1362 return NULL;
1363 }
1364
1365
1366 vfs_context_t
1367 vfs_context_current(void)
1368 {
1369 vfs_context_t ctx = NULL;
1370 volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1371
1372 if (ut != NULL) {
1373 if (ut->uu_context.vc_ucred != NULL) {
1374 ctx = &ut->uu_context;
1375 }
1376 }
1377
1378 return ctx == NULL ? vfs_context_kernel() : ctx;
1379 }
1380
1381
1382 /*
1383 * XXX Do not ask
1384 *
1385 * Dangerous hack - adopt the first kernel thread as the current thread, to
1386 * get to the vfs_context_t in the uthread associated with a kernel thread.
1387 * This is used by UDF to make the call into IOCDMediaBSDClient,
1388 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1389 * ioctl() is being called from kernel or user space (and all this because
1390 * we do not pass threads into our ioctl()'s, instead of processes).
1391 *
1392 * This is also used by imageboot_setup(), called early from bsd_init() after
1393 * kernproc has been given a credential.
1394 *
1395 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1396 * of many Mach headers to do the reference directly rather than indirectly;
1397  * we will need to forego this convenience when we retire proc_thread().
1398 */
1399 static struct vfs_context kerncontext;
1400 vfs_context_t
1401 vfs_context_kernel(void)
1402 {
1403 if (kerncontext.vc_ucred == NOCRED) {
1404 kerncontext.vc_ucred = kernproc->p_ucred;
1405 }
1406 if (kerncontext.vc_thread == NULL) {
1407 kerncontext.vc_thread = proc_thread(kernproc);
1408 }
1409
1410 return &kerncontext;
1411 }
1412
1413
1414 int
1415 vfs_context_rele(vfs_context_t ctx)
1416 {
1417 if (ctx) {
1418 if (IS_VALID_CRED(ctx->vc_ucred)) {
1419 kauth_cred_unref(&ctx->vc_ucred);
1420 }
1421 kfree(ctx, sizeof(struct vfs_context));
1422 }
1423 return 0;
1424 }
1425
1426
1427 kauth_cred_t
1428 vfs_context_ucred(vfs_context_t ctx)
1429 {
1430 return ctx->vc_ucred;
1431 }
1432
1433 /*
1434 * Return true if the context is owned by the superuser.
1435 */
1436 int
1437 vfs_context_issuser(vfs_context_t ctx)
1438 {
1439 return kauth_cred_issuser(vfs_context_ucred(ctx));
1440 }
1441
1442 int
1443 vfs_context_iskernel(vfs_context_t ctx)
1444 {
1445 return ctx == &kerncontext;
1446 }
1447
1448 /*
1449 * Given a context, for all fields of vfs_context_t which
1450 * are not held with a reference, set those fields to the
1451 * values for the current execution context. Currently, this
1452 * just means the vc_thread.
1453 *
1454 * Returns: 0 for success, nonzero for failure
1455 *
1456 * The intended use is:
1457 * 1. vfs_context_create() gets the caller a context
1458 * 2. vfs_context_bind() sets the unrefcounted data
1459 * 3. vfs_context_rele() releases the context
1460 *
1461 */
1462 int
1463 vfs_context_bind(vfs_context_t ctx)
1464 {
1465 ctx->vc_thread = current_thread();
1466 return 0;
1467 }
1468
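/*
 * Illustrative sketch (editor's addition, not compiled) of the
 * create/bind/rele pattern described above.
 */
#if 0
static int
example_context_lifecycle(void)
{
	vfs_context_t ctx;

	ctx = vfs_context_create(NULL);		/* 1. new context; cred is referenced */
	if (ctx == NULL) {
		return ENOMEM;
	}
	(void) vfs_context_bind(ctx);		/* 2. adopt the calling thread */

	/* ... hand ctx to VFS KPIs here ... */

	(void) vfs_context_rele(ctx);		/* 3. drop the cred reference and free */
	return 0;
}
#endif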
1469 int
1470 vfs_isswapmount(mount_t mnt)
1471 {
1472 return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
1473 }
1474
1475 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1476
1477
1478 /*
1479 * Convert between vnode types and inode formats (since POSIX.1
1480 * defines mode word of stat structure in terms of inode formats).
1481 */
1482 enum vtype
1483 vnode_iftovt(int mode)
1484 {
1485 return iftovt_tab[((mode) & S_IFMT) >> 12];
1486 }
1487
1488 int
1489 vnode_vttoif(enum vtype indx)
1490 {
1491 return vttoif_tab[(int)(indx)];
1492 }
1493
1494 int
1495 vnode_makeimode(int indx, int mode)
1496 {
1497 return (int)(VTTOIF(indx) | (mode));
1498 }
1499
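/*
 * Illustrative round trip (editor's addition, not compiled) of the
 * conversions above.
 */
#if 0
static void
example_mode_conversion(void)
{
	enum vtype t = vnode_iftovt(S_IFDIR | 0755);	/* yields VDIR */
	int mode = vnode_makeimode(t, 0755);		/* rebuilds S_IFDIR | 0755 */

	(void)mode;
}
#endif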
1500
1501 /*
1502 * vnode manipulation functions.
1503 */
1504
1505 /* returns the system root vnode with an iocount; it should be released using vnode_put() */
1506 vnode_t
1507 vfs_rootvnode(void)
1508 {
1509 int error;
1510
1511 error = vnode_get(rootvnode);
1512 if (error) {
1513 return (vnode_t)0;
1514 } else {
1515 return rootvnode;
1516 }
1517 }
1518
1519
1520 uint32_t
1521 vnode_vid(vnode_t vp)
1522 {
1523 return (uint32_t)(vp->v_id);
1524 }
1525
1526 mount_t
1527 vnode_mount(vnode_t vp)
1528 {
1529 return vp->v_mount;
1530 }
1531
1532 #if CONFIG_IOSCHED
1533 vnode_t
1534 vnode_mountdevvp(vnode_t vp)
1535 {
1536 if (vp->v_mount) {
1537 return vp->v_mount->mnt_devvp;
1538 } else {
1539 return (vnode_t)0;
1540 }
1541 }
1542 #endif
1543
1544 mount_t
1545 vnode_mountedhere(vnode_t vp)
1546 {
1547 mount_t mp;
1548
1549 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1550 (mp->mnt_vnodecovered == vp)) {
1551 return mp;
1552 } else {
1553 return (mount_t)NULL;
1554 }
1555 }
1556
1557 /* returns vnode type of vnode_t */
1558 enum vtype
1559 vnode_vtype(vnode_t vp)
1560 {
1561 return vp->v_type;
1562 }
1563
1564 /* returns FS specific node saved in vnode */
1565 void *
1566 vnode_fsnode(vnode_t vp)
1567 {
1568 return vp->v_data;
1569 }
1570
1571 void
1572 vnode_clearfsnode(vnode_t vp)
1573 {
1574 vp->v_data = NULL;
1575 }
1576
1577 dev_t
1578 vnode_specrdev(vnode_t vp)
1579 {
1580 return vp->v_rdev;
1581 }
1582
1583
1584 /* Accessor functions */
1585 /* is vnode_t a root vnode */
1586 int
1587 vnode_isvroot(vnode_t vp)
1588 {
1589 return (vp->v_flag & VROOT)? 1 : 0;
1590 }
1591
1592 /* is vnode_t a system vnode */
1593 int
1594 vnode_issystem(vnode_t vp)
1595 {
1596 return (vp->v_flag & VSYSTEM)? 1 : 0;
1597 }
1598
1599 /* is vnode_t a swap file vnode */
1600 int
1601 vnode_isswap(vnode_t vp)
1602 {
1603 return (vp->v_flag & VSWAP)? 1 : 0;
1604 }
1605
1606 /* is vnode_t a tty */
1607 int
1608 vnode_istty(vnode_t vp)
1609 {
1610 return (vp->v_flag & VISTTY) ? 1 : 0;
1611 }
1612
1613 /* is a mount operation in progress on this vnode_t */
1614 int
1615 vnode_ismount(vnode_t vp)
1616 {
1617 return (vp->v_flag & VMOUNT)? 1 : 0;
1618 }
1619
1620 /* is this vnode being recycled now */
1621 int
1622 vnode_isrecycled(vnode_t vp)
1623 {
1624 int ret;
1625
1626 vnode_lock_spin(vp);
1627 ret = (vp->v_lflag & (VL_TERMINATE | VL_DEAD))? 1 : 0;
1628 vnode_unlock(vp);
1629 return ret;
1630 }
1631
1632 /* vnode was created by background task requesting rapid aging
1633 * and has not since been referenced by a normal task */
1634 int
1635 vnode_israge(vnode_t vp)
1636 {
1637 return (vp->v_flag & VRAGE)? 1 : 0;
1638 }
1639
1640 int
1641 vnode_needssnapshots(vnode_t vp)
1642 {
1643 return (vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0;
1644 }
1645
1646
1647 /* Check the process/thread to see if we should skip atime updates */
1648 int
1649 vfs_ctx_skipatime(vfs_context_t ctx)
1650 {
1651 struct uthread *ut;
1652 proc_t proc;
1653 thread_t thr;
1654
1655 proc = vfs_context_proc(ctx);
1656 thr = vfs_context_thread(ctx);
1657
1658 /* Validate pointers in case we were invoked via a kernel context */
1659 if (thr && proc) {
1660 ut = get_bsdthread_info(thr);
1661
1662 if (proc->p_lflag & P_LRAGE_VNODES) {
1663 return 1;
1664 }
1665
1666 if (ut) {
1667 if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) {
1668 return 1;
1669 }
1670 }
1671
1672 if (proc->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) {
1673 return 1;
1674 }
1675 }
1676 return 0;
1677 }
1678
1679 /* is vnode_t marked to not keep data cached once it's been consumed */
1680 int
1681 vnode_isnocache(vnode_t vp)
1682 {
1683 return (vp->v_flag & VNOCACHE_DATA)? 1 : 0;
1684 }
1685
1686 /*
1687 * has sequential readahead been disabled on this vnode
1688 */
1689 int
1690 vnode_isnoreadahead(vnode_t vp)
1691 {
1692 return (vp->v_flag & VRAOFF)? 1 : 0;
1693 }
1694
1695 int
1696 vnode_is_openevt(vnode_t vp)
1697 {
1698 return (vp->v_flag & VOPENEVT)? 1 : 0;
1699 }
1700
1701 /* is vnode_t a standard one? */
1702 int
1703 vnode_isstandard(vnode_t vp)
1704 {
1705 return (vp->v_flag & VSTANDARD)? 1 : 0;
1706 }
1707
1708 /* don't vflush() if SKIPSYSTEM */
1709 int
1710 vnode_isnoflush(vnode_t vp)
1711 {
1712 return (vp->v_flag & VNOFLUSH)? 1 : 0;
1713 }
1714
1715 /* is vnode_t a regular file */
1716 int
1717 vnode_isreg(vnode_t vp)
1718 {
1719 return (vp->v_type == VREG)? 1 : 0;
1720 }
1721
1722 /* is vnode_t a directory? */
1723 int
1724 vnode_isdir(vnode_t vp)
1725 {
1726 return (vp->v_type == VDIR)? 1 : 0;
1727 }
1728
1729 /* is vnode_t a symbolic link ? */
1730 int
1731 vnode_islnk(vnode_t vp)
1732 {
1733 return (vp->v_type == VLNK)? 1 : 0;
1734 }
1735
1736 int
1737 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
1738 {
1739 struct nameidata *ndp = cnp->cn_ndp;
1740
1741 if (ndp == NULL) {
1742 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1743 }
1744
1745 if (vnode_isdir(vp)) {
1746 if (vp->v_mountedhere != NULL) {
1747 goto yes;
1748 }
1749
1750 #if CONFIG_TRIGGERS
1751 if (vp->v_resolve) {
1752 goto yes;
1753 }
1754 #endif /* CONFIG_TRIGGERS */
1755 }
1756
1757
1758 if (vnode_islnk(vp)) {
1759 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1760 if (cnp->cn_flags & FOLLOW) {
1761 goto yes;
1762 }
1763 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
1764 goto yes;
1765 }
1766 }
1767
1768 return 0;
1769
1770 yes:
1771 ndp->ni_flag |= NAMEI_CONTLOOKUP;
1772 return EKEEPLOOKING;
1773 }
1774
1775 /* is vnode_t a fifo ? */
1776 int
1777 vnode_isfifo(vnode_t vp)
1778 {
1779 return (vp->v_type == VFIFO)? 1 : 0;
1780 }
1781
1782 /* is vnode_t a block device? */
1783 int
1784 vnode_isblk(vnode_t vp)
1785 {
1786 return (vp->v_type == VBLK)? 1 : 0;
1787 }
1788
1789 int
1790 vnode_isspec(vnode_t vp)
1791 {
1792 return ((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0;
1793 }
1794
1795 /* is vnode_t a char device? */
1796 int
1797 vnode_ischr(vnode_t vp)
1798 {
1799 return (vp->v_type == VCHR)? 1 : 0;
1800 }
1801
1802 /* is vnode_t a socket? */
1803 int
1804 vnode_issock(vnode_t vp)
1805 {
1806 return (vp->v_type == VSOCK)? 1 : 0;
1807 }
1808
1809 /* is vnode_t a device with multiple active vnodes referring to it? */
1810 int
1811 vnode_isaliased(vnode_t vp)
1812 {
1813 enum vtype vt = vp->v_type;
1814 if (!((vt == VCHR) || (vt == VBLK))) {
1815 return 0;
1816 } else {
1817 return vp->v_specflags & SI_ALIASED;
1818 }
1819 }
1820
1821 /* is vnode_t a named stream? */
1822 int
1823 vnode_isnamedstream(
1824 #if NAMEDSTREAMS
1825 vnode_t vp
1826 #else
1827 __unused vnode_t vp
1828 #endif
1829 )
1830 {
1831 #if NAMEDSTREAMS
1832 return (vp->v_flag & VISNAMEDSTREAM) ? 1 : 0;
1833 #else
1834 return 0;
1835 #endif
1836 }
1837
1838 int
1839 vnode_isshadow(
1840 #if NAMEDSTREAMS
1841 vnode_t vp
1842 #else
1843 __unused vnode_t vp
1844 #endif
1845 )
1846 {
1847 #if NAMEDSTREAMS
1848 return (vp->v_flag & VISSHADOW) ? 1 : 0;
1849 #else
1850 return 0;
1851 #endif
1852 }
1853
1854 /* does vnode have associated named stream vnodes ? */
1855 int
1856 vnode_hasnamedstreams(
1857 #if NAMEDSTREAMS
1858 vnode_t vp
1859 #else
1860 __unused vnode_t vp
1861 #endif
1862 )
1863 {
1864 #if NAMEDSTREAMS
1865 return (vp->v_lflag & VL_HASSTREAMS) ? 1 : 0;
1866 #else
1867 return 0;
1868 #endif
1869 }
1870 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1871 void
1872 vnode_setnocache(vnode_t vp)
1873 {
1874 vnode_lock_spin(vp);
1875 vp->v_flag |= VNOCACHE_DATA;
1876 vnode_unlock(vp);
1877 }
1878
1879 void
1880 vnode_clearnocache(vnode_t vp)
1881 {
1882 vnode_lock_spin(vp);
1883 vp->v_flag &= ~VNOCACHE_DATA;
1884 vnode_unlock(vp);
1885 }
1886
1887 void
1888 vnode_set_openevt(vnode_t vp)
1889 {
1890 vnode_lock_spin(vp);
1891 vp->v_flag |= VOPENEVT;
1892 vnode_unlock(vp);
1893 }
1894
1895 void
1896 vnode_clear_openevt(vnode_t vp)
1897 {
1898 vnode_lock_spin(vp);
1899 vp->v_flag &= ~VOPENEVT;
1900 vnode_unlock(vp);
1901 }
1902
1903
1904 void
1905 vnode_setnoreadahead(vnode_t vp)
1906 {
1907 vnode_lock_spin(vp);
1908 vp->v_flag |= VRAOFF;
1909 vnode_unlock(vp);
1910 }
1911
1912 void
1913 vnode_clearnoreadahead(vnode_t vp)
1914 {
1915 vnode_lock_spin(vp);
1916 vp->v_flag &= ~VRAOFF;
1917 vnode_unlock(vp);
1918 }
1919
1920 int
1921 vnode_isfastdevicecandidate(vnode_t vp)
1922 {
1923 return (vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0;
1924 }
1925
1926 void
1927 vnode_setfastdevicecandidate(vnode_t vp)
1928 {
1929 vnode_lock_spin(vp);
1930 vp->v_flag |= VFASTDEVCANDIDATE;
1931 vnode_unlock(vp);
1932 }
1933
1934 void
1935 vnode_clearfastdevicecandidate(vnode_t vp)
1936 {
1937 vnode_lock_spin(vp);
1938 vp->v_flag &= ~VFASTDEVCANDIDATE;
1939 vnode_unlock(vp);
1940 }
1941
1942 int
1943 vnode_isautocandidate(vnode_t vp)
1944 {
1945 return (vp->v_flag & VAUTOCANDIDATE)? 1 : 0;
1946 }
1947
1948 void
1949 vnode_setautocandidate(vnode_t vp)
1950 {
1951 vnode_lock_spin(vp);
1952 vp->v_flag |= VAUTOCANDIDATE;
1953 vnode_unlock(vp);
1954 }
1955
1956 void
1957 vnode_clearautocandidate(vnode_t vp)
1958 {
1959 vnode_lock_spin(vp);
1960 vp->v_flag &= ~VAUTOCANDIDATE;
1961 vnode_unlock(vp);
1962 }
1963
1964
1965
1966
1967 /* mark vnode_t to skip vflush() if SKIPSYSTEM */
1968 void
1969 vnode_setnoflush(vnode_t vp)
1970 {
1971 vnode_lock_spin(vp);
1972 vp->v_flag |= VNOFLUSH;
1973 vnode_unlock(vp);
1974 }
1975
1976 void
1977 vnode_clearnoflush(vnode_t vp)
1978 {
1979 vnode_lock_spin(vp);
1980 vp->v_flag &= ~VNOFLUSH;
1981 vnode_unlock(vp);
1982 }
1983
1984
1985 /* is vnode_t a blkdevice and has a FS mounted on it */
1986 int
1987 vnode_ismountedon(vnode_t vp)
1988 {
1989 return (vp->v_specflags & SI_MOUNTEDON)? 1 : 0;
1990 }
1991
1992 void
1993 vnode_setmountedon(vnode_t vp)
1994 {
1995 vnode_lock_spin(vp);
1996 vp->v_specflags |= SI_MOUNTEDON;
1997 vnode_unlock(vp);
1998 }
1999
2000 void
2001 vnode_clearmountedon(vnode_t vp)
2002 {
2003 vnode_lock_spin(vp);
2004 vp->v_specflags &= ~SI_MOUNTEDON;
2005 vnode_unlock(vp);
2006 }
2007
2008
2009 void
2010 vnode_settag(vnode_t vp, int tag)
2011 {
2012 vp->v_tag = tag;
2013 }
2014
2015 int
2016 vnode_tag(vnode_t vp)
2017 {
2018 return vp->v_tag;
2019 }
2020
2021 vnode_t
2022 vnode_parent(vnode_t vp)
2023 {
2024 return vp->v_parent;
2025 }
2026
2027 void
2028 vnode_setparent(vnode_t vp, vnode_t dvp)
2029 {
2030 vp->v_parent = dvp;
2031 }
2032
2033 void
2034 vnode_setname(vnode_t vp, char * name)
2035 {
2036 vp->v_name = name;
2037 }
2038
2039 /* return the FS name that was registered when the FS was added to the kernel */
2040 void
2041 vnode_vfsname(vnode_t vp, char * buf)
2042 {
2043 strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
2044 }
2045
2046 /* return the FS type number */
2047 int
2048 vnode_vfstypenum(vnode_t vp)
2049 {
2050 return vp->v_mount->mnt_vtable->vfc_typenum;
2051 }
2052
2053 int
2054 vnode_vfs64bitready(vnode_t vp)
2055 {
2056 /*
2057 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2058 */
2059 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
2060 return 1;
2061 } else {
2062 return 0;
2063 }
2064 }
2065
2066
2067
2068 /* return the visible flags on associated mount point of vnode_t */
2069 uint32_t
2070 vnode_vfsvisflags(vnode_t vp)
2071 {
2072 return vp->v_mount->mnt_flag & MNT_VISFLAGMASK;
2073 }
2074
2075 /* return the command modifier flags on associated mount point of vnode_t */
2076 uint32_t
2077 vnode_vfscmdflags(vnode_t vp)
2078 {
2079 return vp->v_mount->mnt_flag & MNT_CMDFLAGS;
2080 }
2081
2082 /* return the max size of a short symlink for vnode_t's mount point */
2083 uint32_t
2084 vnode_vfsmaxsymlen(vnode_t vp)
2085 {
2086 return vp->v_mount->mnt_maxsymlinklen;
2087 }
2088
2089 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2090 struct vfsstatfs *
2091 vnode_vfsstatfs(vnode_t vp)
2092 {
2093 return &vp->v_mount->mnt_vfsstat;
2094 }
2095
2096 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
2097 void *
2098 vnode_vfsfsprivate(vnode_t vp)
2099 {
2100 return vp->v_mount->mnt_data;
2101 }
2102
2103 /* is vnode_t in a rdonly mounted FS */
2104 int
2105 vnode_vfsisrdonly(vnode_t vp)
2106 {
2107 return (vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0;
2108 }
2109
2110 int
2111 vnode_compound_rename_available(vnode_t vp)
2112 {
2113 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
2114 }
2115 int
2116 vnode_compound_rmdir_available(vnode_t vp)
2117 {
2118 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
2119 }
2120 int
2121 vnode_compound_mkdir_available(vnode_t vp)
2122 {
2123 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
2124 }
2125 int
2126 vnode_compound_remove_available(vnode_t vp)
2127 {
2128 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
2129 }
2130 int
2131 vnode_compound_open_available(vnode_t vp)
2132 {
2133 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
2134 }
2135
2136 int
2137 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2138 {
2139 return (vp->v_mount->mnt_compound_ops & opid) != 0;
2140 }
2141
2142 /*
2143 * Returns vnode ref to current working directory; if a per-thread current
2144 * working directory is in effect, return that instead of the per process one.
2145 *
2146 * XXX Published, but not used.
2147 */
2148 vnode_t
2149 current_workingdir(void)
2150 {
2151 return vfs_context_cwd(vfs_context_current());
2152 }
2153
2154 /* returns vnode ref to current root(chroot) directory */
2155 vnode_t
2156 current_rootdir(void)
2157 {
2158 proc_t proc = current_proc();
2159 struct vnode * vp;
2160
2161 if ((vp = proc->p_fd->fd_rdir)) {
2162 if ((vnode_getwithref(vp))) {
2163 return NULL;
2164 }
2165 }
2166 return vp;
2167 }
2168
2169 /*
2170 * Get a filesec and optional acl contents from an extended attribute.
2171  * The function will attempt to retrieve ACL, UUID, and GUID information using a
2172 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2173 *
2174 * Parameters: vp The vnode on which to operate.
2175 * fsecp The filesec (and ACL, if any) being
2176 * retrieved.
2177 * ctx The vnode context in which the
2178 * operation is to be attempted.
2179 *
2180 * Returns: 0 Success
2181 * !0 errno value
2182 *
2183 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2184 * host byte order, as will be the ACL contents, if any.
2185  * Internally, we will canonicalize these values from network (PPC)
2186 * byte order after we retrieve them so that the on-disk contents
2187 * of the extended attribute are identical for both PPC and Intel
2188 * (if we were not being required to provide this service via
2189 * fallback, this would be the job of the filesystem
2190 * 'VNOP_GETATTR' call).
2191 *
2192  * We use ntohl() because it swaps bytes on Intel machines and has no
2193  * effect on PPC machines; this guarantees the same on-disk byte order on both.
2194 *
2195  * XXX: Deleting rather than ignoring a corrupt security structure is
2196 * probably the only way to reset it without assistance from an
2197 * file system integrity checking tool. Right now we ignore it.
2198 *
2199  * XXX: We should enumerate the possible errno values here, and where
2200 * in the code they originated.
2201 */
2202 static int
2203 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2204 {
2205 kauth_filesec_t fsec;
2206 uio_t fsec_uio;
2207 size_t fsec_size;
2208 size_t xsize, rsize;
2209 int error;
2210 uint32_t host_fsec_magic;
2211 uint32_t host_acl_entrycount;
2212
2213 fsec = NULL;
2214 fsec_uio = NULL;
2215
2216 /* find out how big the EA is */
2217 error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
2218 if (error != 0) {
2219 /* no EA, no filesec */
2220 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2221 error = 0;
2222 }
2223 /* either way, we are done */
2224 goto out;
2225 }
2226
2227 /*
2228 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2229  * ACE entry ACL, and if it's larger than that, it must have the right
2230 * number of bytes such that it contains an atomic number of ACEs,
2231 * rather than partial entries. Otherwise, we ignore it.
2232 */
2233 if (!KAUTH_FILESEC_VALID(xsize)) {
2234 KAUTH_DEBUG(" ERROR - Bogus kauth_filesec_t: %ld bytes", xsize);
2235 error = 0;
2236 goto out;
2237 }
2238
2239 /* how many entries would fit? */
2240 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2241 if (fsec_size > KAUTH_ACL_MAX_ENTRIES) {
2242 KAUTH_DEBUG(" ERROR - Bogus (too large) kauth_filesec_t: %ld bytes", xsize);
2243 error = 0;
2244 goto out;
2245 }
2246
2247 /* get buffer and uio */
2248 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
2249 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2250 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2251 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2252 error = ENOMEM;
2253 goto out;
2254 }
2255
2256 /* read security attribute */
2257 rsize = xsize;
2258 if ((error = vn_getxattr(vp,
2259 KAUTH_FILESEC_XATTR,
2260 fsec_uio,
2261 &rsize,
2262 XATTR_NOSECURITY,
2263 ctx)) != 0) {
2264 /* no attribute - no security data */
2265 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2266 error = 0;
2267 }
2268 /* either way, we are done */
2269 goto out;
2270 }
2271
2272 /*
2273 * Validate security structure; the validation must take place in host
2274 * byte order. If it's corrupt, we will just ignore it.
2275 */
2276
2277 /* Validate the size before trying to convert it */
2278 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2279 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2280 goto out;
2281 }
2282
2283 /* Validate the magic number before trying to convert it */
2284 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2285 if (fsec->fsec_magic != host_fsec_magic) {
2286 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2287 goto out;
2288 }
2289
2290 /* Validate the entry count before trying to convert it. */
2291 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2292 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2293 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2294 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2295 goto out;
2296 }
2297 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2298 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2299 goto out;
2300 }
2301 }
2302
2303 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2304
2305 *fsecp = fsec;
2306 fsec = NULL;
2307 error = 0;
2308 out:
2309 if (fsec != NULL) {
2310 kauth_filesec_free(fsec);
2311 }
2312 if (fsec_uio != NULL) {
2313 uio_free(fsec_uio);
2314 }
2315 if (error) {
2316 *fsecp = NULL;
2317 }
2318 return error;
2319 }
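
/*
 * Illustrative sketch (not part of this file): the byte-order notes above
 * rely on ntohl()/htonl() being a byte swap (their own inverse) on
 * little-endian hosts and the identity on big-endian hosts, so applying them
 * symmetrically on read and write keeps the on-disk representation stable.
 * The function below is hypothetical.
 */
#if 0
static void
example_endian_roundtrip(void)
{
	uint32_t disk_magic = htonl(KAUTH_FILESEC_MAGIC);  /* byte order as stored in the xattr */
	uint32_t host_magic = ntohl(disk_magic);           /* equals KAUTH_FILESEC_MAGIC on any host */

	(void)host_magic;
}
#endif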
2320
2321 /*
2322 * Set a filesec and optional acl contents into an extended attribute.
2323 * Function will attempt to store ACL, UUID, and GUID information using a
2324 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2325 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2326 * original caller supplied an acl.
2327 *
2328 * Parameters: vp The vnode on which to operate.
2329 * fsec The filesec being set.
2330 * acl The acl to be associated with 'fsec'.
2331 * ctx The vnode context in which the
2332 * operation is to be attempted.
2333 *
2334 * Returns: 0 Success
2335 * !0 errno value
2336 *
2337 * Notes: Both the fsec and the acl are always valid.
2338 *
2339 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2340 * as are the acl contents, if they are used. Internally, we will
2341 * convert these values into network (PPC) byte order before we
2342 * attempt to write them so that the on-disk contents of the
2343 * extended attribute are identical for both PPC and Intel (if we
2344 * were not being required to provide this service via fallback,
2345 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2346 * We reverse this process on the way out, so we leave with the
2347 * same byte order we started with.
2348 *
2349 * XXX: We should enumerate the possible errno values here, and where
2350 * in the code they originated.
2351 */
2352 static int
2353 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2354 {
2355 uio_t fsec_uio;
2356 int error;
2357 uint32_t saved_acl_copysize;
2358
2359 fsec_uio = NULL;
2360
2361 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2362 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2363 error = ENOMEM;
2364 goto out;
2365 }
2366 /*
2367 * Save the pre-converted ACL copysize, because it gets swapped too
2368 * if we are running with the wrong endianness.
2369 */
2370 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2371
2372 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2373
2374 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2375 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2376 error = vn_setxattr(vp,
2377 KAUTH_FILESEC_XATTR,
2378 fsec_uio,
2379 XATTR_NOSECURITY, /* we have auth'ed already */
2380 ctx);
2381 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2382
2383 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2384
2385 out:
2386 if (fsec_uio != NULL) {
2387 uio_free(fsec_uio);
2388 }
2389 return error;
2390 }
2391
2392
2393 /*
2394 * Returns: 0 Success
2395 * ENOMEM Not enough space [only if has filesec]
2396 * EINVAL Requested unknown attributes
2397 * VNOP_GETATTR: ???
2398 * vnode_get_filesec: ???
2399 * kauth_cred_guid2uid: ???
2400 * kauth_cred_guid2gid: ???
2401 * vfs_update_vfsstat: ???
2402 */
2403 int
2404 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2405 {
2406 kauth_filesec_t fsec;
2407 kauth_acl_t facl;
2408 int error;
2409 uid_t nuid;
2410 gid_t ngid;
2411
2412 /*
2413 * Reject attempts to fetch unknown attributes.
2414 */
2415 if (vap->va_active & ~VNODE_ATTR_ALL) {
2416 return EINVAL;
2417 }
2418
2419 /* don't ask for extended security data if the filesystem doesn't support it */
2420 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2421 VATTR_CLEAR_ACTIVE(vap, va_acl);
2422 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2423 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2424 }
2425
2426 /*
2427 * If the caller wants size values we might have to synthesise, give the
2428 * filesystem the opportunity to supply better intermediate results.
2429 */
2430 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2431 VATTR_IS_ACTIVE(vap, va_total_size) ||
2432 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2433 VATTR_SET_ACTIVE(vap, va_data_size);
2434 VATTR_SET_ACTIVE(vap, va_data_alloc);
2435 VATTR_SET_ACTIVE(vap, va_total_size);
2436 VATTR_SET_ACTIVE(vap, va_total_alloc);
2437 }
2438
2439 error = VNOP_GETATTR(vp, vap, ctx);
2440 if (error) {
2441 KAUTH_DEBUG("ERROR - returning %d", error);
2442 goto out;
2443 }
2444
2445 /*
2446 * If extended security data was requested but not returned, try the fallback
2447 * path.
2448 */
2449 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2450 fsec = NULL;
2451
2452 if (XATTR_VNODE_SUPPORTED(vp)) {
2453 /* try to get the filesec */
2454 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2455 goto out;
2456 }
2457 }
2458 /* if no filesec, no attributes */
2459 if (fsec == NULL) {
2460 VATTR_RETURN(vap, va_acl, NULL);
2461 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2462 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2463 } else {
2464 /* looks good, try to return what we were asked for */
2465 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2466 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2467
2468 /* only return the ACL if we were actually asked for it */
2469 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2470 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2471 VATTR_RETURN(vap, va_acl, NULL);
2472 } else {
2473 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2474 if (facl == NULL) {
2475 kauth_filesec_free(fsec);
2476 error = ENOMEM;
2477 goto out;
2478 }
2479 bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2480 VATTR_RETURN(vap, va_acl, facl);
2481 }
2482 }
2483 kauth_filesec_free(fsec);
2484 }
2485 }
2486 /*
2487 * If someone gave us an unsolicited filesec, toss it. We promise that
2488 * we're OK with a filesystem giving us anything back, but our callers
2489 * only expect what they asked for.
2490 */
2491 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2492 if (vap->va_acl != NULL) {
2493 kauth_acl_free(vap->va_acl);
2494 }
2495 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2496 }
2497
2498 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2499 /*
2500 * Handle the case where we need a UID/GID, but only have extended
2501 * security information.
2502 */
2503 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2504 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2505 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2506 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0) {
2507 VATTR_RETURN(vap, va_uid, nuid);
2508 }
2509 }
2510 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2511 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2512 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2513 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0) {
2514 VATTR_RETURN(vap, va_gid, ngid);
2515 }
2516 }
2517 #endif
2518
2519 /*
2520 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2521 */
2522 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2523 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2524 nuid = vap->va_uid;
2525 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2526 nuid = vp->v_mount->mnt_fsowner;
2527 if (nuid == KAUTH_UID_NONE) {
2528 nuid = 99;
2529 }
2530 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2531 nuid = vap->va_uid;
2532 } else {
2533 /* this will always be something sensible */
2534 nuid = vp->v_mount->mnt_fsowner;
2535 }
2536 if ((nuid == 99) && !vfs_context_issuser(ctx)) {
2537 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2538 }
2539 VATTR_RETURN(vap, va_uid, nuid);
2540 }
2541 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2542 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2543 ngid = vap->va_gid;
2544 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2545 ngid = vp->v_mount->mnt_fsgroup;
2546 if (ngid == KAUTH_GID_NONE) {
2547 ngid = 99;
2548 }
2549 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2550 ngid = vap->va_gid;
2551 } else {
2552 /* this will always be something sensible */
2553 ngid = vp->v_mount->mnt_fsgroup;
2554 }
2555 if ((ngid == 99) && !vfs_context_issuser(ctx)) {
2556 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2557 }
2558 VATTR_RETURN(vap, va_gid, ngid);
2559 }
2560
2561 /*
2562 * Synthesise some values that can be reasonably guessed.
2563 */
2564 if (!VATTR_IS_SUPPORTED(vap, va_iosize)) {
2565 VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
2566 }
2567
2568 if (!VATTR_IS_SUPPORTED(vap, va_flags)) {
2569 VATTR_RETURN(vap, va_flags, 0);
2570 }
2571
2572 if (!VATTR_IS_SUPPORTED(vap, va_filerev)) {
2573 VATTR_RETURN(vap, va_filerev, 0);
2574 }
2575
2576 if (!VATTR_IS_SUPPORTED(vap, va_gen)) {
2577 VATTR_RETURN(vap, va_gen, 0);
2578 }
2579
2580 /*
2581 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2582 */
2583 if (!VATTR_IS_SUPPORTED(vap, va_data_size)) {
2584 VATTR_RETURN(vap, va_data_size, 0);
2585 }
2586
2587 /* do we want any of the possibly-computed values? */
2588 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2589 VATTR_IS_ACTIVE(vap, va_total_size) ||
2590 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2591 /* make sure f_bsize is valid */
2592 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2593 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) {
2594 goto out;
2595 }
2596 }
2597
2598 /* default va_data_alloc from va_data_size */
2599 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc)) {
2600 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2601 }
2602
2603 /* default va_total_size from va_data_size */
2604 if (!VATTR_IS_SUPPORTED(vap, va_total_size)) {
2605 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2606 }
2607
2608 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2609 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc)) {
2610 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2611 }
2612 }
2613
2614 /*
2615 * If we don't have a change time, pull it from the modtime.
2616 */
2617 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time)) {
2618 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2619 }
2620
2621 /*
2622 * This is really only supported for the creation VNOPs, but since the field is there
2623 * we should populate it correctly.
2624 */
2625 VATTR_RETURN(vap, va_type, vp->v_type);
2626
2627 /*
2628 * The fsid can be obtained from the mountpoint directly.
2629 */
2630 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2631
2632 out:
2633
2634 return error;
2635 }
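
/*
 * Illustrative sketch (not part of this file): a typical in-kernel caller of
 * vnode_getattr() initializes a vnode_attr, marks the attributes it wants,
 * and then checks which ones were actually supplied. The function below is
 * hypothetical.
 */
#if 0
static int
example_get_size(vnode_t vp, vfs_context_t ctx, off_t *sizep)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);

	error = vnode_getattr(vp, &va, ctx);
	if (error == 0 && VATTR_IS_SUPPORTED(&va, va_data_size)) {
		*sizep = (off_t)va.va_data_size;
	}
	return error;
}
#endif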
2636
2637 /*
2638 * Choose 32 bit or 64 bit fsid
2639 */
2640 uint64_t
2641 vnode_get_va_fsid(struct vnode_attr *vap)
2642 {
2643 if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
2644 return (uint64_t)vap->va_fsid64.val[0] + ((uint64_t)vap->va_fsid64.val[1] << 32);
2645 }
2646 return vap->va_fsid;
2647 }
2648
2649 /*
2650 * Set the attributes on a vnode in a vnode context.
2651 *
2652 * Parameters: vp The vnode whose attributes to set.
2653 * vap A pointer to the attributes to set.
2654 * ctx The vnode context in which the
2655 * operation is to be attempted.
2656 *
2657 * Returns: 0 Success
2658 * !0 errno value
2659 *
2660 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2661 *
2662 * The contents of the data area pointed to by 'vap' may be
2663 * modified if the vnode is on a filesystem which has been
2664 * mounted with ignore ownership flags, or by the underlying VFS
2665 * VFS itself, or by the fallback code, if the underlying VFS
2666 * does not support ACL, UUID, or GUUID attributes directly.
2667 *
2668 * XXX: We should enumerate the possible errno values here, and where
2669 * in the code they originated.
2670 */
2671 int
2672 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2673 {
2674 int error;
2675 #if CONFIG_FSE
2676 uint64_t active;
2677 int is_perm_change = 0;
2678 int is_stat_change = 0;
2679 #endif
2680
2681 /*
2682 * Reject attempts to set unknown attributes.
2683 */
2684 if (vap->va_active & ~VNODE_ATTR_ALL) {
2685 return EINVAL;
2686 }
2687
2688 /*
2689 * Make sure the filesystem is mounted R/W.
2690 * If not, return an error.
2691 */
2692 if (vfs_isrdonly(vp->v_mount)) {
2693 error = EROFS;
2694 goto out;
2695 }
2696
2697 #if DEVELOPMENT || DEBUG
2698 /*
2699 * XXX VSWAP: Check for entitlements or special flag here
2700 * so we can restrict access appropriately.
2701 */
2702 #else /* DEVELOPMENT || DEBUG */
2703
2704 if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
2705 error = EPERM;
2706 goto out;
2707 }
2708 #endif /* DEVELOPMENT || DEBUG */
2709
2710 #if NAMEDSTREAMS
2711 /* For streams, va_data_size is the only settable attribute. */
2712 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2713 error = EPERM;
2714 goto out;
2715 }
2716 #endif
2717 /* Check for truncation */
2718 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
2719 switch (vp->v_type) {
2720 case VREG:
2721 /* For regular files it's ok */
2722 break;
2723 case VDIR:
2724 /* Not allowed to truncate directories */
2725 error = EISDIR;
2726 goto out;
2727 default:
2728 /* For everything else we will clear the bit and let underlying FS decide on the rest */
2729 VATTR_CLEAR_ACTIVE(vap, va_data_size);
2730 if (vap->va_active) {
2731 break;
2732 }
2733 /* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
2734 return 0;
2735 }
2736 }
2737
2738 /*
2739 * If ownership is being ignored on this volume, we silently discard
2740 * ownership changes.
2741 */
2742 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2743 VATTR_CLEAR_ACTIVE(vap, va_uid);
2744 VATTR_CLEAR_ACTIVE(vap, va_gid);
2745 }
2746
2747 /*
2748 * Make sure that extended security is enabled if we're going to try
2749 * to set any.
2750 */
2751 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2752 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2753 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2754 error = ENOTSUP;
2755 goto out;
2756 }
2757
2758 /* Never allow the setting of any unsupported superuser flags. */
2759 if (VATTR_IS_ACTIVE(vap, va_flags)) {
2760 vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
2761 }
2762
2763 #if CONFIG_FSE
2764 /*
2765 * Remember all of the active attributes that we're
2766 * attempting to modify.
2767 */
2768 active = vap->va_active & ~VNODE_ATTR_RDONLY;
2769 #endif
2770
2771 error = VNOP_SETATTR(vp, vap, ctx);
2772
2773 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) {
2774 error = vnode_setattr_fallback(vp, vap, ctx);
2775 }
2776
2777 #if CONFIG_FSE
2778 #define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \
2779 VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \
2780 VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl))
2781
2782 /*
2783 * Now that we've changed them, decide whether to send an
2784 * FSevent.
2785 */
2786 if ((active & PERMISSION_BITS) & vap->va_supported) {
2787 is_perm_change = 1;
2788 } else {
2789 /*
2790 * We've already checked the permission bits, and we
2791 * also want to filter out access time / backup time
2792 * changes.
2793 */
2794 active &= ~(PERMISSION_BITS |
2795 VNODE_ATTR_BIT(va_access_time) |
2796 VNODE_ATTR_BIT(va_backup_time));
2797
2798 /* Anything left to notify about? */
2799 if (active & vap->va_supported) {
2800 is_stat_change = 1;
2801 }
2802 }
2803
2804 if (error == 0) {
2805 if (is_perm_change) {
2806 if (need_fsevent(FSE_CHOWN, vp)) {
2807 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2808 }
2809 } else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) {
2810 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2811 }
2812 }
2813 #undef PERMISSION_BITS
2814 #endif
2815
2816 out:
2817 return error;
2818 }
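
/*
 * Illustrative sketch (not part of this file): a typical in-kernel caller of
 * vnode_setattr() marks only the attributes it intends to change with
 * VATTR_SET(). The function below is hypothetical.
 */
#if 0
static int
example_chmod(vnode_t vp, mode_t mode, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, mode);

	/* vnode_setattr() handles the extended-security fallback internally */
	return vnode_setattr(vp, &va, ctx);
}
#endif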
2819
2820 /*
2821 * Fallback for setting the attributes on a vnode in a vnode context. This
2822 * function will attempt to store ACL, UUID, and GUID information utilizing
2823 * a read/modify/write operation against an EA used as a backing store for
2824 * the object.
2825 *
2826 * Parameters: vp The vnode whose attributes to set.
2827 * vap A pointer to the attributes to set.
2828 * ctx The vnode context in which the
2829 * operation is to be attempted.
2830 *
2831 * Returns: 0 Success
2832 * !0 errno value
2833 *
2834 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2835 * as are the fsec and lfsec, if they are used.
2836 *
2837 * The contents of the data area pointed to by 'vap' may be
2838 * modified to indicate that the attribute is supported for
2839 * any given requested attribute.
2840 *
2841 * XXX: We should enumerate the possible errno values here, and where
2842 * in the code they originated.
2843 */
2844 int
2845 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2846 {
2847 kauth_filesec_t fsec;
2848 kauth_acl_t facl;
2849 struct kauth_filesec lfsec;
2850 int error;
2851
2852 error = 0;
2853
2854 /*
2855 * Extended security fallback via extended attributes.
2856 *
2857 * Note that we do not free the filesec; the caller is expected to
2858 * do this.
2859 */
2860 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2861 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2862 VATTR_NOT_RETURNED(vap, va_guuid)) {
2863 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2864
2865 /*
2866 * Fail for file types that we don't permit extended security
2867 * to be set on.
2868 */
2869 if (!XATTR_VNODE_SUPPORTED(vp)) {
2870 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2871 error = EINVAL;
2872 goto out;
2873 }
2874
2875 /*
2876 * If we don't have all the extended security items, we need
2877 * to fetch the existing data to perform a read-modify-write
2878 * operation.
2879 */
2880 fsec = NULL;
2881 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2882 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2883 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2884 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2885 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2886 goto out;
2887 }
2888 }
2889 /* if we didn't get a filesec, use our local one */
2890 if (fsec == NULL) {
2891 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2892 fsec = &lfsec;
2893 } else {
2894 KAUTH_DEBUG("SETATTR - updating existing filesec");
2895 }
2896 /* find the ACL */
2897 facl = &fsec->fsec_acl;
2898
2899 /* if we're using the local filesec, we need to initialise it */
2900 if (fsec == &lfsec) {
2901 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2902 fsec->fsec_owner = kauth_null_guid;
2903 fsec->fsec_group = kauth_null_guid;
2904 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2905 facl->acl_flags = 0;
2906 }
2907
2908 /*
2909 * Update with the supplied attributes.
2910 */
2911 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2912 KAUTH_DEBUG("SETATTR - updating owner UUID");
2913 fsec->fsec_owner = vap->va_uuuid;
2914 VATTR_SET_SUPPORTED(vap, va_uuuid);
2915 }
2916 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2917 KAUTH_DEBUG("SETATTR - updating group UUID");
2918 fsec->fsec_group = vap->va_guuid;
2919 VATTR_SET_SUPPORTED(vap, va_guuid);
2920 }
2921 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2922 if (vap->va_acl == NULL) {
2923 KAUTH_DEBUG("SETATTR - removing ACL");
2924 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2925 } else {
2926 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2927 facl = vap->va_acl;
2928 }
2929 VATTR_SET_SUPPORTED(vap, va_acl);
2930 }
2931
2932 /*
2933 * If the filesec data is all invalid, we can just remove
2934 * the EA completely.
2935 */
2936 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2937 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2938 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2939 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2940 /* no attribute is ok, nothing to delete */
2941 if (error == ENOATTR) {
2942 error = 0;
2943 }
2944 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2945 } else {
2946 /* write the EA */
2947 error = vnode_set_filesec(vp, fsec, facl, ctx);
2948 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2949 }
2950
2951 /* if we fetched a filesec, dispose of the buffer */
2952 if (fsec != &lfsec) {
2953 kauth_filesec_free(fsec);
2954 }
2955 }
2956 out:
2957
2958 return error;
2959 }
2960
2961 /*
2962 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
2963 * event on a vnode.
2964 */
2965 int
2966 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
2967 {
2968 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
2969 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
2970 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
2971 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
2972 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
2973 uint32_t knote_events = (events & knote_mask);
2974
2975 /* Permissions are not explicitly part of the kqueue model */
2976 if (events & VNODE_EVENT_PERMS) {
2977 knote_events |= NOTE_ATTRIB;
2978 }
2979
2980 /* Directory contents information just becomes NOTE_WRITE */
2981 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
2982 knote_events |= NOTE_WRITE;
2983 }
2984
2985 if (knote_events) {
2986 lock_vnode_and_post(vp, knote_events);
2987 #if CONFIG_FSE
2988 if (vap != NULL) {
2989 create_fsevent_from_kevent(vp, events, vap);
2990 }
2991 #else
2992 (void)vap;
2993 #endif
2994 }
2995
2996 return 0;
2997 }
2998
2999
3000
3001 int
3002 vnode_isdyldsharedcache(vnode_t vp)
3003 {
3004 return (vp->v_flag & VSHARED_DYLD) ? 1 : 0;
3005 }
3006
3007
3008 /*
3009 * For a filesystem that isn't tracking its own vnode watchers:
3010 * check whether a vnode is being monitored.
3011 */
3012 int
3013 vnode_ismonitored(vnode_t vp)
3014 {
3015 return vp->v_knotes.slh_first != NULL;
3016 }
3017
3018 int
3019 vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
3020 {
3021 if (out_vpp) {
3022 *out_vpp = NULLVP;
3023 }
3024 #if NULLFS
3025 return nullfs_getbackingvnode(in_vp, out_vpp);
3026 #else
3027 #pragma unused(in_vp)
3028 return ENOENT;
3029 #endif
3030 }
3031
3032 /*
3033 * Initialize a struct vnode_attr and activate the attributes required
3034 * by the vnode_notify() call.
3035 */
3036 int
3037 vfs_get_notify_attributes(struct vnode_attr *vap)
3038 {
3039 VATTR_INIT(vap);
3040 vap->va_active = VNODE_NOTIFY_ATTRS;
3041 return 0;
3042 }
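
/*
 * Illustrative sketch (not part of this file): a filesystem that wants richer
 * notifications pairs vfs_get_notify_attributes() with vnode_notify(), so the
 * FSEvents path (when CONFIG_FSE is enabled) has attribute data to work with.
 * The function below is hypothetical.
 */
#if 0
static void
example_notify_attrib_change(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;

	vfs_get_notify_attributes(&va);          /* activate VNODE_NOTIFY_ATTRS */
	if (vnode_getattr(vp, &va, ctx) == 0) {
		vnode_notify(vp, VNODE_EVENT_ATTRIB, &va);
	} else {
		vnode_notify(vp, VNODE_EVENT_ATTRIB, NULL);
	}
}
#endif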
3043
3044 #if CONFIG_TRIGGERS
3045 int
3046 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
3047 {
3048 int error;
3049 mount_t mp;
3050
3051 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
3052 if (mp == NULL) {
3053 return ENOENT;
3054 }
3055
3056 error = vfs_busy(mp, LK_NOWAIT);
3057 mount_iterdrop(mp);
3058
3059 if (error != 0) {
3060 return ENOENT;
3061 }
3062
3063 mount_lock(mp);
3064 if (mp->mnt_triggercallback != NULL) {
3065 error = EBUSY;
3066 mount_unlock(mp);
3067 goto out;
3068 }
3069
3070 mp->mnt_triggercallback = vtc;
3071 mp->mnt_triggerdata = data;
3072 mount_unlock(mp);
3073
3074 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
3075
3076 out:
3077 vfs_unbusy(mp);
3078 return 0;
3079 }
3080 #endif /* CONFIG_TRIGGERS */
3081
3082 /*
3083 * Definition of vnode operations.
3084 */
3085
3086 #if 0
3087 /*
3088 *#
3089 *#% lookup dvp L ? ?
3090 *#% lookup vpp - L -
3091 */
3092 struct vnop_lookup_args {
3093 struct vnodeop_desc *a_desc;
3094 vnode_t a_dvp;
3095 vnode_t *a_vpp;
3096 struct componentname *a_cnp;
3097 vfs_context_t a_context;
3098 };
3099 #endif /* 0*/
3100
3101 /*
3102 * Returns: 0 Success
3103 * lock_fsnode:ENOENT No such file or directory [only for VFS
3104 * that is not thread safe & vnode is
3105 * currently being/has been terminated]
3106 * <vfs_lookup>:ENAMETOOLONG
3107 * <vfs_lookup>:ENOENT
3108 * <vfs_lookup>:EJUSTRETURN
3109 * <vfs_lookup>:EPERM
3110 * <vfs_lookup>:EISDIR
3111 * <vfs_lookup>:ENOTDIR
3112 * <vfs_lookup>:???
3113 *
3114 * Note: The return codes from the underlying VFS's lookup routine can't
3115 * be fully enumerated here, since third party VFS authors may not
3116 * limit their error returns to the ones documented here, even
3117 * though this may result in some programs functioning incorrectly.
3118 *
3119 * The return codes documented above are those which may currently
3120 * be returned by HFS from hfs_lookup, not including additional
3121 * error code which may be propagated from underlying routines.
3122 */
3123 errno_t
3124 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
3125 {
3126 int _err;
3127 struct vnop_lookup_args a;
3128
3129 a.a_desc = &vnop_lookup_desc;
3130 a.a_dvp = dvp;
3131 a.a_vpp = vpp;
3132 a.a_cnp = cnp;
3133 a.a_context = ctx;
3134
3135 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
3136 if (_err == 0 && *vpp) {
3137 DTRACE_FSINFO(lookup, vnode_t, *vpp);
3138 }
3139
3140 return _err;
3141 }
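
/*
 * Illustrative sketch (not part of this file): the wrapper above dispatches
 * through the vnode's v_op table, so a filesystem's lookup entry point
 * receives the packed vnop_lookup_args shown in the #if 0 block. The handler
 * below is hypothetical.
 */
#if 0
static int
examplefs_vnop_lookup(struct vnop_lookup_args *ap)
{
	vnode_t dvp = ap->a_dvp;                    /* parent directory passed to VNOP_LOOKUP */
	struct componentname *cnp = ap->a_cnp;      /* name being looked up */

	/* ... search the directory; on success, return a referenced vnode in *ap->a_vpp ... */
	*ap->a_vpp = NULLVP;
	(void)dvp;
	(void)cnp;
	return ENOENT;
}
#endif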
3142
3143 #if 0
3144 struct vnop_compound_open_args {
3145 struct vnodeop_desc *a_desc;
3146 vnode_t a_dvp;
3147 vnode_t *a_vpp;
3148 struct componentname *a_cnp;
3149 int32_t a_flags;
3150 int32_t a_fmode;
3151 struct vnode_attr *a_vap;
3152 vfs_context_t a_context;
3153 void *a_reserved;
3154 };
3155 #endif /* 0 */
3156
3157 int
3158 VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
3159 {
3160 int _err;
3161 struct vnop_compound_open_args a;
3162 int did_create = 0;
3163 int want_create;
3164 uint32_t tmp_status = 0;
3165 struct componentname *cnp = &ndp->ni_cnd;
3166
3167 want_create = (flags & O_CREAT);
3168
3169 a.a_desc = &vnop_compound_open_desc;
3170 a.a_dvp = dvp;
3171 a.a_vpp = vpp; /* Could be NULL */
3172 a.a_cnp = cnp;
3173 a.a_flags = flags;
3174 a.a_fmode = fmode;
3175 a.a_status = (statusp != NULL) ? statusp : &tmp_status;
3176 a.a_vap = vap;
3177 a.a_context = ctx;
3178 a.a_open_create_authorizer = vn_authorize_create;
3179 a.a_open_existing_authorizer = vn_authorize_open_existing;
3180 a.a_reserved = NULL;
3181
3182 if (dvp == NULLVP) {
3183 panic("No dvp?");
3184 }
3185 if (want_create && !vap) {
3186 panic("Want create, but no vap?");
3187 }
3188 if (!want_create && vap) {
3189 panic("Don't want create, but have a vap?");
3190 }
3191
3192 _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
3193 if (want_create) {
3194 if (_err == 0 && *vpp) {
3195 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3196 } else {
3197 DTRACE_FSINFO(compound_open, vnode_t, dvp);
3198 }
3199 } else {
3200 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3201 }
3202
3203 did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
3204
3205 if (did_create && !want_create) {
3206 panic("Filesystem did a create, even though none was requested?");
3207 }
3208
3209 if (did_create) {
3210 #if CONFIG_APPLEDOUBLE
3211 if (!NATIVE_XATTR(dvp)) {
3212 /*
3213 * Remove stale Apple Double file (if any).
3214 */
3215 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3216 }
3217 #endif /* CONFIG_APPLEDOUBLE */
3218 /* On create, provide kqueue notification */
3219 post_event_if_success(dvp, _err, NOTE_WRITE);
3220 }
3221
3222 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
3223 #if 0 /* FSEvents... */
3224 if (*vpp && _err && _err != EKEEPLOOKING) {
3225 vnode_put(*vpp);
3226 *vpp = NULLVP;
3227 }
3228 #endif /* 0 */
3229
3230 return _err;
3231 }
3232
3233 #if 0
3234 struct vnop_create_args {
3235 struct vnodeop_desc *a_desc;
3236 vnode_t a_dvp;
3237 vnode_t *a_vpp;
3238 struct componentname *a_cnp;
3239 struct vnode_attr *a_vap;
3240 vfs_context_t a_context;
3241 };
3242 #endif /* 0*/
3243 errno_t
3244 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3245 {
3246 int _err;
3247 struct vnop_create_args a;
3248
3249 a.a_desc = &vnop_create_desc;
3250 a.a_dvp = dvp;
3251 a.a_vpp = vpp;
3252 a.a_cnp = cnp;
3253 a.a_vap = vap;
3254 a.a_context = ctx;
3255
3256 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
3257 if (_err == 0 && *vpp) {
3258 DTRACE_FSINFO(create, vnode_t, *vpp);
3259 }
3260
3261 #if CONFIG_APPLEDOUBLE
3262 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3263 /*
3264 * Remove stale Apple Double file (if any).
3265 */
3266 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3267 }
3268 #endif /* CONFIG_APPLEDOUBLE */
3269
3270 post_event_if_success(dvp, _err, NOTE_WRITE);
3271
3272 return _err;
3273 }
3274
3275 #if 0
3276 /*
3277 *#
3278 *#% whiteout dvp L L L
3279 *#% whiteout cnp - - -
3280 *#% whiteout flag - - -
3281 *#
3282 */
3283 struct vnop_whiteout_args {
3284 struct vnodeop_desc *a_desc;
3285 vnode_t a_dvp;
3286 struct componentname *a_cnp;
3287 int a_flags;
3288 vfs_context_t a_context;
3289 };
3290 #endif /* 0*/
3291 errno_t
3292 VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
3293 __unused int flags, __unused vfs_context_t ctx)
3294 {
3295 return ENOTSUP; // XXX OBSOLETE
3296 }
3297
3298 #if 0
3299 /*
3300 *#
3301 *#% mknod dvp L U U
3302 *#% mknod vpp - X -
3303 *#
3304 */
3305 struct vnop_mknod_args {
3306 struct vnodeop_desc *a_desc;
3307 vnode_t a_dvp;
3308 vnode_t *a_vpp;
3309 struct componentname *a_cnp;
3310 struct vnode_attr *a_vap;
3311 vfs_context_t a_context;
3312 };
3313 #endif /* 0*/
3314 errno_t
3315 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3316 {
3317 int _err;
3318 struct vnop_mknod_args a;
3319
3320 a.a_desc = &vnop_mknod_desc;
3321 a.a_dvp = dvp;
3322 a.a_vpp = vpp;
3323 a.a_cnp = cnp;
3324 a.a_vap = vap;
3325 a.a_context = ctx;
3326
3327 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3328 if (_err == 0 && *vpp) {
3329 DTRACE_FSINFO(mknod, vnode_t, *vpp);
3330 }
3331
3332 post_event_if_success(dvp, _err, NOTE_WRITE);
3333
3334 return _err;
3335 }
3336
3337 #if 0
3338 /*
3339 *#
3340 *#% open vp L L L
3341 *#
3342 */
3343 struct vnop_open_args {
3344 struct vnodeop_desc *a_desc;
3345 vnode_t a_vp;
3346 int a_mode;
3347 vfs_context_t a_context;
3348 };
3349 #endif /* 0*/
3350 errno_t
3351 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3352 {
3353 int _err;
3354 struct vnop_open_args a;
3355
3356 if (ctx == NULL) {
3357 ctx = vfs_context_current();
3358 }
3359 a.a_desc = &vnop_open_desc;
3360 a.a_vp = vp;
3361 a.a_mode = mode;
3362 a.a_context = ctx;
3363
3364 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3365 DTRACE_FSINFO(open, vnode_t, vp);
3366
3367 return _err;
3368 }
3369
3370 #if 0
3371 /*
3372 *#
3373 *#% close vp U U U
3374 *#
3375 */
3376 struct vnop_close_args {
3377 struct vnodeop_desc *a_desc;
3378 vnode_t a_vp;
3379 int a_fflag;
3380 vfs_context_t a_context;
3381 };
3382 #endif /* 0*/
3383 errno_t
3384 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3385 {
3386 int _err;
3387 struct vnop_close_args a;
3388
3389 if (ctx == NULL) {
3390 ctx = vfs_context_current();
3391 }
3392 a.a_desc = &vnop_close_desc;
3393 a.a_vp = vp;
3394 a.a_fflag = fflag;
3395 a.a_context = ctx;
3396
3397 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3398 DTRACE_FSINFO(close, vnode_t, vp);
3399
3400 return _err;
3401 }
3402
3403 #if 0
3404 /*
3405 *#
3406 *#% access vp L L L
3407 *#
3408 */
3409 struct vnop_access_args {
3410 struct vnodeop_desc *a_desc;
3411 vnode_t a_vp;
3412 int a_action;
3413 vfs_context_t a_context;
3414 };
3415 #endif /* 0*/
3416 errno_t
3417 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3418 {
3419 int _err;
3420 struct vnop_access_args a;
3421
3422 if (ctx == NULL) {
3423 ctx = vfs_context_current();
3424 }
3425 a.a_desc = &vnop_access_desc;
3426 a.a_vp = vp;
3427 a.a_action = action;
3428 a.a_context = ctx;
3429
3430 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3431 DTRACE_FSINFO(access, vnode_t, vp);
3432
3433 return _err;
3434 }
3435
3436 #if 0
3437 /*
3438 *#
3439 *#% getattr vp = = =
3440 *#
3441 */
3442 struct vnop_getattr_args {
3443 struct vnodeop_desc *a_desc;
3444 vnode_t a_vp;
3445 struct vnode_attr *a_vap;
3446 vfs_context_t a_context;
3447 };
3448 #endif /* 0*/
3449 errno_t
3450 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3451 {
3452 int _err;
3453 struct vnop_getattr_args a;
3454
3455 a.a_desc = &vnop_getattr_desc;
3456 a.a_vp = vp;
3457 a.a_vap = vap;
3458 a.a_context = ctx;
3459
3460 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3461 DTRACE_FSINFO(getattr, vnode_t, vp);
3462
3463 return _err;
3464 }
3465
3466 #if 0
3467 /*
3468 *#
3469 *#% setattr vp L L L
3470 *#
3471 */
3472 struct vnop_setattr_args {
3473 struct vnodeop_desc *a_desc;
3474 vnode_t a_vp;
3475 struct vnode_attr *a_vap;
3476 vfs_context_t a_context;
3477 };
3478 #endif /* 0*/
3479 errno_t
3480 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3481 {
3482 int _err;
3483 struct vnop_setattr_args a;
3484
3485 a.a_desc = &vnop_setattr_desc;
3486 a.a_vp = vp;
3487 a.a_vap = vap;
3488 a.a_context = ctx;
3489
3490 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3491 DTRACE_FSINFO(setattr, vnode_t, vp);
3492
3493 #if CONFIG_APPLEDOUBLE
3494 /*
3495 * Shadow uid/gid/mod change to extended attribute file.
3496 */
3497 if (_err == 0 && !NATIVE_XATTR(vp)) {
3498 struct vnode_attr va;
3499 int change = 0;
3500
3501 VATTR_INIT(&va);
3502 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3503 VATTR_SET(&va, va_uid, vap->va_uid);
3504 change = 1;
3505 }
3506 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3507 VATTR_SET(&va, va_gid, vap->va_gid);
3508 change = 1;
3509 }
3510 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3511 VATTR_SET(&va, va_mode, vap->va_mode);
3512 change = 1;
3513 }
3514 if (change) {
3515 vnode_t dvp;
3516 const char *vname;
3517
3518 dvp = vnode_getparent(vp);
3519 vname = vnode_getname(vp);
3520
3521 xattrfile_setattr(dvp, vname, &va, ctx);
3522 if (dvp != NULLVP) {
3523 vnode_put(dvp);
3524 }
3525 if (vname != NULL) {
3526 vnode_putname(vname);
3527 }
3528 }
3529 }
3530 #endif /* CONFIG_APPLEDOUBLE */
3531
3532 /*
3533 * If we have changed any of the things about the file that are likely
3534 * to result in changes to authorization results, blow the vnode auth
3535 * cache
3536 */
3537 if (_err == 0 && (
3538 VATTR_IS_SUPPORTED(vap, va_mode) ||
3539 VATTR_IS_SUPPORTED(vap, va_uid) ||
3540 VATTR_IS_SUPPORTED(vap, va_gid) ||
3541 VATTR_IS_SUPPORTED(vap, va_flags) ||
3542 VATTR_IS_SUPPORTED(vap, va_acl) ||
3543 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
3544 VATTR_IS_SUPPORTED(vap, va_guuid))) {
3545 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3546
3547 #if NAMEDSTREAMS
3548 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3549 vnode_t svp;
3550 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3551 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3552 vnode_put(svp);
3553 }
3554 }
3555 #endif /* NAMEDSTREAMS */
3556 }
3557
3558
3559 post_event_if_success(vp, _err, NOTE_ATTRIB);
3560
3561 return _err;
3562 }
3563
3564
3565 #if 0
3566 /*
3567 *#
3568 *#% read vp L L L
3569 *#
3570 */
3571 struct vnop_read_args {
3572 struct vnodeop_desc *a_desc;
3573 vnode_t a_vp;
3574 struct uio *a_uio;
3575 int a_ioflag;
3576 vfs_context_t a_context;
3577 };
3578 #endif /* 0*/
3579 errno_t
3580 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3581 {
3582 int _err;
3583 struct vnop_read_args a;
3584 #if CONFIG_DTRACE
3585 user_ssize_t resid = uio_resid(uio);
3586 #endif
3587
3588 if (ctx == NULL) {
3589 return EINVAL;
3590 }
3591
3592 a.a_desc = &vnop_read_desc;
3593 a.a_vp = vp;
3594 a.a_uio = uio;
3595 a.a_ioflag = ioflag;
3596 a.a_context = ctx;
3597
3598 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3599 DTRACE_FSINFO_IO(read,
3600 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3601
3602 return _err;
3603 }
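
/*
 * Illustrative sketch (not part of this file): an in-kernel read through
 * VNOP_READ() packages the destination buffer in a uio, much as
 * vnode_get_filesec() does earlier in this file for xattr data. The function
 * below is hypothetical and assumes the caller already holds an iocount on vp.
 */
#if 0
static int
example_kernel_read(vnode_t vp, off_t offset, void *buf, size_t len, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	if ((auio = uio_create(1, offset, UIO_SYSSPACE, UIO_READ)) == NULL) {
		return ENOMEM;
	}
	uio_addiov(auio, CAST_USER_ADDR_T(buf), len);

	error = VNOP_READ(vp, auio, 0, ctx);

	uio_free(auio);
	return error;
}
#endif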
3604
3605
3606 #if 0
3607 /*
3608 *#
3609 *#% write vp L L L
3610 *#
3611 */
3612 struct vnop_write_args {
3613 struct vnodeop_desc *a_desc;
3614 vnode_t a_vp;
3615 struct uio *a_uio;
3616 int a_ioflag;
3617 vfs_context_t a_context;
3618 };
3619 #endif /* 0*/
3620 errno_t
3621 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3622 {
3623 struct vnop_write_args a;
3624 int _err;
3625 #if CONFIG_DTRACE
3626 user_ssize_t resid = uio_resid(uio);
3627 #endif
3628
3629 if (ctx == NULL) {
3630 return EINVAL;
3631 }
3632
3633 a.a_desc = &vnop_write_desc;
3634 a.a_vp = vp;
3635 a.a_uio = uio;
3636 a.a_ioflag = ioflag;
3637 a.a_context = ctx;
3638
3639 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3640 DTRACE_FSINFO_IO(write,
3641 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3642
3643 post_event_if_success(vp, _err, NOTE_WRITE);
3644
3645 return _err;
3646 }
3647
3648
3649 #if 0
3650 /*
3651 *#
3652 *#% ioctl vp U U U
3653 *#
3654 */
3655 struct vnop_ioctl_args {
3656 struct vnodeop_desc *a_desc;
3657 vnode_t a_vp;
3658 u_long a_command;
3659 caddr_t a_data;
3660 int a_fflag;
3661 vfs_context_t a_context;
3662 };
3663 #endif /* 0*/
3664 errno_t
3665 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3666 {
3667 int _err;
3668 struct vnop_ioctl_args a;
3669
3670 if (ctx == NULL) {
3671 ctx = vfs_context_current();
3672 }
3673
3674 /*
3675 * This check should probably have been put in the TTY code instead...
3676 *
3677 * We have to be careful about what we assume during startup and shutdown.
3678 * We have to be able to use the root filesystem's device vnode even when
3679 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3680 * structure. If there is no data pointer, it doesn't matter whether
3681 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZE)
3682 * which passes NULL for its data pointer can therefore be used during
3683 * mount or unmount of the root filesystem.
3684 *
3685 * Depending on what root filesystems need to do during mount/unmount, we
3686 * may need to loosen this check again in the future.
3687 */
3688 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3689 if (data != NULL && !vnode_vfs64bitready(vp)) {
3690 return ENOTTY;
3691 }
3692 }
3693
3694 a.a_desc = &vnop_ioctl_desc;
3695 a.a_vp = vp;
3696 a.a_command = command;
3697 a.a_data = data;
3698 a.a_fflag = fflag;
3699 a.a_context = ctx;
3700
3701 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
3702 DTRACE_FSINFO(ioctl, vnode_t, vp);
3703
3704 return _err;
3705 }
3706
3707
3708 #if 0
3709 /*
3710 *#
3711 *#% select vp U U U
3712 *#
3713 */
3714 struct vnop_select_args {
3715 struct vnodeop_desc *a_desc;
3716 vnode_t a_vp;
3717 int a_which;
3718 int a_fflags;
3719 void *a_wql;
3720 vfs_context_t a_context;
3721 };
3722 #endif /* 0*/
3723 errno_t
3724 VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t ctx)
3725 {
3726 int _err;
3727 struct vnop_select_args a;
3728
3729 if (ctx == NULL) {
3730 ctx = vfs_context_current();
3731 }
3732 a.a_desc = &vnop_select_desc;
3733 a.a_vp = vp;
3734 a.a_which = which;
3735 a.a_fflags = fflags;
3736 a.a_context = ctx;
3737 a.a_wql = wql;
3738
3739 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3740 DTRACE_FSINFO(select, vnode_t, vp);
3741
3742 return _err;
3743 }
3744
3745
3746 #if 0
3747 /*
3748 *#
3749 *#% exchange fvp L L L
3750 *#% exchange tvp L L L
3751 *#
3752 */
3753 struct vnop_exchange_args {
3754 struct vnodeop_desc *a_desc;
3755 vnode_t a_fvp;
3756 vnode_t a_tvp;
3757 int a_options;
3758 vfs_context_t a_context;
3759 };
3760 #endif /* 0*/
3761 errno_t
3762 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3763 {
3764 int _err;
3765 struct vnop_exchange_args a;
3766
3767 a.a_desc = &vnop_exchange_desc;
3768 a.a_fvp = fvp;
3769 a.a_tvp = tvp;
3770 a.a_options = options;
3771 a.a_context = ctx;
3772
3773 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3774 DTRACE_FSINFO(exchange, vnode_t, fvp);
3775
3776 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3777 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3778 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3779
3780 return _err;
3781 }
3782
3783
3784 #if 0
3785 /*
3786 *#
3787 *#% revoke vp U U U
3788 *#
3789 */
3790 struct vnop_revoke_args {
3791 struct vnodeop_desc *a_desc;
3792 vnode_t a_vp;
3793 int a_flags;
3794 vfs_context_t a_context;
3795 };
3796 #endif /* 0*/
3797 errno_t
3798 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
3799 {
3800 struct vnop_revoke_args a;
3801 int _err;
3802
3803 a.a_desc = &vnop_revoke_desc;
3804 a.a_vp = vp;
3805 a.a_flags = flags;
3806 a.a_context = ctx;
3807
3808 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3809 DTRACE_FSINFO(revoke, vnode_t, vp);
3810
3811 return _err;
3812 }
3813
3814
3815 #if 0
3816 /*
3817 *#
3818 *# mmap - vp U U U
3819 *#
3820 */
3821 struct vnop_mmap_args {
3822 struct vnodeop_desc *a_desc;
3823 vnode_t a_vp;
3824 int a_fflags;
3825 vfs_context_t a_context;
3826 };
3827 #endif /* 0*/
3828 errno_t
3829 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
3830 {
3831 int _err;
3832 struct vnop_mmap_args a;
3833
3834 a.a_desc = &vnop_mmap_desc;
3835 a.a_vp = vp;
3836 a.a_fflags = fflags;
3837 a.a_context = ctx;
3838
3839 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3840 DTRACE_FSINFO(mmap, vnode_t, vp);
3841
3842 return _err;
3843 }
3844
3845
3846 #if 0
3847 /*
3848 *#
3849 *# mnomap - vp U U U
3850 *#
3851 */
3852 struct vnop_mnomap_args {
3853 struct vnodeop_desc *a_desc;
3854 vnode_t a_vp;
3855 vfs_context_t a_context;
3856 };
3857 #endif /* 0*/
3858 errno_t
3859 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
3860 {
3861 int _err;
3862 struct vnop_mnomap_args a;
3863
3864 a.a_desc = &vnop_mnomap_desc;
3865 a.a_vp = vp;
3866 a.a_context = ctx;
3867
3868 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3869 DTRACE_FSINFO(mnomap, vnode_t, vp);
3870
3871 return _err;
3872 }
3873
3874
3875 #if 0
3876 /*
3877 *#
3878 *#% fsync vp L L L
3879 *#
3880 */
3881 struct vnop_fsync_args {
3882 struct vnodeop_desc *a_desc;
3883 vnode_t a_vp;
3884 int a_waitfor;
3885 vfs_context_t a_context;
3886 };
3887 #endif /* 0*/
3888 errno_t
3889 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
3890 {
3891 struct vnop_fsync_args a;
3892 int _err;
3893
3894 a.a_desc = &vnop_fsync_desc;
3895 a.a_vp = vp;
3896 a.a_waitfor = waitfor;
3897 a.a_context = ctx;
3898
3899 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3900 DTRACE_FSINFO(fsync, vnode_t, vp);
3901
3902 return _err;
3903 }
3904
3905
3906 #if 0
3907 /*
3908 *#
3909 *#% remove dvp L U U
3910 *#% remove vp L U U
3911 *#
3912 */
3913 struct vnop_remove_args {
3914 struct vnodeop_desc *a_desc;
3915 vnode_t a_dvp;
3916 vnode_t a_vp;
3917 struct componentname *a_cnp;
3918 int a_flags;
3919 vfs_context_t a_context;
3920 };
3921 #endif /* 0*/
3922 errno_t
3923 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
3924 {
3925 int _err;
3926 struct vnop_remove_args a;
3927
3928 a.a_desc = &vnop_remove_desc;
3929 a.a_dvp = dvp;
3930 a.a_vp = vp;
3931 a.a_cnp = cnp;
3932 a.a_flags = flags;
3933 a.a_context = ctx;
3934
3935 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3936 DTRACE_FSINFO(remove, vnode_t, vp);
3937
3938 if (_err == 0) {
3939 vnode_setneedinactive(vp);
3940 #if CONFIG_APPLEDOUBLE
3941 if (!(NATIVE_XATTR(dvp))) {
3942 /*
3943 * Remove any associated extended attribute file (._ AppleDouble file).
3944 */
3945 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
3946 }
3947 #endif /* CONFIG_APPLEDOUBLE */
3948 }
3949
3950 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
3951 post_event_if_success(dvp, _err, NOTE_WRITE);
3952
3953 return _err;
3954 }
3955
3956 int
3957 VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
3958 {
3959 int _err;
3960 struct vnop_compound_remove_args a;
3961 int no_vp = (*vpp == NULLVP);
3962
3963 a.a_desc = &vnop_compound_remove_desc;
3964 a.a_dvp = dvp;
3965 a.a_vpp = vpp;
3966 a.a_cnp = &ndp->ni_cnd;
3967 a.a_flags = flags;
3968 a.a_vap = vap;
3969 a.a_context = ctx;
3970 a.a_remove_authorizer = vn_authorize_unlink;
3971
3972 _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
3973 if (_err == 0 && *vpp) {
3974 DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
3975 } else {
3976 DTRACE_FSINFO(compound_remove, vnode_t, dvp);
3977 }
3978 if (_err == 0) {
3979 vnode_setneedinactive(*vpp);
3980 #if CONFIG_APPLEDOUBLE
3981 if (!(NATIVE_XATTR(dvp))) {
3982 /*
3983 * Remove any associated extended attribute file (._ AppleDouble file).
3984 */
3985 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
3986 }
3987 #endif /* CONFIG_APPLEDOUBLE */
3988 }
3989
3990 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
3991 post_event_if_success(dvp, _err, NOTE_WRITE);
3992
3993 if (no_vp) {
3994 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
3995 if (*vpp && _err && _err != EKEEPLOOKING) {
3996 vnode_put(*vpp);
3997 *vpp = NULLVP;
3998 }
3999 }
4000
4001 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
4002
4003 return _err;
4004 }
4005
4006 #if 0
4007 /*
4008 *#
4009 *#% link vp U U U
4010 *#% link tdvp L U U
4011 *#
4012 */
4013 struct vnop_link_args {
4014 struct vnodeop_desc *a_desc;
4015 vnode_t a_vp;
4016 vnode_t a_tdvp;
4017 struct componentname *a_cnp;
4018 vfs_context_t a_context;
4019 };
4020 #endif /* 0*/
4021 errno_t
4022 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
4023 {
4024 int _err;
4025 struct vnop_link_args a;
4026
4027 #if CONFIG_APPLEDOUBLE
4028 /*
4029 * For file systems with non-native extended attributes,
4030 * disallow linking to an existing "._" Apple Double file.
4031 */
4032 if (!NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
4033 const char *vname;
4034
4035 vname = vnode_getname(vp);
4036 if (vname != NULL) {
4037 _err = 0;
4038 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
4039 _err = EPERM;
4040 }
4041 vnode_putname(vname);
4042 if (_err) {
4043 return _err;
4044 }
4045 }
4046 }
4047 #endif /* CONFIG_APPLEDOUBLE */
4048
4049 a.a_desc = &vnop_link_desc;
4050 a.a_vp = vp;
4051 a.a_tdvp = tdvp;
4052 a.a_cnp = cnp;
4053 a.a_context = ctx;
4054
4055 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
4056 DTRACE_FSINFO(link, vnode_t, vp);
4057
4058 post_event_if_success(vp, _err, NOTE_LINK);
4059 post_event_if_success(tdvp, _err, NOTE_WRITE);
4060
4061 return _err;
4062 }
4063
4064 errno_t
4065 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4066 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4067 vfs_rename_flags_t flags, vfs_context_t ctx)
4068 {
4069 int _err;
4070 struct nameidata *fromnd = NULL;
4071 struct nameidata *tond = NULL;
4072 #if CONFIG_APPLEDOUBLE
4073 vnode_t src_attr_vp = NULLVP;
4074 vnode_t dst_attr_vp = NULLVP;
4075 char smallname1[48];
4076 char smallname2[48];
4077 char *xfromname = NULL;
4078 char *xtoname = NULL;
4079 #endif /* CONFIG_APPLEDOUBLE */
4080 int batched;
4081 uint32_t tdfflags; // Target directory file flags
4082
4083 batched = vnode_compound_rename_available(fdvp);
4084
4085 if (!batched) {
4086 if (*fvpp == NULLVP) {
4087 panic("Not batched, and no fvp?");
4088 }
4089 }
4090
4091 #if CONFIG_APPLEDOUBLE
4092 /*
4093 * We need to preflight any potential AppleDouble file for the source file
4094 * before doing the rename operation, since we could potentially be doing
4095 * this operation on a network filesystem, and would end up duplicating
4096 * the work. Also, save the source and destination names. Skip it if the
4097 * source has a "._" prefix.
4098 */
4099
4100 if (!NATIVE_XATTR(fdvp) &&
4101 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
4102 size_t len;
4103 int error;
4104
4105 /* Get source attribute file name. */
4106 len = fcnp->cn_namelen + 3;
4107 if (len > sizeof(smallname1)) {
4108 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
4109 } else {
4110 xfromname = &smallname1[0];
4111 }
4112 strlcpy(xfromname, "._", min(sizeof smallname1, len));
4113 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
4114 xfromname[len - 1] = '\0';
4115
4116 /* Get destination attribute file name. */
4117 len = tcnp->cn_namelen + 3;
4118 if (len > sizeof(smallname2)) {
4119 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
4120 } else {
4121 xtoname = &smallname2[0];
4122 }
4123 strlcpy(xtoname, "._", min(sizeof smallname2, len));
4124 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
4125 xtoname[len - 1] = '\0';
4126
4127 /*
4128 * Look up source attribute file, keep reference on it if exists.
4129 * Note that we do the namei with the nameiop of RENAME, which is different than
4130 * in the rename syscall. It's OK if the source file does not exist, since this
4131 * is only for AppleDouble files.
4132 */
4133 MALLOC(fromnd, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK);
4134 NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
4135 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
4136 fromnd->ni_dvp = fdvp;
4137 error = namei(fromnd);
4138
4139 /*
4140 * If there was an error looking up source attribute file,
4141 * we'll behave as if it didn't exist.
4142 */
4143
4144 if (error == 0) {
4145 if (fromnd->ni_vp) {
4146 /* src_attr_vp indicates need to call vnode_put / nameidone later */
4147 src_attr_vp = fromnd->ni_vp;
4148
4149 if (fromnd->ni_vp->v_type != VREG) {
4150 src_attr_vp = NULLVP;
4151 vnode_put(fromnd->ni_vp);
4152 }
4153 }
4154 /*
4155 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4156 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4157 * have a vnode here, so we drop our namei buffer for the source attribute file
4158 */
4159 if (src_attr_vp == NULLVP) {
4160 nameidone(fromnd);
4161 }
4162 }
4163 }
4164 #endif /* CONFIG_APPLEDOUBLE */
4165
4166 if (batched) {
4167 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
4168 if (_err != 0) {
4169 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
4170 }
4171 } else {
4172 if (flags) {
4173 _err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
4174 if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
4175 // Legacy...
4176 if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
4177 fcnp->cn_flags |= CN_SECLUDE_RENAME;
4178 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4179 }
4180 }
4181 } else {
4182 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4183 }
4184 }
4185
4186 /*
4187 * If moved to a new directory that is restricted,
4188 * set the restricted flag on the item moved.
4189 */
4190 if (_err == 0) {
4191 _err = vnode_flags(tdvp, &tdfflags, ctx);
4192 if (_err == 0) {
4193 uint32_t inherit_flags = tdfflags & (UF_DATAVAULT | SF_RESTRICTED);
4194 if (inherit_flags) {
4195 uint32_t fflags;
4196 _err = vnode_flags(*fvpp, &fflags, ctx);
4197 if (_err == 0 && fflags != (fflags | inherit_flags)) {
4198 struct vnode_attr va;
4199 VATTR_INIT(&va);
4200 VATTR_SET(&va, va_flags, fflags | inherit_flags);
4201 _err = vnode_setattr(*fvpp, &va, ctx);
4202 }
4203 }
4204 }
4205 }
4206
4207 #if CONFIG_MACF
4208 if (_err == 0) {
4209 mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
4210 }
4211 #endif
4212
4213 #if CONFIG_APPLEDOUBLE
4214 /*
4215 * Rename any associated extended attribute file (._ AppleDouble file).
4216 */
4217 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
4218 int error = 0;
4219
4220 /*
4221 * Get destination attribute file vnode.
4222 * Note that tdvp already has an iocount reference. Make sure to check that we
4223 * get a valid vnode from namei.
4224 */
4225 MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK);
4226 NDINIT(tond, RENAME, OP_RENAME,
4227 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4228 CAST_USER_ADDR_T(xtoname), ctx);
4229 tond->ni_dvp = tdvp;
4230 error = namei(tond);
4231
4232 if (error) {
4233 goto ad_error;
4234 }
4235
4236 if (tond->ni_vp) {
4237 dst_attr_vp = tond->ni_vp;
4238 }
4239
4240 if (src_attr_vp) {
4241 const char *old_name = src_attr_vp->v_name;
4242 vnode_t old_parent = src_attr_vp->v_parent;
4243
4244 if (batched) {
4245 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
4246 tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
4247 0, ctx);
4248 } else {
4249 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
4250 tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
4251 }
4252
4253 if (error == 0 && old_name == src_attr_vp->v_name &&
4254 old_parent == src_attr_vp->v_parent) {
4255 int update_flags = VNODE_UPDATE_NAME;
4256
4257 if (fdvp != tdvp) {
4258 update_flags |= VNODE_UPDATE_PARENT;
4259 }
4260
4261 if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
4262 vnode_update_identity(src_attr_vp, tdvp,
4263 tond->ni_cnd.cn_nameptr,
4264 tond->ni_cnd.cn_namelen,
4265 tond->ni_cnd.cn_hash,
4266 update_flags);
4267 }
4268 }
4269
4270 /* kevent notifications for moving resource files
4271 * _err is zero if we're here, so no need to notify the directories; the
4272 * code below will do that. We only need to post the rename on the source
4273 * and possibly a delete on the dest.
4274 */
4275 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4276 if (dst_attr_vp) {
4277 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4278 }
4279 } else if (dst_attr_vp) {
4280 /*
4281 * Just delete destination attribute file vnode if it exists, since
4282 * we didn't have a source attribute file.
4283 * Note that tdvp already has an iocount reference.
4284 */
4285
4286 struct vnop_remove_args args;
4287
4288 args.a_desc = &vnop_remove_desc;
4289 args.a_dvp = tdvp;
4290 args.a_vp = dst_attr_vp;
4291 args.a_cnp = &tond->ni_cnd;
4292 args.a_context = ctx;
4293
4294 if (error == 0) {
4295 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
4296
4297 if (error == 0) {
4298 vnode_setneedinactive(dst_attr_vp);
4299 }
4300 }
4301
4302 /* kevent notification for deleting the destination's attribute file
4303 * if it existed. Only need to post the delete on the destination, since
4304 * the code below will handle the directories.
4305 */
4306 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4307 }
4308 }
4309 ad_error:
4310 if (src_attr_vp) {
4311 vnode_put(src_attr_vp);
4312 nameidone(fromnd);
4313 }
4314 if (dst_attr_vp) {
4315 vnode_put(dst_attr_vp);
4316 nameidone(tond);
4317 }
4318 if (xfromname && xfromname != &smallname1[0]) {
4319 FREE(xfromname, M_TEMP);
4320 }
4321 if (xtoname && xtoname != &smallname2[0]) {
4322 FREE(xtoname, M_TEMP);
4323 }
4324 #endif /* CONFIG_APPLEDOUBLE */
4325 if (fromnd) {
4326 FREE(fromnd, M_TEMP);
4327 }
4328 if (tond) {
4329 FREE(tond, M_TEMP);
4330 }
4331 return _err;
4332 }
4333
4334
4335 #if 0
4336 /*
4337 *#
4338 *#% rename fdvp U U U
4339 *#% rename fvp U U U
4340 *#% rename tdvp L U U
4341 *#% rename tvp X U U
4342 *#
4343 */
4344 struct vnop_rename_args {
4345 struct vnodeop_desc *a_desc;
4346 vnode_t a_fdvp;
4347 vnode_t a_fvp;
4348 struct componentname *a_fcnp;
4349 vnode_t a_tdvp;
4350 vnode_t a_tvp;
4351 struct componentname *a_tcnp;
4352 vfs_context_t a_context;
4353 };
4354 #endif /* 0*/
4355 errno_t
4356 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4357 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4358 vfs_context_t ctx)
4359 {
4360 int _err = 0;
4361 struct vnop_rename_args a;
4362
4363 a.a_desc = &vnop_rename_desc;
4364 a.a_fdvp = fdvp;
4365 a.a_fvp = fvp;
4366 a.a_fcnp = fcnp;
4367 a.a_tdvp = tdvp;
4368 a.a_tvp = tvp;
4369 a.a_tcnp = tcnp;
4370 a.a_context = ctx;
4371
4372 /* do the rename of the main file. */
4373 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4374 DTRACE_FSINFO(rename, vnode_t, fdvp);
4375
4376 if (_err) {
4377 return _err;
4378 }
4379
4380 return post_rename(fdvp, fvp, tdvp, tvp);
4381 }
4382
4383 static errno_t
4384 post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
4385 {
4386 if (tvp && tvp != fvp) {
4387 vnode_setneedinactive(tvp);
4388 }
4389
4390 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4391 int events = NOTE_WRITE;
4392 if (vnode_isdir(fvp)) {
4393 /* Link count on dir changed only if we are moving a dir and...
4394 * --Moved to new dir, not overwriting there
4395 * --Kept in same dir and DID overwrite
4396 */
4397 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4398 events |= NOTE_LINK;
4399 }
4400 }
4401
4402 lock_vnode_and_post(fdvp, events);
4403 if (fdvp != tdvp) {
4404 lock_vnode_and_post(tdvp, events);
4405 }
4406
4407 /* If you're replacing the target, post a deletion for it */
4408 if (tvp) {
4409 lock_vnode_and_post(tvp, NOTE_DELETE);
4410 }
4411
4412 lock_vnode_and_post(fvp, NOTE_RENAME);
4413
4414 return 0;
4415 }
4416
4417 #if 0
4418 /*
4419 *#
4420 *#% renamex fdvp U U U
4421 *#% renamex fvp U U U
4422 *#% renamex tdvp L U U
4423 *#% renamex tvp X U U
4424 *#
4425 */
4426 struct vnop_renamex_args {
4427 struct vnodeop_desc *a_desc;
4428 vnode_t a_fdvp;
4429 vnode_t a_fvp;
4430 struct componentname *a_fcnp;
4431 vnode_t a_tdvp;
4432 vnode_t a_tvp;
4433 struct componentname *a_tcnp;
4434 vfs_rename_flags_t a_flags;
4435 vfs_context_t a_context;
4436 };
4437 #endif /* 0*/
4438 errno_t
4439 VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4440 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4441 vfs_rename_flags_t flags, vfs_context_t ctx)
4442 {
4443 int _err = 0;
4444 struct vnop_renamex_args a;
4445
4446 a.a_desc = &vnop_renamex_desc;
4447 a.a_fdvp = fdvp;
4448 a.a_fvp = fvp;
4449 a.a_fcnp = fcnp;
4450 a.a_tdvp = tdvp;
4451 a.a_tvp = tvp;
4452 a.a_tcnp = tcnp;
4453 a.a_flags = flags;
4454 a.a_context = ctx;
4455
4456 /* do the rename of the main file. */
4457 _err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
4458 DTRACE_FSINFO(renamex, vnode_t, fdvp);
4459
4460 if (_err) {
4461 return _err;
4462 }
4463
4464 return post_rename(fdvp, fvp, tdvp, tvp);
4465 }
4466
4467
4468 int
4469 VNOP_COMPOUND_RENAME(
4470 struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4471 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4472 uint32_t flags, vfs_context_t ctx)
4473 {
4474 int _err = 0;
4475 int events;
4476 struct vnop_compound_rename_args a;
4477 int no_fvp, no_tvp;
4478
4479 no_fvp = (*fvpp) == NULLVP;
4480 no_tvp = (*tvpp) == NULLVP;
4481
4482 a.a_desc = &vnop_compound_rename_desc;
4483
4484 a.a_fdvp = fdvp;
4485 a.a_fvpp = fvpp;
4486 a.a_fcnp = fcnp;
4487 a.a_fvap = fvap;
4488
4489 a.a_tdvp = tdvp;
4490 a.a_tvpp = tvpp;
4491 a.a_tcnp = tcnp;
4492 a.a_tvap = tvap;
4493
4494 a.a_flags = flags;
4495 a.a_context = ctx;
4496 a.a_rename_authorizer = vn_authorize_rename;
4497 a.a_reserved = NULL;
4498
4499 /* do the rename of the main file. */
4500 _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
4501 DTRACE_FSINFO(compound_rename, vnode_t, fdvp);
4502
4503 if (_err == 0) {
4504 if (*tvpp && *tvpp != *fvpp) {
4505 vnode_setneedinactive(*tvpp);
4506 }
4507 }
4508
4509 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4510 if (_err == 0 && *fvpp != *tvpp) {
4511 if (!*fvpp) {
4512 panic("No fvpp after compound rename?");
4513 }
4514
4515 events = NOTE_WRITE;
4516 if (vnode_isdir(*fvpp)) {
4517 /* Link count on dir changed only if we are moving a dir and...
4518 * --Moved to new dir, not overwriting there
4519 * --Kept in same dir and DID overwrite
4520 */
4521 if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
4522 events |= NOTE_LINK;
4523 }
4524 }
4525
4526 lock_vnode_and_post(fdvp, events);
4527 if (fdvp != tdvp) {
4528 lock_vnode_and_post(tdvp, events);
4529 }
4530
4531 /* If you're replacing the target, post a deletion for it */
4532 if (*tvpp) {
4533 lock_vnode_and_post(*tvpp, NOTE_DELETE);
4534 }
4535
4536 lock_vnode_and_post(*fvpp, NOTE_RENAME);
4537 }
4538
4539 if (no_fvp) {
4540 lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
4541 }
4542 if (no_tvp && *tvpp != NULLVP) {
4543 lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
4544 }
4545
4546 if (_err && _err != EKEEPLOOKING) {
4547 if (*fvpp) {
4548 vnode_put(*fvpp);
4549 *fvpp = NULLVP;
4550 }
4551 if (*tvpp) {
4552 vnode_put(*tvpp);
4553 *tvpp = NULLVP;
4554 }
4555 }
4556
4557 return _err;
4558 }
4559
4560 int
4561 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4562 struct vnode_attr *vap, vfs_context_t ctx)
4563 {
4564 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4565 panic("Non-CREATE nameiop in vn_mkdir()?");
4566 }
4567
4568 if (vnode_compound_mkdir_available(dvp)) {
4569 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4570 } else {
4571 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4572 }
4573 }
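/*
 * Illustrative sketch only (an assumption, not code used by this file): the
 * kind of vnode_attr a mkdir caller typically prepares before reaching
 * vn_mkdir above. The mode bits are an example value, not a requirement.
 */
#if 0
static void
example_prepare_mkdir_attrs(struct vnode_attr *vap)
{
	VATTR_INIT(vap);
	VATTR_SET(vap, va_type, VDIR);                          /* creating a directory */
	VATTR_SET(vap, va_mode, S_IRWXU | S_IRGRP | S_IXGRP);   /* example permissions */
}
#endif /* 0 */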
4574
4575 #if 0
4576 /*
4577 *#
4578 *#% mkdir dvp L U U
4579 *#% mkdir vpp - L -
4580 *#
4581 */
4582 struct vnop_mkdir_args {
4583 struct vnodeop_desc *a_desc;
4584 vnode_t a_dvp;
4585 vnode_t *a_vpp;
4586 struct componentname *a_cnp;
4587 struct vnode_attr *a_vap;
4588 vfs_context_t a_context;
4589 };
4590 #endif /* 0*/
4591 errno_t
4592 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4593 struct vnode_attr *vap, vfs_context_t ctx)
4594 {
4595 int _err;
4596 struct vnop_mkdir_args a;
4597
4598 a.a_desc = &vnop_mkdir_desc;
4599 a.a_dvp = dvp;
4600 a.a_vpp = vpp;
4601 a.a_cnp = cnp;
4602 a.a_vap = vap;
4603 a.a_context = ctx;
4604
4605 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4606 if (_err == 0 && *vpp) {
4607 DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4608 }
4609 #if CONFIG_APPLEDOUBLE
4610 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4611 /*
4612 * Remove stale Apple Double file (if any).
4613 */
4614 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4615 }
4616 #endif /* CONFIG_APPLEDOUBLE */
4617
4618 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4619
4620 return _err;
4621 }
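/*
 * Illustrative sketch only (an assumption about typical usage, not code from
 * this file): a filesystem routes VNOP_MKDIR to its own handler by listing
 * vnop_mkdir_desc in the vnodeopv_entry_desc table it supplies when it
 * registers with vfs_fsadd(). The handler and table names are examples.
 */
#if 0
static int example_vnop_mkdir(struct vnop_mkdir_args *ap);

static struct vnodeopv_entry_desc example_vnodeop_entries[] = {
	{ &vnop_mkdir_desc, (int (*)(void *))example_vnop_mkdir },
	{ (struct vnodeop_desc *)NULL, (int (*)(void *))NULL }
};
#endif /* 0 */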
4622
4623 int
4624 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4625 struct vnode_attr *vap, vfs_context_t ctx)
4626 {
4627 int _err;
4628 struct vnop_compound_mkdir_args a;
4629
4630 a.a_desc = &vnop_compound_mkdir_desc;
4631 a.a_dvp = dvp;
4632 a.a_vpp = vpp;
4633 a.a_cnp = &ndp->ni_cnd;
4634 a.a_vap = vap;
4635 a.a_flags = 0;
4636 a.a_context = ctx;
4637 #if 0
4638 a.a_mkdir_authorizer = vn_authorize_mkdir;
4639 #endif /* 0 */
4640 a.a_reserved = NULL;
4641
4642 _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
4643 if (_err == 0 && *vpp) {
4644 DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
4645 }
4646 #if CONFIG_APPLEDOUBLE
4647 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4648 /*
4649 * Remove stale Apple Double file (if any).
4650 */
4651 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4652 }
4653 #endif /* CONFIG_APPLEDOUBLE */
4654
4655 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4656
4657 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
4658 if (*vpp && _err && _err != EKEEPLOOKING) {
4659 vnode_put(*vpp);
4660 *vpp = NULLVP;
4661 }
4662
4663 return _err;
4664 }
4665
4666 int
4667 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4668 {
4669 if (vnode_compound_rmdir_available(dvp)) {
4670 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4671 } else {
4672 if (*vpp == NULLVP) {
4673 panic("NULL vp, but not a compound VNOP?");
4674 }
4675 if (vap != NULL) {
4676 panic("Non-NULL vap, but not a compound VNOP?");
4677 }
4678 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4679 }
4680 }
4681
4682 #if 0
4683 /*
4684 *#
4685 *#% rmdir dvp L U U
4686 *#% rmdir vp L U U
4687 *#
4688 */
4689 struct vnop_rmdir_args {
4690 struct vnodeop_desc *a_desc;
4691 vnode_t a_dvp;
4692 vnode_t a_vp;
4693 struct componentname *a_cnp;
4694 vfs_context_t a_context;
4695 };
4696
4697 #endif /* 0*/
4698 errno_t
4699 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4700 {
4701 int _err;
4702 struct vnop_rmdir_args a;
4703
4704 a.a_desc = &vnop_rmdir_desc;
4705 a.a_dvp = dvp;
4706 a.a_vp = vp;
4707 a.a_cnp = cnp;
4708 a.a_context = ctx;
4709
4710 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4711 DTRACE_FSINFO(rmdir, vnode_t, vp);
4712
4713 if (_err == 0) {
4714 vnode_setneedinactive(vp);
4715 #if CONFIG_APPLEDOUBLE
4716 if (!(NATIVE_XATTR(dvp))) {
4717 /*
4718 * Remove any associated extended attribute file (._ AppleDouble file).
4719 */
4720 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4721 }
4722 #endif
4723 }
4724
4725 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4726 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4727 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4728
4729 return _err;
4730 }
4731
4732 int
4733 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4734 struct vnode_attr *vap, vfs_context_t ctx)
4735 {
4736 int _err;
4737 struct vnop_compound_rmdir_args a;
4738 int no_vp;
4739
4740 a.a_desc = &vnop_compound_rmdir_desc;
4741 a.a_dvp = dvp;
4742 a.a_vpp = vpp;
4743 a.a_cnp = &ndp->ni_cnd;
4744 a.a_vap = vap;
4745 a.a_flags = 0;
4746 a.a_context = ctx;
4747 a.a_rmdir_authorizer = vn_authorize_rmdir;
4748 a.a_reserved = NULL;
4749
4750 no_vp = (*vpp == NULLVP);
4751
4752 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
4753 if (_err == 0 && *vpp) {
4754 DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
4755 }
4756 #if CONFIG_APPLEDOUBLE
4757 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4758 /*
4759 * Remove stale Apple Double file (if any).
4760 */
4761 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4762 }
4763 #endif
4764
4765 if (*vpp) {
4766 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4767 }
4768 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4769
4770 if (no_vp) {
4771 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4772
4773 #if 0 /* Removing orphaned ._ files requires a vp.... */
4774 if (*vpp && _err && _err != EKEEPLOOKING) {
4775 vnode_put(*vpp);
4776 *vpp = NULLVP;
4777 }
4778 #endif /* 0 */
4779 }
4780
4781 return _err;
4782 }
4783
4784 #if CONFIG_APPLEDOUBLE
4785 /*
4786 * Remove a ._ AppleDouble file
4787 */
4788 #define AD_STALE_SECS (180)
4789 static void
4790 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
4791 {
4792 vnode_t xvp;
4793 struct nameidata nd;
4794 char smallname[64];
4795 char *filename = NULL;
4796 size_t len;
4797
4798 if ((basename == NULL) || (basename[0] == '\0') ||
4799 (basename[0] == '.' && basename[1] == '_')) {
4800 return;
4801 }
4802 filename = &smallname[0];
4803 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4804 if (len >= sizeof(smallname)) {
4805 len++; /* snprintf result doesn't include '\0' */
4806 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4807 len = snprintf(filename, len, "._%s", basename);
4808 }
4809 NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
4810 CAST_USER_ADDR_T(filename), ctx);
4811 nd.ni_dvp = dvp;
4812 if (namei(&nd) != 0) {
4813 goto out2;
4814 }
4815
4816 xvp = nd.ni_vp;
4817 nameidone(&nd);
4818 if (xvp->v_type != VREG) {
4819 goto out1;
4820 }
4821
4822 /*
4823 * When creating a new object and a "._" file already
4824 * exists, check to see if it's a stale "._" file.
4825 *
4826 */
4827 if (!force) {
4828 struct vnode_attr va;
4829
4830 VATTR_INIT(&va);
4831 VATTR_WANTED(&va, va_data_size);
4832 VATTR_WANTED(&va, va_modify_time);
4833 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
4834 VATTR_IS_SUPPORTED(&va, va_data_size) &&
4835 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
4836 va.va_data_size != 0) {
4837 struct timeval tv;
4838
4839 microtime(&tv);
4840 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
4841 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
4842 force = 1; /* must be stale */
4843 }
4844 }
4845 }
4846 if (force) {
4847 int error;
4848
4849 error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
4850 if (error == 0) {
4851 vnode_setneedinactive(xvp);
4852 }
4853
4854 post_event_if_success(xvp, error, NOTE_DELETE);
4855 post_event_if_success(dvp, error, NOTE_WRITE);
4856 }
4857
4858 out1:
4859 vnode_put(dvp);
4860 vnode_put(xvp);
4861 out2:
4862 if (filename && filename != &smallname[0]) {
4863 FREE(filename, M_TEMP);
4864 }
4865 }
4866
4867 /*
4868 * Shadow uid/gid/mode to a ._ AppleDouble file
4869 */
4870 static void
4871 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
4872 vfs_context_t ctx)
4873 {
4874 vnode_t xvp;
4875 struct nameidata nd;
4876 char smallname[64];
4877 char *filename = NULL;
4878 size_t len;
4879
4880 if ((dvp == NULLVP) ||
4881 (basename == NULL) || (basename[0] == '\0') ||
4882 (basename[0] == '.' && basename[1] == '_')) {
4883 return;
4884 }
4885 filename = &smallname[0];
4886 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4887 if (len >= sizeof(smallname)) {
4888 len++; /* snprintf result doesn't include '\0' */
4889 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4890 len = snprintf(filename, len, "._%s", basename);
4891 }
4892 NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
4893 CAST_USER_ADDR_T(filename), ctx);
4894 nd.ni_dvp = dvp;
4895 if (namei(&nd) != 0) {
4896 goto out2;
4897 }
4898
4899 xvp = nd.ni_vp;
4900 nameidone(&nd);
4901
4902 if (xvp->v_type == VREG) {
4903 struct vnop_setattr_args a;
4904
4905 a.a_desc = &vnop_setattr_desc;
4906 a.a_vp = xvp;
4907 a.a_vap = vap;
4908 a.a_context = ctx;
4909
4910 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
4911 }
4912
4913 vnode_put(xvp);
4914 out2:
4915 if (filename && filename != &smallname[0]) {
4916 FREE(filename, M_TEMP);
4917 }
4918 }
4919 #endif /* CONFIG_APPLEDOUBLE */
4920
4921 #if 0
4922 /*
4923 *#
4924 *#% symlink dvp L U U
4925 *#% symlink vpp - U -
4926 *#
4927 */
4928 struct vnop_symlink_args {
4929 struct vnodeop_desc *a_desc;
4930 vnode_t a_dvp;
4931 vnode_t *a_vpp;
4932 struct componentname *a_cnp;
4933 struct vnode_attr *a_vap;
4934 char *a_target;
4935 vfs_context_t a_context;
4936 };
4937
4938 #endif /* 0*/
4939 errno_t
4940 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4941 struct vnode_attr *vap, char *target, vfs_context_t ctx)
4942 {
4943 int _err;
4944 struct vnop_symlink_args a;
4945
4946 a.a_desc = &vnop_symlink_desc;
4947 a.a_dvp = dvp;
4948 a.a_vpp = vpp;
4949 a.a_cnp = cnp;
4950 a.a_vap = vap;
4951 a.a_target = target;
4952 a.a_context = ctx;
4953
4954 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
4955 DTRACE_FSINFO(symlink, vnode_t, dvp);
4956 #if CONFIG_APPLEDOUBLE
4957 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4958 /*
4959 * Remove stale Apple Double file (if any). Posts its own knotes
4960 */
4961 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4962 }
4963 #endif /* CONFIG_APPLEDOUBLE */
4964
4965 post_event_if_success(dvp, _err, NOTE_WRITE);
4966
4967 return _err;
4968 }
4969
4970 #if 0
4971 /*
4972 *#
4973 *#% readdir vp L L L
4974 *#
4975 */
4976 struct vnop_readdir_args {
4977 struct vnodeop_desc *a_desc;
4978 vnode_t a_vp;
4979 struct uio *a_uio;
4980 int a_flags;
4981 int *a_eofflag;
4982 int *a_numdirent;
4983 vfs_context_t a_context;
4984 };
4985
4986 #endif /* 0*/
4987 errno_t
4988 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
4989 int *numdirent, vfs_context_t ctx)
4990 {
4991 int _err;
4992 struct vnop_readdir_args a;
4993 #if CONFIG_DTRACE
4994 user_ssize_t resid = uio_resid(uio);
4995 #endif
4996
4997 a.a_desc = &vnop_readdir_desc;
4998 a.a_vp = vp;
4999 a.a_uio = uio;
5000 a.a_flags = flags;
5001 a.a_eofflag = eofflag;
5002 a.a_numdirent = numdirent;
5003 a.a_context = ctx;
5004
5005 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
5006 DTRACE_FSINFO_IO(readdir,
5007 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5008
5009 return _err;
5010 }
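/*
 * Illustrative sketch only (assumption, not from this file): a single
 * VNOP_READDIR call that fills a caller-supplied kernel buffer with dirent
 * records and reports EOF plus the number of entries returned. Decoding the
 * records is left to the caller and is filesystem-format dependent.
 */
#if 0
static int
example_read_directory(vnode_t dvp, void *buf, size_t buflen, vfs_context_t ctx)
{
	uio_t auio;
	int eofflag = 0;
	int numdirent = 0;
	int error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL) {
		return ENOMEM;
	}
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);

	error = VNOP_READDIR(dvp, auio, 0 /* flags */, &eofflag, &numdirent, ctx);

	uio_free(auio);
	return error;
}
#endif /* 0 */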
5011
5012 #if 0
5013 /*
5014 *#
5015 *#% readdirattr vp L L L
5016 *#
5017 */
5018 struct vnop_readdirattr_args {
5019 struct vnodeop_desc *a_desc;
5020 vnode_t a_vp;
5021 struct attrlist *a_alist;
5022 struct uio *a_uio;
5023 uint32_t a_maxcount;
5024 uint32_t a_options;
5025 uint32_t *a_newstate;
5026 int *a_eofflag;
5027 uint32_t *a_actualcount;
5028 vfs_context_t a_context;
5029 };
5030
5031 #endif /* 0*/
5032 errno_t
5033 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
5034 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
5035 {
5036 int _err;
5037 struct vnop_readdirattr_args a;
5038 #if CONFIG_DTRACE
5039 user_ssize_t resid = uio_resid(uio);
5040 #endif
5041
5042 a.a_desc = &vnop_readdirattr_desc;
5043 a.a_vp = vp;
5044 a.a_alist = alist;
5045 a.a_uio = uio;
5046 a.a_maxcount = maxcount;
5047 a.a_options = options;
5048 a.a_newstate = newstate;
5049 a.a_eofflag = eofflag;
5050 a.a_actualcount = actualcount;
5051 a.a_context = ctx;
5052
5053 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
5054 DTRACE_FSINFO_IO(readdirattr,
5055 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5056
5057 return _err;
5058 }
5059
5060 #if 0
5061 struct vnop_getattrlistbulk_args {
5062 struct vnodeop_desc *a_desc;
5063 vnode_t a_vp;
5064 struct attrlist *a_alist;
5065 struct vnode_attr *a_vap;
5066 struct uio *a_uio;
5067 void *a_private;
5068 uint64_t a_options;
5069 int *a_eofflag;
5070 uint32_t *a_actualcount;
5071 vfs_context_t a_context;
5072 };
5073 #endif /* 0*/
5074 errno_t
5075 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
5076 struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
5077 int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
5078 {
5079 int _err;
5080 struct vnop_getattrlistbulk_args a;
5081 #if CONFIG_DTRACE
5082 user_ssize_t resid = uio_resid(uio);
5083 #endif
5084
5085 a.a_desc = &vnop_getattrlistbulk_desc;
5086 a.a_vp = vp;
5087 a.a_alist = alist;
5088 a.a_vap = vap;
5089 a.a_uio = uio;
5090 a.a_private = private;
5091 a.a_options = options;
5092 a.a_eofflag = eofflag;
5093 a.a_actualcount = actualcount;
5094 a.a_context = ctx;
5095
5096 _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
5097 DTRACE_FSINFO_IO(getattrlistbulk,
5098 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5099
5100 return _err;
5101 }
5102
5103 #if 0
5104 /*
5105 *#
5106 *#% readlink vp L L L
5107 *#
5108 */
5109 struct vnop_readlink_args {
5110 struct vnodeop_desc *a_desc;
5111 vnode_t a_vp;
5112 struct uio *a_uio;
5113 vfs_context_t a_context;
5114 };
5115 #endif /* 0 */
5116
5117 /*
5118 * Returns: 0 Success
5119 * lock_fsnode:ENOENT No such file or directory [only for VFS
5120 * that is not thread safe & vnode is
5121 * currently being/has been terminated]
5122 * <vfs_readlink>:EINVAL
5123 * <vfs_readlink>:???
5124 *
5125 * Note: The return codes from the underlying VFS's readlink routine
5126 * can't be fully enumerated here, since third party VFS authors
5127 * may not limit their error returns to the ones documented here,
5128 * even though this may result in some programs functioning
5129 * incorrectly.
5130 *
5131 * The return codes documented above are those which may currently
5132 * be returned by HFS from hfs_vnop_readlink, not including
5133 * additional error codes which may be propagated from underlying
5134 * routines.
5135 */
5136 errno_t
5137 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
5138 {
5139 int _err;
5140 struct vnop_readlink_args a;
5141 #if CONFIG_DTRACE
5142 user_ssize_t resid = uio_resid(uio);
5143 #endif
5144 a.a_desc = &vnop_readlink_desc;
5145 a.a_vp = vp;
5146 a.a_uio = uio;
5147 a.a_context = ctx;
5148
5149 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
5150 DTRACE_FSINFO_IO(readlink,
5151 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5152
5153 return _err;
5154 }
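/*
 * Illustrative sketch only (assumption): reading a symlink target into a
 * kernel buffer via VNOP_READLINK, for a caller that already holds an
 * iocount on the symlink vnode. NUL-termination is the caller's
 * responsibility; the arithmetic below assumes bufsize >= 1.
 */
#if 0
static int
example_read_link_target(vnode_t vp, char *buf, size_t bufsize, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL) {
		return ENOMEM;
	}
	uio_addiov(auio, CAST_USER_ADDR_T(buf), bufsize - 1);

	error = VNOP_READLINK(vp, auio, ctx);
	if (error == 0) {
		/* uio_resid() tells us how much of the buffer went unused */
		buf[(bufsize - 1) - (size_t)uio_resid(auio)] = '\0';
	}

	uio_free(auio);
	return error;
}
#endif /* 0 */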
5155
5156 #if 0
5157 /*
5158 *#
5159 *#% inactive vp L U U
5160 *#
5161 */
5162 struct vnop_inactive_args {
5163 struct vnodeop_desc *a_desc;
5164 vnode_t a_vp;
5165 vfs_context_t a_context;
5166 };
5167 #endif /* 0*/
5168 errno_t
5169 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
5170 {
5171 int _err;
5172 struct vnop_inactive_args a;
5173
5174 a.a_desc = &vnop_inactive_desc;
5175 a.a_vp = vp;
5176 a.a_context = ctx;
5177
5178 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
5179 DTRACE_FSINFO(inactive, vnode_t, vp);
5180
5181 #if NAMEDSTREAMS
5182 /* For file systems that do not support named streams natively, mark
5183 * the shadow stream file vnode to be recycled as soon as the last
5184 * reference goes away. To avoid re-entering reclaim code, do not
5185 * call recycle on terminating namedstream vnodes.
5186 */
5187 if (vnode_isnamedstream(vp) &&
5188 (vp->v_parent != NULLVP) &&
5189 vnode_isshadow(vp) &&
5190 ((vp->v_lflag & VL_TERMINATE) == 0)) {
5191 vnode_recycle(vp);
5192 }
5193 #endif
5194
5195 return _err;
5196 }
5197
5198
5199 #if 0
5200 /*
5201 *#
5202 *#% reclaim vp U U U
5203 *#
5204 */
5205 struct vnop_reclaim_args {
5206 struct vnodeop_desc *a_desc;
5207 vnode_t a_vp;
5208 vfs_context_t a_context;
5209 };
5210 #endif /* 0*/
5211 errno_t
5212 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5213 {
5214 int _err;
5215 struct vnop_reclaim_args a;
5216
5217 a.a_desc = &vnop_reclaim_desc;
5218 a.a_vp = vp;
5219 a.a_context = ctx;
5220
5221 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5222 DTRACE_FSINFO(reclaim, vnode_t, vp);
5223
5224 return _err;
5225 }
5226
5227
5228 /*
5229 * Returns: 0 Success
5230 * lock_fsnode:ENOENT No such file or directory [only for VFS
5231 * that is not thread safe & vnode is
5232 * currently being/has been terminated]
5233 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5234 */
5235 #if 0
5236 /*
5237 *#
5238 *#% pathconf vp L L L
5239 *#
5240 */
5241 struct vnop_pathconf_args {
5242 struct vnodeop_desc *a_desc;
5243 vnode_t a_vp;
5244 int a_name;
5245 int32_t *a_retval;
5246 vfs_context_t a_context;
5247 };
5248 #endif /* 0*/
5249 errno_t
5250 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5251 {
5252 int _err;
5253 struct vnop_pathconf_args a;
5254
5255 a.a_desc = &vnop_pathconf_desc;
5256 a.a_vp = vp;
5257 a.a_name = name;
5258 a.a_retval = retval;
5259 a.a_context = ctx;
5260
5261 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5262 DTRACE_FSINFO(pathconf, vnode_t, vp);
5263
5264 return _err;
5265 }
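/*
 * Illustrative sketch only (assumption): querying a single pathconf
 * variable, here _PC_NAME_MAX, through VNOP_PATHCONF.
 */
#if 0
static int
example_query_name_max(vnode_t vp, vfs_context_t ctx, int32_t *name_max)
{
	*name_max = 0;
	return VNOP_PATHCONF(vp, _PC_NAME_MAX, name_max, ctx);
}
#endif /* 0 */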
5266
5267 /*
5268 * Returns: 0 Success
5269 * err_advlock:ENOTSUP
5270 * lf_advlock:???
5271 * <vnop_advlock_desc>:???
5272 *
5273 * Notes: VFS implementations that perform advisory locking via calls
5274 * through <vnop_advlock_desc> (because lock enforcement does not
5275 * occur locally) should try to limit themselves to the return codes
5276 * documented above for lf_advlock and err_advlock.
5277 */
5278 #if 0
5279 /*
5280 *#
5281 *#% advlock vp U U U
5282 *#
5283 */
5284 struct vnop_advlock_args {
5285 struct vnodeop_desc *a_desc;
5286 vnode_t a_vp;
5287 caddr_t a_id;
5288 int a_op;
5289 struct flock *a_fl;
5290 int a_flags;
5291 vfs_context_t a_context;
5292 };
5293 #endif /* 0*/
5294 errno_t
5295 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
5296 {
5297 int _err;
5298 struct vnop_advlock_args a;
5299
5300 a.a_desc = &vnop_advlock_desc;
5301 a.a_vp = vp;
5302 a.a_id = id;
5303 a.a_op = op;
5304 a.a_fl = fl;
5305 a.a_flags = flags;
5306 a.a_context = ctx;
5307 a.a_timeout = timeout;
5308
5309 /* Disallow advisory locking on non-seekable vnodes */
5310 if (vnode_isfifo(vp)) {
5311 _err = err_advlock(&a);
5312 } else {
5313 if ((vp->v_flag & VLOCKLOCAL)) {
5314 /* Advisory locking done at this layer */
5315 _err = lf_advlock(&a);
5316 } else if (flags & F_OFD_LOCK) {
5317 /* Non-local locking doesn't work for OFD locks */
5318 _err = err_advlock(&a);
5319 } else {
5320 /* Advisory locking done by underlying filesystem */
5321 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
5322 }
5323 DTRACE_FSINFO(advlock, vnode_t, vp);
5324 if (op == F_UNLCK && flags == F_FLOCK) {
5325 post_event_if_success(vp, _err, NOTE_FUNLOCK);
5326 }
5327 }
5328
5329 return _err;
5330 }
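/*
 * Illustrative sketch only (assumption): requesting a whole-file POSIX-style
 * write lock through VNOP_ADVLOCK. Using the proc pointer as the lock owner
 * id together with F_POSIX mirrors the usual fcntl(F_SETLK) path, but the
 * details here are an example, not a contract.
 */
#if 0
static int
example_advlock_whole_file(vnode_t vp, proc_t p, vfs_context_t ctx)
{
	struct flock fl;

	bzero(&fl, sizeof(fl));
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;           /* zero length means "to end of file" */
	fl.l_type = F_WRLCK;

	return VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, F_POSIX, ctx, NULL);
}
#endif /* 0 */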
5331
5332
5333
5334 #if 0
5335 /*
5336 *#
5337 *#% allocate vp L L L
5338 *#
5339 */
5340 struct vnop_allocate_args {
5341 struct vnodeop_desc *a_desc;
5342 vnode_t a_vp;
5343 off_t a_length;
5344 u_int32_t a_flags;
5345 off_t *a_bytesallocated;
5346 off_t a_offset;
5347 vfs_context_t a_context;
5348 };
5349
5350 #endif /* 0*/
5351 errno_t
5352 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5353 {
5354 int _err;
5355 struct vnop_allocate_args a;
5356
5357 a.a_desc = &vnop_allocate_desc;
5358 a.a_vp = vp;
5359 a.a_length = length;
5360 a.a_flags = flags;
5361 a.a_bytesallocated = bytesallocated;
5362 a.a_offset = offset;
5363 a.a_context = ctx;
5364
5365 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5366 DTRACE_FSINFO(allocate, vnode_t, vp);
5367 #if CONFIG_FSE
5368 if (_err == 0) {
5369 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5370 }
5371 #endif
5372
5373 return _err;
5374 }
5375
5376 #if 0
5377 /*
5378 *#
5379 *#% pagein vp = = =
5380 *#
5381 */
5382 struct vnop_pagein_args {
5383 struct vnodeop_desc *a_desc;
5384 vnode_t a_vp;
5385 upl_t a_pl;
5386 upl_offset_t a_pl_offset;
5387 off_t a_f_offset;
5388 size_t a_size;
5389 int a_flags;
5390 vfs_context_t a_context;
5391 };
5392 #endif /* 0*/
5393 errno_t
5394 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5395 {
5396 int _err;
5397 struct vnop_pagein_args a;
5398
5399 a.a_desc = &vnop_pagein_desc;
5400 a.a_vp = vp;
5401 a.a_pl = pl;
5402 a.a_pl_offset = pl_offset;
5403 a.a_f_offset = f_offset;
5404 a.a_size = size;
5405 a.a_flags = flags;
5406 a.a_context = ctx;
5407
5408 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5409 DTRACE_FSINFO(pagein, vnode_t, vp);
5410
5411 return _err;
5412 }
5413
5414 #if 0
5415 /*
5416 *#
5417 *#% pageout vp = = =
5418 *#
5419 */
5420 struct vnop_pageout_args {
5421 struct vnodeop_desc *a_desc;
5422 vnode_t a_vp;
5423 upl_t a_pl;
5424 upl_offset_t a_pl_offset;
5425 off_t a_f_offset;
5426 size_t a_size;
5427 int a_flags;
5428 vfs_context_t a_context;
5429 };
5430
5431 #endif /* 0*/
5432 errno_t
5433 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5434 {
5435 int _err;
5436 struct vnop_pageout_args a;
5437
5438 a.a_desc = &vnop_pageout_desc;
5439 a.a_vp = vp;
5440 a.a_pl = pl;
5441 a.a_pl_offset = pl_offset;
5442 a.a_f_offset = f_offset;
5443 a.a_size = size;
5444 a.a_flags = flags;
5445 a.a_context = ctx;
5446
5447 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5448 DTRACE_FSINFO(pageout, vnode_t, vp);
5449
5450 post_event_if_success(vp, _err, NOTE_WRITE);
5451
5452 return _err;
5453 }
5454
5455 int
5456 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5457 {
5458 if (vnode_compound_remove_available(dvp)) {
5459 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5460 } else {
5461 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5462 }
5463 }
5464
5465 #if CONFIG_SEARCHFS
5466
5467 #if 0
5468 /*
5469 *#
5470 *#% searchfs vp L L L
5471 *#
5472 */
5473 struct vnop_searchfs_args {
5474 struct vnodeop_desc *a_desc;
5475 vnode_t a_vp;
5476 void *a_searchparams1;
5477 void *a_searchparams2;
5478 struct attrlist *a_searchattrs;
5479 uint32_t a_maxmatches;
5480 struct timeval *a_timelimit;
5481 struct attrlist *a_returnattrs;
5482 uint32_t *a_nummatches;
5483 uint32_t a_scriptcode;
5484 uint32_t a_options;
5485 struct uio *a_uio;
5486 struct searchstate *a_searchstate;
5487 vfs_context_t a_context;
5488 };
5489
5490 #endif /* 0*/
5491 errno_t
5492 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5493 {
5494 int _err;
5495 struct vnop_searchfs_args a;
5496
5497 a.a_desc = &vnop_searchfs_desc;
5498 a.a_vp = vp;
5499 a.a_searchparams1 = searchparams1;
5500 a.a_searchparams2 = searchparams2;
5501 a.a_searchattrs = searchattrs;
5502 a.a_maxmatches = maxmatches;
5503 a.a_timelimit = timelimit;
5504 a.a_returnattrs = returnattrs;
5505 a.a_nummatches = nummatches;
5506 a.a_scriptcode = scriptcode;
5507 a.a_options = options;
5508 a.a_uio = uio;
5509 a.a_searchstate = searchstate;
5510 a.a_context = ctx;
5511
5512 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5513 DTRACE_FSINFO(searchfs, vnode_t, vp);
5514
5515 return _err;
5516 }
5517 #endif /* CONFIG_SEARCHFS */
5518
5519 #if 0
5520 /*
5521 *#
5522 *#% copyfile fvp U U U
5523 *#% copyfile tdvp L U U
5524 *#% copyfile tvp X U U
5525 *#
5526 */
5527 struct vnop_copyfile_args {
5528 struct vnodeop_desc *a_desc;
5529 vnode_t a_fvp;
5530 vnode_t a_tdvp;
5531 vnode_t a_tvp;
5532 struct componentname *a_tcnp;
5533 int a_mode;
5534 int a_flags;
5535 vfs_context_t a_context;
5536 };
5537 #endif /* 0*/
5538 errno_t
5539 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5540 int mode, int flags, vfs_context_t ctx)
5541 {
5542 int _err;
5543 struct vnop_copyfile_args a;
5544 a.a_desc = &vnop_copyfile_desc;
5545 a.a_fvp = fvp;
5546 a.a_tdvp = tdvp;
5547 a.a_tvp = tvp;
5548 a.a_tcnp = tcnp;
5549 a.a_mode = mode;
5550 a.a_flags = flags;
5551 a.a_context = ctx;
5552 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5553 DTRACE_FSINFO(copyfile, vnode_t, fvp);
5554 return _err;
5555 }
5556
5557 #if 0
5558 struct vnop_clonefile_args {
5559 struct vnodeop_desc *a_desc;
5560 vnode_t a_fvp;
5561 vnode_t a_dvp;
5562 vnode_t *a_vpp;
5563 struct componentname *a_cnp;
5564 struct vnode_attr *a_vap;
5565 uint32_t a_flags;
5566 vfs_context_t a_context;
5567 int (*a_dir_clone_authorizer)( /* Authorization callback */
5568 struct vnode_attr *vap, /* attribute to be authorized */
5569 kauth_action_t action, /* action for which attribute is to be authorized */
5570 struct vnode_attr *dvap, /* target directory attributes */
5571 vnode_t sdvp, /* source directory vnode pointer (optional) */
5572 mount_t mp, /* mount point of filesystem */
5573 dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */
5574 uint32_t flags; /* value passed in a_flags to the VNOP */
5575 vfs_context_t ctx, /* As passed to VNOP */
5576 void *reserved); /* Always NULL */
5577 void *a_reserved; /* Currently unused */
5578 };
5579 #endif /* 0 */
5580
5581 errno_t
5582 VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
5583 struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
5584 vfs_context_t ctx)
5585 {
5586 int _err;
5587 struct vnop_clonefile_args a;
5588 a.a_desc = &vnop_clonefile_desc;
5589 a.a_fvp = fvp;
5590 a.a_dvp = dvp;
5591 a.a_vpp = vpp;
5592 a.a_cnp = cnp;
5593 a.a_vap = vap;
5594 a.a_flags = flags;
5595 a.a_context = ctx;
5596
5597 if (vnode_vtype(fvp) == VDIR) {
5598 a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
5599 } else {
5600 a.a_dir_clone_authorizer = NULL;
5601 }
5602
5603 _err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);
5604
5605 if (_err == 0 && *vpp) {
5606 DTRACE_FSINFO(clonefile, vnode_t, *vpp);
5607 if (kdebug_enable) {
5608 kdebug_lookup(*vpp, cnp);
5609 }
5610 }
5611
5612 post_event_if_success(dvp, _err, NOTE_WRITE);
5613
5614 return _err;
5615 }
5616
5617 errno_t
5618 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5619 {
5620 struct vnop_getxattr_args a;
5621 int error;
5622
5623 a.a_desc = &vnop_getxattr_desc;
5624 a.a_vp = vp;
5625 a.a_name = name;
5626 a.a_uio = uio;
5627 a.a_size = size;
5628 a.a_options = options;
5629 a.a_context = ctx;
5630
5631 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
5632 DTRACE_FSINFO(getxattr, vnode_t, vp);
5633
5634 return error;
5635 }
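/*
 * Illustrative sketch only (assumption): probing the size of an extended
 * attribute by passing a NULL uio, in which case the attribute's length is
 * returned through the size parameter. The attribute name is an example.
 */
#if 0
static int
example_xattr_size(vnode_t vp, vfs_context_t ctx, size_t *sizep)
{
	*sizep = 0;
	return VNOP_GETXATTR(vp, "com.example.attr", NULL, sizep, 0, ctx);
}
#endif /* 0 */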
5636
5637 errno_t
5638 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
5639 {
5640 struct vnop_setxattr_args a;
5641 int error;
5642
5643 a.a_desc = &vnop_setxattr_desc;
5644 a.a_vp = vp;
5645 a.a_name = name;
5646 a.a_uio = uio;
5647 a.a_options = options;
5648 a.a_context = ctx;
5649
5650 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
5651 DTRACE_FSINFO(setxattr, vnode_t, vp);
5652
5653 if (error == 0) {
5654 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
5655 }
5656
5657 post_event_if_success(vp, error, NOTE_ATTRIB);
5658
5659 return error;
5660 }
5661
5662 errno_t
5663 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
5664 {
5665 struct vnop_removexattr_args a;
5666 int error;
5667
5668 a.a_desc = &vnop_removexattr_desc;
5669 a.a_vp = vp;
5670 a.a_name = name;
5671 a.a_options = options;
5672 a.a_context = ctx;
5673
5674 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
5675 DTRACE_FSINFO(removexattr, vnode_t, vp);
5676
5677 post_event_if_success(vp, error, NOTE_ATTRIB);
5678
5679 return error;
5680 }
5681
5682 errno_t
5683 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5684 {
5685 struct vnop_listxattr_args a;
5686 int error;
5687
5688 a.a_desc = &vnop_listxattr_desc;
5689 a.a_vp = vp;
5690 a.a_uio = uio;
5691 a.a_size = size;
5692 a.a_options = options;
5693 a.a_context = ctx;
5694
5695 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
5696 DTRACE_FSINFO(listxattr, vnode_t, vp);
5697
5698 return error;
5699 }
5700
5701
5702 #if 0
5703 /*
5704 *#
5705 *#% blktooff vp = = =
5706 *#
5707 */
5708 struct vnop_blktooff_args {
5709 struct vnodeop_desc *a_desc;
5710 vnode_t a_vp;
5711 daddr64_t a_lblkno;
5712 off_t *a_offset;
5713 };
5714 #endif /* 0*/
5715 errno_t
5716 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
5717 {
5718 int _err;
5719 struct vnop_blktooff_args a;
5720
5721 a.a_desc = &vnop_blktooff_desc;
5722 a.a_vp = vp;
5723 a.a_lblkno = lblkno;
5724 a.a_offset = offset;
5725
5726 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
5727 DTRACE_FSINFO(blktooff, vnode_t, vp);
5728
5729 return _err;
5730 }
5731
5732 #if 0
5733 /*
5734 *#
5735 *#% offtoblk vp = = =
5736 *#
5737 */
5738 struct vnop_offtoblk_args {
5739 struct vnodeop_desc *a_desc;
5740 vnode_t a_vp;
5741 off_t a_offset;
5742 daddr64_t *a_lblkno;
5743 };
5744 #endif /* 0*/
5745 errno_t
5746 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
5747 {
5748 int _err;
5749 struct vnop_offtoblk_args a;
5750
5751 a.a_desc = &vnop_offtoblk_desc;
5752 a.a_vp = vp;
5753 a.a_offset = offset;
5754 a.a_lblkno = lblkno;
5755
5756 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
5757 DTRACE_FSINFO(offtoblk, vnode_t, vp);
5758
5759 return _err;
5760 }
5761
5762 #if 0
5763 /*
5764 *#
5765 *#% blockmap vp L L L
5766 *#
5767 */
5768 struct vnop_blockmap_args {
5769 struct vnodeop_desc *a_desc;
5770 vnode_t a_vp;
5771 off_t a_foffset;
5772 size_t a_size;
5773 daddr64_t *a_bpn;
5774 size_t *a_run;
5775 void *a_poff;
5776 int a_flags;
5777 vfs_context_t a_context;
5778 };
5779 #endif /* 0*/
5780 errno_t
5781 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
5782 {
5783 int _err;
5784 struct vnop_blockmap_args a;
5785 size_t localrun = 0;
5786
5787 if (ctx == NULL) {
5788 ctx = vfs_context_current();
5789 }
5790 a.a_desc = &vnop_blockmap_desc;
5791 a.a_vp = vp;
5792 a.a_foffset = foffset;
5793 a.a_size = size;
5794 a.a_bpn = bpn;
5795 a.a_run = &localrun;
5796 a.a_poff = poff;
5797 a.a_flags = flags;
5798 a.a_context = ctx;
5799
5800 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
5801 DTRACE_FSINFO(blockmap, vnode_t, vp);
5802
5803 /*
5804 * We used a local variable to request information from the underlying
5805 * filesystem about the length of the I/O run in question. If
5806 * we get malformed output from the filesystem, we cap it to the length
5807 * requested, at most. Update 'run' on the way out.
5808 */
5809 if (_err == 0) {
5810 if (localrun > size) {
5811 localrun = size;
5812 }
5813
5814 if (run) {
5815 *run = localrun;
5816 }
5817 }
5818
5819 return _err;
5820 }
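/*
 * Illustrative sketch only (assumption): translating a file offset into a
 * device block number plus the length of the contiguous run backing it,
 * which is how I/O paths typically consume VNOP_BLOCKMAP. VNODE_READ is
 * used here as the access hint.
 */
#if 0
static int
example_map_file_offset(vnode_t vp, off_t f_offset, size_t io_size, vfs_context_t ctx)
{
	daddr64_t blkno = 0;
	size_t run = 0;
	int error;

	error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &run, NULL,
	    VNODE_READ, ctx);
	if (error == 0) {
		/*
		 * blkno is the device block backing f_offset (filesystems
		 * commonly report -1 for a hole); run is the contiguous
		 * byte count, capped by the wrapper above at io_size.
		 */
	}
	return error;
}
#endif /* 0 */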
5821
5822 #if 0
5823 struct vnop_strategy_args {
5824 struct vnodeop_desc *a_desc;
5825 struct buf *a_bp;
5826 };
5827
5828 #endif /* 0*/
5829 errno_t
5830 VNOP_STRATEGY(struct buf *bp)
5831 {
5832 int _err;
5833 struct vnop_strategy_args a;
5834 vnode_t vp = buf_vnode(bp);
5835 a.a_desc = &vnop_strategy_desc;
5836 a.a_bp = bp;
5837 _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
5838 DTRACE_FSINFO(strategy, vnode_t, vp);
5839 return _err;
5840 }
5841
5842 #if 0
5843 struct vnop_bwrite_args {
5844 struct vnodeop_desc *a_desc;
5845 buf_t a_bp;
5846 };
5847 #endif /* 0*/
5848 errno_t
5849 VNOP_BWRITE(struct buf *bp)
5850 {
5851 int _err;
5852 struct vnop_bwrite_args a;
5853 vnode_t vp = buf_vnode(bp);
5854 a.a_desc = &vnop_bwrite_desc;
5855 a.a_bp = bp;
5856 _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
5857 DTRACE_FSINFO(bwrite, vnode_t, vp);
5858 return _err;
5859 }
5860
5861 #if 0
5862 struct vnop_kqfilt_add_args {
5863 struct vnodeop_desc *a_desc;
5864 struct vnode *a_vp;
5865 struct knote *a_kn;
5866 vfs_context_t a_context;
5867 };
5868 #endif
5869 errno_t
5870 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
5871 {
5872 int _err;
5873 struct vnop_kqfilt_add_args a;
5874
5875 a.a_desc = VDESC(vnop_kqfilt_add);
5876 a.a_vp = vp;
5877 a.a_kn = kn;
5878 a.a_context = ctx;
5879
5880 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
5881 DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
5882
5883 return _err;
5884 }
5885
5886 #if 0
5887 struct vnop_kqfilt_remove_args {
5888 struct vnodeop_desc *a_desc;
5889 struct vnode *a_vp;
5890 uintptr_t a_ident;
5891 vfs_context_t a_context;
5892 };
5893 #endif
5894 errno_t
5895 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
5896 {
5897 int _err;
5898 struct vnop_kqfilt_remove_args a;
5899
5900 a.a_desc = VDESC(vnop_kqfilt_remove);
5901 a.a_vp = vp;
5902 a.a_ident = ident;
5903 a.a_context = ctx;
5904
5905 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
5906 DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
5907
5908 return _err;
5909 }
5910
5911 errno_t
5912 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
5913 {
5914 int _err;
5915 struct vnop_monitor_args a;
5916
5917 a.a_desc = VDESC(vnop_monitor);
5918 a.a_vp = vp;
5919 a.a_events = events;
5920 a.a_flags = flags;
5921 a.a_handle = handle;
5922 a.a_context = ctx;
5923
5924 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
5925 DTRACE_FSINFO(monitor, vnode_t, vp);
5926
5927 return _err;
5928 }
5929
5930 #if 0
5931 struct vnop_setlabel_args {
5932 struct vnodeop_desc *a_desc;
5933 struct vnode *a_vp;
5934 struct label *a_vl;
5935 vfs_context_t a_context;
5936 };
5937 #endif
5938 errno_t
5939 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
5940 {
5941 int _err;
5942 struct vnop_setlabel_args a;
5943
5944 a.a_desc = VDESC(vnop_setlabel);
5945 a.a_vp = vp;
5946 a.a_vl = label;
5947 a.a_context = ctx;
5948
5949 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
5950 DTRACE_FSINFO(setlabel, vnode_t, vp);
5951
5952 return _err;
5953 }
5954
5955
5956 #if NAMEDSTREAMS
5957 /*
5958 * Get a named stream
5959 */
5960 errno_t
5961 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
5962 {
5963 int _err;
5964 struct vnop_getnamedstream_args a;
5965
5966 a.a_desc = &vnop_getnamedstream_desc;
5967 a.a_vp = vp;
5968 a.a_svpp = svpp;
5969 a.a_name = name;
5970 a.a_operation = operation;
5971 a.a_flags = flags;
5972 a.a_context = ctx;
5973
5974 _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
5975 DTRACE_FSINFO(getnamedstream, vnode_t, vp);
5976 return _err;
5977 }
5978
5979 /*
5980 * Create a named stream
5981 */
5982 errno_t
5983 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
5984 {
5985 int _err;
5986 struct vnop_makenamedstream_args a;
5987
5988 a.a_desc = &vnop_makenamedstream_desc;
5989 a.a_vp = vp;
5990 a.a_svpp = svpp;
5991 a.a_name = name;
5992 a.a_flags = flags;
5993 a.a_context = ctx;
5994
5995 _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
5996 DTRACE_FSINFO(makenamedstream, vnode_t, vp);
5997 return _err;
5998 }
5999
6000
6001 /*
6002 * Remove a named stream
6003 */
6004 errno_t
6005 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6006 {
6007 int _err;
6008 struct vnop_removenamedstream_args a;
6009
6010 a.a_desc = &vnop_removenamedstream_desc;
6011 a.a_vp = vp;
6012 a.a_svp = svp;
6013 a.a_name = name;
6014 a.a_flags = flags;
6015 a.a_context = ctx;
6016
6017 _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6018 DTRACE_FSINFO(removenamedstream, vnode_t, vp);
6019 return _err;
6020 }
6021 #endif