1 /*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/vnode_internal.h>
88 #include <sys/stat.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
91 #include <sys/buf.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
95 #include <sys/mbuf.h>
96 #include <sys/syslog.h>
97 #include <sys/ubc.h>
98 #include <sys/vm.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/event.h>
102 #include <sys/fsevents.h>
103 #include <sys/user.h>
104 #include <sys/lockf.h>
105 #include <sys/xattr.h>
106 #include <sys/kdebug.h>
107
108 #include <kern/assert.h>
109 #include <kern/kalloc.h>
110 #include <kern/task.h>
111 #include <kern/policy_internal.h>
112
113 #include <libkern/OSByteOrder.h>
114
115 #include <miscfs/specfs/specdev.h>
116
117 #include <mach/mach_types.h>
118 #include <mach/memory_object_types.h>
119 #include <mach/task.h>
120
121 #if CONFIG_MACF
122 #include <security/mac_framework.h>
123 #endif
124
125 #if NULLFS
126 #include <miscfs/nullfs/nullfs.h>
127 #endif
128
129 #include <sys/sdt.h>
130
131 #define ESUCCESS 0
132 #undef mount_t
133 #undef vnode_t
134
135 #define COMPAT_ONLY
136
137 #define NATIVE_XATTR(VP) \
138 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
139
140 #if CONFIG_APPLEDOUBLE
141 static void xattrfile_remove(vnode_t dvp, const char *basename,
142 vfs_context_t ctx, int force);
143 static void xattrfile_setattr(vnode_t dvp, const char * basename,
144 struct vnode_attr * vap, vfs_context_t ctx);
145 #endif /* CONFIG_APPLEDOUBLE */
146
147 static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
148
149 /*
150 * vnode_setneedinactive
151 *
152 * Description: Indicate that when the last iocount on this vnode goes away,
153 * and the usecount is also zero, we should inform the filesystem
154 * via VNOP_INACTIVE.
155 *
156 * Parameters: vnode_t vnode to mark
157 *
158 * Returns: Nothing
159 *
160 * Notes: Notably used when we're deleting a file--we need not have a
161 * usecount, so VNOP_INACTIVE may not get called by anyone. We
162 * want it called when we drop our iocount.
163 */
164 void
165 vnode_setneedinactive(vnode_t vp)
166 {
167 cache_purge(vp);
168
169 vnode_lock_spin(vp);
170 vp->v_lflag |= VL_NEEDINACTIVE;
171 vnode_unlock(vp);
172 }
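
/*
 * Example (illustrative sketch, not part of this file): a filesystem's
 * VNOP_REMOVE implementation might mark the doomed vnode so that
 * VNOP_INACTIVE fires once the last iocount is dropped, even though no
 * usecount is held.  "myfs_unlink_internal" is a hypothetical helper.
 *
 *	int
 *	myfs_vnop_remove(struct vnop_remove_args *ap)
 *	{
 *		int error = myfs_unlink_internal(ap->a_dvp, ap->a_vp, ap->a_cnp);
 *
 *		if (error == 0) {
 *			vnode_setneedinactive(ap->a_vp);
 *		}
 *		return error;
 *	}
 */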
173
174
175 /* ====================================================================== */
176 /* ************ EXTERNAL KERNEL APIS ********************************** */
177 /* ====================================================================== */
178
179 /*
180 * implementations of exported VFS operations
181 */
182 int
183 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
184 {
185 int error;
186
187 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) {
188 return ENOTSUP;
189 }
190
191 if (vfs_context_is64bit(ctx)) {
192 if (vfs_64bitready(mp)) {
193 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
194 } else {
195 error = ENOTSUP;
196 }
197 } else {
198 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
199 }
200
201 return error;
202 }
203
204 int
205 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
206 {
207 int error;
208
209 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) {
210 return ENOTSUP;
211 }
212
213 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
214
215 return error;
216 }
217
218 int
219 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
220 {
221 int error;
222
223 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) {
224 return ENOTSUP;
225 }
226
227 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
228
229 return error;
230 }
231
232 /*
233 * Returns: 0 Success
234 * ENOTSUP Not supported
235 * <vfs_root>:ENOENT
236 * <vfs_root>:???
237 *
238 * Note: The return codes from the underlying VFS's root routine can't
239 * be fully enumerated here, since third party VFS authors may not
240 * limit their error returns to the ones documented here, even
241 * though this may result in some programs functioning incorrectly.
242 *
243 * The return codes documented above are those which may currently
244 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
245 * for a call to hfs_vget on the volume mount point, not including
246 * additional error codes which may be propagated from underlying
247 * routines called by hfs_vget.
248 */
249 int
250 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
251 {
252 int error;
253
254 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) {
255 return ENOTSUP;
256 }
257
258 if (ctx == NULL) {
259 ctx = vfs_context_current();
260 }
261
262 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
263
264 return error;
265 }
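
/*
 * Example (illustrative sketch): callers of VFS_ROOT receive the root
 * vnode with an iocount, which must be dropped with vnode_put() when
 * they are done with it.
 *
 *	vnode_t rootvp = NULLVP;
 *	int error = VFS_ROOT(mp, &rootvp, vfs_context_current());
 *
 *	if (error == 0) {
 *		... use rootvp ...
 *		vnode_put(rootvp);
 *	}
 */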
266
267 int
268 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
269 {
270 int error;
271
272 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) {
273 return ENOTSUP;
274 }
275
276 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
277
278 return error;
279 }
280
281 int
282 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
283 {
284 int error;
285
286 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) {
287 return ENOTSUP;
288 }
289
290 if (ctx == NULL) {
291 ctx = vfs_context_current();
292 }
293
294 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
295
296 return error;
297 }
298
299 int
300 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
301 {
302 int error;
303
304 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) {
305 return ENOTSUP;
306 }
307
308 if (ctx == NULL) {
309 ctx = vfs_context_current();
310 }
311
312 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
313
314 return error;
315 }
316
317 int
318 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
319 {
320 int error;
321
322 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) {
323 return ENOTSUP;
324 }
325
326 if (ctx == NULL) {
327 ctx = vfs_context_current();
328 }
329
330 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
331
332 return error;
333 }
334
335 int
336 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
337 {
338 int error;
339
340 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) {
341 return ENOTSUP;
342 }
343
344 if (ctx == NULL) {
345 ctx = vfs_context_current();
346 }
347
348 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
349
350 return error;
351 }
352
353 int
354 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
355 {
356 int error;
357
358 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) {
359 return ENOTSUP;
360 }
361
362 if (ctx == NULL) {
363 ctx = vfs_context_current();
364 }
365
366 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
367
368 return error;
369 }
370
371 int
372 VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
373 {
374 int error;
375
376 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) {
377 return ENOTSUP;
378 }
379
380 if (ctx == NULL) {
381 ctx = vfs_context_current();
382 }
383
384 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
385
386 return error;
387 }
388
389 int
390 VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
391 int flags, vfs_context_t context)
392 {
393 if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl) {
394 return ENOTSUP;
395 }
396
397 return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
398 context ?: vfs_context_current());
399 }
400
401 int
402 VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
403 {
404 int error;
405
406 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0)) {
407 return ENOTSUP;
408 }
409
410 if (ctx == NULL) {
411 ctx = vfs_context_current();
412 }
413
414 error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
415
416 return error;
417 }
418
419 /* returns the cached throttle mask for the mount_t */
420 uint64_t
421 vfs_throttle_mask(mount_t mp)
422 {
423 return mp->mnt_throttle_mask;
424 }
425
426 /* returns a copy of vfs type name for the mount_t */
427 void
428 vfs_name(mount_t mp, char *buffer)
429 {
430 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
431 }
432
433 /* returns vfs type number for the mount_t */
434 int
435 vfs_typenum(mount_t mp)
436 {
437 return mp->mnt_vtable->vfc_typenum;
438 }
439
440 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
441 void*
442 vfs_mntlabel(mount_t mp)
443 {
444 return (void*)mp->mnt_mntlabel;
445 }
446
447 /* returns the command modifier and visible flags of mount_t, i.e. MNT_CMDFLAGS | MNT_VISFLAGMASK */
448 uint64_t
449 vfs_flags(mount_t mp)
450 {
451 return (uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
452 }
453
454 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
455 void
456 vfs_setflags(mount_t mp, uint64_t flags)
457 {
458 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
459
460 mount_lock(mp);
461 mp->mnt_flag |= lflags;
462 mount_unlock(mp);
463 }
464
465 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
466 void
467 vfs_clearflags(mount_t mp, uint64_t flags)
468 {
469 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
470
471 mount_lock(mp);
472 mp->mnt_flag &= ~lflags;
473 mount_unlock(mp);
474 }
475
476 /* Is the mount_t ronly and upgrade read/write requested? */
477 int
478 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
479 {
480 return (mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR);
481 }
482
483
484 /* Is the mount_t mounted ronly */
485 int
486 vfs_isrdonly(mount_t mp)
487 {
488 return mp->mnt_flag & MNT_RDONLY;
489 }
490
491 /* Is the mount_t mounted for filesystem synchronous writes? */
492 int
493 vfs_issynchronous(mount_t mp)
494 {
495 return mp->mnt_flag & MNT_SYNCHRONOUS;
496 }
497
498 /* Is the mount_t mounted read/write? */
499 int
500 vfs_isrdwr(mount_t mp)
501 {
502 return (mp->mnt_flag & MNT_RDONLY) == 0;
503 }
504
505
506 /* Is mount_t marked for update (i.e. MNT_UPDATE) */
507 int
508 vfs_isupdate(mount_t mp)
509 {
510 return mp->mnt_flag & MNT_UPDATE;
511 }
512
513
514 /* Is mount_t marked for reload (i.e. MNT_RELOAD) */
515 int
516 vfs_isreload(mount_t mp)
517 {
518 return (mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD);
519 }
520
521 /* Is mount_t marked for forced unmount (i.e. MNT_FORCE or MNTK_FRCUNMOUNT) */
522 int
523 vfs_isforce(mount_t mp)
524 {
525 if (mp->mnt_lflag & MNT_LFORCE) {
526 return 1;
527 } else {
528 return 0;
529 }
530 }
531
532 int
533 vfs_isunmount(mount_t mp)
534 {
535 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
536 return 1;
537 } else {
538 return 0;
539 }
540 }
541
542 int
543 vfs_64bitready(mount_t mp)
544 {
545 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
546 return 1;
547 } else {
548 return 0;
549 }
550 }
551
552
553 int
554 vfs_authcache_ttl(mount_t mp)
555 {
556 if ((mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
557 return mp->mnt_authcache_ttl;
558 } else {
559 return CACHED_RIGHT_INFINITE_TTL;
560 }
561 }
562
563 void
564 vfs_setauthcache_ttl(mount_t mp, int ttl)
565 {
566 mount_lock(mp);
567 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
568 mp->mnt_authcache_ttl = ttl;
569 mount_unlock(mp);
570 }
571
572 void
573 vfs_clearauthcache_ttl(mount_t mp)
574 {
575 mount_lock(mp);
576 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
577 /*
578 * back to the default TTL value in case
579 * MNTK_AUTH_OPAQUE is set on this mount
580 */
581 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
582 mount_unlock(mp);
583 }
584
585 int
586 vfs_authopaque(mount_t mp)
587 {
588 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE)) {
589 return 1;
590 } else {
591 return 0;
592 }
593 }
594
595 int
596 vfs_authopaqueaccess(mount_t mp)
597 {
598 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS)) {
599 return 1;
600 } else {
601 return 0;
602 }
603 }
604
605 void
606 vfs_setauthopaque(mount_t mp)
607 {
608 mount_lock(mp);
609 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
610 mount_unlock(mp);
611 }
612
613 void
614 vfs_setauthopaqueaccess(mount_t mp)
615 {
616 mount_lock(mp);
617 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
618 mount_unlock(mp);
619 }
620
621 void
622 vfs_clearauthopaque(mount_t mp)
623 {
624 mount_lock(mp);
625 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
626 mount_unlock(mp);
627 }
628
629 void
630 vfs_clearauthopaqueaccess(mount_t mp)
631 {
632 mount_lock(mp);
633 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
634 mount_unlock(mp);
635 }
636
637 void
638 vfs_setextendedsecurity(mount_t mp)
639 {
640 mount_lock(mp);
641 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
642 mount_unlock(mp);
643 }
644
645 void
646 vfs_clearextendedsecurity(mount_t mp)
647 {
648 mount_lock(mp);
649 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
650 mount_unlock(mp);
651 }
652
653 void
654 vfs_setnoswap(mount_t mp)
655 {
656 mount_lock(mp);
657 mp->mnt_kern_flag |= MNTK_NOSWAP;
658 mount_unlock(mp);
659 }
660
661 void
662 vfs_clearnoswap(mount_t mp)
663 {
664 mount_lock(mp);
665 mp->mnt_kern_flag &= ~MNTK_NOSWAP;
666 mount_unlock(mp);
667 }
668
669 int
670 vfs_extendedsecurity(mount_t mp)
671 {
672 return mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY;
673 }
674
675 /* returns the max size of a short symlink in this mount_t */
676 uint32_t
677 vfs_maxsymlen(mount_t mp)
678 {
679 return mp->mnt_maxsymlinklen;
680 }
681
682 /* set max size of short symlink on mount_t */
683 void
684 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
685 {
686 mp->mnt_maxsymlinklen = symlen;
687 }
688
689 /* return a pointer to the RO vfs_statfs associated with mount_t */
690 struct vfsstatfs *
691 vfs_statfs(mount_t mp)
692 {
693 return &mp->mnt_vfsstat;
694 }
695
696 int
697 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
698 {
699 int error;
700
701 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) {
702 return error;
703 }
704
705 /*
706 * If we have a filesystem create time, use it to default some others.
707 */
708 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
709 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time)) {
710 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
711 }
712 }
713
714 return 0;
715 }
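
/*
 * Example (illustrative sketch): a typical vfs_getattr() caller
 * initializes a vfs_attr, marks the attributes it wants, and then
 * checks which of them the filesystem actually supplied.
 *
 *	struct vfs_attr va;
 *
 *	VFSATTR_INIT(&va);
 *	VFSATTR_WANTED(&va, f_blocks);
 *	VFSATTR_WANTED(&va, f_bfree);
 *	VFSATTR_WANTED(&va, f_create_time);
 *
 *	if (vfs_getattr(mp, &va, vfs_context_current()) == 0 &&
 *	    VFSATTR_IS_SUPPORTED(&va, f_blocks)) {
 *		... va.f_blocks is valid ...
 *	}
 */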
716
717 int
718 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
719 {
720 int error;
721
722 /*
723 * with a read-only system volume, we need to allow rename of the root volume
724 * even if it's read-only. Don't return EROFS here if setattr changes only
725 * the volume name
726 */
727 if (vfs_isrdonly(mp) &&
728 !((mp->mnt_flag & MNT_ROOTFS) && (vfa->f_active == VFSATTR_f_vol_name))) {
729 return EROFS;
730 }
731
732 error = VFS_SETATTR(mp, vfa, ctx);
733
734 /*
735 * If we had alternate ways of setting vfs attributes, we'd
736 * fall back here.
737 */
738
739 return error;
740 }
741
742 /* return the private data handle stored in mount_t */
743 void *
744 vfs_fsprivate(mount_t mp)
745 {
746 return mp->mnt_data;
747 }
748
749 /* set the private data handle in mount_t */
750 void
751 vfs_setfsprivate(mount_t mp, void *mntdata)
752 {
753 mount_lock(mp);
754 mp->mnt_data = mntdata;
755 mount_unlock(mp);
756 }
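
/*
 * Example (illustrative sketch): filesystems typically stash their
 * per-mount structure at mount time and recover it from later
 * operations.  "struct myfs_mount" is hypothetical.
 *
 *	// in the filesystem's vfs_mount handler:
 *	struct myfs_mount *mmp = ...allocated and initialized...;
 *	vfs_setfsprivate(mp, mmp);
 *
 *	// in any later vfsop/vnop:
 *	struct myfs_mount *mmp = vfs_fsprivate(vnode_mount(vp));
 */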
757
758 /* query whether the mount point supports native EAs */
759 int
760 vfs_nativexattrs(mount_t mp)
761 {
762 return mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS;
763 }
764
765 /*
766 * return the block size of the underlying
767 * device associated with mount_t
768 */
769 int
770 vfs_devblocksize(mount_t mp)
771 {
772 return mp->mnt_devblocksize;
773 }
774
775 /*
776 * Returns vnode with an iocount that must be released with vnode_put()
777 */
778 vnode_t
779 vfs_vnodecovered(mount_t mp)
780 {
781 vnode_t vp = mp->mnt_vnodecovered;
782 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
783 return NULL;
784 } else {
785 return vp;
786 }
787 }
788
789 /*
790 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
791 * The iocount must be released with vnode_put(). Note that this KPI is subtle
792 * with respect to the validity of using this device vnode for anything substantial
793 * (which is discouraged). If commands are sent to the device driver without
794 * taking proper steps to ensure that the device is still open, chaos may ensue.
795 * Similarly, this routine should only be called if there is some guarantee that
796 * the mount itself is still valid.
797 */
798 vnode_t
799 vfs_devvp(mount_t mp)
800 {
801 vnode_t vp = mp->mnt_devvp;
802
803 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
804 return vp;
805 }
806
807 return NULLVP;
808 }
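
/*
 * Example (illustrative sketch): vnodes returned by vfs_vnodecovered()
 * and vfs_devvp() carry an iocount and must be released with
 * vnode_put() once the caller is finished with them.
 *
 *	vnode_t devvp = vfs_devvp(mp);
 *
 *	if (devvp != NULLVP) {
 *		... issue a (careful) request against devvp ...
 *		vnode_put(devvp);
 *	}
 */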
809
810 /*
811 * return the io attributes associated with mount_t
812 */
813 void
814 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
815 {
816 ioattrp->io_reserved[0] = NULL;
817 ioattrp->io_reserved[1] = NULL;
818 if (mp == NULL) {
819 ioattrp->io_maxreadcnt = MAXPHYS;
820 ioattrp->io_maxwritecnt = MAXPHYS;
821 ioattrp->io_segreadcnt = 32;
822 ioattrp->io_segwritecnt = 32;
823 ioattrp->io_maxsegreadsize = MAXPHYS;
824 ioattrp->io_maxsegwritesize = MAXPHYS;
825 ioattrp->io_devblocksize = DEV_BSIZE;
826 ioattrp->io_flags = 0;
827 ioattrp->io_max_swappin_available = 0;
828 } else {
829 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
830 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
831 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
832 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
833 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
834 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
835 ioattrp->io_devblocksize = mp->mnt_devblocksize;
836 ioattrp->io_flags = mp->mnt_ioflags;
837 ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
838 }
839 }
840
841
842 /*
843 * set the IO attributes associated with mount_t
844 */
845 void
846 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
847 {
848 if (mp == NULL) {
849 return;
850 }
851 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
852 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
853 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
854 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
855 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
856 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
857 mp->mnt_devblocksize = ioattrp->io_devblocksize;
858 mp->mnt_ioflags = ioattrp->io_flags;
859 mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
860 }
861
862 /*
863 * Add a new filesystem to the kernel, as described by the passed-in
864 * vfs_fsentry structure. It fills in the vnode dispatch vector(s) that
865 * are to be supplied to vnode_create() when vnodes are created, and
866 * returns a handle to be used when the FS is later removed.
867 */
868 typedef int (*PFI)(void *);
869 extern int vfs_opv_numops;
870 errno_t
871 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
872 {
873 struct vfstable *newvfstbl = NULL;
874 int i, j;
875 int(***opv_desc_vector_p)(void *);
876 int(**opv_desc_vector)(void *);
877 const struct vnodeopv_entry_desc *opve_descp;
878 int desccount;
879 int descsize;
880 PFI *descptr;
881
882 /*
883 * This routine is responsible for all the initialization that would
884 * ordinarily be done as part of the system startup;
885 */
886
887 if (vfe == (struct vfs_fsentry *)0) {
888 return EINVAL;
889 }
890
891 desccount = vfe->vfe_vopcnt;
892 if ((desccount <= 0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
893 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) {
894 return EINVAL;
895 }
896
897 /* Non-threadsafe filesystems are not supported */
898 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
899 return EINVAL;
900 }
901
902 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
903 M_WAITOK);
904 bzero(newvfstbl, sizeof(struct vfstable));
905 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
906 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
907 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) {
908 newvfstbl->vfc_typenum = maxvfstypenum++;
909 } else {
910 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
911 }
912
913 newvfstbl->vfc_refcount = 0;
914 newvfstbl->vfc_flags = 0;
915 newvfstbl->vfc_mountroot = NULL;
916 newvfstbl->vfc_next = NULL;
917 newvfstbl->vfc_vfsflags = 0;
918 if (vfe->vfe_flags & VFS_TBL64BITREADY) {
919 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
920 }
921 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2) {
922 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
923 }
924 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2) {
925 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
926 }
927 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) {
928 newvfstbl->vfc_flags |= MNT_LOCAL;
929 }
930 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) {
931 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
932 } else {
933 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
934 }
935
936 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR) {
937 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
938 }
939 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT) {
940 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
941 }
942 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED) {
943 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
944 }
945 if (vfe->vfe_flags & VFS_TBLNOMACLABEL) {
946 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
947 }
948 if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) {
949 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
950 }
951 if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME) {
952 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
953 }
954 if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT) {
955 newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
956 }
957
958 /*
959 * Allocate and init the vectors.
960 * Also handle backwards compatibility.
961 *
962 * We allocate one large block to hold all <desccount>
963 * vnode operation vectors stored contiguously.
964 */
965 /* XXX - shouldn't be M_TEMP */
966
967 descsize = desccount * vfs_opv_numops * sizeof(PFI);
968 MALLOC(descptr, PFI *, descsize,
969 M_TEMP, M_WAITOK);
970 bzero(descptr, descsize);
971
972 newvfstbl->vfc_descptr = descptr;
973 newvfstbl->vfc_descsize = descsize;
974
975 newvfstbl->vfc_sysctl = NULL;
976
977 for (i = 0; i < desccount; i++) {
978 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
979 /*
980 * Fill in the caller's pointer to the start of the i'th vector.
981 * They'll need to supply it when calling vnode_create.
982 */
983 opv_desc_vector = descptr + i * vfs_opv_numops;
984 *opv_desc_vector_p = opv_desc_vector;
985
986 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
987 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
988
989 /* Silently skip known-disabled operations */
990 if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
991 printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
992 vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
993 continue;
994 }
995
996 /*
997 * Sanity check: is this operation listed
998 * in the list of operations? We check this
999 * by seeing if its offset is zero. Since
1000 * the default routine should always be listed
1001 * first, it should be the only one with a zero
1002 * offset. Any other operation with a zero
1003 * offset is probably not listed in
1004 * vfs_op_descs, and so is probably an error.
1005 *
1006 * A panic here means the layer programmer
1007 * has committed the all-too common bug
1008 * of adding a new operation to the layer's
1009 * list of vnode operations but
1010 * not adding the operation to the system-wide
1011 * list of supported operations.
1012 */
1013 if (opve_descp->opve_op->vdesc_offset == 0 &&
1014 opve_descp->opve_op != VDESC(vnop_default)) {
1015 printf("vfs_fsadd: operation %s not listed in %s.\n",
1016 opve_descp->opve_op->vdesc_name,
1017 "vfs_op_descs");
1018 panic("vfs_fsadd: bad operation");
1019 }
1020 /*
1021 * Fill in this entry.
1022 */
1023 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1024 opve_descp->opve_impl;
1025 }
1026
1027
1028 /*
1029 * Finally, go back and replace unfilled routines
1030 * with their default. (Sigh, an O(n^3) algorithm. I
1031 * could make it better, but that'd be work, and n is small.)
1032 */
1033 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1034
1035 /*
1036 * Force every operations vector to have a default routine.
1037 */
1038 opv_desc_vector = *opv_desc_vector_p;
1039 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
1040 panic("vfs_fsadd: operation vector without default routine.");
1041 }
1042 for (j = 0; j < vfs_opv_numops; j++) {
1043 if (opv_desc_vector[j] == NULL) {
1044 opv_desc_vector[j] =
1045 opv_desc_vector[VOFFSET(vnop_default)];
1046 }
1047 }
1048 } /* end of each vnodeopv_desc parsing */
1049
1050
1051
1052 *handle = vfstable_add(newvfstbl);
1053
1054 if (newvfstbl->vfc_typenum <= maxvfstypenum) {
1055 maxvfstypenum = newvfstbl->vfc_typenum + 1;
1056 }
1057
1058 if (newvfstbl->vfc_vfsops->vfs_init) {
1059 struct vfsconf vfsc;
1060 bzero(&vfsc, sizeof(struct vfsconf));
1061 vfsc.vfc_reserved1 = 0;
1062 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1063 vfsc.vfc_typenum = (*handle)->vfc_typenum;
1064 vfsc.vfc_refcount = (*handle)->vfc_refcount;
1065 vfsc.vfc_flags = (*handle)->vfc_flags;
1066 vfsc.vfc_reserved2 = 0;
1067 vfsc.vfc_reserved3 = 0;
1068
1069 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1070 }
1071
1072 FREE(newvfstbl, M_TEMP);
1073
1074 return 0;
1075 }
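
/*
 * Example (illustrative sketch): registering a filesystem from a kext
 * start routine.  "examplefs_vfsops", "examplefs_vnodeop_opv_desc" and
 * "examplefs_handle" are hypothetical; a real filesystem supplies its
 * own vfsops table and vnodeopv_desc list.
 *
 *	static vfstable_t examplefs_handle;
 *
 *	static struct vnodeopv_desc *examplefs_opvdescs[] = {
 *		&examplefs_vnodeop_opv_desc,
 *	};
 *
 *	struct vfs_fsentry vfe = {
 *		.vfe_vfsops   = &examplefs_vfsops,
 *		.vfe_vopcnt   = 1,
 *		.vfe_opvdescs = examplefs_opvdescs,
 *		.vfe_fsname   = "examplefs",
 *		.vfe_flags    = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY |
 *		    VFS_TBLNOTYPENUM | VFS_TBLLOCALVOL,
 *	};
 *
 *	errno_t error = vfs_fsadd(&vfe, &examplefs_handle);
 *
 *	// at unload time (fails with EBUSY while any mounts exist):
 *	error = vfs_fsremove(examplefs_handle);
 */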
1076
1077 /*
1078 * Removes the filesystem from the kernel.
1079 * The argument passed in is the handle that was given when the
1080 * file system was added.
1081 */
1082 errno_t
1083 vfs_fsremove(vfstable_t handle)
1084 {
1085 struct vfstable * vfstbl = (struct vfstable *)handle;
1086 void *old_desc = NULL;
1087 errno_t err;
1088
1089 /* Preflight check for any mounts */
1090 mount_list_lock();
1091 if (vfstbl->vfc_refcount != 0) {
1092 mount_list_unlock();
1093 return EBUSY;
1094 }
1095
1096 /*
1097 * save the old descriptor; the free cannot occur unconditionally,
1098 * since vfstable_del() may fail.
1099 */
1100 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1101 old_desc = vfstbl->vfc_descptr;
1102 }
1103 err = vfstable_del(vfstbl);
1104
1105 mount_list_unlock();
1106
1107 /* free the descriptor if the delete was successful */
1108 if (err == 0 && old_desc) {
1109 FREE(old_desc, M_TEMP);
1110 }
1111
1112 return err;
1113 }
1114
1115 void
1116 vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
1117 {
1118 mp->mnt_fsowner = uid;
1119 mp->mnt_fsgroup = gid;
1120 }
1121
1122 /*
1123 * Callers should be careful how they use this; accessing
1124 * mnt_last_write_completed_timestamp is not thread-safe. Writing to
1125 * it isn't either. Point is: be prepared to deal with strange values
1126 * being returned.
1127 */
1128 uint64_t
1129 vfs_idle_time(mount_t mp)
1130 {
1131 if (mp->mnt_pending_write_size) {
1132 return 0;
1133 }
1134
1135 struct timeval now;
1136
1137 microuptime(&now);
1138
1139 return (now.tv_sec
1140 - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
1141 + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec;
1142 }
1143
1144 int
1145 vfs_context_pid(vfs_context_t ctx)
1146 {
1147 return proc_pid(vfs_context_proc(ctx));
1148 }
1149
1150 int
1151 vfs_context_suser(vfs_context_t ctx)
1152 {
1153 return suser(ctx->vc_ucred, NULL);
1154 }
1155
1156 /*
1157 * Return bit field of signals posted to all threads in the context's process.
1158 *
1159 * XXX Signals should be tied to threads, not processes, for most uses of this
1160 * XXX call.
1161 */
1162 int
1163 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1164 {
1165 proc_t p = vfs_context_proc(ctx);
1166 if (p) {
1167 return proc_pendingsignals(p, mask);
1168 }
1169 return 0;
1170 }
1171
1172 int
1173 vfs_context_is64bit(vfs_context_t ctx)
1174 {
1175 proc_t proc = vfs_context_proc(ctx);
1176
1177 if (proc) {
1178 return proc_is64bit(proc);
1179 }
1180 return 0;
1181 }
1182
1183
1184 /*
1185 * vfs_context_proc
1186 *
1187 * Description: Given a vfs_context_t, return the proc_t associated with it.
1188 *
1189 * Parameters: vfs_context_t The context to use
1190 *
1191 * Returns: proc_t The process for this context
1192 *
1193 * Notes: This function will return the current_proc() if any of the
1194 * following conditions are true:
1195 *
1196 * o The supplied context pointer is NULL
1197 * o There is no Mach thread associated with the context
1198 * o There is no Mach task associated with the Mach thread
1199 * o There is no proc_t associated with the Mach task
1200 * o The proc_t has no per process open file table
1201 * o The proc_t is post-vfork()
1202 *
1203 * This causes this function to return a value matching as
1204 * closely as possible the previous behaviour, while at the
1205 * same time avoiding the task lending that results from vfork()
1206 */
1207 proc_t
1208 vfs_context_proc(vfs_context_t ctx)
1209 {
1210 proc_t proc = NULL;
1211
1212 if (ctx != NULL && ctx->vc_thread != NULL) {
1213 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1214 }
1215 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK))) {
1216 proc = NULL;
1217 }
1218
1219 return proc == NULL ? current_proc() : proc;
1220 }
1221
1222 /*
1223 * vfs_context_get_special_port
1224 *
1225 * Description: Return the requested special port from the task associated
1226 * with the given context.
1227 *
1228 * Parameters: vfs_context_t The context to use
1229 * int Index of special port
1230 * ipc_port_t * Pointer to returned port
1231 *
1232 * Returns: kern_return_t see task_get_special_port()
1233 */
1234 kern_return_t
1235 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1236 {
1237 task_t task = NULL;
1238
1239 if (ctx != NULL && ctx->vc_thread != NULL) {
1240 task = get_threadtask(ctx->vc_thread);
1241 }
1242
1243 return task_get_special_port(task, which, portp);
1244 }
1245
1246 /*
1247 * vfs_context_set_special_port
1248 *
1249 * Description: Set the requested special port in the task associated
1250 * with the given context.
1251 *
1252 * Parameters: vfs_context_t The context to use
1253 * int Index of special port
1254 * ipc_port_t New special port
1255 *
1256 * Returns: kern_return_t see task_set_special_port()
1257 */
1258 kern_return_t
1259 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1260 {
1261 task_t task = NULL;
1262
1263 if (ctx != NULL && ctx->vc_thread != NULL) {
1264 task = get_threadtask(ctx->vc_thread);
1265 }
1266
1267 return task_set_special_port(task, which, port);
1268 }
1269
1270 /*
1271 * vfs_context_thread
1272 *
1273 * Description: Return the Mach thread associated with a vfs_context_t
1274 *
1275 * Parameters: vfs_context_t The context to use
1276 *
1277 * Returns: thread_t The thread for this context, or
1278 * NULL, if there is not one.
1279 *
1280 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1281 * as a result of a static vfs_context_t declaration in a function
1282 * and will result in this function returning NULL.
1283 *
1284 * This is intentional; this function should NOT return the
1285 * current_thread() in this case.
1286 */
1287 thread_t
1288 vfs_context_thread(vfs_context_t ctx)
1289 {
1290 return ctx->vc_thread;
1291 }
1292
1293
1294 /*
1295 * vfs_context_cwd
1296 *
1297 * Description: Returns a reference on the vnode for the current working
1298 * directory for the supplied context
1299 *
1300 * Parameters: vfs_context_t The context to use
1301 *
1302 * Returns: vnode_t The current working directory
1303 * for this context
1304 *
1305 * Notes: The function first attempts to obtain the current directory
1306 * from the thread, and if it is not present there, falls back
1307 * to obtaining it from the process instead. If it can't be
1308 * obtained from either place, we return NULLVP.
1309 */
1310 vnode_t
1311 vfs_context_cwd(vfs_context_t ctx)
1312 {
1313 vnode_t cwd = NULLVP;
1314
1315 if (ctx != NULL && ctx->vc_thread != NULL) {
1316 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1317 proc_t proc;
1318
1319 /*
1320 * Get the cwd from the thread; if there isn't one, get it
1321 * from the process, instead.
1322 */
1323 if ((cwd = uth->uu_cdir) == NULLVP &&
1324 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1325 proc->p_fd != NULL) {
1326 cwd = proc->p_fd->fd_cdir;
1327 }
1328 }
1329
1330 return cwd;
1331 }
1332
1333 /*
1334 * vfs_context_get_cwd
1335 *
1336 * Description: Returns a vnode for the current working directory for the
1337 * supplied context. The returned vnode has an iocount on it
1338 * which must be released with a vnode_put().
1339 *
1340 * Parameters: vfs_context_t The context to use
1341 *
1342 * Returns: vnode_t The current working directory
1343 * for this context
1344 *
1345 * Notes: The function first attempts to obtain the current directory
1346 * from the thread, and if it is not present there, falls back
1347 * to obtaining it from the process instead. If it can't be
1348 * obtained from either place, we return NULLVP.
1349 */
1350 vnode_t
1351 vfs_context_get_cwd(vfs_context_t ctx)
1352 {
1353 vnode_t cwd = NULLVP;
1354
1355 if (ctx != NULL && ctx->vc_thread != NULL) {
1356 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1357 proc_t proc;
1358
1359 /*
1360 * Get the cwd from the thread; if there isn't one, get it
1361 * from the process, instead.
1362 */
1363 cwd = uth->uu_cdir;
1364
1365 if (cwd) {
1366 if ((vnode_get(cwd) != 0)) {
1367 cwd = NULLVP;
1368 }
1369 } else if ((proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1370 proc->p_fd != NULL) {
1371 proc_fdlock(proc);
1372 cwd = proc->p_fd->fd_cdir;
1373 if (cwd && (vnode_get(cwd) != 0)) {
1374 cwd = NULLVP;
1375 }
1376 proc_fdunlock(proc);
1377 }
1378 }
1379
1380 return cwd;
1381 }
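
/*
 * Example (illustrative sketch): unlike vfs_context_cwd(), the vnode
 * returned by vfs_context_get_cwd() holds an iocount and must be
 * released with vnode_put().
 *
 *	vnode_t cwd = vfs_context_get_cwd(ctx);
 *
 *	if (cwd != NULLVP) {
 *		... use cwd ...
 *		vnode_put(cwd);
 *	}
 */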
1382
1383 /*
1384 * vfs_context_create
1385 *
1386 * Description: Allocate and initialize a new context.
1387 *
1388 * Parameters: vfs_context_t: Context to copy, or NULL for new
1389 *
1390 * Returns: Pointer to new context
1391 *
1392 * Notes: Copy cred and thread from argument, if available; else
1393 * initialize with current thread and new cred. Returns
1394 * with a reference held on the credential.
1395 */
1396 vfs_context_t
1397 vfs_context_create(vfs_context_t ctx)
1398 {
1399 vfs_context_t newcontext;
1400
1401 newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));
1402
1403 if (newcontext) {
1404 kauth_cred_t safecred;
1405 if (ctx) {
1406 newcontext->vc_thread = ctx->vc_thread;
1407 safecred = ctx->vc_ucred;
1408 } else {
1409 newcontext->vc_thread = current_thread();
1410 safecred = kauth_cred_get();
1411 }
1412 if (IS_VALID_CRED(safecred)) {
1413 kauth_cred_ref(safecred);
1414 }
1415 newcontext->vc_ucred = safecred;
1416 return newcontext;
1417 }
1418 return NULL;
1419 }
1420
1421
1422 vfs_context_t
1423 vfs_context_current(void)
1424 {
1425 vfs_context_t ctx = NULL;
1426 volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1427
1428 if (ut != NULL) {
1429 if (ut->uu_context.vc_ucred != NULL) {
1430 ctx = &ut->uu_context;
1431 }
1432 }
1433
1434 return ctx == NULL ? vfs_context_kernel() : ctx;
1435 }
1436
1437
1438 /*
1439 * XXX Do not ask
1440 *
1441 * Dangerous hack - adopt the first kernel thread as the current thread, to
1442 * get to the vfs_context_t in the uthread associated with a kernel thread.
1443 * This is used by UDF to make the call into IOCDMediaBSDClient,
1444 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1445 * ioctl() is being called from kernel or user space (and all this because
1446 * we do not pass threads into our ioctl()'s, instead of processes).
1447 *
1448 * This is also used by imageboot_setup(), called early from bsd_init() after
1449 * kernproc has been given a credential.
1450 *
1451 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1452 * of many Mach headers to do the reference directly rather than indirectly;
1453 * we will need to forego this convenience when we retire proc_thread().
1454 */
1455 static struct vfs_context kerncontext;
1456 vfs_context_t
1457 vfs_context_kernel(void)
1458 {
1459 if (kerncontext.vc_ucred == NOCRED) {
1460 kerncontext.vc_ucred = kernproc->p_ucred;
1461 }
1462 if (kerncontext.vc_thread == NULL) {
1463 kerncontext.vc_thread = proc_thread(kernproc);
1464 }
1465
1466 return &kerncontext;
1467 }
1468
1469
1470 int
1471 vfs_context_rele(vfs_context_t ctx)
1472 {
1473 if (ctx) {
1474 if (IS_VALID_CRED(ctx->vc_ucred)) {
1475 kauth_cred_unref(&ctx->vc_ucred);
1476 }
1477 kfree(ctx, sizeof(struct vfs_context));
1478 }
1479 return 0;
1480 }
1481
1482
1483 kauth_cred_t
1484 vfs_context_ucred(vfs_context_t ctx)
1485 {
1486 return ctx->vc_ucred;
1487 }
1488
1489 /*
1490 * Return true if the context is owned by the superuser.
1491 */
1492 int
1493 vfs_context_issuser(vfs_context_t ctx)
1494 {
1495 return kauth_cred_issuser(vfs_context_ucred(ctx));
1496 }
1497
1498 int
1499 vfs_context_iskernel(vfs_context_t ctx)
1500 {
1501 return ctx == &kerncontext;
1502 }
1503
1504 /*
1505 * Given a context, for all fields of vfs_context_t which
1506 * are not held with a reference, set those fields to the
1507 * values for the current execution context. Currently, this
1508 * just means the vc_thread.
1509 *
1510 * Returns: 0 for success, nonzero for failure
1511 *
1512 * The intended use is:
1513 * 1. vfs_context_create() gets the caller a context
1514 * 2. vfs_context_bind() sets the unrefcounted data
1515 * 3. vfs_context_rele() releases the context
1516 *
1517 */
1518 int
1519 vfs_context_bind(vfs_context_t ctx)
1520 {
1521 ctx->vc_thread = current_thread();
1522 return 0;
1523 }
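
/*
 * Example (illustrative sketch) of the intended lifecycle described
 * above, e.g. for a context captured on one thread and later used from
 * a worker thread:
 *
 *	vfs_context_t ctx = vfs_context_create(vfs_context_current());
 *
 *	// ... hand ctx off; on the thread that will actually use it:
 *	vfs_context_bind(ctx);		// refresh unrefcounted fields (vc_thread)
 *	... issue VFS/VNOP calls with ctx ...
 *
 *	vfs_context_rele(ctx);		// drops the cred reference and frees ctx
 */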
1524
1525 int
1526 vfs_isswapmount(mount_t mnt)
1527 {
1528 return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
1529 }
1530
1531 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1532
1533
1534 /*
1535 * Convert between vnode types and inode formats (since POSIX.1
1536 * defines mode word of stat structure in terms of inode formats).
1537 */
1538 enum vtype
1539 vnode_iftovt(int mode)
1540 {
1541 return iftovt_tab[((mode) & S_IFMT) >> 12];
1542 }
1543
1544 int
1545 vnode_vttoif(enum vtype indx)
1546 {
1547 return vttoif_tab[(int)(indx)];
1548 }
1549
1550 int
1551 vnode_makeimode(int indx, int mode)
1552 {
1553 return (int)(VTTOIF(indx) | (mode));
1554 }
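
/*
 * Example (illustrative): vnode_iftovt(S_IFDIR | 0755) yields VDIR, and
 * vnode_makeimode(VDIR, 0755) rebuilds the corresponding st_mode value,
 * S_IFDIR | 0755.
 */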
1555
1556
1557 /*
1558 * vnode manipulation functions.
1559 */
1560
1561 /* returns the system root vnode with an iocount; it should be released using vnode_put() */
1562 vnode_t
1563 vfs_rootvnode(void)
1564 {
1565 int error;
1566
1567 error = vnode_get(rootvnode);
1568 if (error) {
1569 return (vnode_t)0;
1570 } else {
1571 return rootvnode;
1572 }
1573 }
1574
1575
1576 uint32_t
1577 vnode_vid(vnode_t vp)
1578 {
1579 return (uint32_t)(vp->v_id);
1580 }
1581
1582 mount_t
1583 vnode_mount(vnode_t vp)
1584 {
1585 return vp->v_mount;
1586 }
1587
1588 #if CONFIG_IOSCHED
1589 vnode_t
1590 vnode_mountdevvp(vnode_t vp)
1591 {
1592 if (vp->v_mount) {
1593 return vp->v_mount->mnt_devvp;
1594 } else {
1595 return (vnode_t)0;
1596 }
1597 }
1598 #endif
1599
1600 boolean_t
1601 vnode_isonexternalstorage(vnode_t vp)
1602 {
1603 if (vp) {
1604 if (vp->v_mount) {
1605 if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_PERIPHERAL_DRIVE) {
1606 return TRUE;
1607 }
1608 }
1609 }
1610 return FALSE;
1611 }
1612
1613 mount_t
1614 vnode_mountedhere(vnode_t vp)
1615 {
1616 mount_t mp;
1617
1618 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1619 (mp->mnt_vnodecovered == vp)) {
1620 return mp;
1621 } else {
1622 return (mount_t)NULL;
1623 }
1624 }
1625
1626 /* returns vnode type of vnode_t */
1627 enum vtype
1628 vnode_vtype(vnode_t vp)
1629 {
1630 return vp->v_type;
1631 }
1632
1633 /* returns FS specific node saved in vnode */
1634 void *
1635 vnode_fsnode(vnode_t vp)
1636 {
1637 return vp->v_data;
1638 }
1639
1640 void
1641 vnode_clearfsnode(vnode_t vp)
1642 {
1643 vp->v_data = NULL;
1644 }
1645
1646 dev_t
1647 vnode_specrdev(vnode_t vp)
1648 {
1649 return vp->v_rdev;
1650 }
1651
1652
1653 /* Accessor functions */
1654 /* is vnode_t a root vnode */
1655 int
1656 vnode_isvroot(vnode_t vp)
1657 {
1658 return (vp->v_flag & VROOT)? 1 : 0;
1659 }
1660
1661 /* is vnode_t a system vnode */
1662 int
1663 vnode_issystem(vnode_t vp)
1664 {
1665 return (vp->v_flag & VSYSTEM)? 1 : 0;
1666 }
1667
1668 /* is vnode_t a swap file vnode */
1669 int
1670 vnode_isswap(vnode_t vp)
1671 {
1672 return (vp->v_flag & VSWAP)? 1 : 0;
1673 }
1674
1675 /* is vnode_t a tty */
1676 int
1677 vnode_istty(vnode_t vp)
1678 {
1679 return (vp->v_flag & VISTTY) ? 1 : 0;
1680 }
1681
1682 /* is a mount operation in progress on this vnode_t */
1683 int
1684 vnode_ismount(vnode_t vp)
1685 {
1686 return (vp->v_flag & VMOUNT)? 1 : 0;
1687 }
1688
1689 /* is this vnode being recycled now */
1690 int
1691 vnode_isrecycled(vnode_t vp)
1692 {
1693 int ret;
1694
1695 vnode_lock_spin(vp);
1696 ret = (vp->v_lflag & (VL_TERMINATE | VL_DEAD))? 1 : 0;
1697 vnode_unlock(vp);
1698 return ret;
1699 }
1700
1701 /* vnode was created by background task requesting rapid aging
1702 * and has not since been referenced by a normal task */
1703 int
1704 vnode_israge(vnode_t vp)
1705 {
1706 return (vp->v_flag & VRAGE)? 1 : 0;
1707 }
1708
1709 int
1710 vnode_needssnapshots(vnode_t vp)
1711 {
1712 return (vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0;
1713 }
1714
1715
1716 /* Check the process/thread to see if we should skip atime updates */
1717 int
1718 vfs_ctx_skipatime(vfs_context_t ctx)
1719 {
1720 struct uthread *ut;
1721 proc_t proc;
1722 thread_t thr;
1723
1724 proc = vfs_context_proc(ctx);
1725 thr = vfs_context_thread(ctx);
1726
1727 /* Validate pointers in case we were invoked via a kernel context */
1728 if (thr && proc) {
1729 ut = get_bsdthread_info(thr);
1730
1731 if (proc->p_lflag & P_LRAGE_VNODES) {
1732 return 1;
1733 }
1734
1735 if (ut) {
1736 if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) {
1737 return 1;
1738 }
1739 }
1740
1741 if (proc->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) {
1742 return 1;
1743 }
1744 }
1745 return 0;
1746 }
1747
1748 /* is vnode_t marked to not keep data cached once it's been consumed */
1749 int
1750 vnode_isnocache(vnode_t vp)
1751 {
1752 return (vp->v_flag & VNOCACHE_DATA)? 1 : 0;
1753 }
1754
1755 /*
1756 * has sequential readahead been disabled on this vnode
1757 */
1758 int
1759 vnode_isnoreadahead(vnode_t vp)
1760 {
1761 return (vp->v_flag & VRAOFF)? 1 : 0;
1762 }
1763
1764 int
1765 vnode_is_openevt(vnode_t vp)
1766 {
1767 return (vp->v_flag & VOPENEVT)? 1 : 0;
1768 }
1769
1770 /* is vnode_t a standard one? */
1771 int
1772 vnode_isstandard(vnode_t vp)
1773 {
1774 return (vp->v_flag & VSTANDARD)? 1 : 0;
1775 }
1776
1777 /* don't vflush() if SKIPSYSTEM */
1778 int
1779 vnode_isnoflush(vnode_t vp)
1780 {
1781 return (vp->v_flag & VNOFLUSH)? 1 : 0;
1782 }
1783
1784 /* is vnode_t a regular file */
1785 int
1786 vnode_isreg(vnode_t vp)
1787 {
1788 return (vp->v_type == VREG)? 1 : 0;
1789 }
1790
1791 /* is vnode_t a directory? */
1792 int
1793 vnode_isdir(vnode_t vp)
1794 {
1795 return (vp->v_type == VDIR)? 1 : 0;
1796 }
1797
1798 /* is vnode_t a symbolic link ? */
1799 int
1800 vnode_islnk(vnode_t vp)
1801 {
1802 return (vp->v_type == VLNK)? 1 : 0;
1803 }
1804
1805 int
1806 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
1807 {
1808 struct nameidata *ndp = cnp->cn_ndp;
1809
1810 if (ndp == NULL) {
1811 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1812 }
1813
1814 if (vnode_isdir(vp)) {
1815 if (vp->v_mountedhere != NULL) {
1816 goto yes;
1817 }
1818
1819 #if CONFIG_TRIGGERS
1820 if (vp->v_resolve) {
1821 goto yes;
1822 }
1823 #endif /* CONFIG_TRIGGERS */
1824 }
1825
1826
1827 if (vnode_islnk(vp)) {
1828 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1829 if (cnp->cn_flags & FOLLOW) {
1830 goto yes;
1831 }
1832 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
1833 goto yes;
1834 }
1835 }
1836
1837 return 0;
1838
1839 yes:
1840 ndp->ni_flag |= NAMEI_CONTLOOKUP;
1841 return EKEEPLOOKING;
1842 }
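
/*
 * Example (illustrative sketch): a filesystem implementing a compound
 * VNOP can call this once it has located the target vnode, to decide
 * whether lookup() still has work to do on it (mount point traversal,
 * trigger resolution, symlink following).  If so, it hands the vnode
 * back and propagates the error.
 *
 *	// inside a hypothetical compound operation, after "vp" has been
 *	// found for component "cnp":
 *	error = vnode_lookup_continue_needed(vp, cnp);
 *	if (error) {
 *		*ap->a_vpp = vp;
 *		return error;	// EKEEPLOOKING: lookup() resumes processing
 *	}
 */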
1843
1844 /* is vnode_t a fifo ? */
1845 int
1846 vnode_isfifo(vnode_t vp)
1847 {
1848 return (vp->v_type == VFIFO)? 1 : 0;
1849 }
1850
1851 /* is vnode_t a block device? */
1852 int
1853 vnode_isblk(vnode_t vp)
1854 {
1855 return (vp->v_type == VBLK)? 1 : 0;
1856 }
1857
1858 int
1859 vnode_isspec(vnode_t vp)
1860 {
1861 return ((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0;
1862 }
1863
1864 /* is vnode_t a char device? */
1865 int
1866 vnode_ischr(vnode_t vp)
1867 {
1868 return (vp->v_type == VCHR)? 1 : 0;
1869 }
1870
1871 /* is vnode_t a socket? */
1872 int
1873 vnode_issock(vnode_t vp)
1874 {
1875 return (vp->v_type == VSOCK)? 1 : 0;
1876 }
1877
1878 /* is vnode_t a device with multiple active vnodes referring to it? */
1879 int
1880 vnode_isaliased(vnode_t vp)
1881 {
1882 enum vtype vt = vp->v_type;
1883 if (!((vt == VCHR) || (vt == VBLK))) {
1884 return 0;
1885 } else {
1886 return vp->v_specflags & SI_ALIASED;
1887 }
1888 }
1889
1890 /* is vnode_t a named stream? */
1891 int
1892 vnode_isnamedstream(
1893 #if NAMEDSTREAMS
1894 vnode_t vp
1895 #else
1896 __unused vnode_t vp
1897 #endif
1898 )
1899 {
1900 #if NAMEDSTREAMS
1901 return (vp->v_flag & VISNAMEDSTREAM) ? 1 : 0;
1902 #else
1903 return 0;
1904 #endif
1905 }
1906
1907 int
1908 vnode_isshadow(
1909 #if NAMEDSTREAMS
1910 vnode_t vp
1911 #else
1912 __unused vnode_t vp
1913 #endif
1914 )
1915 {
1916 #if NAMEDSTREAMS
1917 return (vp->v_flag & VISSHADOW) ? 1 : 0;
1918 #else
1919 return 0;
1920 #endif
1921 }
1922
1923 /* does vnode have associated named stream vnodes ? */
1924 int
1925 vnode_hasnamedstreams(
1926 #if NAMEDSTREAMS
1927 vnode_t vp
1928 #else
1929 __unused vnode_t vp
1930 #endif
1931 )
1932 {
1933 #if NAMEDSTREAMS
1934 return (vp->v_lflag & VL_HASSTREAMS) ? 1 : 0;
1935 #else
1936 return 0;
1937 #endif
1938 }
1939 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1940 void
1941 vnode_setnocache(vnode_t vp)
1942 {
1943 vnode_lock_spin(vp);
1944 vp->v_flag |= VNOCACHE_DATA;
1945 vnode_unlock(vp);
1946 }
1947
1948 void
1949 vnode_clearnocache(vnode_t vp)
1950 {
1951 vnode_lock_spin(vp);
1952 vp->v_flag &= ~VNOCACHE_DATA;
1953 vnode_unlock(vp);
1954 }
1955
1956 void
1957 vnode_set_openevt(vnode_t vp)
1958 {
1959 vnode_lock_spin(vp);
1960 vp->v_flag |= VOPENEVT;
1961 vnode_unlock(vp);
1962 }
1963
1964 void
1965 vnode_clear_openevt(vnode_t vp)
1966 {
1967 vnode_lock_spin(vp);
1968 vp->v_flag &= ~VOPENEVT;
1969 vnode_unlock(vp);
1970 }
1971
1972
1973 void
1974 vnode_setnoreadahead(vnode_t vp)
1975 {
1976 vnode_lock_spin(vp);
1977 vp->v_flag |= VRAOFF;
1978 vnode_unlock(vp);
1979 }
1980
1981 void
1982 vnode_clearnoreadahead(vnode_t vp)
1983 {
1984 vnode_lock_spin(vp);
1985 vp->v_flag &= ~VRAOFF;
1986 vnode_unlock(vp);
1987 }
1988
1989 int
1990 vnode_isfastdevicecandidate(vnode_t vp)
1991 {
1992 return (vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0;
1993 }
1994
1995 void
1996 vnode_setfastdevicecandidate(vnode_t vp)
1997 {
1998 vnode_lock_spin(vp);
1999 vp->v_flag |= VFASTDEVCANDIDATE;
2000 vnode_unlock(vp);
2001 }
2002
2003 void
2004 vnode_clearfastdevicecandidate(vnode_t vp)
2005 {
2006 vnode_lock_spin(vp);
2007 vp->v_flag &= ~VFASTDEVCANDIDATE;
2008 vnode_unlock(vp);
2009 }
2010
2011 int
2012 vnode_isautocandidate(vnode_t vp)
2013 {
2014 return (vp->v_flag & VAUTOCANDIDATE)? 1 : 0;
2015 }
2016
2017 void
2018 vnode_setautocandidate(vnode_t vp)
2019 {
2020 vnode_lock_spin(vp);
2021 vp->v_flag |= VAUTOCANDIDATE;
2022 vnode_unlock(vp);
2023 }
2024
2025 void
2026 vnode_clearautocandidate(vnode_t vp)
2027 {
2028 vnode_lock_spin(vp);
2029 vp->v_flag &= ~VAUTOCANDIDATE;
2030 vnode_unlock(vp);
2031 }
2032
2033
2034
2035
2036 /* mark vnode_t to be skipped by vflush() if SKIPSYSTEM */
2037 void
2038 vnode_setnoflush(vnode_t vp)
2039 {
2040 vnode_lock_spin(vp);
2041 vp->v_flag |= VNOFLUSH;
2042 vnode_unlock(vp);
2043 }
2044
2045 void
2046 vnode_clearnoflush(vnode_t vp)
2047 {
2048 vnode_lock_spin(vp);
2049 vp->v_flag &= ~VNOFLUSH;
2050 vnode_unlock(vp);
2051 }
2052
2053
2054 /* is vnode_t a block device with a FS mounted on it */
2055 int
2056 vnode_ismountedon(vnode_t vp)
2057 {
2058 return (vp->v_specflags & SI_MOUNTEDON)? 1 : 0;
2059 }
2060
2061 void
2062 vnode_setmountedon(vnode_t vp)
2063 {
2064 vnode_lock_spin(vp);
2065 vp->v_specflags |= SI_MOUNTEDON;
2066 vnode_unlock(vp);
2067 }
2068
2069 void
2070 vnode_clearmountedon(vnode_t vp)
2071 {
2072 vnode_lock_spin(vp);
2073 vp->v_specflags &= ~SI_MOUNTEDON;
2074 vnode_unlock(vp);
2075 }
2076
2077
2078 void
2079 vnode_settag(vnode_t vp, int tag)
2080 {
2081 vp->v_tag = tag;
2082 }
2083
2084 int
2085 vnode_tag(vnode_t vp)
2086 {
2087 return vp->v_tag;
2088 }
2089
2090 vnode_t
2091 vnode_parent(vnode_t vp)
2092 {
2093 return vp->v_parent;
2094 }
2095
2096 void
2097 vnode_setparent(vnode_t vp, vnode_t dvp)
2098 {
2099 vp->v_parent = dvp;
2100 }
2101
2102 void
2103 vnode_setname(vnode_t vp, char * name)
2104 {
2105 vp->v_name = name;
2106 }
2107
2108 /* return the FS name that was registered when the FS was added to the kernel */
2109 void
2110 vnode_vfsname(vnode_t vp, char * buf)
2111 {
2112 strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
2113 }
2114
2115 /* return the FS type number */
2116 int
2117 vnode_vfstypenum(vnode_t vp)
2118 {
2119 return vp->v_mount->mnt_vtable->vfc_typenum;
2120 }
2121
2122 int
2123 vnode_vfs64bitready(vnode_t vp)
2124 {
2125 /*
2126 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2127 */
2128 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
2129 return 1;
2130 } else {
2131 return 0;
2132 }
2133 }
2134
2135
2136
2137 /* return the visible flags on associated mount point of vnode_t */
2138 uint32_t
2139 vnode_vfsvisflags(vnode_t vp)
2140 {
2141 return vp->v_mount->mnt_flag & MNT_VISFLAGMASK;
2142 }
2143
2144 /* return the command modifier flags on associated mount point of vnode_t */
2145 uint32_t
2146 vnode_vfscmdflags(vnode_t vp)
2147 {
2148 return vp->v_mount->mnt_flag & MNT_CMDFLAGS;
2149 }
2150
2151 /* return the max size of a short symlink for vnode_t's mount */
2152 uint32_t
2153 vnode_vfsmaxsymlen(vnode_t vp)
2154 {
2155 return vp->v_mount->mnt_maxsymlinklen;
2156 }
2157
2158 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2159 struct vfsstatfs *
2160 vnode_vfsstatfs(vnode_t vp)
2161 {
2162 return &vp->v_mount->mnt_vfsstat;
2163 }
2164
2165 /* return the FS-specific private data handle associated with vnode_t's mount point */
2166 void *
2167 vnode_vfsfsprivate(vnode_t vp)
2168 {
2169 return vp->v_mount->mnt_data;
2170 }
2171
2172 /* is vnode_t in a rdonly mounted FS */
2173 int
2174 vnode_vfsisrdonly(vnode_t vp)
2175 {
2176 return (vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0;
2177 }
2178
2179 int
2180 vnode_compound_rename_available(vnode_t vp)
2181 {
2182 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
2183 }
2184 int
2185 vnode_compound_rmdir_available(vnode_t vp)
2186 {
2187 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
2188 }
2189 int
2190 vnode_compound_mkdir_available(vnode_t vp)
2191 {
2192 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
2193 }
2194 int
2195 vnode_compound_remove_available(vnode_t vp)
2196 {
2197 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
2198 }
2199 int
2200 vnode_compound_open_available(vnode_t vp)
2201 {
2202 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
2203 }
2204
2205 int
2206 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2207 {
2208 return (vp->v_mount->mnt_compound_ops & opid) != 0;
2209 }
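
/*
 * Example (illustrative sketch): VFS-internal callers check these
 * before deciding whether to drive a compound VNOP or the classic
 * lookup-then-operate sequence.
 *
 *	if (vnode_compound_remove_available(dvp)) {
 *		... use VNOP_COMPOUND_REMOVE on dvp ...
 *	} else {
 *		... look up the child, then use VNOP_REMOVE ...
 *	}
 */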
2210
2211 /*
2212 * Returns vnode ref to current working directory; if a per-thread current
2213 * working directory is in effect, return that instead of the per process one.
2214 *
2215 * XXX Published, but not used.
2216 */
2217 vnode_t
2218 current_workingdir(void)
2219 {
2220 return vfs_context_cwd(vfs_context_current());
2221 }
2222
2223 /* returns vnode ref to current root(chroot) directory */
2224 vnode_t
2225 current_rootdir(void)
2226 {
2227 proc_t proc = current_proc();
2228 struct vnode * vp;
2229
2230 if ((vp = proc->p_fd->fd_rdir)) {
2231 if ((vnode_getwithref(vp))) {
2232 return NULL;
2233 }
2234 }
2235 return vp;
2236 }
2237
2238 /*
2239 * Get a filesec and optional acl contents from an extended attribute.
2240 * The function will attempt to retrieve ACL, UUID, and GUID information using a
2241 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2242 *
2243 * Parameters: vp The vnode on which to operate.
2244 * fsecp The filesec (and ACL, if any) being
2245 * retrieved.
2246 * ctx The vnode context in which the
2247 * operation is to be attempted.
2248 *
2249 * Returns: 0 Success
2250 * !0 errno value
2251 *
2252 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2253 * host byte order, as will be the ACL contents, if any.
2254 * Internally, we will canonicalize these values from network (PPC)
2255 * byte order after we retrieve them so that the on-disk contents
2256 * of the extended attribute are identical for both PPC and Intel
2257 * (if we were not being required to provide this service via
2258 * fallback, this would be the job of the filesystem
2259 * 'VNOP_GETATTR' call).
2260 *
2261 * We use ntohl() because it has a transitive property on Intel
2262 * machines and no effect on PPC machines, so the on-disk byte order is the same either way.
2263 *
2264 * XXX: Deleting rather than ignoring a corrupt security structure is
2265 * probably the only way to reset it without assistance from a
2266 * file system integrity checking tool. Right now we ignore it.
2267 *
2268 * XXX: We should enumerate the possible errno values here, and where
2269 * in the code they originated.
2270 */
2271 static int
2272 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2273 {
2274 kauth_filesec_t fsec;
2275 uio_t fsec_uio;
2276 size_t fsec_size;
2277 size_t xsize, rsize;
2278 int error;
2279 uint32_t host_fsec_magic;
2280 uint32_t host_acl_entrycount;
2281
2282 fsec = NULL;
2283 fsec_uio = NULL;
2284
2285 /* find out how big the EA is */
2286 error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
2287 if (error != 0) {
2288 /* no EA, no filesec */
2289 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2290 error = 0;
2291 }
2292 /* either way, we are done */
2293 goto out;
2294 }
2295
2296 /*
2297 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2298 * ACE entry ACL, and if it's larger than that, it must have the right
2299 * number of bytes such that it contains a whole number of ACEs,
2300 * rather than partial entries. Otherwise, we ignore it.
2301 */
2302 if (!KAUTH_FILESEC_VALID(xsize)) {
2303 KAUTH_DEBUG(" ERROR - Bogus kauth_filesec_t: %ld bytes", xsize);
2304 error = 0;
2305 goto out;
2306 }
2307
2308 /* how many entries would fit? */
2309 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2310 if (fsec_size > KAUTH_ACL_MAX_ENTRIES) {
2311 KAUTH_DEBUG(" ERROR - Bogus (too large) kauth_filesec_t: %ld bytes", xsize);
2312 error = 0;
2313 goto out;
2314 }
2315
2316 /* get buffer and uio */
2317 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
2318 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2319 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2320 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2321 error = ENOMEM;
2322 goto out;
2323 }
2324
2325 /* read security attribute */
2326 rsize = xsize;
2327 if ((error = vn_getxattr(vp,
2328 KAUTH_FILESEC_XATTR,
2329 fsec_uio,
2330 &rsize,
2331 XATTR_NOSECURITY,
2332 ctx)) != 0) {
2333 /* no attribute - no security data */
2334 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2335 error = 0;
2336 }
2337 /* either way, we are done */
2338 goto out;
2339 }
2340
2341 /*
2342 * Validate security structure; the validation must take place in host
2343 * byte order. If it's corrupt, we will just ignore it.
2344 */
2345
2346 /* Validate the size before trying to convert it */
2347 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2348 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2349 goto out;
2350 }
2351
2352 /* Validate the magic number before trying to convert it */
2353 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2354 if (fsec->fsec_magic != host_fsec_magic) {
2355 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2356 goto out;
2357 }
2358
2359 /* Validate the entry count before trying to convert it. */
2360 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2361 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2362 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2363 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2364 goto out;
2365 }
2366 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2367 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2368 goto out;
2369 }
2370 }
2371
2372 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2373
2374 *fsecp = fsec;
2375 fsec = NULL;
2376 error = 0;
2377 out:
2378 if (fsec != NULL) {
2379 kauth_filesec_free(fsec);
2380 }
2381 if (fsec_uio != NULL) {
2382 uio_free(fsec_uio);
2383 }
2384 if (error) {
2385 *fsecp = NULL;
2386 }
2387 return error;
2388 }
2389
2390 /*
2391 * Set a filesec and optional acl contents into an extended attribute.
2392 * Function will attempt to store ACL, UUID, and GUID information using a
2393 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2394 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2395 * original caller supplied an acl.
2396 *
2397 * Parameters: vp The vnode on which to operate.
2398 * fsec The filesec being set.
2399 * acl The acl to be associated with 'fsec'.
2400 * ctx The vnode context in which the
2401 * operation is to be attempted.
2402 *
2403 * Returns: 0 Success
2404 * !0 errno value
2405 *
2406 * Notes: Both the fsec and the acl are always valid.
2407 *
2408 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2409 * as are the acl contents, if they are used. Internally, we will
2410 * canonicalize these values into network (PPC) byte order before we
2411 * attempt to write them so that the on-disk contents of the
2412 * extended attribute are identical for both PPC and Intel (if we
2413 * were not being required to provide this service via fallback,
2414 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2415 * We reverse this process on the way out, so we leave with the
2416 * same byte order we started with.
2417 *
2418 * XXX: We should enumerate the possible errno values here, and where
2419 * in the code they originated.
2420 */
2421 static int
2422 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2423 {
2424 uio_t fsec_uio;
2425 int error;
2426 uint32_t saved_acl_copysize;
2427
2428 fsec_uio = NULL;
2429
2430 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2431 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2432 error = ENOMEM;
2433 goto out;
2434 }
2435 /*
2436 * Save the pre-converted ACL copysize, because it gets swapped too
2437 * if we are running with the wrong endianness.
2438 */
2439 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2440
2441 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2442
2443 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2444 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2445 error = vn_setxattr(vp,
2446 KAUTH_FILESEC_XATTR,
2447 fsec_uio,
2448 XATTR_NOSECURITY, /* we have auth'ed already */
2449 ctx);
2450 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2451
2452 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2453
2454 out:
2455 if (fsec_uio != NULL) {
2456 uio_free(fsec_uio);
2457 }
2458 return error;
2459 }
2460
2461
2462 /*
2463 * Returns: 0 Success
2464 * ENOMEM Not enough space [only if has filesec]
2465 * EINVAL Requested unknown attributes
2466 * VNOP_GETATTR: ???
2467 * vnode_get_filesec: ???
2468 * kauth_cred_guid2uid: ???
2469 * kauth_cred_guid2gid: ???
2470 * vfs_update_vfsstat: ???
2471 */
2472 int
2473 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2474 {
2475 kauth_filesec_t fsec;
2476 kauth_acl_t facl;
2477 int error;
2478 uid_t nuid;
2479 gid_t ngid;
2480
2481 /*
2482 * Reject attempts to fetch unknown attributes.
2483 */
2484 if (vap->va_active & ~VNODE_ATTR_ALL) {
2485 return EINVAL;
2486 }
2487
2488 /* don't ask for extended security data if the filesystem doesn't support it */
2489 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2490 VATTR_CLEAR_ACTIVE(vap, va_acl);
2491 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2492 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2493 }
2494
2495 /*
2496 * If the caller wants size values we might have to synthesise, give the
2497 * filesystem the opportunity to supply better intermediate results.
2498 */
2499 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2500 VATTR_IS_ACTIVE(vap, va_total_size) ||
2501 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2502 VATTR_SET_ACTIVE(vap, va_data_size);
2503 VATTR_SET_ACTIVE(vap, va_data_alloc);
2504 VATTR_SET_ACTIVE(vap, va_total_size);
2505 VATTR_SET_ACTIVE(vap, va_total_alloc);
2506 }
2507
2508 vap->va_vaflags &= ~VA_USEFSID;
2509
2510 error = VNOP_GETATTR(vp, vap, ctx);
2511 if (error) {
2512 KAUTH_DEBUG("ERROR - returning %d", error);
2513 goto out;
2514 }
2515
2516 /*
2517 * If extended security data was requested but not returned, try the fallback
2518 * path.
2519 */
2520 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2521 fsec = NULL;
2522
2523 if (XATTR_VNODE_SUPPORTED(vp)) {
2524 /* try to get the filesec */
2525 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2526 goto out;
2527 }
2528 }
2529 /* if no filesec, no attributes */
2530 if (fsec == NULL) {
2531 VATTR_RETURN(vap, va_acl, NULL);
2532 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2533 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2534 } else {
2535 /* looks good, try to return what we were asked for */
2536 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2537 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2538
2539 /* only return the ACL if we were actually asked for it */
2540 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2541 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2542 VATTR_RETURN(vap, va_acl, NULL);
2543 } else {
2544 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2545 if (facl == NULL) {
2546 kauth_filesec_free(fsec);
2547 error = ENOMEM;
2548 goto out;
2549 }
2550 __nochk_bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2551 VATTR_RETURN(vap, va_acl, facl);
2552 }
2553 }
2554 kauth_filesec_free(fsec);
2555 }
2556 }
2557 /*
2558 * If someone gave us an unsolicited filesec, toss it. We promise that
2559 * we're OK with a filesystem giving us anything back, but our callers
2560 * only expect what they asked for.
2561 */
2562 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2563 if (vap->va_acl != NULL) {
2564 kauth_acl_free(vap->va_acl);
2565 }
2566 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2567 }
2568
2569 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2570 /*
2571 * Handle the case where we need a UID/GID, but only have extended
2572 * security information.
2573 */
2574 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2575 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2576 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2577 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0) {
2578 VATTR_RETURN(vap, va_uid, nuid);
2579 }
2580 }
2581 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2582 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2583 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2584 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0) {
2585 VATTR_RETURN(vap, va_gid, ngid);
2586 }
2587 }
2588 #endif
2589
2590 /*
2591 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2592 */
2593 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2594 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2595 nuid = vap->va_uid;
2596 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2597 nuid = vp->v_mount->mnt_fsowner;
2598 if (nuid == KAUTH_UID_NONE) {
2599 nuid = 99;
2600 }
2601 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2602 nuid = vap->va_uid;
2603 } else {
2604 /* this will always be something sensible */
2605 nuid = vp->v_mount->mnt_fsowner;
2606 }
2607 if ((nuid == 99) && !vfs_context_issuser(ctx)) {
2608 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2609 }
2610 VATTR_RETURN(vap, va_uid, nuid);
2611 }
2612 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2613 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2614 ngid = vap->va_gid;
2615 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2616 ngid = vp->v_mount->mnt_fsgroup;
2617 if (ngid == KAUTH_GID_NONE) {
2618 ngid = 99;
2619 }
2620 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2621 ngid = vap->va_gid;
2622 } else {
2623 /* this will always be something sensible */
2624 ngid = vp->v_mount->mnt_fsgroup;
2625 }
2626 if ((ngid == 99) && !vfs_context_issuser(ctx)) {
2627 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2628 }
2629 VATTR_RETURN(vap, va_gid, ngid);
2630 }
2631
2632 /*
2633 * Synthesise some values that can be reasonably guessed.
2634 */
2635 if (!VATTR_IS_SUPPORTED(vap, va_iosize)) {
2636 VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
2637 }
2638
2639 if (!VATTR_IS_SUPPORTED(vap, va_flags)) {
2640 VATTR_RETURN(vap, va_flags, 0);
2641 }
2642
2643 if (!VATTR_IS_SUPPORTED(vap, va_filerev)) {
2644 VATTR_RETURN(vap, va_filerev, 0);
2645 }
2646
2647 if (!VATTR_IS_SUPPORTED(vap, va_gen)) {
2648 VATTR_RETURN(vap, va_gen, 0);
2649 }
2650
2651 /*
2652 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2653 */
2654 if (!VATTR_IS_SUPPORTED(vap, va_data_size)) {
2655 VATTR_RETURN(vap, va_data_size, 0);
2656 }
2657
2658 /* do we want any of the possibly-computed values? */
2659 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2660 VATTR_IS_ACTIVE(vap, va_total_size) ||
2661 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2662 /* make sure f_bsize is valid */
2663 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2664 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) {
2665 goto out;
2666 }
2667 }
2668
2669 /* default va_data_alloc from va_data_size */
2670 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc)) {
2671 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2672 }
2673
2674 /* default va_total_size from va_data_size */
2675 if (!VATTR_IS_SUPPORTED(vap, va_total_size)) {
2676 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2677 }
2678
2679 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2680 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc)) {
2681 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2682 }
2683 }
2684
2685 /*
2686 * If we don't have a change time, pull it from the modtime.
2687 */
2688 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time)) {
2689 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2690 }
2691
2692 /*
2693 * This is really only supported for the creation VNOPs, but since the field is there
2694 * we should populate it correctly.
2695 */
2696 VATTR_RETURN(vap, va_type, vp->v_type);
2697
2698 /*
2699 * The fsid can be obtained from the mountpoint directly.
2700 */
2701 if (VATTR_IS_ACTIVE(vap, va_fsid) &&
2702 (!VATTR_IS_SUPPORTED(vap, va_fsid) ||
2703 vap->va_vaflags & VA_REALFSID || !(vap->va_vaflags & VA_USEFSID))) {
2704 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2705 }
2706
2707 out:
2708 vap->va_vaflags &= ~VA_USEFSID;
2709
2710 return error;
2711 }
2712
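/*
 * Illustrative sketch (not part of the original source): a typical in-kernel
 * caller initializes a vnode_attr, marks the attributes it wants, calls
 * vnode_getattr(), and then checks which attributes were actually returned.
 * The helper name is hypothetical.
 */
#if 0
static int
example_get_size(vnode_t vp, vfs_context_t ctx, off_t *sizep)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);

	error = vnode_getattr(vp, &va, ctx);
	if (error == 0 && VATTR_IS_SUPPORTED(&va, va_data_size)) {
		*sizep = va.va_data_size;
	}
	return error;
}
#endif /* 0 */
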
2713 /*
2714 * Choose 32 bit or 64 bit fsid
2715 */
2716 uint64_t
2717 vnode_get_va_fsid(struct vnode_attr *vap)
2718 {
2719 if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
2720 return (uint64_t)vap->va_fsid64.val[0] + ((uint64_t)vap->va_fsid64.val[1] << 32);
2721 }
2722 return vap->va_fsid;
2723 }
2724
2725 /*
2726 * Set the attributes on a vnode in a vnode context.
2727 *
2728 * Parameters: vp The vnode whose attributes to set.
2729 * vap A pointer to the attributes to set.
2730 * ctx The vnode context in which the
2731 * operation is to be attempted.
2732 *
2733 * Returns: 0 Success
2734 * !0 errno value
2735 *
2736 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2737 *
2738 * The contents of the data area pointed to by 'vap' may be
2739 * modified if the vnode is on a filesystem which has been
2740 * mounted with ignore ownership flags, or by the underlying
2741 * VFS itself, or by the fallback code, if the underlying VFS
2742 * does not support ACL, UUID, or GUUID attributes directly.
2743 *
2744 * XXX: We should enumerate the possible errno values here, and where
2745 * in the code they originated.
2746 */
2747 int
2748 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2749 {
2750 int error;
2751 #if CONFIG_FSE
2752 uint64_t active;
2753 int is_perm_change = 0;
2754 int is_stat_change = 0;
2755 #endif
2756
2757 /*
2758 * Reject attempts to set unknown attributes.
2759 */
2760 if (vap->va_active & ~VNODE_ATTR_ALL) {
2761 return EINVAL;
2762 }
2763
2764 /*
2765 * Make sure the filesystem is mounted R/W.
2766 * If not, return an error.
2767 */
2768 if (vfs_isrdonly(vp->v_mount)) {
2769 error = EROFS;
2770 goto out;
2771 }
2772
2773 #if DEVELOPMENT || DEBUG
2774 /*
2775 * XXX VSWAP: Check for entitlements or special flag here
2776 * so we can restrict access appropriately.
2777 */
2778 #else /* DEVELOPMENT || DEBUG */
2779
2780 if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
2781 error = EPERM;
2782 goto out;
2783 }
2784 #endif /* DEVELOPMENT || DEBUG */
2785
2786 #if NAMEDSTREAMS
2787 /* For streams, va_data_size is the only settable attribute. */
2788 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2789 error = EPERM;
2790 goto out;
2791 }
2792 #endif
2793 /* Check for truncation */
2794 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
2795 switch (vp->v_type) {
2796 case VREG:
2797 /* For regular files it's ok */
2798 break;
2799 case VDIR:
2800 /* Not allowed to truncate directories */
2801 error = EISDIR;
2802 goto out;
2803 default:
2804 /* For everything else we will clear the bit and let underlying FS decide on the rest */
2805 VATTR_CLEAR_ACTIVE(vap, va_data_size);
2806 if (vap->va_active) {
2807 break;
2808 }
2809 /* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
2810 return 0;
2811 }
2812 }
2813
2814 /*
2815 * If ownership is being ignored on this volume, we silently discard
2816 * ownership changes.
2817 */
2818 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2819 VATTR_CLEAR_ACTIVE(vap, va_uid);
2820 VATTR_CLEAR_ACTIVE(vap, va_gid);
2821 }
2822
2823 /*
2824 * Make sure that extended security is enabled if we're going to try
2825 * to set any.
2826 */
2827 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2828 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2829 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2830 error = ENOTSUP;
2831 goto out;
2832 }
2833
2834 /* Never allow the setting of any unsupported superuser flags. */
2835 if (VATTR_IS_ACTIVE(vap, va_flags)) {
2836 vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
2837 }
2838
2839 #if CONFIG_FSE
2840 /*
2841 * Remember all of the active attributes that we're
2842 * attempting to modify.
2843 */
2844 active = vap->va_active & ~VNODE_ATTR_RDONLY;
2845 #endif
2846
2847 error = VNOP_SETATTR(vp, vap, ctx);
2848
2849 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) {
2850 error = vnode_setattr_fallback(vp, vap, ctx);
2851 }
2852
2853 #if CONFIG_FSE
2854 #define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \
2855 VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \
2856 VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl))
2857
2858 /*
2859 * Now that we've changed them, decide whether to send an
2860 * FSevent.
2861 */
2862 if ((active & PERMISSION_BITS) & vap->va_supported) {
2863 is_perm_change = 1;
2864 } else {
2865 /*
2866 * We've already checked the permission bits, and we
2867 * also want to filter out access time / backup time
2868 * changes.
2869 */
2870 active &= ~(PERMISSION_BITS |
2871 VNODE_ATTR_BIT(va_access_time) |
2872 VNODE_ATTR_BIT(va_backup_time));
2873
2874 /* Anything left to notify about? */
2875 if (active & vap->va_supported) {
2876 is_stat_change = 1;
2877 }
2878 }
2879
2880 if (error == 0) {
2881 if (is_perm_change) {
2882 if (need_fsevent(FSE_CHOWN, vp)) {
2883 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2884 }
2885 } else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) {
2886 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2887 }
2888 }
2889 #undef PERMISSION_BITS
2890 #endif
2891
2892 out:
2893 return error;
2894 }
2895
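/*
 * Illustrative sketch (not part of the original source): setting attributes
 * follows the same vnode_attr pattern; only the attributes marked with
 * VATTR_SET() are passed down, and the filesystem (plus the fallback below)
 * reports what it actually handled via the supported bits. The helper name
 * is hypothetical.
 */
#if 0
static int
example_chmod(vnode_t vp, mode_t mode, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, mode);

	return vnode_setattr(vp, &va, ctx);
}
#endif /* 0 */
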
2896 /*
2897 * Fallback for setting the attributes on a vnode in a vnode context. This
2898 * function will attempt to store ACL, UUID, and GUID information utilizing
2899 * a read/modify/write operation against an EA used as a backing store for
2900 * the object.
2901 *
2902 * Parameters: vp The vnode whose attributes to set.
2903 * vap A pointer to the attributes to set.
2904 * ctx The vnode context in which the
2905 * operation is to be attempted.
2906 *
2907 * Returns: 0 Success
2908 * !0 errno value
2909 *
2910 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2911 * as are the fsec and lfsec, if they are used.
2912 *
2913 * The contents of the data area pointed to by 'vap' may be
2914 * modified to indicate that the attribute is supported for
2915 * any given requested attribute.
2916 *
2917 * XXX: We should enumerate the possible errno values here, and where
2918 * in the code they originated.
2919 */
2920 int
2921 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2922 {
2923 kauth_filesec_t fsec;
2924 kauth_acl_t facl;
2925 struct kauth_filesec lfsec;
2926 int error;
2927
2928 error = 0;
2929
2930 /*
2931 * Extended security fallback via extended attributes.
2932 *
2933 * Note that we do not free the filesec; the caller is expected to
2934 * do this.
2935 */
2936 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2937 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2938 VATTR_NOT_RETURNED(vap, va_guuid)) {
2939 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2940
2941 /*
2942 * Fail for file types that we don't permit extended security
2943 * to be set on.
2944 */
2945 if (!XATTR_VNODE_SUPPORTED(vp)) {
2946 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2947 error = EINVAL;
2948 goto out;
2949 }
2950
2951 /*
2952 * If we don't have all the extended security items, we need
2953 * to fetch the existing data to perform a read-modify-write
2954 * operation.
2955 */
2956 fsec = NULL;
2957 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2958 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2959 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2960 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2961 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2962 goto out;
2963 }
2964 }
2965 /* if we didn't get a filesec, use our local one */
2966 if (fsec == NULL) {
2967 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2968 fsec = &lfsec;
2969 } else {
2970 KAUTH_DEBUG("SETATTR - updating existing filesec");
2971 }
2972 /* find the ACL */
2973 facl = &fsec->fsec_acl;
2974
2975 /* if we're using the local filesec, we need to initialise it */
2976 if (fsec == &lfsec) {
2977 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2978 fsec->fsec_owner = kauth_null_guid;
2979 fsec->fsec_group = kauth_null_guid;
2980 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2981 facl->acl_flags = 0;
2982 }
2983
2984 /*
2985 * Update with the supplied attributes.
2986 */
2987 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2988 KAUTH_DEBUG("SETATTR - updating owner UUID");
2989 fsec->fsec_owner = vap->va_uuuid;
2990 VATTR_SET_SUPPORTED(vap, va_uuuid);
2991 }
2992 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2993 KAUTH_DEBUG("SETATTR - updating group UUID");
2994 fsec->fsec_group = vap->va_guuid;
2995 VATTR_SET_SUPPORTED(vap, va_guuid);
2996 }
2997 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2998 if (vap->va_acl == NULL) {
2999 KAUTH_DEBUG("SETATTR - removing ACL");
3000 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
3001 } else {
3002 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
3003 facl = vap->va_acl;
3004 }
3005 VATTR_SET_SUPPORTED(vap, va_acl);
3006 }
3007
3008 /*
3009 * If the filesec data is all invalid, we can just remove
3010 * the EA completely.
3011 */
3012 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
3013 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
3014 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
3015 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
3016 /* no attribute is ok, nothing to delete */
3017 if (error == ENOATTR) {
3018 error = 0;
3019 }
3020 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
3021 } else {
3022 /* write the EA */
3023 error = vnode_set_filesec(vp, fsec, facl, ctx);
3024 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
3025 }
3026
3027 /* if we fetched a filesec, dispose of the buffer */
3028 if (fsec != &lfsec) {
3029 kauth_filesec_free(fsec);
3030 }
3031 }
3032 out:
3033
3034 return error;
3035 }
3036
3037 /*
3038 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
3039 * event on a vnode.
3040 */
3041 int
3042 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
3043 {
3044 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
3045 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
3046 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
3047 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
3048 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
3049 uint32_t knote_events = (events & knote_mask);
3050
3051 /* Permissions are not explicitly part of the kqueue model */
3052 if (events & VNODE_EVENT_PERMS) {
3053 knote_events |= NOTE_ATTRIB;
3054 }
3055
3056 /* Directory contents information just becomes NOTE_WRITE */
3057 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
3058 knote_events |= NOTE_WRITE;
3059 }
3060
3061 if (knote_events) {
3062 lock_vnode_and_post(vp, knote_events);
3063 #if CONFIG_FSE
3064 if (vap != NULL) {
3065 create_fsevent_from_kevent(vp, events, vap);
3066 }
3067 #else
3068 (void)vap;
3069 #endif
3070 }
3071
3072 return 0;
3073 }
3074
3075
3076
3077 int
3078 vnode_isdyldsharedcache(vnode_t vp)
3079 {
3080 return (vp->v_flag & VSHARED_DYLD) ? 1 : 0;
3081 }
3082
3083
3084 /*
3085 * For a filesystem that isn't tracking its own vnode watchers:
3086 * check whether a vnode is being monitored.
3087 */
3088 int
3089 vnode_ismonitored(vnode_t vp)
3090 {
3091 return vp->v_knotes.slh_first != NULL;
3092 }
3093
3094 int
3095 vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
3096 {
3097 if (out_vpp) {
3098 *out_vpp = NULLVP;
3099 }
3100 #if NULLFS
3101 return nullfs_getbackingvnode(in_vp, out_vpp);
3102 #else
3103 #pragma unused(in_vp)
3104 return ENOENT;
3105 #endif
3106 }
3107
3108 /*
3109 * Initialize a struct vnode_attr and activate the attributes required
3110 * by the vnode_notify() call.
3111 */
3112 int
3113 vfs_get_notify_attributes(struct vnode_attr *vap)
3114 {
3115 VATTR_INIT(vap);
3116 vap->va_active = VNODE_NOTIFY_ATTRS;
3117 return 0;
3118 }
3119
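/*
 * Illustrative sketch (not part of the original source): a filesystem that
 * detects a change out-of-band can gather the notification attributes with
 * vfs_get_notify_attributes() and hand them to vnode_notify(), which maps
 * the events onto knotes and (if enabled) fsevents. The function name is
 * hypothetical.
 */
#if 0
static void
examplefs_post_attrib_change(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;

	vfs_get_notify_attributes(&va);
	if (vnode_getattr(vp, &va, ctx) == 0) {
		vnode_notify(vp, VNODE_EVENT_ATTRIB, &va);
	}
}
#endif /* 0 */
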
3120 #if CONFIG_TRIGGERS
3121 int
3122 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
3123 {
3124 int error;
3125 mount_t mp;
3126
3127 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
3128 if (mp == NULL) {
3129 return ENOENT;
3130 }
3131
3132 error = vfs_busy(mp, LK_NOWAIT);
3133 mount_iterdrop(mp);
3134
3135 if (error != 0) {
3136 return ENOENT;
3137 }
3138
3139 mount_lock(mp);
3140 if (mp->mnt_triggercallback != NULL) {
3141 error = EBUSY;
3142 mount_unlock(mp);
3143 goto out;
3144 }
3145
3146 mp->mnt_triggercallback = vtc;
3147 mp->mnt_triggerdata = data;
3148 mount_unlock(mp);
3149
3150 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
3151
3152 out:
3153 vfs_unbusy(mp);
3154 return 0;
3155 }
3156 #endif /* CONFIG_TRIGGERS */
3157
3158 /*
3159 * Definition of vnode operations.
3160 */
3161
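/*
 * Illustrative sketch (not part of the original source): each VNOP_*()
 * wrapper below packs its arguments into the matching vnop_*_args structure
 * and dispatches through the vnode's v_op table, indexed by the operation
 * descriptor's vdesc_offset. A filesystem supplies its implementations as a
 * descriptor/function pair table; the examplefs_* names are hypothetical.
 */
#if 0
static const struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vnop_default_desc, (int (*)(void *))vn_default_error },
	{ &vnop_lookup_desc, (int (*)(void *))examplefs_lookup },
	{ &vnop_open_desc, (int (*)(void *))examplefs_open },
	{ &vnop_close_desc, (int (*)(void *))examplefs_close },
	{ NULL, (int (*)(void *))NULL }
};
#endif /* 0 */
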
3162 #if 0
3163 /*
3164 *#
3165 *#% lookup dvp L ? ?
3166 *#% lookup vpp - L -
3167 */
3168 struct vnop_lookup_args {
3169 struct vnodeop_desc *a_desc;
3170 vnode_t a_dvp;
3171 vnode_t *a_vpp;
3172 struct componentname *a_cnp;
3173 vfs_context_t a_context;
3174 };
3175 #endif /* 0*/
3176
3177 /*
3178 * Returns: 0 Success
3179 * lock_fsnode:ENOENT No such file or directory [only for VFS
3180 * that is not thread safe & vnode is
3181 * currently being/has been terminated]
3182 * <vfs_lookup>:ENAMETOOLONG
3183 * <vfs_lookup>:ENOENT
3184 * <vfs_lookup>:EJUSTRETURN
3185 * <vfs_lookup>:EPERM
3186 * <vfs_lookup>:EISDIR
3187 * <vfs_lookup>:ENOTDIR
3188 * <vfs_lookup>:???
3189 *
3190 * Note: The return codes from the underlying VFS's lookup routine can't
3191 * be fully enumerated here, since third party VFS authors may not
3192 * limit their error returns to the ones documented here, even
3193 * though this may result in some programs functioning incorrectly.
3194 *
3195 * The return codes documented above are those which may currently
3196 * be returned by HFS from hfs_lookup, not including additional
3197 * error codes which may be propagated from underlying routines.
3198 */
3199 errno_t
3200 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
3201 {
3202 int _err;
3203 struct vnop_lookup_args a;
3204
3205 a.a_desc = &vnop_lookup_desc;
3206 a.a_dvp = dvp;
3207 a.a_vpp = vpp;
3208 a.a_cnp = cnp;
3209 a.a_context = ctx;
3210
3211 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
3212 if (_err == 0 && *vpp) {
3213 DTRACE_FSINFO(lookup, vnode_t, *vpp);
3214 }
3215
3216 return _err;
3217 }
3218
3219 #if 0
3220 struct vnop_compound_open_args {
3221 struct vnodeop_desc *a_desc;
3222 vnode_t a_dvp;
3223 vnode_t *a_vpp;
3224 struct componentname *a_cnp;
3225 int32_t a_flags;
3226 int32_t a_fmode;
3227 struct vnode_attr *a_vap;
3228 vfs_context_t a_context;
3229 void *a_reserved;
3230 };
3231 #endif /* 0 */
3232
3233 int
3234 VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
3235 {
3236 int _err;
3237 struct vnop_compound_open_args a;
3238 int did_create = 0;
3239 int want_create;
3240 uint32_t tmp_status = 0;
3241 struct componentname *cnp = &ndp->ni_cnd;
3242
3243 want_create = (flags & O_CREAT);
3244
3245 a.a_desc = &vnop_compound_open_desc;
3246 a.a_dvp = dvp;
3247 a.a_vpp = vpp; /* Could be NULL */
3248 a.a_cnp = cnp;
3249 a.a_flags = flags;
3250 a.a_fmode = fmode;
3251 a.a_status = (statusp != NULL) ? statusp : &tmp_status;
3252 a.a_vap = vap;
3253 a.a_context = ctx;
3254 a.a_open_create_authorizer = vn_authorize_create;
3255 a.a_open_existing_authorizer = vn_authorize_open_existing;
3256 a.a_reserved = NULL;
3257
3258 if (dvp == NULLVP) {
3259 panic("No dvp?");
3260 }
3261 if (want_create && !vap) {
3262 panic("Want create, but no vap?");
3263 }
3264 if (!want_create && vap) {
3265 panic("Don't want create, but have a vap?");
3266 }
3267
3268 _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
3269 if (want_create) {
3270 if (_err == 0 && *vpp) {
3271 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3272 } else {
3273 DTRACE_FSINFO(compound_open, vnode_t, dvp);
3274 }
3275 } else {
3276 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3277 }
3278
3279 did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
3280
3281 if (did_create && !want_create) {
3282 panic("Filesystem did a create, even though none was requested?");
3283 }
3284
3285 if (did_create) {
3286 #if CONFIG_APPLEDOUBLE
3287 if (!NATIVE_XATTR(dvp)) {
3288 /*
3289 * Remove stale Apple Double file (if any).
3290 */
3291 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3292 }
3293 #endif /* CONFIG_APPLEDOUBLE */
3294 /* On create, provide kqueue notification */
3295 post_event_if_success(dvp, _err, NOTE_WRITE);
3296 }
3297
3298 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
3299 #if 0 /* FSEvents... */
3300 if (*vpp && _err && _err != EKEEPLOOKING) {
3301 vnode_put(*vpp);
3302 *vpp = NULLVP;
3303 }
3304 #endif /* 0 */
3305
3306 return _err;
3307 }
3308
3309 #if 0
3310 struct vnop_create_args {
3311 struct vnodeop_desc *a_desc;
3312 vnode_t a_dvp;
3313 vnode_t *a_vpp;
3314 struct componentname *a_cnp;
3315 struct vnode_attr *a_vap;
3316 vfs_context_t a_context;
3317 };
3318 #endif /* 0*/
3319 errno_t
3320 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3321 {
3322 int _err;
3323 struct vnop_create_args a;
3324
3325 a.a_desc = &vnop_create_desc;
3326 a.a_dvp = dvp;
3327 a.a_vpp = vpp;
3328 a.a_cnp = cnp;
3329 a.a_vap = vap;
3330 a.a_context = ctx;
3331
3332 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
3333 if (_err == 0 && *vpp) {
3334 DTRACE_FSINFO(create, vnode_t, *vpp);
3335 }
3336
3337 #if CONFIG_APPLEDOUBLE
3338 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3339 /*
3340 * Remove stale Apple Double file (if any).
3341 */
3342 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3343 }
3344 #endif /* CONFIG_APPLEDOUBLE */
3345
3346 post_event_if_success(dvp, _err, NOTE_WRITE);
3347
3348 return _err;
3349 }
3350
3351 #if 0
3352 /*
3353 *#
3354 *#% whiteout dvp L L L
3355 *#% whiteout cnp - - -
3356 *#% whiteout flag - - -
3357 *#
3358 */
3359 struct vnop_whiteout_args {
3360 struct vnodeop_desc *a_desc;
3361 vnode_t a_dvp;
3362 struct componentname *a_cnp;
3363 int a_flags;
3364 vfs_context_t a_context;
3365 };
3366 #endif /* 0*/
3367 errno_t
3368 VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
3369 __unused int flags, __unused vfs_context_t ctx)
3370 {
3371 return ENOTSUP; // XXX OBSOLETE
3372 }
3373
3374 #if 0
3375 /*
3376 *#
3377 *#% mknod dvp L U U
3378 *#% mknod vpp - X -
3379 *#
3380 */
3381 struct vnop_mknod_args {
3382 struct vnodeop_desc *a_desc;
3383 vnode_t a_dvp;
3384 vnode_t *a_vpp;
3385 struct componentname *a_cnp;
3386 struct vnode_attr *a_vap;
3387 vfs_context_t a_context;
3388 };
3389 #endif /* 0*/
3390 errno_t
3391 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3392 {
3393 int _err;
3394 struct vnop_mknod_args a;
3395
3396 a.a_desc = &vnop_mknod_desc;
3397 a.a_dvp = dvp;
3398 a.a_vpp = vpp;
3399 a.a_cnp = cnp;
3400 a.a_vap = vap;
3401 a.a_context = ctx;
3402
3403 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3404 if (_err == 0 && *vpp) {
3405 DTRACE_FSINFO(mknod, vnode_t, *vpp);
3406 }
3407
3408 post_event_if_success(dvp, _err, NOTE_WRITE);
3409
3410 return _err;
3411 }
3412
3413 #if 0
3414 /*
3415 *#
3416 *#% open vp L L L
3417 *#
3418 */
3419 struct vnop_open_args {
3420 struct vnodeop_desc *a_desc;
3421 vnode_t a_vp;
3422 int a_mode;
3423 vfs_context_t a_context;
3424 };
3425 #endif /* 0*/
3426 errno_t
3427 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3428 {
3429 int _err;
3430 struct vnop_open_args a;
3431
3432 if (ctx == NULL) {
3433 ctx = vfs_context_current();
3434 }
3435 a.a_desc = &vnop_open_desc;
3436 a.a_vp = vp;
3437 a.a_mode = mode;
3438 a.a_context = ctx;
3439
3440 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3441 DTRACE_FSINFO(open, vnode_t, vp);
3442
3443 return _err;
3444 }
3445
3446 #if 0
3447 /*
3448 *#
3449 *#% close vp U U U
3450 *#
3451 */
3452 struct vnop_close_args {
3453 struct vnodeop_desc *a_desc;
3454 vnode_t a_vp;
3455 int a_fflag;
3456 vfs_context_t a_context;
3457 };
3458 #endif /* 0*/
3459 errno_t
3460 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3461 {
3462 int _err;
3463 struct vnop_close_args a;
3464
3465 if (ctx == NULL) {
3466 ctx = vfs_context_current();
3467 }
3468 a.a_desc = &vnop_close_desc;
3469 a.a_vp = vp;
3470 a.a_fflag = fflag;
3471 a.a_context = ctx;
3472
3473 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3474 DTRACE_FSINFO(close, vnode_t, vp);
3475
3476 return _err;
3477 }
3478
3479 #if 0
3480 /*
3481 *#
3482 *#% access vp L L L
3483 *#
3484 */
3485 struct vnop_access_args {
3486 struct vnodeop_desc *a_desc;
3487 vnode_t a_vp;
3488 int a_action;
3489 vfs_context_t a_context;
3490 };
3491 #endif /* 0*/
3492 errno_t
3493 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3494 {
3495 int _err;
3496 struct vnop_access_args a;
3497
3498 if (ctx == NULL) {
3499 ctx = vfs_context_current();
3500 }
3501 a.a_desc = &vnop_access_desc;
3502 a.a_vp = vp;
3503 a.a_action = action;
3504 a.a_context = ctx;
3505
3506 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3507 DTRACE_FSINFO(access, vnode_t, vp);
3508
3509 return _err;
3510 }
3511
3512 #if 0
3513 /*
3514 *#
3515 *#% getattr vp = = =
3516 *#
3517 */
3518 struct vnop_getattr_args {
3519 struct vnodeop_desc *a_desc;
3520 vnode_t a_vp;
3521 struct vnode_attr *a_vap;
3522 vfs_context_t a_context;
3523 };
3524 #endif /* 0*/
3525 errno_t
3526 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3527 {
3528 int _err;
3529 struct vnop_getattr_args a;
3530
3531 a.a_desc = &vnop_getattr_desc;
3532 a.a_vp = vp;
3533 a.a_vap = vap;
3534 a.a_context = ctx;
3535
3536 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3537 DTRACE_FSINFO(getattr, vnode_t, vp);
3538
3539 return _err;
3540 }
3541
3542 #if 0
3543 /*
3544 *#
3545 *#% setattr vp L L L
3546 *#
3547 */
3548 struct vnop_setattr_args {
3549 struct vnodeop_desc *a_desc;
3550 vnode_t a_vp;
3551 struct vnode_attr *a_vap;
3552 vfs_context_t a_context;
3553 };
3554 #endif /* 0*/
3555 errno_t
3556 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3557 {
3558 int _err;
3559 struct vnop_setattr_args a;
3560
3561 a.a_desc = &vnop_setattr_desc;
3562 a.a_vp = vp;
3563 a.a_vap = vap;
3564 a.a_context = ctx;
3565
3566 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3567 DTRACE_FSINFO(setattr, vnode_t, vp);
3568
3569 #if CONFIG_APPLEDOUBLE
3570 /*
3571 * Shadow uid/gid/mod change to extended attribute file.
3572 */
3573 if (_err == 0 && !NATIVE_XATTR(vp)) {
3574 struct vnode_attr va;
3575 int change = 0;
3576
3577 VATTR_INIT(&va);
3578 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3579 VATTR_SET(&va, va_uid, vap->va_uid);
3580 change = 1;
3581 }
3582 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3583 VATTR_SET(&va, va_gid, vap->va_gid);
3584 change = 1;
3585 }
3586 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3587 VATTR_SET(&va, va_mode, vap->va_mode);
3588 change = 1;
3589 }
3590 if (change) {
3591 vnode_t dvp;
3592 const char *vname;
3593
3594 dvp = vnode_getparent(vp);
3595 vname = vnode_getname(vp);
3596
3597 xattrfile_setattr(dvp, vname, &va, ctx);
3598 if (dvp != NULLVP) {
3599 vnode_put(dvp);
3600 }
3601 if (vname != NULL) {
3602 vnode_putname(vname);
3603 }
3604 }
3605 }
3606 #endif /* CONFIG_APPLEDOUBLE */
3607
3608 /*
3609 * If we have changed any of the things about the file that are likely
3610 * to result in changes to authorization results, blow the vnode auth
3611 * cache
3612 */
3613 if (_err == 0 && (
3614 VATTR_IS_SUPPORTED(vap, va_mode) ||
3615 VATTR_IS_SUPPORTED(vap, va_uid) ||
3616 VATTR_IS_SUPPORTED(vap, va_gid) ||
3617 VATTR_IS_SUPPORTED(vap, va_flags) ||
3618 VATTR_IS_SUPPORTED(vap, va_acl) ||
3619 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
3620 VATTR_IS_SUPPORTED(vap, va_guuid))) {
3621 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3622
3623 #if NAMEDSTREAMS
3624 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3625 vnode_t svp;
3626 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3627 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3628 vnode_put(svp);
3629 }
3630 }
3631 #endif /* NAMEDSTREAMS */
3632 }
3633
3634
3635 post_event_if_success(vp, _err, NOTE_ATTRIB);
3636
3637 return _err;
3638 }
3639
3640
3641 #if 0
3642 /*
3643 *#
3644 *#% read vp L L L
3645 *#
3646 */
3647 struct vnop_read_args {
3648 struct vnodeop_desc *a_desc;
3649 vnode_t a_vp;
3650 struct uio *a_uio;
3651 int a_ioflag;
3652 vfs_context_t a_context;
3653 };
3654 #endif /* 0*/
3655 errno_t
3656 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3657 {
3658 int _err;
3659 struct vnop_read_args a;
3660 #if CONFIG_DTRACE
3661 user_ssize_t resid = uio_resid(uio);
3662 #endif
3663
3664 if (ctx == NULL) {
3665 return EINVAL;
3666 }
3667
3668 a.a_desc = &vnop_read_desc;
3669 a.a_vp = vp;
3670 a.a_uio = uio;
3671 a.a_ioflag = ioflag;
3672 a.a_context = ctx;
3673
3674 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3675 DTRACE_FSINFO_IO(read,
3676 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3677
3678 return _err;
3679 }
3680
3681
3682 #if 0
3683 /*
3684 *#
3685 *#% write vp L L L
3686 *#
3687 */
3688 struct vnop_write_args {
3689 struct vnodeop_desc *a_desc;
3690 vnode_t a_vp;
3691 struct uio *a_uio;
3692 int a_ioflag;
3693 vfs_context_t a_context;
3694 };
3695 #endif /* 0*/
3696 errno_t
3697 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3698 {
3699 struct vnop_write_args a;
3700 int _err;
3701 #if CONFIG_DTRACE
3702 user_ssize_t resid = uio_resid(uio);
3703 #endif
3704
3705 if (ctx == NULL) {
3706 return EINVAL;
3707 }
3708
3709 a.a_desc = &vnop_write_desc;
3710 a.a_vp = vp;
3711 a.a_uio = uio;
3712 a.a_ioflag = ioflag;
3713 a.a_context = ctx;
3714
3715 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3716 DTRACE_FSINFO_IO(write,
3717 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3718
3719 post_event_if_success(vp, _err, NOTE_WRITE);
3720
3721 return _err;
3722 }
3723
3724
3725 #if 0
3726 /*
3727 *#
3728 *#% ioctl vp U U U
3729 *#
3730 */
3731 struct vnop_ioctl_args {
3732 struct vnodeop_desc *a_desc;
3733 vnode_t a_vp;
3734 u_long a_command;
3735 caddr_t a_data;
3736 int a_fflag;
3737 vfs_context_t a_context;
3738 };
3739 #endif /* 0*/
3740 errno_t
3741 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3742 {
3743 int _err;
3744 struct vnop_ioctl_args a;
3745
3746 if (ctx == NULL) {
3747 ctx = vfs_context_current();
3748 }
3749
3750 /*
3751 * This check should probably have been put in the TTY code instead...
3752 *
3753 * We have to be careful about what we assume during startup and shutdown.
3754 * We have to be able to use the root filesystem's device vnode even when
3755 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3756 * structure. If there is no data pointer, it doesn't matter whether
3757 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZE)
3758 * which passes NULL for its data pointer can therefore be used during
3759 * mount or unmount of the root filesystem.
3760 *
3761 * Depending on what root filesystems need to do during mount/unmount, we
3762 * may need to loosen this check again in the future.
3763 */
3764 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3765 if (data != NULL && !vnode_vfs64bitready(vp)) {
3766 return ENOTTY;
3767 }
3768 }
3769
3770 a.a_desc = &vnop_ioctl_desc;
3771 a.a_vp = vp;
3772 a.a_command = command;
3773 a.a_data = data;
3774 a.a_fflag = fflag;
3775 a.a_context = ctx;
3776
3777 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
3778 DTRACE_FSINFO(ioctl, vnode_t, vp);
3779
3780 return _err;
3781 }
3782
3783
3784 #if 0
3785 /*
3786 *#
3787 *#% select vp U U U
3788 *#
3789 */
3790 struct vnop_select_args {
3791 struct vnodeop_desc *a_desc;
3792 vnode_t a_vp;
3793 int a_which;
3794 int a_fflags;
3795 void *a_wql;
3796 vfs_context_t a_context;
3797 };
3798 #endif /* 0*/
3799 errno_t
3800 VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t ctx)
3801 {
3802 int _err;
3803 struct vnop_select_args a;
3804
3805 if (ctx == NULL) {
3806 ctx = vfs_context_current();
3807 }
3808 a.a_desc = &vnop_select_desc;
3809 a.a_vp = vp;
3810 a.a_which = which;
3811 a.a_fflags = fflags;
3812 a.a_context = ctx;
3813 a.a_wql = wql;
3814
3815 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3816 DTRACE_FSINFO(select, vnode_t, vp);
3817
3818 return _err;
3819 }
3820
3821
3822 #if 0
3823 /*
3824 *#
3825 *#% exchange fvp L L L
3826 *#% exchange tvp L L L
3827 *#
3828 */
3829 struct vnop_exchange_args {
3830 struct vnodeop_desc *a_desc;
3831 vnode_t a_fvp;
3832 vnode_t a_tvp;
3833 int a_options;
3834 vfs_context_t a_context;
3835 };
3836 #endif /* 0*/
3837 errno_t
3838 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3839 {
3840 int _err;
3841 struct vnop_exchange_args a;
3842
3843 a.a_desc = &vnop_exchange_desc;
3844 a.a_fvp = fvp;
3845 a.a_tvp = tvp;
3846 a.a_options = options;
3847 a.a_context = ctx;
3848
3849 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3850 DTRACE_FSINFO(exchange, vnode_t, fvp);
3851
3852 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3853 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3854 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3855
3856 return _err;
3857 }
3858
3859
3860 #if 0
3861 /*
3862 *#
3863 *#% revoke vp U U U
3864 *#
3865 */
3866 struct vnop_revoke_args {
3867 struct vnodeop_desc *a_desc;
3868 vnode_t a_vp;
3869 int a_flags;
3870 vfs_context_t a_context;
3871 };
3872 #endif /* 0*/
3873 errno_t
3874 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
3875 {
3876 struct vnop_revoke_args a;
3877 int _err;
3878
3879 a.a_desc = &vnop_revoke_desc;
3880 a.a_vp = vp;
3881 a.a_flags = flags;
3882 a.a_context = ctx;
3883
3884 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3885 DTRACE_FSINFO(revoke, vnode_t, vp);
3886
3887 return _err;
3888 }
3889
3890
3891 #if 0
3892 /*
3893 *#
3894 *# mmap_check - vp U U U
3895 *#
3896 */
3897 struct vnop_mmap_check_args {
3898 struct vnodeop_desc *a_desc;
3899 vnode_t a_vp;
3900 int a_flags;
3901 vfs_context_t a_context;
3902 };
3903 #endif /* 0 */
3904 errno_t
3905 VNOP_MMAP_CHECK(vnode_t vp, int flags, vfs_context_t ctx)
3906 {
3907 int _err;
3908 struct vnop_mmap_check_args a;
3909
3910 a.a_desc = &vnop_mmap_check_desc;
3911 a.a_vp = vp;
3912 a.a_flags = flags;
3913 a.a_context = ctx;
3914
3915 _err = (*vp->v_op[vnop_mmap_check_desc.vdesc_offset])(&a);
3916 if (_err == ENOTSUP) {
3917 _err = 0;
3918 }
3919 DTRACE_FSINFO(mmap_check, vnode_t, vp);
3920
3921 return _err;
3922 }
3923
3924 #if 0
3925 /*
3926 *#
3927 *# mmap - vp U U U
3928 *#
3929 */
3930 struct vnop_mmap_args {
3931 struct vnodeop_desc *a_desc;
3932 vnode_t a_vp;
3933 int a_fflags;
3934 vfs_context_t a_context;
3935 };
3936 #endif /* 0*/
3937 errno_t
3938 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
3939 {
3940 int _err;
3941 struct vnop_mmap_args a;
3942
3943 a.a_desc = &vnop_mmap_desc;
3944 a.a_vp = vp;
3945 a.a_fflags = fflags;
3946 a.a_context = ctx;
3947
3948 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3949 DTRACE_FSINFO(mmap, vnode_t, vp);
3950
3951 return _err;
3952 }
3953
3954
3955 #if 0
3956 /*
3957 *#
3958 *# mnomap - vp U U U
3959 *#
3960 */
3961 struct vnop_mnomap_args {
3962 struct vnodeop_desc *a_desc;
3963 vnode_t a_vp;
3964 vfs_context_t a_context;
3965 };
3966 #endif /* 0*/
3967 errno_t
3968 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
3969 {
3970 int _err;
3971 struct vnop_mnomap_args a;
3972
3973 a.a_desc = &vnop_mnomap_desc;
3974 a.a_vp = vp;
3975 a.a_context = ctx;
3976
3977 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3978 DTRACE_FSINFO(mnomap, vnode_t, vp);
3979
3980 return _err;
3981 }
3982
3983
3984 #if 0
3985 /*
3986 *#
3987 *#% fsync vp L L L
3988 *#
3989 */
3990 struct vnop_fsync_args {
3991 struct vnodeop_desc *a_desc;
3992 vnode_t a_vp;
3993 int a_waitfor;
3994 vfs_context_t a_context;
3995 };
3996 #endif /* 0*/
3997 errno_t
3998 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
3999 {
4000 struct vnop_fsync_args a;
4001 int _err;
4002
4003 a.a_desc = &vnop_fsync_desc;
4004 a.a_vp = vp;
4005 a.a_waitfor = waitfor;
4006 a.a_context = ctx;
4007
4008 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
4009 DTRACE_FSINFO(fsync, vnode_t, vp);
4010
4011 return _err;
4012 }
4013
4014
4015 #if 0
4016 /*
4017 *#
4018 *#% remove dvp L U U
4019 *#% remove vp L U U
4020 *#
4021 */
4022 struct vnop_remove_args {
4023 struct vnodeop_desc *a_desc;
4024 vnode_t a_dvp;
4025 vnode_t a_vp;
4026 struct componentname *a_cnp;
4027 int a_flags;
4028 vfs_context_t a_context;
4029 };
4030 #endif /* 0*/
4031 errno_t
4032 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
4033 {
4034 int _err;
4035 struct vnop_remove_args a;
4036
4037 a.a_desc = &vnop_remove_desc;
4038 a.a_dvp = dvp;
4039 a.a_vp = vp;
4040 a.a_cnp = cnp;
4041 a.a_flags = flags;
4042 a.a_context = ctx;
4043
4044 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
4045 DTRACE_FSINFO(remove, vnode_t, vp);
4046
4047 if (_err == 0) {
4048 vnode_setneedinactive(vp);
4049 #if CONFIG_APPLEDOUBLE
4050 if (!(NATIVE_XATTR(dvp))) {
4051 /*
4052 * Remove any associated extended attribute file (._ AppleDouble file).
4053 */
4054 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4055 }
4056 #endif /* CONFIG_APPLEDOUBLE */
4057 }
4058
4059 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4060 post_event_if_success(dvp, _err, NOTE_WRITE);
4061
4062 return _err;
4063 }
4064
4065 int
4066 VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
4067 {
4068 int _err;
4069 struct vnop_compound_remove_args a;
4070 int no_vp = (*vpp == NULLVP);
4071
4072 a.a_desc = &vnop_compound_remove_desc;
4073 a.a_dvp = dvp;
4074 a.a_vpp = vpp;
4075 a.a_cnp = &ndp->ni_cnd;
4076 a.a_flags = flags;
4077 a.a_vap = vap;
4078 a.a_context = ctx;
4079 a.a_remove_authorizer = vn_authorize_unlink;
4080
4081 _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
4082 if (_err == 0 && *vpp) {
4083 DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
4084 } else {
4085 DTRACE_FSINFO(compound_remove, vnode_t, dvp);
4086 }
4087 if (_err == 0) {
4088 vnode_setneedinactive(*vpp);
4089 #if CONFIG_APPLEDOUBLE
4090 if (!(NATIVE_XATTR(dvp))) {
4091 /*
4092 * Remove any associated extended attribute file (._ AppleDouble file).
4093 */
4094 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
4095 }
4096 #endif /* CONFIG_APPLEDOUBLE */
4097 }
4098
4099 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4100 post_event_if_success(dvp, _err, NOTE_WRITE);
4101
4102 if (no_vp) {
4103 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4104 if (*vpp && _err && _err != EKEEPLOOKING) {
4105 vnode_put(*vpp);
4106 *vpp = NULLVP;
4107 }
4108 }
4109
4110 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
4111
4112 return _err;
4113 }
4114
4115 #if 0
4116 /*
4117 *#
4118 *#% link vp U U U
4119 *#% link tdvp L U U
4120 *#
4121 */
4122 struct vnop_link_args {
4123 struct vnodeop_desc *a_desc;
4124 vnode_t a_vp;
4125 vnode_t a_tdvp;
4126 struct componentname *a_cnp;
4127 vfs_context_t a_context;
4128 };
4129 #endif /* 0*/
4130 errno_t
4131 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
4132 {
4133 int _err;
4134 struct vnop_link_args a;
4135
4136 #if CONFIG_APPLEDOUBLE
4137 /*
4138 * For file systems with non-native extended attributes,
4139 * disallow linking to an existing "._" Apple Double file.
4140 */
4141 if (!NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
4142 const char *vname;
4143
4144 vname = vnode_getname(vp);
4145 if (vname != NULL) {
4146 _err = 0;
4147 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
4148 _err = EPERM;
4149 }
4150 vnode_putname(vname);
4151 if (_err) {
4152 return _err;
4153 }
4154 }
4155 }
4156 #endif /* CONFIG_APPLEDOUBLE */
4157
4158 a.a_desc = &vnop_link_desc;
4159 a.a_vp = vp;
4160 a.a_tdvp = tdvp;
4161 a.a_cnp = cnp;
4162 a.a_context = ctx;
4163
4164 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
4165 DTRACE_FSINFO(link, vnode_t, vp);
4166
4167 post_event_if_success(vp, _err, NOTE_LINK);
4168 post_event_if_success(tdvp, _err, NOTE_WRITE);
4169
4170 return _err;
4171 }
4172
4173 errno_t
4174 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4175 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4176 vfs_rename_flags_t flags, vfs_context_t ctx)
4177 {
4178 int _err;
4179 struct nameidata *fromnd = NULL;
4180 struct nameidata *tond = NULL;
4181 #if CONFIG_APPLEDOUBLE
4182 vnode_t src_attr_vp = NULLVP;
4183 vnode_t dst_attr_vp = NULLVP;
4184 char smallname1[48];
4185 char smallname2[48];
4186 char *xfromname = NULL;
4187 char *xtoname = NULL;
4188 #endif /* CONFIG_APPLEDOUBLE */
4189 int batched;
4190 uint32_t tdfflags; // Target directory file flags
4191
4192 batched = vnode_compound_rename_available(fdvp);
4193
4194 if (!batched) {
4195 if (*fvpp == NULLVP) {
4196 panic("Not batched, and no fvp?");
4197 }
4198 }
4199
4200 #if CONFIG_APPLEDOUBLE
4201 /*
4202 * We need to preflight any potential AppleDouble file for the source file
4203 * before doing the rename operation, since we could potentially be doing
4204 * this operation on a network filesystem, and would end up duplicating
4205 * the work. Also, save the source and destination names. Skip it if the
4206 * source has a "._" prefix.
4207 */
4208
4209 if (!NATIVE_XATTR(fdvp) &&
4210 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
4211 size_t len;
4212 int error;
4213
4214 /* Get source attribute file name. */
4215 len = fcnp->cn_namelen + 3;
4216 if (len > sizeof(smallname1)) {
4217 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
4218 } else {
4219 xfromname = &smallname1[0];
4220 }
4221 strlcpy(xfromname, "._", len);
4222 strlcat(xfromname, fcnp->cn_nameptr, len);
4223
4224 /* Get destination attribute file name. */
4225 len = tcnp->cn_namelen + 3;
4226 if (len > sizeof(smallname2)) {
4227 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
4228 } else {
4229 xtoname = &smallname2[0];
4230 }
4231 strlcpy(xtoname, "._", len);
4232 strlcat(xtoname, tcnp->cn_nameptr, len);
4233
4234 /*
4235 * Look up source attribute file, keep reference on it if exists.
4236 * Note that we do the namei with the nameiop of RENAME, which is different than
4237 * in the rename syscall. It's OK if the source file does not exist, since this
4238 * is only for AppleDouble files.
4239 */
4240 MALLOC(fromnd, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK);
4241 NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
4242 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
4243 fromnd->ni_dvp = fdvp;
4244 error = namei(fromnd);
4245
4246 /*
4247 * If there was an error looking up source attribute file,
4248 * we'll behave as if it didn't exist.
4249 */
4250
4251 if (error == 0) {
4252 if (fromnd->ni_vp) {
4253 /* src_attr_vp indicates need to call vnode_put / nameidone later */
4254 src_attr_vp = fromnd->ni_vp;
4255
4256 if (fromnd->ni_vp->v_type != VREG) {
4257 src_attr_vp = NULLVP;
4258 vnode_put(fromnd->ni_vp);
4259 }
4260 }
4261 /*
4262 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4263 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4264 * have a vnode here, so we drop our namei buffer for the source attribute file
4265 */
4266 if (src_attr_vp == NULLVP) {
4267 nameidone(fromnd);
4268 }
4269 }
4270 }
4271 #endif /* CONFIG_APPLEDOUBLE */
4272
4273 if (batched) {
4274 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
4275 if (_err != 0) {
4276 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
4277 }
4278 } else {
4279 if (flags) {
4280 _err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
4281 if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
4282 // Legacy...
4283 if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
4284 fcnp->cn_flags |= CN_SECLUDE_RENAME;
4285 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4286 }
4287 }
4288 } else {
4289 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4290 }
4291 }
4292
4293 /*
4294 * If moved to a new directory that is restricted,
4295 * set the restricted flag on the item moved.
4296 */
4297 if (_err == 0) {
4298 _err = vnode_flags(tdvp, &tdfflags, ctx);
4299 if (_err == 0) {
4300 uint32_t inherit_flags = tdfflags & (UF_DATAVAULT | SF_RESTRICTED);
4301 if (inherit_flags) {
4302 uint32_t fflags;
4303 _err = vnode_flags(*fvpp, &fflags, ctx);
4304 if (_err == 0 && fflags != (fflags | inherit_flags)) {
4305 struct vnode_attr va;
4306 VATTR_INIT(&va);
4307 VATTR_SET(&va, va_flags, fflags | inherit_flags);
4308 _err = vnode_setattr(*fvpp, &va, ctx);
4309 }
4310 }
4311 }
4312 }
4313
4314 #if CONFIG_MACF
4315 if (_err == 0) {
4316 mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
4317 if (flags & VFS_RENAME_SWAP) {
4318 mac_vnode_notify_rename(ctx, *tvpp, fdvp, fcnp);
4319 }
4320 }
4321 #endif
4322
4323 #if CONFIG_APPLEDOUBLE
4324 /*
4325 * Rename any associated extended attribute file (._ AppleDouble file).
4326 */
4327 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
4328 int error = 0;
4329
4330 /*
4331 * Get destination attribute file vnode.
4332 * Note that tdvp already has an iocount reference. Make sure to check that we
4333 * get a valid vnode from namei.
4334 */
4335 MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK);
4336 NDINIT(tond, RENAME, OP_RENAME,
4337 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4338 CAST_USER_ADDR_T(xtoname), ctx);
4339 tond->ni_dvp = tdvp;
4340 error = namei(tond);
4341
4342 if (error) {
4343 goto ad_error;
4344 }
4345
4346 if (tond->ni_vp) {
4347 dst_attr_vp = tond->ni_vp;
4348 }
4349
4350 if (src_attr_vp) {
4351 const char *old_name = src_attr_vp->v_name;
4352 vnode_t old_parent = src_attr_vp->v_parent;
4353
4354 if (batched) {
4355 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
4356 tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
4357 0, ctx);
4358 } else {
4359 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
4360 tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
4361 }
4362
4363 if (error == 0 && old_name == src_attr_vp->v_name &&
4364 old_parent == src_attr_vp->v_parent) {
4365 int update_flags = VNODE_UPDATE_NAME;
4366
4367 if (fdvp != tdvp) {
4368 update_flags |= VNODE_UPDATE_PARENT;
4369 }
4370
4371 if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
4372 vnode_update_identity(src_attr_vp, tdvp,
4373 tond->ni_cnd.cn_nameptr,
4374 tond->ni_cnd.cn_namelen,
4375 tond->ni_cnd.cn_hash,
4376 update_flags);
4377 }
4378 }
4379
4380 /* Kevent notifications for moving resource files:
4381 * _err is zero if we're here, so there is no need to notify the directories;
4382 * the code below will do that. We only need to post the rename on the source
4383 * and possibly a delete on the dest.
4384 */
4385 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4386 if (dst_attr_vp) {
4387 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4388 }
4389 } else if (dst_attr_vp) {
4390 /*
4391 * Just delete destination attribute file vnode if it exists, since
4392 * we didn't have a source attribute file.
4393 * Note that tdvp already has an iocount reference.
4394 */
4395
4396 struct vnop_remove_args args;
4397
4398 args.a_desc = &vnop_remove_desc;
4399 args.a_dvp = tdvp;
4400 args.a_vp = dst_attr_vp;
4401 args.a_cnp = &tond->ni_cnd;
4402 args.a_context = ctx;
4403
4404 if (error == 0) {
4405 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
4406
4407 if (error == 0) {
4408 vnode_setneedinactive(dst_attr_vp);
4409 }
4410 }
4411
4412 /* kevent notification for deleting the destination's attribute file
4413 * if it existed. Only need to post the delete on the destination, since
4414 * the code below will handle the directories.
4415 */
4416 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4417 }
4418 }
4419 ad_error:
4420 if (src_attr_vp) {
4421 vnode_put(src_attr_vp);
4422 nameidone(fromnd);
4423 }
4424 if (dst_attr_vp) {
4425 vnode_put(dst_attr_vp);
4426 nameidone(tond);
4427 }
4428 if (xfromname && xfromname != &smallname1[0]) {
4429 FREE(xfromname, M_TEMP);
4430 }
4431 if (xtoname && xtoname != &smallname2[0]) {
4432 FREE(xtoname, M_TEMP);
4433 }
4434 #endif /* CONFIG_APPLEDOUBLE */
4435 if (fromnd) {
4436 FREE(fromnd, M_TEMP);
4437 }
4438 if (tond) {
4439 FREE(tond, M_TEMP);
4440 }
4441 return _err;
4442 }
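/*
 * A minimal sketch of the "._" sibling-name rule used by the AppleDouble
 * handling above: the attribute file's name is the component name prefixed
 * with "._", spilling to a heap buffer only when it does not fit in the
 * small on-stack buffer.  The helper name below is hypothetical and purely
 * illustrative, not part of this KPI.
 */
#if 0
static int
build_appledouble_name(char *buf, size_t buflen, const char *basename)
{
	/* "._" + basename + NUL must fit in buflen */
	if ((size_t)snprintf(buf, buflen, "._%s", basename) >= buflen) {
		return ENAMETOOLONG;
	}
	return 0;
}
#endif /* 0 (illustrative sketch) */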
4443
4444
4445 #if 0
4446 /*
4447 *#
4448 *#% rename fdvp U U U
4449 *#% rename fvp U U U
4450 *#% rename tdvp L U U
4451 *#% rename tvp X U U
4452 *#
4453 */
4454 struct vnop_rename_args {
4455 struct vnodeop_desc *a_desc;
4456 vnode_t a_fdvp;
4457 vnode_t a_fvp;
4458 struct componentname *a_fcnp;
4459 vnode_t a_tdvp;
4460 vnode_t a_tvp;
4461 struct componentname *a_tcnp;
4462 vfs_context_t a_context;
4463 };
4464 #endif /* 0*/
4465 errno_t
4466 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4467 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4468 vfs_context_t ctx)
4469 {
4470 int _err = 0;
4471 struct vnop_rename_args a;
4472
4473 a.a_desc = &vnop_rename_desc;
4474 a.a_fdvp = fdvp;
4475 a.a_fvp = fvp;
4476 a.a_fcnp = fcnp;
4477 a.a_tdvp = tdvp;
4478 a.a_tvp = tvp;
4479 a.a_tcnp = tcnp;
4480 a.a_context = ctx;
4481
4482 /* do the rename of the main file. */
4483 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4484 DTRACE_FSINFO(rename, vnode_t, fdvp);
4485
4486 if (_err) {
4487 return _err;
4488 }
4489
4490 return post_rename(fdvp, fvp, tdvp, tvp);
4491 }
4492
4493 static errno_t
4494 post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
4495 {
4496 if (tvp && tvp != fvp) {
4497 vnode_setneedinactive(tvp);
4498 }
4499
4500 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4501 int events = NOTE_WRITE;
4502 if (vnode_isdir(fvp)) {
4503 /* Link count on dir changed only if we are moving a dir and...
4504 * --Moved to new dir, not overwriting there
4505 * --Kept in same dir and DID overwrite
4506 */
4507 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4508 events |= NOTE_LINK;
4509 }
4510 }
4511
4512 lock_vnode_and_post(fdvp, events);
4513 if (fdvp != tdvp) {
4514 lock_vnode_and_post(tdvp, events);
4515 }
4516
4517 /* If you're replacing the target, post a deletion for it */
4518 if (tvp) {
4519 lock_vnode_and_post(tvp, NOTE_DELETE);
4520 }
4521
4522 lock_vnode_and_post(fvp, NOTE_RENAME);
4523
4524 return 0;
4525 }
4526
4527 #if 0
4528 /*
4529 *#
4530 *#% renamex fdvp U U U
4531 *#% renamex fvp U U U
4532 *#% renamex tdvp L U U
4533 *#% renamex tvp X U U
4534 *#
4535 */
4536 struct vnop_renamex_args {
4537 struct vnodeop_desc *a_desc;
4538 vnode_t a_fdvp;
4539 vnode_t a_fvp;
4540 struct componentname *a_fcnp;
4541 vnode_t a_tdvp;
4542 vnode_t a_tvp;
4543 struct componentname *a_tcnp;
4544 vfs_rename_flags_t a_flags;
4545 vfs_context_t a_context;
4546 };
4547 #endif /* 0*/
4548 errno_t
4549 VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4550 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4551 vfs_rename_flags_t flags, vfs_context_t ctx)
4552 {
4553 int _err = 0;
4554 struct vnop_renamex_args a;
4555
4556 a.a_desc = &vnop_renamex_desc;
4557 a.a_fdvp = fdvp;
4558 a.a_fvp = fvp;
4559 a.a_fcnp = fcnp;
4560 a.a_tdvp = tdvp;
4561 a.a_tvp = tvp;
4562 a.a_tcnp = tcnp;
4563 a.a_flags = flags;
4564 a.a_context = ctx;
4565
4566 /* do the rename of the main file. */
4567 _err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
4568 DTRACE_FSINFO(renamex, vnode_t, fdvp);
4569
4570 if (_err) {
4571 return _err;
4572 }
4573
4574 return post_rename(fdvp, fvp, tdvp, tvp);
4575 }
4576
4577
4578 int
4579 VNOP_COMPOUND_RENAME(
4580 struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4581 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4582 uint32_t flags, vfs_context_t ctx)
4583 {
4584 int _err = 0;
4585 int events;
4586 struct vnop_compound_rename_args a;
4587 int no_fvp, no_tvp;
4588
4589 no_fvp = (*fvpp) == NULLVP;
4590 no_tvp = (*tvpp) == NULLVP;
4591
4592 a.a_desc = &vnop_compound_rename_desc;
4593
4594 a.a_fdvp = fdvp;
4595 a.a_fvpp = fvpp;
4596 a.a_fcnp = fcnp;
4597 a.a_fvap = fvap;
4598
4599 a.a_tdvp = tdvp;
4600 a.a_tvpp = tvpp;
4601 a.a_tcnp = tcnp;
4602 a.a_tvap = tvap;
4603
4604 a.a_flags = flags;
4605 a.a_context = ctx;
4606 a.a_rename_authorizer = vn_authorize_rename;
4607 a.a_reserved = NULL;
4608
4609 /* do the rename of the main file. */
4610 _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
4611 DTRACE_FSINFO(compound_rename, vnode_t, fdvp);
4612
4613 if (_err == 0) {
4614 if (*tvpp && *tvpp != *fvpp) {
4615 vnode_setneedinactive(*tvpp);
4616 }
4617 }
4618
4619 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4620 if (_err == 0 && *fvpp != *tvpp) {
4621 if (!*fvpp) {
4622 panic("No fvpp after compound rename?");
4623 }
4624
4625 events = NOTE_WRITE;
4626 if (vnode_isdir(*fvpp)) {
4627 /* Link count on dir changed only if we are moving a dir and...
4628 * --Moved to new dir, not overwriting there
4629 * --Kept in same dir and DID overwrite
4630 */
4631 if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
4632 events |= NOTE_LINK;
4633 }
4634 }
4635
4636 lock_vnode_and_post(fdvp, events);
4637 if (fdvp != tdvp) {
4638 lock_vnode_and_post(tdvp, events);
4639 }
4640
4641 /* If you're replacing the target, post a deletion for it */
4642 if (*tvpp) {
4643 lock_vnode_and_post(*tvpp, NOTE_DELETE);
4644 }
4645
4646 lock_vnode_and_post(*fvpp, NOTE_RENAME);
4647 }
4648
4649 if (no_fvp) {
4650 lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
4651 }
4652 if (no_tvp && *tvpp != NULLVP) {
4653 lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
4654 }
4655
4656 if (_err && _err != EKEEPLOOKING) {
4657 if (*fvpp) {
4658 vnode_put(*fvpp);
4659 *fvpp = NULLVP;
4660 }
4661 if (*tvpp) {
4662 vnode_put(*tvpp);
4663 *tvpp = NULLVP;
4664 }
4665 }
4666
4667 return _err;
4668 }
4669
4670 int
4671 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4672 struct vnode_attr *vap, vfs_context_t ctx)
4673 {
4674 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4675 panic("Non-CREATE nameiop in vn_mkdir()?");
4676 }
4677
4678 if (vnode_compound_mkdir_available(dvp)) {
4679 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4680 } else {
4681 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4682 }
4683 }
4684
4685 #if 0
4686 /*
4687 *#
4688 *#% mkdir dvp L U U
4689 *#% mkdir vpp - L -
4690 *#
4691 */
4692 struct vnop_mkdir_args {
4693 struct vnodeop_desc *a_desc;
4694 vnode_t a_dvp;
4695 vnode_t *a_vpp;
4696 struct componentname *a_cnp;
4697 struct vnode_attr *a_vap;
4698 vfs_context_t a_context;
4699 };
4700 #endif /* 0*/
4701 errno_t
4702 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4703 struct vnode_attr *vap, vfs_context_t ctx)
4704 {
4705 int _err;
4706 struct vnop_mkdir_args a;
4707
4708 a.a_desc = &vnop_mkdir_desc;
4709 a.a_dvp = dvp;
4710 a.a_vpp = vpp;
4711 a.a_cnp = cnp;
4712 a.a_vap = vap;
4713 a.a_context = ctx;
4714
4715 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4716 if (_err == 0 && *vpp) {
4717 DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4718 }
4719 #if CONFIG_APPLEDOUBLE
4720 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4721 /*
4722 * Remove stale Apple Double file (if any).
4723 */
4724 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4725 }
4726 #endif /* CONFIG_APPLEDOUBLE */
4727
4728 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4729
4730 return _err;
4731 }
4732
4733 int
4734 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4735 struct vnode_attr *vap, vfs_context_t ctx)
4736 {
4737 int _err;
4738 struct vnop_compound_mkdir_args a;
4739
4740 a.a_desc = &vnop_compound_mkdir_desc;
4741 a.a_dvp = dvp;
4742 a.a_vpp = vpp;
4743 a.a_cnp = &ndp->ni_cnd;
4744 a.a_vap = vap;
4745 a.a_flags = 0;
4746 a.a_context = ctx;
4747 #if 0
4748 a.a_mkdir_authorizer = vn_authorize_mkdir;
4749 #endif /* 0 */
4750 a.a_reserved = NULL;
4751
4752 _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
4753 if (_err == 0 && *vpp) {
4754 DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
4755 }
4756 #if CONFIG_APPLEDOUBLE
4757 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4758 /*
4759 * Remove stale Apple Double file (if any).
4760 */
4761 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4762 }
4763 #endif /* CONFIG_APPLEDOUBLE */
4764
4765 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4766
4767 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
4768 if (*vpp && _err && _err != EKEEPLOOKING) {
4769 vnode_put(*vpp);
4770 *vpp = NULLVP;
4771 }
4772
4773 return _err;
4774 }
4775
4776 int
4777 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4778 {
4779 if (vnode_compound_rmdir_available(dvp)) {
4780 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4781 } else {
4782 if (*vpp == NULLVP) {
4783 panic("NULL vp, but not a compound VNOP?");
4784 }
4785 if (vap != NULL) {
4786 panic("Non-NULL vap, but not a compound VNOP?");
4787 }
4788 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4789 }
4790 }
4791
4792 #if 0
4793 /*
4794 *#
4795 *#% rmdir dvp L U U
4796 *#% rmdir vp L U U
4797 *#
4798 */
4799 struct vnop_rmdir_args {
4800 struct vnodeop_desc *a_desc;
4801 vnode_t a_dvp;
4802 vnode_t a_vp;
4803 struct componentname *a_cnp;
4804 vfs_context_t a_context;
4805 };
4806
4807 #endif /* 0*/
4808 errno_t
4809 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4810 {
4811 int _err;
4812 struct vnop_rmdir_args a;
4813
4814 a.a_desc = &vnop_rmdir_desc;
4815 a.a_dvp = dvp;
4816 a.a_vp = vp;
4817 a.a_cnp = cnp;
4818 a.a_context = ctx;
4819
4820 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4821 DTRACE_FSINFO(rmdir, vnode_t, vp);
4822
4823 if (_err == 0) {
4824 vnode_setneedinactive(vp);
4825 #if CONFIG_APPLEDOUBLE
4826 if (!(NATIVE_XATTR(dvp))) {
4827 /*
4828 * Remove any associated extended attribute file (._ AppleDouble file).
4829 */
4830 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4831 }
4832 #endif
4833 }
4834
4835 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4836 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4837 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4838
4839 return _err;
4840 }
4841
4842 int
4843 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4844 struct vnode_attr *vap, vfs_context_t ctx)
4845 {
4846 int _err;
4847 struct vnop_compound_rmdir_args a;
4848 int no_vp;
4849
4850 a.a_desc = &vnop_compound_rmdir_desc;
4851 a.a_dvp = dvp;
4852 a.a_vpp = vpp;
4853 a.a_cnp = &ndp->ni_cnd;
4854 a.a_vap = vap;
4855 a.a_flags = 0;
4856 a.a_context = ctx;
4857 a.a_rmdir_authorizer = vn_authorize_rmdir;
4858 a.a_reserved = NULL;
4859
4860 no_vp = (*vpp == NULLVP);
4861
4862 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
4863 if (_err == 0 && *vpp) {
4864 DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
4865 }
4866 #if CONFIG_APPLEDOUBLE
4867 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4868 /*
4869 * Remove stale Apple Double file (if any).
4870 */
4871 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4872 }
4873 #endif
4874
4875 if (*vpp) {
4876 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4877 }
4878 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4879
4880 if (no_vp) {
4881 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4882
4883 #if 0 /* Removing orphaned ._ files requires a vp.... */
4884 if (*vpp && _err && _err != EKEEPLOOKING) {
4885 vnode_put(*vpp);
4886 *vpp = NULLVP;
4887 }
4888 #endif /* 0 */
4889 }
4890
4891 return _err;
4892 }
4893
4894 #if CONFIG_APPLEDOUBLE
4895 /*
4896 * Remove a ._ AppleDouble file
4897 */
4898 #define AD_STALE_SECS (180)
4899 static void
4900 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
4901 {
4902 vnode_t xvp;
4903 struct nameidata nd;
4904 char smallname[64];
4905 char *filename = NULL;
4906 size_t len;
4907
4908 if ((basename == NULL) || (basename[0] == '\0') ||
4909 (basename[0] == '.' && basename[1] == '_')) {
4910 return;
4911 }
4912 filename = &smallname[0];
4913 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4914 if (len >= sizeof(smallname)) {
4915 len++; /* snprintf result doesn't include '\0' */
4916 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4917 len = snprintf(filename, len, "._%s", basename);
4918 }
4919 NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
4920 CAST_USER_ADDR_T(filename), ctx);
4921 nd.ni_dvp = dvp;
4922 if (namei(&nd) != 0) {
4923 goto out2;
4924 }
4925
4926 xvp = nd.ni_vp;
4927 nameidone(&nd);
4928 if (xvp->v_type != VREG) {
4929 goto out1;
4930 }
4931
4932 /*
4933 * When creating a new object and a "._" file already
4934 * exists, check to see if it's a stale "._" file.
4935 *
4936 */
4937 if (!force) {
4938 struct vnode_attr va;
4939
4940 VATTR_INIT(&va);
4941 VATTR_WANTED(&va, va_data_size);
4942 VATTR_WANTED(&va, va_modify_time);
4943 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
4944 VATTR_IS_SUPPORTED(&va, va_data_size) &&
4945 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
4946 va.va_data_size != 0) {
4947 struct timeval tv;
4948
4949 microtime(&tv);
4950 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
4951 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
4952 force = 1; /* must be stale */
4953 }
4954 }
4955 }
4956 if (force) {
4957 int error;
4958
4959 error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
4960 if (error == 0) {
4961 vnode_setneedinactive(xvp);
4962 }
4963
4964 post_event_if_success(xvp, error, NOTE_DELETE);
4965 post_event_if_success(dvp, error, NOTE_WRITE);
4966 }
4967
4968 out1:
4969 vnode_put(dvp);
4970 vnode_put(xvp);
4971 out2:
4972 if (filename && filename != &smallname[0]) {
4973 FREE(filename, M_TEMP);
4974 }
4975 }
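/*
 * A minimal sketch of the staleness rule applied above: a non-empty "._"
 * file whose modification time is more than AD_STALE_SECS in the past is
 * considered stale and eligible for removal.  The helper name is
 * hypothetical; it simply restates the predicate used by xattrfile_remove().
 */
#if 0
static int
appledouble_is_stale(const struct vnode_attr *va)
{
	struct timeval tv;

	if (va->va_data_size == 0) {
		return 0;
	}
	microtime(&tv);
	return (tv.tv_sec > va->va_modify_time.tv_sec) &&
	    ((tv.tv_sec - va->va_modify_time.tv_sec) > AD_STALE_SECS);
}
#endif /* 0 (illustrative sketch) */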
4976
4977 /*
4978 * Shadow uid/gid/mode to a ._ AppleDouble file
4979 */
4980 static void
4981 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
4982 vfs_context_t ctx)
4983 {
4984 vnode_t xvp;
4985 struct nameidata nd;
4986 char smallname[64];
4987 char *filename = NULL;
4988 size_t len;
4989
4990 if ((dvp == NULLVP) ||
4991 (basename == NULL) || (basename[0] == '\0') ||
4992 (basename[0] == '.' && basename[1] == '_')) {
4993 return;
4994 }
4995 filename = &smallname[0];
4996 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4997 if (len >= sizeof(smallname)) {
4998 len++; /* snprintf result doesn't include '\0' */
4999 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
5000 len = snprintf(filename, len, "._%s", basename);
5001 }
5002 NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
5003 CAST_USER_ADDR_T(filename), ctx);
5004 nd.ni_dvp = dvp;
5005 if (namei(&nd) != 0) {
5006 goto out2;
5007 }
5008
5009 xvp = nd.ni_vp;
5010 nameidone(&nd);
5011
5012 if (xvp->v_type == VREG) {
5013 struct vnop_setattr_args a;
5014
5015 a.a_desc = &vnop_setattr_desc;
5016 a.a_vp = xvp;
5017 a.a_vap = vap;
5018 a.a_context = ctx;
5019
5020 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
5021 }
5022
5023 vnode_put(xvp);
5024 out2:
5025 if (filename && filename != &smallname[0]) {
5026 FREE(filename, M_TEMP);
5027 }
5028 }
5029 #endif /* CONFIG_APPLEDOUBLE */
5030
5031 #if 0
5032 /*
5033 *#
5034 *#% symlink dvp L U U
5035 *#% symlink vpp - U -
5036 *#
5037 */
5038 struct vnop_symlink_args {
5039 struct vnodeop_desc *a_desc;
5040 vnode_t a_dvp;
5041 vnode_t *a_vpp;
5042 struct componentname *a_cnp;
5043 struct vnode_attr *a_vap;
5044 char *a_target;
5045 vfs_context_t a_context;
5046 };
5047
5048 #endif /* 0*/
5049 errno_t
5050 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5051 struct vnode_attr *vap, char *target, vfs_context_t ctx)
5052 {
5053 int _err;
5054 struct vnop_symlink_args a;
5055
5056 a.a_desc = &vnop_symlink_desc;
5057 a.a_dvp = dvp;
5058 a.a_vpp = vpp;
5059 a.a_cnp = cnp;
5060 a.a_vap = vap;
5061 a.a_target = target;
5062 a.a_context = ctx;
5063
5064 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
5065 DTRACE_FSINFO(symlink, vnode_t, dvp);
5066 #if CONFIG_APPLEDOUBLE
5067 if (_err == 0 && !NATIVE_XATTR(dvp)) {
5068 /*
5069 * Remove stale Apple Double file (if any). Posts its own knotes
5070 */
5071 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
5072 }
5073 #endif /* CONFIG_APPLEDOUBLE */
5074
5075 post_event_if_success(dvp, _err, NOTE_WRITE);
5076
5077 return _err;
5078 }
5079
5080 #if 0
5081 /*
5082 *#
5083 *#% readdir vp L L L
5084 *#
5085 */
5086 struct vnop_readdir_args {
5087 struct vnodeop_desc *a_desc;
5088 vnode_t a_vp;
5089 struct uio *a_uio;
5090 int a_flags;
5091 int *a_eofflag;
5092 int *a_numdirent;
5093 vfs_context_t a_context;
5094 };
5095
5096 #endif /* 0*/
5097 errno_t
5098 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
5099 int *numdirent, vfs_context_t ctx)
5100 {
5101 int _err;
5102 struct vnop_readdir_args a;
5103 #if CONFIG_DTRACE
5104 user_ssize_t resid = uio_resid(uio);
5105 #endif
5106
5107 a.a_desc = &vnop_readdir_desc;
5108 a.a_vp = vp;
5109 a.a_uio = uio;
5110 a.a_flags = flags;
5111 a.a_eofflag = eofflag;
5112 a.a_numdirent = numdirent;
5113 a.a_context = ctx;
5114
5115 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
5116 DTRACE_FSINFO_IO(readdir,
5117 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5118
5119 return _err;
5120 }
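/*
 * A minimal sketch of pulling one buffer of entries through VNOP_READDIR
 * and walking the packed struct dirent records by d_reclen.  The buffer
 * size, the flags value of 0 (plain dirent format) and the wrapper name
 * are illustrative assumptions.
 */
#if 0
static int
scan_one_dir_buffer(vnode_t dvp, char *buf, size_t buflen, off_t *offsetp,
    int *eofp, vfs_context_t ctx)
{
	uio_t auio;
	int numdirent = 0;
	int error;

	auio = uio_create(1, *offsetp, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL) {
		return ENOMEM;
	}
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);

	error = VNOP_READDIR(dvp, auio, 0, eofp, &numdirent, ctx);
	if (error == 0) {
		size_t bytes = buflen - (size_t)uio_resid(auio);
		struct dirent *dp = (struct dirent *)(void *)buf;
		char *end = buf + bytes;

		while ((char *)dp < end && dp->d_reclen > 0) {
			/* dp->d_name / dp->d_fileno describe one entry */
			dp = (struct dirent *)(void *)((char *)dp + dp->d_reclen);
		}
		*offsetp = uio_offset(auio);	/* resume point for the next call */
	}
	uio_free(auio);
	return error;
}
#endif /* 0 (illustrative sketch) */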
5121
5122 #if 0
5123 /*
5124 *#
5125 *#% readdirattr vp L L L
5126 *#
5127 */
5128 struct vnop_readdirattr_args {
5129 struct vnodeop_desc *a_desc;
5130 vnode_t a_vp;
5131 struct attrlist *a_alist;
5132 struct uio *a_uio;
5133 uint32_t a_maxcount;
5134 uint32_t a_options;
5135 uint32_t *a_newstate;
5136 int *a_eofflag;
5137 uint32_t *a_actualcount;
5138 vfs_context_t a_context;
5139 };
5140
5141 #endif /* 0*/
5142 errno_t
5143 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
5144 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
5145 {
5146 int _err;
5147 struct vnop_readdirattr_args a;
5148 #if CONFIG_DTRACE
5149 user_ssize_t resid = uio_resid(uio);
5150 #endif
5151
5152 a.a_desc = &vnop_readdirattr_desc;
5153 a.a_vp = vp;
5154 a.a_alist = alist;
5155 a.a_uio = uio;
5156 a.a_maxcount = maxcount;
5157 a.a_options = options;
5158 a.a_newstate = newstate;
5159 a.a_eofflag = eofflag;
5160 a.a_actualcount = actualcount;
5161 a.a_context = ctx;
5162
5163 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
5164 DTRACE_FSINFO_IO(readdirattr,
5165 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5166
5167 return _err;
5168 }
5169
5170 #if 0
5171 struct vnop_getattrlistbulk_args {
5172 struct vnodeop_desc *a_desc;
5173 vnode_t a_vp;
5174 struct attrlist *a_alist;
5175 struct vnode_attr *a_vap;
5176 struct uio *a_uio;
5177 void *a_private;
5178 uint64_t a_options;
5179 int *a_eofflag;
5180 uint32_t *a_actualcount;
5181 vfs_context_t a_context;
5182 };
5183 #endif /* 0*/
5184 errno_t
5185 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
5186 struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
5187 int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
5188 {
5189 int _err;
5190 struct vnop_getattrlistbulk_args a;
5191 #if CONFIG_DTRACE
5192 user_ssize_t resid = uio_resid(uio);
5193 #endif
5194
5195 a.a_desc = &vnop_getattrlistbulk_desc;
5196 a.a_vp = vp;
5197 a.a_alist = alist;
5198 a.a_vap = vap;
5199 a.a_uio = uio;
5200 a.a_private = private;
5201 a.a_options = options;
5202 a.a_eofflag = eofflag;
5203 a.a_actualcount = actualcount;
5204 a.a_context = ctx;
5205
5206 _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
5207 DTRACE_FSINFO_IO(getattrlistbulk,
5208 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5209
5210 return _err;
5211 }
5212
5213 #if 0
5214 /*
5215 *#
5216 *#% readlink vp L L L
5217 *#
5218 */
5219 struct vnop_readlink_args {
5220 struct vnodeop_desc *a_desc;
5221 vnode_t a_vp;
5222 struct uio *a_uio;
5223 vfs_context_t a_context;
5224 };
5225 #endif /* 0 */
5226
5227 /*
5228 * Returns: 0 Success
5229 * lock_fsnode:ENOENT No such file or directory [only for VFS
5230 * that is not thread safe & vnode is
5231 * currently being/has been terminated]
5232 * <vfs_readlink>:EINVAL
5233 * <vfs_readlink>:???
5234 *
5235 * Note: The return codes from the underlying VFS's readlink routine
5236 * can't be fully enumerated here, since third party VFS authors
5237 * may not limit their error returns to the ones documented here,
5238 * even though this may result in some programs functioning
5239 * incorrectly.
5240 *
5241 * The return codes documented above are those which may currently
5242 * be returned by HFS from hfs_vnop_readlink, not including
5243 * additional error codes which may be propagated from underlying
5244 * routines.
5245 */
5246 errno_t
5247 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
5248 {
5249 int _err;
5250 struct vnop_readlink_args a;
5251 #if CONFIG_DTRACE
5252 user_ssize_t resid = uio_resid(uio);
5253 #endif
5254 a.a_desc = &vnop_readlink_desc;
5255 a.a_vp = vp;
5256 a.a_uio = uio;
5257 a.a_context = ctx;
5258
5259 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
5260 DTRACE_FSINFO_IO(readlink,
5261 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5262
5263 return _err;
5264 }
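/*
 * A minimal sketch of calling VNOP_READLINK from kernel code: wrap a kernel
 * buffer in a uio, issue the VNOP, and derive the target length from the
 * residual count.  The wrapper name, buffer handling and NUL-termination
 * are illustrative assumptions, not requirements of the KPI.
 */
#if 0
static int
read_symlink_target(vnode_t vp, char *buf, size_t buflen, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL) {
		return ENOMEM;
	}
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);
	error = VNOP_READLINK(vp, auio, ctx);
	if (error == 0 && buflen > 0) {
		size_t copied = buflen - (size_t)uio_resid(auio);
		buf[MIN(copied, buflen - 1)] = '\0';	/* NUL-terminate the target */
	}
	uio_free(auio);
	return error;
}
#endif /* 0 (illustrative sketch) */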
5265
5266 #if 0
5267 /*
5268 *#
5269 *#% inactive vp L U U
5270 *#
5271 */
5272 struct vnop_inactive_args {
5273 struct vnodeop_desc *a_desc;
5274 vnode_t a_vp;
5275 vfs_context_t a_context;
5276 };
5277 #endif /* 0*/
5278 errno_t
5279 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
5280 {
5281 int _err;
5282 struct vnop_inactive_args a;
5283
5284 a.a_desc = &vnop_inactive_desc;
5285 a.a_vp = vp;
5286 a.a_context = ctx;
5287
5288 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
5289 DTRACE_FSINFO(inactive, vnode_t, vp);
5290
5291 #if NAMEDSTREAMS
5292 /* For file systems that do not support named streams natively, mark
5293 * the shadow stream file vnode to be recycled as soon as the last
5294 * reference goes away. To avoid re-entering reclaim code, do not
5295 * call recycle on terminating named stream vnodes.
5296 */
5297 if (vnode_isnamedstream(vp) &&
5298 (vp->v_parent != NULLVP) &&
5299 vnode_isshadow(vp) &&
5300 ((vp->v_lflag & VL_TERMINATE) == 0)) {
5301 vnode_recycle(vp);
5302 }
5303 #endif
5304
5305 return _err;
5306 }
5307
5308
5309 #if 0
5310 /*
5311 *#
5312 *#% reclaim vp U U U
5313 *#
5314 */
5315 struct vnop_reclaim_args {
5316 struct vnodeop_desc *a_desc;
5317 vnode_t a_vp;
5318 vfs_context_t a_context;
5319 };
5320 #endif /* 0*/
5321 errno_t
5322 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5323 {
5324 int _err;
5325 struct vnop_reclaim_args a;
5326
5327 a.a_desc = &vnop_reclaim_desc;
5328 a.a_vp = vp;
5329 a.a_context = ctx;
5330
5331 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5332 DTRACE_FSINFO(reclaim, vnode_t, vp);
5333
5334 return _err;
5335 }
5336
5337
5338 /*
5339 * Returns: 0 Success
5340 * lock_fsnode:ENOENT No such file or directory [only for VFS
5341 * that is not thread safe & vnode is
5342 * currently being/has been terminated]
5343 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5344 */
5345 #if 0
5346 /*
5347 *#
5348 *#% pathconf vp L L L
5349 *#
5350 */
5351 struct vnop_pathconf_args {
5352 struct vnodeop_desc *a_desc;
5353 vnode_t a_vp;
5354 int a_name;
5355 int32_t *a_retval;
5356 vfs_context_t a_context;
5357 };
5358 #endif /* 0*/
5359 errno_t
5360 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5361 {
5362 int _err;
5363 struct vnop_pathconf_args a;
5364
5365 a.a_desc = &vnop_pathconf_desc;
5366 a.a_vp = vp;
5367 a.a_name = name;
5368 a.a_retval = retval;
5369 a.a_context = ctx;
5370
5371 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5372 DTRACE_FSINFO(pathconf, vnode_t, vp);
5373
5374 return _err;
5375 }
5376
5377 /*
5378 * Returns: 0 Success
5379 * err_advlock:ENOTSUP
5380 * lf_advlock:???
5381 * <vnop_advlock_desc>:???
5382 *
5383 * Notes: VFS implementations that provide advisory locking via calls through
5384 * <vnop_advlock_desc> (because lock enforcement does not occur
5385 * locally) should try to limit themselves to the return codes
5386 * documented above for lf_advlock and err_advlock.
5387 */
5388 #if 0
5389 /*
5390 *#
5391 *#% advlock vp U U U
5392 *#
5393 */
5394 struct vnop_advlock_args {
5395 struct vnodeop_desc *a_desc;
5396 vnode_t a_vp;
5397 caddr_t a_id;
5398 int a_op;
5399 struct flock *a_fl;
5400 int a_flags;
5401 vfs_context_t a_context;
5402 };
5403 #endif /* 0*/
5404 errno_t
5405 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
5406 {
5407 int _err;
5408 struct vnop_advlock_args a;
5409
5410 a.a_desc = &vnop_advlock_desc;
5411 a.a_vp = vp;
5412 a.a_id = id;
5413 a.a_op = op;
5414 a.a_fl = fl;
5415 a.a_flags = flags;
5416 a.a_context = ctx;
5417 a.a_timeout = timeout;
5418
5419 /* Disallow advisory locking on non-seekable vnodes */
5420 if (vnode_isfifo(vp)) {
5421 _err = err_advlock(&a);
5422 } else {
5423 if ((vp->v_flag & VLOCKLOCAL)) {
5424 /* Advisory locking done at this layer */
5425 _err = lf_advlock(&a);
5426 } else if (flags & F_OFD_LOCK) {
5427 /* Non-local locking doesn't work for OFD locks */
5428 _err = err_advlock(&a);
5429 } else {
5430 /* Advisory locking done by underlying filesystem */
5431 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
5432 }
5433 DTRACE_FSINFO(advlock, vnode_t, vp);
5434 if (op == F_UNLCK && flags == F_FLOCK) {
5435 post_event_if_success(vp, _err, NOTE_FUNLOCK);
5436 }
5437 }
5438
5439 return _err;
5440 }
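/*
 * A minimal sketch of requesting an advisory whole-file write lock through
 * VNOP_ADVLOCK, in the style of the fcntl(F_SETLK) path.  The lock owner id,
 * the F_POSIX flag choice and the lack of a timeout are illustrative
 * assumptions; the wrapper name is hypothetical.
 */
#if 0
static int
lock_whole_file(vnode_t vp, proc_t p, vfs_context_t ctx)
{
	struct flock fl;

	bzero(&fl, sizeof(fl));
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;		/* zero length means "to end of file" */

	/* POSIX-style lock owned by process 'p'; do not block waiting for it */
	return VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, F_POSIX, ctx, NULL);
}
#endif /* 0 (illustrative sketch) */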
5441
5442
5443
5444 #if 0
5445 /*
5446 *#
5447 *#% allocate vp L L L
5448 *#
5449 */
5450 struct vnop_allocate_args {
5451 struct vnodeop_desc *a_desc;
5452 vnode_t a_vp;
5453 off_t a_length;
5454 u_int32_t a_flags;
5455 off_t *a_bytesallocated;
5456 off_t a_offset;
5457 vfs_context_t a_context;
5458 };
5459
5460 #endif /* 0*/
5461 errno_t
5462 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5463 {
5464 int _err;
5465 struct vnop_allocate_args a;
5466
5467 a.a_desc = &vnop_allocate_desc;
5468 a.a_vp = vp;
5469 a.a_length = length;
5470 a.a_flags = flags;
5471 a.a_bytesallocated = bytesallocated;
5472 a.a_offset = offset;
5473 a.a_context = ctx;
5474
5475 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5476 DTRACE_FSINFO(allocate, vnode_t, vp);
5477 #if CONFIG_FSE
5478 if (_err == 0) {
5479 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5480 }
5481 #endif
5482
5483 return _err;
5484 }
5485
5486 #if 0
5487 /*
5488 *#
5489 *#% pagein vp = = =
5490 *#
5491 */
5492 struct vnop_pagein_args {
5493 struct vnodeop_desc *a_desc;
5494 vnode_t a_vp;
5495 upl_t a_pl;
5496 upl_offset_t a_pl_offset;
5497 off_t a_f_offset;
5498 size_t a_size;
5499 int a_flags;
5500 vfs_context_t a_context;
5501 };
5502 #endif /* 0*/
5503 errno_t
5504 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5505 {
5506 int _err;
5507 struct vnop_pagein_args a;
5508
5509 a.a_desc = &vnop_pagein_desc;
5510 a.a_vp = vp;
5511 a.a_pl = pl;
5512 a.a_pl_offset = pl_offset;
5513 a.a_f_offset = f_offset;
5514 a.a_size = size;
5515 a.a_flags = flags;
5516 a.a_context = ctx;
5517
5518 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5519 DTRACE_FSINFO(pagein, vnode_t, vp);
5520
5521 return _err;
5522 }
5523
5524 #if 0
5525 /*
5526 *#
5527 *#% pageout vp = = =
5528 *#
5529 */
5530 struct vnop_pageout_args {
5531 struct vnodeop_desc *a_desc;
5532 vnode_t a_vp;
5533 upl_t a_pl;
5534 upl_offset_t a_pl_offset;
5535 off_t a_f_offset;
5536 size_t a_size;
5537 int a_flags;
5538 vfs_context_t a_context;
5539 };
5540
5541 #endif /* 0*/
5542 errno_t
5543 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5544 {
5545 int _err;
5546 struct vnop_pageout_args a;
5547
5548 a.a_desc = &vnop_pageout_desc;
5549 a.a_vp = vp;
5550 a.a_pl = pl;
5551 a.a_pl_offset = pl_offset;
5552 a.a_f_offset = f_offset;
5553 a.a_size = size;
5554 a.a_flags = flags;
5555 a.a_context = ctx;
5556
5557 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5558 DTRACE_FSINFO(pageout, vnode_t, vp);
5559
5560 post_event_if_success(vp, _err, NOTE_WRITE);
5561
5562 return _err;
5563 }
5564
5565 int
5566 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5567 {
5568 if (vnode_compound_remove_available(dvp)) {
5569 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5570 } else {
5571 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5572 }
5573 }
5574
5575 #if CONFIG_SEARCHFS
5576
5577 #if 0
5578 /*
5579 *#
5580 *#% searchfs vp L L L
5581 *#
5582 */
5583 struct vnop_searchfs_args {
5584 struct vnodeop_desc *a_desc;
5585 vnode_t a_vp;
5586 void *a_searchparams1;
5587 void *a_searchparams2;
5588 struct attrlist *a_searchattrs;
5589 uint32_t a_maxmatches;
5590 struct timeval *a_timelimit;
5591 struct attrlist *a_returnattrs;
5592 uint32_t *a_nummatches;
5593 uint32_t a_scriptcode;
5594 uint32_t a_options;
5595 struct uio *a_uio;
5596 struct searchstate *a_searchstate;
5597 vfs_context_t a_context;
5598 };
5599
5600 #endif /* 0*/
5601 errno_t
5602 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5603 {
5604 int _err;
5605 struct vnop_searchfs_args a;
5606
5607 a.a_desc = &vnop_searchfs_desc;
5608 a.a_vp = vp;
5609 a.a_searchparams1 = searchparams1;
5610 a.a_searchparams2 = searchparams2;
5611 a.a_searchattrs = searchattrs;
5612 a.a_maxmatches = maxmatches;
5613 a.a_timelimit = timelimit;
5614 a.a_returnattrs = returnattrs;
5615 a.a_nummatches = nummatches;
5616 a.a_scriptcode = scriptcode;
5617 a.a_options = options;
5618 a.a_uio = uio;
5619 a.a_searchstate = searchstate;
5620 a.a_context = ctx;
5621
5622 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5623 DTRACE_FSINFO(searchfs, vnode_t, vp);
5624
5625 return _err;
5626 }
5627 #endif /* CONFIG_SEARCHFS */
5628
5629 #if 0
5630 /*
5631 *#
5632 *#% copyfile fvp U U U
5633 *#% copyfile tdvp L U U
5634 *#% copyfile tvp X U U
5635 *#
5636 */
5637 struct vnop_copyfile_args {
5638 struct vnodeop_desc *a_desc;
5639 vnode_t a_fvp;
5640 vnode_t a_tdvp;
5641 vnode_t a_tvp;
5642 struct componentname *a_tcnp;
5643 int a_mode;
5644 int a_flags;
5645 vfs_context_t a_context;
5646 };
5647 #endif /* 0*/
5648 errno_t
5649 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5650 int mode, int flags, vfs_context_t ctx)
5651 {
5652 int _err;
5653 struct vnop_copyfile_args a;
5654 a.a_desc = &vnop_copyfile_desc;
5655 a.a_fvp = fvp;
5656 a.a_tdvp = tdvp;
5657 a.a_tvp = tvp;
5658 a.a_tcnp = tcnp;
5659 a.a_mode = mode;
5660 a.a_flags = flags;
5661 a.a_context = ctx;
5662 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5663 DTRACE_FSINFO(copyfile, vnode_t, fvp);
5664 return _err;
5665 }
5666
5667 #if 0
5668 struct vnop_clonefile_args {
5669 struct vnodeop_desc *a_desc;
5670 vnode_t a_fvp;
5671 vnode_t a_dvp;
5672 vnode_t *a_vpp;
5673 struct componentname *a_cnp;
5674 struct vnode_attr *a_vap;
5675 uint32_t a_flags;
5676 vfs_context_t a_context;
5677 int (*a_dir_clone_authorizer)( /* Authorization callback */
5678 struct vnode_attr *vap, /* attribute to be authorized */
5679 kauth_action_t action, /* action for which attribute is to be authorized */
5680 struct vnode_attr *dvap, /* target directory attributes */
5681 vnode_t sdvp, /* source directory vnode pointer (optional) */
5682 mount_t mp, /* mount point of filesystem */
5683 dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */
5684 uint32_t flags; /* value passed in a_flags to the VNOP */
5685 vfs_context_t ctx, /* As passed to VNOP */
5686 void *reserved); /* Always NULL */
5687 void *a_reserved; /* Currently unused */
5688 };
5689 #endif /* 0 */
5690
5691 errno_t
5692 VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
5693 struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
5694 vfs_context_t ctx)
5695 {
5696 int _err;
5697 struct vnop_clonefile_args a;
5698 a.a_desc = &vnop_clonefile_desc;
5699 a.a_fvp = fvp;
5700 a.a_dvp = dvp;
5701 a.a_vpp = vpp;
5702 a.a_cnp = cnp;
5703 a.a_vap = vap;
5704 a.a_flags = flags;
5705 a.a_context = ctx;
5706
5707 if (vnode_vtype(fvp) == VDIR) {
5708 a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
5709 } else {
5710 a.a_dir_clone_authorizer = NULL;
5711 }
5712
5713 _err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);
5714
5715 if (_err == 0 && *vpp) {
5716 DTRACE_FSINFO(clonefile, vnode_t, *vpp);
5717 if (kdebug_enable) {
5718 kdebug_lookup(*vpp, cnp);
5719 }
5720 }
5721
5722 post_event_if_success(dvp, _err, NOTE_WRITE);
5723
5724 return _err;
5725 }
5726
5727 errno_t
5728 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5729 {
5730 struct vnop_getxattr_args a;
5731 int error;
5732
5733 a.a_desc = &vnop_getxattr_desc;
5734 a.a_vp = vp;
5735 a.a_name = name;
5736 a.a_uio = uio;
5737 a.a_size = size;
5738 a.a_options = options;
5739 a.a_context = ctx;
5740
5741 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
5742 DTRACE_FSINFO(getxattr, vnode_t, vp);
5743
5744 return error;
5745 }
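/*
 * A minimal sketch of reading an extended attribute through VNOP_GETXATTR:
 * a uio describes the destination kernel buffer and the number of bytes
 * copied is derived from the residual count (passing a NULL uio instead
 * asks only for the attribute's size).  The wrapper name, options value of
 * 0 and buffer handling are illustrative assumptions.
 */
#if 0
static int
get_xattr_value(vnode_t vp, const char *name, void *buf, size_t buflen,
    size_t *copiedp, vfs_context_t ctx)
{
	uio_t auio;
	size_t attrsize = 0;
	int error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL) {
		return ENOMEM;
	}
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);
	error = VNOP_GETXATTR(vp, name, auio, &attrsize, 0, ctx);
	if (error == 0 && copiedp != NULL) {
		*copiedp = buflen - (size_t)uio_resid(auio);
	}
	uio_free(auio);
	return error;
}
#endif /* 0 (illustrative sketch) */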
5746
5747 errno_t
5748 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
5749 {
5750 struct vnop_setxattr_args a;
5751 int error;
5752
5753 a.a_desc = &vnop_setxattr_desc;
5754 a.a_vp = vp;
5755 a.a_name = name;
5756 a.a_uio = uio;
5757 a.a_options = options;
5758 a.a_context = ctx;
5759
5760 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
5761 DTRACE_FSINFO(setxattr, vnode_t, vp);
5762
5763 if (error == 0) {
5764 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
5765 }
5766
5767 post_event_if_success(vp, error, NOTE_ATTRIB);
5768
5769 return error;
5770 }
5771
5772 errno_t
5773 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
5774 {
5775 struct vnop_removexattr_args a;
5776 int error;
5777
5778 a.a_desc = &vnop_removexattr_desc;
5779 a.a_vp = vp;
5780 a.a_name = name;
5781 a.a_options = options;
5782 a.a_context = ctx;
5783
5784 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
5785 DTRACE_FSINFO(removexattr, vnode_t, vp);
5786
5787 post_event_if_success(vp, error, NOTE_ATTRIB);
5788
5789 return error;
5790 }
5791
5792 errno_t
5793 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5794 {
5795 struct vnop_listxattr_args a;
5796 int error;
5797
5798 a.a_desc = &vnop_listxattr_desc;
5799 a.a_vp = vp;
5800 a.a_uio = uio;
5801 a.a_size = size;
5802 a.a_options = options;
5803 a.a_context = ctx;
5804
5805 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
5806 DTRACE_FSINFO(listxattr, vnode_t, vp);
5807
5808 return error;
5809 }
5810
5811
5812 #if 0
5813 /*
5814 *#
5815 *#% blktooff vp = = =
5816 *#
5817 */
5818 struct vnop_blktooff_args {
5819 struct vnodeop_desc *a_desc;
5820 vnode_t a_vp;
5821 daddr64_t a_lblkno;
5822 off_t *a_offset;
5823 };
5824 #endif /* 0*/
5825 errno_t
5826 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
5827 {
5828 int _err;
5829 struct vnop_blktooff_args a;
5830
5831 a.a_desc = &vnop_blktooff_desc;
5832 a.a_vp = vp;
5833 a.a_lblkno = lblkno;
5834 a.a_offset = offset;
5835
5836 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
5837 DTRACE_FSINFO(blktooff, vnode_t, vp);
5838
5839 return _err;
5840 }
5841
5842 #if 0
5843 /*
5844 *#
5845 *#% offtoblk vp = = =
5846 *#
5847 */
5848 struct vnop_offtoblk_args {
5849 struct vnodeop_desc *a_desc;
5850 vnode_t a_vp;
5851 off_t a_offset;
5852 daddr64_t *a_lblkno;
5853 };
5854 #endif /* 0*/
5855 errno_t
5856 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
5857 {
5858 int _err;
5859 struct vnop_offtoblk_args a;
5860
5861 a.a_desc = &vnop_offtoblk_desc;
5862 a.a_vp = vp;
5863 a.a_offset = offset;
5864 a.a_lblkno = lblkno;
5865
5866 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
5867 DTRACE_FSINFO(offtoblk, vnode_t, vp);
5868
5869 return _err;
5870 }
5871
5872 #if 0
5873 /*
5874 *#
5875 *#% blockmap vp L L L
5876 *#
5877 */
5878 struct vnop_blockmap_args {
5879 struct vnodeop_desc *a_desc;
5880 vnode_t a_vp;
5881 off_t a_foffset;
5882 size_t a_size;
5883 daddr64_t *a_bpn;
5884 size_t *a_run;
5885 void *a_poff;
5886 int a_flags;
5887 vfs_context_t a_context;
5888 };
5889 #endif /* 0*/
5890 errno_t
5891 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
5892 {
5893 int _err;
5894 struct vnop_blockmap_args a;
5895 size_t localrun = 0;
5896
5897 if (ctx == NULL) {
5898 ctx = vfs_context_current();
5899 }
5900 a.a_desc = &vnop_blockmap_desc;
5901 a.a_vp = vp;
5902 a.a_foffset = foffset;
5903 a.a_size = size;
5904 a.a_bpn = bpn;
5905 a.a_run = &localrun;
5906 a.a_poff = poff;
5907 a.a_flags = flags;
5908 a.a_context = ctx;
5909
5910 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
5911 DTRACE_FSINFO(blockmap, vnode_t, vp);
5912
5913 /*
5914 * We used a local variable to request information from the underlying
5915 * filesystem about the length of the I/O run in question. If
5916 * we get malformed output from the filesystem, we cap it to the length
5917 * requested, at most. Update 'run' on the way out.
5918 */
5919 if (_err == 0) {
5920 if (localrun > size) {
5921 localrun = size;
5922 }
5923
5924 if (run) {
5925 *run = localrun;
5926 }
5927 }
5928
5929 return _err;
5930 }
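/*
 * A minimal sketch of mapping a file offset to its on-disk block with
 * VNOP_BLOCKMAP; 'run' reports how many contiguous bytes follow the mapped
 * block, already capped to the requested size by the wrapper above.  The
 * VNODE_READ flag choice and the helper name are illustrative assumptions.
 */
#if 0
static int
map_one_block(vnode_t vp, off_t foffset, size_t iosize, vfs_context_t ctx)
{
	daddr64_t blkno = 0;
	size_t run = 0;
	int error;

	/* Ask the filesystem where 'iosize' bytes at 'foffset' live on disk. */
	error = VNOP_BLOCKMAP(vp, foffset, iosize, &blkno, &run, NULL,
	    VNODE_READ, ctx);
	if (error == 0) {
		/*
		 * blkno is the device block backing 'foffset'; run is how many
		 * contiguous bytes can be transferred starting there.
		 */
	}
	return error;
}
#endif /* 0 (illustrative sketch) */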
5931
5932 #if 0
5933 struct vnop_strategy_args {
5934 struct vnodeop_desc *a_desc;
5935 struct buf *a_bp;
5936 };
5937
5938 #endif /* 0*/
5939 errno_t
5940 VNOP_STRATEGY(struct buf *bp)
5941 {
5942 int _err;
5943 struct vnop_strategy_args a;
5944 vnode_t vp = buf_vnode(bp);
5945 a.a_desc = &vnop_strategy_desc;
5946 a.a_bp = bp;
5947 _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
5948 DTRACE_FSINFO(strategy, vnode_t, vp);
5949 return _err;
5950 }
5951
5952 #if 0
5953 struct vnop_bwrite_args {
5954 struct vnodeop_desc *a_desc;
5955 buf_t a_bp;
5956 };
5957 #endif /* 0*/
5958 errno_t
5959 VNOP_BWRITE(struct buf *bp)
5960 {
5961 int _err;
5962 struct vnop_bwrite_args a;
5963 vnode_t vp = buf_vnode(bp);
5964 a.a_desc = &vnop_bwrite_desc;
5965 a.a_bp = bp;
5966 _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
5967 DTRACE_FSINFO(bwrite, vnode_t, vp);
5968 return _err;
5969 }
5970
5971 #if 0
5972 struct vnop_kqfilt_add_args {
5973 struct vnodeop_desc *a_desc;
5974 struct vnode *a_vp;
5975 struct knote *a_kn;
5976 vfs_context_t a_context;
5977 };
5978 #endif
5979 errno_t
5980 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
5981 {
5982 int _err;
5983 struct vnop_kqfilt_add_args a;
5984
5985 a.a_desc = VDESC(vnop_kqfilt_add);
5986 a.a_vp = vp;
5987 a.a_kn = kn;
5988 a.a_context = ctx;
5989
5990 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
5991 DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
5992
5993 return _err;
5994 }
5995
5996 #if 0
5997 struct vnop_kqfilt_remove_args {
5998 struct vnodeop_desc *a_desc;
5999 struct vnode *a_vp;
6000 uintptr_t a_ident;
6001 vfs_context_t a_context;
6002 };
6003 #endif
6004 errno_t
6005 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
6006 {
6007 int _err;
6008 struct vnop_kqfilt_remove_args a;
6009
6010 a.a_desc = VDESC(vnop_kqfilt_remove);
6011 a.a_vp = vp;
6012 a.a_ident = ident;
6013 a.a_context = ctx;
6014
6015 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
6016 DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
6017
6018 return _err;
6019 }
6020
6021 errno_t
6022 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
6023 {
6024 int _err;
6025 struct vnop_monitor_args a;
6026
6027 a.a_desc = VDESC(vnop_monitor);
6028 a.a_vp = vp;
6029 a.a_events = events;
6030 a.a_flags = flags;
6031 a.a_handle = handle;
6032 a.a_context = ctx;
6033
6034 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
6035 DTRACE_FSINFO(monitor, vnode_t, vp);
6036
6037 return _err;
6038 }
6039
6040 #if 0
6041 struct vnop_setlabel_args {
6042 struct vnodeop_desc *a_desc;
6043 struct vnode *a_vp;
6044 struct label *a_vl;
6045 vfs_context_t a_context;
6046 };
6047 #endif
6048 errno_t
6049 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
6050 {
6051 int _err;
6052 struct vnop_setlabel_args a;
6053
6054 a.a_desc = VDESC(vnop_setlabel);
6055 a.a_vp = vp;
6056 a.a_vl = label;
6057 a.a_context = ctx;
6058
6059 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
6060 DTRACE_FSINFO(setlabel, vnode_t, vp);
6061
6062 return _err;
6063 }
6064
6065
6066 #if NAMEDSTREAMS
6067 /*
6068 * Get a named stream
6069 */
6070 errno_t
6071 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
6072 {
6073 int _err;
6074 struct vnop_getnamedstream_args a;
6075
6076 a.a_desc = &vnop_getnamedstream_desc;
6077 a.a_vp = vp;
6078 a.a_svpp = svpp;
6079 a.a_name = name;
6080 a.a_operation = operation;
6081 a.a_flags = flags;
6082 a.a_context = ctx;
6083
6084 _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
6085 DTRACE_FSINFO(getnamedstream, vnode_t, vp);
6086 return _err;
6087 }
6088
6089 /*
6090 * Create a named stream
6091 */
6092 errno_t
6093 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
6094 {
6095 int _err;
6096 struct vnop_makenamedstream_args a;
6097
6098 a.a_desc = &vnop_makenamedstream_desc;
6099 a.a_vp = vp;
6100 a.a_svpp = svpp;
6101 a.a_name = name;
6102 a.a_flags = flags;
6103 a.a_context = ctx;
6104
6105 _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
6106 DTRACE_FSINFO(makenamedstream, vnode_t, vp);
6107 return _err;
6108 }
6109
6110
6111 /*
6112 * Remove a named stream
6113 */
6114 errno_t
6115 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6116 {
6117 int _err;
6118 struct vnop_removenamedstream_args a;
6119
6120 a.a_desc = &vnop_removenamedstream_desc;
6121 a.a_vp = vp;
6122 a.a_svp = svp;
6123 a.a_name = name;
6124 a.a_flags = flags;
6125 a.a_context = ctx;
6126
6127 _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6128 DTRACE_FSINFO(removenamedstream, vnode_t, vp);
6129 return _err;
6130 }
6131 #endif