bsd/vfs/kpi_vfs.c (apple/xnu, xnu-7195.60.75)
1 /*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/disk.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf.h>
93 #include <sys/errno.h>
94 #include <kern/kalloc.h>
95 #include <sys/domain.h>
96 #include <sys/mbuf.h>
97 #include <sys/syslog.h>
98 #include <sys/ubc.h>
99 #include <sys/vm.h>
100 #include <sys/sysctl.h>
101 #include <sys/filedesc.h>
102 #include <sys/event.h>
103 #include <sys/fsevents.h>
104 #include <sys/user.h>
105 #include <sys/lockf.h>
106 #include <sys/xattr.h>
107 #include <sys/kdebug.h>
108
109 #include <kern/assert.h>
110 #include <kern/zalloc.h>
111 #include <kern/task.h>
112 #include <kern/policy_internal.h>
113
114 #include <libkern/OSByteOrder.h>
115
116 #include <miscfs/specfs/specdev.h>
117
118 #include <mach/mach_types.h>
119 #include <mach/memory_object_types.h>
120 #include <mach/task.h>
121
122 #if CONFIG_MACF
123 #include <security/mac_framework.h>
124 #endif
125
126 #if NULLFS
127 #include <miscfs/nullfs/nullfs.h>
128 #endif
129
130 #include <sys/sdt.h>
131
132 #define ESUCCESS 0
133 #undef mount_t
134 #undef vnode_t
135
136 #define COMPAT_ONLY
137
138 #define NATIVE_XATTR(VP) \
139 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
140
141 #if CONFIG_APPLEDOUBLE
142 static void xattrfile_remove(vnode_t dvp, const char *basename,
143 vfs_context_t ctx, int force);
144 static void xattrfile_setattr(vnode_t dvp, const char * basename,
145 struct vnode_attr * vap, vfs_context_t ctx);
146 #endif /* CONFIG_APPLEDOUBLE */
147
148 extern lck_rw_t * rootvnode_rw_lock;
149
150 static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
151
152 static ZONE_VIEW_DEFINE(ZV_VFS_CONTEXT, "vfs_context",
153 KHEAP_ID_DEFAULT, sizeof(struct vfs_context));
154
155 /*
156 * vnode_setneedinactive
157 *
158 * Description: Indicate that when the last iocount on this vnode goes away,
159 * and the usecount is also zero, we should inform the filesystem
160 * via VNOP_INACTIVE.
161 *
162 * Parameters: vnode_t vnode to mark
163 *
164 * Returns: Nothing
165 *
166 * Notes: Notably used when we're deleting a file--we need not have a
167 * usecount, so VNOP_INACTIVE may not get called by anyone. We
168 * want it called when we drop our iocount.
169 */
170 void
171 vnode_setneedinactive(vnode_t vp)
172 {
173 cache_purge(vp);
174
175 vnode_lock_spin(vp);
176 vp->v_lflag |= VL_NEEDINACTIVE;
177 vnode_unlock(vp);
178 }
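
/*
 * For example, a filesystem's VNOP_REMOVE implementation might mark the
 * vnode once the directory entry is gone, so that VNOP_INACTIVE still fires
 * when the last iocount drops even though no usecount remains
 * ("example_unlink_node" is a hypothetical stand-in for the filesystem's
 * own unlink logic):
 *
 *	error = example_unlink_node(dvp, vp, cnp, ctx);
 *	if (error == 0) {
 *		vnode_setneedinactive(vp);
 *	}
 */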
179
180
181 /* ====================================================================== */
182 /* ************ EXTERNAL KERNEL APIS ********************************** */
183 /* ====================================================================== */
184
185 /*
186 * implementations of exported VFS operations
187 */
188 int
189 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
190 {
191 int error;
192
193 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) {
194 return ENOTSUP;
195 }
196
197 if (vfs_context_is64bit(ctx)) {
198 if (vfs_64bitready(mp)) {
199 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
200 } else {
201 error = ENOTSUP;
202 }
203 } else {
204 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
205 }
206
207 return error;
208 }
209
210 int
211 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
212 {
213 int error;
214
215 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) {
216 return ENOTSUP;
217 }
218
219 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
220
221 return error;
222 }
223
224 int
225 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
226 {
227 int error;
228
229 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) {
230 return ENOTSUP;
231 }
232
233 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
234
235 return error;
236 }
237
238 /*
239 * Returns: 0 Success
240 * ENOTSUP Not supported
241 * <vfs_root>:ENOENT
242 * <vfs_root>:???
243 *
244 * Note: The return codes from the underlying VFS's root routine can't
245 * be fully enumerated here, since third party VFS authors may not
246 * limit their error returns to the ones documented here, even
247 * though this may result in some programs functioning incorrectly.
248 *
249 * The return codes documented above are those which may currently
250 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
251 * for a call to hfs_vget on the volume mount point, not including
252 * additional error codes which may be propagated from underlying
253 * routines called by hfs_vget.
254 */
255 int
256 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
257 {
258 int error;
259
260 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) {
261 return ENOTSUP;
262 }
263
264 if (ctx == NULL) {
265 ctx = vfs_context_current();
266 }
267
268 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
269
270 return error;
271 }
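
/*
 * A minimal call pattern (sketch): the vnode returned through vpp carries
 * an iocount that the caller is expected to drop with vnode_put().
 *
 *	vnode_t rvp = NULLVP;
 *
 *	if (VFS_ROOT(mp, &rvp, vfs_context_current()) == 0) {
 *		... use rvp ...
 *		vnode_put(rvp);
 *	}
 */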
272
273 int
274 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
275 {
276 int error;
277
278 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) {
279 return ENOTSUP;
280 }
281
282 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
283
284 return error;
285 }
286
287 int
288 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
289 {
290 int error;
291
292 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) {
293 return ENOTSUP;
294 }
295
296 if (ctx == NULL) {
297 ctx = vfs_context_current();
298 }
299
300 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
301
302 return error;
303 }
304
305 int
306 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
307 {
308 int error;
309
310 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) {
311 return ENOTSUP;
312 }
313
314 if (ctx == NULL) {
315 ctx = vfs_context_current();
316 }
317
318 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
319
320 return error;
321 }
322
323 int
324 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
325 {
326 int error;
327
328 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) {
329 return ENOTSUP;
330 }
331
332 if (ctx == NULL) {
333 ctx = vfs_context_current();
334 }
335
336 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
337
338 return error;
339 }
340
341 int
342 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
343 {
344 int error;
345
346 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) {
347 return ENOTSUP;
348 }
349
350 if (ctx == NULL) {
351 ctx = vfs_context_current();
352 }
353
354 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
355
356 return error;
357 }
358
359 int
360 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
361 {
362 int error;
363
364 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) {
365 return ENOTSUP;
366 }
367
368 if (ctx == NULL) {
369 ctx = vfs_context_current();
370 }
371
372 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
373
374 return error;
375 }
376
377 int
378 VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
379 {
380 int error;
381
382 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) {
383 return ENOTSUP;
384 }
385
386 if (ctx == NULL) {
387 ctx = vfs_context_current();
388 }
389
390 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
391
392 return error;
393 }
394
395 int
396 VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
397 int flags, vfs_context_t context)
398 {
399 if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl) {
400 return ENOTSUP;
401 }
402
403 return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
404 context ?: vfs_context_current());
405 }
406
407 int
408 VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
409 {
410 int error;
411
412 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0)) {
413 return ENOTSUP;
414 }
415
416 if (ctx == NULL) {
417 ctx = vfs_context_current();
418 }
419
420 error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
421
422 return error;
423 }
424
425 /* returns the cached throttle mask for the mount_t */
426 uint64_t
427 vfs_throttle_mask(mount_t mp)
428 {
429 return mp->mnt_throttle_mask;
430 }
431
432 /* returns a copy of vfs type name for the mount_t */
433 void
434 vfs_name(mount_t mp, char *buffer)
435 {
436 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
437 }
438
439 /* returns vfs type number for the mount_t */
440 int
441 vfs_typenum(mount_t mp)
442 {
443 return mp->mnt_vtable->vfc_typenum;
444 }
445
446 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
447 void*
448 vfs_mntlabel(mount_t mp)
449 {
450 return (void*)mp->mnt_mntlabel;
451 }
452
453 uint64_t
454 vfs_mount_id(mount_t mp)
455 {
456 return mp->mnt_mount_id;
457 }
458
459 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
460 uint64_t
461 vfs_flags(mount_t mp)
462 {
463 return (uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
464 }
465
466 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
467 void
468 vfs_setflags(mount_t mp, uint64_t flags)
469 {
470 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
471
472 mount_lock(mp);
473 mp->mnt_flag |= lflags;
474 mount_unlock(mp);
475 }
476
477 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
478 void
479 vfs_clearflags(mount_t mp, uint64_t flags)
480 {
481 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
482
483 mount_lock(mp);
484 mp->mnt_flag &= ~lflags;
485 mount_unlock(mp);
486 }
487
488 /* Is the mount_t ronly and upgrade read/write requested? */
489 int
490 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
491 {
492 return (mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR);
493 }
494
495
496 /* Is the mount_t mounted ronly */
497 int
498 vfs_isrdonly(mount_t mp)
499 {
500 return mp->mnt_flag & MNT_RDONLY;
501 }
502
503 /* Is the mount_t mounted for filesystem synchronous writes? */
504 int
505 vfs_issynchronous(mount_t mp)
506 {
507 return mp->mnt_flag & MNT_SYNCHRONOUS;
508 }
509
510 /* Is the mount_t mounted read/write? */
511 int
512 vfs_isrdwr(mount_t mp)
513 {
514 return (mp->mnt_flag & MNT_RDONLY) == 0;
515 }
516
517
518 /* Is mount_t marked for update (ie MNT_UPDATE) */
519 int
520 vfs_isupdate(mount_t mp)
521 {
522 return mp->mnt_flag & MNT_UPDATE;
523 }
524
525
526 /* Is mount_t marked for reload (ie MNT_RELOAD) */
527 int
528 vfs_isreload(mount_t mp)
529 {
530 return (mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD);
531 }
532
533 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
534 int
535 vfs_isforce(mount_t mp)
536 {
537 if (mp->mnt_lflag & MNT_LFORCE) {
538 return 1;
539 } else {
540 return 0;
541 }
542 }
543
544 int
545 vfs_isunmount(mount_t mp)
546 {
547 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
548 return 1;
549 } else {
550 return 0;
551 }
552 }
553
554 int
555 vfs_64bitready(mount_t mp)
556 {
557 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
558 return 1;
559 } else {
560 return 0;
561 }
562 }
563
564
565 int
566 vfs_authcache_ttl(mount_t mp)
567 {
568 if ((mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
569 return mp->mnt_authcache_ttl;
570 } else {
571 return CACHED_RIGHT_INFINITE_TTL;
572 }
573 }
574
575 void
576 vfs_setauthcache_ttl(mount_t mp, int ttl)
577 {
578 mount_lock(mp);
579 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
580 mp->mnt_authcache_ttl = ttl;
581 mount_unlock(mp);
582 }
583
584 void
585 vfs_clearauthcache_ttl(mount_t mp)
586 {
587 mount_lock(mp);
588 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
589 /*
590 * back to the default TTL value in case
591 * MNTK_AUTH_OPAQUE is set on this mount
592 */
593 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
594 mount_unlock(mp);
595 }
596
597 int
598 vfs_authopaque(mount_t mp)
599 {
600 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE)) {
601 return 1;
602 } else {
603 return 0;
604 }
605 }
606
607 int
608 vfs_authopaqueaccess(mount_t mp)
609 {
610 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS)) {
611 return 1;
612 } else {
613 return 0;
614 }
615 }
616
617 void
618 vfs_setauthopaque(mount_t mp)
619 {
620 mount_lock(mp);
621 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
622 mount_unlock(mp);
623 }
624
625 void
626 vfs_setauthopaqueaccess(mount_t mp)
627 {
628 mount_lock(mp);
629 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
630 mount_unlock(mp);
631 }
632
633 void
634 vfs_clearauthopaque(mount_t mp)
635 {
636 mount_lock(mp);
637 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
638 mount_unlock(mp);
639 }
640
641 void
642 vfs_clearauthopaqueaccess(mount_t mp)
643 {
644 mount_lock(mp);
645 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
646 mount_unlock(mp);
647 }
648
649 void
650 vfs_setextendedsecurity(mount_t mp)
651 {
652 mount_lock(mp);
653 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
654 mount_unlock(mp);
655 }
656
657 void
658 vfs_setmntsystem(mount_t mp)
659 {
660 mount_lock(mp);
661 mp->mnt_kern_flag |= MNTK_SYSTEM;
662 mount_unlock(mp);
663 }
664
665 void
666 vfs_setmntsystemdata(mount_t mp)
667 {
668 mount_lock(mp);
669 mp->mnt_kern_flag |= MNTK_SYSTEMDATA;
670 mount_unlock(mp);
671 }
672
673 void
674 vfs_setmntswap(mount_t mp)
675 {
676 mount_lock(mp);
677 mp->mnt_kern_flag |= (MNTK_SYSTEM | MNTK_SWAP_MOUNT);
678 mount_unlock(mp);
679 }
680
681 void
682 vfs_clearextendedsecurity(mount_t mp)
683 {
684 mount_lock(mp);
685 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
686 mount_unlock(mp);
687 }
688
689 void
690 vfs_setnoswap(mount_t mp)
691 {
692 mount_lock(mp);
693 mp->mnt_kern_flag |= MNTK_NOSWAP;
694 mount_unlock(mp);
695 }
696
697 void
698 vfs_clearnoswap(mount_t mp)
699 {
700 mount_lock(mp);
701 mp->mnt_kern_flag &= ~MNTK_NOSWAP;
702 mount_unlock(mp);
703 }
704
705 int
706 vfs_extendedsecurity(mount_t mp)
707 {
708 return mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY;
709 }
710
711 /* returns the max size of short symlink in this mount_t */
712 uint32_t
713 vfs_maxsymlen(mount_t mp)
714 {
715 return mp->mnt_maxsymlinklen;
716 }
717
718 /* set max size of short symlink on mount_t */
719 void
720 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
721 {
722 mp->mnt_maxsymlinklen = symlen;
723 }
724
725 boolean_t
726 vfs_is_basesystem(mount_t mp)
727 {
728 return ((mp->mnt_supl_kern_flag & MNTK_SUPL_BASESYSTEM) == 0) ? false : true;
729 }
730
731 /* return a pointer to the RO vfs_statfs associated with mount_t */
732 struct vfsstatfs *
733 vfs_statfs(mount_t mp)
734 {
735 return &mp->mnt_vfsstat;
736 }
737
738 int
739 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
740 {
741 int error;
742
743 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) {
744 return error;
745 }
746
747 /*
748 * If we have a filesystem create time, use it to default some others.
749 */
750 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
751 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time)) {
752 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
753 }
754 }
755
756 return 0;
757 }
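
/*
 * A minimal usage sketch: request creation and modification times through
 * the vfs_attr protocol; because of the defaulting above, a filesystem that
 * only reports f_create_time still yields a usable f_modify_time.
 *
 *	struct vfs_attr va;
 *
 *	VFSATTR_INIT(&va);
 *	VFSATTR_WANTED(&va, f_create_time);
 *	VFSATTR_WANTED(&va, f_modify_time);
 *	if (vfs_getattr(mp, &va, ctx) == 0 &&
 *	    VFSATTR_IS_SUPPORTED(&va, f_modify_time)) {
 *		... va.f_modify_time is valid ...
 *	}
 */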
758
759 int
760 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
761 {
762 int error;
763
764 /*
765 * with a read-only system volume, we need to allow rename of the root volume
766 * even if it's read-only. Don't return EROFS here if setattr changes only
767 * the volume name
768 */
769 if (vfs_isrdonly(mp) &&
770 !((strcmp(mp->mnt_vfsstat.f_fstypename, "apfs") == 0) && (vfa->f_active == VFSATTR_f_vol_name))) {
771 return EROFS;
772 }
773
774 error = VFS_SETATTR(mp, vfa, ctx);
775
776 /*
777 * If we had alternate ways of setting vfs attributes, we'd
778 * fall back here.
779 */
780
781 return error;
782 }
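
/*
 * A usage sketch for the volume-name case noted above: only f_vol_name is
 * marked active, which is what permits the rename even on a read-only
 * APFS system volume.
 *
 *	char name[MAXPATHLEN] = "NewVolumeName";
 *	struct vfs_attr va;
 *
 *	VFSATTR_INIT(&va);
 *	VFSATTR_SET_ACTIVE(&va, f_vol_name);
 *	va.f_vol_name = name;
 *	error = vfs_setattr(mp, &va, ctx);
 */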
783
784 /* return the private data handle stored in mount_t */
785 void *
786 vfs_fsprivate(mount_t mp)
787 {
788 return mp->mnt_data;
789 }
790
791 /* set the private data handle in mount_t */
792 void
793 vfs_setfsprivate(mount_t mp, void *mntdata)
794 {
795 mount_lock(mp);
796 mp->mnt_data = mntdata;
797 mount_unlock(mp);
798 }
799
800 /* query whether the mount point supports native EAs */
801 int
802 vfs_nativexattrs(mount_t mp)
803 {
804 return mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS;
805 }
806
807 /*
808 * return the block size of the underlying
809 * device associated with mount_t
810 */
811 int
812 vfs_devblocksize(mount_t mp)
813 {
814 return mp->mnt_devblocksize;
815 }
816
817 /*
818 * Returns vnode with an iocount that must be released with vnode_put()
819 */
820 vnode_t
821 vfs_vnodecovered(mount_t mp)
822 {
823 vnode_t vp = mp->mnt_vnodecovered;
824 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
825 return NULL;
826 } else {
827 return vp;
828 }
829 }
830
831 /*
832 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
833 * The iocount must be released with vnode_put(). Note that this KPI is subtle
834 * with respect to the validity of using this device vnode for anything substantial
835 * (which is discouraged). If commands are sent to the device driver without
836 * taking proper steps to ensure that the device is still open, chaos may ensue.
837 * Similarly, this routine should only be called if there is some guarantee that
838 * the mount itself is still valid.
839 */
840 vnode_t
841 vfs_devvp(mount_t mp)
842 {
843 vnode_t vp = mp->mnt_devvp;
844
845 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
846 return vp;
847 }
848
849 return NULLVP;
850 }
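
/*
 * A careful call pattern (sketch): the iocount taken here must be dropped
 * with vnode_put(), and the caller must separately ensure that the mount
 * and the underlying device open remain valid for the duration.
 *
 *	vnode_t devvp = vfs_devvp(mp);
 *
 *	if (devvp != NULLVP) {
 *		... limited use of devvp ...
 *		vnode_put(devvp);
 *	}
 */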
851
852 /*
853 * return the io attributes associated with mount_t
854 */
855 void
856 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
857 {
858 ioattrp->io_reserved[0] = NULL;
859 ioattrp->io_reserved[1] = NULL;
860 if (mp == NULL) {
861 ioattrp->io_maxreadcnt = MAXPHYS;
862 ioattrp->io_maxwritecnt = MAXPHYS;
863 ioattrp->io_segreadcnt = 32;
864 ioattrp->io_segwritecnt = 32;
865 ioattrp->io_maxsegreadsize = MAXPHYS;
866 ioattrp->io_maxsegwritesize = MAXPHYS;
867 ioattrp->io_devblocksize = DEV_BSIZE;
868 ioattrp->io_flags = 0;
869 ioattrp->io_max_swappin_available = 0;
870 } else {
871 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
872 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
873 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
874 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
875 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
876 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
877 ioattrp->io_devblocksize = mp->mnt_devblocksize;
878 ioattrp->io_flags = mp->mnt_ioflags;
879 ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
880 }
881 }
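
/*
 * A usage sketch: clamp an I/O request to the mount's advertised limits.
 *
 *	struct vfsioattr ioattr;
 *
 *	vfs_ioattr(vnode_mount(vp), &ioattr);
 *	if (io_size > ioattr.io_maxreadcnt) {
 *		io_size = ioattr.io_maxreadcnt;
 *	}
 */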
882
883
884 /*
885 * set the IO attributes associated with mount_t
886 */
887 void
888 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
889 {
890 if (mp == NULL) {
891 return;
892 }
893 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
894 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
895 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
896 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
897 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
898 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
899 mp->mnt_devblocksize = ioattrp->io_devblocksize;
900 mp->mnt_ioflags = ioattrp->io_flags;
901 mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
902 }
903
904 /*
905 * Add a new filesystem into the kernel, as described by the passed-in
906 * vfs_fsentry structure.  It fills in the vnode dispatch vector to be used when
907 * vnodes are created for that filesystem, and returns a handle to be used
908 * when the FS is later removed (a registration sketch follows the function body below).
909 */
910 typedef int (*PFI)(void *);
911 extern int vfs_opv_numops;
912 errno_t
913 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
914 {
915 struct vfstable *newvfstbl = NULL;
916 int i, j;
917 int(***opv_desc_vector_p)(void *);
918 int(**opv_desc_vector)(void *);
919 const struct vnodeopv_entry_desc *opve_descp;
920 int desccount;
921 int descsize;
922 PFI *descptr;
923
924 /*
925 * This routine is responsible for all the initialization that would
926 * ordinarily be done as part of the system startup;
927 */
928
929 if (vfe == (struct vfs_fsentry *)0) {
930 return EINVAL;
931 }
932
933 desccount = vfe->vfe_vopcnt;
934 if ((desccount <= 0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
935 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) {
936 return EINVAL;
937 }
938
939 /* Non-threadsafe filesystems are not supported */
940 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
941 return EINVAL;
942 }
943
944 newvfstbl = kheap_alloc(KHEAP_TEMP, sizeof(struct vfstable),
945 Z_WAITOK | Z_ZERO);
946 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
947 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
948 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) {
949 newvfstbl->vfc_typenum = maxvfstypenum++;
950 } else {
951 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
952 }
953
954 newvfstbl->vfc_refcount = 0;
955 newvfstbl->vfc_flags = 0;
956 newvfstbl->vfc_mountroot = NULL;
957 newvfstbl->vfc_next = NULL;
958 newvfstbl->vfc_vfsflags = 0;
959 if (vfe->vfe_flags & VFS_TBL64BITREADY) {
960 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
961 }
962 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2) {
963 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
964 }
965 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2) {
966 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
967 }
968 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) {
969 newvfstbl->vfc_flags |= MNT_LOCAL;
970 }
971 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) {
972 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
973 } else {
974 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
975 }
976
977 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR) {
978 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
979 }
980 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT) {
981 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
982 }
983 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED) {
984 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
985 }
986 if (vfe->vfe_flags & VFS_TBLNOMACLABEL) {
987 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
988 }
989 if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) {
990 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
991 }
992 if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME) {
993 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
994 }
995 if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT) {
996 newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
997 }
998
999 /*
1000 * Allocate and init the vectors.
1001 * Also handle backwards compatibility.
1002 *
1003 * We allocate one large block to hold all <desccount>
1004 * vnode operation vectors stored contiguously.
1005 */
1006 /* XXX - shouldn't be M_TEMP */
1007
1008 descsize = desccount * vfs_opv_numops * sizeof(PFI);
1009 descptr = kheap_alloc(KHEAP_DEFAULT, descsize, Z_WAITOK | Z_ZERO);
1010
1011 newvfstbl->vfc_descptr = descptr;
1012 newvfstbl->vfc_descsize = descsize;
1013
1014 newvfstbl->vfc_sysctl = NULL;
1015
1016 for (i = 0; i < desccount; i++) {
1017 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1018 /*
1019 * Fill in the caller's pointer to the start of the i'th vector.
1020 * They'll need to supply it when calling vnode_create.
1021 */
1022 opv_desc_vector = descptr + i * vfs_opv_numops;
1023 *opv_desc_vector_p = opv_desc_vector;
1024
1025 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
1026 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
1027
1028 /* Silently skip known-disabled operations */
1029 if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
1030 printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
1031 vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
1032 continue;
1033 }
1034
1035 /*
1036 * Sanity check: is this operation listed
1037 * in the list of operations? We check this
1038 * by seeing if its offset is zero. Since
1039 * the default routine should always be listed
1040 * first, it should be the only one with a zero
1041 * offset. Any other operation with a zero
1042 * offset is probably not listed in
1043 * vfs_op_descs, and so is probably an error.
1044 *
1045 * A panic here means the layer programmer
1046 * has committed the all-too common bug
1047 * of adding a new operation to the layer's
1048 * list of vnode operations but
1049 * not adding the operation to the system-wide
1050 * list of supported operations.
1051 */
1052 if (opve_descp->opve_op->vdesc_offset == 0 &&
1053 opve_descp->opve_op != VDESC(vnop_default)) {
1054 printf("vfs_fsadd: operation %s not listed in %s.\n",
1055 opve_descp->opve_op->vdesc_name,
1056 "vfs_op_descs");
1057 panic("vfs_fsadd: bad operation");
1058 }
1059 /*
1060 * Fill in this entry.
1061 */
1062 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1063 opve_descp->opve_impl;
1064 }
1065
1066
1067 /*
1068 * Finally, go back and replace unfilled routines
1069 * with their default. (Sigh, an O(n^3) algorithm. I
1070 * could make it better, but that'd be work, and n is small.)
1071 */
1072 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1073
1074 /*
1075 * Force every operations vector to have a default routine.
1076 */
1077 opv_desc_vector = *opv_desc_vector_p;
1078 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
1079 panic("vfs_fsadd: operation vector without default routine.");
1080 }
1081 for (j = 0; j < vfs_opv_numops; j++) {
1082 if (opv_desc_vector[j] == NULL) {
1083 opv_desc_vector[j] =
1084 opv_desc_vector[VOFFSET(vnop_default)];
1085 }
1086 }
1087 } /* end of each vnodeopv_desc parsing */
1088
1089
1090
1091 *handle = vfstable_add(newvfstbl);
1092
1093 if (newvfstbl->vfc_typenum <= maxvfstypenum) {
1094 maxvfstypenum = newvfstbl->vfc_typenum + 1;
1095 }
1096
1097 if (newvfstbl->vfc_vfsops->vfs_init) {
1098 struct vfsconf vfsc;
1099 bzero(&vfsc, sizeof(struct vfsconf));
1100 vfsc.vfc_reserved1 = 0;
1101 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1102 vfsc.vfc_typenum = (*handle)->vfc_typenum;
1103 vfsc.vfc_refcount = (*handle)->vfc_refcount;
1104 vfsc.vfc_flags = (*handle)->vfc_flags;
1105 vfsc.vfc_reserved2 = 0;
1106 vfsc.vfc_reserved3 = 0;
1107
1108 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1109 }
1110
1111 kheap_free(KHEAP_TEMP, newvfstbl, sizeof(struct vfstable));
1112
1113 return 0;
1114 }
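
/*
 * An illustrative registration sketch (the "examplefs_" names are
 * hypothetical): a loadable filesystem typically builds a vfs_fsentry and
 * registers it from its kext start routine, then unregisters with
 * vfs_fsremove() at unload time.
 *
 *	static vfstable_t examplefs_handle;
 *	extern struct vfsops examplefs_vfsops;
 *	extern struct vnodeopv_desc examplefs_vnodeop_opv_desc;
 *
 *	struct vnodeopv_desc *examplefs_opvdescs[] = {
 *		&examplefs_vnodeop_opv_desc
 *	};
 *
 *	struct vfs_fsentry vfe = {
 *		.vfe_vfsops   = &examplefs_vfsops,
 *		.vfe_vopcnt   = 1,
 *		.vfe_opvdescs = examplefs_opvdescs,
 *		.vfe_fsname   = "examplefs",
 *		.vfe_flags    = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY |
 *		    VFS_TBLNOTYPENUM,
 *	};
 *
 *	errno_t error = vfs_fsadd(&vfe, &examplefs_handle);
 *	...
 *	error = vfs_fsremove(examplefs_handle);	 (at unload; EBUSY while mounted)
 */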
1115
1116 /*
1117 * Removes the filesystem from the kernel.
1118 * The argument passed in is the handle that was returned when the
1119 * file system was added.
1120 */
1121 errno_t
1122 vfs_fsremove(vfstable_t handle)
1123 {
1124 struct vfstable * vfstbl = (struct vfstable *)handle;
1125 void *old_desc = NULL;
1126 size_t descsize = 0;
1127 errno_t err;
1128
1129 /* Preflight check for any mounts */
1130 mount_list_lock();
1131 if (vfstbl->vfc_refcount != 0) {
1132 mount_list_unlock();
1133 return EBUSY;
1134 }
1135
1136 /*
1137 * save the old descriptor; the free cannot occur unconditionally,
1138 * since vfstable_del() may fail.
1139 */
1140 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1141 old_desc = vfstbl->vfc_descptr;
1142 descsize = vfstbl->vfc_descsize;
1143 }
1144 err = vfstable_del(vfstbl);
1145
1146 mount_list_unlock();
1147
1148 /* free the descriptor if the delete was successful */
1149 if (err == 0) {
1150 kheap_free(KHEAP_DEFAULT, old_desc, descsize);
1151 }
1152
1153 return err;
1154 }
1155
1156 void
1157 vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
1158 {
1159 mp->mnt_fsowner = uid;
1160 mp->mnt_fsgroup = gid;
1161 }
1162
1163 /*
1164 * Callers should be careful how they use this; accessing
1165 * mnt_last_write_completed_timestamp is not thread-safe. Writing to
1166 * it isn't either. Point is: be prepared to deal with strange values
1167 * being returned.
1168 */
1169 uint64_t
1170 vfs_idle_time(mount_t mp)
1171 {
1172 if (mp->mnt_pending_write_size) {
1173 return 0;
1174 }
1175
1176 struct timeval now;
1177
1178 microuptime(&now);
1179
1180 return (now.tv_sec
1181 - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
1182 + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec;
1183 }
1184
1185 int
1186 vfs_context_pid(vfs_context_t ctx)
1187 {
1188 return proc_pid(vfs_context_proc(ctx));
1189 }
1190
1191 int
1192 vfs_context_suser(vfs_context_t ctx)
1193 {
1194 return suser(ctx->vc_ucred, NULL);
1195 }
1196
1197 /*
1198 * Return bit field of signals posted to all threads in the context's process.
1199 *
1200 * XXX Signals should be tied to threads, not processes, for most uses of this
1201 * XXX call.
1202 */
1203 int
1204 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1205 {
1206 proc_t p = vfs_context_proc(ctx);
1207 if (p) {
1208 return proc_pendingsignals(p, mask);
1209 }
1210 return 0;
1211 }
1212
1213 int
1214 vfs_context_is64bit(vfs_context_t ctx)
1215 {
1216 proc_t proc = vfs_context_proc(ctx);
1217
1218 if (proc) {
1219 return proc_is64bit(proc);
1220 }
1221 return 0;
1222 }
1223
1224 boolean_t
1225 vfs_context_can_resolve_triggers(vfs_context_t ctx)
1226 {
1227 proc_t proc = vfs_context_proc(ctx);
1228
1229 if (proc) {
1230 if (proc->p_vfs_iopolicy &
1231 P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE) {
1232 return false;
1233 }
1234 return true;
1235 }
1236 return false;
1237 }
1238
1239 /*
1240 * vfs_context_proc
1241 *
1242 * Description: Given a vfs_context_t, return the proc_t associated with it.
1243 *
1244 * Parameters: vfs_context_t The context to use
1245 *
1246 * Returns: proc_t The process for this context
1247 *
1248 * Notes: This function will return the current_proc() if any of the
1249 * following conditions are true:
1250 *
1251 * o The supplied context pointer is NULL
1252 * o There is no Mach thread associated with the context
1253 * o There is no Mach task associated with the Mach thread
1254 * o There is no proc_t associated with the Mach task
1255 * o The proc_t has no per process open file table
1256 * o The proc_t is post-vfork()
1257 *
1258 * This causes this function to return a value matching as
1259 * closely as possible the previous behaviour, while at the
1260 * same time avoiding the task lending that results from vfork()
1261 */
1262 proc_t
1263 vfs_context_proc(vfs_context_t ctx)
1264 {
1265 proc_t proc = NULL;
1266
1267 if (ctx != NULL && ctx->vc_thread != NULL) {
1268 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1269 }
1270 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK))) {
1271 proc = NULL;
1272 }
1273
1274 return proc == NULL ? current_proc() : proc;
1275 }
1276
1277 /*
1278 * vfs_context_get_special_port
1279 *
1280 * Description: Return the requested special port from the task associated
1281 * with the given context.
1282 *
1283 * Parameters: vfs_context_t The context to use
1284 * int Index of special port
1285 * ipc_port_t * Pointer to returned port
1286 *
1287 * Returns: kern_return_t see task_get_special_port()
1288 */
1289 kern_return_t
1290 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1291 {
1292 task_t task = NULL;
1293
1294 if (ctx != NULL && ctx->vc_thread != NULL) {
1295 task = get_threadtask(ctx->vc_thread);
1296 }
1297
1298 return task_get_special_port(task, which, portp);
1299 }
1300
1301 /*
1302 * vfs_context_set_special_port
1303 *
1304 * Description: Set the requested special port in the task associated
1305 * with the given context.
1306 *
1307 * Parameters: vfs_context_t The context to use
1308 * int Index of special port
1309 * ipc_port_t New special port
1310 *
1311 * Returns: kern_return_t see task_set_special_port_internal()
1312 */
1313 kern_return_t
1314 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1315 {
1316 task_t task = NULL;
1317
1318 if (ctx != NULL && ctx->vc_thread != NULL) {
1319 task = get_threadtask(ctx->vc_thread);
1320 }
1321
1322 return task_set_special_port_internal(task, which, port);
1323 }
1324
1325 /*
1326 * vfs_context_thread
1327 *
1328 * Description: Return the Mach thread associated with a vfs_context_t
1329 *
1330 * Parameters: vfs_context_t The context to use
1331 *
1332 * Returns: thread_t The thread for this context, or
1333 * NULL, if there is not one.
1334 *
1335 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1336 * as a result of a static vfs_context_t declaration in a function
1337 * and will result in this function returning NULL.
1338 *
1339 * This is intentional; this function should NOT return the
1340 * current_thread() in this case.
1341 */
1342 thread_t
1343 vfs_context_thread(vfs_context_t ctx)
1344 {
1345 return ctx->vc_thread;
1346 }
1347
1348
1349 /*
1350 * vfs_context_cwd
1351 *
1352 * Description: Returns a reference on the vnode for the current working
1353 * directory for the supplied context
1354 *
1355 * Parameters: vfs_context_t The context to use
1356 *
1357 * Returns: vnode_t The current working directory
1358 * for this context
1359 *
1360 * Notes: The function first attempts to obtain the current directory
1361 * from the thread, and if it is not present there, falls back
1362 * to obtaining it from the process instead. If it can't be
1363 * obtained from either place, we return NULLVP.
1364 */
1365 vnode_t
1366 vfs_context_cwd(vfs_context_t ctx)
1367 {
1368 vnode_t cwd = NULLVP;
1369
1370 if (ctx != NULL && ctx->vc_thread != NULL) {
1371 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1372 proc_t proc;
1373
1374 /*
1375 * Get the cwd from the thread; if there isn't one, get it
1376 * from the process, instead.
1377 */
1378 if ((cwd = uth->uu_cdir) == NULLVP &&
1379 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1380 proc->p_fd != NULL) {
1381 cwd = proc->p_fd->fd_cdir;
1382 }
1383 }
1384
1385 return cwd;
1386 }
1387
1388 /*
1389 * vfs_context_create
1390 *
1391 * Description: Allocate and initialize a new context.
1392 *
1393 * Parameters: vfs_context_t: Context to copy, or NULL for new
1394 *
1395 * Returns: Pointer to new context
1396 *
1397 * Notes: Copy cred and thread from argument, if available; else
1398 * initialize with current thread and new cred. Returns
1399 * with a reference held on the credential.
1400 */
1401 vfs_context_t
1402 vfs_context_create(vfs_context_t ctx)
1403 {
1404 vfs_context_t newcontext;
1405
1406 newcontext = zalloc_flags(ZV_VFS_CONTEXT, Z_WAITOK | Z_ZERO);
1407
1408 if (newcontext) {
1409 kauth_cred_t safecred;
1410 if (ctx) {
1411 newcontext->vc_thread = ctx->vc_thread;
1412 safecred = ctx->vc_ucred;
1413 } else {
1414 newcontext->vc_thread = current_thread();
1415 safecred = kauth_cred_get();
1416 }
1417 if (IS_VALID_CRED(safecred)) {
1418 kauth_cred_ref(safecred);
1419 }
1420 newcontext->vc_ucred = safecred;
1421 return newcontext;
1422 }
1423 return NULL;
1424 }
1425
1426
1427 vfs_context_t
1428 vfs_context_current(void)
1429 {
1430 vfs_context_t ctx = NULL;
1431 uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1432
1433 if (ut != NULL) {
1434 if (ut->uu_context.vc_ucred != NULL) {
1435 ctx = &ut->uu_context;
1436 }
1437 }
1438
1439 return ctx == NULL ? vfs_context_kernel() : ctx;
1440 }
1441
1442
1443 /*
1444 * XXX Do not ask
1445 *
1446 * Dangerous hack - adopt the first kernel thread as the current thread, to
1447 * get to the vfs_context_t in the uthread associated with a kernel thread.
1448 * This is used by UDF to make the call into IOCDMediaBSDClient,
1449 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1450 * ioctl() is being called from kernel or user space (and all this because
1451 * we do not pass threads into our ioctl()'s, instead of processes).
1452 *
1453 * This is also used by imageboot_setup(), called early from bsd_init() after
1454 * kernproc has been given a credential.
1455 *
1456 */
1457 static struct vfs_context kerncontext;
1458 vfs_context_t
1459 vfs_context_kernel(void)
1460 {
1461 return &kerncontext;
1462 }
1463
1464 /*
1465 * Called early in bsd_init() when kernproc sets its thread and cred context.
1466 */
1467 void
1468 vfs_set_context_kernel(vfs_context_t ctx)
1469 {
1470 kerncontext = *ctx;
1471 }
1472
1473 int
1474 vfs_context_rele(vfs_context_t ctx)
1475 {
1476 if (ctx) {
1477 if (IS_VALID_CRED(ctx->vc_ucred)) {
1478 kauth_cred_unref(&ctx->vc_ucred);
1479 }
1480 zfree(ZV_VFS_CONTEXT, ctx);
1481 }
1482 return 0;
1483 }
1484
1485
1486 kauth_cred_t
1487 vfs_context_ucred(vfs_context_t ctx)
1488 {
1489 return ctx->vc_ucred;
1490 }
1491
1492 /*
1493 * Return true if the context is owned by the superuser.
1494 */
1495 int
1496 vfs_context_issuser(vfs_context_t ctx)
1497 {
1498 return kauth_cred_issuser(vfs_context_ucred(ctx));
1499 }
1500
1501 int
1502 vfs_context_iskernel(vfs_context_t ctx)
1503 {
1504 return ctx == &kerncontext;
1505 }
1506
1507 /*
1508 * Given a context, for all fields of vfs_context_t which
1509 * are not held with a reference, set those fields to the
1510 * values for the current execution context. Currently, this
1511 * just means the vc_thread.
1512 *
1513 * Returns: 0 for success, nonzero for failure
1514 *
1515 * The intended use is:
1516 * 1. vfs_context_create() gets the caller a context
1517 * 2. vfs_context_bind() sets the unrefcounted data
1518 * 3. vfs_context_rele() releases the context
1519 *
1520 */
1521 int
1522 vfs_context_bind(vfs_context_t ctx)
1523 {
1524 ctx->vc_thread = current_thread();
1525 return 0;
1526 }
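
/*
 * A lifetime sketch following the three steps documented above:
 *
 *	vfs_context_t ctx = vfs_context_create(NULL);	step 1: takes a cred ref
 *	...
 *	(void)vfs_context_bind(ctx);			step 2: adopt this thread
 *	... issue VNOP/VFS calls using ctx ...
 *	vfs_context_rele(ctx);				step 3: drop cred ref and free
 */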
1527
1528 int
1529 vfs_isswapmount(mount_t mnt)
1530 {
1531 return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
1532 }
1533
1534 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1535
1536
1537 /*
1538 * Convert between vnode types and inode formats (since POSIX.1
1539 * defines mode word of stat structure in terms of inode formats).
1540 */
1541 enum vtype
1542 vnode_iftovt(int mode)
1543 {
1544 return iftovt_tab[((mode) & S_IFMT) >> 12];
1545 }
1546
1547 int
1548 vnode_vttoif(enum vtype indx)
1549 {
1550 return vttoif_tab[(int)(indx)];
1551 }
1552
1553 int
1554 vnode_makeimode(int indx, int mode)
1555 {
1556 return (int)(VTTOIF(indx) | (mode));
1557 }
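
/*
 * For example, vnode_iftovt(S_IFDIR) yields VDIR, and
 * vnode_makeimode(VREG, 0644) rebuilds the equivalent st_mode value
 * (S_IFREG | 0644).
 */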
1558
1559
1560 /*
1561 * vnode manipulation functions.
1562 */
1563
1564 /* returns the system root vnode with an iocount; it should be released using vnode_put() */
1565 vnode_t
1566 vfs_rootvnode(void)
1567 {
1568 int error;
1569
1570 lck_rw_lock_shared(rootvnode_rw_lock);
1571 error = vnode_get(rootvnode);
1572 lck_rw_unlock_shared(rootvnode_rw_lock);
1573 if (error) {
1574 return (vnode_t)0;
1575 } else {
1576 return rootvnode;
1577 }
1578 }
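
/*
 * A minimal call pattern: the iocount taken on success is dropped with
 * vnode_put().
 *
 *	vnode_t rootvp = vfs_rootvnode();
 *
 *	if (rootvp != NULLVP) {
 *		... use rootvp ...
 *		vnode_put(rootvp);
 *	}
 */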
1579
1580
1581 uint32_t
1582 vnode_vid(vnode_t vp)
1583 {
1584 return (uint32_t)(vp->v_id);
1585 }
1586
1587 mount_t
1588 vnode_mount(vnode_t vp)
1589 {
1590 return vp->v_mount;
1591 }
1592
1593 #if CONFIG_IOSCHED
1594 vnode_t
1595 vnode_mountdevvp(vnode_t vp)
1596 {
1597 if (vp->v_mount) {
1598 return vp->v_mount->mnt_devvp;
1599 } else {
1600 return (vnode_t)0;
1601 }
1602 }
1603 #endif
1604
1605 boolean_t
1606 vnode_isonexternalstorage(vnode_t vp)
1607 {
1608 if (vp) {
1609 if (vp->v_mount) {
1610 if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_PERIPHERAL_DRIVE) {
1611 return TRUE;
1612 }
1613 }
1614 }
1615 return FALSE;
1616 }
1617
1618 mount_t
1619 vnode_mountedhere(vnode_t vp)
1620 {
1621 mount_t mp;
1622
1623 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1624 (mp->mnt_vnodecovered == vp)) {
1625 return mp;
1626 } else {
1627 return (mount_t)NULL;
1628 }
1629 }
1630
1631 /* returns vnode type of vnode_t */
1632 enum vtype
1633 vnode_vtype(vnode_t vp)
1634 {
1635 return vp->v_type;
1636 }
1637
1638 /* returns FS specific node saved in vnode */
1639 void *
1640 vnode_fsnode(vnode_t vp)
1641 {
1642 return vp->v_data;
1643 }
1644
1645 void
1646 vnode_clearfsnode(vnode_t vp)
1647 {
1648 vp->v_data = NULL;
1649 }
1650
1651 dev_t
1652 vnode_specrdev(vnode_t vp)
1653 {
1654 return vp->v_rdev;
1655 }
1656
1657
1658 /* Accessor functions */
1659 /* is vnode_t a root vnode */
1660 int
1661 vnode_isvroot(vnode_t vp)
1662 {
1663 return (vp->v_flag & VROOT)? 1 : 0;
1664 }
1665
1666 /* is vnode_t a system vnode */
1667 int
1668 vnode_issystem(vnode_t vp)
1669 {
1670 return (vp->v_flag & VSYSTEM)? 1 : 0;
1671 }
1672
1673 /* is vnode_t a swap file vnode */
1674 int
1675 vnode_isswap(vnode_t vp)
1676 {
1677 return (vp->v_flag & VSWAP)? 1 : 0;
1678 }
1679
1680 /* is vnode_t a tty */
1681 int
1682 vnode_istty(vnode_t vp)
1683 {
1684 return (vp->v_flag & VISTTY) ? 1 : 0;
1685 }
1686
1687 /* is a mount operation in progress on vnode_t */
1688 int
1689 vnode_ismount(vnode_t vp)
1690 {
1691 return (vp->v_flag & VMOUNT)? 1 : 0;
1692 }
1693
1694 /* is this vnode being recycled now */
1695 int
1696 vnode_isrecycled(vnode_t vp)
1697 {
1698 int ret;
1699
1700 vnode_lock_spin(vp);
1701 ret = (vp->v_lflag & (VL_TERMINATE | VL_DEAD))? 1 : 0;
1702 vnode_unlock(vp);
1703 return ret;
1704 }
1705
1706 /* vnode was created by background task requesting rapid aging
1707 * and has not since been referenced by a normal task */
1708 int
1709 vnode_israge(vnode_t vp)
1710 {
1711 return (vp->v_flag & VRAGE)? 1 : 0;
1712 }
1713
1714 int
1715 vnode_needssnapshots(vnode_t vp)
1716 {
1717 return (vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0;
1718 }
1719
1720
1721 /* Check the process/thread to see if we should skip atime updates */
1722 int
1723 vfs_ctx_skipatime(vfs_context_t ctx)
1724 {
1725 struct uthread *ut;
1726 proc_t proc;
1727 thread_t thr;
1728
1729 proc = vfs_context_proc(ctx);
1730 thr = vfs_context_thread(ctx);
1731
1732 /* Validate pointers in case we were invoked via a kernel context */
1733 if (thr && proc) {
1734 ut = get_bsdthread_info(thr);
1735
1736 if (proc->p_lflag & P_LRAGE_VNODES) {
1737 return 1;
1738 }
1739
1740 if (ut) {
1741 if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) {
1742 return 1;
1743 }
1744 }
1745
1746 if (proc->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) {
1747 return 1;
1748 }
1749 }
1750 return 0;
1751 }
1752
1753 /* is vnode_t marked to not keep data cached once it's been consumed */
1754 int
1755 vnode_isnocache(vnode_t vp)
1756 {
1757 return (vp->v_flag & VNOCACHE_DATA)? 1 : 0;
1758 }
1759
1760 /*
1761 * has sequential readahead been disabled on this vnode
1762 */
1763 int
1764 vnode_isnoreadahead(vnode_t vp)
1765 {
1766 return (vp->v_flag & VRAOFF)? 1 : 0;
1767 }
1768
1769 int
1770 vnode_is_openevt(vnode_t vp)
1771 {
1772 return (vp->v_flag & VOPENEVT)? 1 : 0;
1773 }
1774
1775 /* is vnode_t a standard one? */
1776 int
1777 vnode_isstandard(vnode_t vp)
1778 {
1779 return (vp->v_flag & VSTANDARD)? 1 : 0;
1780 }
1781
1782 /* don't vflush() if SKIPSYSTEM */
1783 int
1784 vnode_isnoflush(vnode_t vp)
1785 {
1786 return (vp->v_flag & VNOFLUSH)? 1 : 0;
1787 }
1788
1789 /* is vnode_t a regular file */
1790 int
1791 vnode_isreg(vnode_t vp)
1792 {
1793 return (vp->v_type == VREG)? 1 : 0;
1794 }
1795
1796 /* is vnode_t a directory? */
1797 int
1798 vnode_isdir(vnode_t vp)
1799 {
1800 return (vp->v_type == VDIR)? 1 : 0;
1801 }
1802
1803 /* is vnode_t a symbolic link ? */
1804 int
1805 vnode_islnk(vnode_t vp)
1806 {
1807 return (vp->v_type == VLNK)? 1 : 0;
1808 }
1809
1810 int
1811 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
1812 {
1813 struct nameidata *ndp = cnp->cn_ndp;
1814
1815 if (ndp == NULL) {
1816 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1817 }
1818
1819 if (vnode_isdir(vp)) {
1820 if (vp->v_mountedhere != NULL) {
1821 goto yes;
1822 }
1823
1824 #if CONFIG_TRIGGERS
1825 if (vp->v_resolve) {
1826 goto yes;
1827 }
1828 #endif /* CONFIG_TRIGGERS */
1829 }
1830
1831
1832 if (vnode_islnk(vp)) {
1833 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1834 if (cnp->cn_flags & FOLLOW) {
1835 goto yes;
1836 }
1837 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
1838 goto yes;
1839 }
1840 }
1841
1842 return 0;
1843
1844 yes:
1845 ndp->ni_flag |= NAMEI_CONTLOOKUP;
1846 return EKEEPLOOKING;
1847 }
1848
1849 /* is vnode_t a fifo ? */
1850 int
1851 vnode_isfifo(vnode_t vp)
1852 {
1853 return (vp->v_type == VFIFO)? 1 : 0;
1854 }
1855
1856 /* is vnode_t a block device? */
1857 int
1858 vnode_isblk(vnode_t vp)
1859 {
1860 return (vp->v_type == VBLK)? 1 : 0;
1861 }
1862
1863 int
1864 vnode_isspec(vnode_t vp)
1865 {
1866 return ((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0;
1867 }
1868
1869 /* is vnode_t a char device? */
1870 int
1871 vnode_ischr(vnode_t vp)
1872 {
1873 return (vp->v_type == VCHR)? 1 : 0;
1874 }
1875
1876 /* is vnode_t a socket? */
1877 int
1878 vnode_issock(vnode_t vp)
1879 {
1880 return (vp->v_type == VSOCK)? 1 : 0;
1881 }
1882
1883 /* is vnode_t a device with multiple active vnodes referring to it? */
1884 int
1885 vnode_isaliased(vnode_t vp)
1886 {
1887 enum vtype vt = vp->v_type;
1888 if (!((vt == VCHR) || (vt == VBLK))) {
1889 return 0;
1890 } else {
1891 return vp->v_specflags & SI_ALIASED;
1892 }
1893 }
1894
1895 /* is vnode_t a named stream? */
1896 int
1897 vnode_isnamedstream(
1898 #if NAMEDSTREAMS
1899 vnode_t vp
1900 #else
1901 __unused vnode_t vp
1902 #endif
1903 )
1904 {
1905 #if NAMEDSTREAMS
1906 return (vp->v_flag & VISNAMEDSTREAM) ? 1 : 0;
1907 #else
1908 return 0;
1909 #endif
1910 }
1911
1912 int
1913 vnode_isshadow(
1914 #if NAMEDSTREAMS
1915 vnode_t vp
1916 #else
1917 __unused vnode_t vp
1918 #endif
1919 )
1920 {
1921 #if NAMEDSTREAMS
1922 return (vp->v_flag & VISSHADOW) ? 1 : 0;
1923 #else
1924 return 0;
1925 #endif
1926 }
1927
1928 /* does vnode have associated named stream vnodes ? */
1929 int
1930 vnode_hasnamedstreams(
1931 #if NAMEDSTREAMS
1932 vnode_t vp
1933 #else
1934 __unused vnode_t vp
1935 #endif
1936 )
1937 {
1938 #if NAMEDSTREAMS
1939 return (vp->v_lflag & VL_HASSTREAMS) ? 1 : 0;
1940 #else
1941 return 0;
1942 #endif
1943 }
1944 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1945 void
1946 vnode_setnocache(vnode_t vp)
1947 {
1948 vnode_lock_spin(vp);
1949 vp->v_flag |= VNOCACHE_DATA;
1950 vnode_unlock(vp);
1951 }
1952
1953 void
1954 vnode_clearnocache(vnode_t vp)
1955 {
1956 vnode_lock_spin(vp);
1957 vp->v_flag &= ~VNOCACHE_DATA;
1958 vnode_unlock(vp);
1959 }
1960
1961 void
1962 vnode_set_openevt(vnode_t vp)
1963 {
1964 vnode_lock_spin(vp);
1965 vp->v_flag |= VOPENEVT;
1966 vnode_unlock(vp);
1967 }
1968
1969 void
1970 vnode_clear_openevt(vnode_t vp)
1971 {
1972 vnode_lock_spin(vp);
1973 vp->v_flag &= ~VOPENEVT;
1974 vnode_unlock(vp);
1975 }
1976
1977
1978 void
1979 vnode_setnoreadahead(vnode_t vp)
1980 {
1981 vnode_lock_spin(vp);
1982 vp->v_flag |= VRAOFF;
1983 vnode_unlock(vp);
1984 }
1985
1986 void
1987 vnode_clearnoreadahead(vnode_t vp)
1988 {
1989 vnode_lock_spin(vp);
1990 vp->v_flag &= ~VRAOFF;
1991 vnode_unlock(vp);
1992 }
1993
1994 int
1995 vnode_isfastdevicecandidate(vnode_t vp)
1996 {
1997 return (vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0;
1998 }
1999
2000 void
2001 vnode_setfastdevicecandidate(vnode_t vp)
2002 {
2003 vnode_lock_spin(vp);
2004 vp->v_flag |= VFASTDEVCANDIDATE;
2005 vnode_unlock(vp);
2006 }
2007
2008 void
2009 vnode_clearfastdevicecandidate(vnode_t vp)
2010 {
2011 vnode_lock_spin(vp);
2012 vp->v_flag &= ~VFASTDEVCANDIDATE;
2013 vnode_unlock(vp);
2014 }
2015
2016 int
2017 vnode_isautocandidate(vnode_t vp)
2018 {
2019 return (vp->v_flag & VAUTOCANDIDATE)? 1 : 0;
2020 }
2021
2022 void
2023 vnode_setautocandidate(vnode_t vp)
2024 {
2025 vnode_lock_spin(vp);
2026 vp->v_flag |= VAUTOCANDIDATE;
2027 vnode_unlock(vp);
2028 }
2029
2030 void
2031 vnode_clearautocandidate(vnode_t vp)
2032 {
2033 vnode_lock_spin(vp);
2034 vp->v_flag &= ~VAUTOCANDIDATE;
2035 vnode_unlock(vp);
2036 }
2037
2038
2039
2040
2041 /* mark vnode_t to be skipped by vflush() if SKIPSYSTEM is specified */
2042 void
2043 vnode_setnoflush(vnode_t vp)
2044 {
2045 vnode_lock_spin(vp);
2046 vp->v_flag |= VNOFLUSH;
2047 vnode_unlock(vp);
2048 }
2049
2050 void
2051 vnode_clearnoflush(vnode_t vp)
2052 {
2053 vnode_lock_spin(vp);
2054 vp->v_flag &= ~VNOFLUSH;
2055 vnode_unlock(vp);
2056 }
2057
2058
2059 /* is vnode_t a block device with a FS mounted on it */
2060 int
2061 vnode_ismountedon(vnode_t vp)
2062 {
2063 return (vp->v_specflags & SI_MOUNTEDON)? 1 : 0;
2064 }
2065
2066 void
2067 vnode_setmountedon(vnode_t vp)
2068 {
2069 vnode_lock_spin(vp);
2070 vp->v_specflags |= SI_MOUNTEDON;
2071 vnode_unlock(vp);
2072 }
2073
2074 void
2075 vnode_clearmountedon(vnode_t vp)
2076 {
2077 vnode_lock_spin(vp);
2078 vp->v_specflags &= ~SI_MOUNTEDON;
2079 vnode_unlock(vp);
2080 }
2081
2082
2083 void
2084 vnode_settag(vnode_t vp, int tag)
2085 {
2086 /*
2087 * We only assign enum values to v_tag, but add an assert to make sure we
2088 * catch it in dev/debug builds if this ever changes.
2089 */
2090 assert(tag >= SHRT_MIN && tag <= SHRT_MAX);
2091 vp->v_tag = (uint16_t)tag;
2092 }
2093
2094 int
2095 vnode_tag(vnode_t vp)
2096 {
2097 return vp->v_tag;
2098 }
2099
2100 vnode_t
2101 vnode_parent(vnode_t vp)
2102 {
2103 return vp->v_parent;
2104 }
2105
2106 void
2107 vnode_setparent(vnode_t vp, vnode_t dvp)
2108 {
2109 vp->v_parent = dvp;
2110 }
2111
2112 void
2113 vnode_setname(vnode_t vp, char * name)
2114 {
2115 vp->v_name = name;
2116 }
2117
2118 /* return the FS name that was registered when the FS was added to the kernel */
2119 void
2120 vnode_vfsname(vnode_t vp, char * buf)
2121 {
2122 strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
2123 }
2124
2125 /* return the FS type number */
2126 int
2127 vnode_vfstypenum(vnode_t vp)
2128 {
2129 return vp->v_mount->mnt_vtable->vfc_typenum;
2130 }
2131
2132 int
2133 vnode_vfs64bitready(vnode_t vp)
2134 {
2135 /*
2136 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2137 */
2138 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
2139 return 1;
2140 } else {
2141 return 0;
2142 }
2143 }
2144
2145
2146
2147 /* return the visible flags on associated mount point of vnode_t */
2148 uint32_t
2149 vnode_vfsvisflags(vnode_t vp)
2150 {
2151 return vp->v_mount->mnt_flag & MNT_VISFLAGMASK;
2152 }
2153
2154 /* return the command modifier flags on associated mount point of vnode_t */
2155 uint32_t
2156 vnode_vfscmdflags(vnode_t vp)
2157 {
2158 return vp->v_mount->mnt_flag & MNT_CMDFLAGS;
2159 }
2160
2161 /* return the max size of a short symlink on vnode_t's mount point */
2162 uint32_t
2163 vnode_vfsmaxsymlen(vnode_t vp)
2164 {
2165 return vp->v_mount->mnt_maxsymlinklen;
2166 }
2167
2168 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2169 struct vfsstatfs *
2170 vnode_vfsstatfs(vnode_t vp)
2171 {
2172 return &vp->v_mount->mnt_vfsstat;
2173 }
2174
2175 /* return the FS-specific private data handle associated with vnode_t's mount point */
2176 void *
2177 vnode_vfsfsprivate(vnode_t vp)
2178 {
2179 return vp->v_mount->mnt_data;
2180 }
2181
2182 /* is vnode_t in a rdonly mounted FS */
2183 int
2184 vnode_vfsisrdonly(vnode_t vp)
2185 {
2186 return (vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0;
2187 }
2188
2189 int
2190 vnode_compound_rename_available(vnode_t vp)
2191 {
2192 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
2193 }
2194 int
2195 vnode_compound_rmdir_available(vnode_t vp)
2196 {
2197 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
2198 }
2199 int
2200 vnode_compound_mkdir_available(vnode_t vp)
2201 {
2202 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
2203 }
2204 int
2205 vnode_compound_remove_available(vnode_t vp)
2206 {
2207 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
2208 }
2209 int
2210 vnode_compound_open_available(vnode_t vp)
2211 {
2212 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
2213 }
2214
2215 int
2216 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2217 {
2218 return (vp->v_mount->mnt_compound_ops & opid) != 0;
2219 }
2220
2221 /*
2222 * Returns vnode ref to current working directory; if a per-thread current
2223 * working directory is in effect, return that instead of the per process one.
2224 *
2225 * XXX Published, but not used.
2226 */
2227 vnode_t
2228 current_workingdir(void)
2229 {
2230 return vfs_context_cwd(vfs_context_current());
2231 }
2232
2233 /*
2234 * Get a filesec and optional acl contents from an extended attribute.
2235 * Function will attempt to retrieve ACL, UUID, and GUID information using a
2236 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2237 *
2238 * Parameters: vp The vnode on which to operate.
2239 * fsecp The filesec (and ACL, if any) being
2240 * retrieved.
2241 * ctx The vnode context in which the
2242 * operation is to be attempted.
2243 *
2244 * Returns: 0 Success
2245 * !0 errno value
2246 *
2247 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2248 * host byte order, as will be the ACL contents, if any.
2249 * Internally, we will canonicalize these values from network (PPC)
2250 * byte order after we retrieve them so that the on-disk contents
2251 * of the extended attribute are identical for both PPC and Intel
2252 * (if we were not being required to provide this service via
2253 * fallback, this would be the job of the filesystem
2254 * 'VNOP_GETATTR' call).
2255 *
2256 * We use ntohl() because it has a transitive property on Intel
2257 * machines and no effect on PPC mancines. This guarantees us
2258 *
2259 * XXX: Deleting rather than ignoreing a corrupt security structure is
2260 * probably the only way to reset it without assistance from an
2261 * file system integrity checking tool. Right now we ignore it.
2262 *
2263 * XXX: We should enummerate the possible errno values here, and where
2264 * in the code they originated.
2265 */
2266 static int
2267 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2268 {
2269 kauth_filesec_t fsec;
2270 uio_t fsec_uio;
2271 size_t fsec_size;
2272 size_t xsize, rsize;
2273 int error;
2274 uint32_t host_fsec_magic;
2275 uint32_t host_acl_entrycount;
2276
2277 fsec = NULL;
2278 fsec_uio = NULL;
2279
2280 /* find out how big the EA is */
2281 error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
2282 if (error != 0) {
2283 /* no EA, no filesec */
2284 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2285 error = 0;
2286 }
2287 /* either way, we are done */
2288 goto out;
2289 }
2290
2291 /*
2292 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2293 * ACE entry ACL, and if it's larger than that, it must have the right
2294 * number of bytes such that it contains an atomic number of ACEs,
2295 * rather than partial entries. Otherwise, we ignore it.
2296 */
2297 if (!KAUTH_FILESEC_VALID(xsize)) {
2298 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
2299 error = 0;
2300 goto out;
2301 }
2302
2303 /* how many entries would fit? */
2304 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2305 if (fsec_size > KAUTH_ACL_MAX_ENTRIES) {
2306 KAUTH_DEBUG(" ERROR - Bogus (too large) kauth_fiilesec_t: %ld bytes", xsize);
2307 error = 0;
2308 goto out;
2309 }
2310
2311 /* get buffer and uio */
2312 if (((fsec = kauth_filesec_alloc((int)fsec_size)) == NULL) ||
2313 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2314 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2315 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2316 error = ENOMEM;
2317 goto out;
2318 }
2319
2320 /* read security attribute */
2321 rsize = xsize;
2322 if ((error = vn_getxattr(vp,
2323 KAUTH_FILESEC_XATTR,
2324 fsec_uio,
2325 &rsize,
2326 XATTR_NOSECURITY,
2327 ctx)) != 0) {
2328 /* no attribute - no security data */
2329 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2330 error = 0;
2331 }
2332 /* either way, we are done */
2333 goto out;
2334 }
2335
2336 /*
2337 * Validate security structure; the validation must take place in host
2338 * byte order. If it's corrupt, we will just ignore it.
2339 */
2340
2341 /* Validate the size before trying to convert it */
2342 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2343 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2344 goto out;
2345 }
2346
2347 /* Validate the magic number before trying to convert it */
2348 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2349 if (fsec->fsec_magic != host_fsec_magic) {
2350 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2351 goto out;
2352 }
2353
2354 /* Validate the entry count before trying to convert it. */
2355 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2356 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2357 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2358 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2359 goto out;
2360 }
2361 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2362 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2363 goto out;
2364 }
2365 }
2366
2367 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2368
2369 *fsecp = fsec;
2370 fsec = NULL;
2371 error = 0;
2372 out:
2373 if (fsec != NULL) {
2374 kauth_filesec_free(fsec);
2375 }
2376 if (fsec_uio != NULL) {
2377 uio_free(fsec_uio);
2378 }
2379 if (error) {
2380 *fsecp = NULL;
2381 }
2382 return error;
2383 }
2384
2385 /*
2386 * Set a filesec and optional acl contents into an extended attribute.
2387 * Function will attempt to store ACL, UUID, and GUID information using a
2388 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2389 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2390 * original caller supplied an acl.
2391 *
2392 * Parameters: vp The vnode on which to operate.
2393 * fsec The filesec being set.
2394 * acl The acl to be associated with 'fsec'.
2395 * ctx The vnode context in which the
2396 * operation is to be attempted.
2397 *
2398 * Returns: 0 Success
2399 * !0 errno value
2400 *
2401 * Notes: Both the fsec and the acl are always valid.
2402 *
2403 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2404 * as are the acl contents, if they are used. Internally, we will
2405 * canonicalize these values into network (PPC) byte order before we
2406 * attempt to write them so that the on-disk contents of the
2407 * extended attribute are identical for both PPC and Intel (if we
2408 * were not being required to provide this service via fallback,
2409 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2410 * We reverse this process on the way out, so we leave with the
2411 * same byte order we started with.
2412 *
2413 * XXX: We should enumerate the possible errno values here, and where
2414 * in the code they originated.
2415 */
2416 static int
2417 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2418 {
2419 uio_t fsec_uio;
2420 int error;
2421 uint32_t saved_acl_copysize;
2422
2423 fsec_uio = NULL;
2424
2425 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2426 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2427 error = ENOMEM;
2428 goto out;
2429 }
2430 /*
2431 * Save the pre-converted ACL copysize, because it gets swapped too
2432 * if we are running with the wrong endianness.
2433 */
2434 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2435
2436 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2437
2438 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2439 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2440 error = vn_setxattr(vp,
2441 KAUTH_FILESEC_XATTR,
2442 fsec_uio,
2443 XATTR_NOSECURITY, /* we have auth'ed already */
2444 ctx);
2445 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2446
2447 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2448
2449 out:
2450 if (fsec_uio != NULL) {
2451 uio_free(fsec_uio);
2452 }
2453 return error;
2454 }
2455
2456 /*
2457 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2458 */
2459 void
2460 vnode_attr_handle_mnt_ignore_ownership(struct vnode_attr *vap, mount_t mp, vfs_context_t ctx)
2461 {
2462 uid_t nuid;
2463 gid_t ngid;
2464
2465 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2466 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2467 nuid = vap->va_uid;
2468 } else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2469 nuid = mp->mnt_fsowner;
2470 if (nuid == KAUTH_UID_NONE) {
2471 nuid = 99;
2472 }
2473 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2474 nuid = vap->va_uid;
2475 } else {
2476 /* this will always be something sensible */
2477 nuid = mp->mnt_fsowner;
2478 }
2479 if ((nuid == 99) && !vfs_context_issuser(ctx)) {
2480 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2481 }
2482 VATTR_RETURN(vap, va_uid, nuid);
2483 }
2484 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2485 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2486 ngid = vap->va_gid;
2487 } else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2488 ngid = mp->mnt_fsgroup;
2489 if (ngid == KAUTH_GID_NONE) {
2490 ngid = 99;
2491 }
2492 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2493 ngid = vap->va_gid;
2494 } else {
2495 /* this will always be something sensible */
2496 ngid = mp->mnt_fsgroup;
2497 }
2498 if ((ngid == 99) && !vfs_context_issuser(ctx)) {
2499 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2500 }
2501 VATTR_RETURN(vap, va_gid, ngid);
2502 }
2503 }
2504
2505 /*
2506 * Returns: 0 Success
2507 * ENOMEM Not enough space [only if has filesec]
2508 * EINVAL Requested unknown attributes
2509 * VNOP_GETATTR: ???
2510 * vnode_get_filesec: ???
2511 * kauth_cred_guid2uid: ???
2512 * kauth_cred_guid2gid: ???
2513 * vfs_update_vfsstat: ???
2514 */
2515 int
2516 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2517 {
2518 kauth_filesec_t fsec;
2519 kauth_acl_t facl;
2520 int error;
2521
2522 /*
2523 * Reject attempts to fetch unknown attributes.
2524 */
2525 if (vap->va_active & ~VNODE_ATTR_ALL) {
2526 return EINVAL;
2527 }
2528
2529 /* don't ask for extended security data if the filesystem doesn't support it */
2530 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2531 VATTR_CLEAR_ACTIVE(vap, va_acl);
2532 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2533 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2534 }
2535
2536 /*
2537 * If the caller wants size values we might have to synthesise, give the
2538 * filesystem the opportunity to supply better intermediate results.
2539 */
2540 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2541 VATTR_IS_ACTIVE(vap, va_total_size) ||
2542 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2543 VATTR_SET_ACTIVE(vap, va_data_size);
2544 VATTR_SET_ACTIVE(vap, va_data_alloc);
2545 VATTR_SET_ACTIVE(vap, va_total_size);
2546 VATTR_SET_ACTIVE(vap, va_total_alloc);
2547 }
2548
2549 vap->va_vaflags &= ~VA_USEFSID;
2550
2551 error = VNOP_GETATTR(vp, vap, ctx);
2552 if (error) {
2553 KAUTH_DEBUG("ERROR - returning %d", error);
2554 goto out;
2555 }
2556
2557 /*
2558 * If extended security data was requested but not returned, try the fallback
2559 * path.
2560 */
2561 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2562 fsec = NULL;
2563
2564 if (XATTR_VNODE_SUPPORTED(vp)) {
2565 /* try to get the filesec */
2566 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2567 goto out;
2568 }
2569 }
2570 /* if no filesec, no attributes */
2571 if (fsec == NULL) {
2572 VATTR_RETURN(vap, va_acl, NULL);
2573 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2574 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2575 } else {
2576 /* looks good, try to return what we were asked for */
2577 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2578 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2579
2580 /* only return the ACL if we were actually asked for it */
2581 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2582 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2583 VATTR_RETURN(vap, va_acl, NULL);
2584 } else {
2585 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2586 if (facl == NULL) {
2587 kauth_filesec_free(fsec);
2588 error = ENOMEM;
2589 goto out;
2590 }
2591 __nochk_bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2592 VATTR_RETURN(vap, va_acl, facl);
2593 }
2594 }
2595 kauth_filesec_free(fsec);
2596 }
2597 }
2598 /*
2599 * If someone gave us an unsolicited filesec, toss it. We promise that
2600 * we're OK with a filesystem giving us anything back, but our callers
2601 * only expect what they asked for.
2602 */
2603 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2604 if (vap->va_acl != NULL) {
2605 kauth_acl_free(vap->va_acl);
2606 }
2607 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2608 }
2609
2610 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2611 /*
2612 * Handle the case where we need a UID/GID, but only have extended
2613 * security information.
2614 */
2615 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2616 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2617 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2618 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0) {
2619 VATTR_RETURN(vap, va_uid, nuid);
2620 }
2621 }
2622 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2623 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2624 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2625 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0) {
2626 VATTR_RETURN(vap, va_gid, ngid);
2627 }
2628 }
2629 #endif
2630
2631 vnode_attr_handle_mnt_ignore_ownership(vap, vp->v_mount, ctx);
2632
2633 /*
2634 * Synthesise some values that can be reasonably guessed.
2635 */
2636 if (!VATTR_IS_SUPPORTED(vap, va_iosize)) {
2637 assert(vp->v_mount->mnt_vfsstat.f_iosize <= UINT32_MAX);
2638 VATTR_RETURN(vap, va_iosize, (uint32_t)vp->v_mount->mnt_vfsstat.f_iosize);
2639 }
2640
2641 if (!VATTR_IS_SUPPORTED(vap, va_flags)) {
2642 VATTR_RETURN(vap, va_flags, 0);
2643 }
2644
2645 if (!VATTR_IS_SUPPORTED(vap, va_filerev)) {
2646 VATTR_RETURN(vap, va_filerev, 0);
2647 }
2648
2649 if (!VATTR_IS_SUPPORTED(vap, va_gen)) {
2650 VATTR_RETURN(vap, va_gen, 0);
2651 }
2652
2653 /*
2654 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2655 */
2656 if (!VATTR_IS_SUPPORTED(vap, va_data_size)) {
2657 VATTR_RETURN(vap, va_data_size, 0);
2658 }
2659
2660 /* do we want any of the possibly-computed values? */
2661 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2662 VATTR_IS_ACTIVE(vap, va_total_size) ||
2663 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2664 /* make sure f_bsize is valid */
2665 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2666 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) {
2667 goto out;
2668 }
2669 }
2670
2671 /* default va_data_alloc from va_data_size */
2672 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc)) {
2673 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2674 }
2675
2676 /* default va_total_size from va_data_size */
2677 if (!VATTR_IS_SUPPORTED(vap, va_total_size)) {
2678 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2679 }
2680
2681 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2682 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc)) {
2683 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2684 }
2685 }
2686
2687 /*
2688 * If we don't have a change time, pull it from the modtime.
2689 */
2690 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time)) {
2691 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2692 }
2693
2694 /*
2695 * This is really only supported for the creation VNOPs, but since the field is there
2696 * we should populate it correctly.
2697 */
2698 VATTR_RETURN(vap, va_type, vp->v_type);
2699
2700 /*
2701 * The fsid can be obtained from the mountpoint directly.
2702 */
2703 if (VATTR_IS_ACTIVE(vap, va_fsid) &&
2704 (!VATTR_IS_SUPPORTED(vap, va_fsid) ||
2705 vap->va_vaflags & VA_REALFSID || !(vap->va_vaflags & VA_USEFSID))) {
2706 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2707 }
2708
2709 out:
2710 vap->va_vaflags &= ~VA_USEFSID;
2711
2712 return error;
2713 }
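/*
 * Illustrative sketch (hypothetical caller, not part of the original source):
 * the VATTR_INIT / VATTR_WANTED / vnode_getattr pattern described above.
 * Attributes the filesystem (or the filesec fallback) could not supply are
 * simply not marked supported, so the caller must check before consuming
 * them; if va_acl were requested and returned, the caller would own it and
 * release it with kauth_acl_free().  The default values used here are
 * arbitrary choices for the example.
 */
#if 0 /* illustrative only */
static int
example_get_size_and_uid(vnode_t vp, vfs_context_t ctx, off_t *sizep, uid_t *uidp)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);
	VATTR_WANTED(&va, va_uid);

	if ((error = vnode_getattr(vp, &va, ctx)) != 0) {
		return error;
	}
	*sizep = VATTR_IS_SUPPORTED(&va, va_data_size) ? (off_t)va.va_data_size : 0;
	*uidp  = VATTR_IS_SUPPORTED(&va, va_uid) ? va.va_uid : (uid_t)99;
	return 0;
}
#endif /* illustrative only */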
2714
2715 /*
2716 * Choose 32 bit or 64 bit fsid
2717 */
2718 uint64_t
2719 vnode_get_va_fsid(struct vnode_attr *vap)
2720 {
2721 if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
2722 return (uint64_t)vap->va_fsid64.val[0] + ((uint64_t)vap->va_fsid64.val[1] << 32);
2723 }
2724 return vap->va_fsid;
2725 }
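/*
 * Illustrative sketch (hypothetical caller): requesting both fsid forms and
 * letting vnode_get_va_fsid() prefer the 64-bit one when the filesystem
 * supplied it.  Assumes va_fsid / va_fsid64 behave as in vnode_getattr()
 * above.
 */
#if 0 /* illustrative only */
static int
example_get_fsid(vnode_t vp, vfs_context_t ctx, uint64_t *fsidp)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fsid64);

	if ((error = vnode_getattr(vp, &va, ctx)) == 0) {
		*fsidp = vnode_get_va_fsid(&va);	/* picks the 64-bit form if supported */
	}
	return error;
}
#endif /* illustrative only */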
2726
2727 /*
2728 * Set the attributes on a vnode in a vnode context.
2729 *
2730 * Parameters: vp The vnode whose attributes to set.
2731 * vap A pointer to the attributes to set.
2732 * ctx The vnode context in which the
2733 * operation is to be attempted.
2734 *
2735 * Returns: 0 Success
2736 * !0 errno value
2737 *
2738 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2739 *
2740 * The contents of the data area pointed to by 'vap' may be
2741 * modified if the vnode is on a filesystem which has been
2742 * mounted with ignore ownership flags, or by the underlying
2743 * VFS itself, or by the fallback code, if the underlying VFS
2744 * does not support ACL, UUID, or GUUID attributes directly.
2745 *
2746 * XXX: We should enumerate the possible errno values here, and where
2747 * in the code they originated.
2748 */
2749 int
2750 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2751 {
2752 int error;
2753 #if CONFIG_FSE
2754 uint64_t active;
2755 int is_perm_change = 0;
2756 int is_stat_change = 0;
2757 #endif
2758
2759 /*
2760 * Reject attempts to set unknown attributes.
2761 */
2762 if (vap->va_active & ~VNODE_ATTR_ALL) {
2763 return EINVAL;
2764 }
2765
2766 /*
2767 * Make sure the filesystem is mounted R/W.
2768 * If not, return an error.
2769 */
2770 if (vfs_isrdonly(vp->v_mount)) {
2771 error = EROFS;
2772 goto out;
2773 }
2774
2775 #if DEVELOPMENT || DEBUG
2776 /*
2777 * XXX VSWAP: Check for entitlements or special flag here
2778 * so we can restrict access appropriately.
2779 */
2780 #else /* DEVELOPMENT || DEBUG */
2781
2782 if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
2783 error = EPERM;
2784 goto out;
2785 }
2786 #endif /* DEVELOPMENT || DEBUG */
2787
2788 #if NAMEDSTREAMS
2789 /* For streams, va_data_size is the only settable attribute. */
2790 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2791 error = EPERM;
2792 goto out;
2793 }
2794 #endif
2795 /* Check for truncation */
2796 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
2797 switch (vp->v_type) {
2798 case VREG:
2799 /* For regular files it's ok */
2800 break;
2801 case VDIR:
2802 /* Not allowed to truncate directories */
2803 error = EISDIR;
2804 goto out;
2805 default:
2806 /* For everything else we will clear the bit and let underlying FS decide on the rest */
2807 VATTR_CLEAR_ACTIVE(vap, va_data_size);
2808 if (vap->va_active) {
2809 break;
2810 }
2811 /* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
2812 return 0;
2813 }
2814 }
2815
2816 /*
2817 * If ownership is being ignored on this volume, we silently discard
2818 * ownership changes.
2819 */
2820 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2821 VATTR_CLEAR_ACTIVE(vap, va_uid);
2822 VATTR_CLEAR_ACTIVE(vap, va_gid);
2823 }
2824
2825 /*
2826 * Make sure that extended security is enabled if we're going to try
2827 * to set any.
2828 */
2829 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2830 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2831 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2832 error = ENOTSUP;
2833 goto out;
2834 }
2835
2836 /* Never allow the setting of any unsupported superuser flags. */
2837 if (VATTR_IS_ACTIVE(vap, va_flags)) {
2838 vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
2839 }
2840
2841 #if CONFIG_FSE
2842 /*
2843 * Remember all of the active attributes that we're
2844 * attempting to modify.
2845 */
2846 active = vap->va_active & ~VNODE_ATTR_RDONLY;
2847 #endif
2848
2849 error = VNOP_SETATTR(vp, vap, ctx);
2850
2851 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) {
2852 error = vnode_setattr_fallback(vp, vap, ctx);
2853 }
2854
2855 #if CONFIG_FSE
2856 #define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \
2857 VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \
2858 VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl))
2859
2860 /*
2861 * Now that we've changed them, decide whether to send an
2862 * FSevent.
2863 */
2864 if ((active & PERMISSION_BITS) & vap->va_supported) {
2865 is_perm_change = 1;
2866 } else {
2867 /*
2868 * We've already checked the permission bits, and we
2869 * also want to filter out access time / backup time
2870 * changes.
2871 */
2872 active &= ~(PERMISSION_BITS |
2873 VNODE_ATTR_BIT(va_access_time) |
2874 VNODE_ATTR_BIT(va_backup_time));
2875
2876 /* Anything left to notify about? */
2877 if (active & vap->va_supported) {
2878 is_stat_change = 1;
2879 }
2880 }
2881
2882 if (error == 0) {
2883 if (is_perm_change) {
2884 if (need_fsevent(FSE_CHOWN, vp)) {
2885 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2886 }
2887 } else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) {
2888 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2889 }
2890 }
2891 #undef PERMISSION_BITS
2892 #endif
2893
2894 out:
2895 return error;
2896 }
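/*
 * Illustrative sketch (hypothetical caller): truncating a regular file
 * through the attribute interface above.  Only va_data_size is set, so the
 * EISDIR, swap-file, and read-only checks in vnode_setattr() apply exactly
 * as described.
 */
#if 0 /* illustrative only */
static int
example_truncate(vnode_t vp, off_t length, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_data_size, (uint64_t)length);
	return vnode_setattr(vp, &va, ctx);
}
#endif /* illustrative only */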
2897
2898 /*
2899 * Fallback for setting the attributes on a vnode in a vnode context. This
2900 * function will attempt to store ACL, UUID, and GUID information utilizing
2901 * a read/modify/write operation against an EA used as a backing store for
2902 * the object.
2903 *
2904 * Parameters: vp The vnode whose attributes to set.
2905 * vap A pointer to the attributes to set.
2906 * ctx The vnode context in which the
2907 * operation is to be attempted.
2908 *
2909 * Returns: 0 Success
2910 * !0 errno value
2911 *
2912 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2913 * as are the fsec and lfsec, if they are used.
2914 *
2915 * The contents of the data area pointed to by 'vap' may be
2916 * modified to indicate that the attribute is supported for
2917 * any given requested attribute.
2918 *
2919 * XXX: We should enumerate the possible errno values here, and where
2920 * in the code they originated.
2921 */
2922 int
2923 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2924 {
2925 kauth_filesec_t fsec;
2926 kauth_acl_t facl;
2927 struct kauth_filesec lfsec;
2928 int error;
2929
2930 error = 0;
2931
2932 /*
2933 * Extended security fallback via extended attributes.
2934 *
2935 * Note that we do not free the filesec; the caller is expected to
2936 * do this.
2937 */
2938 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2939 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2940 VATTR_NOT_RETURNED(vap, va_guuid)) {
2941 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2942
2943 /*
2944 * Fail for file types that we don't permit extended security
2945 * to be set on.
2946 */
2947 if (!XATTR_VNODE_SUPPORTED(vp)) {
2948 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2949 error = EINVAL;
2950 goto out;
2951 }
2952
2953 /*
2954 * If we don't have all the extended security items, we need
2955 * to fetch the existing data to perform a read-modify-write
2956 * operation.
2957 */
2958 fsec = NULL;
2959 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2960 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2961 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2962 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2963 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2964 goto out;
2965 }
2966 }
2967 /* if we didn't get a filesec, use our local one */
2968 if (fsec == NULL) {
2969 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2970 fsec = &lfsec;
2971 } else {
2972 KAUTH_DEBUG("SETATTR - updating existing filesec");
2973 }
2974 /* find the ACL */
2975 facl = &fsec->fsec_acl;
2976
2977 /* if we're using the local filesec, we need to initialise it */
2978 if (fsec == &lfsec) {
2979 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2980 fsec->fsec_owner = kauth_null_guid;
2981 fsec->fsec_group = kauth_null_guid;
2982 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2983 facl->acl_flags = 0;
2984 }
2985
2986 /*
2987 * Update with the supplied attributes.
2988 */
2989 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2990 KAUTH_DEBUG("SETATTR - updating owner UUID");
2991 fsec->fsec_owner = vap->va_uuuid;
2992 VATTR_SET_SUPPORTED(vap, va_uuuid);
2993 }
2994 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2995 KAUTH_DEBUG("SETATTR - updating group UUID");
2996 fsec->fsec_group = vap->va_guuid;
2997 VATTR_SET_SUPPORTED(vap, va_guuid);
2998 }
2999 if (VATTR_IS_ACTIVE(vap, va_acl)) {
3000 if (vap->va_acl == NULL) {
3001 KAUTH_DEBUG("SETATTR - removing ACL");
3002 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
3003 } else {
3004 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
3005 facl = vap->va_acl;
3006 }
3007 VATTR_SET_SUPPORTED(vap, va_acl);
3008 }
3009
3010 /*
3011 * If the filesec data is all invalid, we can just remove
3012 * the EA completely.
3013 */
3014 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
3015 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
3016 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
3017 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
3018 /* no attribute is ok, nothing to delete */
3019 if (error == ENOATTR) {
3020 error = 0;
3021 }
3022 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
3023 } else {
3024 /* write the EA */
3025 error = vnode_set_filesec(vp, fsec, facl, ctx);
3026 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
3027 }
3028
3029 /* if we fetched a filesec, dispose of the buffer */
3030 if (fsec != &lfsec) {
3031 kauth_filesec_free(fsec);
3032 }
3033 }
3034 out:
3035
3036 return error;
3037 }
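/*
 * Illustrative sketch (hypothetical filesystem, not part of the original
 * source): a setattr handler that only understands the mode bits.  Anything
 * it does not mark supported (va_acl, va_uuuid, va_guuid) is picked up by
 * vnode_setattr_fallback() above and stored in the KAUTH_FILESEC_XATTR
 * extended attribute.  "examplefs" and examplefs_write_mode() are
 * hypothetical names.
 */
#if 0 /* illustrative only */
static int
examplefs_vnop_setattr(struct vnop_setattr_args *ap)
{
	struct vnode_attr *vap = ap->a_vap;

	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		examplefs_write_mode(ap->a_vp, vap->va_mode);	/* hypothetical on-disk update */
		VATTR_SET_SUPPORTED(vap, va_mode);
	}
	/* deliberately no VATTR_SET_SUPPORTED for va_acl / va_uuuid / va_guuid */
	return 0;
}
#endif /* illustrative only */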
3038
3039 /*
3040 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
3041 * event on a vnode.
3042 */
3043 int
3044 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
3045 {
3046 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
3047 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
3048 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
3049 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
3050 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
3051 uint32_t knote_events = (events & knote_mask);
3052
3053 /* Permissions are not explicitly part of the kqueue model */
3054 if (events & VNODE_EVENT_PERMS) {
3055 knote_events |= NOTE_ATTRIB;
3056 }
3057
3058 /* Directory contents information just becomes NOTE_WRITE */
3059 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
3060 knote_events |= NOTE_WRITE;
3061 }
3062
3063 if (knote_events) {
3064 lock_vnode_and_post(vp, knote_events);
3065 #if CONFIG_FSE
3066 if (vap != NULL) {
3067 create_fsevent_from_kevent(vp, events, vap);
3068 }
3069 #else
3070 (void)vap;
3071 #endif
3072 }
3073
3074 return 0;
3075 }
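/*
 * Illustrative sketch (hypothetical filesystem helper): reporting an
 * attribute change through vnode_notify().  The attribute set is primed with
 * vfs_get_notify_attributes() (defined below) so the optional fsevent path
 * has the data it wants; passing NULL for the attributes is also legal, as
 * vnode_notify() checks for it.
 */
#if 0 /* illustrative only */
static void
examplefs_post_attrib_change(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;
	struct vnode_attr *vap = NULL;

	vfs_get_notify_attributes(&va);
	if (vnode_getattr(vp, &va, ctx) == 0) {
		vap = &va;
	}
	(void)vnode_notify(vp, VNODE_EVENT_ATTRIB, vap);
}
#endif /* illustrative only */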
3076
3077
3078
3079 int
3080 vnode_isdyldsharedcache(vnode_t vp)
3081 {
3082 return (vp->v_flag & VSHARED_DYLD) ? 1 : 0;
3083 }
3084
3085
3086 /*
3087 * For a filesystem that isn't tracking its own vnode watchers:
3088 * check whether a vnode is being monitored.
3089 */
3090 int
3091 vnode_ismonitored(vnode_t vp)
3092 {
3093 return vp->v_knotes.slh_first != NULL;
3094 }
3095
3096 int
3097 vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
3098 {
3099 if (out_vpp) {
3100 *out_vpp = NULLVP;
3101 }
3102 #if NULLFS
3103 return nullfs_getbackingvnode(in_vp, out_vpp);
3104 #else
3105 #pragma unused(in_vp)
3106 return ENOENT;
3107 #endif
3108 }
3109
3110 /*
3111 * Initialize a struct vnode_attr and activate the attributes required
3112 * by the vnode_notify() call.
3113 */
3114 int
3115 vfs_get_notify_attributes(struct vnode_attr *vap)
3116 {
3117 VATTR_INIT(vap);
3118 vap->va_active = VNODE_NOTIFY_ATTRS;
3119 return 0;
3120 }
3121
3122 #if CONFIG_TRIGGERS
3123 int
3124 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
3125 {
3126 int error;
3127 mount_t mp;
3128
3129 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
3130 if (mp == NULL) {
3131 return ENOENT;
3132 }
3133
3134 error = vfs_busy(mp, LK_NOWAIT);
3135 mount_iterdrop(mp);
3136
3137 if (error != 0) {
3138 return ENOENT;
3139 }
3140
3141 mount_lock(mp);
3142 if (mp->mnt_triggercallback != NULL) {
3143 error = EBUSY;
3144 mount_unlock(mp);
3145 goto out;
3146 }
3147
3148 mp->mnt_triggercallback = vtc;
3149 mp->mnt_triggerdata = data;
3150 mount_unlock(mp);
3151
3152 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
3153
3154 out:
3155 vfs_unbusy(mp);
3156 return 0;
3157 }
3158 #endif /* CONFIG_TRIGGERS */
3159
3160 /*
3161 * Definition of vnode operations.
3162 */
3163
3164 #if 0
3165 /*
3166 *#
3167 *#% lookup dvp L ? ?
3168 *#% lookup vpp - L -
3169 */
3170 struct vnop_lookup_args {
3171 struct vnodeop_desc *a_desc;
3172 vnode_t a_dvp;
3173 vnode_t *a_vpp;
3174 struct componentname *a_cnp;
3175 vfs_context_t a_context;
3176 };
3177 #endif /* 0*/
3178
3179 /*
3180 * Returns: 0 Success
3181 * lock_fsnode:ENOENT No such file or directory [only for VFS
3182 * that is not thread safe & vnode is
3183 * currently being/has been terminated]
3184 * <vfs_lookup>:ENAMETOOLONG
3185 * <vfs_lookup>:ENOENT
3186 * <vfs_lookup>:EJUSTRETURN
3187 * <vfs_lookup>:EPERM
3188 * <vfs_lookup>:EISDIR
3189 * <vfs_lookup>:ENOTDIR
3190 * <vfs_lookup>:???
3191 *
3192 * Note: The return codes from the underlying VFS's lookup routine can't
3193 * be fully enumerated here, since third party VFS authors may not
3194 * limit their error returns to the ones documented here, even
3195 * though this may result in some programs functioning incorrectly.
3196 *
3197 * The return codes documented above are those which may currently
3198 * be returned by HFS from hfs_lookup, not including additional
3199 * error codes which may be propagated from underlying routines.
3200 */
3201 errno_t
3202 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
3203 {
3204 int _err;
3205 struct vnop_lookup_args a;
3206
3207 a.a_desc = &vnop_lookup_desc;
3208 a.a_dvp = dvp;
3209 a.a_vpp = vpp;
3210 a.a_cnp = cnp;
3211 a.a_context = ctx;
3212
3213 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
3214 if (_err == 0 && *vpp) {
3215 DTRACE_FSINFO(lookup, vnode_t, *vpp);
3216 }
3217
3218 return _err;
3219 }
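/*
 * Illustrative sketch: VNOP_LOOKUP is normally reached via namei(); code
 * that only needs to resolve a path usually goes through the exported
 * vnode_lookup()/vnode_put() pair declared in sys/vnode.h rather than
 * dispatching the VNOP directly.  A minimal sketch assuming that interface;
 * the function name is hypothetical.
 */
#if 0 /* illustrative only */
static int
example_resolve(const char *path, vnode_t *vpp)
{
	int error;

	error = vnode_lookup(path, 0 /* flags */, vpp, vfs_context_current());
	/* on success the vnode comes back with an iocount; drop it with vnode_put() when done */
	return error;
}
#endif /* illustrative only */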
3220
3221 #if 0
3222 struct vnop_compound_open_args {
3223 struct vnodeop_desc *a_desc;
3224 vnode_t a_dvp;
3225 vnode_t *a_vpp;
3226 struct componentname *a_cnp;
3227 int32_t a_flags;
3228 int32_t a_fmode;
3229 struct vnode_attr *a_vap;
3230 vfs_context_t a_context;
3231 void *a_reserved;
3232 };
3233 #endif /* 0 */
3234
3235 int
3236 VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
3237 {
3238 int _err;
3239 struct vnop_compound_open_args a;
3240 int did_create = 0;
3241 int want_create;
3242 uint32_t tmp_status = 0;
3243 struct componentname *cnp = &ndp->ni_cnd;
3244
3245 want_create = (flags & O_CREAT);
3246
3247 a.a_desc = &vnop_compound_open_desc;
3248 a.a_dvp = dvp;
3249 a.a_vpp = vpp; /* Could be NULL */
3250 a.a_cnp = cnp;
3251 a.a_flags = flags;
3252 a.a_fmode = fmode;
3253 a.a_status = (statusp != NULL) ? statusp : &tmp_status;
3254 a.a_vap = vap;
3255 a.a_context = ctx;
3256 a.a_open_create_authorizer = vn_authorize_create;
3257 a.a_open_existing_authorizer = vn_authorize_open_existing;
3258 a.a_reserved = NULL;
3259
3260 if (dvp == NULLVP) {
3261 panic("No dvp?");
3262 }
3263 if (want_create && !vap) {
3264 panic("Want create, but no vap?");
3265 }
3266 if (!want_create && vap) {
3267 panic("Don't want create, but have a vap?");
3268 }
3269
3270 _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
3271 if (want_create) {
3272 if (_err == 0 && *vpp) {
3273 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3274 } else {
3275 DTRACE_FSINFO(compound_open, vnode_t, dvp);
3276 }
3277 } else {
3278 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3279 }
3280
3281 did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
3282
3283 if (did_create && !want_create) {
3284 panic("Filesystem did a create, even though none was requested?");
3285 }
3286
3287 if (did_create) {
3288 #if CONFIG_APPLEDOUBLE
3289 if (!NATIVE_XATTR(dvp)) {
3290 /*
3291 * Remove stale Apple Double file (if any).
3292 */
3293 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3294 }
3295 #endif /* CONFIG_APPLEDOUBLE */
3296 /* On create, provide kqueue notification */
3297 post_event_if_success(dvp, _err, NOTE_WRITE);
3298 }
3299
3300 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
3301 #if 0 /* FSEvents... */
3302 if (*vpp && _err && _err != EKEEPLOOKING) {
3303 vnode_put(*vpp);
3304 *vpp = NULLVP;
3305 }
3306 #endif /* 0 */
3307
3308 return _err;
3309 }
3310
3311 #if 0
3312 struct vnop_create_args {
3313 struct vnodeop_desc *a_desc;
3314 vnode_t a_dvp;
3315 vnode_t *a_vpp;
3316 struct componentname *a_cnp;
3317 struct vnode_attr *a_vap;
3318 vfs_context_t a_context;
3319 };
3320 #endif /* 0*/
3321 errno_t
3322 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3323 {
3324 int _err;
3325 struct vnop_create_args a;
3326
3327 a.a_desc = &vnop_create_desc;
3328 a.a_dvp = dvp;
3329 a.a_vpp = vpp;
3330 a.a_cnp = cnp;
3331 a.a_vap = vap;
3332 a.a_context = ctx;
3333
3334 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
3335 if (_err == 0 && *vpp) {
3336 DTRACE_FSINFO(create, vnode_t, *vpp);
3337 }
3338
3339 #if CONFIG_APPLEDOUBLE
3340 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3341 /*
3342 * Remove stale Apple Double file (if any).
3343 */
3344 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3345 }
3346 #endif /* CONFIG_APPLEDOUBLE */
3347
3348 post_event_if_success(dvp, _err, NOTE_WRITE);
3349
3350 return _err;
3351 }
3352
3353 #if 0
3354 /*
3355 *#
3356 *#% whiteout dvp L L L
3357 *#% whiteout cnp - - -
3358 *#% whiteout flag - - -
3359 *#
3360 */
3361 struct vnop_whiteout_args {
3362 struct vnodeop_desc *a_desc;
3363 vnode_t a_dvp;
3364 struct componentname *a_cnp;
3365 int a_flags;
3366 vfs_context_t a_context;
3367 };
3368 #endif /* 0*/
3369 errno_t
3370 VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
3371 __unused int flags, __unused vfs_context_t ctx)
3372 {
3373 return ENOTSUP; // XXX OBSOLETE
3374 }
3375
3376 #if 0
3377 /*
3378 *#
3379 *#% mknod dvp L U U
3380 *#% mknod vpp - X -
3381 *#
3382 */
3383 struct vnop_mknod_args {
3384 struct vnodeop_desc *a_desc;
3385 vnode_t a_dvp;
3386 vnode_t *a_vpp;
3387 struct componentname *a_cnp;
3388 struct vnode_attr *a_vap;
3389 vfs_context_t a_context;
3390 };
3391 #endif /* 0*/
3392 errno_t
3393 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3394 {
3395 int _err;
3396 struct vnop_mknod_args a;
3397
3398 a.a_desc = &vnop_mknod_desc;
3399 a.a_dvp = dvp;
3400 a.a_vpp = vpp;
3401 a.a_cnp = cnp;
3402 a.a_vap = vap;
3403 a.a_context = ctx;
3404
3405 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3406 if (_err == 0 && *vpp) {
3407 DTRACE_FSINFO(mknod, vnode_t, *vpp);
3408 }
3409
3410 post_event_if_success(dvp, _err, NOTE_WRITE);
3411
3412 return _err;
3413 }
3414
3415 #if 0
3416 /*
3417 *#
3418 *#% open vp L L L
3419 *#
3420 */
3421 struct vnop_open_args {
3422 struct vnodeop_desc *a_desc;
3423 vnode_t a_vp;
3424 int a_mode;
3425 vfs_context_t a_context;
3426 };
3427 #endif /* 0*/
3428 errno_t
3429 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3430 {
3431 int _err;
3432 struct vnop_open_args a;
3433
3434 if (ctx == NULL) {
3435 ctx = vfs_context_current();
3436 }
3437 a.a_desc = &vnop_open_desc;
3438 a.a_vp = vp;
3439 a.a_mode = mode;
3440 a.a_context = ctx;
3441
3442 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3443 DTRACE_FSINFO(open, vnode_t, vp);
3444
3445 return _err;
3446 }
3447
3448 #if 0
3449 /*
3450 *#
3451 *#% close vp U U U
3452 *#
3453 */
3454 struct vnop_close_args {
3455 struct vnodeop_desc *a_desc;
3456 vnode_t a_vp;
3457 int a_fflag;
3458 vfs_context_t a_context;
3459 };
3460 #endif /* 0*/
3461 errno_t
3462 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3463 {
3464 int _err;
3465 struct vnop_close_args a;
3466
3467 if (ctx == NULL) {
3468 ctx = vfs_context_current();
3469 }
3470 a.a_desc = &vnop_close_desc;
3471 a.a_vp = vp;
3472 a.a_fflag = fflag;
3473 a.a_context = ctx;
3474
3475 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3476 DTRACE_FSINFO(close, vnode_t, vp);
3477
3478 return _err;
3479 }
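/*
 * Illustrative sketch: VNOP_OPEN/VNOP_CLOSE above are the raw dispatch
 * points; in-kernel consumers typically use the vnode_open()/vnode_close()
 * KPIs declared in sys/vnode.h, which handle the path lookup and the
 * open/close VNOPs for them.  A minimal sketch assuming those interfaces;
 * the function name is hypothetical.
 */
#if 0 /* illustrative only */
static int
example_open_close(const char *path, vfs_context_t ctx)
{
	vnode_t vp = NULLVP;
	int error;

	error = vnode_open(path, FREAD, 0 /* cmode */, 0 /* flags */, &vp, ctx);
	if (error == 0) {
		error = vnode_close(vp, FREAD, ctx);
	}
	return error;
}
#endif /* illustrative only */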
3480
3481 #if 0
3482 /*
3483 *#
3484 *#% access vp L L L
3485 *#
3486 */
3487 struct vnop_access_args {
3488 struct vnodeop_desc *a_desc;
3489 vnode_t a_vp;
3490 int a_action;
3491 vfs_context_t a_context;
3492 };
3493 #endif /* 0*/
3494 errno_t
3495 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3496 {
3497 int _err;
3498 struct vnop_access_args a;
3499
3500 if (ctx == NULL) {
3501 ctx = vfs_context_current();
3502 }
3503 a.a_desc = &vnop_access_desc;
3504 a.a_vp = vp;
3505 a.a_action = action;
3506 a.a_context = ctx;
3507
3508 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3509 DTRACE_FSINFO(access, vnode_t, vp);
3510
3511 return _err;
3512 }
3513
3514 #if 0
3515 /*
3516 *#
3517 *#% getattr vp = = =
3518 *#
3519 */
3520 struct vnop_getattr_args {
3521 struct vnodeop_desc *a_desc;
3522 vnode_t a_vp;
3523 struct vnode_attr *a_vap;
3524 vfs_context_t a_context;
3525 };
3526 #endif /* 0*/
3527 errno_t
3528 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3529 {
3530 int _err;
3531 struct vnop_getattr_args a;
3532
3533 a.a_desc = &vnop_getattr_desc;
3534 a.a_vp = vp;
3535 a.a_vap = vap;
3536 a.a_context = ctx;
3537
3538 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3539 DTRACE_FSINFO(getattr, vnode_t, vp);
3540
3541 return _err;
3542 }
3543
3544 #if 0
3545 /*
3546 *#
3547 *#% setattr vp L L L
3548 *#
3549 */
3550 struct vnop_setattr_args {
3551 struct vnodeop_desc *a_desc;
3552 vnode_t a_vp;
3553 struct vnode_attr *a_vap;
3554 vfs_context_t a_context;
3555 };
3556 #endif /* 0*/
3557 errno_t
3558 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3559 {
3560 int _err;
3561 struct vnop_setattr_args a;
3562
3563 a.a_desc = &vnop_setattr_desc;
3564 a.a_vp = vp;
3565 a.a_vap = vap;
3566 a.a_context = ctx;
3567
3568 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3569 DTRACE_FSINFO(setattr, vnode_t, vp);
3570
3571 #if CONFIG_APPLEDOUBLE
3572 /*
3573 * Shadow uid/gid/mod change to extended attribute file.
3574 */
3575 if (_err == 0 && !NATIVE_XATTR(vp)) {
3576 struct vnode_attr va;
3577 int change = 0;
3578
3579 VATTR_INIT(&va);
3580 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3581 VATTR_SET(&va, va_uid, vap->va_uid);
3582 change = 1;
3583 }
3584 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3585 VATTR_SET(&va, va_gid, vap->va_gid);
3586 change = 1;
3587 }
3588 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3589 VATTR_SET(&va, va_mode, vap->va_mode);
3590 change = 1;
3591 }
3592 if (change) {
3593 vnode_t dvp;
3594 const char *vname;
3595
3596 dvp = vnode_getparent(vp);
3597 vname = vnode_getname(vp);
3598
3599 xattrfile_setattr(dvp, vname, &va, ctx);
3600 if (dvp != NULLVP) {
3601 vnode_put(dvp);
3602 }
3603 if (vname != NULL) {
3604 vnode_putname(vname);
3605 }
3606 }
3607 }
3608 #endif /* CONFIG_APPLEDOUBLE */
3609
3610 /*
3611 * If we have changed any of the things about the file that are likely
3612 * to result in changes to authorization results, blow the vnode auth
3613 * cache
3614 */
3615 if (_err == 0 && (
3616 VATTR_IS_SUPPORTED(vap, va_mode) ||
3617 VATTR_IS_SUPPORTED(vap, va_uid) ||
3618 VATTR_IS_SUPPORTED(vap, va_gid) ||
3619 VATTR_IS_SUPPORTED(vap, va_flags) ||
3620 VATTR_IS_SUPPORTED(vap, va_acl) ||
3621 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
3622 VATTR_IS_SUPPORTED(vap, va_guuid))) {
3623 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3624
3625 #if NAMEDSTREAMS
3626 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3627 vnode_t svp;
3628 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3629 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3630 vnode_put(svp);
3631 }
3632 }
3633 #endif /* NAMEDSTREAMS */
3634 }
3635
3636
3637 post_event_if_success(vp, _err, NOTE_ATTRIB);
3638
3639 return _err;
3640 }
3641
3642
3643 #if 0
3644 /*
3645 *#
3646 *#% read vp L L L
3647 *#
3648 */
3649 struct vnop_read_args {
3650 struct vnodeop_desc *a_desc;
3651 vnode_t a_vp;
3652 struct uio *a_uio;
3653 int a_ioflag;
3654 vfs_context_t a_context;
3655 };
3656 #endif /* 0*/
3657 errno_t
3658 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3659 {
3660 int _err;
3661 struct vnop_read_args a;
3662 #if CONFIG_DTRACE
3663 user_ssize_t resid = uio_resid(uio);
3664 #endif
3665
3666 if (ctx == NULL) {
3667 return EINVAL;
3668 }
3669
3670 a.a_desc = &vnop_read_desc;
3671 a.a_vp = vp;
3672 a.a_uio = uio;
3673 a.a_ioflag = ioflag;
3674 a.a_context = ctx;
3675
3676 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3677 DTRACE_FSINFO_IO(read,
3678 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3679
3680 return _err;
3681 }
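/*
 * Illustrative sketch (hypothetical caller): building a kernel-space uio for
 * the read path above, in the same uio_create / uio_addiov / uio_free style
 * the filesec fallback earlier in this file uses.  Assumes the caller
 * already holds an opened, referenced vnode and a non-NULL context.
 */
#if 0 /* illustrative only */
static int
example_read_at(vnode_t vp, off_t offset, void *buf, size_t len, vfs_context_t ctx)
{
	uio_t uio;
	int error;

	if ((uio = uio_create(1, offset, UIO_SYSSPACE, UIO_READ)) == NULL) {
		return ENOMEM;
	}
	if ((error = uio_addiov(uio, CAST_USER_ADDR_T(buf), len)) == 0) {
		error = VNOP_READ(vp, uio, 0 /* ioflag */, ctx);
	}
	uio_free(uio);
	return error;
}
#endif /* illustrative only */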
3682
3683
3684 #if 0
3685 /*
3686 *#
3687 *#% write vp L L L
3688 *#
3689 */
3690 struct vnop_write_args {
3691 struct vnodeop_desc *a_desc;
3692 vnode_t a_vp;
3693 struct uio *a_uio;
3694 int a_ioflag;
3695 vfs_context_t a_context;
3696 };
3697 #endif /* 0*/
3698 errno_t
3699 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3700 {
3701 struct vnop_write_args a;
3702 int _err;
3703 #if CONFIG_DTRACE
3704 user_ssize_t resid = uio_resid(uio);
3705 #endif
3706
3707 if (ctx == NULL) {
3708 return EINVAL;
3709 }
3710
3711 a.a_desc = &vnop_write_desc;
3712 a.a_vp = vp;
3713 a.a_uio = uio;
3714 a.a_ioflag = ioflag;
3715 a.a_context = ctx;
3716
3717 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3718 DTRACE_FSINFO_IO(write,
3719 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3720
3721 post_event_if_success(vp, _err, NOTE_WRITE);
3722
3723 return _err;
3724 }
3725
3726
3727 #if 0
3728 /*
3729 *#
3730 *#% ioctl vp U U U
3731 *#
3732 */
3733 struct vnop_ioctl_args {
3734 struct vnodeop_desc *a_desc;
3735 vnode_t a_vp;
3736 u_long a_command;
3737 caddr_t a_data;
3738 int a_fflag;
3739 vfs_context_t a_context;
3740 };
3741 #endif /* 0*/
3742 errno_t
3743 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3744 {
3745 int _err;
3746 struct vnop_ioctl_args a;
3747
3748 if (ctx == NULL) {
3749 ctx = vfs_context_current();
3750 }
3751
3752 /*
3753 * This check should probably have been put in the TTY code instead...
3754 *
3755 * We have to be careful about what we assume during startup and shutdown.
3756 * We have to be able to use the root filesystem's device vnode even when
3757 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3758 * structure. If there is no data pointer, it doesn't matter whether
3759 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZE)
3760 * which passes NULL for its data pointer can therefore be used during
3761 * mount or unmount of the root filesystem.
3762 *
3763 * Depending on what root filesystems need to do during mount/unmount, we
3764 * may need to loosen this check again in the future.
3765 */
3766 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3767 if (data != NULL && !vnode_vfs64bitready(vp)) {
3768 return ENOTTY;
3769 }
3770 }
3771
3772 if ((command == DKIOCISSOLIDSTATE) && (vp == rootvp) && rootvp_is_ssd && data) {
3773 *data = 1;
3774 return 0;
3775 }
3776
3777 a.a_desc = &vnop_ioctl_desc;
3778 a.a_vp = vp;
3779 a.a_command = command;
3780 a.a_data = data;
3781 a.a_fflag = fflag;
3782 a.a_context = ctx;
3783
3784 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
3785 DTRACE_FSINFO(ioctl, vnode_t, vp);
3786
3787 return _err;
3788 }
3789
3790
3791 #if 0
3792 /*
3793 *#
3794 *#% select vp U U U
3795 *#
3796 */
3797 struct vnop_select_args {
3798 struct vnodeop_desc *a_desc;
3799 vnode_t a_vp;
3800 int a_which;
3801 int a_fflags;
3802 void *a_wql;
3803 vfs_context_t a_context;
3804 };
3805 #endif /* 0*/
3806 errno_t
3807 VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t ctx)
3808 {
3809 int _err;
3810 struct vnop_select_args a;
3811
3812 if (ctx == NULL) {
3813 ctx = vfs_context_current();
3814 }
3815 a.a_desc = &vnop_select_desc;
3816 a.a_vp = vp;
3817 a.a_which = which;
3818 a.a_fflags = fflags;
3819 a.a_context = ctx;
3820 a.a_wql = wql;
3821
3822 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3823 DTRACE_FSINFO(select, vnode_t, vp);
3824
3825 return _err;
3826 }
3827
3828
3829 #if 0
3830 /*
3831 *#
3832 *#% exchange fvp L L L
3833 *#% exchange tvp L L L
3834 *#
3835 */
3836 struct vnop_exchange_args {
3837 struct vnodeop_desc *a_desc;
3838 vnode_t a_fvp;
3839 vnode_t a_tvp;
3840 int a_options;
3841 vfs_context_t a_context;
3842 };
3843 #endif /* 0*/
3844 errno_t
3845 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3846 {
3847 int _err;
3848 struct vnop_exchange_args a;
3849
3850 a.a_desc = &vnop_exchange_desc;
3851 a.a_fvp = fvp;
3852 a.a_tvp = tvp;
3853 a.a_options = options;
3854 a.a_context = ctx;
3855
3856 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3857 DTRACE_FSINFO(exchange, vnode_t, fvp);
3858
3859 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3860 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3861 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3862
3863 return _err;
3864 }
3865
3866
3867 #if 0
3868 /*
3869 *#
3870 *#% revoke vp U U U
3871 *#
3872 */
3873 struct vnop_revoke_args {
3874 struct vnodeop_desc *a_desc;
3875 vnode_t a_vp;
3876 int a_flags;
3877 vfs_context_t a_context;
3878 };
3879 #endif /* 0*/
3880 errno_t
3881 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
3882 {
3883 struct vnop_revoke_args a;
3884 int _err;
3885
3886 a.a_desc = &vnop_revoke_desc;
3887 a.a_vp = vp;
3888 a.a_flags = flags;
3889 a.a_context = ctx;
3890
3891 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3892 DTRACE_FSINFO(revoke, vnode_t, vp);
3893
3894 return _err;
3895 }
3896
3897
3898 #if 0
3899 /*
3900 *#
3901 *# mmap_check - vp U U U
3902 *#
3903 */
3904 struct vnop_mmap_check_args {
3905 struct vnodeop_desc *a_desc;
3906 vnode_t a_vp;
3907 int a_flags;
3908 vfs_context_t a_context;
3909 };
3910 #endif /* 0 */
3911 errno_t
3912 VNOP_MMAP_CHECK(vnode_t vp, int flags, vfs_context_t ctx)
3913 {
3914 int _err;
3915 struct vnop_mmap_check_args a;
3916
3917 a.a_desc = &vnop_mmap_check_desc;
3918 a.a_vp = vp;
3919 a.a_flags = flags;
3920 a.a_context = ctx;
3921
3922 _err = (*vp->v_op[vnop_mmap_check_desc.vdesc_offset])(&a);
3923 if (_err == ENOTSUP) {
3924 _err = 0;
3925 }
3926 DTRACE_FSINFO(mmap_check, vnode_t, vp);
3927
3928 return _err;
3929 }
3930
3931 #if 0
3932 /*
3933 *#
3934 *# mmap - vp U U U
3935 *#
3936 */
3937 struct vnop_mmap_args {
3938 struct vnodeop_desc *a_desc;
3939 vnode_t a_vp;
3940 int a_fflags;
3941 vfs_context_t a_context;
3942 };
3943 #endif /* 0*/
3944 errno_t
3945 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
3946 {
3947 int _err;
3948 struct vnop_mmap_args a;
3949
3950 a.a_desc = &vnop_mmap_desc;
3951 a.a_vp = vp;
3952 a.a_fflags = fflags;
3953 a.a_context = ctx;
3954
3955 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3956 DTRACE_FSINFO(mmap, vnode_t, vp);
3957
3958 return _err;
3959 }
3960
3961
3962 #if 0
3963 /*
3964 *#
3965 *# mnomap - vp U U U
3966 *#
3967 */
3968 struct vnop_mnomap_args {
3969 struct vnodeop_desc *a_desc;
3970 vnode_t a_vp;
3971 vfs_context_t a_context;
3972 };
3973 #endif /* 0*/
3974 errno_t
3975 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
3976 {
3977 int _err;
3978 struct vnop_mnomap_args a;
3979
3980 a.a_desc = &vnop_mnomap_desc;
3981 a.a_vp = vp;
3982 a.a_context = ctx;
3983
3984 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3985 DTRACE_FSINFO(mnomap, vnode_t, vp);
3986
3987 return _err;
3988 }
3989
3990
3991 #if 0
3992 /*
3993 *#
3994 *#% fsync vp L L L
3995 *#
3996 */
3997 struct vnop_fsync_args {
3998 struct vnodeop_desc *a_desc;
3999 vnode_t a_vp;
4000 int a_waitfor;
4001 vfs_context_t a_context;
4002 };
4003 #endif /* 0*/
4004 errno_t
4005 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
4006 {
4007 struct vnop_fsync_args a;
4008 int _err;
4009
4010 a.a_desc = &vnop_fsync_desc;
4011 a.a_vp = vp;
4012 a.a_waitfor = waitfor;
4013 a.a_context = ctx;
4014
4015 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
4016 DTRACE_FSINFO(fsync, vnode_t, vp);
4017
4018 return _err;
4019 }
4020
4021
4022 #if 0
4023 /*
4024 *#
4025 *#% remove dvp L U U
4026 *#% remove vp L U U
4027 *#
4028 */
4029 struct vnop_remove_args {
4030 struct vnodeop_desc *a_desc;
4031 vnode_t a_dvp;
4032 vnode_t a_vp;
4033 struct componentname *a_cnp;
4034 int a_flags;
4035 vfs_context_t a_context;
4036 };
4037 #endif /* 0*/
4038 errno_t
4039 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
4040 {
4041 int _err;
4042 struct vnop_remove_args a;
4043
4044 a.a_desc = &vnop_remove_desc;
4045 a.a_dvp = dvp;
4046 a.a_vp = vp;
4047 a.a_cnp = cnp;
4048 a.a_flags = flags;
4049 a.a_context = ctx;
4050
4051 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
4052 DTRACE_FSINFO(remove, vnode_t, vp);
4053
4054 if (_err == 0) {
4055 vnode_setneedinactive(vp);
4056 #if CONFIG_APPLEDOUBLE
4057 if (!(NATIVE_XATTR(dvp))) {
4058 /*
4059 * Remove any associated extended attribute file (._ AppleDouble file).
4060 */
4061 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4062 }
4063 #endif /* CONFIG_APPLEDOUBLE */
4064 }
4065
4066 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4067 post_event_if_success(dvp, _err, NOTE_WRITE);
4068
4069 return _err;
4070 }
4071
4072 int
4073 VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
4074 {
4075 int _err;
4076 struct vnop_compound_remove_args a;
4077 int no_vp = (*vpp == NULLVP);
4078
4079 a.a_desc = &vnop_compound_remove_desc;
4080 a.a_dvp = dvp;
4081 a.a_vpp = vpp;
4082 a.a_cnp = &ndp->ni_cnd;
4083 a.a_flags = flags;
4084 a.a_vap = vap;
4085 a.a_context = ctx;
4086 a.a_remove_authorizer = vn_authorize_unlink;
4087
4088 _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
4089 if (_err == 0 && *vpp) {
4090 DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
4091 } else {
4092 DTRACE_FSINFO(compound_remove, vnode_t, dvp);
4093 }
4094 if (_err == 0) {
4095 vnode_setneedinactive(*vpp);
4096 #if CONFIG_APPLEDOUBLE
4097 if (!(NATIVE_XATTR(dvp))) {
4098 /*
4099 * Remove any associated extended attribute file (._ AppleDouble file).
4100 */
4101 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
4102 }
4103 #endif /* CONFIG_APPLEDOUBLE */
4104 }
4105
4106 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4107 post_event_if_success(dvp, _err, NOTE_WRITE);
4108
4109 if (no_vp) {
4110 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4111 if (*vpp && _err && _err != EKEEPLOOKING) {
4112 vnode_put(*vpp);
4113 *vpp = NULLVP;
4114 }
4115 }
4116
4117 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
4118
4119 return _err;
4120 }
4121
4122 #if 0
4123 /*
4124 *#
4125 *#% link vp U U U
4126 *#% link tdvp L U U
4127 *#
4128 */
4129 struct vnop_link_args {
4130 struct vnodeop_desc *a_desc;
4131 vnode_t a_vp;
4132 vnode_t a_tdvp;
4133 struct componentname *a_cnp;
4134 vfs_context_t a_context;
4135 };
4136 #endif /* 0*/
4137 errno_t
4138 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
4139 {
4140 int _err;
4141 struct vnop_link_args a;
4142
4143 #if CONFIG_APPLEDOUBLE
4144 /*
4145 * For file systems with non-native extended attributes,
4146 * disallow linking to an existing "._" Apple Double file.
4147 */
4148 if (!NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
4149 const char *vname;
4150
4151 vname = vnode_getname(vp);
4152 if (vname != NULL) {
4153 _err = 0;
4154 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
4155 _err = EPERM;
4156 }
4157 vnode_putname(vname);
4158 if (_err) {
4159 return _err;
4160 }
4161 }
4162 }
4163 #endif /* CONFIG_APPLEDOUBLE */
4164
4165 a.a_desc = &vnop_link_desc;
4166 a.a_vp = vp;
4167 a.a_tdvp = tdvp;
4168 a.a_cnp = cnp;
4169 a.a_context = ctx;
4170
4171 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
4172 DTRACE_FSINFO(link, vnode_t, vp);
4173
4174 post_event_if_success(vp, _err, NOTE_LINK);
4175 post_event_if_success(tdvp, _err, NOTE_WRITE);
4176
4177 return _err;
4178 }
4179
4180 errno_t
4181 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4182 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4183 vfs_rename_flags_t flags, vfs_context_t ctx)
4184 {
4185 int _err;
4186 struct nameidata *fromnd = NULL;
4187 struct nameidata *tond = NULL;
4188 #if CONFIG_APPLEDOUBLE
4189 vnode_t src_attr_vp = NULLVP;
4190 vnode_t dst_attr_vp = NULLVP;
4191 char smallname1[48];
4192 char smallname2[48];
4193 char *xfromname = NULL;
4194 char *xtoname = NULL;
4195 #endif /* CONFIG_APPLEDOUBLE */
4196 int batched;
4197 uint32_t tdfflags; // Target directory file flags
4198
4199 batched = vnode_compound_rename_available(fdvp);
4200
4201 if (!batched) {
4202 if (*fvpp == NULLVP) {
4203 panic("Not batched, and no fvp?");
4204 }
4205 }
4206
4207 #if CONFIG_APPLEDOUBLE
4208 /*
4209 * We need to preflight any potential AppleDouble file for the source file
4210 * before doing the rename operation, since we could potentially be doing
4211 * this operation on a network filesystem, and would end up duplicating
4212 * the work. Also, save the source and destination names. Skip it if the
4213 * source has a "._" prefix.
4214 */
4215
4216 size_t xfromname_len = 0;
4217 size_t xtoname_len = 0;
4218 if (!NATIVE_XATTR(fdvp) &&
4219 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
4220 int error;
4221
4222 /* Get source attribute file name. */
4223 xfromname_len = fcnp->cn_namelen + 3;
4224 if (xfromname_len > sizeof(smallname1)) {
4225 xfromname = kheap_alloc(KHEAP_TEMP, xfromname_len, Z_WAITOK);
4226 } else {
4227 xfromname = &smallname1[0];
4228 }
4229 strlcpy(xfromname, "._", xfromname_len);
4230 strlcat(xfromname, fcnp->cn_nameptr, xfromname_len);
4231
4232 /* Get destination attribute file name. */
4233 xtoname_len = tcnp->cn_namelen + 3;
4234 if (xtoname_len > sizeof(smallname2)) {
4235 xtoname = kheap_alloc(KHEAP_TEMP, xtoname_len, Z_WAITOK);
4236 } else {
4237 xtoname = &smallname2[0];
4238 }
4239 strlcpy(xtoname, "._", xtoname_len);
4240 strlcat(xtoname, tcnp->cn_nameptr, xtoname_len);
4241
4242 /*
4243 * Look up source attribute file, keep reference on it if exists.
4244 * Note that we do the namei with the nameiop of RENAME, which is different than
4245 * in the rename syscall. It's OK if the source file does not exist, since this
4246 * is only for AppleDouble files.
4247 */
4248 fromnd = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK);
4249 NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
4250 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
4251 fromnd->ni_dvp = fdvp;
4252 error = namei(fromnd);
4253
4254 /*
4255 * If there was an error looking up source attribute file,
4256 * we'll behave as if it didn't exist.
4257 */
4258
4259 if (error == 0) {
4260 if (fromnd->ni_vp) {
4261 /* src_attr_vp indicates need to call vnode_put / nameidone later */
4262 src_attr_vp = fromnd->ni_vp;
4263
4264 if (fromnd->ni_vp->v_type != VREG) {
4265 src_attr_vp = NULLVP;
4266 vnode_put(fromnd->ni_vp);
4267 }
4268 }
4269 /*
4270 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4271 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4272 * have a vnode here, so we drop our namei buffer for the source attribute file
4273 */
4274 if (src_attr_vp == NULLVP) {
4275 nameidone(fromnd);
4276 }
4277 }
4278 }
4279 #endif /* CONFIG_APPLEDOUBLE */
4280
4281 if (batched) {
4282 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
4283 if (_err != 0) {
4284 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
4285 }
4286 } else {
4287 if (flags) {
4288 _err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
4289 if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
4290 // Legacy...
4291 if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
4292 fcnp->cn_flags |= CN_SECLUDE_RENAME;
4293 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4294 }
4295 }
4296 } else {
4297 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4298 }
4299 }
4300
4301 /*
4302 * If moved to a new directory that is restricted,
4303 * set the restricted flag on the item moved.
4304 */
4305 if (_err == 0) {
4306 _err = vnode_flags(tdvp, &tdfflags, ctx);
4307 if (_err == 0) {
4308 uint32_t inherit_flags = tdfflags & (UF_DATAVAULT | SF_RESTRICTED);
4309 if (inherit_flags) {
4310 uint32_t fflags;
4311 _err = vnode_flags(*fvpp, &fflags, ctx);
4312 if (_err == 0 && fflags != (fflags | inherit_flags)) {
4313 struct vnode_attr va;
4314 VATTR_INIT(&va);
4315 VATTR_SET(&va, va_flags, fflags | inherit_flags);
4316 _err = vnode_setattr(*fvpp, &va, ctx);
4317 }
4318 }
4319 }
4320 }
4321
4322 #if CONFIG_MACF
4323 if (_err == 0) {
4324 mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
4325 if (flags & VFS_RENAME_SWAP) {
4326 mac_vnode_notify_rename(ctx, *tvpp, fdvp, fcnp);
4327 }
4328 }
4329 #endif
4330
4331 #if CONFIG_APPLEDOUBLE
4332 /*
4333 * Rename any associated extended attribute file (._ AppleDouble file).
4334 */
4335 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
4336 int error = 0;
4337
4338 /*
4339 * Get destination attribute file vnode.
4340 * Note that tdvp already has an iocount reference. Make sure to check that we
4341 * get a valid vnode from namei.
4342 */
4343 tond = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK);
4344 NDINIT(tond, RENAME, OP_RENAME,
4345 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4346 CAST_USER_ADDR_T(xtoname), ctx);
4347 tond->ni_dvp = tdvp;
4348 error = namei(tond);
4349
4350 if (error) {
4351 goto ad_error;
4352 }
4353
4354 if (tond->ni_vp) {
4355 dst_attr_vp = tond->ni_vp;
4356 }
4357
4358 if (src_attr_vp) {
4359 const char *old_name = src_attr_vp->v_name;
4360 vnode_t old_parent = src_attr_vp->v_parent;
4361
4362 if (batched) {
4363 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
4364 tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
4365 0, ctx);
4366 } else {
4367 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
4368 tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
4369 }
4370
4371 if (error == 0 && old_name == src_attr_vp->v_name &&
4372 old_parent == src_attr_vp->v_parent) {
4373 int update_flags = VNODE_UPDATE_NAME;
4374
4375 if (fdvp != tdvp) {
4376 update_flags |= VNODE_UPDATE_PARENT;
4377 }
4378
4379 if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
4380 vnode_update_identity(src_attr_vp, tdvp,
4381 tond->ni_cnd.cn_nameptr,
4382 tond->ni_cnd.cn_namelen,
4383 tond->ni_cnd.cn_hash,
4384 update_flags);
4385 }
4386 }
4387
4388 /* kevent notifications for moving resource files.
4389 * _err is zero if we're here, so the directory notifications are handled
4390 * by the code below; we only need to post the rename on the source and
4391 * possibly a delete on the destination.
4392 */
4393 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4394 if (dst_attr_vp) {
4395 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4396 }
4397 } else if (dst_attr_vp) {
4398 /*
4399 * Just delete destination attribute file vnode if it exists, since
4400 * we didn't have a source attribute file.
4401 * Note that tdvp already has an iocount reference.
4402 */
4403
4404 struct vnop_remove_args args;
4405
4406 args.a_desc = &vnop_remove_desc;
4407 args.a_dvp = tdvp;
4408 args.a_vp = dst_attr_vp;
4409 args.a_cnp = &tond->ni_cnd;
4410 args.a_context = ctx;
4411
4412 if (error == 0) {
4413 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
4414
4415 if (error == 0) {
4416 vnode_setneedinactive(dst_attr_vp);
4417 }
4418 }
4419
4420 /* kevent notification for deleting the destination's attribute file
4421 * if it existed. Only need to post the delete on the destination, since
4422 * the code below will handle the directories.
4423 */
4424 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4425 }
4426 }
4427 ad_error:
4428 if (src_attr_vp) {
4429 vnode_put(src_attr_vp);
4430 nameidone(fromnd);
4431 }
4432 if (dst_attr_vp) {
4433 vnode_put(dst_attr_vp);
4434 nameidone(tond);
4435 }
4436 if (xfromname && xfromname != &smallname1[0]) {
4437 kheap_free(KHEAP_TEMP, xfromname, xfromname_len);
4438 }
4439 if (xtoname && xtoname != &smallname2[0]) {
4440 kheap_free(KHEAP_TEMP, xtoname, xtoname_len);
4441 }
4442 #endif /* CONFIG_APPLEDOUBLE */
4443 kheap_free(KHEAP_TEMP, fromnd, sizeof(struct nameidata));
4444 kheap_free(KHEAP_TEMP, tond, sizeof(struct nameidata));
4445 return _err;
4446 }
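
/*
 * Illustrative sketch (not compiled here): the AppleDouble sidecar naming used
 * above.  On volumes without native xattr support, the extended attributes of
 * "name" live in a sibling file "._name", so renaming the main file is
 * followed by renaming that sidecar.  The helper name below is hypothetical;
 * it simply restates the small-buffer/heap-fallback pattern from the code
 * above.
 */
#if 0
static char *
ad_sidecar_name(const char *name, char *smallbuf, size_t smallbuf_len, size_t *out_len)
{
	size_t len = strlen(name) + 3;          /* "._" + name + NUL */
	char *buf;

	if (len > smallbuf_len) {
		buf = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK);
	} else {
		buf = smallbuf;
	}
	strlcpy(buf, "._", len);
	strlcat(buf, name, len);
	*out_len = len;                         /* caller frees only if buf != smallbuf */
	return buf;
}
#endif /* 0 */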
4447
4448
4449 #if 0
4450 /*
4451 *#
4452 *#% rename fdvp U U U
4453 *#% rename fvp U U U
4454 *#% rename tdvp L U U
4455 *#% rename tvp X U U
4456 *#
4457 */
4458 struct vnop_rename_args {
4459 struct vnodeop_desc *a_desc;
4460 vnode_t a_fdvp;
4461 vnode_t a_fvp;
4462 struct componentname *a_fcnp;
4463 vnode_t a_tdvp;
4464 vnode_t a_tvp;
4465 struct componentname *a_tcnp;
4466 vfs_context_t a_context;
4467 };
4468 #endif /* 0*/
4469 errno_t
4470 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4471 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4472 vfs_context_t ctx)
4473 {
4474 int _err = 0;
4475 struct vnop_rename_args a;
4476
4477 a.a_desc = &vnop_rename_desc;
4478 a.a_fdvp = fdvp;
4479 a.a_fvp = fvp;
4480 a.a_fcnp = fcnp;
4481 a.a_tdvp = tdvp;
4482 a.a_tvp = tvp;
4483 a.a_tcnp = tcnp;
4484 a.a_context = ctx;
4485
4486 /* do the rename of the main file. */
4487 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4488 DTRACE_FSINFO(rename, vnode_t, fdvp);
4489
4490 if (_err) {
4491 return _err;
4492 }
4493
4494 return post_rename(fdvp, fvp, tdvp, tvp);
4495 }
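
/*
 * Illustrative sketch (hypothetical filesystem code, not part of this file):
 * the dispatch above indexes the vnode's v_op table with
 * vnop_rename_desc.vdesc_offset.  That table is built from the
 * vnodeopv_entry_desc array a filesystem hands to vfs_fsadd() at registration
 * time.  All "examplefs_*" names are assumptions for illustration only.
 */
#if 0
static int examplefs_vnop_default(struct vnop_generic_args *ap);   /* hypothetical */
static int examplefs_vnop_rename(struct vnop_rename_args *ap);     /* hypothetical */

static int (**examplefs_vnodeop_p)(void *);

static struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vnop_default_desc, (int (*)(void *))examplefs_vnop_default },
	{ &vnop_rename_desc, (int (*)(void *))examplefs_vnop_rename },
	/* ... remaining vnops for the filesystem ... */
	{ NULL, NULL }
};

static struct vnodeopv_desc examplefs_vnodeop_opv_desc = {
	&examplefs_vnodeop_p, examplefs_vnodeop_entries
};
#endif /* 0 */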
4496
4497 static errno_t
4498 post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
4499 {
4500 if (tvp && tvp != fvp) {
4501 vnode_setneedinactive(tvp);
4502 }
4503
4504 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4505 int events = NOTE_WRITE;
4506 if (vnode_isdir(fvp)) {
4507 /* Link count on dir changed only if we are moving a dir and...
4508 * --Moved to new dir, not overwriting there
4509 * --Kept in same dir and DID overwrite
4510 */
4511 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4512 events |= NOTE_LINK;
4513 }
4514 }
4515
4516 lock_vnode_and_post(fdvp, events);
4517 if (fdvp != tdvp) {
4518 lock_vnode_and_post(tdvp, events);
4519 }
4520
4521 /* If you're replacing the target, post a deletion for it */
4522 if (tvp) {
4523 lock_vnode_and_post(tvp, NOTE_DELETE);
4524 }
4525
4526 lock_vnode_and_post(fvp, NOTE_RENAME);
4527
4528 return 0;
4529 }
4530
4531 #if 0
4532 /*
4533 *#
4534 *#% renamex fdvp U U U
4535 *#% renamex fvp U U U
4536 *#% renamex tdvp L U U
4537 *#% renamex tvp X U U
4538 *#
4539 */
4540 struct vnop_renamex_args {
4541 struct vnodeop_desc *a_desc;
4542 vnode_t a_fdvp;
4543 vnode_t a_fvp;
4544 struct componentname *a_fcnp;
4545 vnode_t a_tdvp;
4546 vnode_t a_tvp;
4547 struct componentname *a_tcnp;
4548 vfs_rename_flags_t a_flags;
4549 vfs_context_t a_context;
4550 };
4551 #endif /* 0*/
4552 errno_t
4553 VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4554 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4555 vfs_rename_flags_t flags, vfs_context_t ctx)
4556 {
4557 int _err = 0;
4558 struct vnop_renamex_args a;
4559
4560 a.a_desc = &vnop_renamex_desc;
4561 a.a_fdvp = fdvp;
4562 a.a_fvp = fvp;
4563 a.a_fcnp = fcnp;
4564 a.a_tdvp = tdvp;
4565 a.a_tvp = tvp;
4566 a.a_tcnp = tcnp;
4567 a.a_flags = flags;
4568 a.a_context = ctx;
4569
4570 /* do the rename of the main file. */
4571 _err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
4572 DTRACE_FSINFO(renamex, vnode_t, fdvp);
4573
4574 if (_err) {
4575 return _err;
4576 }
4577
4578 return post_rename(fdvp, fvp, tdvp, tvp);
4579 }
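
/*
 * Illustrative sketch (hypothetical filesystem code): a minimal renamex
 * handler that honors VFS_RENAME_EXCL and VFS_RENAME_SWAP and returns ENOTSUP
 * for anything else, which is what lets the caller of VNOP_RENAMEX above fall
 * back to the legacy VNOP_RENAME path when appropriate.
 */
#if 0
static int
examplefs_vnop_renamex(struct vnop_renamex_args *ap)
{
	if (ap->a_flags & ~(VFS_RENAME_EXCL | VFS_RENAME_SWAP)) {
		return ENOTSUP;
	}
	if ((ap->a_flags & VFS_RENAME_EXCL) && ap->a_tvp != NULLVP) {
		return EEXIST;
	}
	/* ... perform the (possibly swapping) rename on disk ... */
	return 0;
}
#endif /* 0 */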
4580
4581
4582 int
4583 VNOP_COMPOUND_RENAME(
4584 struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4585 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4586 uint32_t flags, vfs_context_t ctx)
4587 {
4588 int _err = 0;
4589 int events;
4590 struct vnop_compound_rename_args a;
4591 int no_fvp, no_tvp;
4592
4593 no_fvp = (*fvpp) == NULLVP;
4594 no_tvp = (*tvpp) == NULLVP;
4595
4596 a.a_desc = &vnop_compound_rename_desc;
4597
4598 a.a_fdvp = fdvp;
4599 a.a_fvpp = fvpp;
4600 a.a_fcnp = fcnp;
4601 a.a_fvap = fvap;
4602
4603 a.a_tdvp = tdvp;
4604 a.a_tvpp = tvpp;
4605 a.a_tcnp = tcnp;
4606 a.a_tvap = tvap;
4607
4608 a.a_flags = flags;
4609 a.a_context = ctx;
4610 a.a_rename_authorizer = vn_authorize_rename;
4611 a.a_reserved = NULL;
4612
4613 /* do the rename of the main file. */
4614 _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
4615 DTRACE_FSINFO(compound_rename, vnode_t, fdvp);
4616
4617 if (_err == 0) {
4618 if (*tvpp && *tvpp != *fvpp) {
4619 vnode_setneedinactive(*tvpp);
4620 }
4621 }
4622
4623 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4624 if (_err == 0 && *fvpp != *tvpp) {
4625 if (!*fvpp) {
4626 panic("No fvpp after compound rename?");
4627 }
4628
4629 events = NOTE_WRITE;
4630 if (vnode_isdir(*fvpp)) {
4631 /* Link count on dir changed only if we are moving a dir and...
4632 * --Moved to new dir, not overwriting there
4633 * --Kept in same dir and DID overwrite
4634 */
4635 if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
4636 events |= NOTE_LINK;
4637 }
4638 }
4639
4640 lock_vnode_and_post(fdvp, events);
4641 if (fdvp != tdvp) {
4642 lock_vnode_and_post(tdvp, events);
4643 }
4644
4645 /* If you're replacing the target, post a deletion for it */
4646 if (*tvpp) {
4647 lock_vnode_and_post(*tvpp, NOTE_DELETE);
4648 }
4649
4650 lock_vnode_and_post(*fvpp, NOTE_RENAME);
4651 }
4652
4653 if (no_fvp) {
4654 lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
4655 }
4656 if (no_tvp && *tvpp != NULLVP) {
4657 lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
4658 }
4659
4660 if (_err && _err != EKEEPLOOKING) {
4661 if (*fvpp) {
4662 vnode_put(*fvpp);
4663 *fvpp = NULLVP;
4664 }
4665 if (*tvpp) {
4666 vnode_put(*tvpp);
4667 *tvpp = NULLVP;
4668 }
4669 }
4670
4671 return _err;
4672 }
4673
4674 int
4675 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4676 struct vnode_attr *vap, vfs_context_t ctx)
4677 {
4678 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4679 panic("Non-CREATE nameiop in vn_mkdir()?");
4680 }
4681
4682 if (vnode_compound_mkdir_available(dvp)) {
4683 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4684 } else {
4685 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4686 }
4687 }
4688
4689 #if 0
4690 /*
4691 *#
4692 *#% mkdir dvp L U U
4693 *#% mkdir vpp - L -
4694 *#
4695 */
4696 struct vnop_mkdir_args {
4697 struct vnodeop_desc *a_desc;
4698 vnode_t a_dvp;
4699 vnode_t *a_vpp;
4700 struct componentname *a_cnp;
4701 struct vnode_attr *a_vap;
4702 vfs_context_t a_context;
4703 };
4704 #endif /* 0*/
4705 errno_t
4706 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4707 struct vnode_attr *vap, vfs_context_t ctx)
4708 {
4709 int _err;
4710 struct vnop_mkdir_args a;
4711
4712 a.a_desc = &vnop_mkdir_desc;
4713 a.a_dvp = dvp;
4714 a.a_vpp = vpp;
4715 a.a_cnp = cnp;
4716 a.a_vap = vap;
4717 a.a_context = ctx;
4718
4719 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4720 if (_err == 0 && *vpp) {
4721 DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4722 }
4723 #if CONFIG_APPLEDOUBLE
4724 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4725 /*
4726 * Remove stale Apple Double file (if any).
4727 */
4728 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4729 }
4730 #endif /* CONFIG_APPLEDOUBLE */
4731
4732 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4733
4734 return _err;
4735 }
4736
4737 int
4738 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4739 struct vnode_attr *vap, vfs_context_t ctx)
4740 {
4741 int _err;
4742 struct vnop_compound_mkdir_args a;
4743
4744 a.a_desc = &vnop_compound_mkdir_desc;
4745 a.a_dvp = dvp;
4746 a.a_vpp = vpp;
4747 a.a_cnp = &ndp->ni_cnd;
4748 a.a_vap = vap;
4749 a.a_flags = 0;
4750 a.a_context = ctx;
4751 #if 0
4752 a.a_mkdir_authorizer = vn_authorize_mkdir;
4753 #endif /* 0 */
4754 a.a_reserved = NULL;
4755
4756 _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
4757 if (_err == 0 && *vpp) {
4758 DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
4759 }
4760 #if CONFIG_APPLEDOUBLE
4761 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4762 /*
4763 * Remove stale Apple Double file (if any).
4764 */
4765 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4766 }
4767 #endif /* CONFIG_APPLEDOUBLE */
4768
4769 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4770
4771 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
4772 if (*vpp && _err && _err != EKEEPLOOKING) {
4773 vnode_put(*vpp);
4774 *vpp = NULLVP;
4775 }
4776
4777 return _err;
4778 }
4779
4780 int
4781 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4782 {
4783 if (vnode_compound_rmdir_available(dvp)) {
4784 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4785 } else {
4786 if (*vpp == NULLVP) {
4787 panic("NULL vp, but not a compound VNOP?");
4788 }
4789 if (vap != NULL) {
4790 panic("Non-NULL vap, but not a compound VNOP?");
4791 }
4792 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4793 }
4794 }
4795
4796 #if 0
4797 /*
4798 *#
4799 *#% rmdir dvp L U U
4800 *#% rmdir vp L U U
4801 *#
4802 */
4803 struct vnop_rmdir_args {
4804 struct vnodeop_desc *a_desc;
4805 vnode_t a_dvp;
4806 vnode_t a_vp;
4807 struct componentname *a_cnp;
4808 vfs_context_t a_context;
4809 };
4810
4811 #endif /* 0*/
4812 errno_t
4813 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4814 {
4815 int _err;
4816 struct vnop_rmdir_args a;
4817
4818 a.a_desc = &vnop_rmdir_desc;
4819 a.a_dvp = dvp;
4820 a.a_vp = vp;
4821 a.a_cnp = cnp;
4822 a.a_context = ctx;
4823
4824 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4825 DTRACE_FSINFO(rmdir, vnode_t, vp);
4826
4827 if (_err == 0) {
4828 vnode_setneedinactive(vp);
4829 #if CONFIG_APPLEDOUBLE
4830 if (!(NATIVE_XATTR(dvp))) {
4831 /*
4832 * Remove any associated extended attribute file (._ AppleDouble file).
4833 */
4834 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4835 }
4836 #endif
4837 }
4838
4839 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4840 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4841 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4842
4843 return _err;
4844 }
4845
4846 int
4847 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4848 struct vnode_attr *vap, vfs_context_t ctx)
4849 {
4850 int _err;
4851 struct vnop_compound_rmdir_args a;
4852 int no_vp;
4853
4854 a.a_desc = &vnop_compound_rmdir_desc;
4855 a.a_dvp = dvp;
4856 a.a_vpp = vpp;
4857 a.a_cnp = &ndp->ni_cnd;
4858 a.a_vap = vap;
4859 a.a_flags = 0;
4860 a.a_context = ctx;
4861 a.a_rmdir_authorizer = vn_authorize_rmdir;
4862 a.a_reserved = NULL;
4863
4864 no_vp = (*vpp == NULLVP);
4865
4866 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
4867 if (_err == 0 && *vpp) {
4868 DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
4869 }
4870 #if CONFIG_APPLEDOUBLE
4871 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4872 /*
4873 * Remove stale Apple Double file (if any).
4874 */
4875 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4876 }
4877 #endif
4878
4879 if (*vpp) {
4880 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4881 }
4882 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4883
4884 if (no_vp) {
4885 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4886
4887 #if 0 /* Removing orphaned ._ files requires a vp.... */
4888 if (*vpp && _err && _err != EKEEPLOOKING) {
4889 vnode_put(*vpp);
4890 *vpp = NULLVP;
4891 }
4892 #endif /* 0 */
4893 }
4894
4895 return _err;
4896 }
4897
4898 #if CONFIG_APPLEDOUBLE
4899 /*
4900 * Remove a ._ AppleDouble file
4901 */
4902 #define AD_STALE_SECS (180)
4903 static void
4904 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
4905 {
4906 vnode_t xvp;
4907 struct nameidata nd;
4908 char smallname[64];
4909 char *filename = NULL;
4910 size_t alloc_len;
4911 size_t copy_len;
4912
4913 if ((basename == NULL) || (basename[0] == '\0') ||
4914 (basename[0] == '.' && basename[1] == '_')) {
4915 return;
4916 }
4917 filename = &smallname[0];
4918 alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
4919 if (alloc_len >= sizeof(smallname)) {
4920 alloc_len++; /* snprintf result doesn't include '\0' */
4921 filename = kheap_alloc(KHEAP_TEMP, alloc_len, Z_WAITOK);
4922 copy_len = snprintf(filename, alloc_len, "._%s", basename);
4923 }
4924 NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
4925 CAST_USER_ADDR_T(filename), ctx);
4926 nd.ni_dvp = dvp;
4927 if (namei(&nd) != 0) {
4928 goto out2;
4929 }
4930
4931 xvp = nd.ni_vp;
4932 nameidone(&nd);
4933 if (xvp->v_type != VREG) {
4934 goto out1;
4935 }
4936
4937 /*
4938 * When creating a new object and a "._" file already
4939 * exists, check to see if it's a stale "._" file.
4940 *
4941 */
4942 if (!force) {
4943 struct vnode_attr va;
4944
4945 VATTR_INIT(&va);
4946 VATTR_WANTED(&va, va_data_size);
4947 VATTR_WANTED(&va, va_modify_time);
4948 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
4949 VATTR_IS_SUPPORTED(&va, va_data_size) &&
4950 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
4951 va.va_data_size != 0) {
4952 struct timeval tv;
4953
4954 microtime(&tv);
4955 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
4956 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
4957 force = 1; /* must be stale */
4958 }
4959 }
4960 }
4961 if (force) {
4962 int error;
4963
4964 error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
4965 if (error == 0) {
4966 vnode_setneedinactive(xvp);
4967 }
4968
4969 post_event_if_success(xvp, error, NOTE_DELETE);
4970 post_event_if_success(dvp, error, NOTE_WRITE);
4971 }
4972
4973 out1:
4974 vnode_put(dvp);
4975 vnode_put(xvp);
4976 out2:
4977 if (filename && filename != &smallname[0]) {
4978 kheap_free(KHEAP_TEMP, filename, alloc_len);
4979 }
4980 }
4981
4982 /*
4983 * Shadow uid/gid/mod to a ._ AppleDouble file
4984 */
4985 static void
4986 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
4987 vfs_context_t ctx)
4988 {
4989 vnode_t xvp;
4990 struct nameidata nd;
4991 char smallname[64];
4992 char *filename = NULL;
4993 size_t len;
4994
4995 if ((dvp == NULLVP) ||
4996 (basename == NULL) || (basename[0] == '\0') ||
4997 (basename[0] == '.' && basename[1] == '_')) {
4998 return;
4999 }
5000 filename = &smallname[0];
5001 len = snprintf(filename, sizeof(smallname), "._%s", basename);
5002 if (len >= sizeof(smallname)) {
5003 len++; /* snprintf result doesn't include '\0' */
5004 filename = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK);
5005 (void) snprintf(filename, len, "._%s", basename);
5006 }
5007 NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
5008 CAST_USER_ADDR_T(filename), ctx);
5009 nd.ni_dvp = dvp;
5010 if (namei(&nd) != 0) {
5011 goto out2;
5012 }
5013
5014 xvp = nd.ni_vp;
5015 nameidone(&nd);
5016
5017 if (xvp->v_type == VREG) {
5018 struct vnop_setattr_args a;
5019
5020 a.a_desc = &vnop_setattr_desc;
5021 a.a_vp = xvp;
5022 a.a_vap = vap;
5023 a.a_context = ctx;
5024
5025 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
5026 }
5027
5028 vnode_put(xvp);
5029 out2:
5030 if (filename && filename != &smallname[0]) {
5031 kheap_free(KHEAP_TEMP, filename, len);
5032 }
5033 }
5034 #endif /* CONFIG_APPLEDOUBLE */
5035
5036 #if 0
5037 /*
5038 *#
5039 *#% symlink dvp L U U
5040 *#% symlink vpp - U -
5041 *#
5042 */
5043 struct vnop_symlink_args {
5044 struct vnodeop_desc *a_desc;
5045 vnode_t a_dvp;
5046 vnode_t *a_vpp;
5047 struct componentname *a_cnp;
5048 struct vnode_attr *a_vap;
5049 char *a_target;
5050 vfs_context_t a_context;
5051 };
5052
5053 #endif /* 0*/
5054 errno_t
5055 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5056 struct vnode_attr *vap, char *target, vfs_context_t ctx)
5057 {
5058 int _err;
5059 struct vnop_symlink_args a;
5060
5061 a.a_desc = &vnop_symlink_desc;
5062 a.a_dvp = dvp;
5063 a.a_vpp = vpp;
5064 a.a_cnp = cnp;
5065 a.a_vap = vap;
5066 a.a_target = target;
5067 a.a_context = ctx;
5068
5069 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
5070 DTRACE_FSINFO(symlink, vnode_t, dvp);
5071 #if CONFIG_APPLEDOUBLE
5072 if (_err == 0 && !NATIVE_XATTR(dvp)) {
5073 /*
5074 * Remove stale Apple Double file (if any). Posts its own knotes
5075 */
5076 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
5077 }
5078 #endif /* CONFIG_APPLEDOUBLE */
5079
5080 post_event_if_success(dvp, _err, NOTE_WRITE);
5081
5082 return _err;
5083 }
5084
5085 #if 0
5086 /*
5087 *#
5088 *#% readdir vp L L L
5089 *#
5090 */
5091 struct vnop_readdir_args {
5092 struct vnodeop_desc *a_desc;
5093 vnode_t a_vp;
5094 struct uio *a_uio;
5095 int a_flags;
5096 int *a_eofflag;
5097 int *a_numdirent;
5098 vfs_context_t a_context;
5099 };
5100
5101 #endif /* 0*/
5102 errno_t
5103 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
5104 int *numdirent, vfs_context_t ctx)
5105 {
5106 int _err;
5107 struct vnop_readdir_args a;
5108 #if CONFIG_DTRACE
5109 user_ssize_t resid = uio_resid(uio);
5110 #endif
5111
5112 a.a_desc = &vnop_readdir_desc;
5113 a.a_vp = vp;
5114 a.a_uio = uio;
5115 a.a_flags = flags;
5116 a.a_eofflag = eofflag;
5117 a.a_numdirent = numdirent;
5118 a.a_context = ctx;
5119
5120 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
5121 DTRACE_FSINFO_IO(readdir,
5122 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5123
5124 return _err;
5125 }
5126
5127 #if 0
5128 /*
5129 *#
5130 *#% readdirattr vp L L L
5131 *#
5132 */
5133 struct vnop_readdirattr_args {
5134 struct vnodeop_desc *a_desc;
5135 vnode_t a_vp;
5136 struct attrlist *a_alist;
5137 struct uio *a_uio;
5138 uint32_t a_maxcount;
5139 uint32_t a_options;
5140 uint32_t *a_newstate;
5141 int *a_eofflag;
5142 uint32_t *a_actualcount;
5143 vfs_context_t a_context;
5144 };
5145
5146 #endif /* 0*/
5147 errno_t
5148 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
5149 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
5150 {
5151 int _err;
5152 struct vnop_readdirattr_args a;
5153 #if CONFIG_DTRACE
5154 user_ssize_t resid = uio_resid(uio);
5155 #endif
5156
5157 a.a_desc = &vnop_readdirattr_desc;
5158 a.a_vp = vp;
5159 a.a_alist = alist;
5160 a.a_uio = uio;
5161 a.a_maxcount = maxcount;
5162 a.a_options = options;
5163 a.a_newstate = newstate;
5164 a.a_eofflag = eofflag;
5165 a.a_actualcount = actualcount;
5166 a.a_context = ctx;
5167
5168 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
5169 DTRACE_FSINFO_IO(readdirattr,
5170 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5171
5172 return _err;
5173 }
5174
5175 #if 0
5176 struct vnop_getattrlistbulk_args {
5177 struct vnodeop_desc *a_desc;
5178 vnode_t a_vp;
5179 struct attrlist *a_alist;
5180 struct vnode_attr *a_vap;
5181 struct uio *a_uio;
5182 void *a_private;
5183 uint64_t a_options;
5184 int *a_eofflag;
5185 uint32_t *a_actualcount;
5186 vfs_context_t a_context;
5187 };
5188 #endif /* 0*/
5189 errno_t
5190 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
5191 struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
5192 int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
5193 {
5194 int _err;
5195 struct vnop_getattrlistbulk_args a;
5196 #if CONFIG_DTRACE
5197 user_ssize_t resid = uio_resid(uio);
5198 #endif
5199
5200 a.a_desc = &vnop_getattrlistbulk_desc;
5201 a.a_vp = vp;
5202 a.a_alist = alist;
5203 a.a_vap = vap;
5204 a.a_uio = uio;
5205 a.a_private = private;
5206 a.a_options = options;
5207 a.a_eofflag = eofflag;
5208 a.a_actualcount = actualcount;
5209 a.a_context = ctx;
5210
5211 _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
5212 DTRACE_FSINFO_IO(getattrlistbulk,
5213 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5214
5215 return _err;
5216 }
5217
5218 #if 0
5219 /*
5220 *#
5221 *#% readlink vp L L L
5222 *#
5223 */
5224 struct vnop_readlink_args {
5225 struct vnodeop_desc *a_desc;
5226 vnode_t a_vp;
5227 struct uio *a_uio;
5228 vfs_context_t a_context;
5229 };
5230 #endif /* 0 */
5231
5232 /*
5233 * Returns: 0 Success
5234 * lock_fsnode:ENOENT No such file or directory [only for VFS
5235 * that is not thread safe & vnode is
5236 * currently being/has been terminated]
5237 * <vfs_readlink>:EINVAL
5238 * <vfs_readlink>:???
5239 *
5240 * Note: The return codes from the underlying VFS's readlink routine
5241 * can't be fully enumerated here, since third party VFS authors
5242 * may not limit their error returns to the ones documented here,
5243 * even though this may result in some programs functioning
5244 * incorrectly.
5245 *
5246 * The return codes documented above are those which may currently
5247 * be returned by HFS from hfs_vnop_readlink, not including
5248 * additional error code which may be propagated from underlying
5249 * routines.
5250 */
5251 errno_t
5252 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
5253 {
5254 int _err;
5255 struct vnop_readlink_args a;
5256 #if CONFIG_DTRACE
5257 user_ssize_t resid = uio_resid(uio);
5258 #endif
5259 a.a_desc = &vnop_readlink_desc;
5260 a.a_vp = vp;
5261 a.a_uio = uio;
5262 a.a_context = ctx;
5263
5264 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
5265 DTRACE_FSINFO_IO(readlink,
5266 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5267
5268 return _err;
5269 }
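
/*
 * Illustrative sketch (hypothetical helper): reading a link target into a
 * caller-supplied kernel buffer with a stack-backed uio, the way in-kernel
 * callers typically drive VNOP_READLINK.
 */
#if 0
static int
example_read_link(vnode_t vp, char *buf, size_t bufsize, vfs_context_t ctx)
{
	char uio_store[UIO_SIZEOF(1)];
	uio_t auio;
	int error;

	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
	    &uio_store[0], sizeof(uio_store));
	uio_addiov(auio, CAST_USER_ADDR_T(buf), bufsize);

	error = VNOP_READLINK(vp, auio, ctx);
	if (error == 0) {
		/* bytes actually copied: bufsize - uio_resid(auio) */
	}
	return error;
}
#endif /* 0 */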
5270
5271 #if 0
5272 /*
5273 *#
5274 *#% inactive vp L U U
5275 *#
5276 */
5277 struct vnop_inactive_args {
5278 struct vnodeop_desc *a_desc;
5279 vnode_t a_vp;
5280 vfs_context_t a_context;
5281 };
5282 #endif /* 0*/
5283 errno_t
5284 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
5285 {
5286 int _err;
5287 struct vnop_inactive_args a;
5288
5289 a.a_desc = &vnop_inactive_desc;
5290 a.a_vp = vp;
5291 a.a_context = ctx;
5292
5293 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
5294 DTRACE_FSINFO(inactive, vnode_t, vp);
5295
5296 #if NAMEDSTREAMS
5297 /* For file systems that do not support named streams natively, mark
5298 * the shadow stream file vnode to be recycled as soon as the last
5299 * reference goes away. To avoid re-entering reclaim code, do not
5300 * call recycle on terminating namedstream vnodes.
5301 */
5302 if (vnode_isnamedstream(vp) &&
5303 (vp->v_parent != NULLVP) &&
5304 vnode_isshadow(vp) &&
5305 ((vp->v_lflag & VL_TERMINATE) == 0)) {
5306 vnode_recycle(vp);
5307 }
5308 #endif
5309
5310 return _err;
5311 }
5312
5313
5314 #if 0
5315 /*
5316 *#
5317 *#% reclaim vp U U U
5318 *#
5319 */
5320 struct vnop_reclaim_args {
5321 struct vnodeop_desc *a_desc;
5322 vnode_t a_vp;
5323 vfs_context_t a_context;
5324 };
5325 #endif /* 0*/
5326 errno_t
5327 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5328 {
5329 int _err;
5330 struct vnop_reclaim_args a;
5331
5332 a.a_desc = &vnop_reclaim_desc;
5333 a.a_vp = vp;
5334 a.a_context = ctx;
5335
5336 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5337 DTRACE_FSINFO(reclaim, vnode_t, vp);
5338
5339 return _err;
5340 }
5341
5342
5343 /*
5344 * Returns: 0 Success
5345 * lock_fsnode:ENOENT No such file or directory [only for VFS
5346 * that is not thread safe & vnode is
5347 * currently being/has been terminated]
5348 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5349 */
5350 #if 0
5351 /*
5352 *#
5353 *#% pathconf vp L L L
5354 *#
5355 */
5356 struct vnop_pathconf_args {
5357 struct vnodeop_desc *a_desc;
5358 vnode_t a_vp;
5359 int a_name;
5360 int32_t *a_retval;
5361 vfs_context_t a_context;
5362 };
5363 #endif /* 0*/
5364 errno_t
5365 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5366 {
5367 int _err;
5368 struct vnop_pathconf_args a;
5369
5370 a.a_desc = &vnop_pathconf_desc;
5371 a.a_vp = vp;
5372 a.a_name = name;
5373 a.a_retval = retval;
5374 a.a_context = ctx;
5375
5376 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5377 DTRACE_FSINFO(pathconf, vnode_t, vp);
5378
5379 return _err;
5380 }
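
/*
 * Illustrative usage sketch (hypothetical helper): asking the filesystem for
 * a pathconf limit, e.g. the maximum filename length on the volume
 * containing 'vp'.
 */
#if 0
static int
example_name_max(vnode_t vp, int32_t *name_max, vfs_context_t ctx)
{
	return VNOP_PATHCONF(vp, _PC_NAME_MAX, name_max, ctx);
}
#endif /* 0 */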
5381
5382 /*
5383 * Returns: 0 Success
5384 * err_advlock:ENOTSUP
5385 * lf_advlock:???
5386 * <vnop_advlock_desc>:???
5387 *
5388 * Notes: VFS implementations that provide advisory locking via calls
5389 * through <vnop_advlock_desc> (because lock enforcement does not
5390 * occur locally) should try to limit themselves to the return codes
5391 * documented above for lf_advlock and err_advlock.
5392 */
5393 #if 0
5394 /*
5395 *#
5396 *#% advlock vp U U U
5397 *#
5398 */
5399 struct vnop_advlock_args {
5400 struct vnodeop_desc *a_desc;
5401 vnode_t a_vp;
5402 caddr_t a_id;
5403 int a_op;
5404 struct flock *a_fl;
5405 int a_flags;
5406 vfs_context_t a_context;
5407 };
5408 #endif /* 0*/
5409 errno_t
5410 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
5411 {
5412 int _err;
5413 struct vnop_advlock_args a;
5414
5415 a.a_desc = &vnop_advlock_desc;
5416 a.a_vp = vp;
5417 a.a_id = id;
5418 a.a_op = op;
5419 a.a_fl = fl;
5420 a.a_flags = flags;
5421 a.a_context = ctx;
5422 a.a_timeout = timeout;
5423
5424 /* Disallow advisory locking on non-seekable vnodes */
5425 if (vnode_isfifo(vp)) {
5426 _err = err_advlock(&a);
5427 } else {
5428 if ((vp->v_flag & VLOCKLOCAL)) {
5429 /* Advisory locking done at this layer */
5430 _err = lf_advlock(&a);
5431 } else if (flags & F_OFD_LOCK) {
5432 /* Non-local locking doesn't work for OFD locks */
5433 _err = err_advlock(&a);
5434 } else {
5435 /* Advisory locking done by underlying filesystem */
5436 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
5437 }
5438 DTRACE_FSINFO(advlock, vnode_t, vp);
5439 if (op == F_UNLCK && flags == F_FLOCK) {
5440 post_event_if_success(vp, _err, NOTE_FUNLOCK);
5441 }
5442 }
5443
5444 return _err;
5445 }
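
/*
 * Illustrative sketch (hypothetical filesystem code): a filesystem that does
 * not enforce byte-range locks itself can ask for the VLOCKLOCAL behavior
 * checked above by calling vfs_setlocklocal() from its mount vnop, after
 * which advisory locking is handled at this layer by lf_advlock().
 */
#if 0
static int
examplefs_vfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
	/* ... read the superblock, set up private mount state ... */
	vfs_setlocklocal(mp);
	return 0;
}
#endif /* 0 */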
5446
5447
5448
5449 #if 0
5450 /*
5451 *#
5452 *#% allocate vp L L L
5453 *#
5454 */
5455 struct vnop_allocate_args {
5456 struct vnodeop_desc *a_desc;
5457 vnode_t a_vp;
5458 off_t a_length;
5459 u_int32_t a_flags;
5460 off_t *a_bytesallocated;
5461 off_t a_offset;
5462 vfs_context_t a_context;
5463 };
5464
5465 #endif /* 0*/
5466 errno_t
5467 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5468 {
5469 int _err;
5470 struct vnop_allocate_args a;
5471
5472 a.a_desc = &vnop_allocate_desc;
5473 a.a_vp = vp;
5474 a.a_length = length;
5475 a.a_flags = flags;
5476 a.a_bytesallocated = bytesallocated;
5477 a.a_offset = offset;
5478 a.a_context = ctx;
5479
5480 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5481 DTRACE_FSINFO(allocate, vnode_t, vp);
5482 #if CONFIG_FSE
5483 if (_err == 0) {
5484 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5485 }
5486 #endif
5487
5488 return _err;
5489 }
5490
5491 #if 0
5492 /*
5493 *#
5494 *#% pagein vp = = =
5495 *#
5496 */
5497 struct vnop_pagein_args {
5498 struct vnodeop_desc *a_desc;
5499 vnode_t a_vp;
5500 upl_t a_pl;
5501 upl_offset_t a_pl_offset;
5502 off_t a_f_offset;
5503 size_t a_size;
5504 int a_flags;
5505 vfs_context_t a_context;
5506 };
5507 #endif /* 0*/
5508 errno_t
5509 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5510 {
5511 int _err;
5512 struct vnop_pagein_args a;
5513
5514 a.a_desc = &vnop_pagein_desc;
5515 a.a_vp = vp;
5516 a.a_pl = pl;
5517 a.a_pl_offset = pl_offset;
5518 a.a_f_offset = f_offset;
5519 a.a_size = size;
5520 a.a_flags = flags;
5521 a.a_context = ctx;
5522
5523 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5524 DTRACE_FSINFO(pagein, vnode_t, vp);
5525
5526 return _err;
5527 }
5528
5529 #if 0
5530 /*
5531 *#
5532 *#% pageout vp = = =
5533 *#
5534 */
5535 struct vnop_pageout_args {
5536 struct vnodeop_desc *a_desc;
5537 vnode_t a_vp;
5538 upl_t a_pl;
5539 upl_offset_t a_pl_offset;
5540 off_t a_f_offset;
5541 size_t a_size;
5542 int a_flags;
5543 vfs_context_t a_context;
5544 };
5545
5546 #endif /* 0*/
5547 errno_t
5548 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5549 {
5550 int _err;
5551 struct vnop_pageout_args a;
5552
5553 a.a_desc = &vnop_pageout_desc;
5554 a.a_vp = vp;
5555 a.a_pl = pl;
5556 a.a_pl_offset = pl_offset;
5557 a.a_f_offset = f_offset;
5558 a.a_size = size;
5559 a.a_flags = flags;
5560 a.a_context = ctx;
5561
5562 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5563 DTRACE_FSINFO(pageout, vnode_t, vp);
5564
5565 post_event_if_success(vp, _err, NOTE_WRITE);
5566
5567 return _err;
5568 }
5569
5570 int
5571 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5572 {
5573 if (vnode_compound_remove_available(dvp)) {
5574 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5575 } else {
5576 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5577 }
5578 }
5579
5580 #if CONFIG_SEARCHFS
5581
5582 #if 0
5583 /*
5584 *#
5585 *#% searchfs vp L L L
5586 *#
5587 */
5588 struct vnop_searchfs_args {
5589 struct vnodeop_desc *a_desc;
5590 vnode_t a_vp;
5591 void *a_searchparams1;
5592 void *a_searchparams2;
5593 struct attrlist *a_searchattrs;
5594 uint32_t a_maxmatches;
5595 struct timeval *a_timelimit;
5596 struct attrlist *a_returnattrs;
5597 uint32_t *a_nummatches;
5598 uint32_t a_scriptcode;
5599 uint32_t a_options;
5600 struct uio *a_uio;
5601 struct searchstate *a_searchstate;
5602 vfs_context_t a_context;
5603 };
5604
5605 #endif /* 0*/
5606 errno_t
5607 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5608 {
5609 int _err;
5610 struct vnop_searchfs_args a;
5611
5612 a.a_desc = &vnop_searchfs_desc;
5613 a.a_vp = vp;
5614 a.a_searchparams1 = searchparams1;
5615 a.a_searchparams2 = searchparams2;
5616 a.a_searchattrs = searchattrs;
5617 a.a_maxmatches = maxmatches;
5618 a.a_timelimit = timelimit;
5619 a.a_returnattrs = returnattrs;
5620 a.a_nummatches = nummatches;
5621 a.a_scriptcode = scriptcode;
5622 a.a_options = options;
5623 a.a_uio = uio;
5624 a.a_searchstate = searchstate;
5625 a.a_context = ctx;
5626
5627 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5628 DTRACE_FSINFO(searchfs, vnode_t, vp);
5629
5630 return _err;
5631 }
5632 #endif /* CONFIG_SEARCHFS */
5633
5634 #if 0
5635 /*
5636 *#
5637 *#% copyfile fvp U U U
5638 *#% copyfile tdvp L U U
5639 *#% copyfile tvp X U U
5640 *#
5641 */
5642 struct vnop_copyfile_args {
5643 struct vnodeop_desc *a_desc;
5644 vnode_t a_fvp;
5645 vnode_t a_tdvp;
5646 vnode_t a_tvp;
5647 struct componentname *a_tcnp;
5648 int a_mode;
5649 int a_flags;
5650 vfs_context_t a_context;
5651 };
5652 #endif /* 0*/
5653 errno_t
5654 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5655 int mode, int flags, vfs_context_t ctx)
5656 {
5657 int _err;
5658 struct vnop_copyfile_args a;
5659 a.a_desc = &vnop_copyfile_desc;
5660 a.a_fvp = fvp;
5661 a.a_tdvp = tdvp;
5662 a.a_tvp = tvp;
5663 a.a_tcnp = tcnp;
5664 a.a_mode = mode;
5665 a.a_flags = flags;
5666 a.a_context = ctx;
5667 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5668 DTRACE_FSINFO(copyfile, vnode_t, fvp);
5669 return _err;
5670 }
5671
5672 #if 0
5673 struct vnop_clonefile_args {
5674 struct vnodeop_desc *a_desc;
5675 vnode_t a_fvp;
5676 vnode_t a_dvp;
5677 vnode_t *a_vpp;
5678 struct componentname *a_cnp;
5679 struct vnode_attr *a_vap;
5680 uint32_t a_flags;
5681 vfs_context_t a_context;
5682 int (*a_dir_clone_authorizer)( /* Authorization callback */
5683 struct vnode_attr *vap, /* attribute to be authorized */
5684 kauth_action_t action, /* action for which attribute is to be authorized */
5685 struct vnode_attr *dvap, /* target directory attributes */
5686 vnode_t sdvp, /* source directory vnode pointer (optional) */
5687 mount_t mp, /* mount point of filesystem */
5688 dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */
5689 uint32_t flags, /* value passed in a_flags to the VNOP */
5690 vfs_context_t ctx, /* As passed to VNOP */
5691 void *reserved); /* Always NULL */
5692 void *a_reserved; /* Currently unused */
5693 };
5694 #endif /* 0 */
5695
5696 errno_t
5697 VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
5698 struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
5699 vfs_context_t ctx)
5700 {
5701 int _err;
5702 struct vnop_clonefile_args a;
5703 a.a_desc = &vnop_clonefile_desc;
5704 a.a_fvp = fvp;
5705 a.a_dvp = dvp;
5706 a.a_vpp = vpp;
5707 a.a_cnp = cnp;
5708 a.a_vap = vap;
5709 a.a_flags = flags;
5710 a.a_context = ctx;
5711
5712 if (vnode_vtype(fvp) == VDIR) {
5713 a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
5714 } else {
5715 a.a_dir_clone_authorizer = NULL;
5716 }
5717
5718 _err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);
5719
5720 if (_err == 0 && *vpp) {
5721 DTRACE_FSINFO(clonefile, vnode_t, *vpp);
5722 if (kdebug_enable) {
5723 kdebug_lookup(*vpp, cnp);
5724 }
5725 }
5726
5727 post_event_if_success(dvp, _err, NOTE_WRITE);
5728
5729 return _err;
5730 }
5731
5732 errno_t
5733 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5734 {
5735 struct vnop_getxattr_args a;
5736 int error;
5737
5738 a.a_desc = &vnop_getxattr_desc;
5739 a.a_vp = vp;
5740 a.a_name = name;
5741 a.a_uio = uio;
5742 a.a_size = size;
5743 a.a_options = options;
5744 a.a_context = ctx;
5745
5746 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
5747 DTRACE_FSINFO(getxattr, vnode_t, vp);
5748
5749 return error;
5750 }
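
/*
 * Illustrative sketch (hypothetical helper): the common two-step xattr read.
 * By convention, passing a NULL uio asks the filesystem to report only the
 * attribute's size; a second call with a uio then copies the data out.
 */
#if 0
static int
example_get_xattr_size(vnode_t vp, const char *name, size_t *sizep, vfs_context_t ctx)
{
	/* NULL uio: request the size only */
	return VNOP_GETXATTR(vp, name, NULL, sizep, 0, ctx);
}
#endif /* 0 */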
5751
5752 errno_t
5753 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
5754 {
5755 struct vnop_setxattr_args a;
5756 int error;
5757
5758 a.a_desc = &vnop_setxattr_desc;
5759 a.a_vp = vp;
5760 a.a_name = name;
5761 a.a_uio = uio;
5762 a.a_options = options;
5763 a.a_context = ctx;
5764
5765 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
5766 DTRACE_FSINFO(setxattr, vnode_t, vp);
5767
5768 if (error == 0) {
5769 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
5770 }
5771
5772 post_event_if_success(vp, error, NOTE_ATTRIB);
5773
5774 return error;
5775 }
5776
5777 errno_t
5778 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
5779 {
5780 struct vnop_removexattr_args a;
5781 int error;
5782
5783 a.a_desc = &vnop_removexattr_desc;
5784 a.a_vp = vp;
5785 a.a_name = name;
5786 a.a_options = options;
5787 a.a_context = ctx;
5788
5789 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
5790 DTRACE_FSINFO(removexattr, vnode_t, vp);
5791
5792 post_event_if_success(vp, error, NOTE_ATTRIB);
5793
5794 return error;
5795 }
5796
5797 errno_t
5798 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5799 {
5800 struct vnop_listxattr_args a;
5801 int error;
5802
5803 a.a_desc = &vnop_listxattr_desc;
5804 a.a_vp = vp;
5805 a.a_uio = uio;
5806 a.a_size = size;
5807 a.a_options = options;
5808 a.a_context = ctx;
5809
5810 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
5811 DTRACE_FSINFO(listxattr, vnode_t, vp);
5812
5813 return error;
5814 }
5815
5816
5817 #if 0
5818 /*
5819 *#
5820 *#% blktooff vp = = =
5821 *#
5822 */
5823 struct vnop_blktooff_args {
5824 struct vnodeop_desc *a_desc;
5825 vnode_t a_vp;
5826 daddr64_t a_lblkno;
5827 off_t *a_offset;
5828 };
5829 #endif /* 0*/
5830 errno_t
5831 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
5832 {
5833 int _err;
5834 struct vnop_blktooff_args a;
5835
5836 a.a_desc = &vnop_blktooff_desc;
5837 a.a_vp = vp;
5838 a.a_lblkno = lblkno;
5839 a.a_offset = offset;
5840
5841 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
5842 DTRACE_FSINFO(blktooff, vnode_t, vp);
5843
5844 return _err;
5845 }
5846
5847 #if 0
5848 /*
5849 *#
5850 *#% offtoblk vp = = =
5851 *#
5852 */
5853 struct vnop_offtoblk_args {
5854 struct vnodeop_desc *a_desc;
5855 vnode_t a_vp;
5856 off_t a_offset;
5857 daddr64_t *a_lblkno;
5858 };
5859 #endif /* 0*/
5860 errno_t
5861 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
5862 {
5863 int _err;
5864 struct vnop_offtoblk_args a;
5865
5866 a.a_desc = &vnop_offtoblk_desc;
5867 a.a_vp = vp;
5868 a.a_offset = offset;
5869 a.a_lblkno = lblkno;
5870
5871 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
5872 DTRACE_FSINFO(offtoblk, vnode_t, vp);
5873
5874 return _err;
5875 }
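
/*
 * Illustrative sketch (hypothetical filesystem code): blktooff/offtoblk are
 * usually simple logical-block arithmetic.  With 4096-byte logical blocks,
 * logical block 3 maps to byte offset 12288 and back.  EXAMPLEFS_BLOCK_SIZE
 * and the function names are assumptions for illustration only.
 */
#if 0
#define EXAMPLEFS_BLOCK_SIZE 4096       /* hypothetical */

static int
examplefs_vnop_blktooff(struct vnop_blktooff_args *ap)
{
	*ap->a_offset = (off_t)ap->a_lblkno * EXAMPLEFS_BLOCK_SIZE;
	return 0;
}

static int
examplefs_vnop_offtoblk(struct vnop_offtoblk_args *ap)
{
	*ap->a_lblkno = (daddr64_t)(ap->a_offset / EXAMPLEFS_BLOCK_SIZE);
	return 0;
}
#endif /* 0 */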
5876
5877 #if 0
5878 /*
5879 *#
5880 *#% verify vp L L L
5881 *#
5882 */
5883 struct vnop_verify_args {
5884 struct vnodeop_desc *a_desc;
5885 vnode_t a_vp;
5886 off_t a_foffset;
5887 char *a_buf;
5888 size_t a_bufsize;
5889 size_t *a_verifyblksize;
5890 int a_flags;
5891 vfs_context_t a_context;
5892 };
5893 #endif
5894
5895 errno_t
5896 VNOP_VERIFY(struct vnode *vp, off_t foffset, uint8_t *buf, size_t bufsize,
5897 size_t *verify_block_size, vnode_verify_flags_t flags, vfs_context_t ctx)
5898 {
5899 int _err;
5900 struct vnop_verify_args a;
5901
5902 if (ctx == NULL) {
5903 ctx = vfs_context_current();
5904 }
5905 a.a_desc = &vnop_verify_desc;
5906 a.a_vp = vp;
5907 a.a_foffset = foffset;
5908 a.a_buf = buf;
5909 a.a_bufsize = bufsize;
5910 a.a_verifyblksize = verify_block_size;
5911 a.a_flags = flags;
5912 a.a_context = ctx;
5913
5914 _err = (*vp->v_op[vnop_verify_desc.vdesc_offset])(&a);
5915 DTRACE_FSINFO(verify, vnode_t, vp);
5916
5917 /* It is not an error for a filesystem to not support this VNOP */
5918 if (_err == ENOTSUP) {
5919 if (!buf && verify_block_size) {
5920 *verify_block_size = 0;
5921 }
5922
5923 _err = 0;
5924 }
5925
5926 return _err;
5927 }
5928
5929 #if 0
5930 /*
5931 *#
5932 *#% blockmap vp L L L
5933 *#
5934 */
5935 struct vnop_blockmap_args {
5936 struct vnodeop_desc *a_desc;
5937 vnode_t a_vp;
5938 off_t a_foffset;
5939 size_t a_size;
5940 daddr64_t *a_bpn;
5941 size_t *a_run;
5942 void *a_poff;
5943 int a_flags;
5944 vfs_context_t a_context;
5945 };
5946 #endif /* 0*/
5947 errno_t
5948 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
5949 {
5950 int _err;
5951 struct vnop_blockmap_args a;
5952 size_t localrun = 0;
5953
5954 if (ctx == NULL) {
5955 ctx = vfs_context_current();
5956 }
5957 a.a_desc = &vnop_blockmap_desc;
5958 a.a_vp = vp;
5959 a.a_foffset = foffset;
5960 a.a_size = size;
5961 a.a_bpn = bpn;
5962 a.a_run = &localrun;
5963 a.a_poff = poff;
5964 a.a_flags = flags;
5965 a.a_context = ctx;
5966
5967 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
5968 DTRACE_FSINFO(blockmap, vnode_t, vp);
5969
5970 /*
5971 * We used a local variable to request information from the underlying
5972 * filesystem about the length of the I/O run in question. If
5973 * we get malformed output from the filesystem, we cap it to the length
5974 * requested, at most. Update 'run' on the way out.
5975 */
5976 if (_err == 0) {
5977 if (localrun > size) {
5978 localrun = size;
5979 }
5980
5981 if (run) {
5982 *run = localrun;
5983 }
5984 }
5985
5986 return _err;
5987 }
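
/*
 * Illustrative usage sketch (hypothetical helper): mapping a file offset to
 * an on-disk block number and contiguous run length, the way the cluster I/O
 * layer consumes this VNOP.  'run' comes back capped to io_size by the
 * wrapper above.
 */
#if 0
static int
example_map_extent(vnode_t vp, off_t f_offset, size_t io_size,
    daddr64_t *blkno, size_t *run, vfs_context_t ctx)
{
	return VNOP_BLOCKMAP(vp, f_offset, io_size, blkno, run, NULL,
	    VNODE_READ, ctx);
}
#endif /* 0 */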
5988
5989 #if 0
5990 struct vnop_strategy_args {
5991 struct vnodeop_desc *a_desc;
5992 struct buf *a_bp;
5993 };
5994
5995 #endif /* 0*/
5996 errno_t
5997 VNOP_STRATEGY(struct buf *bp)
5998 {
5999 int _err;
6000 struct vnop_strategy_args a;
6001 vnode_t vp = buf_vnode(bp);
6002 a.a_desc = &vnop_strategy_desc;
6003 a.a_bp = bp;
6004 _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
6005 DTRACE_FSINFO(strategy, vnode_t, vp);
6006 return _err;
6007 }
6008
6009 #if 0
6010 struct vnop_bwrite_args {
6011 struct vnodeop_desc *a_desc;
6012 buf_t a_bp;
6013 };
6014 #endif /* 0*/
6015 errno_t
6016 VNOP_BWRITE(struct buf *bp)
6017 {
6018 int _err;
6019 struct vnop_bwrite_args a;
6020 vnode_t vp = buf_vnode(bp);
6021 a.a_desc = &vnop_bwrite_desc;
6022 a.a_bp = bp;
6023 _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
6024 DTRACE_FSINFO(bwrite, vnode_t, vp);
6025 return _err;
6026 }
6027
6028 #if 0
6029 struct vnop_kqfilt_add_args {
6030 struct vnodeop_desc *a_desc;
6031 struct vnode *a_vp;
6032 struct knote *a_kn;
6033 vfs_context_t a_context;
6034 };
6035 #endif
6036 errno_t
6037 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
6038 {
6039 int _err;
6040 struct vnop_kqfilt_add_args a;
6041
6042 a.a_desc = VDESC(vnop_kqfilt_add);
6043 a.a_vp = vp;
6044 a.a_kn = kn;
6045 a.a_context = ctx;
6046
6047 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
6048 DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
6049
6050 return _err;
6051 }
6052
6053 #if 0
6054 struct vnop_kqfilt_remove_args {
6055 struct vnodeop_desc *a_desc;
6056 struct vnode *a_vp;
6057 uintptr_t a_ident;
6058 vfs_context_t a_context;
6059 };
6060 #endif
6061 errno_t
6062 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
6063 {
6064 int _err;
6065 struct vnop_kqfilt_remove_args a;
6066
6067 a.a_desc = VDESC(vnop_kqfilt_remove);
6068 a.a_vp = vp;
6069 a.a_ident = ident;
6070 a.a_context = ctx;
6071
6072 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
6073 DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
6074
6075 return _err;
6076 }
6077
6078 errno_t
6079 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
6080 {
6081 int _err;
6082 struct vnop_monitor_args a;
6083
6084 a.a_desc = VDESC(vnop_monitor);
6085 a.a_vp = vp;
6086 a.a_events = events;
6087 a.a_flags = flags;
6088 a.a_handle = handle;
6089 a.a_context = ctx;
6090
6091 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
6092 DTRACE_FSINFO(monitor, vnode_t, vp);
6093
6094 return _err;
6095 }
6096
6097 #if 0
6098 struct vnop_setlabel_args {
6099 struct vnodeop_desc *a_desc;
6100 struct vnode *a_vp;
6101 struct label *a_vl;
6102 vfs_context_t a_context;
6103 };
6104 #endif
6105 errno_t
6106 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
6107 {
6108 int _err;
6109 struct vnop_setlabel_args a;
6110
6111 a.a_desc = VDESC(vnop_setlabel);
6112 a.a_vp = vp;
6113 a.a_vl = label;
6114 a.a_context = ctx;
6115
6116 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
6117 DTRACE_FSINFO(setlabel, vnode_t, vp);
6118
6119 return _err;
6120 }
6121
6122
6123 #if NAMEDSTREAMS
6124 /*
6125 * Get a named stream
6126 */
6127 errno_t
6128 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
6129 {
6130 int _err;
6131 struct vnop_getnamedstream_args a;
6132
6133 a.a_desc = &vnop_getnamedstream_desc;
6134 a.a_vp = vp;
6135 a.a_svpp = svpp;
6136 a.a_name = name;
6137 a.a_operation = operation;
6138 a.a_flags = flags;
6139 a.a_context = ctx;
6140
6141 _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
6142 DTRACE_FSINFO(getnamedstream, vnode_t, vp);
6143 return _err;
6144 }
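
/*
 * Illustrative usage sketch (hypothetical helper): opening the resource-fork
 * named stream of a file, which is how the shadow-file machinery is typically
 * reached on filesystems without native named stream support.
 */
#if 0
static int
example_open_rsrc_fork(vnode_t vp, vnode_t *svpp, vfs_context_t ctx)
{
	/* The caller must vnode_put(*svpp) when done with the stream. */
	return VNOP_GETNAMEDSTREAM(vp, svpp, XATTR_RESOURCEFORK_NAME,
	    NS_OPEN, 0, ctx);
}
#endif /* 0 */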
6145
6146 /*
6147 * Create a named stream
6148 */
6149 errno_t
6150 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
6151 {
6152 int _err;
6153 struct vnop_makenamedstream_args a;
6154
6155 a.a_desc = &vnop_makenamedstream_desc;
6156 a.a_vp = vp;
6157 a.a_svpp = svpp;
6158 a.a_name = name;
6159 a.a_flags = flags;
6160 a.a_context = ctx;
6161
6162 _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
6163 DTRACE_FSINFO(makenamedstream, vnode_t, vp);
6164 return _err;
6165 }
6166
6167
6168 /*
6169 * Remove a named stream
6170 */
6171 errno_t
6172 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6173 {
6174 int _err;
6175 struct vnop_removenamedstream_args a;
6176
6177 a.a_desc = &vnop_removenamedstream_desc;
6178 a.a_vp = vp;
6179 a.a_svp = svp;
6180 a.a_name = name;
6181 a.a_flags = flags;
6182 a.a_context = ctx;
6183
6184 _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6185 DTRACE_FSINFO(removenamedstream, vnode_t, vp);
6186 return _err;
6187 }
6188 #endif