1 /*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/disk.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf.h>
93 #include <sys/errno.h>
94 #include <kern/kalloc.h>
95 #include <sys/domain.h>
96 #include <sys/mbuf.h>
97 #include <sys/syslog.h>
98 #include <sys/ubc.h>
99 #include <sys/vm.h>
100 #include <sys/sysctl.h>
101 #include <sys/filedesc.h>
102 #include <sys/event.h>
103 #include <sys/fsevents.h>
104 #include <sys/user.h>
105 #include <sys/lockf.h>
106 #include <sys/xattr.h>
107 #include <sys/kdebug.h>
108
109 #include <kern/assert.h>
110 #include <kern/zalloc.h>
111 #include <kern/task.h>
112 #include <kern/policy_internal.h>
113
114 #include <libkern/OSByteOrder.h>
115
116 #include <miscfs/specfs/specdev.h>
117
118 #include <mach/mach_types.h>
119 #include <mach/memory_object_types.h>
120 #include <mach/task.h>
121
122 #if CONFIG_MACF
123 #include <security/mac_framework.h>
124 #endif
125
126 #if NULLFS
127 #include <miscfs/nullfs/nullfs.h>
128 #endif
129
130 #include <sys/sdt.h>
131
132 #define ESUCCESS 0
133 #undef mount_t
134 #undef vnode_t
135
136 #define COMPAT_ONLY
137
138 #define NATIVE_XATTR(VP) \
139 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
140
141 #if CONFIG_APPLEDOUBLE
142 static void xattrfile_remove(vnode_t dvp, const char *basename,
143 vfs_context_t ctx, int force);
144 static void xattrfile_setattr(vnode_t dvp, const char * basename,
145 struct vnode_attr * vap, vfs_context_t ctx);
146 #endif /* CONFIG_APPLEDOUBLE */
147
148 extern lck_rw_t rootvnode_rw_lock;
149
150 static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
151
152 static ZONE_VIEW_DEFINE(ZV_VFS_CONTEXT, "vfs_context",
153 KHEAP_ID_DEFAULT, sizeof(struct vfs_context));
154
155 /*
156 * vnode_setneedinactive
157 *
158 * Description: Indicate that when the last iocount on this vnode goes away,
159 * and the usecount is also zero, we should inform the filesystem
160 * via VNOP_INACTIVE.
161 *
162 * Parameters: vnode_t vnode to mark
163 *
164 * Returns: Nothing
165 *
166 * Notes: Notably used when we're deleting a file--we need not have a
167 * usecount, so VNOP_INACTIVE may not get called by anyone. We
168 * want it called when we drop our iocount.
169 */
170 void
171 vnode_setneedinactive(vnode_t vp)
172 {
173 cache_purge(vp);
174
175 vnode_lock_spin(vp);
176 vp->v_lflag |= VL_NEEDINACTIVE;
177 vnode_unlock(vp);
178 }
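/*
 * Illustrative sketch only (not part of the original source): a caller that
 * has just removed a file's last link while holding only an iocount can use
 * vnode_setneedinactive() so that VNOP_INACTIVE still runs when that iocount
 * is dropped.  The helper name "example_finish_remove" is hypothetical.
 */
#if 0
static void
example_finish_remove(vnode_t vp)
{
	vnode_setneedinactive(vp);      /* request VNOP_INACTIVE at last iocount */
	vnode_put(vp);                  /* dropping the iocount may trigger it now */
}
#endif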
179
180
181 /* ====================================================================== */
182 /* ************ EXTERNAL KERNEL APIS ********************************** */
183 /* ====================================================================== */
184
185 /*
186 * implementations of exported VFS operations
187 */
188 int
189 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
190 {
191 int error;
192
193 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) {
194 return ENOTSUP;
195 }
196
197 if (vfs_context_is64bit(ctx)) {
198 if (vfs_64bitready(mp)) {
199 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
200 } else {
201 error = ENOTSUP;
202 }
203 } else {
204 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
205 }
206
207 return error;
208 }
209
210 int
211 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
212 {
213 int error;
214
215 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) {
216 return ENOTSUP;
217 }
218
219 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
220
221 return error;
222 }
223
224 int
225 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
226 {
227 int error;
228
229 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) {
230 return ENOTSUP;
231 }
232
233 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
234
235 return error;
236 }
237
238 /*
239 * Returns: 0 Success
240 * ENOTSUP Not supported
241 * <vfs_root>:ENOENT
242 * <vfs_root>:???
243 *
244 * Note: The return codes from the underlying VFS's root routine can't
245 * be fully enumerated here, since third party VFS authors may not
246 * limit their error returns to the ones documented here, even
247 * though this may result in some programs functioning incorrectly.
248 *
249 * The return codes documented above are those which may currently
250 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
251 * for a call to hfs_vget on the volume mount point, not including
252 * additional error codes which may be propagated from underlying
253 * routines called by hfs_vget.
254 */
255 int
256 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
257 {
258 int error;
259
260 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) {
261 return ENOTSUP;
262 }
263
264 if (ctx == NULL) {
265 ctx = vfs_context_current();
266 }
267
268 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
269
270 return error;
271 }
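/*
 * Illustrative sketch only (not part of the original source): VFS_ROOT()
 * hands back the root vnode with an iocount, which the caller is expected
 * to drop with vnode_put().  "example_root_vtype" is a hypothetical helper.
 */
#if 0
static int
example_root_vtype(mount_t mp, vfs_context_t ctx)
{
	vnode_t rvp;
	int error;

	if ((error = VFS_ROOT(mp, &rvp, ctx)) != 0) {
		return error;           /* e.g. ENOTSUP, ENOENT */
	}
	printf("root vnode type %d\n", vnode_vtype(rvp));
	vnode_put(rvp);                 /* release the iocount from VFS_ROOT */
	return 0;
}
#endif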
272
273 int
274 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
275 {
276 int error;
277
278 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) {
279 return ENOTSUP;
280 }
281
282 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
283
284 return error;
285 }
286
287 int
288 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
289 {
290 int error;
291
292 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) {
293 return ENOTSUP;
294 }
295
296 if (ctx == NULL) {
297 ctx = vfs_context_current();
298 }
299
300 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
301
302 return error;
303 }
304
305 int
306 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
307 {
308 int error;
309
310 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) {
311 return ENOTSUP;
312 }
313
314 if (ctx == NULL) {
315 ctx = vfs_context_current();
316 }
317
318 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
319
320 return error;
321 }
322
323 int
324 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
325 {
326 int error;
327
328 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) {
329 return ENOTSUP;
330 }
331
332 if (ctx == NULL) {
333 ctx = vfs_context_current();
334 }
335
336 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
337
338 return error;
339 }
340
341 int
342 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
343 {
344 int error;
345
346 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) {
347 return ENOTSUP;
348 }
349
350 if (ctx == NULL) {
351 ctx = vfs_context_current();
352 }
353
354 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
355
356 return error;
357 }
358
359 int
360 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
361 {
362 int error;
363
364 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) {
365 return ENOTSUP;
366 }
367
368 if (ctx == NULL) {
369 ctx = vfs_context_current();
370 }
371
372 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
373
374 return error;
375 }
376
377 int
378 VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
379 {
380 int error;
381
382 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) {
383 return ENOTSUP;
384 }
385
386 if (ctx == NULL) {
387 ctx = vfs_context_current();
388 }
389
390 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
391
392 return error;
393 }
394
395 int
396 VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
397 int flags, vfs_context_t context)
398 {
399 if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl) {
400 return ENOTSUP;
401 }
402
403 return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
404 context ?: vfs_context_current());
405 }
406
407 int
408 VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
409 {
410 int error;
411
412 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0)) {
413 return ENOTSUP;
414 }
415
416 if (ctx == NULL) {
417 ctx = vfs_context_current();
418 }
419
420 error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
421
422 return error;
423 }
424
425 /* returns the cached throttle mask for the mount_t */
426 uint64_t
427 vfs_throttle_mask(mount_t mp)
428 {
429 return mp->mnt_throttle_mask;
430 }
431
432 /* returns a copy of vfs type name for the mount_t */
433 void
434 vfs_name(mount_t mp, char *buffer)
435 {
436 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
437 }
438
439 /* returns vfs type number for the mount_t */
440 int
441 vfs_typenum(mount_t mp)
442 {
443 return mp->mnt_vtable->vfc_typenum;
444 }
445
446 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
447 void*
448 vfs_mntlabel(mount_t mp)
449 {
450 return (void*)mp->mnt_mntlabel;
451 }
452
453 uint64_t
454 vfs_mount_id(mount_t mp)
455 {
456 return mp->mnt_mount_id;
457 }
458
459 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
460 uint64_t
461 vfs_flags(mount_t mp)
462 {
463 return (uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
464 }
465
466 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
467 void
468 vfs_setflags(mount_t mp, uint64_t flags)
469 {
470 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
471
472 mount_lock(mp);
473 mp->mnt_flag |= lflags;
474 mount_unlock(mp);
475 }
476
477 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
478 void
479 vfs_clearflags(mount_t mp, uint64_t flags)
480 {
481 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
482
483 mount_lock(mp);
484 mp->mnt_flag &= ~lflags;
485 mount_unlock(mp);
486 }
487
488 /* Is the mount_t ronly and upgrade read/write requested? */
489 int
490 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
491 {
492 return (mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR);
493 }
494
495
496 /* Is the mount_t mounted ronly */
497 int
498 vfs_isrdonly(mount_t mp)
499 {
500 return mp->mnt_flag & MNT_RDONLY;
501 }
502
503 /* Is the mount_t mounted for filesystem synchronous writes? */
504 int
505 vfs_issynchronous(mount_t mp)
506 {
507 return mp->mnt_flag & MNT_SYNCHRONOUS;
508 }
509
510 /* Is the mount_t mounted read/write? */
511 int
512 vfs_isrdwr(mount_t mp)
513 {
514 return (mp->mnt_flag & MNT_RDONLY) == 0;
515 }
516
517
518 /* Is mount_t marked for update (ie MNT_UPDATE) */
519 int
520 vfs_isupdate(mount_t mp)
521 {
522 return mp->mnt_flag & MNT_UPDATE;
523 }
524
525
526 /* Is mount_t marked for reload (ie MNT_RELOAD) */
527 int
528 vfs_isreload(mount_t mp)
529 {
530 return (mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD);
531 }
532
533 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
534 int
535 vfs_isforce(mount_t mp)
536 {
537 if (mp->mnt_lflag & MNT_LFORCE) {
538 return 1;
539 } else {
540 return 0;
541 }
542 }
543
544 int
545 vfs_isunmount(mount_t mp)
546 {
547 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
548 return 1;
549 } else {
550 return 0;
551 }
552 }
553
554 int
555 vfs_64bitready(mount_t mp)
556 {
557 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
558 return 1;
559 } else {
560 return 0;
561 }
562 }
563
564
565 int
566 vfs_authcache_ttl(mount_t mp)
567 {
568 if ((mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
569 return mp->mnt_authcache_ttl;
570 } else {
571 return CACHED_RIGHT_INFINITE_TTL;
572 }
573 }
574
575 void
576 vfs_setauthcache_ttl(mount_t mp, int ttl)
577 {
578 mount_lock(mp);
579 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
580 mp->mnt_authcache_ttl = ttl;
581 mount_unlock(mp);
582 }
583
584 void
585 vfs_clearauthcache_ttl(mount_t mp)
586 {
587 mount_lock(mp);
588 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
589 /*
590 * back to the default TTL value in case
591 * MNTK_AUTH_OPAQUE is set on this mount
592 */
593 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
594 mount_unlock(mp);
595 }
596
597 int
598 vfs_authopaque(mount_t mp)
599 {
600 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE)) {
601 return 1;
602 } else {
603 return 0;
604 }
605 }
606
607 int
608 vfs_authopaqueaccess(mount_t mp)
609 {
610 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS)) {
611 return 1;
612 } else {
613 return 0;
614 }
615 }
616
617 void
618 vfs_setauthopaque(mount_t mp)
619 {
620 mount_lock(mp);
621 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
622 mount_unlock(mp);
623 }
624
625 void
626 vfs_setauthopaqueaccess(mount_t mp)
627 {
628 mount_lock(mp);
629 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
630 mount_unlock(mp);
631 }
632
633 void
634 vfs_clearauthopaque(mount_t mp)
635 {
636 mount_lock(mp);
637 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
638 mount_unlock(mp);
639 }
640
641 void
642 vfs_clearauthopaqueaccess(mount_t mp)
643 {
644 mount_lock(mp);
645 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
646 mount_unlock(mp);
647 }
648
649 void
650 vfs_setextendedsecurity(mount_t mp)
651 {
652 mount_lock(mp);
653 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
654 mount_unlock(mp);
655 }
656
657 void
658 vfs_setmntsystem(mount_t mp)
659 {
660 mount_lock(mp);
661 mp->mnt_kern_flag |= MNTK_SYSTEM;
662 mount_unlock(mp);
663 }
664
665 void
666 vfs_setmntsystemdata(mount_t mp)
667 {
668 mount_lock(mp);
669 mp->mnt_kern_flag |= MNTK_SYSTEMDATA;
670 mount_unlock(mp);
671 }
672
673 void
674 vfs_setmntswap(mount_t mp)
675 {
676 mount_lock(mp);
677 mp->mnt_kern_flag |= (MNTK_SYSTEM | MNTK_SWAP_MOUNT);
678 mount_unlock(mp);
679 }
680
681 void
682 vfs_clearextendedsecurity(mount_t mp)
683 {
684 mount_lock(mp);
685 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
686 mount_unlock(mp);
687 }
688
689 void
690 vfs_setnoswap(mount_t mp)
691 {
692 mount_lock(mp);
693 mp->mnt_kern_flag |= MNTK_NOSWAP;
694 mount_unlock(mp);
695 }
696
697 void
698 vfs_clearnoswap(mount_t mp)
699 {
700 mount_lock(mp);
701 mp->mnt_kern_flag &= ~MNTK_NOSWAP;
702 mount_unlock(mp);
703 }
704
705 int
706 vfs_extendedsecurity(mount_t mp)
707 {
708 return mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY;
709 }
710
711 /* returns the max size of short symlink in this mount_t */
712 uint32_t
713 vfs_maxsymlen(mount_t mp)
714 {
715 return mp->mnt_maxsymlinklen;
716 }
717
718 /* set max size of short symlink on mount_t */
719 void
720 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
721 {
722 mp->mnt_maxsymlinklen = symlen;
723 }
724
725 boolean_t
726 vfs_is_basesystem(mount_t mp)
727 {
728 return ((mp->mnt_supl_kern_flag & MNTK_SUPL_BASESYSTEM) == 0) ? false : true;
729 }
730
731 /* return a pointer to the RO vfs_statfs associated with mount_t */
732 struct vfsstatfs *
733 vfs_statfs(mount_t mp)
734 {
735 return &mp->mnt_vfsstat;
736 }
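/*
 * Illustrative sketch only (not part of the original source): the returned
 * vfsstatfs should be treated as read-only; a caller can inspect fields such
 * as f_fstypename and f_mntonname directly.  "example_log_mount" is a
 * hypothetical helper.
 */
#if 0
static void
example_log_mount(mount_t mp)
{
	struct vfsstatfs *sp = vfs_statfs(mp);

	printf("%s mounted on %s (block size %u)\n",
	    sp->f_fstypename, sp->f_mntonname, sp->f_bsize);
}
#endif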
737
738 int
739 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
740 {
741 int error;
742
743 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) {
744 return error;
745 }
746
747 /*
748 * If we have a filesystem create time, use it to default some others.
749 */
750 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
751 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time)) {
752 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
753 }
754 }
755
756 return 0;
757 }
758
759 int
760 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
761 {
762 int error;
763
764 /*
765 * with a read-only system volume, we need to allow rename of the root volume
766 * even if it's read-only. Don't return EROFS here if setattr changes only
767 * the volume name
768 */
769 if (vfs_isrdonly(mp) &&
770 !((strcmp(mp->mnt_vfsstat.f_fstypename, "apfs") == 0) && (vfa->f_active == VFSATTR_f_vol_name))) {
771 return EROFS;
772 }
773
774 error = VFS_SETATTR(mp, vfa, ctx);
775
776 /*
777 * If we had alternate ways of setting vfs attributes, we'd
778 * fall back here.
779 */
780
781 return error;
782 }
783
784 /* return the private data handle stored in mount_t */
785 void *
786 vfs_fsprivate(mount_t mp)
787 {
788 return mp->mnt_data;
789 }
790
791 /* set the private data handle in mount_t */
792 void
793 vfs_setfsprivate(mount_t mp, void *mntdata)
794 {
795 mount_lock(mp);
796 mp->mnt_data = mntdata;
797 mount_unlock(mp);
798 }
799
800 /* query whether the mount point supports native EAs */
801 int
802 vfs_nativexattrs(mount_t mp)
803 {
804 return mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS;
805 }
806
807 /*
808 * return the block size of the underlying
809 * device associated with mount_t
810 */
811 int
812 vfs_devblocksize(mount_t mp)
813 {
814 return mp->mnt_devblocksize;
815 }
816
817 /*
818 * Returns vnode with an iocount that must be released with vnode_put()
819 */
820 vnode_t
821 vfs_vnodecovered(mount_t mp)
822 {
823 vnode_t vp = mp->mnt_vnodecovered;
824 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
825 return NULL;
826 } else {
827 return vp;
828 }
829 }
830
831 /*
832 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
833 * The iocount must be released with vnode_put(). Note that this KPI is subtle
834 * with respect to the validity of using this device vnode for anything substantial
835 * (which is discouraged). If commands are sent to the device driver without
836 * taking proper steps to ensure that the device is still open, chaos may ensue.
837 * Similarly, this routine should only be called if there is some guarantee that
838 * the mount itself is still valid.
839 */
840 vnode_t
841 vfs_devvp(mount_t mp)
842 {
843 vnode_t vp = mp->mnt_devvp;
844
845 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
846 return vp;
847 }
848
849 return NULLVP;
850 }
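/*
 * Illustrative sketch only (not part of the original source): per the caveats
 * above, any use of the device vnode should be brief, and the iocount
 * returned by vfs_devvp() must be dropped with vnode_put().
 * "example_with_devvp" is a hypothetical helper.
 */
#if 0
static void
example_with_devvp(mount_t mp)
{
	vnode_t devvp = vfs_devvp(mp);

	if (devvp == NULLVP) {
		return;                 /* no backing device vnode */
	}
	/* ... short-lived, carefully-guarded use of devvp ... */
	vnode_put(devvp);               /* release the iocount from vfs_devvp() */
}
#endif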
851
852 /*
853 * return the io attributes associated with mount_t
854 */
855 void
856 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
857 {
858 ioattrp->io_reserved[0] = NULL;
859 ioattrp->io_reserved[1] = NULL;
860 if (mp == NULL) {
861 ioattrp->io_maxreadcnt = MAXPHYS;
862 ioattrp->io_maxwritecnt = MAXPHYS;
863 ioattrp->io_segreadcnt = 32;
864 ioattrp->io_segwritecnt = 32;
865 ioattrp->io_maxsegreadsize = MAXPHYS;
866 ioattrp->io_maxsegwritesize = MAXPHYS;
867 ioattrp->io_devblocksize = DEV_BSIZE;
868 ioattrp->io_flags = 0;
869 ioattrp->io_max_swappin_available = 0;
870 } else {
871 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
872 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
873 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
874 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
875 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
876 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
877 ioattrp->io_devblocksize = mp->mnt_devblocksize;
878 ioattrp->io_flags = mp->mnt_ioflags;
879 ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
880 }
881 }
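/*
 * Illustrative sketch only (not part of the original source): a caller can
 * size its I/O against the mount's advertised limits; a NULL mount yields
 * the conservative MAXPHYS-based defaults above.  "example_max_read" is a
 * hypothetical helper.
 */
#if 0
static uint32_t
example_max_read(vnode_t vp)
{
	struct vfsioattr ioattr;

	vfs_ioattr(vnode_mount(vp), &ioattr);
	return ioattr.io_maxreadcnt;
}
#endif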
882
883
884 /*
885 * set the IO attributes associated with mount_t
886 */
887 void
888 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
889 {
890 if (mp == NULL) {
891 return;
892 }
893 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
894 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
895 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
896 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
897 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
898 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
899 mp->mnt_devblocksize = ioattrp->io_devblocksize;
900 mp->mnt_ioflags = ioattrp->io_flags;
901 mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
902 }
903
904 /*
905 * Add a new filesystem to the kernel, described by the passed-in
906 * vfs_fsentry structure. It fills in the vnode dispatch vector that
907 * will be used when vnodes are created for this filesystem.
908 * It returns a handle which is to be passed back when the FS is removed.
909 */
910 typedef int (*PFI)(void *);
911 extern int vfs_opv_numops;
912 errno_t
913 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
914 {
915 struct vfstable *newvfstbl = NULL;
916 int i, j;
917 int(***opv_desc_vector_p)(void *);
918 int(**opv_desc_vector)(void *);
919 const struct vnodeopv_entry_desc *opve_descp;
920 int desccount;
921 int descsize;
922 PFI *descptr;
923
924 /*
925 * This routine is responsible for all the initialization that would
926 * ordinarily be done as part of the system startup;
927 */
928
929 if (vfe == (struct vfs_fsentry *)0) {
930 return EINVAL;
931 }
932
933 desccount = vfe->vfe_vopcnt;
934 if ((desccount <= 0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
935 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) {
936 return EINVAL;
937 }
938
939 /* Non-threadsafe filesystems are not supported */
940 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
941 return EINVAL;
942 }
943
944 newvfstbl = kheap_alloc(KHEAP_TEMP, sizeof(struct vfstable),
945 Z_WAITOK | Z_ZERO);
946 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
947 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
948 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) {
949 newvfstbl->vfc_typenum = maxvfstypenum++;
950 } else {
951 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
952 }
953
954 newvfstbl->vfc_refcount = 0;
955 newvfstbl->vfc_flags = 0;
956 newvfstbl->vfc_mountroot = NULL;
957 newvfstbl->vfc_next = NULL;
958 newvfstbl->vfc_vfsflags = 0;
959 if (vfe->vfe_flags & VFS_TBL64BITREADY) {
960 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
961 }
962 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2) {
963 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
964 }
965 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2) {
966 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
967 }
968 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) {
969 newvfstbl->vfc_flags |= MNT_LOCAL;
970 }
971 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) {
972 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
973 } else {
974 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
975 }
976
977 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR) {
978 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
979 }
980 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT) {
981 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
982 }
983 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED) {
984 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
985 }
986 if (vfe->vfe_flags & VFS_TBLNOMACLABEL) {
987 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
988 }
989 if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) {
990 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
991 }
992 if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME) {
993 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
994 }
995 if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT) {
996 newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
997 }
998
999 /*
1000 * Allocate and init the vectors.
1001 * Also handle backwards compatibility.
1002 *
1003 * We allocate one large block to hold all <desccount>
1004 * vnode operation vectors stored contiguously.
1005 */
1006 /* XXX - shouldn't be M_TEMP */
1007
1008 descsize = desccount * vfs_opv_numops * sizeof(PFI);
1009 descptr = kheap_alloc(KHEAP_DEFAULT, descsize, Z_WAITOK | Z_ZERO);
1010
1011 newvfstbl->vfc_descptr = descptr;
1012 newvfstbl->vfc_descsize = descsize;
1013
1014 newvfstbl->vfc_sysctl = NULL;
1015
1016 for (i = 0; i < desccount; i++) {
1017 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1018 /*
1019 * Fill in the caller's pointer to the start of the i'th vector.
1020 * They'll need to supply it when calling vnode_create.
1021 */
1022 opv_desc_vector = descptr + i * vfs_opv_numops;
1023 *opv_desc_vector_p = opv_desc_vector;
1024
1025 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
1026 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
1027
1028 /* Silently skip known-disabled operations */
1029 if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
1030 printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
1031 vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
1032 continue;
1033 }
1034
1035 /*
1036 * Sanity check: is this operation listed
1037 * in the list of operations? We check this
1038 * by seeing if its offset is zero. Since
1039 * the default routine should always be listed
1040 * first, it should be the only one with a zero
1041 * offset. Any other operation with a zero
1042 * offset is probably not listed in
1043 * vfs_op_descs, and so is probably an error.
1044 *
1045 * A panic here means the layer programmer
1046 * has committed the all-too common bug
1047 * of adding a new operation to the layer's
1048 * list of vnode operations but
1049 * not adding the operation to the system-wide
1050 * list of supported operations.
1051 */
1052 if (opve_descp->opve_op->vdesc_offset == 0 &&
1053 opve_descp->opve_op != VDESC(vnop_default)) {
1054 printf("vfs_fsadd: operation %s not listed in %s.\n",
1055 opve_descp->opve_op->vdesc_name,
1056 "vfs_op_descs");
1057 panic("vfs_fsadd: bad operation");
1058 }
1059 /*
1060 * Fill in this entry.
1061 */
1062 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1063 opve_descp->opve_impl;
1064 }
1065
1066
1067 /*
1068 * Finally, go back and replace unfilled routines
1069 * with their default. (Sigh, an O(n^3) algorithm. I
1070 * could make it better, but that'd be work, and n is small.)
1071 */
1072 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1073
1074 /*
1075 * Force every operations vector to have a default routine.
1076 */
1077 opv_desc_vector = *opv_desc_vector_p;
1078 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
1079 panic("vfs_fsadd: operation vector without default routine.");
1080 }
1081 for (j = 0; j < vfs_opv_numops; j++) {
1082 if (opv_desc_vector[j] == NULL) {
1083 opv_desc_vector[j] =
1084 opv_desc_vector[VOFFSET(vnop_default)];
1085 }
1086 }
1087 } /* end of each vnodeopv_desc parsing */
1088
1089
1090
1091 *handle = vfstable_add(newvfstbl);
1092
1093 if (newvfstbl->vfc_typenum <= maxvfstypenum) {
1094 maxvfstypenum = newvfstbl->vfc_typenum + 1;
1095 }
1096
1097 if (newvfstbl->vfc_vfsops->vfs_init) {
1098 struct vfsconf vfsc;
1099 bzero(&vfsc, sizeof(struct vfsconf));
1100 vfsc.vfc_reserved1 = 0;
1101 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1102 vfsc.vfc_typenum = (*handle)->vfc_typenum;
1103 vfsc.vfc_refcount = (*handle)->vfc_refcount;
1104 vfsc.vfc_flags = (*handle)->vfc_flags;
1105 vfsc.vfc_reserved2 = 0;
1106 vfsc.vfc_reserved3 = 0;
1107
1108 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1109 }
1110
1111 kheap_free(KHEAP_TEMP, newvfstbl, sizeof(struct vfstable));
1112
1113 return 0;
1114 }
1115
1116 /*
1117 * Removes the filesystem from the kernel.
1118 * The argument passed in is the handle that was returned when the
1119 * file system was added.
1120 */
1121 errno_t
1122 vfs_fsremove(vfstable_t handle)
1123 {
1124 struct vfstable * vfstbl = (struct vfstable *)handle;
1125 void *old_desc = NULL;
1126 size_t descsize = 0;
1127 errno_t err;
1128
1129 /* Preflight check for any mounts */
1130 mount_list_lock();
1131 if (vfstbl->vfc_refcount != 0) {
1132 mount_list_unlock();
1133 return EBUSY;
1134 }
1135
1136 /*
1137 * save the old descriptor; the free cannot occur unconditionally,
1138 * since vfstable_del() may fail.
1139 */
1140 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1141 old_desc = vfstbl->vfc_descptr;
1142 descsize = vfstbl->vfc_descsize;
1143 }
1144 err = vfstable_del(vfstbl);
1145
1146 mount_list_unlock();
1147
1148 /* free the descriptor if the delete was successful */
1149 if (err == 0) {
1150 kheap_free(KHEAP_DEFAULT, old_desc, descsize);
1151 }
1152
1153 return err;
1154 }
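/*
 * Illustrative sketch only (not part of the original source): a loadable
 * filesystem typically fills in a vfs_fsentry and registers it with
 * vfs_fsadd() at load time, then calls vfs_fsremove() with the returned
 * handle at unload time.  The "examplefs_*" names below are hypothetical.
 */
#if 0
extern struct vfsops examplefs_vfsops;                          /* hypothetical */
extern struct vnodeopv_desc examplefs_vnodeop_opv_desc;         /* hypothetical */
static vfstable_t examplefs_vfstable;

static errno_t
examplefs_register(void)
{
	struct vnodeopv_desc *opvdescs[] = { &examplefs_vnodeop_opv_desc };
	struct vfs_fsentry vfe;

	bzero(&vfe, sizeof(vfe));
	vfe.vfe_vfsops = &examplefs_vfsops;
	vfe.vfe_vopcnt = 1;
	vfe.vfe_opvdescs = opvdescs;
	strlcpy(vfe.vfe_fsname, "examplefs", sizeof(vfe.vfe_fsname));
	vfe.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM;

	return vfs_fsadd(&vfe, &examplefs_vfstable);
}

static errno_t
examplefs_unregister(void)
{
	return vfs_fsremove(examplefs_vfstable);        /* EBUSY while mounted */
}
#endif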
1155
1156 void
1157 vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
1158 {
1159 mp->mnt_fsowner = uid;
1160 mp->mnt_fsgroup = gid;
1161 }
1162
1163 /*
1164 * Callers should be careful how they use this; accessing
1165 * mnt_last_write_completed_timestamp is not thread-safe. Writing to
1166 * it isn't either. Point is: be prepared to deal with strange values
1167 * being returned.
1168 */
1169 uint64_t
1170 vfs_idle_time(mount_t mp)
1171 {
1172 if (mp->mnt_pending_write_size) {
1173 return 0;
1174 }
1175
1176 struct timeval now;
1177
1178 microuptime(&now);
1179
1180 return (now.tv_sec
1181 - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
1182 + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec;
1183 }
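/*
 * Illustrative sketch only (not part of the original source): given the
 * thread-safety caveat above, the returned value (microseconds) should only
 * be used as a hint.  "example_mount_seems_idle" is a hypothetical helper.
 */
#if 0
static boolean_t
example_mount_seems_idle(mount_t mp)
{
	return (vfs_idle_time(mp) > 5ULL * 1000 * 1000) ? TRUE : FALSE;
}
#endif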
1184
1185 int
1186 vfs_context_pid(vfs_context_t ctx)
1187 {
1188 return proc_pid(vfs_context_proc(ctx));
1189 }
1190
1191 int
1192 vfs_context_suser(vfs_context_t ctx)
1193 {
1194 return suser(ctx->vc_ucred, NULL);
1195 }
1196
1197 /*
1198 * Return bit field of signals posted to all threads in the context's process.
1199 *
1200 * XXX Signals should be tied to threads, not processes, for most uses of this
1201 * XXX call.
1202 */
1203 int
1204 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1205 {
1206 proc_t p = vfs_context_proc(ctx);
1207 if (p) {
1208 return proc_pendingsignals(p, mask);
1209 }
1210 return 0;
1211 }
1212
1213 int
1214 vfs_context_is64bit(vfs_context_t ctx)
1215 {
1216 proc_t proc = vfs_context_proc(ctx);
1217
1218 if (proc) {
1219 return proc_is64bit(proc);
1220 }
1221 return 0;
1222 }
1223
1224 boolean_t
1225 vfs_context_can_resolve_triggers(vfs_context_t ctx)
1226 {
1227 proc_t proc = vfs_context_proc(ctx);
1228
1229 if (proc) {
1230 if (proc->p_vfs_iopolicy &
1231 P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE) {
1232 return false;
1233 }
1234 return true;
1235 }
1236 return false;
1237 }
1238
1239 /*
1240 * vfs_context_proc
1241 *
1242 * Description: Given a vfs_context_t, return the proc_t associated with it.
1243 *
1244 * Parameters: vfs_context_t The context to use
1245 *
1246 * Returns: proc_t The process for this context
1247 *
1248 * Notes: This function will return the current_proc() if any of the
1249 * following conditions are true:
1250 *
1251 * o The supplied context pointer is NULL
1252 * o There is no Mach thread associated with the context
1253 * o There is no Mach task associated with the Mach thread
1254 * o There is no proc_t associated with the Mach task
1255 * o The proc_t has no per process open file table
1256 * o The proc_t is post-vfork()
1257 *
1258 * This causes this function to return a value matching as
1259 * closely as possible the previous behaviour, while at the
1260 * same time avoiding the task lending that results from vfork()
1261 */
1262 proc_t
1263 vfs_context_proc(vfs_context_t ctx)
1264 {
1265 proc_t proc = NULL;
1266
1267 if (ctx != NULL && ctx->vc_thread != NULL) {
1268 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1269 }
1270 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK))) {
1271 proc = NULL;
1272 }
1273
1274 return proc == NULL ? current_proc() : proc;
1275 }
1276
1277 /*
1278 * vfs_context_get_special_port
1279 *
1280 * Description: Return the requested special port from the task associated
1281 * with the given context.
1282 *
1283 * Parameters: vfs_context_t The context to use
1284 * int Index of special port
1285 * ipc_port_t * Pointer to returned port
1286 *
1287 * Returns: kern_return_t see task_get_special_port()
1288 */
1289 kern_return_t
1290 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1291 {
1292 task_t task = NULL;
1293
1294 if (ctx != NULL && ctx->vc_thread != NULL) {
1295 task = get_threadtask(ctx->vc_thread);
1296 }
1297
1298 return task_get_special_port(task, which, portp);
1299 }
1300
1301 /*
1302 * vfs_context_set_special_port
1303 *
1304 * Description: Set the requested special port in the task associated
1305 * with the given context.
1306 *
1307 * Parameters: vfs_context_t The context to use
1308 * int Index of special port
1309 * ipc_port_t New special port
1310 *
1311 * Returns: kern_return_t see task_set_special_port_internal()
1312 */
1313 kern_return_t
1314 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1315 {
1316 task_t task = NULL;
1317
1318 if (ctx != NULL && ctx->vc_thread != NULL) {
1319 task = get_threadtask(ctx->vc_thread);
1320 }
1321
1322 return task_set_special_port_internal(task, which, port);
1323 }
1324
1325 /*
1326 * vfs_context_thread
1327 *
1328 * Description: Return the Mach thread associated with a vfs_context_t
1329 *
1330 * Parameters: vfs_context_t The context to use
1331 *
1332 * Returns: thread_t The thread for this context, or
1333 * NULL, if there is not one.
1334 *
1335 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1336 * as a result of a static vfs_context_t declaration in a function
1337 * and will result in this function returning NULL.
1338 *
1339 * This is intentional; this function should NOT return the
1340 * current_thread() in this case.
1341 */
1342 thread_t
1343 vfs_context_thread(vfs_context_t ctx)
1344 {
1345 return ctx->vc_thread;
1346 }
1347
1348
1349 /*
1350 * vfs_context_cwd
1351 *
1352 * Description: Returns a reference on the vnode for the current working
1353 * directory for the supplied context
1354 *
1355 * Parameters: vfs_context_t The context to use
1356 *
1357 * Returns: vnode_t The current working directory
1358 * for this context
1359 *
1360 * Notes: The function first attempts to obtain the current directory
1361 * from the thread, and if it is not present there, falls back
1362 * to obtaining it from the process instead. If it can't be
1363 * obtained from either place, we return NULLVP.
1364 */
1365 vnode_t
1366 vfs_context_cwd(vfs_context_t ctx)
1367 {
1368 vnode_t cwd = NULLVP;
1369
1370 if (ctx != NULL && ctx->vc_thread != NULL) {
1371 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1372 proc_t proc;
1373
1374 /*
1375 * Get the cwd from the thread; if there isn't one, get it
1376 * from the process, instead.
1377 */
1378 if ((cwd = uth->uu_cdir) == NULLVP &&
1379 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1380 proc->p_fd != NULL) {
1381 cwd = proc->p_fd->fd_cdir;
1382 }
1383 }
1384
1385 return cwd;
1386 }
1387
1388 /*
1389 * vfs_context_create
1390 *
1391 * Description: Allocate and initialize a new context.
1392 *
1393 * Parameters: vfs_context_t: Context to copy, or NULL for new
1394 *
1395 * Returns: Pointer to new context
1396 *
1397 * Notes: Copy cred and thread from argument, if available; else
1398 * initialize with current thread and new cred. Returns
1399 * with a reference held on the credential.
1400 */
1401 vfs_context_t
1402 vfs_context_create(vfs_context_t ctx)
1403 {
1404 vfs_context_t newcontext;
1405
1406 newcontext = zalloc_flags(ZV_VFS_CONTEXT, Z_WAITOK | Z_ZERO);
1407
1408 if (newcontext) {
1409 kauth_cred_t safecred;
1410 if (ctx) {
1411 newcontext->vc_thread = ctx->vc_thread;
1412 safecred = ctx->vc_ucred;
1413 } else {
1414 newcontext->vc_thread = current_thread();
1415 safecred = kauth_cred_get();
1416 }
1417 if (IS_VALID_CRED(safecred)) {
1418 kauth_cred_ref(safecred);
1419 }
1420 newcontext->vc_ucred = safecred;
1421 return newcontext;
1422 }
1423 return NULL;
1424 }
1425
1426
1427 vfs_context_t
1428 vfs_context_current(void)
1429 {
1430 vfs_context_t ctx = NULL;
1431 uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1432
1433 if (ut != NULL) {
1434 if (ut->uu_context.vc_ucred != NULL) {
1435 ctx = &ut->uu_context;
1436 }
1437 }
1438
1439 return ctx == NULL ? vfs_context_kernel() : ctx;
1440 }
1441
1442
1443 /*
1444 * XXX Do not ask
1445 *
1446 * Dangerous hack - adopt the first kernel thread as the current thread, to
1447 * get to the vfs_context_t in the uthread associated with a kernel thread.
1448 * This is used by UDF to make the call into IOCDMediaBSDClient,
1449 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1450 * ioctl() is being called from kernel or user space (and all this because
1451 * we do not pass threads into our ioctl()'s, instead of processes).
1452 *
1453 * This is also used by imageboot_setup(), called early from bsd_init() after
1454 * kernproc has been given a credential.
1455 *
1456 */
1457 static struct vfs_context kerncontext;
1458 vfs_context_t
1459 vfs_context_kernel(void)
1460 {
1461 return &kerncontext;
1462 }
1463
1464 /*
1465 * Called early in bsd_init() when kernproc sets its thread and cred context.
1466 */
1467 void
1468 vfs_set_context_kernel(vfs_context_t ctx)
1469 {
1470 kerncontext = *ctx;
1471 }
1472
1473 int
1474 vfs_context_rele(vfs_context_t ctx)
1475 {
1476 if (ctx) {
1477 if (IS_VALID_CRED(ctx->vc_ucred)) {
1478 kauth_cred_unref(&ctx->vc_ucred);
1479 }
1480 zfree(ZV_VFS_CONTEXT, ctx);
1481 }
1482 return 0;
1483 }
1484
1485
1486 kauth_cred_t
1487 vfs_context_ucred(vfs_context_t ctx)
1488 {
1489 return ctx->vc_ucred;
1490 }
1491
1492 /*
1493 * Return true if the context is owned by the superuser.
1494 */
1495 int
1496 vfs_context_issuser(vfs_context_t ctx)
1497 {
1498 return kauth_cred_issuser(vfs_context_ucred(ctx));
1499 }
1500
1501 int
1502 vfs_context_iskernel(vfs_context_t ctx)
1503 {
1504 return ctx == &kerncontext;
1505 }
1506
1507 /*
1508 * Given a context, for all fields of vfs_context_t which
1509 * are not held with a reference, set those fields to the
1510 * values for the current execution context. Currently, this
1511 * just means the vc_thread.
1512 *
1513 * Returns: 0 for success, nonzero for failure
1514 *
1515 * The intended use is:
1516 * 1. vfs_context_create() gets the caller a context
1517 * 2. vfs_context_bind() sets the unrefcounted data
1518 * 3. vfs_context_rele() releases the context
1519 *
1520 */
1521 int
1522 vfs_context_bind(vfs_context_t ctx)
1523 {
1524 ctx->vc_thread = current_thread();
1525 return 0;
1526 }
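/*
 * Illustrative sketch only (not part of the original source): the
 * create/bind/rele sequence described above, split across the originating
 * thread and a worker thread.  The "example_*" helper names are hypothetical.
 */
#if 0
/* step 1: on the originating thread, capture a context (refs the cred) */
static vfs_context_t
example_capture_context(void)
{
	return vfs_context_create(vfs_context_current());
}

/* steps 2 and 3: later, on the worker thread */
static void
example_run_deferred(vfs_context_t ctx)
{
	(void)vfs_context_bind(ctx);    /* point vc_thread at this thread */
	/* ... issue VFS/VNOP calls using ctx ... */
	(void)vfs_context_rele(ctx);    /* drop the credential reference */
}
#endif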
1527
1528 int
1529 vfs_set_thread_fs_private(uint8_t tag, uint64_t fs_private)
1530 {
1531 struct uthread *ut;
1532
1533 if (tag != FS_PRIVATE_TAG_APFS) {
1534 return ENOTSUP;
1535 }
1536
1537 ut = get_bsdthread_info(current_thread());
1538 ut->t_fs_private = fs_private;
1539
1540 return 0;
1541 }
1542
1543 int
1544 vfs_get_thread_fs_private(uint8_t tag, uint64_t *fs_private)
1545 {
1546 struct uthread *ut;
1547
1548 if (tag != FS_PRIVATE_TAG_APFS) {
1549 return ENOTSUP;
1550 }
1551
1552 ut = get_bsdthread_info(current_thread());
1553 *fs_private = ut->t_fs_private;
1554
1555 return 0;
1556 }
1557
1558 int
1559 vfs_isswapmount(mount_t mnt)
1560 {
1561 return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
1562 }
1563
1564 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1565
1566
1567 /*
1568 * Convert between vnode types and inode formats (since POSIX.1
1569 * defines mode word of stat structure in terms of inode formats).
1570 */
1571 enum vtype
1572 vnode_iftovt(int mode)
1573 {
1574 return iftovt_tab[((mode) & S_IFMT) >> 12];
1575 }
1576
1577 int
1578 vnode_vttoif(enum vtype indx)
1579 {
1580 return vttoif_tab[(int)(indx)];
1581 }
1582
1583 int
1584 vnode_makeimode(int indx, int mode)
1585 {
1586 return (int)(VTTOIF(indx) | (mode));
1587 }
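/*
 * Illustrative sketch only (not part of the original source): the three
 * helpers above convert between vnode types and the S_IFMT bits of a mode.
 */
#if 0
static void
example_mode_conversions(void)
{
	enum vtype t = vnode_iftovt(S_IFDIR | 0755);    /* VDIR */
	int fmt = vnode_vttoif(VREG);                   /* S_IFREG */
	int imode = vnode_makeimode(VREG, 0644);        /* S_IFREG | 0644 */

	(void)t; (void)fmt; (void)imode;
}
#endif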
1588
1589
1590 /*
1591 * vnode manipulation functions.
1592 */
1593
1594 /* returns the system root vnode with an iocount; it should be released using vnode_put() */
1595 vnode_t
1596 vfs_rootvnode(void)
1597 {
1598 int error;
1599
1600 lck_rw_lock_shared(&rootvnode_rw_lock);
1601 error = vnode_get(rootvnode);
1602 lck_rw_unlock_shared(&rootvnode_rw_lock);
1603 if (error) {
1604 return (vnode_t)0;
1605 } else {
1606 return rootvnode;
1607 }
1608 }
1609
1610
1611 uint32_t
1612 vnode_vid(vnode_t vp)
1613 {
1614 return (uint32_t)(vp->v_id);
1615 }
1616
1617 mount_t
1618 vnode_mount(vnode_t vp)
1619 {
1620 return vp->v_mount;
1621 }
1622
1623 #if CONFIG_IOSCHED
1624 vnode_t
1625 vnode_mountdevvp(vnode_t vp)
1626 {
1627 if (vp->v_mount) {
1628 return vp->v_mount->mnt_devvp;
1629 } else {
1630 return (vnode_t)0;
1631 }
1632 }
1633 #endif
1634
1635 boolean_t
1636 vnode_isonexternalstorage(vnode_t vp)
1637 {
1638 if (vp) {
1639 if (vp->v_mount) {
1640 if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_PERIPHERAL_DRIVE) {
1641 return TRUE;
1642 }
1643 }
1644 }
1645 return FALSE;
1646 }
1647
1648 mount_t
1649 vnode_mountedhere(vnode_t vp)
1650 {
1651 mount_t mp;
1652
1653 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1654 (mp->mnt_vnodecovered == vp)) {
1655 return mp;
1656 } else {
1657 return (mount_t)NULL;
1658 }
1659 }
1660
1661 /* returns vnode type of vnode_t */
1662 enum vtype
1663 vnode_vtype(vnode_t vp)
1664 {
1665 return vp->v_type;
1666 }
1667
1668 /* returns FS specific node saved in vnode */
1669 void *
1670 vnode_fsnode(vnode_t vp)
1671 {
1672 return vp->v_data;
1673 }
1674
1675 void
1676 vnode_clearfsnode(vnode_t vp)
1677 {
1678 vp->v_data = NULL;
1679 }
1680
1681 dev_t
1682 vnode_specrdev(vnode_t vp)
1683 {
1684 return vp->v_rdev;
1685 }
1686
1687
1688 /* Accessor functions */
1689 /* is vnode_t a root vnode */
1690 int
1691 vnode_isvroot(vnode_t vp)
1692 {
1693 return (vp->v_flag & VROOT)? 1 : 0;
1694 }
1695
1696 /* is vnode_t a system vnode */
1697 int
1698 vnode_issystem(vnode_t vp)
1699 {
1700 return (vp->v_flag & VSYSTEM)? 1 : 0;
1701 }
1702
1703 /* is vnode_t a swap file vnode */
1704 int
1705 vnode_isswap(vnode_t vp)
1706 {
1707 return (vp->v_flag & VSWAP)? 1 : 0;
1708 }
1709
1710 /* is vnode_t a tty */
1711 int
1712 vnode_istty(vnode_t vp)
1713 {
1714 return (vp->v_flag & VISTTY) ? 1 : 0;
1715 }
1716
1717 /* is a mount operation in progress on this vnode_t? */
1718 int
1719 vnode_ismount(vnode_t vp)
1720 {
1721 return (vp->v_flag & VMOUNT)? 1 : 0;
1722 }
1723
1724 /* is this vnode currently being recycled? */
1725 int
1726 vnode_isrecycled(vnode_t vp)
1727 {
1728 int ret;
1729
1730 vnode_lock_spin(vp);
1731 ret = (vp->v_lflag & (VL_TERMINATE | VL_DEAD))? 1 : 0;
1732 vnode_unlock(vp);
1733 return ret;
1734 }
1735
1736 /* vnode was created by background task requesting rapid aging
1737 * and has not since been referenced by a normal task */
1738 int
1739 vnode_israge(vnode_t vp)
1740 {
1741 return (vp->v_flag & VRAGE)? 1 : 0;
1742 }
1743
1744 int
1745 vnode_needssnapshots(vnode_t vp)
1746 {
1747 return (vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0;
1748 }
1749
1750
1751 /* Check the process/thread to see if we should skip atime updates */
1752 int
1753 vfs_ctx_skipatime(vfs_context_t ctx)
1754 {
1755 struct uthread *ut;
1756 proc_t proc;
1757 thread_t thr;
1758
1759 proc = vfs_context_proc(ctx);
1760 thr = vfs_context_thread(ctx);
1761
1762 /* Validate pointers in case we were invoked via a kernel context */
1763 if (thr && proc) {
1764 ut = get_bsdthread_info(thr);
1765
1766 if (proc->p_lflag & P_LRAGE_VNODES) {
1767 return 1;
1768 }
1769
1770 if (ut) {
1771 if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) {
1772 return 1;
1773 }
1774 }
1775
1776 if (proc->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) {
1777 return 1;
1778 }
1779 }
1780 return 0;
1781 }
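/*
 * Illustrative sketch only (not part of the original source): a filesystem
 * read path can consult vfs_ctx_skipatime() before scheduling an access-time
 * update.  "examplefs_mark_atime" is a hypothetical helper.
 */
#if 0
static void
examplefs_mark_atime(__unused vnode_t vp, vfs_context_t ctx)
{
	if (vfs_ctx_skipatime(ctx)) {
		return;                 /* caller opted out of atime updates */
	}
	/* ... mark the fs node so its access time is written back ... */
}
#endif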
1782
1783 /* is vnode_t marked to not keep data cached once it's been consumed */
1784 int
1785 vnode_isnocache(vnode_t vp)
1786 {
1787 return (vp->v_flag & VNOCACHE_DATA)? 1 : 0;
1788 }
1789
1790 /*
1791 * has sequential readahead been disabled on this vnode
1792 */
1793 int
1794 vnode_isnoreadahead(vnode_t vp)
1795 {
1796 return (vp->v_flag & VRAOFF)? 1 : 0;
1797 }
1798
1799 int
1800 vnode_is_openevt(vnode_t vp)
1801 {
1802 return (vp->v_flag & VOPENEVT)? 1 : 0;
1803 }
1804
1805 /* is vnode_t a standard one? */
1806 int
1807 vnode_isstandard(vnode_t vp)
1808 {
1809 return (vp->v_flag & VSTANDARD)? 1 : 0;
1810 }
1811
1812 /* don't vflush() if SKIPSYSTEM */
1813 int
1814 vnode_isnoflush(vnode_t vp)
1815 {
1816 return (vp->v_flag & VNOFLUSH)? 1 : 0;
1817 }
1818
1819 /* is vnode_t a regular file */
1820 int
1821 vnode_isreg(vnode_t vp)
1822 {
1823 return (vp->v_type == VREG)? 1 : 0;
1824 }
1825
1826 /* is vnode_t a directory? */
1827 int
1828 vnode_isdir(vnode_t vp)
1829 {
1830 return (vp->v_type == VDIR)? 1 : 0;
1831 }
1832
1833 /* is vnode_t a symbolic link ? */
1834 int
1835 vnode_islnk(vnode_t vp)
1836 {
1837 return (vp->v_type == VLNK)? 1 : 0;
1838 }
1839
1840 int
1841 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
1842 {
1843 struct nameidata *ndp = cnp->cn_ndp;
1844
1845 if (ndp == NULL) {
1846 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1847 }
1848
1849 if (vnode_isdir(vp)) {
1850 if (vp->v_mountedhere != NULL) {
1851 goto yes;
1852 }
1853
1854 #if CONFIG_TRIGGERS
1855 if (vp->v_resolve) {
1856 goto yes;
1857 }
1858 #endif /* CONFIG_TRIGGERS */
1859 }
1860
1861
1862 if (vnode_islnk(vp)) {
1863 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1864 if (cnp->cn_flags & FOLLOW) {
1865 goto yes;
1866 }
1867 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
1868 goto yes;
1869 }
1870 }
1871
1872 return 0;
1873
1874 yes:
1875 ndp->ni_flag |= NAMEI_CONTLOOKUP;
1876 return EKEEPLOOKING;
1877 }
1878
1879 /* is vnode_t a fifo ? */
1880 int
1881 vnode_isfifo(vnode_t vp)
1882 {
1883 return (vp->v_type == VFIFO)? 1 : 0;
1884 }
1885
1886 /* is vnode_t a block device? */
1887 int
1888 vnode_isblk(vnode_t vp)
1889 {
1890 return (vp->v_type == VBLK)? 1 : 0;
1891 }
1892
1893 int
1894 vnode_isspec(vnode_t vp)
1895 {
1896 return ((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0;
1897 }
1898
1899 /* is vnode_t a char device? */
1900 int
1901 vnode_ischr(vnode_t vp)
1902 {
1903 return (vp->v_type == VCHR)? 1 : 0;
1904 }
1905
1906 /* is vnode_t a socket? */
1907 int
1908 vnode_issock(vnode_t vp)
1909 {
1910 return (vp->v_type == VSOCK)? 1 : 0;
1911 }
1912
1913 /* is vnode_t a device with multiple active vnodes referring to it? */
1914 int
1915 vnode_isaliased(vnode_t vp)
1916 {
1917 enum vtype vt = vp->v_type;
1918 if (!((vt == VCHR) || (vt == VBLK))) {
1919 return 0;
1920 } else {
1921 return vp->v_specflags & SI_ALIASED;
1922 }
1923 }
1924
1925 /* is vnode_t a named stream? */
1926 int
1927 vnode_isnamedstream(
1928 #if NAMEDSTREAMS
1929 vnode_t vp
1930 #else
1931 __unused vnode_t vp
1932 #endif
1933 )
1934 {
1935 #if NAMEDSTREAMS
1936 return (vp->v_flag & VISNAMEDSTREAM) ? 1 : 0;
1937 #else
1938 return 0;
1939 #endif
1940 }
1941
1942 int
1943 vnode_isshadow(
1944 #if NAMEDSTREAMS
1945 vnode_t vp
1946 #else
1947 __unused vnode_t vp
1948 #endif
1949 )
1950 {
1951 #if NAMEDSTREAMS
1952 return (vp->v_flag & VISSHADOW) ? 1 : 0;
1953 #else
1954 return 0;
1955 #endif
1956 }
1957
1958 /* does vnode have associated named stream vnodes ? */
1959 int
1960 vnode_hasnamedstreams(
1961 #if NAMEDSTREAMS
1962 vnode_t vp
1963 #else
1964 __unused vnode_t vp
1965 #endif
1966 )
1967 {
1968 #if NAMEDSTREAMS
1969 return (vp->v_lflag & VL_HASSTREAMS) ? 1 : 0;
1970 #else
1971 return 0;
1972 #endif
1973 }
1974 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1975 void
1976 vnode_setnocache(vnode_t vp)
1977 {
1978 vnode_lock_spin(vp);
1979 vp->v_flag |= VNOCACHE_DATA;
1980 vnode_unlock(vp);
1981 }
1982
1983 void
1984 vnode_clearnocache(vnode_t vp)
1985 {
1986 vnode_lock_spin(vp);
1987 vp->v_flag &= ~VNOCACHE_DATA;
1988 vnode_unlock(vp);
1989 }
1990
1991 void
1992 vnode_set_openevt(vnode_t vp)
1993 {
1994 vnode_lock_spin(vp);
1995 vp->v_flag |= VOPENEVT;
1996 vnode_unlock(vp);
1997 }
1998
1999 void
2000 vnode_clear_openevt(vnode_t vp)
2001 {
2002 vnode_lock_spin(vp);
2003 vp->v_flag &= ~VOPENEVT;
2004 vnode_unlock(vp);
2005 }
2006
2007
2008 void
2009 vnode_setnoreadahead(vnode_t vp)
2010 {
2011 vnode_lock_spin(vp);
2012 vp->v_flag |= VRAOFF;
2013 vnode_unlock(vp);
2014 }
2015
2016 void
2017 vnode_clearnoreadahead(vnode_t vp)
2018 {
2019 vnode_lock_spin(vp);
2020 vp->v_flag &= ~VRAOFF;
2021 vnode_unlock(vp);
2022 }
2023
2024 int
2025 vnode_isfastdevicecandidate(vnode_t vp)
2026 {
2027 return (vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0;
2028 }
2029
2030 void
2031 vnode_setfastdevicecandidate(vnode_t vp)
2032 {
2033 vnode_lock_spin(vp);
2034 vp->v_flag |= VFASTDEVCANDIDATE;
2035 vnode_unlock(vp);
2036 }
2037
2038 void
2039 vnode_clearfastdevicecandidate(vnode_t vp)
2040 {
2041 vnode_lock_spin(vp);
2042 vp->v_flag &= ~VFASTDEVCANDIDATE;
2043 vnode_unlock(vp);
2044 }
2045
2046 int
2047 vnode_isautocandidate(vnode_t vp)
2048 {
2049 return (vp->v_flag & VAUTOCANDIDATE)? 1 : 0;
2050 }
2051
2052 void
2053 vnode_setautocandidate(vnode_t vp)
2054 {
2055 vnode_lock_spin(vp);
2056 vp->v_flag |= VAUTOCANDIDATE;
2057 vnode_unlock(vp);
2058 }
2059
2060 void
2061 vnode_clearautocandidate(vnode_t vp)
2062 {
2063 vnode_lock_spin(vp);
2064 vp->v_flag &= ~VAUTOCANDIDATE;
2065 vnode_unlock(vp);
2066 }
2067
2068
2069
2070
2071 /* mark vnode_t to skip vflush() if SKIPSYSTEM */
2072 void
2073 vnode_setnoflush(vnode_t vp)
2074 {
2075 vnode_lock_spin(vp);
2076 vp->v_flag |= VNOFLUSH;
2077 vnode_unlock(vp);
2078 }
2079
2080 void
2081 vnode_clearnoflush(vnode_t vp)
2082 {
2083 vnode_lock_spin(vp);
2084 vp->v_flag &= ~VNOFLUSH;
2085 vnode_unlock(vp);
2086 }
2087
2088
2089 /* is vnode_t a block device with a FS mounted on it? */
2090 int
2091 vnode_ismountedon(vnode_t vp)
2092 {
2093 return (vp->v_specflags & SI_MOUNTEDON)? 1 : 0;
2094 }
2095
2096 void
2097 vnode_setmountedon(vnode_t vp)
2098 {
2099 vnode_lock_spin(vp);
2100 vp->v_specflags |= SI_MOUNTEDON;
2101 vnode_unlock(vp);
2102 }
2103
2104 void
2105 vnode_clearmountedon(vnode_t vp)
2106 {
2107 vnode_lock_spin(vp);
2108 vp->v_specflags &= ~SI_MOUNTEDON;
2109 vnode_unlock(vp);
2110 }
2111
2112
2113 void
2114 vnode_settag(vnode_t vp, int tag)
2115 {
2116 /*
2117 * We only assign enum values to v_tag, but add an assert to make sure we
2118 * catch it in dev/debug builds if this ever changes.
2119 */
2120 assert(tag >= SHRT_MIN && tag <= SHRT_MAX);
2121 vp->v_tag = (uint16_t)tag;
2122 }
2123
2124 int
2125 vnode_tag(vnode_t vp)
2126 {
2127 return vp->v_tag;
2128 }
2129
2130 vnode_t
2131 vnode_parent(vnode_t vp)
2132 {
2133 return vp->v_parent;
2134 }
2135
2136 void
2137 vnode_setparent(vnode_t vp, vnode_t dvp)
2138 {
2139 vp->v_parent = dvp;
2140 }
2141
2142 void
2143 vnode_setname(vnode_t vp, char * name)
2144 {
2145 vp->v_name = name;
2146 }
2147
2148 /* return the FS name that was registered when the FS was added to the kernel */
2149 void
2150 vnode_vfsname(vnode_t vp, char * buf)
2151 {
2152 strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
2153 }
2154
2155 /* return the FS type number */
2156 int
2157 vnode_vfstypenum(vnode_t vp)
2158 {
2159 return vp->v_mount->mnt_vtable->vfc_typenum;
2160 }
2161
2162 int
2163 vnode_vfs64bitready(vnode_t vp)
2164 {
2165 /*
2166 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2167 */
2168 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
2169 return 1;
2170 } else {
2171 return 0;
2172 }
2173 }
2174
2175
2176
2177 /* return the visible flags on associated mount point of vnode_t */
2178 uint32_t
2179 vnode_vfsvisflags(vnode_t vp)
2180 {
2181 return vp->v_mount->mnt_flag & MNT_VISFLAGMASK;
2182 }
2183
2184 /* return the command modifier flags on associated mount point of vnode_t */
2185 uint32_t
2186 vnode_vfscmdflags(vnode_t vp)
2187 {
2188 return vp->v_mount->mnt_flag & MNT_CMDFLAGS;
2189 }
2190
2191 /* return the max length of short symlinks for vnode_t's mount point */
2192 uint32_t
2193 vnode_vfsmaxsymlen(vnode_t vp)
2194 {
2195 return vp->v_mount->mnt_maxsymlinklen;
2196 }
2197
2198 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2199 struct vfsstatfs *
2200 vnode_vfsstatfs(vnode_t vp)
2201 {
2202 return &vp->v_mount->mnt_vfsstat;
2203 }
2204
2205 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
2206 void *
2207 vnode_vfsfsprivate(vnode_t vp)
2208 {
2209 return vp->v_mount->mnt_data;
2210 }
2211
2212 /* is vnode_t in a rdonly mounted FS */
2213 int
2214 vnode_vfsisrdonly(vnode_t vp)
2215 {
2216 return (vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0;
2217 }
2218
2219 int
2220 vnode_compound_rename_available(vnode_t vp)
2221 {
2222 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
2223 }
2224 int
2225 vnode_compound_rmdir_available(vnode_t vp)
2226 {
2227 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
2228 }
2229 int
2230 vnode_compound_mkdir_available(vnode_t vp)
2231 {
2232 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
2233 }
2234 int
2235 vnode_compound_remove_available(vnode_t vp)
2236 {
2237 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
2238 }
2239 int
2240 vnode_compound_open_available(vnode_t vp)
2241 {
2242 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
2243 }
2244
2245 int
2246 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2247 {
2248 return (vp->v_mount->mnt_compound_ops & opid) != 0;
2249 }
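/*
 * Illustrative sketch only (not part of the original source): callers can
 * probe for compound support before choosing between a single combined
 * operation and the classic lookup-then-VNOP sequence.
 * "example_prefers_compound_rename" is a hypothetical helper.
 */
#if 0
static int
example_prefers_compound_rename(vnode_t fdvp)
{
	/* nonzero if the mount can do lookup + rename as one compound op */
	return vnode_compound_rename_available(fdvp);
}
#endif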
2250
2251 /*
2252 * Returns vnode ref to current working directory; if a per-thread current
2253 * working directory is in effect, return that instead of the per process one.
2254 *
2255 * XXX Published, but not used.
2256 */
2257 vnode_t
2258 current_workingdir(void)
2259 {
2260 return vfs_context_cwd(vfs_context_current());
2261 }
2262
2263 /*
2264 * Get a filesec and optional acl contents from an extended attribute.
2265 * Function will attempt to retrieve ACL, UUID, and GUID information using a
2266 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2267 *
2268 * Parameters: vp The vnode on which to operate.
2269 * fsecp The filesec (and ACL, if any) being
2270 * retrieved.
2271 * ctx The vnode context in which the
2272 * operation is to be attempted.
2273 *
2274 * Returns: 0 Success
2275 * !0 errno value
2276 *
2277 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2278 * host byte order, as will be the ACL contents, if any.
2279 * Internally, we will canonicalize these values from network (PPC)
2280 * byte order after we retrieve them so that the on-disk contents
2281 * of the extended attribute are identical for both PPC and Intel
2282 * (if we were not being required to provide this service via
2283 * fallback, this would be the job of the filesystem
2284 * 'VNOP_GETATTR' call).
2285 *
2286 * We use ntohl() because it has a transitive property on Intel
2287 * machines and no effect on PPC machines; this keeps the on-disk byte order consistent.
2288 *
2289 * XXX: Deleting rather than ignoring a corrupt security structure is
2290 * probably the only way to reset it without assistance from a
2291 * file system integrity checking tool. Right now we ignore it.
2292 *
2293 * XXX: We should enumerate the possible errno values here, and where
2294 * in the code they originated.
2295 */
2296 static int
2297 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2298 {
2299 kauth_filesec_t fsec;
2300 uio_t fsec_uio;
2301 size_t fsec_size;
2302 size_t xsize, rsize;
2303 int error;
2304 uint32_t host_fsec_magic;
2305 uint32_t host_acl_entrycount;
2306
2307 fsec = NULL;
2308 fsec_uio = NULL;
2309
2310 /* find out how big the EA is */
2311 error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
2312 if (error != 0) {
2313 /* no EA, no filesec */
2314 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2315 error = 0;
2316 }
2317 /* either way, we are done */
2318 goto out;
2319 }
2320
2321 /*
2322 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2323 * ACE entry ACL, and if it's larger than that, it must have the right
2324 * number of bytes such that it contains a whole number of ACEs,
2325 * rather than partial entries. Otherwise, we ignore it.
2326 */
2327 if (!KAUTH_FILESEC_VALID(xsize)) {
2328 KAUTH_DEBUG(" ERROR - Bogus kauth_filesec_t: %ld bytes", xsize);
2329 error = 0;
2330 goto out;
2331 }
2332
2333 /* how many entries would fit? */
2334 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2335 if (fsec_size > KAUTH_ACL_MAX_ENTRIES) {
2336 KAUTH_DEBUG(" ERROR - Bogus (too large) kauth_filesec_t: %ld bytes", xsize);
2337 error = 0;
2338 goto out;
2339 }
2340
2341 /* get buffer and uio */
2342 if (((fsec = kauth_filesec_alloc((int)fsec_size)) == NULL) ||
2343 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2344 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2345 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2346 error = ENOMEM;
2347 goto out;
2348 }
2349
2350 /* read security attribute */
2351 rsize = xsize;
2352 if ((error = vn_getxattr(vp,
2353 KAUTH_FILESEC_XATTR,
2354 fsec_uio,
2355 &rsize,
2356 XATTR_NOSECURITY,
2357 ctx)) != 0) {
2358 /* no attribute - no security data */
2359 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2360 error = 0;
2361 }
2362 /* either way, we are done */
2363 goto out;
2364 }
2365
2366 /*
2367 * Validate security structure; the validation must take place in host
2368 * byte order. If it's corrupt, we will just ignore it.
2369 */
2370
2371 /* Validate the size before trying to convert it */
2372 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2373 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2374 goto out;
2375 }
2376
2377 /* Validate the magic number before trying to convert it */
2378 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2379 if (fsec->fsec_magic != host_fsec_magic) {
2380 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2381 goto out;
2382 }
2383
2384 /* Validate the entry count before trying to convert it. */
2385 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2386 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2387 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2388 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2389 goto out;
2390 }
2391 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2392 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2393 goto out;
2394 }
2395 }
2396
2397 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2398
2399 *fsecp = fsec;
2400 fsec = NULL;
2401 error = 0;
2402 out:
2403 if (fsec != NULL) {
2404 kauth_filesec_free(fsec);
2405 }
2406 if (fsec_uio != NULL) {
2407 uio_free(fsec_uio);
2408 }
2409 if (error) {
2410 *fsecp = NULL;
2411 }
2412 return error;
2413 }
2414
2415 /*
2416 * Set a filesec and optional acl contents into an extended attribute.
2417 * Function will attempt to store ACL, UUID, and GUID information using a
2418 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2419 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2420 * original caller supplied an acl.
2421 *
2422 * Parameters: vp The vnode on which to operate.
2423 * fsec The filesec being set.
2424 * acl The acl to be associated with 'fsec'.
2425 * ctx The vnode context in which the
2426 * operation is to be attempted.
2427 *
2428 * Returns: 0 Success
2429 * !0 errno value
2430 *
2431 * Notes: Both the fsec and the acl are always valid.
2432 *
2433 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2434 * as are the acl contents, if they are used. Internally, we will
2435 * canonicalize these values into network (PPC) byte order before we
2436 * attempt to write them so that the on-disk contents of the
2437 * extended attribute are identical for both PPC and Intel (if we
2438 * were not being required to provide this service via fallback,
2439 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2440 * We reverse this process on the way out, so we leave with the
2441 * same byte order we started with.
2442 *
2443 * XXX: We should enumerate the possible errno values here, and where
2444 * in the code they originated.
2445 */
2446 static int
2447 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2448 {
2449 uio_t fsec_uio;
2450 int error;
2451 uint32_t saved_acl_copysize;
2452
2453 fsec_uio = NULL;
2454
2455 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2456 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2457 error = ENOMEM;
2458 goto out;
2459 }
2460 /*
2461 * Save the pre-converted ACL copysize, because it gets swapped too
2462 * if we are running with the wrong endianness.
2463 */
2464 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2465
2466 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2467
2468 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2469 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2470 error = vn_setxattr(vp,
2471 KAUTH_FILESEC_XATTR,
2472 fsec_uio,
2473 XATTR_NOSECURITY, /* we have auth'ed already */
2474 ctx);
2475 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2476
2477 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2478
2479 out:
2480 if (fsec_uio != NULL) {
2481 uio_free(fsec_uio);
2482 }
2483 return error;
2484 }
2485
2486 /*
2487 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2488 */
2489 void
2490 vnode_attr_handle_mnt_ignore_ownership(struct vnode_attr *vap, mount_t mp, vfs_context_t ctx)
2491 {
2492 uid_t nuid;
2493 gid_t ngid;
2494
2495 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2496 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2497 nuid = vap->va_uid;
2498 } else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2499 nuid = mp->mnt_fsowner;
2500 if (nuid == KAUTH_UID_NONE) {
2501 nuid = 99;
2502 }
2503 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2504 nuid = vap->va_uid;
2505 } else {
2506 /* this will always be something sensible */
2507 nuid = mp->mnt_fsowner;
2508 }
2509 if ((nuid == 99) && !vfs_context_issuser(ctx)) {
2510 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2511 }
2512 VATTR_RETURN(vap, va_uid, nuid);
2513 }
2514 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2515 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2516 ngid = vap->va_gid;
2517 } else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2518 ngid = mp->mnt_fsgroup;
2519 if (ngid == KAUTH_GID_NONE) {
2520 ngid = 99;
2521 }
2522 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2523 ngid = vap->va_gid;
2524 } else {
2525 /* this will always be something sensible */
2526 ngid = mp->mnt_fsgroup;
2527 }
2528 if ((ngid == 99) && !vfs_context_issuser(ctx)) {
2529 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2530 }
2531 VATTR_RETURN(vap, va_gid, ngid);
2532 }
2533 }
2534
2535 /*
2536 * Returns: 0 Success
2537 * ENOMEM Not enough space [only if has filesec]
2538 * EINVAL Requested unknown attributes
2539 * VNOP_GETATTR: ???
2540 * vnode_get_filesec: ???
2541 * kauth_cred_guid2uid: ???
2542 * kauth_cred_guid2gid: ???
2543 * vfs_update_vfsstat: ???
2544 */
2545 int
2546 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2547 {
2548 kauth_filesec_t fsec;
2549 kauth_acl_t facl;
2550 int error;
2551
2552 /*
2553 * Reject attempts to fetch unknown attributes.
2554 */
2555 if (vap->va_active & ~VNODE_ATTR_ALL) {
2556 return EINVAL;
2557 }
2558
2559 /* don't ask for extended security data if the filesystem doesn't support it */
2560 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2561 VATTR_CLEAR_ACTIVE(vap, va_acl);
2562 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2563 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2564 }
2565
2566 /*
2567 * If the caller wants size values we might have to synthesise, give the
2568 * filesystem the opportunity to supply better intermediate results.
2569 */
2570 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2571 VATTR_IS_ACTIVE(vap, va_total_size) ||
2572 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2573 VATTR_SET_ACTIVE(vap, va_data_size);
2574 VATTR_SET_ACTIVE(vap, va_data_alloc);
2575 VATTR_SET_ACTIVE(vap, va_total_size);
2576 VATTR_SET_ACTIVE(vap, va_total_alloc);
2577 }
2578
2579 vap->va_vaflags &= ~VA_USEFSID;
2580
2581 error = VNOP_GETATTR(vp, vap, ctx);
2582 if (error) {
2583 KAUTH_DEBUG("ERROR - returning %d", error);
2584 goto out;
2585 }
2586
2587 /*
2588 * If extended security data was requested but not returned, try the fallback
2589 * path.
2590 */
2591 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2592 fsec = NULL;
2593
2594 if (XATTR_VNODE_SUPPORTED(vp)) {
2595 /* try to get the filesec */
2596 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2597 goto out;
2598 }
2599 }
2600 /* if no filesec, no attributes */
2601 if (fsec == NULL) {
2602 VATTR_RETURN(vap, va_acl, NULL);
2603 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2604 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2605 } else {
2606 /* looks good, try to return what we were asked for */
2607 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2608 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2609
2610 /* only return the ACL if we were actually asked for it */
2611 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2612 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2613 VATTR_RETURN(vap, va_acl, NULL);
2614 } else {
2615 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2616 if (facl == NULL) {
2617 kauth_filesec_free(fsec);
2618 error = ENOMEM;
2619 goto out;
2620 }
2621 __nochk_bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2622 VATTR_RETURN(vap, va_acl, facl);
2623 }
2624 }
2625 kauth_filesec_free(fsec);
2626 }
2627 }
2628 /*
2629 * If someone gave us an unsolicited filesec, toss it. We promise that
2630 * we're OK with a filesystem giving us anything back, but our callers
2631 * only expect what they asked for.
2632 */
2633 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2634 if (vap->va_acl != NULL) {
2635 kauth_acl_free(vap->va_acl);
2636 }
2637 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2638 }
2639
2640 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2641 /*
2642 * Handle the case where we need a UID/GID, but only have extended
2643 * security information.
2644 */
2645 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2646 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2647 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2648 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0) {
2649 VATTR_RETURN(vap, va_uid, nuid);
2650 }
2651 }
2652 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2653 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2654 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2655 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0) {
2656 VATTR_RETURN(vap, va_gid, ngid);
2657 }
2658 }
2659 #endif
2660
2661 vnode_attr_handle_mnt_ignore_ownership(vap, vp->v_mount, ctx);
2662
2663 /*
2664 * Synthesise some values that can be reasonably guessed.
2665 */
2666 if (!VATTR_IS_SUPPORTED(vap, va_iosize)) {
2667 assert(vp->v_mount->mnt_vfsstat.f_iosize <= UINT32_MAX);
2668 VATTR_RETURN(vap, va_iosize, (uint32_t)vp->v_mount->mnt_vfsstat.f_iosize);
2669 }
2670
2671 if (!VATTR_IS_SUPPORTED(vap, va_flags)) {
2672 VATTR_RETURN(vap, va_flags, 0);
2673 }
2674
2675 if (!VATTR_IS_SUPPORTED(vap, va_filerev)) {
2676 VATTR_RETURN(vap, va_filerev, 0);
2677 }
2678
2679 if (!VATTR_IS_SUPPORTED(vap, va_gen)) {
2680 VATTR_RETURN(vap, va_gen, 0);
2681 }
2682
2683 /*
2684 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2685 */
2686 if (!VATTR_IS_SUPPORTED(vap, va_data_size)) {
2687 VATTR_RETURN(vap, va_data_size, 0);
2688 }
2689
2690 /* do we want any of the possibly-computed values? */
2691 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2692 VATTR_IS_ACTIVE(vap, va_total_size) ||
2693 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2694 /* make sure f_bsize is valid */
2695 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2696 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) {
2697 goto out;
2698 }
2699 }
2700
2701 /* default va_data_alloc from va_data_size */
2702 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc)) {
2703 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2704 }
2705
2706 /* default va_total_size from va_data_size */
2707 if (!VATTR_IS_SUPPORTED(vap, va_total_size)) {
2708 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2709 }
2710
2711 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2712 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc)) {
2713 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2714 }
2715 }
2716
2717 /*
2718 * If we don't have a change time, pull it from the modtime.
2719 */
2720 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time)) {
2721 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2722 }
2723
2724 /*
2725 * This is really only supported for the creation VNOPs, but since the field is there
2726 * we should populate it correctly.
2727 */
2728 VATTR_RETURN(vap, va_type, vp->v_type);
2729
2730 /*
2731 * The fsid can be obtained from the mountpoint directly.
2732 */
2733 if (VATTR_IS_ACTIVE(vap, va_fsid) &&
2734 (!VATTR_IS_SUPPORTED(vap, va_fsid) ||
2735 vap->va_vaflags & VA_REALFSID || !(vap->va_vaflags & VA_USEFSID))) {
2736 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2737 }
2738
2739 out:
2740 vap->va_vaflags &= ~VA_USEFSID;
2741
2742 return error;
2743 }
2744
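/*
 * Example (illustrative sketch only, in an "#if 0" documentation block):
 * a typical caller of vnode_getattr().  It assumes the caller already holds
 * an iocount on 'vp'; example_report_size() is a hypothetical helper, not
 * part of this KPI.
 */
#if 0
static int
example_report_size(vnode_t vp, vfs_context_t ctx, off_t *sizep)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);        /* request only what is needed */
	VATTR_WANTED(&va, va_uid);

	error = vnode_getattr(vp, &va, ctx ? ctx : vfs_context_current());
	if (error) {
		return error;
	}

	/* Check what was actually returned; unsupported attributes are left unset. */
	if (VATTR_IS_SUPPORTED(&va, va_data_size)) {
		*sizep = (off_t)va.va_data_size;
	}
	return 0;
}
#endif /* 0 */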
2745 /*
2746 * Choose 32 bit or 64 bit fsid
2747 */
2748 uint64_t
2749 vnode_get_va_fsid(struct vnode_attr *vap)
2750 {
2751 if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
2752 return (uint64_t)vap->va_fsid64.val[0] + ((uint64_t)vap->va_fsid64.val[1] << 32);
2753 }
2754 return vap->va_fsid;
2755 }
2756
2757 /*
2758 * Set the attributes on a vnode in a vnode context.
2759 *
2760 * Parameters: vp The vnode whose attributes to set.
2761 * vap A pointer to the attributes to set.
2762 * ctx The vnode context in which the
2763 * operation is to be attempted.
2764 *
2765 * Returns: 0 Success
2766 * !0 errno value
2767 *
2768 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2769 *
2770 * The contents of the data area pointed to by 'vap' may be
2771 * modified if the vnode is on a filesystem which has been
2772 * mounted with ignore ownership flags, or by the underlying
2773 * VFS itself, or by the fallback code, if the underlying VFS
2774 * does not support ACL, UUID, or GUUID attributes directly.
2775 *
2776 * XXX: We should enumerate the possible errno values here, and where
2777 * in the code they originated.
2778 */
2779 int
2780 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2781 {
2782 int error;
2783 #if CONFIG_FSE
2784 uint64_t active;
2785 int is_perm_change = 0;
2786 int is_stat_change = 0;
2787 #endif
2788
2789 /*
2790 * Reject attempts to set unknown attributes.
2791 */
2792 if (vap->va_active & ~VNODE_ATTR_ALL) {
2793 return EINVAL;
2794 }
2795
2796 /*
2797 * Make sure the filesystem is mounted R/W.
2798 * If not, return an error.
2799 */
2800 if (vfs_isrdonly(vp->v_mount)) {
2801 error = EROFS;
2802 goto out;
2803 }
2804
2805 #if DEVELOPMENT || DEBUG
2806 /*
2807 * XXX VSWAP: Check for entitlements or special flag here
2808 * so we can restrict access appropriately.
2809 */
2810 #else /* DEVELOPMENT || DEBUG */
2811
2812 if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
2813 error = EPERM;
2814 goto out;
2815 }
2816 #endif /* DEVELOPMENT || DEBUG */
2817
2818 #if NAMEDSTREAMS
2819 /* For streams, va_data_size is the only settable attribute. */
2820 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2821 error = EPERM;
2822 goto out;
2823 }
2824 #endif
2825 /* Check for truncation */
2826 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
2827 switch (vp->v_type) {
2828 case VREG:
2829 /* For regular files it's ok */
2830 break;
2831 case VDIR:
2832 /* Not allowed to truncate directories */
2833 error = EISDIR;
2834 goto out;
2835 default:
2836 /* For everything else we will clear the bit and let underlying FS decide on the rest */
2837 VATTR_CLEAR_ACTIVE(vap, va_data_size);
2838 if (vap->va_active) {
2839 break;
2840 }
2841 /* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
2842 return 0;
2843 }
2844 }
2845
2846 /*
2847 * If ownership is being ignored on this volume, we silently discard
2848 * ownership changes.
2849 */
2850 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2851 VATTR_CLEAR_ACTIVE(vap, va_uid);
2852 VATTR_CLEAR_ACTIVE(vap, va_gid);
2853 }
2854
2855 /*
2856 * Make sure that extended security is enabled if we're going to try
2857 * to set any.
2858 */
2859 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2860 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2861 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2862 error = ENOTSUP;
2863 goto out;
2864 }
2865
2866 /* Never allow the setting of any unsupported superuser flags. */
2867 if (VATTR_IS_ACTIVE(vap, va_flags)) {
2868 vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
2869 }
2870
2871 #if CONFIG_FSE
2872 /*
2873 * Remember all of the active attributes that we're
2874 * attempting to modify.
2875 */
2876 active = vap->va_active & ~VNODE_ATTR_RDONLY;
2877 #endif
2878
2879 error = VNOP_SETATTR(vp, vap, ctx);
2880
2881 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) {
2882 error = vnode_setattr_fallback(vp, vap, ctx);
2883 }
2884
2885 #if CONFIG_FSE
2886 #define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \
2887 VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \
2888 VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl))
2889
2890 /*
2891 * Now that we've changed them, decide whether to send an
2892 * FSevent.
2893 */
2894 if ((active & PERMISSION_BITS) & vap->va_supported) {
2895 is_perm_change = 1;
2896 } else {
2897 /*
2898 * We've already checked the permission bits, and we
2899 * also want to filter out access time / backup time
2900 * changes.
2901 */
2902 active &= ~(PERMISSION_BITS |
2903 VNODE_ATTR_BIT(va_access_time) |
2904 VNODE_ATTR_BIT(va_backup_time));
2905
2906 /* Anything left to notify about? */
2907 if (active & vap->va_supported) {
2908 is_stat_change = 1;
2909 }
2910 }
2911
2912 if (error == 0) {
2913 if (is_perm_change) {
2914 if (need_fsevent(FSE_CHOWN, vp)) {
2915 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2916 }
2917 } else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) {
2918 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2919 }
2920 }
2921 #undef PERMISSION_BITS
2922 #endif
2923
2924 out:
2925 return error;
2926 }
2927
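/*
 * Example (illustrative sketch only, in an "#if 0" documentation block):
 * changing a single attribute through vnode_setattr().  The caller is
 * assumed to hold an iocount on 'vp' and to have performed any required
 * authorization; example_chmod() is a hypothetical helper, not a KPI.
 */
#if 0
static int
example_chmod(vnode_t vp, mode_t mode, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, mode);

	/*
	 * vnode_setattr() rejects unknown attributes, refuses read-only
	 * mounts, and hands any extended-security attributes the FS leaves
	 * unsupported to vnode_setattr_fallback() below.
	 */
	return vnode_setattr(vp, &va, ctx ? ctx : vfs_context_current());
}
#endif /* 0 */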
2928 /*
2929 * Fallback for setting the attributes on a vnode in a vnode context. This
2930 * function will attempt to store ACL, UUID, and GUID information utilizing
2931 * a read/modify/write operation against an EA used as a backing store for
2932 * the object.
2933 *
2934 * Parameters: vp The vnode whose attributes to set.
2935 * vap A pointer to the attributes to set.
2936 * ctx The vnode context in which the
2937 * operation is to be attempted.
2938 *
2939 * Returns: 0 Success
2940 * !0 errno value
2941 *
2942 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2943 * as are the fsec and lfsec, if they are used.
2944 *
2945 * The contents of the data area pointed to by 'vap' may be
2946 * modified to indicate that the attribute is supported for
2947 * any given requested attribute.
2948 *
2949 * XXX: We should enumerate the possible errno values here, and where
2950 * in the code they originated.
2951 */
2952 int
2953 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2954 {
2955 kauth_filesec_t fsec;
2956 kauth_acl_t facl;
2957 struct kauth_filesec lfsec;
2958 int error;
2959
2960 error = 0;
2961
2962 /*
2963 * Extended security fallback via extended attributes.
2964 *
2965 * Note that we do not free the filesec; the caller is expected to
2966 * do this.
2967 */
2968 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2969 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2970 VATTR_NOT_RETURNED(vap, va_guuid)) {
2971 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2972
2973 /*
2974 * Fail for file types that we don't permit extended security
2975 * to be set on.
2976 */
2977 if (!XATTR_VNODE_SUPPORTED(vp)) {
2978 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2979 error = EINVAL;
2980 goto out;
2981 }
2982
2983 /*
2984 * If we don't have all the extended security items, we need
2985 * to fetch the existing data to perform a read-modify-write
2986 * operation.
2987 */
2988 fsec = NULL;
2989 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2990 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2991 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2992 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2993 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2994 goto out;
2995 }
2996 }
2997 /* if we didn't get a filesec, use our local one */
2998 if (fsec == NULL) {
2999 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
3000 fsec = &lfsec;
3001 } else {
3002 KAUTH_DEBUG("SETATTR - updating existing filesec");
3003 }
3004 /* find the ACL */
3005 facl = &fsec->fsec_acl;
3006
3007 /* if we're using the local filesec, we need to initialise it */
3008 if (fsec == &lfsec) {
3009 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
3010 fsec->fsec_owner = kauth_null_guid;
3011 fsec->fsec_group = kauth_null_guid;
3012 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
3013 facl->acl_flags = 0;
3014 }
3015
3016 /*
3017 * Update with the supplied attributes.
3018 */
3019 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
3020 KAUTH_DEBUG("SETATTR - updating owner UUID");
3021 fsec->fsec_owner = vap->va_uuuid;
3022 VATTR_SET_SUPPORTED(vap, va_uuuid);
3023 }
3024 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
3025 KAUTH_DEBUG("SETATTR - updating group UUID");
3026 fsec->fsec_group = vap->va_guuid;
3027 VATTR_SET_SUPPORTED(vap, va_guuid);
3028 }
3029 if (VATTR_IS_ACTIVE(vap, va_acl)) {
3030 if (vap->va_acl == NULL) {
3031 KAUTH_DEBUG("SETATTR - removing ACL");
3032 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
3033 } else {
3034 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
3035 facl = vap->va_acl;
3036 }
3037 VATTR_SET_SUPPORTED(vap, va_acl);
3038 }
3039
3040 /*
3041 * If the filesec data is all invalid, we can just remove
3042 * the EA completely.
3043 */
3044 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
3045 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
3046 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
3047 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
3048 /* no attribute is ok, nothing to delete */
3049 if (error == ENOATTR) {
3050 error = 0;
3051 }
3052 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
3053 } else {
3054 /* write the EA */
3055 error = vnode_set_filesec(vp, fsec, facl, ctx);
3056 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
3057 }
3058
3059 /* if we fetched a filesec, dispose of the buffer */
3060 if (fsec != &lfsec) {
3061 kauth_filesec_free(fsec);
3062 }
3063 }
3064 out:
3065
3066 return error;
3067 }
3068
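/*
 * Example (illustrative sketch only, in an "#if 0" documentation block):
 * how a filesystem with no native ACL storage ends up using the fallback
 * above.  Its VNOP_SETATTR handler simply does not mark va_acl / va_uuuid /
 * va_guuid as supported; vnode_setattr() then sees VATTR_ALL_SUPPORTED()
 * fail and calls vnode_setattr_fallback(), which stores those items in the
 * KAUTH_FILESEC_XATTR extended attribute.  examplefs_vnop_setattr() is a
 * hypothetical handler, not real code.
 */
#if 0
static int
examplefs_vnop_setattr(struct vnop_setattr_args *ap)
{
	struct vnode_attr *vap = ap->a_vap;

	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		/* ... persist the new mode in the FS's own metadata ... */
		VATTR_SET_SUPPORTED(vap, va_mode);
	}

	/*
	 * va_acl, va_uuuid and va_guuid are intentionally left unsupported,
	 * so the VFS fallback path handles them via the extended attribute.
	 */
	return 0;
}
#endif /* 0 */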
3069 /*
3070 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
3071 * event on a vnode.
3072 */
3073 int
3074 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
3075 {
3076 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
3077 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
3078 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
3079 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
3080 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
3081 uint32_t knote_events = (events & knote_mask);
3082
3083 /* Permissions are not explicitly part of the kqueue model */
3084 if (events & VNODE_EVENT_PERMS) {
3085 knote_events |= NOTE_ATTRIB;
3086 }
3087
3088 /* Directory contents information just becomes NOTE_WRITE */
3089 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
3090 knote_events |= NOTE_WRITE;
3091 }
3092
3093 if (knote_events) {
3094 lock_vnode_and_post(vp, knote_events);
3095 #if CONFIG_FSE
3096 if (vap != NULL) {
3097 create_fsevent_from_kevent(vp, events, vap);
3098 }
3099 #else
3100 (void)vap;
3101 #endif
3102 }
3103
3104 return 0;
3105 }
3106
3107
3108
3109 int
3110 vnode_isdyldsharedcache(vnode_t vp)
3111 {
3112 return (vp->v_flag & VSHARED_DYLD) ? 1 : 0;
3113 }
3114
3115
3116 /*
3117 * For a filesystem that isn't tracking its own vnode watchers:
3118 * check whether a vnode is being monitored.
3119 */
3120 int
3121 vnode_ismonitored(vnode_t vp)
3122 {
3123 return vp->v_knotes.slh_first != NULL;
3124 }
3125
3126 int
3127 vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
3128 {
3129 if (out_vpp) {
3130 *out_vpp = NULLVP;
3131 }
3132 #if NULLFS
3133 return nullfs_getbackingvnode(in_vp, out_vpp);
3134 #else
3135 #pragma unused(in_vp)
3136 return ENOENT;
3137 #endif
3138 }
3139
3140 /*
3141 * Initialize a struct vnode_attr and activate the attributes required
3142 * by the vnode_notify() call.
3143 */
3144 int
3145 vfs_get_notify_attributes(struct vnode_attr *vap)
3146 {
3147 VATTR_INIT(vap);
3148 vap->va_active = VNODE_NOTIFY_ATTRS;
3149 return 0;
3150 }
3151
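/*
 * Example (illustrative sketch only, in an "#if 0" documentation block):
 * a filesystem reporting an attribute change to watchers.  It activates the
 * attributes vnode_notify() wants (used to synthesize an fsevent when
 * CONFIG_FSE is enabled) and posts the event; passing a NULL vap is also
 * legal.  examplefs_post_attrib() is a hypothetical name.
 */
#if 0
static void
examplefs_post_attrib(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;

	vfs_get_notify_attributes(&va);         /* activates VNODE_NOTIFY_ATTRS */
	if (vnode_getattr(vp, &va, ctx) == 0) {
		vnode_notify(vp, VNODE_EVENT_ATTRIB, &va);
	} else {
		vnode_notify(vp, VNODE_EVENT_ATTRIB, NULL);
	}
}
#endif /* 0 */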
3152 #if CONFIG_TRIGGERS
3153 int
3154 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
3155 {
3156 int error;
3157 mount_t mp;
3158
3159 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
3160 if (mp == NULL) {
3161 return ENOENT;
3162 }
3163
3164 error = vfs_busy(mp, LK_NOWAIT);
3165 mount_iterdrop(mp);
3166
3167 if (error != 0) {
3168 return ENOENT;
3169 }
3170
3171 mount_lock(mp);
3172 if (mp->mnt_triggercallback != NULL) {
3173 error = EBUSY;
3174 mount_unlock(mp);
3175 goto out;
3176 }
3177
3178 mp->mnt_triggercallback = vtc;
3179 mp->mnt_triggerdata = data;
3180 mount_unlock(mp);
3181
3182 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
3183
3184 out:
3185 vfs_unbusy(mp);
3186 return 0;
3187 }
3188 #endif /* CONFIG_TRIGGERS */
3189
3190 /*
3191 * Definition of vnode operations.
3192 */
3193
3194 #if 0
3195 /*
3196 *#
3197 *#% lookup dvp L ? ?
3198 *#% lookup vpp - L -
3199 */
3200 struct vnop_lookup_args {
3201 struct vnodeop_desc *a_desc;
3202 vnode_t a_dvp;
3203 vnode_t *a_vpp;
3204 struct componentname *a_cnp;
3205 vfs_context_t a_context;
3206 };
3207 #endif /* 0*/
3208
3209 /*
3210 * Returns: 0 Success
3211 * lock_fsnode:ENOENT No such file or directory [only for VFS
3212 * that is not thread safe & vnode is
3213 * currently being/has been terminated]
3214 * <vfs_lookup>:ENAMETOOLONG
3215 * <vfs_lookup>:ENOENT
3216 * <vfs_lookup>:EJUSTRETURN
3217 * <vfs_lookup>:EPERM
3218 * <vfs_lookup>:EISDIR
3219 * <vfs_lookup>:ENOTDIR
3220 * <vfs_lookup>:???
3221 *
3222 * Note: The return codes from the underlying VFS's lookup routine can't
3223 * be fully enumerated here, since third party VFS authors may not
3224 * limit their error returns to the ones documented here, even
3225 * though this may result in some programs functioning incorrectly.
3226 *
3227 * The return codes documented above are those which may currently
3228 * be returned by HFS from hfs_lookup, not including additional
3229 * error codes which may be propagated from underlying routines.
3230 */
3231 errno_t
3232 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
3233 {
3234 int _err;
3235 struct vnop_lookup_args a;
3236
3237 a.a_desc = &vnop_lookup_desc;
3238 a.a_dvp = dvp;
3239 a.a_vpp = vpp;
3240 a.a_cnp = cnp;
3241 a.a_context = ctx;
3242
3243 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
3244 if (_err == 0 && *vpp) {
3245 DTRACE_FSINFO(lookup, vnode_t, *vpp);
3246 }
3247
3248 return _err;
3249 }
3250
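/*
 * Example (illustrative sketch only, in an "#if 0" documentation block):
 * how a filesystem ends up being reached through the dispatch above.  At
 * registration time the FS supplies a table mapping each vnodeop_desc to
 * its handler; VNOP_LOOKUP() then indexes the vnode's v_op array with
 * vnop_lookup_desc.vdesc_offset.  The examplefs_* names are hypothetical.
 */
#if 0
static const struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vnop_default_desc, (int (*)(void *))vn_default_error },
	{ &vnop_lookup_desc, (int (*)(void *))examplefs_vnop_lookup },
	{ &vnop_open_desc, (int (*)(void *))examplefs_vnop_open },
	{ NULL, NULL }
};
#endif /* 0 */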
3251 #if 0
3252 struct vnop_compound_open_args {
3253 struct vnodeop_desc *a_desc;
3254 vnode_t a_dvp;
3255 vnode_t *a_vpp;
3256 struct componentname *a_cnp;
3257 int32_t a_flags;
3258 int32_t a_fmode;
3259 struct vnode_attr *a_vap;
3260 vfs_context_t a_context;
3261 void *a_reserved;
3262 };
3263 #endif /* 0 */
3264
3265 int
3266 VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
3267 {
3268 int _err;
3269 struct vnop_compound_open_args a;
3270 int did_create = 0;
3271 int want_create;
3272 uint32_t tmp_status = 0;
3273 struct componentname *cnp = &ndp->ni_cnd;
3274
3275 want_create = (flags & O_CREAT);
3276
3277 a.a_desc = &vnop_compound_open_desc;
3278 a.a_dvp = dvp;
3279 a.a_vpp = vpp; /* Could be NULL */
3280 a.a_cnp = cnp;
3281 a.a_flags = flags;
3282 a.a_fmode = fmode;
3283 a.a_status = (statusp != NULL) ? statusp : &tmp_status;
3284 a.a_vap = vap;
3285 a.a_context = ctx;
3286 a.a_open_create_authorizer = vn_authorize_create;
3287 a.a_open_existing_authorizer = vn_authorize_open_existing;
3288 a.a_reserved = NULL;
3289
3290 if (dvp == NULLVP) {
3291 panic("No dvp?");
3292 }
3293 if (want_create && !vap) {
3294 panic("Want create, but no vap?");
3295 }
3296 if (!want_create && vap) {
3297 panic("Don't want create, but have a vap?");
3298 }
3299
3300 _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
3301 if (want_create) {
3302 if (_err == 0 && *vpp) {
3303 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3304 } else {
3305 DTRACE_FSINFO(compound_open, vnode_t, dvp);
3306 }
3307 } else {
3308 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3309 }
3310
3311 did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
3312
3313 if (did_create && !want_create) {
3314 panic("Filesystem did a create, even though none was requested?");
3315 }
3316
3317 if (did_create) {
3318 #if CONFIG_APPLEDOUBLE
3319 if (!NATIVE_XATTR(dvp)) {
3320 /*
3321 * Remove stale Apple Double file (if any).
3322 */
3323 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3324 }
3325 #endif /* CONFIG_APPLEDOUBLE */
3326 /* On create, provide kqueue notification */
3327 post_event_if_success(dvp, _err, NOTE_WRITE);
3328 }
3329
3330 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
3331 #if 0 /* FSEvents... */
3332 if (*vpp && _err && _err != EKEEPLOOKING) {
3333 vnode_put(*vpp);
3334 *vpp = NULLVP;
3335 }
3336 #endif /* 0 */
3337
3338 return _err;
3339 }
3340
3341 #if 0
3342 struct vnop_create_args {
3343 struct vnodeop_desc *a_desc;
3344 vnode_t a_dvp;
3345 vnode_t *a_vpp;
3346 struct componentname *a_cnp;
3347 struct vnode_attr *a_vap;
3348 vfs_context_t a_context;
3349 };
3350 #endif /* 0*/
3351 errno_t
3352 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3353 {
3354 int _err;
3355 struct vnop_create_args a;
3356
3357 a.a_desc = &vnop_create_desc;
3358 a.a_dvp = dvp;
3359 a.a_vpp = vpp;
3360 a.a_cnp = cnp;
3361 a.a_vap = vap;
3362 a.a_context = ctx;
3363
3364 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
3365 if (_err == 0 && *vpp) {
3366 DTRACE_FSINFO(create, vnode_t, *vpp);
3367 }
3368
3369 #if CONFIG_APPLEDOUBLE
3370 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3371 /*
3372 * Remove stale Apple Double file (if any).
3373 */
3374 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3375 }
3376 #endif /* CONFIG_APPLEDOUBLE */
3377
3378 post_event_if_success(dvp, _err, NOTE_WRITE);
3379
3380 return _err;
3381 }
3382
3383 #if 0
3384 /*
3385 *#
3386 *#% whiteout dvp L L L
3387 *#% whiteout cnp - - -
3388 *#% whiteout flag - - -
3389 *#
3390 */
3391 struct vnop_whiteout_args {
3392 struct vnodeop_desc *a_desc;
3393 vnode_t a_dvp;
3394 struct componentname *a_cnp;
3395 int a_flags;
3396 vfs_context_t a_context;
3397 };
3398 #endif /* 0*/
3399 errno_t
3400 VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
3401 __unused int flags, __unused vfs_context_t ctx)
3402 {
3403 return ENOTSUP; // XXX OBSOLETE
3404 }
3405
3406 #if 0
3407 /*
3408 *#
3409 *#% mknod dvp L U U
3410 *#% mknod vpp - X -
3411 *#
3412 */
3413 struct vnop_mknod_args {
3414 struct vnodeop_desc *a_desc;
3415 vnode_t a_dvp;
3416 vnode_t *a_vpp;
3417 struct componentname *a_cnp;
3418 struct vnode_attr *a_vap;
3419 vfs_context_t a_context;
3420 };
3421 #endif /* 0*/
3422 errno_t
3423 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3424 {
3425 int _err;
3426 struct vnop_mknod_args a;
3427
3428 a.a_desc = &vnop_mknod_desc;
3429 a.a_dvp = dvp;
3430 a.a_vpp = vpp;
3431 a.a_cnp = cnp;
3432 a.a_vap = vap;
3433 a.a_context = ctx;
3434
3435 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3436 if (_err == 0 && *vpp) {
3437 DTRACE_FSINFO(mknod, vnode_t, *vpp);
3438 }
3439
3440 post_event_if_success(dvp, _err, NOTE_WRITE);
3441
3442 return _err;
3443 }
3444
3445 #if 0
3446 /*
3447 *#
3448 *#% open vp L L L
3449 *#
3450 */
3451 struct vnop_open_args {
3452 struct vnodeop_desc *a_desc;
3453 vnode_t a_vp;
3454 int a_mode;
3455 vfs_context_t a_context;
3456 };
3457 #endif /* 0*/
3458 errno_t
3459 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3460 {
3461 int _err;
3462 struct vnop_open_args a;
3463
3464 if (ctx == NULL) {
3465 ctx = vfs_context_current();
3466 }
3467 a.a_desc = &vnop_open_desc;
3468 a.a_vp = vp;
3469 a.a_mode = mode;
3470 a.a_context = ctx;
3471
3472 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3473 DTRACE_FSINFO(open, vnode_t, vp);
3474
3475 return _err;
3476 }
3477
3478 #if 0
3479 /*
3480 *#
3481 *#% close vp U U U
3482 *#
3483 */
3484 struct vnop_close_args {
3485 struct vnodeop_desc *a_desc;
3486 vnode_t a_vp;
3487 int a_fflag;
3488 vfs_context_t a_context;
3489 };
3490 #endif /* 0*/
3491 errno_t
3492 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3493 {
3494 int _err;
3495 struct vnop_close_args a;
3496
3497 if (ctx == NULL) {
3498 ctx = vfs_context_current();
3499 }
3500 a.a_desc = &vnop_close_desc;
3501 a.a_vp = vp;
3502 a.a_fflag = fflag;
3503 a.a_context = ctx;
3504
3505 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3506 DTRACE_FSINFO(close, vnode_t, vp);
3507
3508 return _err;
3509 }
3510
3511 #if 0
3512 /*
3513 *#
3514 *#% access vp L L L
3515 *#
3516 */
3517 struct vnop_access_args {
3518 struct vnodeop_desc *a_desc;
3519 vnode_t a_vp;
3520 int a_action;
3521 vfs_context_t a_context;
3522 };
3523 #endif /* 0*/
3524 errno_t
3525 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3526 {
3527 int _err;
3528 struct vnop_access_args a;
3529
3530 if (ctx == NULL) {
3531 ctx = vfs_context_current();
3532 }
3533 a.a_desc = &vnop_access_desc;
3534 a.a_vp = vp;
3535 a.a_action = action;
3536 a.a_context = ctx;
3537
3538 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3539 DTRACE_FSINFO(access, vnode_t, vp);
3540
3541 return _err;
3542 }
3543
3544 #if 0
3545 /*
3546 *#
3547 *#% getattr vp = = =
3548 *#
3549 */
3550 struct vnop_getattr_args {
3551 struct vnodeop_desc *a_desc;
3552 vnode_t a_vp;
3553 struct vnode_attr *a_vap;
3554 vfs_context_t a_context;
3555 };
3556 #endif /* 0*/
3557 errno_t
3558 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3559 {
3560 int _err;
3561 struct vnop_getattr_args a;
3562
3563 a.a_desc = &vnop_getattr_desc;
3564 a.a_vp = vp;
3565 a.a_vap = vap;
3566 a.a_context = ctx;
3567
3568 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3569 DTRACE_FSINFO(getattr, vnode_t, vp);
3570
3571 return _err;
3572 }
3573
3574 #if 0
3575 /*
3576 *#
3577 *#% setattr vp L L L
3578 *#
3579 */
3580 struct vnop_setattr_args {
3581 struct vnodeop_desc *a_desc;
3582 vnode_t a_vp;
3583 struct vnode_attr *a_vap;
3584 vfs_context_t a_context;
3585 };
3586 #endif /* 0*/
3587 errno_t
3588 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3589 {
3590 int _err;
3591 struct vnop_setattr_args a;
3592
3593 a.a_desc = &vnop_setattr_desc;
3594 a.a_vp = vp;
3595 a.a_vap = vap;
3596 a.a_context = ctx;
3597
3598 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3599 DTRACE_FSINFO(setattr, vnode_t, vp);
3600
3601 #if CONFIG_APPLEDOUBLE
3602 /*
3603 * Shadow uid/gid/mod change to extended attribute file.
3604 */
3605 if (_err == 0 && !NATIVE_XATTR(vp)) {
3606 struct vnode_attr va;
3607 int change = 0;
3608
3609 VATTR_INIT(&va);
3610 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3611 VATTR_SET(&va, va_uid, vap->va_uid);
3612 change = 1;
3613 }
3614 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3615 VATTR_SET(&va, va_gid, vap->va_gid);
3616 change = 1;
3617 }
3618 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3619 VATTR_SET(&va, va_mode, vap->va_mode);
3620 change = 1;
3621 }
3622 if (change) {
3623 vnode_t dvp;
3624 const char *vname;
3625
3626 dvp = vnode_getparent(vp);
3627 vname = vnode_getname(vp);
3628
3629 xattrfile_setattr(dvp, vname, &va, ctx);
3630 if (dvp != NULLVP) {
3631 vnode_put(dvp);
3632 }
3633 if (vname != NULL) {
3634 vnode_putname(vname);
3635 }
3636 }
3637 }
3638 #endif /* CONFIG_APPLEDOUBLE */
3639
3640 /*
3641 * If we have changed any of the things about the file that are likely
3642 * to result in changes to authorization results, blow the vnode auth
3643 * cache
3644 */
3645 if (_err == 0 && (
3646 VATTR_IS_SUPPORTED(vap, va_mode) ||
3647 VATTR_IS_SUPPORTED(vap, va_uid) ||
3648 VATTR_IS_SUPPORTED(vap, va_gid) ||
3649 VATTR_IS_SUPPORTED(vap, va_flags) ||
3650 VATTR_IS_SUPPORTED(vap, va_acl) ||
3651 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
3652 VATTR_IS_SUPPORTED(vap, va_guuid))) {
3653 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3654
3655 #if NAMEDSTREAMS
3656 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3657 vnode_t svp;
3658 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3659 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3660 vnode_put(svp);
3661 }
3662 }
3663 #endif /* NAMEDSTREAMS */
3664 }
3665
3666
3667 post_event_if_success(vp, _err, NOTE_ATTRIB);
3668
3669 return _err;
3670 }
3671
3672
3673 #if 0
3674 /*
3675 *#
3676 *#% read vp L L L
3677 *#
3678 */
3679 struct vnop_read_args {
3680 struct vnodeop_desc *a_desc;
3681 vnode_t a_vp;
3682 struct uio *a_uio;
3683 int a_ioflag;
3684 vfs_context_t a_context;
3685 };
3686 #endif /* 0*/
3687 errno_t
3688 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3689 {
3690 int _err;
3691 struct vnop_read_args a;
3692 #if CONFIG_DTRACE
3693 user_ssize_t resid = uio_resid(uio);
3694 #endif
3695
3696 if (ctx == NULL) {
3697 return EINVAL;
3698 }
3699
3700 a.a_desc = &vnop_read_desc;
3701 a.a_vp = vp;
3702 a.a_uio = uio;
3703 a.a_ioflag = ioflag;
3704 a.a_context = ctx;
3705
3706 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3707 DTRACE_FSINFO_IO(read,
3708 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3709
3710 return _err;
3711 }
3712
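/*
 * Example (illustrative sketch only, in an "#if 0" documentation block):
 * reading from a vnode with a kernel-space uio, mirroring the
 * uio_create()/uio_addiov() pattern used by vnode_get_filesec() earlier in
 * this file.  Most in-kernel callers use higher-level helpers instead of
 * calling VNOP_READ() directly; example_read_at() is a hypothetical name.
 */
#if 0
static int
example_read_at(vnode_t vp, void *buf, size_t len, off_t offset, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	auio = uio_create(1, offset, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL) {
		return ENOMEM;
	}
	uio_addiov(auio, CAST_USER_ADDR_T(buf), len);

	/* VNOP_READ() requires a non-NULL context. */
	error = VNOP_READ(vp, auio, 0 /* ioflag */, ctx);

	/* uio_resid(auio) now holds the number of bytes NOT transferred. */
	uio_free(auio);
	return error;
}
#endif /* 0 */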
3713
3714 #if 0
3715 /*
3716 *#
3717 *#% write vp L L L
3718 *#
3719 */
3720 struct vnop_write_args {
3721 struct vnodeop_desc *a_desc;
3722 vnode_t a_vp;
3723 struct uio *a_uio;
3724 int a_ioflag;
3725 vfs_context_t a_context;
3726 };
3727 #endif /* 0*/
3728 errno_t
3729 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3730 {
3731 struct vnop_write_args a;
3732 int _err;
3733 #if CONFIG_DTRACE
3734 user_ssize_t resid = uio_resid(uio);
3735 #endif
3736
3737 if (ctx == NULL) {
3738 return EINVAL;
3739 }
3740
3741 a.a_desc = &vnop_write_desc;
3742 a.a_vp = vp;
3743 a.a_uio = uio;
3744 a.a_ioflag = ioflag;
3745 a.a_context = ctx;
3746
3747 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3748 DTRACE_FSINFO_IO(write,
3749 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3750
3751 post_event_if_success(vp, _err, NOTE_WRITE);
3752
3753 return _err;
3754 }
3755
3756
3757 #if 0
3758 /*
3759 *#
3760 *#% ioctl vp U U U
3761 *#
3762 */
3763 struct vnop_ioctl_args {
3764 struct vnodeop_desc *a_desc;
3765 vnode_t a_vp;
3766 u_long a_command;
3767 caddr_t a_data;
3768 int a_fflag;
3769 vfs_context_t a_context;
3770 };
3771 #endif /* 0*/
3772 errno_t
3773 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3774 {
3775 int _err;
3776 struct vnop_ioctl_args a;
3777
3778 if (ctx == NULL) {
3779 ctx = vfs_context_current();
3780 }
3781
3782 /*
3783 * This check should probably have been put in the TTY code instead...
3784 *
3785 * We have to be careful about what we assume during startup and shutdown.
3786 * We have to be able to use the root filesystem's device vnode even when
3787 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3788 * structure. If there is no data pointer, it doesn't matter whether
3789 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZE)
3790 * which passes NULL for its data pointer can therefore be used during
3791 * mount or unmount of the root filesystem.
3792 *
3793 * Depending on what root filesystems need to do during mount/unmount, we
3794 * may need to loosen this check again in the future.
3795 */
3796 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3797 if (data != NULL && !vnode_vfs64bitready(vp)) {
3798 return ENOTTY;
3799 }
3800 }
3801
3802 if ((command == DKIOCISSOLIDSTATE) && (vp == rootvp) && rootvp_is_ssd && data) {
3803 *data = 1;
3804 return 0;
3805 }
3806
3807 a.a_desc = &vnop_ioctl_desc;
3808 a.a_vp = vp;
3809 a.a_command = command;
3810 a.a_data = data;
3811 a.a_fflag = fflag;
3812 a.a_context = ctx;
3813
3814 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
3815 DTRACE_FSINFO(ioctl, vnode_t, vp);
3816
3817 return _err;
3818 }
3819
3820
3821 #if 0
3822 /*
3823 *#
3824 *#% select vp U U U
3825 *#
3826 */
3827 struct vnop_select_args {
3828 struct vnodeop_desc *a_desc;
3829 vnode_t a_vp;
3830 int a_which;
3831 int a_fflags;
3832 void *a_wql;
3833 vfs_context_t a_context;
3834 };
3835 #endif /* 0*/
3836 errno_t
3837 VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t ctx)
3838 {
3839 int _err;
3840 struct vnop_select_args a;
3841
3842 if (ctx == NULL) {
3843 ctx = vfs_context_current();
3844 }
3845 a.a_desc = &vnop_select_desc;
3846 a.a_vp = vp;
3847 a.a_which = which;
3848 a.a_fflags = fflags;
3849 a.a_context = ctx;
3850 a.a_wql = wql;
3851
3852 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3853 DTRACE_FSINFO(select, vnode_t, vp);
3854
3855 return _err;
3856 }
3857
3858
3859 #if 0
3860 /*
3861 *#
3862 *#% exchange fvp L L L
3863 *#% exchange tvp L L L
3864 *#
3865 */
3866 struct vnop_exchange_args {
3867 struct vnodeop_desc *a_desc;
3868 vnode_t a_fvp;
3869 vnode_t a_tvp;
3870 int a_options;
3871 vfs_context_t a_context;
3872 };
3873 #endif /* 0*/
3874 errno_t
3875 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3876 {
3877 int _err;
3878 struct vnop_exchange_args a;
3879
3880 a.a_desc = &vnop_exchange_desc;
3881 a.a_fvp = fvp;
3882 a.a_tvp = tvp;
3883 a.a_options = options;
3884 a.a_context = ctx;
3885
3886 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3887 DTRACE_FSINFO(exchange, vnode_t, fvp);
3888
3889 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3890 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3891 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3892
3893 return _err;
3894 }
3895
3896
3897 #if 0
3898 /*
3899 *#
3900 *#% revoke vp U U U
3901 *#
3902 */
3903 struct vnop_revoke_args {
3904 struct vnodeop_desc *a_desc;
3905 vnode_t a_vp;
3906 int a_flags;
3907 vfs_context_t a_context;
3908 };
3909 #endif /* 0*/
3910 errno_t
3911 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
3912 {
3913 struct vnop_revoke_args a;
3914 int _err;
3915
3916 a.a_desc = &vnop_revoke_desc;
3917 a.a_vp = vp;
3918 a.a_flags = flags;
3919 a.a_context = ctx;
3920
3921 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3922 DTRACE_FSINFO(revoke, vnode_t, vp);
3923
3924 return _err;
3925 }
3926
3927
3928 #if 0
3929 /*
3930 *#
3931 *# mmap_check - vp U U U
3932 *#
3933 */
3934 struct vnop_mmap_check_args {
3935 struct vnodeop_desc *a_desc;
3936 vnode_t a_vp;
3937 int a_flags;
3938 vfs_context_t a_context;
3939 };
3940 #endif /* 0 */
3941 errno_t
3942 VNOP_MMAP_CHECK(vnode_t vp, int flags, vfs_context_t ctx)
3943 {
3944 int _err;
3945 struct vnop_mmap_check_args a;
3946
3947 a.a_desc = &vnop_mmap_check_desc;
3948 a.a_vp = vp;
3949 a.a_flags = flags;
3950 a.a_context = ctx;
3951
3952 _err = (*vp->v_op[vnop_mmap_check_desc.vdesc_offset])(&a);
3953 if (_err == ENOTSUP) {
3954 _err = 0;
3955 }
3956 DTRACE_FSINFO(mmap_check, vnode_t, vp);
3957
3958 return _err;
3959 }
3960
3961 #if 0
3962 /*
3963 *#
3964 *# mmap - vp U U U
3965 *#
3966 */
3967 struct vnop_mmap_args {
3968 struct vnodeop_desc *a_desc;
3969 vnode_t a_vp;
3970 int a_fflags;
3971 vfs_context_t a_context;
3972 };
3973 #endif /* 0*/
3974 errno_t
3975 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
3976 {
3977 int _err;
3978 struct vnop_mmap_args a;
3979
3980 a.a_desc = &vnop_mmap_desc;
3981 a.a_vp = vp;
3982 a.a_fflags = fflags;
3983 a.a_context = ctx;
3984
3985 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3986 DTRACE_FSINFO(mmap, vnode_t, vp);
3987
3988 return _err;
3989 }
3990
3991
3992 #if 0
3993 /*
3994 *#
3995 *# mnomap - vp U U U
3996 *#
3997 */
3998 struct vnop_mnomap_args {
3999 struct vnodeop_desc *a_desc;
4000 vnode_t a_vp;
4001 vfs_context_t a_context;
4002 };
4003 #endif /* 0*/
4004 errno_t
4005 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
4006 {
4007 int _err;
4008 struct vnop_mnomap_args a;
4009
4010 a.a_desc = &vnop_mnomap_desc;
4011 a.a_vp = vp;
4012 a.a_context = ctx;
4013
4014 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
4015 DTRACE_FSINFO(mnomap, vnode_t, vp);
4016
4017 return _err;
4018 }
4019
4020
4021 #if 0
4022 /*
4023 *#
4024 *#% fsync vp L L L
4025 *#
4026 */
4027 struct vnop_fsync_args {
4028 struct vnodeop_desc *a_desc;
4029 vnode_t a_vp;
4030 int a_waitfor;
4031 vfs_context_t a_context;
4032 };
4033 #endif /* 0*/
4034 errno_t
4035 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
4036 {
4037 struct vnop_fsync_args a;
4038 int _err;
4039
4040 a.a_desc = &vnop_fsync_desc;
4041 a.a_vp = vp;
4042 a.a_waitfor = waitfor;
4043 a.a_context = ctx;
4044
4045 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
4046 DTRACE_FSINFO(fsync, vnode_t, vp);
4047
4048 return _err;
4049 }
4050
4051
4052 #if 0
4053 /*
4054 *#
4055 *#% remove dvp L U U
4056 *#% remove vp L U U
4057 *#
4058 */
4059 struct vnop_remove_args {
4060 struct vnodeop_desc *a_desc;
4061 vnode_t a_dvp;
4062 vnode_t a_vp;
4063 struct componentname *a_cnp;
4064 int a_flags;
4065 vfs_context_t a_context;
4066 };
4067 #endif /* 0*/
4068 errno_t
4069 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
4070 {
4071 int _err;
4072 struct vnop_remove_args a;
4073
4074 a.a_desc = &vnop_remove_desc;
4075 a.a_dvp = dvp;
4076 a.a_vp = vp;
4077 a.a_cnp = cnp;
4078 a.a_flags = flags;
4079 a.a_context = ctx;
4080
4081 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
4082 DTRACE_FSINFO(remove, vnode_t, vp);
4083
4084 if (_err == 0) {
4085 vnode_setneedinactive(vp);
4086 #if CONFIG_APPLEDOUBLE
4087 if (!(NATIVE_XATTR(dvp))) {
4088 /*
4089 * Remove any associated extended attribute file (._ AppleDouble file).
4090 */
4091 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4092 }
4093 #endif /* CONFIG_APPLEDOUBLE */
4094 }
4095
4096 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4097 post_event_if_success(dvp, _err, NOTE_WRITE);
4098
4099 return _err;
4100 }
4101
4102 int
4103 VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
4104 {
4105 int _err;
4106 struct vnop_compound_remove_args a;
4107 int no_vp = (*vpp == NULLVP);
4108
4109 a.a_desc = &vnop_compound_remove_desc;
4110 a.a_dvp = dvp;
4111 a.a_vpp = vpp;
4112 a.a_cnp = &ndp->ni_cnd;
4113 a.a_flags = flags;
4114 a.a_vap = vap;
4115 a.a_context = ctx;
4116 a.a_remove_authorizer = vn_authorize_unlink;
4117
4118 _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
4119 if (_err == 0 && *vpp) {
4120 DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
4121 } else {
4122 DTRACE_FSINFO(compound_remove, vnode_t, dvp);
4123 }
4124 if (_err == 0) {
4125 vnode_setneedinactive(*vpp);
4126 #if CONFIG_APPLEDOUBLE
4127 if (!(NATIVE_XATTR(dvp))) {
4128 /*
4129 * Remove any associated extended attribute file (._ AppleDouble file).
4130 */
4131 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
4132 }
4133 #endif /* CONFIG_APPLEDOUBLE */
4134 }
4135
4136 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4137 post_event_if_success(dvp, _err, NOTE_WRITE);
4138
4139 if (no_vp) {
4140 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4141 if (*vpp && _err && _err != EKEEPLOOKING) {
4142 vnode_put(*vpp);
4143 *vpp = NULLVP;
4144 }
4145 }
4146
4147 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
4148
4149 return _err;
4150 }
4151
4152 #if 0
4153 /*
4154 *#
4155 *#% link vp U U U
4156 *#% link tdvp L U U
4157 *#
4158 */
4159 struct vnop_link_args {
4160 struct vnodeop_desc *a_desc;
4161 vnode_t a_vp;
4162 vnode_t a_tdvp;
4163 struct componentname *a_cnp;
4164 vfs_context_t a_context;
4165 };
4166 #endif /* 0*/
4167 errno_t
4168 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
4169 {
4170 int _err;
4171 struct vnop_link_args a;
4172
4173 #if CONFIG_APPLEDOUBLE
4174 /*
4175 * For file systems with non-native extended attributes,
4176 * disallow linking to an existing "._" Apple Double file.
4177 */
4178 if (!NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
4179 const char *vname;
4180
4181 vname = vnode_getname(vp);
4182 if (vname != NULL) {
4183 _err = 0;
4184 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
4185 _err = EPERM;
4186 }
4187 vnode_putname(vname);
4188 if (_err) {
4189 return _err;
4190 }
4191 }
4192 }
4193 #endif /* CONFIG_APPLEDOUBLE */
4194
4195 a.a_desc = &vnop_link_desc;
4196 a.a_vp = vp;
4197 a.a_tdvp = tdvp;
4198 a.a_cnp = cnp;
4199 a.a_context = ctx;
4200
4201 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
4202 DTRACE_FSINFO(link, vnode_t, vp);
4203
4204 post_event_if_success(vp, _err, NOTE_LINK);
4205 post_event_if_success(tdvp, _err, NOTE_WRITE);
4206
4207 return _err;
4208 }
4209
4210 errno_t
4211 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4212 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4213 vfs_rename_flags_t flags, vfs_context_t ctx)
4214 {
4215 int _err;
4216 struct nameidata *fromnd = NULL;
4217 struct nameidata *tond = NULL;
4218 #if CONFIG_APPLEDOUBLE
4219 vnode_t src_attr_vp = NULLVP;
4220 vnode_t dst_attr_vp = NULLVP;
4221 char smallname1[48];
4222 char smallname2[48];
4223 char *xfromname = NULL;
4224 char *xtoname = NULL;
4225 #endif /* CONFIG_APPLEDOUBLE */
4226 int batched;
4227 uint32_t tdfflags; // Target directory file flags
4228
4229 batched = vnode_compound_rename_available(fdvp);
4230
4231 if (!batched) {
4232 if (*fvpp == NULLVP) {
4233 panic("Not batched, and no fvp?");
4234 }
4235 }
4236
4237 #if CONFIG_APPLEDOUBLE
4238 /*
4239 * We need to preflight any potential AppleDouble file for the source file
4240 * before doing the rename operation, since we could potentially be doing
4241 * this operation on a network filesystem, and would end up duplicating
4242 * the work. Also, save the source and destination names. Skip it if the
4243 * source has a "._" prefix.
4244 */
4245
4246 size_t xfromname_len = 0;
4247 size_t xtoname_len = 0;
4248 if (!NATIVE_XATTR(fdvp) &&
4249 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
4250 int error;
4251
4252 /* Get source attribute file name. */
4253 xfromname_len = fcnp->cn_namelen + 3;
4254 if (xfromname_len > sizeof(smallname1)) {
4255 xfromname = kheap_alloc(KHEAP_TEMP, xfromname_len, Z_WAITOK);
4256 } else {
4257 xfromname = &smallname1[0];
4258 }
4259 strlcpy(xfromname, "._", xfromname_len);
4260 strlcat(xfromname, fcnp->cn_nameptr, xfromname_len);
4261
4262 /* Get destination attribute file name. */
4263 xtoname_len = tcnp->cn_namelen + 3;
4264 if (xtoname_len > sizeof(smallname2)) {
4265 xtoname = kheap_alloc(KHEAP_TEMP, xtoname_len, Z_WAITOK);
4266 } else {
4267 xtoname = &smallname2[0];
4268 }
4269 strlcpy(xtoname, "._", xtoname_len);
4270 strlcat(xtoname, tcnp->cn_nameptr, xtoname_len);
4271
4272 /*
4273 * Look up the source attribute file and keep a reference on it if it exists.
4274 * Note that we do the namei with the nameiop of RENAME, which is different than
4275 * in the rename syscall. It's OK if the source file does not exist, since this
4276 * is only for AppleDouble files.
4277 */
4278 fromnd = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK);
4279 NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
4280 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
4281 fromnd->ni_dvp = fdvp;
4282 error = namei(fromnd);
4283
4284 /*
4285 * If there was an error looking up source attribute file,
4286 * we'll behave as if it didn't exist.
4287 */
4288
4289 if (error == 0) {
4290 if (fromnd->ni_vp) {
4291 /* src_attr_vp indicates need to call vnode_put / nameidone later */
4292 src_attr_vp = fromnd->ni_vp;
4293
4294 if (fromnd->ni_vp->v_type != VREG) {
4295 src_attr_vp = NULLVP;
4296 vnode_put(fromnd->ni_vp);
4297 }
4298 }
4299 /*
4300 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4301 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4302 * have a vnode here, so we drop our namei buffer for the source attribute file
4303 */
4304 if (src_attr_vp == NULLVP) {
4305 nameidone(fromnd);
4306 }
4307 }
4308 }
4309 #endif /* CONFIG_APPLEDOUBLE */
4310
4311 if (batched) {
4312 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
4313 if (_err != 0) {
4314 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
4315 }
4316 } else {
4317 if (flags) {
4318 _err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
4319 if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
4320 // Legacy...
4321 if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
4322 fcnp->cn_flags |= CN_SECLUDE_RENAME;
4323 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4324 }
4325 }
4326 } else {
4327 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4328 }
4329 }
4330
4331 /*
4332 * If moved to a new directory that is restricted,
4333 * set the restricted flag on the item moved.
4334 */
4335 if (_err == 0) {
4336 _err = vnode_flags(tdvp, &tdfflags, ctx);
4337 if (_err == 0) {
4338 uint32_t inherit_flags = tdfflags & (UF_DATAVAULT | SF_RESTRICTED);
4339 if (inherit_flags) {
4340 uint32_t fflags;
4341 _err = vnode_flags(*fvpp, &fflags, ctx);
4342 if (_err == 0 && fflags != (fflags | inherit_flags)) {
4343 struct vnode_attr va;
4344 VATTR_INIT(&va);
4345 VATTR_SET(&va, va_flags, fflags | inherit_flags);
4346 _err = vnode_setattr(*fvpp, &va, ctx);
4347 }
4348 }
4349 }
4350 }
4351
4352 #if CONFIG_MACF
4353 if (_err == 0) {
4354 mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
4355 if (flags & VFS_RENAME_SWAP) {
4356 mac_vnode_notify_rename(ctx, *tvpp, fdvp, fcnp);
4357 }
4358 }
4359 #endif
4360
4361 #if CONFIG_APPLEDOUBLE
4362 /*
4363 * Rename any associated extended attribute file (._ AppleDouble file).
4364 */
4365 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
4366 int error = 0;
4367
4368 /*
4369 * Get destination attribute file vnode.
4370 * Note that tdvp already has an iocount reference. Make sure to check that we
4371 * get a valid vnode from namei.
4372 */
4373 tond = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK);
4374 NDINIT(tond, RENAME, OP_RENAME,
4375 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4376 CAST_USER_ADDR_T(xtoname), ctx);
4377 tond->ni_dvp = tdvp;
4378 error = namei(tond);
4379
4380 if (error) {
4381 goto ad_error;
4382 }
4383
4384 if (tond->ni_vp) {
4385 dst_attr_vp = tond->ni_vp;
4386 }
4387
4388 if (src_attr_vp) {
4389 const char *old_name = src_attr_vp->v_name;
4390 vnode_t old_parent = src_attr_vp->v_parent;
4391
4392 if (batched) {
4393 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
4394 tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
4395 0, ctx);
4396 } else {
4397 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
4398 tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
4399 }
4400
4401 if (error == 0 && old_name == src_attr_vp->v_name &&
4402 old_parent == src_attr_vp->v_parent) {
4403 int update_flags = VNODE_UPDATE_NAME;
4404
4405 if (fdvp != tdvp) {
4406 update_flags |= VNODE_UPDATE_PARENT;
4407 }
4408
4409 if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
4410 vnode_update_identity(src_attr_vp, tdvp,
4411 tond->ni_cnd.cn_nameptr,
4412 tond->ni_cnd.cn_namelen,
4413 tond->ni_cnd.cn_hash,
4414 update_flags);
4415 }
4416 }
4417
4418 /* kevent notifications for moving resource files:
4419 * _err is zero if we're here, so there is no need to notify the directories;
4420 * the code below will do that. We only need to post the rename on the source
4421 * and possibly a delete on the destination.
4422 */
4423 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4424 if (dst_attr_vp) {
4425 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4426 }
4427 } else if (dst_attr_vp) {
4428 /*
4429 * Just delete destination attribute file vnode if it exists, since
4430 * we didn't have a source attribute file.
4431 * Note that tdvp already has an iocount reference.
4432 */
4433
4434 struct vnop_remove_args args;
4435
4436 args.a_desc = &vnop_remove_desc;
4437 args.a_dvp = tdvp;
4438 args.a_vp = dst_attr_vp;
4439 args.a_cnp = &tond->ni_cnd;
4440 args.a_context = ctx;
4441
4442 if (error == 0) {
4443 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
4444
4445 if (error == 0) {
4446 vnode_setneedinactive(dst_attr_vp);
4447 }
4448 }
4449
4450 /* kevent notification for deleting the destination's attribute file
4451 * if it existed. Only need to post the delete on the destination, since
4452 * the code below will handle the directories.
4453 */
4454 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4455 }
4456 }
4457 ad_error:
4458 if (src_attr_vp) {
4459 vnode_put(src_attr_vp);
4460 nameidone(fromnd);
4461 }
4462 if (dst_attr_vp) {
4463 vnode_put(dst_attr_vp);
4464 nameidone(tond);
4465 }
4466 if (xfromname && xfromname != &smallname1[0]) {
4467 kheap_free(KHEAP_TEMP, xfromname, xfromname_len);
4468 }
4469 if (xtoname && xtoname != &smallname2[0]) {
4470 kheap_free(KHEAP_TEMP, xtoname, xtoname_len);
4471 }
4472 #endif /* CONFIG_APPLEDOUBLE */
4473 kheap_free(KHEAP_TEMP, fromnd, sizeof(struct nameidata));
4474 kheap_free(KHEAP_TEMP, tond, sizeof(struct nameidata));
4475 return _err;
4476 }
4477
4478
4479 #if 0
4480 /*
4481 *#
4482 *#% rename fdvp U U U
4483 *#% rename fvp U U U
4484 *#% rename tdvp L U U
4485 *#% rename tvp X U U
4486 *#
4487 */
4488 struct vnop_rename_args {
4489 struct vnodeop_desc *a_desc;
4490 vnode_t a_fdvp;
4491 vnode_t a_fvp;
4492 struct componentname *a_fcnp;
4493 vnode_t a_tdvp;
4494 vnode_t a_tvp;
4495 struct componentname *a_tcnp;
4496 vfs_context_t a_context;
4497 };
4498 #endif /* 0*/
4499 errno_t
4500 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4501 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4502 vfs_context_t ctx)
4503 {
4504 int _err = 0;
4505 struct vnop_rename_args a;
4506
4507 a.a_desc = &vnop_rename_desc;
4508 a.a_fdvp = fdvp;
4509 a.a_fvp = fvp;
4510 a.a_fcnp = fcnp;
4511 a.a_tdvp = tdvp;
4512 a.a_tvp = tvp;
4513 a.a_tcnp = tcnp;
4514 a.a_context = ctx;
4515
4516 /* do the rename of the main file. */
4517 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4518 DTRACE_FSINFO(rename, vnode_t, fdvp);
4519
4520 if (_err) {
4521 return _err;
4522 }
4523
4524 return post_rename(fdvp, fvp, tdvp, tvp);
4525 }
4526
4527 static errno_t
4528 post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
4529 {
4530 if (tvp && tvp != fvp) {
4531 vnode_setneedinactive(tvp);
4532 }
4533
4534 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4535 int events = NOTE_WRITE;
4536 if (vnode_isdir(fvp)) {
4537 /* Link count on dir changed only if we are moving a dir and...
4538 * --Moved to new dir, not overwriting there
4539 * --Kept in same dir and DID overwrite
4540 */
4541 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4542 events |= NOTE_LINK;
4543 }
4544 }
4545
4546 lock_vnode_and_post(fdvp, events);
4547 if (fdvp != tdvp) {
4548 lock_vnode_and_post(tdvp, events);
4549 }
4550
4551 /* If you're replacing the target, post a deletion for it */
4552 if (tvp) {
4553 lock_vnode_and_post(tvp, NOTE_DELETE);
4554 }
4555
4556 lock_vnode_and_post(fvp, NOTE_RENAME);
4557
4558 return 0;
4559 }
4560
4561 #if 0
4562 /*
4563 *#
4564 *#% renamex fdvp U U U
4565 *#% renamex fvp U U U
4566 *#% renamex tdvp L U U
4567 *#% renamex tvp X U U
4568 *#
4569 */
4570 struct vnop_renamex_args {
4571 struct vnodeop_desc *a_desc;
4572 vnode_t a_fdvp;
4573 vnode_t a_fvp;
4574 struct componentname *a_fcnp;
4575 vnode_t a_tdvp;
4576 vnode_t a_tvp;
4577 struct componentname *a_tcnp;
4578 vfs_rename_flags_t a_flags;
4579 vfs_context_t a_context;
4580 };
4581 #endif /* 0*/
4582 errno_t
4583 VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4584 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4585 vfs_rename_flags_t flags, vfs_context_t ctx)
4586 {
4587 int _err = 0;
4588 struct vnop_renamex_args a;
4589
4590 a.a_desc = &vnop_renamex_desc;
4591 a.a_fdvp = fdvp;
4592 a.a_fvp = fvp;
4593 a.a_fcnp = fcnp;
4594 a.a_tdvp = tdvp;
4595 a.a_tvp = tvp;
4596 a.a_tcnp = tcnp;
4597 a.a_flags = flags;
4598 a.a_context = ctx;
4599
4600 /* do the rename of the main file. */
4601 _err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
4602 DTRACE_FSINFO(renamex, vnode_t, fdvp);
4603
4604 if (_err) {
4605 return _err;
4606 }
4607
4608 return post_rename(fdvp, fvp, tdvp, tvp);
4609 }
4610
4611
4612 int
4613 VNOP_COMPOUND_RENAME(
4614 struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4615 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4616 uint32_t flags, vfs_context_t ctx)
4617 {
4618 int _err = 0;
4619 int events;
4620 struct vnop_compound_rename_args a;
4621 int no_fvp, no_tvp;
4622
4623 no_fvp = (*fvpp) == NULLVP;
4624 no_tvp = (*tvpp) == NULLVP;
4625
4626 a.a_desc = &vnop_compound_rename_desc;
4627
4628 a.a_fdvp = fdvp;
4629 a.a_fvpp = fvpp;
4630 a.a_fcnp = fcnp;
4631 a.a_fvap = fvap;
4632
4633 a.a_tdvp = tdvp;
4634 a.a_tvpp = tvpp;
4635 a.a_tcnp = tcnp;
4636 a.a_tvap = tvap;
4637
4638 a.a_flags = flags;
4639 a.a_context = ctx;
4640 a.a_rename_authorizer = vn_authorize_rename;
4641 a.a_reserved = NULL;
4642
4643 /* do the rename of the main file. */
4644 _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
4645 DTRACE_FSINFO(compound_rename, vnode_t, fdvp);
4646
4647 if (_err == 0) {
4648 if (*tvpp && *tvpp != *fvpp) {
4649 vnode_setneedinactive(*tvpp);
4650 }
4651 }
4652
4653 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4654 if (_err == 0 && *fvpp != *tvpp) {
4655 if (!*fvpp) {
4656 panic("No fvpp after compound rename?");
4657 }
4658
4659 events = NOTE_WRITE;
4660 if (vnode_isdir(*fvpp)) {
4661 /* Link count on dir changed only if we are moving a dir and...
4662 * --Moved to new dir, not overwriting there
4663 * --Kept in same dir and DID overwrite
4664 */
4665 if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
4666 events |= NOTE_LINK;
4667 }
4668 }
4669
4670 lock_vnode_and_post(fdvp, events);
4671 if (fdvp != tdvp) {
4672 lock_vnode_and_post(tdvp, events);
4673 }
4674
4675 /* If you're replacing the target, post a deletion for it */
4676 if (*tvpp) {
4677 lock_vnode_and_post(*tvpp, NOTE_DELETE);
4678 }
4679
4680 lock_vnode_and_post(*fvpp, NOTE_RENAME);
4681 }
4682
4683 if (no_fvp) {
4684 lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
4685 }
4686 if (no_tvp && *tvpp != NULLVP) {
4687 lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
4688 }
4689
4690 if (_err && _err != EKEEPLOOKING) {
4691 if (*fvpp) {
4692 vnode_put(*fvpp);
4693 *fvpp = NULLVP;
4694 }
4695 if (*tvpp) {
4696 vnode_put(*tvpp);
4697 *tvpp = NULLVP;
4698 }
4699 }
4700
4701 return _err;
4702 }
4703
4704 int
4705 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4706 struct vnode_attr *vap, vfs_context_t ctx)
4707 {
4708 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4709 panic("Non-CREATE nameiop in vn_mkdir()?");
4710 }
4711
4712 if (vnode_compound_mkdir_available(dvp)) {
4713 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4714 } else {
4715 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4716 }
4717 }
4718
4719 #if 0
4720 /*
4721 *#
4722 *#% mkdir dvp L U U
4723 *#% mkdir vpp - L -
4724 *#
4725 */
4726 struct vnop_mkdir_args {
4727 struct vnodeop_desc *a_desc;
4728 vnode_t a_dvp;
4729 vnode_t *a_vpp;
4730 struct componentname *a_cnp;
4731 struct vnode_attr *a_vap;
4732 vfs_context_t a_context;
4733 };
4734 #endif /* 0*/
4735 errno_t
4736 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4737 struct vnode_attr *vap, vfs_context_t ctx)
4738 {
4739 int _err;
4740 struct vnop_mkdir_args a;
4741
4742 a.a_desc = &vnop_mkdir_desc;
4743 a.a_dvp = dvp;
4744 a.a_vpp = vpp;
4745 a.a_cnp = cnp;
4746 a.a_vap = vap;
4747 a.a_context = ctx;
4748
4749 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4750 if (_err == 0 && *vpp) {
4751 DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4752 }
4753 #if CONFIG_APPLEDOUBLE
4754 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4755 /*
4756 * Remove stale Apple Double file (if any).
4757 */
4758 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4759 }
4760 #endif /* CONFIG_APPLEDOUBLE */
4761
4762 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4763
4764 return _err;
4765 }
4766
4767 int
4768 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4769 struct vnode_attr *vap, vfs_context_t ctx)
4770 {
4771 int _err;
4772 struct vnop_compound_mkdir_args a;
4773
4774 a.a_desc = &vnop_compound_mkdir_desc;
4775 a.a_dvp = dvp;
4776 a.a_vpp = vpp;
4777 a.a_cnp = &ndp->ni_cnd;
4778 a.a_vap = vap;
4779 a.a_flags = 0;
4780 a.a_context = ctx;
4781 #if 0
4782 a.a_mkdir_authorizer = vn_authorize_mkdir;
4783 #endif /* 0 */
4784 a.a_reserved = NULL;
4785
4786 _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
4787 if (_err == 0 && *vpp) {
4788 DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
4789 }
4790 #if CONFIG_APPLEDOUBLE
4791 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4792 /*
4793 * Remove stale Apple Double file (if any).
4794 */
4795 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4796 }
4797 #endif /* CONFIG_APPLEDOUBLE */
4798
4799 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4800
4801 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
4802 if (*vpp && _err && _err != EKEEPLOOKING) {
4803 vnode_put(*vpp);
4804 *vpp = NULLVP;
4805 }
4806
4807 return _err;
4808 }
4809
4810 int
4811 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4812 {
4813 if (vnode_compound_rmdir_available(dvp)) {
4814 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4815 } else {
4816 if (*vpp == NULLVP) {
4817 panic("NULL vp, but not a compound VNOP?");
4818 }
4819 if (vap != NULL) {
4820 panic("Non-NULL vap, but not a compound VNOP?");
4821 }
4822 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4823 }
4824 }
4825
4826 #if 0
4827 /*
4828 *#
4829 *#% rmdir dvp L U U
4830 *#% rmdir vp L U U
4831 *#
4832 */
4833 struct vnop_rmdir_args {
4834 struct vnodeop_desc *a_desc;
4835 vnode_t a_dvp;
4836 vnode_t a_vp;
4837 struct componentname *a_cnp;
4838 vfs_context_t a_context;
4839 };
4840
4841 #endif /* 0*/
4842 errno_t
4843 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4844 {
4845 int _err;
4846 struct vnop_rmdir_args a;
4847
4848 a.a_desc = &vnop_rmdir_desc;
4849 a.a_dvp = dvp;
4850 a.a_vp = vp;
4851 a.a_cnp = cnp;
4852 a.a_context = ctx;
4853
4854 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4855 DTRACE_FSINFO(rmdir, vnode_t, vp);
4856
4857 if (_err == 0) {
4858 vnode_setneedinactive(vp);
4859 #if CONFIG_APPLEDOUBLE
4860 if (!(NATIVE_XATTR(dvp))) {
4861 /*
4862 * Remove any associated extended attribute file (._ AppleDouble file).
4863 */
4864 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4865 }
4866 #endif
4867 }
4868
4869 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4870 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4871 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4872
4873 return _err;
4874 }
4875
4876 int
4877 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4878 struct vnode_attr *vap, vfs_context_t ctx)
4879 {
4880 int _err;
4881 struct vnop_compound_rmdir_args a;
4882 int no_vp;
4883
4884 a.a_desc = &vnop_compound_rmdir_desc;
4885 a.a_dvp = dvp;
4886 a.a_vpp = vpp;
4887 a.a_cnp = &ndp->ni_cnd;
4888 a.a_vap = vap;
4889 a.a_flags = 0;
4890 a.a_context = ctx;
4891 a.a_rmdir_authorizer = vn_authorize_rmdir;
4892 a.a_reserved = NULL;
4893
4894 no_vp = (*vpp == NULLVP);
4895
4896 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
4897 if (_err == 0 && *vpp) {
4898 DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
4899 }
4900 #if CONFIG_APPLEDOUBLE
4901 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4902 /*
4903 * Remove stale Apple Double file (if any).
4904 */
4905 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4906 }
4907 #endif
4908
4909 if (*vpp) {
4910 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4911 }
4912 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4913
4914 if (no_vp) {
4915 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4916
4917 #if 0 /* Removing orphaned ._ files requires a vp.... */
4918 if (*vpp && _err && _err != EKEEPLOOKING) {
4919 vnode_put(*vpp);
4920 *vpp = NULLVP;
4921 }
4922 #endif /* 0 */
4923 }
4924
4925 return _err;
4926 }
4927
4928 #if CONFIG_APPLEDOUBLE
4929 /*
4930 * Remove a ._ AppleDouble file
4931 */
4932 #define AD_STALE_SECS (180)
4933 static void
4934 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
4935 {
4936 vnode_t xvp;
4937 struct nameidata nd;
4938 char smallname[64];
4939 char *filename = NULL;
4940 size_t alloc_len;
4941 size_t copy_len;
4942
4943 if ((basename == NULL) || (basename[0] == '\0') ||
4944 (basename[0] == '.' && basename[1] == '_')) {
4945 return;
4946 }
4947 filename = &smallname[0];
4948 alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
4949 if (alloc_len >= sizeof(smallname)) {
4950 alloc_len++; /* snprintf result doesn't include '\0' */
4951 filename = kheap_alloc(KHEAP_TEMP, alloc_len, Z_WAITOK);
4952 copy_len = snprintf(filename, alloc_len, "._%s", basename);
4953 }
4954 NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
4955 CAST_USER_ADDR_T(filename), ctx);
4956 nd.ni_dvp = dvp;
4957 if (namei(&nd) != 0) {
4958 goto out2;
4959 }
4960
4961 xvp = nd.ni_vp;
4962 nameidone(&nd);
4963 if (xvp->v_type != VREG) {
4964 goto out1;
4965 }
4966
4967 /*
4968 * When creating a new object and a "._" file already
4969 * exists, check to see if it's a stale "._" file.
4970 *
4971 */
4972 if (!force) {
4973 struct vnode_attr va;
4974
4975 VATTR_INIT(&va);
4976 VATTR_WANTED(&va, va_data_size);
4977 VATTR_WANTED(&va, va_modify_time);
4978 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
4979 VATTR_IS_SUPPORTED(&va, va_data_size) &&
4980 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
4981 va.va_data_size != 0) {
4982 struct timeval tv;
4983
4984 microtime(&tv);
4985 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
4986 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
4987 force = 1; /* must be stale */
4988 }
4989 }
4990 }
4991 if (force) {
4992 int error;
4993
4994 error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
4995 if (error == 0) {
4996 vnode_setneedinactive(xvp);
4997 }
4998
4999 post_event_if_success(xvp, error, NOTE_DELETE);
5000 post_event_if_success(dvp, error, NOTE_WRITE);
5001 }
5002
5003 out1:
5004 vnode_put(dvp);
5005 vnode_put(xvp);
5006 out2:
5007 if (filename && filename != &smallname[0]) {
5008 kheap_free(KHEAP_TEMP, filename, alloc_len);
5009 }
5010 }
5011
5012 /*
5013 * Shadow uid/gid/mod to a ._ AppleDouble file
5014 */
5015 static void
5016 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
5017 vfs_context_t ctx)
5018 {
5019 vnode_t xvp;
5020 struct nameidata nd;
5021 char smallname[64];
5022 char *filename = NULL;
5023 size_t alloc_len;
5024 size_t copy_len;
5025
5026 if ((dvp == NULLVP) ||
5027 (basename == NULL) || (basename[0] == '\0') ||
5028 (basename[0] == '.' && basename[1] == '_')) {
5029 return;
5030 }
5031 filename = &smallname[0];
5032 alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
5033 if (alloc_len >= sizeof(smallname)) {
5034 alloc_len++; /* snprintf result doesn't include '\0' */
5035 filename = kheap_alloc(KHEAP_TEMP, alloc_len, Z_WAITOK);
5036 copy_len = snprintf(filename, alloc_len, "._%s", basename);
5037 }
5038 NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
5039 CAST_USER_ADDR_T(filename), ctx);
5040 nd.ni_dvp = dvp;
5041 if (namei(&nd) != 0) {
5042 goto out2;
5043 }
5044
5045 xvp = nd.ni_vp;
5046 nameidone(&nd);
5047
5048 if (xvp->v_type == VREG) {
5049 struct vnop_setattr_args a;
5050
5051 a.a_desc = &vnop_setattr_desc;
5052 a.a_vp = xvp;
5053 a.a_vap = vap;
5054 a.a_context = ctx;
5055
5056 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
5057 }
5058
5059 vnode_put(xvp);
5060 out2:
5061 if (filename && filename != &smallname[0]) {
5062 kheap_free(KHEAP_TEMP, filename, alloc_len);
5063 }
5064 }
5065 #endif /* CONFIG_APPLEDOUBLE */
5066
5067 #if 0
5068 /*
5069 *#
5070 *#% symlink dvp L U U
5071 *#% symlink vpp - U -
5072 *#
5073 */
5074 struct vnop_symlink_args {
5075 struct vnodeop_desc *a_desc;
5076 vnode_t a_dvp;
5077 vnode_t *a_vpp;
5078 struct componentname *a_cnp;
5079 struct vnode_attr *a_vap;
5080 char *a_target;
5081 vfs_context_t a_context;
5082 };
5083
5084 #endif /* 0*/
5085 errno_t
5086 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5087 struct vnode_attr *vap, char *target, vfs_context_t ctx)
5088 {
5089 int _err;
5090 struct vnop_symlink_args a;
5091
5092 a.a_desc = &vnop_symlink_desc;
5093 a.a_dvp = dvp;
5094 a.a_vpp = vpp;
5095 a.a_cnp = cnp;
5096 a.a_vap = vap;
5097 a.a_target = target;
5098 a.a_context = ctx;
5099
5100 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
5101 DTRACE_FSINFO(symlink, vnode_t, dvp);
5102 #if CONFIG_APPLEDOUBLE
5103 if (_err == 0 && !NATIVE_XATTR(dvp)) {
5104 /*
5105 * Remove stale Apple Double file (if any). Posts its own knotes
5106 */
5107 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
5108 }
5109 #endif /* CONFIG_APPLEDOUBLE */
5110
5111 post_event_if_success(dvp, _err, NOTE_WRITE);
5112
5113 return _err;
5114 }
5115
5116 #if 0
5117 /*
5118 *#
5119 *#% readdir vp L L L
5120 *#
5121 */
5122 struct vnop_readdir_args {
5123 struct vnodeop_desc *a_desc;
5124 vnode_t a_vp;
5125 struct uio *a_uio;
5126 int a_flags;
5127 int *a_eofflag;
5128 int *a_numdirent;
5129 vfs_context_t a_context;
5130 };
5131
5132 #endif /* 0*/
5133 errno_t
5134 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
5135 int *numdirent, vfs_context_t ctx)
5136 {
5137 int _err;
5138 struct vnop_readdir_args a;
5139 #if CONFIG_DTRACE
5140 user_ssize_t resid = uio_resid(uio);
5141 #endif
5142
5143 a.a_desc = &vnop_readdir_desc;
5144 a.a_vp = vp;
5145 a.a_uio = uio;
5146 a.a_flags = flags;
5147 a.a_eofflag = eofflag;
5148 a.a_numdirent = numdirent;
5149 a.a_context = ctx;
5150
5151 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
5152 DTRACE_FSINFO_IO(readdir,
5153 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5154
5155 return _err;
5156 }
5157
5158 #if 0
5159 /*
5160 *#
5161 *#% readdirattr vp L L L
5162 *#
5163 */
5164 struct vnop_readdirattr_args {
5165 struct vnodeop_desc *a_desc;
5166 vnode_t a_vp;
5167 struct attrlist *a_alist;
5168 struct uio *a_uio;
5169 uint32_t a_maxcount;
5170 uint32_t a_options;
5171 uint32_t *a_newstate;
5172 int *a_eofflag;
5173 uint32_t *a_actualcount;
5174 vfs_context_t a_context;
5175 };
5176
5177 #endif /* 0*/
5178 errno_t
5179 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
5180 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
5181 {
5182 int _err;
5183 struct vnop_readdirattr_args a;
5184 #if CONFIG_DTRACE
5185 user_ssize_t resid = uio_resid(uio);
5186 #endif
5187
5188 a.a_desc = &vnop_readdirattr_desc;
5189 a.a_vp = vp;
5190 a.a_alist = alist;
5191 a.a_uio = uio;
5192 a.a_maxcount = maxcount;
5193 a.a_options = options;
5194 a.a_newstate = newstate;
5195 a.a_eofflag = eofflag;
5196 a.a_actualcount = actualcount;
5197 a.a_context = ctx;
5198
5199 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
5200 DTRACE_FSINFO_IO(readdirattr,
5201 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5202
5203 return _err;
5204 }
5205
5206 #if 0
5207 struct vnop_getattrlistbulk_args {
5208 struct vnodeop_desc *a_desc;
5209 vnode_t a_vp;
5210 struct attrlist *a_alist;
5211 struct vnode_attr *a_vap;
5212 struct uio *a_uio;
5213 void *a_private;
5214 uint64_t a_options;
5215 int *a_eofflag;
5216 uint32_t *a_actualcount;
5217 vfs_context_t a_context;
5218 };
5219 #endif /* 0*/
5220 errno_t
5221 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
5222 struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
5223 int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
5224 {
5225 int _err;
5226 struct vnop_getattrlistbulk_args a;
5227 #if CONFIG_DTRACE
5228 user_ssize_t resid = uio_resid(uio);
5229 #endif
5230
5231 a.a_desc = &vnop_getattrlistbulk_desc;
5232 a.a_vp = vp;
5233 a.a_alist = alist;
5234 a.a_vap = vap;
5235 a.a_uio = uio;
5236 a.a_private = private;
5237 a.a_options = options;
5238 a.a_eofflag = eofflag;
5239 a.a_actualcount = actualcount;
5240 a.a_context = ctx;
5241
5242 _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
5243 DTRACE_FSINFO_IO(getattrlistbulk,
5244 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5245
5246 return _err;
5247 }
5248
5249 #if 0
5250 /*
5251 *#
5252 *#% readlink vp L L L
5253 *#
5254 */
5255 struct vnop_readlink_args {
5256 struct vnodeop_desc *a_desc;
5257 vnode_t a_vp;
5258 struct uio *a_uio;
5259 vfs_context_t a_context;
5260 };
5261 #endif /* 0 */
5262
5263 /*
5264 * Returns: 0 Success
5265 * lock_fsnode:ENOENT No such file or directory [only for VFS
5266 * that is not thread safe & vnode is
5267 * currently being/has been terminated]
5268 * <vfs_readlink>:EINVAL
5269 * <vfs_readlink>:???
5270 *
5271 * Note: The return codes from the underlying VFS's readlink routine
5272 * can't be fully enumerated here, since third party VFS authors
5273 * may not limit their error returns to the ones documented here,
5274 * even though this may result in some programs functioning
5275 * incorrectly.
5276 *
5277 * The return codes documented above are those which may currently
5278 * be returned by HFS from hfs_vnop_readlink, not including
5279 * additional error code which may be propagated from underlying
5280 * routines.
5281 */
5282 errno_t
5283 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
5284 {
5285 int _err;
5286 struct vnop_readlink_args a;
5287 #if CONFIG_DTRACE
5288 user_ssize_t resid = uio_resid(uio);
5289 #endif
5290 a.a_desc = &vnop_readlink_desc;
5291 a.a_vp = vp;
5292 a.a_uio = uio;
5293 a.a_context = ctx;
5294
5295 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
5296 DTRACE_FSINFO_IO(readlink,
5297 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5298
5299 return _err;
5300 }
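/*
 * Illustrative sketch (not part of the KPI surface): one way a kernel caller
 * could drive VNOP_READLINK with a kernel-space buffer. The helper name, buffer
 * parameters, and error handling are hypothetical; uio_create, uio_addiov,
 * uio_resid, uio_free and vfs_context_current are existing KPIs.
 */
#if 0
static int
readlink_into_buffer(vnode_t vp, char *buf, size_t buflen, size_t *copied)
{
	uio_t auio;
	int error;

	/* Build a single-iovec kernel-space uio describing the caller's buffer. */
	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL) {
		return ENOMEM;
	}
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);

	error = VNOP_READLINK(vp, auio, vfs_context_current());
	if (error == 0) {
		/* Bytes produced = requested length minus what was left unconsumed. */
		*copied = buflen - (size_t)uio_resid(auio);
	}
	uio_free(auio);
	return error;
}
#endif /* 0 */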
5301
5302 #if 0
5303 /*
5304 *#
5305 *#% inactive vp L U U
5306 *#
5307 */
5308 struct vnop_inactive_args {
5309 struct vnodeop_desc *a_desc;
5310 vnode_t a_vp;
5311 vfs_context_t a_context;
5312 };
5313 #endif /* 0*/
5314 errno_t
5315 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
5316 {
5317 int _err;
5318 struct vnop_inactive_args a;
5319
5320 a.a_desc = &vnop_inactive_desc;
5321 a.a_vp = vp;
5322 a.a_context = ctx;
5323
5324 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
5325 DTRACE_FSINFO(inactive, vnode_t, vp);
5326
5327 #if NAMEDSTREAMS
5328 /* For file systems that do not support named streams natively, mark
5329 * the shadow stream file vnode to be recycled as soon as the last
5330 * reference goes away. To avoid re-entering reclaim code, do not
5331 * call recycle on terminating namedstream vnodes.
5332 */
5333 if (vnode_isnamedstream(vp) &&
5334 (vp->v_parent != NULLVP) &&
5335 vnode_isshadow(vp) &&
5336 ((vp->v_lflag & VL_TERMINATE) == 0)) {
5337 vnode_recycle(vp);
5338 }
5339 #endif
5340
5341 return _err;
5342 }
5343
5344
5345 #if 0
5346 /*
5347 *#
5348 *#% reclaim vp U U U
5349 *#
5350 */
5351 struct vnop_reclaim_args {
5352 struct vnodeop_desc *a_desc;
5353 vnode_t a_vp;
5354 vfs_context_t a_context;
5355 };
5356 #endif /* 0*/
5357 errno_t
5358 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5359 {
5360 int _err;
5361 struct vnop_reclaim_args a;
5362
5363 a.a_desc = &vnop_reclaim_desc;
5364 a.a_vp = vp;
5365 a.a_context = ctx;
5366
5367 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5368 DTRACE_FSINFO(reclaim, vnode_t, vp);
5369
5370 return _err;
5371 }
5372
5373
5374 /*
5375 * Returns: 0 Success
5376 * lock_fsnode:ENOENT No such file or directory [only for VFS
5377 * that is not thread safe & vnode is
5378 * currently being/has been terminated]
5379 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5380 */
5381 #if 0
5382 /*
5383 *#
5384 *#% pathconf vp L L L
5385 *#
5386 */
5387 struct vnop_pathconf_args {
5388 struct vnodeop_desc *a_desc;
5389 vnode_t a_vp;
5390 int a_name;
5391 int32_t *a_retval;
5392 vfs_context_t a_context;
5393 };
5394 #endif /* 0*/
5395 errno_t
5396 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5397 {
5398 int _err;
5399 struct vnop_pathconf_args a;
5400
5401 a.a_desc = &vnop_pathconf_desc;
5402 a.a_vp = vp;
5403 a.a_name = name;
5404 a.a_retval = retval;
5405 a.a_context = ctx;
5406
5407 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5408 DTRACE_FSINFO(pathconf, vnode_t, vp);
5409
5410 return _err;
5411 }
5412
5413 /*
5414 * Returns: 0 Success
5415 * err_advlock:ENOTSUP
5416 * lf_advlock:???
5417 * <vnop_advlock_desc>:???
5418 *
5419 * Notes: VFS implementations of advisory locking that dispatch through
5420 * <vnop_advlock_desc>, because lock enforcement does not occur
5421 * locally, should try to limit themselves to the return codes
5422 * documented above for lf_advlock and err_advlock.
5423 */
5424 #if 0
5425 /*
5426 *#
5427 *#% advlock vp U U U
5428 *#
5429 */
5430 struct vnop_advlock_args {
5431 struct vnodeop_desc *a_desc;
5432 vnode_t a_vp;
5433 caddr_t a_id;
5434 int a_op;
5435 struct flock *a_fl;
5436 int a_flags;
5437 vfs_context_t a_context;
5438 };
5439 #endif /* 0*/
5440 errno_t
5441 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
5442 {
5443 int _err;
5444 struct vnop_advlock_args a;
5445
5446 a.a_desc = &vnop_advlock_desc;
5447 a.a_vp = vp;
5448 a.a_id = id;
5449 a.a_op = op;
5450 a.a_fl = fl;
5451 a.a_flags = flags;
5452 a.a_context = ctx;
5453 a.a_timeout = timeout;
5454
5455 /* Disallow advisory locking on non-seekable vnodes */
5456 if (vnode_isfifo(vp)) {
5457 _err = err_advlock(&a);
5458 } else {
5459 if ((vp->v_flag & VLOCKLOCAL)) {
5460 /* Advisory locking done at this layer */
5461 _err = lf_advlock(&a);
5462 } else if (flags & F_OFD_LOCK) {
5463 /* Non-local locking doesn't work for OFD locks */
5464 _err = err_advlock(&a);
5465 } else {
5466 /* Advisory locking done by underlying filesystem */
5467 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
5468 }
5469 DTRACE_FSINFO(advlock, vnode_t, vp);
5470 if (op == F_UNLCK &&
5471 (flags & (F_FLOCK | F_OFD_LOCK)) != 0) {
5472 post_event_if_success(vp, _err, NOTE_FUNLOCK);
5473 }
5474 }
5475
5476 return _err;
5477 }
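/*
 * Illustrative sketch (not part of the KPI surface): taking and dropping a
 * whole-file, flock-style advisory lock through VNOP_ADVLOCK. Using the
 * fileglob pointer as the owner id and passing F_FLOCK | F_WAIT mirrors how
 * the flock(2) path drives this interface; the helper itself is hypothetical.
 */
#if 0
static int
flock_whole_file(vnode_t vp, struct fileglob *fg, vfs_context_t ctx)
{
	struct flock lf;
	int error;

	bzero(&lf, sizeof(lf));
	lf.l_whence = SEEK_SET;         /* offset 0, length 0 == entire file */
	lf.l_start = 0;
	lf.l_len = 0;
	lf.l_type = F_WRLCK;

	/* Acquire the lock, blocking until it becomes available. */
	error = VNOP_ADVLOCK(vp, (caddr_t)fg, F_SETLK, &lf, F_FLOCK | F_WAIT, ctx, NULL);
	if (error) {
		return error;
	}

	/* ... do the work that needed exclusive access ... */

	/* Release with the same owner id; this also posts NOTE_FUNLOCK above. */
	lf.l_type = F_UNLCK;
	return VNOP_ADVLOCK(vp, (caddr_t)fg, F_UNLCK, &lf, F_FLOCK, ctx, NULL);
}
#endif /* 0 */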
5478
5479
5480
5481 #if 0
5482 /*
5483 *#
5484 *#% allocate vp L L L
5485 *#
5486 */
5487 struct vnop_allocate_args {
5488 struct vnodeop_desc *a_desc;
5489 vnode_t a_vp;
5490 off_t a_length;
5491 u_int32_t a_flags;
5492 off_t *a_bytesallocated;
5493 off_t a_offset;
5494 vfs_context_t a_context;
5495 };
5496
5497 #endif /* 0*/
5498 errno_t
5499 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5500 {
5501 int _err;
5502 struct vnop_allocate_args a;
5503
5504 a.a_desc = &vnop_allocate_desc;
5505 a.a_vp = vp;
5506 a.a_length = length;
5507 a.a_flags = flags;
5508 a.a_bytesallocated = bytesallocated;
5509 a.a_offset = offset;
5510 a.a_context = ctx;
5511
5512 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5513 DTRACE_FSINFO(allocate, vnode_t, vp);
5514 #if CONFIG_FSE
5515 if (_err == 0) {
5516 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5517 }
5518 #endif
5519
5520 return _err;
5521 }
5522
5523 #if 0
5524 /*
5525 *#
5526 *#% pagein vp = = =
5527 *#
5528 */
5529 struct vnop_pagein_args {
5530 struct vnodeop_desc *a_desc;
5531 vnode_t a_vp;
5532 upl_t a_pl;
5533 upl_offset_t a_pl_offset;
5534 off_t a_f_offset;
5535 size_t a_size;
5536 int a_flags;
5537 vfs_context_t a_context;
5538 };
5539 #endif /* 0*/
5540 errno_t
5541 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5542 {
5543 int _err;
5544 struct vnop_pagein_args a;
5545
5546 a.a_desc = &vnop_pagein_desc;
5547 a.a_vp = vp;
5548 a.a_pl = pl;
5549 a.a_pl_offset = pl_offset;
5550 a.a_f_offset = f_offset;
5551 a.a_size = size;
5552 a.a_flags = flags;
5553 a.a_context = ctx;
5554
5555 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5556 DTRACE_FSINFO(pagein, vnode_t, vp);
5557
5558 return _err;
5559 }
5560
5561 #if 0
5562 /*
5563 *#
5564 *#% pageout vp = = =
5565 *#
5566 */
5567 struct vnop_pageout_args {
5568 struct vnodeop_desc *a_desc;
5569 vnode_t a_vp;
5570 upl_t a_pl;
5571 upl_offset_t a_pl_offset;
5572 off_t a_f_offset;
5573 size_t a_size;
5574 int a_flags;
5575 vfs_context_t a_context;
5576 };
5577
5578 #endif /* 0*/
5579 errno_t
5580 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5581 {
5582 int _err;
5583 struct vnop_pageout_args a;
5584
5585 a.a_desc = &vnop_pageout_desc;
5586 a.a_vp = vp;
5587 a.a_pl = pl;
5588 a.a_pl_offset = pl_offset;
5589 a.a_f_offset = f_offset;
5590 a.a_size = size;
5591 a.a_flags = flags;
5592 a.a_context = ctx;
5593
5594 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5595 DTRACE_FSINFO(pageout, vnode_t, vp);
5596
5597 post_event_if_success(vp, _err, NOTE_WRITE);
5598
5599 return _err;
5600 }
5601
5602 int
5603 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5604 {
5605 if (vnode_compound_remove_available(dvp)) {
5606 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5607 } else {
5608 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5609 }
5610 }
5611
5612 #if CONFIG_SEARCHFS
5613
5614 #if 0
5615 /*
5616 *#
5617 *#% searchfs vp L L L
5618 *#
5619 */
5620 struct vnop_searchfs_args {
5621 struct vnodeop_desc *a_desc;
5622 vnode_t a_vp;
5623 void *a_searchparams1;
5624 void *a_searchparams2;
5625 struct attrlist *a_searchattrs;
5626 uint32_t a_maxmatches;
5627 struct timeval *a_timelimit;
5628 struct attrlist *a_returnattrs;
5629 uint32_t *a_nummatches;
5630 uint32_t a_scriptcode;
5631 uint32_t a_options;
5632 struct uio *a_uio;
5633 struct searchstate *a_searchstate;
5634 vfs_context_t a_context;
5635 };
5636
5637 #endif /* 0*/
5638 errno_t
5639 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5640 {
5641 int _err;
5642 struct vnop_searchfs_args a;
5643
5644 a.a_desc = &vnop_searchfs_desc;
5645 a.a_vp = vp;
5646 a.a_searchparams1 = searchparams1;
5647 a.a_searchparams2 = searchparams2;
5648 a.a_searchattrs = searchattrs;
5649 a.a_maxmatches = maxmatches;
5650 a.a_timelimit = timelimit;
5651 a.a_returnattrs = returnattrs;
5652 a.a_nummatches = nummatches;
5653 a.a_scriptcode = scriptcode;
5654 a.a_options = options;
5655 a.a_uio = uio;
5656 a.a_searchstate = searchstate;
5657 a.a_context = ctx;
5658
5659 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5660 DTRACE_FSINFO(searchfs, vnode_t, vp);
5661
5662 return _err;
5663 }
5664 #endif /* CONFIG_SEARCHFS */
5665
5666 #if 0
5667 /*
5668 *#
5669 *#% copyfile fvp U U U
5670 *#% copyfile tdvp L U U
5671 *#% copyfile tvp X U U
5672 *#
5673 */
5674 struct vnop_copyfile_args {
5675 struct vnodeop_desc *a_desc;
5676 vnode_t a_fvp;
5677 vnode_t a_tdvp;
5678 vnode_t a_tvp;
5679 struct componentname *a_tcnp;
5680 int a_mode;
5681 int a_flags;
5682 vfs_context_t a_context;
5683 };
5684 #endif /* 0*/
5685 errno_t
5686 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5687 int mode, int flags, vfs_context_t ctx)
5688 {
5689 int _err;
5690 struct vnop_copyfile_args a;
5691 a.a_desc = &vnop_copyfile_desc;
5692 a.a_fvp = fvp;
5693 a.a_tdvp = tdvp;
5694 a.a_tvp = tvp;
5695 a.a_tcnp = tcnp;
5696 a.a_mode = mode;
5697 a.a_flags = flags;
5698 a.a_context = ctx;
5699 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5700 DTRACE_FSINFO(copyfile, vnode_t, fvp);
5701 return _err;
5702 }
5703
5704 #if 0
5705 struct vnop_clonefile_args {
5706 struct vnodeop_desc *a_desc;
5707 vnode_t a_fvp;
5708 vnode_t a_dvp;
5709 vnode_t *a_vpp;
5710 struct componentname *a_cnp;
5711 struct vnode_attr *a_vap;
5712 uint32_t a_flags;
5713 vfs_context_t a_context;
5714 int (*a_dir_clone_authorizer)( /* Authorization callback */
5715 struct vnode_attr *vap, /* attribute to be authorized */
5716 kauth_action_t action, /* action for which attribute is to be authorized */
5717 struct vnode_attr *dvap, /* target directory attributes */
5718 vnode_t sdvp, /* source directory vnode pointer (optional) */
5719 mount_t mp, /* mount point of filesystem */
5720 dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */
5721 uint32_t flags; /* value passed in a_flags to the VNOP */
5722 vfs_context_t ctx, /* As passed to VNOP */
5723 void *reserved); /* Always NULL */
5724 void *a_reserved; /* Currently unused */
5725 };
5726 #endif /* 0 */
5727
5728 errno_t
5729 VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
5730 struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
5731 vfs_context_t ctx)
5732 {
5733 int _err;
5734 struct vnop_clonefile_args a;
5735 a.a_desc = &vnop_clonefile_desc;
5736 a.a_fvp = fvp;
5737 a.a_dvp = dvp;
5738 a.a_vpp = vpp;
5739 a.a_cnp = cnp;
5740 a.a_vap = vap;
5741 a.a_flags = flags;
5742 a.a_context = ctx;
5743
5744 if (vnode_vtype(fvp) == VDIR) {
5745 a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
5746 } else {
5747 a.a_dir_clone_authorizer = NULL;
5748 }
5749
5750 _err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);
5751
5752 if (_err == 0 && *vpp) {
5753 DTRACE_FSINFO(clonefile, vnode_t, *vpp);
5754 if (kdebug_enable) {
5755 kdebug_lookup(*vpp, cnp);
5756 }
5757 }
5758
5759 post_event_if_success(dvp, _err, NOTE_WRITE);
5760
5761 return _err;
5762 }
5763
5764 errno_t
5765 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5766 {
5767 struct vnop_getxattr_args a;
5768 int error;
5769
5770 a.a_desc = &vnop_getxattr_desc;
5771 a.a_vp = vp;
5772 a.a_name = name;
5773 a.a_uio = uio;
5774 a.a_size = size;
5775 a.a_options = options;
5776 a.a_context = ctx;
5777
5778 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
5779 DTRACE_FSINFO(getxattr, vnode_t, vp);
5780
5781 return error;
5782 }
5783
5784 errno_t
5785 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
5786 {
5787 struct vnop_setxattr_args a;
5788 int error;
5789
5790 a.a_desc = &vnop_setxattr_desc;
5791 a.a_vp = vp;
5792 a.a_name = name;
5793 a.a_uio = uio;
5794 a.a_options = options;
5795 a.a_context = ctx;
5796
5797 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
5798 DTRACE_FSINFO(setxattr, vnode_t, vp);
5799
5800 if (error == 0) {
5801 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
5802 }
5803
5804 post_event_if_success(vp, error, NOTE_ATTRIB);
5805
5806 return error;
5807 }
5808
5809 errno_t
5810 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
5811 {
5812 struct vnop_removexattr_args a;
5813 int error;
5814
5815 a.a_desc = &vnop_removexattr_desc;
5816 a.a_vp = vp;
5817 a.a_name = name;
5818 a.a_options = options;
5819 a.a_context = ctx;
5820
5821 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
5822 DTRACE_FSINFO(removexattr, vnode_t, vp);
5823
5824 post_event_if_success(vp, error, NOTE_ATTRIB);
5825
5826 return error;
5827 }
5828
5829 errno_t
5830 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5831 {
5832 struct vnop_listxattr_args a;
5833 int error;
5834
5835 a.a_desc = &vnop_listxattr_desc;
5836 a.a_vp = vp;
5837 a.a_uio = uio;
5838 a.a_size = size;
5839 a.a_options = options;
5840 a.a_context = ctx;
5841
5842 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
5843 DTRACE_FSINFO(listxattr, vnode_t, vp);
5844
5845 return error;
5846 }
5847
5848
5849 #if 0
5850 /*
5851 *#
5852 *#% blktooff vp = = =
5853 *#
5854 */
5855 struct vnop_blktooff_args {
5856 struct vnodeop_desc *a_desc;
5857 vnode_t a_vp;
5858 daddr64_t a_lblkno;
5859 off_t *a_offset;
5860 };
5861 #endif /* 0*/
5862 errno_t
5863 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
5864 {
5865 int _err;
5866 struct vnop_blktooff_args a;
5867
5868 a.a_desc = &vnop_blktooff_desc;
5869 a.a_vp = vp;
5870 a.a_lblkno = lblkno;
5871 a.a_offset = offset;
5872
5873 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
5874 DTRACE_FSINFO(blktooff, vnode_t, vp);
5875
5876 return _err;
5877 }
5878
5879 #if 0
5880 /*
5881 *#
5882 *#% offtoblk vp = = =
5883 *#
5884 */
5885 struct vnop_offtoblk_args {
5886 struct vnodeop_desc *a_desc;
5887 vnode_t a_vp;
5888 off_t a_offset;
5889 daddr64_t *a_lblkno;
5890 };
5891 #endif /* 0*/
5892 errno_t
5893 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
5894 {
5895 int _err;
5896 struct vnop_offtoblk_args a;
5897
5898 a.a_desc = &vnop_offtoblk_desc;
5899 a.a_vp = vp;
5900 a.a_offset = offset;
5901 a.a_lblkno = lblkno;
5902
5903 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
5904 DTRACE_FSINFO(offtoblk, vnode_t, vp);
5905
5906 return _err;
5907 }
5908
5909 #if 0
5910 /*
5911 *#
5912 *#% verify vp L L L
5913 *#
5914 */
5915 struct vnop_verify_args {
5916 struct vnodeop_desc *a_desc;
5917 vnode_t a_vp;
5918 off_t a_foffset;
5919 char *a_buf;
5920 size_t a_bufsize;
5921 size_t *a_verifyblksize;
5922 int a_flags;
5923 vfs_context_t a_context;
5924 };
5925 #endif
5926
5927 errno_t
5928 VNOP_VERIFY(struct vnode *vp, off_t foffset, uint8_t *buf, size_t bufsize,
5929 size_t *verify_block_size, vnode_verify_flags_t flags, vfs_context_t ctx)
5930 {
5931 int _err;
5932 struct vnop_verify_args a;
5933
5934 if (ctx == NULL) {
5935 ctx = vfs_context_current();
5936 }
5937 a.a_desc = &vnop_verify_desc;
5938 a.a_vp = vp;
5939 a.a_foffset = foffset;
5940 a.a_buf = buf;
5941 a.a_bufsize = bufsize;
5942 a.a_verifyblksize = verify_block_size;
5943 a.a_flags = flags;
5944 a.a_context = ctx;
5945
5946 _err = (*vp->v_op[vnop_verify_desc.vdesc_offset])(&a);
5947 DTRACE_FSINFO(verify, vnode_t, vp);
5948
5949 /* It is not an error for a filesystem to not support this VNOP */
5950 if (_err == ENOTSUP) {
5951 if (!buf && verify_block_size) {
5952 *verify_block_size = 0;
5953 }
5954
5955 _err = 0;
5956 }
5957
5958 return _err;
5959 }
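/*
 * Illustrative sketch (not part of the KPI surface): probing whether a
 * filesystem supports content verification by asking for its verify block size
 * with a NULL buffer. A zero result (whether reported by the filesystem or left
 * by the ENOTSUP normalization above) means verification is unavailable. The
 * helper name and the flag value of 0 are assumptions made for illustration.
 */
#if 0
static bool
fs_supports_verification(vnode_t vp)
{
	size_t verify_block_size = 0;

	/* NULL buffer: only the verify block size is being requested. */
	if (VNOP_VERIFY(vp, 0, NULL, 0, &verify_block_size, 0, NULL) != 0) {
		return false;
	}
	return verify_block_size != 0;
}
#endif /* 0 */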
5960
5961 #if 0
5962 /*
5963 *#
5964 *#% blockmap vp L L L
5965 *#
5966 */
5967 struct vnop_blockmap_args {
5968 struct vnodeop_desc *a_desc;
5969 vnode_t a_vp;
5970 off_t a_foffset;
5971 size_t a_size;
5972 daddr64_t *a_bpn;
5973 size_t *a_run;
5974 void *a_poff;
5975 int a_flags;
5976 vfs_context_t a_context;
5977 };
5978 #endif /* 0*/
5979 errno_t
5980 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
5981 {
5982 int _err;
5983 struct vnop_blockmap_args a;
5984 size_t localrun = 0;
5985
5986 if (ctx == NULL) {
5987 ctx = vfs_context_current();
5988 }
5989 a.a_desc = &vnop_blockmap_desc;
5990 a.a_vp = vp;
5991 a.a_foffset = foffset;
5992 a.a_size = size;
5993 a.a_bpn = bpn;
5994 a.a_run = &localrun;
5995 a.a_poff = poff;
5996 a.a_flags = flags;
5997 a.a_context = ctx;
5998
5999 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
6000 DTRACE_FSINFO(blockmap, vnode_t, vp);
6001
6002 /*
6003 * We used a local variable to request information from the underlying
6004 * filesystem about the length of the I/O run in question. If the
6005 * filesystem reports a run longer than the length we requested, we cap
6006 * it to the requested length. Update 'run' on the way out.
6007 */
6008 if (_err == 0) {
6009 if (localrun > size) {
6010 localrun = size;
6011 }
6012
6013 if (run) {
6014 *run = localrun;
6015 }
6016 }
6017
6018 return _err;
6019 }
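/*
 * Illustrative sketch (not part of the KPI surface): translating a logical file
 * offset into a starting device block plus a contiguous run length, the way a
 * cluster-I/O style caller would. The helper and its parameter names are
 * hypothetical; VNODE_READ is an existing blockmap flag, and passing a NULL
 * context falls back to vfs_context_current() as handled above.
 */
#if 0
static int
map_file_extent(vnode_t vp, off_t foffset, size_t iosize,
    daddr64_t *blkno, size_t *contig_bytes)
{
	/*
	 * On success, *blkno is the starting device block and *contig_bytes is
	 * how many bytes (capped to iosize by VNOP_BLOCKMAP) are contiguous on
	 * disk starting at that block.
	 */
	return VNOP_BLOCKMAP(vp, foffset, iosize, blkno, contig_bytes,
	           NULL, VNODE_READ, NULL);
}
#endif /* 0 */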
6020
6021 #if 0
6022 struct vnop_strategy_args {
6023 struct vnodeop_desc *a_desc;
6024 struct buf *a_bp;
6025 };
6026
6027 #endif /* 0*/
6028 errno_t
6029 VNOP_STRATEGY(struct buf *bp)
6030 {
6031 int _err;
6032 struct vnop_strategy_args a;
6033 vnode_t vp = buf_vnode(bp);
6034 a.a_desc = &vnop_strategy_desc;
6035 a.a_bp = bp;
6036 _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
6037 DTRACE_FSINFO(strategy, vnode_t, vp);
6038 return _err;
6039 }
6040
6041 #if 0
6042 struct vnop_bwrite_args {
6043 struct vnodeop_desc *a_desc;
6044 buf_t a_bp;
6045 };
6046 #endif /* 0*/
6047 errno_t
6048 VNOP_BWRITE(struct buf *bp)
6049 {
6050 int _err;
6051 struct vnop_bwrite_args a;
6052 vnode_t vp = buf_vnode(bp);
6053 a.a_desc = &vnop_bwrite_desc;
6054 a.a_bp = bp;
6055 _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
6056 DTRACE_FSINFO(bwrite, vnode_t, vp);
6057 return _err;
6058 }
6059
6060 #if 0
6061 struct vnop_kqfilt_add_args {
6062 struct vnodeop_desc *a_desc;
6063 struct vnode *a_vp;
6064 struct knote *a_kn;
6065 vfs_context_t a_context;
6066 };
6067 #endif
6068 errno_t
6069 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
6070 {
6071 int _err;
6072 struct vnop_kqfilt_add_args a;
6073
6074 a.a_desc = VDESC(vnop_kqfilt_add);
6075 a.a_vp = vp;
6076 a.a_kn = kn;
6077 a.a_context = ctx;
6078
6079 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
6080 DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
6081
6082 return _err;
6083 }
6084
6085 #if 0
6086 struct vnop_kqfilt_remove_args {
6087 struct vnodeop_desc *a_desc;
6088 struct vnode *a_vp;
6089 uintptr_t a_ident;
6090 vfs_context_t a_context;
6091 };
6092 #endif
6093 errno_t
6094 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
6095 {
6096 int _err;
6097 struct vnop_kqfilt_remove_args a;
6098
6099 a.a_desc = VDESC(vnop_kqfilt_remove);
6100 a.a_vp = vp;
6101 a.a_ident = ident;
6102 a.a_context = ctx;
6103
6104 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
6105 DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
6106
6107 return _err;
6108 }
6109
6110 errno_t
6111 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
6112 {
6113 int _err;
6114 struct vnop_monitor_args a;
6115
6116 a.a_desc = VDESC(vnop_monitor);
6117 a.a_vp = vp;
6118 a.a_events = events;
6119 a.a_flags = flags;
6120 a.a_handle = handle;
6121 a.a_context = ctx;
6122
6123 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
6124 DTRACE_FSINFO(monitor, vnode_t, vp);
6125
6126 return _err;
6127 }
6128
6129 #if 0
6130 struct vnop_setlabel_args {
6131 struct vnodeop_desc *a_desc;
6132 struct vnode *a_vp;
6133 struct label *a_vl;
6134 vfs_context_t a_context;
6135 };
6136 #endif
6137 errno_t
6138 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
6139 {
6140 int _err;
6141 struct vnop_setlabel_args a;
6142
6143 a.a_desc = VDESC(vnop_setlabel);
6144 a.a_vp = vp;
6145 a.a_vl = label;
6146 a.a_context = ctx;
6147
6148 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
6149 DTRACE_FSINFO(setlabel, vnode_t, vp);
6150
6151 return _err;
6152 }
6153
6154
6155 #if NAMEDSTREAMS
6156 /*
6157 * Get a named stream
6158 */
6159 errno_t
6160 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
6161 {
6162 int _err;
6163 struct vnop_getnamedstream_args a;
6164
6165 a.a_desc = &vnop_getnamedstream_desc;
6166 a.a_vp = vp;
6167 a.a_svpp = svpp;
6168 a.a_name = name;
6169 a.a_operation = operation;
6170 a.a_flags = flags;
6171 a.a_context = ctx;
6172
6173 _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
6174 DTRACE_FSINFO(getnamedstream, vnode_t, vp);
6175 return _err;
6176 }
6177
6178 /*
6179 * Create a named stream
6180 */
6181 errno_t
6182 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
6183 {
6184 int _err;
6185 struct vnop_makenamedstream_args a;
6186
6187 a.a_desc = &vnop_makenamedstream_desc;
6188 a.a_vp = vp;
6189 a.a_svpp = svpp;
6190 a.a_name = name;
6191 a.a_flags = flags;
6192 a.a_context = ctx;
6193
6194 _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
6195 DTRACE_FSINFO(makenamedstream, vnode_t, vp);
6196 return _err;
6197 }
6198
6199
6200 /*
6201 * Remove a named stream
6202 */
6203 errno_t
6204 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6205 {
6206 int _err;
6207 struct vnop_removenamedstream_args a;
6208
6209 a.a_desc = &vnop_removenamedstream_desc;
6210 a.a_vp = vp;
6211 a.a_svp = svp;
6212 a.a_name = name;
6213 a.a_flags = flags;
6214 a.a_context = ctx;
6215
6216 _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6217 DTRACE_FSINFO(removenamedstream, vnode_t, vp);
6218 return _err;
6219 }
6220 #endif