1 /*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/vnode_internal.h>
88 #include <sys/stat.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
91 #include <sys/buf.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
95 #include <sys/mbuf.h>
96 #include <sys/syslog.h>
97 #include <sys/ubc.h>
98 #include <sys/vm.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/event.h>
102 #include <sys/fsevents.h>
103 #include <sys/user.h>
104 #include <sys/lockf.h>
105 #include <sys/xattr.h>
106
107 #include <kern/assert.h>
108 #include <kern/kalloc.h>
109 #include <kern/task.h>
110
111 #include <libkern/OSByteOrder.h>
112
113 #include <miscfs/specfs/specdev.h>
114
115 #include <mach/mach_types.h>
116 #include <mach/memory_object_types.h>
117 #include <mach/task.h>
118
119 #if CONFIG_MACF
120 #include <security/mac_framework.h>
121 #endif
122
123 #include <sys/sdt.h>
124
125 #define ESUCCESS 0
126 #undef mount_t
127 #undef vnode_t
128
129 #define COMPAT_ONLY
130
131 #define NATIVE_XATTR(VP) \
132 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
133
134 #if CONFIG_APPLEDOUBLE
135 static void xattrfile_remove(vnode_t dvp, const char *basename,
136 vfs_context_t ctx, int force);
137 static void xattrfile_setattr(vnode_t dvp, const char * basename,
138 struct vnode_attr * vap, vfs_context_t ctx);
139 #endif /* CONFIG_APPLEDOUBLE */
140
141 static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
142
143 /*
144 * vnode_setneedinactive
145 *
146 * Description: Indicate that when the last iocount on this vnode goes away,
147 * and the usecount is also zero, we should inform the filesystem
148 * via VNOP_INACTIVE.
149 *
150 * Parameters: vnode_t vnode to mark
151 *
152 * Returns: Nothing
153 *
154 * Notes: Notably used when we're deleting a file--we need not have a
155 * usecount, so VNOP_INACTIVE may not get called by anyone. We
156 * want it called when we drop our iocount.
157 */
158 void
159 vnode_setneedinactive(vnode_t vp)
160 {
161 cache_purge(vp);
162
163 vnode_lock_spin(vp);
164 vp->v_lflag |= VL_NEEDINACTIVE;
165 vnode_unlock(vp);
166 }
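/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * remove/delete path might mark the vnode so that VNOP_INACTIVE fires once
 * the last iocount is dropped, even though no usecount is held.
 * myfs_unlink_internal() is a hypothetical FS-specific helper.
 */
#if 0
static int
example_remove_hook(vnode_t vp)
{
	int error = myfs_unlink_internal(vp);	/* hypothetical: remove the on-disk object */

	if (error == 0)
		vnode_setneedinactive(vp);	/* request VNOP_INACTIVE at last iocount drop */
	return error;
}
#endif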
167
168
169 /* ====================================================================== */
170 /* ************ EXTERNAL KERNEL APIS ********************************** */
171 /* ====================================================================== */
172
173 /*
174 * implementations of exported VFS operations
175 */
176 int
177 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
178 {
179 int error;
180
181 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
182 return(ENOTSUP);
183
184 if (vfs_context_is64bit(ctx)) {
185 if (vfs_64bitready(mp)) {
186 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
187 }
188 else {
189 error = ENOTSUP;
190 }
191 }
192 else {
193 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
194 }
195
196 return (error);
197 }
198
199 int
200 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
201 {
202 int error;
203
204 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
205 return(ENOTSUP);
206
207 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
208
209 return (error);
210 }
211
212 int
213 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
214 {
215 int error;
216
217 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
218 return(ENOTSUP);
219
220 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
221
222 return (error);
223 }
224
225 /*
226 * Returns: 0 Success
227 * ENOTSUP Not supported
228 * <vfs_root>:ENOENT
229 * <vfs_root>:???
230 *
231 * Note: The return codes from the underlying VFS's root routine can't
232 * be fully enumerated here, since third party VFS authors may not
233 * limit their error returns to the ones documented here, even
234 * though this may result in some programs functioning incorrectly.
235 *
236 * The return codes documented above are those which may currently
237 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
238 * for a call to hfs_vget on the volume mount point, not including
239 * additional error codes which may be propagated from underlying
240 * routines called by hfs_vget.
241 */
242 int
243 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
244 {
245 int error;
246
247 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
248 return(ENOTSUP);
249
250 if (ctx == NULL) {
251 ctx = vfs_context_current();
252 }
253
254 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
255
256 return (error);
257 }
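/*
 * Illustrative sketch (not part of the original file): a caller of VFS_ROOT()
 * receives the root vnode with an iocount and must drop it with vnode_put().
 */
#if 0
static int
example_use_root(mount_t mp, vfs_context_t ctx)
{
	vnode_t rvp = NULLVP;
	int error = VFS_ROOT(mp, &rvp, ctx);

	if (error == 0) {
		/* ... use rvp ... */
		vnode_put(rvp);		/* release the iocount granted by VFS_ROOT */
	}
	return error;
}
#endif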
258
259 int
260 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
261 {
262 int error;
263
264 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
265 return(ENOTSUP);
266
267 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
268
269 return (error);
270 }
271
272 int
273 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
274 {
275 int error;
276
277 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
278 return(ENOTSUP);
279
280 if (ctx == NULL) {
281 ctx = vfs_context_current();
282 }
283
284 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
285
286 return(error);
287 }
288
289 int
290 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
291 {
292 int error;
293
294 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
295 return(ENOTSUP);
296
297 if (ctx == NULL) {
298 ctx = vfs_context_current();
299 }
300
301 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
302
303 return(error);
304 }
305
306 int
307 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
308 {
309 int error;
310
311 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
312 return(ENOTSUP);
313
314 if (ctx == NULL) {
315 ctx = vfs_context_current();
316 }
317
318 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
319
320 return(error);
321 }
322
323 int
324 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
325 {
326 int error;
327
328 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
329 return(ENOTSUP);
330
331 if (ctx == NULL) {
332 ctx = vfs_context_current();
333 }
334
335 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
336
337 return(error);
338 }
339
340 int
341 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
342 {
343 int error;
344
345 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
346 return(ENOTSUP);
347
348 if (ctx == NULL) {
349 ctx = vfs_context_current();
350 }
351
352 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
353
354 return(error);
355 }
356
357 int
358 VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
359 {
360 int error;
361
362 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
363 return(ENOTSUP);
364
365 if (ctx == NULL) {
366 ctx = vfs_context_current();
367 }
368
369 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
370
371 return(error);
372 }
373
374 int VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
375 int flags, vfs_context_t context)
376 {
377 if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl)
378 return ENOTSUP;
379
380 return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
381 context ?: vfs_context_current());
382 }
383
384 int
385 VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
386 {
387 int error;
388
389 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0))
390 return(ENOTSUP);
391
392 if (ctx == NULL)
393 ctx = vfs_context_current();
394
395 error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
396
397 return (error);
398 }
399
400 /* returns the cached throttle mask for the mount_t */
401 uint64_t
402 vfs_throttle_mask(mount_t mp)
403 {
404 return(mp->mnt_throttle_mask);
405 }
406
407 /* returns a copy of vfs type name for the mount_t */
408 void
409 vfs_name(mount_t mp, char *buffer)
410 {
411 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
412 }
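/*
 * Illustrative sketch (not part of the original file): the caller must supply
 * a buffer of at least MFSNAMELEN bytes; example_log_fsname() is hypothetical.
 */
#if 0
static void
example_log_fsname(mount_t mp)
{
	char name[MFSNAMELEN];

	vfs_name(mp, name);
	printf("filesystem type: %s (typenum %d)\n", name, vfs_typenum(mp));
}
#endif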
413
414 /* returns vfs type number for the mount_t */
415 int
416 vfs_typenum(mount_t mp)
417 {
418 return(mp->mnt_vtable->vfc_typenum);
419 }
420
421 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
422 void*
423 vfs_mntlabel(mount_t mp)
424 {
425 return (void*)mp->mnt_mntlabel;
426 }
427
428 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
429 uint64_t
430 vfs_flags(mount_t mp)
431 {
432 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
433 }
434
435 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
436 void
437 vfs_setflags(mount_t mp, uint64_t flags)
438 {
439 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
440
441 mount_lock(mp);
442 mp->mnt_flag |= lflags;
443 mount_unlock(mp);
444 }
445
446 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
447 void
448 vfs_clearflags(mount_t mp , uint64_t flags)
449 {
450 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
451
452 mount_lock(mp);
453 mp->mnt_flag &= ~lflags;
454 mount_unlock(mp);
455 }
456
457 /* Is the mount_t ronly and upgrade read/write requested? */
458 int
459 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
460 {
461 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
462 }
463
464
465 /* Is the mount_t mounted ronly */
466 int
467 vfs_isrdonly(mount_t mp)
468 {
469 return (mp->mnt_flag & MNT_RDONLY);
470 }
471
472 /* Is the mount_t mounted for filesystem synchronous writes? */
473 int
474 vfs_issynchronous(mount_t mp)
475 {
476 return (mp->mnt_flag & MNT_SYNCHRONOUS);
477 }
478
479 /* Is the mount_t mounted read/write? */
480 int
481 vfs_isrdwr(mount_t mp)
482 {
483 return ((mp->mnt_flag & MNT_RDONLY) == 0);
484 }
485
486
487 /* Is mount_t marked for update (ie MNT_UPDATE) */
488 int
489 vfs_isupdate(mount_t mp)
490 {
491 return (mp->mnt_flag & MNT_UPDATE);
492 }
493
494
495 /* Is mount_t marked for reload (ie MNT_RELOAD) */
496 int
497 vfs_isreload(mount_t mp)
498 {
499 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
500 }
501
502 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
503 int
504 vfs_isforce(mount_t mp)
505 {
506 if (mp->mnt_lflag & MNT_LFORCE)
507 return(1);
508 else
509 return(0);
510 }
511
512 int
513 vfs_isunmount(mount_t mp)
514 {
515 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
516 return 1;
517 } else {
518 return 0;
519 }
520 }
521
522 int
523 vfs_64bitready(mount_t mp)
524 {
525 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
526 return(1);
527 else
528 return(0);
529 }
530
531
532 int
533 vfs_authcache_ttl(mount_t mp)
534 {
535 if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) )
536 return (mp->mnt_authcache_ttl);
537 else
538 return (CACHED_RIGHT_INFINITE_TTL);
539 }
540
541 void
542 vfs_setauthcache_ttl(mount_t mp, int ttl)
543 {
544 mount_lock(mp);
545 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
546 mp->mnt_authcache_ttl = ttl;
547 mount_unlock(mp);
548 }
549
550 void
551 vfs_clearauthcache_ttl(mount_t mp)
552 {
553 mount_lock(mp);
554 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
555 /*
556 * back to the default TTL value in case
557 * MNTK_AUTH_OPAQUE is set on this mount
558 */
559 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
560 mount_unlock(mp);
561 }
562
563 int
564 vfs_authopaque(mount_t mp)
565 {
566 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
567 return(1);
568 else
569 return(0);
570 }
571
572 int
573 vfs_authopaqueaccess(mount_t mp)
574 {
575 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
576 return(1);
577 else
578 return(0);
579 }
580
581 void
582 vfs_setauthopaque(mount_t mp)
583 {
584 mount_lock(mp);
585 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
586 mount_unlock(mp);
587 }
588
589 void
590 vfs_setauthopaqueaccess(mount_t mp)
591 {
592 mount_lock(mp);
593 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
594 mount_unlock(mp);
595 }
596
597 void
598 vfs_clearauthopaque(mount_t mp)
599 {
600 mount_lock(mp);
601 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
602 mount_unlock(mp);
603 }
604
605 void
606 vfs_clearauthopaqueaccess(mount_t mp)
607 {
608 mount_lock(mp);
609 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
610 mount_unlock(mp);
611 }
612
613 void
614 vfs_setextendedsecurity(mount_t mp)
615 {
616 mount_lock(mp);
617 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
618 mount_unlock(mp);
619 }
620
621 void
622 vfs_clearextendedsecurity(mount_t mp)
623 {
624 mount_lock(mp);
625 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
626 mount_unlock(mp);
627 }
628
629 void
630 vfs_setnoswap(mount_t mp)
631 {
632 mount_lock(mp);
633 mp->mnt_kern_flag |= MNTK_NOSWAP;
634 mount_unlock(mp);
635 }
636
637 void
638 vfs_clearnoswap(mount_t mp)
639 {
640 mount_lock(mp);
641 mp->mnt_kern_flag &= ~MNTK_NOSWAP;
642 mount_unlock(mp);
643 }
644
645 int
646 vfs_extendedsecurity(mount_t mp)
647 {
648 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
649 }
650
651 /* returns the max size of short symlink in this mount_t */
652 uint32_t
653 vfs_maxsymlen(mount_t mp)
654 {
655 return(mp->mnt_maxsymlinklen);
656 }
657
658 /* set max size of short symlink on mount_t */
659 void
660 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
661 {
662 mp->mnt_maxsymlinklen = symlen;
663 }
664
665 /* return a pointer to the RO vfs_statfs associated with mount_t */
666 struct vfsstatfs *
667 vfs_statfs(mount_t mp)
668 {
669 return(&mp->mnt_vfsstat);
670 }
671
672 int
673 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
674 {
675 int error;
676
677 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
678 return(error);
679
680 /*
681 * If we have a filesystem create time, use it to default some others.
682 */
683 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
684 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
685 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
686 }
687
688 return(0);
689 }
690
691 int
692 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
693 {
694 int error;
695
696 if (vfs_isrdonly(mp))
697 return EROFS;
698
699 error = VFS_SETATTR(mp, vfa, ctx);
700
701 /*
702 * If we had alternate ways of setting vfs attributes, we'd
703 * fall back here.
704 */
705
706 return error;
707 }
708
709 /* return the private data handle stored in mount_t */
710 void *
711 vfs_fsprivate(mount_t mp)
712 {
713 return(mp->mnt_data);
714 }
715
716 /* set the private data handle in mount_t */
717 void
718 vfs_setfsprivate(mount_t mp, void *mntdata)
719 {
720 mount_lock(mp);
721 mp->mnt_data = mntdata;
722 mount_unlock(mp);
723 }
724
725 /* query whether the mount point supports native EAs */
726 int
727 vfs_nativexattrs(mount_t mp) {
728 return (mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS);
729 }
730
731 /*
732 * return the block size of the underlying
733 * device associated with mount_t
734 */
735 int
736 vfs_devblocksize(mount_t mp) {
737
738 return(mp->mnt_devblocksize);
739 }
740
741 /*
742 * Returns vnode with an iocount that must be released with vnode_put()
743 */
744 vnode_t
745 vfs_vnodecovered(mount_t mp)
746 {
747 vnode_t vp = mp->mnt_vnodecovered;
748 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
749 return NULL;
750 } else {
751 return vp;
752 }
753 }
754
755 /*
756 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
757 * The iocount must be released with vnode_put(). Note that this KPI is subtle
758 * with respect to the validity of using this device vnode for anything substantial
759 * (which is discouraged). If commands are sent to the device driver without
760 * taking proper steps to ensure that the device is still open, chaos may ensue.
761 * Similarly, this routine should only be called if there is some guarantee that
762 * the mount itself is still valid.
763 */
764 vnode_t
765 vfs_devvp(mount_t mp)
766 {
767 vnode_t vp = mp->mnt_devvp;
768
769 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
770 return vp;
771 }
772
773 return NULLVP;
774 }
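/*
 * Illustrative sketch (not part of the original file): callers that do use the
 * device vnode should hold it only briefly and always drop the iocount.
 * Assumes the caller already has a guarantee that the mount is still valid.
 */
#if 0
static int
example_peek_devvp(mount_t mp)
{
	vnode_t devvp = vfs_devvp(mp);		/* returns with an iocount, or NULLVP */

	if (devvp == NULLVP)
		return ENXIO;
	/* ... minimal, careful use of devvp ... */
	vnode_put(devvp);			/* release the iocount */
	return 0;
}
#endif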
775
776 /*
777 * return the io attributes associated with mount_t
778 */
779 void
780 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
781 {
782 ioattrp->io_reserved[0] = NULL;
783 ioattrp->io_reserved[1] = NULL;
784 if (mp == NULL) {
785 ioattrp->io_maxreadcnt = MAXPHYS;
786 ioattrp->io_maxwritecnt = MAXPHYS;
787 ioattrp->io_segreadcnt = 32;
788 ioattrp->io_segwritecnt = 32;
789 ioattrp->io_maxsegreadsize = MAXPHYS;
790 ioattrp->io_maxsegwritesize = MAXPHYS;
791 ioattrp->io_devblocksize = DEV_BSIZE;
792 ioattrp->io_flags = 0;
793 ioattrp->io_max_swappin_available = 0;
794 } else {
795 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
796 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
797 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
798 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
799 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
800 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
801 ioattrp->io_devblocksize = mp->mnt_devblocksize;
802 ioattrp->io_flags = mp->mnt_ioflags;
803 ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
804 }
805 }
806
807
808 /*
809 * set the IO attributes associated with mount_t
810 */
811 void
812 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
813 {
814 if (mp == NULL)
815 return;
816 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
817 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
818 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
819 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
820 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
821 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
822 mp->mnt_devblocksize = ioattrp->io_devblocksize;
823 mp->mnt_ioflags = ioattrp->io_flags;
824 mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
825 }
826
827 /*
828 * Add a new filesystem into the kernel, as specified in the passed-in
829 * vfs_fsentry structure. It fills in the vnode dispatch vector that is
830 * to be passed to vnode_create() when vnodes are created.
831 * It returns a handle which is to be used when the FS is to be removed.
832 */
833 typedef int (*PFI)(void *);
834 extern int vfs_opv_numops;
835 errno_t
836 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
837 {
838 struct vfstable *newvfstbl = NULL;
839 int i,j;
840 int (***opv_desc_vector_p)(void *);
841 int (**opv_desc_vector)(void *);
842 struct vnodeopv_entry_desc *opve_descp;
843 int desccount;
844 int descsize;
845 PFI *descptr;
846
847 /*
848 * This routine is responsible for all the initialization that would
849 * ordinarily be done as part of the system startup;
850 */
851
852 if (vfe == (struct vfs_fsentry *)0)
853 return(EINVAL);
854
855 desccount = vfe->vfe_vopcnt;
856 if ((desccount <=0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
857 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
858 return(EINVAL);
859
860 /* Non-threadsafe filesystems are not supported */
861 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
862 return (EINVAL);
863 }
864
865 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
866 M_WAITOK);
867 bzero(newvfstbl, sizeof(struct vfstable));
868 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
869 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
870 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
871 newvfstbl->vfc_typenum = maxvfstypenum++;
872 else
873 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
874
875 newvfstbl->vfc_refcount = 0;
876 newvfstbl->vfc_flags = 0;
877 newvfstbl->vfc_mountroot = NULL;
878 newvfstbl->vfc_next = NULL;
879 newvfstbl->vfc_vfsflags = 0;
880 if (vfe->vfe_flags & VFS_TBL64BITREADY)
881 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
882 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2)
883 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
884 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2)
885 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
886 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
887 newvfstbl->vfc_flags |= MNT_LOCAL;
888 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0)
889 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
890 else
891 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
892
893 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR)
894 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
895 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT)
896 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
897 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED)
898 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
899 if (vfe->vfe_flags & VFS_TBLNOMACLABEL)
900 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
901 if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME)
902 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
903 if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME)
904 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
905 if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT)
906 newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
907
908 /*
909 * Allocate and init the vectors.
910 * Also handle backwards compatibility.
911 *
912 * We allocate one large block to hold all <desccount>
913 * vnode operation vectors stored contiguously.
914 */
915 /* XXX - shouldn't be M_TEMP */
916
917 descsize = desccount * vfs_opv_numops * sizeof(PFI);
918 MALLOC(descptr, PFI *, descsize,
919 M_TEMP, M_WAITOK);
920 bzero(descptr, descsize);
921
922 newvfstbl->vfc_descptr = descptr;
923 newvfstbl->vfc_descsize = descsize;
924
925 newvfstbl->vfc_sysctl = NULL;
926
927 for (i= 0; i< desccount; i++ ) {
928 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
929 /*
930 * Fill in the caller's pointer to the start of the i'th vector.
931 * They'll need to supply it when calling vnode_create.
932 */
933 opv_desc_vector = descptr + i * vfs_opv_numops;
934 *opv_desc_vector_p = opv_desc_vector;
935
936 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
937 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
938
939 /* Silently skip known-disabled operations */
940 if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
941 printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
942 vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
943 continue;
944 }
945
946 /*
947 * Sanity check: is this operation listed
948 * in the list of operations? We check this
949 * by seeing if its offset is zero. Since
950 * the default routine should always be listed
951 * first, it should be the only one with a zero
952 * offset. Any other operation with a zero
953 * offset is probably not listed in
954 * vfs_op_descs, and so is probably an error.
955 *
956 * A panic here means the layer programmer
957 * has committed the all-too common bug
958 * of adding a new operation to the layer's
959 * list of vnode operations but
960 * not adding the operation to the system-wide
961 * list of supported operations.
962 */
963 if (opve_descp->opve_op->vdesc_offset == 0 &&
964 opve_descp->opve_op != VDESC(vnop_default)) {
965 printf("vfs_fsadd: operation %s not listed in %s.\n",
966 opve_descp->opve_op->vdesc_name,
967 "vfs_op_descs");
968 panic("vfs_fsadd: bad operation");
969 }
970 /*
971 * Fill in this entry.
972 */
973 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
974 opve_descp->opve_impl;
975 }
976
977
978 /*
979 * Finally, go back and replace unfilled routines
980 * with their default. (Sigh, an O(n^3) algorithm. I
981 * could make it better, but that'd be work, and n is small.)
982 */
983 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
984
985 /*
986 * Force every operations vector to have a default routine.
987 */
988 opv_desc_vector = *opv_desc_vector_p;
989 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
990 panic("vfs_fsadd: operation vector without default routine.");
991 for (j = 0; j < vfs_opv_numops; j++)
992 if (opv_desc_vector[j] == NULL)
993 opv_desc_vector[j] =
994 opv_desc_vector[VOFFSET(vnop_default)];
995
996 } /* end of each vnodeopv_desc parsing */
997
998
999
1000 *handle = vfstable_add(newvfstbl);
1001
1002 if (newvfstbl->vfc_typenum <= maxvfstypenum )
1003 maxvfstypenum = newvfstbl->vfc_typenum + 1;
1004
1005 if (newvfstbl->vfc_vfsops->vfs_init) {
1006 struct vfsconf vfsc;
1007 bzero(&vfsc, sizeof(struct vfsconf));
1008 vfsc.vfc_reserved1 = 0;
1009 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1010 vfsc.vfc_typenum = (*handle)->vfc_typenum;
1011 vfsc.vfc_refcount = (*handle)->vfc_refcount;
1012 vfsc.vfc_flags = (*handle)->vfc_flags;
1013 vfsc.vfc_reserved2 = 0;
1014 vfsc.vfc_reserved3 = 0;
1015
1016 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1017 }
1018
1019 FREE(newvfstbl, M_TEMP);
1020
1021 return(0);
1022 }
1023
1024 /*
1025 * Removes the filesystem from kernel.
1026 * The argument passed in is the handle that was given when
1027 * file system was added
1028 */
1029 errno_t
1030 vfs_fsremove(vfstable_t handle)
1031 {
1032 struct vfstable * vfstbl = (struct vfstable *)handle;
1033 void *old_desc = NULL;
1034 errno_t err;
1035
1036 /* Preflight check for any mounts */
1037 mount_list_lock();
1038 if ( vfstbl->vfc_refcount != 0 ) {
1039 mount_list_unlock();
1040 return EBUSY;
1041 }
1042
1043 /*
1044 * save the old descriptor; the free cannot occur unconditionally,
1045 * since vfstable_del() may fail.
1046 */
1047 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1048 old_desc = vfstbl->vfc_descptr;
1049 }
1050 err = vfstable_del(vfstbl);
1051
1052 mount_list_unlock();
1053
1054 /* free the descriptor if the delete was successful */
1055 if (err == 0 && old_desc) {
1056 FREE(old_desc, M_TEMP);
1057 }
1058
1059 return(err);
1060 }
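/*
 * Illustrative sketch (not part of the original file): registering and later
 * removing a filesystem. The myfs_* symbols are hypothetical; a real client
 * supplies its own vfsops and vnodeopv_desc tables (see struct vfs_fsentry
 * in <sys/mount.h>).
 */
#if 0
extern struct vfsops myfs_vfsops;			/* hypothetical */
extern struct vnodeopv_desc myfs_vnodeop_opv_desc;	/* hypothetical */
static vfstable_t myfs_vfstable_handle;

static errno_t
example_register_myfs(void)
{
	struct vnodeopv_desc *opv_descs[] = { &myfs_vnodeop_opv_desc };
	struct vfs_fsentry vfe;

	bzero(&vfe, sizeof(vfe));
	vfe.vfe_vfsops = &myfs_vfsops;
	vfe.vfe_vopcnt = 1;				/* one vnode operation vector */
	vfe.vfe_opvdescs = opv_descs;
	strlcpy(vfe.vfe_fsname, "myfs", sizeof(vfe.vfe_fsname));
	vfe.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM;

	return vfs_fsadd(&vfe, &myfs_vfstable_handle);
}

static errno_t
example_unregister_myfs(void)
{
	/* fails with EBUSY while any instance is still mounted */
	return vfs_fsremove(myfs_vfstable_handle);
}
#endif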
1061
1062 void vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
1063 {
1064 mp->mnt_fsowner = uid;
1065 mp->mnt_fsgroup = gid;
1066 }
1067
1068 /*
1069 * Callers should be careful how they use this; accessing
1070 * mnt_last_write_completed_timestamp is not thread-safe. Writing to
1071 * it isn't either. Point is: be prepared to deal with strange values
1072 * being returned.
1073 */
1074 uint64_t vfs_idle_time(mount_t mp)
1075 {
1076 if (mp->mnt_pending_write_size)
1077 return 0;
1078
1079 struct timeval now;
1080
1081 microuptime(&now);
1082
1083 return ((now.tv_sec
1084 - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
1085 + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec);
1086 }
1087
1088 int
1089 vfs_context_pid(vfs_context_t ctx)
1090 {
1091 return (proc_pid(vfs_context_proc(ctx)));
1092 }
1093
1094 int
1095 vfs_context_suser(vfs_context_t ctx)
1096 {
1097 return (suser(ctx->vc_ucred, NULL));
1098 }
1099
1100 /*
1101 * Return bit field of signals posted to all threads in the context's process.
1102 *
1103 * XXX Signals should be tied to threads, not processes, for most uses of this
1104 * XXX call.
1105 */
1106 int
1107 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1108 {
1109 proc_t p = vfs_context_proc(ctx);
1110 if (p)
1111 return(proc_pendingsignals(p, mask));
1112 return(0);
1113 }
1114
1115 int
1116 vfs_context_is64bit(vfs_context_t ctx)
1117 {
1118 proc_t proc = vfs_context_proc(ctx);
1119
1120 if (proc)
1121 return(proc_is64bit(proc));
1122 return(0);
1123 }
1124
1125
1126 /*
1127 * vfs_context_proc
1128 *
1129 * Description: Given a vfs_context_t, return the proc_t associated with it.
1130 *
1131 * Parameters: vfs_context_t The context to use
1132 *
1133 * Returns: proc_t The process for this context
1134 *
1135 * Notes: This function will return the current_proc() if any of the
1136 * following conditions are true:
1137 *
1138 * o The supplied context pointer is NULL
1139 * o There is no Mach thread associated with the context
1140 * o There is no Mach task associated with the Mach thread
1141 * o There is no proc_t associated with the Mach task
1142 * o The proc_t has no per process open file table
1143 * o The proc_t is post-vfork()
1144 *
1145 * This causes this function to return a value matching as
1146 * closely as possible the previous behaviour, while at the
1147 * same time avoiding the task lending that results from vfork()
1148 */
1149 proc_t
1150 vfs_context_proc(vfs_context_t ctx)
1151 {
1152 proc_t proc = NULL;
1153
1154 if (ctx != NULL && ctx->vc_thread != NULL)
1155 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1156 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
1157 proc = NULL;
1158
1159 return(proc == NULL ? current_proc() : proc);
1160 }
1161
1162 /*
1163 * vfs_context_get_special_port
1164 *
1165 * Description: Return the requested special port from the task associated
1166 * with the given context.
1167 *
1168 * Parameters: vfs_context_t The context to use
1169 * int Index of special port
1170 * ipc_port_t * Pointer to returned port
1171 *
1172 * Returns: kern_return_t see task_get_special_port()
1173 */
1174 kern_return_t
1175 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1176 {
1177 task_t task = NULL;
1178
1179 if (ctx != NULL && ctx->vc_thread != NULL)
1180 task = get_threadtask(ctx->vc_thread);
1181
1182 return task_get_special_port(task, which, portp);
1183 }
1184
1185 /*
1186 * vfs_context_set_special_port
1187 *
1188 * Description: Set the requested special port in the task associated
1189 * with the given context.
1190 *
1191 * Parameters: vfs_context_t The context to use
1192 * int Index of special port
1193 * ipc_port_t New special port
1194 *
1195 * Returns: kern_return_t see task_set_special_port()
1196 */
1197 kern_return_t
1198 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1199 {
1200 task_t task = NULL;
1201
1202 if (ctx != NULL && ctx->vc_thread != NULL)
1203 task = get_threadtask(ctx->vc_thread);
1204
1205 return task_set_special_port(task, which, port);
1206 }
1207
1208 /*
1209 * vfs_context_thread
1210 *
1211 * Description: Return the Mach thread associated with a vfs_context_t
1212 *
1213 * Parameters: vfs_context_t The context to use
1214 *
1215 * Returns: thread_t The thread for this context, or
1216 * NULL, if there is not one.
1217 *
1218 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1219 * as a result of a static vfs_context_t declaration in a function
1220 * and will result in this function returning NULL.
1221 *
1222 * This is intentional; this function should NOT return the
1223 * current_thread() in this case.
1224 */
1225 thread_t
1226 vfs_context_thread(vfs_context_t ctx)
1227 {
1228 return(ctx->vc_thread);
1229 }
1230
1231
1232 /*
1233 * vfs_context_cwd
1234 *
1235 * Description: Returns a reference on the vnode for the current working
1236 * directory for the supplied context
1237 *
1238 * Parameters: vfs_context_t The context to use
1239 *
1240 * Returns: vnode_t The current working directory
1241 * for this context
1242 *
1243 * Notes: The function first attempts to obtain the current directory
1244 * from the thread, and if it is not present there, falls back
1245 * to obtaining it from the process instead. If it can't be
1246 * obtained from either place, we return NULLVP.
1247 */
1248 vnode_t
1249 vfs_context_cwd(vfs_context_t ctx)
1250 {
1251 vnode_t cwd = NULLVP;
1252
1253 if(ctx != NULL && ctx->vc_thread != NULL) {
1254 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1255 proc_t proc;
1256
1257 /*
1258 * Get the cwd from the thread; if there isn't one, get it
1259 * from the process, instead.
1260 */
1261 if ((cwd = uth->uu_cdir) == NULLVP &&
1262 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1263 proc->p_fd != NULL)
1264 cwd = proc->p_fd->fd_cdir;
1265 }
1266
1267 return(cwd);
1268 }
1269
1270 /*
1271 * vfs_context_create
1272 *
1273 * Description: Allocate and initialize a new context.
1274 *
1275 * Parameters: vfs_context_t: Context to copy, or NULL for new
1276 *
1277 * Returns: Pointer to new context
1278 *
1279 * Notes: Copy cred and thread from argument, if available; else
1280 * initialize with current thread and new cred. Returns
1281 * with a reference held on the credential.
1282 */
1283 vfs_context_t
1284 vfs_context_create(vfs_context_t ctx)
1285 {
1286 vfs_context_t newcontext;
1287
1288 newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));
1289
1290 if (newcontext) {
1291 kauth_cred_t safecred;
1292 if (ctx) {
1293 newcontext->vc_thread = ctx->vc_thread;
1294 safecred = ctx->vc_ucred;
1295 } else {
1296 newcontext->vc_thread = current_thread();
1297 safecred = kauth_cred_get();
1298 }
1299 if (IS_VALID_CRED(safecred))
1300 kauth_cred_ref(safecred);
1301 newcontext->vc_ucred = safecred;
1302 return(newcontext);
1303 }
1304 return(NULL);
1305 }
1306
1307
1308 vfs_context_t
1309 vfs_context_current(void)
1310 {
1311 vfs_context_t ctx = NULL;
1312 volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1313
1314 if (ut != NULL ) {
1315 if (ut->uu_context.vc_ucred != NULL) {
1316 ctx = &ut->uu_context;
1317 }
1318 }
1319
1320 return(ctx == NULL ? vfs_context_kernel() : ctx);
1321 }
1322
1323
1324 /*
1325 * XXX Do not ask
1326 *
1327 * Dangerous hack - adopt the first kernel thread as the current thread, to
1328 * get to the vfs_context_t in the uthread associated with a kernel thread.
1329 * This is used by UDF to make the call into IOCDMediaBSDClient,
1330 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1331 * ioctl() is being called from kernel or user space (and all this because
1332 * we do not pass threads into our ioctl()'s, instead of processes).
1333 *
1334 * This is also used by imageboot_setup(), called early from bsd_init() after
1335 * kernproc has been given a credential.
1336 *
1337 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1338 * of many Mach headers to do the reference directly rather than indirectly;
1339 * we will need to forego this convenience when we retire proc_thread().
1340 */
1341 static struct vfs_context kerncontext;
1342 vfs_context_t
1343 vfs_context_kernel(void)
1344 {
1345 if (kerncontext.vc_ucred == NOCRED)
1346 kerncontext.vc_ucred = kernproc->p_ucred;
1347 if (kerncontext.vc_thread == NULL)
1348 kerncontext.vc_thread = proc_thread(kernproc);
1349
1350 return(&kerncontext);
1351 }
1352
1353
1354 int
1355 vfs_context_rele(vfs_context_t ctx)
1356 {
1357 if (ctx) {
1358 if (IS_VALID_CRED(ctx->vc_ucred))
1359 kauth_cred_unref(&ctx->vc_ucred);
1360 kfree(ctx, sizeof(struct vfs_context));
1361 }
1362 return(0);
1363 }
1364
1365
1366 kauth_cred_t
1367 vfs_context_ucred(vfs_context_t ctx)
1368 {
1369 return (ctx->vc_ucred);
1370 }
1371
1372 /*
1373 * Return true if the context is owned by the superuser.
1374 */
1375 int
1376 vfs_context_issuser(vfs_context_t ctx)
1377 {
1378 return(kauth_cred_issuser(vfs_context_ucred(ctx)));
1379 }
1380
1381 int vfs_context_iskernel(vfs_context_t ctx)
1382 {
1383 return ctx == &kerncontext;
1384 }
1385
1386 /*
1387 * Given a context, for all fields of vfs_context_t which
1388 * are not held with a reference, set those fields to the
1389 * values for the current execution context. Currently, this
1390 * just means the vc_thread.
1391 *
1392 * Returns: 0 for success, nonzero for failure
1393 *
1394 * The intended use is:
1395 * 1. vfs_context_create() gets the caller a context
1396 * 2. vfs_context_bind() sets the unrefcounted data
1397 * 3. vfs_context_rele() releases the context
1398 *
1399 */
1400 int
1401 vfs_context_bind(vfs_context_t ctx)
1402 {
1403 ctx->vc_thread = current_thread();
1404 return 0;
1405 }
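/*
 * Illustrative sketch (not part of the original file) of the intended
 * create/bind/rele sequence described above.
 */
#if 0
static int
example_context_lifecycle(void)
{
	vfs_context_t ctx = vfs_context_create(NULL);	/* 1. new context, cred ref held */

	if (ctx == NULL)
		return ENOMEM;
	vfs_context_bind(ctx);				/* 2. bind unrefcounted data (vc_thread) */
	/* ... issue VFS/VNOP calls with ctx ... */
	vfs_context_rele(ctx);				/* 3. drop the cred ref and free */
	return 0;
}
#endif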
1406
1407 int vfs_isswapmount(mount_t mnt)
1408 {
1409 return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
1410 }
1411
1412 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1413
1414
1415 /*
1416 * Convert between vnode types and inode formats (since POSIX.1
1417 * defines mode word of stat structure in terms of inode formats).
1418 */
1419 enum vtype
1420 vnode_iftovt(int mode)
1421 {
1422 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1423 }
1424
1425 int
1426 vnode_vttoif(enum vtype indx)
1427 {
1428 return(vttoif_tab[(int)(indx)]);
1429 }
1430
1431 int
1432 vnode_makeimode(int indx, int mode)
1433 {
1434 return (int)(VTTOIF(indx) | (mode));
1435 }
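/*
 * Illustrative sketch (not part of the original file): round-tripping between
 * inode format bits (S_IF*) and vnode types.
 */
#if 0
static void
example_type_conversions(void)
{
	enum vtype vt = vnode_iftovt(S_IFDIR);		/* -> VDIR */
	int ifmt = vnode_vttoif(vt);			/* -> S_IFDIR */
	int imode = vnode_makeimode(VREG, 0644);	/* -> S_IFREG | 0644 */

	(void)ifmt;
	(void)imode;
}
#endif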
1436
1437
1438 /*
1439 * vnode manipulation functions.
1440 */
1441
1442 /* returns the system root vnode with an iocount; it should be released using vnode_put() */
1443 vnode_t
1444 vfs_rootvnode(void)
1445 {
1446 int error;
1447
1448 error = vnode_get(rootvnode);
1449 if (error)
1450 return ((vnode_t)0);
1451 else
1452 return rootvnode;
1453 }
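/*
 * Illustrative sketch (not part of the original file): the returned vnode
 * carries an iocount that must be dropped with vnode_put().
 */
#if 0
static void
example_touch_rootvnode(void)
{
	vnode_t rvp = vfs_rootvnode();

	if (rvp != NULLVP) {
		/* ... inspect rvp ... */
		vnode_put(rvp);
	}
}
#endif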
1454
1455
1456 uint32_t
1457 vnode_vid(vnode_t vp)
1458 {
1459 return ((uint32_t)(vp->v_id));
1460 }
1461
1462 mount_t
1463 vnode_mount(vnode_t vp)
1464 {
1465 return (vp->v_mount);
1466 }
1467
1468 #if CONFIG_IOSCHED
1469 vnode_t
1470 vnode_mountdevvp(vnode_t vp)
1471 {
1472 if (vp->v_mount)
1473 return (vp->v_mount->mnt_devvp);
1474 else
1475 return ((vnode_t)0);
1476 }
1477 #endif
1478
1479 mount_t
1480 vnode_mountedhere(vnode_t vp)
1481 {
1482 mount_t mp;
1483
1484 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1485 (mp->mnt_vnodecovered == vp))
1486 return (mp);
1487 else
1488 return (mount_t)NULL;
1489 }
1490
1491 /* returns vnode type of vnode_t */
1492 enum vtype
1493 vnode_vtype(vnode_t vp)
1494 {
1495 return (vp->v_type);
1496 }
1497
1498 /* returns FS specific node saved in vnode */
1499 void *
1500 vnode_fsnode(vnode_t vp)
1501 {
1502 return (vp->v_data);
1503 }
1504
1505 void
1506 vnode_clearfsnode(vnode_t vp)
1507 {
1508 vp->v_data = NULL;
1509 }
1510
1511 dev_t
1512 vnode_specrdev(vnode_t vp)
1513 {
1514 return(vp->v_rdev);
1515 }
1516
1517
1518 /* Accessor functions */
1519 /* is vnode_t a root vnode */
1520 int
1521 vnode_isvroot(vnode_t vp)
1522 {
1523 return ((vp->v_flag & VROOT)? 1 : 0);
1524 }
1525
1526 /* is vnode_t a system vnode */
1527 int
1528 vnode_issystem(vnode_t vp)
1529 {
1530 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1531 }
1532
1533 /* is vnode_t a swap file vnode */
1534 int
1535 vnode_isswap(vnode_t vp)
1536 {
1537 return ((vp->v_flag & VSWAP)? 1 : 0);
1538 }
1539
1540 /* is vnode_t a tty */
1541 int
1542 vnode_istty(vnode_t vp)
1543 {
1544 return ((vp->v_flag & VISTTY) ? 1 : 0);
1545 }
1546
1547 /* is a mount operation in progress on vnode_t */
1548 int
1549 vnode_ismount(vnode_t vp)
1550 {
1551 return ((vp->v_flag & VMOUNT)? 1 : 0);
1552 }
1553
1554 /* is this vnode being recycled now */
1555 int
1556 vnode_isrecycled(vnode_t vp)
1557 {
1558 int ret;
1559
1560 vnode_lock_spin(vp);
1561 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1562 vnode_unlock(vp);
1563 return(ret);
1564 }
1565
1566 /* vnode was created by a background task requesting rapid aging
1567  * and has not since been referenced by a normal task */
1568 int
1569 vnode_israge(vnode_t vp)
1570 {
1571 return ((vp->v_flag & VRAGE)? 1 : 0);
1572 }
1573
1574 int
1575 vnode_needssnapshots(vnode_t vp)
1576 {
1577 return ((vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0);
1578 }
1579
1580
1581 /* Check the process/thread to see if we should skip atime updates */
1582 int
1583 vfs_ctx_skipatime (vfs_context_t ctx) {
1584 struct uthread *ut;
1585 proc_t proc;
1586 thread_t thr;
1587
1588 proc = vfs_context_proc(ctx);
1589 thr = vfs_context_thread (ctx);
1590
1591 /* Validate pointers in case we were invoked via a kernel context */
1592 if (thr && proc) {
1593 ut = get_bsdthread_info (thr);
1594
1595 if (proc->p_lflag & P_LRAGE_VNODES) {
1596 return 1;
1597 }
1598
1599 if (ut) {
1600 if (ut->uu_flag & UT_RAGE_VNODES) {
1601 return 1;
1602 }
1603 }
1604 }
1605 return 0;
1606 }
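/*
 * Illustrative sketch (not part of the original file): a filesystem read path
 * might consult this before scheduling an access-time update.
 * myfs_mark_atime() is a hypothetical FS-specific helper.
 */
#if 0
static void
example_maybe_update_atime(vnode_t vp, vfs_context_t ctx)
{
	if (!vfs_ctx_skipatime(ctx))
		myfs_mark_atime(vp);	/* hypothetical: record the access time */
}
#endif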
1607
1608 /* is vnode_t marked to not keep data cached once it's been consumed */
1609 int
1610 vnode_isnocache(vnode_t vp)
1611 {
1612 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1613 }
1614
1615 /*
1616 * has sequential readahead been disabled on this vnode
1617 */
1618 int
1619 vnode_isnoreadahead(vnode_t vp)
1620 {
1621 return ((vp->v_flag & VRAOFF)? 1 : 0);
1622 }
1623
1624 int
1625 vnode_is_openevt(vnode_t vp)
1626 {
1627 return ((vp->v_flag & VOPENEVT)? 1 : 0);
1628 }
1629
1630 /* is vnode_t a standard one? */
1631 int
1632 vnode_isstandard(vnode_t vp)
1633 {
1634 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1635 }
1636
1637 /* don't vflush() if SKIPSYSTEM */
1638 int
1639 vnode_isnoflush(vnode_t vp)
1640 {
1641 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1642 }
1643
1644 /* is vnode_t a regular file */
1645 int
1646 vnode_isreg(vnode_t vp)
1647 {
1648 return ((vp->v_type == VREG)? 1 : 0);
1649 }
1650
1651 /* is vnode_t a directory? */
1652 int
1653 vnode_isdir(vnode_t vp)
1654 {
1655 return ((vp->v_type == VDIR)? 1 : 0);
1656 }
1657
1658 /* is vnode_t a symbolic link ? */
1659 int
1660 vnode_islnk(vnode_t vp)
1661 {
1662 return ((vp->v_type == VLNK)? 1 : 0);
1663 }
1664
1665 int
1666 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
1667 {
1668 struct nameidata *ndp = cnp->cn_ndp;
1669
1670 if (ndp == NULL) {
1671 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1672 }
1673
1674 if (vnode_isdir(vp)) {
1675 if (vp->v_mountedhere != NULL) {
1676 goto yes;
1677 }
1678
1679 #if CONFIG_TRIGGERS
1680 if (vp->v_resolve) {
1681 goto yes;
1682 }
1683 #endif /* CONFIG_TRIGGERS */
1684
1685 }
1686
1687
1688 if (vnode_islnk(vp)) {
1689 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1690 if (cnp->cn_flags & FOLLOW) {
1691 goto yes;
1692 }
1693 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
1694 goto yes;
1695 }
1696 }
1697
1698 return 0;
1699
1700 yes:
1701 ndp->ni_flag |= NAMEI_CONTLOOKUP;
1702 return EKEEPLOOKING;
1703 }
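/*
 * Illustrative sketch (not part of the original file): a filesystem lookup
 * implementation can use the helper above to decide whether VFS must continue
 * the lookup itself (mount point, trigger, or symlink to follow). Returning
 * EKEEPLOOKING with the found vnode tells namei to keep going.
 */
#if 0
static int
example_finish_lookup(vnode_t found_vp, struct componentname *cnp, vnode_t *vpp)
{
	int error = vnode_lookup_continue_needed(found_vp, cnp);

	*vpp = found_vp;
	return error;		/* 0, or EKEEPLOOKING with NAMEI_CONTLOOKUP set */
}
#endif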
1704
1705 /* is vnode_t a fifo ? */
1706 int
1707 vnode_isfifo(vnode_t vp)
1708 {
1709 return ((vp->v_type == VFIFO)? 1 : 0);
1710 }
1711
1712 /* is vnode_t a block device? */
1713 int
1714 vnode_isblk(vnode_t vp)
1715 {
1716 return ((vp->v_type == VBLK)? 1 : 0);
1717 }
1718
1719 int
1720 vnode_isspec(vnode_t vp)
1721 {
1722 return (((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0);
1723 }
1724
1725 /* is vnode_t a char device? */
1726 int
1727 vnode_ischr(vnode_t vp)
1728 {
1729 return ((vp->v_type == VCHR)? 1 : 0);
1730 }
1731
1732 /* is vnode_t a socket? */
1733 int
1734 vnode_issock(vnode_t vp)
1735 {
1736 return ((vp->v_type == VSOCK)? 1 : 0);
1737 }
1738
1739 /* is vnode_t a device with multiple active vnodes referring to it? */
1740 int
1741 vnode_isaliased(vnode_t vp)
1742 {
1743 enum vtype vt = vp->v_type;
1744 if (!((vt == VCHR) || (vt == VBLK))) {
1745 return 0;
1746 } else {
1747 return (vp->v_specflags & SI_ALIASED);
1748 }
1749 }
1750
1751 /* is vnode_t a named stream? */
1752 int
1753 vnode_isnamedstream(
1754 #if NAMEDSTREAMS
1755 vnode_t vp
1756 #else
1757 __unused vnode_t vp
1758 #endif
1759 )
1760 {
1761 #if NAMEDSTREAMS
1762 return ((vp->v_flag & VISNAMEDSTREAM) ? 1 : 0);
1763 #else
1764 return (0);
1765 #endif
1766 }
1767
1768 int
1769 vnode_isshadow(
1770 #if NAMEDSTREAMS
1771 vnode_t vp
1772 #else
1773 __unused vnode_t vp
1774 #endif
1775 )
1776 {
1777 #if NAMEDSTREAMS
1778 return ((vp->v_flag & VISSHADOW) ? 1 : 0);
1779 #else
1780 return (0);
1781 #endif
1782 }
1783
1784 /* does vnode have associated named stream vnodes ? */
1785 int
1786 vnode_hasnamedstreams(
1787 #if NAMEDSTREAMS
1788 vnode_t vp
1789 #else
1790 __unused vnode_t vp
1791 #endif
1792 )
1793 {
1794 #if NAMEDSTREAMS
1795 return ((vp->v_lflag & VL_HASSTREAMS) ? 1 : 0);
1796 #else
1797 return (0);
1798 #endif
1799 }
1800 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1801 void
1802 vnode_setnocache(vnode_t vp)
1803 {
1804 vnode_lock_spin(vp);
1805 vp->v_flag |= VNOCACHE_DATA;
1806 vnode_unlock(vp);
1807 }
1808
1809 void
1810 vnode_clearnocache(vnode_t vp)
1811 {
1812 vnode_lock_spin(vp);
1813 vp->v_flag &= ~VNOCACHE_DATA;
1814 vnode_unlock(vp);
1815 }
1816
1817 void
1818 vnode_set_openevt(vnode_t vp)
1819 {
1820 vnode_lock_spin(vp);
1821 vp->v_flag |= VOPENEVT;
1822 vnode_unlock(vp);
1823 }
1824
1825 void
1826 vnode_clear_openevt(vnode_t vp)
1827 {
1828 vnode_lock_spin(vp);
1829 vp->v_flag &= ~VOPENEVT;
1830 vnode_unlock(vp);
1831 }
1832
1833
1834 void
1835 vnode_setnoreadahead(vnode_t vp)
1836 {
1837 vnode_lock_spin(vp);
1838 vp->v_flag |= VRAOFF;
1839 vnode_unlock(vp);
1840 }
1841
1842 void
1843 vnode_clearnoreadahead(vnode_t vp)
1844 {
1845 vnode_lock_spin(vp);
1846 vp->v_flag &= ~VRAOFF;
1847 vnode_unlock(vp);
1848 }
1849
1850 int
1851 vnode_isfastdevicecandidate(vnode_t vp)
1852 {
1853 return ((vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0);
1854 }
1855
1856 void
1857 vnode_setfastdevicecandidate(vnode_t vp)
1858 {
1859 vnode_lock_spin(vp);
1860 vp->v_flag |= VFASTDEVCANDIDATE;
1861 vnode_unlock(vp);
1862 }
1863
1864 void
1865 vnode_clearfastdevicecandidate(vnode_t vp)
1866 {
1867 vnode_lock_spin(vp);
1868 vp->v_flag &= ~VFASTDEVCANDIDATE;
1869 vnode_unlock(vp);
1870 }
1871
1872 int
1873 vnode_isautocandidate(vnode_t vp)
1874 {
1875 return ((vp->v_flag & VAUTOCANDIDATE)? 1 : 0);
1876 }
1877
1878 void
1879 vnode_setautocandidate(vnode_t vp)
1880 {
1881 vnode_lock_spin(vp);
1882 vp->v_flag |= VAUTOCANDIDATE;
1883 vnode_unlock(vp);
1884 }
1885
1886 void
1887 vnode_clearautocandidate(vnode_t vp)
1888 {
1889 vnode_lock_spin(vp);
1890 vp->v_flag &= ~VAUTOCANDIDATE;
1891 vnode_unlock(vp);
1892 }
1893
1894
1895
1896
1897 /* mark vnode_t to skip vflush() if SKIPSYSTEM */
1898 void
1899 vnode_setnoflush(vnode_t vp)
1900 {
1901 vnode_lock_spin(vp);
1902 vp->v_flag |= VNOFLUSH;
1903 vnode_unlock(vp);
1904 }
1905
1906 void
1907 vnode_clearnoflush(vnode_t vp)
1908 {
1909 vnode_lock_spin(vp);
1910 vp->v_flag &= ~VNOFLUSH;
1911 vnode_unlock(vp);
1912 }
1913
1914
1915 /* is vnode_t a blkdevice and has a FS mounted on it */
1916 int
1917 vnode_ismountedon(vnode_t vp)
1918 {
1919 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
1920 }
1921
1922 void
1923 vnode_setmountedon(vnode_t vp)
1924 {
1925 vnode_lock_spin(vp);
1926 vp->v_specflags |= SI_MOUNTEDON;
1927 vnode_unlock(vp);
1928 }
1929
1930 void
1931 vnode_clearmountedon(vnode_t vp)
1932 {
1933 vnode_lock_spin(vp);
1934 vp->v_specflags &= ~SI_MOUNTEDON;
1935 vnode_unlock(vp);
1936 }
1937
1938
1939 void
1940 vnode_settag(vnode_t vp, int tag)
1941 {
1942 vp->v_tag = tag;
1943
1944 }
1945
1946 int
1947 vnode_tag(vnode_t vp)
1948 {
1949 return(vp->v_tag);
1950 }
1951
1952 vnode_t
1953 vnode_parent(vnode_t vp)
1954 {
1955
1956 return(vp->v_parent);
1957 }
1958
1959 void
1960 vnode_setparent(vnode_t vp, vnode_t dvp)
1961 {
1962 vp->v_parent = dvp;
1963 }
1964
1965 void
1966 vnode_setname(vnode_t vp, char * name)
1967 {
1968 vp->v_name = name;
1969 }
1970
1971 /* return the registered FS name when adding the FS to kernel */
1972 void
1973 vnode_vfsname(vnode_t vp, char * buf)
1974 {
1975 strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
1976 }
1977
1978 /* return the FS type number */
1979 int
1980 vnode_vfstypenum(vnode_t vp)
1981 {
1982 return(vp->v_mount->mnt_vtable->vfc_typenum);
1983 }
1984
1985 int
1986 vnode_vfs64bitready(vnode_t vp)
1987 {
1988
1989 /*
1990 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
1991 */
1992 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
1993 return(1);
1994 else
1995 return(0);
1996 }
1997
1998
1999
2000 /* return the visible flags on associated mount point of vnode_t */
2001 uint32_t
2002 vnode_vfsvisflags(vnode_t vp)
2003 {
2004 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
2005 }
2006
2007 /* return the command modifier flags on associated mount point of vnode_t */
2008 uint32_t
2009 vnode_vfscmdflags(vnode_t vp)
2010 {
2011 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
2012 }
2013
2014 /* return the max length of a short symlink on vnode_t's mount point */
2015 uint32_t
2016 vnode_vfsmaxsymlen(vnode_t vp)
2017 {
2018 return(vp->v_mount->mnt_maxsymlinklen);
2019 }
2020
2021 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2022 struct vfsstatfs *
2023 vnode_vfsstatfs(vnode_t vp)
2024 {
2025 return(&vp->v_mount->mnt_vfsstat);
2026 }
2027
2028 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
2029 void *
2030 vnode_vfsfsprivate(vnode_t vp)
2031 {
2032 return(vp->v_mount->mnt_data);
2033 }
2034
2035 /* is vnode_t in a rdonly mounted FS */
2036 int
2037 vnode_vfsisrdonly(vnode_t vp)
2038 {
2039 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
2040 }
2041
2042 int
2043 vnode_compound_rename_available(vnode_t vp)
2044 {
2045 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
2046 }
2047 int
2048 vnode_compound_rmdir_available(vnode_t vp)
2049 {
2050 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
2051 }
2052 int
2053 vnode_compound_mkdir_available(vnode_t vp)
2054 {
2055 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
2056 }
2057 int
2058 vnode_compound_remove_available(vnode_t vp)
2059 {
2060 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
2061 }
2062 int
2063 vnode_compound_open_available(vnode_t vp)
2064 {
2065 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
2066 }
2067
2068 int
2069 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2070 {
2071 return ((vp->v_mount->mnt_compound_ops & opid) != 0);
2072 }
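/*
 * Illustrative sketch (not part of the original file): callers can probe for
 * compound VNOP support before choosing a compound code path.
 */
#if 0
static int
example_prefers_compound_open(vnode_t dvp)
{
	return vnode_compound_open_available(dvp) ? 1 : 0;
}
#endif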
2073
2074 /*
2075 * Returns vnode ref to current working directory; if a per-thread current
2076 * working directory is in effect, return that instead of the per process one.
2077 *
2078 * XXX Published, but not used.
2079 */
2080 vnode_t
2081 current_workingdir(void)
2082 {
2083 return vfs_context_cwd(vfs_context_current());
2084 }
2085
2086 /* returns vnode ref to current root(chroot) directory */
2087 vnode_t
2088 current_rootdir(void)
2089 {
2090 proc_t proc = current_proc();
2091 struct vnode * vp ;
2092
2093 if ( (vp = proc->p_fd->fd_rdir) ) {
2094 if ( (vnode_getwithref(vp)) )
2095 return (NULL);
2096 }
2097 return vp;
2098 }
2099
2100 /*
2101 * Get a filesec and optional acl contents from an extended attribute.
2102 * Function will attempt to retrieve ACL, UUID, and GUID information using a
2103 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2104 *
2105 * Parameters: vp The vnode on which to operate.
2106 * fsecp The filesec (and ACL, if any) being
2107 * retrieved.
2108 * ctx The vnode context in which the
2109 * operation is to be attempted.
2110 *
2111 * Returns: 0 Success
2112 * !0 errno value
2113 *
2114 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2115 * host byte order, as will be the ACL contents, if any.
2116 * Internally, we will canonicalize these values from network (PPC)
2117 * byte order after we retrieve them so that the on-disk contents
2118 * of the extended attribute are identical for both PPC and Intel
2119 * (if we were not being required to provide this service via
2120 * fallback, this would be the job of the filesystem
2121 * 'VNOP_GETATTR' call).
2122 *
2123 * We use ntohl() because it has a transitive property on Intel
2124 * machines and no effect on PPC machines. This guarantees us
2125 *
2126 * XXX: Deleting rather than ignoring a corrupt security structure is
2127 * probably the only way to reset it without assistance from a
2128 * file system integrity checking tool. Right now we ignore it.
2129 *
2130 * XXX: We should enumerate the possible errno values here, and where
2131 * in the code they originated.
2132 */
2133 static int
2134 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2135 {
2136 kauth_filesec_t fsec;
2137 uio_t fsec_uio;
2138 size_t fsec_size;
2139 size_t xsize, rsize;
2140 int error;
2141 uint32_t host_fsec_magic;
2142 uint32_t host_acl_entrycount;
2143
2144 fsec = NULL;
2145 fsec_uio = NULL;
2146
2147 /* find out how big the EA is */
2148 error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
2149 if (error != 0) {
2150 /* no EA, no filesec */
2151 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2152 error = 0;
2153 /* either way, we are done */
2154 goto out;
2155 }
2156
2157 /*
2158 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2159 * ACE entry ACL, and if it's larger than that, it must have the right
2160 * number of bytes such that it contains an integral number of ACEs,
2161 * rather than partial entries. Otherwise, we ignore it.
2162 */
2163 if (!KAUTH_FILESEC_VALID(xsize)) {
2164 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
2165 error = 0;
2166 goto out;
2167 }
2168
2169 /* how many entries would fit? */
2170 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2171 if (fsec_size > KAUTH_ACL_MAX_ENTRIES) {
2172 KAUTH_DEBUG(" ERROR - Bogus (too large) kauth_fiilesec_t: %ld bytes", xsize);
2173 error = 0;
2174 goto out;
2175 }
2176
2177 /* get buffer and uio */
2178 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
2179 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2180 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2181 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2182 error = ENOMEM;
2183 goto out;
2184 }
2185
2186 /* read security attribute */
2187 rsize = xsize;
2188 if ((error = vn_getxattr(vp,
2189 KAUTH_FILESEC_XATTR,
2190 fsec_uio,
2191 &rsize,
2192 XATTR_NOSECURITY,
2193 ctx)) != 0) {
2194
2195 /* no attribute - no security data */
2196 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2197 error = 0;
2198 /* either way, we are done */
2199 goto out;
2200 }
2201
2202 /*
2203 * Validate security structure; the validation must take place in host
2204 * byte order. If it's corrupt, we will just ignore it.
2205 */
2206
2207 /* Validate the size before trying to convert it */
2208 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2209 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2210 goto out;
2211 }
2212
2213 /* Validate the magic number before trying to convert it */
2214 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2215 if (fsec->fsec_magic != host_fsec_magic) {
2216 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2217 goto out;
2218 }
2219
2220 /* Validate the entry count before trying to convert it. */
2221 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2222 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2223 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2224 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2225 goto out;
2226 }
2227 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2228 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2229 goto out;
2230 }
2231 }
2232
2233 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2234
2235 *fsecp = fsec;
2236 fsec = NULL;
2237 error = 0;
2238 out:
2239 if (fsec != NULL)
2240 kauth_filesec_free(fsec);
2241 if (fsec_uio != NULL)
2242 uio_free(fsec_uio);
2243 if (error)
2244 *fsecp = NULL;
2245 return(error);
2246 }
2247
2248 /*
2249 * Set a filesec and optional acl contents into an extended attribute.
2250 * Function will attempt to store ACL, UUID, and GUID information using a
2251 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2252 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2253 * original caller supplied an acl.
2254 *
2255 * Parameters: vp The vnode on which to operate.
2256 * fsec The filesec being set.
2257 * acl The acl to be associated with 'fsec'.
2258 * ctx The vnode context in which the
2259 * operation is to be attempted.
2260 *
2261 * Returns: 0 Success
2262 * !0 errno value
2263 *
2264 * Notes: Both the fsec and the acl are always valid.
2265 *
2266 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2267 * as are the acl contents, if they are used. Internally, we will
2268 * canonicalize these values into network (PPC) byte order before we
2269 * attempt to write them so that the on-disk contents of the
2270 * extended attribute are identical for both PPC and Intel (if we
2271 * were not being required to provide this service via fallback,
2272 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2273 * We reverse this process on the way out, so we leave with the
2274 * same byte order we started with.
2275 *
2276 * XXX: We should enumerate the possible errno values here, and where
2277 * in the code they originated.
2278 */
2279 static int
2280 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2281 {
2282 uio_t fsec_uio;
2283 int error;
2284 uint32_t saved_acl_copysize;
2285
2286 fsec_uio = NULL;
2287
2288 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2289 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2290 error = ENOMEM;
2291 goto out;
2292 }
2293 /*
2294 * Save the pre-converted ACL copysize, because it gets swapped too
2295 * if we are running with the wrong endianness.
2296 */
2297 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2298
2299 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2300
2301 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2302 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2303 error = vn_setxattr(vp,
2304 KAUTH_FILESEC_XATTR,
2305 fsec_uio,
2306 XATTR_NOSECURITY, /* we have auth'ed already */
2307 ctx);
2308 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2309
2310 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2311
2312 out:
2313 if (fsec_uio != NULL)
2314 uio_free(fsec_uio);
2315 return(error);
2316 }
2317
2318
2319 /*
2320 * Returns: 0 Success
2321 * ENOMEM Not enough space [only if has filesec]
2322 * EINVAL Requested unknown attributes
2323 * VNOP_GETATTR: ???
2324 * vnode_get_filesec: ???
2325 * kauth_cred_guid2uid: ???
2326 * kauth_cred_guid2gid: ???
2327 * vfs_update_vfsstat: ???
2328 */
2329 int
2330 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2331 {
2332 kauth_filesec_t fsec;
2333 kauth_acl_t facl;
2334 int error;
2335 uid_t nuid;
2336 gid_t ngid;
2337
2338 /*
2339 * Reject attempts to fetch unknown attributes.
2340 */
2341 if (vap->va_active & ~VNODE_ATTR_ALL)
2342 return (EINVAL);
2343
2344 /* don't ask for extended security data if the filesystem doesn't support it */
2345 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2346 VATTR_CLEAR_ACTIVE(vap, va_acl);
2347 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2348 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2349 }
2350
2351 /*
2352 * If the caller wants size values we might have to synthesise, give the
2353 * filesystem the opportunity to supply better intermediate results.
2354 */
2355 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2356 VATTR_IS_ACTIVE(vap, va_total_size) ||
2357 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2358 VATTR_SET_ACTIVE(vap, va_data_size);
2359 VATTR_SET_ACTIVE(vap, va_data_alloc);
2360 VATTR_SET_ACTIVE(vap, va_total_size);
2361 VATTR_SET_ACTIVE(vap, va_total_alloc);
2362 }
2363
2364 error = VNOP_GETATTR(vp, vap, ctx);
2365 if (error) {
2366 KAUTH_DEBUG("ERROR - returning %d", error);
2367 goto out;
2368 }
2369
2370 /*
2371 * If extended security data was requested but not returned, try the fallback
2372 * path.
2373 */
2374 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2375 fsec = NULL;
2376
2377 if (XATTR_VNODE_SUPPORTED(vp)) {
2378 /* try to get the filesec */
2379 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
2380 goto out;
2381 }
2382 /* if no filesec, no attributes */
2383 if (fsec == NULL) {
2384 VATTR_RETURN(vap, va_acl, NULL);
2385 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2386 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2387 } else {
2388
2389 /* looks good, try to return what we were asked for */
2390 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2391 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2392
2393 /* only return the ACL if we were actually asked for it */
2394 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2395 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2396 VATTR_RETURN(vap, va_acl, NULL);
2397 } else {
2398 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2399 if (facl == NULL) {
2400 kauth_filesec_free(fsec);
2401 error = ENOMEM;
2402 goto out;
2403 }
2404 bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2405 VATTR_RETURN(vap, va_acl, facl);
2406 }
2407 }
2408 kauth_filesec_free(fsec);
2409 }
2410 }
2411 /*
2412 * If someone gave us an unsolicited filesec, toss it. We promise that
2413 * we're OK with a filesystem giving us anything back, but our callers
2414 * only expect what they asked for.
2415 */
2416 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2417 if (vap->va_acl != NULL)
2418 kauth_acl_free(vap->va_acl);
2419 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2420 }
2421
2422 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2423 /*
2424 * Handle the case where we need a UID/GID, but only have extended
2425 * security information.
2426 */
2427 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2428 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2429 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2430 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
2431 VATTR_RETURN(vap, va_uid, nuid);
2432 }
2433 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2434 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2435 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2436 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
2437 VATTR_RETURN(vap, va_gid, ngid);
2438 }
2439 #endif
2440
2441 /*
2442 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2443 */
2444 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2445 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2446 nuid = vap->va_uid;
2447 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2448 nuid = vp->v_mount->mnt_fsowner;
2449 if (nuid == KAUTH_UID_NONE)
2450 nuid = 99;
2451 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2452 nuid = vap->va_uid;
2453 } else {
2454 /* this will always be something sensible */
2455 nuid = vp->v_mount->mnt_fsowner;
2456 }
2457 if ((nuid == 99) && !vfs_context_issuser(ctx))
2458 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2459 VATTR_RETURN(vap, va_uid, nuid);
2460 }
2461 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2462 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2463 ngid = vap->va_gid;
2464 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2465 ngid = vp->v_mount->mnt_fsgroup;
2466 if (ngid == KAUTH_GID_NONE)
2467 ngid = 99;
2468 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2469 ngid = vap->va_gid;
2470 } else {
2471 /* this will always be something sensible */
2472 ngid = vp->v_mount->mnt_fsgroup;
2473 }
2474 if ((ngid == 99) && !vfs_context_issuser(ctx))
2475 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2476 VATTR_RETURN(vap, va_gid, ngid);
2477 }
2478
2479 /*
2480 * Synthesise some values that can be reasonably guessed.
2481 */
2482 if (!VATTR_IS_SUPPORTED(vap, va_iosize))
2483 VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
2484
2485 if (!VATTR_IS_SUPPORTED(vap, va_flags))
2486 VATTR_RETURN(vap, va_flags, 0);
2487
2488 if (!VATTR_IS_SUPPORTED(vap, va_filerev))
2489 VATTR_RETURN(vap, va_filerev, 0);
2490
2491 if (!VATTR_IS_SUPPORTED(vap, va_gen))
2492 VATTR_RETURN(vap, va_gen, 0);
2493
2494 /*
2495 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2496 */
2497 if (!VATTR_IS_SUPPORTED(vap, va_data_size))
2498 VATTR_RETURN(vap, va_data_size, 0);
2499
2500 /* do we want any of the possibly-computed values? */
2501 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2502 VATTR_IS_ACTIVE(vap, va_total_size) ||
2503 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2504 /* make sure f_bsize is valid */
2505 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2506 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0)
2507 goto out;
2508 }
2509
2510 /* default va_data_alloc from va_data_size */
2511 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
2512 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2513
2514 /* default va_total_size from va_data_size */
2515 if (!VATTR_IS_SUPPORTED(vap, va_total_size))
2516 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2517
2518 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2519 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
2520 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2521 }
2522
2523 /*
2524 * If we don't have a change time, pull it from the modtime.
2525 */
2526 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
2527 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2528
2529 /*
2530 * This is really only supported for the creation VNOPs, but since the field is there
2531 * we should populate it correctly.
2532 */
2533 VATTR_RETURN(vap, va_type, vp->v_type);
2534
2535 /*
2536 * The fsid can be obtained from the mountpoint directly.
2537 */
2538 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2539
2540 out:
2541
2542 return(error);
2543 }
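
/*
 * Illustrative only: a minimal sketch (not part of this file) of how a
 * kernel caller typically drives vnode_getattr().  The caller initializes
 * the vnode_attr, marks the attributes it wants, and afterwards consumes
 * only those the filesystem (or the fallback code above) actually supplied.
 * The function name and its error handling are hypothetical.
 */
#if 0 /* example usage sketch */
static int
example_get_data_size(vnode_t vp, vfs_context_t ctx, off_t *sizep)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);

	if ((error = vnode_getattr(vp, &va, ctx)) != 0)
		return (error);

	/* only trust attributes that were marked supported on return */
	if (!VATTR_IS_SUPPORTED(&va, va_data_size))
		return (ENOTSUP);

	*sizep = va.va_data_size;
	return (0);
}
#endif /* example usage sketch */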
2544
2545 /*
2546 * Set the attributes on a vnode in a vnode context.
2547 *
2548 * Parameters: vp The vnode whose attributes to set.
2549 * vap A pointer to the attributes to set.
2550 * ctx The vnode context in which the
2551 * operation is to be attempted.
2552 *
2553 * Returns: 0 Success
2554 * !0 errno value
2555 *
2556 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2557 *
2558 * The contents of the data area pointed to by 'vap' may be
2559 * modified if the vnode is on a filesystem which has been
2560 * mounted with ignore ownership flags, or by the underlying
2561 * VFS itself, or by the fallback code, if the underlying VFS
2562 * does not support ACL, UUID, or GUID attributes directly.
2563 *
2564 * XXX: We should enumerate the possible errno values here, and where
2565 * in the code they originated.
2566 */
2567 int
2568 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2569 {
2570 int error;
2571 #if CONFIG_FSE
2572 uint64_t active;
2573 int is_perm_change = 0;
2574 int is_stat_change = 0;
2575 #endif
2576
2577 /*
2578 * Reject attempts to set unknown attributes.
2579 */
2580 if (vap->va_active & ~VNODE_ATTR_ALL)
2581 return (EINVAL);
2582
2583 /*
2584 * Make sure the filesystem is mounted R/W.
2585 * If not, return an error.
2586 */
2587 if (vfs_isrdonly(vp->v_mount)) {
2588 error = EROFS;
2589 goto out;
2590 }
2591
2592 #if DEVELOPMENT || DEBUG
2593 /*
2594 * XXX VSWAP: Check for entitlements or special flag here
2595 * so we can restrict access appropriately.
2596 */
2597 #else /* DEVELOPMENT || DEBUG */
2598
2599 if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
2600 error = EPERM;
2601 goto out;
2602 }
2603 #endif /* DEVELOPMENT || DEBUG */
2604
2605 #if NAMEDSTREAMS
2606 /* For streams, va_data_size is the only settable attribute. */
2607 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2608 error = EPERM;
2609 goto out;
2610 }
2611 #endif
2612 /* Check for truncation */
2613 if(VATTR_IS_ACTIVE(vap, va_data_size)) {
2614 switch(vp->v_type) {
2615 case VREG:
2616 /* For regular files it's ok */
2617 break;
2618 case VDIR:
2619 /* Not allowed to truncate directories */
2620 error = EISDIR;
2621 goto out;
2622 default:
2623 /* For everything else we will clear the bit and let underlying FS decide on the rest */
2624 VATTR_CLEAR_ACTIVE(vap, va_data_size);
2625 if (vap->va_active)
2626 break;
2627 /* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
2628 return (0);
2629 }
2630 }
2631
2632 /*
2633 * If ownership is being ignored on this volume, we silently discard
2634 * ownership changes.
2635 */
2636 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2637 VATTR_CLEAR_ACTIVE(vap, va_uid);
2638 VATTR_CLEAR_ACTIVE(vap, va_gid);
2639 }
2640
2641 /*
2642 * Make sure that extended security is enabled if we're going to try
2643 * to set any.
2644 */
2645 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2646 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2647 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2648 error = ENOTSUP;
2649 goto out;
2650 }
2651
2652 /* Never allow the setting of any unsupported superuser flags. */
2653 if (VATTR_IS_ACTIVE(vap, va_flags)) {
2654 vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
2655 }
2656
2657 #if CONFIG_FSE
2658 /*
2659 * Remember all of the active attributes that we're
2660 * attempting to modify.
2661 */
2662 active = vap->va_active & ~VNODE_ATTR_RDONLY;
2663 #endif
2664
2665 error = VNOP_SETATTR(vp, vap, ctx);
2666
2667 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
2668 error = vnode_setattr_fallback(vp, vap, ctx);
2669
2670 #if CONFIG_FSE
2671 #define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \
2672 VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \
2673 VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl))
2674
2675 /*
2676 * Now that we've changed them, decide whether to send an
2677 * FSevent.
2678 */
2679 if ((active & PERMISSION_BITS) & vap->va_supported) {
2680 is_perm_change = 1;
2681 } else {
2682 /*
2683 * We've already checked the permission bits, and we
2684 * also want to filter out access time / backup time
2685 * changes.
2686 */
2687 active &= ~(PERMISSION_BITS |
2688 VNODE_ATTR_BIT(va_access_time) |
2689 VNODE_ATTR_BIT(va_backup_time));
2690
2691 /* Anything left to notify about? */
2692 if (active & vap->va_supported)
2693 is_stat_change = 1;
2694 }
2695
2696 if (error == 0) {
2697 if (is_perm_change) {
2698 if (need_fsevent(FSE_CHOWN, vp)) {
2699 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2700 }
2701 } else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) {
2702 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2703 }
2704 }
2705 #undef PERMISSION_BITS
2706 #endif
2707
2708 out:
2709 return(error);
2710 }
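
/*
 * Illustrative only: a hypothetical caller of vnode_setattr().  VATTR_SET
 * both stores the value and marks the attribute active; anything the
 * filesystem does not support is picked up by vnode_setattr_fallback()
 * below.  The function name is made up.
 */
#if 0 /* example usage sketch */
static int
example_chmod(vnode_t vp, mode_t mode, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, mode);

	return (vnode_setattr(vp, &va, ctx));
}
#endif /* example usage sketch */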
2711
2712 /*
2713 * Fallback for setting the attributes on a vnode in a vnode context. This
2714 * function will attempt to store ACL, UUID, and GUID information utilizing
2715 * a read/modify/write operation against an EA used as a backing store for
2716 * the object.
2717 *
2718 * Parameters: vp The vnode whose attributes to set.
2719 * vap A pointer to the attributes to set.
2720 * ctx The vnode context in which the
2721 * operation is to be attempted.
2722 *
2723 * Returns: 0 Success
2724 * !0 errno value
2725 *
2726 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2727 * as are the fsec and lfsec, if they are used.
2728 *
2729 * The contents of the data area pointed to by 'vap' may be
2730 * modified to indicate which of the requested attributes are
2731 * actually supported.
2732 *
2733 * XXX: We should enumerate the possible errno values here, and where
2734 * in the code they originated.
2735 */
2736 int
2737 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2738 {
2739 kauth_filesec_t fsec;
2740 kauth_acl_t facl;
2741 struct kauth_filesec lfsec;
2742 int error;
2743
2744 error = 0;
2745
2746 /*
2747 * Extended security fallback via extended attributes.
2748 *
2749 * Note that we do not free the filesec; the caller is expected to
2750 * do this.
2751 */
2752 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2753 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2754 VATTR_NOT_RETURNED(vap, va_guuid)) {
2755 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2756
2757 /*
2758 * Fail for file types that we don't permit extended security
2759 * to be set on.
2760 */
2761 if (!XATTR_VNODE_SUPPORTED(vp)) {
2762 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2763 error = EINVAL;
2764 goto out;
2765 }
2766
2767 /*
2768 * If we don't have all the extended security items, we need
2769 * to fetch the existing data to perform a read-modify-write
2770 * operation.
2771 */
2772 fsec = NULL;
2773 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2774 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2775 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2776 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2777 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2778 goto out;
2779 }
2780 }
2781 /* if we didn't get a filesec, use our local one */
2782 if (fsec == NULL) {
2783 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2784 fsec = &lfsec;
2785 } else {
2786 KAUTH_DEBUG("SETATTR - updating existing filesec");
2787 }
2788 /* find the ACL */
2789 facl = &fsec->fsec_acl;
2790
2791 /* if we're using the local filesec, we need to initialise it */
2792 if (fsec == &lfsec) {
2793 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2794 fsec->fsec_owner = kauth_null_guid;
2795 fsec->fsec_group = kauth_null_guid;
2796 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2797 facl->acl_flags = 0;
2798 }
2799
2800 /*
2801 * Update with the supplied attributes.
2802 */
2803 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2804 KAUTH_DEBUG("SETATTR - updating owner UUID");
2805 fsec->fsec_owner = vap->va_uuuid;
2806 VATTR_SET_SUPPORTED(vap, va_uuuid);
2807 }
2808 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2809 KAUTH_DEBUG("SETATTR - updating group UUID");
2810 fsec->fsec_group = vap->va_guuid;
2811 VATTR_SET_SUPPORTED(vap, va_guuid);
2812 }
2813 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2814 if (vap->va_acl == NULL) {
2815 KAUTH_DEBUG("SETATTR - removing ACL");
2816 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2817 } else {
2818 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2819 facl = vap->va_acl;
2820 }
2821 VATTR_SET_SUPPORTED(vap, va_acl);
2822 }
2823
2824 /*
2825 * If the filesec data is all invalid, we can just remove
2826 * the EA completely.
2827 */
2828 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2829 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2830 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2831 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2832 /* no attribute is ok, nothing to delete */
2833 if (error == ENOATTR)
2834 error = 0;
2835 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2836 } else {
2837 /* write the EA */
2838 error = vnode_set_filesec(vp, fsec, facl, ctx);
2839 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2840 }
2841
2842 /* if we fetched a filesec, dispose of the buffer */
2843 if (fsec != &lfsec)
2844 kauth_filesec_free(fsec);
2845 }
2846 out:
2847
2848 return(error);
2849 }
2850
2851 /*
2852 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
2853 * event on a vnode.
2854 */
2855 int
2856 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
2857 {
2858 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
2859 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
2860 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
2861 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
2862 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
2863 uint32_t knote_events = (events & knote_mask);
2864
2865 /* Permissions are not explicitly part of the kqueue model */
2866 if (events & VNODE_EVENT_PERMS) {
2867 knote_events |= NOTE_ATTRIB;
2868 }
2869
2870 /* Directory contents information just becomes NOTE_WRITE */
2871 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
2872 knote_events |= NOTE_WRITE;
2873 }
2874
2875 if (knote_events) {
2876 lock_vnode_and_post(vp, knote_events);
2877 #if CONFIG_FSE
2878 if (vap != NULL) {
2879 create_fsevent_from_kevent(vp, events, vap);
2880 }
2881 #else
2882 (void)vap;
2883 #endif
2884 }
2885
2886 return 0;
2887 }
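
/*
 * Illustrative only: a sketch of how a filesystem might report an extend
 * event through this upcall.  It checks for watchers with
 * vnode_ismonitored(), activates the attributes vnode_notify() wants via
 * vfs_get_notify_attributes() (both defined below), fills them in with
 * vnode_getattr(), and posts the event.  The function name is hypothetical.
 */
#if 0 /* example usage sketch */
static void
examplefs_notify_extend(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;

	if (!vnode_ismonitored(vp))
		return;

	vfs_get_notify_attributes(&va);
	if (vnode_getattr(vp, &va, ctx) == 0)
		(void)vnode_notify(vp, VNODE_EVENT_EXTEND, &va);
}
#endif /* example usage sketch */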
2888
2889
2890
2891 int
2892 vnode_isdyldsharedcache(vnode_t vp)
2893 {
2894 return ((vp->v_flag & VSHARED_DYLD) ? 1 : 0);
2895 }
2896
2897
2898 /*
2899 * For a filesystem that isn't tracking its own vnode watchers:
2900 * check whether a vnode is being monitored.
2901 */
2902 int
2903 vnode_ismonitored(vnode_t vp) {
2904 return (vp->v_knotes.slh_first != NULL);
2905 }
2906
2907 /*
2908 * Initialize a struct vnode_attr and activate the attributes required
2909 * by the vnode_notify() call.
2910 */
2911 int
2912 vfs_get_notify_attributes(struct vnode_attr *vap)
2913 {
2914 VATTR_INIT(vap);
2915 vap->va_active = VNODE_NOTIFY_ATTRS;
2916 return 0;
2917 }
2918
2919 #if CONFIG_TRIGGERS
2920 int
2921 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
2922 {
2923 int error;
2924 mount_t mp;
2925
2926 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
2927 if (mp == NULL) {
2928 return ENOENT;
2929 }
2930
2931 error = vfs_busy(mp, LK_NOWAIT);
2932 mount_iterdrop(mp);
2933
2934 if (error != 0) {
2935 return ENOENT;
2936 }
2937
2938 mount_lock(mp);
2939 if (mp->mnt_triggercallback != NULL) {
2940 error = EBUSY;
2941 mount_unlock(mp);
2942 goto out;
2943 }
2944
2945 mp->mnt_triggercallback = vtc;
2946 mp->mnt_triggerdata = data;
2947 mount_unlock(mp);
2948
2949 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
2950
2951 out:
2952 vfs_unbusy(mp);
2953 return 0;
2954 }
2955 #endif /* CONFIG_TRIGGERS */
2956
2957 /*
2958 * Definition of vnode operations.
2959 */
2960
2961 #if 0
2962 /*
2963 *#
2964 *#% lookup dvp L ? ?
2965 *#% lookup vpp - L -
2966 */
2967 struct vnop_lookup_args {
2968 struct vnodeop_desc *a_desc;
2969 vnode_t a_dvp;
2970 vnode_t *a_vpp;
2971 struct componentname *a_cnp;
2972 vfs_context_t a_context;
2973 };
2974 #endif /* 0*/
2975
2976 /*
2977 * Returns: 0 Success
2978 * lock_fsnode:ENOENT No such file or directory [only for VFS
2979 * that is not thread safe & vnode is
2980 * currently being/has been terminated]
2981 * <vfs_lookup>:ENAMETOOLONG
2982 * <vfs_lookup>:ENOENT
2983 * <vfs_lookup>:EJUSTRETURN
2984 * <vfs_lookup>:EPERM
2985 * <vfs_lookup>:EISDIR
2986 * <vfs_lookup>:ENOTDIR
2987 * <vfs_lookup>:???
2988 *
2989 * Note: The return codes from the underlying VFS's lookup routine can't
2990 * be fully enumerated here, since third party VFS authors may not
2991 * limit their error returns to the ones documented here, even
2992 * though this may result in some programs functioning incorrectly.
2993 *
2994 * The return codes documented above are those which may currently
2995 * be returned by HFS from hfs_lookup, not including additional
2996 * error codes which may be propagated from underlying routines.
2997 */
2998 errno_t
2999 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
3000 {
3001 int _err;
3002 struct vnop_lookup_args a;
3003
3004 a.a_desc = &vnop_lookup_desc;
3005 a.a_dvp = dvp;
3006 a.a_vpp = vpp;
3007 a.a_cnp = cnp;
3008 a.a_context = ctx;
3009
3010 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
3011 if (_err == 0 && *vpp) {
3012 DTRACE_FSINFO(lookup, vnode_t, *vpp);
3013 }
3014
3015 return (_err);
3016 }
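
/*
 * Illustrative only: VNOP_LOOKUP() above dispatches through the vnode's
 * v_op vector at vnop_lookup_desc.vdesc_offset.  A filesystem populates
 * that vector by registering a table of vnodeopv_entry_desc entries
 * (typically via the vfs_fsentry handed to vfs_fsadd()).  The examplefs_*
 * names below are hypothetical; this is a sketch of the common pattern,
 * not a definitive registration recipe.
 */
#if 0 /* example registration sketch */
static int examplefs_lookup(struct vnop_lookup_args *);
static int examplefs_open(struct vnop_open_args *);

int (**examplefs_vnodeop_p)(void *);

static struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vnop_default_desc, (int (*)(void *))vn_default_error },
	{ &vnop_lookup_desc, (int (*)(void *))examplefs_lookup },
	{ &vnop_open_desc, (int (*)(void *))examplefs_open },
	{ (struct vnodeop_desc *)NULL, (int (*)(void *))NULL }
};

static struct vnodeopv_desc examplefs_vnodeop_opv_desc = {
	&examplefs_vnodeop_p, examplefs_vnodeop_entries
};
#endif /* example registration sketch */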
3017
3018 #if 0
3019 struct vnop_compound_open_args {
3020 struct vnodeop_desc *a_desc;
3021 vnode_t a_dvp;
3022 vnode_t *a_vpp;
3023 struct componentname *a_cnp;
3024 int32_t a_flags;
3025 int32_t a_fmode;
3026 struct vnode_attr *a_vap;
3027 vfs_context_t a_context;
3028 void *a_reserved;
3029 };
3030 #endif /* 0 */
3031
3032 int
3033 VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
3034 {
3035 int _err;
3036 struct vnop_compound_open_args a;
3037 int did_create = 0;
3038 int want_create;
3039 uint32_t tmp_status = 0;
3040 struct componentname *cnp = &ndp->ni_cnd;
3041
3042 want_create = (flags & O_CREAT);
3043
3044 a.a_desc = &vnop_compound_open_desc;
3045 a.a_dvp = dvp;
3046 a.a_vpp = vpp; /* Could be NULL */
3047 a.a_cnp = cnp;
3048 a.a_flags = flags;
3049 a.a_fmode = fmode;
3050 a.a_status = (statusp != NULL) ? statusp : &tmp_status;
3051 a.a_vap = vap;
3052 a.a_context = ctx;
3053 a.a_open_create_authorizer = vn_authorize_create;
3054 a.a_open_existing_authorizer = vn_authorize_open_existing;
3055 a.a_reserved = NULL;
3056
3057 if (dvp == NULLVP) {
3058 panic("No dvp?");
3059 }
3060 if (want_create && !vap) {
3061 panic("Want create, but no vap?");
3062 }
3063 if (!want_create && vap) {
3064 panic("Don't want create, but have a vap?");
3065 }
3066
3067 _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
3068 if (want_create) {
3069 if (_err == 0 && *vpp) {
3070 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3071 } else {
3072 DTRACE_FSINFO(compound_open, vnode_t, dvp);
3073 }
3074 } else {
3075 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3076 }
3077
3078 did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
3079
3080 if (did_create && !want_create) {
3081 panic("Filesystem did a create, even though none was requested?");
3082 }
3083
3084 if (did_create) {
3085 #if CONFIG_APPLEDOUBLE
3086 if (!NATIVE_XATTR(dvp)) {
3087 /*
3088 * Remove stale Apple Double file (if any).
3089 */
3090 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3091 }
3092 #endif /* CONFIG_APPLEDOUBLE */
3093 /* On create, provide kqueue notification */
3094 post_event_if_success(dvp, _err, NOTE_WRITE);
3095 }
3096
3097 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
3098 #if 0 /* FSEvents... */
3099 if (*vpp && _err && _err != EKEEPLOOKING) {
3100 vnode_put(*vpp);
3101 *vpp = NULLVP;
3102 }
3103 #endif /* 0 */
3104
3105 return (_err);
3106
3107 }
3108
3109 #if 0
3110 struct vnop_create_args {
3111 struct vnodeop_desc *a_desc;
3112 vnode_t a_dvp;
3113 vnode_t *a_vpp;
3114 struct componentname *a_cnp;
3115 struct vnode_attr *a_vap;
3116 vfs_context_t a_context;
3117 };
3118 #endif /* 0*/
3119 errno_t
3120 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3121 {
3122 int _err;
3123 struct vnop_create_args a;
3124
3125 a.a_desc = &vnop_create_desc;
3126 a.a_dvp = dvp;
3127 a.a_vpp = vpp;
3128 a.a_cnp = cnp;
3129 a.a_vap = vap;
3130 a.a_context = ctx;
3131
3132 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
3133 if (_err == 0 && *vpp) {
3134 DTRACE_FSINFO(create, vnode_t, *vpp);
3135 }
3136
3137 #if CONFIG_APPLEDOUBLE
3138 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3139 /*
3140 * Remove stale Apple Double file (if any).
3141 */
3142 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3143 }
3144 #endif /* CONFIG_APPLEDOUBLE */
3145
3146 post_event_if_success(dvp, _err, NOTE_WRITE);
3147
3148 return (_err);
3149 }
3150
3151 #if 0
3152 /*
3153 *#
3154 *#% whiteout dvp L L L
3155 *#% whiteout cnp - - -
3156 *#% whiteout flag - - -
3157 *#
3158 */
3159 struct vnop_whiteout_args {
3160 struct vnodeop_desc *a_desc;
3161 vnode_t a_dvp;
3162 struct componentname *a_cnp;
3163 int a_flags;
3164 vfs_context_t a_context;
3165 };
3166 #endif /* 0*/
3167 errno_t
3168 VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
3169 __unused int flags, __unused vfs_context_t ctx)
3170 {
3171 return (ENOTSUP); // XXX OBSOLETE
3172 }
3173
3174 #if 0
3175 /*
3176 *#
3177 *#% mknod dvp L U U
3178 *#% mknod vpp - X -
3179 *#
3180 */
3181 struct vnop_mknod_args {
3182 struct vnodeop_desc *a_desc;
3183 vnode_t a_dvp;
3184 vnode_t *a_vpp;
3185 struct componentname *a_cnp;
3186 struct vnode_attr *a_vap;
3187 vfs_context_t a_context;
3188 };
3189 #endif /* 0*/
3190 errno_t
3191 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3192 {
3193
3194 int _err;
3195 struct vnop_mknod_args a;
3196
3197 a.a_desc = &vnop_mknod_desc;
3198 a.a_dvp = dvp;
3199 a.a_vpp = vpp;
3200 a.a_cnp = cnp;
3201 a.a_vap = vap;
3202 a.a_context = ctx;
3203
3204 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3205 if (_err == 0 && *vpp) {
3206 DTRACE_FSINFO(mknod, vnode_t, *vpp);
3207 }
3208
3209 post_event_if_success(dvp, _err, NOTE_WRITE);
3210
3211 return (_err);
3212 }
3213
3214 #if 0
3215 /*
3216 *#
3217 *#% open vp L L L
3218 *#
3219 */
3220 struct vnop_open_args {
3221 struct vnodeop_desc *a_desc;
3222 vnode_t a_vp;
3223 int a_mode;
3224 vfs_context_t a_context;
3225 };
3226 #endif /* 0*/
3227 errno_t
3228 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3229 {
3230 int _err;
3231 struct vnop_open_args a;
3232
3233 if (ctx == NULL) {
3234 ctx = vfs_context_current();
3235 }
3236 a.a_desc = &vnop_open_desc;
3237 a.a_vp = vp;
3238 a.a_mode = mode;
3239 a.a_context = ctx;
3240
3241 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3242 DTRACE_FSINFO(open, vnode_t, vp);
3243
3244 return (_err);
3245 }
3246
3247 #if 0
3248 /*
3249 *#
3250 *#% close vp U U U
3251 *#
3252 */
3253 struct vnop_close_args {
3254 struct vnodeop_desc *a_desc;
3255 vnode_t a_vp;
3256 int a_fflag;
3257 vfs_context_t a_context;
3258 };
3259 #endif /* 0*/
3260 errno_t
3261 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3262 {
3263 int _err;
3264 struct vnop_close_args a;
3265
3266 if (ctx == NULL) {
3267 ctx = vfs_context_current();
3268 }
3269 a.a_desc = &vnop_close_desc;
3270 a.a_vp = vp;
3271 a.a_fflag = fflag;
3272 a.a_context = ctx;
3273
3274 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3275 DTRACE_FSINFO(close, vnode_t, vp);
3276
3277 return (_err);
3278 }
3279
3280 #if 0
3281 /*
3282 *#
3283 *#% access vp L L L
3284 *#
3285 */
3286 struct vnop_access_args {
3287 struct vnodeop_desc *a_desc;
3288 vnode_t a_vp;
3289 int a_action;
3290 vfs_context_t a_context;
3291 };
3292 #endif /* 0*/
3293 errno_t
3294 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3295 {
3296 int _err;
3297 struct vnop_access_args a;
3298
3299 if (ctx == NULL) {
3300 ctx = vfs_context_current();
3301 }
3302 a.a_desc = &vnop_access_desc;
3303 a.a_vp = vp;
3304 a.a_action = action;
3305 a.a_context = ctx;
3306
3307 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3308 DTRACE_FSINFO(access, vnode_t, vp);
3309
3310 return (_err);
3311 }
3312
3313 #if 0
3314 /*
3315 *#
3316 *#% getattr vp = = =
3317 *#
3318 */
3319 struct vnop_getattr_args {
3320 struct vnodeop_desc *a_desc;
3321 vnode_t a_vp;
3322 struct vnode_attr *a_vap;
3323 vfs_context_t a_context;
3324 };
3325 #endif /* 0*/
3326 errno_t
3327 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3328 {
3329 int _err;
3330 struct vnop_getattr_args a;
3331
3332 a.a_desc = &vnop_getattr_desc;
3333 a.a_vp = vp;
3334 a.a_vap = vap;
3335 a.a_context = ctx;
3336
3337 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3338 DTRACE_FSINFO(getattr, vnode_t, vp);
3339
3340 return (_err);
3341 }
3342
3343 #if 0
3344 /*
3345 *#
3346 *#% setattr vp L L L
3347 *#
3348 */
3349 struct vnop_setattr_args {
3350 struct vnodeop_desc *a_desc;
3351 vnode_t a_vp;
3352 struct vnode_attr *a_vap;
3353 vfs_context_t a_context;
3354 };
3355 #endif /* 0*/
3356 errno_t
3357 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3358 {
3359 int _err;
3360 struct vnop_setattr_args a;
3361
3362 a.a_desc = &vnop_setattr_desc;
3363 a.a_vp = vp;
3364 a.a_vap = vap;
3365 a.a_context = ctx;
3366
3367 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3368 DTRACE_FSINFO(setattr, vnode_t, vp);
3369
3370 #if CONFIG_APPLEDOUBLE
3371 /*
3372 * Shadow uid/gid/mod change to extended attribute file.
3373 */
3374 if (_err == 0 && !NATIVE_XATTR(vp)) {
3375 struct vnode_attr va;
3376 int change = 0;
3377
3378 VATTR_INIT(&va);
3379 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3380 VATTR_SET(&va, va_uid, vap->va_uid);
3381 change = 1;
3382 }
3383 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3384 VATTR_SET(&va, va_gid, vap->va_gid);
3385 change = 1;
3386 }
3387 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3388 VATTR_SET(&va, va_mode, vap->va_mode);
3389 change = 1;
3390 }
3391 if (change) {
3392 vnode_t dvp;
3393 const char *vname;
3394
3395 dvp = vnode_getparent(vp);
3396 vname = vnode_getname(vp);
3397
3398 xattrfile_setattr(dvp, vname, &va, ctx);
3399 if (dvp != NULLVP)
3400 vnode_put(dvp);
3401 if (vname != NULL)
3402 vnode_putname(vname);
3403 }
3404 }
3405 #endif /* CONFIG_APPLEDOUBLE */
3406
3407 /*
3408 * If we have changed any of the things about the file that are likely
3409 * to result in changes to authorization results, blow the vnode auth
3410 * cache
3411 */
3412 if (_err == 0 && (
3413 VATTR_IS_SUPPORTED(vap, va_mode) ||
3414 VATTR_IS_SUPPORTED(vap, va_uid) ||
3415 VATTR_IS_SUPPORTED(vap, va_gid) ||
3416 VATTR_IS_SUPPORTED(vap, va_flags) ||
3417 VATTR_IS_SUPPORTED(vap, va_acl) ||
3418 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
3419 VATTR_IS_SUPPORTED(vap, va_guuid))) {
3420 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3421
3422 #if NAMEDSTREAMS
3423 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3424 vnode_t svp;
3425 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3426 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3427 vnode_put(svp);
3428 }
3429 }
3430 #endif /* NAMEDSTREAMS */
3431 }
3432
3433
3434 post_event_if_success(vp, _err, NOTE_ATTRIB);
3435
3436 return (_err);
3437 }
3438
3439
3440 #if 0
3441 /*
3442 *#
3443 *#% read vp L L L
3444 *#
3445 */
3446 struct vnop_read_args {
3447 struct vnodeop_desc *a_desc;
3448 vnode_t a_vp;
3449 struct uio *a_uio;
3450 int a_ioflag;
3451 vfs_context_t a_context;
3452 };
3453 #endif /* 0*/
3454 errno_t
3455 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3456 {
3457 int _err;
3458 struct vnop_read_args a;
3459 #if CONFIG_DTRACE
3460 user_ssize_t resid = uio_resid(uio);
3461 #endif
3462
3463 if (ctx == NULL) {
3464 return EINVAL;
3465 }
3466
3467 a.a_desc = &vnop_read_desc;
3468 a.a_vp = vp;
3469 a.a_uio = uio;
3470 a.a_ioflag = ioflag;
3471 a.a_context = ctx;
3472
3473 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3474 DTRACE_FSINFO_IO(read,
3475 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3476
3477 return (_err);
3478 }
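
/*
 * Illustrative only: a hypothetical in-kernel read issued directly through
 * VNOP_READ(), using the same uio_create()/uio_addiov() pattern seen in
 * vnode_get_filesec() above.  Most callers go through higher-level helpers;
 * this just shows how the arguments fit together.
 */
#if 0 /* example usage sketch */
static int
example_kernel_read(vnode_t vp, off_t offset, void *buf, size_t len,
	vfs_context_t ctx)
{
	uio_t auio;
	int error;

	if ((auio = uio_create(1, offset, UIO_SYSSPACE, UIO_READ)) == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), len);

	error = VNOP_READ(vp, auio, 0 /* ioflag */, ctx);
	/* on success, uio_resid(auio) is the number of bytes NOT transferred */

	uio_free(auio);
	return (error);
}
#endif /* example usage sketch */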
3479
3480
3481 #if 0
3482 /*
3483 *#
3484 *#% write vp L L L
3485 *#
3486 */
3487 struct vnop_write_args {
3488 struct vnodeop_desc *a_desc;
3489 vnode_t a_vp;
3490 struct uio *a_uio;
3491 int a_ioflag;
3492 vfs_context_t a_context;
3493 };
3494 #endif /* 0*/
3495 errno_t
3496 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3497 {
3498 struct vnop_write_args a;
3499 int _err;
3500 #if CONFIG_DTRACE
3501 user_ssize_t resid = uio_resid(uio);
3502 #endif
3503
3504 if (ctx == NULL) {
3505 return EINVAL;
3506 }
3507
3508 a.a_desc = &vnop_write_desc;
3509 a.a_vp = vp;
3510 a.a_uio = uio;
3511 a.a_ioflag = ioflag;
3512 a.a_context = ctx;
3513
3514 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3515 DTRACE_FSINFO_IO(write,
3516 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3517
3518 post_event_if_success(vp, _err, NOTE_WRITE);
3519
3520 return (_err);
3521 }
3522
3523
3524 #if 0
3525 /*
3526 *#
3527 *#% ioctl vp U U U
3528 *#
3529 */
3530 struct vnop_ioctl_args {
3531 struct vnodeop_desc *a_desc;
3532 vnode_t a_vp;
3533 u_long a_command;
3534 caddr_t a_data;
3535 int a_fflag;
3536 vfs_context_t a_context;
3537 };
3538 #endif /* 0*/
3539 errno_t
3540 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3541 {
3542 int _err;
3543 struct vnop_ioctl_args a;
3544
3545 if (ctx == NULL) {
3546 ctx = vfs_context_current();
3547 }
3548
3549 /*
3550 * This check should probably have been put in the TTY code instead...
3551 *
3552 * We have to be careful about what we assume during startup and shutdown.
3553 * We have to be able to use the root filesystem's device vnode even when
3554 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3555 * structure. If there is no data pointer, it doesn't matter whether
3556 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZE)
3557 * which passes NULL for its data pointer can therefore be used during
3558 * mount or unmount of the root filesystem.
3559 *
3560 * Depending on what root filesystems need to do during mount/unmount, we
3561 * may need to loosen this check again in the future.
3562 */
3563 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3564 if (data != NULL && !vnode_vfs64bitready(vp)) {
3565 return(ENOTTY);
3566 }
3567 }
3568
3569 a.a_desc = &vnop_ioctl_desc;
3570 a.a_vp = vp;
3571 a.a_command = command;
3572 a.a_data = data;
3573 a.a_fflag = fflag;
3574 a.a_context= ctx;
3575
3576 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
3577 DTRACE_FSINFO(ioctl, vnode_t, vp);
3578
3579 return (_err);
3580 }
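
/*
 * Illustrative only: per the comment in VNOP_IOCTL() above, a command that
 * passes NULL for its data pointer bypasses the 64-bit readiness check and
 * can therefore be issued against the root device while devfs is not
 * mounted.  The vnode name is hypothetical, the command is simply the one
 * the comment cites (assumes <sys/disk.h>); a sketch, not a real call site.
 */
#if 0 /* example usage sketch */
static void
example_sync_root_device(vnode_t rootdevice_vp)
{
	/* NULL data pointer => permitted even before devfs is mounted */
	(void)VNOP_IOCTL(rootdevice_vp, DKIOCSYNCHRONIZE, NULL, FWRITE,
		vfs_context_kernel());
}
#endif /* example usage sketch */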
3581
3582
3583 #if 0
3584 /*
3585 *#
3586 *#% select vp U U U
3587 *#
3588 */
3589 struct vnop_select_args {
3590 struct vnodeop_desc *a_desc;
3591 vnode_t a_vp;
3592 int a_which;
3593 int a_fflags;
3594 void *a_wql;
3595 vfs_context_t a_context;
3596 };
3597 #endif /* 0*/
3598 errno_t
3599 VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx)
3600 {
3601 int _err;
3602 struct vnop_select_args a;
3603
3604 if (ctx == NULL) {
3605 ctx = vfs_context_current();
3606 }
3607 a.a_desc = &vnop_select_desc;
3608 a.a_vp = vp;
3609 a.a_which = which;
3610 a.a_fflags = fflags;
3611 a.a_context = ctx;
3612 a.a_wql = wql;
3613
3614 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3615 DTRACE_FSINFO(select, vnode_t, vp);
3616
3617 return (_err);
3618 }
3619
3620
3621 #if 0
3622 /*
3623 *#
3624 *#% exchange fvp L L L
3625 *#% exchange tvp L L L
3626 *#
3627 */
3628 struct vnop_exchange_args {
3629 struct vnodeop_desc *a_desc;
3630 vnode_t a_fvp;
3631 vnode_t a_tvp;
3632 int a_options;
3633 vfs_context_t a_context;
3634 };
3635 #endif /* 0*/
3636 errno_t
3637 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3638 {
3639 int _err;
3640 struct vnop_exchange_args a;
3641
3642 a.a_desc = &vnop_exchange_desc;
3643 a.a_fvp = fvp;
3644 a.a_tvp = tvp;
3645 a.a_options = options;
3646 a.a_context = ctx;
3647
3648 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3649 DTRACE_FSINFO(exchange, vnode_t, fvp);
3650
3651 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3652 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3653 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3654
3655 return (_err);
3656 }
3657
3658
3659 #if 0
3660 /*
3661 *#
3662 *#% revoke vp U U U
3663 *#
3664 */
3665 struct vnop_revoke_args {
3666 struct vnodeop_desc *a_desc;
3667 vnode_t a_vp;
3668 int a_flags;
3669 vfs_context_t a_context;
3670 };
3671 #endif /* 0*/
3672 errno_t
3673 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
3674 {
3675 struct vnop_revoke_args a;
3676 int _err;
3677
3678 a.a_desc = &vnop_revoke_desc;
3679 a.a_vp = vp;
3680 a.a_flags = flags;
3681 a.a_context = ctx;
3682
3683 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3684 DTRACE_FSINFO(revoke, vnode_t, vp);
3685
3686 return (_err);
3687 }
3688
3689
3690 #if 0
3691 /*
3692 *#
3693 *# mmap - vp U U U
3694 *#
3695 */
3696 struct vnop_mmap_args {
3697 struct vnodeop_desc *a_desc;
3698 vnode_t a_vp;
3699 int a_fflags;
3700 vfs_context_t a_context;
3701 };
3702 #endif /* 0*/
3703 errno_t
3704 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
3705 {
3706 int _err;
3707 struct vnop_mmap_args a;
3708
3709 a.a_desc = &vnop_mmap_desc;
3710 a.a_vp = vp;
3711 a.a_fflags = fflags;
3712 a.a_context = ctx;
3713
3714 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3715 DTRACE_FSINFO(mmap, vnode_t, vp);
3716
3717 return (_err);
3718 }
3719
3720
3721 #if 0
3722 /*
3723 *#
3724 *# mnomap - vp U U U
3725 *#
3726 */
3727 struct vnop_mnomap_args {
3728 struct vnodeop_desc *a_desc;
3729 vnode_t a_vp;
3730 vfs_context_t a_context;
3731 };
3732 #endif /* 0*/
3733 errno_t
3734 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
3735 {
3736 int _err;
3737 struct vnop_mnomap_args a;
3738
3739 a.a_desc = &vnop_mnomap_desc;
3740 a.a_vp = vp;
3741 a.a_context = ctx;
3742
3743 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3744 DTRACE_FSINFO(mnomap, vnode_t, vp);
3745
3746 return (_err);
3747 }
3748
3749
3750 #if 0
3751 /*
3752 *#
3753 *#% fsync vp L L L
3754 *#
3755 */
3756 struct vnop_fsync_args {
3757 struct vnodeop_desc *a_desc;
3758 vnode_t a_vp;
3759 int a_waitfor;
3760 vfs_context_t a_context;
3761 };
3762 #endif /* 0*/
3763 errno_t
3764 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
3765 {
3766 struct vnop_fsync_args a;
3767 int _err;
3768
3769 a.a_desc = &vnop_fsync_desc;
3770 a.a_vp = vp;
3771 a.a_waitfor = waitfor;
3772 a.a_context = ctx;
3773
3774 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3775 DTRACE_FSINFO(fsync, vnode_t, vp);
3776
3777 return (_err);
3778 }
3779
3780
3781 #if 0
3782 /*
3783 *#
3784 *#% remove dvp L U U
3785 *#% remove vp L U U
3786 *#
3787 */
3788 struct vnop_remove_args {
3789 struct vnodeop_desc *a_desc;
3790 vnode_t a_dvp;
3791 vnode_t a_vp;
3792 struct componentname *a_cnp;
3793 int a_flags;
3794 vfs_context_t a_context;
3795 };
3796 #endif /* 0*/
3797 errno_t
3798 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
3799 {
3800 int _err;
3801 struct vnop_remove_args a;
3802
3803 a.a_desc = &vnop_remove_desc;
3804 a.a_dvp = dvp;
3805 a.a_vp = vp;
3806 a.a_cnp = cnp;
3807 a.a_flags = flags;
3808 a.a_context = ctx;
3809
3810 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3811 DTRACE_FSINFO(remove, vnode_t, vp);
3812
3813 if (_err == 0) {
3814 vnode_setneedinactive(vp);
3815 #if CONFIG_APPLEDOUBLE
3816 if ( !(NATIVE_XATTR(dvp)) ) {
3817 /*
3818 * Remove any associated extended attribute file (._ AppleDouble file).
3819 */
3820 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
3821 }
3822 #endif /* CONFIG_APPLEDOUBLE */
3823 }
3824
3825 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
3826 post_event_if_success(dvp, _err, NOTE_WRITE);
3827
3828 return (_err);
3829 }
3830
3831 int
3832 VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
3833 {
3834 int _err;
3835 struct vnop_compound_remove_args a;
3836 int no_vp = (*vpp == NULLVP);
3837
3838 a.a_desc = &vnop_compound_remove_desc;
3839 a.a_dvp = dvp;
3840 a.a_vpp = vpp;
3841 a.a_cnp = &ndp->ni_cnd;
3842 a.a_flags = flags;
3843 a.a_vap = vap;
3844 a.a_context = ctx;
3845 a.a_remove_authorizer = vn_authorize_unlink;
3846
3847 _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
3848 if (_err == 0 && *vpp) {
3849 DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
3850 } else {
3851 DTRACE_FSINFO(compound_remove, vnode_t, dvp);
3852 }
3853 if (_err == 0) {
3854 vnode_setneedinactive(*vpp);
3855 #if CONFIG_APPLEDOUBLE
3856 if ( !(NATIVE_XATTR(dvp)) ) {
3857 /*
3858 * Remove any associated extended attribute file (._ AppleDouble file).
3859 */
3860 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
3861 }
3862 #endif /* CONFIG_APPLEDOUBLE */
3863 }
3864
3865 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
3866 post_event_if_success(dvp, _err, NOTE_WRITE);
3867
3868 if (no_vp) {
3869 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
3870 if (*vpp && _err && _err != EKEEPLOOKING) {
3871 vnode_put(*vpp);
3872 *vpp = NULLVP;
3873 }
3874 }
3875
3876 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
3877
3878 return (_err);
3879 }
3880
3881 #if 0
3882 /*
3883 *#
3884 *#% link vp U U U
3885 *#% link tdvp L U U
3886 *#
3887 */
3888 struct vnop_link_args {
3889 struct vnodeop_desc *a_desc;
3890 vnode_t a_vp;
3891 vnode_t a_tdvp;
3892 struct componentname *a_cnp;
3893 vfs_context_t a_context;
3894 };
3895 #endif /* 0*/
3896 errno_t
3897 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
3898 {
3899 int _err;
3900 struct vnop_link_args a;
3901
3902 #if CONFIG_APPLEDOUBLE
3903 /*
3904 * For file systems with non-native extended attributes,
3905 * disallow linking to an existing "._" Apple Double file.
3906 */
3907 if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
3908 const char *vname;
3909
3910 vname = vnode_getname(vp);
3911 if (vname != NULL) {
3912 _err = 0;
3913 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
3914 _err = EPERM;
3915 }
3916 vnode_putname(vname);
3917 if (_err)
3918 return (_err);
3919 }
3920 }
3921 #endif /* CONFIG_APPLEDOUBLE */
3922
3923 a.a_desc = &vnop_link_desc;
3924 a.a_vp = vp;
3925 a.a_tdvp = tdvp;
3926 a.a_cnp = cnp;
3927 a.a_context = ctx;
3928
3929 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
3930 DTRACE_FSINFO(link, vnode_t, vp);
3931
3932 post_event_if_success(vp, _err, NOTE_LINK);
3933 post_event_if_success(tdvp, _err, NOTE_WRITE);
3934
3935 return (_err);
3936 }
3937
3938 errno_t
3939 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
3940 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
3941 vfs_rename_flags_t flags, vfs_context_t ctx)
3942 {
3943 int _err;
3944 struct nameidata *fromnd = NULL;
3945 struct nameidata *tond = NULL;
3946 #if CONFIG_APPLEDOUBLE
3947 vnode_t src_attr_vp = NULLVP;
3948 vnode_t dst_attr_vp = NULLVP;
3949 char smallname1[48];
3950 char smallname2[48];
3951 char *xfromname = NULL;
3952 char *xtoname = NULL;
3953 #endif /* CONFIG_APPLEDOUBLE */
3954 int batched;
3955 uint32_t tdfflags; // Target directory file flags
3956
3957 batched = vnode_compound_rename_available(fdvp);
3958
3959 if (!batched) {
3960 if (*fvpp == NULLVP)
3961 panic("Not batched, and no fvp?");
3962 }
3963
3964 #if CONFIG_APPLEDOUBLE
3965 /*
3966 * We need to preflight any potential AppleDouble file for the source file
3967 * before doing the rename operation, since we could potentially be doing
3968 * this operation on a network filesystem, and would end up duplicating
3969 * the work. Also, save the source and destination names. Skip it if the
3970 * source has a "._" prefix.
3971 */
3972
3973 if (!NATIVE_XATTR(fdvp) &&
3974 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
3975 size_t len;
3976 int error;
3977
3978 /* Get source attribute file name. */
3979 len = fcnp->cn_namelen + 3;
3980 if (len > sizeof(smallname1)) {
3981 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
3982 } else {
3983 xfromname = &smallname1[0];
3984 }
3985 strlcpy(xfromname, "._", min(sizeof smallname1, len));
3986 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
3987 xfromname[len-1] = '\0';
3988
3989 /* Get destination attribute file name. */
3990 len = tcnp->cn_namelen + 3;
3991 if (len > sizeof(smallname2)) {
3992 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
3993 } else {
3994 xtoname = &smallname2[0];
3995 }
3996 strlcpy(xtoname, "._", min(sizeof smallname2, len));
3997 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
3998 xtoname[len-1] = '\0';
3999
4000 /*
4001 * Look up source attribute file, keep reference on it if exists.
4002 * Note that we do the namei with the nameiop of RENAME, which is different than
4003 * in the rename syscall. It's OK if the source file does not exist, since this
4004 * is only for AppleDouble files.
4005 */
4006 if (xfromname != NULL) {
4007 MALLOC(fromnd, struct nameidata *, sizeof (struct nameidata), M_TEMP, M_WAITOK);
4008 NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
4009 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
4010 fromnd->ni_dvp = fdvp;
4011 error = namei(fromnd);
4012
4013 /*
4014 * If there was an error looking up source attribute file,
4015 * we'll behave as if it didn't exist.
4016 */
4017
4018 if (error == 0) {
4019 if (fromnd->ni_vp) {
4020 /* src_attr_vp indicates need to call vnode_put / nameidone later */
4021 src_attr_vp = fromnd->ni_vp;
4022
4023 if (fromnd->ni_vp->v_type != VREG) {
4024 src_attr_vp = NULLVP;
4025 vnode_put(fromnd->ni_vp);
4026 }
4027 }
4028 /*
4029 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4030 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4031 * have a vnode here, so we drop our namei buffer for the source attribute file
4032 */
4033 if (src_attr_vp == NULLVP) {
4034 nameidone(fromnd);
4035 }
4036 }
4037 }
4038 }
4039 #endif /* CONFIG_APPLEDOUBLE */
4040
4041 if (batched) {
4042 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
4043 if (_err != 0) {
4044 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
4045 }
4046 } else {
4047 if (flags) {
4048 _err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
4049 if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
4050 // Legacy...
4051 if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
4052 fcnp->cn_flags |= CN_SECLUDE_RENAME;
4053 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4054 }
4055 }
4056 } else
4057 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4058 }
4059
4060 /*
4061 * If moved to a new directory that is restricted,
4062 * set the restricted flag on the item moved.
4063 */
4064 if (_err == 0) {
4065 _err = vnode_flags(tdvp, &tdfflags, ctx);
4066 if (_err == 0) {
4067 uint32_t inherit_flags = tdfflags & (UF_DATAVAULT | SF_RESTRICTED);
4068 if (inherit_flags) {
4069 uint32_t fflags;
4070 _err = vnode_flags(*fvpp, &fflags, ctx);
4071 if (_err == 0 && fflags != (fflags | inherit_flags)) {
4072 struct vnode_attr va;
4073 VATTR_INIT(&va);
4074 VATTR_SET(&va, va_flags, fflags | inherit_flags);
4075 _err = vnode_setattr(*fvpp, &va, ctx);
4076 }
4077 }
4078 }
4079 }
4080
4081 #if CONFIG_MACF
4082 if (_err == 0) {
4083 mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
4084 }
4085 #endif
4086
4087 #if CONFIG_APPLEDOUBLE
4088 /*
4089 * Rename any associated extended attribute file (._ AppleDouble file).
4090 */
4091 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
4092 int error = 0;
4093
4094 /*
4095 * Get destination attribute file vnode.
4096 * Note that tdvp already has an iocount reference. Make sure to check that we
4097 * get a valid vnode from namei.
4098 */
4099 MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK);
4100 NDINIT(tond, RENAME, OP_RENAME,
4101 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4102 CAST_USER_ADDR_T(xtoname), ctx);
4103 tond->ni_dvp = tdvp;
4104 error = namei(tond);
4105
4106 if (error)
4107 goto ad_error;
4108
4109 if (tond->ni_vp) {
4110 dst_attr_vp = tond->ni_vp;
4111 }
4112
4113 if (src_attr_vp) {
4114 const char *old_name = src_attr_vp->v_name;
4115 vnode_t old_parent = src_attr_vp->v_parent;
4116
4117 if (batched) {
4118 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
4119 tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
4120 0, ctx);
4121 } else {
4122 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
4123 tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
4124 }
4125
4126 if (error == 0 && old_name == src_attr_vp->v_name &&
4127 old_parent == src_attr_vp->v_parent) {
4128 int update_flags = VNODE_UPDATE_NAME;
4129
4130 if (fdvp != tdvp)
4131 update_flags |= VNODE_UPDATE_PARENT;
4132
4133 if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
4134 vnode_update_identity(src_attr_vp, tdvp,
4135 tond->ni_cnd.cn_nameptr,
4136 tond->ni_cnd.cn_namelen,
4137 tond->ni_cnd.cn_hash,
4138 update_flags);
4139 }
4140 }
4141
4142 /* kevent notifications for moving resource files
4143 * _err is zero if we're here, so no need to notify directories, code
4144 * below will do that. only need to post the rename on the source and
4145 * possibly a delete on the dest
4146 */
4147 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4148 if (dst_attr_vp) {
4149 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4150 }
4151
4152 } else if (dst_attr_vp) {
4153 /*
4154 * Just delete destination attribute file vnode if it exists, since
4155 * we didn't have a source attribute file.
4156 * Note that tdvp already has an iocount reference.
4157 */
4158
4159 struct vnop_remove_args args;
4160
4161 args.a_desc = &vnop_remove_desc;
4162 args.a_dvp = tdvp;
4163 args.a_vp = dst_attr_vp;
4164 args.a_cnp = &tond->ni_cnd;
4165 args.a_context = ctx;
4166
4167 if (error == 0) {
4168 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
4169
4170 if (error == 0)
4171 vnode_setneedinactive(dst_attr_vp);
4172 }
4173
4174 /* kevent notification for deleting the destination's attribute file
4175 * if it existed. Only need to post the delete on the destination, since
4176 * the code below will handle the directories.
4177 */
4178 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4179 }
4180 }
4181 ad_error:
4182 if (src_attr_vp) {
4183 vnode_put(src_attr_vp);
4184 nameidone(fromnd);
4185 }
4186 if (dst_attr_vp) {
4187 vnode_put(dst_attr_vp);
4188 nameidone(tond);
4189 }
4190 if (xfromname && xfromname != &smallname1[0]) {
4191 FREE(xfromname, M_TEMP);
4192 }
4193 if (xtoname && xtoname != &smallname2[0]) {
4194 FREE(xtoname, M_TEMP);
4195 }
4196 #endif /* CONFIG_APPLEDOUBLE */
4197 if (fromnd) {
4198 FREE(fromnd, M_TEMP);
4199 }
4200 if (tond) {
4201 FREE(tond, M_TEMP);
4202 }
4203 return _err;
4204 }
4205
4206
4207 #if 0
4208 /*
4209 *#
4210 *#% rename fdvp U U U
4211 *#% rename fvp U U U
4212 *#% rename tdvp L U U
4213 *#% rename tvp X U U
4214 *#
4215 */
4216 struct vnop_rename_args {
4217 struct vnodeop_desc *a_desc;
4218 vnode_t a_fdvp;
4219 vnode_t a_fvp;
4220 struct componentname *a_fcnp;
4221 vnode_t a_tdvp;
4222 vnode_t a_tvp;
4223 struct componentname *a_tcnp;
4224 vfs_context_t a_context;
4225 };
4226 #endif /* 0*/
4227 errno_t
4228 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4229 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4230 vfs_context_t ctx)
4231 {
4232 int _err = 0;
4233 struct vnop_rename_args a;
4234
4235 a.a_desc = &vnop_rename_desc;
4236 a.a_fdvp = fdvp;
4237 a.a_fvp = fvp;
4238 a.a_fcnp = fcnp;
4239 a.a_tdvp = tdvp;
4240 a.a_tvp = tvp;
4241 a.a_tcnp = tcnp;
4242 a.a_context = ctx;
4243
4244 /* do the rename of the main file. */
4245 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4246 DTRACE_FSINFO(rename, vnode_t, fdvp);
4247
4248 if (_err)
4249 return _err;
4250
4251 return post_rename(fdvp, fvp, tdvp, tvp);
4252 }
4253
4254 static errno_t
4255 post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
4256 {
4257 if (tvp && tvp != fvp)
4258 vnode_setneedinactive(tvp);
4259
4260 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4261 int events = NOTE_WRITE;
4262 if (vnode_isdir(fvp)) {
4263 /* Link count on dir changed only if we are moving a dir and...
4264 * --Moved to new dir, not overwriting there
4265 * --Kept in same dir and DID overwrite
4266 */
4267 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4268 events |= NOTE_LINK;
4269 }
4270 }
4271
4272 lock_vnode_and_post(fdvp, events);
4273 if (fdvp != tdvp) {
4274 lock_vnode_and_post(tdvp, events);
4275 }
4276
4277 /* If you're replacing the target, post a deletion for it */
4278 if (tvp)
4279 {
4280 lock_vnode_and_post(tvp, NOTE_DELETE);
4281 }
4282
4283 lock_vnode_and_post(fvp, NOTE_RENAME);
4284
4285 return 0;
4286 }
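
/*
 * Illustrative sketch, not part of the original file: the VNOP_RENAME
 * dispatch above indexes vp->v_op[vnop_rename_desc.vdesc_offset], a vector
 * populated from a filesystem's vnodeopv_entry_desc table when the
 * filesystem registers with the VFS.  The "examplefs_*" names below are
 * hypothetical.
 */
#if 0
static int examplefs_vnop_rename(struct vnop_rename_args *ap);

static struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vnop_default_desc, (int (*)(void *))vn_default_error },
	{ &vnop_rename_desc,  (int (*)(void *))examplefs_vnop_rename },
	{ (struct vnodeop_desc *)NULL, (int (*)(void *))NULL }
};
#endif /* 0 */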
4287
4288 #if 0
4289 /*
4290 *#
4291 *#% renamex fdvp U U U
4292 *#% renamex fvp U U U
4293 *#% renamex tdvp L U U
4294 *#% renamex tvp X U U
4295 *#
4296 */
4297 struct vnop_renamex_args {
4298 struct vnodeop_desc *a_desc;
4299 vnode_t a_fdvp;
4300 vnode_t a_fvp;
4301 struct componentname *a_fcnp;
4302 vnode_t a_tdvp;
4303 vnode_t a_tvp;
4304 struct componentname *a_tcnp;
4305 vfs_rename_flags_t a_flags;
4306 vfs_context_t a_context;
4307 };
4308 #endif /* 0*/
4309 errno_t
4310 VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4311 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4312 vfs_rename_flags_t flags, vfs_context_t ctx)
4313 {
4314 int _err = 0;
4315 struct vnop_renamex_args a;
4316
4317 a.a_desc = &vnop_renamex_desc;
4318 a.a_fdvp = fdvp;
4319 a.a_fvp = fvp;
4320 a.a_fcnp = fcnp;
4321 a.a_tdvp = tdvp;
4322 a.a_tvp = tvp;
4323 a.a_tcnp = tcnp;
4324 a.a_flags = flags;
4325 a.a_context = ctx;
4326
4327 /* do the rename of the main file. */
4328 _err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
4329 DTRACE_FSINFO(renamex, vnode_t, fdvp);
4330
4331 if (_err)
4332 return _err;
4333
4334 return post_rename(fdvp, fvp, tdvp, tvp);
4335 }
4336
4337
4338 int
4339 VNOP_COMPOUND_RENAME(
4340 struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4341 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4342 uint32_t flags, vfs_context_t ctx)
4343 {
4344 int _err = 0;
4345 int events;
4346 struct vnop_compound_rename_args a;
4347 int no_fvp, no_tvp;
4348
4349 no_fvp = (*fvpp) == NULLVP;
4350 no_tvp = (*tvpp) == NULLVP;
4351
4352 a.a_desc = &vnop_compound_rename_desc;
4353
4354 a.a_fdvp = fdvp;
4355 a.a_fvpp = fvpp;
4356 a.a_fcnp = fcnp;
4357 a.a_fvap = fvap;
4358
4359 a.a_tdvp = tdvp;
4360 a.a_tvpp = tvpp;
4361 a.a_tcnp = tcnp;
4362 a.a_tvap = tvap;
4363
4364 a.a_flags = flags;
4365 a.a_context = ctx;
4366 a.a_rename_authorizer = vn_authorize_rename;
4367 a.a_reserved = NULL;
4368
4369 /* do the rename of the main file. */
4370 _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
4371 DTRACE_FSINFO(compound_rename, vnode_t, fdvp);
4372
4373 if (_err == 0) {
4374 if (*tvpp && *tvpp != *fvpp)
4375 vnode_setneedinactive(*tvpp);
4376 }
4377
4378 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4379 if (_err == 0 && *fvpp != *tvpp) {
4380 if (!*fvpp) {
4381 panic("No fvpp after compound rename?");
4382 }
4383
4384 events = NOTE_WRITE;
4385 if (vnode_isdir(*fvpp)) {
4386 /* Link count on dir changed only if we are moving a dir and...
4387 * --Moved to new dir, not overwriting there
4388 * --Kept in same dir and DID overwrite
4389 */
4390 if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
4391 events |= NOTE_LINK;
4392 }
4393 }
4394
4395 lock_vnode_and_post(fdvp, events);
4396 if (fdvp != tdvp) {
4397 lock_vnode_and_post(tdvp, events);
4398 }
4399
4400 /* If you're replacing the target, post a deletion for it */
4401 if (*tvpp)
4402 {
4403 lock_vnode_and_post(*tvpp, NOTE_DELETE);
4404 }
4405
4406 lock_vnode_and_post(*fvpp, NOTE_RENAME);
4407 }
4408
4409 if (no_fvp) {
4410 lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
4411 }
4412 if (no_tvp && *tvpp != NULLVP) {
4413 lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
4414 }
4415
4416 if (_err && _err != EKEEPLOOKING) {
4417 if (*fvpp) {
4418 vnode_put(*fvpp);
4419 *fvpp = NULLVP;
4420 }
4421 if (*tvpp) {
4422 vnode_put(*tvpp);
4423 *tvpp = NULLVP;
4424 }
4425 }
4426
4427 return (_err);
4428 }
4429
4430 int
4431 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4432 struct vnode_attr *vap, vfs_context_t ctx)
4433 {
4434 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4435 panic("Non-CREATE nameiop in vn_mkdir()?");
4436 }
4437
4438 if (vnode_compound_mkdir_available(dvp)) {
4439 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4440 } else {
4441 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4442 }
4443 }
4444
4445 #if 0
4446 /*
4447 *#
4448 *#% mkdir dvp L U U
4449 *#% mkdir vpp - L -
4450 *#
4451 */
4452 struct vnop_mkdir_args {
4453 struct vnodeop_desc *a_desc;
4454 vnode_t a_dvp;
4455 vnode_t *a_vpp;
4456 struct componentname *a_cnp;
4457 struct vnode_attr *a_vap;
4458 vfs_context_t a_context;
4459 };
4460 #endif /* 0*/
4461 errno_t
4462 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4463 struct vnode_attr *vap, vfs_context_t ctx)
4464 {
4465 int _err;
4466 struct vnop_mkdir_args a;
4467
4468 a.a_desc = &vnop_mkdir_desc;
4469 a.a_dvp = dvp;
4470 a.a_vpp = vpp;
4471 a.a_cnp = cnp;
4472 a.a_vap = vap;
4473 a.a_context = ctx;
4474
4475 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4476 if (_err == 0 && *vpp) {
4477 DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4478 }
4479 #if CONFIG_APPLEDOUBLE
4480 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4481 /*
4482 * Remove stale Apple Double file (if any).
4483 */
4484 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4485 }
4486 #endif /* CONFIG_APPLEDOUBLE */
4487
4488 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4489
4490 return (_err);
4491 }
4492
4493 int
4494 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4495 struct vnode_attr *vap, vfs_context_t ctx)
4496 {
4497 int _err;
4498 struct vnop_compound_mkdir_args a;
4499
4500 a.a_desc = &vnop_compound_mkdir_desc;
4501 a.a_dvp = dvp;
4502 a.a_vpp = vpp;
4503 a.a_cnp = &ndp->ni_cnd;
4504 a.a_vap = vap;
4505 a.a_flags = 0;
4506 a.a_context = ctx;
4507 #if 0
4508 a.a_mkdir_authorizer = vn_authorize_mkdir;
4509 #endif /* 0 */
4510 a.a_reserved = NULL;
4511
4512 _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
4513 if (_err == 0 && *vpp) {
4514 DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
4515 }
4516 #if CONFIG_APPLEDOUBLE
4517 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4518 /*
4519 * Remove stale Apple Double file (if any).
4520 */
4521 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4522 }
4523 #endif /* CONFIG_APPLEDOUBLE */
4524
4525 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4526
4527 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
4528 if (*vpp && _err && _err != EKEEPLOOKING) {
4529 vnode_put(*vpp);
4530 *vpp = NULLVP;
4531 }
4532
4533 return (_err);
4534 }
4535
4536 int
4537 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4538 {
4539 if (vnode_compound_rmdir_available(dvp)) {
4540 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4541 } else {
4542 if (*vpp == NULLVP) {
4543 panic("NULL vp, but not a compound VNOP?");
4544 }
4545 if (vap != NULL) {
4546 panic("Non-NULL vap, but not a compound VNOP?");
4547 }
4548 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4549 }
4550 }
4551
4552 #if 0
4553 /*
4554 *#
4555 *#% rmdir dvp L U U
4556 *#% rmdir vp L U U
4557 *#
4558 */
4559 struct vnop_rmdir_args {
4560 struct vnodeop_desc *a_desc;
4561 vnode_t a_dvp;
4562 vnode_t a_vp;
4563 struct componentname *a_cnp;
4564 vfs_context_t a_context;
4565 };
4566
4567 #endif /* 0*/
4568 errno_t
4569 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4570 {
4571 int _err;
4572 struct vnop_rmdir_args a;
4573
4574 a.a_desc = &vnop_rmdir_desc;
4575 a.a_dvp = dvp;
4576 a.a_vp = vp;
4577 a.a_cnp = cnp;
4578 a.a_context = ctx;
4579
4580 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4581 DTRACE_FSINFO(rmdir, vnode_t, vp);
4582
4583 if (_err == 0) {
4584 vnode_setneedinactive(vp);
4585 #if CONFIG_APPLEDOUBLE
4586 if ( !(NATIVE_XATTR(dvp)) ) {
4587 /*
4588 * Remove any associated extended attribute file (._ AppleDouble file).
4589 */
4590 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4591 }
4592 #endif
4593 }
4594
4595 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4596 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4597 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4598
4599 return (_err);
4600 }
4601
4602 int
4603 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4604 struct vnode_attr *vap, vfs_context_t ctx)
4605 {
4606 int _err;
4607 struct vnop_compound_rmdir_args a;
4608 int no_vp;
4609
4610 a.a_desc = &vnop_compound_rmdir_desc;
4611 a.a_dvp = dvp;
4612 a.a_vpp = vpp;
4613 a.a_cnp = &ndp->ni_cnd;
4614 a.a_vap = vap;
4615 a.a_flags = 0;
4616 a.a_context = ctx;
4617 a.a_rmdir_authorizer = vn_authorize_rmdir;
4618 a.a_reserved = NULL;
4619
4620 no_vp = (*vpp == NULLVP);
4621
4622 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
4623 if (_err == 0 && *vpp) {
4624 DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
4625 }
4626 #if CONFIG_APPLEDOUBLE
4627 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4628 /*
4629 * Remove stale Apple Double file (if any).
4630 */
4631 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4632 }
4633 #endif
4634
4635 if (*vpp) {
4636 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4637 }
4638 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4639
4640 if (no_vp) {
4641 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4642
4643 #if 0 /* Removing orphaned ._ files requires a vp.... */
4644 if (*vpp && _err && _err != EKEEPLOOKING) {
4645 vnode_put(*vpp);
4646 *vpp = NULLVP;
4647 }
4648 #endif /* 0 */
4649 }
4650
4651 return (_err);
4652 }
4653
4654 #if CONFIG_APPLEDOUBLE
4655 /*
4656 * Remove a ._ AppleDouble file
4657 */
4658 #define AD_STALE_SECS (180)
4659 static void
4660 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
4661 {
4662 vnode_t xvp;
4663 struct nameidata nd;
4664 char smallname[64];
4665 char *filename = NULL;
4666 size_t len;
4667
4668 if ((basename == NULL) || (basename[0] == '\0') ||
4669 (basename[0] == '.' && basename[1] == '_')) {
4670 return;
4671 }
4672 filename = &smallname[0];
4673 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4674 if (len >= sizeof(smallname)) {
4675 len++; /* snprintf result doesn't include '\0' */
4676 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4677 len = snprintf(filename, len, "._%s", basename);
4678 }
4679 NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
4680 CAST_USER_ADDR_T(filename), ctx);
4681 nd.ni_dvp = dvp;
4682 if (namei(&nd) != 0)
4683 goto out2;
4684
4685 xvp = nd.ni_vp;
4686 nameidone(&nd);
4687 if (xvp->v_type != VREG)
4688 goto out1;
4689
4690 /*
4691 * When creating a new object and a "._" file already
4692 * exists, check to see if it's a stale "._" file.
4693 *
4694 */
4695 if (!force) {
4696 struct vnode_attr va;
4697
4698 VATTR_INIT(&va);
4699 VATTR_WANTED(&va, va_data_size);
4700 VATTR_WANTED(&va, va_modify_time);
4701 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
4702 VATTR_IS_SUPPORTED(&va, va_data_size) &&
4703 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
4704 va.va_data_size != 0) {
4705 struct timeval tv;
4706
4707 microtime(&tv);
4708 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
4709 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
4710 force = 1; /* must be stale */
4711 }
4712 }
4713 }
4714 if (force) {
4715 int error;
4716
4717 error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
4718 if (error == 0)
4719 vnode_setneedinactive(xvp);
4720
4721 post_event_if_success(xvp, error, NOTE_DELETE);
4722 post_event_if_success(dvp, error, NOTE_WRITE);
4723 }
4724
4725 out1:
4726 vnode_put(dvp);
4727 vnode_put(xvp);
4728 out2:
4729 if (filename && filename != &smallname[0]) {
4730 FREE(filename, M_TEMP);
4731 }
4732 }
4733
4734 /*
4735 * Shadow uid/gid/mod to a ._ AppleDouble file
4736 */
4737 static void
4738 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
4739 vfs_context_t ctx)
4740 {
4741 vnode_t xvp;
4742 struct nameidata nd;
4743 char smallname[64];
4744 char *filename = NULL;
4745 size_t len;
4746
4747 if ((dvp == NULLVP) ||
4748 (basename == NULL) || (basename[0] == '\0') ||
4749 (basename[0] == '.' && basename[1] == '_')) {
4750 return;
4751 }
4752 filename = &smallname[0];
4753 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4754 if (len >= sizeof(smallname)) {
4755 len++; /* snprintf result doesn't include '\0' */
4756 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4757 len = snprintf(filename, len, "._%s", basename);
4758 }
4759 NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
4760 CAST_USER_ADDR_T(filename), ctx);
4761 nd.ni_dvp = dvp;
4762 if (namei(&nd) != 0)
4763 goto out2;
4764
4765 xvp = nd.ni_vp;
4766 nameidone(&nd);
4767
4768 if (xvp->v_type == VREG) {
4769 struct vnop_setattr_args a;
4770
4771 a.a_desc = &vnop_setattr_desc;
4772 a.a_vp = xvp;
4773 a.a_vap = vap;
4774 a.a_context = ctx;
4775
4776 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
4777 }
4778
4779 vnode_put(xvp);
4780 out2:
4781 if (filename && filename != &smallname[0]) {
4782 FREE(filename, M_TEMP);
4783 }
4784 }
4785 #endif /* CONFIG_APPLEDOUBLE */
4786
4787 #if 0
4788 /*
4789 *#
4790 *#% symlink dvp L U U
4791 *#% symlink vpp - U -
4792 *#
4793 */
4794 struct vnop_symlink_args {
4795 struct vnodeop_desc *a_desc;
4796 vnode_t a_dvp;
4797 vnode_t *a_vpp;
4798 struct componentname *a_cnp;
4799 struct vnode_attr *a_vap;
4800 char *a_target;
4801 vfs_context_t a_context;
4802 };
4803
4804 #endif /* 0*/
4805 errno_t
4806 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4807 struct vnode_attr *vap, char *target, vfs_context_t ctx)
4808 {
4809 int _err;
4810 struct vnop_symlink_args a;
4811
4812 a.a_desc = &vnop_symlink_desc;
4813 a.a_dvp = dvp;
4814 a.a_vpp = vpp;
4815 a.a_cnp = cnp;
4816 a.a_vap = vap;
4817 a.a_target = target;
4818 a.a_context = ctx;
4819
4820 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
4821 DTRACE_FSINFO(symlink, vnode_t, dvp);
4822 #if CONFIG_APPLEDOUBLE
4823 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4824 /*
4825 * Remove stale Apple Double file (if any). Posts its own knotes
4826 */
4827 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4828 }
4829 #endif /* CONFIG_APPLEDOUBLE */
4830
4831 post_event_if_success(dvp, _err, NOTE_WRITE);
4832
4833 return (_err);
4834 }
4835
4836 #if 0
4837 /*
4838 *#
4839 *#% readdir vp L L L
4840 *#
4841 */
4842 struct vnop_readdir_args {
4843 struct vnodeop_desc *a_desc;
4844 vnode_t a_vp;
4845 struct uio *a_uio;
4846 int a_flags;
4847 int *a_eofflag;
4848 int *a_numdirent;
4849 vfs_context_t a_context;
4850 };
4851
4852 #endif /* 0*/
4853 errno_t
4854 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
4855 int *numdirent, vfs_context_t ctx)
4856 {
4857 int _err;
4858 struct vnop_readdir_args a;
4859 #if CONFIG_DTRACE
4860 user_ssize_t resid = uio_resid(uio);
4861 #endif
4862
4863 a.a_desc = &vnop_readdir_desc;
4864 a.a_vp = vp;
4865 a.a_uio = uio;
4866 a.a_flags = flags;
4867 a.a_eofflag = eofflag;
4868 a.a_numdirent = numdirent;
4869 a.a_context = ctx;
4870
4871 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
4872 DTRACE_FSINFO_IO(readdir,
4873 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4874
4875 return (_err);
4876 }
4877
4878 #if 0
4879 /*
4880 *#
4881 *#% readdirattr vp L L L
4882 *#
4883 */
4884 struct vnop_readdirattr_args {
4885 struct vnodeop_desc *a_desc;
4886 vnode_t a_vp;
4887 struct attrlist *a_alist;
4888 struct uio *a_uio;
4889 uint32_t a_maxcount;
4890 uint32_t a_options;
4891 uint32_t *a_newstate;
4892 int *a_eofflag;
4893 uint32_t *a_actualcount;
4894 vfs_context_t a_context;
4895 };
4896
4897 #endif /* 0*/
4898 errno_t
4899 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
4900 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
4901 {
4902 int _err;
4903 struct vnop_readdirattr_args a;
4904 #if CONFIG_DTRACE
4905 user_ssize_t resid = uio_resid(uio);
4906 #endif
4907
4908 a.a_desc = &vnop_readdirattr_desc;
4909 a.a_vp = vp;
4910 a.a_alist = alist;
4911 a.a_uio = uio;
4912 a.a_maxcount = maxcount;
4913 a.a_options = options;
4914 a.a_newstate = newstate;
4915 a.a_eofflag = eofflag;
4916 a.a_actualcount = actualcount;
4917 a.a_context = ctx;
4918
4919 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
4920 DTRACE_FSINFO_IO(readdirattr,
4921 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4922
4923 return (_err);
4924 }
4925
4926 #if 0
4927 struct vnop_getattrlistbulk_args {
4928 struct vnodeop_desc *a_desc;
4929 vnode_t a_vp;
4930 struct attrlist *a_alist;
4931 struct vnode_attr *a_vap;
4932 struct uio *a_uio;
4933 void *a_private;
4934 uint64_t a_options;
4935 int *a_eofflag;
4936 uint32_t *a_actualcount;
4937 vfs_context_t a_context;
4938 };
4939 #endif /* 0*/
4940 errno_t
4941 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
4942 struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
4943 int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
4944 {
4945 int _err;
4946 struct vnop_getattrlistbulk_args a;
4947 #if CONFIG_DTRACE
4948 user_ssize_t resid = uio_resid(uio);
4949 #endif
4950
4951 a.a_desc = &vnop_getattrlistbulk_desc;
4952 a.a_vp = vp;
4953 a.a_alist = alist;
4954 a.a_vap = vap;
4955 a.a_uio = uio;
4956 a.a_private = private;
4957 a.a_options = options;
4958 a.a_eofflag = eofflag;
4959 a.a_actualcount = actualcount;
4960 a.a_context = ctx;
4961
4962 _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
4963 DTRACE_FSINFO_IO(getattrlistbulk,
4964 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4965
4966 return (_err);
4967 }
4968
4969 #if 0
4970 /*
4971 *#
4972 *#% readlink vp L L L
4973 *#
4974 */
4975 struct vnop_readlink_args {
4976 struct vnodeop_desc *a_desc;
4977 vnode_t a_vp;
4978 struct uio *a_uio;
4979 vfs_context_t a_context;
4980 };
4981 #endif /* 0 */
4982
4983 /*
4984 * Returns: 0 Success
4985 * lock_fsnode:ENOENT No such file or directory [only for VFS
4986 * that is not thread safe & vnode is
4987 * currently being/has been terminated]
4988 * <vfs_readlink>:EINVAL
4989 * <vfs_readlink>:???
4990 *
4991 * Note: The return codes from the underlying VFS's readlink routine
4992 * can't be fully enumerated here, since third party VFS authors
4993 * may not limit their error returns to the ones documented here,
4994 * even though this may result in some programs functioning
4995 * incorrectly.
4996 *
4997 * The return codes documented above are those which may currently
4998 * be returned by HFS from hfs_vnop_readlink, not including
4999 * additional error code which may be propagated from underlying
5000 * routines.
5001 */
5002 errno_t
5003 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
5004 {
5005 int _err;
5006 struct vnop_readlink_args a;
5007 #if CONFIG_DTRACE
5008 user_ssize_t resid = uio_resid(uio);
5009 #endif
5010 a.a_desc = &vnop_readlink_desc;
5011 a.a_vp = vp;
5012 a.a_uio = uio;
5013 a.a_context = ctx;
5014
5015 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
5016 DTRACE_FSINFO_IO(readlink,
5017 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5018
5019 return (_err);
5020 }
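
/*
 * Illustrative sketch, not part of the original file: one way a kernel
 * caller might read a symlink target through VNOP_READLINK, using the uio
 * KPI.  Assumes the caller already holds an iocount on 'vp'; the function
 * name and buffer handling are hypothetical.
 */
#if 0
static int
example_read_link_target(vnode_t vp, char *buf, size_t bufsize,
    size_t *copied, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), bufsize);

	error = VNOP_READLINK(vp, auio, ctx);
	if (error == 0)
		*copied = bufsize - (size_t)uio_resid(auio);

	uio_free(auio);
	return (error);
}
#endif /* 0 */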
5021
5022 #if 0
5023 /*
5024 *#
5025 *#% inactive vp L U U
5026 *#
5027 */
5028 struct vnop_inactive_args {
5029 struct vnodeop_desc *a_desc;
5030 vnode_t a_vp;
5031 vfs_context_t a_context;
5032 };
5033 #endif /* 0*/
5034 errno_t
5035 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
5036 {
5037 int _err;
5038 struct vnop_inactive_args a;
5039
5040 a.a_desc = &vnop_inactive_desc;
5041 a.a_vp = vp;
5042 a.a_context = ctx;
5043
5044 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
5045 DTRACE_FSINFO(inactive, vnode_t, vp);
5046
5047 #if NAMEDSTREAMS
5048 /* For file systems that do not support named streams natively, mark
5049 * the shadow stream file vnode to be recycled as soon as the last
5050 * reference goes away. To avoid re-entering the reclaim code, do not
5051 * call recycle on terminating named stream vnodes.
5052 */
5053 if (vnode_isnamedstream(vp) &&
5054 (vp->v_parent != NULLVP) &&
5055 vnode_isshadow(vp) &&
5056 ((vp->v_lflag & VL_TERMINATE) == 0)) {
5057 vnode_recycle(vp);
5058 }
5059 #endif
5060
5061 return (_err);
5062 }
5063
5064
5065 #if 0
5066 /*
5067 *#
5068 *#% reclaim vp U U U
5069 *#
5070 */
5071 struct vnop_reclaim_args {
5072 struct vnodeop_desc *a_desc;
5073 vnode_t a_vp;
5074 vfs_context_t a_context;
5075 };
5076 #endif /* 0*/
5077 errno_t
5078 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5079 {
5080 int _err;
5081 struct vnop_reclaim_args a;
5082
5083 a.a_desc = &vnop_reclaim_desc;
5084 a.a_vp = vp;
5085 a.a_context = ctx;
5086
5087 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5088 DTRACE_FSINFO(reclaim, vnode_t, vp);
5089
5090 return (_err);
5091 }
5092
5093
5094 /*
5095 * Returns: 0 Success
5096 * lock_fsnode:ENOENT No such file or directory [only for VFS
5097 * that is not thread safe & vnode is
5098 * currently being/has been terminated]
5099 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5100 */
5101 #if 0
5102 /*
5103 *#
5104 *#% pathconf vp L L L
5105 *#
5106 */
5107 struct vnop_pathconf_args {
5108 struct vnodeop_desc *a_desc;
5109 vnode_t a_vp;
5110 int a_name;
5111 int32_t *a_retval;
5112 vfs_context_t a_context;
5113 };
5114 #endif /* 0*/
5115 errno_t
5116 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5117 {
5118 int _err;
5119 struct vnop_pathconf_args a;
5120
5121 a.a_desc = &vnop_pathconf_desc;
5122 a.a_vp = vp;
5123 a.a_name = name;
5124 a.a_retval = retval;
5125 a.a_context = ctx;
5126
5127 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5128 DTRACE_FSINFO(pathconf, vnode_t, vp);
5129
5130 return (_err);
5131 }
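
/*
 * Illustrative sketch, not part of the original file: a typical in-kernel
 * query of a per-filesystem limit through VNOP_PATHCONF.  The wrapper
 * function is hypothetical.
 */
#if 0
static int
example_name_max(vnode_t vp, int32_t *name_max, vfs_context_t ctx)
{
	/* Ask the filesystem for its maximum filename length. */
	return VNOP_PATHCONF(vp, _PC_NAME_MAX, name_max, ctx);
}
#endif /* 0 */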
5132
5133 /*
5134 * Returns: 0 Success
5135 * err_advlock:ENOTSUP
5136 * lf_advlock:???
5137 * <vnop_advlock_desc>:???
5138 *
5139 * Notes: VFS implementations that provide advisory locking through
5140 * <vnop_advlock_desc> (because lock enforcement does not occur
5141 * locally) should try to limit themselves to the return codes
5142 * documented above for lf_advlock and err_advlock.
5143 */
5144 #if 0
5145 /*
5146 *#
5147 *#% advlock vp U U U
5148 *#
5149 */
5150 struct vnop_advlock_args {
5151 struct vnodeop_desc *a_desc;
5152 vnode_t a_vp;
5153 caddr_t a_id;
5154 int a_op;
5155 struct flock *a_fl;
5156 int a_flags;
5157 vfs_context_t a_context;
5158 };
5159 #endif /* 0*/
5160 errno_t
5161 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
5162 {
5163 int _err;
5164 struct vnop_advlock_args a;
5165
5166 a.a_desc = &vnop_advlock_desc;
5167 a.a_vp = vp;
5168 a.a_id = id;
5169 a.a_op = op;
5170 a.a_fl = fl;
5171 a.a_flags = flags;
5172 a.a_context = ctx;
5173 a.a_timeout = timeout;
5174
5175 /* Disallow advisory locking on non-seekable vnodes */
5176 if (vnode_isfifo(vp)) {
5177 _err = err_advlock(&a);
5178 } else {
5179 if ((vp->v_flag & VLOCKLOCAL)) {
5180 /* Advisory locking done at this layer */
5181 _err = lf_advlock(&a);
5182 } else if (flags & F_OFD_LOCK) {
5183 /* Non-local locking doesn't work for OFD locks */
5184 _err = err_advlock(&a);
5185 } else {
5186 /* Advisory locking done by underlying filesystem */
5187 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
5188 }
5189 DTRACE_FSINFO(advlock, vnode_t, vp);
5190 if (op == F_UNLCK && flags == F_FLOCK)
5191 post_event_if_success(vp, _err, NOTE_FUNLOCK);
5192 }
5193
5194 return (_err);
5195 }
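
/*
 * Illustrative sketch, not part of the original file: a filesystem that
 * wants the VFS layer (lf_advlock above) to enforce advisory locks can mark
 * its mount accordingly, typically from its vfs_mount handler, so that
 * VLOCKLOCAL is set on its vnodes.  The "examplefs_mount" name and the
 * elided mount work are hypothetical.
 */
#if 0
static int
examplefs_mount(struct mount *mp, vnode_t devvp, user_addr_t data,
    vfs_context_t ctx)
{
	/* ... filesystem-specific mount setup ... */

	/* Have VNOP_ADVLOCK route lock requests to lf_advlock(). */
	vfs_setlocklocal(mp);
	return (0);
}
#endif /* 0 */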
5196
5197
5198
5199 #if 0
5200 /*
5201 *#
5202 *#% allocate vp L L L
5203 *#
5204 */
5205 struct vnop_allocate_args {
5206 struct vnodeop_desc *a_desc;
5207 vnode_t a_vp;
5208 off_t a_length;
5209 u_int32_t a_flags;
5210 off_t *a_bytesallocated;
5211 off_t a_offset;
5212 vfs_context_t a_context;
5213 };
5214
5215 #endif /* 0*/
5216 errno_t
5217 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5218 {
5219 int _err;
5220 struct vnop_allocate_args a;
5221
5222 a.a_desc = &vnop_allocate_desc;
5223 a.a_vp = vp;
5224 a.a_length = length;
5225 a.a_flags = flags;
5226 a.a_bytesallocated = bytesallocated;
5227 a.a_offset = offset;
5228 a.a_context = ctx;
5229
5230 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5231 DTRACE_FSINFO(allocate, vnode_t, vp);
5232 #if CONFIG_FSE
5233 if (_err == 0) {
5234 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5235 }
5236 #endif
5237
5238 return (_err);
5239 }
5240
5241 #if 0
5242 /*
5243 *#
5244 *#% pagein vp = = =
5245 *#
5246 */
5247 struct vnop_pagein_args {
5248 struct vnodeop_desc *a_desc;
5249 vnode_t a_vp;
5250 upl_t a_pl;
5251 upl_offset_t a_pl_offset;
5252 off_t a_f_offset;
5253 size_t a_size;
5254 int a_flags;
5255 vfs_context_t a_context;
5256 };
5257 #endif /* 0*/
5258 errno_t
5259 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5260 {
5261 int _err;
5262 struct vnop_pagein_args a;
5263
5264 a.a_desc = &vnop_pagein_desc;
5265 a.a_vp = vp;
5266 a.a_pl = pl;
5267 a.a_pl_offset = pl_offset;
5268 a.a_f_offset = f_offset;
5269 a.a_size = size;
5270 a.a_flags = flags;
5271 a.a_context = ctx;
5272
5273 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5274 DTRACE_FSINFO(pagein, vnode_t, vp);
5275
5276 return (_err);
5277 }
5278
5279 #if 0
5280 /*
5281 *#
5282 *#% pageout vp = = =
5283 *#
5284 */
5285 struct vnop_pageout_args {
5286 struct vnodeop_desc *a_desc;
5287 vnode_t a_vp;
5288 upl_t a_pl;
5289 upl_offset_t a_pl_offset;
5290 off_t a_f_offset;
5291 size_t a_size;
5292 int a_flags;
5293 vfs_context_t a_context;
5294 };
5295
5296 #endif /* 0*/
5297 errno_t
5298 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5299 {
5300 int _err;
5301 struct vnop_pageout_args a;
5302
5303 a.a_desc = &vnop_pageout_desc;
5304 a.a_vp = vp;
5305 a.a_pl = pl;
5306 a.a_pl_offset = pl_offset;
5307 a.a_f_offset = f_offset;
5308 a.a_size = size;
5309 a.a_flags = flags;
5310 a.a_context = ctx;
5311
5312 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5313 DTRACE_FSINFO(pageout, vnode_t, vp);
5314
5315 post_event_if_success(vp, _err, NOTE_WRITE);
5316
5317 return (_err);
5318 }
5319
5320 int
5321 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5322 {
5323 if (vnode_compound_remove_available(dvp)) {
5324 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5325 } else {
5326 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5327 }
5328 }
5329
5330 #if CONFIG_SEARCHFS
5331
5332 #if 0
5333 /*
5334 *#
5335 *#% searchfs vp L L L
5336 *#
5337 */
5338 struct vnop_searchfs_args {
5339 struct vnodeop_desc *a_desc;
5340 vnode_t a_vp;
5341 void *a_searchparams1;
5342 void *a_searchparams2;
5343 struct attrlist *a_searchattrs;
5344 uint32_t a_maxmatches;
5345 struct timeval *a_timelimit;
5346 struct attrlist *a_returnattrs;
5347 uint32_t *a_nummatches;
5348 uint32_t a_scriptcode;
5349 uint32_t a_options;
5350 struct uio *a_uio;
5351 struct searchstate *a_searchstate;
5352 vfs_context_t a_context;
5353 };
5354
5355 #endif /* 0*/
5356 errno_t
5357 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5358 {
5359 int _err;
5360 struct vnop_searchfs_args a;
5361
5362 a.a_desc = &vnop_searchfs_desc;
5363 a.a_vp = vp;
5364 a.a_searchparams1 = searchparams1;
5365 a.a_searchparams2 = searchparams2;
5366 a.a_searchattrs = searchattrs;
5367 a.a_maxmatches = maxmatches;
5368 a.a_timelimit = timelimit;
5369 a.a_returnattrs = returnattrs;
5370 a.a_nummatches = nummatches;
5371 a.a_scriptcode = scriptcode;
5372 a.a_options = options;
5373 a.a_uio = uio;
5374 a.a_searchstate = searchstate;
5375 a.a_context = ctx;
5376
5377 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5378 DTRACE_FSINFO(searchfs, vnode_t, vp);
5379
5380 return (_err);
5381 }
5382 #endif /* CONFIG_SEARCHFS */
5383
5384 #if 0
5385 /*
5386 *#
5387 *#% copyfile fvp U U U
5388 *#% copyfile tdvp L U U
5389 *#% copyfile tvp X U U
5390 *#
5391 */
5392 struct vnop_copyfile_args {
5393 struct vnodeop_desc *a_desc;
5394 vnode_t a_fvp;
5395 vnode_t a_tdvp;
5396 vnode_t a_tvp;
5397 struct componentname *a_tcnp;
5398 int a_mode;
5399 int a_flags;
5400 vfs_context_t a_context;
5401 };
5402 #endif /* 0*/
5403 errno_t
5404 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5405 int mode, int flags, vfs_context_t ctx)
5406 {
5407 int _err;
5408 struct vnop_copyfile_args a;
5409 a.a_desc = &vnop_copyfile_desc;
5410 a.a_fvp = fvp;
5411 a.a_tdvp = tdvp;
5412 a.a_tvp = tvp;
5413 a.a_tcnp = tcnp;
5414 a.a_mode = mode;
5415 a.a_flags = flags;
5416 a.a_context = ctx;
5417 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5418 DTRACE_FSINFO(copyfile, vnode_t, fvp);
5419 return (_err);
5420 }
5421
5422 #if 0
5423 struct vnop_clonefile_args {
5424 struct vnodeop_desc *a_desc;
5425 vnode_t a_fvp;
5426 vnode_t a_dvp;
5427 vnode_t *a_vpp;
5428 struct componentname *a_cnp;
5429 struct vnode_attr *a_vap;
5430 uint32_t a_flags;
5431 vfs_context_t a_context;
5432 int (*a_dir_clone_authorizer)( /* Authorization callback */
5433 struct vnode_attr *vap, /* attribute to be authorized */
5434 kauth_action_t action, /* action for which attribute is to be authorized */
5435 struct vnode_attr *dvap, /* target directory attributes */
5436 vnode_t sdvp, /* source directory vnode pointer (optional) */
5437 mount_t mp, /* mount point of filesystem */
5438 dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */
5439 uint32_t flags, /* value passed in a_flags to the VNOP */
5440 vfs_context_t ctx, /* As passed to VNOP */
5441 void *reserved); /* Always NULL */
5442 void *a_reserved; /* Currently unused */
5443 };
5444 #endif /* 0 */
5445
5446 errno_t
5447 VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
5448 struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
5449 vfs_context_t ctx)
5450 {
5451 int _err;
5452 struct vnop_clonefile_args a;
5453 a.a_desc = &vnop_clonefile_desc;
5454 a.a_fvp = fvp;
5455 a.a_dvp = dvp;
5456 a.a_vpp = vpp;
5457 a.a_cnp = cnp;
5458 a.a_vap = vap;
5459 a.a_flags = flags;
5460 a.a_context = ctx;
5461
5462 if (vnode_vtype(fvp) == VDIR)
5463 a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
5464 else
5465 a.a_dir_clone_authorizer = NULL;
5466
5467 _err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);
5468
5469 if (_err == 0 && *vpp)
5470 DTRACE_FSINFO(clonefile, vnode_t, *vpp);
5471
5472 post_event_if_success(dvp, _err, NOTE_WRITE);
5473
5474 return (_err);
5475 }
5476
5477 errno_t
5478 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5479 {
5480 struct vnop_getxattr_args a;
5481 int error;
5482
5483 a.a_desc = &vnop_getxattr_desc;
5484 a.a_vp = vp;
5485 a.a_name = name;
5486 a.a_uio = uio;
5487 a.a_size = size;
5488 a.a_options = options;
5489 a.a_context = ctx;
5490
5491 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
5492 DTRACE_FSINFO(getxattr, vnode_t, vp);
5493
5494 return (error);
5495 }
5496
5497 errno_t
5498 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
5499 {
5500 struct vnop_setxattr_args a;
5501 int error;
5502
5503 a.a_desc = &vnop_setxattr_desc;
5504 a.a_vp = vp;
5505 a.a_name = name;
5506 a.a_uio = uio;
5507 a.a_options = options;
5508 a.a_context = ctx;
5509
5510 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
5511 DTRACE_FSINFO(setxattr, vnode_t, vp);
5512
5513 if (error == 0)
5514 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
5515
5516 post_event_if_success(vp, error, NOTE_ATTRIB);
5517
5518 return (error);
5519 }
5520
5521 errno_t
5522 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
5523 {
5524 struct vnop_removexattr_args a;
5525 int error;
5526
5527 a.a_desc = &vnop_removexattr_desc;
5528 a.a_vp = vp;
5529 a.a_name = name;
5530 a.a_options = options;
5531 a.a_context = ctx;
5532
5533 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
5534 DTRACE_FSINFO(removexattr, vnode_t, vp);
5535
5536 post_event_if_success(vp, error, NOTE_ATTRIB);
5537
5538 return (error);
5539 }
5540
5541 errno_t
5542 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5543 {
5544 struct vnop_listxattr_args a;
5545 int error;
5546
5547 a.a_desc = &vnop_listxattr_desc;
5548 a.a_vp = vp;
5549 a.a_uio = uio;
5550 a.a_size = size;
5551 a.a_options = options;
5552 a.a_context = ctx;
5553
5554 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
5555 DTRACE_FSINFO(listxattr, vnode_t, vp);
5556
5557 return (error);
5558 }
5559
5560
5561 #if 0
5562 /*
5563 *#
5564 *#% blktooff vp = = =
5565 *#
5566 */
5567 struct vnop_blktooff_args {
5568 struct vnodeop_desc *a_desc;
5569 vnode_t a_vp;
5570 daddr64_t a_lblkno;
5571 off_t *a_offset;
5572 };
5573 #endif /* 0*/
5574 errno_t
5575 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
5576 {
5577 int _err;
5578 struct vnop_blktooff_args a;
5579
5580 a.a_desc = &vnop_blktooff_desc;
5581 a.a_vp = vp;
5582 a.a_lblkno = lblkno;
5583 a.a_offset = offset;
5584
5585 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
5586 DTRACE_FSINFO(blktooff, vnode_t, vp);
5587
5588 return (_err);
5589 }
5590
5591 #if 0
5592 /*
5593 *#
5594 *#% offtoblk vp = = =
5595 *#
5596 */
5597 struct vnop_offtoblk_args {
5598 struct vnodeop_desc *a_desc;
5599 vnode_t a_vp;
5600 off_t a_offset;
5601 daddr64_t *a_lblkno;
5602 };
5603 #endif /* 0*/
5604 errno_t
5605 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
5606 {
5607 int _err;
5608 struct vnop_offtoblk_args a;
5609
5610 a.a_desc = &vnop_offtoblk_desc;
5611 a.a_vp = vp;
5612 a.a_offset = offset;
5613 a.a_lblkno = lblkno;
5614
5615 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
5616 DTRACE_FSINFO(offtoblk, vnode_t, vp);
5617
5618 return (_err);
5619 }
5620
5621 #if 0
5622 /*
5623 *#
5624 *#% blockmap vp L L L
5625 *#
5626 */
5627 struct vnop_blockmap_args {
5628 struct vnodeop_desc *a_desc;
5629 vnode_t a_vp;
5630 off_t a_foffset;
5631 size_t a_size;
5632 daddr64_t *a_bpn;
5633 size_t *a_run;
5634 void *a_poff;
5635 int a_flags;
5636 vfs_context_t a_context;
5637 };
5638 #endif /* 0*/
5639 errno_t
5640 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
5641 {
5642 int _err;
5643 struct vnop_blockmap_args a;
5644 size_t localrun = 0;
5645
5646 if (ctx == NULL) {
5647 ctx = vfs_context_current();
5648 }
5649 a.a_desc = &vnop_blockmap_desc;
5650 a.a_vp = vp;
5651 a.a_foffset = foffset;
5652 a.a_size = size;
5653 a.a_bpn = bpn;
5654 a.a_run = &localrun;
5655 a.a_poff = poff;
5656 a.a_flags = flags;
5657 a.a_context = ctx;
5658
5659 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
5660 DTRACE_FSINFO(blockmap, vnode_t, vp);
5661
5662 /*
5663 * We used a local variable to request information from the underlying
5664 * filesystem about the length of the I/O run in question. If
5665 * we get malformed output from the filesystem, we cap it to the length
5666 * requested, at most. Update 'run' on the way out.
5667 */
5668 if (_err == 0) {
5669 if (localrun > size) {
5670 localrun = size;
5671 }
5672
5673 if (run) {
5674 *run = localrun;
5675 }
5676 }
5677
5678 return (_err);
5679 }
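
/*
 * Illustrative sketch, not part of the original file: mapping a logical
 * file offset to a device block with VNOP_BLOCKMAP.  As noted above, the
 * returned run length never exceeds the size requested.  The wrapper
 * function is hypothetical.
 */
#if 0
static int
example_map_offset(vnode_t vp, off_t foffset, size_t iosize, vfs_context_t ctx)
{
	daddr64_t blkno = 0;
	size_t run = 0;
	int error;

	error = VNOP_BLOCKMAP(vp, foffset, iosize, &blkno, &run, NULL,
	    VNODE_READ, ctx);
	if (error == 0) {
		/* 'blkno' is the starting device block; at most 'run' bytes are contiguous. */
	}
	return (error);
}
#endif /* 0 */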
5680
5681 #if 0
5682 struct vnop_strategy_args {
5683 struct vnodeop_desc *a_desc;
5684 struct buf *a_bp;
5685 };
5686
5687 #endif /* 0*/
5688 errno_t
5689 VNOP_STRATEGY(struct buf *bp)
5690 {
5691 int _err;
5692 struct vnop_strategy_args a;
5693 vnode_t vp = buf_vnode(bp);
5694 a.a_desc = &vnop_strategy_desc;
5695 a.a_bp = bp;
5696 _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
5697 DTRACE_FSINFO(strategy, vnode_t, vp);
5698 return (_err);
5699 }
5700
5701 #if 0
5702 struct vnop_bwrite_args {
5703 struct vnodeop_desc *a_desc;
5704 buf_t a_bp;
5705 };
5706 #endif /* 0*/
5707 errno_t
5708 VNOP_BWRITE(struct buf *bp)
5709 {
5710 int _err;
5711 struct vnop_bwrite_args a;
5712 vnode_t vp = buf_vnode(bp);
5713 a.a_desc = &vnop_bwrite_desc;
5714 a.a_bp = bp;
5715 _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
5716 DTRACE_FSINFO(bwrite, vnode_t, vp);
5717 return (_err);
5718 }
5719
5720 #if 0
5721 struct vnop_kqfilt_add_args {
5722 struct vnodeop_desc *a_desc;
5723 struct vnode *a_vp;
5724 struct knote *a_kn;
5725 vfs_context_t a_context;
5726 };
5727 #endif
5728 errno_t
5729 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
5730 {
5731 int _err;
5732 struct vnop_kqfilt_add_args a;
5733
5734 a.a_desc = VDESC(vnop_kqfilt_add);
5735 a.a_vp = vp;
5736 a.a_kn = kn;
5737 a.a_context = ctx;
5738
5739 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
5740 DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
5741
5742 return(_err);
5743 }
5744
5745 #if 0
5746 struct vnop_kqfilt_remove_args {
5747 struct vnodeop_desc *a_desc;
5748 struct vnode *a_vp;
5749 uintptr_t a_ident;
5750 vfs_context_t a_context;
5751 };
5752 #endif
5753 errno_t
5754 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
5755 {
5756 int _err;
5757 struct vnop_kqfilt_remove_args a;
5758
5759 a.a_desc = VDESC(vnop_kqfilt_remove);
5760 a.a_vp = vp;
5761 a.a_ident = ident;
5762 a.a_context = ctx;
5763
5764 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
5765 DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
5766
5767 return(_err);
5768 }
5769
5770 errno_t
5771 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
5772 {
5773 int _err;
5774 struct vnop_monitor_args a;
5775
5776 a.a_desc = VDESC(vnop_monitor);
5777 a.a_vp = vp;
5778 a.a_events = events;
5779 a.a_flags = flags;
5780 a.a_handle = handle;
5781 a.a_context = ctx;
5782
5783 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
5784 DTRACE_FSINFO(monitor, vnode_t, vp);
5785
5786 return(_err);
5787 }
5788
5789 #if 0
5790 struct vnop_setlabel_args {
5791 struct vnodeop_desc *a_desc;
5792 struct vnode *a_vp;
5793 struct label *a_vl;
5794 vfs_context_t a_context;
5795 };
5796 #endif
5797 errno_t
5798 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
5799 {
5800 int _err;
5801 struct vnop_setlabel_args a;
5802
5803 a.a_desc = VDESC(vnop_setlabel);
5804 a.a_vp = vp;
5805 a.a_vl = label;
5806 a.a_context = ctx;
5807
5808 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
5809 DTRACE_FSINFO(setlabel, vnode_t, vp);
5810
5811 return(_err);
5812 }
5813
5814
5815 #if NAMEDSTREAMS
5816 /*
5817 * Get a named stream
5818 */
5819 errno_t
5820 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
5821 {
5822 int _err;
5823 struct vnop_getnamedstream_args a;
5824
5825 a.a_desc = &vnop_getnamedstream_desc;
5826 a.a_vp = vp;
5827 a.a_svpp = svpp;
5828 a.a_name = name;
5829 a.a_operation = operation;
5830 a.a_flags = flags;
5831 a.a_context = ctx;
5832
5833 _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
5834 DTRACE_FSINFO(getnamedstream, vnode_t, vp);
5835 return (_err);
5836 }
5837
5838 /*
5839 * Create a named stream
5840 */
5841 errno_t
5842 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
5843 {
5844 int _err;
5845 struct vnop_makenamedstream_args a;
5846
5847 a.a_desc = &vnop_makenamedstream_desc;
5848 a.a_vp = vp;
5849 a.a_svpp = svpp;
5850 a.a_name = name;
5851 a.a_flags = flags;
5852 a.a_context = ctx;
5853
5854 _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
5855 DTRACE_FSINFO(makenamedstream, vnode_t, vp);
5856 return (_err);
5857 }
5858
5859
5860 /*
5861 * Remove a named stream
5862 */
5863 errno_t
5864 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
5865 {
5866 int _err;
5867 struct vnop_removenamedstream_args a;
5868
5869 a.a_desc = &vnop_removenamedstream_desc;
5870 a.a_vp = vp;
5871 a.a_svp = svp;
5872 a.a_name = name;
5873 a.a_flags = flags;
5874 a.a_context = ctx;
5875
5876 _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
5877 DTRACE_FSINFO(removenamedstream, vnode_t, vp);
5878 return (_err);
5879 }
5880 #endif