1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/vnode_internal.h>
88 #include <sys/stat.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
91 #include <sys/buf.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
95 #include <sys/mbuf.h>
96 #include <sys/syslog.h>
97 #include <sys/ubc.h>
98 #include <sys/vm.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/event.h>
102 #include <sys/fsevents.h>
103 #include <sys/user.h>
104 #include <sys/lockf.h>
105 #include <sys/xattr.h>
106
107 #include <kern/assert.h>
108 #include <kern/kalloc.h>
109 #include <kern/task.h>
110
111 #include <libkern/OSByteOrder.h>
112
113 #include <miscfs/specfs/specdev.h>
114
115 #include <mach/mach_types.h>
116 #include <mach/memory_object_types.h>
117 #include <mach/task.h>
118
119 #if CONFIG_MACF
120 #include <security/mac_framework.h>
121 #endif
122
123 #include <sys/sdt.h>
124
125 #define ESUCCESS 0
126 #undef mount_t
127 #undef vnode_t
128
129 #define COMPAT_ONLY
130
131 #define NATIVE_XATTR(VP) \
132 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
133
134 #if CONFIG_APPLEDOUBLE
135 static void xattrfile_remove(vnode_t dvp, const char *basename,
136 vfs_context_t ctx, int force);
137 static void xattrfile_setattr(vnode_t dvp, const char * basename,
138 struct vnode_attr * vap, vfs_context_t ctx);
139 #endif /* CONFIG_APPLEDOUBLE */
140
141 static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
142
143 /*
144 * vnode_setneedinactive
145 *
146 * Description: Indicate that when the last iocount on this vnode goes away,
147 * and the usecount is also zero, we should inform the filesystem
148 * via VNOP_INACTIVE.
149 *
150 * Parameters: vnode_t vnode to mark
151 *
152 * Returns: Nothing
153 *
154 * Notes: Notably used when we're deleting a file--we need not have a
155 * usecount, so VNOP_INACTIVE may not get called by anyone. We
156 * want it called when we drop our iocount.
157 */
158 void
159 vnode_setneedinactive(vnode_t vp)
160 {
161 cache_purge(vp);
162
163 vnode_lock_spin(vp);
164 vp->v_lflag |= VL_NEEDINACTIVE;
165 vnode_unlock(vp);
166 }
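/*
 * Usage sketch (illustrative; a hypothetical filesystem remove path, not
 * code from this file): once the last link to a file has been removed and
 * no usecount remains, the FS can request that VNOP_INACTIVE be delivered
 * as soon as the outstanding iocount drains:
 *
 *	if (file_was_deleted)
 *		vnode_setneedinactive(vp);
 */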
167
168
169 /* ====================================================================== */
170 /* ************ EXTERNAL KERNEL APIS ********************************** */
171 /* ====================================================================== */
172
173 /*
174 * implementations of exported VFS operations
175 */
176 int
177 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
178 {
179 int error;
180
181 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
182 return(ENOTSUP);
183
184 if (vfs_context_is64bit(ctx)) {
185 if (vfs_64bitready(mp)) {
186 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
187 }
188 else {
189 error = ENOTSUP;
190 }
191 }
192 else {
193 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
194 }
195
196 return (error);
197 }
198
199 int
200 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
201 {
202 int error;
203
204 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
205 return(ENOTSUP);
206
207 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
208
209 return (error);
210 }
211
212 int
213 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
214 {
215 int error;
216
217 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
218 return(ENOTSUP);
219
220 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
221
222 return (error);
223 }
224
225 /*
226 * Returns: 0 Success
227 * ENOTSUP Not supported
228 * <vfs_root>:ENOENT
229 * <vfs_root>:???
230 *
231 * Note: The return codes from the underlying VFS's root routine can't
232 * be fully enumerated here, since third party VFS authors may not
233 * limit their error returns to the ones documented here, even
234 * though this may result in some programs functioning incorrectly.
235 *
236 * The return codes documented above are those which may currently
237 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
238 * for a call to hfs_vget on the volume mount point, not including
239 * additional error codes which may be propagated from underlying
240 * routines called by hfs_vget.
241 */
242 int
243 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
244 {
245 int error;
246
247 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
248 return(ENOTSUP);
249
250 if (ctx == NULL) {
251 ctx = vfs_context_current();
252 }
253
254 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
255
256 return (error);
257 }
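/*
 * Usage sketch (illustrative, not from this file): the root vnode comes back
 * with an iocount that the caller must drop with vnode_put():
 *
 *	vnode_t rvp = NULLVP;
 *	if (VFS_ROOT(mp, &rvp, vfs_context_current()) == 0) {
 *		... use rvp ...
 *		vnode_put(rvp);
 *	}
 */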
258
259 int
260 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
261 {
262 int error;
263
264 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
265 return(ENOTSUP);
266
267 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
268
269 return (error);
270 }
271
272 int
273 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
274 {
275 int error;
276
277 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
278 return(ENOTSUP);
279
280 if (ctx == NULL) {
281 ctx = vfs_context_current();
282 }
283
284 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
285
286 return(error);
287 }
288
289 int
290 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
291 {
292 int error;
293
294 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
295 return(ENOTSUP);
296
297 if (ctx == NULL) {
298 ctx = vfs_context_current();
299 }
300
301 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
302
303 return(error);
304 }
305
306 int
307 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
308 {
309 int error;
310
311 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
312 return(ENOTSUP);
313
314 if (ctx == NULL) {
315 ctx = vfs_context_current();
316 }
317
318 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
319
320 return(error);
321 }
322
323 int
324 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
325 {
326 int error;
327
328 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
329 return(ENOTSUP);
330
331 if (ctx == NULL) {
332 ctx = vfs_context_current();
333 }
334
335 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
336
337 return(error);
338 }
339
340 int
341 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
342 {
343 int error;
344
345 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
346 return(ENOTSUP);
347
348 if (ctx == NULL) {
349 ctx = vfs_context_current();
350 }
351
352 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
353
354 return(error);
355 }
356
357 int
358 VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
359 {
360 int error;
361
362 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
363 return(ENOTSUP);
364
365 if (ctx == NULL) {
366 ctx = vfs_context_current();
367 }
368
369 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
370
371 return(error);
372 }
373
374 int VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
375 int flags, vfs_context_t context)
376 {
377 if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl)
378 return ENOTSUP;
379
380 return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
381 context ?: vfs_context_current());
382 }
383
384 int
385 VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
386 {
387 int error;
388
389 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0))
390 return(ENOTSUP);
391
392 if (ctx == NULL)
393 ctx = vfs_context_current();
394
395 error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
396
397 return (error);
398 }
399
400 /* returns the cached throttle mask for the mount_t */
401 uint64_t
402 vfs_throttle_mask(mount_t mp)
403 {
404 return(mp->mnt_throttle_mask);
405 }
406
407 /* returns a copy of vfs type name for the mount_t */
408 void
409 vfs_name(mount_t mp, char *buffer)
410 {
411 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
412 }
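/*
 * Usage sketch (illustrative): the destination buffer must be at least
 * MFSNAMELEN bytes:
 *
 *	char fsname[MFSNAMELEN];
 *
 *	vfs_name(mp, fsname);
 */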
413
414 /* returns vfs type number for the mount_t */
415 int
416 vfs_typenum(mount_t mp)
417 {
418 return(mp->mnt_vtable->vfc_typenum);
419 }
420
421 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
422 void*
423 vfs_mntlabel(mount_t mp)
424 {
425 return (void*)mp->mnt_mntlabel;
426 }
427
428 /* returns the command modifier and visible flags of mount_t, i.e. MNT_CMDFLAGS | MNT_VISFLAGMASK */
429 uint64_t
430 vfs_flags(mount_t mp)
431 {
432 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
433 }
434
435 /* set any of the command modifier flags (MNT_CMDFLAGS) in mount_t */
436 void
437 vfs_setflags(mount_t mp, uint64_t flags)
438 {
439 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
440
441 mount_lock(mp);
442 mp->mnt_flag |= lflags;
443 mount_unlock(mp);
444 }
445
446 /* clear any of the command modifier flags (MNT_CMDFLAGS) in mount_t */
447 void
448 vfs_clearflags(mount_t mp , uint64_t flags)
449 {
450 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
451
452 mount_lock(mp);
453 mp->mnt_flag &= ~lflags;
454 mount_unlock(mp);
455 }
456
457 /* Is the mount_t ronly and upgrade read/write requested? */
458 int
459 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
460 {
461 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
462 }
463
464
465 /* Is the mount_t mounted ronly */
466 int
467 vfs_isrdonly(mount_t mp)
468 {
469 return (mp->mnt_flag & MNT_RDONLY);
470 }
471
472 /* Is the mount_t mounted for filesystem synchronous writes? */
473 int
474 vfs_issynchronous(mount_t mp)
475 {
476 return (mp->mnt_flag & MNT_SYNCHRONOUS);
477 }
478
479 /* Is the mount_t mounted read/write? */
480 int
481 vfs_isrdwr(mount_t mp)
482 {
483 return ((mp->mnt_flag & MNT_RDONLY) == 0);
484 }
485
486
487 /* Is mount_t marked for update (ie MNT_UPDATE) */
488 int
489 vfs_isupdate(mount_t mp)
490 {
491 return (mp->mnt_flag & MNT_UPDATE);
492 }
493
494
495 /* Is mount_t marked for reload (ie MNT_RELOAD) */
496 int
497 vfs_isreload(mount_t mp)
498 {
499 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
500 }
501
502 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
503 int
504 vfs_isforce(mount_t mp)
505 {
506 if (mp->mnt_lflag & MNT_LFORCE)
507 return(1);
508 else
509 return(0);
510 }
511
512 int
513 vfs_isunmount(mount_t mp)
514 {
515 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
516 return 1;
517 } else {
518 return 0;
519 }
520 }
521
522 int
523 vfs_64bitready(mount_t mp)
524 {
525 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
526 return(1);
527 else
528 return(0);
529 }
530
531
532 int
533 vfs_authcache_ttl(mount_t mp)
534 {
535 if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) )
536 return (mp->mnt_authcache_ttl);
537 else
538 return (CACHED_RIGHT_INFINITE_TTL);
539 }
540
541 void
542 vfs_setauthcache_ttl(mount_t mp, int ttl)
543 {
544 mount_lock(mp);
545 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
546 mp->mnt_authcache_ttl = ttl;
547 mount_unlock(mp);
548 }
549
550 void
551 vfs_clearauthcache_ttl(mount_t mp)
552 {
553 mount_lock(mp);
554 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
555 /*
556 * back to the default TTL value in case
557 * MNTK_AUTH_OPAQUE is set on this mount
558 */
559 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
560 mount_unlock(mp);
561 }
562
563 int
564 vfs_authopaque(mount_t mp)
565 {
566 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
567 return(1);
568 else
569 return(0);
570 }
571
572 int
573 vfs_authopaqueaccess(mount_t mp)
574 {
575 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
576 return(1);
577 else
578 return(0);
579 }
580
581 void
582 vfs_setauthopaque(mount_t mp)
583 {
584 mount_lock(mp);
585 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
586 mount_unlock(mp);
587 }
588
589 void
590 vfs_setauthopaqueaccess(mount_t mp)
591 {
592 mount_lock(mp);
593 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
594 mount_unlock(mp);
595 }
596
597 void
598 vfs_clearauthopaque(mount_t mp)
599 {
600 mount_lock(mp);
601 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
602 mount_unlock(mp);
603 }
604
605 void
606 vfs_clearauthopaqueaccess(mount_t mp)
607 {
608 mount_lock(mp);
609 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
610 mount_unlock(mp);
611 }
612
613 void
614 vfs_setextendedsecurity(mount_t mp)
615 {
616 mount_lock(mp);
617 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
618 mount_unlock(mp);
619 }
620
621 void
622 vfs_clearextendedsecurity(mount_t mp)
623 {
624 mount_lock(mp);
625 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
626 mount_unlock(mp);
627 }
628
629 void
630 vfs_setnoswap(mount_t mp)
631 {
632 mount_lock(mp);
633 mp->mnt_kern_flag |= MNTK_NOSWAP;
634 mount_unlock(mp);
635 }
636
637 void
638 vfs_clearnoswap(mount_t mp)
639 {
640 mount_lock(mp);
641 mp->mnt_kern_flag &= ~MNTK_NOSWAP;
642 mount_unlock(mp);
643 }
644
645 int
646 vfs_extendedsecurity(mount_t mp)
647 {
648 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
649 }
650
651 /* returns the max size of a short symlink in this mount_t */
652 uint32_t
653 vfs_maxsymlen(mount_t mp)
654 {
655 return(mp->mnt_maxsymlinklen);
656 }
657
658 /* set max size of short symlink on mount_t */
659 void
660 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
661 {
662 mp->mnt_maxsymlinklen = symlen;
663 }
664
665 /* return a pointer to the RO vfs_statfs associated with mount_t */
666 struct vfsstatfs *
667 vfs_statfs(mount_t mp)
668 {
669 return(&mp->mnt_vfsstat);
670 }
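/*
 * Usage sketch (illustrative): the returned vfsstatfs should be treated as
 * read-only; typical use is to consult identifying fields in place:
 *
 *	struct vfsstatfs *sp = vfs_statfs(mp);
 *	... read sp->f_mntonname, sp->f_bsize, sp->f_fsid ...
 */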
671
672 int
673 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
674 {
675 int error;
676
677 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
678 return(error);
679
680 /*
681 * If we have a filesystem create time, use it to default some others.
682 */
683 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
684 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
685 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
686 }
687
688 return(0);
689 }
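/*
 * Usage sketch (illustrative, not from this file): callers mark the
 * attributes they want with VFSATTR_WANTED() and check VFSATTR_IS_SUPPORTED()
 * on the way back:
 *
 *	struct vfs_attr va;
 *
 *	VFSATTR_INIT(&va);
 *	VFSATTR_WANTED(&va, f_modify_time);
 *	if (vfs_getattr(mp, &va, vfs_context_current()) == 0 &&
 *	    VFSATTR_IS_SUPPORTED(&va, f_modify_time)) {
 *		... use va.f_modify_time ...
 *	}
 */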
690
691 int
692 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
693 {
694 int error;
695
696 if (vfs_isrdonly(mp))
697 return EROFS;
698
699 error = VFS_SETATTR(mp, vfa, ctx);
700
701 /*
702 * If we had alternate ways of setting vfs attributes, we'd
703 * fall back here.
704 */
705
706 return error;
707 }
708
709 /* return the private data handle stored in mount_t */
710 void *
711 vfs_fsprivate(mount_t mp)
712 {
713 return(mp->mnt_data);
714 }
715
716 /* set the private data handle in mount_t */
717 void
718 vfs_setfsprivate(mount_t mp, void *mntdata)
719 {
720 mount_lock(mp);
721 mp->mnt_data = mntdata;
722 mount_unlock(mp);
723 }
724
725 /* query whether the mount point supports native EAs */
726 int
727 vfs_nativexattrs(mount_t mp) {
728 return (mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS);
729 }
730
731 /*
732 * return the block size of the underlying
733 * device associated with mount_t
734 */
735 int
736 vfs_devblocksize(mount_t mp) {
737
738 return(mp->mnt_devblocksize);
739 }
740
741 /*
742 * Returns vnode with an iocount that must be released with vnode_put()
743 */
744 vnode_t
745 vfs_vnodecovered(mount_t mp)
746 {
747 vnode_t vp = mp->mnt_vnodecovered;
748 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
749 return NULL;
750 } else {
751 return vp;
752 }
753 }
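/*
 * Usage sketch (illustrative): the covered vnode is returned with an iocount
 * which the caller owns:
 *
 *	vnode_t cvp = vfs_vnodecovered(mp);
 *	if (cvp != NULLVP) {
 *		... examine the vnode this mount covers ...
 *		vnode_put(cvp);
 *	}
 */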
754
755 /*
756 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
757 * The iocount must be released with vnode_put(). Note that this KPI is subtle
758 * with respect to the validity of using this device vnode for anything substantial
759 * (which is discouraged). If commands are sent to the device driver without
760 * taking proper steps to ensure that the device is still open, chaos may ensue.
761 * Similarly, this routine should only be called if there is some guarantee that
762 * the mount itself is still valid.
763 */
764 vnode_t
765 vfs_devvp(mount_t mp)
766 {
767 vnode_t vp = mp->mnt_devvp;
768
769 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
770 return vp;
771 }
772
773 return NULLVP;
774 }
775
776 /*
777 * return the io attributes associated with mount_t
778 */
779 void
780 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
781 {
782 ioattrp->io_reserved[0] = NULL;
783 ioattrp->io_reserved[1] = NULL;
784 if (mp == NULL) {
785 ioattrp->io_maxreadcnt = MAXPHYS;
786 ioattrp->io_maxwritecnt = MAXPHYS;
787 ioattrp->io_segreadcnt = 32;
788 ioattrp->io_segwritecnt = 32;
789 ioattrp->io_maxsegreadsize = MAXPHYS;
790 ioattrp->io_maxsegwritesize = MAXPHYS;
791 ioattrp->io_devblocksize = DEV_BSIZE;
792 ioattrp->io_flags = 0;
793 ioattrp->io_max_swappin_available = 0;
794 } else {
795 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
796 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
797 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
798 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
799 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
800 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
801 ioattrp->io_devblocksize = mp->mnt_devblocksize;
802 ioattrp->io_flags = mp->mnt_ioflags;
803 ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
804 }
805 }
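/*
 * Usage sketch (illustrative; "xfer_size" is a hypothetical local): a caller
 * can clamp its transfers to the limits advertised for the mount's device:
 *
 *	struct vfsioattr ioattr;
 *
 *	vfs_ioattr(mp, &ioattr);
 *	if (xfer_size > ioattr.io_maxreadcnt)
 *		xfer_size = ioattr.io_maxreadcnt;
 */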
806
807
808 /*
809 * set the IO attributes associated with mount_t
810 */
811 void
812 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
813 {
814 if (mp == NULL)
815 return;
816 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
817 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
818 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
819 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
820 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
821 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
822 mp->mnt_devblocksize = ioattrp->io_devblocksize;
823 mp->mnt_ioflags = ioattrp->io_flags;
824 mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
825 }
826
827 /*
828 * Add a new filesystem into the kernel, as described by the passed-in
829 * vfs_fsentry structure. It fills in the vnode
830 * dispatch vectors that are to be supplied to vnode_create() when vnodes are created.
831 * It returns a handle which is to be used when the FS is to be removed.
832 */
833 typedef int (*PFI)(void *);
834 extern int vfs_opv_numops;
835 errno_t
836 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
837 {
838 struct vfstable *newvfstbl = NULL;
839 int i,j;
840 int (***opv_desc_vector_p)(void *);
841 int (**opv_desc_vector)(void *);
842 struct vnodeopv_entry_desc *opve_descp;
843 int desccount;
844 int descsize;
845 PFI *descptr;
846
847 /*
848 * This routine is responsible for all the initialization that would
849 * ordinarily be done as part of the system startup;
850 */
851
852 if (vfe == (struct vfs_fsentry *)0)
853 return(EINVAL);
854
855 desccount = vfe->vfe_vopcnt;
856 if ((desccount <=0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
857 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
858 return(EINVAL);
859
860 /* Non-threadsafe filesystems are not supported */
861 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
862 return (EINVAL);
863 }
864
865 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
866 M_WAITOK);
867 bzero(newvfstbl, sizeof(struct vfstable));
868 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
869 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
870 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
871 newvfstbl->vfc_typenum = maxvfstypenum++;
872 else
873 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
874
875 newvfstbl->vfc_refcount = 0;
876 newvfstbl->vfc_flags = 0;
877 newvfstbl->vfc_mountroot = NULL;
878 newvfstbl->vfc_next = NULL;
879 newvfstbl->vfc_vfsflags = 0;
880 if (vfe->vfe_flags & VFS_TBL64BITREADY)
881 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
882 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2)
883 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
884 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2)
885 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
886 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
887 newvfstbl->vfc_flags |= MNT_LOCAL;
888 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0)
889 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
890 else
891 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
892
893 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR)
894 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
895 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT)
896 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
897 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED)
898 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
899 if (vfe->vfe_flags & VFS_TBLNOMACLABEL)
900 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
901 if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME)
902 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
903 if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME)
904 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
905 if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT)
906 newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
907
908 /*
909 * Allocate and init the vectors.
910 * Also handle backwards compatibility.
911 *
912 * We allocate one large block to hold all <desccount>
913 * vnode operation vectors stored contiguously.
914 */
915 /* XXX - shouldn't be M_TEMP */
916
917 descsize = desccount * vfs_opv_numops * sizeof(PFI);
918 MALLOC(descptr, PFI *, descsize,
919 M_TEMP, M_WAITOK);
920 bzero(descptr, descsize);
921
922 newvfstbl->vfc_descptr = descptr;
923 newvfstbl->vfc_descsize = descsize;
924
925 newvfstbl->vfc_sysctl = NULL;
926
927 for (i= 0; i< desccount; i++ ) {
928 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
929 /*
930 * Fill in the caller's pointer to the start of the i'th vector.
931 * They'll need to supply it when calling vnode_create.
932 */
933 opv_desc_vector = descptr + i * vfs_opv_numops;
934 *opv_desc_vector_p = opv_desc_vector;
935
936 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
937 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
938
939 /* Silently skip known-disabled operations */
940 if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
941 printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
942 vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
943 continue;
944 }
945
946 /*
947 * Sanity check: is this operation listed
948 * in the list of operations? We check this
949 * by seeing if its offset is zero. Since
950 * the default routine should always be listed
951 * first, it should be the only one with a zero
952 * offset. Any other operation with a zero
953 * offset is probably not listed in
954 * vfs_op_descs, and so is probably an error.
955 *
956 * A panic here means the layer programmer
957 * has committed the all-too common bug
958 * of adding a new operation to the layer's
959 * list of vnode operations but
960 * not adding the operation to the system-wide
961 * list of supported operations.
962 */
963 if (opve_descp->opve_op->vdesc_offset == 0 &&
964 opve_descp->opve_op != VDESC(vnop_default)) {
965 printf("vfs_fsadd: operation %s not listed in %s.\n",
966 opve_descp->opve_op->vdesc_name,
967 "vfs_op_descs");
968 panic("vfs_fsadd: bad operation");
969 }
970 /*
971 * Fill in this entry.
972 */
973 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
974 opve_descp->opve_impl;
975 }
976
977
978 /*
979 * Finally, go back and replace unfilled routines
980 * with their default. (Sigh, an O(n^3) algorithm. I
981 * could make it better, but that'd be work, and n is small.)
982 */
983 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
984
985 /*
986 * Force every operations vector to have a default routine.
987 */
988 opv_desc_vector = *opv_desc_vector_p;
989 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
990 panic("vfs_fsadd: operation vector without default routine.");
991 for (j = 0; j < vfs_opv_numops; j++)
992 if (opv_desc_vector[j] == NULL)
993 opv_desc_vector[j] =
994 opv_desc_vector[VOFFSET(vnop_default)];
995
996 } /* end of each vnodeopv_desc parsing */
997
998
999
1000 *handle = vfstable_add(newvfstbl);
1001
1002 if (newvfstbl->vfc_typenum <= maxvfstypenum )
1003 maxvfstypenum = newvfstbl->vfc_typenum + 1;
1004
1005 if (newvfstbl->vfc_vfsops->vfs_init) {
1006 struct vfsconf vfsc;
1007 bzero(&vfsc, sizeof(struct vfsconf));
1008 vfsc.vfc_reserved1 = 0;
1009 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1010 vfsc.vfc_typenum = (*handle)->vfc_typenum;
1011 vfsc.vfc_refcount = (*handle)->vfc_refcount;
1012 vfsc.vfc_flags = (*handle)->vfc_flags;
1013 vfsc.vfc_reserved2 = 0;
1014 vfsc.vfc_reserved3 = 0;
1015
1016 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1017 }
1018
1019 FREE(newvfstbl, M_TEMP);
1020
1021 return(0);
1022 }
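/*
 * Registration sketch (illustrative; "examplefs_vfsops",
 * "examplefs_vnodeop_opv_desc" and "examplefs_handle" are hypothetical names
 * belonging to a loadable filesystem, not to this file):
 *
 *	static vfstable_t examplefs_handle;
 *
 *	errno_t
 *	examplefs_register(void)
 *	{
 *		struct vnodeopv_desc *opvdescs[] = { &examplefs_vnodeop_opv_desc };
 *		struct vfs_fsentry vfe;
 *
 *		bzero(&vfe, sizeof(vfe));
 *		vfe.vfe_vfsops = &examplefs_vfsops;
 *		vfe.vfe_vopcnt = 1;
 *		vfe.vfe_opvdescs = opvdescs;
 *		strlcpy(vfe.vfe_fsname, "examplefs", sizeof(vfe.vfe_fsname));
 *		vfe.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY |
 *		    VFS_TBLNOTYPENUM;
 *		return vfs_fsadd(&vfe, &examplefs_handle);
 *	}
 *
 * The matching vfs_fsremove(examplefs_handle) is made from the unload path,
 * and fails with EBUSY while any instance of the FS is still mounted.
 */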
1023
1024 /*
1025 * Removes the filesystem from kernel.
1026 * The argument passed in is the handle that was given when the
1027 * file system was added.
1028 */
1029 errno_t
1030 vfs_fsremove(vfstable_t handle)
1031 {
1032 struct vfstable * vfstbl = (struct vfstable *)handle;
1033 void *old_desc = NULL;
1034 errno_t err;
1035
1036 /* Preflight check for any mounts */
1037 mount_list_lock();
1038 if ( vfstbl->vfc_refcount != 0 ) {
1039 mount_list_unlock();
1040 return EBUSY;
1041 }
1042
1043 /*
1044 * save the old descriptor; the free cannot occur unconditionally,
1045 * since vfstable_del() may fail.
1046 */
1047 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1048 old_desc = vfstbl->vfc_descptr;
1049 }
1050 err = vfstable_del(vfstbl);
1051
1052 mount_list_unlock();
1053
1054 /* free the descriptor if the delete was successful */
1055 if (err == 0 && old_desc) {
1056 FREE(old_desc, M_TEMP);
1057 }
1058
1059 return(err);
1060 }
1061
1062 void vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
1063 {
1064 mp->mnt_fsowner = uid;
1065 mp->mnt_fsgroup = gid;
1066 }
1067
1068 /*
1069 * Callers should be careful how they use this; accessing
1070 * mnt_last_write_completed_timestamp is not thread-safe. Writing to
1071 * it isn't either. Point is: be prepared to deal with strange values
1072 * being returned.
1073 */
1074 uint64_t vfs_idle_time(mount_t mp)
1075 {
1076 if (mp->mnt_pending_write_size)
1077 return 0;
1078
1079 struct timeval now;
1080
1081 microuptime(&now);
1082
1083 return ((now.tv_sec
1084 - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
1085 + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec);
1086 }
1087
1088 int
1089 vfs_context_pid(vfs_context_t ctx)
1090 {
1091 return (proc_pid(vfs_context_proc(ctx)));
1092 }
1093
1094 int
1095 vfs_context_suser(vfs_context_t ctx)
1096 {
1097 return (suser(ctx->vc_ucred, NULL));
1098 }
1099
1100 /*
1101 * Return bit field of signals posted to all threads in the context's process.
1102 *
1103 * XXX Signals should be tied to threads, not processes, for most uses of this
1104 * XXX call.
1105 */
1106 int
1107 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1108 {
1109 proc_t p = vfs_context_proc(ctx);
1110 if (p)
1111 return(proc_pendingsignals(p, mask));
1112 return(0);
1113 }
1114
1115 int
1116 vfs_context_is64bit(vfs_context_t ctx)
1117 {
1118 proc_t proc = vfs_context_proc(ctx);
1119
1120 if (proc)
1121 return(proc_is64bit(proc));
1122 return(0);
1123 }
1124
1125
1126 /*
1127 * vfs_context_proc
1128 *
1129 * Description: Given a vfs_context_t, return the proc_t associated with it.
1130 *
1131 * Parameters: vfs_context_t The context to use
1132 *
1133 * Returns: proc_t The process for this context
1134 *
1135 * Notes: This function will return the current_proc() if any of the
1136 * following conditions are true:
1137 *
1138 * o The supplied context pointer is NULL
1139 * o There is no Mach thread associated with the context
1140 * o There is no Mach task associated with the Mach thread
1141 * o There is no proc_t associated with the Mach task
1142 * o The proc_t has no per process open file table
1143 * o The proc_t is post-vfork()
1144 *
1145 * This causes this function to return a value matching as
1146 * closely as possible the previous behaviour, while at the
1147 * same time avoiding the task lending that results from vfork()
1148 */
1149 proc_t
1150 vfs_context_proc(vfs_context_t ctx)
1151 {
1152 proc_t proc = NULL;
1153
1154 if (ctx != NULL && ctx->vc_thread != NULL)
1155 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1156 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
1157 proc = NULL;
1158
1159 return(proc == NULL ? current_proc() : proc);
1160 }
1161
1162 /*
1163 * vfs_context_get_special_port
1164 *
1165 * Description: Return the requested special port from the task associated
1166 * with the given context.
1167 *
1168 * Parameters: vfs_context_t The context to use
1169 * int Index of special port
1170 * ipc_port_t * Pointer to returned port
1171 *
1172 * Returns: kern_return_t see task_get_special_port()
1173 */
1174 kern_return_t
1175 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1176 {
1177 task_t task = NULL;
1178
1179 if (ctx != NULL && ctx->vc_thread != NULL)
1180 task = get_threadtask(ctx->vc_thread);
1181
1182 return task_get_special_port(task, which, portp);
1183 }
1184
1185 /*
1186 * vfs_context_set_special_port
1187 *
1188 * Description: Set the requested special port in the task associated
1189 * with the given context.
1190 *
1191 * Parameters: vfs_context_t The context to use
1192 * int Index of special port
1193 * ipc_port_t New special port
1194 *
1195 * Returns: kern_return_t see task_set_special_port()
1196 */
1197 kern_return_t
1198 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1199 {
1200 task_t task = NULL;
1201
1202 if (ctx != NULL && ctx->vc_thread != NULL)
1203 task = get_threadtask(ctx->vc_thread);
1204
1205 return task_set_special_port(task, which, port);
1206 }
1207
1208 /*
1209 * vfs_context_thread
1210 *
1211 * Description: Return the Mach thread associated with a vfs_context_t
1212 *
1213 * Parameters: vfs_context_t The context to use
1214 *
1215 * Returns: thread_t The thread for this context, or
1216 * NULL, if there is not one.
1217 *
1218 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1219 * as a result of a static vfs_context_t declaration in a function
1220 * and will result in this function returning NULL.
1221 *
1222 * This is intentional; this function should NOT return the
1223 * current_thread() in this case.
1224 */
1225 thread_t
1226 vfs_context_thread(vfs_context_t ctx)
1227 {
1228 return(ctx->vc_thread);
1229 }
1230
1231
1232 /*
1233 * vfs_context_cwd
1234 *
1235 * Description: Returns a reference on the vnode for the current working
1236 * directory for the supplied context
1237 *
1238 * Parameters: vfs_context_t The context to use
1239 *
1240 * Returns: vnode_t The current working directory
1241 * for this context
1242 *
1243 * Notes: The function first attempts to obtain the current directory
1244 * from the thread, and if it is not present there, falls back
1245 * to obtaining it from the process instead. If it can't be
1246 * obtained from either place, we return NULLVP.
1247 */
1248 vnode_t
1249 vfs_context_cwd(vfs_context_t ctx)
1250 {
1251 vnode_t cwd = NULLVP;
1252
1253 if(ctx != NULL && ctx->vc_thread != NULL) {
1254 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1255 proc_t proc;
1256
1257 /*
1258 * Get the cwd from the thread; if there isn't one, get it
1259 * from the process, instead.
1260 */
1261 if ((cwd = uth->uu_cdir) == NULLVP &&
1262 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1263 proc->p_fd != NULL)
1264 cwd = proc->p_fd->fd_cdir;
1265 }
1266
1267 return(cwd);
1268 }
1269
1270 /*
1271 * vfs_context_create
1272 *
1273 * Description: Allocate and initialize a new context.
1274 *
1275 * Parameters: vfs_context_t: Context to copy, or NULL for new
1276 *
1277 * Returns: Pointer to new context
1278 *
1279 * Notes: Copy cred and thread from argument, if available; else
1280 * initialize with current thread and new cred. Returns
1281 * with a reference held on the credential.
1282 */
1283 vfs_context_t
1284 vfs_context_create(vfs_context_t ctx)
1285 {
1286 vfs_context_t newcontext;
1287
1288 newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));
1289
1290 if (newcontext) {
1291 kauth_cred_t safecred;
1292 if (ctx) {
1293 newcontext->vc_thread = ctx->vc_thread;
1294 safecred = ctx->vc_ucred;
1295 } else {
1296 newcontext->vc_thread = current_thread();
1297 safecred = kauth_cred_get();
1298 }
1299 if (IS_VALID_CRED(safecred))
1300 kauth_cred_ref(safecred);
1301 newcontext->vc_ucred = safecred;
1302 return(newcontext);
1303 }
1304 return(NULL);
1305 }
1306
1307
1308 vfs_context_t
1309 vfs_context_current(void)
1310 {
1311 vfs_context_t ctx = NULL;
1312 volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1313
1314 if (ut != NULL ) {
1315 if (ut->uu_context.vc_ucred != NULL) {
1316 ctx = &ut->uu_context;
1317 }
1318 }
1319
1320 return(ctx == NULL ? vfs_context_kernel() : ctx);
1321 }
1322
1323
1324 /*
1325 * XXX Do not ask
1326 *
1327 * Dangerous hack - adopt the first kernel thread as the current thread, to
1328 * get to the vfs_context_t in the uthread associated with a kernel thread.
1329 * This is used by UDF to make the call into IOCDMediaBSDClient,
1330 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1331 * ioctl() is being called from kernel or user space (and all this because
1332 * we do not pass threads into our ioctl()'s, instead of processes).
1333 *
1334 * This is also used by imageboot_setup(), called early from bsd_init() after
1335 * kernproc has been given a credential.
1336 *
1337 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1338 * of many Mach headers to do the reference directly rather than indirectly;
1339 * we will need to forgo this convenience when we retire proc_thread().
1340 */
1341 static struct vfs_context kerncontext;
1342 vfs_context_t
1343 vfs_context_kernel(void)
1344 {
1345 if (kerncontext.vc_ucred == NOCRED)
1346 kerncontext.vc_ucred = kernproc->p_ucred;
1347 if (kerncontext.vc_thread == NULL)
1348 kerncontext.vc_thread = proc_thread(kernproc);
1349
1350 return(&kerncontext);
1351 }
1352
1353
1354 int
1355 vfs_context_rele(vfs_context_t ctx)
1356 {
1357 if (ctx) {
1358 if (IS_VALID_CRED(ctx->vc_ucred))
1359 kauth_cred_unref(&ctx->vc_ucred);
1360 kfree(ctx, sizeof(struct vfs_context));
1361 }
1362 return(0);
1363 }
1364
1365
1366 kauth_cred_t
1367 vfs_context_ucred(vfs_context_t ctx)
1368 {
1369 return (ctx->vc_ucred);
1370 }
1371
1372 /*
1373 * Return true if the context is owned by the superuser.
1374 */
1375 int
1376 vfs_context_issuser(vfs_context_t ctx)
1377 {
1378 return(kauth_cred_issuser(vfs_context_ucred(ctx)));
1379 }
1380
1381 int vfs_context_iskernel(vfs_context_t ctx)
1382 {
1383 return ctx == &kerncontext;
1384 }
1385
1386 /*
1387 * Given a context, for all fields of vfs_context_t which
1388 * are not held with a reference, set those fields to the
1389 * values for the current execution context. Currently, this
1390 * just means the vc_thread.
1391 *
1392 * Returns: 0 for success, nonzero for failure
1393 *
1394 * The intended use is:
1395 * 1. vfs_context_create() gets the caller a context
1396 * 2. vfs_context_bind() sets the unrefcounted data
1397 * 3. vfs_context_rele() releases the context
1398 *
1399 */
1400 int
1401 vfs_context_bind(vfs_context_t ctx)
1402 {
1403 ctx->vc_thread = current_thread();
1404 return 0;
1405 }
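/*
 * Lifecycle sketch (illustrative, not from this file), following the
 * intended use described above:
 *
 *	vfs_context_t ctx = vfs_context_create(NULL);	1. get a context
 *	...
 *	vfs_context_bind(ctx);				2. rebind vc_thread to the calling thread
 *	error = VNOP_FSYNC(vp, MNT_WAIT, ctx);
 *	...
 *	vfs_context_rele(ctx);				3. drop the cred reference and free
 */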
1406
1407 int vfs_isswapmount(mount_t mnt)
1408 {
1409 return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
1410 }
1411
1412 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1413
1414
1415 /*
1416 * Convert between vnode types and inode formats (since POSIX.1
1417 * defines mode word of stat structure in terms of inode formats).
1418 */
1419 enum vtype
1420 vnode_iftovt(int mode)
1421 {
1422 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1423 }
1424
1425 int
1426 vnode_vttoif(enum vtype indx)
1427 {
1428 return(vttoif_tab[(int)(indx)]);
1429 }
1430
1431 int
1432 vnode_makeimode(int indx, int mode)
1433 {
1434 return (int)(VTTOIF(indx) | (mode));
1435 }
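/*
 * Worked example (illustrative): for a directory with permissions 0755,
 * vnode_makeimode(VDIR, 0755) yields (S_IFDIR | 0755), and
 * vnode_iftovt(S_IFDIR | 0755) maps back to VDIR.
 */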
1436
1437
1438 /*
1439 * vnode manipulation functions.
1440 */
1441
1442 /* returns the system root vnode with an iocount; it should be released using vnode_put() */
1443 vnode_t
1444 vfs_rootvnode(void)
1445 {
1446 int error;
1447
1448 error = vnode_get(rootvnode);
1449 if (error)
1450 return ((vnode_t)0);
1451 else
1452 return rootvnode;
1453 }
1454
1455
1456 uint32_t
1457 vnode_vid(vnode_t vp)
1458 {
1459 return ((uint32_t)(vp->v_id));
1460 }
1461
1462 mount_t
1463 vnode_mount(vnode_t vp)
1464 {
1465 return (vp->v_mount);
1466 }
1467
1468 #if CONFIG_IOSCHED
1469 vnode_t
1470 vnode_mountdevvp(vnode_t vp)
1471 {
1472 if (vp->v_mount)
1473 return (vp->v_mount->mnt_devvp);
1474 else
1475 return ((vnode_t)0);
1476 }
1477 #endif
1478
1479 mount_t
1480 vnode_mountedhere(vnode_t vp)
1481 {
1482 mount_t mp;
1483
1484 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1485 (mp->mnt_vnodecovered == vp))
1486 return (mp);
1487 else
1488 return (mount_t)NULL;
1489 }
1490
1491 /* returns vnode type of vnode_t */
1492 enum vtype
1493 vnode_vtype(vnode_t vp)
1494 {
1495 return (vp->v_type);
1496 }
1497
1498 /* returns FS specific node saved in vnode */
1499 void *
1500 vnode_fsnode(vnode_t vp)
1501 {
1502 return (vp->v_data);
1503 }
1504
1505 void
1506 vnode_clearfsnode(vnode_t vp)
1507 {
1508 vp->v_data = NULL;
1509 }
1510
1511 dev_t
1512 vnode_specrdev(vnode_t vp)
1513 {
1514 return(vp->v_rdev);
1515 }
1516
1517
1518 /* Accessor functions */
1519 /* is vnode_t a root vnode */
1520 int
1521 vnode_isvroot(vnode_t vp)
1522 {
1523 return ((vp->v_flag & VROOT)? 1 : 0);
1524 }
1525
1526 /* is vnode_t a system vnode */
1527 int
1528 vnode_issystem(vnode_t vp)
1529 {
1530 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1531 }
1532
1533 /* is vnode_t a swap file vnode */
1534 int
1535 vnode_isswap(vnode_t vp)
1536 {
1537 return ((vp->v_flag & VSWAP)? 1 : 0);
1538 }
1539
1540 /* is vnode_t a tty */
1541 int
1542 vnode_istty(vnode_t vp)
1543 {
1544 return ((vp->v_flag & VISTTY) ? 1 : 0);
1545 }
1546
1547 /* is a mount operation in progress on vnode_t */
1548 int
1549 vnode_ismount(vnode_t vp)
1550 {
1551 return ((vp->v_flag & VMOUNT)? 1 : 0);
1552 }
1553
1554 /* is this vnode being recycled now */
1555 int
1556 vnode_isrecycled(vnode_t vp)
1557 {
1558 int ret;
1559
1560 vnode_lock_spin(vp);
1561 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1562 vnode_unlock(vp);
1563 return(ret);
1564 }
1565
1566 /* vnode was created by background task requesting rapid aging
1567 and has not since been referenced by a normal task */
1568 int
1569 vnode_israge(vnode_t vp)
1570 {
1571 return ((vp->v_flag & VRAGE)? 1 : 0);
1572 }
1573
1574 int
1575 vnode_needssnapshots(vnode_t vp)
1576 {
1577 return ((vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0);
1578 }
1579
1580
1581 /* Check the process/thread to see if we should skip atime updates */
1582 int
1583 vfs_ctx_skipatime (vfs_context_t ctx) {
1584 struct uthread *ut;
1585 proc_t proc;
1586 thread_t thr;
1587
1588 proc = vfs_context_proc(ctx);
1589 thr = vfs_context_thread (ctx);
1590
1591 /* Validate pointers in case we were invoked via a kernel context */
1592 if (thr && proc) {
1593 ut = get_bsdthread_info (thr);
1594
1595 if (proc->p_lflag & P_LRAGE_VNODES) {
1596 return 1;
1597 }
1598
1599 if (ut) {
1600 if (ut->uu_flag & UT_RAGE_VNODES) {
1601 return 1;
1602 }
1603 }
1604 }
1605 return 0;
1606 }
1607
1608 /* is vnode_t marked to not keep data cached once it's been consumed */
1609 int
1610 vnode_isnocache(vnode_t vp)
1611 {
1612 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1613 }
1614
1615 /*
1616 * has sequential readahead been disabled on this vnode
1617 */
1618 int
1619 vnode_isnoreadahead(vnode_t vp)
1620 {
1621 return ((vp->v_flag & VRAOFF)? 1 : 0);
1622 }
1623
1624 int
1625 vnode_is_openevt(vnode_t vp)
1626 {
1627 return ((vp->v_flag & VOPENEVT)? 1 : 0);
1628 }
1629
1630 /* is vnode_t a standard one? */
1631 int
1632 vnode_isstandard(vnode_t vp)
1633 {
1634 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1635 }
1636
1637 /* don't vflush() if SKIPSYSTEM */
1638 int
1639 vnode_isnoflush(vnode_t vp)
1640 {
1641 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1642 }
1643
1644 /* is vnode_t a regular file */
1645 int
1646 vnode_isreg(vnode_t vp)
1647 {
1648 return ((vp->v_type == VREG)? 1 : 0);
1649 }
1650
1651 /* is vnode_t a directory? */
1652 int
1653 vnode_isdir(vnode_t vp)
1654 {
1655 return ((vp->v_type == VDIR)? 1 : 0);
1656 }
1657
1658 /* is vnode_t a symbolic link ? */
1659 int
1660 vnode_islnk(vnode_t vp)
1661 {
1662 return ((vp->v_type == VLNK)? 1 : 0);
1663 }
1664
1665 int
1666 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
1667 {
1668 struct nameidata *ndp = cnp->cn_ndp;
1669
1670 if (ndp == NULL) {
1671 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1672 }
1673
1674 if (vnode_isdir(vp)) {
1675 if (vp->v_mountedhere != NULL) {
1676 goto yes;
1677 }
1678
1679 #if CONFIG_TRIGGERS
1680 if (vp->v_resolve) {
1681 goto yes;
1682 }
1683 #endif /* CONFIG_TRIGGERS */
1684
1685 }
1686
1687
1688 if (vnode_islnk(vp)) {
1689 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1690 if (cnp->cn_flags & FOLLOW) {
1691 goto yes;
1692 }
1693 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
1694 goto yes;
1695 }
1696 }
1697
1698 return 0;
1699
1700 yes:
1701 ndp->ni_flag |= NAMEI_CONTLOOKUP;
1702 return EKEEPLOOKING;
1703 }
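/*
 * Usage sketch (illustrative; a hypothetical filesystem VNOP_LOOKUP handler,
 * not code from this file): after resolving a component to "vp", the FS can
 * let the VFS layer continue across mount points, trigger vnodes and
 * symlinks:
 *
 *	error = vnode_lookup_continue_needed(vp, ap->a_cnp);
 *	if (error) {
 *		*ap->a_vpp = vp;
 *		return (error);		EKEEPLOOKING tells lookup() to continue
 *	}
 */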
1704
1705 /* is vnode_t a fifo ? */
1706 int
1707 vnode_isfifo(vnode_t vp)
1708 {
1709 return ((vp->v_type == VFIFO)? 1 : 0);
1710 }
1711
1712 /* is vnode_t a block device? */
1713 int
1714 vnode_isblk(vnode_t vp)
1715 {
1716 return ((vp->v_type == VBLK)? 1 : 0);
1717 }
1718
1719 int
1720 vnode_isspec(vnode_t vp)
1721 {
1722 return (((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0);
1723 }
1724
1725 /* is vnode_t a char device? */
1726 int
1727 vnode_ischr(vnode_t vp)
1728 {
1729 return ((vp->v_type == VCHR)? 1 : 0);
1730 }
1731
1732 /* is vnode_t a socket? */
1733 int
1734 vnode_issock(vnode_t vp)
1735 {
1736 return ((vp->v_type == VSOCK)? 1 : 0);
1737 }
1738
1739 /* is vnode_t a device with multiple active vnodes referring to it? */
1740 int
1741 vnode_isaliased(vnode_t vp)
1742 {
1743 enum vtype vt = vp->v_type;
1744 if (!((vt == VCHR) || (vt == VBLK))) {
1745 return 0;
1746 } else {
1747 return (vp->v_specflags & SI_ALIASED);
1748 }
1749 }
1750
1751 /* is vnode_t a named stream? */
1752 int
1753 vnode_isnamedstream(
1754 #if NAMEDSTREAMS
1755 vnode_t vp
1756 #else
1757 __unused vnode_t vp
1758 #endif
1759 )
1760 {
1761 #if NAMEDSTREAMS
1762 return ((vp->v_flag & VISNAMEDSTREAM) ? 1 : 0);
1763 #else
1764 return (0);
1765 #endif
1766 }
1767
1768 int
1769 vnode_isshadow(
1770 #if NAMEDSTREAMS
1771 vnode_t vp
1772 #else
1773 __unused vnode_t vp
1774 #endif
1775 )
1776 {
1777 #if NAMEDSTREAMS
1778 return ((vp->v_flag & VISSHADOW) ? 1 : 0);
1779 #else
1780 return (0);
1781 #endif
1782 }
1783
1784 /* does vnode have associated named stream vnodes ? */
1785 int
1786 vnode_hasnamedstreams(
1787 #if NAMEDSTREAMS
1788 vnode_t vp
1789 #else
1790 __unused vnode_t vp
1791 #endif
1792 )
1793 {
1794 #if NAMEDSTREAMS
1795 return ((vp->v_lflag & VL_HASSTREAMS) ? 1 : 0);
1796 #else
1797 return (0);
1798 #endif
1799 }
1800 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1801 void
1802 vnode_setnocache(vnode_t vp)
1803 {
1804 vnode_lock_spin(vp);
1805 vp->v_flag |= VNOCACHE_DATA;
1806 vnode_unlock(vp);
1807 }
1808
1809 void
1810 vnode_clearnocache(vnode_t vp)
1811 {
1812 vnode_lock_spin(vp);
1813 vp->v_flag &= ~VNOCACHE_DATA;
1814 vnode_unlock(vp);
1815 }
1816
1817 void
1818 vnode_set_openevt(vnode_t vp)
1819 {
1820 vnode_lock_spin(vp);
1821 vp->v_flag |= VOPENEVT;
1822 vnode_unlock(vp);
1823 }
1824
1825 void
1826 vnode_clear_openevt(vnode_t vp)
1827 {
1828 vnode_lock_spin(vp);
1829 vp->v_flag &= ~VOPENEVT;
1830 vnode_unlock(vp);
1831 }
1832
1833
1834 void
1835 vnode_setnoreadahead(vnode_t vp)
1836 {
1837 vnode_lock_spin(vp);
1838 vp->v_flag |= VRAOFF;
1839 vnode_unlock(vp);
1840 }
1841
1842 void
1843 vnode_clearnoreadahead(vnode_t vp)
1844 {
1845 vnode_lock_spin(vp);
1846 vp->v_flag &= ~VRAOFF;
1847 vnode_unlock(vp);
1848 }
1849
1850 int
1851 vnode_isfastdevicecandidate(vnode_t vp)
1852 {
1853 return ((vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0);
1854 }
1855
1856 void
1857 vnode_setfastdevicecandidate(vnode_t vp)
1858 {
1859 vnode_lock_spin(vp);
1860 vp->v_flag |= VFASTDEVCANDIDATE;
1861 vnode_unlock(vp);
1862 }
1863
1864 void
1865 vnode_clearfastdevicecandidate(vnode_t vp)
1866 {
1867 vnode_lock_spin(vp);
1868 vp->v_flag &= ~VFASTDEVCANDIDATE;
1869 vnode_unlock(vp);
1870 }
1871
1872 int
1873 vnode_isautocandidate(vnode_t vp)
1874 {
1875 return ((vp->v_flag & VAUTOCANDIDATE)? 1 : 0);
1876 }
1877
1878 void
1879 vnode_setautocandidate(vnode_t vp)
1880 {
1881 vnode_lock_spin(vp);
1882 vp->v_flag |= VAUTOCANDIDATE;
1883 vnode_unlock(vp);
1884 }
1885
1886 void
1887 vnode_clearautocandidate(vnode_t vp)
1888 {
1889 vnode_lock_spin(vp);
1890 vp->v_flag &= ~VAUTOCANDIDATE;
1891 vnode_unlock(vp);
1892 }
1893
1894
1895
1896
1897 /* mark vnode_t to be skipped by vflush() if SKIPSYSTEM */
1898 void
1899 vnode_setnoflush(vnode_t vp)
1900 {
1901 vnode_lock_spin(vp);
1902 vp->v_flag |= VNOFLUSH;
1903 vnode_unlock(vp);
1904 }
1905
1906 void
1907 vnode_clearnoflush(vnode_t vp)
1908 {
1909 vnode_lock_spin(vp);
1910 vp->v_flag &= ~VNOFLUSH;
1911 vnode_unlock(vp);
1912 }
1913
1914
1915 /* is vnode_t a blkdevice and has a FS mounted on it */
1916 int
1917 vnode_ismountedon(vnode_t vp)
1918 {
1919 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
1920 }
1921
1922 void
1923 vnode_setmountedon(vnode_t vp)
1924 {
1925 vnode_lock_spin(vp);
1926 vp->v_specflags |= SI_MOUNTEDON;
1927 vnode_unlock(vp);
1928 }
1929
1930 void
1931 vnode_clearmountedon(vnode_t vp)
1932 {
1933 vnode_lock_spin(vp);
1934 vp->v_specflags &= ~SI_MOUNTEDON;
1935 vnode_unlock(vp);
1936 }
1937
1938
1939 void
1940 vnode_settag(vnode_t vp, int tag)
1941 {
1942 vp->v_tag = tag;
1943
1944 }
1945
1946 int
1947 vnode_tag(vnode_t vp)
1948 {
1949 return(vp->v_tag);
1950 }
1951
1952 vnode_t
1953 vnode_parent(vnode_t vp)
1954 {
1955
1956 return(vp->v_parent);
1957 }
1958
1959 void
1960 vnode_setparent(vnode_t vp, vnode_t dvp)
1961 {
1962 vp->v_parent = dvp;
1963 }
1964
1965 void
1966 vnode_setname(vnode_t vp, char * name)
1967 {
1968 vp->v_name = name;
1969 }
1970
1971 /* return the FS name that was registered when the FS was added to the kernel */
1972 void
1973 vnode_vfsname(vnode_t vp, char * buf)
1974 {
1975 strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
1976 }
1977
1978 /* return the FS type number */
1979 int
1980 vnode_vfstypenum(vnode_t vp)
1981 {
1982 return(vp->v_mount->mnt_vtable->vfc_typenum);
1983 }
1984
1985 int
1986 vnode_vfs64bitready(vnode_t vp)
1987 {
1988
1989 /*
1990 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
1991 */
1992 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
1993 return(1);
1994 else
1995 return(0);
1996 }
1997
1998
1999
2000 /* return the visible flags on associated mount point of vnode_t */
2001 uint32_t
2002 vnode_vfsvisflags(vnode_t vp)
2003 {
2004 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
2005 }
2006
2007 /* return the command modifier flags on associated mount point of vnode_t */
2008 uint32_t
2009 vnode_vfscmdflags(vnode_t vp)
2010 {
2011 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
2012 }
2013
2014 /* return the max length of a short symlink on vnode_t's mount point */
2015 uint32_t
2016 vnode_vfsmaxsymlen(vnode_t vp)
2017 {
2018 return(vp->v_mount->mnt_maxsymlinklen);
2019 }
2020
2021 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2022 struct vfsstatfs *
2023 vnode_vfsstatfs(vnode_t vp)
2024 {
2025 return(&vp->v_mount->mnt_vfsstat);
2026 }
2027
2028 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
2029 void *
2030 vnode_vfsfsprivate(vnode_t vp)
2031 {
2032 return(vp->v_mount->mnt_data);
2033 }
2034
2035 /* is vnode_t in a rdonly mounted FS */
2036 int
2037 vnode_vfsisrdonly(vnode_t vp)
2038 {
2039 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
2040 }
2041
2042 int
2043 vnode_compound_rename_available(vnode_t vp)
2044 {
2045 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
2046 }
2047 int
2048 vnode_compound_rmdir_available(vnode_t vp)
2049 {
2050 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
2051 }
2052 int
2053 vnode_compound_mkdir_available(vnode_t vp)
2054 {
2055 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
2056 }
2057 int
2058 vnode_compound_remove_available(vnode_t vp)
2059 {
2060 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
2061 }
2062 int
2063 vnode_compound_open_available(vnode_t vp)
2064 {
2065 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
2066 }
2067
2068 int
2069 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2070 {
2071 return ((vp->v_mount->mnt_compound_ops & opid) != 0);
2072 }
2073
2074 /*
2075 * Returns vnode ref to current working directory; if a per-thread current
2076 * working directory is in effect, return that instead of the per process one.
2077 *
2078 * XXX Published, but not used.
2079 */
2080 vnode_t
2081 current_workingdir(void)
2082 {
2083 return vfs_context_cwd(vfs_context_current());
2084 }
2085
2086 /* returns vnode ref to current root(chroot) directory */
2087 vnode_t
2088 current_rootdir(void)
2089 {
2090 proc_t proc = current_proc();
2091 struct vnode * vp ;
2092
2093 if ( (vp = proc->p_fd->fd_rdir) ) {
2094 if ( (vnode_getwithref(vp)) )
2095 return (NULL);
2096 }
2097 return vp;
2098 }
2099
2100 /*
2101 * Get a filesec and optional acl contents from an extended attribute.
2102 * Function will attempt to retrieve ACL, UUID, and GUID information using a
2103 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2104 *
2105 * Parameters: vp The vnode on which to operate.
2106 * fsecp The filesec (and ACL, if any) being
2107 * retrieved.
2108 * ctx The vnode context in which the
2109 * operation is to be attempted.
2110 *
2111 * Returns: 0 Success
2112 * !0 errno value
2113 *
2114 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2115 * host byte order, as will be the ACL contents, if any.
2116 * Internally, we will canonicalize these values from network (PPC)
2117 * byte order after we retrieve them so that the on-disk contents
2118 * of the extended attribute are identical for both PPC and Intel
2119 * (if we were not being required to provide this service via
2120 * fallback, this would be the job of the filesystem
2121 * 'VNOP_GETATTR' call).
2122 *
2123 * We use ntohl() because it has a transitive property on Intel
2124 * machines and no effect on PPC machines. This guarantees us
2125 *
2126 * XXX: Deleting rather than ignoring a corrupt security structure is
2127 * probably the only way to reset it without assistance from a
2128 * file system integrity checking tool. Right now we ignore it.
2129 *
2130 * XXX: We should enumerate the possible errno values here, and where
2131 * in the code they originated.
2132 */
2133 static int
2134 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2135 {
2136 kauth_filesec_t fsec;
2137 uio_t fsec_uio;
2138 size_t fsec_size;
2139 size_t xsize, rsize;
2140 int error;
2141 uint32_t host_fsec_magic;
2142 uint32_t host_acl_entrycount;
2143
2144 fsec = NULL;
2145 fsec_uio = NULL;
2146
2147 /* find out how big the EA is */
2148 error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
2149 if (error != 0) {
2150 /* no EA, no filesec */
2151 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2152 error = 0;
2153 /* either way, we are done */
2154 goto out;
2155 }
2156
2157 /*
2158 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2159 * ACE entry ACL, and if it's larger than that, it must have the right
2160 * number of bytes such that it contains a whole number of ACEs,
2161 * rather than partial entries. Otherwise, we ignore it.
2162 */
2163 if (!KAUTH_FILESEC_VALID(xsize)) {
2164 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
2165 error = 0;
2166 goto out;
2167 }
2168
2169 /* how many entries would fit? */
2170 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2171
2172 /* get buffer and uio */
2173 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
2174 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2175 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2176 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2177 error = ENOMEM;
2178 goto out;
2179 }
2180
2181 /* read security attribute */
2182 rsize = xsize;
2183 if ((error = vn_getxattr(vp,
2184 KAUTH_FILESEC_XATTR,
2185 fsec_uio,
2186 &rsize,
2187 XATTR_NOSECURITY,
2188 ctx)) != 0) {
2189
2190 /* no attribute - no security data */
2191 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2192 error = 0;
2193 /* either way, we are done */
2194 goto out;
2195 }
2196
2197 /*
2198 * Validate security structure; the validation must take place in host
2199 * byte order. If it's corrupt, we will just ignore it.
2200 */
2201
2202 /* Validate the size before trying to convert it */
2203 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2204 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2205 goto out;
2206 }
2207
2208 /* Validate the magic number before trying to convert it */
2209 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2210 if (fsec->fsec_magic != host_fsec_magic) {
2211 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2212 goto out;
2213 }
2214
2215 /* Validate the entry count before trying to convert it. */
2216 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2217 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2218 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2219 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2220 goto out;
2221 }
2222 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2223 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2224 goto out;
2225 }
2226 }
2227
2228 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2229
2230 *fsecp = fsec;
2231 fsec = NULL;
2232 error = 0;
2233 out:
2234 if (fsec != NULL)
2235 kauth_filesec_free(fsec);
2236 if (fsec_uio != NULL)
2237 uio_free(fsec_uio);
2238 if (error)
2239 *fsecp = NULL;
2240 return(error);
2241 }
2242
2243 /*
2244 * Set a filesec and optional acl contents into an extended attribute.
2245 * Function will attempt to store ACL, UUID, and GUID information using a
2246 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2247 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2248 * original caller supplied an acl.
2249 *
2250 * Parameters: vp The vnode on which to operate.
2251 * fsec The filesec being set.
2252 * acl The acl to be associated with 'fsec'.
2253 * ctx The vnode context in which the
2254 * operation is to be attempted.
2255 *
2256 * Returns: 0 Success
2257 * !0 errno value
2258 *
2259 * Notes: Both the fsec and the acl are always valid.
2260 *
2261 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2262 * as are the acl contents, if they are used. Internally, we will
2263 * canonicalize these values into network (PPC) byte order before we
2264 * attempt to write them so that the on-disk contents of the
2265 * extended attribute are identical for both PPC and Intel (if we
2266 * were not being required to provide this service via fallback,
2267 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2268 * We reverse this process on the way out, so we leave with the
2269 * same byte order we started with.
2270 *
2271 * XXX: We should enumerate the possible errno values here, and where
2272 * in the code they originated.
2273 */
2274 static int
2275 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2276 {
2277 uio_t fsec_uio;
2278 int error;
2279 uint32_t saved_acl_copysize;
2280
2281 fsec_uio = NULL;
2282
2283 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2284 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2285 error = ENOMEM;
2286 goto out;
2287 }
2288 /*
2289 * Save the pre-converted ACL copysize, because it gets swapped too
2290 * if we are running with the wrong endianness.
2291 */
2292 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2293
2294 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2295
2296 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2297 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2298 error = vn_setxattr(vp,
2299 KAUTH_FILESEC_XATTR,
2300 fsec_uio,
2301 XATTR_NOSECURITY, /* we have auth'ed already */
2302 ctx);
2303 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2304
2305 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2306
2307 out:
2308 if (fsec_uio != NULL)
2309 uio_free(fsec_uio);
2310 return(error);
2311 }
2312
2313
2314 /*
2315 * Returns: 0 Success
2316 * ENOMEM Not enough space [only if has filesec]
2317 * VNOP_GETATTR: ???
2318 * vnode_get_filesec: ???
2319 * kauth_cred_guid2uid: ???
2320 * kauth_cred_guid2gid: ???
2321 * vfs_update_vfsstat: ???
2322 */
2323 int
2324 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2325 {
2326 kauth_filesec_t fsec;
2327 kauth_acl_t facl;
2328 int error;
2329 uid_t nuid;
2330 gid_t ngid;
2331
2332 /* don't ask for extended security data if the filesystem doesn't support it */
2333 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2334 VATTR_CLEAR_ACTIVE(vap, va_acl);
2335 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2336 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2337 }
2338
2339 /*
2340 * If the caller wants size values we might have to synthesise, give the
2341 * filesystem the opportunity to supply better intermediate results.
2342 */
2343 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2344 VATTR_IS_ACTIVE(vap, va_total_size) ||
2345 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2346 VATTR_SET_ACTIVE(vap, va_data_size);
2347 VATTR_SET_ACTIVE(vap, va_data_alloc);
2348 VATTR_SET_ACTIVE(vap, va_total_size);
2349 VATTR_SET_ACTIVE(vap, va_total_alloc);
2350 }
2351
2352 error = VNOP_GETATTR(vp, vap, ctx);
2353 if (error) {
2354 KAUTH_DEBUG("ERROR - returning %d", error);
2355 goto out;
2356 }
2357
2358 /*
2359 * If extended security data was requested but not returned, try the fallback
2360 * path.
2361 */
2362 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2363 fsec = NULL;
2364
2365 if (XATTR_VNODE_SUPPORTED(vp)) {
2366 /* try to get the filesec */
2367 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
2368 goto out;
2369 }
2370 /* if no filesec, no attributes */
2371 if (fsec == NULL) {
2372 VATTR_RETURN(vap, va_acl, NULL);
2373 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2374 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2375 } else {
2376
2377 /* looks good, try to return what we were asked for */
2378 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2379 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2380
2381 /* only return the ACL if we were actually asked for it */
2382 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2383 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2384 VATTR_RETURN(vap, va_acl, NULL);
2385 } else {
2386 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2387 if (facl == NULL) {
2388 kauth_filesec_free(fsec);
2389 error = ENOMEM;
2390 goto out;
2391 }
2392 bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2393 VATTR_RETURN(vap, va_acl, facl);
2394 }
2395 }
2396 kauth_filesec_free(fsec);
2397 }
2398 }
2399 /*
2400 * If someone gave us an unsolicited filesec, toss it. We promise that
2401 * we're OK with a filesystem giving us anything back, but our callers
2402 * only expect what they asked for.
2403 */
2404 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2405 if (vap->va_acl != NULL)
2406 kauth_acl_free(vap->va_acl);
2407 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2408 }
2409
2410 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2411 /*
2412 * Handle the case where we need a UID/GID, but only have extended
2413 * security information.
2414 */
2415 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2416 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2417 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2418 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
2419 VATTR_RETURN(vap, va_uid, nuid);
2420 }
2421 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2422 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2423 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2424 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
2425 VATTR_RETURN(vap, va_gid, ngid);
2426 }
2427 #endif
2428
2429 /*
2430 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2431 */
2432 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2433 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2434 nuid = vap->va_uid;
2435 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2436 nuid = vp->v_mount->mnt_fsowner;
2437 if (nuid == KAUTH_UID_NONE)
2438 nuid = 99;
2439 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2440 nuid = vap->va_uid;
2441 } else {
2442 /* this will always be something sensible */
2443 nuid = vp->v_mount->mnt_fsowner;
2444 }
2445 if ((nuid == 99) && !vfs_context_issuser(ctx))
2446 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2447 VATTR_RETURN(vap, va_uid, nuid);
2448 }
2449 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2450 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2451 ngid = vap->va_gid;
2452 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2453 ngid = vp->v_mount->mnt_fsgroup;
2454 if (ngid == KAUTH_GID_NONE)
2455 ngid = 99;
2456 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2457 ngid = vap->va_gid;
2458 } else {
2459 /* this will always be something sensible */
2460 ngid = vp->v_mount->mnt_fsgroup;
2461 }
2462 if ((ngid == 99) && !vfs_context_issuser(ctx))
2463 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2464 VATTR_RETURN(vap, va_gid, ngid);
2465 }
2466
2467 /*
2468 * Synthesise some values that can be reasonably guessed.
2469 */
2470 if (!VATTR_IS_SUPPORTED(vap, va_iosize))
2471 VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
2472
2473 if (!VATTR_IS_SUPPORTED(vap, va_flags))
2474 VATTR_RETURN(vap, va_flags, 0);
2475
2476 if (!VATTR_IS_SUPPORTED(vap, va_filerev))
2477 VATTR_RETURN(vap, va_filerev, 0);
2478
2479 if (!VATTR_IS_SUPPORTED(vap, va_gen))
2480 VATTR_RETURN(vap, va_gen, 0);
2481
2482 /*
2483 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2484 */
2485 if (!VATTR_IS_SUPPORTED(vap, va_data_size))
2486 VATTR_RETURN(vap, va_data_size, 0);
2487
2488 /* do we want any of the possibly-computed values? */
2489 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2490 VATTR_IS_ACTIVE(vap, va_total_size) ||
2491 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2492 /* make sure f_bsize is valid */
2493 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2494 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0)
2495 goto out;
2496 }
2497
2498 /* default va_data_alloc from va_data_size */
2499 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
2500 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2501
2502 /* default va_total_size from va_data_size */
2503 if (!VATTR_IS_SUPPORTED(vap, va_total_size))
2504 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2505
2506 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2507 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
2508 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2509 }
2510
2511 /*
2512 * If we don't have a change time, pull it from the modtime.
2513 */
2514 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
2515 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2516
2517 /*
2518 * This is really only supported for the creation VNOPs, but since the field is there
2519 * we should populate it correctly.
2520 */
2521 VATTR_RETURN(vap, va_type, vp->v_type);
2522
2523 /*
2524 * The fsid can be obtained from the mountpoint directly.
2525 */
2526 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2527
2528 out:
2529
2530 return(error);
2531 }
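/*
 * Illustrative usage sketch (editorial addition; 'vp' and 'ctx' are assumed
 * to be a valid vnode and a non-NULL context): callers initialize a
 * vnode_attr, mark the attributes they want, call vnode_getattr(), and then
 * test which attributes the filesystem (or the fallback code above) returned.
 *
 *	struct vnode_attr va;
 *	int error;
 *
 *	VATTR_INIT(&va);
 *	VATTR_WANTED(&va, va_data_size);
 *	VATTR_WANTED(&va, va_uid);
 *	error = vnode_getattr(vp, &va, ctx);
 *	if (error == 0 && VATTR_IS_SUPPORTED(&va, va_data_size)) {
 *		... va.va_data_size is valid here ...
 *	}
 */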
2532
2533 /*
2534 * Set the attributes on a vnode in a vnode context.
2535 *
2536 * Parameters: vp The vnode whose attributes to set.
2537 * vap A pointer to the attributes to set.
2538 * ctx The vnode context in which the
2539 * operation is to be attempted.
2540 *
2541 * Returns: 0 Success
2542 * !0 errno value
2543 *
2544 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2545 *
2546 * The contents of the data area pointed to by 'vap' may be
2547 * modified if the vnode is on a filesystem which has been
2548 * mounted with ignore ownership flags, or by the underlying
2549 * VFS itself, or by the fallback code, if the underlying VFS
2550 * does not support ACL, UUID, or GUUID attributes directly.
2551 *
2552 * XXX: We should enumerate the possible errno values here, and where
2553 * in the code they originated.
2554 */
2555 int
2556 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2557 {
2558 int error, is_perm_change=0;
2559
2560 /*
2561 * Make sure the filesystem is mounted R/W.
2562 * If not, return an error.
2563 */
2564 if (vfs_isrdonly(vp->v_mount)) {
2565 error = EROFS;
2566 goto out;
2567 }
2568
2569 #if DEVELOPMENT || DEBUG
2570 /*
2571 * XXX VSWAP: Check for entitlements or special flag here
2572 * so we can restrict access appropriately.
2573 */
2574 #else /* DEVELOPMENT || DEBUG */
2575
2576 if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
2577 error = EPERM;
2578 goto out;
2579 }
2580 #endif /* DEVELOPMENT || DEBUG */
2581
2582 #if NAMEDSTREAMS
2583 /* For streams, va_data_size is the only settable attribute. */
2584 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2585 error = EPERM;
2586 goto out;
2587 }
2588 #endif
2589 /* Check for truncation */
2590 if(VATTR_IS_ACTIVE(vap, va_data_size)) {
2591 switch(vp->v_type) {
2592 case VREG:
2593 /* For regular files it's ok */
2594 break;
2595 case VDIR:
2596 /* Not allowed to truncate directories */
2597 error = EISDIR;
2598 goto out;
2599 default:
2600 /* For everything else we will clear the bit and let underlying FS decide on the rest */
2601 VATTR_CLEAR_ACTIVE(vap, va_data_size);
2602 if (vap->va_active)
2603 break;
2604 /* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
2605 return (0);
2606 }
2607 }
2608
2609 /*
2610 * If ownership is being ignored on this volume, we silently discard
2611 * ownership changes.
2612 */
2613 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2614 VATTR_CLEAR_ACTIVE(vap, va_uid);
2615 VATTR_CLEAR_ACTIVE(vap, va_gid);
2616 }
2617
2618 if ( VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)
2619 || VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_acl)) {
2620 is_perm_change = 1;
2621 }
2622
2623 /*
2624 * Make sure that extended security is enabled if we're going to try
2625 * to set any.
2626 */
2627 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2628 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2629 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2630 error = ENOTSUP;
2631 goto out;
2632 }
2633
2634 /* Never allow the setting of any unsupported superuser flags. */
2635 if (VATTR_IS_ACTIVE(vap, va_flags)) {
2636 vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
2637 }
2638
2639 error = VNOP_SETATTR(vp, vap, ctx);
2640
2641 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
2642 error = vnode_setattr_fallback(vp, vap, ctx);
2643
2644 #if CONFIG_FSE
2645 // only send a stat_changed event if this is more than
2646 // just an access or backup time update
2647 if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time)) && (vap->va_active != VNODE_ATTR_BIT(va_backup_time))) {
2648 if (is_perm_change) {
2649 if (need_fsevent(FSE_CHOWN, vp)) {
2650 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2651 }
2652 } else if(need_fsevent(FSE_STAT_CHANGED, vp)) {
2653 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2654 }
2655 }
2656 #endif
2657
2658 out:
2659 return(error);
2660 }
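/*
 * Illustrative usage sketch (editorial addition; 'vp' and 'ctx' assumed in
 * scope): setting attributes follows the same pattern as vnode_getattr(),
 * but with VATTR_SET() supplying the new values.
 *
 *	struct vnode_attr va;
 *	int error;
 *
 *	VATTR_INIT(&va);
 *	VATTR_SET(&va, va_mode, 0644);
 *	error = vnode_setattr(vp, &va, ctx);
 */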
2661
2662 /*
2663 * Fallback for setting the attributes on a vnode in a vnode context. This
2664 * function will attempt to store ACL, UUID, and GUID information utilizing
2665 * a read/modify/write operation against an EA used as a backing store for
2666 * the object.
2667 *
2668 * Parameters: vp The vnode whose attributes to set.
2669 * vap A pointer to the attributes to set.
2670 * ctx The vnode context in which the
2671 * operation is to be attempted.
2672 *
2673 * Returns: 0 Success
2674 * !0 errno value
2675 *
2676 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2677 * as are the fsec and lfsec, if they are used.
2678 *
2679 * The contents of the data area pointed to by 'vap' may be
2680 * modified to indicate that the attribute is supported for
2681 * any given requested attribute.
2682 *
2683 * XXX: We should enumerate the possible errno values here, and where
2684 * in the code they originated.
2685 */
2686 int
2687 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2688 {
2689 kauth_filesec_t fsec;
2690 kauth_acl_t facl;
2691 struct kauth_filesec lfsec;
2692 int error;
2693
2694 error = 0;
2695
2696 /*
2697 * Extended security fallback via extended attributes.
2698 *
2699 * Note that we do not free the filesec; the caller is expected to
2700 * do this.
2701 */
2702 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2703 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2704 VATTR_NOT_RETURNED(vap, va_guuid)) {
2705 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2706
2707 /*
2708 * Fail for file types that we don't permit extended security
2709 * to be set on.
2710 */
2711 if (!XATTR_VNODE_SUPPORTED(vp)) {
2712 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2713 error = EINVAL;
2714 goto out;
2715 }
2716
2717 /*
2718 * If we don't have all the extended security items, we need
2719 * to fetch the existing data to perform a read-modify-write
2720 * operation.
2721 */
2722 fsec = NULL;
2723 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2724 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2725 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2726 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2727 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2728 goto out;
2729 }
2730 }
2731 /* if we didn't get a filesec, use our local one */
2732 if (fsec == NULL) {
2733 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2734 fsec = &lfsec;
2735 } else {
2736 KAUTH_DEBUG("SETATTR - updating existing filesec");
2737 }
2738 /* find the ACL */
2739 facl = &fsec->fsec_acl;
2740
2741 /* if we're using the local filesec, we need to initialise it */
2742 if (fsec == &lfsec) {
2743 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2744 fsec->fsec_owner = kauth_null_guid;
2745 fsec->fsec_group = kauth_null_guid;
2746 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2747 facl->acl_flags = 0;
2748 }
2749
2750 /*
2751 * Update with the supplied attributes.
2752 */
2753 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2754 KAUTH_DEBUG("SETATTR - updating owner UUID");
2755 fsec->fsec_owner = vap->va_uuuid;
2756 VATTR_SET_SUPPORTED(vap, va_uuuid);
2757 }
2758 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2759 KAUTH_DEBUG("SETATTR - updating group UUID");
2760 fsec->fsec_group = vap->va_guuid;
2761 VATTR_SET_SUPPORTED(vap, va_guuid);
2762 }
2763 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2764 if (vap->va_acl == NULL) {
2765 KAUTH_DEBUG("SETATTR - removing ACL");
2766 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2767 } else {
2768 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2769 facl = vap->va_acl;
2770 }
2771 VATTR_SET_SUPPORTED(vap, va_acl);
2772 }
2773
2774 /*
2775 * If the filesec data is all invalid, we can just remove
2776 * the EA completely.
2777 */
2778 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2779 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2780 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2781 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2782 /* no attribute is ok, nothing to delete */
2783 if (error == ENOATTR)
2784 error = 0;
2785 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2786 } else {
2787 /* write the EA */
2788 error = vnode_set_filesec(vp, fsec, facl, ctx);
2789 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2790 }
2791
2792 /* if we fetched a filesec, dispose of the buffer */
2793 if (fsec != &lfsec)
2794 kauth_filesec_free(fsec);
2795 }
2796 out:
2797
2798 return(error);
2799 }
2800
2801 /*
2802 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
2803 * event on a vnode.
2804 */
2805 int
2806 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
2807 {
2808 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
2809 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
2810 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
2811 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
2812 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
2813 uint32_t knote_events = (events & knote_mask);
2814
2815 /* Permissions are not explicitly part of the kqueue model */
2816 if (events & VNODE_EVENT_PERMS) {
2817 knote_events |= NOTE_ATTRIB;
2818 }
2819
2820 /* Directory contents information just becomes NOTE_WRITE */
2821 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
2822 knote_events |= NOTE_WRITE;
2823 }
2824
2825 if (knote_events) {
2826 lock_vnode_and_post(vp, knote_events);
2827 #if CONFIG_FSE
2828 if (vap != NULL) {
2829 create_fsevent_from_kevent(vp, events, vap);
2830 }
2831 #else
2832 (void)vap;
2833 #endif
2834 }
2835
2836 return 0;
2837 }
2838
2839
2840
2841 int
2842 vnode_isdyldsharedcache(vnode_t vp)
2843 {
2844 return ((vp->v_flag & VSHARED_DYLD) ? 1 : 0);
2845 }
2846
2847
2848 /*
2849 * For a filesystem that isn't tracking its own vnode watchers:
2850 * check whether a vnode is being monitored.
2851 */
2852 int
2853 vnode_ismonitored(vnode_t vp) {
2854 return (vp->v_knotes.slh_first != NULL);
2855 }
2856
2857 /*
2858 * Initialize a struct vnode_attr and activate the attributes required
2859 * by the vnode_notify() call.
2860 */
2861 int
2862 vfs_get_notify_attributes(struct vnode_attr *vap)
2863 {
2864 VATTR_INIT(vap);
2865 vap->va_active = VNODE_NOTIFY_ATTRS;
2866 return 0;
2867 }
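/*
 * Illustrative sketch of how a filesystem might combine the calls above
 * (editorial addition; 'vp' and 'ctx' assumed in scope). The filesystem
 * checks for watchers, fills in the attributes the notification wants
 * (sketched here with vnode_getattr(), though a filesystem may populate
 * them directly), and posts the event:
 *
 *	struct vnode_attr va;
 *
 *	if (vnode_ismonitored(vp)) {
 *		vfs_get_notify_attributes(&va);
 *		if (vnode_getattr(vp, &va, ctx) == 0)
 *			vnode_notify(vp, VNODE_EVENT_WRITE, &va);
 *	}
 */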
2868
2869 #if CONFIG_TRIGGERS
2870 int
2871 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
2872 {
2873 int error;
2874 mount_t mp;
2875
2876 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
2877 if (mp == NULL) {
2878 return ENOENT;
2879 }
2880
2881 error = vfs_busy(mp, LK_NOWAIT);
2882 mount_iterdrop(mp);
2883
2884 if (error != 0) {
2885 return ENOENT;
2886 }
2887
2888 mount_lock(mp);
2889 if (mp->mnt_triggercallback != NULL) {
2890 error = EBUSY;
2891 mount_unlock(mp);
2892 goto out;
2893 }
2894
2895 mp->mnt_triggercallback = vtc;
2896 mp->mnt_triggerdata = data;
2897 mount_unlock(mp);
2898
2899 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
2900
2901 out:
2902 vfs_unbusy(mp);
2903 return 0;
2904 }
2905 #endif /* CONFIG_TRIGGERS */
2906
2907 /*
2908 * Definition of vnode operations.
2909 */
2910
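/*
 * Each VNOP_*() wrapper below packs its arguments into the corresponding
 * vnop_*_args structure and dispatches through the vnode's v_op table,
 * indexed by the operation's vdesc_offset. A filesystem provides the
 * handlers in a descriptor table supplied when it registers with
 * vfs_fsadd(). A minimal sketch (editorial addition; the myfs_* names are
 * hypothetical):
 *
 *	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vnop_default_desc, (int (*)(void *))vn_default_error },
 *		{ &vnop_lookup_desc,  (int (*)(void *))myfs_vnop_lookup },
 *		{ &vnop_open_desc,    (int (*)(void *))myfs_vnop_open },
 *		{ NULL, NULL }
 *	};
 */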
2911 #if 0
2912 /*
2913 *#
2914 *#% lookup dvp L ? ?
2915 *#% lookup vpp - L -
2916 */
2917 struct vnop_lookup_args {
2918 struct vnodeop_desc *a_desc;
2919 vnode_t a_dvp;
2920 vnode_t *a_vpp;
2921 struct componentname *a_cnp;
2922 vfs_context_t a_context;
2923 };
2924 #endif /* 0*/
2925
2926 /*
2927 * Returns: 0 Success
2928 * lock_fsnode:ENOENT No such file or directory [only for VFS
2929 * that is not thread safe & vnode is
2930 * currently being/has been terminated]
2931 * <vfs_lookup>:ENAMETOOLONG
2932 * <vfs_lookup>:ENOENT
2933 * <vfs_lookup>:EJUSTRETURN
2934 * <vfs_lookup>:EPERM
2935 * <vfs_lookup>:EISDIR
2936 * <vfs_lookup>:ENOTDIR
2937 * <vfs_lookup>:???
2938 *
2939 * Note: The return codes from the underlying VFS's lookup routine can't
2940 * be fully enumerated here, since third party VFS authors may not
2941 * limit their error returns to the ones documented here, even
2942 * though this may result in some programs functioning incorrectly.
2943 *
2944 * The return codes documented above are those which may currently
2945 * be returned by HFS from hfs_lookup, not including additional
2946 * error code which may be propagated from underlying routines.
2947 */
2948 errno_t
2949 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
2950 {
2951 int _err;
2952 struct vnop_lookup_args a;
2953
2954 a.a_desc = &vnop_lookup_desc;
2955 a.a_dvp = dvp;
2956 a.a_vpp = vpp;
2957 a.a_cnp = cnp;
2958 a.a_context = ctx;
2959
2960 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
2961 if (_err == 0 && *vpp) {
2962 DTRACE_FSINFO(lookup, vnode_t, *vpp);
2963 }
2964
2965 return (_err);
2966 }
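/*
 * Filesystem-side sketch (editorial addition; myfs_vnop_lookup is
 * hypothetical): a lookup handler receives the argument structure shown
 * above and, on success, returns in *a_vpp a vnode holding an iocount for
 * the caller.
 *
 *	static int
 *	myfs_vnop_lookup(struct vnop_lookup_args *ap)
 *	{
 *		vnode_t dvp = ap->a_dvp;
 *		struct componentname *cnp = ap->a_cnp;
 *		vnode_t found_vp;
 *
 *		... resolve cnp->cn_nameptr within dvp, taking an iocount ...
 *
 *		*ap->a_vpp = found_vp;
 *		return 0;
 *	}
 */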
2967
2968 #if 0
2969 struct vnop_compound_open_args {
2970 struct vnodeop_desc *a_desc;
2971 vnode_t a_dvp;
2972 vnode_t *a_vpp;
2973 struct componentname *a_cnp;
2974 int32_t a_flags;
2975 int32_t a_fmode;
2976 struct vnode_attr *a_vap;
2977 vfs_context_t a_context;
2978 void *a_reserved;
2979 };
2980 #endif /* 0 */
2981
2982 int
2983 VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
2984 {
2985 int _err;
2986 struct vnop_compound_open_args a;
2987 int did_create = 0;
2988 int want_create;
2989 uint32_t tmp_status = 0;
2990 struct componentname *cnp = &ndp->ni_cnd;
2991
2992 want_create = (flags & O_CREAT);
2993
2994 a.a_desc = &vnop_compound_open_desc;
2995 a.a_dvp = dvp;
2996 a.a_vpp = vpp; /* Could be NULL */
2997 a.a_cnp = cnp;
2998 a.a_flags = flags;
2999 a.a_fmode = fmode;
3000 a.a_status = (statusp != NULL) ? statusp : &tmp_status;
3001 a.a_vap = vap;
3002 a.a_context = ctx;
3003 a.a_open_create_authorizer = vn_authorize_create;
3004 a.a_open_existing_authorizer = vn_authorize_open_existing;
3005 a.a_reserved = NULL;
3006
3007 if (dvp == NULLVP) {
3008 panic("No dvp?");
3009 }
3010 if (want_create && !vap) {
3011 panic("Want create, but no vap?");
3012 }
3013 if (!want_create && vap) {
3014 panic("Don't want create, but have a vap?");
3015 }
3016
3017 _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
3018 if (want_create) {
3019 if (_err == 0 && *vpp) {
3020 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3021 } else {
3022 DTRACE_FSINFO(compound_open, vnode_t, dvp);
3023 }
3024 } else {
3025 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3026 }
3027
3028 did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
3029
3030 if (did_create && !want_create) {
3031 panic("Filesystem did a create, even though none was requested?");
3032 }
3033
3034 if (did_create) {
3035 #if CONFIG_APPLEDOUBLE
3036 if (!NATIVE_XATTR(dvp)) {
3037 /*
3038 * Remove stale Apple Double file (if any).
3039 */
3040 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3041 }
3042 #endif /* CONFIG_APPLEDOUBLE */
3043 /* On create, provide kqueue notification */
3044 post_event_if_success(dvp, _err, NOTE_WRITE);
3045 }
3046
3047 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
3048 #if 0 /* FSEvents... */
3049 if (*vpp && _err && _err != EKEEPLOOKING) {
3050 vnode_put(*vpp);
3051 *vpp = NULLVP;
3052 }
3053 #endif /* 0 */
3054
3055 return (_err);
3056
3057 }
3058
3059 #if 0
3060 struct vnop_create_args {
3061 struct vnodeop_desc *a_desc;
3062 vnode_t a_dvp;
3063 vnode_t *a_vpp;
3064 struct componentname *a_cnp;
3065 struct vnode_attr *a_vap;
3066 vfs_context_t a_context;
3067 };
3068 #endif /* 0*/
3069 errno_t
3070 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3071 {
3072 int _err;
3073 struct vnop_create_args a;
3074
3075 a.a_desc = &vnop_create_desc;
3076 a.a_dvp = dvp;
3077 a.a_vpp = vpp;
3078 a.a_cnp = cnp;
3079 a.a_vap = vap;
3080 a.a_context = ctx;
3081
3082 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
3083 if (_err == 0 && *vpp) {
3084 DTRACE_FSINFO(create, vnode_t, *vpp);
3085 }
3086
3087 #if CONFIG_APPLEDOUBLE
3088 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3089 /*
3090 * Remove stale Apple Double file (if any).
3091 */
3092 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3093 }
3094 #endif /* CONFIG_APPLEDOUBLE */
3095
3096 post_event_if_success(dvp, _err, NOTE_WRITE);
3097
3098 return (_err);
3099 }
3100
3101 #if 0
3102 /*
3103 *#
3104 *#% whiteout dvp L L L
3105 *#% whiteout cnp - - -
3106 *#% whiteout flag - - -
3107 *#
3108 */
3109 struct vnop_whiteout_args {
3110 struct vnodeop_desc *a_desc;
3111 vnode_t a_dvp;
3112 struct componentname *a_cnp;
3113 int a_flags;
3114 vfs_context_t a_context;
3115 };
3116 #endif /* 0*/
3117 errno_t
3118 VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
3119 __unused int flags, __unused vfs_context_t ctx)
3120 {
3121 return (ENOTSUP); // XXX OBSOLETE
3122 }
3123
3124 #if 0
3125 /*
3126 *#
3127 *#% mknod dvp L U U
3128 *#% mknod vpp - X -
3129 *#
3130 */
3131 struct vnop_mknod_args {
3132 struct vnodeop_desc *a_desc;
3133 vnode_t a_dvp;
3134 vnode_t *a_vpp;
3135 struct componentname *a_cnp;
3136 struct vnode_attr *a_vap;
3137 vfs_context_t a_context;
3138 };
3139 #endif /* 0*/
3140 errno_t
3141 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3142 {
3143
3144 int _err;
3145 struct vnop_mknod_args a;
3146
3147 a.a_desc = &vnop_mknod_desc;
3148 a.a_dvp = dvp;
3149 a.a_vpp = vpp;
3150 a.a_cnp = cnp;
3151 a.a_vap = vap;
3152 a.a_context = ctx;
3153
3154 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3155 if (_err == 0 && *vpp) {
3156 DTRACE_FSINFO(mknod, vnode_t, *vpp);
3157 }
3158
3159 post_event_if_success(dvp, _err, NOTE_WRITE);
3160
3161 return (_err);
3162 }
3163
3164 #if 0
3165 /*
3166 *#
3167 *#% open vp L L L
3168 *#
3169 */
3170 struct vnop_open_args {
3171 struct vnodeop_desc *a_desc;
3172 vnode_t a_vp;
3173 int a_mode;
3174 vfs_context_t a_context;
3175 };
3176 #endif /* 0*/
3177 errno_t
3178 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3179 {
3180 int _err;
3181 struct vnop_open_args a;
3182
3183 if (ctx == NULL) {
3184 ctx = vfs_context_current();
3185 }
3186 a.a_desc = &vnop_open_desc;
3187 a.a_vp = vp;
3188 a.a_mode = mode;
3189 a.a_context = ctx;
3190
3191 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3192 DTRACE_FSINFO(open, vnode_t, vp);
3193
3194 return (_err);
3195 }
3196
3197 #if 0
3198 /*
3199 *#
3200 *#% close vp U U U
3201 *#
3202 */
3203 struct vnop_close_args {
3204 struct vnodeop_desc *a_desc;
3205 vnode_t a_vp;
3206 int a_fflag;
3207 vfs_context_t a_context;
3208 };
3209 #endif /* 0*/
3210 errno_t
3211 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3212 {
3213 int _err;
3214 struct vnop_close_args a;
3215
3216 if (ctx == NULL) {
3217 ctx = vfs_context_current();
3218 }
3219 a.a_desc = &vnop_close_desc;
3220 a.a_vp = vp;
3221 a.a_fflag = fflag;
3222 a.a_context = ctx;
3223
3224 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3225 DTRACE_FSINFO(close, vnode_t, vp);
3226
3227 return (_err);
3228 }
3229
3230 #if 0
3231 /*
3232 *#
3233 *#% access vp L L L
3234 *#
3235 */
3236 struct vnop_access_args {
3237 struct vnodeop_desc *a_desc;
3238 vnode_t a_vp;
3239 int a_action;
3240 vfs_context_t a_context;
3241 };
3242 #endif /* 0*/
3243 errno_t
3244 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3245 {
3246 int _err;
3247 struct vnop_access_args a;
3248
3249 if (ctx == NULL) {
3250 ctx = vfs_context_current();
3251 }
3252 a.a_desc = &vnop_access_desc;
3253 a.a_vp = vp;
3254 a.a_action = action;
3255 a.a_context = ctx;
3256
3257 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3258 DTRACE_FSINFO(access, vnode_t, vp);
3259
3260 return (_err);
3261 }
3262
3263 #if 0
3264 /*
3265 *#
3266 *#% getattr vp = = =
3267 *#
3268 */
3269 struct vnop_getattr_args {
3270 struct vnodeop_desc *a_desc;
3271 vnode_t a_vp;
3272 struct vnode_attr *a_vap;
3273 vfs_context_t a_context;
3274 };
3275 #endif /* 0*/
3276 errno_t
3277 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3278 {
3279 int _err;
3280 struct vnop_getattr_args a;
3281
3282 a.a_desc = &vnop_getattr_desc;
3283 a.a_vp = vp;
3284 a.a_vap = vap;
3285 a.a_context = ctx;
3286
3287 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3288 DTRACE_FSINFO(getattr, vnode_t, vp);
3289
3290 return (_err);
3291 }
3292
3293 #if 0
3294 /*
3295 *#
3296 *#% setattr vp L L L
3297 *#
3298 */
3299 struct vnop_setattr_args {
3300 struct vnodeop_desc *a_desc;
3301 vnode_t a_vp;
3302 struct vnode_attr *a_vap;
3303 vfs_context_t a_context;
3304 };
3305 #endif /* 0*/
3306 errno_t
3307 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3308 {
3309 int _err;
3310 struct vnop_setattr_args a;
3311
3312 a.a_desc = &vnop_setattr_desc;
3313 a.a_vp = vp;
3314 a.a_vap = vap;
3315 a.a_context = ctx;
3316
3317 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3318 DTRACE_FSINFO(setattr, vnode_t, vp);
3319
3320 #if CONFIG_APPLEDOUBLE
3321 /*
3322 * Shadow uid/gid/mod change to extended attribute file.
3323 */
3324 if (_err == 0 && !NATIVE_XATTR(vp)) {
3325 struct vnode_attr va;
3326 int change = 0;
3327
3328 VATTR_INIT(&va);
3329 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3330 VATTR_SET(&va, va_uid, vap->va_uid);
3331 change = 1;
3332 }
3333 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3334 VATTR_SET(&va, va_gid, vap->va_gid);
3335 change = 1;
3336 }
3337 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3338 VATTR_SET(&va, va_mode, vap->va_mode);
3339 change = 1;
3340 }
3341 if (change) {
3342 vnode_t dvp;
3343 const char *vname;
3344
3345 dvp = vnode_getparent(vp);
3346 vname = vnode_getname(vp);
3347
3348 xattrfile_setattr(dvp, vname, &va, ctx);
3349 if (dvp != NULLVP)
3350 vnode_put(dvp);
3351 if (vname != NULL)
3352 vnode_putname(vname);
3353 }
3354 }
3355 #endif /* CONFIG_APPLEDOUBLE */
3356
3357 /*
3358 * If we have changed any of the things about the file that are likely
3359 * to result in changes to authorization results, blow the vnode auth
3360 * cache
3361 */
3362 if (_err == 0 && (
3363 VATTR_IS_SUPPORTED(vap, va_mode) ||
3364 VATTR_IS_SUPPORTED(vap, va_uid) ||
3365 VATTR_IS_SUPPORTED(vap, va_gid) ||
3366 VATTR_IS_SUPPORTED(vap, va_flags) ||
3367 VATTR_IS_SUPPORTED(vap, va_acl) ||
3368 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
3369 VATTR_IS_SUPPORTED(vap, va_guuid))) {
3370 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3371
3372 #if NAMEDSTREAMS
3373 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3374 vnode_t svp;
3375 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3376 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3377 vnode_put(svp);
3378 }
3379 }
3380 #endif /* NAMEDSTREAMS */
3381 }
3382
3383
3384 post_event_if_success(vp, _err, NOTE_ATTRIB);
3385
3386 return (_err);
3387 }
3388
3389
3390 #if 0
3391 /*
3392 *#
3393 *#% read vp L L L
3394 *#
3395 */
3396 struct vnop_read_args {
3397 struct vnodeop_desc *a_desc;
3398 vnode_t a_vp;
3399 struct uio *a_uio;
3400 int a_ioflag;
3401 vfs_context_t a_context;
3402 };
3403 #endif /* 0*/
3404 errno_t
3405 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3406 {
3407 int _err;
3408 struct vnop_read_args a;
3409 #if CONFIG_DTRACE
3410 user_ssize_t resid = uio_resid(uio);
3411 #endif
3412
3413 if (ctx == NULL) {
3414 return EINVAL;
3415 }
3416
3417 a.a_desc = &vnop_read_desc;
3418 a.a_vp = vp;
3419 a.a_uio = uio;
3420 a.a_ioflag = ioflag;
3421 a.a_context = ctx;
3422
3423 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3424 DTRACE_FSINFO_IO(read,
3425 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3426
3427 return (_err);
3428 }
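/*
 * Illustrative sketch (editorial addition): reading through this wrapper
 * requires a uio describing the destination buffer and a non-NULL context;
 * 'vp' is assumed to have been opened already (e.g. via vnode_open()).
 *
 *	char buf[512];
 *	uio_t auio;
 *	int error;
 *
 *	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
 *	uio_addiov(auio, CAST_USER_ADDR_T(buf), sizeof(buf));
 *	error = VNOP_READ(vp, auio, 0, ctx);
 *	... bytes read == sizeof(buf) - uio_resid(auio) ...
 *	uio_free(auio);
 */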
3429
3430
3431 #if 0
3432 /*
3433 *#
3434 *#% write vp L L L
3435 *#
3436 */
3437 struct vnop_write_args {
3438 struct vnodeop_desc *a_desc;
3439 vnode_t a_vp;
3440 struct uio *a_uio;
3441 int a_ioflag;
3442 vfs_context_t a_context;
3443 };
3444 #endif /* 0*/
3445 errno_t
3446 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3447 {
3448 struct vnop_write_args a;
3449 int _err;
3450 #if CONFIG_DTRACE
3451 user_ssize_t resid = uio_resid(uio);
3452 #endif
3453
3454 if (ctx == NULL) {
3455 return EINVAL;
3456 }
3457
3458 a.a_desc = &vnop_write_desc;
3459 a.a_vp = vp;
3460 a.a_uio = uio;
3461 a.a_ioflag = ioflag;
3462 a.a_context = ctx;
3463
3464 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3465 DTRACE_FSINFO_IO(write,
3466 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3467
3468 post_event_if_success(vp, _err, NOTE_WRITE);
3469
3470 return (_err);
3471 }
3472
3473
3474 #if 0
3475 /*
3476 *#
3477 *#% ioctl vp U U U
3478 *#
3479 */
3480 struct vnop_ioctl_args {
3481 struct vnodeop_desc *a_desc;
3482 vnode_t a_vp;
3483 u_long a_command;
3484 caddr_t a_data;
3485 int a_fflag;
3486 vfs_context_t a_context;
3487 };
3488 #endif /* 0*/
3489 errno_t
3490 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3491 {
3492 int _err;
3493 struct vnop_ioctl_args a;
3494
3495 if (ctx == NULL) {
3496 ctx = vfs_context_current();
3497 }
3498
3499 /*
3500 * This check should probably have been put in the TTY code instead...
3501 *
3502 * We have to be careful about what we assume during startup and shutdown.
3503 * We have to be able to use the root filesystem's device vnode even when
3504 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3505 * structure. If there is no data pointer, it doesn't matter whether
3506 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZE)
3507 * which passes NULL for its data pointer can therefore be used during
3508 * mount or unmount of the root filesystem.
3509 *
3510 * Depending on what root filesystems need to do during mount/unmount, we
3511 * may need to loosen this check again in the future.
3512 */
3513 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3514 if (data != NULL && !vnode_vfs64bitready(vp)) {
3515 return(ENOTTY);
3516 }
3517 }
3518
3519 a.a_desc = &vnop_ioctl_desc;
3520 a.a_vp = vp;
3521 a.a_command = command;
3522 a.a_data = data;
3523 a.a_fflag = fflag;
3524 a.a_context= ctx;
3525
3526 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
3527 DTRACE_FSINFO(ioctl, vnode_t, vp);
3528
3529 return (_err);
3530 }
3531
3532
3533 #if 0
3534 /*
3535 *#
3536 *#% select vp U U U
3537 *#
3538 */
3539 struct vnop_select_args {
3540 struct vnodeop_desc *a_desc;
3541 vnode_t a_vp;
3542 int a_which;
3543 int a_fflags;
3544 void *a_wql;
3545 vfs_context_t a_context;
3546 };
3547 #endif /* 0*/
3548 errno_t
3549 VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx)
3550 {
3551 int _err;
3552 struct vnop_select_args a;
3553
3554 if (ctx == NULL) {
3555 ctx = vfs_context_current();
3556 }
3557 a.a_desc = &vnop_select_desc;
3558 a.a_vp = vp;
3559 a.a_which = which;
3560 a.a_fflags = fflags;
3561 a.a_context = ctx;
3562 a.a_wql = wql;
3563
3564 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3565 DTRACE_FSINFO(select, vnode_t, vp);
3566
3567 return (_err);
3568 }
3569
3570
3571 #if 0
3572 /*
3573 *#
3574 *#% exchange fvp L L L
3575 *#% exchange tvp L L L
3576 *#
3577 */
3578 struct vnop_exchange_args {
3579 struct vnodeop_desc *a_desc;
3580 vnode_t a_fvp;
3581 vnode_t a_tvp;
3582 int a_options;
3583 vfs_context_t a_context;
3584 };
3585 #endif /* 0*/
3586 errno_t
3587 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3588 {
3589 int _err;
3590 struct vnop_exchange_args a;
3591
3592 a.a_desc = &vnop_exchange_desc;
3593 a.a_fvp = fvp;
3594 a.a_tvp = tvp;
3595 a.a_options = options;
3596 a.a_context = ctx;
3597
3598 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3599 DTRACE_FSINFO(exchange, vnode_t, fvp);
3600
3601 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3602 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3603 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3604
3605 return (_err);
3606 }
3607
3608
3609 #if 0
3610 /*
3611 *#
3612 *#% revoke vp U U U
3613 *#
3614 */
3615 struct vnop_revoke_args {
3616 struct vnodeop_desc *a_desc;
3617 vnode_t a_vp;
3618 int a_flags;
3619 vfs_context_t a_context;
3620 };
3621 #endif /* 0*/
3622 errno_t
3623 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
3624 {
3625 struct vnop_revoke_args a;
3626 int _err;
3627
3628 a.a_desc = &vnop_revoke_desc;
3629 a.a_vp = vp;
3630 a.a_flags = flags;
3631 a.a_context = ctx;
3632
3633 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3634 DTRACE_FSINFO(revoke, vnode_t, vp);
3635
3636 return (_err);
3637 }
3638
3639
3640 #if 0
3641 /*
3642 *#
3643 *# mmap - vp U U U
3644 *#
3645 */
3646 struct vnop_mmap_args {
3647 struct vnodeop_desc *a_desc;
3648 vnode_t a_vp;
3649 int a_fflags;
3650 vfs_context_t a_context;
3651 };
3652 #endif /* 0*/
3653 errno_t
3654 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
3655 {
3656 int _err;
3657 struct vnop_mmap_args a;
3658
3659 a.a_desc = &vnop_mmap_desc;
3660 a.a_vp = vp;
3661 a.a_fflags = fflags;
3662 a.a_context = ctx;
3663
3664 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3665 DTRACE_FSINFO(mmap, vnode_t, vp);
3666
3667 return (_err);
3668 }
3669
3670
3671 #if 0
3672 /*
3673 *#
3674 *# mnomap - vp U U U
3675 *#
3676 */
3677 struct vnop_mnomap_args {
3678 struct vnodeop_desc *a_desc;
3679 vnode_t a_vp;
3680 vfs_context_t a_context;
3681 };
3682 #endif /* 0*/
3683 errno_t
3684 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
3685 {
3686 int _err;
3687 struct vnop_mnomap_args a;
3688
3689 a.a_desc = &vnop_mnomap_desc;
3690 a.a_vp = vp;
3691 a.a_context = ctx;
3692
3693 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3694 DTRACE_FSINFO(mnomap, vnode_t, vp);
3695
3696 return (_err);
3697 }
3698
3699
3700 #if 0
3701 /*
3702 *#
3703 *#% fsync vp L L L
3704 *#
3705 */
3706 struct vnop_fsync_args {
3707 struct vnodeop_desc *a_desc;
3708 vnode_t a_vp;
3709 int a_waitfor;
3710 vfs_context_t a_context;
3711 };
3712 #endif /* 0*/
3713 errno_t
3714 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
3715 {
3716 struct vnop_fsync_args a;
3717 int _err;
3718
3719 a.a_desc = &vnop_fsync_desc;
3720 a.a_vp = vp;
3721 a.a_waitfor = waitfor;
3722 a.a_context = ctx;
3723
3724 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3725 DTRACE_FSINFO(fsync, vnode_t, vp);
3726
3727 return (_err);
3728 }
3729
3730
3731 #if 0
3732 /*
3733 *#
3734 *#% remove dvp L U U
3735 *#% remove vp L U U
3736 *#
3737 */
3738 struct vnop_remove_args {
3739 struct vnodeop_desc *a_desc;
3740 vnode_t a_dvp;
3741 vnode_t a_vp;
3742 struct componentname *a_cnp;
3743 int a_flags;
3744 vfs_context_t a_context;
3745 };
3746 #endif /* 0*/
3747 errno_t
3748 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
3749 {
3750 int _err;
3751 struct vnop_remove_args a;
3752
3753 a.a_desc = &vnop_remove_desc;
3754 a.a_dvp = dvp;
3755 a.a_vp = vp;
3756 a.a_cnp = cnp;
3757 a.a_flags = flags;
3758 a.a_context = ctx;
3759
3760 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3761 DTRACE_FSINFO(remove, vnode_t, vp);
3762
3763 if (_err == 0) {
3764 vnode_setneedinactive(vp);
3765 #if CONFIG_APPLEDOUBLE
3766 if ( !(NATIVE_XATTR(dvp)) ) {
3767 /*
3768 * Remove any associated extended attribute file (._ AppleDouble file).
3769 */
3770 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
3771 }
3772 #endif /* CONFIG_APPLEDOUBLE */
3773 }
3774
3775 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
3776 post_event_if_success(dvp, _err, NOTE_WRITE);
3777
3778 return (_err);
3779 }
3780
3781 int
3782 VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
3783 {
3784 int _err;
3785 struct vnop_compound_remove_args a;
3786 int no_vp = (*vpp == NULLVP);
3787
3788 a.a_desc = &vnop_compound_remove_desc;
3789 a.a_dvp = dvp;
3790 a.a_vpp = vpp;
3791 a.a_cnp = &ndp->ni_cnd;
3792 a.a_flags = flags;
3793 a.a_vap = vap;
3794 a.a_context = ctx;
3795 a.a_remove_authorizer = vn_authorize_unlink;
3796
3797 _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
3798 if (_err == 0 && *vpp) {
3799 DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
3800 } else {
3801 DTRACE_FSINFO(compound_remove, vnode_t, dvp);
3802 }
3803 if (_err == 0) {
3804 vnode_setneedinactive(*vpp);
3805 #if CONFIG_APPLEDOUBLE
3806 if ( !(NATIVE_XATTR(dvp)) ) {
3807 /*
3808 * Remove any associated extended attribute file (._ AppleDouble file).
3809 */
3810 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
3811 }
3812 #endif /* CONFIG_APPLEDOUBLE */
3813 }
3814
3815 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
3816 post_event_if_success(dvp, _err, NOTE_WRITE);
3817
3818 if (no_vp) {
3819 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
3820 if (*vpp && _err && _err != EKEEPLOOKING) {
3821 vnode_put(*vpp);
3822 *vpp = NULLVP;
3823 }
3824 }
3825
3826 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
3827
3828 return (_err);
3829 }
3830
3831 #if 0
3832 /*
3833 *#
3834 *#% link vp U U U
3835 *#% link tdvp L U U
3836 *#
3837 */
3838 struct vnop_link_args {
3839 struct vnodeop_desc *a_desc;
3840 vnode_t a_vp;
3841 vnode_t a_tdvp;
3842 struct componentname *a_cnp;
3843 vfs_context_t a_context;
3844 };
3845 #endif /* 0*/
3846 errno_t
3847 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
3848 {
3849 int _err;
3850 struct vnop_link_args a;
3851
3852 #if CONFIG_APPLEDOUBLE
3853 /*
3854 * For file systems with non-native extended attributes,
3855 * disallow linking to an existing "._" Apple Double file.
3856 */
3857 if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
3858 const char *vname;
3859
3860 vname = vnode_getname(vp);
3861 if (vname != NULL) {
3862 _err = 0;
3863 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
3864 _err = EPERM;
3865 }
3866 vnode_putname(vname);
3867 if (_err)
3868 return (_err);
3869 }
3870 }
3871 #endif /* CONFIG_APPLEDOUBLE */
3872
3873 a.a_desc = &vnop_link_desc;
3874 a.a_vp = vp;
3875 a.a_tdvp = tdvp;
3876 a.a_cnp = cnp;
3877 a.a_context = ctx;
3878
3879 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
3880 DTRACE_FSINFO(link, vnode_t, vp);
3881
3882 post_event_if_success(vp, _err, NOTE_LINK);
3883 post_event_if_success(tdvp, _err, NOTE_WRITE);
3884
3885 return (_err);
3886 }
3887
3888 errno_t
3889 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
3890 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
3891 vfs_rename_flags_t flags, vfs_context_t ctx)
3892 {
3893 int _err;
3894 struct nameidata *fromnd = NULL;
3895 struct nameidata *tond = NULL;
3896 #if CONFIG_APPLEDOUBLE
3897 vnode_t src_attr_vp = NULLVP;
3898 vnode_t dst_attr_vp = NULLVP;
3899 char smallname1[48];
3900 char smallname2[48];
3901 char *xfromname = NULL;
3902 char *xtoname = NULL;
3903 #endif /* CONFIG_APPLEDOUBLE */
3904 int batched;
3905 uint32_t tdfflags; // Target directory file flags
3906
3907 batched = vnode_compound_rename_available(fdvp);
3908
3909 if (!batched) {
3910 if (*fvpp == NULLVP)
3911 panic("Not batched, and no fvp?");
3912 }
3913
3914 #if CONFIG_APPLEDOUBLE
3915 /*
3916 * We need to preflight any potential AppleDouble file for the source file
3917 * before doing the rename operation, since we could potentially be doing
3918 * this operation on a network filesystem, and would end up duplicating
3919 * the work. Also, save the source and destination names. Skip it if the
3920 * source has a "._" prefix.
3921 */
3922
3923 if (!NATIVE_XATTR(fdvp) &&
3924 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
3925 size_t len;
3926 int error;
3927
3928 /* Get source attribute file name. */
3929 len = fcnp->cn_namelen + 3;
3930 if (len > sizeof(smallname1)) {
3931 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
3932 } else {
3933 xfromname = &smallname1[0];
3934 }
3935 strlcpy(xfromname, "._", min(sizeof smallname1, len));
3936 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
3937 xfromname[len-1] = '\0';
3938
3939 /* Get destination attribute file name. */
3940 len = tcnp->cn_namelen + 3;
3941 if (len > sizeof(smallname2)) {
3942 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
3943 } else {
3944 xtoname = &smallname2[0];
3945 }
3946 strlcpy(xtoname, "._", min(sizeof smallname2, len));
3947 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
3948 xtoname[len-1] = '\0';
3949
3950 /*
3951 * Look up source attribute file, keep reference on it if exists.
3952 * Note that we do the namei with the nameiop of RENAME, which is different than
3953 * in the rename syscall. It's OK if the source file does not exist, since this
3954 * is only for AppleDouble files.
3955 */
3956 if (xfromname != NULL) {
3957 MALLOC(fromnd, struct nameidata *, sizeof (struct nameidata), M_TEMP, M_WAITOK);
3958 NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
3959 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
3960 fromnd->ni_dvp = fdvp;
3961 error = namei(fromnd);
3962
3963 /*
3964 * If there was an error looking up source attribute file,
3965 * we'll behave as if it didn't exist.
3966 */
3967
3968 if (error == 0) {
3969 if (fromnd->ni_vp) {
3970 /* src_attr_vp indicates need to call vnode_put / nameidone later */
3971 src_attr_vp = fromnd->ni_vp;
3972
3973 if (fromnd->ni_vp->v_type != VREG) {
3974 src_attr_vp = NULLVP;
3975 vnode_put(fromnd->ni_vp);
3976 }
3977 }
3978 /*
3979 * Either we got an invalid vnode type (not a regular file) or the namei lookup
3980 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
3981 * have a vnode here, so we drop our namei buffer for the source attribute file
3982 */
3983 if (src_attr_vp == NULLVP) {
3984 nameidone(fromnd);
3985 }
3986 }
3987 }
3988 }
3989 #endif /* CONFIG_APPLEDOUBLE */
3990
3991 if (batched) {
3992 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
3993 if (_err != 0) {
3994 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
3995 }
3996 } else {
3997 if (flags) {
3998 _err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
3999 if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
4000 // Legacy...
4001 if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
4002 fcnp->cn_flags |= CN_SECLUDE_RENAME;
4003 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4004 }
4005 }
4006 } else
4007 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4008 }
4009
4010 /*
4011 * If moved to a new directory that is restricted,
4012 * set the restricted flag on the item moved.
4013 */
4014 if (_err == 0) {
4015 _err = vnode_flags(tdvp, &tdfflags, ctx);
4016 if (_err == 0 && (tdfflags & SF_RESTRICTED)) {
4017 uint32_t fflags;
4018 _err = vnode_flags(*fvpp, &fflags, ctx);
4019 if (_err == 0 && !(fflags & SF_RESTRICTED)) {
4020 struct vnode_attr va;
4021 VATTR_INIT(&va);
4022 VATTR_SET(&va, va_flags, fflags | SF_RESTRICTED);
4023 _err = vnode_setattr(*fvpp, &va, ctx);
4024 }
4025 }
4026 }
4027
4028 #if CONFIG_MACF
4029 if (_err == 0) {
4030 mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
4031 }
4032 #endif
4033
4034 #if CONFIG_APPLEDOUBLE
4035 /*
4036 * Rename any associated extended attribute file (._ AppleDouble file).
4037 */
4038 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
4039 int error = 0;
4040
4041 /*
4042 * Get destination attribute file vnode.
4043 * Note that tdvp already has an iocount reference. Make sure to check that we
4044 * get a valid vnode from namei.
4045 */
4046 MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK);
4047 NDINIT(tond, RENAME, OP_RENAME,
4048 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4049 CAST_USER_ADDR_T(xtoname), ctx);
4050 tond->ni_dvp = tdvp;
4051 error = namei(tond);
4052
4053 if (error)
4054 goto ad_error;
4055
4056 if (tond->ni_vp) {
4057 dst_attr_vp = tond->ni_vp;
4058 }
4059
4060 if (src_attr_vp) {
4061 const char *old_name = src_attr_vp->v_name;
4062 vnode_t old_parent = src_attr_vp->v_parent;
4063
4064 if (batched) {
4065 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
4066 tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
4067 0, ctx);
4068 } else {
4069 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
4070 tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
4071 }
4072
4073 if (error == 0 && old_name == src_attr_vp->v_name &&
4074 old_parent == src_attr_vp->v_parent) {
4075 int update_flags = VNODE_UPDATE_NAME;
4076
4077 if (fdvp != tdvp)
4078 update_flags |= VNODE_UPDATE_PARENT;
4079
4080 if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
4081 vnode_update_identity(src_attr_vp, tdvp,
4082 tond->ni_cnd.cn_nameptr,
4083 tond->ni_cnd.cn_namelen,
4084 tond->ni_cnd.cn_hash,
4085 update_flags);
4086 }
4087 }
4088
4089 /* kevent notifications for moving resource files
4090 * _err is zero if we're here, so no need to notify directories, code
4091 * below will do that. only need to post the rename on the source and
4092 * possibly a delete on the dest
4093 */
4094 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4095 if (dst_attr_vp) {
4096 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4097 }
4098
4099 } else if (dst_attr_vp) {
4100 /*
4101 * Just delete destination attribute file vnode if it exists, since
4102 * we didn't have a source attribute file.
4103 * Note that tdvp already has an iocount reference.
4104 */
4105
4106 struct vnop_remove_args args;
4107
4108 args.a_desc = &vnop_remove_desc;
4109 args.a_dvp = tdvp;
4110 args.a_vp = dst_attr_vp;
4111 args.a_cnp = &tond->ni_cnd;
4112 args.a_context = ctx;
4113
4114 if (error == 0) {
4115 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
4116
4117 if (error == 0)
4118 vnode_setneedinactive(dst_attr_vp);
4119 }
4120
4121 /* kevent notification for deleting the destination's attribute file
4122 * if it existed. Only need to post the delete on the destination, since
4123 * the code below will handle the directories.
4124 */
4125 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4126 }
4127 }
4128 ad_error:
4129 if (src_attr_vp) {
4130 vnode_put(src_attr_vp);
4131 nameidone(fromnd);
4132 }
4133 if (dst_attr_vp) {
4134 vnode_put(dst_attr_vp);
4135 nameidone(tond);
4136 }
4137 if (xfromname && xfromname != &smallname1[0]) {
4138 FREE(xfromname, M_TEMP);
4139 }
4140 if (xtoname && xtoname != &smallname2[0]) {
4141 FREE(xtoname, M_TEMP);
4142 }
4143 #endif /* CONFIG_APPLEDOUBLE */
4144 if (fromnd) {
4145 FREE(fromnd, M_TEMP);
4146 }
4147 if (tond) {
4148 FREE(tond, M_TEMP);
4149 }
4150 return _err;
4151 }
4152
4153
4154 #if 0
4155 /*
4156 *#
4157 *#% rename fdvp U U U
4158 *#% rename fvp U U U
4159 *#% rename tdvp L U U
4160 *#% rename tvp X U U
4161 *#
4162 */
4163 struct vnop_rename_args {
4164 struct vnodeop_desc *a_desc;
4165 vnode_t a_fdvp;
4166 vnode_t a_fvp;
4167 struct componentname *a_fcnp;
4168 vnode_t a_tdvp;
4169 vnode_t a_tvp;
4170 struct componentname *a_tcnp;
4171 vfs_context_t a_context;
4172 };
4173 #endif /* 0*/
4174 errno_t
4175 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4176 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4177 vfs_context_t ctx)
4178 {
4179 int _err = 0;
4180 struct vnop_rename_args a;
4181
4182 a.a_desc = &vnop_rename_desc;
4183 a.a_fdvp = fdvp;
4184 a.a_fvp = fvp;
4185 a.a_fcnp = fcnp;
4186 a.a_tdvp = tdvp;
4187 a.a_tvp = tvp;
4188 a.a_tcnp = tcnp;
4189 a.a_context = ctx;
4190
4191 /* do the rename of the main file. */
4192 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4193 DTRACE_FSINFO(rename, vnode_t, fdvp);
4194
4195 if (_err)
4196 return _err;
4197
4198 return post_rename(fdvp, fvp, tdvp, tvp);
4199 }
4200
4201 static errno_t
4202 post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
4203 {
4204 if (tvp && tvp != fvp)
4205 vnode_setneedinactive(tvp);
4206
4207 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4208 int events = NOTE_WRITE;
4209 if (vnode_isdir(fvp)) {
4210 /* Link count on dir changed only if we are moving a dir and...
4211 * --Moved to new dir, not overwriting there
4212 * --Kept in same dir and DID overwrite
4213 */
4214 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4215 events |= NOTE_LINK;
4216 }
4217 }
4218
4219 lock_vnode_and_post(fdvp, events);
4220 if (fdvp != tdvp) {
4221 lock_vnode_and_post(tdvp, events);
4222 }
4223
4224 /* If you're replacing the target, post a deletion for it */
4225 if (tvp)
4226 {
4227 lock_vnode_and_post(tvp, NOTE_DELETE);
4228 }
4229
4230 lock_vnode_and_post(fvp, NOTE_RENAME);
4231
4232 return 0;
4233 }
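
/*
 * Editor's example of the NOTE_LINK rule above (not from the original
 * source; the paths are hypothetical): moving directory "a/d" to "b/d"
 * when no "b/d" exists posts NOTE_LINK as well as NOTE_WRITE on both
 * parents, since each parent gains or loses a ".." back-reference.
 * Renaming "a/d" over an existing directory "a/e" within the same parent
 * also posts NOTE_LINK, because the parent ends up with one fewer
 * subdirectory.  All other combinations post only NOTE_WRITE on the
 * parent directories (plus NOTE_RENAME/NOTE_DELETE on the files).
 */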
4234
4235 #if 0
4236 /*
4237 *#
4238 *#% renamex fdvp U U U
4239 *#% renamex fvp U U U
4240 *#% renamex tdvp L U U
4241 *#% renamex tvp X U U
4242 *#
4243 */
4244 struct vnop_renamex_args {
4245 struct vnodeop_desc *a_desc;
4246 vnode_t a_fdvp;
4247 vnode_t a_fvp;
4248 struct componentname *a_fcnp;
4249 vnode_t a_tdvp;
4250 vnode_t a_tvp;
4251 struct componentname *a_tcnp;
4252 vfs_rename_flags_t a_flags;
4253 vfs_context_t a_context;
4254 };
4255 #endif /* 0*/
4256 errno_t
4257 VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4258 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4259 vfs_rename_flags_t flags, vfs_context_t ctx)
4260 {
4261 int _err = 0;
4262 struct vnop_renamex_args a;
4263
4264 a.a_desc = &vnop_renamex_desc;
4265 a.a_fdvp = fdvp;
4266 a.a_fvp = fvp;
4267 a.a_fcnp = fcnp;
4268 a.a_tdvp = tdvp;
4269 a.a_tvp = tvp;
4270 a.a_tcnp = tcnp;
4271 a.a_flags = flags;
4272 a.a_context = ctx;
4273
4274 /* do the rename of the main file. */
4275 _err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
4276 DTRACE_FSINFO(renamex, vnode_t, fdvp);
4277
4278 if (_err)
4279 return _err;
4280
4281 return post_rename(fdvp, fvp, tdvp, tvp);
4282 }
4283
4284
4285 int
4286 VNOP_COMPOUND_RENAME(
4287 struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4288 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4289 uint32_t flags, vfs_context_t ctx)
4290 {
4291 int _err = 0;
4292 int events;
4293 struct vnop_compound_rename_args a;
4294 int no_fvp, no_tvp;
4295
4296 no_fvp = (*fvpp) == NULLVP;
4297 no_tvp = (*tvpp) == NULLVP;
4298
4299 a.a_desc = &vnop_compound_rename_desc;
4300
4301 a.a_fdvp = fdvp;
4302 a.a_fvpp = fvpp;
4303 a.a_fcnp = fcnp;
4304 a.a_fvap = fvap;
4305
4306 a.a_tdvp = tdvp;
4307 a.a_tvpp = tvpp;
4308 a.a_tcnp = tcnp;
4309 a.a_tvap = tvap;
4310
4311 a.a_flags = flags;
4312 a.a_context = ctx;
4313 a.a_rename_authorizer = vn_authorize_rename;
4314 a.a_reserved = NULL;
4315
4316 /* do the rename of the main file. */
4317 _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
4318 DTRACE_FSINFO(compound_rename, vnode_t, fdvp);
4319
4320 if (_err == 0) {
4321 if (*tvpp && *tvpp != *fvpp)
4322 vnode_setneedinactive(*tvpp);
4323 }
4324
4325 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4326 if (_err == 0 && *fvpp != *tvpp) {
4327 if (!*fvpp) {
4328 panic("No fvpp after compound rename?");
4329 }
4330
4331 events = NOTE_WRITE;
4332 if (vnode_isdir(*fvpp)) {
4333 /* Link count on dir changed only if we are moving a dir and...
4334 * --Moved to new dir, not overwriting there
4335 * --Kept in same dir and DID overwrite
4336 */
4337 if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
4338 events |= NOTE_LINK;
4339 }
4340 }
4341
4342 lock_vnode_and_post(fdvp, events);
4343 if (fdvp != tdvp) {
4344 lock_vnode_and_post(tdvp, events);
4345 }
4346
4347 /* If you're replacing the target, post a deletion for it */
4348 if (*tvpp)
4349 {
4350 lock_vnode_and_post(*tvpp, NOTE_DELETE);
4351 }
4352
4353 lock_vnode_and_post(*fvpp, NOTE_RENAME);
4354 }
4355
4356 if (no_fvp) {
4357 lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
4358 }
4359 if (no_tvp && *tvpp != NULLVP) {
4360 lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
4361 }
4362
4363 if (_err && _err != EKEEPLOOKING) {
4364 if (*fvpp) {
4365 vnode_put(*fvpp);
4366 *fvpp = NULLVP;
4367 }
4368 if (*tvpp) {
4369 vnode_put(*tvpp);
4370 *tvpp = NULLVP;
4371 }
4372 }
4373
4374 return (_err);
4375 }
4376
4377 int
4378 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4379 struct vnode_attr *vap, vfs_context_t ctx)
4380 {
4381 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4382 panic("Non-CREATE nameiop in vn_mkdir()?");
4383 }
4384
4385 if (vnode_compound_mkdir_available(dvp)) {
4386 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4387 } else {
4388 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4389 }
4390 }
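
/*
 * Editor's sketch (not part of the original file): a minimal in-kernel use
 * of vn_mkdir(), loosely modeled on the mkdir(2) path.  The function name
 * "example_make_subdir" is hypothetical; authorization and most error
 * handling are omitted for brevity.
 */
static int
example_make_subdir(vnode_t dvp, const char *name, vfs_context_t ctx)
{
	struct nameidata nd;
	struct vnode_attr va;
	vnode_t vp;
	int error;

	/* Resolve "name" relative to dvp with CREATE intent. */
	NDINIT(&nd, CREATE, OP_MKDIR, LOCKPARENT | NOFOLLOW | USEDVP, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(name), ctx);
	nd.ni_dvp = dvp;
	if ((error = namei(&nd)) != 0)
		return (error);

	vp = nd.ni_vp;
	if (vp != NULLVP) {
		error = EEXIST;		/* the entry already exists */
	} else {
		VATTR_INIT(&va);
		VATTR_SET(&va, va_type, VDIR);
		VATTR_SET(&va, va_mode, S_IRWXU);

		/* vn_mkdir() picks the compound or classic VNOP for us. */
		error = vn_mkdir(nd.ni_dvp, &vp, &nd, &va, ctx);
	}

	nameidone(&nd);
	if (vp != NULLVP)
		vnode_put(vp);
	/* namei() took an extra iocount on the parent; drop it here. */
	vnode_put(nd.ni_dvp);
	return (error);
}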
4391
4392 #if 0
4393 /*
4394 *#
4395 *#% mkdir dvp L U U
4396 *#% mkdir vpp - L -
4397 *#
4398 */
4399 struct vnop_mkdir_args {
4400 struct vnodeop_desc *a_desc;
4401 vnode_t a_dvp;
4402 vnode_t *a_vpp;
4403 struct componentname *a_cnp;
4404 struct vnode_attr *a_vap;
4405 vfs_context_t a_context;
4406 };
4407 #endif /* 0*/
4408 errno_t
4409 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4410 struct vnode_attr *vap, vfs_context_t ctx)
4411 {
4412 int _err;
4413 struct vnop_mkdir_args a;
4414
4415 a.a_desc = &vnop_mkdir_desc;
4416 a.a_dvp = dvp;
4417 a.a_vpp = vpp;
4418 a.a_cnp = cnp;
4419 a.a_vap = vap;
4420 a.a_context = ctx;
4421
4422 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4423 if (_err == 0 && *vpp) {
4424 DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4425 }
4426 #if CONFIG_APPLEDOUBLE
4427 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4428 /*
4429 * Remove stale Apple Double file (if any).
4430 */
4431 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4432 }
4433 #endif /* CONFIG_APPLEDOUBLE */
4434
4435 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4436
4437 return (_err);
4438 }
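
/*
 * Editor's sketch (not in the original source): how a filesystem typically
 * supplies the handler that the dispatch above reaches through
 * v_op[vnop_mkdir_desc.vdesc_offset].  The "myfs_*" names are hypothetical;
 * the vnodeopv_entry_desc table is normally handed to the VFS through the
 * vfe_opvdescs field of struct vfs_fsentry when calling vfs_fsadd().
 */
static int myfs_vnop_mkdir(struct vnop_mkdir_args *ap);	/* FS implementation */

int (**myfs_vnodeop_p)(void *);		/* filled in when the FS registers */

static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
	{ &vnop_default_desc, (int (*)(void *))vn_default_error },
	{ &vnop_mkdir_desc,   (int (*)(void *))myfs_vnop_mkdir },
	{ (struct vnodeop_desc *)NULL, (int (*)(void *))NULL }
};

struct vnodeopv_desc myfs_vnodeop_opv_desc = {
	&myfs_vnodeop_p, myfs_vnodeop_entries
};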
4439
4440 int
4441 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4442 struct vnode_attr *vap, vfs_context_t ctx)
4443 {
4444 int _err;
4445 struct vnop_compound_mkdir_args a;
4446
4447 a.a_desc = &vnop_compound_mkdir_desc;
4448 a.a_dvp = dvp;
4449 a.a_vpp = vpp;
4450 a.a_cnp = &ndp->ni_cnd;
4451 a.a_vap = vap;
4452 a.a_flags = 0;
4453 a.a_context = ctx;
4454 #if 0
4455 a.a_mkdir_authorizer = vn_authorize_mkdir;
4456 #endif /* 0 */
4457 a.a_reserved = NULL;
4458
4459 _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
4460 if (_err == 0 && *vpp) {
4461 DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
4462 }
4463 #if CONFIG_APPLEDOUBLE
4464 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4465 /*
4466 * Remove stale Apple Double file (if any).
4467 */
4468 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4469 }
4470 #endif /* CONFIG_APPLEDOUBLE */
4471
4472 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4473
4474 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
4475 if (*vpp && _err && _err != EKEEPLOOKING) {
4476 vnode_put(*vpp);
4477 *vpp = NULLVP;
4478 }
4479
4480 return (_err);
4481 }
4482
4483 int
4484 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4485 {
4486 if (vnode_compound_rmdir_available(dvp)) {
4487 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4488 } else {
4489 if (*vpp == NULLVP) {
4490 panic("NULL vp, but not a compound VNOP?");
4491 }
4492 if (vap != NULL) {
4493 panic("Non-NULL vap, but not a compound VNOP?");
4494 }
4495 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4496 }
4497 }
4498
4499 #if 0
4500 /*
4501 *#
4502 *#% rmdir dvp L U U
4503 *#% rmdir vp L U U
4504 *#
4505 */
4506 struct vnop_rmdir_args {
4507 struct vnodeop_desc *a_desc;
4508 vnode_t a_dvp;
4509 vnode_t a_vp;
4510 struct componentname *a_cnp;
4511 vfs_context_t a_context;
4512 };
4513
4514 #endif /* 0*/
4515 errno_t
4516 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4517 {
4518 int _err;
4519 struct vnop_rmdir_args a;
4520
4521 a.a_desc = &vnop_rmdir_desc;
4522 a.a_dvp = dvp;
4523 a.a_vp = vp;
4524 a.a_cnp = cnp;
4525 a.a_context = ctx;
4526
4527 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4528 DTRACE_FSINFO(rmdir, vnode_t, vp);
4529
4530 if (_err == 0) {
4531 vnode_setneedinactive(vp);
4532 #if CONFIG_APPLEDOUBLE
4533 if ( !(NATIVE_XATTR(dvp)) ) {
4534 /*
4535 * Remove any associated extended attribute file (._ AppleDouble file).
4536 */
4537 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4538 }
4539 #endif
4540 }
4541
4542 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4543 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4544 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4545
4546 return (_err);
4547 }
4548
4549 int
4550 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4551 struct vnode_attr *vap, vfs_context_t ctx)
4552 {
4553 int _err;
4554 struct vnop_compound_rmdir_args a;
4555 int no_vp;
4556
4557 a.a_desc = &vnop_compound_rmdir_desc;
4558 a.a_dvp = dvp;
4559 a.a_vpp = vpp;
4560 a.a_cnp = &ndp->ni_cnd;
4561 a.a_vap = vap;
4562 a.a_flags = 0;
4563 a.a_context = ctx;
4564 a.a_rmdir_authorizer = vn_authorize_rmdir;
4565 a.a_reserved = NULL;
4566
4567 no_vp = (*vpp == NULLVP);
4568
4569 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
4570 if (_err == 0 && *vpp) {
4571 DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
4572 }
4573 #if CONFIG_APPLEDOUBLE
4574 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4575 /*
4576 * Remove stale Apple Double file (if any).
4577 */
4578 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4579 }
4580 #endif
4581
4582 if (*vpp) {
4583 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4584 }
4585 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4586
4587 if (no_vp) {
4588 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4589
4590 #if 0 /* Removing orphaned ._ files requires a vp.... */
4591 if (*vpp && _err && _err != EKEEPLOOKING) {
4592 vnode_put(*vpp);
4593 *vpp = NULLVP;
4594 }
4595 #endif /* 0 */
4596 }
4597
4598 return (_err);
4599 }
4600
4601 #if CONFIG_APPLEDOUBLE
4602 /*
4603 * Remove a ._ AppleDouble file
4604 */
4605 #define AD_STALE_SECS (180)
4606 static void
4607 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
4608 {
4609 vnode_t xvp;
4610 struct nameidata nd;
4611 char smallname[64];
4612 char *filename = NULL;
4613 size_t len;
4614
4615 if ((basename == NULL) || (basename[0] == '\0') ||
4616 (basename[0] == '.' && basename[1] == '_')) {
4617 return;
4618 }
4619 filename = &smallname[0];
4620 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4621 if (len >= sizeof(smallname)) {
4622 len++; /* snprintf result doesn't include '\0' */
4623 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4624 len = snprintf(filename, len, "._%s", basename);
4625 }
4626 NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
4627 CAST_USER_ADDR_T(filename), ctx);
4628 nd.ni_dvp = dvp;
4629 if (namei(&nd) != 0)
4630 goto out2;
4631
4632 xvp = nd.ni_vp;
4633 nameidone(&nd);
4634 if (xvp->v_type != VREG)
4635 goto out1;
4636
4637 /*
4638 * When creating a new object and a "._" file already
4639 * exists, check whether it is a stale "._" file: one that is
4640 * non-empty and has not been modified within AD_STALE_SECS.
4641 */
4642 if (!force) {
4643 struct vnode_attr va;
4644
4645 VATTR_INIT(&va);
4646 VATTR_WANTED(&va, va_data_size);
4647 VATTR_WANTED(&va, va_modify_time);
4648 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
4649 VATTR_IS_SUPPORTED(&va, va_data_size) &&
4650 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
4651 va.va_data_size != 0) {
4652 struct timeval tv;
4653
4654 microtime(&tv);
4655 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
4656 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
4657 force = 1; /* must be stale */
4658 }
4659 }
4660 }
4661 if (force) {
4662 int error;
4663
4664 error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
4665 if (error == 0)
4666 vnode_setneedinactive(xvp);
4667
4668 post_event_if_success(xvp, error, NOTE_DELETE);
4669 post_event_if_success(dvp, error, NOTE_WRITE);
4670 }
4671
4672 out1:
4673 vnode_put(dvp);
4674 vnode_put(xvp);
4675 out2:
4676 if (filename && filename != &smallname[0]) {
4677 FREE(filename, M_TEMP);
4678 }
4679 }
4680
4681 /*
4682 * Shadow uid/gid/mod to a ._ AppleDouble file
4683 */
4684 static void
4685 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
4686 vfs_context_t ctx)
4687 {
4688 vnode_t xvp;
4689 struct nameidata nd;
4690 char smallname[64];
4691 char *filename = NULL;
4692 size_t len;
4693
4694 if ((dvp == NULLVP) ||
4695 (basename == NULL) || (basename[0] == '\0') ||
4696 (basename[0] == '.' && basename[1] == '_')) {
4697 return;
4698 }
4699 filename = &smallname[0];
4700 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4701 if (len >= sizeof(smallname)) {
4702 len++; /* snprintf result doesn't include '\0' */
4703 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4704 len = snprintf(filename, len, "._%s", basename);
4705 }
4706 NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
4707 CAST_USER_ADDR_T(filename), ctx);
4708 nd.ni_dvp = dvp;
4709 if (namei(&nd) != 0)
4710 goto out2;
4711
4712 xvp = nd.ni_vp;
4713 nameidone(&nd);
4714
4715 if (xvp->v_type == VREG) {
4716 struct vnop_setattr_args a;
4717
4718 a.a_desc = &vnop_setattr_desc;
4719 a.a_vp = xvp;
4720 a.a_vap = vap;
4721 a.a_context = ctx;
4722
4723 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
4724 }
4725
4726 vnode_put(xvp);
4727 out2:
4728 if (filename && filename != &smallname[0]) {
4729 FREE(filename, M_TEMP);
4730 }
4731 }
4732 #endif /* CONFIG_APPLEDOUBLE */
4733
4734 #if 0
4735 /*
4736 *#
4737 *#% symlink dvp L U U
4738 *#% symlink vpp - U -
4739 *#
4740 */
4741 struct vnop_symlink_args {
4742 struct vnodeop_desc *a_desc;
4743 vnode_t a_dvp;
4744 vnode_t *a_vpp;
4745 struct componentname *a_cnp;
4746 struct vnode_attr *a_vap;
4747 char *a_target;
4748 vfs_context_t a_context;
4749 };
4750
4751 #endif /* 0*/
4752 errno_t
4753 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4754 struct vnode_attr *vap, char *target, vfs_context_t ctx)
4755 {
4756 int _err;
4757 struct vnop_symlink_args a;
4758
4759 a.a_desc = &vnop_symlink_desc;
4760 a.a_dvp = dvp;
4761 a.a_vpp = vpp;
4762 a.a_cnp = cnp;
4763 a.a_vap = vap;
4764 a.a_target = target;
4765 a.a_context = ctx;
4766
4767 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
4768 DTRACE_FSINFO(symlink, vnode_t, dvp);
4769 #if CONFIG_APPLEDOUBLE
4770 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4771 /*
4772 * Remove stale Apple Double file (if any). Posts its own knotes
4773 */
4774 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4775 }
4776 #endif /* CONFIG_APPLEDOUBLE */
4777
4778 post_event_if_success(dvp, _err, NOTE_WRITE);
4779
4780 return (_err);
4781 }
4782
4783 #if 0
4784 /*
4785 *#
4786 *#% readdir vp L L L
4787 *#
4788 */
4789 struct vnop_readdir_args {
4790 struct vnodeop_desc *a_desc;
4791 vnode_t a_vp;
4792 struct uio *a_uio;
4793 int a_flags;
4794 int *a_eofflag;
4795 int *a_numdirent;
4796 vfs_context_t a_context;
4797 };
4798
4799 #endif /* 0*/
4800 errno_t
4801 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
4802 int *numdirent, vfs_context_t ctx)
4803 {
4804 int _err;
4805 struct vnop_readdir_args a;
4806 #if CONFIG_DTRACE
4807 user_ssize_t resid = uio_resid(uio);
4808 #endif
4809
4810 a.a_desc = &vnop_readdir_desc;
4811 a.a_vp = vp;
4812 a.a_uio = uio;
4813 a.a_flags = flags;
4814 a.a_eofflag = eofflag;
4815 a.a_numdirent = numdirent;
4816 a.a_context = ctx;
4817
4818 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
4819 DTRACE_FSINFO_IO(readdir,
4820 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4821
4822 return (_err);
4823 }
4824
4825 #if 0
4826 /*
4827 *#
4828 *#% readdirattr vp L L L
4829 *#
4830 */
4831 struct vnop_readdirattr_args {
4832 struct vnodeop_desc *a_desc;
4833 vnode_t a_vp;
4834 struct attrlist *a_alist;
4835 struct uio *a_uio;
4836 uint32_t a_maxcount;
4837 uint32_t a_options;
4838 uint32_t *a_newstate;
4839 int *a_eofflag;
4840 uint32_t *a_actualcount;
4841 vfs_context_t a_context;
4842 };
4843
4844 #endif /* 0*/
4845 errno_t
4846 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
4847 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
4848 {
4849 int _err;
4850 struct vnop_readdirattr_args a;
4851 #if CONFIG_DTRACE
4852 user_ssize_t resid = uio_resid(uio);
4853 #endif
4854
4855 a.a_desc = &vnop_readdirattr_desc;
4856 a.a_vp = vp;
4857 a.a_alist = alist;
4858 a.a_uio = uio;
4859 a.a_maxcount = maxcount;
4860 a.a_options = options;
4861 a.a_newstate = newstate;
4862 a.a_eofflag = eofflag;
4863 a.a_actualcount = actualcount;
4864 a.a_context = ctx;
4865
4866 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
4867 DTRACE_FSINFO_IO(readdirattr,
4868 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4869
4870 return (_err);
4871 }
4872
4873 #if 0
4874 struct vnop_getattrlistbulk_args {
4875 struct vnodeop_desc *a_desc;
4876 vnode_t a_vp;
4877 struct attrlist *a_alist;
4878 struct vnode_attr *a_vap;
4879 struct uio *a_uio;
4880 void *a_private;
4881 uint64_t a_options;
4882 int *a_eofflag;
4883 uint32_t *a_actualcount;
4884 vfs_context_t a_context;
4885 };
4886 #endif /* 0*/
4887 errno_t
4888 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
4889 struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
4890 int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
4891 {
4892 int _err;
4893 struct vnop_getattrlistbulk_args a;
4894 #if CONFIG_DTRACE
4895 user_ssize_t resid = uio_resid(uio);
4896 #endif
4897
4898 a.a_desc = &vnop_getattrlistbulk_desc;
4899 a.a_vp = vp;
4900 a.a_alist = alist;
4901 a.a_vap = vap;
4902 a.a_uio = uio;
4903 a.a_private = private;
4904 a.a_options = options;
4905 a.a_eofflag = eofflag;
4906 a.a_actualcount = actualcount;
4907 a.a_context = ctx;
4908
4909 _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
4910 DTRACE_FSINFO_IO(getattrlistbulk,
4911 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4912
4913 return (_err);
4914 }
4915
4916 #if 0
4917 /*
4918 *#
4919 *#% readlink vp L L L
4920 *#
4921 */
4922 struct vnop_readlink_args {
4923 struct vnodeop_desc *a_desc;
4924 vnode_t a_vp;
4925 struct uio *a_uio;
4926 vfs_context_t a_context;
4927 };
4928 #endif /* 0 */
4929
4930 /*
4931 * Returns: 0 Success
4932 * lock_fsnode:ENOENT No such file or directory [only for VFS
4933 * that is not thread safe & vnode is
4934 * currently being/has been terminated]
4935 * <vfs_readlink>:EINVAL
4936 * <vfs_readlink>:???
4937 *
4938 * Note: The return codes from the underlying VFS's readlink routine
4939 * can't be fully enumerated here, since third party VFS authors
4940 * may not limit their error returns to the ones documented here,
4941 * even though this may result in some programs functioning
4942 * incorrectly.
4943 *
4944 * The return codes documented above are those which may currently
4945 * be returned by HFS from hfs_vnop_readlink, not including
4946 * additional error code which may be propagated from underlying
4947 * routines.
4948 */
4949 errno_t
4950 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
4951 {
4952 int _err;
4953 struct vnop_readlink_args a;
4954 #if CONFIG_DTRACE
4955 user_ssize_t resid = uio_resid(uio);
4956 #endif
4957 a.a_desc = &vnop_readlink_desc;
4958 a.a_vp = vp;
4959 a.a_uio = uio;
4960 a.a_context = ctx;
4961
4962 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
4963 DTRACE_FSINFO_IO(readlink,
4964 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4965
4966 return (_err);
4967 }
4968
4969 #if 0
4970 /*
4971 *#
4972 *#% inactive vp L U U
4973 *#
4974 */
4975 struct vnop_inactive_args {
4976 struct vnodeop_desc *a_desc;
4977 vnode_t a_vp;
4978 vfs_context_t a_context;
4979 };
4980 #endif /* 0*/
4981 errno_t
4982 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
4983 {
4984 int _err;
4985 struct vnop_inactive_args a;
4986
4987 a.a_desc = &vnop_inactive_desc;
4988 a.a_vp = vp;
4989 a.a_context = ctx;
4990
4991 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
4992 DTRACE_FSINFO(inactive, vnode_t, vp);
4993
4994 #if NAMEDSTREAMS
4995 /* For file systems that do not support named streams natively, mark
4996 * the shadow stream file vnode to be recycled as soon as the last
4997 * reference goes away. To avoid re-entering reclaim code, do not
4998 * call recycle on terminating namedstream vnodes.
4999 */
5000 if (vnode_isnamedstream(vp) &&
5001 (vp->v_parent != NULLVP) &&
5002 vnode_isshadow(vp) &&
5003 ((vp->v_lflag & VL_TERMINATE) == 0)) {
5004 vnode_recycle(vp);
5005 }
5006 #endif
5007
5008 return (_err);
5009 }
5010
5011
5012 #if 0
5013 /*
5014 *#
5015 *#% reclaim vp U U U
5016 *#
5017 */
5018 struct vnop_reclaim_args {
5019 struct vnodeop_desc *a_desc;
5020 vnode_t a_vp;
5021 vfs_context_t a_context;
5022 };
5023 #endif /* 0*/
5024 errno_t
5025 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5026 {
5027 int _err;
5028 struct vnop_reclaim_args a;
5029
5030 a.a_desc = &vnop_reclaim_desc;
5031 a.a_vp = vp;
5032 a.a_context = ctx;
5033
5034 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5035 DTRACE_FSINFO(reclaim, vnode_t, vp);
5036
5037 return (_err);
5038 }
5039
5040
5041 /*
5042 * Returns: 0 Success
5043 * lock_fsnode:ENOENT No such file or directory [only for VFS
5044 * that is not thread safe & vnode is
5045 * currently being/has been terminated]
5046 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5047 */
5048 #if 0
5049 /*
5050 *#
5051 *#% pathconf vp L L L
5052 *#
5053 */
5054 struct vnop_pathconf_args {
5055 struct vnodeop_desc *a_desc;
5056 vnode_t a_vp;
5057 int a_name;
5058 int32_t *a_retval;
5059 vfs_context_t a_context;
5060 };
5061 #endif /* 0*/
5062 errno_t
5063 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5064 {
5065 int _err;
5066 struct vnop_pathconf_args a;
5067
5068 a.a_desc = &vnop_pathconf_desc;
5069 a.a_vp = vp;
5070 a.a_name = name;
5071 a.a_retval = retval;
5072 a.a_context = ctx;
5073
5074 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5075 DTRACE_FSINFO(pathconf, vnode_t, vp);
5076
5077 return (_err);
5078 }
5079
5080 /*
5081 * Returns: 0 Success
5082 * err_advlock:ENOTSUP
5083 * lf_advlock:???
5084 * <vnop_advlock_desc>:???
5085 *
5086 * Notes: VFS implementations that provide advisory locking through
5087 * <vnop_advlock_desc> (because lock enforcement does not occur
5088 * locally) should try to limit themselves to the return codes
5089 * documented above for lf_advlock and err_advlock.
5090 */
5091 #if 0
5092 /*
5093 *#
5094 *#% advlock vp U U U
5095 *#
5096 */
5097 struct vnop_advlock_args {
5098 struct vnodeop_desc *a_desc;
5099 vnode_t a_vp;
5100 caddr_t a_id;
5101 int a_op;
5102 struct flock *a_fl;
5103 int a_flags;
5104 vfs_context_t a_context;
5105 };
5106 #endif /* 0*/
5107 errno_t
5108 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
5109 {
5110 int _err;
5111 struct vnop_advlock_args a;
5112
5113 a.a_desc = &vnop_advlock_desc;
5114 a.a_vp = vp;
5115 a.a_id = id;
5116 a.a_op = op;
5117 a.a_fl = fl;
5118 a.a_flags = flags;
5119 a.a_context = ctx;
5120 a.a_timeout = timeout;
5121
5122 /* Disallow advisory locking on non-seekable vnodes */
5123 if (vnode_isfifo(vp)) {
5124 _err = err_advlock(&a);
5125 } else {
5126 if ((vp->v_flag & VLOCKLOCAL)) {
5127 /* Advisory locking done at this layer */
5128 _err = lf_advlock(&a);
5129 } else if (flags & F_OFD_LOCK) {
5130 /* Non-local locking doesn't work for OFD locks */
5131 _err = err_advlock(&a);
5132 } else {
5133 /* Advisory locking done by underlying filesystem */
5134 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
5135 }
5136 DTRACE_FSINFO(advlock, vnode_t, vp);
5137 if (op == F_UNLCK && flags == F_FLOCK)
5138 post_event_if_success(vp, _err, NOTE_FUNLOCK);
5139 }
5140
5141 return (_err);
5142 }
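
/*
 * Editor's sketch (not in the original source): acquiring and releasing a
 * whole-file advisory lock through VNOP_ADVLOCK, roughly as the in-kernel
 * flock() path does.  The function name and "lock_id" are hypothetical;
 * the kernel normally uses a stable pointer (such as the open file's
 * fileglob) as the lock owner id.
 */
static int
example_flock(vnode_t vp, caddr_t lock_id, vfs_context_t ctx)
{
	struct flock lf;
	int error;

	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;			/* zero length == to end of file */
	lf.l_type = F_WRLCK;

	/* Block until the exclusive lock is granted. */
	error = VNOP_ADVLOCK(vp, lock_id, F_SETLK, &lf, F_FLOCK | F_WAIT, ctx, NULL);
	if (error)
		return (error);

	/* ... the locked range is protected here ... */

	/* Release: this is the F_UNLCK + F_FLOCK case that posts NOTE_FUNLOCK. */
	lf.l_type = F_UNLCK;
	return (VNOP_ADVLOCK(vp, lock_id, F_UNLCK, &lf, F_FLOCK, ctx, NULL));
}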
5143
5144
5145
5146 #if 0
5147 /*
5148 *#
5149 *#% allocate vp L L L
5150 *#
5151 */
5152 struct vnop_allocate_args {
5153 struct vnodeop_desc *a_desc;
5154 vnode_t a_vp;
5155 off_t a_length;
5156 u_int32_t a_flags;
5157 off_t *a_bytesallocated;
5158 off_t a_offset;
5159 vfs_context_t a_context;
5160 };
5161
5162 #endif /* 0*/
5163 errno_t
5164 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5165 {
5166 int _err;
5167 struct vnop_allocate_args a;
5168
5169 a.a_desc = &vnop_allocate_desc;
5170 a.a_vp = vp;
5171 a.a_length = length;
5172 a.a_flags = flags;
5173 a.a_bytesallocated = bytesallocated;
5174 a.a_offset = offset;
5175 a.a_context = ctx;
5176
5177 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5178 DTRACE_FSINFO(allocate, vnode_t, vp);
5179 #if CONFIG_FSE
5180 if (_err == 0) {
5181 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5182 }
5183 #endif
5184
5185 return (_err);
5186 }
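
/*
 * Editor's sketch (not in the original source): asking the filesystem to
 * preallocate backing store out to "new_size".  ALLOCATEALL requests an
 * all-or-nothing allocation; the function name is hypothetical.
 */
static int
example_preallocate(vnode_t vp, off_t new_size, vfs_context_t ctx)
{
	off_t allocated = 0;
	int error;

	error = VNOP_ALLOCATE(vp, new_size, ALLOCATEALL, &allocated, 0, ctx);
	/* On success, "allocated" reports how many bytes were actually added. */
	return (error);
}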
5187
5188 #if 0
5189 /*
5190 *#
5191 *#% pagein vp = = =
5192 *#
5193 */
5194 struct vnop_pagein_args {
5195 struct vnodeop_desc *a_desc;
5196 vnode_t a_vp;
5197 upl_t a_pl;
5198 upl_offset_t a_pl_offset;
5199 off_t a_f_offset;
5200 size_t a_size;
5201 int a_flags;
5202 vfs_context_t a_context;
5203 };
5204 #endif /* 0*/
5205 errno_t
5206 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5207 {
5208 int _err;
5209 struct vnop_pagein_args a;
5210
5211 a.a_desc = &vnop_pagein_desc;
5212 a.a_vp = vp;
5213 a.a_pl = pl;
5214 a.a_pl_offset = pl_offset;
5215 a.a_f_offset = f_offset;
5216 a.a_size = size;
5217 a.a_flags = flags;
5218 a.a_context = ctx;
5219
5220 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5221 DTRACE_FSINFO(pagein, vnode_t, vp);
5222
5223 return (_err);
5224 }
5225
5226 #if 0
5227 /*
5228 *#
5229 *#% pageout vp = = =
5230 *#
5231 */
5232 struct vnop_pageout_args {
5233 struct vnodeop_desc *a_desc;
5234 vnode_t a_vp;
5235 upl_t a_pl;
5236 upl_offset_t a_pl_offset;
5237 off_t a_f_offset;
5238 size_t a_size;
5239 int a_flags;
5240 vfs_context_t a_context;
5241 };
5242
5243 #endif /* 0*/
5244 errno_t
5245 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5246 {
5247 int _err;
5248 struct vnop_pageout_args a;
5249
5250 a.a_desc = &vnop_pageout_desc;
5251 a.a_vp = vp;
5252 a.a_pl = pl;
5253 a.a_pl_offset = pl_offset;
5254 a.a_f_offset = f_offset;
5255 a.a_size = size;
5256 a.a_flags = flags;
5257 a.a_context = ctx;
5258
5259 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5260 DTRACE_FSINFO(pageout, vnode_t, vp);
5261
5262 post_event_if_success(vp, _err, NOTE_WRITE);
5263
5264 return (_err);
5265 }
5266
5267 int
5268 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5269 {
5270 if (vnode_compound_remove_available(dvp)) {
5271 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5272 } else {
5273 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5274 }
5275 }
5276
5277 #if CONFIG_SEARCHFS
5278
5279 #if 0
5280 /*
5281 *#
5282 *#% searchfs vp L L L
5283 *#
5284 */
5285 struct vnop_searchfs_args {
5286 struct vnodeop_desc *a_desc;
5287 vnode_t a_vp;
5288 void *a_searchparams1;
5289 void *a_searchparams2;
5290 struct attrlist *a_searchattrs;
5291 uint32_t a_maxmatches;
5292 struct timeval *a_timelimit;
5293 struct attrlist *a_returnattrs;
5294 uint32_t *a_nummatches;
5295 uint32_t a_scriptcode;
5296 uint32_t a_options;
5297 struct uio *a_uio;
5298 struct searchstate *a_searchstate;
5299 vfs_context_t a_context;
5300 };
5301
5302 #endif /* 0*/
5303 errno_t
5304 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5305 {
5306 int _err;
5307 struct vnop_searchfs_args a;
5308
5309 a.a_desc = &vnop_searchfs_desc;
5310 a.a_vp = vp;
5311 a.a_searchparams1 = searchparams1;
5312 a.a_searchparams2 = searchparams2;
5313 a.a_searchattrs = searchattrs;
5314 a.a_maxmatches = maxmatches;
5315 a.a_timelimit = timelimit;
5316 a.a_returnattrs = returnattrs;
5317 a.a_nummatches = nummatches;
5318 a.a_scriptcode = scriptcode;
5319 a.a_options = options;
5320 a.a_uio = uio;
5321 a.a_searchstate = searchstate;
5322 a.a_context = ctx;
5323
5324 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5325 DTRACE_FSINFO(searchfs, vnode_t, vp);
5326
5327 return (_err);
5328 }
5329 #endif /* CONFIG_SEARCHFS */
5330
5331 #if 0
5332 /*
5333 *#
5334 *#% copyfile fvp U U U
5335 *#% copyfile tdvp L U U
5336 *#% copyfile tvp X U U
5337 *#
5338 */
5339 struct vnop_copyfile_args {
5340 struct vnodeop_desc *a_desc;
5341 vnode_t a_fvp;
5342 vnode_t a_tdvp;
5343 vnode_t a_tvp;
5344 struct componentname *a_tcnp;
5345 int a_mode;
5346 int a_flags;
5347 vfs_context_t a_context;
5348 };
5349 #endif /* 0*/
5350 errno_t
5351 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5352 int mode, int flags, vfs_context_t ctx)
5353 {
5354 int _err;
5355 struct vnop_copyfile_args a;
5356 a.a_desc = &vnop_copyfile_desc;
5357 a.a_fvp = fvp;
5358 a.a_tdvp = tdvp;
5359 a.a_tvp = tvp;
5360 a.a_tcnp = tcnp;
5361 a.a_mode = mode;
5362 a.a_flags = flags;
5363 a.a_context = ctx;
5364 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5365 DTRACE_FSINFO(copyfile, vnode_t, fvp);
5366 return (_err);
5367 }
5368
5369 #if 0
5370 struct vnop_clonefile_args {
5371 struct vnodeop_desc *a_desc;
5372 vnode_t a_fvp;
5373 vnode_t a_dvp;
5374 vnode_t *a_vpp;
5375 struct componentname *a_cnp;
5376 struct vnode_attr *a_vap;
5377 uint32_t a_flags;
5378 vfs_context_t a_context;
5379 int (*a_dir_clone_authorizer)( /* Authorization callback */
5380 struct vnode_attr *vap, /* attribute to be authorized */
5381 kauth_action_t action, /* action for which attribute is to be authorized */
5382 struct vnode_attr *dvap, /* target directory attributes */
5383 vnode_t sdvp, /* source directory vnode pointer (optional) */
5384 mount_t mp, /* mount point of filesystem */
5385 dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */
5386 vfs_context_t ctx, /* As passed to VNOP */
5387 void *reserved); /* Always NULL */
5388 void *a_reserved; /* Currently unused */
5389 };
5390 #endif /* 0 */
5391
5392 errno_t
5393 VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
5394 struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
5395 vfs_context_t ctx)
5396 {
5397 int _err;
5398 struct vnop_clonefile_args a;
5399 a.a_desc = &vnop_clonefile_desc;
5400 a.a_fvp = fvp;
5401 a.a_dvp = dvp;
5402 a.a_vpp = vpp;
5403 a.a_cnp = cnp;
5404 a.a_vap = vap;
5405 a.a_flags = flags;
5406 a.a_context = ctx;
5407
5408 if (vnode_vtype(fvp) == VDIR)
5409 a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
5410 else
5411 a.a_dir_clone_authorizer = NULL;
5412
5413 _err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);
5414
5415 if (_err == 0 && *vpp)
5416 DTRACE_FSINFO(clonefile, vnode_t, *vpp);
5417
5418 post_event_if_success(dvp, _err, NOTE_WRITE);
5419
5420 return (_err);
5421 }
5422
5423 errno_t
5424 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5425 {
5426 struct vnop_getxattr_args a;
5427 int error;
5428
5429 a.a_desc = &vnop_getxattr_desc;
5430 a.a_vp = vp;
5431 a.a_name = name;
5432 a.a_uio = uio;
5433 a.a_size = size;
5434 a.a_options = options;
5435 a.a_context = ctx;
5436
5437 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
5438 DTRACE_FSINFO(getxattr, vnode_t, vp);
5439
5440 return (error);
5441 }
5442
5443 errno_t
5444 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
5445 {
5446 struct vnop_setxattr_args a;
5447 int error;
5448
5449 a.a_desc = &vnop_setxattr_desc;
5450 a.a_vp = vp;
5451 a.a_name = name;
5452 a.a_uio = uio;
5453 a.a_options = options;
5454 a.a_context = ctx;
5455
5456 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
5457 DTRACE_FSINFO(setxattr, vnode_t, vp);
5458
5459 if (error == 0)
5460 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
5461
5462 post_event_if_success(vp, error, NOTE_ATTRIB);
5463
5464 return (error);
5465 }
5466
5467 errno_t
5468 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
5469 {
5470 struct vnop_removexattr_args a;
5471 int error;
5472
5473 a.a_desc = &vnop_removexattr_desc;
5474 a.a_vp = vp;
5475 a.a_name = name;
5476 a.a_options = options;
5477 a.a_context = ctx;
5478
5479 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
5480 DTRACE_FSINFO(removexattr, vnode_t, vp);
5481
5482 post_event_if_success(vp, error, NOTE_ATTRIB);
5483
5484 return (error);
5485 }
5486
5487 errno_t
5488 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5489 {
5490 struct vnop_listxattr_args a;
5491 int error;
5492
5493 a.a_desc = &vnop_listxattr_desc;
5494 a.a_vp = vp;
5495 a.a_uio = uio;
5496 a.a_size = size;
5497 a.a_options = options;
5498 a.a_context = ctx;
5499
5500 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
5501 DTRACE_FSINFO(listxattr, vnode_t, vp);
5502
5503 return (error);
5504 }
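
/*
 * Editor's sketch (not in the original source): fetching an extended
 * attribute into a kernel buffer with VNOP_GETXATTR.  Callers normally go
 * through vn_getxattr(), which adds the named-stream/AppleDouble shims;
 * this shows only the raw VNOP with a sys-space uio.  Names are hypothetical.
 */
static int
example_read_xattr(vnode_t vp, const char *name, void *buf, size_t bufsize,
    size_t *actual, vfs_context_t ctx)
{
	uio_t uio;
	int error;

	uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (uio == NULL)
		return (ENOMEM);
	uio_addiov(uio, CAST_USER_ADDR_T(buf), bufsize);

	error = VNOP_GETXATTR(vp, name, uio, NULL, 0, ctx);
	if (error == 0 && actual != NULL)
		*actual = bufsize - (size_t)uio_resid(uio);

	uio_free(uio);
	return (error);
}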
5505
5506
5507 #if 0
5508 /*
5509 *#
5510 *#% blktooff vp = = =
5511 *#
5512 */
5513 struct vnop_blktooff_args {
5514 struct vnodeop_desc *a_desc;
5515 vnode_t a_vp;
5516 daddr64_t a_lblkno;
5517 off_t *a_offset;
5518 };
5519 #endif /* 0*/
5520 errno_t
5521 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
5522 {
5523 int _err;
5524 struct vnop_blktooff_args a;
5525
5526 a.a_desc = &vnop_blktooff_desc;
5527 a.a_vp = vp;
5528 a.a_lblkno = lblkno;
5529 a.a_offset = offset;
5530
5531 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
5532 DTRACE_FSINFO(blktooff, vnode_t, vp);
5533
5534 return (_err);
5535 }
5536
5537 #if 0
5538 /*
5539 *#
5540 *#% offtoblk vp = = =
5541 *#
5542 */
5543 struct vnop_offtoblk_args {
5544 struct vnodeop_desc *a_desc;
5545 vnode_t a_vp;
5546 off_t a_offset;
5547 daddr64_t *a_lblkno;
5548 };
5549 #endif /* 0*/
5550 errno_t
5551 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
5552 {
5553 int _err;
5554 struct vnop_offtoblk_args a;
5555
5556 a.a_desc = &vnop_offtoblk_desc;
5557 a.a_vp = vp;
5558 a.a_offset = offset;
5559 a.a_lblkno = lblkno;
5560
5561 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
5562 DTRACE_FSINFO(offtoblk, vnode_t, vp);
5563
5564 return (_err);
5565 }
5566
5567 #if 0
5568 /*
5569 *#
5570 *#% blockmap vp L L L
5571 *#
5572 */
5573 struct vnop_blockmap_args {
5574 struct vnodeop_desc *a_desc;
5575 vnode_t a_vp;
5576 off_t a_foffset;
5577 size_t a_size;
5578 daddr64_t *a_bpn;
5579 size_t *a_run;
5580 void *a_poff;
5581 int a_flags;
5582 vfs_context_t a_context;
5583 };
5584 #endif /* 0*/
5585 errno_t
5586 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
5587 {
5588 int _err;
5589 struct vnop_blockmap_args a;
5590 size_t localrun = 0;
5591
5592 if (ctx == NULL) {
5593 ctx = vfs_context_current();
5594 }
5595 a.a_desc = &vnop_blockmap_desc;
5596 a.a_vp = vp;
5597 a.a_foffset = foffset;
5598 a.a_size = size;
5599 a.a_bpn = bpn;
5600 a.a_run = &localrun;
5601 a.a_poff = poff;
5602 a.a_flags = flags;
5603 a.a_context = ctx;
5604
5605 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
5606 DTRACE_FSINFO(blockmap, vnode_t, vp);
5607
5608 /*
5609 * We used a local variable to request information from the underlying
5610 * filesystem about the length of the I/O run in question. If
5611 * we get malformed output from the filesystem, we cap it to the length
5612 * requested, at most. Update 'run' on the way out.
5613 */
5614 if (_err == 0) {
5615 if (localrun > size) {
5616 localrun = size;
5617 }
5618
5619 if (run) {
5620 *run = localrun;
5621 }
5622 }
5623
5624 return (_err);
5625 }
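
/*
 * Editor's sketch (not in the original source): translating a file offset
 * into an on-device block number plus the length of the contiguous run,
 * the way the cluster I/O layer uses VNOP_BLOCKMAP.  Names are hypothetical.
 */
static int
example_map_offset(vnode_t vp, off_t f_offset, size_t io_size, vfs_context_t ctx)
{
	daddr64_t blkno = 0;
	size_t run = 0;
	int error;

	error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &run, NULL,
	    VNODE_READ, ctx);
	if (error)
		return (error);

	/*
	 * "blkno" is the device block backing f_offset and "run" is how many
	 * bytes are contiguous starting there (already capped to io_size by
	 * the wrapper above); a hole is typically reported as blkno == -1.
	 */
	return (0);
}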
5626
5627 #if 0
5628 struct vnop_strategy_args {
5629 struct vnodeop_desc *a_desc;
5630 struct buf *a_bp;
5631 };
5632
5633 #endif /* 0*/
5634 errno_t
5635 VNOP_STRATEGY(struct buf *bp)
5636 {
5637 int _err;
5638 struct vnop_strategy_args a;
5639 vnode_t vp = buf_vnode(bp);
5640 a.a_desc = &vnop_strategy_desc;
5641 a.a_bp = bp;
5642 _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
5643 DTRACE_FSINFO(strategy, vnode_t, vp);
5644 return (_err);
5645 }
5646
5647 #if 0
5648 struct vnop_bwrite_args {
5649 struct vnodeop_desc *a_desc;
5650 buf_t a_bp;
5651 };
5652 #endif /* 0*/
5653 errno_t
5654 VNOP_BWRITE(struct buf *bp)
5655 {
5656 int _err;
5657 struct vnop_bwrite_args a;
5658 vnode_t vp = buf_vnode(bp);
5659 a.a_desc = &vnop_bwrite_desc;
5660 a.a_bp = bp;
5661 _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
5662 DTRACE_FSINFO(bwrite, vnode_t, vp);
5663 return (_err);
5664 }
5665
5666 #if 0
5667 struct vnop_kqfilt_add_args {
5668 struct vnodeop_desc *a_desc;
5669 struct vnode *a_vp;
5670 struct knote *a_kn;
5671 vfs_context_t a_context;
5672 };
5673 #endif
5674 errno_t
5675 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
5676 {
5677 int _err;
5678 struct vnop_kqfilt_add_args a;
5679
5680 a.a_desc = VDESC(vnop_kqfilt_add);
5681 a.a_vp = vp;
5682 a.a_kn = kn;
5683 a.a_context = ctx;
5684
5685 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
5686 DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
5687
5688 return(_err);
5689 }
5690
5691 #if 0
5692 struct vnop_kqfilt_remove_args {
5693 struct vnodeop_desc *a_desc;
5694 struct vnode *a_vp;
5695 uintptr_t a_ident;
5696 vfs_context_t a_context;
5697 };
5698 #endif
5699 errno_t
5700 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
5701 {
5702 int _err;
5703 struct vnop_kqfilt_remove_args a;
5704
5705 a.a_desc = VDESC(vnop_kqfilt_remove);
5706 a.a_vp = vp;
5707 a.a_ident = ident;
5708 a.a_context = ctx;
5709
5710 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
5711 DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
5712
5713 return(_err);
5714 }
5715
5716 errno_t
5717 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
5718 {
5719 int _err;
5720 struct vnop_monitor_args a;
5721
5722 a.a_desc = VDESC(vnop_monitor);
5723 a.a_vp = vp;
5724 a.a_events = events;
5725 a.a_flags = flags;
5726 a.a_handle = handle;
5727 a.a_context = ctx;
5728
5729 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
5730 DTRACE_FSINFO(monitor, vnode_t, vp);
5731
5732 return(_err);
5733 }
5734
5735 #if 0
5736 struct vnop_setlabel_args {
5737 struct vnodeop_desc *a_desc;
5738 struct vnode *a_vp;
5739 struct label *a_vl;
5740 vfs_context_t a_context;
5741 };
5742 #endif
5743 errno_t
5744 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
5745 {
5746 int _err;
5747 struct vnop_setlabel_args a;
5748
5749 a.a_desc = VDESC(vnop_setlabel);
5750 a.a_vp = vp;
5751 a.a_vl = label;
5752 a.a_context = ctx;
5753
5754 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
5755 DTRACE_FSINFO(setlabel, vnode_t, vp);
5756
5757 return(_err);
5758 }
5759
5760
5761 #if NAMEDSTREAMS
5762 /*
5763 * Get a named stream
5764 */
5765 errno_t
5766 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
5767 {
5768 int _err;
5769 struct vnop_getnamedstream_args a;
5770
5771 a.a_desc = &vnop_getnamedstream_desc;
5772 a.a_vp = vp;
5773 a.a_svpp = svpp;
5774 a.a_name = name;
5775 a.a_operation = operation;
5776 a.a_flags = flags;
5777 a.a_context = ctx;
5778
5779 _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
5780 DTRACE_FSINFO(getnamedstream, vnode_t, vp);
5781 return (_err);
5782 }
5783
5784 /*
5785 * Create a named stream
5786 */
5787 errno_t
5788 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
5789 {
5790 int _err;
5791 struct vnop_makenamedstream_args a;
5792
5793 a.a_desc = &vnop_makenamedstream_desc;
5794 a.a_vp = vp;
5795 a.a_svpp = svpp;
5796 a.a_name = name;
5797 a.a_flags = flags;
5798 a.a_context = ctx;
5799
5800 _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
5801 DTRACE_FSINFO(makenamedstream, vnode_t, vp);
5802 return (_err);
5803 }
5804
5805
5806 /*
5807 * Remove a named stream
5808 */
5809 errno_t
5810 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
5811 {
5812 int _err;
5813 struct vnop_removenamedstream_args a;
5814
5815 a.a_desc = &vnop_removenamedstream_desc;
5816 a.a_vp = vp;
5817 a.a_svp = svp;
5818 a.a_name = name;
5819 a.a_flags = flags;
5820 a.a_context = ctx;
5821
5822 _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
5823 DTRACE_FSINFO(removenamedstream, vnode_t, vp);
5824 return (_err);
5825 }
5826 #endif