[apple/xnu.git] / bsd / vfs / kpi_vfs.c
1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/vnode_internal.h>
88 #include <sys/stat.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
91 #include <sys/buf.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
95 #include <sys/mbuf.h>
96 #include <sys/syslog.h>
97 #include <sys/ubc.h>
98 #include <sys/vm.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/event.h>
102 #include <sys/fsevents.h>
103 #include <sys/user.h>
104 #include <sys/lockf.h>
105 #include <sys/xattr.h>
106
107 #include <kern/assert.h>
108 #include <kern/kalloc.h>
109 #include <kern/task.h>
110
111 #include <libkern/OSByteOrder.h>
112
113 #include <miscfs/specfs/specdev.h>
114
115 #include <mach/mach_types.h>
116 #include <mach/memory_object_types.h>
117 #include <mach/task.h>
118
119 #if CONFIG_MACF
120 #include <security/mac_framework.h>
121 #endif
122
123 #include <sys/sdt.h>
124
125 #define ESUCCESS 0
126 #undef mount_t
127 #undef vnode_t
128
129 #define COMPAT_ONLY
130
131 #define NATIVE_XATTR(VP) \
132 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
133
134 #if CONFIG_APPLEDOUBLE
135 static void xattrfile_remove(vnode_t dvp, const char *basename,
136 vfs_context_t ctx, int force);
137 static void xattrfile_setattr(vnode_t dvp, const char * basename,
138 struct vnode_attr * vap, vfs_context_t ctx);
139 #endif /* CONFIG_APPLEDOUBLE */
140
141 /*
142 * vnode_setneedinactive
143 *
144 * Description: Indicate that when the last iocount on this vnode goes away,
145 * and the usecount is also zero, we should inform the filesystem
146 * via VNOP_INACTIVE.
147 *
148 * Parameters: vnode_t vnode to mark
149 *
150 * Returns: Nothing
151 *
152 * Notes: Notably used when we're deleting a file--we need not have a
153 * usecount, so VNOP_INACTIVE may not get called by anyone. We
154 * want it called when we drop our iocount.
155 */
156 void
157 vnode_setneedinactive(vnode_t vp)
158 {
159 cache_purge(vp);
160
161 vnode_lock_spin(vp);
162 vp->v_lflag |= VL_NEEDINACTIVE;
163 vnode_unlock(vp);
164 }
165
166
167 /* ====================================================================== */
168 /* ************ EXTERNAL KERNEL APIS ********************************** */
169 /* ====================================================================== */
170
171 /*
172 * implementations of exported VFS operations
173 */
174 int
175 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
176 {
177 int error;
178
179 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
180 return(ENOTSUP);
181
182 if (vfs_context_is64bit(ctx)) {
183 if (vfs_64bitready(mp)) {
184 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
185 }
186 else {
187 error = ENOTSUP;
188 }
189 }
190 else {
191 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
192 }
193
194 return (error);
195 }
196
197 int
198 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
199 {
200 int error;
201
202 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
203 return(ENOTSUP);
204
205 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
206
207 return (error);
208 }
209
210 int
211 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
212 {
213 int error;
214
215 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
216 return(ENOTSUP);
217
218 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
219
220 return (error);
221 }
222
223 /*
224 * Returns: 0 Success
225 * ENOTSUP Not supported
226 * <vfs_root>:ENOENT
227 * <vfs_root>:???
228 *
229 * Note: The return codes from the underlying VFS's root routine can't
230 * be fully enumerated here, since third party VFS authors may not
231 * limit their error returns to the ones documented here, even
232 * though this may result in some programs functioning incorrectly.
233 *
234 * The return codes documented above are those which may currently
235 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
236 * for a call to hfs_vget on the volume mount point, not including
237 * additional error codes which may be propagated from underlying
238 * routines called by hfs_vget.
239 */
240 int
241 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
242 {
243 int error;
244
245 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
246 return(ENOTSUP);
247
248 if (ctx == NULL) {
249 ctx = vfs_context_current();
250 }
251
252 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
253
254 return (error);
255 }
256
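/*
 * Example (illustrative sketch, not part of the original source): an
 * in-kernel caller typically obtains the root vnode of a mount with
 * VFS_ROOT() and drops the returned iocount with vnode_put() when done.
 *
 *	vnode_t rvp = NULLVP;
 *	if (VFS_ROOT(mp, &rvp, vfs_context_current()) == 0) {
 *		... use rvp ...
 *		vnode_put(rvp);
 *	}
 */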
257 int
258 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
259 {
260 int error;
261
262 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
263 return(ENOTSUP);
264
265 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
266
267 return (error);
268 }
269
270 int
271 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
272 {
273 int error;
274
275 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
276 return(ENOTSUP);
277
278 if (ctx == NULL) {
279 ctx = vfs_context_current();
280 }
281
282 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
283
284 return(error);
285 }
286
287 int
288 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
289 {
290 int error;
291
292 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
293 return(ENOTSUP);
294
295 if (ctx == NULL) {
296 ctx = vfs_context_current();
297 }
298
299 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
300
301 return(error);
302 }
303
304 int
305 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
306 {
307 int error;
308
309 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
310 return(ENOTSUP);
311
312 if (ctx == NULL) {
313 ctx = vfs_context_current();
314 }
315
316 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
317
318 return(error);
319 }
320
321 int
322 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
323 {
324 int error;
325
326 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
327 return(ENOTSUP);
328
329 if (ctx == NULL) {
330 ctx = vfs_context_current();
331 }
332
333 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
334
335 return(error);
336 }
337
338 int
339 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t ctx)
340 {
341 int error;
342
343 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
344 return(ENOTSUP);
345
346 if (ctx == NULL) {
347 ctx = vfs_context_current();
348 }
349
350 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
351
352 return(error);
353 }
354
355 int
356 VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ctx)
357 {
358 int error;
359
360 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
361 return(ENOTSUP);
362
363 if (ctx == NULL) {
364 ctx = vfs_context_current();
365 }
366
367 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
368
369 return(error);
370 }
371
372
373 /* returns the cached throttle mask for the mount_t */
374 uint64_t
375 vfs_throttle_mask(mount_t mp)
376 {
377 return(mp->mnt_throttle_mask);
378 }
379
380 /* returns a copy of vfs type name for the mount_t */
381 void
382 vfs_name(mount_t mp, char * buffer)
383 {
384 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
385 }
386
387 /* returns vfs type number for the mount_t */
388 int
389 vfs_typenum(mount_t mp)
390 {
391 return(mp->mnt_vtable->vfc_typenum);
392 }
393
394 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
395 void*
396 vfs_mntlabel(mount_t mp)
397 {
398 return (void*)mp->mnt_mntlabel;
399 }
400
401 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
402 uint64_t
403 vfs_flags(mount_t mp)
404 {
405 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
406 }
407
408 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
409 void
410 vfs_setflags(mount_t mp, uint64_t flags)
411 {
412 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
413
414 mount_lock(mp);
415 mp->mnt_flag |= lflags;
416 mount_unlock(mp);
417 }
418
419 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
420 void
421 vfs_clearflags(mount_t mp , uint64_t flags)
422 {
423 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
424
425 mount_lock(mp);
426 mp->mnt_flag &= ~lflags;
427 mount_unlock(mp);
428 }
429
430 /* Is the mount_t ronly and upgrade read/write requested? */
431 int
432 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
433 {
434 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
435 }
436
437
438 /* Is the mount_t mounted ronly */
439 int
440 vfs_isrdonly(mount_t mp)
441 {
442 return (mp->mnt_flag & MNT_RDONLY);
443 }
444
445 /* Is the mount_t mounted for filesystem synchronous writes? */
446 int
447 vfs_issynchronous(mount_t mp)
448 {
449 return (mp->mnt_flag & MNT_SYNCHRONOUS);
450 }
451
452 /* Is the mount_t mounted read/write? */
453 int
454 vfs_isrdwr(mount_t mp)
455 {
456 return ((mp->mnt_flag & MNT_RDONLY) == 0);
457 }
458
459
460 /* Is mount_t marked for update (ie MNT_UPDATE) */
461 int
462 vfs_isupdate(mount_t mp)
463 {
464 return (mp->mnt_flag & MNT_UPDATE);
465 }
466
467
468 /* Is mount_t marked for reload (ie MNT_RELOAD) */
469 int
470 vfs_isreload(mount_t mp)
471 {
472 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
473 }
474
475 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
476 int
477 vfs_isforce(mount_t mp)
478 {
479 if ((mp->mnt_lflag & MNT_LFORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
480 return(1);
481 else
482 return(0);
483 }
484
485 int
486 vfs_isunmount(mount_t mp)
487 {
488 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
489 return 1;
490 } else {
491 return 0;
492 }
493 }
494
495 int
496 vfs_64bitready(mount_t mp)
497 {
498 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
499 return(1);
500 else
501 return(0);
502 }
503
504
505 int
506 vfs_authcache_ttl(mount_t mp)
507 {
508 if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) )
509 return (mp->mnt_authcache_ttl);
510 else
511 return (CACHED_RIGHT_INFINITE_TTL);
512 }
513
514 void
515 vfs_setauthcache_ttl(mount_t mp, int ttl)
516 {
517 mount_lock(mp);
518 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
519 mp->mnt_authcache_ttl = ttl;
520 mount_unlock(mp);
521 }
522
523 void
524 vfs_clearauthcache_ttl(mount_t mp)
525 {
526 mount_lock(mp);
527 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
528 /*
529 * back to the default TTL value in case
530 * MNTK_AUTH_OPAQUE is set on this mount
531 */
532 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
533 mount_unlock(mp);
534 }
535
536 int
537 vfs_authopaque(mount_t mp)
538 {
539 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
540 return(1);
541 else
542 return(0);
543 }
544
545 int
546 vfs_authopaqueaccess(mount_t mp)
547 {
548 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
549 return(1);
550 else
551 return(0);
552 }
553
554 void
555 vfs_setauthopaque(mount_t mp)
556 {
557 mount_lock(mp);
558 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
559 mount_unlock(mp);
560 }
561
562 void
563 vfs_setauthopaqueaccess(mount_t mp)
564 {
565 mount_lock(mp);
566 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
567 mount_unlock(mp);
568 }
569
570 void
571 vfs_clearauthopaque(mount_t mp)
572 {
573 mount_lock(mp);
574 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
575 mount_unlock(mp);
576 }
577
578 void
579 vfs_clearauthopaqueaccess(mount_t mp)
580 {
581 mount_lock(mp);
582 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
583 mount_unlock(mp);
584 }
585
586 void
587 vfs_setextendedsecurity(mount_t mp)
588 {
589 mount_lock(mp);
590 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
591 mount_unlock(mp);
592 }
593
594 void
595 vfs_clearextendedsecurity(mount_t mp)
596 {
597 mount_lock(mp);
598 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
599 mount_unlock(mp);
600 }
601
602 int
603 vfs_extendedsecurity(mount_t mp)
604 {
605 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
606 }
607
608 /* returns the max size of short symlink in this mount_t */
609 uint32_t
610 vfs_maxsymlen(mount_t mp)
611 {
612 return(mp->mnt_maxsymlinklen);
613 }
614
615 /* set max size of short symlink on mount_t */
616 void
617 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
618 {
619 mp->mnt_maxsymlinklen = symlen;
620 }
621
622 /* return a pointer to the RO vfs_statfs associated with mount_t */
623 struct vfsstatfs *
624 vfs_statfs(mount_t mp)
625 {
626 return(&mp->mnt_vfsstat);
627 }
628
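/*
 * Example (illustrative sketch): vfs_statfs() returns a pointer into the
 * mount structure itself, so the caller reads the fields in place and does
 * not free the result.
 *
 *	struct vfsstatfs *sp = vfs_statfs(mp);
 *	printf("mounted from %s on %s\n", sp->f_mntfromname, sp->f_mntonname);
 */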
629 int
630 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
631 {
632 int error;
633
634 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
635 return(error);
636
637 /*
638 * If we have a filesystem create time, use it to default some others.
639 */
640 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
641 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
642 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
643 }
644
645 return(0);
646 }
647
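/*
 * Example (illustrative sketch; the attributes requested here are arbitrary):
 * callers initialize a struct vfs_attr, mark the attributes they want, and
 * check VFSATTR_IS_SUPPORTED() on return before trusting each value.
 *
 *	struct vfs_attr va;
 *	VFSATTR_INIT(&va);
 *	VFSATTR_WANTED(&va, f_bsize);
 *	VFSATTR_WANTED(&va, f_blocks);
 *	if (vfs_getattr(mp, &va, vfs_context_current()) == 0 &&
 *	    VFSATTR_IS_SUPPORTED(&va, f_blocks)) {
 *		... use va.f_blocks ...
 *	}
 */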
648 int
649 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
650 {
651 int error;
652
653 if (vfs_isrdonly(mp))
654 return EROFS;
655
656 error = VFS_SETATTR(mp, vfa, ctx);
657
658 /*
659 * If we had alternate ways of setting vfs attributes, we'd
660 * fall back here.
661 */
662
663 return error;
664 }
665
666 /* return the private data handle stored in mount_t */
667 void *
668 vfs_fsprivate(mount_t mp)
669 {
670 return(mp->mnt_data);
671 }
672
673 /* set the private data handle in mount_t */
674 void
675 vfs_setfsprivate(mount_t mp, void *mntdata)
676 {
677 mount_lock(mp);
678 mp->mnt_data = mntdata;
679 mount_unlock(mp);
680 }
681
682 /* query whether the mount point supports native EAs */
683 int
684 vfs_nativexattrs(mount_t mp) {
685 return (mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS);
686 }
687
688 /*
689 * return the block size of the underlying
690 * device associated with mount_t
691 */
692 int
693 vfs_devblocksize(mount_t mp) {
694
695 return(mp->mnt_devblocksize);
696 }
697
698 /*
699 * Returns vnode with an iocount that must be released with vnode_put()
700 */
701 vnode_t
702 vfs_vnodecovered(mount_t mp)
703 {
704 vnode_t vp = mp->mnt_vnodecovered;
705 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
706 return NULL;
707 } else {
708 return vp;
709 }
710 }
711
712 /*
713 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
714 * The iocount must be released with vnode_put(). Note that this KPI is subtle
715 * with respect to the validity of using this device vnode for anything substantial
716 * (which is discouraged). If commands are sent to the device driver without
717 * taking proper steps to ensure that the device is still open, chaos may ensue.
718 * Similarly, this routine should only be called if there is some guarantee that
719 * the mount itself is still valid.
720 */
721 vnode_t
722 vfs_devvp(mount_t mp)
723 {
724 vnode_t vp = mp->mnt_devvp;
725
726 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
727 return vp;
728 }
729
730 return NULLVP;
731 }
732
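/*
 * Example (illustrative sketch): per the caveats above, the device vnode
 * returned by vfs_devvp() carries an iocount and should be released with
 * vnode_put() as soon as the caller is finished with it.
 *
 *	vnode_t devvp = vfs_devvp(mp);
 *	if (devvp != NULLVP) {
 *		... brief, careful use of devvp ...
 *		vnode_put(devvp);
 *	}
 */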
733 /*
734 * return the io attributes associated with mount_t
735 */
736 void
737 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
738 {
739 if (mp == NULL) {
740 ioattrp->io_maxreadcnt = MAXPHYS;
741 ioattrp->io_maxwritecnt = MAXPHYS;
742 ioattrp->io_segreadcnt = 32;
743 ioattrp->io_segwritecnt = 32;
744 ioattrp->io_maxsegreadsize = MAXPHYS;
745 ioattrp->io_maxsegwritesize = MAXPHYS;
746 ioattrp->io_devblocksize = DEV_BSIZE;
747 ioattrp->io_flags = 0;
748 } else {
749 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
750 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
751 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
752 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
753 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
754 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
755 ioattrp->io_devblocksize = mp->mnt_devblocksize;
756 ioattrp->io_flags = mp->mnt_ioflags;
757 }
758 ioattrp->io_reserved[0] = NULL;
759 ioattrp->io_reserved[1] = NULL;
760 }
761
762
763 /*
764 * set the IO attributes associated with mount_t
765 */
766 void
767 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
768 {
769 if (mp == NULL)
770 return;
771 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
772 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
773 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
774 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
775 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
776 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
777 mp->mnt_devblocksize = ioattrp->io_devblocksize;
778 mp->mnt_ioflags = ioattrp->io_flags;
779 }
780
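/*
 * Example (illustrative sketch): the usual pattern is read-modify-write:
 * fetch the current attributes with vfs_ioattr(), adjust the fields of
 * interest, and store the whole structure back with vfs_setioattr().
 *
 *	struct vfsioattr ioattr;
 *	vfs_ioattr(mp, &ioattr);
 *	ioattr.io_maxreadcnt = 512 * 1024;	(hypothetical new limit)
 *	vfs_setioattr(mp, &ioattr);
 */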
781 /*
782 * Add a new filesystem into the kernel, as specified by the passed-in
783 * vfs_fsentry structure. It fills in the vnode
784 * dispatch vector that is to be passed in when vnodes are created.
785 * It returns a handle which is to be used when the FS is to be removed.
786 */
787 typedef int (*PFI)(void *);
788 extern int vfs_opv_numops;
789 errno_t
790 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
791 {
792 struct vfstable *newvfstbl = NULL;
793 int i,j;
794 int (***opv_desc_vector_p)(void *);
795 int (**opv_desc_vector)(void *);
796 struct vnodeopv_entry_desc *opve_descp;
797 int desccount;
798 int descsize;
799 PFI *descptr;
800
801 /*
802 * This routine is responsible for all the initialization that would
803 * ordinarily be done as part of the system startup;
804 */
805
806 if (vfe == (struct vfs_fsentry *)0)
807 return(EINVAL);
808
809 desccount = vfe->vfe_vopcnt;
810 if ((desccount <=0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
811 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
812 return(EINVAL);
813
814 /* Non-threadsafe filesystems are not supported */
815 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
816 return (EINVAL);
817 }
818
819 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
820 M_WAITOK);
821 bzero(newvfstbl, sizeof(struct vfstable));
822 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
823 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
824 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
825 newvfstbl->vfc_typenum = maxvfsconf++;
826 else
827 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
828
829 newvfstbl->vfc_refcount = 0;
830 newvfstbl->vfc_flags = 0;
831 newvfstbl->vfc_mountroot = NULL;
832 newvfstbl->vfc_next = NULL;
833 newvfstbl->vfc_vfsflags = 0;
834 if (vfe->vfe_flags & VFS_TBL64BITREADY)
835 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
836 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2)
837 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
838 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2)
839 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
840 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
841 newvfstbl->vfc_flags |= MNT_LOCAL;
842 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0)
843 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
844 else
845 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
846
847 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR)
848 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
849 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT)
850 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
851 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED)
852 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
853 if (vfe->vfe_flags & VFS_TBLNOMACLABEL)
854 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
855 if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME)
856 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
857
858 /*
859 * Allocate and init the vectors.
860 * Also handle backwards compatibility.
861 *
862 * We allocate one large block to hold all <desccount>
863 * vnode operation vectors stored contiguously.
864 */
865 /* XXX - shouldn't be M_TEMP */
866
867 descsize = desccount * vfs_opv_numops * sizeof(PFI);
868 MALLOC(descptr, PFI *, descsize,
869 M_TEMP, M_WAITOK);
870 bzero(descptr, descsize);
871
872 newvfstbl->vfc_descptr = descptr;
873 newvfstbl->vfc_descsize = descsize;
874
875
876 for (i= 0; i< desccount; i++ ) {
877 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
878 /*
879 * Fill in the caller's pointer to the start of the i'th vector.
880 * They'll need to supply it when calling vnode_create.
881 */
882 opv_desc_vector = descptr + i * vfs_opv_numops;
883 *opv_desc_vector_p = opv_desc_vector;
884
885 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
886 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
887
888 /*
889 * Sanity check: is this operation listed
890 * in the list of operations? We check this
891 * by seeing if its offset is zero. Since
892 * the default routine should always be listed
893 * first, it should be the only one with a zero
894 * offset. Any other operation with a zero
895 * offset is probably not listed in
896 * vfs_op_descs, and so is probably an error.
897 *
898 * A panic here means the layer programmer
899 * has committed the all-too common bug
900 * of adding a new operation to the layer's
901 * list of vnode operations but
902 * not adding the operation to the system-wide
903 * list of supported operations.
904 */
905 if (opve_descp->opve_op->vdesc_offset == 0 &&
906 opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
907 printf("vfs_fsadd: operation %s not listed in %s.\n",
908 opve_descp->opve_op->vdesc_name,
909 "vfs_op_descs");
910 panic("vfs_fsadd: bad operation");
911 }
912 /*
913 * Fill in this entry.
914 */
915 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
916 opve_descp->opve_impl;
917 }
918
919
920 /*
921 * Finally, go back and replace unfilled routines
922 * with their default. (Sigh, an O(n^3) algorithm. I
923 * could make it better, but that'd be work, and n is small.)
924 */
925 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
926
927 /*
928 * Force every operations vector to have a default routine.
929 */
930 opv_desc_vector = *opv_desc_vector_p;
931 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
932 panic("vfs_fsadd: operation vector without default routine.");
933 for (j = 0; j < vfs_opv_numops; j++)
934 if (opv_desc_vector[j] == NULL)
935 opv_desc_vector[j] =
936 opv_desc_vector[VOFFSET(vnop_default)];
937
938 } /* end of each vnodeopv_desc parsing */
939
940
941
942 *handle = vfstable_add(newvfstbl);
943
944 if (newvfstbl->vfc_typenum <= maxvfsconf )
945 maxvfsconf = newvfstbl->vfc_typenum + 1;
946
947 if (newvfstbl->vfc_vfsops->vfs_init) {
948 struct vfsconf vfsc;
949 bzero(&vfsc, sizeof(struct vfsconf));
950 vfsc.vfc_reserved1 = 0;
951 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
952 vfsc.vfc_typenum = (*handle)->vfc_typenum;
953 vfsc.vfc_refcount = (*handle)->vfc_refcount;
954 vfsc.vfc_flags = (*handle)->vfc_flags;
955 vfsc.vfc_reserved2 = 0;
956 vfsc.vfc_reserved3 = 0;
957
958 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
959 }
960
961 FREE(newvfstbl, M_TEMP);
962
963 return(0);
964 }
965
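/*
 * Example (illustrative sketch, assuming hypothetical myfs_vfsops and
 * myfs_vnodeop_opv_desc symbols supplied by the loading kext): a filesystem
 * typically calls vfs_fsadd() from its kext start routine and later passes
 * the returned handle to vfs_fsremove() from its stop routine.
 *
 *	static vfstable_t myfs_handle;
 *	static struct vnodeopv_desc *myfs_opvdescs[] = { &myfs_vnodeop_opv_desc };
 *
 *	struct vfs_fsentry vfe = {
 *		.vfe_vfsops   = &myfs_vfsops,
 *		.vfe_vopcnt   = 1,
 *		.vfe_opvdescs = myfs_opvdescs,
 *		.vfe_fsname   = "myfs",
 *		.vfe_flags    = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM,
 *	};
 *	errno_t err = vfs_fsadd(&vfe, &myfs_handle);
 *	...
 *	err = vfs_fsremove(myfs_handle);	(at unload time)
 */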
966 /*
967 * Removes the filesystem from the kernel.
968 * The argument passed in is the handle that was given when the
969 * file system was added.
970 */
971 errno_t
972 vfs_fsremove(vfstable_t handle)
973 {
974 struct vfstable * vfstbl = (struct vfstable *)handle;
975 void *old_desc = NULL;
976 errno_t err;
977
978 /* Preflight check for any mounts */
979 mount_list_lock();
980 if ( vfstbl->vfc_refcount != 0 ) {
981 mount_list_unlock();
982 return EBUSY;
983 }
984
985 /*
986 * save the old descriptor; the free cannot occur unconditionally,
987 * since vfstable_del() may fail.
988 */
989 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
990 old_desc = vfstbl->vfc_descptr;
991 }
992 err = vfstable_del(vfstbl);
993
994 mount_list_unlock();
995
996 /* free the descriptor if the delete was successful */
997 if (err == 0 && old_desc) {
998 FREE(old_desc, M_TEMP);
999 }
1000
1001 return(err);
1002 }
1003
1004 int
1005 vfs_context_pid(vfs_context_t ctx)
1006 {
1007 return (proc_pid(vfs_context_proc(ctx)));
1008 }
1009
1010 int
1011 vfs_context_suser(vfs_context_t ctx)
1012 {
1013 return (suser(ctx->vc_ucred, NULL));
1014 }
1015
1016 /*
1017 * Return bit field of signals posted to all threads in the context's process.
1018 *
1019 * XXX Signals should be tied to threads, not processes, for most uses of this
1020 * XXX call.
1021 */
1022 int
1023 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1024 {
1025 proc_t p = vfs_context_proc(ctx);
1026 if (p)
1027 return(proc_pendingsignals(p, mask));
1028 return(0);
1029 }
1030
1031 int
1032 vfs_context_is64bit(vfs_context_t ctx)
1033 {
1034 proc_t proc = vfs_context_proc(ctx);
1035
1036 if (proc)
1037 return(proc_is64bit(proc));
1038 return(0);
1039 }
1040
1041
1042 /*
1043 * vfs_context_proc
1044 *
1045 * Description: Given a vfs_context_t, return the proc_t associated with it.
1046 *
1047 * Parameters: vfs_context_t The context to use
1048 *
1049 * Returns: proc_t The process for this context
1050 *
1051 * Notes: This function will return the current_proc() if any of the
1052 * following conditions are true:
1053 *
1054 * o The supplied context pointer is NULL
1055 * o There is no Mach thread associated with the context
1056 * o There is no Mach task associated with the Mach thread
1057 * o There is no proc_t associated with the Mach task
1058 * o The proc_t has no per process open file table
1059 * o The proc_t is post-vfork()
1060 *
1061 * This causes this function to return a value matching as
1062 * closely as possible the previous behaviour, while at the
1063 * same time avoiding the task lending that results from vfork()
1064 */
1065 proc_t
1066 vfs_context_proc(vfs_context_t ctx)
1067 {
1068 proc_t proc = NULL;
1069
1070 if (ctx != NULL && ctx->vc_thread != NULL)
1071 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1072 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
1073 proc = NULL;
1074
1075 return(proc == NULL ? current_proc() : proc);
1076 }
1077
1078 /*
1079 * vfs_context_get_special_port
1080 *
1081 * Description: Return the requested special port from the task associated
1082 * with the given context.
1083 *
1084 * Parameters: vfs_context_t The context to use
1085 * int Index of special port
1086 * ipc_port_t * Pointer to returned port
1087 *
1088 * Returns: kern_return_t see task_get_special_port()
1089 */
1090 kern_return_t
1091 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1092 {
1093 task_t task = NULL;
1094
1095 if (ctx != NULL && ctx->vc_thread != NULL)
1096 task = get_threadtask(ctx->vc_thread);
1097
1098 return task_get_special_port(task, which, portp);
1099 }
1100
1101 /*
1102 * vfs_context_set_special_port
1103 *
1104 * Description: Set the requested special port in the task associated
1105 * with the given context.
1106 *
1107 * Parameters: vfs_context_t The context to use
1108 * int Index of special port
1109 * ipc_port_t New special port
1110 *
1111 * Returns: kern_return_t see task_set_special_port()
1112 */
1113 kern_return_t
1114 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1115 {
1116 task_t task = NULL;
1117
1118 if (ctx != NULL && ctx->vc_thread != NULL)
1119 task = get_threadtask(ctx->vc_thread);
1120
1121 return task_set_special_port(task, which, port);
1122 }
1123
1124 /*
1125 * vfs_context_thread
1126 *
1127 * Description: Return the Mach thread associated with a vfs_context_t
1128 *
1129 * Parameters: vfs_context_t The context to use
1130 *
1131 * Returns: thread_t The thread for this context, or
1132 * NULL, if there is not one.
1133 *
1134 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1135 * as a result of a static vfs_context_t declaration in a function
1136 * and will result in this function returning NULL.
1137 *
1138 * This is intentional; this function should NOT return the
1139 * current_thread() in this case.
1140 */
1141 thread_t
1142 vfs_context_thread(vfs_context_t ctx)
1143 {
1144 return(ctx->vc_thread);
1145 }
1146
1147
1148 /*
1149 * vfs_context_cwd
1150 *
1151 * Description: Returns a reference on the vnode for the current working
1152 * directory for the supplied context
1153 *
1154 * Parameters: vfs_context_t The context to use
1155 *
1156 * Returns: vnode_t The current working directory
1157 * for this context
1158 *
1159 * Notes: The function first attempts to obtain the current directory
1160 * from the thread, and if it is not present there, falls back
1161 * to obtaining it from the process instead. If it can't be
1162 * obtained from either place, we return NULLVP.
1163 */
1164 vnode_t
1165 vfs_context_cwd(vfs_context_t ctx)
1166 {
1167 vnode_t cwd = NULLVP;
1168
1169 if(ctx != NULL && ctx->vc_thread != NULL) {
1170 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1171 proc_t proc;
1172
1173 /*
1174 * Get the cwd from the thread; if there isn't one, get it
1175 * from the process, instead.
1176 */
1177 if ((cwd = uth->uu_cdir) == NULLVP &&
1178 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1179 proc->p_fd != NULL)
1180 cwd = proc->p_fd->fd_cdir;
1181 }
1182
1183 return(cwd);
1184 }
1185
1186 /*
1187 * vfs_context_create
1188 *
1189 * Description: Allocate and initialize a new context.
1190 *
1191 * Parameters: vfs_context_t: Context to copy, or NULL for new
1192 *
1193 * Returns: Pointer to new context
1194 *
1195 * Notes: Copy cred and thread from argument, if available; else
1196 * initialize with current thread and new cred. Returns
1197 * with a reference held on the credential.
1198 */
1199 vfs_context_t
1200 vfs_context_create(vfs_context_t ctx)
1201 {
1202 vfs_context_t newcontext;
1203
1204 newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));
1205
1206 if (newcontext) {
1207 kauth_cred_t safecred;
1208 if (ctx) {
1209 newcontext->vc_thread = ctx->vc_thread;
1210 safecred = ctx->vc_ucred;
1211 } else {
1212 newcontext->vc_thread = current_thread();
1213 safecred = kauth_cred_get();
1214 }
1215 if (IS_VALID_CRED(safecred))
1216 kauth_cred_ref(safecred);
1217 newcontext->vc_ucred = safecred;
1218 return(newcontext);
1219 }
1220 return(NULL);
1221 }
1222
1223
1224 vfs_context_t
1225 vfs_context_current(void)
1226 {
1227 vfs_context_t ctx = NULL;
1228 volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1229
1230 if (ut != NULL ) {
1231 if (ut->uu_context.vc_ucred != NULL) {
1232 ctx = &ut->uu_context;
1233 }
1234 }
1235
1236 return(ctx == NULL ? vfs_context_kernel() : ctx);
1237 }
1238
1239
1240 /*
1241 * XXX Do not ask
1242 *
1243 * Dangerous hack - adopt the first kernel thread as the current thread, to
1244 * get to the vfs_context_t in the uthread associated with a kernel thread.
1245 * This is used by UDF to make the call into IOCDMediaBSDClient,
1246 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1247 * ioctl() is being called from kernel or user space (and all this because
1248 * we do not pass threads into our ioctl()'s, instead of processes).
1249 *
1250 * This is also used by imageboot_setup(), called early from bsd_init() after
1251 * kernproc has been given a credential.
1252 *
1253 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1254 * of many Mach headers to do the reference directly rather than indirectly;
1255 * we will need to forego this convenience when we retire proc_thread().
1256 */
1257 static struct vfs_context kerncontext;
1258 vfs_context_t
1259 vfs_context_kernel(void)
1260 {
1261 if (kerncontext.vc_ucred == NOCRED)
1262 kerncontext.vc_ucred = kernproc->p_ucred;
1263 if (kerncontext.vc_thread == NULL)
1264 kerncontext.vc_thread = proc_thread(kernproc);
1265
1266 return(&kerncontext);
1267 }
1268
1269
1270 int
1271 vfs_context_rele(vfs_context_t ctx)
1272 {
1273 if (ctx) {
1274 if (IS_VALID_CRED(ctx->vc_ucred))
1275 kauth_cred_unref(&ctx->vc_ucred);
1276 kfree(ctx, sizeof(struct vfs_context));
1277 }
1278 return(0);
1279 }
1280
1281
1282 kauth_cred_t
1283 vfs_context_ucred(vfs_context_t ctx)
1284 {
1285 return (ctx->vc_ucred);
1286 }
1287
1288 /*
1289 * Return true if the context is owned by the superuser.
1290 */
1291 int
1292 vfs_context_issuser(vfs_context_t ctx)
1293 {
1294 return(kauth_cred_issuser(vfs_context_ucred(ctx)));
1295 }
1296
1297 /*
1298 * Given a context, for all fields of vfs_context_t which
1299 * are not held with a reference, set those fields to the
1300 * values for the current execution context. Currently, this
1301 * just means the vc_thread.
1302 *
1303 * Returns: 0 for success, nonzero for failure
1304 *
1305 * The intended use is:
1306 * 1. vfs_context_create() gets the caller a context
1307 * 2. vfs_context_bind() sets the unrefcounted data
1308 * 3. vfs_context_rele() releases the context
1309 *
1310 */
1311 int
1312 vfs_context_bind(vfs_context_t ctx)
1313 {
1314 ctx->vc_thread = current_thread();
1315 return 0;
1316 }
1317
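/*
 * Example (illustrative sketch of the create/bind/rele sequence described
 * above, e.g. for a context handed off to a worker thread):
 *
 *	vfs_context_t ctx = vfs_context_create(NULL);
 *	...
 *	(later, on the thread that will actually issue the work)
 *	vfs_context_bind(ctx);
 *	... use ctx for vnode_open(), VNOP calls, etc. ...
 *	vfs_context_rele(ctx);
 */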
1318 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1319
1320
1321 /*
1322 * Convert between vnode types and inode formats (since POSIX.1
1323 * defines mode word of stat structure in terms of inode formats).
1324 */
1325 enum vtype
1326 vnode_iftovt(int mode)
1327 {
1328 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1329 }
1330
1331 int
1332 vnode_vttoif(enum vtype indx)
1333 {
1334 return(vttoif_tab[(int)(indx)]);
1335 }
1336
1337 int
1338 vnode_makeimode(int indx, int mode)
1339 {
1340 return (int)(VTTOIF(indx) | (mode));
1341 }
1342
1343
1344 /*
1345 * vnode manipulation functions.
1346 */
1347
1348 /* returns system root vnode iocount; It should be released using vnode_put() */
1349 vnode_t
1350 vfs_rootvnode(void)
1351 {
1352 int error;
1353
1354 error = vnode_get(rootvnode);
1355 if (error)
1356 return ((vnode_t)0);
1357 else
1358 return rootvnode;
1359 }
1360
1361
1362 uint32_t
1363 vnode_vid(vnode_t vp)
1364 {
1365 return ((uint32_t)(vp->v_id));
1366 }
1367
1368 mount_t
1369 vnode_mount(vnode_t vp)
1370 {
1371 return (vp->v_mount);
1372 }
1373
1374 mount_t
1375 vnode_mountedhere(vnode_t vp)
1376 {
1377 mount_t mp;
1378
1379 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1380 (mp->mnt_vnodecovered == vp))
1381 return (mp);
1382 else
1383 return (mount_t)NULL;
1384 }
1385
1386 /* returns vnode type of vnode_t */
1387 enum vtype
1388 vnode_vtype(vnode_t vp)
1389 {
1390 return (vp->v_type);
1391 }
1392
1393 /* returns FS specific node saved in vnode */
1394 void *
1395 vnode_fsnode(vnode_t vp)
1396 {
1397 return (vp->v_data);
1398 }
1399
1400 void
1401 vnode_clearfsnode(vnode_t vp)
1402 {
1403 vp->v_data = NULL;
1404 }
1405
1406 dev_t
1407 vnode_specrdev(vnode_t vp)
1408 {
1409 return(vp->v_rdev);
1410 }
1411
1412
1413 /* Accessor functions */
1414 /* is vnode_t a root vnode */
1415 int
1416 vnode_isvroot(vnode_t vp)
1417 {
1418 return ((vp->v_flag & VROOT)? 1 : 0);
1419 }
1420
1421 /* is vnode_t a system vnode */
1422 int
1423 vnode_issystem(vnode_t vp)
1424 {
1425 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1426 }
1427
1428 /* is vnode_t a swap file vnode */
1429 int
1430 vnode_isswap(vnode_t vp)
1431 {
1432 return ((vp->v_flag & VSWAP)? 1 : 0);
1433 }
1434
1435 /* is vnode_t a tty */
1436 int
1437 vnode_istty(vnode_t vp)
1438 {
1439 return ((vp->v_flag & VISTTY) ? 1 : 0);
1440 }
1441
1442 /* if vnode_t mount operation in progress */
1443 int
1444 vnode_ismount(vnode_t vp)
1445 {
1446 return ((vp->v_flag & VMOUNT)? 1 : 0);
1447 }
1448
1449 /* is this vnode being recycled now */
1450 int
1451 vnode_isrecycled(vnode_t vp)
1452 {
1453 int ret;
1454
1455 vnode_lock_spin(vp);
1456 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1457 vnode_unlock(vp);
1458 return(ret);
1459 }
1460
1461 /* vnode was created by background task requesting rapid aging
1462 and has not since been referenced by a normal task */
1463 int
1464 vnode_israge(vnode_t vp)
1465 {
1466 return ((vp->v_flag & VRAGE)? 1 : 0);
1467 }
1468
1469 int
1470 vnode_needssnapshots(vnode_t vp)
1471 {
1472 return ((vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0);
1473 }
1474
1475
1476 /* Check the process/thread to see if we should skip atime updates */
1477 int
1478 vfs_ctx_skipatime (vfs_context_t ctx) {
1479 struct uthread *ut;
1480 proc_t proc;
1481 thread_t thr;
1482
1483 proc = vfs_context_proc(ctx);
1484 thr = vfs_context_thread (ctx);
1485
1486 /* Validate pointers in case we were invoked via a kernel context */
1487 if (thr && proc) {
1488 ut = get_bsdthread_info (thr);
1489
1490 if (proc->p_lflag & P_LRAGE_VNODES) {
1491 return 1;
1492 }
1493
1494 if (ut) {
1495 if (ut->uu_flag & UT_RAGE_VNODES) {
1496 return 1;
1497 }
1498 }
1499 }
1500 return 0;
1501 }
1502
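/*
 * Example (illustrative sketch): a filesystem can consult this before
 * scheduling an access-time update.
 *
 *	if (!vfs_ctx_skipatime(ctx)) {
 *		... mark the node so its atime gets updated on disk ...
 *	}
 */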
1503 /* is vnode_t marked to not keep data cached once it's been consumed */
1504 int
1505 vnode_isnocache(vnode_t vp)
1506 {
1507 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1508 }
1509
1510 /*
1511 * has sequential readahead been disabled on this vnode
1512 */
1513 int
1514 vnode_isnoreadahead(vnode_t vp)
1515 {
1516 return ((vp->v_flag & VRAOFF)? 1 : 0);
1517 }
1518
1519 int
1520 vnode_is_openevt(vnode_t vp)
1521 {
1522 return ((vp->v_flag & VOPENEVT)? 1 : 0);
1523 }
1524
1525 /* is vnode_t a standard one? */
1526 int
1527 vnode_isstandard(vnode_t vp)
1528 {
1529 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1530 }
1531
1532 /* don't vflush() if SKIPSYSTEM */
1533 int
1534 vnode_isnoflush(vnode_t vp)
1535 {
1536 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1537 }
1538
1539 /* is vnode_t a regular file */
1540 int
1541 vnode_isreg(vnode_t vp)
1542 {
1543 return ((vp->v_type == VREG)? 1 : 0);
1544 }
1545
1546 /* is vnode_t a directory? */
1547 int
1548 vnode_isdir(vnode_t vp)
1549 {
1550 return ((vp->v_type == VDIR)? 1 : 0);
1551 }
1552
1553 /* is vnode_t a symbolic link ? */
1554 int
1555 vnode_islnk(vnode_t vp)
1556 {
1557 return ((vp->v_type == VLNK)? 1 : 0);
1558 }
1559
1560 int
1561 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
1562 {
1563 struct nameidata *ndp = cnp->cn_ndp;
1564
1565 if (ndp == NULL) {
1566 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1567 }
1568
1569 if (vnode_isdir(vp)) {
1570 if (vp->v_mountedhere != NULL) {
1571 goto yes;
1572 }
1573
1574 #if CONFIG_TRIGGERS
1575 if (vp->v_resolve) {
1576 goto yes;
1577 }
1578 #endif /* CONFIG_TRIGGERS */
1579
1580 }
1581
1582
1583 if (vnode_islnk(vp)) {
1584 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1585 if (cnp->cn_flags & FOLLOW) {
1586 goto yes;
1587 }
1588 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
1589 goto yes;
1590 }
1591 }
1592
1593 return 0;
1594
1595 yes:
1596 ndp->ni_flag |= NAMEI_CONTLOOKUP;
1597 return EKEEPLOOKING;
1598 }
1599
1600 /* is vnode_t a fifo ? */
1601 int
1602 vnode_isfifo(vnode_t vp)
1603 {
1604 return ((vp->v_type == VFIFO)? 1 : 0);
1605 }
1606
1607 /* is vnode_t a block device? */
1608 int
1609 vnode_isblk(vnode_t vp)
1610 {
1611 return ((vp->v_type == VBLK)? 1 : 0);
1612 }
1613
1614 int
1615 vnode_isspec(vnode_t vp)
1616 {
1617 return (((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0);
1618 }
1619
1620 /* is vnode_t a char device? */
1621 int
1622 vnode_ischr(vnode_t vp)
1623 {
1624 return ((vp->v_type == VCHR)? 1 : 0);
1625 }
1626
1627 /* is vnode_t a socket? */
1628 int
1629 vnode_issock(vnode_t vp)
1630 {
1631 return ((vp->v_type == VSOCK)? 1 : 0);
1632 }
1633
1634 /* is vnode_t a device with multiple active vnodes referring to it? */
1635 int
1636 vnode_isaliased(vnode_t vp)
1637 {
1638 enum vtype vt = vp->v_type;
1639 if (!((vt == VCHR) || (vt == VBLK))) {
1640 return 0;
1641 } else {
1642 return (vp->v_specflags & SI_ALIASED);
1643 }
1644 }
1645
1646 /* is vnode_t a named stream? */
1647 int
1648 vnode_isnamedstream(
1649 #if NAMEDSTREAMS
1650 vnode_t vp
1651 #else
1652 __unused vnode_t vp
1653 #endif
1654 )
1655 {
1656 #if NAMEDSTREAMS
1657 return ((vp->v_flag & VISNAMEDSTREAM) ? 1 : 0);
1658 #else
1659 return (0);
1660 #endif
1661 }
1662
1663 int
1664 vnode_isshadow(
1665 #if NAMEDSTREAMS
1666 vnode_t vp
1667 #else
1668 __unused vnode_t vp
1669 #endif
1670 )
1671 {
1672 #if NAMEDSTREAMS
1673 return ((vp->v_flag & VISSHADOW) ? 1 : 0);
1674 #else
1675 return (0);
1676 #endif
1677 }
1678
1679 /* does vnode have associated named stream vnodes ? */
1680 int
1681 vnode_hasnamedstreams(
1682 #if NAMEDSTREAMS
1683 vnode_t vp
1684 #else
1685 __unused vnode_t vp
1686 #endif
1687 )
1688 {
1689 #if NAMEDSTREAMS
1690 return ((vp->v_lflag & VL_HASSTREAMS) ? 1 : 0);
1691 #else
1692 return (0);
1693 #endif
1694 }
1695 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1696 void
1697 vnode_setnocache(vnode_t vp)
1698 {
1699 vnode_lock_spin(vp);
1700 vp->v_flag |= VNOCACHE_DATA;
1701 vnode_unlock(vp);
1702 }
1703
1704 void
1705 vnode_clearnocache(vnode_t vp)
1706 {
1707 vnode_lock_spin(vp);
1708 vp->v_flag &= ~VNOCACHE_DATA;
1709 vnode_unlock(vp);
1710 }
1711
1712 void
1713 vnode_set_openevt(vnode_t vp)
1714 {
1715 vnode_lock_spin(vp);
1716 vp->v_flag |= VOPENEVT;
1717 vnode_unlock(vp);
1718 }
1719
1720 void
1721 vnode_clear_openevt(vnode_t vp)
1722 {
1723 vnode_lock_spin(vp);
1724 vp->v_flag &= ~VOPENEVT;
1725 vnode_unlock(vp);
1726 }
1727
1728
1729 void
1730 vnode_setnoreadahead(vnode_t vp)
1731 {
1732 vnode_lock_spin(vp);
1733 vp->v_flag |= VRAOFF;
1734 vnode_unlock(vp);
1735 }
1736
1737 void
1738 vnode_clearnoreadahead(vnode_t vp)
1739 {
1740 vnode_lock_spin(vp);
1741 vp->v_flag &= ~VRAOFF;
1742 vnode_unlock(vp);
1743 }
1744
1745
1746 /* mark vnode_t to skip vflush() if SKIPSYSTEM */
1747 void
1748 vnode_setnoflush(vnode_t vp)
1749 {
1750 vnode_lock_spin(vp);
1751 vp->v_flag |= VNOFLUSH;
1752 vnode_unlock(vp);
1753 }
1754
1755 void
1756 vnode_clearnoflush(vnode_t vp)
1757 {
1758 vnode_lock_spin(vp);
1759 vp->v_flag &= ~VNOFLUSH;
1760 vnode_unlock(vp);
1761 }
1762
1763
1764 /* is vnode_t a blkdevice and has a FS mounted on it */
1765 int
1766 vnode_ismountedon(vnode_t vp)
1767 {
1768 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
1769 }
1770
1771 void
1772 vnode_setmountedon(vnode_t vp)
1773 {
1774 vnode_lock_spin(vp);
1775 vp->v_specflags |= SI_MOUNTEDON;
1776 vnode_unlock(vp);
1777 }
1778
1779 void
1780 vnode_clearmountedon(vnode_t vp)
1781 {
1782 vnode_lock_spin(vp);
1783 vp->v_specflags &= ~SI_MOUNTEDON;
1784 vnode_unlock(vp);
1785 }
1786
1787
1788 void
1789 vnode_settag(vnode_t vp, int tag)
1790 {
1791 vp->v_tag = tag;
1792
1793 }
1794
1795 int
1796 vnode_tag(vnode_t vp)
1797 {
1798 return(vp->v_tag);
1799 }
1800
1801 vnode_t
1802 vnode_parent(vnode_t vp)
1803 {
1804
1805 return(vp->v_parent);
1806 }
1807
1808 void
1809 vnode_setparent(vnode_t vp, vnode_t dvp)
1810 {
1811 vp->v_parent = dvp;
1812 }
1813
1814 void
1815 vnode_setname(vnode_t vp, char * name)
1816 {
1817 vp->v_name = name;
1818 }
1819
1820 /* return the FS type name registered when the FS was added to the kernel */
1821 void
1822 vnode_vfsname(vnode_t vp, char * buf)
1823 {
1824 strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
1825 }
1826
1827 /* return the FS type number */
1828 int
1829 vnode_vfstypenum(vnode_t vp)
1830 {
1831 return(vp->v_mount->mnt_vtable->vfc_typenum);
1832 }
1833
1834 int
1835 vnode_vfs64bitready(vnode_t vp)
1836 {
1837
1838 /*
1839 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
1840 */
1841 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
1842 return(1);
1843 else
1844 return(0);
1845 }
1846
1847
1848
1849 /* return the visible flags on associated mount point of vnode_t */
1850 uint32_t
1851 vnode_vfsvisflags(vnode_t vp)
1852 {
1853 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
1854 }
1855
1856 /* return the command modifier flags on associated mount point of vnode_t */
1857 uint32_t
1858 vnode_vfscmdflags(vnode_t vp)
1859 {
1860 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
1861 }
1862
1863 /* return the max length of a short symlink for vnode_t's mount point */
1864 uint32_t
1865 vnode_vfsmaxsymlen(vnode_t vp)
1866 {
1867 return(vp->v_mount->mnt_maxsymlinklen);
1868 }
1869
1870 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1871 struct vfsstatfs *
1872 vnode_vfsstatfs(vnode_t vp)
1873 {
1874 return(&vp->v_mount->mnt_vfsstat);
1875 }
1876
1877 /* return the FS's private data handle associated with vnode_t's mount point */
1878 void *
1879 vnode_vfsfsprivate(vnode_t vp)
1880 {
1881 return(vp->v_mount->mnt_data);
1882 }
1883
1884 /* is vnode_t in a rdonly mounted FS */
1885 int
1886 vnode_vfsisrdonly(vnode_t vp)
1887 {
1888 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
1889 }
1890
1891 int
1892 vnode_compound_rename_available(vnode_t vp)
1893 {
1894 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
1895 }
1896 int
1897 vnode_compound_rmdir_available(vnode_t vp)
1898 {
1899 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
1900 }
1901 int
1902 vnode_compound_mkdir_available(vnode_t vp)
1903 {
1904 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
1905 }
1906 int
1907 vnode_compound_remove_available(vnode_t vp)
1908 {
1909 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
1910 }
1911 int
1912 vnode_compound_open_available(vnode_t vp)
1913 {
1914 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
1915 }
1916
1917 int
1918 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
1919 {
1920 return ((vp->v_mount->mnt_compound_ops & opid) != 0);
1921 }
1922
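/*
 * Example (illustrative sketch): a caller can use these predicates to choose
 * between a compound VNOP and the classic lookup-then-operate sequence.
 *
 *	if (vnode_compound_remove_available(dvp)) {
 *		... issue a single compound remove against dvp ...
 *	} else {
 *		... do a separate lookup followed by VNOP_REMOVE() ...
 *	}
 */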
1923 /*
1924 * Returns vnode ref to current working directory; if a per-thread current
1925 * working directory is in effect, return that instead of the per process one.
1926 *
1927 * XXX Published, but not used.
1928 */
1929 vnode_t
1930 current_workingdir(void)
1931 {
1932 return vfs_context_cwd(vfs_context_current());
1933 }
1934
1935 /* returns vnode ref to current root(chroot) directory */
1936 vnode_t
1937 current_rootdir(void)
1938 {
1939 proc_t proc = current_proc();
1940 struct vnode * vp ;
1941
1942 if ( (vp = proc->p_fd->fd_rdir) ) {
1943 if ( (vnode_getwithref(vp)) )
1944 return (NULL);
1945 }
1946 return vp;
1947 }
1948
1949 /*
1950 * Get a filesec and optional acl contents from an extended attribute.
1951 * Function will attempt to retrieve ACL, UUID, and GUID information using a
1952 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
1953 *
1954 * Parameters: vp The vnode on which to operate.
1955 * fsecp The filesec (and ACL, if any) being
1956 * retrieved.
1957 * ctx The vnode context in which the
1958 * operation is to be attempted.
1959 *
1960 * Returns: 0 Success
1961 * !0 errno value
1962 *
1963 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
1964 * host byte order, as will be the ACL contents, if any.
1965 * Internally, we will canonicalize these values from network (PPC)
1966 * byte order after we retrieve them so that the on-disk contents
1967 * of the extended attribute are identical for both PPC and Intel
1968 * (if we were not being required to provide this service via
1969 * fallback, this would be the job of the filesystem
1970 * 'VNOP_GETATTR' call).
1971 *
1972 * We use ntohl() because it has a transitive property on Intel
1973 * machines and no effect on PPC machines. This guarantees us a consistent on-disk byte order.
1974 *
1975 * XXX: Deleting rather than ignoring a corrupt security structure is
1976 * probably the only way to reset it without assistance from a
1977 * file system integrity checking tool. Right now we ignore it.
1978 *
1979 * XXX: We should enumerate the possible errno values here, and where
1980 * in the code they originated.
1981 */
1982 static int
1983 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
1984 {
1985 kauth_filesec_t fsec;
1986 uio_t fsec_uio;
1987 size_t fsec_size;
1988 size_t xsize, rsize;
1989 int error;
1990 uint32_t host_fsec_magic;
1991 uint32_t host_acl_entrycount;
1992
1993 fsec = NULL;
1994 fsec_uio = NULL;
1995 error = 0;
1996
1997 /* find out how big the EA is */
1998 if ((error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx)) != 0) {
1999 /* no EA, no filesec */
2000 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2001 error = 0;
2002 /* either way, we are done */
2003 goto out;
2004 }
2005
2006 /*
2007 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2008 * ACE entry ACL, and if it's larger than that, it must have the right
2009 * number of bytes such that it contains an atomic number of ACEs,
2010 * rather than partial entries. Otherwise, we ignore it.
2011 */
2012 if (!KAUTH_FILESEC_VALID(xsize)) {
2013 KAUTH_DEBUG(" ERROR - Bogus kauth_filesec_t: %ld bytes", xsize);
2014 error = 0;
2015 goto out;
2016 }
2017
2018 /* how many entries would fit? */
2019 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2020
2021 /* get buffer and uio */
2022 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
2023 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2024 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2025 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2026 error = ENOMEM;
2027 goto out;
2028 }
2029
2030 /* read security attribute */
2031 rsize = xsize;
2032 if ((error = vn_getxattr(vp,
2033 KAUTH_FILESEC_XATTR,
2034 fsec_uio,
2035 &rsize,
2036 XATTR_NOSECURITY,
2037 ctx)) != 0) {
2038
2039 /* no attribute - no security data */
2040 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2041 error = 0;
2042 /* either way, we are done */
2043 goto out;
2044 }
2045
2046 /*
2047 * Validate security structure; the validation must take place in host
2048 * byte order. If it's corrupt, we will just ignore it.
2049 */
2050
2051 /* Validate the size before trying to convert it */
2052 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2053 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2054 goto out;
2055 }
2056
2057 /* Validate the magic number before trying to convert it */
2058 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2059 if (fsec->fsec_magic != host_fsec_magic) {
2060 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2061 goto out;
2062 }
2063
2064 /* Validate the entry count before trying to convert it. */
2065 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2066 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2067 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2068 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2069 goto out;
2070 }
2071 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2072 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2073 goto out;
2074 }
2075 }
2076
2077 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2078
2079 *fsecp = fsec;
2080 fsec = NULL;
2081 error = 0;
2082 out:
2083 if (fsec != NULL)
2084 kauth_filesec_free(fsec);
2085 if (fsec_uio != NULL)
2086 uio_free(fsec_uio);
2087 if (error)
2088 *fsecp = NULL;
2089 return(error);
2090 }
2091
2092 /*
2093 * Set a filesec and optional acl contents into an extended attribute.
2094 * Function will attempt to store ACL, UUID, and GUID information using a
2095 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2096 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2097 * original caller supplied an acl.
2098 *
2099 * Parameters: vp The vnode on which to operate.
2100 * fsec The filesec being set.
2101 * acl The acl to be associated with 'fsec'.
2102 * ctx The vnode context in which the
2103 * operation is to be attempted.
2104 *
2105 * Returns: 0 Success
2106 * !0 errno value
2107 *
2108 * Notes: Both the fsec and the acl are always valid.
2109 *
2110 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2111 * as are the acl contents, if they are used. Internally, we will
2112 * canonicalize these values into network (PPC) byte order before we
2113 * attempt to write them so that the on-disk contents of the
2114 * extended attribute are identical for both PPC and Intel (if we
2115 * were not being required to provide this service via fallback,
2116 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2117 * We reverse this process on the way out, so we leave with the
2118 * same byte order we started with.
2119 *
2120 * XXX: We should enumerate the possible errno values here, and where
2121 * in the code they originated.
2122 */
2123 static int
2124 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2125 {
2126 uio_t fsec_uio;
2127 int error;
2128 uint32_t saved_acl_copysize;
2129
2130 fsec_uio = NULL;
2131
2132 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2133 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2134 error = ENOMEM;
2135 goto out;
2136 }
2137 /*
2138 * Save the pre-converted ACL copysize, because it gets swapped too
2139 * if we are running with the wrong endianness.
2140 */
2141 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2142
2143 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2144
2145 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2146 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2147 error = vn_setxattr(vp,
2148 KAUTH_FILESEC_XATTR,
2149 fsec_uio,
2150 XATTR_NOSECURITY, /* we have auth'ed already */
2151 ctx);
2152 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2153
2154 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2155
2156 out:
2157 if (fsec_uio != NULL)
2158 uio_free(fsec_uio);
2159 return(error);
2160 }
2161
2162
2163 /*
2164 * Returns: 0 Success
2165 * ENOMEM Not enough space [only if has filesec]
2166 * VNOP_GETATTR: ???
2167 * vnode_get_filesec: ???
2168 * kauth_cred_guid2uid: ???
2169 * kauth_cred_guid2gid: ???
2170 * vfs_update_vfsstat: ???
2171 */
2172 int
2173 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2174 {
2175 kauth_filesec_t fsec;
2176 kauth_acl_t facl;
2177 int error;
2178 uid_t nuid;
2179 gid_t ngid;
2180
2181 /* don't ask for extended security data if the filesystem doesn't support it */
2182 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2183 VATTR_CLEAR_ACTIVE(vap, va_acl);
2184 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2185 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2186 }
2187
2188 /*
2189 * If the caller wants size values we might have to synthesise, give the
2190 * filesystem the opportunity to supply better intermediate results.
2191 */
2192 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2193 VATTR_IS_ACTIVE(vap, va_total_size) ||
2194 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2195 VATTR_SET_ACTIVE(vap, va_data_size);
2196 VATTR_SET_ACTIVE(vap, va_data_alloc);
2197 VATTR_SET_ACTIVE(vap, va_total_size);
2198 VATTR_SET_ACTIVE(vap, va_total_alloc);
2199 }
2200
2201 error = VNOP_GETATTR(vp, vap, ctx);
2202 if (error) {
2203 KAUTH_DEBUG("ERROR - returning %d", error);
2204 goto out;
2205 }
2206
2207 /*
2208 * If extended security data was requested but not returned, try the fallback
2209 * path.
2210 */
2211 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2212 fsec = NULL;
2213
2214 if (XATTR_VNODE_SUPPORTED(vp)) {
2215 /* try to get the filesec */
2216 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
2217 goto out;
2218 }
2219 /* if no filesec, no attributes */
2220 if (fsec == NULL) {
2221 VATTR_RETURN(vap, va_acl, NULL);
2222 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2223 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2224 } else {
2225
2226 /* looks good, try to return what we were asked for */
2227 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2228 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2229
2230 /* only return the ACL if we were actually asked for it */
2231 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2232 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2233 VATTR_RETURN(vap, va_acl, NULL);
2234 } else {
2235 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2236 if (facl == NULL) {
2237 kauth_filesec_free(fsec);
2238 error = ENOMEM;
2239 goto out;
2240 }
2241 bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2242 VATTR_RETURN(vap, va_acl, facl);
2243 }
2244 }
2245 kauth_filesec_free(fsec);
2246 }
2247 }
2248 /*
2249 * If someone gave us an unsolicited filesec, toss it. We promise that
2250 * we're OK with a filesystem giving us anything back, but our callers
2251 * only expect what they asked for.
2252 */
2253 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2254 if (vap->va_acl != NULL)
2255 kauth_acl_free(vap->va_acl);
2256 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2257 }
2258
2259 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2260 /*
2261 * Handle the case where we need a UID/GID, but only have extended
2262 * security information.
2263 */
2264 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2265 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2266 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2267 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
2268 VATTR_RETURN(vap, va_uid, nuid);
2269 }
2270 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2271 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2272 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2273 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
2274 VATTR_RETURN(vap, va_gid, ngid);
2275 }
2276 #endif
2277
2278 /*
2279 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2280 */
2281 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2282 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2283 nuid = vap->va_uid;
2284 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2285 nuid = vp->v_mount->mnt_fsowner;
2286 if (nuid == KAUTH_UID_NONE)
2287 nuid = 99;
2288 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2289 nuid = vap->va_uid;
2290 } else {
2291 /* this will always be something sensible */
2292 nuid = vp->v_mount->mnt_fsowner;
2293 }
2294 if ((nuid == 99) && !vfs_context_issuser(ctx))
2295 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2296 VATTR_RETURN(vap, va_uid, nuid);
2297 }
2298 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2299 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2300 ngid = vap->va_gid;
2301 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2302 ngid = vp->v_mount->mnt_fsgroup;
2303 if (ngid == KAUTH_GID_NONE)
2304 ngid = 99;
2305 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2306 ngid = vap->va_gid;
2307 } else {
2308 /* this will always be something sensible */
2309 ngid = vp->v_mount->mnt_fsgroup;
2310 }
2311 if ((ngid == 99) && !vfs_context_issuser(ctx))
2312 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2313 VATTR_RETURN(vap, va_gid, ngid);
2314 }
2315
2316 /*
2317 * Synthesise some values that can be reasonably guessed.
2318 */
2319 if (!VATTR_IS_SUPPORTED(vap, va_iosize))
2320 VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
2321
2322 if (!VATTR_IS_SUPPORTED(vap, va_flags))
2323 VATTR_RETURN(vap, va_flags, 0);
2324
2325 if (!VATTR_IS_SUPPORTED(vap, va_filerev))
2326 VATTR_RETURN(vap, va_filerev, 0);
2327
2328 if (!VATTR_IS_SUPPORTED(vap, va_gen))
2329 VATTR_RETURN(vap, va_gen, 0);
2330
2331 /*
2332 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2333 */
2334 if (!VATTR_IS_SUPPORTED(vap, va_data_size))
2335 VATTR_RETURN(vap, va_data_size, 0);
2336
2337 /* do we want any of the possibly-computed values? */
2338 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2339 VATTR_IS_ACTIVE(vap, va_total_size) ||
2340 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2341 /* make sure f_bsize is valid */
2342 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2343 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0)
2344 goto out;
2345 }
2346
2347 /* default va_data_alloc from va_data_size */
2348 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
2349 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2350
2351 /* default va_total_size from va_data_size */
2352 if (!VATTR_IS_SUPPORTED(vap, va_total_size))
2353 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2354
2355 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2356 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
2357 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2358 }
2359
2360 /*
2361 * If we don't have a change time, pull it from the modtime.
2362 */
2363 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
2364 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2365
2366 /*
2367 * This is really only supported for the creation VNOPs, but since the field is there
2368 * we should populate it correctly.
2369 */
2370 VATTR_RETURN(vap, va_type, vp->v_type);
2371
2372 /*
2373 * The fsid can be obtained from the mountpoint directly.
2374 */
2375 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2376
2377 out:
2378
2379 return(error);
2380 }
2381
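/*
 * Illustrative sketch only (not part of the original file): a typical
 * caller of vnode_getattr() initialises a vnode_attr, marks the
 * attributes it wants, and then checks what was actually returned.
 * The helper name below is hypothetical.
 */
#if 0
static int
example_get_data_size(vnode_t vp, vfs_context_t ctx, off_t *sizep)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);
	if ((error = vnode_getattr(vp, &va, ctx)) != 0)
		return (error);
	/*
	 * vnode_getattr() above synthesises a default of 0 when the
	 * filesystem does not supply va_data_size, so this check is
	 * purely defensive.
	 */
	if (!VATTR_IS_SUPPORTED(&va, va_data_size))
		return (ENOTSUP);
	*sizep = (off_t)va.va_data_size;
	return (0);
}
#endif /* 0 */
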
2382 /*
2383 * Set the attributes on a vnode in a vnode context.
2384 *
2385 * Parameters: vp The vnode whose attributes to set.
2386 * vap A pointer to the attributes to set.
2387 * ctx The vnode context in which the
2388 * operation is to be attempted.
2389 *
2390 * Returns: 0 Success
2391 * !0 errno value
2392 *
2393 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2394 *
2395 * The contents of the data area pointed to by 'vap' may be
2396 * modified if the vnode is on a filesystem which has been
2397 * mounted with ignore ownership flags, or by the underlying
2398 * VFS itself, or by the fallback code, if the underlying VFS
2399 * does not support ACL, UUID, or GUUID attributes directly.
2400 *
2401 * XXX: We should enumerate the possible errno values here, and where
2402 * in the code they originated.
2403 */
2404 int
2405 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2406 {
2407 int error, is_perm_change=0;
2408
2409 /*
2410 * Make sure the filesystem is mounted R/W.
2411 * If not, return an error.
2412 */
2413 if (vfs_isrdonly(vp->v_mount)) {
2414 error = EROFS;
2415 goto out;
2416 }
2417 #if NAMEDSTREAMS
2418 /* For streams, va_data_size is the only settable attribute. */
2419 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2420 error = EPERM;
2421 goto out;
2422 }
2423 #endif
2424
2425 /*
2426 * If ownership is being ignored on this volume, we silently discard
2427 * ownership changes.
2428 */
2429 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2430 VATTR_CLEAR_ACTIVE(vap, va_uid);
2431 VATTR_CLEAR_ACTIVE(vap, va_gid);
2432 }
2433
2434 if ( VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)
2435 || VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_acl)) {
2436 is_perm_change = 1;
2437 }
2438
2439 /*
2440 * Make sure that extended security is enabled if we're going to try
2441 * to set any.
2442 */
2443 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2444 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2445 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2446 error = ENOTSUP;
2447 goto out;
2448 }
2449
2450 error = VNOP_SETATTR(vp, vap, ctx);
2451
2452 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
2453 error = vnode_setattr_fallback(vp, vap, ctx);
2454
2455 #if CONFIG_FSE
2456 // only send a stat_changed event if this is more than
2457 // just an access or backup time update
2458 if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time)) && (vap->va_active != VNODE_ATTR_BIT(va_backup_time))) {
2459 if (is_perm_change) {
2460 if (need_fsevent(FSE_CHOWN, vp)) {
2461 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2462 }
2463 } else if(need_fsevent(FSE_STAT_CHANGED, vp)) {
2464 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2465 }
2466 }
2467 #endif
2468
2469 out:
2470 return(error);
2471 }
2472
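/*
 * Illustrative sketch only (hypothetical helper): changing a single
 * attribute through vnode_setattr().  VATTR_SET() records the value
 * and marks it active, so the code above can decide whether the
 * filesystem or the extended-attribute fallback must store it.
 */
#if 0
static int
example_set_mode(vnode_t vp, mode_t new_mode, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, new_mode);
	return (vnode_setattr(vp, &va, ctx));
}
#endif /* 0 */
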
2473 /*
2474 * Fallback for setting the attributes on a vnode in a vnode context. This
2475 * function will attempt to store ACL, UUID, and GUID information utilizing
2476 * a read/modify/write operation against an EA used as a backing store for
2477 * the object.
2478 *
2479 * Parameters: vp The vnode whose attributes to set.
2480 * vap A pointer to the attributes to set.
2481 * ctx The vnode context in which the
2482 * operation is to be attempted.
2483 *
2484 * Returns: 0 Success
2485 * !0 errno value
2486 *
2487 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2488 * as are the fsec and lfsec, if they are used.
2489 *
2490 * The contents of the data area pointed to by 'vap' may be
2491 * modified to indicate which of the requested attributes are
2492 * supported.
2493 *
2494 * XXX: We should enumerate the possible errno values here, and where
2495 * in the code they originated.
2496 */
2497 int
2498 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2499 {
2500 kauth_filesec_t fsec;
2501 kauth_acl_t facl;
2502 struct kauth_filesec lfsec;
2503 int error;
2504
2505 error = 0;
2506
2507 /*
2508 * Extended security fallback via extended attributes.
2509 *
2510 * Note that we do not free the filesec; the caller is expected to
2511 * do this.
2512 */
2513 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2514 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2515 VATTR_NOT_RETURNED(vap, va_guuid)) {
2516 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2517
2518 /*
2519 * Fail for file types that we don't permit extended security
2520 * to be set on.
2521 */
2522 if (!XATTR_VNODE_SUPPORTED(vp)) {
2523 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2524 error = EINVAL;
2525 goto out;
2526 }
2527
2528 /*
2529 * If we don't have all the extended security items, we need
2530 * to fetch the existing data to perform a read-modify-write
2531 * operation.
2532 */
2533 fsec = NULL;
2534 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2535 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2536 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2537 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2538 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2539 goto out;
2540 }
2541 }
2542 /* if we didn't get a filesec, use our local one */
2543 if (fsec == NULL) {
2544 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2545 fsec = &lfsec;
2546 } else {
2547 KAUTH_DEBUG("SETATTR - updating existing filesec");
2548 }
2549 /* find the ACL */
2550 facl = &fsec->fsec_acl;
2551
2552 /* if we're using the local filesec, we need to initialise it */
2553 if (fsec == &lfsec) {
2554 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2555 fsec->fsec_owner = kauth_null_guid;
2556 fsec->fsec_group = kauth_null_guid;
2557 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2558 facl->acl_flags = 0;
2559 }
2560
2561 /*
2562 * Update with the supplied attributes.
2563 */
2564 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2565 KAUTH_DEBUG("SETATTR - updating owner UUID");
2566 fsec->fsec_owner = vap->va_uuuid;
2567 VATTR_SET_SUPPORTED(vap, va_uuuid);
2568 }
2569 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2570 KAUTH_DEBUG("SETATTR - updating group UUID");
2571 fsec->fsec_group = vap->va_guuid;
2572 VATTR_SET_SUPPORTED(vap, va_guuid);
2573 }
2574 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2575 if (vap->va_acl == NULL) {
2576 KAUTH_DEBUG("SETATTR - removing ACL");
2577 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2578 } else {
2579 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2580 facl = vap->va_acl;
2581 }
2582 VATTR_SET_SUPPORTED(vap, va_acl);
2583 }
2584
2585 /*
2586 * If the filesec data is all invalid, we can just remove
2587 * the EA completely.
2588 */
2589 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2590 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2591 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2592 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2593 /* no attribute is ok, nothing to delete */
2594 if (error == ENOATTR)
2595 error = 0;
2596 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2597 } else {
2598 /* write the EA */
2599 error = vnode_set_filesec(vp, fsec, facl, ctx);
2600 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2601 }
2602
2603 /* if we fetched a filesec, dispose of the buffer */
2604 if (fsec != &lfsec)
2605 kauth_filesec_free(fsec);
2606 }
2607 out:
2608
2609 return(error);
2610 }
2611
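/*
 * Illustrative sketch of the contract between a filesystem and the
 * fallback above (all names hypothetical): a VNOP_SETATTR handler marks
 * only the attributes it actually stored; anything still active but not
 * marked supported (va_acl, va_uuuid, va_guuid) is then written to the
 * KAUTH_FILESEC_XATTR extended attribute by vnode_setattr_fallback().
 */
#if 0
static int
examplefs_vnop_setattr(struct vnop_setattr_args *ap)
{
	struct vnode_attr *vap = ap->a_vap;

	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		/* ... persist the new mode in examplefs metadata ... */
		VATTR_SET_SUPPORTED(vap, va_mode);
	}
	/* va_acl et al. deliberately left unsupported; VFS falls back */
	return (0);
}
#endif /* 0 */
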
2612 /*
2613 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
2614 * event on a vnode.
2615 */
2616 int
2617 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
2618 {
2619 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
2620 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
2621 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
2622 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
2623 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
2624 uint32_t knote_events = (events & knote_mask);
2625
2626 /* Permissions are not explicitly part of the kqueue model */
2627 if (events & VNODE_EVENT_PERMS) {
2628 knote_events |= NOTE_ATTRIB;
2629 }
2630
2631 /* Directory contents information just becomes NOTE_WRITE */
2632 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
2633 knote_events |= NOTE_WRITE;
2634 }
2635
2636 if (knote_events) {
2637 lock_vnode_and_post(vp, knote_events);
2638 #if CONFIG_FSE
2639 if (vap != NULL) {
2640 create_fsevent_from_kevent(vp, events, vap);
2641 }
2642 #else
2643 (void)vap;
2644 #endif
2645 }
2646
2647 return 0;
2648 }
2649
2650
2651
2652 int
2653 vnode_isdyldsharedcache(vnode_t vp)
2654 {
2655 return ((vp->v_flag & VSHARED_DYLD) ? 1 : 0);
2656 }
2657
2658
2659 /*
2660 * For a filesystem that isn't tracking its own vnode watchers:
2661 * check whether a vnode is being monitored.
2662 */
2663 int
2664 vnode_ismonitored(vnode_t vp) {
2665 return (vp->v_knotes.slh_first != NULL);
2666 }
2667
2668 /*
2669 * Initialize a struct vnode_attr and activate the attributes required
2670 * by the vnode_notify() call.
2671 */
2672 int
2673 vfs_get_notify_attributes(struct vnode_attr *vap)
2674 {
2675 VATTR_INIT(vap);
2676 vap->va_active = VNODE_NOTIFY_ATTRS;
2677 return 0;
2678 }
2679
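/*
 * Illustrative sketch only (hypothetical helper): how a filesystem that
 * does not track its own watchers might use vnode_ismonitored(),
 * vfs_get_notify_attributes() and vnode_notify() after changing a
 * vnode's attributes.  A failed getattr simply means we notify without
 * attribute data.
 */
#if 0
static void
examplefs_post_attrib_event(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;
	struct vnode_attr *vap = NULL;

	if (!vnode_ismonitored(vp))
		return;
	vfs_get_notify_attributes(&va);
	if (vnode_getattr(vp, &va, ctx) == 0)
		vap = &va;
	vnode_notify(vp, VNODE_EVENT_ATTRIB, vap);
}
#endif /* 0 */
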
2680 #if CONFIG_TRIGGERS
2681 int
2682 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
2683 {
2684 int error;
2685 mount_t mp;
2686
2687 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
2688 if (mp == NULL) {
2689 return ENOENT;
2690 }
2691
2692 error = vfs_busy(mp, LK_NOWAIT);
2693 mount_iterdrop(mp);
2694
2695 if (error != 0) {
2696 return ENOENT;
2697 }
2698
2699 mount_lock(mp);
2700 if (mp->mnt_triggercallback != NULL) {
2701 error = EBUSY;
2702 mount_unlock(mp);
2703 goto out;
2704 }
2705
2706 mp->mnt_triggercallback = vtc;
2707 mp->mnt_triggerdata = data;
2708 mount_unlock(mp);
2709
2710 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
2711
2712 out:
2713 vfs_unbusy(mp);
2714 return error;
2715 }
2716 #endif /* CONFIG_TRIGGERS */
2717
2718 /*
2719 * Definition of vnode operations.
2720 */
2721
2722 #if 0
2723 /*
2724 *#
2725 *#% lookup dvp L ? ?
2726 *#% lookup vpp - L -
2727 */
2728 struct vnop_lookup_args {
2729 struct vnodeop_desc *a_desc;
2730 vnode_t a_dvp;
2731 vnode_t *a_vpp;
2732 struct componentname *a_cnp;
2733 vfs_context_t a_context;
2734 };
2735 #endif /* 0*/
2736
2737 /*
2738 * Returns: 0 Success
2739 * lock_fsnode:ENOENT No such file or directory [only for VFS
2740 * that is not thread safe & vnode is
2741 * currently being/has been terminated]
2742 * <vfs_lookup>:ENAMETOOLONG
2743 * <vfs_lookup>:ENOENT
2744 * <vfs_lookup>:EJUSTRETURN
2745 * <vfs_lookup>:EPERM
2746 * <vfs_lookup>:EISDIR
2747 * <vfs_lookup>:ENOTDIR
2748 * <vfs_lookup>:???
2749 *
2750 * Note: The return codes from the underlying VFS's lookup routine can't
2751 * be fully enumerated here, since third party VFS authors may not
2752 * limit their error returns to the ones documented here, even
2753 * though this may result in some programs functioning incorrectly.
2754 *
2755 * The return codes documented above are those which may currently
2756 * be returned by HFS from hfs_lookup, not including additional
2757 * error codes which may be propagated from underlying routines.
2758 */
2759 errno_t
2760 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
2761 {
2762 int _err;
2763 struct vnop_lookup_args a;
2764
2765 a.a_desc = &vnop_lookup_desc;
2766 a.a_dvp = dvp;
2767 a.a_vpp = vpp;
2768 a.a_cnp = cnp;
2769 a.a_context = ctx;
2770
2771 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
2772 if (_err == 0 && *vpp) {
2773 DTRACE_FSINFO(lookup, vnode_t, *vpp);
2774 }
2775
2776 return (_err);
2777 }
2778
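/*
 * Illustrative sketch only (hypothetical helper): kernel clients normally
 * reach this operation through namei()/vnode_lookup() rather than by
 * constructing a componentname by hand.  vnode_lookup() returns the vnode
 * with an iocount that the caller must release with vnode_put().
 */
#if 0
static int
example_lookup_path(const char *path, vfs_context_t ctx)
{
	vnode_t vp = NULLVP;
	int error;

	if ((error = vnode_lookup(path, 0 /* flags */, &vp, ctx)) != 0)
		return (error);
	/* ... use vp ... */
	vnode_put(vp);
	return (0);
}
#endif /* 0 */
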
2779 #if 0
2780 struct vnop_compound_open_args {
2781 struct vnodeop_desc *a_desc;
2782 vnode_t a_dvp;
2783 vnode_t *a_vpp;
2784 struct componentname *a_cnp;
2785 int32_t a_flags;
2786 int32_t a_fmode;
2787 struct vnode_attr *a_vap;
2788 vfs_context_t a_context;
2789 void *a_reserved;
2790 };
2791 #endif /* 0 */
2792
2793 int
2794 VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
2795 {
2796 int _err;
2797 struct vnop_compound_open_args a;
2798 int did_create = 0;
2799 int want_create;
2800 uint32_t tmp_status = 0;
2801 struct componentname *cnp = &ndp->ni_cnd;
2802
2803 want_create = (flags & VNOP_COMPOUND_OPEN_DO_CREATE);
2804
2805 a.a_desc = &vnop_compound_open_desc;
2806 a.a_dvp = dvp;
2807 a.a_vpp = vpp; /* Could be NULL */
2808 a.a_cnp = cnp;
2809 a.a_flags = flags;
2810 a.a_fmode = fmode;
2811 a.a_status = (statusp != NULL) ? statusp : &tmp_status;
2812 a.a_vap = vap;
2813 a.a_context = ctx;
2814 a.a_open_create_authorizer = vn_authorize_create;
2815 a.a_open_existing_authorizer = vn_authorize_open_existing;
2816 a.a_reserved = NULL;
2817
2818 if (dvp == NULLVP) {
2819 panic("No dvp?");
2820 }
2821 if (want_create && !vap) {
2822 panic("Want create, but no vap?");
2823 }
2824 if (!want_create && vap) {
2825 panic("Don't want create, but have a vap?");
2826 }
2827
2828 _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
2829 if (want_create) {
2830 if (_err == 0 && *vpp) {
2831 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
2832 } else {
2833 DTRACE_FSINFO(compound_open, vnode_t, dvp);
2834 }
2835 } else {
2836 DTRACE_FSINFO(compound_open, vnode_t, *vpp);
2837 }
2838
2839 did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
2840
2841 if (did_create && !want_create) {
2842 panic("Filesystem did a create, even though none was requested?");
2843 }
2844
2845 if (did_create) {
2846 #if CONFIG_APPLEDOUBLE
2847 if (!NATIVE_XATTR(dvp)) {
2848 /*
2849 * Remove stale Apple Double file (if any).
2850 */
2851 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
2852 }
2853 #endif /* CONFIG_APPLEDOUBLE */
2854 /* On create, provide kqueue notification */
2855 post_event_if_success(dvp, _err, NOTE_WRITE);
2856 }
2857
2858 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
2859 #if 0 /* FSEvents... */
2860 if (*vpp && _err && _err != EKEEPLOOKING) {
2861 vnode_put(*vpp);
2862 *vpp = NULLVP;
2863 }
2864 #endif /* 0 */
2865
2866 return (_err);
2867
2868 }
2869
2870 #if 0
2871 struct vnop_create_args {
2872 struct vnodeop_desc *a_desc;
2873 vnode_t a_dvp;
2874 vnode_t *a_vpp;
2875 struct componentname *a_cnp;
2876 struct vnode_attr *a_vap;
2877 vfs_context_t a_context;
2878 };
2879 #endif /* 0*/
2880 errno_t
2881 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
2882 {
2883 int _err;
2884 struct vnop_create_args a;
2885
2886 a.a_desc = &vnop_create_desc;
2887 a.a_dvp = dvp;
2888 a.a_vpp = vpp;
2889 a.a_cnp = cnp;
2890 a.a_vap = vap;
2891 a.a_context = ctx;
2892
2893 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
2894 if (_err == 0 && *vpp) {
2895 DTRACE_FSINFO(create, vnode_t, *vpp);
2896 }
2897
2898 #if CONFIG_APPLEDOUBLE
2899 if (_err == 0 && !NATIVE_XATTR(dvp)) {
2900 /*
2901 * Remove stale Apple Double file (if any).
2902 */
2903 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
2904 }
2905 #endif /* CONFIG_APPLEDOUBLE */
2906
2907 post_event_if_success(dvp, _err, NOTE_WRITE);
2908
2909 return (_err);
2910 }
2911
2912 #if 0
2913 /*
2914 *#
2915 *#% whiteout dvp L L L
2916 *#% whiteout cnp - - -
2917 *#% whiteout flag - - -
2918 *#
2919 */
2920 struct vnop_whiteout_args {
2921 struct vnodeop_desc *a_desc;
2922 vnode_t a_dvp;
2923 struct componentname *a_cnp;
2924 int a_flags;
2925 vfs_context_t a_context;
2926 };
2927 #endif /* 0*/
2928 errno_t
2929 VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t ctx)
2930 {
2931 int _err;
2932 struct vnop_whiteout_args a;
2933
2934 a.a_desc = &vnop_whiteout_desc;
2935 a.a_dvp = dvp;
2936 a.a_cnp = cnp;
2937 a.a_flags = flags;
2938 a.a_context = ctx;
2939
2940 _err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);
2941 DTRACE_FSINFO(whiteout, vnode_t, dvp);
2942
2943 post_event_if_success(dvp, _err, NOTE_WRITE);
2944
2945 return (_err);
2946 }
2947
2948 #if 0
2949 /*
2950 *#
2951 *#% mknod dvp L U U
2952 *#% mknod vpp - X -
2953 *#
2954 */
2955 struct vnop_mknod_args {
2956 struct vnodeop_desc *a_desc;
2957 vnode_t a_dvp;
2958 vnode_t *a_vpp;
2959 struct componentname *a_cnp;
2960 struct vnode_attr *a_vap;
2961 vfs_context_t a_context;
2962 };
2963 #endif /* 0*/
2964 errno_t
2965 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
2966 {
2967
2968 int _err;
2969 struct vnop_mknod_args a;
2970
2971 a.a_desc = &vnop_mknod_desc;
2972 a.a_dvp = dvp;
2973 a.a_vpp = vpp;
2974 a.a_cnp = cnp;
2975 a.a_vap = vap;
2976 a.a_context = ctx;
2977
2978 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
2979 if (_err == 0 && *vpp) {
2980 DTRACE_FSINFO(mknod, vnode_t, *vpp);
2981 }
2982
2983 post_event_if_success(dvp, _err, NOTE_WRITE);
2984
2985 return (_err);
2986 }
2987
2988 #if 0
2989 /*
2990 *#
2991 *#% open vp L L L
2992 *#
2993 */
2994 struct vnop_open_args {
2995 struct vnodeop_desc *a_desc;
2996 vnode_t a_vp;
2997 int a_mode;
2998 vfs_context_t a_context;
2999 };
3000 #endif /* 0*/
3001 errno_t
3002 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3003 {
3004 int _err;
3005 struct vnop_open_args a;
3006
3007 if (ctx == NULL) {
3008 ctx = vfs_context_current();
3009 }
3010 a.a_desc = &vnop_open_desc;
3011 a.a_vp = vp;
3012 a.a_mode = mode;
3013 a.a_context = ctx;
3014
3015 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3016 DTRACE_FSINFO(open, vnode_t, vp);
3017
3018 return (_err);
3019 }
3020
3021 #if 0
3022 /*
3023 *#
3024 *#% close vp U U U
3025 *#
3026 */
3027 struct vnop_close_args {
3028 struct vnodeop_desc *a_desc;
3029 vnode_t a_vp;
3030 int a_fflag;
3031 vfs_context_t a_context;
3032 };
3033 #endif /* 0*/
3034 errno_t
3035 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3036 {
3037 int _err;
3038 struct vnop_close_args a;
3039
3040 if (ctx == NULL) {
3041 ctx = vfs_context_current();
3042 }
3043 a.a_desc = &vnop_close_desc;
3044 a.a_vp = vp;
3045 a.a_fflag = fflag;
3046 a.a_context = ctx;
3047
3048 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3049 DTRACE_FSINFO(close, vnode_t, vp);
3050
3051 return (_err);
3052 }
3053
3054 #if 0
3055 /*
3056 *#
3057 *#% access vp L L L
3058 *#
3059 */
3060 struct vnop_access_args {
3061 struct vnodeop_desc *a_desc;
3062 vnode_t a_vp;
3063 int a_action;
3064 vfs_context_t a_context;
3065 };
3066 #endif /* 0*/
3067 errno_t
3068 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3069 {
3070 int _err;
3071 struct vnop_access_args a;
3072
3073 if (ctx == NULL) {
3074 ctx = vfs_context_current();
3075 }
3076 a.a_desc = &vnop_access_desc;
3077 a.a_vp = vp;
3078 a.a_action = action;
3079 a.a_context = ctx;
3080
3081 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3082 DTRACE_FSINFO(access, vnode_t, vp);
3083
3084 return (_err);
3085 }
3086
3087 #if 0
3088 /*
3089 *#
3090 *#% getattr vp = = =
3091 *#
3092 */
3093 struct vnop_getattr_args {
3094 struct vnodeop_desc *a_desc;
3095 vnode_t a_vp;
3096 struct vnode_attr *a_vap;
3097 vfs_context_t a_context;
3098 };
3099 #endif /* 0*/
3100 errno_t
3101 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3102 {
3103 int _err;
3104 struct vnop_getattr_args a;
3105
3106 a.a_desc = &vnop_getattr_desc;
3107 a.a_vp = vp;
3108 a.a_vap = vap;
3109 a.a_context = ctx;
3110
3111 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3112 DTRACE_FSINFO(getattr, vnode_t, vp);
3113
3114 return (_err);
3115 }
3116
3117 #if 0
3118 /*
3119 *#
3120 *#% setattr vp L L L
3121 *#
3122 */
3123 struct vnop_setattr_args {
3124 struct vnodeop_desc *a_desc;
3125 vnode_t a_vp;
3126 struct vnode_attr *a_vap;
3127 vfs_context_t a_context;
3128 };
3129 #endif /* 0*/
3130 errno_t
3131 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3132 {
3133 int _err;
3134 struct vnop_setattr_args a;
3135
3136 a.a_desc = &vnop_setattr_desc;
3137 a.a_vp = vp;
3138 a.a_vap = vap;
3139 a.a_context = ctx;
3140
3141 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3142 DTRACE_FSINFO(setattr, vnode_t, vp);
3143
3144 #if CONFIG_APPLEDOUBLE
3145 /*
3146 * Shadow uid/gid/mode changes to the extended attribute file.
3147 */
3148 if (_err == 0 && !NATIVE_XATTR(vp)) {
3149 struct vnode_attr va;
3150 int change = 0;
3151
3152 VATTR_INIT(&va);
3153 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3154 VATTR_SET(&va, va_uid, vap->va_uid);
3155 change = 1;
3156 }
3157 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3158 VATTR_SET(&va, va_gid, vap->va_gid);
3159 change = 1;
3160 }
3161 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3162 VATTR_SET(&va, va_mode, vap->va_mode);
3163 change = 1;
3164 }
3165 if (change) {
3166 vnode_t dvp;
3167 const char *vname;
3168
3169 dvp = vnode_getparent(vp);
3170 vname = vnode_getname(vp);
3171
3172 xattrfile_setattr(dvp, vname, &va, ctx);
3173 if (dvp != NULLVP)
3174 vnode_put(dvp);
3175 if (vname != NULL)
3176 vnode_putname(vname);
3177 }
3178 }
3179 #endif /* CONFIG_APPLEDOUBLE */
3180
3181 /*
3182 * If we have changed any of the things about the file that are likely
3183 * to result in changes to authorization results, blow the vnode auth
3184 * cache
3185 */
3186 if (_err == 0 && (
3187 VATTR_IS_SUPPORTED(vap, va_mode) ||
3188 VATTR_IS_SUPPORTED(vap, va_uid) ||
3189 VATTR_IS_SUPPORTED(vap, va_gid) ||
3190 VATTR_IS_SUPPORTED(vap, va_flags) ||
3191 VATTR_IS_SUPPORTED(vap, va_acl) ||
3192 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
3193 VATTR_IS_SUPPORTED(vap, va_guuid))) {
3194 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3195
3196 #if NAMEDSTREAMS
3197 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3198 vnode_t svp;
3199 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3200 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3201 vnode_put(svp);
3202 }
3203 }
3204 #endif /* NAMEDSTREAMS */
3205 }
3206
3207
3208 post_event_if_success(vp, _err, NOTE_ATTRIB);
3209
3210 return (_err);
3211 }
3212
3213
3214 #if 0
3215 /*
3216 *#
3217 *#% read vp L L L
3218 *#
3219 */
3220 struct vnop_read_args {
3221 struct vnodeop_desc *a_desc;
3222 vnode_t a_vp;
3223 struct uio *a_uio;
3224 int a_ioflag;
3225 vfs_context_t a_context;
3226 };
3227 #endif /* 0*/
3228 errno_t
3229 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3230 {
3231 int _err;
3232 struct vnop_read_args a;
3233 #if CONFIG_DTRACE
3234 user_ssize_t resid = uio_resid(uio);
3235 #endif
3236
3237 if (ctx == NULL) {
3238 ctx = vfs_context_current();
3239 }
3240
3241 a.a_desc = &vnop_read_desc;
3242 a.a_vp = vp;
3243 a.a_uio = uio;
3244 a.a_ioflag = ioflag;
3245 a.a_context = ctx;
3246
3247 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3248 DTRACE_FSINFO_IO(read,
3249 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3250
3251 return (_err);
3252 }
3253
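/*
 * Illustrative sketch only (hypothetical helper): reading from a vnode
 * through this wrapper with a kernel-space uio, mirroring the
 * uio_create()/uio_addiov() pattern used by vnode_get_filesec() above.
 */
#if 0
static int
example_read_bytes(vnode_t vp, void *buf, size_t len, off_t offset, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	if ((auio = uio_create(1, offset, UIO_SYSSPACE, UIO_READ)) == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), len);
	error = VNOP_READ(vp, auio, 0 /* ioflag */, ctx);
	/* on success, uio_resid(auio) is the number of bytes NOT transferred */
	uio_free(auio);
	return (error);
}
#endif /* 0 */
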
3254
3255 #if 0
3256 /*
3257 *#
3258 *#% write vp L L L
3259 *#
3260 */
3261 struct vnop_write_args {
3262 struct vnodeop_desc *a_desc;
3263 vnode_t a_vp;
3264 struct uio *a_uio;
3265 int a_ioflag;
3266 vfs_context_t a_context;
3267 };
3268 #endif /* 0*/
3269 errno_t
3270 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3271 {
3272 struct vnop_write_args a;
3273 int _err;
3274 #if CONFIG_DTRACE
3275 user_ssize_t resid = uio_resid(uio);
3276 #endif
3277
3278 if (ctx == NULL) {
3279 ctx = vfs_context_current();
3280 }
3281
3282 a.a_desc = &vnop_write_desc;
3283 a.a_vp = vp;
3284 a.a_uio = uio;
3285 a.a_ioflag = ioflag;
3286 a.a_context = ctx;
3287
3288 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3289 DTRACE_FSINFO_IO(write,
3290 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3291
3292 post_event_if_success(vp, _err, NOTE_WRITE);
3293
3294 return (_err);
3295 }
3296
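/*
 * Illustrative sketch only (hypothetical helper): the write-side
 * counterpart of the read example above, followed by a synchronous
 * flush via VNOP_FSYNC().  IO_SYNC and MNT_WAIT are standard flags;
 * everything else here is assumed for illustration.
 */
#if 0
static int
example_write_bytes(vnode_t vp, void *buf, size_t len, off_t offset, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	if ((auio = uio_create(1, offset, UIO_SYSSPACE, UIO_WRITE)) == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), len);
	error = VNOP_WRITE(vp, auio, IO_SYNC, ctx);
	uio_free(auio);
	if (error == 0)
		error = VNOP_FSYNC(vp, MNT_WAIT, ctx);
	return (error);
}
#endif /* 0 */
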
3297
3298 #if 0
3299 /*
3300 *#
3301 *#% ioctl vp U U U
3302 *#
3303 */
3304 struct vnop_ioctl_args {
3305 struct vnodeop_desc *a_desc;
3306 vnode_t a_vp;
3307 u_long a_command;
3308 caddr_t a_data;
3309 int a_fflag;
3310 vfs_context_t a_context;
3311 };
3312 #endif /* 0*/
3313 errno_t
3314 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3315 {
3316 int _err;
3317 struct vnop_ioctl_args a;
3318
3319 if (ctx == NULL) {
3320 ctx = vfs_context_current();
3321 }
3322
3323 /*
3324 * This check should probably have been put in the TTY code instead...
3325 *
3326 * We have to be careful about what we assume during startup and shutdown.
3327 * We have to be able to use the root filesystem's device vnode even when
3328 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3329 * structure. If there is no data pointer, it doesn't matter whether
3330 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZECACHE)
3331 * which passes NULL for its data pointer can therefore be used during
3332 * mount or unmount of the root filesystem.
3333 *
3334 * Depending on what root filesystems need to do during mount/unmount, we
3335 * may need to loosen this check again in the future.
3336 */
3337 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3338 if (data != NULL && !vnode_vfs64bitready(vp)) {
3339 return(ENOTTY);
3340 }
3341 }
3342
3343 a.a_desc = &vnop_ioctl_desc;
3344 a.a_vp = vp;
3345 a.a_command = command;
3346 a.a_data = data;
3347 a.a_fflag = fflag;
3348 a.a_context= ctx;
3349
3350 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
3351 DTRACE_FSINFO(ioctl, vnode_t, vp);
3352
3353 return (_err);
3354 }
3355
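/*
 * Illustrative sketch only (hypothetical helper; DKIOCSYNCHRONIZECACHE
 * comes from <sys/disk.h>, FWRITE from <sys/fcntl.h>): per the comment
 * above, a command that passes a NULL data pointer can be issued against
 * the root device vnode even while devfs is not mounted.
 */
#if 0
static int
example_flush_device_cache(vnode_t devvp, vfs_context_t ctx)
{
	/* NULL data pointer, so the 64-bit readiness check above is skipped */
	return (VNOP_IOCTL(devvp, DKIOCSYNCHRONIZECACHE, (caddr_t)NULL, FWRITE, ctx));
}
#endif /* 0 */
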
3356
3357 #if 0
3358 /*
3359 *#
3360 *#% select vp U U U
3361 *#
3362 */
3363 struct vnop_select_args {
3364 struct vnodeop_desc *a_desc;
3365 vnode_t a_vp;
3366 int a_which;
3367 int a_fflags;
3368 void *a_wql;
3369 vfs_context_t a_context;
3370 };
3371 #endif /* 0*/
3372 errno_t
3373 VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx)
3374 {
3375 int _err;
3376 struct vnop_select_args a;
3377
3378 if (ctx == NULL) {
3379 ctx = vfs_context_current();
3380 }
3381 a.a_desc = &vnop_select_desc;
3382 a.a_vp = vp;
3383 a.a_which = which;
3384 a.a_fflags = fflags;
3385 a.a_context = ctx;
3386 a.a_wql = wql;
3387
3388 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3389 DTRACE_FSINFO(select, vnode_t, vp);
3390
3391 return (_err);
3392 }
3393
3394
3395 #if 0
3396 /*
3397 *#
3398 *#% exchange fvp L L L
3399 *#% exchange tvp L L L
3400 *#
3401 */
3402 struct vnop_exchange_args {
3403 struct vnodeop_desc *a_desc;
3404 vnode_t a_fvp;
3405 vnode_t a_tvp;
3406 int a_options;
3407 vfs_context_t a_context;
3408 };
3409 #endif /* 0*/
3410 errno_t
3411 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3412 {
3413 int _err;
3414 struct vnop_exchange_args a;
3415
3416 a.a_desc = &vnop_exchange_desc;
3417 a.a_fvp = fvp;
3418 a.a_tvp = tvp;
3419 a.a_options = options;
3420 a.a_context = ctx;
3421
3422 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3423 DTRACE_FSINFO(exchange, vnode_t, fvp);
3424
3425 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3426 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3427 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3428
3429 return (_err);
3430 }
3431
3432
3433 #if 0
3434 /*
3435 *#
3436 *#% revoke vp U U U
3437 *#
3438 */
3439 struct vnop_revoke_args {
3440 struct vnodeop_desc *a_desc;
3441 vnode_t a_vp;
3442 int a_flags;
3443 vfs_context_t a_context;
3444 };
3445 #endif /* 0*/
3446 errno_t
3447 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
3448 {
3449 struct vnop_revoke_args a;
3450 int _err;
3451
3452 a.a_desc = &vnop_revoke_desc;
3453 a.a_vp = vp;
3454 a.a_flags = flags;
3455 a.a_context = ctx;
3456
3457 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3458 DTRACE_FSINFO(revoke, vnode_t, vp);
3459
3460 return (_err);
3461 }
3462
3463
3464 #if 0
3465 /*
3466 *#
3467 *# mmap - vp U U U
3468 *#
3469 */
3470 struct vnop_mmap_args {
3471 struct vnodeop_desc *a_desc;
3472 vnode_t a_vp;
3473 int a_fflags;
3474 vfs_context_t a_context;
3475 };
3476 #endif /* 0*/
3477 errno_t
3478 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
3479 {
3480 int _err;
3481 struct vnop_mmap_args a;
3482
3483 a.a_desc = &vnop_mmap_desc;
3484 a.a_vp = vp;
3485 a.a_fflags = fflags;
3486 a.a_context = ctx;
3487
3488 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3489 DTRACE_FSINFO(mmap, vnode_t, vp);
3490
3491 return (_err);
3492 }
3493
3494
3495 #if 0
3496 /*
3497 *#
3498 *# mnomap - vp U U U
3499 *#
3500 */
3501 struct vnop_mnomap_args {
3502 struct vnodeop_desc *a_desc;
3503 vnode_t a_vp;
3504 vfs_context_t a_context;
3505 };
3506 #endif /* 0*/
3507 errno_t
3508 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
3509 {
3510 int _err;
3511 struct vnop_mnomap_args a;
3512
3513 a.a_desc = &vnop_mnomap_desc;
3514 a.a_vp = vp;
3515 a.a_context = ctx;
3516
3517 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3518 DTRACE_FSINFO(mnomap, vnode_t, vp);
3519
3520 return (_err);
3521 }
3522
3523
3524 #if 0
3525 /*
3526 *#
3527 *#% fsync vp L L L
3528 *#
3529 */
3530 struct vnop_fsync_args {
3531 struct vnodeop_desc *a_desc;
3532 vnode_t a_vp;
3533 int a_waitfor;
3534 vfs_context_t a_context;
3535 };
3536 #endif /* 0*/
3537 errno_t
3538 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
3539 {
3540 struct vnop_fsync_args a;
3541 int _err;
3542
3543 a.a_desc = &vnop_fsync_desc;
3544 a.a_vp = vp;
3545 a.a_waitfor = waitfor;
3546 a.a_context = ctx;
3547
3548 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3549 DTRACE_FSINFO(fsync, vnode_t, vp);
3550
3551 return (_err);
3552 }
3553
3554
3555 #if 0
3556 /*
3557 *#
3558 *#% remove dvp L U U
3559 *#% remove vp L U U
3560 *#
3561 */
3562 struct vnop_remove_args {
3563 struct vnodeop_desc *a_desc;
3564 vnode_t a_dvp;
3565 vnode_t a_vp;
3566 struct componentname *a_cnp;
3567 int a_flags;
3568 vfs_context_t a_context;
3569 };
3570 #endif /* 0*/
3571 errno_t
3572 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
3573 {
3574 int _err;
3575 struct vnop_remove_args a;
3576
3577 a.a_desc = &vnop_remove_desc;
3578 a.a_dvp = dvp;
3579 a.a_vp = vp;
3580 a.a_cnp = cnp;
3581 a.a_flags = flags;
3582 a.a_context = ctx;
3583
3584 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3585 DTRACE_FSINFO(remove, vnode_t, vp);
3586
3587 if (_err == 0) {
3588 vnode_setneedinactive(vp);
3589 #if CONFIG_APPLEDOUBLE
3590 if ( !(NATIVE_XATTR(dvp)) ) {
3591 /*
3592 * Remove any associated extended attribute file (._ AppleDouble file).
3593 */
3594 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
3595 }
3596 #endif /* CONFIG_APPLEDOUBLE */
3597 }
3598
3599 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
3600 post_event_if_success(dvp, _err, NOTE_WRITE);
3601
3602 return (_err);
3603 }
3604
3605 int
3606 VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
3607 {
3608 int _err;
3609 struct vnop_compound_remove_args a;
3610 int no_vp = (*vpp == NULLVP);
3611
3612 a.a_desc = &vnop_compound_remove_desc;
3613 a.a_dvp = dvp;
3614 a.a_vpp = vpp;
3615 a.a_cnp = &ndp->ni_cnd;
3616 a.a_flags = flags;
3617 a.a_vap = vap;
3618 a.a_context = ctx;
3619 a.a_remove_authorizer = vn_authorize_unlink;
3620
3621 _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
3622 if (_err == 0 && *vpp) {
3623 DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
3624 } else {
3625 DTRACE_FSINFO(compound_remove, vnode_t, dvp);
3626 }
3627 if (_err == 0) {
3628 vnode_setneedinactive(*vpp);
3629 #if CONFIG_APPLEDOUBLE
3630 if ( !(NATIVE_XATTR(dvp)) ) {
3631 /*
3632 * Remove any associated extended attribute file (._ AppleDouble file).
3633 */
3634 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
3635 }
3636 #endif /* CONFIG_APPLEDOUBLE */
3637 }
3638
3639 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
3640 post_event_if_success(dvp, _err, NOTE_WRITE);
3641
3642 if (no_vp) {
3643 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
3644 if (*vpp && _err && _err != EKEEPLOOKING) {
3645 vnode_put(*vpp);
3646 *vpp = NULLVP;
3647 }
3648 }
3649
3650 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
3651
3652 return (_err);
3653 }
3654
3655 #if 0
3656 /*
3657 *#
3658 *#% link vp U U U
3659 *#% link tdvp L U U
3660 *#
3661 */
3662 struct vnop_link_args {
3663 struct vnodeop_desc *a_desc;
3664 vnode_t a_vp;
3665 vnode_t a_tdvp;
3666 struct componentname *a_cnp;
3667 vfs_context_t a_context;
3668 };
3669 #endif /* 0*/
3670 errno_t
3671 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
3672 {
3673 int _err;
3674 struct vnop_link_args a;
3675
3676 #if CONFIG_APPLEDOUBLE
3677 /*
3678 * For file systems with non-native extended attributes,
3679 * disallow linking to an existing "._" Apple Double file.
3680 */
3681 if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
3682 const char *vname;
3683
3684 vname = vnode_getname(vp);
3685 if (vname != NULL) {
3686 _err = 0;
3687 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
3688 _err = EPERM;
3689 }
3690 vnode_putname(vname);
3691 if (_err)
3692 return (_err);
3693 }
3694 }
3695 #endif /* CONFIG_APPLEDOUBLE */
3696
3697 a.a_desc = &vnop_link_desc;
3698 a.a_vp = vp;
3699 a.a_tdvp = tdvp;
3700 a.a_cnp = cnp;
3701 a.a_context = ctx;
3702
3703 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
3704 DTRACE_FSINFO(link, vnode_t, vp);
3705
3706 post_event_if_success(vp, _err, NOTE_LINK);
3707 post_event_if_success(tdvp, _err, NOTE_WRITE);
3708
3709 return (_err);
3710 }
3711
3712 errno_t
3713 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
3714 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
3715 uint32_t flags, vfs_context_t ctx)
3716 {
3717 int _err;
3718 struct nameidata *fromnd = NULL;
3719 struct nameidata *tond = NULL;
3720 #if CONFIG_APPLEDOUBLE
3721 vnode_t src_attr_vp = NULLVP;
3722 vnode_t dst_attr_vp = NULLVP;
3723 char smallname1[48];
3724 char smallname2[48];
3725 char *xfromname = NULL;
3726 char *xtoname = NULL;
3727 #endif /* CONFIG_APPLEDOUBLE */
3728 int batched;
3729
3730 batched = vnode_compound_rename_available(fdvp);
3731
3732 if (!batched) {
3733 if (*fvpp == NULLVP)
3734 panic("Not batched, and no fvp?");
3735 }
3736
3737 #if CONFIG_APPLEDOUBLE
3738 /*
3739 * We need to preflight any potential AppleDouble file for the source file
3740 * before doing the rename operation, since we could potentially be doing
3741 * this operation on a network filesystem, and would end up duplicating
3742 * the work. Also, save the source and destination names. Skip it if the
3743 * source has a "._" prefix.
3744 */
3745
3746 if (!NATIVE_XATTR(fdvp) &&
3747 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
3748 size_t len;
3749 int error;
3750
3751 /* Get source attribute file name. */
3752 len = fcnp->cn_namelen + 3;
3753 if (len > sizeof(smallname1)) {
3754 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
3755 } else {
3756 xfromname = &smallname1[0];
3757 }
3758 strlcpy(xfromname, "._", min(sizeof smallname1, len));
3759 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
3760 xfromname[len-1] = '\0';
3761
3762 /* Get destination attribute file name. */
3763 len = tcnp->cn_namelen + 3;
3764 if (len > sizeof(smallname2)) {
3765 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
3766 } else {
3767 xtoname = &smallname2[0];
3768 }
3769 strlcpy(xtoname, "._", min(sizeof smallname2, len));
3770 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
3771 xtoname[len-1] = '\0';
3772
3773 /*
3774 * Look up the source attribute file and keep a reference on it if it exists.
3775 * Note that we do the namei with the nameiop of RENAME, which is different than
3776 * in the rename syscall. It's OK if the source file does not exist, since this
3777 * is only for AppleDouble files.
3778 */
3779 if (xfromname != NULL) {
3780 MALLOC(fromnd, struct nameidata *, sizeof (struct nameidata), M_TEMP, M_WAITOK);
3781 NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
3782 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
3783 fromnd->ni_dvp = fdvp;
3784 error = namei(fromnd);
3785
3786 /*
3787 * If there was an error looking up source attribute file,
3788 * we'll behave as if it didn't exist.
3789 */
3790
3791 if (error == 0) {
3792 if (fromnd->ni_vp) {
3793 /* src_attr_vp indicates need to call vnode_put / nameidone later */
3794 src_attr_vp = fromnd->ni_vp;
3795
3796 if (fromnd->ni_vp->v_type != VREG) {
3797 src_attr_vp = NULLVP;
3798 vnode_put(fromnd->ni_vp);
3799 }
3800 }
3801 /*
3802 * Either we got an invalid vnode type (not a regular file) or the namei lookup
3803 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
3804 * have a vnode here, so we drop our namei buffer for the source attribute file
3805 */
3806 if (src_attr_vp == NULLVP) {
3807 nameidone(fromnd);
3808 }
3809 }
3810 }
3811 }
3812 #endif /* CONFIG_APPLEDOUBLE */
3813
3814 if (batched) {
3815 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
3816 if (_err != 0) {
3817 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
3818 }
3819 } else {
3820 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
3821 }
3822 #if CONFIG_MACF
3823 if (_err == 0) {
3824 mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
3825 }
3826 #endif
3827
3828 #if CONFIG_APPLEDOUBLE
3829 /*
3830 * Rename any associated extended attribute file (._ AppleDouble file).
3831 */
3832 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
3833 int error = 0;
3834
3835 /*
3836 * Get destination attribute file vnode.
3837 * Note that tdvp already has an iocount reference. Make sure to check that we
3838 * get a valid vnode from namei.
3839 */
3840 MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK);
3841 NDINIT(tond, RENAME, OP_RENAME,
3842 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
3843 CAST_USER_ADDR_T(xtoname), ctx);
3844 tond->ni_dvp = tdvp;
3845 error = namei(tond);
3846
3847 if (error)
3848 goto ad_error;
3849
3850 if (tond->ni_vp) {
3851 dst_attr_vp = tond->ni_vp;
3852 }
3853
3854 if (src_attr_vp) {
3855 const char *old_name = src_attr_vp->v_name;
3856 vnode_t old_parent = src_attr_vp->v_parent;
3857
3858 if (batched) {
3859 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
3860 tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
3861 0, ctx);
3862 } else {
3863 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
3864 tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
3865 }
3866
3867 if (error == 0 && old_name == src_attr_vp->v_name &&
3868 old_parent == src_attr_vp->v_parent) {
3869 int update_flags = VNODE_UPDATE_NAME;
3870
3871 if (fdvp != tdvp)
3872 update_flags |= VNODE_UPDATE_PARENT;
3873
3874 if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
3875 vnode_update_identity(src_attr_vp, tdvp,
3876 tond->ni_cnd.cn_nameptr,
3877 tond->ni_cnd.cn_namelen,
3878 tond->ni_cnd.cn_hash,
3879 update_flags);
3880 }
3881 }
3882
3883 /* kevent notifications for moving resource files.
3884 * _err is zero if we're here, so no need to notify directories; the code
3885 * below will do that. We only need to post the rename on the source and
3886 * possibly a delete on the dest.
3887 */
3888 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
3889 if (dst_attr_vp) {
3890 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
3891 }
3892
3893 } else if (dst_attr_vp) {
3894 /*
3895 * Just delete destination attribute file vnode if it exists, since
3896 * we didn't have a source attribute file.
3897 * Note that tdvp already has an iocount reference.
3898 */
3899
3900 struct vnop_remove_args args;
3901
3902 args.a_desc = &vnop_remove_desc;
3903 args.a_dvp = tdvp;
3904 args.a_vp = dst_attr_vp;
3905 args.a_cnp = &tond->ni_cnd;
3906 args.a_context = ctx;
3907
3908 if (error == 0) {
3909 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
3910
3911 if (error == 0)
3912 vnode_setneedinactive(dst_attr_vp);
3913 }
3914
3915 /* kevent notification for deleting the destination's attribute file
3916 * if it existed. Only need to post the delete on the destination, since
3917 * the code below will handle the directories.
3918 */
3919 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
3920 }
3921 }
3922 ad_error:
3923 if (src_attr_vp) {
3924 vnode_put(src_attr_vp);
3925 nameidone(fromnd);
3926 }
3927 if (dst_attr_vp) {
3928 vnode_put(dst_attr_vp);
3929 nameidone(tond);
3930 }
3931 if (xfromname && xfromname != &smallname1[0]) {
3932 FREE(xfromname, M_TEMP);
3933 }
3934 if (xtoname && xtoname != &smallname2[0]) {
3935 FREE(xtoname, M_TEMP);
3936 }
3937 #endif /* CONFIG_APPLEDOUBLE */
3938 if (fromnd) {
3939 FREE(fromnd, M_TEMP);
3940 }
3941 if (tond) {
3942 FREE(tond, M_TEMP);
3943 }
3944 return _err;
3945 }
3946
3947
3948 #if 0
3949 /*
3950 *#
3951 *#% rename fdvp U U U
3952 *#% rename fvp U U U
3953 *#% rename tdvp L U U
3954 *#% rename tvp X U U
3955 *#
3956 */
3957 struct vnop_rename_args {
3958 struct vnodeop_desc *a_desc;
3959 vnode_t a_fdvp;
3960 vnode_t a_fvp;
3961 struct componentname *a_fcnp;
3962 vnode_t a_tdvp;
3963 vnode_t a_tvp;
3964 struct componentname *a_tcnp;
3965 vfs_context_t a_context;
3966 };
3967 #endif /* 0*/
3968 errno_t
3969 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
3970 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
3971 vfs_context_t ctx)
3972 {
3973 int _err = 0;
3974 int events;
3975 struct vnop_rename_args a;
3976
3977 a.a_desc = &vnop_rename_desc;
3978 a.a_fdvp = fdvp;
3979 a.a_fvp = fvp;
3980 a.a_fcnp = fcnp;
3981 a.a_tdvp = tdvp;
3982 a.a_tvp = tvp;
3983 a.a_tcnp = tcnp;
3984 a.a_context = ctx;
3985
3986 /* do the rename of the main file. */
3987 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
3988 DTRACE_FSINFO(rename, vnode_t, fdvp);
3989
3990 if (_err == 0) {
3991 if (tvp && tvp != fvp)
3992 vnode_setneedinactive(tvp);
3993 }
3994
3995 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
3996 if (_err == 0) {
3997 events = NOTE_WRITE;
3998 if (vnode_isdir(fvp)) {
3999 /* Link count on dir changed only if we are moving a dir and...
4000 * --Moved to new dir, not overwriting there
4001 * --Kept in same dir and DID overwrite
4002 */
4003 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4004 events |= NOTE_LINK;
4005 }
4006 }
4007
4008 lock_vnode_and_post(fdvp, events);
4009 if (fdvp != tdvp) {
4010 lock_vnode_and_post(tdvp, events);
4011 }
4012
4013 /* If you're replacing the target, post a deletion for it */
4014 if (tvp)
4015 {
4016 lock_vnode_and_post(tvp, NOTE_DELETE);
4017 }
4018
4019 lock_vnode_and_post(fvp, NOTE_RENAME);
4020 }
4021
4022 return (_err);
4023 }
4024
4025 int
4026 VNOP_COMPOUND_RENAME(
4027 struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4028 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4029 uint32_t flags, vfs_context_t ctx)
4030 {
4031 int _err = 0;
4032 int events;
4033 struct vnop_compound_rename_args a;
4034 int no_fvp, no_tvp;
4035
4036 no_fvp = (*fvpp) == NULLVP;
4037 no_tvp = (*tvpp) == NULLVP;
4038
4039 a.a_desc = &vnop_compound_rename_desc;
4040
4041 a.a_fdvp = fdvp;
4042 a.a_fvpp = fvpp;
4043 a.a_fcnp = fcnp;
4044 a.a_fvap = fvap;
4045
4046 a.a_tdvp = tdvp;
4047 a.a_tvpp = tvpp;
4048 a.a_tcnp = tcnp;
4049 a.a_tvap = tvap;
4050
4051 a.a_flags = flags;
4052 a.a_context = ctx;
4053 a.a_rename_authorizer = vn_authorize_rename;
4054 a.a_reserved = NULL;
4055
4056 /* do the rename of the main file. */
4057 _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
4058 DTRACE_FSINFO(compound_rename, vnode_t, fdvp);
4059
4060 if (_err == 0) {
4061 if (*tvpp && *tvpp != *fvpp)
4062 vnode_setneedinactive(*tvpp);
4063 }
4064
4065 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4066 if (_err == 0 && *fvpp != *tvpp) {
4067 if (!*fvpp) {
4068 panic("No fvpp after compound rename?");
4069 }
4070
4071 events = NOTE_WRITE;
4072 if (vnode_isdir(*fvpp)) {
4073 /* Link count on dir changed only if we are moving a dir and...
4074 * --Moved to new dir, not overwriting there
4075 * --Kept in same dir and DID overwrite
4076 */
4077 if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
4078 events |= NOTE_LINK;
4079 }
4080 }
4081
4082 lock_vnode_and_post(fdvp, events);
4083 if (fdvp != tdvp) {
4084 lock_vnode_and_post(tdvp, events);
4085 }
4086
4087 /* If you're replacing the target, post a deletion for it */
4088 if (*tvpp)
4089 {
4090 lock_vnode_and_post(*tvpp, NOTE_DELETE);
4091 }
4092
4093 lock_vnode_and_post(*fvpp, NOTE_RENAME);
4094 }
4095
4096 if (no_fvp) {
4097 lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
4098 }
4099 if (no_tvp && *tvpp != NULLVP) {
4100 lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
4101 }
4102
4103 if (_err && _err != EKEEPLOOKING) {
4104 if (*fvpp) {
4105 vnode_put(*fvpp);
4106 *fvpp = NULLVP;
4107 }
4108 if (*tvpp) {
4109 vnode_put(*tvpp);
4110 *tvpp = NULLVP;
4111 }
4112 }
4113
4114 return (_err);
4115 }
4116
4117 int
4118 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4119 struct vnode_attr *vap, vfs_context_t ctx)
4120 {
4121 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4122 panic("Non-CREATE nameiop in vn_mkdir()?");
4123 }
4124
4125 if (vnode_compound_mkdir_available(dvp)) {
4126 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4127 } else {
4128 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4129 }
4130 }
4131
4132 #if 0
4133 /*
4134 *#
4135 *#% mkdir dvp L U U
4136 *#% mkdir vpp - L -
4137 *#
4138 */
4139 struct vnop_mkdir_args {
4140 struct vnodeop_desc *a_desc;
4141 vnode_t a_dvp;
4142 vnode_t *a_vpp;
4143 struct componentname *a_cnp;
4144 struct vnode_attr *a_vap;
4145 vfs_context_t a_context;
4146 };
4147 #endif /* 0*/
4148 errno_t
4149 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4150 struct vnode_attr *vap, vfs_context_t ctx)
4151 {
4152 int _err;
4153 struct vnop_mkdir_args a;
4154
4155 a.a_desc = &vnop_mkdir_desc;
4156 a.a_dvp = dvp;
4157 a.a_vpp = vpp;
4158 a.a_cnp = cnp;
4159 a.a_vap = vap;
4160 a.a_context = ctx;
4161
4162 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4163 if (_err == 0 && *vpp) {
4164 DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4165 }
4166 #if CONFIG_APPLEDOUBLE
4167 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4168 /*
4169 * Remove stale Apple Double file (if any).
4170 */
4171 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4172 }
4173 #endif /* CONFIG_APPLEDOUBLE */
4174
4175 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4176
4177 return (_err);
4178 }
4179
4180 int
4181 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4182 struct vnode_attr *vap, vfs_context_t ctx)
4183 {
4184 int _err;
4185 struct vnop_compound_mkdir_args a;
4186
4187 a.a_desc = &vnop_compound_mkdir_desc;
4188 a.a_dvp = dvp;
4189 a.a_vpp = vpp;
4190 a.a_cnp = &ndp->ni_cnd;
4191 a.a_vap = vap;
4192 a.a_flags = 0;
4193 a.a_context = ctx;
4194 #if 0
4195 a.a_mkdir_authorizer = vn_authorize_mkdir;
4196 #endif /* 0 */
4197 a.a_reserved = NULL;
4198
4199 _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
4200 if (_err == 0 && *vpp) {
4201 DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
4202 }
4203 #if CONFIG_APPLEDOUBLE
4204 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4205 /*
4206 * Remove stale Apple Double file (if any).
4207 */
4208 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4209 }
4210 #endif /* CONFIG_APPLEDOUBLE */
4211
4212 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4213
4214 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
4215 if (*vpp && _err && _err != EKEEPLOOKING) {
4216 vnode_put(*vpp);
4217 *vpp = NULLVP;
4218 }
4219
4220 return (_err);
4221 }
4222
4223 int
4224 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4225 {
4226 if (vnode_compound_rmdir_available(dvp)) {
4227 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4228 } else {
4229 if (*vpp == NULLVP) {
4230 panic("NULL vp, but not a compound VNOP?");
4231 }
4232 if (vap != NULL) {
4233 panic("Non-NULL vap, but not a compound VNOP?");
4234 }
4235 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4236 }
4237 }
4238
4239 #if 0
4240 /*
4241 *#
4242 *#% rmdir dvp L U U
4243 *#% rmdir vp L U U
4244 *#
4245 */
4246 struct vnop_rmdir_args {
4247 struct vnodeop_desc *a_desc;
4248 vnode_t a_dvp;
4249 vnode_t a_vp;
4250 struct componentname *a_cnp;
4251 vfs_context_t a_context;
4252 };
4253
4254 #endif /* 0*/
4255 errno_t
4256 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4257 {
4258 int _err;
4259 struct vnop_rmdir_args a;
4260
4261 a.a_desc = &vnop_rmdir_desc;
4262 a.a_dvp = dvp;
4263 a.a_vp = vp;
4264 a.a_cnp = cnp;
4265 a.a_context = ctx;
4266
4267 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4268 DTRACE_FSINFO(rmdir, vnode_t, vp);
4269
4270 if (_err == 0) {
4271 vnode_setneedinactive(vp);
4272 #if CONFIG_APPLEDOUBLE
4273 if ( !(NATIVE_XATTR(dvp)) ) {
4274 /*
4275 * Remove any associated extended attribute file (._ AppleDouble file).
4276 */
4277 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4278 }
4279 #endif
4280 }
4281
4282 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4283 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4284 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4285
4286 return (_err);
4287 }
4288
4289 int
4290 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4291 struct vnode_attr *vap, vfs_context_t ctx)
4292 {
4293 int _err;
4294 struct vnop_compound_rmdir_args a;
4295 int no_vp;
4296
4297 a.a_desc = &vnop_compound_rmdir_desc;
4298 a.a_dvp = dvp;
4299 a.a_vpp = vpp;
4300 a.a_cnp = &ndp->ni_cnd;
4301 a.a_vap = vap;
4302 a.a_flags = 0;
4303 a.a_context = ctx;
4304 a.a_rmdir_authorizer = vn_authorize_rmdir;
4305 a.a_reserved = NULL;
4306
4307 no_vp = (*vpp == NULLVP);
4308
4309 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
4310 if (_err == 0 && *vpp) {
4311 DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
4312 }
4313 #if CONFIG_APPLEDOUBLE
4314 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4315 /*
4316 * Remove stale Apple Double file (if any).
4317 */
4318 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4319 }
4320 #endif
4321
4322 if (*vpp) {
4323 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4324 }
4325 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4326
4327 if (no_vp) {
4328 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4329
4330 #if 0 /* Removing orphaned ._ files requires a vp.... */
4331 if (*vpp && _err && _err != EKEEPLOOKING) {
4332 vnode_put(*vpp);
4333 *vpp = NULLVP;
4334 }
4335 #endif /* 0 */
4336 }
4337
4338 return (_err);
4339 }
4340
4341 #if CONFIG_APPLEDOUBLE
4342 /*
4343 * Remove a ._ AppleDouble file
4344 */
4345 #define AD_STALE_SECS (180)
4346 static void
4347 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
4348 {
4349 vnode_t xvp;
4350 struct nameidata nd;
4351 char smallname[64];
4352 char *filename = NULL;
4353 size_t len;
4354
4355 if ((basename == NULL) || (basename[0] == '\0') ||
4356 (basename[0] == '.' && basename[1] == '_')) {
4357 return;
4358 }
4359 filename = &smallname[0];
4360 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4361 if (len >= sizeof(smallname)) {
4362 len++; /* snprintf result doesn't include '\0' */
4363 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4364 len = snprintf(filename, len, "._%s", basename);
4365 }
4366 NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
4367 CAST_USER_ADDR_T(filename), ctx);
4368 nd.ni_dvp = dvp;
4369 if (namei(&nd) != 0)
4370 goto out2;
4371
4372 xvp = nd.ni_vp;
4373 nameidone(&nd);
4374 if (xvp->v_type != VREG)
4375 goto out1;
4376
4377 /*
4378 * When creating a new object and a "._" file already
4379 * exists, check to see if it's a stale "._" file.
4380 *
4381 */
4382 if (!force) {
4383 struct vnode_attr va;
4384
4385 VATTR_INIT(&va);
4386 VATTR_WANTED(&va, va_data_size);
4387 VATTR_WANTED(&va, va_modify_time);
4388 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
4389 VATTR_IS_SUPPORTED(&va, va_data_size) &&
4390 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
4391 va.va_data_size != 0) {
4392 struct timeval tv;
4393
4394 microtime(&tv);
4395 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
4396 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
4397 force = 1; /* must be stale */
4398 }
4399 }
4400 }
4401 if (force) {
4402 int error;
4403
4404 error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
4405 if (error == 0)
4406 vnode_setneedinactive(xvp);
4407
4408 post_event_if_success(xvp, error, NOTE_DELETE);
4409 post_event_if_success(dvp, error, NOTE_WRITE);
4410 }
4411
4412 out1:
4413 vnode_put(dvp);
4414 vnode_put(xvp);
4415 out2:
4416 if (filename && filename != &smallname[0]) {
4417 FREE(filename, M_TEMP);
4418 }
4419 }
4420
4421 /*
4422 * Shadow uid/gid/mode to a ._ AppleDouble file
4423 */
4424 static void
4425 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
4426 vfs_context_t ctx)
4427 {
4428 vnode_t xvp;
4429 struct nameidata nd;
4430 char smallname[64];
4431 char *filename = NULL;
4432 size_t len;
4433
4434 if ((dvp == NULLVP) ||
4435 (basename == NULL) || (basename[0] == '\0') ||
4436 (basename[0] == '.' && basename[1] == '_')) {
4437 return;
4438 }
4439 filename = &smallname[0];
4440 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4441 if (len >= sizeof(smallname)) {
4442 len++; /* snprintf result doesn't include '\0' */
4443 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4444 len = snprintf(filename, len, "._%s", basename);
4445 }
4446 NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
4447 CAST_USER_ADDR_T(filename), ctx);
4448 nd.ni_dvp = dvp;
4449 if (namei(&nd) != 0)
4450 goto out2;
4451
4452 xvp = nd.ni_vp;
4453 nameidone(&nd);
4454
4455 if (xvp->v_type == VREG) {
4456 struct vnop_setattr_args a;
4457
4458 a.a_desc = &vnop_setattr_desc;
4459 a.a_vp = xvp;
4460 a.a_vap = vap;
4461 a.a_context = ctx;
4462
4463 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
4464 }
4465
4466 vnode_put(xvp);
4467 out2:
4468 if (filename && filename != &smallname[0]) {
4469 FREE(filename, M_TEMP);
4470 }
4471 }
4472 #endif /* CONFIG_APPLEDOUBLE */
4473
4474 #if 0
4475 /*
4476 *#
4477 *#% symlink dvp L U U
4478 *#% symlink vpp - U -
4479 *#
4480 */
4481 struct vnop_symlink_args {
4482 struct vnodeop_desc *a_desc;
4483 vnode_t a_dvp;
4484 vnode_t *a_vpp;
4485 struct componentname *a_cnp;
4486 struct vnode_attr *a_vap;
4487 char *a_target;
4488 vfs_context_t a_context;
4489 };
4490
4491 #endif /* 0*/
4492 errno_t
4493 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4494 struct vnode_attr *vap, char *target, vfs_context_t ctx)
4495 {
4496 int _err;
4497 struct vnop_symlink_args a;
4498
4499 a.a_desc = &vnop_symlink_desc;
4500 a.a_dvp = dvp;
4501 a.a_vpp = vpp;
4502 a.a_cnp = cnp;
4503 a.a_vap = vap;
4504 a.a_target = target;
4505 a.a_context = ctx;
4506
4507 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
4508 DTRACE_FSINFO(symlink, vnode_t, dvp);
4509 #if CONFIG_APPLEDOUBLE
4510 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4511 /*
4512 * Remove stale Apple Double file (if any). Posts its own knotes
4513 */
4514 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4515 }
4516 #endif /* CONFIG_APPLEDOUBLE */
4517
4518 post_event_if_success(dvp, _err, NOTE_WRITE);
4519
4520 return (_err);
4521 }
4522
4523 #if 0
4524 /*
4525 *#
4526 *#% readdir vp L L L
4527 *#
4528 */
4529 struct vnop_readdir_args {
4530 struct vnodeop_desc *a_desc;
4531 vnode_t a_vp;
4532 struct uio *a_uio;
4533 int a_flags;
4534 int *a_eofflag;
4535 int *a_numdirent;
4536 vfs_context_t a_context;
4537 };
4538
4539 #endif /* 0*/
4540 errno_t
4541 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
4542 int *numdirent, vfs_context_t ctx)
4543 {
4544 int _err;
4545 struct vnop_readdir_args a;
4546 #if CONFIG_DTRACE
4547 user_ssize_t resid = uio_resid(uio);
4548 #endif
4549
4550 a.a_desc = &vnop_readdir_desc;
4551 a.a_vp = vp;
4552 a.a_uio = uio;
4553 a.a_flags = flags;
4554 a.a_eofflag = eofflag;
4555 a.a_numdirent = numdirent;
4556 a.a_context = ctx;
4557
4558 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
4559 DTRACE_FSINFO_IO(readdir,
4560 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4561
4562 return (_err);
4563 }
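
#if 0
/*
 * Illustrative sketch (not part of the KPI): one way a kernel caller
 * might drive VNOP_READDIR() with a system-space uio to pull raw
 * directory entries into a caller-supplied buffer.  The helper name and
 * its error handling are hypothetical; only the VNOP_READDIR() call
 * itself follows the signature defined above.
 */
static errno_t
example_read_dirents(vnode_t dvp, off_t offset, char *buf, size_t buflen,
    int *eofflag, int *numdirent, vfs_context_t ctx)
{
	uio_t auio;
	errno_t error;

	auio = uio_create(1, offset, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);

	error = VNOP_READDIR(dvp, auio, 0, eofflag, numdirent, ctx);

	uio_free(auio);
	return (error);
}
#endif /* 0 */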
4564
4565 #if 0
4566 /*
4567 *#
4568 *#% readdirattr vp L L L
4569 *#
4570 */
4571 struct vnop_readdirattr_args {
4572 struct vnodeop_desc *a_desc;
4573 vnode_t a_vp;
4574 struct attrlist *a_alist;
4575 struct uio *a_uio;
4576 uint32_t a_maxcount;
4577 uint32_t a_options;
4578 uint32_t *a_newstate;
4579 int *a_eofflag;
4580 uint32_t *a_actualcount;
4581 vfs_context_t a_context;
4582 };
4583
4584 #endif /* 0*/
4585 errno_t
4586 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
4587 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
4588 {
4589 int _err;
4590 struct vnop_readdirattr_args a;
4591 #if CONFIG_DTRACE
4592 user_ssize_t resid = uio_resid(uio);
4593 #endif
4594
4595 a.a_desc = &vnop_readdirattr_desc;
4596 a.a_vp = vp;
4597 a.a_alist = alist;
4598 a.a_uio = uio;
4599 a.a_maxcount = maxcount;
4600 a.a_options = options;
4601 a.a_newstate = newstate;
4602 a.a_eofflag = eofflag;
4603 a.a_actualcount = actualcount;
4604 a.a_context = ctx;
4605
4606 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
4607 DTRACE_FSINFO_IO(readdirattr,
4608 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4609
4610 return (_err);
4611 }
4612
4613 #if 0
4614 /*
4615 *#
4616 *#% readlink vp L L L
4617 *#
4618 */
4619 struct vnop_readlink_args {
4620 struct vnodeop_desc *a_desc;
4621 vnode_t a_vp;
4622 struct uio *a_uio;
4623 vfs_context_t a_context;
4624 };
4625 #endif /* 0 */
4626
4627 /*
4628 * Returns: 0 Success
4629 * lock_fsnode:ENOENT No such file or directory [only for VFS
4630 * that is not thread safe & vnode is
4631 * currently being/has been terminated]
4632 * <vfs_readlink>:EINVAL
4633 * <vfs_readlink>:???
4634 *
4635 * Note: The return codes from the underlying VFS's readlink routine
4636 * can't be fully enumerated here, since third party VFS authors
4637 * may not limit their error returns to the ones documented here,
4638 * even though this may result in some programs functioning
4639 * incorrectly.
4640 *
4641 * The return codes documented above are those which may currently
4642 * be returned by HFS from hfs_vnop_readlink, not including
4643 * additional error codes which may be propagated from underlying
4644 * routines.
4645 */
4646 errno_t
4647 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
4648 {
4649 int _err;
4650 struct vnop_readlink_args a;
4651 #if CONFIG_DTRACE
4652 user_ssize_t resid = uio_resid(uio);
4653 #endif
4654 a.a_desc = &vnop_readlink_desc;
4655 a.a_vp = vp;
4656 a.a_uio = uio;
4657 a.a_context = ctx;
4658
4659 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
4660 DTRACE_FSINFO_IO(readlink,
4661 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4662
4663 return (_err);
4664 }
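
#if 0
/*
 * Illustrative sketch (not part of the KPI): reading a symlink target
 * into a kernel buffer through VNOP_READLINK().  The helper name and the
 * way the copied length is reported are assumptions made for the
 * example; the VNOP_READLINK() call follows the signature above.
 */
static errno_t
example_readlink(vnode_t vp, char *buf, size_t buflen, size_t *copied,
    vfs_context_t ctx)
{
	uio_t auio;
	errno_t error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);

	error = VNOP_READLINK(vp, auio, ctx);
	if (error == 0 && copied != NULL) {
		/* bytes copied = bytes requested minus the residual count */
		*copied = buflen - (size_t)uio_resid(auio);
	}
	uio_free(auio);
	return (error);
}
#endif /* 0 */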
4665
4666 #if 0
4667 /*
4668 *#
4669 *#% inactive vp L U U
4670 *#
4671 */
4672 struct vnop_inactive_args {
4673 struct vnodeop_desc *a_desc;
4674 vnode_t a_vp;
4675 vfs_context_t a_context;
4676 };
4677 #endif /* 0*/
4678 errno_t
4679 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
4680 {
4681 int _err;
4682 struct vnop_inactive_args a;
4683
4684 a.a_desc = &vnop_inactive_desc;
4685 a.a_vp = vp;
4686 a.a_context = ctx;
4687
4688 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
4689 DTRACE_FSINFO(inactive, vnode_t, vp);
4690
4691 #if NAMEDSTREAMS
4692 /* For file systems that do not support named streams natively, mark
4693 * the shadow stream file vnode to be recycled as soon as the last
4694 * reference goes away. To avoid re-entering reclaim code, do not
4695 * call recycle on terminating named stream vnodes.
4696 */
4697 if (vnode_isnamedstream(vp) &&
4698 (vp->v_parent != NULLVP) &&
4699 vnode_isshadow(vp) &&
4700 ((vp->v_lflag & VL_TERMINATE) == 0)) {
4701 vnode_recycle(vp);
4702 }
4703 #endif
4704
4705 return (_err);
4706 }
4707
4708
4709 #if 0
4710 /*
4711 *#
4712 *#% reclaim vp U U U
4713 *#
4714 */
4715 struct vnop_reclaim_args {
4716 struct vnodeop_desc *a_desc;
4717 vnode_t a_vp;
4718 vfs_context_t a_context;
4719 };
4720 #endif /* 0*/
4721 errno_t
4722 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
4723 {
4724 int _err;
4725 struct vnop_reclaim_args a;
4726
4727 a.a_desc = &vnop_reclaim_desc;
4728 a.a_vp = vp;
4729 a.a_context = ctx;
4730
4731 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
4732 DTRACE_FSINFO(reclaim, vnode_t, vp);
4733
4734 return (_err);
4735 }
4736
4737
4738 /*
4739 * Returns: 0 Success
4740 * lock_fsnode:ENOENT No such file or directory [only for VFS
4741 * that is not thread safe & vnode is
4742 * currently being/has been terminated]
4743 * <vnop_pathconf_desc>:??? [per FS implementation specific]
4744 */
4745 #if 0
4746 /*
4747 *#
4748 *#% pathconf vp L L L
4749 *#
4750 */
4751 struct vnop_pathconf_args {
4752 struct vnodeop_desc *a_desc;
4753 vnode_t a_vp;
4754 int a_name;
4755 int32_t *a_retval;
4756 vfs_context_t a_context;
4757 };
4758 #endif /* 0*/
4759 errno_t
4760 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
4761 {
4762 int _err;
4763 struct vnop_pathconf_args a;
4764
4765 a.a_desc = &vnop_pathconf_desc;
4766 a.a_vp = vp;
4767 a.a_name = name;
4768 a.a_retval = retval;
4769 a.a_context = ctx;
4770
4771 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
4772 DTRACE_FSINFO(pathconf, vnode_t, vp);
4773
4774 return (_err);
4775 }
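
#if 0
/*
 * Illustrative sketch (not part of the KPI): the general shape of a
 * filesystem's pathconf entry point as dispatched by VNOP_PATHCONF()
 * above.  The function name and the specific limits returned are
 * hypothetical; the selectors come from <sys/unistd.h> and the argument
 * structure is the one documented earlier in this file.
 */
static int
examplefs_vnop_pathconf(struct vnop_pathconf_args *ap)
{
	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = 32767;
		return (0);
	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		return (0);
	case _PC_CASE_SENSITIVE:
		*ap->a_retval = 1;
		return (0);
	default:
		/* selector not supported by this filesystem */
		return (EINVAL);
	}
}
#endif /* 0 */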
4776
4777 /*
4778 * Returns: 0 Success
4779 * err_advlock:ENOTSUP
4780 * lf_advlock:???
4781 * <vnop_advlock_desc>:???
4782 *
4783 * Notes: VFS implementations that perform advisory locking via calls
4784 * through <vnop_advlock_desc> (because lock enforcement does not
4785 * occur locally) should try to limit themselves to the return
4786 * codes documented above for lf_advlock and err_advlock.
4787 */
4788 #if 0
4789 /*
4790 *#
4791 *#% advlock vp U U U
4792 *#
4793 */
4794 struct vnop_advlock_args {
4795 struct vnodeop_desc *a_desc;
4796 vnode_t a_vp;
4797 caddr_t a_id;
4798 int a_op;
4799 struct flock *a_fl;
4800 int a_flags;
4801 vfs_context_t a_context;
struct timespec *a_timeout;
4802 };
4803 #endif /* 0*/
4804 errno_t
4805 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
4806 {
4807 int _err;
4808 struct vnop_advlock_args a;
4809
4810 a.a_desc = &vnop_advlock_desc;
4811 a.a_vp = vp;
4812 a.a_id = id;
4813 a.a_op = op;
4814 a.a_fl = fl;
4815 a.a_flags = flags;
4816 a.a_context = ctx;
4817 a.a_timeout = timeout;
4818
4819 /* Disallow advisory locking on non-seekable vnodes */
4820 if (vnode_isfifo(vp)) {
4821 _err = err_advlock(&a);
4822 } else {
4823 if ((vp->v_flag & VLOCKLOCAL)) {
4824 /* Advisory locking done at this layer */
4825 _err = lf_advlock(&a);
4826 } else {
4827 /* Advisory locking done by underlying filesystem */
4828 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
4829 }
4830 DTRACE_FSINFO(advlock, vnode_t, vp);
4831 }
4832
4833 return (_err);
4834 }
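
#if 0
/*
 * Illustrative sketch (not part of the KPI): a filesystem that does not
 * enforce byte-range locks itself can opt into the lf_advlock() path
 * taken above by marking its mount for local lock handling, typically
 * from its vfs_mount handler.  The handler name and its body are
 * hypothetical; vfs_setlocklocal() is the existing mount KPI being
 * shown.
 */
static int
examplefs_vfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data,
    vfs_context_t ctx)
{
#pragma unused(devvp, data, ctx)
	/* ... filesystem-specific mount work would go here ... */

	/*
	 * Vnodes created on this mount will carry VLOCKLOCAL, so
	 * VNOP_ADVLOCK() above will route their advisory locks to
	 * lf_advlock() instead of the filesystem.
	 */
	vfs_setlocklocal(mp);
	return (0);
}
#endif /* 0 */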
4835
4836
4837
4838 #if 0
4839 /*
4840 *#
4841 *#% allocate vp L L L
4842 *#
4843 */
4844 struct vnop_allocate_args {
4845 struct vnodeop_desc *a_desc;
4846 vnode_t a_vp;
4847 off_t a_length;
4848 u_int32_t a_flags;
4849 off_t *a_bytesallocated;
4850 off_t a_offset;
4851 vfs_context_t a_context;
4852 };
4853
4854 #endif /* 0*/
4855 errno_t
4856 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
4857 {
4858 int _err;
4859 struct vnop_allocate_args a;
4860
4861 a.a_desc = &vnop_allocate_desc;
4862 a.a_vp = vp;
4863 a.a_length = length;
4864 a.a_flags = flags;
4865 a.a_bytesallocated = bytesallocated;
4866 a.a_offset = offset;
4867 a.a_context = ctx;
4868
4869 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
4870 DTRACE_FSINFO(allocate, vnode_t, vp);
4871 #if CONFIG_FSE
4872 if (_err == 0) {
4873 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
4874 }
4875 #endif
4876
4877 return (_err);
4878 }
4879
4880 #if 0
4881 /*
4882 *#
4883 *#% pagein vp = = =
4884 *#
4885 */
4886 struct vnop_pagein_args {
4887 struct vnodeop_desc *a_desc;
4888 vnode_t a_vp;
4889 upl_t a_pl;
4890 upl_offset_t a_pl_offset;
4891 off_t a_f_offset;
4892 size_t a_size;
4893 int a_flags;
4894 vfs_context_t a_context;
4895 };
4896 #endif /* 0*/
4897 errno_t
4898 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
4899 {
4900 int _err;
4901 struct vnop_pagein_args a;
4902
4903 a.a_desc = &vnop_pagein_desc;
4904 a.a_vp = vp;
4905 a.a_pl = pl;
4906 a.a_pl_offset = pl_offset;
4907 a.a_f_offset = f_offset;
4908 a.a_size = size;
4909 a.a_flags = flags;
4910 a.a_context = ctx;
4911
4912 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
4913 DTRACE_FSINFO(pagein, vnode_t, vp);
4914
4915 return (_err);
4916 }
4917
4918 #if 0
4919 /*
4920 *#
4921 *#% pageout vp = = =
4922 *#
4923 */
4924 struct vnop_pageout_args {
4925 struct vnodeop_desc *a_desc;
4926 vnode_t a_vp;
4927 upl_t a_pl;
4928 upl_offset_t a_pl_offset;
4929 off_t a_f_offset;
4930 size_t a_size;
4931 int a_flags;
4932 vfs_context_t a_context;
4933 };
4934
4935 #endif /* 0*/
4936 errno_t
4937 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
4938 {
4939 int _err;
4940 struct vnop_pageout_args a;
4941
4942 a.a_desc = &vnop_pageout_desc;
4943 a.a_vp = vp;
4944 a.a_pl = pl;
4945 a.a_pl_offset = pl_offset;
4946 a.a_f_offset = f_offset;
4947 a.a_size = size;
4948 a.a_flags = flags;
4949 a.a_context = ctx;
4950
4951 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
4952 DTRACE_FSINFO(pageout, vnode_t, vp);
4953
4954 post_event_if_success(vp, _err, NOTE_WRITE);
4955
4956 return (_err);
4957 }
4958
4959 int
4960 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
4961 {
4962 if (vnode_compound_remove_available(dvp)) {
4963 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
4964 } else {
4965 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
4966 }
4967 }
4968
4969 #if CONFIG_SEARCHFS
4970
4971 #if 0
4972 /*
4973 *#
4974 *#% searchfs vp L L L
4975 *#
4976 */
4977 struct vnop_searchfs_args {
4978 struct vnodeop_desc *a_desc;
4979 vnode_t a_vp;
4980 void *a_searchparams1;
4981 void *a_searchparams2;
4982 struct attrlist *a_searchattrs;
4983 uint32_t a_maxmatches;
4984 struct timeval *a_timelimit;
4985 struct attrlist *a_returnattrs;
4986 uint32_t *a_nummatches;
4987 uint32_t a_scriptcode;
4988 uint32_t a_options;
4989 struct uio *a_uio;
4990 struct searchstate *a_searchstate;
4991 vfs_context_t a_context;
4992 };
4993
4994 #endif /* 0*/
4995 errno_t
4996 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
4997 {
4998 int _err;
4999 struct vnop_searchfs_args a;
5000
5001 a.a_desc = &vnop_searchfs_desc;
5002 a.a_vp = vp;
5003 a.a_searchparams1 = searchparams1;
5004 a.a_searchparams2 = searchparams2;
5005 a.a_searchattrs = searchattrs;
5006 a.a_maxmatches = maxmatches;
5007 a.a_timelimit = timelimit;
5008 a.a_returnattrs = returnattrs;
5009 a.a_nummatches = nummatches;
5010 a.a_scriptcode = scriptcode;
5011 a.a_options = options;
5012 a.a_uio = uio;
5013 a.a_searchstate = searchstate;
5014 a.a_context = ctx;
5015
5016 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5017 DTRACE_FSINFO(searchfs, vnode_t, vp);
5018
5019 return (_err);
5020 }
5021 #endif /* CONFIG_SEARCHFS */
5022
5023 #if 0
5024 /*
5025 *#
5026 *#% copyfile fvp U U U
5027 *#% copyfile tdvp L U U
5028 *#% copyfile tvp X U U
5029 *#
5030 */
5031 struct vnop_copyfile_args {
5032 struct vnodeop_desc *a_desc;
5033 vnode_t a_fvp;
5034 vnode_t a_tdvp;
5035 vnode_t a_tvp;
5036 struct componentname *a_tcnp;
5037 int a_mode;
5038 int a_flags;
5039 vfs_context_t a_context;
5040 };
5041 #endif /* 0*/
5042 errno_t
5043 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5044 int mode, int flags, vfs_context_t ctx)
5045 {
5046 int _err;
5047 struct vnop_copyfile_args a;
5048 a.a_desc = &vnop_copyfile_desc;
5049 a.a_fvp = fvp;
5050 a.a_tdvp = tdvp;
5051 a.a_tvp = tvp;
5052 a.a_tcnp = tcnp;
5053 a.a_mode = mode;
5054 a.a_flags = flags;
5055 a.a_context = ctx;
5056 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5057 DTRACE_FSINFO(copyfile, vnode_t, fvp);
5058 return (_err);
5059 }
5060
5061 errno_t
5062 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5063 {
5064 struct vnop_getxattr_args a;
5065 int error;
5066
5067 a.a_desc = &vnop_getxattr_desc;
5068 a.a_vp = vp;
5069 a.a_name = name;
5070 a.a_uio = uio;
5071 a.a_size = size;
5072 a.a_options = options;
5073 a.a_context = ctx;
5074
5075 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
5076 DTRACE_FSINFO(getxattr, vnode_t, vp);
5077
5078 return (error);
5079 }
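
#if 0
/*
 * Illustrative sketch (not part of the KPI): fetching an extended
 * attribute into a kernel buffer through VNOP_GETXATTR().  The helper
 * name is hypothetical, and whether a NULL uio may be used purely to
 * query the attribute size is a per-filesystem convention rather than
 * something this wrapper enforces.
 */
static errno_t
example_get_xattr(vnode_t vp, const char *name, char *buf, size_t buflen,
    vfs_context_t ctx)
{
	uio_t auio;
	size_t attrsize = 0;
	errno_t error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);

	/* attrsize is generally only filled in for size-only queries */
	error = VNOP_GETXATTR(vp, name, auio, &attrsize, 0, ctx);

	uio_free(auio);
	return (error);
}
#endif /* 0 */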
5080
5081 errno_t
5082 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
5083 {
5084 struct vnop_setxattr_args a;
5085 int error;
5086
5087 a.a_desc = &vnop_setxattr_desc;
5088 a.a_vp = vp;
5089 a.a_name = name;
5090 a.a_uio = uio;
5091 a.a_options = options;
5092 a.a_context = ctx;
5093
5094 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
5095 DTRACE_FSINFO(setxattr, vnode_t, vp);
5096
5097 if (error == 0)
5098 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
5099
5100 post_event_if_success(vp, error, NOTE_ATTRIB);
5101
5102 return (error);
5103 }
5104
5105 errno_t
5106 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
5107 {
5108 struct vnop_removexattr_args a;
5109 int error;
5110
5111 a.a_desc = &vnop_removexattr_desc;
5112 a.a_vp = vp;
5113 a.a_name = name;
5114 a.a_options = options;
5115 a.a_context = ctx;
5116
5117 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
5118 DTRACE_FSINFO(removexattr, vnode_t, vp);
5119
5120 post_event_if_success(vp, error, NOTE_ATTRIB);
5121
5122 return (error);
5123 }
5124
5125 errno_t
5126 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5127 {
5128 struct vnop_listxattr_args a;
5129 int error;
5130
5131 a.a_desc = &vnop_listxattr_desc;
5132 a.a_vp = vp;
5133 a.a_uio = uio;
5134 a.a_size = size;
5135 a.a_options = options;
5136 a.a_context = ctx;
5137
5138 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
5139 DTRACE_FSINFO(listxattr, vnode_t, vp);
5140
5141 return (error);
5142 }
5143
5144
5145 #if 0
5146 /*
5147 *#
5148 *#% blktooff vp = = =
5149 *#
5150 */
5151 struct vnop_blktooff_args {
5152 struct vnodeop_desc *a_desc;
5153 vnode_t a_vp;
5154 daddr64_t a_lblkno;
5155 off_t *a_offset;
5156 };
5157 #endif /* 0*/
5158 errno_t
5159 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
5160 {
5161 int _err;
5162 struct vnop_blktooff_args a;
5163
5164 a.a_desc = &vnop_blktooff_desc;
5165 a.a_vp = vp;
5166 a.a_lblkno = lblkno;
5167 a.a_offset = offset;
5168
5169 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
5170 DTRACE_FSINFO(blktooff, vnode_t, vp);
5171
5172 return (_err);
5173 }
5174
5175 #if 0
5176 /*
5177 *#
5178 *#% offtoblk vp = = =
5179 *#
5180 */
5181 struct vnop_offtoblk_args {
5182 struct vnodeop_desc *a_desc;
5183 vnode_t a_vp;
5184 off_t a_offset;
5185 daddr64_t *a_lblkno;
5186 };
5187 #endif /* 0*/
5188 errno_t
5189 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
5190 {
5191 int _err;
5192 struct vnop_offtoblk_args a;
5193
5194 a.a_desc = &vnop_offtoblk_desc;
5195 a.a_vp = vp;
5196 a.a_offset = offset;
5197 a.a_lblkno = lblkno;
5198
5199 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
5200 DTRACE_FSINFO(offtoblk, vnode_t, vp);
5201
5202 return (_err);
5203 }
5204
5205 #if 0
5206 /*
5207 *#
5208 *#% blockmap vp L L L
5209 *#
5210 */
5211 struct vnop_blockmap_args {
5212 struct vnodeop_desc *a_desc;
5213 vnode_t a_vp;
5214 off_t a_foffset;
5215 size_t a_size;
5216 daddr64_t *a_bpn;
5217 size_t *a_run;
5218 void *a_poff;
5219 int a_flags;
5220 vfs_context_t a_context;
5221 };
5222 #endif /* 0*/
5223 errno_t
5224 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
5225 {
5226 int _err;
5227 struct vnop_blockmap_args a;
5228 size_t localrun = 0;
5229
5230 if (ctx == NULL) {
5231 ctx = vfs_context_current();
5232 }
5233 a.a_desc = &vnop_blockmap_desc;
5234 a.a_vp = vp;
5235 a.a_foffset = foffset;
5236 a.a_size = size;
5237 a.a_bpn = bpn;
5238 a.a_run = &localrun;
5239 a.a_poff = poff;
5240 a.a_flags = flags;
5241 a.a_context = ctx;
5242
5243 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
5244 DTRACE_FSINFO(blockmap, vnode_t, vp);
5245
5246 /*
5247 * We used a local variable to request information from the underlying
5248 * filesystem about the length of the I/O run in question. If
5249 * we get malformed output from the filesystem, we cap it to the length
5250 * requested, at most. Update 'run' on the way out.
5251 */
5252 if (_err == 0) {
5253 if (localrun > size) {
5254 localrun = size;
5255 }
5256
5257 if (run) {
5258 *run = localrun;
5259 }
5260 }
5261
5262 return (_err);
5263 }
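
#if 0
/*
 * Illustrative sketch (not part of the KPI): translating a file offset
 * to an on-device block number plus the length of the contiguous run
 * that starts there, using VNOP_BLOCKMAP() above.  The helper name is
 * hypothetical; note that the run length handed back has already been
 * capped to the requested size by the wrapper.
 */
static errno_t
example_map_offset(vnode_t vp, off_t foffset, size_t size,
    daddr64_t *blkno, size_t *contig, vfs_context_t ctx)
{
	/* poff is unused by most callers; flags 0 requests a plain lookup */
	return (VNOP_BLOCKMAP(vp, foffset, size, blkno, contig, NULL, 0, ctx));
}
#endif /* 0 */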
5264
5265 #if 0
5266 struct vnop_strategy_args {
5267 struct vnodeop_desc *a_desc;
5268 struct buf *a_bp;
5269 };
5270
5271 #endif /* 0*/
5272 errno_t
5273 VNOP_STRATEGY(struct buf *bp)
5274 {
5275 int _err;
5276 struct vnop_strategy_args a;
5277 vnode_t vp = buf_vnode(bp);
5278 a.a_desc = &vnop_strategy_desc;
5279 a.a_bp = bp;
5280 _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
5281 DTRACE_FSINFO(strategy, vnode_t, vp);
5282 return (_err);
5283 }
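
#if 0
/*
 * Illustrative sketch (not part of the KPI): many filesystems implement
 * their vnop_strategy entry point by handing the buffer straight to the
 * underlying device vnode with buf_strategy().  The handler name and the
 * device-vnode lookup are hypothetical; buf_strategy() is the existing
 * buffer KPI being shown.
 */
extern vnode_t examplefs_devvp_for(vnode_t vp);	/* hypothetical helper */

static int
examplefs_vnop_strategy(struct vnop_strategy_args *ap)
{
	vnode_t devvp = examplefs_devvp_for(buf_vnode(ap->a_bp));

	return (buf_strategy(devvp, ap));
}
#endif /* 0 */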
5284
5285 #if 0
5286 struct vnop_bwrite_args {
5287 struct vnodeop_desc *a_desc;
5288 buf_t a_bp;
5289 };
5290 #endif /* 0*/
5291 errno_t
5292 VNOP_BWRITE(struct buf *bp)
5293 {
5294 int _err;
5295 struct vnop_bwrite_args a;
5296 vnode_t vp = buf_vnode(bp);
5297 a.a_desc = &vnop_bwrite_desc;
5298 a.a_bp = bp;
5299 _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
5300 DTRACE_FSINFO(bwrite, vnode_t, vp);
5301 return (_err);
5302 }
5303
5304 #if 0
5305 struct vnop_kqfilt_add_args {
5306 struct vnodeop_desc *a_desc;
5307 struct vnode *a_vp;
5308 struct knote *a_kn;
5309 vfs_context_t a_context;
5310 };
5311 #endif
5312 errno_t
5313 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
5314 {
5315 int _err;
5316 struct vnop_kqfilt_add_args a;
5317
5318 a.a_desc = VDESC(vnop_kqfilt_add);
5319 a.a_vp = vp;
5320 a.a_kn = kn;
5321 a.a_context = ctx;
5322
5323 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
5324 DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
5325
5326 return(_err);
5327 }
5328
5329 #if 0
5330 struct vnop_kqfilt_remove_args {
5331 struct vnodeop_desc *a_desc;
5332 struct vnode *a_vp;
5333 uintptr_t a_ident;
5334 vfs_context_t a_context;
5335 };
5336 #endif
5337 errno_t
5338 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
5339 {
5340 int _err;
5341 struct vnop_kqfilt_remove_args a;
5342
5343 a.a_desc = VDESC(vnop_kqfilt_remove);
5344 a.a_vp = vp;
5345 a.a_ident = ident;
5346 a.a_context = ctx;
5347
5348 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
5349 DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
5350
5351 return(_err);
5352 }
5353
5354 errno_t
5355 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
5356 {
5357 int _err;
5358 struct vnop_monitor_args a;
5359
5360 a.a_desc = VDESC(vnop_monitor);
5361 a.a_vp = vp;
5362 a.a_events = events;
5363 a.a_flags = flags;
5364 a.a_handle = handle;
5365 a.a_context = ctx;
5366
5367 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
5368 DTRACE_FSINFO(monitor, vnode_t, vp);
5369
5370 return(_err);
5371 }
5372
5373 #if 0
5374 struct vnop_setlabel_args {
5375 struct vnodeop_desc *a_desc;
5376 struct vnode *a_vp;
5377 struct label *a_vl;
5378 vfs_context_t a_context;
5379 };
5380 #endif
5381 errno_t
5382 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
5383 {
5384 int _err;
5385 struct vnop_setlabel_args a;
5386
5387 a.a_desc = VDESC(vnop_setlabel);
5388 a.a_vp = vp;
5389 a.a_vl = label;
5390 a.a_context = ctx;
5391
5392 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
5393 DTRACE_FSINFO(setlabel, vnode_t, vp);
5394
5395 return(_err);
5396 }
5397
5398
5399 #if NAMEDSTREAMS
5400 /*
5401 * Get a named stream
5402 */
5403 errno_t
5404 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
5405 {
5406 int _err;
5407 struct vnop_getnamedstream_args a;
5408
5409 a.a_desc = &vnop_getnamedstream_desc;
5410 a.a_vp = vp;
5411 a.a_svpp = svpp;
5412 a.a_name = name;
5413 a.a_operation = operation;
5414 a.a_flags = flags;
5415 a.a_context = ctx;
5416
5417 _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
5418 DTRACE_FSINFO(getnamedstream, vnode_t, vp);
5419 return (_err);
5420 }
5421
5422 /*
5423 * Create a named stream
5424 */
5425 errno_t
5426 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
5427 {
5428 int _err;
5429 struct vnop_makenamedstream_args a;
5430
5431 a.a_desc = &vnop_makenamedstream_desc;
5432 a.a_vp = vp;
5433 a.a_svpp = svpp;
5434 a.a_name = name;
5435 a.a_flags = flags;
5436 a.a_context = ctx;
5437
5438 _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
5439 DTRACE_FSINFO(makenamedstream, vnode_t, vp);
5440 return (_err);
5441 }
5442
5443
5444 /*
5445 * Remove a named stream
5446 */
5447 errno_t
5448 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
5449 {
5450 int _err;
5451 struct vnop_removenamedstream_args a;
5452
5453 a.a_desc = &vnop_removenamedstream_desc;
5454 a.a_vp = vp;
5455 a.a_svp = svp;
5456 a.a_name = name;
5457 a.a_flags = flags;
5458 a.a_context = ctx;
5459
5460 _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
5461 DTRACE_FSINFO(removenamedstream, vnode_t, vp);
5462 return (_err);
5463 }
5464 #endif