1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/vnode_internal.h>
88 #include <sys/stat.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
91 #include <sys/buf.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
95 #include <sys/mbuf.h>
96 #include <sys/syslog.h>
97 #include <sys/ubc.h>
98 #include <sys/vm.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/event.h>
102 #include <sys/fsevents.h>
103 #include <sys/user.h>
104 #include <sys/lockf.h>
105 #include <sys/xattr.h>
106
107 #include <kern/assert.h>
108 #include <kern/kalloc.h>
109 #include <kern/task.h>
110
111 #include <libkern/OSByteOrder.h>
112
113 #include <miscfs/specfs/specdev.h>
114
115 #include <mach/mach_types.h>
116 #include <mach/memory_object_types.h>
117 #include <mach/task.h>
118
119 #if CONFIG_MACF
120 #include <security/mac_framework.h>
121 #endif
122
123 #define ESUCCESS 0
124 #undef mount_t
125 #undef vnode_t
126
127 #define COMPAT_ONLY
128
129
130 #ifndef __LP64__
131 #define THREAD_SAFE_FS(VP) \
132 ((VP)->v_unsafefs ? 0 : 1)
133 #endif /* __LP64__ */
134
135 #define NATIVE_XATTR(VP) \
136 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
137
138 static void xattrfile_remove(vnode_t dvp, const char *basename,
139 vfs_context_t ctx, int force);
140 static void xattrfile_setattr(vnode_t dvp, const char * basename,
141 struct vnode_attr * vap, vfs_context_t ctx);
142
143 /*
144 * vnode_setneedinactive
145 *
146 * Description: Indicate that when the last iocount on this vnode goes away,
147 * and the usecount is also zero, we should inform the filesystem
148 * via VNOP_INACTIVE.
149 *
150 * Parameters: vnode_t vnode to mark
151 *
152 * Returns: Nothing
153 *
154 * Notes: Notably used when we're deleting a file--we need not have a
155 * usecount, so VNOP_INACTIVE may not get called by anyone. We
156 * want it called when we drop our iocount.
157 */
158 void
159 vnode_setneedinactive(vnode_t vp)
160 {
161 cache_purge(vp);
162
163 vnode_lock_spin(vp);
164 vp->v_lflag |= VL_NEEDINACTIVE;
165 vnode_unlock(vp);
166 }
167
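/*
 * Usage sketch (illustrative only, not taken from this file): after a
 * successful VNOP_REMOVE(), a caller holding only an iocount can mark
 * the vnode so the filesystem still sees VNOP_INACTIVE when that
 * iocount is dropped:
 *
 *	if (VNOP_REMOVE(dvp, vp, cnp, 0, ctx) == 0)
 *		vnode_setneedinactive(vp);
 */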
168
169 #ifndef __LP64__
170 int
171 lock_fsnode(vnode_t vp, int *funnel_state)
172 {
173 if (funnel_state)
174 *funnel_state = thread_funnel_set(kernel_flock, TRUE);
175
176 if (vp->v_unsafefs) {
177 if (vp->v_unsafefs->fsnodeowner == current_thread()) {
178 vp->v_unsafefs->fsnode_count++;
179 } else {
180 lck_mtx_lock(&vp->v_unsafefs->fsnodelock);
181
182 if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
183 lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
184
185 if (funnel_state)
186 (void) thread_funnel_set(kernel_flock, *funnel_state);
187 return (ENOENT);
188 }
189 vp->v_unsafefs->fsnodeowner = current_thread();
190 vp->v_unsafefs->fsnode_count = 1;
191 }
192 }
193 return (0);
194 }
195
196
197 void
198 unlock_fsnode(vnode_t vp, int *funnel_state)
199 {
200 if (vp->v_unsafefs) {
201 if (--vp->v_unsafefs->fsnode_count == 0) {
202 vp->v_unsafefs->fsnodeowner = NULL;
203 lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
204 }
205 }
206 if (funnel_state)
207 (void) thread_funnel_set(kernel_flock, *funnel_state);
208 }
209 #endif /* __LP64__ */
210
211
212
213 /* ====================================================================== */
214 /* ************ EXTERNAL KERNEL APIS ********************************** */
215 /* ====================================================================== */
216
217 /*
218 * implementations of exported VFS operations
219 */
220 int
221 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
222 {
223 int error;
224 #ifndef __LP64__
225 int thread_safe;
226 int funnel_state = 0;
227 #endif /* __LP64__ */
228
229 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
230 return(ENOTSUP);
231
232 #ifndef __LP64__
233 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
234 if (!thread_safe) {
235 funnel_state = thread_funnel_set(kernel_flock, TRUE);
236 }
237 #endif /* __LP64__ */
238
239 if (vfs_context_is64bit(ctx)) {
240 if (vfs_64bitready(mp)) {
241 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
242 }
243 else {
244 error = ENOTSUP;
245 }
246 }
247 else {
248 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
249 }
250
251 #ifndef __LP64__
252 if (!thread_safe) {
253 (void) thread_funnel_set(kernel_flock, funnel_state);
254 }
255 #endif /* __LP64__ */
256
257 return (error);
258 }
259
260 int
261 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
262 {
263 int error;
264 #ifndef __LP64__
265 int thread_safe;
266 int funnel_state = 0;
267 #endif /* __LP64__ */
268
269 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
270 return(ENOTSUP);
271
272 #ifndef __LP64__
273 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
274
275 if (!thread_safe) {
276 funnel_state = thread_funnel_set(kernel_flock, TRUE);
277 }
278 #endif /* __LP64__ */
279
280 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
281
282 #ifndef __LP64__
283 if (!thread_safe) {
284 (void) thread_funnel_set(kernel_flock, funnel_state);
285 }
286 #endif /* __LP64__ */
287
288 return (error);
289 }
290
291 int
292 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
293 {
294 int error;
295 #ifndef __LP64__
296 int thread_safe;
297 int funnel_state = 0;
298 #endif /* __LP64__ */
299
300 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
301 return(ENOTSUP);
302
303 #ifndef __LP64__
304 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
305
306 if (!thread_safe) {
307 funnel_state = thread_funnel_set(kernel_flock, TRUE);
308 }
309 #endif /* __LP64__ */
310
311 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
312
313 #ifndef __LP64__
314 if (!thread_safe) {
315 (void) thread_funnel_set(kernel_flock, funnel_state);
316 }
317 #endif /* __LP64__ */
318
319 return (error);
320 }
321
322 /*
323 * Returns: 0 Success
324 * ENOTSUP Not supported
325 * <vfs_root>:ENOENT
326 * <vfs_root>:???
327 *
328 * Note: The return codes from the underlying VFS's root routine can't
329 * be fully enumerated here, since third party VFS authors may not
330 * limit their error returns to the ones documented here, even
331 * though this may result in some programs functioning incorrectly.
332 *
333 * The return codes documented above are those which may currently
334 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
335 * for a call to hfs_vget on the volume mount point, not including
336 * additional error codes which may be propagated from underlying
337 * routines called by hfs_vget.
338 */
339 int
340 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
341 {
342 int error;
343 #ifndef __LP64__
344 int thread_safe;
345 int funnel_state = 0;
346 #endif /* __LP64__ */
347
348 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
349 return(ENOTSUP);
350
351 if (ctx == NULL) {
352 ctx = vfs_context_current();
353 }
354
355 #ifndef __LP64__
356 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
357 if (!thread_safe) {
358 funnel_state = thread_funnel_set(kernel_flock, TRUE);
359 }
360 #endif /* __LP64__ */
361
362 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
363
364 #ifndef __LP64__
365 if (!thread_safe) {
366 (void) thread_funnel_set(kernel_flock, funnel_state);
367 }
368 #endif /* __LP64__ */
369
370 return (error);
371 }
372
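/*
 * Usage sketch (illustrative only): VFS_ROOT() hands back the root vnode
 * with an iocount held, so a successful call is balanced with vnode_put():
 *
 *	vnode_t rvp = NULLVP;
 *
 *	if (VFS_ROOT(mp, &rvp, vfs_context_current()) == 0) {
 *		... use rvp ...
 *		vnode_put(rvp);
 *	}
 */
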
373 int
374 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
375 {
376 int error;
377 #ifndef __LP64__
378 int thread_safe;
379 int funnel_state = 0;
380 #endif /* __LP64__ */
381
382 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
383 return(ENOTSUP);
384
385 #ifndef __LP64__
386 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
387 if (!thread_safe) {
388 funnel_state = thread_funnel_set(kernel_flock, TRUE);
389 }
390 #endif /* __LP64__ */
391
392 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
393
394 #ifndef __LP64__
395 if (!thread_safe) {
396 (void) thread_funnel_set(kernel_flock, funnel_state);
397 }
398 #endif /* __LP64__ */
399
400 return (error);
401 }
402
403 int
404 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
405 {
406 int error;
407 #ifndef __LP64__
408 int thread_safe;
409 int funnel_state = 0;
410 #endif /* __LP64__ */
411
412 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
413 return(ENOTSUP);
414
415 if (ctx == NULL) {
416 ctx = vfs_context_current();
417 }
418
419 #ifndef __LP64__
420 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
421 if (!thread_safe) {
422 funnel_state = thread_funnel_set(kernel_flock, TRUE);
423 }
424 #endif /* __LP64__ */
425
426 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
427
428 #ifndef __LP64__
429 if (!thread_safe) {
430 (void) thread_funnel_set(kernel_flock, funnel_state);
431 }
432 #endif /* __LP64__ */
433
434 return(error);
435 }
436
437 int
438 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
439 {
440 int error;
441 #ifndef __LP64__
442 int thread_safe;
443 int funnel_state = 0;
444 #endif /* __LP64__ */
445
446 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
447 return(ENOTSUP);
448
449 if (ctx == NULL) {
450 ctx = vfs_context_current();
451 }
452
453 #ifndef __LP64__
454 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
455 if (!thread_safe) {
456 funnel_state = thread_funnel_set(kernel_flock, TRUE);
457 }
458 #endif /* __LP64__ */
459
460 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
461
462 #ifndef __LP64__
463 if (!thread_safe) {
464 (void) thread_funnel_set(kernel_flock, funnel_state);
465 }
466 #endif /* __LP64__ */
467
468 return(error);
469 }
470
471 int
472 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
473 {
474 int error;
475 #ifndef __LP64__
476 int thread_safe;
477 int funnel_state = 0;
478 #endif /* __LP64__ */
479
480 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
481 return(ENOTSUP);
482
483 if (ctx == NULL) {
484 ctx = vfs_context_current();
485 }
486
487 #ifndef __LP64__
488 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
489 if (!thread_safe) {
490 funnel_state = thread_funnel_set(kernel_flock, TRUE);
491 }
492 #endif /* __LP64__ */
493
494 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
495
496 #ifndef __LP64__
497 if (!thread_safe) {
498 (void) thread_funnel_set(kernel_flock, funnel_state);
499 }
500 #endif /* __LP64__ */
501
502 return(error);
503 }
504
505 int
506 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
507 {
508 int error;
509 #ifndef __LP64__
510 int thread_safe;
511 int funnel_state = 0;
512 #endif /* __LP64__ */
513
514 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
515 return(ENOTSUP);
516
517 if (ctx == NULL) {
518 ctx = vfs_context_current();
519 }
520
521 #ifndef __LP64__
522 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
523 if (!thread_safe) {
524 funnel_state = thread_funnel_set(kernel_flock, TRUE);
525 }
526 #endif /* __LP64__ */
527
528 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
529
530 #ifndef __LP64__
531 if (!thread_safe) {
532 (void) thread_funnel_set(kernel_flock, funnel_state);
533 }
534 #endif /* __LP64__ */
535
536 return(error);
537 }
538
539 int
540 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t ctx)
541 {
542 int error;
543 #ifndef __LP64__
544 int thread_safe;
545 int funnel_state = 0;
546 #endif /* __LP64__ */
547
548 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
549 return(ENOTSUP);
550
551 if (ctx == NULL) {
552 ctx = vfs_context_current();
553 }
554
555 #ifndef __LP64__
556 thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
557 if (!thread_safe) {
558 funnel_state = thread_funnel_set(kernel_flock, TRUE);
559 }
560 #endif /* __LP64__ */
561
562 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
563
564 #ifndef __LP64__
565 if (!thread_safe) {
566 (void) thread_funnel_set(kernel_flock, funnel_state);
567 }
568 #endif /* __LP64__ */
569
570 return(error);
571 }
572
573 int
574 VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ctx)
575 {
576 int error;
577 #ifndef __LP64__
578 int thread_safe;
579 int funnel_state = 0;
580 #endif /* __LP64__ */
581
582 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
583 return(ENOTSUP);
584
585 if (ctx == NULL) {
586 ctx = vfs_context_current();
587 }
588
589 #ifndef __LP64__
590 thread_safe = THREAD_SAFE_FS(vp);
591 if (!thread_safe) {
592 funnel_state = thread_funnel_set(kernel_flock, TRUE);
593 }
594 #endif /* __LP64__ */
595
596 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
597
598 #ifndef __LP64__
599 if (!thread_safe) {
600 (void) thread_funnel_set(kernel_flock, funnel_state);
601 }
602 #endif /* __LP64__ */
603
604 return(error);
605 }
606
607
608 /* returns the cached throttle mask for the mount_t */
609 uint64_t
610 vfs_throttle_mask(mount_t mp)
611 {
612 return(mp->mnt_throttle_mask);
613 }
614
615 /* returns a copy of vfs type name for the mount_t */
616 void
617 vfs_name(mount_t mp, char * buffer)
618 {
619 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
620 }
621
622 /* returns vfs type number for the mount_t */
623 int
624 vfs_typenum(mount_t mp)
625 {
626 return(mp->mnt_vtable->vfc_typenum);
627 }
628
629 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
630 void*
631 vfs_mntlabel(mount_t mp)
632 {
633 return (void*)mp->mnt_mntlabel;
634 }
635
636 /* returns the command modifier flags (MNT_CMDFLAGS) and visible flags (MNT_VISFLAGMASK) of mount_t */
637 uint64_t
638 vfs_flags(mount_t mp)
639 {
640 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
641 }
642
643 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
644 void
645 vfs_setflags(mount_t mp, uint64_t flags)
646 {
647 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
648
649 mount_lock(mp);
650 mp->mnt_flag |= lflags;
651 mount_unlock(mp);
652 }
653
654 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
655 void
656 vfs_clearflags(mount_t mp, uint64_t flags)
657 {
658 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
659
660 mount_lock(mp);
661 mp->mnt_flag &= ~lflags;
662 mount_unlock(mp);
663 }
664
665 /* Is the mount_t ronly and upgrade read/write requested? */
666 int
667 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
668 {
669 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
670 }
671
672
673 /* Is the mount_t mounted ronly */
674 int
675 vfs_isrdonly(mount_t mp)
676 {
677 return (mp->mnt_flag & MNT_RDONLY);
678 }
679
680 /* Is the mount_t mounted for filesystem synchronous writes? */
681 int
682 vfs_issynchronous(mount_t mp)
683 {
684 return (mp->mnt_flag & MNT_SYNCHRONOUS);
685 }
686
687 /* Is the mount_t mounted read/write? */
688 int
689 vfs_isrdwr(mount_t mp)
690 {
691 return ((mp->mnt_flag & MNT_RDONLY) == 0);
692 }
693
694
695 /* Is mount_t marked for update (ie MNT_UPDATE) */
696 int
697 vfs_isupdate(mount_t mp)
698 {
699 return (mp->mnt_flag & MNT_UPDATE);
700 }
701
702
703 /* Is mount_t marked for reload (ie MNT_RELOAD) */
704 int
705 vfs_isreload(mount_t mp)
706 {
707 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
708 }
709
710 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
711 int
712 vfs_isforce(mount_t mp)
713 {
714 if ((mp->mnt_lflag & MNT_LFORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
715 return(1);
716 else
717 return(0);
718 }
719
720 int
721 vfs_isunmount(mount_t mp)
722 {
723 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
724 return 1;
725 } else {
726 return 0;
727 }
728 }
729
730 int
731 vfs_64bitready(mount_t mp)
732 {
733 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
734 return(1);
735 else
736 return(0);
737 }
738
739
740 int
741 vfs_authcache_ttl(mount_t mp)
742 {
743 if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) )
744 return (mp->mnt_authcache_ttl);
745 else
746 return (CACHED_RIGHT_INFINITE_TTL);
747 }
748
749 void
750 vfs_setauthcache_ttl(mount_t mp, int ttl)
751 {
752 mount_lock(mp);
753 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
754 mp->mnt_authcache_ttl = ttl;
755 mount_unlock(mp);
756 }
757
758 void
759 vfs_clearauthcache_ttl(mount_t mp)
760 {
761 mount_lock(mp);
762 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
763 /*
764 * back to the default TTL value in case
765 * MNTK_AUTH_OPAQUE is set on this mount
766 */
767 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
768 mount_unlock(mp);
769 }
770
771 void
772 vfs_markdependency(mount_t mp)
773 {
774 proc_t p = current_proc();
775 mount_lock(mp);
776 mp->mnt_dependent_process = p;
777 mp->mnt_dependent_pid = proc_pid(p);
778 mount_unlock(mp);
779 }
780
781
782 int
783 vfs_authopaque(mount_t mp)
784 {
785 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
786 return(1);
787 else
788 return(0);
789 }
790
791 int
792 vfs_authopaqueaccess(mount_t mp)
793 {
794 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
795 return(1);
796 else
797 return(0);
798 }
799
800 void
801 vfs_setauthopaque(mount_t mp)
802 {
803 mount_lock(mp);
804 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
805 mount_unlock(mp);
806 }
807
808 void
809 vfs_setauthopaqueaccess(mount_t mp)
810 {
811 mount_lock(mp);
812 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
813 mount_unlock(mp);
814 }
815
816 void
817 vfs_clearauthopaque(mount_t mp)
818 {
819 mount_lock(mp);
820 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
821 mount_unlock(mp);
822 }
823
824 void
825 vfs_clearauthopaqueaccess(mount_t mp)
826 {
827 mount_lock(mp);
828 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
829 mount_unlock(mp);
830 }
831
832 void
833 vfs_setextendedsecurity(mount_t mp)
834 {
835 mount_lock(mp);
836 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
837 mount_unlock(mp);
838 }
839
840 void
841 vfs_clearextendedsecurity(mount_t mp)
842 {
843 mount_lock(mp);
844 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
845 mount_unlock(mp);
846 }
847
848 int
849 vfs_extendedsecurity(mount_t mp)
850 {
851 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
852 }
853
854 /* returns the max size of short symlink in this mount_t */
855 uint32_t
856 vfs_maxsymlen(mount_t mp)
857 {
858 return(mp->mnt_maxsymlinklen);
859 }
860
861 /* set max size of short symlink on mount_t */
862 void
863 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
864 {
865 mp->mnt_maxsymlinklen = symlen;
866 }
867
868 /* return a pointer to the RO vfs_statfs associated with mount_t */
869 struct vfsstatfs *
870 vfs_statfs(mount_t mp)
871 {
872 return(&mp->mnt_vfsstat);
873 }
874
875 int
876 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
877 {
878 int error;
879
880 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
881 return(error);
882
883 /*
884 * If we have a filesystem create time, use it to default some others.
885 */
886 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
887 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
888 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
889 }
890
891 return(0);
892 }
893
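/*
 * Sketch (illustrative only): the defaulting above relies on the VFSATTR_*
 * protocol; a filesystem's own getattr handler typically answers just the
 * attributes it supports, using the same macros. "fs_create_time" here is
 * a hypothetical per-mount value, not part of this file:
 *
 *	if (VFSATTR_IS_ACTIVE(vfa, f_create_time))
 *		VFSATTR_RETURN(vfa, f_create_time, fs_create_time);
 */
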
894 int
895 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
896 {
897 int error;
898
899 if (vfs_isrdonly(mp))
900 return EROFS;
901
902 error = VFS_SETATTR(mp, vfa, ctx);
903
904 /*
905 * If we had alternate ways of setting vfs attributes, we'd
906 * fall back here.
907 */
908
909 return error;
910 }
911
912 /* return the private data handle stored in mount_t */
913 void *
914 vfs_fsprivate(mount_t mp)
915 {
916 return(mp->mnt_data);
917 }
918
919 /* set the private data handle in mount_t */
920 void
921 vfs_setfsprivate(mount_t mp, void *mntdata)
922 {
923 mount_lock(mp);
924 mp->mnt_data = mntdata;
925 mount_unlock(mp);
926 }
927
928
929 /*
930 * return the block size of the underlying
931 * device associated with mount_t
932 */
933 int
934 vfs_devblocksize(mount_t mp) {
935
936 return(mp->mnt_devblocksize);
937 }
938
939 /*
940 * Returns vnode with an iocount that must be released with vnode_put()
941 */
942 vnode_t
943 vfs_vnodecovered(mount_t mp)
944 {
945 vnode_t vp = mp->mnt_vnodecovered;
946 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
947 return NULL;
948 } else {
949 return vp;
950 }
951 }
952
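/*
 * Usage sketch (illustrative only): per the comment above, the covered
 * vnode comes back with an iocount, so a non-NULL result must be released:
 *
 *	vnode_t cvp;
 *
 *	if ((cvp = vfs_vnodecovered(mp)) != NULLVP) {
 *		... inspect cvp ...
 *		vnode_put(cvp);
 *	}
 */
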
953 /*
954 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
955 * The iocount must be released with vnode_put(). Note that this KPI is subtle
956 * with respect to the validity of using this device vnode for anything substantial
957 * (which is discouraged). If commands are sent to the device driver without
958 * taking proper steps to ensure that the device is still open, chaos may ensue.
959 * Similarly, this routine should only be called if there is some guarantee that
960 * the mount itself is still valid.
961 */
962 vnode_t
963 vfs_devvp(mount_t mp)
964 {
965 vnode_t vp = mp->mnt_devvp;
966
967 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
968 return vp;
969 }
970
971 return NULLVP;
972 }
973
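/*
 * Usage sketch (illustrative only; note the caveats above about device and
 * mount validity): a non-NULL result carries an iocount and must be put:
 *
 *	vnode_t dvp;
 *
 *	if ((dvp = vfs_devvp(mp)) != NULLVP) {
 *		... query the device while the mount is known to be valid ...
 *		vnode_put(dvp);
 *	}
 */
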
974 /*
975 * return the io attributes associated with mount_t
976 */
977 void
978 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
979 {
980 if (mp == NULL) {
981 ioattrp->io_maxreadcnt = MAXPHYS;
982 ioattrp->io_maxwritecnt = MAXPHYS;
983 ioattrp->io_segreadcnt = 32;
984 ioattrp->io_segwritecnt = 32;
985 ioattrp->io_maxsegreadsize = MAXPHYS;
986 ioattrp->io_maxsegwritesize = MAXPHYS;
987 ioattrp->io_devblocksize = DEV_BSIZE;
988 ioattrp->io_flags = 0;
989 } else {
990 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
991 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
992 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
993 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
994 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
995 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
996 ioattrp->io_devblocksize = mp->mnt_devblocksize;
997 ioattrp->io_flags = mp->mnt_ioflags;
998 }
999 ioattrp->io_reserved[0] = NULL;
1000 ioattrp->io_reserved[1] = NULL;
1001 }
1002
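/*
 * Usage sketch (illustrative only): callers typically clip their transfer
 * sizes against the limits reported here; "io_size" is a hypothetical
 * caller-side variable:
 *
 *	struct vfsioattr ioattr;
 *
 *	vfs_ioattr(mp, &ioattr);
 *	if (io_size > ioattr.io_maxreadcnt)
 *		io_size = ioattr.io_maxreadcnt;
 */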
1003
1004 /*
1005 * set the IO attributes associated with mount_t
1006 */
1007 void
1008 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
1009 {
1010 if (mp == NULL)
1011 return;
1012 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
1013 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
1014 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
1015 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
1016 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
1017 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
1018 mp->mnt_devblocksize = ioattrp->io_devblocksize;
1019 mp->mnt_ioflags = ioattrp->io_flags;
1020 }
1021
1022 /*
1023 * Add a new filesystem into the kernel, as described by the passed-in
1024 * vfs_fsentry structure. It fills in the vnode
1025 * dispatch vector that is to be passed to vnode_create() when vnodes are created.
1026 * It returns a handle which is to be used when the FS is to be removed.
1027 */
1028 typedef int (*PFI)(void *);
1029 extern int vfs_opv_numops;
1030 errno_t
1031 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
1032 {
1033 struct vfstable *newvfstbl = NULL;
1034 int i, j;
1035 int (***opv_desc_vector_p)(void *);
1036 int (**opv_desc_vector)(void *);
1037 struct vnodeopv_entry_desc *opve_descp;
1038 int desccount;
1039 int descsize;
1040 PFI *descptr;
1041
1042 /*
1043 * This routine is responsible for all the initialization that would
1044 * ordinarily be done as part of the system startup;
1045 */
1046
1047 if (vfe == (struct vfs_fsentry *)0)
1048 return(EINVAL);
1049
1050 desccount = vfe->vfe_vopcnt;
1051 if ((desccount <= 0) || (desccount > 8) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
1052 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
1053 return(EINVAL);
1054
1055 #ifdef __LP64__
1056 /* Non-threadsafe filesystems are not supported for K64 */
1057 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
1058 return (EINVAL);
1059 }
1060 #endif /* __LP64__ */
1061
1062 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
1063 M_WAITOK);
1064 bzero(newvfstbl, sizeof(struct vfstable));
1065 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
1066 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
1067 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
1068 newvfstbl->vfc_typenum = maxvfsconf++;
1069 else
1070 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
1071
1072 newvfstbl->vfc_refcount = 0;
1073 newvfstbl->vfc_flags = 0;
1074 newvfstbl->vfc_mountroot = NULL;
1075 newvfstbl->vfc_next = NULL;
1076 newvfstbl->vfc_vfsflags = 0;
1077 if (vfe->vfe_flags & VFS_TBL64BITREADY)
1078 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
1079 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2)
1080 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
1081 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2)
1082 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
1083 #ifndef __LP64__
1084 if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
1085 newvfstbl->vfc_vfsflags |= VFC_VFSTHREADSAFE;
1086 if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
1087 newvfstbl->vfc_vfsflags |= VFC_VFSTHREADSAFE;
1088 #endif /* __LP64__ */
1089 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
1090 newvfstbl->vfc_flags |= MNT_LOCAL;
1091 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0)
1092 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
1093 else
1094 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
1095
1096 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR)
1097 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1098 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT)
1099 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
1100 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED)
1101 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
1102 if (vfe->vfe_flags & VFS_TBLNOMACLABEL)
1103 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
1104
1105 /*
1106 * Allocate and init the vectors.
1107 * Also handle backwards compatibility.
1108 *
1109 * We allocate one large block to hold all <desccount>
1110 * vnode operation vectors stored contiguously.
1111 */
1112 /* XXX - shouldn't be M_TEMP */
1113
1114 descsize = desccount * vfs_opv_numops * sizeof(PFI);
1115 MALLOC(descptr, PFI *, descsize,
1116 M_TEMP, M_WAITOK);
1117 bzero(descptr, descsize);
1118
1119 newvfstbl->vfc_descptr = descptr;
1120 newvfstbl->vfc_descsize = descsize;
1121
1122
1123 for (i = 0; i < desccount; i++) {
1124 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1125 /*
1126 * Fill in the caller's pointer to the start of the i'th vector.
1127 * They'll need to supply it when calling vnode_create.
1128 */
1129 opv_desc_vector = descptr + i * vfs_opv_numops;
1130 *opv_desc_vector_p = opv_desc_vector;
1131
1132 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
1133 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
1134
1135 /*
1136 * Sanity check: is this operation listed
1137 * in the list of operations? We check this
1138 * by seeing if its offset is zero. Since
1139 * the default routine should always be listed
1140 * first, it should be the only one with a zero
1141 * offset. Any other operation with a zero
1142 * offset is probably not listed in
1143 * vfs_op_descs, and so is probably an error.
1144 *
1145 * A panic here means the layer programmer
1146 * has committed the all-too common bug
1147 * of adding a new operation to the layer's
1148 * list of vnode operations but
1149 * not adding the operation to the system-wide
1150 * list of supported operations.
1151 */
1152 if (opve_descp->opve_op->vdesc_offset == 0 &&
1153 opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
1154 printf("vfs_fsadd: operation %s not listed in %s.\n",
1155 opve_descp->opve_op->vdesc_name,
1156 "vfs_op_descs");
1157 panic("vfs_fsadd: bad operation");
1158 }
1159 /*
1160 * Fill in this entry.
1161 */
1162 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1163 opve_descp->opve_impl;
1164 }
1165
1166
1167 /*
1168 * Finally, go back and replace unfilled routines
1169 * with their default. (Sigh, an O(n^3) algorithm. I
1170 * could make it better, but that'd be work, and n is small.)
1171 */
1172 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1173
1174 /*
1175 * Force every operations vector to have a default routine.
1176 */
1177 opv_desc_vector = *opv_desc_vector_p;
1178 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
1179 panic("vfs_fsadd: operation vector without default routine.");
1180 for (j = 0; j < vfs_opv_numops; j++)
1181 if (opv_desc_vector[j] == NULL)
1182 opv_desc_vector[j] =
1183 opv_desc_vector[VOFFSET(vnop_default)];
1184
1185 } /* end of each vnodeopv_desc parsing */
1186
1187
1188
1189 *handle = vfstable_add(newvfstbl);
1190
1191 if (newvfstbl->vfc_typenum <= maxvfsconf )
1192 maxvfsconf = newvfstbl->vfc_typenum + 1;
1193
1194 if (newvfstbl->vfc_vfsops->vfs_init) {
1195 struct vfsconf vfsc;
1196 bzero(&vfsc, sizeof(struct vfsconf));
1197 vfsc.vfc_reserved1 = 0;
1198 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1199 vfsc.vfc_typenum = (*handle)->vfc_typenum;
1200 vfsc.vfc_refcount = (*handle)->vfc_refcount;
1201 vfsc.vfc_flags = (*handle)->vfc_flags;
1202 vfsc.vfc_reserved2 = 0;
1203 vfsc.vfc_reserved3 = 0;
1204
1205 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1206 }
1207
1208 FREE(newvfstbl, M_TEMP);
1209
1210 return(0);
1211 }
1212
1213 /*
1214 * Removes the filesystem from the kernel.
1215 * The argument passed in is the handle that was returned when the
1216 * file system was added.
1217 */
1218 errno_t
1219 vfs_fsremove(vfstable_t handle)
1220 {
1221 struct vfstable * vfstbl = (struct vfstable *)handle;
1222 void *old_desc = NULL;
1223 errno_t err;
1224
1225 /* Preflight check for any mounts */
1226 mount_list_lock();
1227 if ( vfstbl->vfc_refcount != 0 ) {
1228 mount_list_unlock();
1229 return EBUSY;
1230 }
1231
1232 /*
1233 * save the old descriptor; the free cannot occur unconditionally,
1234 * since vfstable_del() may fail.
1235 */
1236 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1237 old_desc = vfstbl->vfc_descptr;
1238 }
1239 err = vfstable_del(vfstbl);
1240
1241 mount_list_unlock();
1242
1243 /* free the descriptor if the delete was successful */
1244 if (err == 0 && old_desc) {
1245 FREE(old_desc, M_TEMP);
1246 }
1247
1248 return(err);
1249 }
1250
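/*
 * Registration sketch (illustrative only; the "examplefs_*" names are
 * hypothetical and not part of this file): a loadable filesystem fills
 * in a struct vfs_fsentry, registers it with vfs_fsadd(), and keeps the
 * returned handle so it can later call vfs_fsremove():
 *
 *	extern struct vfsops examplefs_vfsops;
 *	extern struct vnodeopv_desc examplefs_vnodeop_opv_desc;
 *
 *	static struct vnodeopv_desc *examplefs_opvdescs[] = {
 *		&examplefs_vnodeop_opv_desc
 *	};
 *	static vfstable_t examplefs_handle;
 *
 *	errno_t error;
 *	struct vfs_fsentry vfe = {
 *		.vfe_vfsops = &examplefs_vfsops,
 *		.vfe_vopcnt = 1,
 *		.vfe_opvdescs = examplefs_opvdescs,
 *		.vfe_fsname = "examplefs",
 *		.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM,
 *	};
 *
 *	// at load time:
 *	error = vfs_fsadd(&vfe, &examplefs_handle);
 *	...
 *	// at unload time:
 *	error = vfs_fsremove(examplefs_handle);
 */
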
1251 int
1252 vfs_context_pid(vfs_context_t ctx)
1253 {
1254 return (proc_pid(vfs_context_proc(ctx)));
1255 }
1256
1257 int
1258 vfs_context_suser(vfs_context_t ctx)
1259 {
1260 return (suser(ctx->vc_ucred, NULL));
1261 }
1262
1263 /*
1264 * Return bit field of signals posted to all threads in the context's process.
1265 *
1266 * XXX Signals should be tied to threads, not processes, for most uses of this
1267 * XXX call.
1268 */
1269 int
1270 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1271 {
1272 proc_t p = vfs_context_proc(ctx);
1273 if (p)
1274 return(proc_pendingsignals(p, mask));
1275 return(0);
1276 }
1277
1278 int
1279 vfs_context_is64bit(vfs_context_t ctx)
1280 {
1281 proc_t proc = vfs_context_proc(ctx);
1282
1283 if (proc)
1284 return(proc_is64bit(proc));
1285 return(0);
1286 }
1287
1288
1289 /*
1290 * vfs_context_proc
1291 *
1292 * Description: Given a vfs_context_t, return the proc_t associated with it.
1293 *
1294 * Parameters: vfs_context_t The context to use
1295 *
1296 * Returns: proc_t The process for this context
1297 *
1298 * Notes: This function will return the current_proc() if any of the
1299 * following conditions are true:
1300 *
1301 * o The supplied context pointer is NULL
1302 * o There is no Mach thread associated with the context
1303 * o There is no Mach task associated with the Mach thread
1304 * o There is no proc_t associated with the Mach task
1305 * o The proc_t has no per process open file table
1306 * o The proc_t is post-vfork()
1307 *
1308 * This causes this function to return a value matching as
1309 * closely as possible the previous behaviour, while at the
1310 * same time avoiding the task lending that results from vfork()
1311 */
1312 proc_t
1313 vfs_context_proc(vfs_context_t ctx)
1314 {
1315 proc_t proc = NULL;
1316
1317 if (ctx != NULL && ctx->vc_thread != NULL)
1318 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1319 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
1320 proc = NULL;
1321
1322 return(proc == NULL ? current_proc() : proc);
1323 }
1324
1325 /*
1326 * vfs_context_get_special_port
1327 *
1328 * Description: Return the requested special port from the task associated
1329 * with the given context.
1330 *
1331 * Parameters: vfs_context_t The context to use
1332 * int Index of special port
1333 * ipc_port_t * Pointer to returned port
1334 *
1335 * Returns: kern_return_t see task_get_special_port()
1336 */
1337 kern_return_t
1338 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1339 {
1340 task_t task = NULL;
1341
1342 if (ctx != NULL && ctx->vc_thread != NULL)
1343 task = get_threadtask(ctx->vc_thread);
1344
1345 return task_get_special_port(task, which, portp);
1346 }
1347
1348 /*
1349 * vfs_context_set_special_port
1350 *
1351 * Description: Set the requested special port in the task associated
1352 * with the given context.
1353 *
1354 * Parameters: vfs_context_t The context to use
1355 * int Index of special port
1356 * ipc_port_t New special port
1357 *
1358 * Returns: kern_return_t see task_set_special_port()
1359 */
1360 kern_return_t
1361 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1362 {
1363 task_t task = NULL;
1364
1365 if (ctx != NULL && ctx->vc_thread != NULL)
1366 task = get_threadtask(ctx->vc_thread);
1367
1368 return task_set_special_port(task, which, port);
1369 }
1370
1371 /*
1372 * vfs_context_thread
1373 *
1374 * Description: Return the Mach thread associated with a vfs_context_t
1375 *
1376 * Parameters: vfs_context_t The context to use
1377 *
1378 * Returns: thread_t The thread for this context, or
1379 * NULL, if there is not one.
1380 *
1381 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1382 * as a result of a static vfs_context_t declaration in a function
1383 * and will result in this function returning NULL.
1384 *
1385 * This is intentional; this function should NOT return the
1386 * current_thread() in this case.
1387 */
1388 thread_t
1389 vfs_context_thread(vfs_context_t ctx)
1390 {
1391 return(ctx->vc_thread);
1392 }
1393
1394
1395 /*
1396 * vfs_context_cwd
1397 *
1398 * Description: Returns a reference on the vnode for the current working
1399 * directory for the supplied context
1400 *
1401 * Parameters: vfs_context_t The context to use
1402 *
1403 * Returns: vnode_t The current working directory
1404 * for this context
1405 *
1406 * Notes: The function first attempts to obtain the current directory
1407 * from the thread, and if it is not present there, falls back
1408 * to obtaining it from the process instead. If it can't be
1409 * obtained from either place, we return NULLVP.
1410 */
1411 vnode_t
1412 vfs_context_cwd(vfs_context_t ctx)
1413 {
1414 vnode_t cwd = NULLVP;
1415
1416 if(ctx != NULL && ctx->vc_thread != NULL) {
1417 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1418 proc_t proc;
1419
1420 /*
1421 * Get the cwd from the thread; if there isn't one, get it
1422 * from the process, instead.
1423 */
1424 if ((cwd = uth->uu_cdir) == NULLVP &&
1425 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1426 proc->p_fd != NULL)
1427 cwd = proc->p_fd->fd_cdir;
1428 }
1429
1430 return(cwd);
1431 }
1432
1433 /*
1434 * vfs_context_create
1435 *
1436 * Description: Allocate and initialize a new context.
1437 *
1438 * Parameters: vfs_context_t: Context to copy, or NULL for new
1439 *
1440 * Returns: Pointer to new context
1441 *
1442 * Notes: Copy cred and thread from argument, if available; else
1443 * initialize with current thread and new cred. Returns
1444 * with a reference held on the credential.
1445 */
1446 vfs_context_t
1447 vfs_context_create(vfs_context_t ctx)
1448 {
1449 vfs_context_t newcontext;
1450
1451 newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));
1452
1453 if (newcontext) {
1454 kauth_cred_t safecred;
1455 if (ctx) {
1456 newcontext->vc_thread = ctx->vc_thread;
1457 safecred = ctx->vc_ucred;
1458 } else {
1459 newcontext->vc_thread = current_thread();
1460 safecred = kauth_cred_get();
1461 }
1462 if (IS_VALID_CRED(safecred))
1463 kauth_cred_ref(safecred);
1464 newcontext->vc_ucred = safecred;
1465 return(newcontext);
1466 }
1467 return(NULL);
1468 }
1469
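/*
 * Usage sketch (illustrative only): per the notes above, the returned
 * context holds a credential reference, so every successful
 * vfs_context_create() is balanced with a vfs_context_rele():
 *
 *	vfs_context_t ctx;
 *
 *	if ((ctx = vfs_context_create(NULL)) != NULL) {
 *		... issue VFS/VNOP calls under ctx ...
 *		vfs_context_rele(ctx);
 *	}
 */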
1470
1471 vfs_context_t
1472 vfs_context_current(void)
1473 {
1474 vfs_context_t ctx = NULL;
1475 volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1476
1477 if (ut != NULL ) {
1478 if (ut->uu_context.vc_ucred != NULL) {
1479 ctx = &ut->uu_context;
1480 }
1481 }
1482
1483 return(ctx == NULL ? vfs_context_kernel() : ctx);
1484 }
1485
1486
1487 /*
1488 * XXX Do not ask
1489 *
1490 * Dangerous hack - adopt the first kernel thread as the current thread, to
1491 * get to the vfs_context_t in the uthread associated with a kernel thread.
1492 * This is used by UDF to make the call into IOCDMediaBSDClient,
1493 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1494 * ioctl() is being called from kernel or user space (and all this because
1495 * we do not pass threads into our ioctl()'s, instead of processes).
1496 *
1497 * This is also used by imageboot_setup(), called early from bsd_init() after
1498 * kernproc has been given a credential.
1499 *
1500 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1501 * of many Mach headers to do the reference directly rather than indirectly;
1502 * we will need to forego this convenience when we retire proc_thread().
1503 */
1504 static struct vfs_context kerncontext;
1505 vfs_context_t
1506 vfs_context_kernel(void)
1507 {
1508 if (kerncontext.vc_ucred == NOCRED)
1509 kerncontext.vc_ucred = kernproc->p_ucred;
1510 if (kerncontext.vc_thread == NULL)
1511 kerncontext.vc_thread = proc_thread(kernproc);
1512
1513 return(&kerncontext);
1514 }
1515
1516
1517 int
1518 vfs_context_rele(vfs_context_t ctx)
1519 {
1520 if (ctx) {
1521 if (IS_VALID_CRED(ctx->vc_ucred))
1522 kauth_cred_unref(&ctx->vc_ucred);
1523 kfree(ctx, sizeof(struct vfs_context));
1524 }
1525 return(0);
1526 }
1527
1528
1529 kauth_cred_t
1530 vfs_context_ucred(vfs_context_t ctx)
1531 {
1532 return (ctx->vc_ucred);
1533 }
1534
1535 /*
1536 * Return true if the context is owned by the superuser.
1537 */
1538 int
1539 vfs_context_issuser(vfs_context_t ctx)
1540 {
1541 return(kauth_cred_issuser(vfs_context_ucred(ctx)));
1542 }
1543
1544 /*
1545 * Given a context, for all fields of vfs_context_t which
1546 * are not held with a reference, set those fields to the
1547 * values for the current execution context. Currently, this
1548 * just means the vc_thread.
1549 *
1550 * Returns: 0 for success, nonzero for failure
1551 *
1552 * The intended use is:
1553 * 1. vfs_context_create() gets the caller a context
1554 * 2. vfs_context_bind() sets the unrefcounted data
1555 * 3. vfs_context_rele() releases the context
1556 *
1557 */
1558 int
1559 vfs_context_bind(vfs_context_t ctx)
1560 {
1561 ctx->vc_thread = current_thread();
1562 return 0;
1563 }
1564
1565 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1566
1567
1568 /*
1569 * Convert between vnode types and inode formats (since POSIX.1
1570 * defines mode word of stat structure in terms of inode formats).
1571 */
1572 enum vtype
1573 vnode_iftovt(int mode)
1574 {
1575 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1576 }
1577
1578 int
1579 vnode_vttoif(enum vtype indx)
1580 {
1581 return(vttoif_tab[(int)(indx)]);
1582 }
1583
1584 int
1585 vnode_makeimode(int indx, int mode)
1586 {
1587 return (int)(VTTOIF(indx) | (mode));
1588 }
1589
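/*
 * Example (illustrative): for a regular file with permission bits 0644,
 * vnode_makeimode(VREG, 0644) yields (S_IFREG | 0644), and
 * vnode_iftovt(S_IFREG | 0644) maps that mode word back to VREG.
 */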
1590
1591 /*
1592 * vnode manipulation functions.
1593 */
1594
1595 /* returns the system root vnode with an iocount; it should be released using vnode_put() */
1596 vnode_t
1597 vfs_rootvnode(void)
1598 {
1599 int error;
1600
1601 error = vnode_get(rootvnode);
1602 if (error)
1603 return ((vnode_t)0);
1604 else
1605 return rootvnode;
1606 }
1607
1608
1609 uint32_t
1610 vnode_vid(vnode_t vp)
1611 {
1612 return ((uint32_t)(vp->v_id));
1613 }
1614
1615 mount_t
1616 vnode_mount(vnode_t vp)
1617 {
1618 return (vp->v_mount);
1619 }
1620
1621 mount_t
1622 vnode_mountedhere(vnode_t vp)
1623 {
1624 mount_t mp;
1625
1626 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1627 (mp->mnt_vnodecovered == vp))
1628 return (mp);
1629 else
1630 return (mount_t)NULL;
1631 }
1632
1633 /* returns vnode type of vnode_t */
1634 enum vtype
1635 vnode_vtype(vnode_t vp)
1636 {
1637 return (vp->v_type);
1638 }
1639
1640 /* returns FS specific node saved in vnode */
1641 void *
1642 vnode_fsnode(vnode_t vp)
1643 {
1644 return (vp->v_data);
1645 }
1646
1647 void
1648 vnode_clearfsnode(vnode_t vp)
1649 {
1650 vp->v_data = NULL;
1651 }
1652
1653 dev_t
1654 vnode_specrdev(vnode_t vp)
1655 {
1656 return(vp->v_rdev);
1657 }
1658
1659
1660 /* Accessor functions */
1661 /* is vnode_t a root vnode */
1662 int
1663 vnode_isvroot(vnode_t vp)
1664 {
1665 return ((vp->v_flag & VROOT)? 1 : 0);
1666 }
1667
1668 /* is vnode_t a system vnode */
1669 int
1670 vnode_issystem(vnode_t vp)
1671 {
1672 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1673 }
1674
1675 /* is vnode_t a swap file vnode */
1676 int
1677 vnode_isswap(vnode_t vp)
1678 {
1679 return ((vp->v_flag & VSWAP)? 1 : 0);
1680 }
1681
1682 /* is vnode_t a tty */
1683 int
1684 vnode_istty(vnode_t vp)
1685 {
1686 return ((vp->v_flag & VISTTY) ? 1 : 0);
1687 }
1688
1689 /* is a mount operation in progress on vnode_t */
1690 int
1691 vnode_ismount(vnode_t vp)
1692 {
1693 return ((vp->v_flag & VMOUNT)? 1 : 0);
1694 }
1695
1696 /* is this vnode under recycle now */
1697 int
1698 vnode_isrecycled(vnode_t vp)
1699 {
1700 int ret;
1701
1702 vnode_lock_spin(vp);
1703 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1704 vnode_unlock(vp);
1705 return(ret);
1706 }
1707
1708 /* vnode was created by background task requesting rapid aging
1709 and has not since been referenced by a normal task */
1710 int
1711 vnode_israge(vnode_t vp)
1712 {
1713 return ((vp->v_flag & VRAGE)? 1 : 0);
1714 }
1715
1716 int
1717 vnode_needssnapshots(vnode_t vp)
1718 {
1719 return ((vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0);
1720 }
1721
1722
1723 /* Check the process/thread to see if we should skip atime updates */
1724 int
1725 vfs_ctx_skipatime (vfs_context_t ctx) {
1726 struct uthread *ut;
1727 proc_t proc;
1728 thread_t thr;
1729
1730 proc = vfs_context_proc(ctx);
1731 thr = vfs_context_thread (ctx);
1732
1733 /* Validate pointers in case we were invoked via a kernel context */
1734 if (thr && proc) {
1735 ut = get_bsdthread_info (thr);
1736
1737 if (proc->p_lflag & P_LRAGE_VNODES) {
1738 return 1;
1739 }
1740
1741 if (ut) {
1742 if (ut->uu_flag & UT_RAGE_VNODES) {
1743 return 1;
1744 }
1745 }
1746 }
1747 return 0;
1748 }
1749
1750 /* is vnode_t marked to not keep data cached once it's been consumed */
1751 int
1752 vnode_isnocache(vnode_t vp)
1753 {
1754 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1755 }
1756
1757 /*
1758 * has sequential readahead been disabled on this vnode
1759 */
1760 int
1761 vnode_isnoreadahead(vnode_t vp)
1762 {
1763 return ((vp->v_flag & VRAOFF)? 1 : 0);
1764 }
1765
1766 int
1767 vnode_is_openevt(vnode_t vp)
1768 {
1769 return ((vp->v_flag & VOPENEVT)? 1 : 0);
1770 }
1771
1772 /* is vnode_t a standard one? */
1773 int
1774 vnode_isstandard(vnode_t vp)
1775 {
1776 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1777 }
1778
1779 /* don't vflush() if SKIPSYSTEM */
1780 int
1781 vnode_isnoflush(vnode_t vp)
1782 {
1783 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1784 }
1785
1786 /* is vnode_t a regular file */
1787 int
1788 vnode_isreg(vnode_t vp)
1789 {
1790 return ((vp->v_type == VREG)? 1 : 0);
1791 }
1792
1793 /* is vnode_t a directory? */
1794 int
1795 vnode_isdir(vnode_t vp)
1796 {
1797 return ((vp->v_type == VDIR)? 1 : 0);
1798 }
1799
1800 /* is vnode_t a symbolic link ? */
1801 int
1802 vnode_islnk(vnode_t vp)
1803 {
1804 return ((vp->v_type == VLNK)? 1 : 0);
1805 }
1806
1807 int
1808 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
1809 {
1810 struct nameidata *ndp = cnp->cn_ndp;
1811
1812 if (ndp == NULL) {
1813 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1814 }
1815
1816 if (vnode_isdir(vp)) {
1817 if (vp->v_mountedhere != NULL) {
1818 goto yes;
1819 }
1820
1821 #if CONFIG_TRIGGERS
1822 if (vp->v_resolve) {
1823 goto yes;
1824 }
1825 #endif /* CONFIG_TRIGGERS */
1826
1827 }
1828
1829
1830 if (vnode_islnk(vp)) {
1831 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1832 if (cnp->cn_flags & FOLLOW) {
1833 goto yes;
1834 }
1835 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
1836 goto yes;
1837 }
1838 }
1839
1840 return 0;
1841
1842 yes:
1843 ndp->ni_flag |= NAMEI_CONTLOOKUP;
1844 return EKEEPLOOKING;
1845 }
1846
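/*
 * Sketch (illustrative only): a filesystem implementing a compound VNOP
 * calls this after locating a vnode, to decide whether the lookup must
 * be handed back to the VFS for further processing:
 *
 *	error = vnode_lookup_continue_needed(vp, cnp);
 *	if (error != 0)
 *		return (error);	// EKEEPLOOKING: the VFS resumes the lookup
 */
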
1847 /* is vnode_t a fifo ? */
1848 int
1849 vnode_isfifo(vnode_t vp)
1850 {
1851 return ((vp->v_type == VFIFO)? 1 : 0);
1852 }
1853
1854 /* is vnode_t a block device? */
1855 int
1856 vnode_isblk(vnode_t vp)
1857 {
1858 return ((vp->v_type == VBLK)? 1 : 0);
1859 }
1860
1861 int
1862 vnode_isspec(vnode_t vp)
1863 {
1864 return (((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0);
1865 }
1866
1867 /* is vnode_t a char device? */
1868 int
1869 vnode_ischr(vnode_t vp)
1870 {
1871 return ((vp->v_type == VCHR)? 1 : 0);
1872 }
1873
1874 /* is vnode_t a socket? */
1875 int
1876 vnode_issock(vnode_t vp)
1877 {
1878 return ((vp->v_type == VSOCK)? 1 : 0);
1879 }
1880
1881 /* is vnode_t a device with multiple active vnodes referring to it? */
1882 int
1883 vnode_isaliased(vnode_t vp)
1884 {
1885 enum vtype vt = vp->v_type;
1886 if (!((vt == VCHR) || (vt == VBLK))) {
1887 return 0;
1888 } else {
1889 return (vp->v_specflags & SI_ALIASED);
1890 }
1891 }
1892
1893 /* is vnode_t a named stream? */
1894 int
1895 vnode_isnamedstream(
1896 #if NAMEDSTREAMS
1897 vnode_t vp
1898 #else
1899 __unused vnode_t vp
1900 #endif
1901 )
1902 {
1903 #if NAMEDSTREAMS
1904 return ((vp->v_flag & VISNAMEDSTREAM) ? 1 : 0);
1905 #else
1906 return (0);
1907 #endif
1908 }
1909
1910 int
1911 vnode_isshadow(
1912 #if NAMEDSTREAMS
1913 vnode_t vp
1914 #else
1915 __unused vnode_t vp
1916 #endif
1917 )
1918 {
1919 #if NAMEDSTREAMS
1920 return ((vp->v_flag & VISSHADOW) ? 1 : 0);
1921 #else
1922 return (0);
1923 #endif
1924 }
1925
1926 /* does vnode have associated named stream vnodes ? */
1927 int
1928 vnode_hasnamedstreams(
1929 #if NAMEDSTREAMS
1930 vnode_t vp
1931 #else
1932 __unused vnode_t vp
1933 #endif
1934 )
1935 {
1936 #if NAMEDSTREAMS
1937 return ((vp->v_lflag & VL_HASSTREAMS) ? 1 : 0);
1938 #else
1939 return (0);
1940 #endif
1941 }
1942 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1943 void
1944 vnode_setnocache(vnode_t vp)
1945 {
1946 vnode_lock_spin(vp);
1947 vp->v_flag |= VNOCACHE_DATA;
1948 vnode_unlock(vp);
1949 }
1950
1951 void
1952 vnode_clearnocache(vnode_t vp)
1953 {
1954 vnode_lock_spin(vp);
1955 vp->v_flag &= ~VNOCACHE_DATA;
1956 vnode_unlock(vp);
1957 }
1958
1959 void
1960 vnode_set_openevt(vnode_t vp)
1961 {
1962 vnode_lock_spin(vp);
1963 vp->v_flag |= VOPENEVT;
1964 vnode_unlock(vp);
1965 }
1966
1967 void
1968 vnode_clear_openevt(vnode_t vp)
1969 {
1970 vnode_lock_spin(vp);
1971 vp->v_flag &= ~VOPENEVT;
1972 vnode_unlock(vp);
1973 }
1974
1975
1976 void
1977 vnode_setnoreadahead(vnode_t vp)
1978 {
1979 vnode_lock_spin(vp);
1980 vp->v_flag |= VRAOFF;
1981 vnode_unlock(vp);
1982 }
1983
1984 void
1985 vnode_clearnoreadahead(vnode_t vp)
1986 {
1987 vnode_lock_spin(vp);
1988 vp->v_flag &= ~VRAOFF;
1989 vnode_unlock(vp);
1990 }
1991
1992
1993 /* mark vnode_t to skip vflush() if SKIPSYSTEM */
1994 void
1995 vnode_setnoflush(vnode_t vp)
1996 {
1997 vnode_lock_spin(vp);
1998 vp->v_flag |= VNOFLUSH;
1999 vnode_unlock(vp);
2000 }
2001
2002 void
2003 vnode_clearnoflush(vnode_t vp)
2004 {
2005 vnode_lock_spin(vp);
2006 vp->v_flag &= ~VNOFLUSH;
2007 vnode_unlock(vp);
2008 }
2009
2010
2011 /* is vnode_t a block device with a FS mounted on it */
2012 int
2013 vnode_ismountedon(vnode_t vp)
2014 {
2015 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
2016 }
2017
2018 void
2019 vnode_setmountedon(vnode_t vp)
2020 {
2021 vnode_lock_spin(vp);
2022 vp->v_specflags |= SI_MOUNTEDON;
2023 vnode_unlock(vp);
2024 }
2025
2026 void
2027 vnode_clearmountedon(vnode_t vp)
2028 {
2029 vnode_lock_spin(vp);
2030 vp->v_specflags &= ~SI_MOUNTEDON;
2031 vnode_unlock(vp);
2032 }
2033
2034
2035 void
2036 vnode_settag(vnode_t vp, int tag)
2037 {
2038 vp->v_tag = tag;
2039
2040 }
2041
2042 int
2043 vnode_tag(vnode_t vp)
2044 {
2045 return(vp->v_tag);
2046 }
2047
2048 vnode_t
2049 vnode_parent(vnode_t vp)
2050 {
2051
2052 return(vp->v_parent);
2053 }
2054
2055 void
2056 vnode_setparent(vnode_t vp, vnode_t dvp)
2057 {
2058 vp->v_parent = dvp;
2059 }
2060
2061 const char *
2062 vnode_name(vnode_t vp)
2063 {
2064 /* we try to keep v_name a reasonable name for the node */
2065 return(vp->v_name);
2066 }
2067
2068 void
2069 vnode_setname(vnode_t vp, char * name)
2070 {
2071 vp->v_name = name;
2072 }
2073
2074 /* return the FS name registered when the FS was added to the kernel */
2075 void
2076 vnode_vfsname(vnode_t vp, char * buf)
2077 {
2078 strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
2079 }
2080
2081 /* return the FS type number */
2082 int
2083 vnode_vfstypenum(vnode_t vp)
2084 {
2085 return(vp->v_mount->mnt_vtable->vfc_typenum);
2086 }
2087
2088 int
2089 vnode_vfs64bitready(vnode_t vp)
2090 {
2091
2092 /*
2093 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2094 */
2095 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
2096 return(1);
2097 else
2098 return(0);
2099 }
2100
2101
2102
2103 /* return the visible flags on associated mount point of vnode_t */
2104 uint32_t
2105 vnode_vfsvisflags(vnode_t vp)
2106 {
2107 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
2108 }
2109
2110 /* return the command modifier flags on associated mount point of vnode_t */
2111 uint32_t
2112 vnode_vfscmdflags(vnode_t vp)
2113 {
2114 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
2115 }
2116
2117 /* return the max length of short symlinks for vnode_t's mount point */
2118 uint32_t
2119 vnode_vfsmaxsymlen(vnode_t vp)
2120 {
2121 return(vp->v_mount->mnt_maxsymlinklen);
2122 }
2123
2124 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2125 struct vfsstatfs *
2126 vnode_vfsstatfs(vnode_t vp)
2127 {
2128 return(&vp->v_mount->mnt_vfsstat);
2129 }
2130
2131 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
2132 void *
2133 vnode_vfsfsprivate(vnode_t vp)
2134 {
2135 return(vp->v_mount->mnt_data);
2136 }
2137
2138 /* is vnode_t in a rdonly mounted FS */
2139 int
2140 vnode_vfsisrdonly(vnode_t vp)
2141 {
2142 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
2143 }
2144
2145 int
2146 vnode_compound_rename_available(vnode_t vp)
2147 {
2148 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
2149 }
2150 int
2151 vnode_compound_rmdir_available(vnode_t vp)
2152 {
2153 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
2154 }
2155 int
2156 vnode_compound_mkdir_available(vnode_t vp)
2157 {
2158 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
2159 }
2160 int
2161 vnode_compound_remove_available(vnode_t vp)
2162 {
2163 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
2164 }
2165 int
2166 vnode_compound_open_available(vnode_t vp)
2167 {
2168 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
2169 }
2170
2171 int
2172 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2173 {
2174 return ((vp->v_mount->mnt_compound_ops & opid) != 0);
2175 }
2176
2177 /*
2178 * Returns vnode ref to current working directory; if a per-thread current
2179 * working directory is in effect, return that instead of the per process one.
2180 *
2181 * XXX Published, but not used.
2182 */
2183 vnode_t
2184 current_workingdir(void)
2185 {
2186 return vfs_context_cwd(vfs_context_current());
2187 }
2188
2189 /* returns vnode ref to current root(chroot) directory */
2190 vnode_t
2191 current_rootdir(void)
2192 {
2193 proc_t proc = current_proc();
2194 struct vnode * vp ;
2195
2196 if ( (vp = proc->p_fd->fd_rdir) ) {
2197 if ( (vnode_getwithref(vp)) )
2198 return (NULL);
2199 }
2200 return vp;
2201 }
2202
2203 /*
2204 * Get a filesec and optional acl contents from an extended attribute.
2205 * Function will attempt to retrieve ACL, UUID, and GUID information using a
2206 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2207 *
2208 * Parameters: vp The vnode on which to operate.
2209 * fsecp The filesec (and ACL, if any) being
2210 * retrieved.
2211 * ctx The vnode context in which the
2212 * operation is to be attempted.
2213 *
2214 * Returns: 0 Success
2215 * !0 errno value
2216 *
2217 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2218 * host byte order, as will be the ACL contents, if any.
2219 * Internally, we will canonicalize these values from network (PPC)
2220 * byte order after we retrieve them so that the on-disk contents
2221 * of the extended attribute are identical for both PPC and Intel
2222 * (if we were not being required to provide this service via
2223 * fallback, this would be the job of the filesystem
2224 * 'VNOP_GETATTR' call).
2225 *
2226 * We use ntohl() because it has a transitive property on Intel
2227 * machines and no effect on PPC machines. This guarantees us the same on-disk byte order on every host.
2228 *
2229 * XXX: Deleting rather than ignoring a corrupt security structure is
2230 * probably the only way to reset it without assistance from a
2231 * file system integrity checking tool. Right now we ignore it.
2232 *
2233 * XXX: We should enumerate the possible errno values here, and where
2234 * in the code they originated.
2235 */
2236 static int
2237 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2238 {
2239 kauth_filesec_t fsec;
2240 uio_t fsec_uio;
2241 size_t fsec_size;
2242 size_t xsize, rsize;
2243 int error;
2244 uint32_t host_fsec_magic;
2245 uint32_t host_acl_entrycount;
2246
2247 fsec = NULL;
2248 fsec_uio = NULL;
2249 error = 0;
2250
2251 /* find out how big the EA is */
2252 if ((error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx)) != 0) {
2253 /* no EA, no filesec */
2254 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2255 error = 0;
2256 /* either way, we are done */
2257 goto out;
2258 }
2259
2260 /*
2261 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2262 * ACE entry ACL, and if it's larger than that, it must have the right
2263 * number of bytes such that it contains a whole number of ACEs,
2264 * rather than partial entries. Otherwise, we ignore it.
2265 */
2266 if (!KAUTH_FILESEC_VALID(xsize)) {
2267 KAUTH_DEBUG(" ERROR - Bogus kauth_filesec_t: %ld bytes", xsize);
2268 error = 0;
2269 goto out;
2270 }
2271
2272 /* how many entries would fit? */
2273 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2274
2275 /* get buffer and uio */
2276 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
2277 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2278 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2279 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2280 error = ENOMEM;
2281 goto out;
2282 }
2283
2284 /* read security attribute */
2285 rsize = xsize;
2286 if ((error = vn_getxattr(vp,
2287 KAUTH_FILESEC_XATTR,
2288 fsec_uio,
2289 &rsize,
2290 XATTR_NOSECURITY,
2291 ctx)) != 0) {
2292
2293 /* no attribute - no security data */
2294 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
2295 error = 0;
2296 /* either way, we are done */
2297 goto out;
2298 }
2299
2300 /*
2301 * Validate security structure; the validation must take place in host
2302 * byte order. If it's corrupt, we will just ignore it.
2303 */
2304
2305 /* Validate the size before trying to convert it */
2306 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2307 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2308 goto out;
2309 }
2310
2311 /* Validate the magic number before trying to convert it */
2312 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2313 if (fsec->fsec_magic != host_fsec_magic) {
2314 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2315 goto out;
2316 }
2317
2318 /* Validate the entry count before trying to convert it. */
2319 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2320 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2321 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2322 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2323 goto out;
2324 }
2325 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2326 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2327 goto out;
2328 }
2329 }
2330
2331 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2332
2333 *fsecp = fsec;
2334 fsec = NULL;
2335 error = 0;
2336 out:
2337 if (fsec != NULL)
2338 kauth_filesec_free(fsec);
2339 if (fsec_uio != NULL)
2340 uio_free(fsec_uio);
2341 if (error)
2342 *fsecp = NULL;
2343 return(error);
2344 }
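/*
 * Worked example (hypothetical values) of the validate-before-use rule
 * in vnode_get_filesec(): the attribute is stored in network (big-endian)
 * order, so on a little-endian host a stored entry count of 3 reads back
 * as 0x03000000 until ntohl() restores it; the range checks must
 * therefore run on the converted value.
 */
#if 0
uint32_t disk_count = fsec->fsec_acl.acl_entrycount;	/* raw disk order */
uint32_t host_count = ntohl(disk_count);		/* host byte order */

if ((host_count != KAUTH_FILESEC_NOACL) &&
    (host_count <= KAUTH_ACL_MAX_ENTRIES)) {
	/* host_count is now safe to use for sizing and iteration */
}
#endif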
2345
2346 /*
2347 * Set a filesec and optional acl contents into an extended attribute.
2348 * The function will attempt to store ACL, UUID, and GUID information using a
2349 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2350 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2351 * original caller supplied an acl.
2352 *
2353 * Parameters: vp The vnode on which to operate.
2354 * fsec The filesec being set.
2355 * acl The acl to be associated with 'fsec'.
2356 * ctx The vnode context in which the
2357 * operation is to be attempted.
2358 *
2359 * Returns: 0 Success
2360 * !0 errno value
2361 *
2362 * Notes: Both the fsec and the acl are always valid.
2363 *
2364 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2365 * as are the acl contents, if they are used. Internally, we will
2366 * canonicalize these values into network (PPC) byte order before we
2367 * attempt to write them so that the on-disk contents of the
2368 * extended attribute are identical for both PPC and Intel (if we
2369 * were not being required to provide this service via fallback,
2370 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2371 * We reverse this process on the way out, so we leave with the
2372 * same byte order we started with.
2373 *
2374 * XXX: We should enumerate the possible errno values here, and where
2375 * in the code they originated.
2376 */
2377 static int
2378 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2379 {
2380 uio_t fsec_uio;
2381 int error;
2382 uint32_t saved_acl_copysize;
2383
2384 fsec_uio = NULL;
2385
2386 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2387 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2388 error = ENOMEM;
2389 goto out;
2390 }
2391 /*
2392 * Save the pre-converted ACL copysize, because it gets swapped too
2393 * if we are running with the wrong endianness.
2394 */
2395 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2396
2397 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2398
2399 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2400 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2401 error = vn_setxattr(vp,
2402 KAUTH_FILESEC_XATTR,
2403 fsec_uio,
2404 XATTR_NOSECURITY, /* we have auth'ed already */
2405 ctx);
2406 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2407
2408 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2409
2410 out:
2411 if (fsec_uio != NULL)
2412 uio_free(fsec_uio);
2413 return(error);
2414 }
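/*
 * Layout note (editorial sketch): the EA above is assembled from two
 * iovecs so no contiguous filesec+ACL copy is needed:
 *
 *   iovec 0: the filesec header only, i.e.
 *            KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL) bytes
 *   iovec 1: the ACL itself, KAUTH_ACL_COPYSIZE(acl) bytes, captured in
 *            'saved_acl_copysize' before the endian swap because the swap
 *            also converts the entry count that COPYSIZE depends on
 */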
2415
2416
2417 /*
2418 * Returns: 0 Success
2419 * ENOMEM Not enough space [only if has filesec]
2420 * VNOP_GETATTR: ???
2421 * vnode_get_filesec: ???
2422 * kauth_cred_guid2uid: ???
2423 * kauth_cred_guid2gid: ???
2424 * vfs_update_vfsstat: ???
2425 */
2426 int
2427 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2428 {
2429 kauth_filesec_t fsec;
2430 kauth_acl_t facl;
2431 int error;
2432 uid_t nuid;
2433 gid_t ngid;
2434
2435 /* don't ask for extended security data if the filesystem doesn't support it */
2436 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2437 VATTR_CLEAR_ACTIVE(vap, va_acl);
2438 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2439 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2440 }
2441
2442 /*
2443 * If the caller wants size values we might have to synthesise, give the
2444 * filesystem the opportunity to supply better intermediate results.
2445 */
2446 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2447 VATTR_IS_ACTIVE(vap, va_total_size) ||
2448 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2449 VATTR_SET_ACTIVE(vap, va_data_size);
2450 VATTR_SET_ACTIVE(vap, va_data_alloc);
2451 VATTR_SET_ACTIVE(vap, va_total_size);
2452 VATTR_SET_ACTIVE(vap, va_total_alloc);
2453 }
2454
2455 error = VNOP_GETATTR(vp, vap, ctx);
2456 if (error) {
2457 KAUTH_DEBUG("ERROR - returning %d", error);
2458 goto out;
2459 }
2460
2461 /*
2462 * If extended security data was requested but not returned, try the fallback
2463 * path.
2464 */
2465 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2466 fsec = NULL;
2467
2468 if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) {
2469 /* try to get the filesec */
2470 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
2471 goto out;
2472 }
2473 /* if no filesec, no attributes */
2474 if (fsec == NULL) {
2475 VATTR_RETURN(vap, va_acl, NULL);
2476 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2477 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2478 } else {
2479
2480 /* looks good, try to return what we were asked for */
2481 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2482 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2483
2484 /* only return the ACL if we were actually asked for it */
2485 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2486 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2487 VATTR_RETURN(vap, va_acl, NULL);
2488 } else {
2489 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2490 if (facl == NULL) {
2491 kauth_filesec_free(fsec);
2492 error = ENOMEM;
2493 goto out;
2494 }
2495 bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2496 VATTR_RETURN(vap, va_acl, facl);
2497 }
2498 }
2499 kauth_filesec_free(fsec);
2500 }
2501 }
2502 /*
2503 * If someone gave us an unsolicited filesec, toss it. We promise that
2504 * we're OK with a filesystem giving us anything back, but our callers
2505 * only expect what they asked for.
2506 */
2507 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2508 if (vap->va_acl != NULL)
2509 kauth_acl_free(vap->va_acl);
2510 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2511 }
2512
2513 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2514 /*
2515 * Handle the case where we need a UID/GID, but only have extended
2516 * security information.
2517 */
2518 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2519 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2520 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2521 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
2522 VATTR_RETURN(vap, va_uid, nuid);
2523 }
2524 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2525 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2526 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2527 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
2528 VATTR_RETURN(vap, va_gid, ngid);
2529 }
2530 #endif
2531
2532 /*
2533 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2534 */
2535 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2536 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2537 nuid = vap->va_uid;
2538 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2539 nuid = vp->v_mount->mnt_fsowner;
2540 if (nuid == KAUTH_UID_NONE)
2541 nuid = 99;
2542 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2543 nuid = vap->va_uid;
2544 } else {
2545 /* this will always be something sensible */
2546 nuid = vp->v_mount->mnt_fsowner;
2547 }
2548 if ((nuid == 99) && !vfs_context_issuser(ctx))
2549 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2550 VATTR_RETURN(vap, va_uid, nuid);
2551 }
2552 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2553 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2554 ngid = vap->va_gid;
2555 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2556 ngid = vp->v_mount->mnt_fsgroup;
2557 if (ngid == KAUTH_GID_NONE)
2558 ngid = 99;
2559 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2560 ngid = vap->va_gid;
2561 } else {
2562 /* this will always be something sensible */
2563 ngid = vp->v_mount->mnt_fsgroup;
2564 }
2565 if ((ngid == 99) && !vfs_context_issuser(ctx))
2566 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2567 VATTR_RETURN(vap, va_gid, ngid);
2568 }
2569
2570 /*
2571 * Synthesise some values that can be reasonably guessed.
2572 */
2573 if (!VATTR_IS_SUPPORTED(vap, va_iosize))
2574 VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
2575
2576 if (!VATTR_IS_SUPPORTED(vap, va_flags))
2577 VATTR_RETURN(vap, va_flags, 0);
2578
2579 if (!VATTR_IS_SUPPORTED(vap, va_filerev))
2580 VATTR_RETURN(vap, va_filerev, 0);
2581
2582 if (!VATTR_IS_SUPPORTED(vap, va_gen))
2583 VATTR_RETURN(vap, va_gen, 0);
2584
2585 /*
2586 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2587 */
2588 if (!VATTR_IS_SUPPORTED(vap, va_data_size))
2589 VATTR_RETURN(vap, va_data_size, 0);
2590
2591 /* do we want any of the possibly-computed values? */
2592 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2593 VATTR_IS_ACTIVE(vap, va_total_size) ||
2594 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2595 /* make sure f_bsize is valid */
2596 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2597 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0)
2598 goto out;
2599 }
2600
2601 /* default va_data_alloc from va_data_size */
2602 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
2603 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2604
2605 /* default va_total_size from va_data_size */
2606 if (!VATTR_IS_SUPPORTED(vap, va_total_size))
2607 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2608
2609 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2610 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
2611 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2612 }
2613
2614 /*
2615 * If we don't have a change time, pull it from the modtime.
2616 */
2617 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
2618 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2619
2620 /*
2621 * This is really only supported for the creation VNOPs, but since the field is there
2622 * we should populate it correctly.
2623 */
2624 VATTR_RETURN(vap, va_type, vp->v_type);
2625
2626 /*
2627 * The fsid can be obtained from the mountpoint directly.
2628 */
2629 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2630
2631 out:
2632
2633 return(error);
2634 }
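/*
 * Minimal usage sketch for vnode_getattr() ("vp" and "ctx" assumed in
 * hand): mark the attributes you want, call, then check
 * VATTR_IS_SUPPORTED() before trusting each result.
 */
#if 0
struct vnode_attr va;
int error;

VATTR_INIT(&va);
VATTR_WANTED(&va, va_data_size);
VATTR_WANTED(&va, va_modify_time);
if ((error = vnode_getattr(vp, &va, ctx)) == 0 &&
    VATTR_IS_SUPPORTED(&va, va_data_size)) {
	/* va.va_data_size is valid here */
}
#endif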
2635
2636 /*
2637 * Set the attributes on a vnode in a vnode context.
2638 *
2639 * Parameters: vp The vnode whose attributes to set.
2640 * vap A pointer to the attributes to set.
2641 * ctx The vnode context in which the
2642 * operation is to be attempted.
2643 *
2644 * Returns: 0 Success
2645 * !0 errno value
2646 *
2647 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2648 *
2649 * The contents of the data area pointed to by 'vap' may be
2650 * modified if the vnode is on a filesystem which has been
2651 * mounted with ignore-ownership flags, or by the underlying
2652 * VFS itself, or by the fallback code, if the underlying VFS
2653 * does not support ACL, UUID, or GUID attributes directly.
2654 *
2655 * XXX: We should enumerate the possible errno values here, and where
2656 * in the code they originated.
2657 */
2658 int
2659 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2660 {
2661 int error, is_perm_change=0;
2662
2663 /*
2664 * Make sure the filesystem is mounted R/W.
2665 * If not, return an error.
2666 */
2667 if (vfs_isrdonly(vp->v_mount)) {
2668 error = EROFS;
2669 goto out;
2670 }
2671 #if NAMEDSTREAMS
2672 /* For streams, va_data_size is the only settable attribute. */
2673 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2674 error = EPERM;
2675 goto out;
2676 }
2677 #endif
2678
2679 /*
2680 * If ownership is being ignored on this volume, we silently discard
2681 * ownership changes.
2682 */
2683 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2684 VATTR_CLEAR_ACTIVE(vap, va_uid);
2685 VATTR_CLEAR_ACTIVE(vap, va_gid);
2686 }
2687
2688 if ( VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)
2689 || VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_acl)) {
2690 is_perm_change = 1;
2691 }
2692
2693 /*
2694 * Make sure that extended security is enabled if we're going to try
2695 * to set any.
2696 */
2697 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2698 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2699 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2700 error = ENOTSUP;
2701 goto out;
2702 }
2703
2704 error = VNOP_SETATTR(vp, vap, ctx);
2705
2706 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
2707 error = vnode_setattr_fallback(vp, vap, ctx);
2708
2709 #if CONFIG_FSE
2710 // only send a stat_changed event if this is more than
2711 // just an access or backup time update
2712 if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time)) && (vap->va_active != VNODE_ATTR_BIT(va_backup_time))) {
2713 if (is_perm_change) {
2714 if (need_fsevent(FSE_CHOWN, vp)) {
2715 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2716 }
2717 } else if (need_fsevent(FSE_STAT_CHANGED, vp)) {
2718 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2719 }
2720 }
2721 #endif
2722
2723 out:
2724 return(error);
2725 }
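/*
 * Minimal usage sketch for vnode_setattr() ("vp" and "ctx" assumed in
 * hand): VATTR_SET() both activates an attribute and stores its new
 * value; the call fails with EROFS on a read-only mount, as above.
 */
#if 0
struct vnode_attr va;
int error;

VATTR_INIT(&va);
VATTR_SET(&va, va_mode, 0644);
error = vnode_setattr(vp, &va, ctx);
#endif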
2726
2727 /*
2728 * Fallback for setting the attributes on a vnode in a vnode context. This
2729 * function will attempt to store ACL, UUID, and GUID information utilizing
2730 * a read/modify/write operation against an EA used as a backing store for
2731 * the object.
2732 *
2733 * Parameters: vp The vnode whose attributes to set.
2734 * vap A pointer to the attributes to set.
2735 * ctx The vnode context in which the
2736 * operation is to be attempted.
2737 *
2738 * Returns: 0 Success
2739 * !0 errno value
2740 *
2741 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2742 * as are the fsec and lfsec, if they are used.
2743 *
2744 * The contents of the data area pointed to by 'vap' may be
2745 * modified to indicate that the attribute is supported for
2746 * any given requested attribute.
2747 *
2748 * XXX: We should enumerate the possible errno values here, and where
2749 * in the code they originated.
2750 */
2751 int
2752 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2753 {
2754 kauth_filesec_t fsec;
2755 kauth_acl_t facl;
2756 struct kauth_filesec lfsec;
2757 int error;
2758
2759 error = 0;
2760
2761 /*
2762 * Extended security fallback via extended attributes.
2763 *
2764 * Note that we do not free the filesec; the caller is expected to
2765 * do this.
2766 */
2767 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2768 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2769 VATTR_NOT_RETURNED(vap, va_guuid)) {
2770 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2771
2772 /*
2773 * Fail for file types that we don't permit extended security
2774 * to be set on.
2775 */
2776 if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
2777 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2778 error = EINVAL;
2779 goto out;
2780 }
2781
2782 /*
2783 * If we don't have all the extended security items, we need
2784 * to fetch the existing data to perform a read-modify-write
2785 * operation.
2786 */
2787 fsec = NULL;
2788 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2789 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2790 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2791 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2792 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2793 goto out;
2794 }
2795 }
2796 /* if we didn't get a filesec, use our local one */
2797 if (fsec == NULL) {
2798 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2799 fsec = &lfsec;
2800 } else {
2801 KAUTH_DEBUG("SETATTR - updating existing filesec");
2802 }
2803 /* find the ACL */
2804 facl = &fsec->fsec_acl;
2805
2806 /* if we're using the local filesec, we need to initialise it */
2807 if (fsec == &lfsec) {
2808 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2809 fsec->fsec_owner = kauth_null_guid;
2810 fsec->fsec_group = kauth_null_guid;
2811 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2812 facl->acl_flags = 0;
2813 }
2814
2815 /*
2816 * Update with the supplied attributes.
2817 */
2818 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2819 KAUTH_DEBUG("SETATTR - updating owner UUID");
2820 fsec->fsec_owner = vap->va_uuuid;
2821 VATTR_SET_SUPPORTED(vap, va_uuuid);
2822 }
2823 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2824 KAUTH_DEBUG("SETATTR - updating group UUID");
2825 fsec->fsec_group = vap->va_guuid;
2826 VATTR_SET_SUPPORTED(vap, va_guuid);
2827 }
2828 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2829 if (vap->va_acl == NULL) {
2830 KAUTH_DEBUG("SETATTR - removing ACL");
2831 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2832 } else {
2833 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2834 facl = vap->va_acl;
2835 }
2836 VATTR_SET_SUPPORTED(vap, va_acl);
2837 }
2838
2839 /*
2840 * If the filesec data is all invalid, we can just remove
2841 * the EA completely.
2842 */
2843 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2844 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2845 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2846 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2847 /* no attribute is ok, nothing to delete */
2848 if (error == ENOATTR)
2849 error = 0;
2850 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2851 } else {
2852 /* write the EA */
2853 error = vnode_set_filesec(vp, fsec, facl, ctx);
2854 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2855 }
2856
2857 /* if we fetched a filesec, dispose of the buffer */
2858 if (fsec != &lfsec)
2859 kauth_filesec_free(fsec);
2860 }
2861 out:
2862
2863 return(error);
2864 }
2865
2866 /*
2867 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
2868 * event on a vnode.
2869 */
2870 int
2871 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
2872 {
2873 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
2874 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
2875 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
2876 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
2877 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
2878 uint32_t knote_events = (events & knote_mask);
2879
2880 /* Permissions are not explicitly part of the kqueue model */
2881 if (events & VNODE_EVENT_PERMS) {
2882 knote_events |= NOTE_ATTRIB;
2883 }
2884
2885 /* Directory contents information just becomes NOTE_WRITE */
2886 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
2887 knote_events |= NOTE_WRITE;
2888 }
2889
2890 if (knote_events) {
2891 lock_vnode_and_post(vp, knote_events);
2892 #if CONFIG_FSE
2893 if (vap != NULL) {
2894 create_fsevent_from_kevent(vp, events, vap);
2895 }
2896 #else
2897 (void)vap;
2898 #endif
2899 }
2900
2901 return 0;
2902 }
2903
2904
2905
2906 int
2907 vnode_isdyldsharedcache(vnode_t vp)
2908 {
2909 return ((vp->v_flag & VSHARED_DYLD) ? 1 : 0);
2910 }
2911
2912
2913 /*
2914 * For a filesystem that isn't tracking its own vnode watchers:
2915 * check whether a vnode is being monitored.
2916 */
2917 int
2918 vnode_ismonitored(vnode_t vp) {
2919 return (vp->v_knotes.slh_first != NULL);
2920 }
2921
2922 /*
2923 * Initialize a struct vnode_attr and activate the attributes required
2924 * by the vnode_notify() call.
2925 */
2926 int
2927 vfs_get_notify_attributes(struct vnode_attr *vap)
2928 {
2929 VATTR_INIT(vap);
2930 vap->va_active = VNODE_NOTIFY_ATTRS;
2931 return 0;
2932 }
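/*
 * Sketch of the intended pairing for a filesystem that raises its own
 * events ("vp", "ctx", and the getattr step are the hypothetical
 * caller's responsibility): populate the attributes vnode_notify()
 * wants, then deliver the event.
 */
#if 0
struct vnode_attr va;

if (vnode_ismonitored(vp)) {
	vfs_get_notify_attributes(&va);
	if (vnode_getattr(vp, &va, ctx) == 0)
		vnode_notify(vp, VNODE_EVENT_WRITE, &va);
}
#endif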
2933
2934 #if CONFIG_TRIGGERS
2935 int
2936 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
2937 {
2938 int error;
2939 mount_t mp;
2940
2941 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
2942 if (mp == NULL) {
2943 return ENOENT;
2944 }
2945
2946 error = vfs_busy(mp, LK_NOWAIT);
2947 mount_iterdrop(mp);
2948
2949 if (error != 0) {
2950 return ENOENT;
2951 }
2952
2953 mount_lock(mp);
2954 if (mp->mnt_triggercallback != NULL) {
2955 error = EBUSY;
2956 mount_unlock(mp);
2957 goto out;
2958 }
2959
2960 mp->mnt_triggercallback = vtc;
2961 mp->mnt_triggerdata = data;
2962 mount_unlock(mp);
2963
2964 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
2965
2966 out:
2967 vfs_unbusy(mp);
2968 return (error);
2969 }
2970 #endif /* CONFIG_TRIGGERS */
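/*
 * Sketch of a trigger callback, with the signature inferred from the
 * invocation above (mp, op, data, ctx); "myfs_trigger_cb" and its body
 * are hypothetical. VTC_REPLACE is delivered once, synchronously, when
 * vfs_settriggercallback() installs the callback.
 */
#if 0
static void
myfs_trigger_cb(mount_t mp, vfs_trigger_callback_op_t op, void *data, vfs_context_t ctx)
{
	if (op == VTC_REPLACE) {
		/* adopt any pre-existing trigger state for this mount */
	}
}
#endif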
2971
2972 /*
2973 * Definition of vnode operations.
2974 */
2975
2976 #if 0
2977 /*
2978 *#
2979 *#% lookup dvp L ? ?
2980 *#% lookup vpp - L -
2981 */
2982 struct vnop_lookup_args {
2983 struct vnodeop_desc *a_desc;
2984 vnode_t a_dvp;
2985 vnode_t *a_vpp;
2986 struct componentname *a_cnp;
2987 vfs_context_t a_context;
2988 };
2989 #endif /* 0*/
2990
2991 /*
2992 * Returns: 0 Success
2993 * lock_fsnode:ENOENT No such file or directory [only for VFS
2994 * that is not thread safe & vnode is
2995 * currently being/has been terminated]
2996 * <vfs_lookup>:ENAMETOOLONG
2997 * <vfs_lookup>:ENOENT
2998 * <vfs_lookup>:EJUSTRETURN
2999 * <vfs_lookup>:EPERM
3000 * <vfs_lookup>:EISDIR
3001 * <vfs_lookup>:ENOTDIR
3002 * <vfs_lookup>:???
3003 *
3004 * Note: The return codes from the underlying VFS's lookup routine can't
3005 * be fully enumerated here, since third party VFS authors may not
3006 * limit their error returns to the ones documented here, even
3007 * though this may result in some programs functioning incorrectly.
3008 *
3009 * The return codes documented above are those which may currently
3010 * be returned by HFS from hfs_lookup, not including additional
3011 * error code which may be propagated from underlying routines.
3012 */
3013 errno_t
3014 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
3015 {
3016 int _err;
3017 struct vnop_lookup_args a;
3018 vnode_t vp;
3019 #ifndef __LP64__
3020 int thread_safe;
3021 int funnel_state = 0;
3022 #endif /* __LP64__ */
3023
3024 a.a_desc = &vnop_lookup_desc;
3025 a.a_dvp = dvp;
3026 a.a_vpp = vpp;
3027 a.a_cnp = cnp;
3028 a.a_context = ctx;
3029
3030 #ifndef __LP64__
3031 thread_safe = THREAD_SAFE_FS(dvp);
3032 if (!thread_safe) {
3033 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3034 return (_err);
3035 }
3036 }
3037 #endif /* __LP64__ */
3038
3039 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
3040
3041 vp = *vpp;
3042
3043 #ifndef __LP64__
3044 if (!thread_safe) {
3045 if ( (cnp->cn_flags & ISLASTCN) ) {
3046 if ( (cnp->cn_flags & LOCKPARENT) ) {
3047 if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
3048 /*
3049 * leave the fsnode lock held on
3050 * the directory, but restore the funnel...
3051 * also indicate that we need to drop the
3052 * fsnode_lock when we're done with the
3053 * system call processing for this path
3054 */
3055 cnp->cn_flags |= FSNODELOCKHELD;
3056
3057 (void) thread_funnel_set(kernel_flock, funnel_state);
3058 return (_err);
3059 }
3060 }
3061 }
3062 unlock_fsnode(dvp, &funnel_state);
3063 }
3064 #endif /* __LP64__ */
3065
3066 return (_err);
3067 }
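/*
 * Context for the dispatch above (illustrative, "myfs_*" names are
 * hypothetical): each vnode carries a v_op table indexed by
 * vdesc_offset, which a filesystem builds from an entry array such as:
 */
#if 0
static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
	{ &vnop_default_desc, (VOPFUNC)vn_default_error },
	{ &vnop_lookup_desc, (VOPFUNC)myfs_vnop_lookup },
	{ &vnop_open_desc, (VOPFUNC)myfs_vnop_open },
	{ NULL, (VOPFUNC)NULL }
};
#endif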
3068
3069 #if 0
3070 struct vnop_compound_open_args {
3071 struct vnodeop_desc *a_desc;
3072 vnode_t a_dvp;
3073 vnode_t *a_vpp;
3074 struct componentname *a_cnp;
3075 int32_t a_flags;
3076 int32_t a_fmode;
3077 struct vnode_attr *a_vap;
3078 vfs_context_t a_context;
3079 void *a_reserved;
3080 };
3081 #endif /* 0 */
3082
3083 int
3084 VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
3085 {
3086 int _err;
3087 struct vnop_compound_open_args a;
3088 int did_create = 0;
3089 int want_create;
3090 uint32_t tmp_status = 0;
3091 struct componentname *cnp = &ndp->ni_cnd;
3092
3093 want_create = (flags & VNOP_COMPOUND_OPEN_DO_CREATE);
3094
3095 a.a_desc = &vnop_compound_open_desc;
3096 a.a_dvp = dvp;
3097 a.a_vpp = vpp; /* Could be NULL */
3098 a.a_cnp = cnp;
3099 a.a_flags = flags;
3100 a.a_fmode = fmode;
3101 a.a_status = (statusp != NULL) ? statusp : &tmp_status;
3102 a.a_vap = vap;
3103 a.a_context = ctx;
3104 a.a_open_create_authorizer = vn_authorize_create;
3105 a.a_open_existing_authorizer = vn_authorize_open_existing;
3106 a.a_reserved = NULL;
3107
3108 if (dvp == NULLVP) {
3109 panic("No dvp?");
3110 }
3111 if (want_create && !vap) {
3112 panic("Want create, but no vap?");
3113 }
3114 if (!want_create && vap) {
3115 panic("Don't want create, but have a vap?");
3116 }
3117
3118 _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
3119
3120 did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
3121
3122 if (did_create && !want_create) {
3123 panic("Filesystem did a create, even though none was requested?");
3124 }
3125
3126 if (did_create) {
3127 if (!NATIVE_XATTR(dvp)) {
3128 /*
3129 * Remove stale Apple Double file (if any).
3130 */
3131 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3132 }
3133
3134 /* On create, provide kqueue notification */
3135 post_event_if_success(dvp, _err, NOTE_WRITE);
3136 }
3137
3138 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
3139 #if 0 /* FSEvents... */
3140 if (*vpp && _err && _err != EKEEPLOOKING) {
3141 vnode_put(*vpp);
3142 *vpp = NULLVP;
3143 }
3144 #endif /* 0 */
3145
3146 return (_err);
3147
3148 }
3149
3150 #if 0
3151 struct vnop_create_args {
3152 struct vnodeop_desc *a_desc;
3153 vnode_t a_dvp;
3154 vnode_t *a_vpp;
3155 struct componentname *a_cnp;
3156 struct vnode_attr *a_vap;
3157 vfs_context_t a_context;
3158 };
3159 #endif /* 0*/
3160 errno_t
3161 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3162 {
3163 int _err;
3164 struct vnop_create_args a;
3165 #ifndef __LP64__
3166 int thread_safe;
3167 int funnel_state = 0;
3168 #endif /* __LP64__ */
3169
3170 a.a_desc = &vnop_create_desc;
3171 a.a_dvp = dvp;
3172 a.a_vpp = vpp;
3173 a.a_cnp = cnp;
3174 a.a_vap = vap;
3175 a.a_context = ctx;
3176
3177 #ifndef __LP64__
3178 thread_safe = THREAD_SAFE_FS(dvp);
3179 if (!thread_safe) {
3180 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3181 return (_err);
3182 }
3183 }
3184 #endif /* __LP64__ */
3185
3186 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
3187 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3188 /*
3189 * Remove stale Apple Double file (if any).
3190 */
3191 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3192 }
3193
3194 #ifndef __LP64__
3195 if (!thread_safe) {
3196 unlock_fsnode(dvp, &funnel_state);
3197 }
3198 #endif /* __LP64__ */
3199
3200 post_event_if_success(dvp, _err, NOTE_WRITE);
3201
3202 return (_err);
3203 }
3204
3205 #if 0
3206 /*
3207 *#
3208 *#% whiteout dvp L L L
3209 *#% whiteout cnp - - -
3210 *#% whiteout flag - - -
3211 *#
3212 */
3213 struct vnop_whiteout_args {
3214 struct vnodeop_desc *a_desc;
3215 vnode_t a_dvp;
3216 struct componentname *a_cnp;
3217 int a_flags;
3218 vfs_context_t a_context;
3219 };
3220 #endif /* 0*/
3221 errno_t
3222 VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t ctx)
3223 {
3224 int _err;
3225 struct vnop_whiteout_args a;
3226 #ifndef __LP64__
3227 int thread_safe;
3228 int funnel_state = 0;
3229 #endif /* __LP64__ */
3230
3231 a.a_desc = &vnop_whiteout_desc;
3232 a.a_dvp = dvp;
3233 a.a_cnp = cnp;
3234 a.a_flags = flags;
3235 a.a_context = ctx;
3236
3237 #ifndef __LP64__
3238 thread_safe = THREAD_SAFE_FS(dvp);
3239 if (!thread_safe) {
3240 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3241 return (_err);
3242 }
3243 }
3244 #endif /* __LP64__ */
3245
3246 _err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);
3247
3248 #ifndef __LP64__
3249 if (!thread_safe) {
3250 unlock_fsnode(dvp, &funnel_state);
3251 }
3252 #endif /* __LP64__ */
3253
3254 post_event_if_success(dvp, _err, NOTE_WRITE);
3255
3256 return (_err);
3257 }
3258
3259 #if 0
3260 /*
3261 *#
3262 *#% mknod dvp L U U
3263 *#% mknod vpp - X -
3264 *#
3265 */
3266 struct vnop_mknod_args {
3267 struct vnodeop_desc *a_desc;
3268 vnode_t a_dvp;
3269 vnode_t *a_vpp;
3270 struct componentname *a_cnp;
3271 struct vnode_attr *a_vap;
3272 vfs_context_t a_context;
3273 };
3274 #endif /* 0*/
3275 errno_t
3276 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3277 {
3278
3279 int _err;
3280 struct vnop_mknod_args a;
3281 #ifndef __LP64__
3282 int thread_safe;
3283 int funnel_state = 0;
3284 #endif /* __LP64__ */
3285
3286 a.a_desc = &vnop_mknod_desc;
3287 a.a_dvp = dvp;
3288 a.a_vpp = vpp;
3289 a.a_cnp = cnp;
3290 a.a_vap = vap;
3291 a.a_context = ctx;
3292
3293 #ifndef __LP64__
3294 thread_safe = THREAD_SAFE_FS(dvp);
3295 if (!thread_safe) {
3296 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3297 return (_err);
3298 }
3299 }
3300 #endif /* __LP64__ */
3301
3302 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3303
3304 #ifndef __LP64__
3305 if (!thread_safe) {
3306 unlock_fsnode(dvp, &funnel_state);
3307 }
3308 #endif /* __LP64__ */
3309
3310 post_event_if_success(dvp, _err, NOTE_WRITE);
3311
3312 return (_err);
3313 }
3314
3315 #if 0
3316 /*
3317 *#
3318 *#% open vp L L L
3319 *#
3320 */
3321 struct vnop_open_args {
3322 struct vnodeop_desc *a_desc;
3323 vnode_t a_vp;
3324 int a_mode;
3325 vfs_context_t a_context;
3326 };
3327 #endif /* 0*/
3328 errno_t
3329 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3330 {
3331 int _err;
3332 struct vnop_open_args a;
3333 #ifndef __LP64__
3334 int thread_safe;
3335 int funnel_state = 0;
3336 #endif /* __LP64__ */
3337
3338 if (ctx == NULL) {
3339 ctx = vfs_context_current();
3340 }
3341 a.a_desc = &vnop_open_desc;
3342 a.a_vp = vp;
3343 a.a_mode = mode;
3344 a.a_context = ctx;
3345
3346 #ifndef __LP64__
3347 thread_safe = THREAD_SAFE_FS(vp);
3348 if (!thread_safe) {
3349 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3350 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3351 if ( (_err = lock_fsnode(vp, NULL)) ) {
3352 (void) thread_funnel_set(kernel_flock, funnel_state);
3353 return (_err);
3354 }
3355 }
3356 }
3357 #endif /* __LP64__ */
3358
3359 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3360
3361 #ifndef __LP64__
3362 if (!thread_safe) {
3363 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3364 unlock_fsnode(vp, NULL);
3365 }
3366 (void) thread_funnel_set(kernel_flock, funnel_state);
3367 }
3368 #endif /* __LP64__ */
3369
3370 return (_err);
3371 }
3372
3373 #if 0
3374 /*
3375 *#
3376 *#% close vp U U U
3377 *#
3378 */
3379 struct vnop_close_args {
3380 struct vnodeop_desc *a_desc;
3381 vnode_t a_vp;
3382 int a_fflag;
3383 vfs_context_t a_context;
3384 };
3385 #endif /* 0*/
3386 errno_t
3387 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3388 {
3389 int _err;
3390 struct vnop_close_args a;
3391 #ifndef __LP64__
3392 int thread_safe;
3393 int funnel_state = 0;
3394 #endif /* __LP64__ */
3395
3396 if (ctx == NULL) {
3397 ctx = vfs_context_current();
3398 }
3399 a.a_desc = &vnop_close_desc;
3400 a.a_vp = vp;
3401 a.a_fflag = fflag;
3402 a.a_context = ctx;
3403
3404 #ifndef __LP64__
3405 thread_safe = THREAD_SAFE_FS(vp);
3406 if (!thread_safe) {
3407 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3408 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3409 if ( (_err = lock_fsnode(vp, NULL)) ) {
3410 (void) thread_funnel_set(kernel_flock, funnel_state);
3411 return (_err);
3412 }
3413 }
3414 }
3415 #endif /* __LP64__ */
3416
3417 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3418
3419 #ifndef __LP64__
3420 if (!thread_safe) {
3421 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3422 unlock_fsnode(vp, NULL);
3423 }
3424 (void) thread_funnel_set(kernel_flock, funnel_state);
3425 }
3426 #endif /* __LP64__ */
3427
3428 return (_err);
3429 }
3430
3431 #if 0
3432 /*
3433 *#
3434 *#% access vp L L L
3435 *#
3436 */
3437 struct vnop_access_args {
3438 struct vnodeop_desc *a_desc;
3439 vnode_t a_vp;
3440 int a_action;
3441 vfs_context_t a_context;
3442 };
3443 #endif /* 0*/
3444 errno_t
3445 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3446 {
3447 int _err;
3448 struct vnop_access_args a;
3449 #ifndef __LP64__
3450 int thread_safe;
3451 int funnel_state = 0;
3452 #endif /* __LP64__ */
3453
3454 if (ctx == NULL) {
3455 ctx = vfs_context_current();
3456 }
3457 a.a_desc = &vnop_access_desc;
3458 a.a_vp = vp;
3459 a.a_action = action;
3460 a.a_context = ctx;
3461
3462 #ifndef __LP64__
3463 thread_safe = THREAD_SAFE_FS(vp);
3464 if (!thread_safe) {
3465 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3466 return (_err);
3467 }
3468 }
3469 #endif /* __LP64__ */
3470
3471 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3472
3473 #ifndef __LP64__
3474 if (!thread_safe) {
3475 unlock_fsnode(vp, &funnel_state);
3476 }
3477 #endif /* __LP64__ */
3478
3479 return (_err);
3480 }
3481
3482 #if 0
3483 /*
3484 *#
3485 *#% getattr vp = = =
3486 *#
3487 */
3488 struct vnop_getattr_args {
3489 struct vnodeop_desc *a_desc;
3490 vnode_t a_vp;
3491 struct vnode_attr *a_vap;
3492 vfs_context_t a_context;
3493 };
3494 #endif /* 0*/
3495 errno_t
3496 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3497 {
3498 int _err;
3499 struct vnop_getattr_args a;
3500 #ifndef __LP64__
3501 int thread_safe;
3502 int funnel_state = 0;
3503 #endif /* __LP64__ */
3504
3505 a.a_desc = &vnop_getattr_desc;
3506 a.a_vp = vp;
3507 a.a_vap = vap;
3508 a.a_context = ctx;
3509
3510 #ifndef __LP64__
3511 thread_safe = THREAD_SAFE_FS(vp);
3512 if (!thread_safe) {
3513 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3514 return (_err);
3515 }
3516 }
3517 #endif /* __LP64__ */
3518
3519 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3520
3521 #ifndef __LP64__
3522 if (!thread_safe) {
3523 unlock_fsnode(vp, &funnel_state);
3524 }
3525 #endif /* __LP64__ */
3526
3527 return (_err);
3528 }
3529
3530 #if 0
3531 /*
3532 *#
3533 *#% setattr vp L L L
3534 *#
3535 */
3536 struct vnop_setattr_args {
3537 struct vnodeop_desc *a_desc;
3538 vnode_t a_vp;
3539 struct vnode_attr *a_vap;
3540 vfs_context_t a_context;
3541 };
3542 #endif /* 0*/
3543 errno_t
3544 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3545 {
3546 int _err;
3547 struct vnop_setattr_args a;
3548 #ifndef __LP64__
3549 int thread_safe;
3550 int funnel_state = 0;
3551 #endif /* __LP64__ */
3552
3553 a.a_desc = &vnop_setattr_desc;
3554 a.a_vp = vp;
3555 a.a_vap = vap;
3556 a.a_context = ctx;
3557
3558 #ifndef __LP64__
3559 thread_safe = THREAD_SAFE_FS(vp);
3560 if (!thread_safe) {
3561 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3562 return (_err);
3563 }
3564 }
3565 #endif /* __LP64__ */
3566
3567 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3568
3569 /*
3570 * Shadow uid/gid/mod change to extended attribute file.
3571 */
3572 if (_err == 0 && !NATIVE_XATTR(vp)) {
3573 struct vnode_attr va;
3574 int change = 0;
3575
3576 VATTR_INIT(&va);
3577 if (VATTR_IS_ACTIVE(vap, va_uid)) {
3578 VATTR_SET(&va, va_uid, vap->va_uid);
3579 change = 1;
3580 }
3581 if (VATTR_IS_ACTIVE(vap, va_gid)) {
3582 VATTR_SET(&va, va_gid, vap->va_gid);
3583 change = 1;
3584 }
3585 if (VATTR_IS_ACTIVE(vap, va_mode)) {
3586 VATTR_SET(&va, va_mode, vap->va_mode);
3587 change = 1;
3588 }
3589 if (change) {
3590 vnode_t dvp;
3591 const char *vname;
3592
3593 dvp = vnode_getparent(vp);
3594 vname = vnode_getname(vp);
3595
3596 xattrfile_setattr(dvp, vname, &va, ctx);
3597 if (dvp != NULLVP)
3598 vnode_put(dvp);
3599 if (vname != NULL)
3600 vnode_putname(vname);
3601 }
3602 }
3603
3604 #ifndef __LP64__
3605 if (!thread_safe) {
3606 unlock_fsnode(vp, &funnel_state);
3607 }
3608 #endif /* __LP64__ */
3609
3610 /*
3611 * If we have changed any of the things about the file that are likely
3612 * to result in changes to authorization results, blow the vnode auth
3613 * cache
3614 */
3615 if (_err == 0 && (
3616 VATTR_IS_SUPPORTED(vap, va_mode) ||
3617 VATTR_IS_SUPPORTED(vap, va_uid) ||
3618 VATTR_IS_SUPPORTED(vap, va_gid) ||
3619 VATTR_IS_SUPPORTED(vap, va_flags) ||
3620 VATTR_IS_SUPPORTED(vap, va_acl) ||
3621 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
3622 VATTR_IS_SUPPORTED(vap, va_guuid))) {
3623 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3624
3625 #if NAMEDSTREAMS
3626 if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3627 vnode_t svp;
3628 if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3629 vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3630 vnode_put(svp);
3631 }
3632 }
3633 #endif /* NAMEDSTREAMS */
3634 }
3635
3636
3637 post_event_if_success(vp, _err, NOTE_ATTRIB);
3638
3639 return (_err);
3640 }
3641
3642
3643 #if 0
3644 /*
3645 *#
3646 *#% read vp L L L
3647 *#
3648 */
3649 struct vnop_read_args {
3650 struct vnodeop_desc *a_desc;
3651 vnode_t a_vp;
3652 struct uio *a_uio;
3653 int a_ioflag;
3654 vfs_context_t a_context;
3655 };
3656 #endif /* 0*/
3657 errno_t
3658 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3659 {
3660 int _err;
3661 struct vnop_read_args a;
3662 #ifndef __LP64__
3663 int thread_safe;
3664 int funnel_state = 0;
3665 #endif /* __LP64__ */
3666
3667 if (ctx == NULL) {
3668 ctx = vfs_context_current();
3669 }
3670
3671 a.a_desc = &vnop_read_desc;
3672 a.a_vp = vp;
3673 a.a_uio = uio;
3674 a.a_ioflag = ioflag;
3675 a.a_context = ctx;
3676
3677 #ifndef __LP64__
3678 thread_safe = THREAD_SAFE_FS(vp);
3679 if (!thread_safe) {
3680 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3681 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3682 if ( (_err = lock_fsnode(vp, NULL)) ) {
3683 (void) thread_funnel_set(kernel_flock, funnel_state);
3684 return (_err);
3685 }
3686 }
3687 }
3688 #endif /* __LP64__ */
3689
3690 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3691
3692 #ifndef __LP64__
3693 if (!thread_safe) {
3694 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3695 unlock_fsnode(vp, NULL);
3696 }
3697 (void) thread_funnel_set(kernel_flock, funnel_state);
3698 }
3699 #endif /* __LP64__ */
3700
3701 return (_err);
3702 }
3703
3704
3705 #if 0
3706 /*
3707 *#
3708 *#% write vp L L L
3709 *#
3710 */
3711 struct vnop_write_args {
3712 struct vnodeop_desc *a_desc;
3713 vnode_t a_vp;
3714 struct uio *a_uio;
3715 int a_ioflag;
3716 vfs_context_t a_context;
3717 };
3718 #endif /* 0*/
3719 errno_t
3720 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3721 {
3722 struct vnop_write_args a;
3723 int _err;
3724 #ifndef __LP64__
3725 int thread_safe;
3726 int funnel_state = 0;
3727 #endif /* __LP64__ */
3728
3729 if (ctx == NULL) {
3730 ctx = vfs_context_current();
3731 }
3732
3733 a.a_desc = &vnop_write_desc;
3734 a.a_vp = vp;
3735 a.a_uio = uio;
3736 a.a_ioflag = ioflag;
3737 a.a_context = ctx;
3738
3739 #ifndef __LP64__
3740 thread_safe = THREAD_SAFE_FS(vp);
3741 if (!thread_safe) {
3742 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3743 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3744 if ( (_err = lock_fsnode(vp, NULL)) ) {
3745 (void) thread_funnel_set(kernel_flock, funnel_state);
3746 return (_err);
3747 }
3748 }
3749 }
3750 #endif /* __LP64__ */
3751
3752 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3753
3754 #ifndef __LP64__
3755 if (!thread_safe) {
3756 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3757 unlock_fsnode(vp, NULL);
3758 }
3759 (void) thread_funnel_set(kernel_flock, funnel_state);
3760 }
3761 #endif /* __LP64__ */
3762
3763 post_event_if_success(vp, _err, NOTE_WRITE);
3764
3765 return (_err);
3766 }
3767
3768
3769 #if 0
3770 /*
3771 *#
3772 *#% ioctl vp U U U
3773 *#
3774 */
3775 struct vnop_ioctl_args {
3776 struct vnodeop_desc *a_desc;
3777 vnode_t a_vp;
3778 u_long a_command;
3779 caddr_t a_data;
3780 int a_fflag;
3781 vfs_context_t a_context;
3782 };
3783 #endif /* 0*/
3784 errno_t
3785 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3786 {
3787 int _err;
3788 struct vnop_ioctl_args a;
3789 #ifndef __LP64__
3790 int thread_safe;
3791 int funnel_state = 0;
3792 #endif /* __LP64__ */
3793
3794 if (ctx == NULL) {
3795 ctx = vfs_context_current();
3796 }
3797
3798 /*
3799 * This check should probably have been put in the TTY code instead...
3800 *
3801 * We have to be careful about what we assume during startup and shutdown.
3802 * We have to be able to use the root filesystem's device vnode even when
3803 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3804 * structure. If there is no data pointer, it doesn't matter whether
3805 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZECACHE)
3806 * which passes NULL for its data pointer can therefore be used during
3807 * mount or unmount of the root filesystem.
3808 *
3809 * Depending on what root filesystems need to do during mount/unmount, we
3810 * may need to loosen this check again in the future.
3811 */
3812 if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3813 if (data != NULL && !vnode_vfs64bitready(vp)) {
3814 return(ENOTTY);
3815 }
3816 }
3817
3818 a.a_desc = &vnop_ioctl_desc;
3819 a.a_vp = vp;
3820 a.a_command = command;
3821 a.a_data = data;
3822 a.a_fflag = fflag;
3823 a.a_context= ctx;
3824
3825 #ifndef __LP64__
3826 thread_safe = THREAD_SAFE_FS(vp);
3827 if (!thread_safe) {
3828 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3829 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3830 if ( (_err = lock_fsnode(vp, NULL)) ) {
3831 (void) thread_funnel_set(kernel_flock, funnel_state);
3832 return (_err);
3833 }
3834 }
3835 }
3836 #endif /* __LP64__ */
3837
3838 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
3839
3840 #ifndef __LP64__
3841 if (!thread_safe) {
3842 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3843 unlock_fsnode(vp, NULL);
3844 }
3845 (void) thread_funnel_set(kernel_flock, funnel_state);
3846 }
3847 #endif /* __LP64__ */
3848
3849 return (_err);
3850 }
3851
3852
3853 #if 0
3854 /*
3855 *#
3856 *#% select vp U U U
3857 *#
3858 */
3859 struct vnop_select_args {
3860 struct vnodeop_desc *a_desc;
3861 vnode_t a_vp;
3862 int a_which;
3863 int a_fflags;
3864 void *a_wql;
3865 vfs_context_t a_context;
3866 };
3867 #endif /* 0*/
3868 errno_t
3869 VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx)
3870 {
3871 int _err;
3872 struct vnop_select_args a;
3873 #ifndef __LP64__
3874 int thread_safe;
3875 int funnel_state = 0;
3876 #endif /* __LP64__ */
3877
3878 if (ctx == NULL) {
3879 ctx = vfs_context_current();
3880 }
3881 a.a_desc = &vnop_select_desc;
3882 a.a_vp = vp;
3883 a.a_which = which;
3884 a.a_fflags = fflags;
3885 a.a_context = ctx;
3886 a.a_wql = wql;
3887
3888 #ifndef __LP64__
3889 thread_safe = THREAD_SAFE_FS(vp);
3890 if (!thread_safe) {
3891 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3892 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3893 if ( (_err = lock_fsnode(vp, NULL)) ) {
3894 (void) thread_funnel_set(kernel_flock, funnel_state);
3895 return (_err);
3896 }
3897 }
3898 }
3899 #endif /* __LP64__ */
3900
3901 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3902
3903 #ifndef __LP64__
3904 if (!thread_safe) {
3905 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3906 unlock_fsnode(vp, NULL);
3907 }
3908 (void) thread_funnel_set(kernel_flock, funnel_state);
3909 }
3910 #endif /* __LP64__ */
3911
3912 return (_err);
3913 }
3914
3915
3916 #if 0
3917 /*
3918 *#
3919 *#% exchange fvp L L L
3920 *#% exchange tvp L L L
3921 *#
3922 */
3923 struct vnop_exchange_args {
3924 struct vnodeop_desc *a_desc;
3925 vnode_t a_fvp;
3926 vnode_t a_tvp;
3927 int a_options;
3928 vfs_context_t a_context;
3929 };
3930 #endif /* 0*/
3931 errno_t
3932 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3933 {
3934 int _err;
3935 struct vnop_exchange_args a;
3936 #ifndef __LP64__
3937 int thread_safe;
3938 int funnel_state = 0;
3939 vnode_t lock_first = NULL, lock_second = NULL;
3940 #endif /* __LP64__ */
3941
3942 a.a_desc = &vnop_exchange_desc;
3943 a.a_fvp = fvp;
3944 a.a_tvp = tvp;
3945 a.a_options = options;
3946 a.a_context = ctx;
3947
3948 #ifndef __LP64__
3949 thread_safe = THREAD_SAFE_FS(fvp);
3950 if (!thread_safe) {
3951 /*
3952 * Lock in vnode address order to avoid deadlocks
3953 */
3954 if (fvp < tvp) {
3955 lock_first = fvp;
3956 lock_second = tvp;
3957 } else {
3958 lock_first = tvp;
3959 lock_second = fvp;
3960 }
3961 if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
3962 return (_err);
3963 }
3964 if ( (_err = lock_fsnode(lock_second, NULL)) ) {
3965 unlock_fsnode(lock_first, &funnel_state);
3966 return (_err);
3967 }
3968 }
3969 #endif /* __LP64__ */
3970
3971 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3972
3973 #ifndef __LP64__
3974 if (!thread_safe) {
3975 unlock_fsnode(lock_second, NULL);
3976 unlock_fsnode(lock_first, &funnel_state);
3977 }
3978 #endif /* __LP64__ */
3979
3980 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3981 post_event_if_success(fvp, _err, NOTE_ATTRIB);
3982 post_event_if_success(tvp, _err, NOTE_ATTRIB);
3983
3984 return (_err);
3985 }
3986
3987
3988 #if 0
3989 /*
3990 *#
3991 *#% revoke vp U U U
3992 *#
3993 */
3994 struct vnop_revoke_args {
3995 struct vnodeop_desc *a_desc;
3996 vnode_t a_vp;
3997 int a_flags;
3998 vfs_context_t a_context;
3999 };
4000 #endif /* 0*/
4001 errno_t
4002 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
4003 {
4004 struct vnop_revoke_args a;
4005 int _err;
4006 #ifndef __LP64__
4007 int thread_safe;
4008 int funnel_state = 0;
4009 #endif /* __LP64__ */
4010
4011 a.a_desc = &vnop_revoke_desc;
4012 a.a_vp = vp;
4013 a.a_flags = flags;
4014 a.a_context = ctx;
4015
4016 #ifndef __LP64__
4017 thread_safe = THREAD_SAFE_FS(vp);
4018 if (!thread_safe) {
4019 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4020 }
4021 #endif /* __LP64__ */
4022
4023 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
4024
4025 #ifndef __LP64__
4026 if (!thread_safe) {
4027 (void) thread_funnel_set(kernel_flock, funnel_state);
4028 }
4029 #endif /* __LP64__ */
4030
4031 return (_err);
4032 }
4033
4034
4035 #if 0
4036 /*
4037 *#
4038 *# mmap - vp U U U
4039 *#
4040 */
4041 struct vnop_mmap_args {
4042 struct vnodeop_desc *a_desc;
4043 vnode_t a_vp;
4044 int a_fflags;
4045 vfs_context_t a_context;
4046 };
4047 #endif /* 0*/
4048 errno_t
4049 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
4050 {
4051 int _err;
4052 struct vnop_mmap_args a;
4053 #ifndef __LP64__
4054 int thread_safe;
4055 int funnel_state = 0;
4056 #endif /* __LP64__ */
4057
4058 a.a_desc = &vnop_mmap_desc;
4059 a.a_vp = vp;
4060 a.a_fflags = fflags;
4061 a.a_context = ctx;
4062
4063 #ifndef __LP64__
4064 thread_safe = THREAD_SAFE_FS(vp);
4065 if (!thread_safe) {
4066 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4067 return (_err);
4068 }
4069 }
4070 #endif /* __LP64__ */
4071
4072 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
4073
4074 #ifndef __LP64__
4075 if (!thread_safe) {
4076 unlock_fsnode(vp, &funnel_state);
4077 }
4078 #endif /* __LP64__ */
4079
4080 return (_err);
4081 }
4082
4083
4084 #if 0
4085 /*
4086 *#
4087 *# mnomap - vp U U U
4088 *#
4089 */
4090 struct vnop_mnomap_args {
4091 struct vnodeop_desc *a_desc;
4092 vnode_t a_vp;
4093 vfs_context_t a_context;
4094 };
4095 #endif /* 0*/
4096 errno_t
4097 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
4098 {
4099 int _err;
4100 struct vnop_mnomap_args a;
4101 #ifndef __LP64__
4102 int thread_safe;
4103 int funnel_state = 0;
4104 #endif /* __LP64__ */
4105
4106 a.a_desc = &vnop_mnomap_desc;
4107 a.a_vp = vp;
4108 a.a_context = ctx;
4109
4110 #ifndef __LP64__
4111 thread_safe = THREAD_SAFE_FS(vp);
4112 if (!thread_safe) {
4113 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4114 return (_err);
4115 }
4116 }
4117 #endif /* __LP64__ */
4118
4119 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
4120
4121 #ifndef __LP64__
4122 if (!thread_safe) {
4123 unlock_fsnode(vp, &funnel_state);
4124 }
4125 #endif /* __LP64__ */
4126
4127 return (_err);
4128 }
4129
4130
4131 #if 0
4132 /*
4133 *#
4134 *#% fsync vp L L L
4135 *#
4136 */
4137 struct vnop_fsync_args {
4138 struct vnodeop_desc *a_desc;
4139 vnode_t a_vp;
4140 int a_waitfor;
4141 vfs_context_t a_context;
4142 };
4143 #endif /* 0*/
4144 errno_t
4145 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
4146 {
4147 struct vnop_fsync_args a;
4148 int _err;
4149 #ifndef __LP64__
4150 int thread_safe;
4151 int funnel_state = 0;
4152 #endif /* __LP64__ */
4153
4154 a.a_desc = &vnop_fsync_desc;
4155 a.a_vp = vp;
4156 a.a_waitfor = waitfor;
4157 a.a_context = ctx;
4158
4159 #ifndef __LP64__
4160 thread_safe = THREAD_SAFE_FS(vp);
4161 if (!thread_safe) {
4162 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4163 return (_err);
4164 }
4165 }
4166 #endif /* __LP64__ */
4167
4168 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
4169
4170 #ifndef __LP64__
4171 if (!thread_safe) {
4172 unlock_fsnode(vp, &funnel_state);
4173 }
4174 #endif /* __LP64__ */
4175
4176 return (_err);
4177 }
4178
4179
4180 #if 0
4181 /*
4182 *#
4183 *#% remove dvp L U U
4184 *#% remove vp L U U
4185 *#
4186 */
4187 struct vnop_remove_args {
4188 struct vnodeop_desc *a_desc;
4189 vnode_t a_dvp;
4190 vnode_t a_vp;
4191 struct componentname *a_cnp;
4192 int a_flags;
4193 vfs_context_t a_context;
4194 };
4195 #endif /* 0*/
4196 errno_t
4197 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
4198 {
4199 int _err;
4200 struct vnop_remove_args a;
4201 #ifndef __LP64__
4202 int thread_safe;
4203 int funnel_state = 0;
4204 #endif /* __LP64__ */
4205
4206 a.a_desc = &vnop_remove_desc;
4207 a.a_dvp = dvp;
4208 a.a_vp = vp;
4209 a.a_cnp = cnp;
4210 a.a_flags = flags;
4211 a.a_context = ctx;
4212
4213 #ifndef __LP64__
4214 thread_safe = THREAD_SAFE_FS(dvp);
4215 if (!thread_safe) {
4216 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4217 return (_err);
4218 }
4219 }
4220 #endif /* __LP64__ */
4221
4222 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
4223
4224 if (_err == 0) {
4225 vnode_setneedinactive(vp);
4226
4227 if ( !(NATIVE_XATTR(dvp)) ) {
4228 /*
4229 * Remove any associated extended attribute file (._ AppleDouble file).
4230 */
4231 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4232 }
4233 }
4234
4235 #ifndef __LP64__
4236 if (!thread_safe) {
4237 unlock_fsnode(vp, &funnel_state);
4238 }
4239 #endif /* __LP64__ */
4240
4241 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4242 post_event_if_success(dvp, _err, NOTE_WRITE);
4243
4244 return (_err);
4245 }
4246
4247 int
4248 VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
4249 {
4250 int _err;
4251 struct vnop_compound_remove_args a;
4252 int no_vp = (*vpp == NULLVP);
4253
4254 a.a_desc = &vnop_compound_remove_desc;
4255 a.a_dvp = dvp;
4256 a.a_vpp = vpp;
4257 a.a_cnp = &ndp->ni_cnd;
4258 a.a_flags = flags;
4259 a.a_vap = vap;
4260 a.a_context = ctx;
4261 a.a_remove_authorizer = vn_authorize_unlink;
4262
4263 _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
4264 if (_err == 0) {
4265 vnode_setneedinactive(*vpp);
4266
4267 if ( !(NATIVE_XATTR(dvp)) ) {
4268 /*
4269 * Remove any associated extended attribute file (._ AppleDouble file).
4270 */
4271 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
4272 }
4273 }
4274
4275 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4276 post_event_if_success(dvp, _err, NOTE_WRITE);
4277
4278 if (no_vp) {
4279 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4280 if (*vpp && _err && _err != EKEEPLOOKING) {
4281 vnode_put(*vpp);
4282 *vpp = NULLVP;
4283 }
4284 }
4285
4286 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
4287
4288 return (_err);
4289 }
4290
4291 #if 0
4292 /*
4293 *#
4294 *#% link vp U U U
4295 *#% link tdvp L U U
4296 *#
4297 */
4298 struct vnop_link_args {
4299 struct vnodeop_desc *a_desc;
4300 vnode_t a_vp;
4301 vnode_t a_tdvp;
4302 struct componentname *a_cnp;
4303 vfs_context_t a_context;
4304 };
4305 #endif /* 0*/
4306 errno_t
4307 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
4308 {
4309 int _err;
4310 struct vnop_link_args a;
4311 #ifndef __LP64__
4312 int thread_safe;
4313 int funnel_state = 0;
4314 #endif /* __LP64__ */
4315
4316 /*
4317 * For file systems with non-native extended attributes,
4318 * disallow linking to an existing "._" Apple Double file.
4319 */
4320 if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
4321 const char *vname;
4322
4323 vname = vnode_getname(vp);
4324 if (vname != NULL) {
4325 _err = 0;
4326 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
4327 _err = EPERM;
4328 }
4329 vnode_putname(vname);
4330 if (_err)
4331 return (_err);
4332 }
4333 }
4334 a.a_desc = &vnop_link_desc;
4335 a.a_vp = vp;
4336 a.a_tdvp = tdvp;
4337 a.a_cnp = cnp;
4338 a.a_context = ctx;
4339
4340 #ifndef __LP64__
4341 thread_safe = THREAD_SAFE_FS(vp);
4342 if (!thread_safe) {
4343 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4344 return (_err);
4345 }
4346 }
4347 #endif /* __LP64__ */
4348
4349 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
4350
4351 #ifndef __LP64__
4352 if (!thread_safe) {
4353 unlock_fsnode(vp, &funnel_state);
4354 }
4355 #endif /* __LP64__ */
4356
4357 post_event_if_success(vp, _err, NOTE_LINK);
4358 post_event_if_success(tdvp, _err, NOTE_WRITE);
4359
4360 return (_err);
4361 }
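
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): the "._" test applied in VNOP_LINK above, restated on its
 * own.  A name is treated as an AppleDouble sidecar when it begins
 * with "._" and has at least one more character.
 */
#if 0 /* illustrative sketch */
static int
example_is_apple_double_name(const char *name)
{
	return (name[0] == '.' && name[1] == '_' && name[2] != '\0');
}
#endif /* illustrative sketch */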
4362
4363 errno_t
4364 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4365 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4366 uint32_t flags, vfs_context_t ctx)
4367 {
4368 int _err;
4369 vnode_t src_attr_vp = NULLVP;
4370 vnode_t dst_attr_vp = NULLVP;
4371 struct nameidata fromnd;
4372 struct nameidata tond;
4373 char smallname1[48];
4374 char smallname2[48];
4375 char *xfromname = NULL;
4376 char *xtoname = NULL;
4377 int batched;
4378
4379 batched = vnode_compound_rename_available(fdvp);
4380
4381 #ifndef __LP64__
4382 vnode_t fdvp_unsafe = (THREAD_SAFE_FS(fdvp) ? NULLVP : fdvp);
4383 #endif /* __LP64__ */
4384
4385 if (!batched) {
4386 if (*fvpp == NULLVP)
4387 panic("Not batched, and no fvp?");
4388 }
4389
4390 /*
4391 * We need to preflight any potential AppleDouble file for the source file
4392 * before doing the rename operation, since we could potentially be doing
4393 * this operation on a network filesystem, and would end up duplicating
4394 * the work. Also, save the source and destination names. Skip it if the
4395 * source has a "._" prefix.
4396 */
4397
4398 if (!NATIVE_XATTR(fdvp) &&
4399 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
4400 size_t len;
4401 int error;
4402
4403 /* Get source attribute file name. */
4404 len = fcnp->cn_namelen + 3;
4405 if (len > sizeof(smallname1)) {
4406 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
4407 } else {
4408 xfromname = &smallname1[0];
4409 }
4410 strlcpy(xfromname, "._", min(sizeof smallname1, len));
4411 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
4412 xfromname[len-1] = '\0';
4413
4414 /* Get destination attribute file name. */
4415 len = tcnp->cn_namelen + 3;
4416 if (len > sizeof(smallname2)) {
4417 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
4418 } else {
4419 xtoname = &smallname2[0];
4420 }
4421 strlcpy(xtoname, "._", min(sizeof smallname2, len));
4422 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
4423 xtoname[len-1] = '\0';
4424
4425 /*
4426 * Look up the source attribute file and keep a reference on it if it exists.
4427 * Note that we do the namei with a nameiop of RENAME, which is different from
4428 * the one used in the rename syscall. It's OK if the source file does not exist, since this
4429 * is only for AppleDouble files.
4430 */
4431 if (xfromname != NULL) {
4432 NDINIT(&fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
4433 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
4434 fromnd.ni_dvp = fdvp;
4435 error = namei(&fromnd);
4436
4437 /*
4438 * If there was an error looking up source attribute file,
4439 * we'll behave as if it didn't exist.
4440 */
4441
4442 if (error == 0) {
4443 if (fromnd.ni_vp) {
4444 /* src_attr_vp indicates need to call vnode_put / nameidone later */
4445 src_attr_vp = fromnd.ni_vp;
4446
4447 if (fromnd.ni_vp->v_type != VREG) {
4448 src_attr_vp = NULLVP;
4449 vnode_put(fromnd.ni_vp);
4450 }
4451 }
4452 /*
4453 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4454 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4455 * have a vnode here, so we drop our namei buffer for the source attribute file.
4456 */
4457 if (src_attr_vp == NULLVP) {
4458 nameidone(&fromnd);
4459 }
4460 }
4461 }
4462 }
4463
4464 if (batched) {
4465 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
4466 if (_err != 0) {
4467 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
4468 }
4469
4470 } else {
4471 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4472 }
4473
4474 if (_err == 0) {
4475 mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
4476 }
4477
4478 /*
4479 * Rename any associated extended attribute file (._ AppleDouble file).
4480 */
4481 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
4482 int error = 0;
4483
4484 /*
4485 * Get destination attribute file vnode.
4486 * Note that tdvp already has an iocount reference. Make sure to check that we
4487 * get a valid vnode from namei.
4488 */
4489 NDINIT(&tond, RENAME, OP_RENAME,
4490 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4491 CAST_USER_ADDR_T(xtoname), ctx);
4492 tond.ni_dvp = tdvp;
4493 error = namei(&tond);
4494
4495 if (error)
4496 goto out;
4497
4498 if (tond.ni_vp) {
4499 dst_attr_vp = tond.ni_vp;
4500 }
4501
4502 if (src_attr_vp) {
4503 if (batched) {
4504 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd.ni_cnd, NULL,
4505 tdvp, &dst_attr_vp, &tond.ni_cnd, NULL,
4506 0, ctx);
4507 } else {
4508 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd.ni_cnd,
4509 tdvp, dst_attr_vp, &tond.ni_cnd, ctx);
4510 }
4511
4512 /* kevent notifications for moving resource files:
4513 * _err is zero if we're here, so there is no need to notify the directories;
4514 * the code below will do that. We only need to post the rename on the source
4515 * and possibly a delete on the dest.
4516 */
4517 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4518 if (dst_attr_vp) {
4519 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4520 }
4521
4522 } else if (dst_attr_vp) {
4523 /*
4524 * Just delete destination attribute file vnode if it exists, since
4525 * we didn't have a source attribute file.
4526 * Note that tdvp already has an iocount reference.
4527 */
4528
4529 struct vnop_remove_args args;
4530
4531 args.a_desc = &vnop_remove_desc;
4532 args.a_dvp = tdvp;
4533 args.a_vp = dst_attr_vp;
4534 args.a_cnp = &tond.ni_cnd;
4535 args.a_context = ctx;
4536
4537 #ifndef __LP64__
4538 if (fdvp_unsafe != NULLVP)
4539 error = lock_fsnode(dst_attr_vp, NULL);
4540 #endif /* __LP64__ */
4541 if (error == 0) {
4542 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
4543
4544 #ifndef __LP64__
4545 if (fdvp_unsafe != NULLVP)
4546 unlock_fsnode(dst_attr_vp, NULL);
4547 #endif /* __LP64__ */
4548
4549 if (error == 0)
4550 vnode_setneedinactive(dst_attr_vp);
4551 }
4552
4553 /* kevent notification for deleting the destination's attribute file
4554 * if it existed. Only need to post the delete on the destination, since
4555 * the code below will handle the directories.
4556 */
4557 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4558 }
4559 }
4560 out:
4561 if (src_attr_vp) {
4562 vnode_put(src_attr_vp);
4563 nameidone(&fromnd);
4564 }
4565 if (dst_attr_vp) {
4566 vnode_put(dst_attr_vp);
4567 nameidone(&tond);
4568 }
4569
4570 if (xfromname && xfromname != &smallname1[0]) {
4571 FREE(xfromname, M_TEMP);
4572 }
4573 if (xtoname && xtoname != &smallname2[0]) {
4574 FREE(xtoname, M_TEMP);
4575 }
4576
4577 return _err;
4578 }
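
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): the sidecar-name construction performed twice in vn_rename()
 * above, i.e. "._" + the component name, heap-allocated only when the
 * caller's stack buffer is too small.
 */
#if 0 /* illustrative sketch */
static char *
example_make_xattr_name(struct componentname *cnp, char *stackbuf,
    size_t stackbuflen)
{
	size_t len = cnp->cn_namelen + 3;	/* "._" + name + '\0' */
	char *xname;

	if (len > stackbuflen) {
		MALLOC(xname, char *, len, M_TEMP, M_WAITOK);
	} else {
		xname = stackbuf;
	}
	strlcpy(xname, "._", len);
	strncat(xname, cnp->cn_nameptr, cnp->cn_namelen);
	xname[len - 1] = '\0';
	return (xname);	/* caller must FREE() if not stackbuf */
}
#endif /* illustrative sketch */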
4579
4580
4581 #if 0
4582 /*
4583 *#
4584 *#% rename fdvp U U U
4585 *#% rename fvp U U U
4586 *#% rename tdvp L U U
4587 *#% rename tvp X U U
4588 *#
4589 */
4590 struct vnop_rename_args {
4591 struct vnodeop_desc *a_desc;
4592 vnode_t a_fdvp;
4593 vnode_t a_fvp;
4594 struct componentname *a_fcnp;
4595 vnode_t a_tdvp;
4596 vnode_t a_tvp;
4597 struct componentname *a_tcnp;
4598 vfs_context_t a_context;
4599 };
4600 #endif /* 0*/
4601 errno_t
4602 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4603 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4604 vfs_context_t ctx)
4605 {
4606 int _err = 0;
4607 int events;
4608 struct vnop_rename_args a;
4609 #ifndef __LP64__
4610 int funnel_state = 0;
4611 vnode_t lock_first = NULL, lock_second = NULL;
4612 vnode_t fdvp_unsafe = NULLVP;
4613 vnode_t tdvp_unsafe = NULLVP;
4614 #endif /* __LP64__ */
4615
4616 a.a_desc = &vnop_rename_desc;
4617 a.a_fdvp = fdvp;
4618 a.a_fvp = fvp;
4619 a.a_fcnp = fcnp;
4620 a.a_tdvp = tdvp;
4621 a.a_tvp = tvp;
4622 a.a_tcnp = tcnp;
4623 a.a_context = ctx;
4624
4625 #ifndef __LP64__
4626 if (!THREAD_SAFE_FS(fdvp))
4627 fdvp_unsafe = fdvp;
4628 if (!THREAD_SAFE_FS(tdvp))
4629 tdvp_unsafe = tdvp;
4630
4631 if (fdvp_unsafe != NULLVP) {
4632 /*
4633 * Lock parents in vnode address order to avoid deadlocks.
4634 * Note that it's possible for the fdvp to be unsafe
4635 * but the tdvp to be safe, because tvp could be a directory
4636 * in the root of a filesystem... in that case, tdvp is in
4637 * the filesystem that this root is mounted on.
4638 */
4639 if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
4640 lock_first = fdvp_unsafe;
4641 lock_second = NULL;
4642 } else if (fdvp_unsafe < tdvp_unsafe) {
4643 lock_first = fdvp_unsafe;
4644 lock_second = tdvp_unsafe;
4645 } else {
4646 lock_first = tdvp_unsafe;
4647 lock_second = fdvp_unsafe;
4648 }
4649 if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
4650 return (_err);
4651
4652 if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
4653 unlock_fsnode(lock_first, &funnel_state);
4654 return (_err);
4655 }
4656
4657 /*
4658 * Lock both children in vnode address order to avoid deadlocks
4659 */
4660 if (tvp == NULL || tvp == fvp) {
4661 lock_first = fvp;
4662 lock_second = NULL;
4663 } else if (fvp < tvp) {
4664 lock_first = fvp;
4665 lock_second = tvp;
4666 } else {
4667 lock_first = tvp;
4668 lock_second = fvp;
4669 }
4670 if ( (_err = lock_fsnode(lock_first, NULL)) )
4671 goto out1;
4672
4673 if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
4674 unlock_fsnode(lock_first, NULL);
4675 goto out1;
4676 }
4677 }
4678 #endif /* __LP64__ */
4679
4680 /* do the rename of the main file. */
4681 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4682
4683 #ifndef __LP64__
4684 if (fdvp_unsafe != NULLVP) {
4685 if (lock_second != NULL)
4686 unlock_fsnode(lock_second, NULL);
4687 unlock_fsnode(lock_first, NULL);
4688 }
4689 #endif /* __LP64__ */
4690
4691 if (_err == 0) {
4692 if (tvp && tvp != fvp)
4693 vnode_setneedinactive(tvp);
4694 }
4695
4696 #ifndef __LP64__
4697 out1:
4698 if (fdvp_unsafe != NULLVP) {
4699 if (tdvp_unsafe != NULLVP)
4700 unlock_fsnode(tdvp_unsafe, NULL);
4701 unlock_fsnode(fdvp_unsafe, &funnel_state);
4702 }
4703 #endif /* __LP64__ */
4704
4705 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4706 if (0 == _err) {
4707 events = NOTE_WRITE;
4708 if (vnode_isdir(fvp)) {
4709 /* Link count on dir changed only if we are moving a dir and...
4710 * --Moved to new dir, not overwriting there
4711 * --Kept in same dir and DID overwrite
4712 */
4713 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4714 events |= NOTE_LINK;
4715 }
4716 }
4717
4718 lock_vnode_and_post(fdvp, events);
4719 if (fdvp != tdvp) {
4720 lock_vnode_and_post(tdvp, events);
4721 }
4722
4723 /* If you're replacing the target, post a deletion for it */
4724 if (tvp)
4725 {
4726 lock_vnode_and_post(tvp, NOTE_DELETE);
4727 }
4728
4729 lock_vnode_and_post(fvp, NOTE_RENAME);
4730 }
4731
4732 return (_err);
4733 }
4734
4735 int
4736 VNOP_COMPOUND_RENAME(
4737 struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4738 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4739 uint32_t flags, vfs_context_t ctx)
4740 {
4741 int _err = 0;
4742 int events;
4743 struct vnop_compound_rename_args a;
4744 int no_fvp, no_tvp;
4745
4746 no_fvp = (*fvpp) == NULLVP;
4747 no_tvp = (*tvpp) == NULLVP;
4748
4749 a.a_desc = &vnop_compound_rename_desc;
4750
4751 a.a_fdvp = fdvp;
4752 a.a_fvpp = fvpp;
4753 a.a_fcnp = fcnp;
4754 a.a_fvap = fvap;
4755
4756 a.a_tdvp = tdvp;
4757 a.a_tvpp = tvpp;
4758 a.a_tcnp = tcnp;
4759 a.a_tvap = tvap;
4760
4761 a.a_flags = flags;
4762 a.a_context = ctx;
4763 a.a_rename_authorizer = vn_authorize_rename;
4764 a.a_reserved = NULL;
4765
4766 /* do the rename of the main file. */
4767 _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
4768
4769 if (_err == 0) {
4770 if (*tvpp && *tvpp != *fvpp)
4771 vnode_setneedinactive(*tvpp);
4772 }
4773
4774 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4775 if (0 == _err && *fvpp != *tvpp) {
4776 if (!*fvpp) {
4777 panic("No fvpp after compound rename?");
4778 }
4779
4780 events = NOTE_WRITE;
4781 if (vnode_isdir(*fvpp)) {
4782 /* Link count on dir changed only if we are moving a dir and...
4783 * --Moved to new dir, not overwriting there
4784 * --Kept in same dir and DID overwrite
4785 */
4786 if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
4787 events |= NOTE_LINK;
4788 }
4789 }
4790
4791 lock_vnode_and_post(fdvp, events);
4792 if (fdvp != tdvp) {
4793 lock_vnode_and_post(tdvp, events);
4794 }
4795
4796 /* If you're replacing the target, post a deletion for it */
4797 if (*tvpp)
4798 {
4799 lock_vnode_and_post(*tvpp, NOTE_DELETE);
4800 }
4801
4802 lock_vnode_and_post(*fvpp, NOTE_RENAME);
4803 }
4804
4805 if (no_fvp) {
4806 lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
4807 }
4808 if (no_tvp && *tvpp != NULLVP) {
4809 lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
4810 }
4811
4812 if (_err && _err != EKEEPLOOKING) {
4813 if (*fvpp) {
4814 vnode_put(*fvpp);
4815 *fvpp = NULLVP;
4816 }
4817 if (*tvpp) {
4818 vnode_put(*tvpp);
4819 *tvpp = NULLVP;
4820 }
4821 }
4822
4823 return (_err);
4824 }
4825
4826 int
4827 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4828 struct vnode_attr *vap, vfs_context_t ctx)
4829 {
4830 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4831 panic("Non-CREATE nameiop in vn_mkdir()?");
4832 }
4833
4834 if (vnode_compound_mkdir_available(dvp)) {
4835 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4836 } else {
4837 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4838 }
4839 }
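
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): vn_mkdir() expects a CREATE nameidata and a vnode_attr that
 * carries at least the type and mode.
 */
#if 0 /* illustrative sketch */
static int
example_mkdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp,
    vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_type, VDIR);
	VATTR_SET(&va, va_mode, 0755);

	return vn_mkdir(dvp, vpp, ndp, &va, ctx);
}
#endif /* illustrative sketch */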
4840
4841 #if 0
4842 /*
4843 *#
4844 *#% mkdir dvp L U U
4845 *#% mkdir vpp - L -
4846 *#
4847 */
4848 struct vnop_mkdir_args {
4849 struct vnodeop_desc *a_desc;
4850 vnode_t a_dvp;
4851 vnode_t *a_vpp;
4852 struct componentname *a_cnp;
4853 struct vnode_attr *a_vap;
4854 vfs_context_t a_context;
4855 };
4856 #endif /* 0*/
4857 errno_t
4858 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4859 struct vnode_attr *vap, vfs_context_t ctx)
4860 {
4861 int _err;
4862 struct vnop_mkdir_args a;
4863 #ifndef __LP64__
4864 int thread_safe;
4865 int funnel_state = 0;
4866 #endif /* __LP64__ */
4867
4868 a.a_desc = &vnop_mkdir_desc;
4869 a.a_dvp = dvp;
4870 a.a_vpp = vpp;
4871 a.a_cnp = cnp;
4872 a.a_vap = vap;
4873 a.a_context = ctx;
4874
4875 #ifndef __LP64__
4876 thread_safe = THREAD_SAFE_FS(dvp);
4877 if (!thread_safe) {
4878 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
4879 return (_err);
4880 }
4881 }
4882 #endif /* __LP64__ */
4883
4884 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4885 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4886 /*
4887 * Remove stale Apple Double file (if any).
4888 */
4889 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4890 }
4891
4892 #ifndef __LP64__
4893 if (!thread_safe) {
4894 unlock_fsnode(dvp, &funnel_state);
4895 }
4896 #endif /* __LP64__ */
4897
4898 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4899
4900 return (_err);
4901 }
4902
4903 int
4904 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4905 struct vnode_attr *vap, vfs_context_t ctx)
4906 {
4907 int _err;
4908 struct vnop_compound_mkdir_args a;
4909
4910 a.a_desc = &vnop_compound_mkdir_desc;
4911 a.a_dvp = dvp;
4912 a.a_vpp = vpp;
4913 a.a_cnp = &ndp->ni_cnd;
4914 a.a_vap = vap;
4915 a.a_flags = 0;
4916 a.a_context = ctx;
4917 #if 0
4918 a.a_mkdir_authorizer = vn_authorize_mkdir;
4919 #endif /* 0 */
4920 a.a_reserved = NULL;
4921
4922 _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
4923 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4924 /*
4925 * Remove stale Apple Double file (if any).
4926 */
4927 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4928 }
4929
4930 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4931
4932 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
4933 if (*vpp && _err && _err != EKEEPLOOKING) {
4934 vnode_put(*vpp);
4935 *vpp = NULLVP;
4936 }
4937
4938 return (_err);
4939 }
4940
4941 int
4942 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4943 {
4944 if (vnode_compound_rmdir_available(dvp)) {
4945 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4946 } else {
4947 if (*vpp == NULLVP) {
4948 panic("NULL vp, but not a compound VNOP?");
4949 }
4950 if (vap != NULL) {
4951 panic("Non-NULL vap, but not a compound VNOP?");
4952 }
4953 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4954 }
4955 }
4956
4957 #if 0
4958 /*
4959 *#
4960 *#% rmdir dvp L U U
4961 *#% rmdir vp L U U
4962 *#
4963 */
4964 struct vnop_rmdir_args {
4965 struct vnodeop_desc *a_desc;
4966 vnode_t a_dvp;
4967 vnode_t a_vp;
4968 struct componentname *a_cnp;
4969 vfs_context_t a_context;
4970 };
4971
4972 #endif /* 0*/
4973 errno_t
4974 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4975 {
4976 int _err;
4977 struct vnop_rmdir_args a;
4978 #ifndef __LP64__
4979 int thread_safe;
4980 int funnel_state = 0;
4981 #endif /* __LP64__ */
4982
4983 a.a_desc = &vnop_rmdir_desc;
4984 a.a_dvp = dvp;
4985 a.a_vp = vp;
4986 a.a_cnp = cnp;
4987 a.a_context = ctx;
4988
4989 #ifndef __LP64__
4990 thread_safe = THREAD_SAFE_FS(dvp);
4991 if (!thread_safe) {
4992 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4993 return (_err);
4994 }
4995 }
4996 #endif /* __LP64__ */
4997
4998 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4999
5000 if (_err == 0) {
5001 vnode_setneedinactive(vp);
5002
5003 if ( !(NATIVE_XATTR(dvp)) ) {
5004 /*
5005 * Remove any associated extended attribute file (._ AppleDouble file).
5006 */
5007 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
5008 }
5009 }
5010
5011 #ifndef __LP64__
5012 if (!thread_safe) {
5013 unlock_fsnode(vp, &funnel_state);
5014 }
5015 #endif /* __LP64__ */
5016
5017 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
5018 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
5019 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5020
5021 return (_err);
5022 }
5023
5024 int
5025 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
5026 struct vnode_attr *vap, vfs_context_t ctx)
5027 {
5028 int _err;
5029 struct vnop_compound_rmdir_args a;
5030 int no_vp;
5031
5032 a.a_desc = &vnop_compound_rmdir_desc;
5033 a.a_dvp = dvp;
5034 a.a_vpp = vpp;
5035 a.a_cnp = &ndp->ni_cnd;
5036 a.a_vap = vap;
5037 a.a_flags = 0;
5038 a.a_context = ctx;
5039 a.a_rmdir_authorizer = vn_authorize_rmdir;
5040 a.a_reserved = NULL;
5041
5042 no_vp = (*vpp == NULLVP);
5043
5044 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
5045 if (_err == 0 && !NATIVE_XATTR(dvp)) {
5046 /*
5047 * Remove stale Apple Double file (if any).
5048 */
5049 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
5050 }
5051
5052 if (*vpp) {
5053 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
5054 }
5055 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5056
5057 if (no_vp) {
5058 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
5059
5060 #if 0 /* Removing orphaned ._ files requires a vp.... */
5061 if (*vpp && _err && _err != EKEEPLOOKING) {
5062 vnode_put(*vpp);
5063 *vpp = NULLVP;
5064 }
5065 #endif /* 0 */
5066 }
5067
5068 return (_err);
5069 }
5070
5071 /*
5072 * Remove a ._ AppleDouble file
5073 */
5074 #define AD_STALE_SECS (180)
5075 static void
5076 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
5077 {
5078 vnode_t xvp;
5079 struct nameidata nd;
5080 char smallname[64];
5081 char *filename = NULL;
5082 size_t len;
5083
5084 if ((basename == NULL) || (basename[0] == '\0') ||
5085 (basename[0] == '.' && basename[1] == '_')) {
5086 return;
5087 }
5088 filename = &smallname[0];
5089 len = snprintf(filename, sizeof(smallname), "._%s", basename);
5090 if (len >= sizeof(smallname)) {
5091 len++; /* snprintf result doesn't include '\0' */
5092 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
5093 len = snprintf(filename, len, "._%s", basename);
5094 }
5095 NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
5096 CAST_USER_ADDR_T(filename), ctx);
5097 nd.ni_dvp = dvp;
5098 if (namei(&nd) != 0)
5099 goto out2;
5100
5101 xvp = nd.ni_vp;
5102 nameidone(&nd);
5103 if (xvp->v_type != VREG)
5104 goto out1;
5105
5106 /*
5107 * When creating a new object and a "._" file already
5108 * exists, check to see if it's a stale "._" file.
5109 *
5110 */
5111 if (!force) {
5112 struct vnode_attr va;
5113
5114 VATTR_INIT(&va);
5115 VATTR_WANTED(&va, va_data_size);
5116 VATTR_WANTED(&va, va_modify_time);
5117 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
5118 VATTR_IS_SUPPORTED(&va, va_data_size) &&
5119 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
5120 va.va_data_size != 0) {
5121 struct timeval tv;
5122
5123 microtime(&tv);
5124 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
5125 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
5126 force = 1; /* must be stale */
5127 }
5128 }
5129 }
5130 if (force) {
5131 int error;
5132
5133 error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
5134 if (error == 0)
5135 vnode_setneedinactive(xvp);
5136
5137 post_event_if_success(xvp, error, NOTE_DELETE);
5138 post_event_if_success(dvp, error, NOTE_WRITE);
5139 }
5140
5141 out1:
5142 vnode_put(dvp);
5143 vnode_put(xvp);
5144 out2:
5145 if (filename && filename != &smallname[0]) {
5146 FREE(filename, M_TEMP);
5147 }
5148 }
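
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): the staleness test used in xattrfile_remove() above.  A
 * non-empty "._" file whose modification time is more than
 * AD_STALE_SECS in the past is considered orphaned.
 */
#if 0 /* illustrative sketch */
static int
example_xattrfile_is_stale(const struct vnode_attr *va)
{
	struct timeval tv;

	microtime(&tv);
	return (va->va_data_size != 0 &&
	    (tv.tv_sec > va->va_modify_time.tv_sec) &&
	    (tv.tv_sec - va->va_modify_time.tv_sec) > AD_STALE_SECS);
}
#endif /* illustrative sketch */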
5149
5150 /*
5151 * Shadow uid/gid/mode changes to a ._ AppleDouble file
5152 */
5153 static void
5154 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
5155 vfs_context_t ctx)
5156 {
5157 vnode_t xvp;
5158 struct nameidata nd;
5159 char smallname[64];
5160 char *filename = NULL;
5161 size_t len;
5162
5163 if ((dvp == NULLVP) ||
5164 (basename == NULL) || (basename[0] == '\0') ||
5165 (basename[0] == '.' && basename[1] == '_')) {
5166 return;
5167 }
5168 filename = &smallname[0];
5169 len = snprintf(filename, sizeof(smallname), "._%s", basename);
5170 if (len >= sizeof(smallname)) {
5171 len++; /* snprintf result doesn't include '\0' */
5172 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
5173 len = snprintf(filename, len, "._%s", basename);
5174 }
5175 NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
5176 CAST_USER_ADDR_T(filename), ctx);
5177 nd.ni_dvp = dvp;
5178 if (namei(&nd) != 0)
5179 goto out2;
5180
5181 xvp = nd.ni_vp;
5182 nameidone(&nd);
5183
5184 if (xvp->v_type == VREG) {
5185 #ifndef __LP64__
5186 int thread_safe = THREAD_SAFE_FS(dvp);
5187 #endif /* __LP64__ */
5188 struct vnop_setattr_args a;
5189
5190 a.a_desc = &vnop_setattr_desc;
5191 a.a_vp = xvp;
5192 a.a_vap = vap;
5193 a.a_context = ctx;
5194
5195 #ifndef __LP64__
5196 if (!thread_safe) {
5197 if ( (lock_fsnode(xvp, NULL)) )
5198 goto out1;
5199 }
5200 #endif /* __LP64__ */
5201
5202 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
5203
5204 #ifndef __LP64__
5205 if (!thread_safe) {
5206 unlock_fsnode(xvp, NULL);
5207 }
5208 #endif /* __LP64__ */
5209 }
5210
5211
5212 #ifndef __LP64__
5213 out1:
5214 #endif /* __LP64__ */
5215 vnode_put(xvp);
5216
5217 out2:
5218 if (filename && filename != &smallname[0]) {
5219 FREE(filename, M_TEMP);
5220 }
5221 }
5222
5223 #if 0
5224 /*
5225 *#
5226 *#% symlink dvp L U U
5227 *#% symlink vpp - U -
5228 *#
5229 */
5230 struct vnop_symlink_args {
5231 struct vnodeop_desc *a_desc;
5232 vnode_t a_dvp;
5233 vnode_t *a_vpp;
5234 struct componentname *a_cnp;
5235 struct vnode_attr *a_vap;
5236 char *a_target;
5237 vfs_context_t a_context;
5238 };
5239
5240 #endif /* 0*/
5241 errno_t
5242 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5243 struct vnode_attr *vap, char *target, vfs_context_t ctx)
5244 {
5245 int _err;
5246 struct vnop_symlink_args a;
5247 #ifndef __LP64__
5248 int thread_safe;
5249 int funnel_state = 0;
5250 #endif /* __LP64__ */
5251
5252 a.a_desc = &vnop_symlink_desc;
5253 a.a_dvp = dvp;
5254 a.a_vpp = vpp;
5255 a.a_cnp = cnp;
5256 a.a_vap = vap;
5257 a.a_target = target;
5258 a.a_context = ctx;
5259
5260 #ifndef __LP64__
5261 thread_safe = THREAD_SAFE_FS(dvp);
5262 if (!thread_safe) {
5263 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
5264 return (_err);
5265 }
5266 }
5267 #endif /* __LP64__ */
5268
5269 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
5270 if (_err == 0 && !NATIVE_XATTR(dvp)) {
5271 /*
5272 * Remove stale Apple Double file (if any). Posts its own knotes
5273 */
5274 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
5275 }
5276
5277 #ifndef __LP64__
5278 if (!thread_safe) {
5279 unlock_fsnode(dvp, &funnel_state);
5280 }
5281 #endif /* __LP64__ */
5282
5283 post_event_if_success(dvp, _err, NOTE_WRITE);
5284
5285 return (_err);
5286 }
5287
5288 #if 0
5289 /*
5290 *#
5291 *#% readdir vp L L L
5292 *#
5293 */
5294 struct vnop_readdir_args {
5295 struct vnodeop_desc *a_desc;
5296 vnode_t a_vp;
5297 struct uio *a_uio;
5298 int a_flags;
5299 int *a_eofflag;
5300 int *a_numdirent;
5301 vfs_context_t a_context;
5302 };
5303
5304 #endif /* 0*/
5305 errno_t
5306 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
5307 int *numdirent, vfs_context_t ctx)
5308 {
5309 int _err;
5310 struct vnop_readdir_args a;
5311 #ifndef __LP64__
5312 int thread_safe;
5313 int funnel_state = 0;
5314 #endif /* __LP64__ */
5315
5316 a.a_desc = &vnop_readdir_desc;
5317 a.a_vp = vp;
5318 a.a_uio = uio;
5319 a.a_flags = flags;
5320 a.a_eofflag = eofflag;
5321 a.a_numdirent = numdirent;
5322 a.a_context = ctx;
5323 #ifndef __LP64__
5324 thread_safe = THREAD_SAFE_FS(vp);
5325
5326 if (!thread_safe) {
5327 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5328 return (_err);
5329 }
5330 }
5331 #endif /* __LP64__ */
5332
5333 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
5334
5335 #ifndef __LP64__
5336 if (!thread_safe) {
5337 unlock_fsnode(vp, &funnel_state);
5338 }
5339 #endif /* __LP64__ */
5340 return (_err);
5341 }
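
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): reading a directory from kernel space.  The buffer receives
 * packed struct dirent records and eofflag is set once the whole
 * directory has been consumed.
 */
#if 0 /* illustrative sketch */
static int
example_scan_dir(vnode_t dvp, char *buf, size_t buflen, vfs_context_t ctx)
{
	uio_t auio;
	int eofflag = 0, numdirent = 0, error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);

	error = VNOP_READDIR(dvp, auio, 0, &eofflag, &numdirent, ctx);

	uio_free(auio);
	return (error);
}
#endif /* illustrative sketch */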
5342
5343 #if 0
5344 /*
5345 *#
5346 *#% readdirattr vp L L L
5347 *#
5348 */
5349 struct vnop_readdirattr_args {
5350 struct vnodeop_desc *a_desc;
5351 vnode_t a_vp;
5352 struct attrlist *a_alist;
5353 struct uio *a_uio;
5354 uint32_t a_maxcount;
5355 uint32_t a_options;
5356 uint32_t *a_newstate;
5357 int *a_eofflag;
5358 uint32_t *a_actualcount;
5359 vfs_context_t a_context;
5360 };
5361
5362 #endif /* 0*/
5363 errno_t
5364 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
5365 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
5366 {
5367 int _err;
5368 struct vnop_readdirattr_args a;
5369 #ifndef __LP64__
5370 int thread_safe;
5371 int funnel_state = 0;
5372 #endif /* __LP64__ */
5373
5374 a.a_desc = &vnop_readdirattr_desc;
5375 a.a_vp = vp;
5376 a.a_alist = alist;
5377 a.a_uio = uio;
5378 a.a_maxcount = maxcount;
5379 a.a_options = options;
5380 a.a_newstate = newstate;
5381 a.a_eofflag = eofflag;
5382 a.a_actualcount = actualcount;
5383 a.a_context = ctx;
5384
5385 #ifndef __LP64__
5386 thread_safe = THREAD_SAFE_FS(vp);
5387 if (!thread_safe) {
5388 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5389 return (_err);
5390 }
5391 }
5392 #endif /* __LP64__ */
5393
5394 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
5395
5396 #ifndef __LP64__
5397 if (!thread_safe) {
5398 unlock_fsnode(vp, &funnel_state);
5399 }
5400 #endif /* __LP64__ */
5401
5402 return (_err);
5403 }
5404
5405 #if 0
5406 /*
5407 *#
5408 *#% readlink vp L L L
5409 *#
5410 */
5411 struct vnop_readlink_args {
5412 struct vnodeop_desc *a_desc;
5413 vnode_t a_vp;
5414 struct uio *a_uio;
5415 vfs_context_t a_context;
5416 };
5417 #endif /* 0 */
5418
5419 /*
5420 * Returns: 0 Success
5421 * lock_fsnode:ENOENT No such file or directory [only for VFS
5422 * that is not thread safe & vnode is
5423 * currently being/has been terminated]
5424 * <vfs_readlink>:EINVAL
5425 * <vfs_readlink>:???
5426 *
5427 * Note: The return codes from the underlying VFS's readlink routine
5428 * can't be fully enumerated here, since third party VFS authors
5429 * may not limit their error returns to the ones documented here,
5430 * even though this may result in some programs functioning
5431 * incorrectly.
5432 *
5433 * The return codes documented above are those which may currently
5434 * be returned by HFS from hfs_vnop_readlink, not including
5435 * additional error code which may be propagated from underlying
5436 * routines.
5437 */
5438 errno_t
5439 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
5440 {
5441 int _err;
5442 struct vnop_readlink_args a;
5443 #ifndef __LP64__
5444 int thread_safe;
5445 int funnel_state = 0;
5446 #endif /* __LP64__ */
5447
5448 a.a_desc = &vnop_readlink_desc;
5449 a.a_vp = vp;
5450 a.a_uio = uio;
5451 a.a_context = ctx;
5452
5453 #ifndef __LP64__
5454 thread_safe = THREAD_SAFE_FS(vp);
5455 if (!thread_safe) {
5456 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5457 return (_err);
5458 }
5459 }
5460 #endif /* __LP64__ */
5461
5462 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
5463
5464 #ifndef __LP64__
5465 if (!thread_safe) {
5466 unlock_fsnode(vp, &funnel_state);
5467 }
5468 #endif /* __LP64__ */
5469
5470 return (_err);
5471 }
5472
5473 #if 0
5474 /*
5475 *#
5476 *#% inactive vp L U U
5477 *#
5478 */
5479 struct vnop_inactive_args {
5480 struct vnodeop_desc *a_desc;
5481 vnode_t a_vp;
5482 vfs_context_t a_context;
5483 };
5484 #endif /* 0*/
5485 errno_t
5486 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
5487 {
5488 int _err;
5489 struct vnop_inactive_args a;
5490 #ifndef __LP64__
5491 int thread_safe;
5492 int funnel_state = 0;
5493 #endif /* __LP64__ */
5494
5495 a.a_desc = &vnop_inactive_desc;
5496 a.a_vp = vp;
5497 a.a_context = ctx;
5498
5499 #ifndef __LP64__
5500 thread_safe = THREAD_SAFE_FS(vp);
5501 if (!thread_safe) {
5502 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5503 return (_err);
5504 }
5505 }
5506 #endif /* __LP64__ */
5507
5508 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
5509
5510 #ifndef __LP64__
5511 if (!thread_safe) {
5512 unlock_fsnode(vp, &funnel_state);
5513 }
5514 #endif /* __LP64__ */
5515
5516 #if NAMEDSTREAMS
5517 /* For file systems that do not support named streams natively, mark
5518 * the shadow stream file vnode to be recycled as soon as the last
5519 * reference goes away. To avoid re-entering reclaim code, do not
5520 * call recycle on terminating namedstream vnodes.
5521 */
5522 if (vnode_isnamedstream(vp) &&
5523 (vp->v_parent != NULLVP) &&
5524 vnode_isshadow(vp) &&
5525 ((vp->v_lflag & VL_TERMINATE) == 0)) {
5526 vnode_recycle(vp);
5527 }
5528 #endif
5529
5530 return (_err);
5531 }
5532
5533
5534 #if 0
5535 /*
5536 *#
5537 *#% reclaim vp U U U
5538 *#
5539 */
5540 struct vnop_reclaim_args {
5541 struct vnodeop_desc *a_desc;
5542 vnode_t a_vp;
5543 vfs_context_t a_context;
5544 };
5545 #endif /* 0*/
5546 errno_t
5547 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5548 {
5549 int _err;
5550 struct vnop_reclaim_args a;
5551 #ifndef __LP64__
5552 int thread_safe;
5553 int funnel_state = 0;
5554 #endif /* __LP64__ */
5555
5556 a.a_desc = &vnop_reclaim_desc;
5557 a.a_vp = vp;
5558 a.a_context = ctx;
5559
5560 #ifndef __LP64__
5561 thread_safe = THREAD_SAFE_FS(vp);
5562 if (!thread_safe) {
5563 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5564 }
5565 #endif /* __LP64__ */
5566
5567 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5568
5569 #ifndef __LP64__
5570 if (!thread_safe) {
5571 (void) thread_funnel_set(kernel_flock, funnel_state);
5572 }
5573 #endif /* __LP64__ */
5574
5575 return (_err);
5576 }
5577
5578
5579 /*
5580 * Returns: 0 Success
5581 * lock_fsnode:ENOENT No such file or directory [only for VFS
5582 * that is not thread safe & vnode is
5583 * currently being/has been terminated]
5584 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5585 */
5586 #if 0
5587 /*
5588 *#
5589 *#% pathconf vp L L L
5590 *#
5591 */
5592 struct vnop_pathconf_args {
5593 struct vnodeop_desc *a_desc;
5594 vnode_t a_vp;
5595 int a_name;
5596 int32_t *a_retval;
5597 vfs_context_t a_context;
5598 };
5599 #endif /* 0*/
5600 errno_t
5601 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5602 {
5603 int _err;
5604 struct vnop_pathconf_args a;
5605 #ifndef __LP64__
5606 int thread_safe;
5607 int funnel_state = 0;
5608 #endif /* __LP64__ */
5609
5610 a.a_desc = &vnop_pathconf_desc;
5611 a.a_vp = vp;
5612 a.a_name = name;
5613 a.a_retval = retval;
5614 a.a_context = ctx;
5615
5616 #ifndef __LP64__
5617 thread_safe = THREAD_SAFE_FS(vp);
5618 if (!thread_safe) {
5619 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5620 return (_err);
5621 }
5622 }
5623 #endif /* __LP64__ */
5624
5625 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5626
5627 #ifndef __LP64__
5628 if (!thread_safe) {
5629 unlock_fsnode(vp, &funnel_state);
5630 }
5631 #endif /* __LP64__ */
5632
5633 return (_err);
5634 }
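
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): querying the maximum filename length supported by the file
 * system backing vp.
 */
#if 0 /* illustrative sketch */
static int
example_name_max(vnode_t vp, int32_t *name_max, vfs_context_t ctx)
{
	return VNOP_PATHCONF(vp, _PC_NAME_MAX, name_max, ctx);
}
#endif /* illustrative sketch */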
5635
5636 /*
5637 * Returns: 0 Success
5638 * err_advlock:ENOTSUP
5639 * lf_advlock:???
5640 * <vnop_advlock_desc>:???
5641 *
5642 * Notes: VFS implementations that provide advisory locking via calls
5643 * through <vnop_advlock_desc> (because lock enforcement does not
5644 * occur locally) should try to limit themselves to the return
5645 * codes documented above for lf_advlock and err_advlock.
5646 */
5647 #if 0
5648 /*
5649 *#
5650 *#% advlock vp U U U
5651 *#
5652 */
5653 struct vnop_advlock_args {
5654 struct vnodeop_desc *a_desc;
5655 vnode_t a_vp;
5656 caddr_t a_id;
5657 int a_op;
5658 struct flock *a_fl;
5659 int a_flags;
5660 vfs_context_t a_context;
5661 };
5662 #endif /* 0*/
5663 errno_t
5664 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx)
5665 {
5666 int _err;
5667 struct vnop_advlock_args a;
5668 #ifndef __LP64__
5669 int thread_safe;
5670 int funnel_state = 0;
5671 #endif /* __LP64__ */
5672
5673 a.a_desc = &vnop_advlock_desc;
5674 a.a_vp = vp;
5675 a.a_id = id;
5676 a.a_op = op;
5677 a.a_fl = fl;
5678 a.a_flags = flags;
5679 a.a_context = ctx;
5680
5681 #ifndef __LP64__
5682 thread_safe = THREAD_SAFE_FS(vp);
5683 if (!thread_safe) {
5684 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5685 }
5686 #endif /* __LP64__ */
5687
5688 /* Disallow advisory locking on non-seekable vnodes */
5689 if (vnode_isfifo(vp)) {
5690 _err = err_advlock(&a);
5691 } else {
5692 if ((vp->v_flag & VLOCKLOCAL)) {
5693 /* Advisory locking done at this layer */
5694 _err = lf_advlock(&a);
5695 } else {
5696 /* Advisory locking done by underlying filesystem */
5697 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
5698 }
5699 }
5700
5701 #ifndef __LP64__
5702 if (!thread_safe) {
5703 (void) thread_funnel_set(kernel_flock, funnel_state);
5704 }
5705 #endif /* __LP64__ */
5706
5707 return (_err);
5708 }
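
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): requesting a whole-file write lock.  A zero l_len means "to
 * end of file"; the id is normally the owning process (POSIX locks)
 * or the file glob (flock-style locks).
 */
#if 0 /* illustrative sketch */
static int
example_whole_file_lock(vnode_t vp, caddr_t id, vfs_context_t ctx)
{
	struct flock fl;

	fl.l_start = 0;
	fl.l_len = 0;		/* zero length == lock to EOF */
	fl.l_whence = SEEK_SET;
	fl.l_type = F_WRLCK;
	fl.l_pid = 0;

	return VNOP_ADVLOCK(vp, id, F_SETLK, &fl, F_FLOCK, ctx);
}
#endif /* illustrative sketch */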
5709
5710
5711
5712 #if 0
5713 /*
5714 *#
5715 *#% allocate vp L L L
5716 *#
5717 */
5718 struct vnop_allocate_args {
5719 struct vnodeop_desc *a_desc;
5720 vnode_t a_vp;
5721 off_t a_length;
5722 u_int32_t a_flags;
5723 off_t *a_bytesallocated;
5724 off_t a_offset;
5725 vfs_context_t a_context;
5726 };
5727
5728 #endif /* 0*/
5729 errno_t
5730 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5731 {
5732 int _err;
5733 struct vnop_allocate_args a;
5734 #ifndef __LP64__
5735 int thread_safe;
5736 int funnel_state = 0;
5737 #endif /* __LP64__ */
5738
5739 a.a_desc = &vnop_allocate_desc;
5740 a.a_vp = vp;
5741 a.a_length = length;
5742 a.a_flags = flags;
5743 a.a_bytesallocated = bytesallocated;
5744 a.a_offset = offset;
5745 a.a_context = ctx;
5746
5747 #ifndef __LP64__
5748 thread_safe = THREAD_SAFE_FS(vp);
5749 if (!thread_safe) {
5750 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5751 return (_err);
5752 }
5753 }
5754 #endif /* __LP64__ */
5755
5756 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5757 #if CONFIG_FSE
5758 if (_err == 0) {
5759 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5760 }
5761 #endif
5762
5763 #ifndef __LP64__
5764 if (!thread_safe) {
5765 unlock_fsnode(vp, &funnel_state);
5766 }
5767 #endif /* __LP64__ */
5768
5769 return (_err);
5770 }
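
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): preallocating space past the current end of file.
 * ALLOCATEFROMPEOF and ALLOCATEALL are flags from <sys/vnode.h>;
 * bytesallocated reports how much the file system actually set aside.
 */
#if 0 /* illustrative sketch */
static int
example_preallocate(vnode_t vp, off_t length, vfs_context_t ctx)
{
	off_t allocated = 0;

	return VNOP_ALLOCATE(vp, length, ALLOCATEFROMPEOF | ALLOCATEALL,
	    &allocated, 0, ctx);
}
#endif /* illustrative sketch */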
5771
5772 #if 0
5773 /*
5774 *#
5775 *#% pagein vp = = =
5776 *#
5777 */
5778 struct vnop_pagein_args {
5779 struct vnodeop_desc *a_desc;
5780 vnode_t a_vp;
5781 upl_t a_pl;
5782 upl_offset_t a_pl_offset;
5783 off_t a_f_offset;
5784 size_t a_size;
5785 int a_flags;
5786 vfs_context_t a_context;
5787 };
5788 #endif /* 0*/
5789 errno_t
5790 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5791 {
5792 int _err;
5793 struct vnop_pagein_args a;
5794 #ifndef __LP64__
5795 int thread_safe;
5796 int funnel_state = 0;
5797 #endif /* __LP64__ */
5798
5799 a.a_desc = &vnop_pagein_desc;
5800 a.a_vp = vp;
5801 a.a_pl = pl;
5802 a.a_pl_offset = pl_offset;
5803 a.a_f_offset = f_offset;
5804 a.a_size = size;
5805 a.a_flags = flags;
5806 a.a_context = ctx;
5807
5808 #ifndef __LP64__
5809 thread_safe = THREAD_SAFE_FS(vp);
5810 if (!thread_safe) {
5811 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5812 }
5813 #endif /* __LP64__ */
5814
5815 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5816
5817 #ifndef __LP64__
5818 if (!thread_safe) {
5819 (void) thread_funnel_set(kernel_flock, funnel_state);
5820 }
5821 #endif /* __LP64__ */
5822
5823 return (_err);
5824 }
5825
5826 #if 0
5827 /*
5828 *#
5829 *#% pageout vp = = =
5830 *#
5831 */
5832 struct vnop_pageout_args {
5833 struct vnodeop_desc *a_desc;
5834 vnode_t a_vp;
5835 upl_t a_pl;
5836 upl_offset_t a_pl_offset;
5837 off_t a_f_offset;
5838 size_t a_size;
5839 int a_flags;
5840 vfs_context_t a_context;
5841 };
5842
5843 #endif /* 0*/
5844 errno_t
5845 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5846 {
5847 int _err;
5848 struct vnop_pageout_args a;
5849 #ifndef __LP64__
5850 int thread_safe;
5851 int funnel_state = 0;
5852 #endif /* __LP64__ */
5853
5854 a.a_desc = &vnop_pageout_desc;
5855 a.a_vp = vp;
5856 a.a_pl = pl;
5857 a.a_pl_offset = pl_offset;
5858 a.a_f_offset = f_offset;
5859 a.a_size = size;
5860 a.a_flags = flags;
5861 a.a_context = ctx;
5862
5863 #ifndef __LP64__
5864 thread_safe = THREAD_SAFE_FS(vp);
5865 if (!thread_safe) {
5866 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5867 }
5868 #endif /* __LP64__ */
5869
5870 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5871
5872 #ifndef __LP64__
5873 if (!thread_safe) {
5874 (void) thread_funnel_set(kernel_flock, funnel_state);
5875 }
5876 #endif /* __LP64__ */
5877
5878 post_event_if_success(vp, _err, NOTE_WRITE);
5879
5880 return (_err);
5881 }
5882
5883 int
5884 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5885 {
5886 if (vnode_compound_remove_available(dvp)) {
5887 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5888 } else {
5889 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5890 }
5891 }
5892
5893
5894 #if 0
5895 /*
5896 *#
5897 *#% searchfs vp L L L
5898 *#
5899 */
5900 struct vnop_searchfs_args {
5901 struct vnodeop_desc *a_desc;
5902 vnode_t a_vp;
5903 void *a_searchparams1;
5904 void *a_searchparams2;
5905 struct attrlist *a_searchattrs;
5906 uint32_t a_maxmatches;
5907 struct timeval *a_timelimit;
5908 struct attrlist *a_returnattrs;
5909 uint32_t *a_nummatches;
5910 uint32_t a_scriptcode;
5911 uint32_t a_options;
5912 struct uio *a_uio;
5913 struct searchstate *a_searchstate;
5914 vfs_context_t a_context;
5915 };
5916
5917 #endif /* 0*/
5918 errno_t
5919 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5920 {
5921 int _err;
5922 struct vnop_searchfs_args a;
5923 #ifndef __LP64__
5924 int thread_safe;
5925 int funnel_state = 0;
5926 #endif /* __LP64__ */
5927
5928 a.a_desc = &vnop_searchfs_desc;
5929 a.a_vp = vp;
5930 a.a_searchparams1 = searchparams1;
5931 a.a_searchparams2 = searchparams2;
5932 a.a_searchattrs = searchattrs;
5933 a.a_maxmatches = maxmatches;
5934 a.a_timelimit = timelimit;
5935 a.a_returnattrs = returnattrs;
5936 a.a_nummatches = nummatches;
5937 a.a_scriptcode = scriptcode;
5938 a.a_options = options;
5939 a.a_uio = uio;
5940 a.a_searchstate = searchstate;
5941 a.a_context = ctx;
5942
5943 #ifndef __LP64__
5944 thread_safe = THREAD_SAFE_FS(vp);
5945 if (!thread_safe) {
5946 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5947 return (_err);
5948 }
5949 }
5950 #endif /* __LP64__ */
5951
5952 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5953
5954 #ifndef __LP64__
5955 if (!thread_safe) {
5956 unlock_fsnode(vp, &funnel_state);
5957 }
5958 #endif /* __LP64__ */
5959
5960 return (_err);
5961 }
5962
5963 #if 0
5964 /*
5965 *#
5966 *#% copyfile fvp U U U
5967 *#% copyfile tdvp L U U
5968 *#% copyfile tvp X U U
5969 *#
5970 */
5971 struct vnop_copyfile_args {
5972 struct vnodeop_desc *a_desc;
5973 vnode_t a_fvp;
5974 vnode_t a_tdvp;
5975 vnode_t a_tvp;
5976 struct componentname *a_tcnp;
5977 int a_mode;
5978 int a_flags;
5979 vfs_context_t a_context;
5980 };
5981 #endif /* 0*/
5982 errno_t
5983 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5984 int mode, int flags, vfs_context_t ctx)
5985 {
5986 int _err;
5987 struct vnop_copyfile_args a;
5988 a.a_desc = &vnop_copyfile_desc;
5989 a.a_fvp = fvp;
5990 a.a_tdvp = tdvp;
5991 a.a_tvp = tvp;
5992 a.a_tcnp = tcnp;
5993 a.a_mode = mode;
5994 a.a_flags = flags;
5995 a.a_context = ctx;
5996 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5997 return (_err);
5998 }
5999
6000 errno_t
6001 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
6002 {
6003 struct vnop_getxattr_args a;
6004 int error;
6005 #ifndef __LP64__
6006 int thread_safe;
6007 int funnel_state = 0;
6008 #endif /* __LP64__ */
6009
6010 a.a_desc = &vnop_getxattr_desc;
6011 a.a_vp = vp;
6012 a.a_name = name;
6013 a.a_uio = uio;
6014 a.a_size = size;
6015 a.a_options = options;
6016 a.a_context = ctx;
6017
6018 #ifndef __LP64__
6019 thread_safe = THREAD_SAFE_FS(vp);
6020 if (!thread_safe) {
6021 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
6022 return (error);
6023 }
6024 }
6025 #endif /* __LP64__ */
6026
6027 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
6028
6029 #ifndef __LP64__
6030 if (!thread_safe) {
6031 unlock_fsnode(vp, &funnel_state);
6032 }
6033 #endif /* __LP64__ */
6034
6035 return (error);
6036 }
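
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): reading an extended attribute into a kernel buffer.  With a
 * non-NULL uio the size out-parameter is unused, so NULL is passed.
 */
#if 0 /* illustrative sketch */
static int
example_read_xattr(vnode_t vp, const char *name, void *buf, size_t buflen,
    size_t *actual, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);

	error = VNOP_GETXATTR(vp, name, auio, NULL, 0, ctx);
	if (error == 0)
		*actual = buflen - uio_resid(auio);

	uio_free(auio);
	return (error);
}
#endif /* illustrative sketch */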
6037
6038 errno_t
6039 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
6040 {
6041 struct vnop_setxattr_args a;
6042 int error;
6043 #ifndef __LP64__
6044 int thread_safe;
6045 int funnel_state = 0;
6046 #endif /* __LP64__ */
6047
6048 a.a_desc = &vnop_setxattr_desc;
6049 a.a_vp = vp;
6050 a.a_name = name;
6051 a.a_uio = uio;
6052 a.a_options = options;
6053 a.a_context = ctx;
6054
6055 #ifndef __LP64__
6056 thread_safe = THREAD_SAFE_FS(vp);
6057 if (!thread_safe) {
6058 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
6059 return (error);
6060 }
6061 }
6062 #endif /* __LP64__ */
6063
6064 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
6065
6066 #ifndef __LP64__
6067 if (!thread_safe) {
6068 unlock_fsnode(vp, &funnel_state);
6069 }
6070 #endif /* __LP64__ */
6071
6072 if (error == 0)
6073 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
6074
6075 post_event_if_success(vp, error, NOTE_ATTRIB);
6076
6077 return (error);
6078 }
6079
6080 errno_t
6081 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
6082 {
6083 struct vnop_removexattr_args a;
6084 int error;
6085 #ifndef __LP64__
6086 int thread_safe;
6087 int funnel_state = 0;
6088 #endif /* __LP64__ */
6089
6090 a.a_desc = &vnop_removexattr_desc;
6091 a.a_vp = vp;
6092 a.a_name = name;
6093 a.a_options = options;
6094 a.a_context = ctx;
6095
6096 #ifndef __LP64__
6097 thread_safe = THREAD_SAFE_FS(vp);
6098 if (!thread_safe) {
6099 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
6100 return (error);
6101 }
6102 }
6103 #endif /* __LP64__ */
6104
6105 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
6106
6107 #ifndef __LP64__
6108 if (!thread_safe) {
6109 unlock_fsnode(vp, &funnel_state);
6110 }
6111 #endif /* __LP64__ */
6112
6113 post_event_if_success(vp, error, NOTE_ATTRIB);
6114
6115 return (error);
6116 }
6117
6118 errno_t
6119 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
6120 {
6121 struct vnop_listxattr_args a;
6122 int error;
6123 #ifndef __LP64__
6124 int thread_safe;
6125 int funnel_state = 0;
6126 #endif /* __LP64__ */
6127
6128 a.a_desc = &vnop_listxattr_desc;
6129 a.a_vp = vp;
6130 a.a_uio = uio;
6131 a.a_size = size;
6132 a.a_options = options;
6133 a.a_context = ctx;
6134
6135 #ifndef __LP64__
6136 thread_safe = THREAD_SAFE_FS(vp);
6137 if (!thread_safe) {
6138 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
6139 return (error);
6140 }
6141 }
6142 #endif /* __LP64__ */
6143
6144 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
6145
6146 #ifndef __LP64__
6147 if (!thread_safe) {
6148 unlock_fsnode(vp, &funnel_state);
6149 }
6150 #endif /* __LP64__ */
6151
6152 return (error);
6153 }
6154
6155
6156 #if 0
6157 /*
6158 *#
6159 *#% blktooff vp = = =
6160 *#
6161 */
6162 struct vnop_blktooff_args {
6163 struct vnodeop_desc *a_desc;
6164 vnode_t a_vp;
6165 daddr64_t a_lblkno;
6166 off_t *a_offset;
6167 };
6168 #endif /* 0*/
6169 errno_t
6170 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
6171 {
6172 int _err;
6173 struct vnop_blktooff_args a;
6174 #ifndef __LP64__
6175 int thread_safe;
6176 int funnel_state = 0;
6177 #endif /* __LP64__ */
6178
6179 a.a_desc = &vnop_blktooff_desc;
6180 a.a_vp = vp;
6181 a.a_lblkno = lblkno;
6182 a.a_offset = offset;
6183
6184 #ifndef __LP64__
6185 thread_safe = THREAD_SAFE_FS(vp);
6186 if (!thread_safe) {
6187 funnel_state = thread_funnel_set(kernel_flock, TRUE);
6188 }
6189 #endif /* __LP64__ */
6190
6191 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
6192
6193 #ifndef __LP64__
6194 if (!thread_safe) {
6195 (void) thread_funnel_set(kernel_flock, funnel_state);
6196 }
6197 #endif /* __LP64__ */
6198
6199 return (_err);
6200 }
6201
6202 #if 0
6203 /*
6204 *#
6205 *#% offtoblk vp = = =
6206 *#
6207 */
6208 struct vnop_offtoblk_args {
6209 struct vnodeop_desc *a_desc;
6210 vnode_t a_vp;
6211 off_t a_offset;
6212 daddr64_t *a_lblkno;
6213 };
6214 #endif /* 0*/
6215 errno_t
6216 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
6217 {
6218 int _err;
6219 struct vnop_offtoblk_args a;
6220 #ifndef __LP64__
6221 int thread_safe;
6222 int funnel_state = 0;
6223 #endif /* __LP64__ */
6224
6225 a.a_desc = &vnop_offtoblk_desc;
6226 a.a_vp = vp;
6227 a.a_offset = offset;
6228 a.a_lblkno = lblkno;
6229
6230 #ifndef __LP64__
6231 thread_safe = THREAD_SAFE_FS(vp);
6232 if (!thread_safe) {
6233 funnel_state = thread_funnel_set(kernel_flock, TRUE);
6234 }
6235 #endif /* __LP64__ */
6236
6237 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
6238
6239 #ifndef __LP64__
6240 if (!thread_safe) {
6241 (void) thread_funnel_set(kernel_flock, funnel_state);
6242 }
6243 #endif /* __LP64__ */
6244
6245 return (_err);
6246 }
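
/*
 * Illustrative sketch (hypothetical file system, not part of the
 * original file): for a fixed logical block size, the blktooff /
 * offtoblk pair reduces to simple arithmetic.  The 4096-byte block
 * size below is an assumed example value.
 */
#if 0 /* illustrative sketch */
#define EXAMPLE_BLOCK_SIZE	4096

static int
example_vnop_blktooff(struct vnop_blktooff_args *ap)
{
	*ap->a_offset = (off_t)ap->a_lblkno * EXAMPLE_BLOCK_SIZE;
	return (0);
}

static int
example_vnop_offtoblk(struct vnop_offtoblk_args *ap)
{
	*ap->a_lblkno = (daddr64_t)(ap->a_offset / EXAMPLE_BLOCK_SIZE);
	return (0);
}
#endif /* illustrative sketch */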
6247
6248 #if 0
6249 /*
6250 *#
6251 *#% blockmap vp L L L
6252 *#
6253 */
6254 struct vnop_blockmap_args {
6255 struct vnodeop_desc *a_desc;
6256 vnode_t a_vp;
6257 off_t a_foffset;
6258 size_t a_size;
6259 daddr64_t *a_bpn;
6260 size_t *a_run;
6261 void *a_poff;
6262 int a_flags;
6263 vfs_context_t a_context;
6264 };
6265 #endif /* 0*/
6266 errno_t
6267 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
6268 {
6269 int _err;
6270 struct vnop_blockmap_args a;
6271 #ifndef __LP64__
6272 int thread_safe;
6273 int funnel_state = 0;
6274 #endif /* __LP64__ */
6275
6276 if (ctx == NULL) {
6277 ctx = vfs_context_current();
6278 }
6279 a.a_desc = &vnop_blockmap_desc;
6280 a.a_vp = vp;
6281 a.a_foffset = foffset;
6282 a.a_size = size;
6283 a.a_bpn = bpn;
6284 a.a_run = run;
6285 a.a_poff = poff;
6286 a.a_flags = flags;
6287 a.a_context = ctx;
6288
6289 #ifndef __LP64__
6290 thread_safe = THREAD_SAFE_FS(vp);
6291 if (!thread_safe) {
6292 funnel_state = thread_funnel_set(kernel_flock, TRUE);
6293 }
6294 #endif /* __LP64__ */
6295
6296 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
6297
6298 #ifndef __LP64__
6299 if (!thread_safe) {
6300 (void) thread_funnel_set(kernel_flock, funnel_state);
6301 }
6302 #endif /* __LP64__ */
6303
6304 return (_err);
6305 }
6306
6307 #if 0
6308 struct vnop_strategy_args {
6309 struct vnodeop_desc *a_desc;
6310 struct buf *a_bp;
6311 };
6312
6313 #endif /* 0*/
6314 errno_t
6315 VNOP_STRATEGY(struct buf *bp)
6316 {
6317 int _err;
6318 struct vnop_strategy_args a;
6319 a.a_desc = &vnop_strategy_desc;
6320 a.a_bp = bp;
6321 _err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
6322 return (_err);
6323 }
6324
6325 #if 0
6326 struct vnop_bwrite_args {
6327 struct vnodeop_desc *a_desc;
6328 buf_t a_bp;
6329 };
6330 #endif /* 0*/
6331 errno_t
6332 VNOP_BWRITE(struct buf *bp)
6333 {
6334 int _err;
6335 struct vnop_bwrite_args a;
6336 a.a_desc = &vnop_bwrite_desc;
6337 a.a_bp = bp;
6338 _err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
6339 return (_err);
6340 }
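
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): VNOP_STRATEGY and VNOP_BWRITE are normally reached through
 * the buffer cache rather than called directly; a metadata read via
 * buf_bread() ends up in the device vnode's strategy routine.
 */
#if 0 /* illustrative sketch */
static int
example_read_meta_block(vnode_t devvp, daddr64_t blkno, int blksize)
{
	buf_t bp;
	int error;

	error = buf_bread(devvp, blkno, blksize, NOCRED, &bp);
	if (error) {
		buf_brelse(bp);
		return (error);
	}
	/* ... inspect buf_dataptr(bp) here ... */
	buf_brelse(bp);
	return (0);
}
#endif /* illustrative sketch */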
6341
6342 #if 0
6343 struct vnop_kqfilt_add_args {
6344 struct vnodeop_desc *a_desc;
6345 struct vnode *a_vp;
6346 struct knote *a_kn;
6347 vfs_context_t a_context;
6348 };
6349 #endif
6350 errno_t
6351 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
6352 {
6353 int _err;
6354 struct vnop_kqfilt_add_args a;
6355 #ifndef __LP64__
6356 int thread_safe;
6357 int funnel_state = 0;
6358 #endif /* __LP64__ */
6359
6360 a.a_desc = VDESC(vnop_kqfilt_add);
6361 a.a_vp = vp;
6362 a.a_kn = kn;
6363 a.a_context = ctx;
6364
6365 #ifndef __LP64__
6366 thread_safe = THREAD_SAFE_FS(vp);
6367 if (!thread_safe) {
6368 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
6369 return (_err);
6370 }
6371 }
6372 #endif /* __LP64__ */
6373
6374 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
6375
6376 #ifndef __LP64__
6377 if (!thread_safe) {
6378 unlock_fsnode(vp, &funnel_state);
6379 }
6380 #endif /* __LP64__ */
6381
6382 return(_err);
6383 }
6384
6385 #if 0
6386 struct vnop_kqfilt_remove_args {
6387 struct vnodeop_desc *a_desc;
6388 struct vnode *a_vp;
6389 uintptr_t a_ident;
6390 vfs_context_t a_context;
6391 };
6392 #endif
6393 errno_t
6394 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
6395 {
6396 int _err;
6397 struct vnop_kqfilt_remove_args a;
6398 #ifndef __LP64__
6399 int thread_safe;
6400 int funnel_state = 0;
6401 #endif /* __LP64__ */
6402
6403 a.a_desc = VDESC(vnop_kqfilt_remove);
6404 a.a_vp = vp;
6405 a.a_ident = ident;
6406 a.a_context = ctx;
6407
6408 #ifndef __LP64__
6409 thread_safe = THREAD_SAFE_FS(vp);
6410 if (!thread_safe) {
6411 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
6412 return (_err);
6413 }
6414 }
6415 #endif /* __LP64__ */
6416
6417 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
6418
6419 #ifndef __LP64__
6420 if (!thread_safe) {
6421 unlock_fsnode(vp, &funnel_state);
6422 }
6423 #endif /* __LP64__ */
6424
6425 return(_err);
6426 }
6427
6428 errno_t
6429 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
6430 {
6431 int _err;
6432 struct vnop_monitor_args a;
6433 #ifndef __LP64__
6434 int thread_safe;
6435 int funnel_state = 0;
6436 #endif /* __LP64__ */
6437
6438 a.a_desc = VDESC(vnop_monitor);
6439 a.a_vp = vp;
6440 a.a_events = events;
6441 a.a_flags = flags;
6442 a.a_handle = handle;
6443 a.a_context = ctx;
6444
6445 #ifndef __LP64__
6446 thread_safe = THREAD_SAFE_FS(vp);
6447 if (!thread_safe) {
6448 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
6449 return (_err);
6450 }
6451 }
6452 #endif /* __LP64__ */
6453
6454 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
6455
6456 #ifndef __LP64__
6457 if (!thread_safe) {
6458 unlock_fsnode(vp, &funnel_state);
6459 }
6460 #endif /* __LP64__ */
6461
6462 return(_err);
6463 }
6464
6465 #if 0
6466 struct vnop_setlabel_args {
6467 struct vnodeop_desc *a_desc;
6468 struct vnode *a_vp;
6469 struct label *a_vl;
6470 vfs_context_t a_context;
6471 };
6472 #endif
6473 errno_t
6474 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
6475 {
6476 int _err;
6477 struct vnop_setlabel_args a;
6478 #ifndef __LP64__
6479 int thread_safe;
6480 int funnel_state = 0;
6481 #endif /* __LP64__ */
6482
6483 a.a_desc = VDESC(vnop_setlabel);
6484 a.a_vp = vp;
6485 a.a_vl = label;
6486 a.a_context = ctx;
6487
6488 #ifndef __LP64__
6489 thread_safe = THREAD_SAFE_FS(vp);
6490 if (!thread_safe) {
6491 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
6492 return (_err);
6493 }
6494 }
6495 #endif /* __LP64__ */
6496
6497 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
6498
6499 #ifndef __LP64__
6500 if (!thread_safe) {
6501 unlock_fsnode(vp, &funnel_state);
6502 }
6503 #endif /* __LP64__ */
6504
6505 return(_err);
6506 }
6507
6508
6509 #if NAMEDSTREAMS
6510 /*
6511 * Get a named stream
6512 */
6513 errno_t
6514 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
6515 {
6516 struct vnop_getnamedstream_args a;
6517
6518 #ifndef __LP64__
6519 if (!THREAD_SAFE_FS(vp))
6520 return (ENOTSUP);
6521 #endif /* __LP64__ */
6522
6523 a.a_desc = &vnop_getnamedstream_desc;
6524 a.a_vp = vp;
6525 a.a_svpp = svpp;
6526 a.a_name = name;
6527 a.a_operation = operation;
6528 a.a_flags = flags;
6529 a.a_context = ctx;
6530
6531 return (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
6532 }
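
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): opening a file's resource-fork named stream.  The caller is
 * assumed to vnode_put() the stream vnode when finished with it.
 */
#if 0 /* illustrative sketch */
static int
example_open_rsrc_stream(vnode_t vp, vnode_t *svpp, vfs_context_t ctx)
{
	return VNOP_GETNAMEDSTREAM(vp, svpp, XATTR_RESOURCEFORK_NAME,
	    NS_OPEN, 0, ctx);
}
#endif /* illustrative sketch */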
6533
6534 /*
6535 * Create a named stream
6536 */
6537 errno_t
6538 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
6539 {
6540 struct vnop_makenamedstream_args a;
6541
6542 #ifndef __LP64__
6543 if (!THREAD_SAFE_FS(vp))
6544 return (ENOTSUP);
6545 #endif /* __LP64__ */
6546
6547 a.a_desc = &vnop_makenamedstream_desc;
6548 a.a_vp = vp;
6549 a.a_svpp = svpp;
6550 a.a_name = name;
6551 a.a_flags = flags;
6552 a.a_context = ctx;
6553
6554 return (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
6555 }
6556
6557
6558 /*
6559 * Remove a named stream
6560 */
6561 errno_t
6562 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6563 {
6564 struct vnop_removenamedstream_args a;
6565
6566 #ifndef __LP64__
6567 if (!THREAD_SAFE_FS(vp))
6568 return (ENOTSUP);
6569 #endif /* __LP64__ */
6570
6571 a.a_desc = &vnop_removenamedstream_desc;
6572 a.a_vp = vp;
6573 a.a_svp = svp;
6574 a.a_name = name;
6575 a.a_flags = flags;
6576 a.a_context = ctx;
6577
6578 return (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6579 }
6580 #endif