]> git.saurik.com Git - apple/xnu.git/blob - bsd/vfs/kpi_vfs.c
xnu-1228.15.4.tar.gz
[apple/xnu.git] / bsd / vfs / kpi_vfs.c
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/vnode_internal.h>
88 #include <sys/stat.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
91 #include <sys/buf.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
95 #include <sys/mbuf.h>
96 #include <sys/syslog.h>
97 #include <sys/ubc.h>
98 #include <sys/vm.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/fsevents.h>
102 #include <sys/user.h>
103 #include <sys/lockf.h>
104 #include <sys/xattr.h>
105
106 #include <kern/assert.h>
107 #include <kern/kalloc.h>
108 #include <kern/task.h>
109
110 #include <libkern/OSByteOrder.h>
111
112 #include <miscfs/specfs/specdev.h>
113
114 #include <mach/mach_types.h>
115 #include <mach/memory_object_types.h>
116 #include <mach/task.h>
117
118 #if CONFIG_MACF
119 #include <security/mac_framework.h>
120 #endif
121
122 #define ESUCCESS 0
123 #undef mount_t
124 #undef vnode_t
125
126 #define COMPAT_ONLY
127
128
129 #define THREAD_SAFE_FS(VP) \
130 ((VP)->v_unsafefs ? 0 : 1)
131
132 #define NATIVE_XATTR(VP) \
133 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
134
135 static void xattrfile_remove(vnode_t dvp, const char *basename,
136 vfs_context_t ctx, int thread_safe, int force);
137 static void xattrfile_setattr(vnode_t dvp, const char * basename,
138 struct vnode_attr * vap, vfs_context_t ctx,
139 int thread_safe);
140
141
142 static void
143 vnode_setneedinactive(vnode_t vp)
144 {
145 cache_purge(vp);
146
147 vnode_lock_spin(vp);
148 vp->v_lflag |= VL_NEEDINACTIVE;
149 vnode_unlock(vp);
150 }
151
152
153 int
154 lock_fsnode(vnode_t vp, int *funnel_state)
155 {
156 if (funnel_state)
157 *funnel_state = thread_funnel_set(kernel_flock, TRUE);
158
159 if (vp->v_unsafefs) {
160 if (vp->v_unsafefs->fsnodeowner == current_thread()) {
161 vp->v_unsafefs->fsnode_count++;
162 } else {
163 lck_mtx_lock(&vp->v_unsafefs->fsnodelock);
164
165 if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
166 lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
167
168 if (funnel_state)
169 (void) thread_funnel_set(kernel_flock, *funnel_state);
170 return (ENOENT);
171 }
172 vp->v_unsafefs->fsnodeowner = current_thread();
173 vp->v_unsafefs->fsnode_count = 1;
174 }
175 }
176 return (0);
177 }
178
179
180 void
181 unlock_fsnode(vnode_t vp, int *funnel_state)
182 {
183 if (vp->v_unsafefs) {
184 if (--vp->v_unsafefs->fsnode_count == 0) {
185 vp->v_unsafefs->fsnodeowner = NULL;
186 lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
187 }
188 }
189 if (funnel_state)
190 (void) thread_funnel_set(kernel_flock, *funnel_state);
191 }
192
193
194
195 /* ====================================================================== */
196 /* ************ EXTERNAL KERNEL APIS ********************************** */
197 /* ====================================================================== */
198
199 /*
200 * prototypes for exported VFS operations
201 */
202 int
203 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
204 {
205 int error;
206 int thread_safe;
207 int funnel_state = 0;
208
209 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
210 return(ENOTSUP);
211
212 thread_safe = mp->mnt_vtable->vfc_threadsafe;
213
214
215 if (!thread_safe) {
216 funnel_state = thread_funnel_set(kernel_flock, TRUE);
217 }
218
219 if (vfs_context_is64bit(ctx)) {
220 if (vfs_64bitready(mp)) {
221 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
222 }
223 else {
224 error = ENOTSUP;
225 }
226 }
227 else {
228 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
229 }
230
231 if (!thread_safe) {
232 (void) thread_funnel_set(kernel_flock, funnel_state);
233 }
234 return (error);
235 }
236
237 int
238 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
239 {
240 int error;
241 int thread_safe;
242 int funnel_state = 0;
243
244 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
245 return(ENOTSUP);
246
247 thread_safe = mp->mnt_vtable->vfc_threadsafe;
248
249 if (!thread_safe) {
250 funnel_state = thread_funnel_set(kernel_flock, TRUE);
251 }
252 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
253 if (!thread_safe) {
254 (void) thread_funnel_set(kernel_flock, funnel_state);
255 }
256 return (error);
257 }
258
259 int
260 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
261 {
262 int error;
263 int thread_safe;
264 int funnel_state = 0;
265
266 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
267 return(ENOTSUP);
268
269 thread_safe = mp->mnt_vtable->vfc_threadsafe;
270
271 if (!thread_safe) {
272 funnel_state = thread_funnel_set(kernel_flock, TRUE);
273 }
274 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
275 if (!thread_safe) {
276 (void) thread_funnel_set(kernel_flock, funnel_state);
277 }
278 return (error);
279 }
280
281 /*
282 * Returns: 0 Success
283 * ENOTSUP Not supported
284 * <vfs_root>:ENOENT
285 * <vfs_root>:???
286 *
287 * Note: The return codes from the underlying VFS's root routine can't
288 * be fully enumerated here, since third party VFS authors may not
289 * limit their error returns to the ones documented here, even
290 * though this may result in some programs functioning incorrectly.
291 *
292 * The return codes documented above are those which may currently
293 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
294 * for a call to hfs_vget on the volume mount poit, not including
295 * additional error codes which may be propagated from underlying
296 * routines called by hfs_vget.
297 */
298 int
299 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
300 {
301 int error;
302 int thread_safe;
303 int funnel_state = 0;
304
305 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
306 return(ENOTSUP);
307
308 if (ctx == NULL) {
309 ctx = vfs_context_current();
310 }
311 thread_safe = mp->mnt_vtable->vfc_threadsafe;
312
313 if (!thread_safe) {
314 funnel_state = thread_funnel_set(kernel_flock, TRUE);
315 }
316 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
317 if (!thread_safe) {
318 (void) thread_funnel_set(kernel_flock, funnel_state);
319 }
320 return (error);
321 }
322
323 int
324 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
325 {
326 int error;
327 int thread_safe;
328 int funnel_state = 0;
329
330 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
331 return(ENOTSUP);
332
333 thread_safe = mp->mnt_vtable->vfc_threadsafe;
334
335 if (!thread_safe) {
336 funnel_state = thread_funnel_set(kernel_flock, TRUE);
337 }
338 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
339 if (!thread_safe) {
340 (void) thread_funnel_set(kernel_flock, funnel_state);
341 }
342 return (error);
343 }
344
345 int
346 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
347 {
348 int error;
349 int thread_safe;
350 int funnel_state = 0;
351
352 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
353 return(ENOTSUP);
354
355 if (ctx == NULL) {
356 ctx = vfs_context_current();
357 }
358
359 thread_safe = mp->mnt_vtable->vfc_threadsafe;
360
361 if (!thread_safe) {
362 funnel_state = thread_funnel_set(kernel_flock, TRUE);
363 }
364 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
365 if (!thread_safe) {
366 (void) thread_funnel_set(kernel_flock, funnel_state);
367 }
368 return(error);
369 }
370
371 int
372 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
373 {
374 int error;
375 int thread_safe;
376 int funnel_state = 0;
377
378 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
379 return(ENOTSUP);
380
381 if (ctx == NULL) {
382 ctx = vfs_context_current();
383 }
384
385 thread_safe = mp->mnt_vtable->vfc_threadsafe;
386
387 if (!thread_safe) {
388 funnel_state = thread_funnel_set(kernel_flock, TRUE);
389 }
390 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
391 if (!thread_safe) {
392 (void) thread_funnel_set(kernel_flock, funnel_state);
393 }
394 return(error);
395 }
396
397 int
398 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
399 {
400 int error;
401 int thread_safe;
402 int funnel_state = 0;
403
404 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
405 return(ENOTSUP);
406
407 if (ctx == NULL) {
408 ctx = vfs_context_current();
409 }
410 thread_safe = mp->mnt_vtable->vfc_threadsafe;
411
412 if (!thread_safe) {
413 funnel_state = thread_funnel_set(kernel_flock, TRUE);
414 }
415 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
416 if (!thread_safe) {
417 (void) thread_funnel_set(kernel_flock, funnel_state);
418 }
419 return(error);
420 }
421
422 int
423 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
424 {
425 int error;
426 int thread_safe;
427 int funnel_state = 0;
428
429 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
430 return(ENOTSUP);
431
432 if (ctx == NULL) {
433 ctx = vfs_context_current();
434 }
435 thread_safe = mp->mnt_vtable->vfc_threadsafe;
436
437 if (!thread_safe) {
438 funnel_state = thread_funnel_set(kernel_flock, TRUE);
439 }
440 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
441 if (!thread_safe) {
442 (void) thread_funnel_set(kernel_flock, funnel_state);
443 }
444 return(error);
445 }
446
447 int
448 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t ctx)
449 {
450 int error;
451 int thread_safe;
452 int funnel_state = 0;
453
454 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
455 return(ENOTSUP);
456
457 if (ctx == NULL) {
458 ctx = vfs_context_current();
459 }
460 thread_safe = mp->mnt_vtable->vfc_threadsafe;
461
462 if (!thread_safe) {
463 funnel_state = thread_funnel_set(kernel_flock, TRUE);
464 }
465 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
466 if (!thread_safe) {
467 (void) thread_funnel_set(kernel_flock, funnel_state);
468 }
469 return(error);
470 }
471
472 int
473 VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ctx)
474 {
475 int error;
476 int thread_safe;
477 int funnel_state = 0;
478
479 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
480 return(ENOTSUP);
481
482 if (ctx == NULL) {
483 ctx = vfs_context_current();
484 }
485 thread_safe = THREAD_SAFE_FS(vp);
486
487 if (!thread_safe) {
488 funnel_state = thread_funnel_set(kernel_flock, TRUE);
489 }
490 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
491 if (!thread_safe) {
492 (void) thread_funnel_set(kernel_flock, funnel_state);
493 }
494 return(error);
495 }
496
497
498 /* returns a copy of vfs type name for the mount_t */
499 void
500 vfs_name(mount_t mp, char * buffer)
501 {
502 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
503 }
504
505 /* returns vfs type number for the mount_t */
506 int
507 vfs_typenum(mount_t mp)
508 {
509 return(mp->mnt_vtable->vfc_typenum);
510 }
511
512
513 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
514 uint64_t
515 vfs_flags(mount_t mp)
516 {
517 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
518 }
519
520 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
521 void
522 vfs_setflags(mount_t mp, uint64_t flags)
523 {
524 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
525
526 mount_lock(mp);
527 mp->mnt_flag |= lflags;
528 mount_unlock(mp);
529 }
530
531 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
532 void
533 vfs_clearflags(mount_t mp , uint64_t flags)
534 {
535 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
536
537 mount_lock(mp);
538 mp->mnt_flag &= ~lflags;
539 mount_unlock(mp);
540 }
541
542 /* Is the mount_t ronly and upgrade read/write requested? */
543 int
544 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
545 {
546 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
547 }
548
549
550 /* Is the mount_t mounted ronly */
551 int
552 vfs_isrdonly(mount_t mp)
553 {
554 return (mp->mnt_flag & MNT_RDONLY);
555 }
556
557 /* Is the mount_t mounted for filesystem synchronous writes? */
558 int
559 vfs_issynchronous(mount_t mp)
560 {
561 return (mp->mnt_flag & MNT_SYNCHRONOUS);
562 }
563
564 /* Is the mount_t mounted read/write? */
565 int
566 vfs_isrdwr(mount_t mp)
567 {
568 return ((mp->mnt_flag & MNT_RDONLY) == 0);
569 }
570
571
572 /* Is mount_t marked for update (ie MNT_UPDATE) */
573 int
574 vfs_isupdate(mount_t mp)
575 {
576 return (mp->mnt_flag & MNT_UPDATE);
577 }
578
579
580 /* Is mount_t marked for reload (ie MNT_RELOAD) */
581 int
582 vfs_isreload(mount_t mp)
583 {
584 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
585 }
586
587 /* Is mount_t marked for reload (ie MNT_FORCE) */
588 int
589 vfs_isforce(mount_t mp)
590 {
591 if ((mp->mnt_lflag & MNT_LFORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
592 return(1);
593 else
594 return(0);
595 }
596
597 int
598 vfs_64bitready(mount_t mp)
599 {
600 if ((mp->mnt_vtable->vfc_64bitready))
601 return(1);
602 else
603 return(0);
604 }
605
606
607 int
608 vfs_authcache_ttl(mount_t mp)
609 {
610 if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) )
611 return (mp->mnt_authcache_ttl);
612 else
613 return (CACHED_RIGHT_INFINITE_TTL);
614 }
615
616 void
617 vfs_setauthcache_ttl(mount_t mp, int ttl)
618 {
619 mount_lock(mp);
620 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
621 mp->mnt_authcache_ttl = ttl;
622 mount_unlock(mp);
623 }
624
625 void
626 vfs_clearauthcache_ttl(mount_t mp)
627 {
628 mount_lock(mp);
629 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
630 /*
631 * back to the default TTL value in case
632 * MNTK_AUTH_OPAQUE is set on this mount
633 */
634 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
635 mount_unlock(mp);
636 }
637
638 void
639 vfs_markdependency(mount_t mp)
640 {
641 proc_t p = current_proc();
642 mount_lock(mp);
643 mp->mnt_dependent_process = p;
644 mp->mnt_dependent_pid = proc_pid(p);
645 mount_unlock(mp);
646 }
647
648
649 int
650 vfs_authopaque(mount_t mp)
651 {
652 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
653 return(1);
654 else
655 return(0);
656 }
657
658 int
659 vfs_authopaqueaccess(mount_t mp)
660 {
661 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
662 return(1);
663 else
664 return(0);
665 }
666
667 void
668 vfs_setauthopaque(mount_t mp)
669 {
670 mount_lock(mp);
671 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
672 mount_unlock(mp);
673 }
674
675 void
676 vfs_setauthopaqueaccess(mount_t mp)
677 {
678 mount_lock(mp);
679 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
680 mount_unlock(mp);
681 }
682
683 void
684 vfs_clearauthopaque(mount_t mp)
685 {
686 mount_lock(mp);
687 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
688 mount_unlock(mp);
689 }
690
691 void
692 vfs_clearauthopaqueaccess(mount_t mp)
693 {
694 mount_lock(mp);
695 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
696 mount_unlock(mp);
697 }
698
699 void
700 vfs_setextendedsecurity(mount_t mp)
701 {
702 mount_lock(mp);
703 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
704 mount_unlock(mp);
705 }
706
707 void
708 vfs_clearextendedsecurity(mount_t mp)
709 {
710 mount_lock(mp);
711 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
712 mount_unlock(mp);
713 }
714
715 int
716 vfs_extendedsecurity(mount_t mp)
717 {
718 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
719 }
720
721 /* returns the max size of short symlink in this mount_t */
722 uint32_t
723 vfs_maxsymlen(mount_t mp)
724 {
725 return(mp->mnt_maxsymlinklen);
726 }
727
728 /* set max size of short symlink on mount_t */
729 void
730 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
731 {
732 mp->mnt_maxsymlinklen = symlen;
733 }
734
735 /* return a pointer to the RO vfs_statfs associated with mount_t */
736 struct vfsstatfs *
737 vfs_statfs(mount_t mp)
738 {
739 return(&mp->mnt_vfsstat);
740 }
741
742 int
743 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
744 {
745 int error;
746
747 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
748 return(error);
749
750 /*
751 * If we have a filesystem create time, use it to default some others.
752 */
753 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
754 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
755 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
756 }
757
758 return(0);
759 }
760
761 int
762 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
763 {
764 int error;
765
766 if (vfs_isrdonly(mp))
767 return EROFS;
768
769 error = VFS_SETATTR(mp, vfa, ctx);
770
771 /*
772 * If we had alternate ways of setting vfs attributes, we'd
773 * fall back here.
774 */
775
776 return error;
777 }
778
779 /* return the private data handle stored in mount_t */
780 void *
781 vfs_fsprivate(mount_t mp)
782 {
783 return(mp->mnt_data);
784 }
785
786 /* set the private data handle in mount_t */
787 void
788 vfs_setfsprivate(mount_t mp, void *mntdata)
789 {
790 mount_lock(mp);
791 mp->mnt_data = mntdata;
792 mount_unlock(mp);
793 }
794
795
796 /*
797 * return the block size of the underlying
798 * device associated with mount_t
799 */
800 int
801 vfs_devblocksize(mount_t mp) {
802
803 return(mp->mnt_devblocksize);
804 }
805
806
807 /*
808 * return the io attributes associated with mount_t
809 */
810 void
811 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
812 {
813 if (mp == NULL) {
814 ioattrp->io_maxreadcnt = MAXPHYS;
815 ioattrp->io_maxwritecnt = MAXPHYS;
816 ioattrp->io_segreadcnt = 32;
817 ioattrp->io_segwritecnt = 32;
818 ioattrp->io_maxsegreadsize = MAXPHYS;
819 ioattrp->io_maxsegwritesize = MAXPHYS;
820 ioattrp->io_devblocksize = DEV_BSIZE;
821 ioattrp->io_flags = 0;
822 } else {
823 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
824 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
825 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
826 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
827 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
828 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
829 ioattrp->io_devblocksize = mp->mnt_devblocksize;
830 ioattrp->io_flags = mp->mnt_ioflags;
831 }
832 ioattrp->io_reserved[0] = NULL;
833 ioattrp->io_reserved[1] = NULL;
834 }
835
836
837 /*
838 * set the IO attributes associated with mount_t
839 */
840 void
841 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
842 {
843 if (mp == NULL)
844 return;
845 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
846 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
847 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
848 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
849 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
850 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
851 mp->mnt_devblocksize = ioattrp->io_devblocksize;
852 mp->mnt_ioflags = ioattrp->io_flags;
853 }
854
855 /*
856 * Add a new filesystem into the kernel specified in passed in
857 * vfstable structure. It fills in the vnode
858 * dispatch vector that is to be passed to when vnodes are created.
859 * It returns a handle which is to be used to when the FS is to be removed
860 */
861 typedef int (*PFI)(void *);
862 extern int vfs_opv_numops;
863 errno_t
864 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
865 {
866 #pragma unused(data)
867 struct vfstable *newvfstbl = NULL;
868 int i,j;
869 int (***opv_desc_vector_p)(void *);
870 int (**opv_desc_vector)(void *);
871 struct vnodeopv_entry_desc *opve_descp;
872 int desccount;
873 int descsize;
874 PFI *descptr;
875
876 /*
877 * This routine is responsible for all the initialization that would
878 * ordinarily be done as part of the system startup;
879 */
880
881 if (vfe == (struct vfs_fsentry *)0)
882 return(EINVAL);
883
884 desccount = vfe->vfe_vopcnt;
885 if ((desccount <=0) || ((desccount > 5)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
886 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
887 return(EINVAL);
888
889
890 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
891 M_WAITOK);
892 bzero(newvfstbl, sizeof(struct vfstable));
893 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
894 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
895 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
896 newvfstbl->vfc_typenum = maxvfsconf++;
897 else
898 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
899
900 newvfstbl->vfc_refcount = 0;
901 newvfstbl->vfc_flags = 0;
902 newvfstbl->vfc_mountroot = NULL;
903 newvfstbl->vfc_next = NULL;
904 newvfstbl->vfc_threadsafe = 0;
905 newvfstbl->vfc_vfsflags = 0;
906 if (vfe->vfe_flags & VFS_TBL64BITREADY)
907 newvfstbl->vfc_64bitready= 1;
908 if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
909 newvfstbl->vfc_threadsafe= 1;
910 if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
911 newvfstbl->vfc_threadsafe= 1;
912 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
913 newvfstbl->vfc_flags |= MNT_LOCAL;
914 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0)
915 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
916 else
917 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
918
919 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR)
920 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
921 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT)
922 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
923 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED)
924 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
925 if (vfe->vfe_flags & VFS_TBLNOMACLABEL)
926 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
927
928 /*
929 * Allocate and init the vectors.
930 * Also handle backwards compatibility.
931 *
932 * We allocate one large block to hold all <desccount>
933 * vnode operation vectors stored contiguously.
934 */
935 /* XXX - shouldn't be M_TEMP */
936
937 descsize = desccount * vfs_opv_numops * sizeof(PFI);
938 MALLOC(descptr, PFI *, descsize,
939 M_TEMP, M_WAITOK);
940 bzero(descptr, descsize);
941
942 newvfstbl->vfc_descptr = descptr;
943 newvfstbl->vfc_descsize = descsize;
944
945
946 for (i= 0; i< desccount; i++ ) {
947 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
948 /*
949 * Fill in the caller's pointer to the start of the i'th vector.
950 * They'll need to supply it when calling vnode_create.
951 */
952 opv_desc_vector = descptr + i * vfs_opv_numops;
953 *opv_desc_vector_p = opv_desc_vector;
954
955 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
956 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
957
958 /*
959 * Sanity check: is this operation listed
960 * in the list of operations? We check this
961 * by seeing if its offest is zero. Since
962 * the default routine should always be listed
963 * first, it should be the only one with a zero
964 * offset. Any other operation with a zero
965 * offset is probably not listed in
966 * vfs_op_descs, and so is probably an error.
967 *
968 * A panic here means the layer programmer
969 * has committed the all-too common bug
970 * of adding a new operation to the layer's
971 * list of vnode operations but
972 * not adding the operation to the system-wide
973 * list of supported operations.
974 */
975 if (opve_descp->opve_op->vdesc_offset == 0 &&
976 opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
977 printf("vfs_fsadd: operation %s not listed in %s.\n",
978 opve_descp->opve_op->vdesc_name,
979 "vfs_op_descs");
980 panic("vfs_fsadd: bad operation");
981 }
982 /*
983 * Fill in this entry.
984 */
985 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
986 opve_descp->opve_impl;
987 }
988
989
990 /*
991 * Finally, go back and replace unfilled routines
992 * with their default. (Sigh, an O(n^3) algorithm. I
993 * could make it better, but that'd be work, and n is small.)
994 */
995 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
996
997 /*
998 * Force every operations vector to have a default routine.
999 */
1000 opv_desc_vector = *opv_desc_vector_p;
1001 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
1002 panic("vfs_fsadd: operation vector without default routine.");
1003 for (j = 0; j < vfs_opv_numops; j++)
1004 if (opv_desc_vector[j] == NULL)
1005 opv_desc_vector[j] =
1006 opv_desc_vector[VOFFSET(vnop_default)];
1007
1008 } /* end of each vnodeopv_desc parsing */
1009
1010
1011
1012 *handle = vfstable_add(newvfstbl);
1013
1014 if (newvfstbl->vfc_typenum <= maxvfsconf )
1015 maxvfsconf = newvfstbl->vfc_typenum + 1;
1016 numused_vfsslots++;
1017
1018 if (newvfstbl->vfc_vfsops->vfs_init)
1019 (*newvfstbl->vfc_vfsops->vfs_init)((struct vfsconf *)handle);
1020
1021 FREE(newvfstbl, M_TEMP);
1022
1023 return(0);
1024 }
1025
1026 /*
1027 * Removes the filesystem from kernel.
1028 * The argument passed in is the handle that was given when
1029 * file system was added
1030 */
1031 errno_t
1032 vfs_fsremove(vfstable_t handle)
1033 {
1034 struct vfstable * vfstbl = (struct vfstable *)handle;
1035 void *old_desc = NULL;
1036 errno_t err;
1037
1038 /* Preflight check for any mounts */
1039 mount_list_lock();
1040 if ( vfstbl->vfc_refcount != 0 ) {
1041 mount_list_unlock();
1042 return EBUSY;
1043 }
1044 mount_list_unlock();
1045
1046 /*
1047 * save the old descriptor; the free cannot occur unconditionally,
1048 * since vfstable_del() may fail.
1049 */
1050 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1051 old_desc = vfstbl->vfc_descptr;
1052 }
1053 err = vfstable_del(vfstbl);
1054
1055 /* free the descriptor if the delete was successful */
1056 if (err == 0 && old_desc) {
1057 FREE(old_desc, M_TEMP);
1058 }
1059
1060 return(err);
1061 }
1062
1063 /*
1064 * This returns a reference to mount_t
1065 * which should be dropped using vfs_mountrele().
1066 * Not doing so will leak a mountpoint
1067 * and associated data structures.
1068 */
1069 errno_t
1070 vfs_mountref(__unused mount_t mp ) /* gives a reference */
1071 {
1072 return(0);
1073 }
1074
1075 /* This drops the reference on mount_t that was acquired */
1076 errno_t
1077 vfs_mountrele(__unused mount_t mp ) /* drops reference */
1078 {
1079 return(0);
1080 }
1081
1082 int
1083 vfs_context_pid(vfs_context_t ctx)
1084 {
1085 return (proc_pid(vfs_context_proc(ctx)));
1086 }
1087
1088 int
1089 vfs_context_suser(vfs_context_t ctx)
1090 {
1091 return (suser(ctx->vc_ucred, NULL));
1092 }
1093
1094 /*
1095 * XXX Signals should be tied to threads, not processes, for most uses of this
1096 * XXX call.
1097 */
1098 int
1099 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1100 {
1101 proc_t p = vfs_context_proc(ctx);
1102 if (p)
1103 return(proc_pendingsignals(p, mask));
1104 return(0);
1105 }
1106
1107 int
1108 vfs_context_is64bit(vfs_context_t ctx)
1109 {
1110 proc_t proc = vfs_context_proc(ctx);
1111
1112 if (proc)
1113 return(proc_is64bit(proc));
1114 return(0);
1115 }
1116
1117
1118 /*
1119 * vfs_context_proc
1120 *
1121 * Description: Given a vfs_context_t, return the proc_t associated with it.
1122 *
1123 * Parameters: vfs_context_t The context to use
1124 *
1125 * Returns: proc_t The process for this context
1126 *
1127 * Notes: This function will return the current_proc() if any of the
1128 * following conditions are true:
1129 *
1130 * o The supplied context pointer is NULL
1131 * o There is no Mach thread associated with the context
1132 * o There is no Mach task associated with the Mach thread
1133 * o There is no proc_t associated with the Mach task
1134 * o The proc_t has no per process open file table
1135 * o The proc_t is post-vfork()
1136 *
1137 * This causes this function to return a value matching as
1138 * closely as possible the previous behaviour, while at the
1139 * same time avoiding the task lending that results from vfork()
1140 */
1141 proc_t
1142 vfs_context_proc(vfs_context_t ctx)
1143 {
1144 proc_t proc = NULL;
1145
1146 if (ctx != NULL && ctx->vc_thread != NULL)
1147 proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1148 if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
1149 proc = NULL;
1150
1151 return(proc == NULL ? current_proc() : proc);
1152 }
1153
1154 /*
1155 * vfs_context_get_special_port
1156 *
1157 * Description: Return the requested special port from the task associated
1158 * with the given context.
1159 *
1160 * Parameters: vfs_context_t The context to use
1161 * int Index of special port
1162 * ipc_port_t * Pointer to returned port
1163 *
1164 * Returns: kern_return_t see task_get_special_port()
1165 */
1166 kern_return_t
1167 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1168 {
1169 task_t task = NULL;
1170
1171 if (ctx != NULL && ctx->vc_thread != NULL)
1172 task = get_threadtask(ctx->vc_thread);
1173
1174 return task_get_special_port(task, which, portp);
1175 }
1176
1177 /*
1178 * vfs_context_set_special_port
1179 *
1180 * Description: Set the requested special port in the task associated
1181 * with the given context.
1182 *
1183 * Parameters: vfs_context_t The context to use
1184 * int Index of special port
1185 * ipc_port_t New special port
1186 *
1187 * Returns: kern_return_t see task_set_special_port()
1188 */
1189 kern_return_t
1190 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1191 {
1192 task_t task = NULL;
1193
1194 if (ctx != NULL && ctx->vc_thread != NULL)
1195 task = get_threadtask(ctx->vc_thread);
1196
1197 return task_set_special_port(task, which, port);
1198 }
1199
1200 /*
1201 * vfs_context_thread
1202 *
1203 * Description: Return the Mach thread associated with a vfs_context_t
1204 *
1205 * Parameters: vfs_context_t The context to use
1206 *
1207 * Returns: thread_t The thread for this context, or
1208 * NULL, if there is not one.
1209 *
1210 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1211 * as a result of a static vfs_context_t declaration in a function
1212 * and will result in this function returning NULL.
1213 *
1214 * This is intentional; this function should NOT return the
1215 * current_thread() in this case.
1216 */
1217 thread_t
1218 vfs_context_thread(vfs_context_t ctx)
1219 {
1220 return(ctx->vc_thread);
1221 }
1222
1223
1224 /*
1225 * vfs_context_cwd
1226 *
1227 * Description: Returns a reference on the vnode for the current working
1228 * directory for the supplied context
1229 *
1230 * Parameters: vfs_context_t The context to use
1231 *
1232 * Returns: vnode_t The current working directory
1233 * for this context
1234 *
1235 * Notes: The function first attempts to obtain the current directory
1236 * from the thread, and if it is not present there, falls back
1237 * to obtaining it from the process instead. If it can't be
1238 * obtained from either place, we return NULLVP.
1239 */
1240 vnode_t
1241 vfs_context_cwd(vfs_context_t ctx)
1242 {
1243 vnode_t cwd = NULLVP;
1244
1245 if(ctx != NULL && ctx->vc_thread != NULL) {
1246 uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1247 proc_t proc;
1248
1249 /*
1250 * Get the cwd from the thread; if there isn't one, get it
1251 * from the process, instead.
1252 */
1253 if ((cwd = uth->uu_cdir) == NULLVP &&
1254 (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
1255 proc->p_fd != NULL)
1256 cwd = proc->p_fd->fd_cdir;
1257 }
1258
1259 return(cwd);
1260 }
1261
1262
1263 vfs_context_t
1264 vfs_context_create(vfs_context_t ctx)
1265 {
1266 vfs_context_t newcontext;
1267
1268 newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));
1269
1270 if (newcontext) {
1271 kauth_cred_t safecred;
1272 if (ctx) {
1273 newcontext->vc_thread = ctx->vc_thread;
1274 safecred = ctx->vc_ucred;
1275 } else {
1276 newcontext->vc_thread = current_thread();
1277 safecred = kauth_cred_get();
1278 }
1279 if (IS_VALID_CRED(safecred))
1280 kauth_cred_ref(safecred);
1281 newcontext->vc_ucred = safecred;
1282 return(newcontext);
1283 }
1284 return(NULL);
1285 }
1286
1287
1288 vfs_context_t
1289 vfs_context_current(void)
1290 {
1291 vfs_context_t ctx = NULL;
1292 volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
1293
1294 if (ut != NULL ) {
1295 if (ut->uu_context.vc_ucred != NULL) {
1296 ctx = &ut->uu_context;
1297 }
1298 }
1299
1300 return(ctx == NULL ? vfs_context_kernel() : ctx);
1301 }
1302
1303
1304 /*
1305 * XXX Do not ask
1306 *
1307 * Dangerous hack - adopt the first kernel thread as the current thread, to
1308 * get to the vfs_context_t in the uthread associated with a kernel thread.
1309 * This is used by UDF to make the call into IOCDMediaBSDClient,
1310 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1311 * ioctl() is being called from kernel or user space (and all this because
1312 * we do not pass threads into our ioctl()'s, instead of processes).
1313 *
1314 * This is also used by imageboot_setup(), called early from bsd_init() after
1315 * kernproc has been given a credential.
1316 *
1317 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1318 * of many Mach headers to do the reference directly rather than indirectly;
1319 * we will need to forego this convenience when we reture proc_thread().
1320 */
1321 static struct vfs_context kerncontext;
1322 vfs_context_t
1323 vfs_context_kernel(void)
1324 {
1325 if (kerncontext.vc_ucred == NOCRED)
1326 kerncontext.vc_ucred = kernproc->p_ucred;
1327 if (kerncontext.vc_thread == NULL)
1328 kerncontext.vc_thread = proc_thread(kernproc);
1329
1330 return(&kerncontext);
1331 }
1332
1333
1334 int
1335 vfs_context_rele(vfs_context_t ctx)
1336 {
1337 if (ctx) {
1338 if (IS_VALID_CRED(ctx->vc_ucred))
1339 kauth_cred_unref(&ctx->vc_ucred);
1340 kfree(ctx, sizeof(struct vfs_context));
1341 }
1342 return(0);
1343 }
1344
1345
1346 ucred_t
1347 vfs_context_ucred(vfs_context_t ctx)
1348 {
1349 return (ctx->vc_ucred);
1350 }
1351
1352 /*
1353 * Return true if the context is owned by the superuser.
1354 */
1355 int
1356 vfs_context_issuser(vfs_context_t ctx)
1357 {
1358 return(kauth_cred_issuser(vfs_context_ucred(ctx)));
1359 }
1360
1361
1362 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1363
1364
1365 /*
1366 * Convert between vnode types and inode formats (since POSIX.1
1367 * defines mode word of stat structure in terms of inode formats).
1368 */
1369 enum vtype
1370 vnode_iftovt(int mode)
1371 {
1372 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1373 }
1374
1375 int
1376 vnode_vttoif(enum vtype indx)
1377 {
1378 return(vttoif_tab[(int)(indx)]);
1379 }
1380
1381 int
1382 vnode_makeimode(int indx, int mode)
1383 {
1384 return (int)(VTTOIF(indx) | (mode));
1385 }
1386
1387
1388 /*
1389 * vnode manipulation functions.
1390 */
1391
1392 /* returns system root vnode reference; It should be dropped using vrele() */
1393 vnode_t
1394 vfs_rootvnode(void)
1395 {
1396 int error;
1397
1398 error = vnode_get(rootvnode);
1399 if (error)
1400 return ((vnode_t)0);
1401 else
1402 return rootvnode;
1403 }
1404
1405
1406 uint32_t
1407 vnode_vid(vnode_t vp)
1408 {
1409 return ((uint32_t)(vp->v_id));
1410 }
1411
1412 /* returns a mount reference; drop it with vfs_mountrelease() */
1413 mount_t
1414 vnode_mount(vnode_t vp)
1415 {
1416 return (vp->v_mount);
1417 }
1418
1419 /* returns a mount reference iff vnode_t is a dir and is a mount point */
1420 mount_t
1421 vnode_mountedhere(vnode_t vp)
1422 {
1423 mount_t mp;
1424
1425 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1426 (mp->mnt_vnodecovered == vp))
1427 return (mp);
1428 else
1429 return (mount_t)NULL;
1430 }
1431
1432 /* returns vnode type of vnode_t */
1433 enum vtype
1434 vnode_vtype(vnode_t vp)
1435 {
1436 return (vp->v_type);
1437 }
1438
1439 /* returns FS specific node saved in vnode */
1440 void *
1441 vnode_fsnode(vnode_t vp)
1442 {
1443 return (vp->v_data);
1444 }
1445
1446 void
1447 vnode_clearfsnode(vnode_t vp)
1448 {
1449 vp->v_data = NULL;
1450 }
1451
1452 dev_t
1453 vnode_specrdev(vnode_t vp)
1454 {
1455 return(vp->v_rdev);
1456 }
1457
1458
1459 /* Accessor functions */
1460 /* is vnode_t a root vnode */
1461 int
1462 vnode_isvroot(vnode_t vp)
1463 {
1464 return ((vp->v_flag & VROOT)? 1 : 0);
1465 }
1466
1467 /* is vnode_t a system vnode */
1468 int
1469 vnode_issystem(vnode_t vp)
1470 {
1471 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1472 }
1473
1474 /* is vnode_t a swap file vnode */
1475 int
1476 vnode_isswap(vnode_t vp)
1477 {
1478 return ((vp->v_flag & VSWAP)? 1 : 0);
1479 }
1480
1481 /* if vnode_t mount operation in progress */
1482 int
1483 vnode_ismount(vnode_t vp)
1484 {
1485 return ((vp->v_flag & VMOUNT)? 1 : 0);
1486 }
1487
1488 /* is this vnode under recyle now */
1489 int
1490 vnode_isrecycled(vnode_t vp)
1491 {
1492 int ret;
1493
1494 vnode_lock_spin(vp);
1495 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1496 vnode_unlock(vp);
1497 return(ret);
1498 }
1499
1500 /* is vnode_t marked to not keep data cached once it's been consumed */
1501 int
1502 vnode_isnocache(vnode_t vp)
1503 {
1504 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1505 }
1506
1507 /*
1508 * has sequential readahead been disabled on this vnode
1509 */
1510 int
1511 vnode_isnoreadahead(vnode_t vp)
1512 {
1513 return ((vp->v_flag & VRAOFF)? 1 : 0);
1514 }
1515
1516 int
1517 vnode_is_openevt(vnode_t vp)
1518 {
1519 return ((vp->v_flag & VOPENEVT)? 1 : 0);
1520 }
1521
1522 /* is vnode_t a standard one? */
1523 int
1524 vnode_isstandard(vnode_t vp)
1525 {
1526 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1527 }
1528
1529 /* don't vflush() if SKIPSYSTEM */
1530 int
1531 vnode_isnoflush(vnode_t vp)
1532 {
1533 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1534 }
1535
1536 /* is vnode_t a regular file */
1537 int
1538 vnode_isreg(vnode_t vp)
1539 {
1540 return ((vp->v_type == VREG)? 1 : 0);
1541 }
1542
1543 /* is vnode_t a directory? */
1544 int
1545 vnode_isdir(vnode_t vp)
1546 {
1547 return ((vp->v_type == VDIR)? 1 : 0);
1548 }
1549
1550 /* is vnode_t a symbolic link ? */
1551 int
1552 vnode_islnk(vnode_t vp)
1553 {
1554 return ((vp->v_type == VLNK)? 1 : 0);
1555 }
1556
1557 /* is vnode_t a fifo ? */
1558 int
1559 vnode_isfifo(vnode_t vp)
1560 {
1561 return ((vp->v_type == VFIFO)? 1 : 0);
1562 }
1563
1564 /* is vnode_t a block device? */
1565 int
1566 vnode_isblk(vnode_t vp)
1567 {
1568 return ((vp->v_type == VBLK)? 1 : 0);
1569 }
1570
1571 /* is vnode_t a char device? */
1572 int
1573 vnode_ischr(vnode_t vp)
1574 {
1575 return ((vp->v_type == VCHR)? 1 : 0);
1576 }
1577
1578 /* is vnode_t a socket? */
1579 int
1580 vnode_issock(vnode_t vp)
1581 {
1582 return ((vp->v_type == VSOCK)? 1 : 0);
1583 }
1584
1585 /* is vnode_t a named stream? */
1586 int
1587 vnode_isnamedstream(
1588 #if NAMEDSTREAMS
1589 vnode_t vp
1590 #else
1591 __unused vnode_t vp
1592 #endif
1593 )
1594 {
1595 #if NAMEDSTREAMS
1596 return ((vp->v_flag & VISNAMEDSTREAM) ? 1 : 0);
1597 #else
1598 return (0);
1599 #endif
1600 }
1601
1602 int
1603 vnode_isshadow(
1604 #if NAMEDSTREAMS
1605 vnode_t vp
1606 #else
1607 __unused vnode_t vp
1608 #endif
1609 )
1610 {
1611 #if NAMEDSTREAMS
1612 return ((vp->v_flag & VISSHADOW) ? 1 : 0);
1613 #else
1614 return (0);
1615 #endif
1616 }
1617
1618 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1619 void
1620 vnode_setnocache(vnode_t vp)
1621 {
1622 vnode_lock_spin(vp);
1623 vp->v_flag |= VNOCACHE_DATA;
1624 vnode_unlock(vp);
1625 }
1626
1627 void
1628 vnode_clearnocache(vnode_t vp)
1629 {
1630 vnode_lock_spin(vp);
1631 vp->v_flag &= ~VNOCACHE_DATA;
1632 vnode_unlock(vp);
1633 }
1634
1635 void
1636 vnode_set_openevt(vnode_t vp)
1637 {
1638 vnode_lock_spin(vp);
1639 vp->v_flag |= VOPENEVT;
1640 vnode_unlock(vp);
1641 }
1642
1643 void
1644 vnode_clear_openevt(vnode_t vp)
1645 {
1646 vnode_lock_spin(vp);
1647 vp->v_flag &= ~VOPENEVT;
1648 vnode_unlock(vp);
1649 }
1650
1651
1652 void
1653 vnode_setnoreadahead(vnode_t vp)
1654 {
1655 vnode_lock_spin(vp);
1656 vp->v_flag |= VRAOFF;
1657 vnode_unlock(vp);
1658 }
1659
1660 void
1661 vnode_clearnoreadahead(vnode_t vp)
1662 {
1663 vnode_lock_spin(vp);
1664 vp->v_flag &= ~VRAOFF;
1665 vnode_unlock(vp);
1666 }
1667
1668
1669 /* mark vnode_t to skip vflush() is SKIPSYSTEM */
1670 void
1671 vnode_setnoflush(vnode_t vp)
1672 {
1673 vnode_lock_spin(vp);
1674 vp->v_flag |= VNOFLUSH;
1675 vnode_unlock(vp);
1676 }
1677
1678 void
1679 vnode_clearnoflush(vnode_t vp)
1680 {
1681 vnode_lock_spin(vp);
1682 vp->v_flag &= ~VNOFLUSH;
1683 vnode_unlock(vp);
1684 }
1685
1686
1687 /* is vnode_t a blkdevice and has a FS mounted on it */
1688 int
1689 vnode_ismountedon(vnode_t vp)
1690 {
1691 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
1692 }
1693
1694 void
1695 vnode_setmountedon(vnode_t vp)
1696 {
1697 vnode_lock_spin(vp);
1698 vp->v_specflags |= SI_MOUNTEDON;
1699 vnode_unlock(vp);
1700 }
1701
1702 void
1703 vnode_clearmountedon(vnode_t vp)
1704 {
1705 vnode_lock_spin(vp);
1706 vp->v_specflags &= ~SI_MOUNTEDON;
1707 vnode_unlock(vp);
1708 }
1709
1710
1711 void
1712 vnode_settag(vnode_t vp, int tag)
1713 {
1714 vp->v_tag = tag;
1715
1716 }
1717
1718 int
1719 vnode_tag(vnode_t vp)
1720 {
1721 return(vp->v_tag);
1722 }
1723
1724 vnode_t
1725 vnode_parent(vnode_t vp)
1726 {
1727
1728 return(vp->v_parent);
1729 }
1730
1731 void
1732 vnode_setparent(vnode_t vp, vnode_t dvp)
1733 {
1734 vp->v_parent = dvp;
1735 }
1736
1737 const char *
1738 vnode_name(vnode_t vp)
1739 {
1740 /* we try to keep v_name a reasonable name for the node */
1741 return(vp->v_name);
1742 }
1743
1744 void
1745 vnode_setname(vnode_t vp, char * name)
1746 {
1747 vp->v_name = name;
1748 }
1749
1750 /* return the registered FS name when adding the FS to kernel */
1751 void
1752 vnode_vfsname(vnode_t vp, char * buf)
1753 {
1754 strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
1755 }
1756
1757 /* return the FS type number */
1758 int
1759 vnode_vfstypenum(vnode_t vp)
1760 {
1761 return(vp->v_mount->mnt_vtable->vfc_typenum);
1762 }
1763
1764 int
1765 vnode_vfs64bitready(vnode_t vp)
1766 {
1767
1768 if ((vp->v_mount->mnt_vtable->vfc_64bitready))
1769 return(1);
1770 else
1771 return(0);
1772 }
1773
1774
1775
1776 /* return the visible flags on associated mount point of vnode_t */
1777 uint32_t
1778 vnode_vfsvisflags(vnode_t vp)
1779 {
1780 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
1781 }
1782
1783 /* return the command modifier flags on associated mount point of vnode_t */
1784 uint32_t
1785 vnode_vfscmdflags(vnode_t vp)
1786 {
1787 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
1788 }
1789
1790 /* return the max symlink of short links of vnode_t */
1791 uint32_t
1792 vnode_vfsmaxsymlen(vnode_t vp)
1793 {
1794 return(vp->v_mount->mnt_maxsymlinklen);
1795 }
1796
1797 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1798 struct vfsstatfs *
1799 vnode_vfsstatfs(vnode_t vp)
1800 {
1801 return(&vp->v_mount->mnt_vfsstat);
1802 }
1803
1804 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1805 void *
1806 vnode_vfsfsprivate(vnode_t vp)
1807 {
1808 return(vp->v_mount->mnt_data);
1809 }
1810
1811 /* is vnode_t in a rdonly mounted FS */
1812 int
1813 vnode_vfsisrdonly(vnode_t vp)
1814 {
1815 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
1816 }
1817
1818
1819 /*
1820 * Returns vnode ref to current working directory; if a per-thread current
1821 * working directory is in effect, return that instead of the per process one.
1822 *
1823 * XXX Published, but not used.
1824 */
1825 vnode_t
1826 current_workingdir(void)
1827 {
1828 return vfs_context_cwd(vfs_context_current());
1829 }
1830
1831 /* returns vnode ref to current root(chroot) directory */
1832 vnode_t
1833 current_rootdir(void)
1834 {
1835 proc_t proc = current_proc();
1836 struct vnode * vp ;
1837
1838 if ( (vp = proc->p_fd->fd_rdir) ) {
1839 if ( (vnode_getwithref(vp)) )
1840 return (NULL);
1841 }
1842 return vp;
1843 }
1844
1845 /*
1846 * Get a filesec and optional acl contents from an extended attribute.
1847 * Function will attempt to retrive ACL, UUID, and GUID information using a
1848 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
1849 *
1850 * Parameters: vp The vnode on which to operate.
1851 * fsecp The filesec (and ACL, if any) being
1852 * retrieved.
1853 * ctx The vnode context in which the
1854 * operation is to be attempted.
1855 *
1856 * Returns: 0 Success
1857 * !0 errno value
1858 *
1859 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
1860 * host byte order, as will be the ACL contents, if any.
1861 * Internally, we will cannonize these values from network (PPC)
1862 * byte order after we retrieve them so that the on-disk contents
1863 * of the extended attribute are identical for both PPC and Intel
1864 * (if we were not being required to provide this service via
1865 * fallback, this would be the job of the filesystem
1866 * 'VNOP_GETATTR' call).
1867 *
1868 * We use ntohl() because it has a transitive property on Intel
1869 * machines and no effect on PPC mancines. This guarantees us
1870 *
1871 * XXX: Deleting rather than ignoreing a corrupt security structure is
1872 * probably the only way to reset it without assistance from an
1873 * file system integrity checking tool. Right now we ignore it.
1874 *
1875 * XXX: We should enummerate the possible errno values here, and where
1876 * in the code they originated.
1877 */
1878 static int
1879 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
1880 {
1881 kauth_filesec_t fsec;
1882 uio_t fsec_uio;
1883 size_t fsec_size;
1884 size_t xsize, rsize;
1885 int error;
1886 uint32_t host_fsec_magic;
1887 uint32_t host_acl_entrycount;
1888
1889 fsec = NULL;
1890 fsec_uio = NULL;
1891 error = 0;
1892
1893 /* find out how big the EA is */
1894 if (vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx) != 0) {
1895 /* no EA, no filesec */
1896 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1897 error = 0;
1898 /* either way, we are done */
1899 goto out;
1900 }
1901
1902 /*
1903 * To be valid, a kauth_filesec_t must be large enough to hold a zero
1904 * ACE entrly ACL, and if it's larger than that, it must have the right
1905 * number of bytes such that it contains an atomic number of ACEs,
1906 * rather than partial entries. Otherwise, we ignore it.
1907 */
1908 if (!KAUTH_FILESEC_VALID(xsize)) {
1909 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
1910 error = 0;
1911 goto out;
1912 }
1913
1914 /* how many entries would fit? */
1915 fsec_size = KAUTH_FILESEC_COUNT(xsize);
1916
1917 /* get buffer and uio */
1918 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
1919 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
1920 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
1921 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
1922 error = ENOMEM;
1923 goto out;
1924 }
1925
1926 /* read security attribute */
1927 rsize = xsize;
1928 if ((error = vn_getxattr(vp,
1929 KAUTH_FILESEC_XATTR,
1930 fsec_uio,
1931 &rsize,
1932 XATTR_NOSECURITY,
1933 ctx)) != 0) {
1934
1935 /* no attribute - no security data */
1936 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1937 error = 0;
1938 /* either way, we are done */
1939 goto out;
1940 }
1941
1942 /*
1943 * Validate security structure; the validation must take place in host
1944 * byte order. If it's corrupt, we will just ignore it.
1945 */
1946
1947 /* Validate the size before trying to convert it */
1948 if (rsize < KAUTH_FILESEC_SIZE(0)) {
1949 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
1950 goto out;
1951 }
1952
1953 /* Validate the magic number before trying to convert it */
1954 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
1955 if (fsec->fsec_magic != host_fsec_magic) {
1956 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
1957 goto out;
1958 }
1959
1960 /* Validate the entry count before trying to convert it. */
1961 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
1962 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
1963 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
1964 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
1965 goto out;
1966 }
1967 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
1968 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
1969 goto out;
1970 }
1971 }
1972
1973 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
1974
1975 *fsecp = fsec;
1976 fsec = NULL;
1977 error = 0;
1978 out:
1979 if (fsec != NULL)
1980 kauth_filesec_free(fsec);
1981 if (fsec_uio != NULL)
1982 uio_free(fsec_uio);
1983 if (error)
1984 *fsecp = NULL;
1985 return(error);
1986 }
1987
1988 /*
1989 * Set a filesec and optional acl contents into an extended attribute.
1990 * function will attempt to store ACL, UUID, and GUID information using a
1991 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
1992 * may or may not point to the `fsec->fsec_acl`, depending on whether the
1993 * original caller supplied an acl.
1994 *
1995 * Parameters: vp The vnode on which to operate.
1996 * fsec The filesec being set.
1997 * acl The acl to be associated with 'fsec'.
1998 * ctx The vnode context in which the
1999 * operation is to be attempted.
2000 *
2001 * Returns: 0 Success
2002 * !0 errno value
2003 *
2004 * Notes: Both the fsec and the acl are always valid.
2005 *
2006 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2007 * as are the acl contents, if they are used. Internally, we will
2008 * cannonize these values into network (PPC) byte order before we
2009 * attempt to write them so that the on-disk contents of the
2010 * extended attribute are identical for both PPC and Intel (if we
2011 * were not being required to provide this service via fallback,
2012 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2013 * We reverse this process on the way out, so we leave with the
2014 * same byte order we started with.
2015 *
2016 * XXX: We should enummerate the possible errno values here, and where
2017 * in the code they originated.
2018 */
2019 static int
2020 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2021 {
2022 uio_t fsec_uio;
2023 int error;
2024 uint32_t saved_acl_copysize;
2025
2026 fsec_uio = NULL;
2027
2028 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2029 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2030 error = ENOMEM;
2031 goto out;
2032 }
2033 /*
2034 * Save the pre-converted ACL copysize, because it gets swapped too
2035 * if we are running with the wrong endianness.
2036 */
2037 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2038
2039 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2040
2041 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), sizeof(struct kauth_filesec) - sizeof(struct kauth_acl));
2042 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2043 error = vn_setxattr(vp,
2044 KAUTH_FILESEC_XATTR,
2045 fsec_uio,
2046 XATTR_NOSECURITY, /* we have auth'ed already */
2047 ctx);
2048 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2049
2050 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2051
2052 out:
2053 if (fsec_uio != NULL)
2054 uio_free(fsec_uio);
2055 return(error);
2056 }
2057
2058
2059 /*
2060 * Returns: 0 Success
2061 * ENOMEM Not enough space [only if has filesec]
2062 * VNOP_GETATTR: ???
2063 * vnode_get_filesec: ???
2064 * kauth_cred_guid2uid: ???
2065 * kauth_cred_guid2gid: ???
2066 * vfs_update_vfsstat: ???
2067 */
2068 int
2069 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2070 {
2071 kauth_filesec_t fsec;
2072 kauth_acl_t facl;
2073 int error;
2074 uid_t nuid;
2075 gid_t ngid;
2076
2077 /* don't ask for extended security data if the filesystem doesn't support it */
2078 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2079 VATTR_CLEAR_ACTIVE(vap, va_acl);
2080 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2081 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2082 }
2083
2084 /*
2085 * If the caller wants size values we might have to synthesise, give the
2086 * filesystem the opportunity to supply better intermediate results.
2087 */
2088 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2089 VATTR_IS_ACTIVE(vap, va_total_size) ||
2090 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2091 VATTR_SET_ACTIVE(vap, va_data_size);
2092 VATTR_SET_ACTIVE(vap, va_data_alloc);
2093 VATTR_SET_ACTIVE(vap, va_total_size);
2094 VATTR_SET_ACTIVE(vap, va_total_alloc);
2095 }
2096
2097 error = VNOP_GETATTR(vp, vap, ctx);
2098 if (error) {
2099 KAUTH_DEBUG("ERROR - returning %d", error);
2100 goto out;
2101 }
2102
2103 /*
2104 * If extended security data was requested but not returned, try the fallback
2105 * path.
2106 */
2107 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2108 fsec = NULL;
2109
2110 if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) {
2111 /* try to get the filesec */
2112 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
2113 goto out;
2114 }
2115 /* if no filesec, no attributes */
2116 if (fsec == NULL) {
2117 VATTR_RETURN(vap, va_acl, NULL);
2118 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2119 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2120 } else {
2121
2122 /* looks good, try to return what we were asked for */
2123 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2124 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2125
2126 /* only return the ACL if we were actually asked for it */
2127 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2128 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2129 VATTR_RETURN(vap, va_acl, NULL);
2130 } else {
2131 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2132 if (facl == NULL) {
2133 kauth_filesec_free(fsec);
2134 error = ENOMEM;
2135 goto out;
2136 }
2137 bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2138 VATTR_RETURN(vap, va_acl, facl);
2139 }
2140 }
2141 kauth_filesec_free(fsec);
2142 }
2143 }
2144 /*
2145 * If someone gave us an unsolicited filesec, toss it. We promise that
2146 * we're OK with a filesystem giving us anything back, but our callers
2147 * only expect what they asked for.
2148 */
2149 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2150 if (vap->va_acl != NULL)
2151 kauth_acl_free(vap->va_acl);
2152 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2153 }
2154
2155 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2156 /*
2157 * Handle the case where we need a UID/GID, but only have extended
2158 * security information.
2159 */
2160 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2161 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2162 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2163 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
2164 VATTR_RETURN(vap, va_uid, nuid);
2165 }
2166 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2167 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2168 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2169 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
2170 VATTR_RETURN(vap, va_gid, ngid);
2171 }
2172 #endif
2173
2174 /*
2175 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2176 */
2177 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2178 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
2179 nuid = vap->va_uid;
2180 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2181 nuid = vp->v_mount->mnt_fsowner;
2182 if (nuid == KAUTH_UID_NONE)
2183 nuid = 99;
2184 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2185 nuid = vap->va_uid;
2186 } else {
2187 /* this will always be something sensible */
2188 nuid = vp->v_mount->mnt_fsowner;
2189 }
2190 if ((nuid == 99) && !vfs_context_issuser(ctx))
2191 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2192 VATTR_RETURN(vap, va_uid, nuid);
2193 }
2194 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2195 if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
2196 ngid = vap->va_gid;
2197 } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2198 ngid = vp->v_mount->mnt_fsgroup;
2199 if (ngid == KAUTH_GID_NONE)
2200 ngid = 99;
2201 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2202 ngid = vap->va_gid;
2203 } else {
2204 /* this will always be something sensible */
2205 ngid = vp->v_mount->mnt_fsgroup;
2206 }
2207 if ((ngid == 99) && !vfs_context_issuser(ctx))
2208 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2209 VATTR_RETURN(vap, va_gid, ngid);
2210 }
2211
2212 /*
2213 * Synthesise some values that can be reasonably guessed.
2214 */
2215 if (!VATTR_IS_SUPPORTED(vap, va_iosize))
2216 VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
2217
2218 if (!VATTR_IS_SUPPORTED(vap, va_flags))
2219 VATTR_RETURN(vap, va_flags, 0);
2220
2221 if (!VATTR_IS_SUPPORTED(vap, va_filerev))
2222 VATTR_RETURN(vap, va_filerev, 0);
2223
2224 if (!VATTR_IS_SUPPORTED(vap, va_gen))
2225 VATTR_RETURN(vap, va_gen, 0);
2226
2227 /*
2228 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2229 */
2230 if (!VATTR_IS_SUPPORTED(vap, va_data_size))
2231 VATTR_RETURN(vap, va_data_size, 0);
2232
2233 /* do we want any of the possibly-computed values? */
2234 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2235 VATTR_IS_ACTIVE(vap, va_total_size) ||
2236 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2237 /* make sure f_bsize is valid */
2238 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2239 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0)
2240 goto out;
2241 }
2242
2243 /* default va_data_alloc from va_data_size */
2244 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
2245 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2246
2247 /* default va_total_size from va_data_size */
2248 if (!VATTR_IS_SUPPORTED(vap, va_total_size))
2249 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2250
2251 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2252 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
2253 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2254 }
2255
2256 /*
2257 * If we don't have a change time, pull it from the modtime.
2258 */
2259 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
2260 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2261
2262 /*
2263 * This is really only supported for the creation VNOPs, but since the field is there
2264 * we should populate it correctly.
2265 */
2266 VATTR_RETURN(vap, va_type, vp->v_type);
2267
2268 /*
2269 * The fsid can be obtained from the mountpoint directly.
2270 */
2271 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2272
2273 out:
2274
2275 return(error);
2276 }
2277
2278 /*
2279 * Set the attributes on a vnode in a vnode context.
2280 *
2281 * Parameters: vp The vnode whose attributes to set.
2282 * vap A pointer to the attributes to set.
2283 * ctx The vnode context in which the
2284 * operation is to be attempted.
2285 *
2286 * Returns: 0 Success
2287 * !0 errno value
2288 *
2289 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2290 *
2291 * The contents of the data area pointed to by 'vap' may be
2292 * modified if the vnode is on a filesystem which has been
2293 * mounted with ingore ownership flags, or by the underlyng
2294 * VFS itself, or by the fallback code, if the underlying VFS
2295 * does not support ACL, UUID, or GUUID attributes directly.
2296 *
2297 * XXX: We should enummerate the possible errno values here, and where
2298 * in the code they originated.
2299 */
2300 int
2301 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2302 {
2303 int error, is_perm_change=0;
2304
2305 /*
2306 * Make sure the filesystem is mounted R/W.
2307 * If not, return an error.
2308 */
2309 if (vfs_isrdonly(vp->v_mount)) {
2310 error = EROFS;
2311 goto out;
2312 }
2313 #if NAMEDSTREAMS
2314 /* For streams, va_data_size is the only setable attribute. */
2315 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
2316 error = EPERM;
2317 goto out;
2318 }
2319 #endif
2320
2321 /*
2322 * If ownership is being ignored on this volume, we silently discard
2323 * ownership changes.
2324 */
2325 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2326 VATTR_CLEAR_ACTIVE(vap, va_uid);
2327 VATTR_CLEAR_ACTIVE(vap, va_gid);
2328 }
2329
2330 if ( VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)
2331 || VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_acl)) {
2332 is_perm_change = 1;
2333 }
2334
2335 /*
2336 * Make sure that extended security is enabled if we're going to try
2337 * to set any.
2338 */
2339 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
2340 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
2341 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2342 error = ENOTSUP;
2343 goto out;
2344 }
2345
2346 error = VNOP_SETATTR(vp, vap, ctx);
2347
2348 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
2349 error = vnode_setattr_fallback(vp, vap, ctx);
2350
2351 #if CONFIG_FSE
2352 // only send a stat_changed event if this is more than
2353 // just an access time update
2354 if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time))) {
2355 if (is_perm_change) {
2356 if (need_fsevent(FSE_CHOWN, vp)) {
2357 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2358 }
2359 } else if(need_fsevent(FSE_STAT_CHANGED, vp)) {
2360 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2361 }
2362 }
2363 #endif
2364
2365 out:
2366 return(error);
2367 }
2368
2369 /*
2370 * Fallback for setting the attributes on a vnode in a vnode context. This
2371 * Function will attempt to store ACL, UUID, and GUID information utilizing
2372 * a read/modify/write operation against an EA used as a backing store for
2373 * the object.
2374 *
2375 * Parameters: vp The vnode whose attributes to set.
2376 * vap A pointer to the attributes to set.
2377 * ctx The vnode context in which the
2378 * operation is to be attempted.
2379 *
2380 * Returns: 0 Success
2381 * !0 errno value
2382 *
2383 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2384 * as are the fsec and lfsec, if they are used.
2385 *
2386 * The contents of the data area pointed to by 'vap' may be
2387 * modified to indicate that the attribute is supported for
2388 * any given requested attribute.
2389 *
2390 * XXX: We should enummerate the possible errno values here, and where
2391 * in the code they originated.
2392 */
2393 int
2394 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2395 {
2396 kauth_filesec_t fsec;
2397 kauth_acl_t facl;
2398 struct kauth_filesec lfsec;
2399 int error;
2400
2401 error = 0;
2402
2403 /*
2404 * Extended security fallback via extended attributes.
2405 *
2406 * Note that we do not free the filesec; the caller is expected to
2407 * do this.
2408 */
2409 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2410 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2411 VATTR_NOT_RETURNED(vap, va_guuid)) {
2412 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2413
2414 /*
2415 * Fail for file types that we don't permit extended security
2416 * to be set on.
2417 */
2418 if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
2419 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2420 error = EINVAL;
2421 goto out;
2422 }
2423
2424 /*
2425 * If we don't have all the extended security items, we need
2426 * to fetch the existing data to perform a read-modify-write
2427 * operation.
2428 */
2429 fsec = NULL;
2430 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2431 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2432 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2433 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2434 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2435 goto out;
2436 }
2437 }
2438 /* if we didn't get a filesec, use our local one */
2439 if (fsec == NULL) {
2440 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2441 fsec = &lfsec;
2442 } else {
2443 KAUTH_DEBUG("SETATTR - updating existing filesec");
2444 }
2445 /* find the ACL */
2446 facl = &fsec->fsec_acl;
2447
2448 /* if we're using the local filesec, we need to initialise it */
2449 if (fsec == &lfsec) {
2450 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2451 fsec->fsec_owner = kauth_null_guid;
2452 fsec->fsec_group = kauth_null_guid;
2453 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2454 facl->acl_flags = 0;
2455 }
2456
2457 /*
2458 * Update with the supplied attributes.
2459 */
2460 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2461 KAUTH_DEBUG("SETATTR - updating owner UUID");
2462 fsec->fsec_owner = vap->va_uuuid;
2463 VATTR_SET_SUPPORTED(vap, va_uuuid);
2464 }
2465 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2466 KAUTH_DEBUG("SETATTR - updating group UUID");
2467 fsec->fsec_group = vap->va_guuid;
2468 VATTR_SET_SUPPORTED(vap, va_guuid);
2469 }
2470 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2471 if (vap->va_acl == NULL) {
2472 KAUTH_DEBUG("SETATTR - removing ACL");
2473 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2474 } else {
2475 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2476 facl = vap->va_acl;
2477 }
2478 VATTR_SET_SUPPORTED(vap, va_acl);
2479 }
2480
2481 /*
2482 * If the filesec data is all invalid, we can just remove
2483 * the EA completely.
2484 */
2485 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2486 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2487 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2488 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2489 /* no attribute is ok, nothing to delete */
2490 if (error == ENOATTR)
2491 error = 0;
2492 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2493 } else {
2494 /* write the EA */
2495 error = vnode_set_filesec(vp, fsec, facl, ctx);
2496 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2497 }
2498
2499 /* if we fetched a filesec, dispose of the buffer */
2500 if (fsec != &lfsec)
2501 kauth_filesec_free(fsec);
2502 }
2503 out:
2504
2505 return(error);
2506 }
2507
2508 /*
2509 * Definition of vnode operations.
2510 */
2511
2512 #if 0
2513 /*
2514 *#
2515 *#% lookup dvp L ? ?
2516 *#% lookup vpp - L -
2517 */
2518 struct vnop_lookup_args {
2519 struct vnodeop_desc *a_desc;
2520 vnode_t a_dvp;
2521 vnode_t *a_vpp;
2522 struct componentname *a_cnp;
2523 vfs_context_t a_context;
2524 };
2525 #endif /* 0*/
2526
2527 /*
2528 * Returns: 0 Success
2529 * lock_fsnode:ENOENT No such file or directory [only for VFS
2530 * that is not thread safe & vnode is
2531 * currently being/has been terminated]
2532 * <vfs_lookup>:ENAMETOOLONG
2533 * <vfs_lookup>:ENOENT
2534 * <vfs_lookup>:EJUSTRETURN
2535 * <vfs_lookup>:EPERM
2536 * <vfs_lookup>:EISDIR
2537 * <vfs_lookup>:ENOTDIR
2538 * <vfs_lookup>:???
2539 *
2540 * Note: The return codes from the underlying VFS's lookup routine can't
2541 * be fully enumerated here, since third party VFS authors may not
2542 * limit their error returns to the ones documented here, even
2543 * though this may result in some programs functioning incorrectly.
2544 *
2545 * The return codes documented above are those which may currently
2546 * be returned by HFS from hfs_lookup, not including additional
2547 * error code which may be propagated from underlying routines.
2548 */
2549 errno_t
2550 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
2551 {
2552 int _err;
2553 struct vnop_lookup_args a;
2554 vnode_t vp;
2555 int thread_safe;
2556 int funnel_state = 0;
2557
2558 a.a_desc = &vnop_lookup_desc;
2559 a.a_dvp = dvp;
2560 a.a_vpp = vpp;
2561 a.a_cnp = cnp;
2562 a.a_context = ctx;
2563 thread_safe = THREAD_SAFE_FS(dvp);
2564
2565 if (!thread_safe) {
2566 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2567 return (_err);
2568 }
2569 }
2570 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
2571
2572 vp = *vpp;
2573
2574 if (!thread_safe) {
2575 if ( (cnp->cn_flags & ISLASTCN) ) {
2576 if ( (cnp->cn_flags & LOCKPARENT) ) {
2577 if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
2578 /*
2579 * leave the fsnode lock held on
2580 * the directory, but restore the funnel...
2581 * also indicate that we need to drop the
2582 * fsnode_lock when we're done with the
2583 * system call processing for this path
2584 */
2585 cnp->cn_flags |= FSNODELOCKHELD;
2586
2587 (void) thread_funnel_set(kernel_flock, funnel_state);
2588 return (_err);
2589 }
2590 }
2591 }
2592 unlock_fsnode(dvp, &funnel_state);
2593 }
2594 return (_err);
2595 }
2596
2597 #if 0
2598 /*
2599 *#
2600 *#% create dvp L L L
2601 *#% create vpp - L -
2602 *#
2603 */
2604
2605 struct vnop_create_args {
2606 struct vnodeop_desc *a_desc;
2607 vnode_t a_dvp;
2608 vnode_t *a_vpp;
2609 struct componentname *a_cnp;
2610 struct vnode_attr *a_vap;
2611 vfs_context_t a_context;
2612 };
2613 #endif /* 0*/
2614 errno_t
2615 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
2616 {
2617 int _err;
2618 struct vnop_create_args a;
2619 int thread_safe;
2620 int funnel_state = 0;
2621
2622 a.a_desc = &vnop_create_desc;
2623 a.a_dvp = dvp;
2624 a.a_vpp = vpp;
2625 a.a_cnp = cnp;
2626 a.a_vap = vap;
2627 a.a_context = ctx;
2628 thread_safe = THREAD_SAFE_FS(dvp);
2629
2630 if (!thread_safe) {
2631 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2632 return (_err);
2633 }
2634 }
2635 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
2636 if (_err == 0 && !NATIVE_XATTR(dvp)) {
2637 /*
2638 * Remove stale Apple Double file (if any).
2639 */
2640 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 0);
2641 }
2642 if (!thread_safe) {
2643 unlock_fsnode(dvp, &funnel_state);
2644 }
2645 return (_err);
2646 }
2647
2648 #if 0
2649 /*
2650 *#
2651 *#% whiteout dvp L L L
2652 *#% whiteout cnp - - -
2653 *#% whiteout flag - - -
2654 *#
2655 */
2656 struct vnop_whiteout_args {
2657 struct vnodeop_desc *a_desc;
2658 vnode_t a_dvp;
2659 struct componentname *a_cnp;
2660 int a_flags;
2661 vfs_context_t a_context;
2662 };
2663 #endif /* 0*/
2664 errno_t
2665 VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t ctx)
2666 {
2667 int _err;
2668 struct vnop_whiteout_args a;
2669 int thread_safe;
2670 int funnel_state = 0;
2671
2672 a.a_desc = &vnop_whiteout_desc;
2673 a.a_dvp = dvp;
2674 a.a_cnp = cnp;
2675 a.a_flags = flags;
2676 a.a_context = ctx;
2677 thread_safe = THREAD_SAFE_FS(dvp);
2678
2679 if (!thread_safe) {
2680 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2681 return (_err);
2682 }
2683 }
2684 _err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);
2685 if (!thread_safe) {
2686 unlock_fsnode(dvp, &funnel_state);
2687 }
2688 return (_err);
2689 }
2690
2691 #if 0
2692 /*
2693 *#
2694 *#% mknod dvp L U U
2695 *#% mknod vpp - X -
2696 *#
2697 */
2698 struct vnop_mknod_args {
2699 struct vnodeop_desc *a_desc;
2700 vnode_t a_dvp;
2701 vnode_t *a_vpp;
2702 struct componentname *a_cnp;
2703 struct vnode_attr *a_vap;
2704 vfs_context_t a_context;
2705 };
2706 #endif /* 0*/
2707 errno_t
2708 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
2709 {
2710
2711 int _err;
2712 struct vnop_mknod_args a;
2713 int thread_safe;
2714 int funnel_state = 0;
2715
2716 a.a_desc = &vnop_mknod_desc;
2717 a.a_dvp = dvp;
2718 a.a_vpp = vpp;
2719 a.a_cnp = cnp;
2720 a.a_vap = vap;
2721 a.a_context = ctx;
2722 thread_safe = THREAD_SAFE_FS(dvp);
2723
2724 if (!thread_safe) {
2725 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2726 return (_err);
2727 }
2728 }
2729 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
2730 if (!thread_safe) {
2731 unlock_fsnode(dvp, &funnel_state);
2732 }
2733 return (_err);
2734 }
2735
2736 #if 0
2737 /*
2738 *#
2739 *#% open vp L L L
2740 *#
2741 */
2742 struct vnop_open_args {
2743 struct vnodeop_desc *a_desc;
2744 vnode_t a_vp;
2745 int a_mode;
2746 vfs_context_t a_context;
2747 };
2748 #endif /* 0*/
2749 errno_t
2750 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
2751 {
2752 int _err;
2753 struct vnop_open_args a;
2754 int thread_safe;
2755 int funnel_state = 0;
2756
2757 if (ctx == NULL) {
2758 ctx = vfs_context_current();
2759 }
2760 a.a_desc = &vnop_open_desc;
2761 a.a_vp = vp;
2762 a.a_mode = mode;
2763 a.a_context = ctx;
2764 thread_safe = THREAD_SAFE_FS(vp);
2765
2766 if (!thread_safe) {
2767 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2768 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2769 if ( (_err = lock_fsnode(vp, NULL)) ) {
2770 (void) thread_funnel_set(kernel_flock, funnel_state);
2771 return (_err);
2772 }
2773 }
2774 }
2775 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
2776 if (!thread_safe) {
2777 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2778 unlock_fsnode(vp, NULL);
2779 }
2780 (void) thread_funnel_set(kernel_flock, funnel_state);
2781 }
2782 return (_err);
2783 }
2784
2785 #if 0
2786 /*
2787 *#
2788 *#% close vp U U U
2789 *#
2790 */
2791 struct vnop_close_args {
2792 struct vnodeop_desc *a_desc;
2793 vnode_t a_vp;
2794 int a_fflag;
2795 vfs_context_t a_context;
2796 };
2797 #endif /* 0*/
2798 errno_t
2799 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
2800 {
2801 int _err;
2802 struct vnop_close_args a;
2803 int thread_safe;
2804 int funnel_state = 0;
2805
2806 if (ctx == NULL) {
2807 ctx = vfs_context_current();
2808 }
2809 a.a_desc = &vnop_close_desc;
2810 a.a_vp = vp;
2811 a.a_fflag = fflag;
2812 a.a_context = ctx;
2813 thread_safe = THREAD_SAFE_FS(vp);
2814
2815 if (!thread_safe) {
2816 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2817 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2818 if ( (_err = lock_fsnode(vp, NULL)) ) {
2819 (void) thread_funnel_set(kernel_flock, funnel_state);
2820 return (_err);
2821 }
2822 }
2823 }
2824 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
2825 if (!thread_safe) {
2826 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2827 unlock_fsnode(vp, NULL);
2828 }
2829 (void) thread_funnel_set(kernel_flock, funnel_state);
2830 }
2831 return (_err);
2832 }
2833
2834 #if 0
2835 /*
2836 *#
2837 *#% access vp L L L
2838 *#
2839 */
2840 struct vnop_access_args {
2841 struct vnodeop_desc *a_desc;
2842 vnode_t a_vp;
2843 int a_action;
2844 vfs_context_t a_context;
2845 };
2846 #endif /* 0*/
2847 errno_t
2848 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
2849 {
2850 int _err;
2851 struct vnop_access_args a;
2852 int thread_safe;
2853 int funnel_state = 0;
2854
2855 if (ctx == NULL) {
2856 ctx = vfs_context_current();
2857 }
2858 a.a_desc = &vnop_access_desc;
2859 a.a_vp = vp;
2860 a.a_action = action;
2861 a.a_context = ctx;
2862 thread_safe = THREAD_SAFE_FS(vp);
2863
2864 if (!thread_safe) {
2865 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2866 return (_err);
2867 }
2868 }
2869 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
2870 if (!thread_safe) {
2871 unlock_fsnode(vp, &funnel_state);
2872 }
2873 return (_err);
2874 }
2875
2876 #if 0
2877 /*
2878 *#
2879 *#% getattr vp = = =
2880 *#
2881 */
2882 struct vnop_getattr_args {
2883 struct vnodeop_desc *a_desc;
2884 vnode_t a_vp;
2885 struct vnode_attr *a_vap;
2886 vfs_context_t a_context;
2887 };
2888 #endif /* 0*/
2889 errno_t
2890 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
2891 {
2892 int _err;
2893 struct vnop_getattr_args a;
2894 int thread_safe;
2895 int funnel_state = 0; /* protected by thread_safe */
2896
2897 a.a_desc = &vnop_getattr_desc;
2898 a.a_vp = vp;
2899 a.a_vap = vap;
2900 a.a_context = ctx;
2901 thread_safe = THREAD_SAFE_FS(vp);
2902
2903 if (!thread_safe) {
2904 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2905 return (_err);
2906 }
2907 }
2908 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
2909 if (!thread_safe) {
2910 unlock_fsnode(vp, &funnel_state);
2911 }
2912 return (_err);
2913 }
2914
2915 #if 0
2916 /*
2917 *#
2918 *#% setattr vp L L L
2919 *#
2920 */
2921 struct vnop_setattr_args {
2922 struct vnodeop_desc *a_desc;
2923 vnode_t a_vp;
2924 struct vnode_attr *a_vap;
2925 vfs_context_t a_context;
2926 };
2927 #endif /* 0*/
2928 errno_t
2929 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
2930 {
2931 int _err;
2932 struct vnop_setattr_args a;
2933 int thread_safe;
2934 int funnel_state = 0; /* protected by thread_safe */
2935
2936 a.a_desc = &vnop_setattr_desc;
2937 a.a_vp = vp;
2938 a.a_vap = vap;
2939 a.a_context = ctx;
2940 thread_safe = THREAD_SAFE_FS(vp);
2941
2942 if (!thread_safe) {
2943 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2944 return (_err);
2945 }
2946 }
2947 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
2948
2949 /*
2950 * Shadow uid/gid/mod change to extended attribute file.
2951 */
2952 if (_err == 0 && !NATIVE_XATTR(vp)) {
2953 struct vnode_attr va;
2954 int change = 0;
2955
2956 VATTR_INIT(&va);
2957 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2958 VATTR_SET(&va, va_uid, vap->va_uid);
2959 change = 1;
2960 }
2961 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2962 VATTR_SET(&va, va_gid, vap->va_gid);
2963 change = 1;
2964 }
2965 if (VATTR_IS_ACTIVE(vap, va_mode)) {
2966 VATTR_SET(&va, va_mode, vap->va_mode);
2967 change = 1;
2968 }
2969 if (change) {
2970 vnode_t dvp;
2971 const char *vname;
2972
2973 dvp = vnode_getparent(vp);
2974 vname = vnode_getname(vp);
2975
2976 xattrfile_setattr(dvp, vname, &va, ctx, thread_safe);
2977 if (dvp != NULLVP)
2978 vnode_put(dvp);
2979 if (vname != NULL)
2980 vnode_putname(vname);
2981 }
2982 }
2983 if (!thread_safe) {
2984 unlock_fsnode(vp, &funnel_state);
2985 }
2986 /*
2987 * If we have changed any of the things about the file that are likely
2988 * to result in changes to authorization results, blow the vnode auth
2989 * cache
2990 */
2991 if (_err == 0 && (
2992 VATTR_IS_SUPPORTED(vap, va_mode) ||
2993 VATTR_IS_SUPPORTED(vap, va_uid) ||
2994 VATTR_IS_SUPPORTED(vap, va_gid) ||
2995 VATTR_IS_SUPPORTED(vap, va_flags) ||
2996 VATTR_IS_SUPPORTED(vap, va_acl) ||
2997 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
2998 VATTR_IS_SUPPORTED(vap, va_guuid)))
2999 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3000
3001 return (_err);
3002 }
3003
3004
3005 #if 0
3006 /*
3007 *#
3008 *#% read vp L L L
3009 *#
3010 */
3011 struct vnop_read_args {
3012 struct vnodeop_desc *a_desc;
3013 vnode_t a_vp;
3014 struct uio *a_uio;
3015 int a_ioflag;
3016 vfs_context_t a_context;
3017 };
3018 #endif /* 0*/
3019 errno_t
3020 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3021 {
3022 int _err;
3023 struct vnop_read_args a;
3024 int thread_safe;
3025 int funnel_state = 0;
3026
3027 if (ctx == NULL) {
3028 ctx = vfs_context_current();
3029 }
3030
3031 a.a_desc = &vnop_read_desc;
3032 a.a_vp = vp;
3033 a.a_uio = uio;
3034 a.a_ioflag = ioflag;
3035 a.a_context = ctx;
3036 thread_safe = THREAD_SAFE_FS(vp);
3037
3038 if (!thread_safe) {
3039 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3040 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3041 if ( (_err = lock_fsnode(vp, NULL)) ) {
3042 (void) thread_funnel_set(kernel_flock, funnel_state);
3043 return (_err);
3044 }
3045 }
3046 }
3047 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3048
3049 if (!thread_safe) {
3050 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3051 unlock_fsnode(vp, NULL);
3052 }
3053 (void) thread_funnel_set(kernel_flock, funnel_state);
3054 }
3055 return (_err);
3056 }
3057
3058
3059 #if 0
3060 /*
3061 *#
3062 *#% write vp L L L
3063 *#
3064 */
3065 struct vnop_write_args {
3066 struct vnodeop_desc *a_desc;
3067 vnode_t a_vp;
3068 struct uio *a_uio;
3069 int a_ioflag;
3070 vfs_context_t a_context;
3071 };
3072 #endif /* 0*/
3073 errno_t
3074 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3075 {
3076 struct vnop_write_args a;
3077 int _err;
3078 int thread_safe;
3079 int funnel_state = 0;
3080
3081 if (ctx == NULL) {
3082 ctx = vfs_context_current();
3083 }
3084
3085 a.a_desc = &vnop_write_desc;
3086 a.a_vp = vp;
3087 a.a_uio = uio;
3088 a.a_ioflag = ioflag;
3089 a.a_context = ctx;
3090 thread_safe = THREAD_SAFE_FS(vp);
3091
3092 if (!thread_safe) {
3093 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3094 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3095 if ( (_err = lock_fsnode(vp, NULL)) ) {
3096 (void) thread_funnel_set(kernel_flock, funnel_state);
3097 return (_err);
3098 }
3099 }
3100 }
3101 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3102
3103 if (!thread_safe) {
3104 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3105 unlock_fsnode(vp, NULL);
3106 }
3107 (void) thread_funnel_set(kernel_flock, funnel_state);
3108 }
3109 return (_err);
3110 }
3111
3112
3113 #if 0
3114 /*
3115 *#
3116 *#% ioctl vp U U U
3117 *#
3118 */
3119 struct vnop_ioctl_args {
3120 struct vnodeop_desc *a_desc;
3121 vnode_t a_vp;
3122 u_long a_command;
3123 caddr_t a_data;
3124 int a_fflag;
3125 vfs_context_t a_context;
3126 };
3127 #endif /* 0*/
3128 errno_t
3129 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3130 {
3131 int _err;
3132 struct vnop_ioctl_args a;
3133 int thread_safe;
3134 int funnel_state = 0;
3135
3136 if (ctx == NULL) {
3137 ctx = vfs_context_current();
3138 }
3139
3140 if (vfs_context_is64bit(ctx)) {
3141 if (!vnode_vfs64bitready(vp)) {
3142 return(ENOTTY);
3143 }
3144 }
3145
3146 a.a_desc = &vnop_ioctl_desc;
3147 a.a_vp = vp;
3148 a.a_command = command;
3149 a.a_data = data;
3150 a.a_fflag = fflag;
3151 a.a_context= ctx;
3152 thread_safe = THREAD_SAFE_FS(vp);
3153
3154 if (!thread_safe) {
3155 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3156 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3157 if ( (_err = lock_fsnode(vp, NULL)) ) {
3158 (void) thread_funnel_set(kernel_flock, funnel_state);
3159 return (_err);
3160 }
3161 }
3162 }
3163 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
3164 if (!thread_safe) {
3165 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3166 unlock_fsnode(vp, NULL);
3167 }
3168 (void) thread_funnel_set(kernel_flock, funnel_state);
3169 }
3170 return (_err);
3171 }
3172
3173
3174 #if 0
3175 /*
3176 *#
3177 *#% select vp U U U
3178 *#
3179 */
3180 struct vnop_select_args {
3181 struct vnodeop_desc *a_desc;
3182 vnode_t a_vp;
3183 int a_which;
3184 int a_fflags;
3185 void *a_wql;
3186 vfs_context_t a_context;
3187 };
3188 #endif /* 0*/
3189 errno_t
3190 VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx)
3191 {
3192 int _err;
3193 struct vnop_select_args a;
3194 int thread_safe;
3195 int funnel_state = 0;
3196
3197 if (ctx == NULL) {
3198 ctx = vfs_context_current();
3199 }
3200 a.a_desc = &vnop_select_desc;
3201 a.a_vp = vp;
3202 a.a_which = which;
3203 a.a_fflags = fflags;
3204 a.a_context = ctx;
3205 a.a_wql = wql;
3206 thread_safe = THREAD_SAFE_FS(vp);
3207
3208 if (!thread_safe) {
3209 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3210 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3211 if ( (_err = lock_fsnode(vp, NULL)) ) {
3212 (void) thread_funnel_set(kernel_flock, funnel_state);
3213 return (_err);
3214 }
3215 }
3216 }
3217 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3218 if (!thread_safe) {
3219 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
3220 unlock_fsnode(vp, NULL);
3221 }
3222 (void) thread_funnel_set(kernel_flock, funnel_state);
3223 }
3224 return (_err);
3225 }
3226
3227
3228 #if 0
3229 /*
3230 *#
3231 *#% exchange fvp L L L
3232 *#% exchange tvp L L L
3233 *#
3234 */
3235 struct vnop_exchange_args {
3236 struct vnodeop_desc *a_desc;
3237 vnode_t a_fvp;
3238 vnode_t a_tvp;
3239 int a_options;
3240 vfs_context_t a_context;
3241 };
3242 #endif /* 0*/
3243 errno_t
3244 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3245 {
3246 int _err;
3247 struct vnop_exchange_args a;
3248 int thread_safe;
3249 int funnel_state = 0;
3250 vnode_t lock_first = NULL, lock_second = NULL;
3251
3252 a.a_desc = &vnop_exchange_desc;
3253 a.a_fvp = fvp;
3254 a.a_tvp = tvp;
3255 a.a_options = options;
3256 a.a_context = ctx;
3257 thread_safe = THREAD_SAFE_FS(fvp);
3258
3259 if (!thread_safe) {
3260 /*
3261 * Lock in vnode address order to avoid deadlocks
3262 */
3263 if (fvp < tvp) {
3264 lock_first = fvp;
3265 lock_second = tvp;
3266 } else {
3267 lock_first = tvp;
3268 lock_second = fvp;
3269 }
3270 if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
3271 return (_err);
3272 }
3273 if ( (_err = lock_fsnode(lock_second, NULL)) ) {
3274 unlock_fsnode(lock_first, &funnel_state);
3275 return (_err);
3276 }
3277 }
3278 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3279 if (!thread_safe) {
3280 unlock_fsnode(lock_second, NULL);
3281 unlock_fsnode(lock_first, &funnel_state);
3282 }
3283 return (_err);
3284 }
3285
3286
3287 #if 0
3288 /*
3289 *#
3290 *#% revoke vp U U U
3291 *#
3292 */
3293 struct vnop_revoke_args {
3294 struct vnodeop_desc *a_desc;
3295 vnode_t a_vp;
3296 int a_flags;
3297 vfs_context_t a_context;
3298 };
3299 #endif /* 0*/
3300 errno_t
3301 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
3302 {
3303 struct vnop_revoke_args a;
3304 int _err;
3305 int thread_safe;
3306 int funnel_state = 0;
3307
3308 a.a_desc = &vnop_revoke_desc;
3309 a.a_vp = vp;
3310 a.a_flags = flags;
3311 a.a_context = ctx;
3312 thread_safe = THREAD_SAFE_FS(vp);
3313
3314 if (!thread_safe) {
3315 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3316 }
3317 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3318 if (!thread_safe) {
3319 (void) thread_funnel_set(kernel_flock, funnel_state);
3320 }
3321 return (_err);
3322 }
3323
3324
3325 #if 0
3326 /*
3327 *#
3328 *# mmap - vp U U U
3329 *#
3330 */
3331 struct vnop_mmap_args {
3332 struct vnodeop_desc *a_desc;
3333 vnode_t a_vp;
3334 int a_fflags;
3335 vfs_context_t a_context;
3336 };
3337 #endif /* 0*/
3338 errno_t
3339 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
3340 {
3341 int _err;
3342 struct vnop_mmap_args a;
3343 int thread_safe;
3344 int funnel_state = 0;
3345
3346 a.a_desc = &vnop_mmap_desc;
3347 a.a_vp = vp;
3348 a.a_fflags = fflags;
3349 a.a_context = ctx;
3350 thread_safe = THREAD_SAFE_FS(vp);
3351
3352 if (!thread_safe) {
3353 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3354 return (_err);
3355 }
3356 }
3357 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3358 if (!thread_safe) {
3359 unlock_fsnode(vp, &funnel_state);
3360 }
3361 return (_err);
3362 }
3363
3364
3365 #if 0
3366 /*
3367 *#
3368 *# mnomap - vp U U U
3369 *#
3370 */
3371 struct vnop_mnomap_args {
3372 struct vnodeop_desc *a_desc;
3373 vnode_t a_vp;
3374 vfs_context_t a_context;
3375 };
3376 #endif /* 0*/
3377 errno_t
3378 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
3379 {
3380 int _err;
3381 struct vnop_mnomap_args a;
3382 int thread_safe;
3383 int funnel_state = 0;
3384
3385 a.a_desc = &vnop_mnomap_desc;
3386 a.a_vp = vp;
3387 a.a_context = ctx;
3388 thread_safe = THREAD_SAFE_FS(vp);
3389
3390 if (!thread_safe) {
3391 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3392 return (_err);
3393 }
3394 }
3395 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3396 if (!thread_safe) {
3397 unlock_fsnode(vp, &funnel_state);
3398 }
3399 return (_err);
3400 }
3401
3402
3403 #if 0
3404 /*
3405 *#
3406 *#% fsync vp L L L
3407 *#
3408 */
3409 struct vnop_fsync_args {
3410 struct vnodeop_desc *a_desc;
3411 vnode_t a_vp;
3412 int a_waitfor;
3413 vfs_context_t a_context;
3414 };
3415 #endif /* 0*/
3416 errno_t
3417 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
3418 {
3419 struct vnop_fsync_args a;
3420 int _err;
3421 int thread_safe;
3422 int funnel_state = 0;
3423
3424 a.a_desc = &vnop_fsync_desc;
3425 a.a_vp = vp;
3426 a.a_waitfor = waitfor;
3427 a.a_context = ctx;
3428 thread_safe = THREAD_SAFE_FS(vp);
3429
3430 if (!thread_safe) {
3431 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3432 return (_err);
3433 }
3434 }
3435 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3436 if (!thread_safe) {
3437 unlock_fsnode(vp, &funnel_state);
3438 }
3439 return (_err);
3440 }
3441
3442
3443 #if 0
3444 /*
3445 *#
3446 *#% remove dvp L U U
3447 *#% remove vp L U U
3448 *#
3449 */
3450 struct vnop_remove_args {
3451 struct vnodeop_desc *a_desc;
3452 vnode_t a_dvp;
3453 vnode_t a_vp;
3454 struct componentname *a_cnp;
3455 int a_flags;
3456 vfs_context_t a_context;
3457 };
3458 #endif /* 0*/
3459 errno_t
3460 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
3461 {
3462 int _err;
3463 struct vnop_remove_args a;
3464 int thread_safe;
3465 int funnel_state = 0;
3466
3467 a.a_desc = &vnop_remove_desc;
3468 a.a_dvp = dvp;
3469 a.a_vp = vp;
3470 a.a_cnp = cnp;
3471 a.a_flags = flags;
3472 a.a_context = ctx;
3473 thread_safe = THREAD_SAFE_FS(dvp);
3474
3475 if (!thread_safe) {
3476 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3477 return (_err);
3478 }
3479 }
3480 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3481
3482 if (_err == 0) {
3483 vnode_setneedinactive(vp);
3484
3485 if ( !(NATIVE_XATTR(dvp)) ) {
3486 /*
3487 * Remove any associated extended attribute file (._ AppleDouble file).
3488 */
3489 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 1);
3490 }
3491 }
3492 if (!thread_safe) {
3493 unlock_fsnode(vp, &funnel_state);
3494 }
3495 return (_err);
3496 }
3497
3498
3499 #if 0
3500 /*
3501 *#
3502 *#% link vp U U U
3503 *#% link tdvp L U U
3504 *#
3505 */
3506 struct vnop_link_args {
3507 struct vnodeop_desc *a_desc;
3508 vnode_t a_vp;
3509 vnode_t a_tdvp;
3510 struct componentname *a_cnp;
3511 vfs_context_t a_context;
3512 };
3513 #endif /* 0*/
3514 errno_t
3515 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
3516 {
3517 int _err;
3518 struct vnop_link_args a;
3519 int thread_safe;
3520 int funnel_state = 0;
3521
3522 /*
3523 * For file systems with non-native extended attributes,
3524 * disallow linking to an existing "._" Apple Double file.
3525 */
3526 if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
3527 const char *vname;
3528
3529 vname = vnode_getname(vp);
3530 if (vname != NULL) {
3531 _err = 0;
3532 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
3533 _err = EPERM;
3534 }
3535 vnode_putname(vname);
3536 if (_err)
3537 return (_err);
3538 }
3539 }
3540 a.a_desc = &vnop_link_desc;
3541 a.a_vp = vp;
3542 a.a_tdvp = tdvp;
3543 a.a_cnp = cnp;
3544 a.a_context = ctx;
3545 thread_safe = THREAD_SAFE_FS(vp);
3546
3547 if (!thread_safe) {
3548 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3549 return (_err);
3550 }
3551 }
3552 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
3553 if (!thread_safe) {
3554 unlock_fsnode(vp, &funnel_state);
3555 }
3556 return (_err);
3557 }
3558
3559
3560 #if 0
3561 /*
3562 *#
3563 *#% rename fdvp U U U
3564 *#% rename fvp U U U
3565 *#% rename tdvp L U U
3566 *#% rename tvp X U U
3567 *#
3568 */
3569 struct vnop_rename_args {
3570 struct vnodeop_desc *a_desc;
3571 vnode_t a_fdvp;
3572 vnode_t a_fvp;
3573 struct componentname *a_fcnp;
3574 vnode_t a_tdvp;
3575 vnode_t a_tvp;
3576 struct componentname *a_tcnp;
3577 vfs_context_t a_context;
3578 };
3579 #endif /* 0*/
3580 errno_t
3581 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
3582 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
3583 vfs_context_t ctx)
3584 {
3585 int _err;
3586 struct vnop_rename_args a;
3587 int funnel_state = 0;
3588 char smallname1[48];
3589 char smallname2[48];
3590 char *xfromname = NULL;
3591 char *xtoname = NULL;
3592 vnode_t lock_first = NULL, lock_second = NULL;
3593 vnode_t fdvp_unsafe = NULLVP;
3594 vnode_t tdvp_unsafe = NULLVP;
3595
3596 a.a_desc = &vnop_rename_desc;
3597 a.a_fdvp = fdvp;
3598 a.a_fvp = fvp;
3599 a.a_fcnp = fcnp;
3600 a.a_tdvp = tdvp;
3601 a.a_tvp = tvp;
3602 a.a_tcnp = tcnp;
3603 a.a_context = ctx;
3604
3605 if (!THREAD_SAFE_FS(fdvp))
3606 fdvp_unsafe = fdvp;
3607 if (!THREAD_SAFE_FS(tdvp))
3608 tdvp_unsafe = tdvp;
3609
3610 if (fdvp_unsafe != NULLVP) {
3611 /*
3612 * Lock parents in vnode address order to avoid deadlocks
3613 * note that it's possible for the fdvp to be unsafe,
3614 * but the tdvp to be safe because tvp could be a directory
3615 * in the root of a filesystem... in that case, tdvp is the
3616 * in the filesystem that this root is mounted on
3617 */
3618 if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
3619 lock_first = fdvp_unsafe;
3620 lock_second = NULL;
3621 } else if (fdvp_unsafe < tdvp_unsafe) {
3622 lock_first = fdvp_unsafe;
3623 lock_second = tdvp_unsafe;
3624 } else {
3625 lock_first = tdvp_unsafe;
3626 lock_second = fdvp_unsafe;
3627 }
3628 if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
3629 return (_err);
3630
3631 if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
3632 unlock_fsnode(lock_first, &funnel_state);
3633 return (_err);
3634 }
3635
3636 /*
3637 * Lock both children in vnode address order to avoid deadlocks
3638 */
3639 if (tvp == NULL || tvp == fvp) {
3640 lock_first = fvp;
3641 lock_second = NULL;
3642 } else if (fvp < tvp) {
3643 lock_first = fvp;
3644 lock_second = tvp;
3645 } else {
3646 lock_first = tvp;
3647 lock_second = fvp;
3648 }
3649 if ( (_err = lock_fsnode(lock_first, NULL)) )
3650 goto out1;
3651
3652 if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
3653 unlock_fsnode(lock_first, NULL);
3654 goto out1;
3655 }
3656 }
3657 /*
3658 * Save source and destination names (._ AppleDouble files).
3659 * Skip if source already has a "._" prefix.
3660 */
3661 if (!NATIVE_XATTR(fdvp) &&
3662 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
3663 size_t len;
3664
3665 /* Get source attribute file name. */
3666 len = fcnp->cn_namelen + 3;
3667 if (len > sizeof(smallname1)) {
3668 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
3669 } else {
3670 xfromname = &smallname1[0];
3671 }
3672 strlcpy(xfromname, "._", min(sizeof smallname1, len));
3673 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
3674 xfromname[len-1] = '\0';
3675
3676 /* Get destination attribute file name. */
3677 len = tcnp->cn_namelen + 3;
3678 if (len > sizeof(smallname2)) {
3679 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
3680 } else {
3681 xtoname = &smallname2[0];
3682 }
3683 strlcpy(xtoname, "._", min(sizeof smallname2, len));
3684 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
3685 xtoname[len-1] = '\0';
3686 }
3687
3688 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
3689
3690 if (fdvp_unsafe != NULLVP) {
3691 if (lock_second != NULL)
3692 unlock_fsnode(lock_second, NULL);
3693 unlock_fsnode(lock_first, NULL);
3694 }
3695 if (_err == 0) {
3696 if (tvp && tvp != fvp)
3697 vnode_setneedinactive(tvp);
3698 }
3699
3700 /*
3701 * Rename any associated extended attribute file (._ AppleDouble file).
3702 */
3703 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
3704 struct nameidata fromnd, tond;
3705 int killdest = 0;
3706 int error;
3707
3708 /*
3709 * Get source attribute file vnode.
3710 * Note that fdvp already has an iocount reference and
3711 * using DELETE will take an additional reference.
3712 */
3713 NDINIT(&fromnd, DELETE, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
3714 CAST_USER_ADDR_T(xfromname), ctx);
3715 fromnd.ni_dvp = fdvp;
3716 error = namei(&fromnd);
3717
3718 if (error) {
3719 /* When source doesn't exist there still may be a destination. */
3720 if (error == ENOENT) {
3721 killdest = 1;
3722 } else {
3723 goto out;
3724 }
3725 } else if (fromnd.ni_vp->v_type != VREG) {
3726 vnode_put(fromnd.ni_vp);
3727 nameidone(&fromnd);
3728 killdest = 1;
3729 }
3730 if (killdest) {
3731 struct vnop_remove_args args;
3732
3733 /*
3734 * Get destination attribute file vnode.
3735 * Note that tdvp already has an iocount reference.
3736 */
3737 NDINIT(&tond, DELETE, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
3738 CAST_USER_ADDR_T(xtoname), ctx);
3739 tond.ni_dvp = tdvp;
3740 error = namei(&tond);
3741 if (error) {
3742 goto out;
3743 }
3744 if (tond.ni_vp->v_type != VREG) {
3745 vnode_put(tond.ni_vp);
3746 nameidone(&tond);
3747 goto out;
3748 }
3749 args.a_desc = &vnop_remove_desc;
3750 args.a_dvp = tdvp;
3751 args.a_vp = tond.ni_vp;
3752 args.a_cnp = &tond.ni_cnd;
3753 args.a_context = ctx;
3754
3755 if (fdvp_unsafe != NULLVP)
3756 error = lock_fsnode(tond.ni_vp, NULL);
3757 if (error == 0) {
3758 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
3759
3760 if (fdvp_unsafe != NULLVP)
3761 unlock_fsnode(tond.ni_vp, NULL);
3762
3763 if (error == 0)
3764 vnode_setneedinactive(tond.ni_vp);
3765 }
3766 vnode_put(tond.ni_vp);
3767 nameidone(&tond);
3768 goto out;
3769 }
3770
3771 /*
3772 * Get destination attribute file vnode.
3773 */
3774 NDINIT(&tond, RENAME,
3775 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
3776 CAST_USER_ADDR_T(xtoname), ctx);
3777 tond.ni_dvp = tdvp;
3778 error = namei(&tond);
3779
3780 if (error) {
3781 vnode_put(fromnd.ni_vp);
3782 nameidone(&fromnd);
3783 goto out;
3784 }
3785 a.a_desc = &vnop_rename_desc;
3786 a.a_fdvp = fdvp;
3787 a.a_fvp = fromnd.ni_vp;
3788 a.a_fcnp = &fromnd.ni_cnd;
3789 a.a_tdvp = tdvp;
3790 a.a_tvp = tond.ni_vp;
3791 a.a_tcnp = &tond.ni_cnd;
3792 a.a_context = ctx;
3793
3794 if (fdvp_unsafe != NULLVP) {
3795 /*
3796 * Lock in vnode address order to avoid deadlocks
3797 */
3798 if (tond.ni_vp == NULL || tond.ni_vp == fromnd.ni_vp) {
3799 lock_first = fromnd.ni_vp;
3800 lock_second = NULL;
3801 } else if (fromnd.ni_vp < tond.ni_vp) {
3802 lock_first = fromnd.ni_vp;
3803 lock_second = tond.ni_vp;
3804 } else {
3805 lock_first = tond.ni_vp;
3806 lock_second = fromnd.ni_vp;
3807 }
3808 if ( (error = lock_fsnode(lock_first, NULL)) == 0) {
3809 if (lock_second != NULL && (error = lock_fsnode(lock_second, NULL)) )
3810 unlock_fsnode(lock_first, NULL);
3811 }
3812 }
3813 if (error == 0) {
3814 const char *oname;
3815 vnode_t oparent;
3816
3817 /* Save these off so we can later verify them (fix up below) */
3818 oname = fromnd.ni_vp->v_name;
3819 oparent = fromnd.ni_vp->v_parent;
3820
3821 error = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
3822
3823 if (fdvp_unsafe != NULLVP) {
3824 if (lock_second != NULL)
3825 unlock_fsnode(lock_second, NULL);
3826 unlock_fsnode(lock_first, NULL);
3827 }
3828 if (error == 0) {
3829 vnode_setneedinactive(fromnd.ni_vp);
3830
3831 if (tond.ni_vp && tond.ni_vp != fromnd.ni_vp)
3832 vnode_setneedinactive(tond.ni_vp);
3833 /*
3834 * Fix up name & parent pointers on ._ file
3835 */
3836 if (oname == fromnd.ni_vp->v_name &&
3837 oparent == fromnd.ni_vp->v_parent) {
3838 int update_flags;
3839
3840 update_flags = VNODE_UPDATE_NAME;
3841
3842 if (fdvp != tdvp)
3843 update_flags |= VNODE_UPDATE_PARENT;
3844
3845 vnode_update_identity(fromnd.ni_vp, tdvp,
3846 tond.ni_cnd.cn_nameptr,
3847 tond.ni_cnd.cn_namelen,
3848 tond.ni_cnd.cn_hash,
3849 update_flags);
3850 }
3851 }
3852 }
3853 vnode_put(fromnd.ni_vp);
3854 if (tond.ni_vp) {
3855 vnode_put(tond.ni_vp);
3856 }
3857 nameidone(&tond);
3858 nameidone(&fromnd);
3859 }
3860 out:
3861 if (xfromname && xfromname != &smallname1[0]) {
3862 FREE(xfromname, M_TEMP);
3863 }
3864 if (xtoname && xtoname != &smallname2[0]) {
3865 FREE(xtoname, M_TEMP);
3866 }
3867 out1:
3868 if (fdvp_unsafe != NULLVP) {
3869 if (tdvp_unsafe != NULLVP)
3870 unlock_fsnode(tdvp_unsafe, NULL);
3871 unlock_fsnode(fdvp_unsafe, &funnel_state);
3872 }
3873 return (_err);
3874 }
3875
3876 #if 0
3877 /*
3878 *#
3879 *#% mkdir dvp L U U
3880 *#% mkdir vpp - L -
3881 *#
3882 */
3883 struct vnop_mkdir_args {
3884 struct vnodeop_desc *a_desc;
3885 vnode_t a_dvp;
3886 vnode_t *a_vpp;
3887 struct componentname *a_cnp;
3888 struct vnode_attr *a_vap;
3889 vfs_context_t a_context;
3890 };
3891 #endif /* 0*/
3892 errno_t
3893 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
3894 struct vnode_attr *vap, vfs_context_t ctx)
3895 {
3896 int _err;
3897 struct vnop_mkdir_args a;
3898 int thread_safe;
3899 int funnel_state = 0;
3900
3901 a.a_desc = &vnop_mkdir_desc;
3902 a.a_dvp = dvp;
3903 a.a_vpp = vpp;
3904 a.a_cnp = cnp;
3905 a.a_vap = vap;
3906 a.a_context = ctx;
3907 thread_safe = THREAD_SAFE_FS(dvp);
3908
3909 if (!thread_safe) {
3910 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3911 return (_err);
3912 }
3913 }
3914 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
3915 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3916 /*
3917 * Remove stale Apple Double file (if any).
3918 */
3919 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 0);
3920 }
3921 if (!thread_safe) {
3922 unlock_fsnode(dvp, &funnel_state);
3923 }
3924 return (_err);
3925 }
3926
3927
3928 #if 0
3929 /*
3930 *#
3931 *#% rmdir dvp L U U
3932 *#% rmdir vp L U U
3933 *#
3934 */
3935 struct vnop_rmdir_args {
3936 struct vnodeop_desc *a_desc;
3937 vnode_t a_dvp;
3938 vnode_t a_vp;
3939 struct componentname *a_cnp;
3940 vfs_context_t a_context;
3941 };
3942
3943 #endif /* 0*/
3944 errno_t
3945 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
3946 {
3947 int _err;
3948 struct vnop_rmdir_args a;
3949 int thread_safe;
3950 int funnel_state = 0;
3951
3952 a.a_desc = &vnop_rmdir_desc;
3953 a.a_dvp = dvp;
3954 a.a_vp = vp;
3955 a.a_cnp = cnp;
3956 a.a_context = ctx;
3957 thread_safe = THREAD_SAFE_FS(dvp);
3958
3959 if (!thread_safe) {
3960 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3961 return (_err);
3962 }
3963 }
3964 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
3965
3966 if (_err == 0) {
3967 vnode_setneedinactive(vp);
3968
3969 if ( !(NATIVE_XATTR(dvp)) ) {
3970 /*
3971 * Remove any associated extended attribute file (._ AppleDouble file).
3972 */
3973 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 1);
3974 }
3975 }
3976 if (!thread_safe) {
3977 unlock_fsnode(vp, &funnel_state);
3978 }
3979 return (_err);
3980 }
3981
3982 /*
3983 * Remove a ._ AppleDouble file
3984 */
3985 #define AD_STALE_SECS (180)
3986 static void
3987 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int thread_safe, int force) {
3988 vnode_t xvp;
3989 struct nameidata nd;
3990 char smallname[64];
3991 char *filename = NULL;
3992 size_t len;
3993
3994 if ((basename == NULL) || (basename[0] == '\0') ||
3995 (basename[0] == '.' && basename[1] == '_')) {
3996 return;
3997 }
3998 filename = &smallname[0];
3999 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4000 if (len >= sizeof(smallname)) {
4001 len++; /* snprintf result doesn't include '\0' */
4002 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4003 len = snprintf(filename, len, "._%s", basename);
4004 }
4005 NDINIT(&nd, DELETE, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
4006 CAST_USER_ADDR_T(filename), ctx);
4007 nd.ni_dvp = dvp;
4008 if (namei(&nd) != 0)
4009 goto out2;
4010
4011 xvp = nd.ni_vp;
4012 nameidone(&nd);
4013 if (xvp->v_type != VREG)
4014 goto out1;
4015
4016 /*
4017 * When creating a new object and a "._" file already
4018 * exists, check to see if its a stale "._" file.
4019 *
4020 */
4021 if (!force) {
4022 struct vnode_attr va;
4023
4024 VATTR_INIT(&va);
4025 VATTR_WANTED(&va, va_data_size);
4026 VATTR_WANTED(&va, va_modify_time);
4027 if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
4028 VATTR_IS_SUPPORTED(&va, va_data_size) &&
4029 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
4030 va.va_data_size != 0) {
4031 struct timeval tv;
4032
4033 microtime(&tv);
4034 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
4035 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
4036 force = 1; /* must be stale */
4037 }
4038 }
4039 }
4040 if (force) {
4041 struct vnop_remove_args a;
4042 int error;
4043
4044 a.a_desc = &vnop_remove_desc;
4045 a.a_dvp = nd.ni_dvp;
4046 a.a_vp = xvp;
4047 a.a_cnp = &nd.ni_cnd;
4048 a.a_context = ctx;
4049
4050 if (!thread_safe) {
4051 if ( (lock_fsnode(xvp, NULL)) )
4052 goto out1;
4053 }
4054 error = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
4055
4056 if (!thread_safe)
4057 unlock_fsnode(xvp, NULL);
4058
4059 if (error == 0)
4060 vnode_setneedinactive(xvp);
4061 }
4062 out1:
4063 vnode_put(dvp);
4064 vnode_put(xvp);
4065 out2:
4066 if (filename && filename != &smallname[0]) {
4067 FREE(filename, M_TEMP);
4068 }
4069 }
4070
4071 /*
4072 * Shadow uid/gid/mod to a ._ AppleDouble file
4073 */
4074 static void
4075 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
4076 vfs_context_t ctx, int thread_safe) {
4077 vnode_t xvp;
4078 struct nameidata nd;
4079 char smallname[64];
4080 char *filename = NULL;
4081 size_t len;
4082
4083 if ((dvp == NULLVP) ||
4084 (basename == NULL) || (basename[0] == '\0') ||
4085 (basename[0] == '.' && basename[1] == '_')) {
4086 return;
4087 }
4088 filename = &smallname[0];
4089 len = snprintf(filename, sizeof(smallname), "._%s", basename);
4090 if (len >= sizeof(smallname)) {
4091 len++; /* snprintf result doesn't include '\0' */
4092 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
4093 len = snprintf(filename, len, "._%s", basename);
4094 }
4095 NDINIT(&nd, LOOKUP, NOFOLLOW | USEDVP, UIO_SYSSPACE,
4096 CAST_USER_ADDR_T(filename), ctx);
4097 nd.ni_dvp = dvp;
4098 if (namei(&nd) != 0)
4099 goto out2;
4100
4101 xvp = nd.ni_vp;
4102 nameidone(&nd);
4103
4104 if (xvp->v_type == VREG) {
4105 struct vnop_setattr_args a;
4106
4107 a.a_desc = &vnop_setattr_desc;
4108 a.a_vp = xvp;
4109 a.a_vap = vap;
4110 a.a_context = ctx;
4111
4112 if (!thread_safe) {
4113 if ( (lock_fsnode(xvp, NULL)) )
4114 goto out1;
4115 }
4116 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
4117 if (!thread_safe) {
4118 unlock_fsnode(xvp, NULL);
4119 }
4120 }
4121 out1:
4122 vnode_put(xvp);
4123 out2:
4124 if (filename && filename != &smallname[0]) {
4125 FREE(filename, M_TEMP);
4126 }
4127 }
4128
4129 #if 0
4130 /*
4131 *#
4132 *#% symlink dvp L U U
4133 *#% symlink vpp - U -
4134 *#
4135 */
4136 struct vnop_symlink_args {
4137 struct vnodeop_desc *a_desc;
4138 vnode_t a_dvp;
4139 vnode_t *a_vpp;
4140 struct componentname *a_cnp;
4141 struct vnode_attr *a_vap;
4142 char *a_target;
4143 vfs_context_t a_context;
4144 };
4145
4146 #endif /* 0*/
4147 errno_t
4148 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4149 struct vnode_attr *vap, char *target, vfs_context_t ctx)
4150 {
4151 int _err;
4152 struct vnop_symlink_args a;
4153 int thread_safe;
4154 int funnel_state = 0;
4155
4156 a.a_desc = &vnop_symlink_desc;
4157 a.a_dvp = dvp;
4158 a.a_vpp = vpp;
4159 a.a_cnp = cnp;
4160 a.a_vap = vap;
4161 a.a_target = target;
4162 a.a_context = ctx;
4163 thread_safe = THREAD_SAFE_FS(dvp);
4164
4165 if (!thread_safe) {
4166 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
4167 return (_err);
4168 }
4169 }
4170 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
4171 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4172 /*
4173 * Remove stale Apple Double file (if any).
4174 */
4175 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 0);
4176 }
4177 if (!thread_safe) {
4178 unlock_fsnode(dvp, &funnel_state);
4179 }
4180 return (_err);
4181 }
4182
4183 #if 0
4184 /*
4185 *#
4186 *#% readdir vp L L L
4187 *#
4188 */
4189 struct vnop_readdir_args {
4190 struct vnodeop_desc *a_desc;
4191 vnode_t a_vp;
4192 struct uio *a_uio;
4193 int a_flags;
4194 int *a_eofflag;
4195 int *a_numdirent;
4196 vfs_context_t a_context;
4197 };
4198
4199 #endif /* 0*/
4200 errno_t
4201 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
4202 int *numdirent, vfs_context_t ctx)
4203 {
4204 int _err;
4205 struct vnop_readdir_args a;
4206 int thread_safe;
4207 int funnel_state = 0;
4208
4209 a.a_desc = &vnop_readdir_desc;
4210 a.a_vp = vp;
4211 a.a_uio = uio;
4212 a.a_flags = flags;
4213 a.a_eofflag = eofflag;
4214 a.a_numdirent = numdirent;
4215 a.a_context = ctx;
4216 thread_safe = THREAD_SAFE_FS(vp);
4217
4218 if (!thread_safe) {
4219 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4220 return (_err);
4221 }
4222 }
4223 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
4224 if (!thread_safe) {
4225 unlock_fsnode(vp, &funnel_state);
4226 }
4227 return (_err);
4228 }
4229
4230 #if 0
4231 /*
4232 *#
4233 *#% readdirattr vp L L L
4234 *#
4235 */
4236 struct vnop_readdirattr_args {
4237 struct vnodeop_desc *a_desc;
4238 vnode_t a_vp;
4239 struct attrlist *a_alist;
4240 struct uio *a_uio;
4241 u_long a_maxcount;
4242 u_long a_options;
4243 u_long *a_newstate;
4244 int *a_eofflag;
4245 u_long *a_actualcount;
4246 vfs_context_t a_context;
4247 };
4248
4249 #endif /* 0*/
4250 errno_t
4251 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_long maxcount,
4252 u_long options, u_long *newstate, int *eofflag, u_long *actualcount, vfs_context_t ctx)
4253 {
4254 int _err;
4255 struct vnop_readdirattr_args a;
4256 int thread_safe;
4257 int funnel_state = 0;
4258
4259 a.a_desc = &vnop_readdirattr_desc;
4260 a.a_vp = vp;
4261 a.a_alist = alist;
4262 a.a_uio = uio;
4263 a.a_maxcount = maxcount;
4264 a.a_options = options;
4265 a.a_newstate = newstate;
4266 a.a_eofflag = eofflag;
4267 a.a_actualcount = actualcount;
4268 a.a_context = ctx;
4269 thread_safe = THREAD_SAFE_FS(vp);
4270
4271 if (!thread_safe) {
4272 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4273 return (_err);
4274 }
4275 }
4276 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
4277 if (!thread_safe) {
4278 unlock_fsnode(vp, &funnel_state);
4279 }
4280 return (_err);
4281 }
4282
4283 #if 0
4284 /*
4285 *#
4286 *#% readlink vp L L L
4287 *#
4288 */
4289 struct vnop_readlink_args {
4290 struct vnodeop_desc *a_desc;
4291 vnode_t a_vp;
4292 struct uio *a_uio;
4293 vfs_context_t a_context;
4294 };
4295 #endif /* 0 */
4296
4297 /*
4298 * Returns: 0 Success
4299 * lock_fsnode:ENOENT No such file or directory [only for VFS
4300 * that is not thread safe & vnode is
4301 * currently being/has been terminated]
4302 * <vfs_readlink>:EINVAL
4303 * <vfs_readlink>:???
4304 *
4305 * Note: The return codes from the underlying VFS's readlink routine
4306 * can't be fully enumerated here, since third party VFS authors
4307 * may not limit their error returns to the ones documented here,
4308 * even though this may result in some programs functioning
4309 * incorrectly.
4310 *
4311 * The return codes documented above are those which may currently
4312 * be returned by HFS from hfs_vnop_readlink, not including
4313 * additional error code which may be propagated from underlying
4314 * routines.
4315 */
4316 errno_t
4317 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
4318 {
4319 int _err;
4320 struct vnop_readlink_args a;
4321 int thread_safe;
4322 int funnel_state = 0;
4323
4324 a.a_desc = &vnop_readlink_desc;
4325 a.a_vp = vp;
4326 a.a_uio = uio;
4327 a.a_context = ctx;
4328 thread_safe = THREAD_SAFE_FS(vp);
4329
4330 if (!thread_safe) {
4331 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4332 return (_err);
4333 }
4334 }
4335 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
4336 if (!thread_safe) {
4337 unlock_fsnode(vp, &funnel_state);
4338 }
4339 return (_err);
4340 }
4341
4342 #if 0
4343 /*
4344 *#
4345 *#% inactive vp L U U
4346 *#
4347 */
4348 struct vnop_inactive_args {
4349 struct vnodeop_desc *a_desc;
4350 vnode_t a_vp;
4351 vfs_context_t a_context;
4352 };
4353 #endif /* 0*/
4354 errno_t
4355 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
4356 {
4357 int _err;
4358 struct vnop_inactive_args a;
4359 int thread_safe;
4360 int funnel_state = 0;
4361
4362 a.a_desc = &vnop_inactive_desc;
4363 a.a_vp = vp;
4364 a.a_context = ctx;
4365 thread_safe = THREAD_SAFE_FS(vp);
4366
4367 if (!thread_safe) {
4368 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4369 return (_err);
4370 }
4371 }
4372 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
4373 if (!thread_safe) {
4374 unlock_fsnode(vp, &funnel_state);
4375 }
4376
4377 #if NAMEDSTREAMS
4378 /* For file systems that do not support namedstreams natively, mark
4379 * the shadow stream file vnode to be recycled as soon as the last
4380 * reference goes away. To avoid re-entering reclaim code, do not
4381 * call recycle on terminating named stream vnodes.
4382 */
4383 if (vnode_isnamedstream(vp) &&
4384 (vp->v_parent != NULLVP) &&
4385 (vnode_isshadow(vp)) &&
4386 ((vp->v_lflag & VL_TERMINATE) == 0)) {
4387 vnode_recycle(vp);
4388 }
4389 #endif
4390
4391 return (_err);
4392 }
4393
4394
4395 #if 0
4396 /*
4397 *#
4398 *#% reclaim vp U U U
4399 *#
4400 */
4401 struct vnop_reclaim_args {
4402 struct vnodeop_desc *a_desc;
4403 vnode_t a_vp;
4404 vfs_context_t a_context;
4405 };
4406 #endif /* 0*/
4407 errno_t
4408 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
4409 {
4410 int _err;
4411 struct vnop_reclaim_args a;
4412 int thread_safe;
4413 int funnel_state = 0;
4414
4415 a.a_desc = &vnop_reclaim_desc;
4416 a.a_vp = vp;
4417 a.a_context = ctx;
4418 thread_safe = THREAD_SAFE_FS(vp);
4419
4420 if (!thread_safe) {
4421 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4422 }
4423 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
4424 if (!thread_safe) {
4425 (void) thread_funnel_set(kernel_flock, funnel_state);
4426 }
4427 return (_err);
4428 }
4429
4430
4431 /*
4432 * Returns: 0 Success
4433 * lock_fsnode:ENOENT No such file or directory [only for VFS
4434 * that is not thread safe & vnode is
4435 * currently being/has been terminated]
4436 * <vnop_pathconf_desc>:??? [per FS implementation specific]
4437 */
4438 #if 0
4439 /*
4440 *#
4441 *#% pathconf vp L L L
4442 *#
4443 */
4444 struct vnop_pathconf_args {
4445 struct vnodeop_desc *a_desc;
4446 vnode_t a_vp;
4447 int a_name;
4448 register_t *a_retval;
4449 vfs_context_t a_context;
4450 };
4451 #endif /* 0*/
4452 errno_t
4453 VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t ctx)
4454 {
4455 int _err;
4456 struct vnop_pathconf_args a;
4457 int thread_safe;
4458 int funnel_state = 0;
4459
4460 a.a_desc = &vnop_pathconf_desc;
4461 a.a_vp = vp;
4462 a.a_name = name;
4463 a.a_retval = retval;
4464 a.a_context = ctx;
4465 thread_safe = THREAD_SAFE_FS(vp);
4466
4467 if (!thread_safe) {
4468 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4469 return (_err);
4470 }
4471 }
4472 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
4473 if (!thread_safe) {
4474 unlock_fsnode(vp, &funnel_state);
4475 }
4476 return (_err);
4477 }
4478
4479 /*
4480 * Returns: 0 Success
4481 * err_advlock:ENOTSUP
4482 * lf_advlock:???
4483 * <vnop_advlock_desc>:???
4484 *
4485 * Notes: VFS implementations of advisory locking using calls through
4486 * <vnop_advlock_desc> because lock enforcement does not occur
4487 * locally should try to limit themselves to the return codes
4488 * documented above for lf_advlock and err_advlock.
4489 */
4490 #if 0
4491 /*
4492 *#
4493 *#% advlock vp U U U
4494 *#
4495 */
4496 struct vnop_advlock_args {
4497 struct vnodeop_desc *a_desc;
4498 vnode_t a_vp;
4499 caddr_t a_id;
4500 int a_op;
4501 struct flock *a_fl;
4502 int a_flags;
4503 vfs_context_t a_context;
4504 };
4505 #endif /* 0*/
4506 errno_t
4507 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx)
4508 {
4509 int _err;
4510 struct vnop_advlock_args a;
4511 int thread_safe;
4512 int funnel_state = 0;
4513 struct uthread * uth;
4514
4515 a.a_desc = &vnop_advlock_desc;
4516 a.a_vp = vp;
4517 a.a_id = id;
4518 a.a_op = op;
4519 a.a_fl = fl;
4520 a.a_flags = flags;
4521 a.a_context = ctx;
4522 thread_safe = THREAD_SAFE_FS(vp);
4523
4524 uth = get_bsdthread_info(current_thread());
4525 if (!thread_safe) {
4526 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4527 }
4528 /* Disallow advisory locking on non-seekable vnodes */
4529 if (vnode_isfifo(vp)) {
4530 _err = err_advlock(&a);
4531 } else {
4532 if ((vp->v_flag & VLOCKLOCAL)) {
4533 /* Advisory locking done at this layer */
4534 _err = lf_advlock(&a);
4535 } else {
4536 /* Advisory locking done by underlying filesystem */
4537 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
4538 }
4539 }
4540 if (!thread_safe) {
4541 (void) thread_funnel_set(kernel_flock, funnel_state);
4542 }
4543 return (_err);
4544 }
4545
4546
4547
4548 #if 0
4549 /*
4550 *#
4551 *#% allocate vp L L L
4552 *#
4553 */
4554 struct vnop_allocate_args {
4555 struct vnodeop_desc *a_desc;
4556 vnode_t a_vp;
4557 off_t a_length;
4558 u_int32_t a_flags;
4559 off_t *a_bytesallocated;
4560 off_t a_offset;
4561 vfs_context_t a_context;
4562 };
4563
4564 #endif /* 0*/
4565 errno_t
4566 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
4567 {
4568 int _err;
4569 struct vnop_allocate_args a;
4570 int thread_safe;
4571 int funnel_state = 0;
4572
4573 a.a_desc = &vnop_allocate_desc;
4574 a.a_vp = vp;
4575 a.a_length = length;
4576 a.a_flags = flags;
4577 a.a_bytesallocated = bytesallocated;
4578 a.a_offset = offset;
4579 a.a_context = ctx;
4580 thread_safe = THREAD_SAFE_FS(vp);
4581
4582 if (!thread_safe) {
4583 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4584 return (_err);
4585 }
4586 }
4587 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
4588 if (!thread_safe) {
4589 unlock_fsnode(vp, &funnel_state);
4590 }
4591 return (_err);
4592 }
4593
4594 #if 0
4595 /*
4596 *#
4597 *#% pagein vp = = =
4598 *#
4599 */
4600 struct vnop_pagein_args {
4601 struct vnodeop_desc *a_desc;
4602 vnode_t a_vp;
4603 upl_t a_pl;
4604 vm_offset_t a_pl_offset;
4605 off_t a_f_offset;
4606 size_t a_size;
4607 int a_flags;
4608 vfs_context_t a_context;
4609 };
4610 #endif /* 0*/
4611 errno_t
4612 VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
4613 {
4614 int _err;
4615 struct vnop_pagein_args a;
4616 int thread_safe;
4617 int funnel_state = 0;
4618
4619 a.a_desc = &vnop_pagein_desc;
4620 a.a_vp = vp;
4621 a.a_pl = pl;
4622 a.a_pl_offset = pl_offset;
4623 a.a_f_offset = f_offset;
4624 a.a_size = size;
4625 a.a_flags = flags;
4626 a.a_context = ctx;
4627 thread_safe = THREAD_SAFE_FS(vp);
4628
4629 if (!thread_safe) {
4630 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4631 }
4632 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
4633 if (!thread_safe) {
4634 (void) thread_funnel_set(kernel_flock, funnel_state);
4635 }
4636 return (_err);
4637 }
4638
4639 #if 0
4640 /*
4641 *#
4642 *#% pageout vp = = =
4643 *#
4644 */
4645 struct vnop_pageout_args {
4646 struct vnodeop_desc *a_desc;
4647 vnode_t a_vp;
4648 upl_t a_pl;
4649 vm_offset_t a_pl_offset;
4650 off_t a_f_offset;
4651 size_t a_size;
4652 int a_flags;
4653 vfs_context_t a_context;
4654 };
4655
4656 #endif /* 0*/
4657 errno_t
4658 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
4659 {
4660 int _err;
4661 struct vnop_pageout_args a;
4662 int thread_safe;
4663 int funnel_state = 0;
4664
4665 a.a_desc = &vnop_pageout_desc;
4666 a.a_vp = vp;
4667 a.a_pl = pl;
4668 a.a_pl_offset = pl_offset;
4669 a.a_f_offset = f_offset;
4670 a.a_size = size;
4671 a.a_flags = flags;
4672 a.a_context = ctx;
4673 thread_safe = THREAD_SAFE_FS(vp);
4674
4675 if (!thread_safe) {
4676 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4677 }
4678 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
4679 if (!thread_safe) {
4680 (void) thread_funnel_set(kernel_flock, funnel_state);
4681 }
4682 return (_err);
4683 }
4684
4685
4686 #if 0
4687 /*
4688 *#
4689 *#% searchfs vp L L L
4690 *#
4691 */
4692 struct vnop_searchfs_args {
4693 struct vnodeop_desc *a_desc;
4694 vnode_t a_vp;
4695 void *a_searchparams1;
4696 void *a_searchparams2;
4697 struct attrlist *a_searchattrs;
4698 u_long a_maxmatches;
4699 struct timeval *a_timelimit;
4700 struct attrlist *a_returnattrs;
4701 u_long *a_nummatches;
4702 u_long a_scriptcode;
4703 u_long a_options;
4704 struct uio *a_uio;
4705 struct searchstate *a_searchstate;
4706 vfs_context_t a_context;
4707 };
4708
4709 #endif /* 0*/
4710 errno_t
4711 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, u_long maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, u_long *nummatches, u_long scriptcode, u_long options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
4712 {
4713 int _err;
4714 struct vnop_searchfs_args a;
4715 int thread_safe;
4716 int funnel_state = 0;
4717
4718 a.a_desc = &vnop_searchfs_desc;
4719 a.a_vp = vp;
4720 a.a_searchparams1 = searchparams1;
4721 a.a_searchparams2 = searchparams2;
4722 a.a_searchattrs = searchattrs;
4723 a.a_maxmatches = maxmatches;
4724 a.a_timelimit = timelimit;
4725 a.a_returnattrs = returnattrs;
4726 a.a_nummatches = nummatches;
4727 a.a_scriptcode = scriptcode;
4728 a.a_options = options;
4729 a.a_uio = uio;
4730 a.a_searchstate = searchstate;
4731 a.a_context = ctx;
4732 thread_safe = THREAD_SAFE_FS(vp);
4733
4734 if (!thread_safe) {
4735 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4736 return (_err);
4737 }
4738 }
4739 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
4740 if (!thread_safe) {
4741 unlock_fsnode(vp, &funnel_state);
4742 }
4743 return (_err);
4744 }
4745
4746 #if 0
4747 /*
4748 *#
4749 *#% copyfile fvp U U U
4750 *#% copyfile tdvp L U U
4751 *#% copyfile tvp X U U
4752 *#
4753 */
4754 struct vnop_copyfile_args {
4755 struct vnodeop_desc *a_desc;
4756 vnode_t a_fvp;
4757 vnode_t a_tdvp;
4758 vnode_t a_tvp;
4759 struct componentname *a_tcnp;
4760 int a_mode;
4761 int a_flags;
4762 vfs_context_t a_context;
4763 };
4764 #endif /* 0*/
4765 errno_t
4766 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4767 int mode, int flags, vfs_context_t ctx)
4768 {
4769 int _err;
4770 struct vnop_copyfile_args a;
4771 a.a_desc = &vnop_copyfile_desc;
4772 a.a_fvp = fvp;
4773 a.a_tdvp = tdvp;
4774 a.a_tvp = tvp;
4775 a.a_tcnp = tcnp;
4776 a.a_mode = mode;
4777 a.a_flags = flags;
4778 a.a_context = ctx;
4779 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
4780 return (_err);
4781 }
4782
4783 errno_t
4784 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
4785 {
4786 struct vnop_getxattr_args a;
4787 int error;
4788 int thread_safe;
4789 int funnel_state = 0;
4790
4791 a.a_desc = &vnop_getxattr_desc;
4792 a.a_vp = vp;
4793 a.a_name = name;
4794 a.a_uio = uio;
4795 a.a_size = size;
4796 a.a_options = options;
4797 a.a_context = ctx;
4798
4799 thread_safe = THREAD_SAFE_FS(vp);
4800 if (!thread_safe) {
4801 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4802 return (error);
4803 }
4804 }
4805 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
4806 if (!thread_safe) {
4807 unlock_fsnode(vp, &funnel_state);
4808 }
4809 return (error);
4810 }
4811
4812 errno_t
4813 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
4814 {
4815 struct vnop_setxattr_args a;
4816 int error;
4817 int thread_safe;
4818 int funnel_state = 0;
4819
4820 a.a_desc = &vnop_setxattr_desc;
4821 a.a_vp = vp;
4822 a.a_name = name;
4823 a.a_uio = uio;
4824 a.a_options = options;
4825 a.a_context = ctx;
4826
4827 thread_safe = THREAD_SAFE_FS(vp);
4828 if (!thread_safe) {
4829 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4830 return (error);
4831 }
4832 }
4833 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
4834 if (!thread_safe) {
4835 unlock_fsnode(vp, &funnel_state);
4836 }
4837 if (error == 0)
4838 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
4839 return (error);
4840 }
4841
4842 errno_t
4843 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
4844 {
4845 struct vnop_removexattr_args a;
4846 int error;
4847 int thread_safe;
4848 int funnel_state = 0;
4849
4850 a.a_desc = &vnop_removexattr_desc;
4851 a.a_vp = vp;
4852 a.a_name = name;
4853 a.a_options = options;
4854 a.a_context = ctx;
4855
4856 thread_safe = THREAD_SAFE_FS(vp);
4857 if (!thread_safe) {
4858 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4859 return (error);
4860 }
4861 }
4862 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
4863 if (!thread_safe) {
4864 unlock_fsnode(vp, &funnel_state);
4865 }
4866 return (error);
4867 }
4868
4869 errno_t
4870 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
4871 {
4872 struct vnop_listxattr_args a;
4873 int error;
4874 int thread_safe;
4875 int funnel_state = 0;
4876
4877 a.a_desc = &vnop_listxattr_desc;
4878 a.a_vp = vp;
4879 a.a_uio = uio;
4880 a.a_size = size;
4881 a.a_options = options;
4882 a.a_context = ctx;
4883
4884 thread_safe = THREAD_SAFE_FS(vp);
4885 if (!thread_safe) {
4886 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4887 return (error);
4888 }
4889 }
4890 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
4891 if (!thread_safe) {
4892 unlock_fsnode(vp, &funnel_state);
4893 }
4894 return (error);
4895 }
4896
4897
4898 #if 0
4899 /*
4900 *#
4901 *#% blktooff vp = = =
4902 *#
4903 */
4904 struct vnop_blktooff_args {
4905 struct vnodeop_desc *a_desc;
4906 vnode_t a_vp;
4907 daddr64_t a_lblkno;
4908 off_t *a_offset;
4909 };
4910 #endif /* 0*/
4911 errno_t
4912 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
4913 {
4914 int _err;
4915 struct vnop_blktooff_args a;
4916 int thread_safe;
4917 int funnel_state = 0;
4918
4919 a.a_desc = &vnop_blktooff_desc;
4920 a.a_vp = vp;
4921 a.a_lblkno = lblkno;
4922 a.a_offset = offset;
4923 thread_safe = THREAD_SAFE_FS(vp);
4924
4925 if (!thread_safe) {
4926 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4927 }
4928 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
4929 if (!thread_safe) {
4930 (void) thread_funnel_set(kernel_flock, funnel_state);
4931 }
4932 return (_err);
4933 }
4934
4935 #if 0
4936 /*
4937 *#
4938 *#% offtoblk vp = = =
4939 *#
4940 */
4941 struct vnop_offtoblk_args {
4942 struct vnodeop_desc *a_desc;
4943 vnode_t a_vp;
4944 off_t a_offset;
4945 daddr64_t *a_lblkno;
4946 };
4947 #endif /* 0*/
4948 errno_t
4949 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
4950 {
4951 int _err;
4952 struct vnop_offtoblk_args a;
4953 int thread_safe;
4954 int funnel_state = 0;
4955
4956 a.a_desc = &vnop_offtoblk_desc;
4957 a.a_vp = vp;
4958 a.a_offset = offset;
4959 a.a_lblkno = lblkno;
4960 thread_safe = THREAD_SAFE_FS(vp);
4961
4962 if (!thread_safe) {
4963 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4964 }
4965 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
4966 if (!thread_safe) {
4967 (void) thread_funnel_set(kernel_flock, funnel_state);
4968 }
4969 return (_err);
4970 }
4971
4972 #if 0
4973 /*
4974 *#
4975 *#% blockmap vp L L L
4976 *#
4977 */
4978 struct vnop_blockmap_args {
4979 struct vnodeop_desc *a_desc;
4980 vnode_t a_vp;
4981 off_t a_foffset;
4982 size_t a_size;
4983 daddr64_t *a_bpn;
4984 size_t *a_run;
4985 void *a_poff;
4986 int a_flags;
4987 vfs_context_t a_context;
4988 };
4989 #endif /* 0*/
4990 errno_t
4991 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
4992 {
4993 int _err;
4994 struct vnop_blockmap_args a;
4995 int thread_safe;
4996 int funnel_state = 0;
4997
4998 if (ctx == NULL) {
4999 ctx = vfs_context_current();
5000 }
5001 a.a_desc = &vnop_blockmap_desc;
5002 a.a_vp = vp;
5003 a.a_foffset = foffset;
5004 a.a_size = size;
5005 a.a_bpn = bpn;
5006 a.a_run = run;
5007 a.a_poff = poff;
5008 a.a_flags = flags;
5009 a.a_context = ctx;
5010 thread_safe = THREAD_SAFE_FS(vp);
5011
5012 if (!thread_safe) {
5013 funnel_state = thread_funnel_set(kernel_flock, TRUE);
5014 }
5015 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
5016 if (!thread_safe) {
5017 (void) thread_funnel_set(kernel_flock, funnel_state);
5018 }
5019 return (_err);
5020 }
5021
5022 #if 0
5023 struct vnop_strategy_args {
5024 struct vnodeop_desc *a_desc;
5025 struct buf *a_bp;
5026 };
5027
5028 #endif /* 0*/
5029 errno_t
5030 VNOP_STRATEGY(struct buf *bp)
5031 {
5032 int _err;
5033 struct vnop_strategy_args a;
5034 a.a_desc = &vnop_strategy_desc;
5035 a.a_bp = bp;
5036 _err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
5037 return (_err);
5038 }
5039
5040 #if 0
5041 struct vnop_bwrite_args {
5042 struct vnodeop_desc *a_desc;
5043 buf_t a_bp;
5044 };
5045 #endif /* 0*/
5046 errno_t
5047 VNOP_BWRITE(struct buf *bp)
5048 {
5049 int _err;
5050 struct vnop_bwrite_args a;
5051 a.a_desc = &vnop_bwrite_desc;
5052 a.a_bp = bp;
5053 _err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
5054 return (_err);
5055 }
5056
5057 #if 0
5058 struct vnop_kqfilt_add_args {
5059 struct vnodeop_desc *a_desc;
5060 struct vnode *a_vp;
5061 struct knote *a_kn;
5062 vfs_context_t a_context;
5063 };
5064 #endif
5065 errno_t
5066 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
5067 {
5068 int _err;
5069 struct vnop_kqfilt_add_args a;
5070 int thread_safe;
5071 int funnel_state = 0;
5072
5073 a.a_desc = VDESC(vnop_kqfilt_add);
5074 a.a_vp = vp;
5075 a.a_kn = kn;
5076 a.a_context = ctx;
5077 thread_safe = THREAD_SAFE_FS(vp);
5078
5079 if (!thread_safe) {
5080 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5081 return (_err);
5082 }
5083 }
5084 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
5085 if (!thread_safe) {
5086 unlock_fsnode(vp, &funnel_state);
5087 }
5088 return(_err);
5089 }
5090
5091 #if 0
5092 struct vnop_kqfilt_remove_args {
5093 struct vnodeop_desc *a_desc;
5094 struct vnode *a_vp;
5095 uintptr_t a_ident;
5096 vfs_context_t a_context;
5097 };
5098 #endif
5099 errno_t
5100 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
5101 {
5102 int _err;
5103 struct vnop_kqfilt_remove_args a;
5104 int thread_safe;
5105 int funnel_state = 0;
5106
5107 a.a_desc = VDESC(vnop_kqfilt_remove);
5108 a.a_vp = vp;
5109 a.a_ident = ident;
5110 a.a_context = ctx;
5111 thread_safe = THREAD_SAFE_FS(vp);
5112
5113 if (!thread_safe) {
5114 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5115 return (_err);
5116 }
5117 }
5118 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
5119 if (!thread_safe) {
5120 unlock_fsnode(vp, &funnel_state);
5121 }
5122 return(_err);
5123 }
5124
5125 #if 0
5126 struct vnop_setlabel_args {
5127 struct vnodeop_desc *a_desc;
5128 struct vnode *a_vp;
5129 struct label *a_vl;
5130 vfs_context_t a_context;
5131 };
5132 #endif
5133 errno_t
5134 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
5135 {
5136 int _err;
5137 struct vnop_setlabel_args a;
5138 int thread_safe;
5139 int funnel_state = 0;
5140
5141 a.a_desc = VDESC(vnop_setlabel);
5142 a.a_vp = vp;
5143 a.a_vl = label;
5144 a.a_context = ctx;
5145 thread_safe = THREAD_SAFE_FS(vp);
5146
5147 if (!thread_safe) {
5148 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
5149 return (_err);
5150 }
5151 }
5152 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
5153 if (!thread_safe) {
5154 unlock_fsnode(vp, &funnel_state);
5155 }
5156 return(_err);
5157 }
5158
5159
5160 #if NAMEDSTREAMS
5161 /*
5162 * Get a named streamed
5163 */
5164 errno_t
5165 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
5166 {
5167 struct vnop_getnamedstream_args a;
5168
5169 if (!THREAD_SAFE_FS(vp))
5170 return (ENOTSUP);
5171 a.a_desc = &vnop_getnamedstream_desc;
5172 a.a_vp = vp;
5173 a.a_svpp = svpp;
5174 a.a_name = name;
5175 a.a_operation = operation;
5176 a.a_flags = flags;
5177 a.a_context = ctx;
5178
5179 return (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
5180 }
5181
5182 /*
5183 * Create a named streamed
5184 */
5185 errno_t
5186 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
5187 {
5188 struct vnop_makenamedstream_args a;
5189
5190 if (!THREAD_SAFE_FS(vp))
5191 return (ENOTSUP);
5192 a.a_desc = &vnop_makenamedstream_desc;
5193 a.a_vp = vp;
5194 a.a_svpp = svpp;
5195 a.a_name = name;
5196 a.a_flags = flags;
5197 a.a_context = ctx;
5198
5199 return (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
5200 }
5201
5202
5203 /*
5204 * Remove a named streamed
5205 */
5206 errno_t
5207 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
5208 {
5209 struct vnop_removenamedstream_args a;
5210
5211 if (!THREAD_SAFE_FS(vp))
5212 return (ENOTSUP);
5213 a.a_desc = &vnop_removenamedstream_desc;
5214 a.a_vp = vp;
5215 a.a_svp = svp;
5216 a.a_name = name;
5217 a.a_flags = flags;
5218 a.a_context = ctx;
5219
5220 return (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
5221 }
5222 #endif