]> git.saurik.com Git - apple/xnu.git/blob - bsd/vfs/kpi_vfs.c
xnu-792.12.6.tar.gz
[apple/xnu.git] / bsd / vfs / kpi_vfs.c
1 /*
2 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
31 /*
32 * Copyright (c) 1989, 1993
33 * The Regents of the University of California. All rights reserved.
34 * (c) UNIX System Laboratories, Inc.
35 * All or some portions of this file are derived from material licensed
36 * to the University of California by American Telephone and Telegraph
37 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
38 * the permission of UNIX System Laboratories, Inc.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
68 * @(#)kpi_vfs.c
69 */
70
71 /*
72 * External virtual filesystem routines
73 */
74
75 #undef DIAGNOSTIC
76 #define DIAGNOSTIC 1
77
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/proc_internal.h>
81 #include <sys/kauth.h>
82 #include <sys/mount.h>
83 #include <sys/mount_internal.h>
84 #include <sys/time.h>
85 #include <sys/vnode_internal.h>
86 #include <sys/stat.h>
87 #include <sys/namei.h>
88 #include <sys/ucred.h>
89 #include <sys/buf.h>
90 #include <sys/errno.h>
91 #include <sys/malloc.h>
92 #include <sys/domain.h>
93 #include <sys/mbuf.h>
94 #include <sys/syslog.h>
95 #include <sys/ubc.h>
96 #include <sys/vm.h>
97 #include <sys/sysctl.h>
98 #include <sys/filedesc.h>
99 #include <sys/fsevents.h>
100 #include <sys/user.h>
101 #include <sys/lockf.h>
102 #include <sys/xattr.h>
103
104 #include <kern/assert.h>
105 #include <kern/kalloc.h>
106
107 #include <miscfs/specfs/specdev.h>
108
109 #include <mach/mach_types.h>
110 #include <mach/memory_object_types.h>
111
112 #define ESUCCESS 0
113 #undef mount_t
114 #undef vnode_t
115
116 #define COMPAT_ONLY
117
118
119 #define THREAD_SAFE_FS(VP) \
120 ((VP)->v_unsafefs ? 0 : 1)
121
122 #define NATIVE_XATTR(VP) \
123 ((VP)->v_mount ? (VP)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR : 0)
124
125 static void xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context,
126 int thread_safe, int force);
127 static void xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
128 vfs_context_t context, int thread_safe);
129
130
/*
 * Purge vp's name-cache entries and mark it VL_NEEDINACTIVE so that
 * the filesystem's inactive entry point is run when the last iocount
 * is dropped.  The cache purge is deliberately done before taking
 * the vnode lock.
 */
static void
vnode_setneedinactive(vnode_t vp)
{
        cache_purge(vp);

        vnode_lock(vp);
        vp->v_lflag |= VL_NEEDINACTIVE;
        vnode_unlock(vp);
}
140
141
/*
 * Acquire the serialization state needed before calling into a
 * non-threadsafe filesystem for vp.
 *
 * If funnel_state is non-NULL, the kernel funnel is taken and the
 * previous funnel state is returned through it.  For filesystems that
 * use per-node locking (vp->v_unsafefs != NULL) the fsnode mutex is
 * taken as well; re-entry by the thread that already owns it simply
 * bumps fsnode_count (recursive acquisition).
 *
 * Returns 0 on success, or ENOENT if the vnode is being reclaimed
 * (VL_TERMWANT | VL_TERMINATE | VL_DEAD) — in that case everything
 * acquired here has been released again before returning.
 */
int
lock_fsnode(vnode_t vp, int *funnel_state)
{
        if (funnel_state)
                *funnel_state = thread_funnel_set(kernel_flock, TRUE);

        if (vp->v_unsafefs) {
                if (vp->v_unsafefs->fsnodeowner == current_thread()) {
                        /* recursive entry by the owning thread */
                        vp->v_unsafefs->fsnode_count++;
                } else {
                        lck_mtx_lock(&vp->v_unsafefs->fsnodelock);

                        if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
                                /* vnode is going away: back out and fail */
                                lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);

                                if (funnel_state)
                                        (void) thread_funnel_set(kernel_flock, *funnel_state);
                                return (ENOENT);
                        }
                        vp->v_unsafefs->fsnodeowner = current_thread();
                        vp->v_unsafefs->fsnode_count = 1;
                }
        }
        return (0);
}
167
168
/*
 * Release the state taken by lock_fsnode().  Drops one level of the
 * recursive fsnode lock (releasing the mutex and clearing ownership
 * when the count reaches zero) and, if funnel_state is non-NULL,
 * restores the saved funnel state.
 */
void
unlock_fsnode(vnode_t vp, int *funnel_state)
{
        if (vp->v_unsafefs) {
                if (--vp->v_unsafefs->fsnode_count == 0) {
                        vp->v_unsafefs->fsnodeowner = NULL;
                        lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
                }
        }
        if (funnel_state)
                (void) thread_funnel_set(kernel_flock, *funnel_state);
}
181
182
183
184 /* ====================================================================== */
185 /* ************ EXTERNAL KERNEL APIS ********************************** */
186 /* ====================================================================== */
187
188 /*
189 * prototypes for exported VFS operations
190 */
191 int
192 VFS_MOUNT(struct mount * mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
193 {
194 int error;
195 int thread_safe;
196 int funnel_state = 0;
197
198 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
199 return(ENOTSUP);
200
201 thread_safe = mp->mnt_vtable->vfc_threadsafe;
202
203
204 if (!thread_safe) {
205 funnel_state = thread_funnel_set(kernel_flock, TRUE);
206 }
207
208 if (vfs_context_is64bit(context)) {
209 if (vfs_64bitready(mp)) {
210 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
211 }
212 else {
213 error = ENOTSUP;
214 }
215 }
216 else {
217 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
218 }
219
220 if (!thread_safe) {
221 (void) thread_funnel_set(kernel_flock, funnel_state);
222 }
223 return (error);
224 }
225
226 int
227 VFS_START(struct mount * mp, int flags, vfs_context_t context)
228 {
229 int error;
230 int thread_safe;
231 int funnel_state = 0;
232
233 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
234 return(ENOTSUP);
235
236 thread_safe = mp->mnt_vtable->vfc_threadsafe;
237
238 if (!thread_safe) {
239 funnel_state = thread_funnel_set(kernel_flock, TRUE);
240 }
241 error = (*mp->mnt_op->vfs_start)(mp, flags, context);
242 if (!thread_safe) {
243 (void) thread_funnel_set(kernel_flock, funnel_state);
244 }
245 return (error);
246 }
247
248 int
249 VFS_UNMOUNT(struct mount *mp, int flags, vfs_context_t context)
250 {
251 int error;
252 int thread_safe;
253 int funnel_state = 0;
254
255 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
256 return(ENOTSUP);
257
258 thread_safe = mp->mnt_vtable->vfc_threadsafe;
259
260 if (!thread_safe) {
261 funnel_state = thread_funnel_set(kernel_flock, TRUE);
262 }
263 error = (*mp->mnt_op->vfs_unmount)(mp, flags, context);
264 if (!thread_safe) {
265 (void) thread_funnel_set(kernel_flock, funnel_state);
266 }
267 return (error);
268 }
269
270 int
271 VFS_ROOT(struct mount * mp, struct vnode ** vpp, vfs_context_t context)
272 {
273 int error;
274 int thread_safe;
275 int funnel_state = 0;
276 struct vfs_context acontext;
277
278 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
279 return(ENOTSUP);
280
281 if (context == NULL) {
282 acontext.vc_proc = current_proc();
283 acontext.vc_ucred = kauth_cred_get();
284 context = &acontext;
285 }
286 thread_safe = mp->mnt_vtable->vfc_threadsafe;
287
288 if (!thread_safe) {
289 funnel_state = thread_funnel_set(kernel_flock, TRUE);
290 }
291 error = (*mp->mnt_op->vfs_root)(mp, vpp, context);
292 if (!thread_safe) {
293 (void) thread_funnel_set(kernel_flock, funnel_state);
294 }
295 return (error);
296 }
297
298 int
299 VFS_QUOTACTL(struct mount *mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t context)
300 {
301 int error;
302 int thread_safe;
303 int funnel_state = 0;
304
305 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
306 return(ENOTSUP);
307
308 thread_safe = mp->mnt_vtable->vfc_threadsafe;
309
310 if (!thread_safe) {
311 funnel_state = thread_funnel_set(kernel_flock, TRUE);
312 }
313 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, context);
314 if (!thread_safe) {
315 (void) thread_funnel_set(kernel_flock, funnel_state);
316 }
317 return (error);
318 }
319
320 int
321 VFS_GETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
322 {
323 int error;
324 int thread_safe;
325 int funnel_state = 0;
326 struct vfs_context acontext;
327
328 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
329 return(ENOTSUP);
330
331 if (context == NULL) {
332 acontext.vc_proc = current_proc();
333 acontext.vc_ucred = kauth_cred_get();
334 context = &acontext;
335 }
336 thread_safe = mp->mnt_vtable->vfc_threadsafe;
337
338 if (!thread_safe) {
339 funnel_state = thread_funnel_set(kernel_flock, TRUE);
340 }
341 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, context);
342 if (!thread_safe) {
343 (void) thread_funnel_set(kernel_flock, funnel_state);
344 }
345 return(error);
346 }
347
348 int
349 VFS_SETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
350 {
351 int error;
352 int thread_safe;
353 int funnel_state = 0;
354 struct vfs_context acontext;
355
356 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
357 return(ENOTSUP);
358
359 if (context == NULL) {
360 acontext.vc_proc = current_proc();
361 acontext.vc_ucred = kauth_cred_get();
362 context = &acontext;
363 }
364 thread_safe = mp->mnt_vtable->vfc_threadsafe;
365
366 if (!thread_safe) {
367 funnel_state = thread_funnel_set(kernel_flock, TRUE);
368 }
369 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, context);
370 if (!thread_safe) {
371 (void) thread_funnel_set(kernel_flock, funnel_state);
372 }
373 return(error);
374 }
375
376 int
377 VFS_SYNC(struct mount *mp, int flags, vfs_context_t context)
378 {
379 int error;
380 int thread_safe;
381 int funnel_state = 0;
382 struct vfs_context acontext;
383
384 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
385 return(ENOTSUP);
386
387 if (context == NULL) {
388 acontext.vc_proc = current_proc();
389 acontext.vc_ucred = kauth_cred_get();
390 context = &acontext;
391 }
392 thread_safe = mp->mnt_vtable->vfc_threadsafe;
393
394 if (!thread_safe) {
395 funnel_state = thread_funnel_set(kernel_flock, TRUE);
396 }
397 error = (*mp->mnt_op->vfs_sync)(mp, flags, context);
398 if (!thread_safe) {
399 (void) thread_funnel_set(kernel_flock, funnel_state);
400 }
401 return(error);
402 }
403
404 int
405 VFS_VGET(struct mount * mp, ino64_t ino, struct vnode **vpp, vfs_context_t context)
406 {
407 int error;
408 int thread_safe;
409 int funnel_state = 0;
410 struct vfs_context acontext;
411
412 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
413 return(ENOTSUP);
414
415 if (context == NULL) {
416 acontext.vc_proc = current_proc();
417 acontext.vc_ucred = kauth_cred_get();
418 context = &acontext;
419 }
420 thread_safe = mp->mnt_vtable->vfc_threadsafe;
421
422 if (!thread_safe) {
423 funnel_state = thread_funnel_set(kernel_flock, TRUE);
424 }
425 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, context);
426 if (!thread_safe) {
427 (void) thread_funnel_set(kernel_flock, funnel_state);
428 }
429 return(error);
430 }
431
432 int
433 VFS_FHTOVP(struct mount * mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t context)
434 {
435 int error;
436 int thread_safe;
437 int funnel_state = 0;
438 struct vfs_context acontext;
439
440 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
441 return(ENOTSUP);
442
443 if (context == NULL) {
444 acontext.vc_proc = current_proc();
445 acontext.vc_ucred = kauth_cred_get();
446 context = &acontext;
447 }
448 thread_safe = mp->mnt_vtable->vfc_threadsafe;
449
450 if (!thread_safe) {
451 funnel_state = thread_funnel_set(kernel_flock, TRUE);
452 }
453 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, context);
454 if (!thread_safe) {
455 (void) thread_funnel_set(kernel_flock, funnel_state);
456 }
457 return(error);
458 }
459
460 int
461 VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t context)
462 {
463 int error;
464 int thread_safe;
465 int funnel_state = 0;
466 struct vfs_context acontext;
467
468 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
469 return(ENOTSUP);
470
471 if (context == NULL) {
472 acontext.vc_proc = current_proc();
473 acontext.vc_ucred = kauth_cred_get();
474 context = &acontext;
475 }
476 thread_safe = THREAD_SAFE_FS(vp);
477
478 if (!thread_safe) {
479 funnel_state = thread_funnel_set(kernel_flock, TRUE);
480 }
481 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, context);
482 if (!thread_safe) {
483 (void) thread_funnel_set(kernel_flock, funnel_state);
484 }
485 return(error);
486 }
487
488
489 /* returns a copy of vfs type name for the mount_t */
490 void
491 vfs_name(mount_t mp, char * buffer)
492 {
493 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
494 }
495
/* Return the vfs type number assigned to this mount's filesystem. */
int
vfs_typenum(mount_t mp)
{
        return(mp->mnt_vtable->vfc_typenum);
}
502
503
/*
 * Return the command-modifier (MNT_CMDFLAGS) and externally visible
 * (MNT_VISFLAGMASK) flag bits of the mount, widened to 64 bits.
 */
uint64_t
vfs_flags(mount_t mp)
{
        return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
}
510
511 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
512 void
513 vfs_setflags(mount_t mp, uint64_t flags)
514 {
515 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
516
517 mp->mnt_flag |= lflags;
518 }
519
520 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
521 void
522 vfs_clearflags(mount_t mp , uint64_t flags)
523 {
524 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
525
526 mp->mnt_flag &= ~lflags;
527 }
528
529 /* Is the mount_t ronly and upgrade read/write requested? */
530 int
531 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
532 {
533 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
534 }
535
536
/*
 * Is the mount_t mounted read-only?  Returns the raw MNT_RDONLY bit
 * (non-zero when read-only), not a normalized 0/1.
 */
int
vfs_isrdonly(mount_t mp)
{
        return (mp->mnt_flag & MNT_RDONLY);
}
543
/*
 * Is the mount_t mounted for synchronous writes?  Returns the raw
 * MNT_SYNCHRONOUS bit (non-zero when set), not a normalized 0/1.
 */
int
vfs_issynchronous(mount_t mp)
{
        return (mp->mnt_flag & MNT_SYNCHRONOUS);
}
550
551 /* Is the mount_t mounted read/write? */
552 int
553 vfs_isrdwr(mount_t mp)
554 {
555 return ((mp->mnt_flag & MNT_RDONLY) == 0);
556 }
557
558
/*
 * Is mount_t marked for update (MNT_UPDATE)?  Returns the raw flag
 * bit (non-zero when set), not a normalized 0/1.
 */
int
vfs_isupdate(mount_t mp)
{
        return (mp->mnt_flag & MNT_UPDATE);
}
565
566
567 /* Is mount_t marked for reload (ie MNT_RELOAD) */
568 int
569 vfs_isreload(mount_t mp)
570 {
571 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
572 }
573
574 /* Is mount_t marked for reload (ie MNT_FORCE) */
575 int
576 vfs_isforce(mount_t mp)
577 {
578 if ((mp->mnt_flag & MNT_FORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
579 return(1);
580 else
581 return(0);
582 }
583
584 int
585 vfs_64bitready(mount_t mp)
586 {
587 if ((mp->mnt_vtable->vfc_64bitready))
588 return(1);
589 else
590 return(0);
591 }
592
593 int
594 vfs_authopaque(mount_t mp)
595 {
596 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
597 return(1);
598 else
599 return(0);
600 }
601
602 int
603 vfs_authopaqueaccess(mount_t mp)
604 {
605 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
606 return(1);
607 else
608 return(0);
609 }
610
/* Mark mp as making its own (opaque) authorization decisions. */
void
vfs_setauthopaque(mount_t mp)
{
        mount_lock(mp);
        mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
        mount_unlock(mp);
}
618
/* Mark mp as handling access(2) authorization itself. */
void
vfs_setauthopaqueaccess(mount_t mp)
{
        mount_lock(mp);
        mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
        mount_unlock(mp);
}
626
/* Clear the opaque-authorization marking on mp. */
void
vfs_clearauthopaque(mount_t mp)
{
        mount_lock(mp);
        mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
        mount_unlock(mp);
}
634
/* Clear the opaque access(2) authorization marking on mp. */
void
vfs_clearauthopaqueaccess(mount_t mp)
{
        mount_lock(mp);
        mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
        mount_unlock(mp);
}
642
/* Mark mp as supporting extended security (ACLs). */
void
vfs_setextendedsecurity(mount_t mp)
{
        mount_lock(mp);
        mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
        mount_unlock(mp);
}
650
/* Clear the extended-security (ACL) marking on mp. */
void
vfs_clearextendedsecurity(mount_t mp)
{
        mount_lock(mp);
        mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
        mount_unlock(mp);
}
658
/*
 * Does mp support extended security (ACLs)?  Returns the raw
 * MNTK_EXTENDED_SECURITY bit (non-zero when set), not a normalized
 * 0/1 like the other vfs_* predicates above.
 */
int
vfs_extendedsecurity(mount_t mp)
{
        return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
}
664
/* Return the max length of a "short" (inline) symlink on this mount. */
uint32_t
vfs_maxsymlen(mount_t mp)
{
        return(mp->mnt_maxsymlinklen);
}
671
/* Set the max length of a "short" (inline) symlink on this mount. */
void
vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
{
        mp->mnt_maxsymlinklen = symlen;
}
678
/*
 * Return a pointer to the mount's cached vfsstatfs structure.
 * Callers must treat the returned structure as read-only.
 */
struct vfsstatfs *
vfs_statfs(mount_t mp)
{
        return(&mp->mnt_vfsstat);
}
685
686 int
687 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
688 {
689 int error;
690 char *vname;
691
692 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
693 return(error);
694
695 /*
696 * If we have a filesystem create time, use it to default some others.
697 */
698 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
699 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
700 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
701 }
702
703 return(0);
704 }
705
706 int
707 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
708 {
709 int error;
710
711 if (vfs_isrdonly(mp))
712 return EROFS;
713
714 error = VFS_SETATTR(mp, vfa, ctx);
715
716 /*
717 * If we had alternate ways of setting vfs attributes, we'd
718 * fall back here.
719 */
720
721 return error;
722 }
723
/* Return the filesystem-private data handle stored in the mount_t. */
void *
vfs_fsprivate(mount_t mp)
{
        return(mp->mnt_data);
}
730
/* Store a filesystem-private data handle in the mount_t. */
void
vfs_setfsprivate(mount_t mp, void *mntdata)
{
        mp->mnt_data = mntdata;
}
737
738
/*
 * return the block size of the underlying
 * device associated with mount_t
 */
int
vfs_devblocksize(mount_t mp) {

        return(mp->mnt_devblocksize);
}
748
749
750 /*
751 * return the io attributes associated with mount_t
752 */
753 void
754 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
755 {
756 if (mp == NULL) {
757 ioattrp->io_maxreadcnt = MAXPHYS;
758 ioattrp->io_maxwritecnt = MAXPHYS;
759 ioattrp->io_segreadcnt = 32;
760 ioattrp->io_segwritecnt = 32;
761 ioattrp->io_maxsegreadsize = MAXPHYS;
762 ioattrp->io_maxsegwritesize = MAXPHYS;
763 ioattrp->io_devblocksize = DEV_BSIZE;
764 } else {
765 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
766 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
767 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
768 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
769 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
770 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
771 ioattrp->io_devblocksize = mp->mnt_devblocksize;
772 }
773 ioattrp->io_reserved[0] = 0;
774 ioattrp->io_reserved[1] = 0;
775 ioattrp->io_reserved[2] = 0;
776 }
777
778
779 /*
780 * set the IO attributes associated with mount_t
781 */
782 void
783 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
784 {
785 if (mp == NULL)
786 return;
787 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
788 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
789 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
790 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
791 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
792 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
793 mp->mnt_devblocksize = ioattrp->io_devblocksize;
794 }
795
796 /*
797 * Add a new filesystem into the kernel specified in passed in
798 * vfstable structure. It fills in the vnode
799 * dispatch vector that is to be passed to when vnodes are created.
800 * It returns a handle which is to be used to when the FS is to be removed
801 */
802 typedef int (*PFI)(void *);
803 extern int vfs_opv_numops;
804 errno_t
805 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
806 {
807 #pragma unused(data)
808 struct vfstable *newvfstbl = NULL;
809 int i,j;
810 int (***opv_desc_vector_p)(void *);
811 int (**opv_desc_vector)(void *);
812 struct vnodeopv_entry_desc *opve_descp;
813 int desccount;
814 int descsize;
815 PFI *descptr;
816
817 /*
818 * This routine is responsible for all the initialization that would
819 * ordinarily be done as part of the system startup;
820 */
821
822 if (vfe == (struct vfs_fsentry *)0)
823 return(EINVAL);
824
825 desccount = vfe->vfe_vopcnt;
826 if ((desccount <=0) || ((desccount > 5)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
827 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
828 return(EINVAL);
829
830
831 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
832 M_WAITOK);
833 bzero(newvfstbl, sizeof(struct vfstable));
834 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
835 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
836 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
837 newvfstbl->vfc_typenum = maxvfsconf++;
838 else
839 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
840
841 newvfstbl->vfc_refcount = 0;
842 newvfstbl->vfc_flags = 0;
843 newvfstbl->vfc_mountroot = NULL;
844 newvfstbl->vfc_next = NULL;
845 newvfstbl->vfc_threadsafe = 0;
846 newvfstbl->vfc_vfsflags = 0;
847 if (vfe->vfe_flags & VFS_TBL64BITREADY)
848 newvfstbl->vfc_64bitready= 1;
849 if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
850 newvfstbl->vfc_threadsafe= 1;
851 if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
852 newvfstbl->vfc_threadsafe= 1;
853 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
854 newvfstbl->vfc_flags |= MNT_LOCAL;
855 if (vfe->vfe_flags & VFS_TBLLOCALVOL)
856 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
857 else
858 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
859
860
861 /*
862 * Allocate and init the vectors.
863 * Also handle backwards compatibility.
864 *
865 * We allocate one large block to hold all <desccount>
866 * vnode operation vectors stored contiguously.
867 */
868 /* XXX - shouldn't be M_TEMP */
869
870 descsize = desccount * vfs_opv_numops * sizeof(PFI);
871 MALLOC(descptr, PFI *, descsize,
872 M_TEMP, M_WAITOK);
873 bzero(descptr, descsize);
874
875 newvfstbl->vfc_descptr = descptr;
876 newvfstbl->vfc_descsize = descsize;
877
878
879 for (i= 0; i< desccount; i++ ) {
880 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
881 /*
882 * Fill in the caller's pointer to the start of the i'th vector.
883 * They'll need to supply it when calling vnode_create.
884 */
885 opv_desc_vector = descptr + i * vfs_opv_numops;
886 *opv_desc_vector_p = opv_desc_vector;
887
888 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
889 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
890
891 /*
892 * Sanity check: is this operation listed
893 * in the list of operations? We check this
894 * by seeing if its offest is zero. Since
895 * the default routine should always be listed
896 * first, it should be the only one with a zero
897 * offset. Any other operation with a zero
898 * offset is probably not listed in
899 * vfs_op_descs, and so is probably an error.
900 *
901 * A panic here means the layer programmer
902 * has committed the all-too common bug
903 * of adding a new operation to the layer's
904 * list of vnode operations but
905 * not adding the operation to the system-wide
906 * list of supported operations.
907 */
908 if (opve_descp->opve_op->vdesc_offset == 0 &&
909 opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
910 printf("vfs_fsadd: operation %s not listed in %s.\n",
911 opve_descp->opve_op->vdesc_name,
912 "vfs_op_descs");
913 panic("vfs_fsadd: bad operation");
914 }
915 /*
916 * Fill in this entry.
917 */
918 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
919 opve_descp->opve_impl;
920 }
921
922
923 /*
924 * Finally, go back and replace unfilled routines
925 * with their default. (Sigh, an O(n^3) algorithm. I
926 * could make it better, but that'd be work, and n is small.)
927 */
928 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
929
930 /*
931 * Force every operations vector to have a default routine.
932 */
933 opv_desc_vector = *opv_desc_vector_p;
934 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
935 panic("vfs_fsadd: operation vector without default routine.");
936 for (j = 0; j < vfs_opv_numops; j++)
937 if (opv_desc_vector[j] == NULL)
938 opv_desc_vector[j] =
939 opv_desc_vector[VOFFSET(vnop_default)];
940
941 } /* end of each vnodeopv_desc parsing */
942
943
944
945 *handle = vfstable_add(newvfstbl);
946
947 if (newvfstbl->vfc_typenum <= maxvfsconf )
948 maxvfsconf = newvfstbl->vfc_typenum + 1;
949 numused_vfsslots++;
950
951 if (newvfstbl->vfc_vfsops->vfs_init)
952 (*newvfstbl->vfc_vfsops->vfs_init)((struct vfsconf *)handle);
953
954 FREE(newvfstbl, M_TEMP);
955
956 return(0);
957 }
958
959 /*
960 * Removes the filesystem from kernel.
961 * The argument passed in is the handle that was given when
962 * file system was added
963 */
964 errno_t
965 vfs_fsremove(vfstable_t handle)
966 {
967 struct vfstable * vfstbl = (struct vfstable *)handle;
968 void *old_desc = NULL;
969 errno_t err;
970
971 /* Preflight check for any mounts */
972 mount_list_lock();
973 if ( vfstbl->vfc_refcount != 0 ) {
974 mount_list_unlock();
975 return EBUSY;
976 }
977 mount_list_unlock();
978
979 /*
980 * save the old descriptor; the free cannot occur unconditionally,
981 * since vfstable_del() may fail.
982 */
983 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
984 old_desc = vfstbl->vfc_descptr;
985 }
986 err = vfstable_del(vfstbl);
987
988 /* free the descriptor if the delete was successful */
989 if (err == 0 && old_desc) {
990 FREE(old_desc, M_TEMP);
991 }
992
993 return(err);
994 }
995
/*
 * This returns a reference to mount_t
 * which should be dropped using vfs_mountrele().
 * Not doing so will leak a mountpoint
 * and associated data structures.
 *
 * NOTE: currently a no-op kept for KPI compatibility — no reference
 * is actually taken.
 */
errno_t
vfs_mountref(__unused mount_t mp ) /* gives a reference */
{
        return(0);
}
1007
/*
 * This drops the reference on mount_t that was acquired.
 * NOTE: currently a no-op kept for KPI compatibility, matching
 * vfs_mountref() above.
 */
errno_t
vfs_mountrele(__unused mount_t mp ) /* drops reference */
{
        return(0);
}
1014
/* Return the pid of the process associated with the context. */
int
vfs_context_pid(vfs_context_t context)
{
        return (context->vc_proc->p_pid);
}
1020
/*
 * Superuser check on the context's credential via suser();
 * returns 0 when privileged, an errno otherwise.
 */
int
vfs_context_suser(vfs_context_t context)
{
        return (suser(context->vc_ucred, 0));
}
1026 int
1027 vfs_context_issignal(vfs_context_t context, sigset_t mask)
1028 {
1029 if (context->vc_proc)
1030 return(proc_pendingsignals(context->vc_proc, mask));
1031 return(0);
1032 }
1033
1034 int
1035 vfs_context_is64bit(vfs_context_t context)
1036 {
1037 if (context->vc_proc)
1038 return(proc_is64bit(context->vc_proc));
1039 return(0);
1040 }
1041
/* Return the process associated with the context (may be NULL). */
proc_t
vfs_context_proc(vfs_context_t context)
{
        return (context->vc_proc);
}
1047
1048 vfs_context_t
1049 vfs_context_create(vfs_context_t context)
1050 {
1051 struct vfs_context * newcontext;
1052
1053 newcontext = (struct vfs_context *)kalloc(sizeof(struct vfs_context));
1054
1055 if (newcontext) {
1056 if (context) {
1057 newcontext->vc_proc = context->vc_proc;
1058 newcontext->vc_ucred = context->vc_ucred;
1059 } else {
1060 newcontext->vc_proc = proc_self();
1061 newcontext->vc_ucred = kauth_cred_get();
1062 }
1063 return(newcontext);
1064 }
1065 return((vfs_context_t)0);
1066 }
1067
1068 int
1069 vfs_context_rele(vfs_context_t context)
1070 {
1071 if (context)
1072 kfree(context, sizeof(struct vfs_context));
1073 return(0);
1074 }
1075
1076
/* Return the credential associated with the context. */
ucred_t
vfs_context_ucred(vfs_context_t context)
{
        return (context->vc_ucred);
}
1082
/*
 * Return true if the context is owned by the superuser.
 * Checks the effective uid directly rather than going through suser().
 */
int
vfs_context_issuser(vfs_context_t context)
{
        return(context->vc_ucred->cr_uid == 0);
}
1091
1092
1093 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1094
1095
/*
 * Convert between vnode types and inode formats (since POSIX.1
 * defines mode word of stat structure in terms of inode formats).
 */
/* Map an S_IFMT mode value to a vnode type via iftovt_tab. */
enum vtype
vnode_iftovt(int mode)
{
        return(iftovt_tab[((mode) & S_IFMT) >> 12]);
}
1105
/* Map a vnode type to its S_IFMT inode-format bits via vttoif_tab. */
int
vnode_vttoif(enum vtype indx)
{
        return(vttoif_tab[(int)(indx)]);
}
1111
/* Combine a vnode type and permission bits into an inode mode word. */
int
vnode_makeimode(int indx, int mode)
{
        return (int)(VTTOIF(indx) | (mode));
}
1117
1118
1119 /*
1120 * vnode manipulation functions.
1121 */
1122
1123 /* returns system root vnode reference; It should be dropped using vrele() */
1124 vnode_t
1125 vfs_rootvnode(void)
1126 {
1127 int error;
1128
1129 error = vnode_get(rootvnode);
1130 if (error)
1131 return ((vnode_t)0);
1132 else
1133 return rootvnode;
1134 }
1135
1136
/* Return the vnode's id, used to detect reuse across reclaim. */
uint32_t
vnode_vid(vnode_t vp)
{
        return ((uint32_t)(vp->v_id));
}
1142
/* returns a mount reference; drop it with vfs_mountrele() */
/* Return the mount the vnode belongs to (no reference is taken here). */
mount_t
vnode_mount(vnode_t vp)
{
        return (vp->v_mount);
}
1149
1150 /* returns a mount reference iff vnode_t is a dir and is a mount point */
1151 mount_t
1152 vnode_mountedhere(vnode_t vp)
1153 {
1154 mount_t mp;
1155
1156 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1157 (mp->mnt_vnodecovered == vp))
1158 return (mp);
1159 else
1160 return (mount_t)NULL;
1161 }
1162
/* Return the vnode's type (VREG, VDIR, VLNK, ...). */
enum vtype
vnode_vtype(vnode_t vp)
{
        return (vp->v_type);
}
1169
/* Return the filesystem-specific node data attached to the vnode. */
void *
vnode_fsnode(vnode_t vp)
{
        return (vp->v_data);
}
1176
/* Detach the filesystem-specific node data from the vnode. */
void
vnode_clearfsnode(vnode_t vp)
{
        vp->v_data = 0;
}
1182
/* Return the device number of a special (block/character) vnode. */
dev_t
vnode_specrdev(vnode_t vp)
{
        return(vp->v_rdev);
}
1188
1189
1190 /* Accessor functions */
1191 /* is vnode_t a root vnode */
1192 int
1193 vnode_isvroot(vnode_t vp)
1194 {
1195 return ((vp->v_flag & VROOT)? 1 : 0);
1196 }
1197
1198 /* is vnode_t a system vnode */
1199 int
1200 vnode_issystem(vnode_t vp)
1201 {
1202 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1203 }
1204
1205 /* if vnode_t mount operation in progress */
1206 int
1207 vnode_ismount(vnode_t vp)
1208 {
1209 return ((vp->v_flag & VMOUNT)? 1 : 0);
1210 }
1211
1212 /* is this vnode under recyle now */
1213 int
1214 vnode_isrecycled(vnode_t vp)
1215 {
1216 int ret;
1217
1218 vnode_lock(vp);
1219 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1220 vnode_unlock(vp);
1221 return(ret);
1222 }
1223
1224 /* is vnode_t marked to not keep data cached once it's been consumed */
1225 int
1226 vnode_isnocache(vnode_t vp)
1227 {
1228 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1229 }
1230
1231 /*
1232 * has sequential readahead been disabled on this vnode
1233 */
1234 int
1235 vnode_isnoreadahead(vnode_t vp)
1236 {
1237 return ((vp->v_flag & VRAOFF)? 1 : 0);
1238 }
1239
1240 /* is vnode_t a standard one? */
1241 int
1242 vnode_isstandard(vnode_t vp)
1243 {
1244 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1245 }
1246
1247 /* don't vflush() if SKIPSYSTEM */
1248 int
1249 vnode_isnoflush(vnode_t vp)
1250 {
1251 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1252 }
1253
1254 /* is vnode_t a regular file */
1255 int
1256 vnode_isreg(vnode_t vp)
1257 {
1258 return ((vp->v_type == VREG)? 1 : 0);
1259 }
1260
1261 /* is vnode_t a directory? */
1262 int
1263 vnode_isdir(vnode_t vp)
1264 {
1265 return ((vp->v_type == VDIR)? 1 : 0);
1266 }
1267
1268 /* is vnode_t a symbolic link ? */
1269 int
1270 vnode_islnk(vnode_t vp)
1271 {
1272 return ((vp->v_type == VLNK)? 1 : 0);
1273 }
1274
1275 /* is vnode_t a fifo ? */
1276 int
1277 vnode_isfifo(vnode_t vp)
1278 {
1279 return ((vp->v_type == VFIFO)? 1 : 0);
1280 }
1281
1282 /* is vnode_t a block device? */
1283 int
1284 vnode_isblk(vnode_t vp)
1285 {
1286 return ((vp->v_type == VBLK)? 1 : 0);
1287 }
1288
1289 /* is vnode_t a char device? */
1290 int
1291 vnode_ischr(vnode_t vp)
1292 {
1293 return ((vp->v_type == VCHR)? 1 : 0);
1294 }
1295
1296 /* is vnode_t a socket? */
1297 int
1298 vnode_issock(vnode_t vp)
1299 {
1300 return ((vp->v_type == VSOCK)? 1 : 0);
1301 }
1302
1303
/*
 * Mark the vnode so its data is not kept cached once it has been
 * consumed (comment in SOURCE notes this is used for quota).
 * v_flag updates are serialized with the vnode lock.
 */
void
vnode_setnocache(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag |= VNOCACHE_DATA;
	vnode_unlock(vp);
}
1312
/* Re-enable data caching on the vnode (undo vnode_setnocache). */
void
vnode_clearnocache(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag &= ~VNOCACHE_DATA;
	vnode_unlock(vp);
}
1320
/* Disable sequential read-ahead on this vnode (sets VRAOFF under the vnode lock). */
void
vnode_setnoreadahead(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag |= VRAOFF;
	vnode_unlock(vp);
}
1328
/* Re-enable sequential read-ahead on this vnode (clears VRAOFF under the vnode lock). */
void
vnode_clearnoreadahead(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag &= ~VRAOFF;
	vnode_unlock(vp);
}
1336
1337
/* Mark vnode_t so vflush() skips it when SKIPSYSTEM is requested. */
void
vnode_setnoflush(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag |= VNOFLUSH;
	vnode_unlock(vp);
}
1346
/* Clear the no-flush marking (undo vnode_setnoflush). */
void
vnode_clearnoflush(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag &= ~VNOFLUSH;
	vnode_unlock(vp);
}
1354
1355
1356 /* is vnode_t a blkdevice and has a FS mounted on it */
1357 int
1358 vnode_ismountedon(vnode_t vp)
1359 {
1360 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
1361 }
1362
/* Flag this special vnode as having a filesystem mounted on it. */
void
vnode_setmountedon(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_specflags |= SI_MOUNTEDON;
	vnode_unlock(vp);
}
1370
/* Clear the mounted-on flag on this special vnode. */
void
vnode_clearmountedon(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_specflags &= ~SI_MOUNTEDON;
	vnode_unlock(vp);
}
1378
1379
/* Set the filesystem tag (VT_* value) identifying the owning FS; no locking. */
void
vnode_settag(vnode_t vp, int tag)
{
	vp->v_tag = tag;

}
1386
1387 int
1388 vnode_tag(vnode_t vp)
1389 {
1390 return(vp->v_tag);
1391 }
1392
1393 vnode_t
1394 vnode_parent(vnode_t vp)
1395 {
1396
1397 return(vp->v_parent);
1398 }
1399
/* Update the cached parent vnode hint; no locking and no reference taken. */
void
vnode_setparent(vnode_t vp, vnode_t dvp)
{
	vp->v_parent = dvp;
}
1405
1406 char *
1407 vnode_name(vnode_t vp)
1408 {
1409 /* we try to keep v_name a reasonable name for the node */
1410 return(vp->v_name);
1411 }
1412
/* Replace the cached node name pointer; caller retains ownership of the string. */
void
vnode_setname(vnode_t vp, char * name)
{
	vp->v_name = name;
}
1418
/*
 * Copy the registered FS name (as given when the FS was added to the
 * kernel) into buf, which must hold at least MFSNAMELEN bytes.
 *
 * NOTE(review): strncpy does not NUL-terminate when the source fills
 * all MFSNAMELEN bytes; this relies on vfc_name being shorter than
 * MFSNAMELEN or already terminated — confirm against vfstable setup.
 */
void
vnode_vfsname(vnode_t vp, char * buf)
{
	strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
}
1425
1426 /* return the FS type number */
1427 int
1428 vnode_vfstypenum(vnode_t vp)
1429 {
1430 return(vp->v_mount->mnt_vtable->vfc_typenum);
1431 }
1432
1433 int
1434 vnode_vfs64bitready(vnode_t vp)
1435 {
1436
1437 if ((vp->v_mount->mnt_vtable->vfc_64bitready))
1438 return(1);
1439 else
1440 return(0);
1441 }
1442
1443
1444
1445 /* return the visible flags on associated mount point of vnode_t */
1446 uint32_t
1447 vnode_vfsvisflags(vnode_t vp)
1448 {
1449 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
1450 }
1451
1452 /* return the command modifier flags on associated mount point of vnode_t */
1453 uint32_t
1454 vnode_vfscmdflags(vnode_t vp)
1455 {
1456 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
1457 }
1458
1459 /* return the max symlink of short links of vnode_t */
1460 uint32_t
1461 vnode_vfsmaxsymlen(vnode_t vp)
1462 {
1463 return(vp->v_mount->mnt_maxsymlinklen);
1464 }
1465
1466 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1467 struct vfsstatfs *
1468 vnode_vfsstatfs(vnode_t vp)
1469 {
1470 return(&vp->v_mount->mnt_vfsstat);
1471 }
1472
1473 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1474 void *
1475 vnode_vfsfsprivate(vnode_t vp)
1476 {
1477 return(vp->v_mount->mnt_data);
1478 }
1479
1480 /* is vnode_t in a rdonly mounted FS */
1481 int
1482 vnode_vfsisrdonly(vnode_t vp)
1483 {
1484 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
1485 }
1486
1487
1488 /* returns vnode ref to current working directory */
1489 vnode_t
1490 current_workingdir(void)
1491 {
1492 struct proc *p = current_proc();
1493 struct vnode * vp ;
1494
1495 if ( (vp = p->p_fd->fd_cdir) ) {
1496 if ( (vnode_getwithref(vp)) )
1497 return (NULL);
1498 }
1499 return vp;
1500 }
1501
1502 /* returns vnode ref to current root(chroot) directory */
1503 vnode_t
1504 current_rootdir(void)
1505 {
1506 struct proc *p = current_proc();
1507 struct vnode * vp ;
1508
1509 if ( (vp = p->p_fd->fd_rdir) ) {
1510 if ( (vnode_getwithref(vp)) )
1511 return (NULL);
1512 }
1513 return vp;
1514 }
1515
1516 static int
1517 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
1518 {
1519 kauth_filesec_t fsec;
1520 uio_t fsec_uio;
1521 size_t fsec_size;
1522 size_t xsize, rsize;
1523 int error;
1524
1525 fsec = NULL;
1526 fsec_uio = NULL;
1527 error = 0;
1528
1529 /* find out how big the EA is */
1530 if (vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx) != 0) {
1531 /* no EA, no filesec */
1532 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1533 error = 0;
1534 /* either way, we are done */
1535 goto out;
1536 }
1537
1538 /* how many entries would fit? */
1539 fsec_size = KAUTH_FILESEC_COUNT(xsize);
1540
1541 /* get buffer and uio */
1542 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
1543 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
1544 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
1545 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
1546 error = ENOMEM;
1547 goto out;
1548 }
1549
1550 /* read security attribute */
1551 rsize = xsize;
1552 if ((error = vn_getxattr(vp,
1553 KAUTH_FILESEC_XATTR,
1554 fsec_uio,
1555 &rsize,
1556 XATTR_NOSECURITY,
1557 ctx)) != 0) {
1558
1559 /* no attribute - no security data */
1560 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1561 error = 0;
1562 /* either way, we are done */
1563 goto out;
1564 }
1565
1566 /*
1567 * Validate security structure. If it's corrupt, we will
1568 * just ignore it.
1569 */
1570 if (rsize < KAUTH_FILESEC_SIZE(0)) {
1571 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
1572 goto out;
1573 }
1574 if (fsec->fsec_magic != KAUTH_FILESEC_MAGIC) {
1575 KAUTH_DEBUG("ACL - BAD MAGIC %x", fsec->fsec_magic);
1576 goto out;
1577 }
1578 if ((fsec->fsec_acl.acl_entrycount != KAUTH_FILESEC_NOACL) &&
1579 (fsec->fsec_acl.acl_entrycount > KAUTH_ACL_MAX_ENTRIES)) {
1580 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", fsec->fsec_entrycount);
1581 goto out;
1582 }
1583 if ((fsec->fsec_acl.acl_entrycount != KAUTH_FILESEC_NOACL) &&
1584 (KAUTH_FILESEC_SIZE(fsec->fsec_acl.acl_entrycount) > rsize)) {
1585 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", fsec->fsec_acl.acl_entrycount, rsize);
1586 goto out;
1587 }
1588
1589 *fsecp = fsec;
1590 fsec = NULL;
1591 error = 0;
1592 out:
1593 if (fsec != NULL)
1594 kauth_filesec_free(fsec);
1595 if (fsec_uio != NULL)
1596 uio_free(fsec_uio);
1597 if (error)
1598 *fsecp = NULL;
1599 return(error);
1600 }
1601
/*
 * Write the extended-security attribute ("filesec") for a vnode.
 *
 * The header (fsec) and the ACL (acl) may live at different addresses
 * (the caller can pass a replacement ACL), so the write is assembled
 * as a two-iovec uio: the fixed kauth_filesec header minus its
 * embedded kauth_acl, followed by the ACL itself.
 *
 * Returns 0 on success, ENOMEM if the uio cannot be created, or the
 * error from vn_setxattr().
 * NOTE(review): the two uio_addiov() return values are ignored; with
 * exactly 2 iovecs on a freshly created uio this appears safe, but
 * the invariant is implicit.
 */
static int
vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
{
	uio_t	fsec_uio;
	int	error;

	fsec_uio = NULL;

	if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
		KAUTH_DEBUG("  ERROR - could not allocate iov to write ACL");
		error = ENOMEM;
		goto out;
	}
	/* iovec 1: the filesec header up to (not including) the embedded ACL */
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), sizeof(struct kauth_filesec) - sizeof(struct kauth_acl));
	/* iovec 2: the ACL (embedded or caller-supplied), sized by its entry count */
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), KAUTH_ACL_COPYSIZE(acl));
	error = vn_setxattr(vp,
			    KAUTH_FILESEC_XATTR,
			    fsec_uio,
			    XATTR_NOSECURITY, 		/* we have auth'ed already */
			    ctx);
	VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);

out:
	if (fsec_uio != NULL)
		uio_free(fsec_uio);
	return(error);
}
1629
1630
/*
 * Get attributes for a vnode, with KPI-level post-processing on top of
 * the raw VNOP_GETATTR:
 *
 *  - strips extended-security requests on filesystems without support;
 *  - synthesises ACL/owner-UUID/group-UUID from the filesec EA when the
 *    filesystem did not return them natively;
 *  - applies uid/gid 99 and MNT_IGNORE_OWNERSHIP substitution;
 *  - fills in reasonable defaults for iosize, flags, filerev, gen and
 *    the size family (data/total size and alloc), plus change-time from
 *    modify-time, va_type, and va_fsid.
 *
 * Returns 0 or an errno from the underlying VNOP/filesec machinery.
 * On success, any ACL stored into va_acl is allocated and owned by the
 * caller.
 */
int
vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	int	error;
	uid_t	nuid;
	gid_t	ngid;

	/* don't ask for extended security data if the filesystem doesn't support it */
	if (!vfs_extendedsecurity(vnode_mount(vp))) {
		VATTR_CLEAR_ACTIVE(vap, va_acl);
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
		VATTR_CLEAR_ACTIVE(vap, va_guuid);
	}

	/*
	 * If the caller wants size values we might have to synthesise, give the
	 * filesystem the opportunity to supply better intermediate results.
	 */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		VATTR_SET_ACTIVE(vap, va_data_size);
		VATTR_SET_ACTIVE(vap, va_data_alloc);
		VATTR_SET_ACTIVE(vap, va_total_size);
		VATTR_SET_ACTIVE(vap, va_total_alloc);
	}

	error = VNOP_GETATTR(vp, vap, ctx);
	if (error) {
		KAUTH_DEBUG("ERROR - returning %d", error);
		goto out;
	}

	/*
	 * If extended security data was requested but not returned, try the fallback
	 * path (read the filesec extended attribute directly).
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
		fsec = NULL;

		if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) {
			/* try to get the filesec */
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
				goto out;
		}
		/* if no filesec, no attributes */
		if (fsec == NULL) {
			VATTR_RETURN(vap, va_acl, NULL);
			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
		} else {

			/* looks good, try to return what we were asked for */
			VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
			VATTR_RETURN(vap, va_guuid, fsec->fsec_group);

			/* only return the ACL if we were actually asked for it */
			if (VATTR_IS_ACTIVE(vap, va_acl)) {
				if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
					VATTR_RETURN(vap, va_acl, NULL);
				} else {
					/* copy the ACL out of the filesec into a caller-owned allocation */
					facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
					if (facl == NULL) {
						kauth_filesec_free(fsec);
						error = ENOMEM;
						goto out;
					}
					bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
					VATTR_RETURN(vap, va_acl, facl);
				}
			}
			kauth_filesec_free(fsec);
		}
	}
	/*
	 * If someone gave us an unsolicited filesec, toss it.  We promise that
	 * we're OK with a filesystem giving us anything back, but our callers
	 * only expect what they asked for.
	 */
	if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
		if (vap->va_acl != NULL)
			kauth_acl_free(vap->va_acl);
		VATTR_CLEAR_SUPPORTED(vap, va_acl);
	}

#if 0	/* enable when we have a filesystem only supporting UUIDs */
	/*
	 * Handle the case where we need a UID/GID, but only have extended
	 * security information.
	 */
	if (VATTR_NOT_RETURNED(vap, va_uid) &&
	    VATTR_IS_SUPPORTED(vap, va_uuuid) &&
	    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
			VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_NOT_RETURNED(vap, va_gid) &&
	    VATTR_IS_SUPPORTED(vap, va_guuid) &&
	    !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
			VATTR_RETURN(vap, va_gid, ngid);
	}
#endif

	/*
	 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
	 * (uid/gid 99 is the "unknown" owner; non-superusers see themselves.)
	 */
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			nuid = vp->v_mount->mnt_fsowner;
			if (nuid == KAUTH_UID_NONE)
				nuid = 99;
		} else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
			nuid = vap->va_uid;
		} else {
			/* this will always be something sensible */
			nuid = vp->v_mount->mnt_fsowner;
		}
		if ((nuid == 99) && !vfs_context_issuser(ctx))
			nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			ngid = vp->v_mount->mnt_fsgroup;
			if (ngid == KAUTH_GID_NONE)
				ngid = 99;
		} else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
			ngid = vap->va_gid;
		} else {
			/* this will always be something sensible */
			ngid = vp->v_mount->mnt_fsgroup;
		}
		if ((ngid == 99) && !vfs_context_issuser(ctx))
			ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_gid, ngid);
	}

	/*
	 * Synthesise some values that can be reasonably guessed.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_iosize))
		VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);

	if (!VATTR_IS_SUPPORTED(vap, va_flags))
		VATTR_RETURN(vap, va_flags, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_filerev))
		VATTR_RETURN(vap, va_filerev, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_gen))
		VATTR_RETURN(vap, va_gen, 0);

	/*
	 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_data_size))
		VATTR_RETURN(vap, va_data_size, 0);

	/* do we want any of the possibly-computed values? */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		/* make sure f_bsize is valid */
		if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
			if ((error = vfs_update_vfsstat(vp->v_mount, ctx)) != 0)
				goto out;
		}

		/* default va_data_alloc from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
			VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));

		/* default va_total_size from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_total_size))
			VATTR_RETURN(vap, va_total_size, vap->va_data_size);

		/* default va_total_alloc from va_total_size which is guaranteed at this point */
		if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
			VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
	}

	/*
	 * If we don't have a change time, pull it from the modtime.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
		VATTR_RETURN(vap, va_change_time, vap->va_modify_time);

	/*
	 * This is really only supported for the creation VNOPs, but since the field is there
	 * we should populate it correctly.
	 */
	VATTR_RETURN(vap, va_type, vp->v_type);

	/*
	 * The fsid can be obtained from the mountpoint directly.
	 */
	VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

out:

	return(error);
}
1836
/*
 * Set attributes on a vnode via VNOP_SETATTR, with KPI-level policy:
 *
 *  - fails with EROFS on read-only mounts;
 *  - silently drops uid/gid changes on MNT_IGNORE_OWNERSHIP mounts;
 *  - rejects extended-security (ACL/UUID) changes with ENOTSUP when the
 *    filesystem does not support them;
 *  - runs vnode_setattr_fallback() for attributes the FS did not handle;
 *  - invalidates the vnode's cached authorization on ownership/mode/ACL
 *    changes, and posts FSE_STAT_CHANGED / FSE_CHOWN fsevents.
 *
 * Returns 0 or an errno.
 */
int
vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int	error, is_ownership_change=0;

	/*
	 * Make sure the filesystem is mounted R/W.
	 * If not, return an error.
	 */
	if (vfs_isrdonly(vp->v_mount))
		return(EROFS);

	/*
	 * If ownership is being ignored on this volume, we silently discard
	 * ownership changes.
	 */
	if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}

	if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) {
		is_ownership_change = 1;
	}

	/*
	 * Make sure that extended security is enabled if we're going to try
	 * to set any.
	 */
	if (!vfs_extendedsecurity(vnode_mount(vp)) &&
	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
		KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
		return(ENOTSUP);
	}

	error = VNOP_SETATTR(vp, vap, ctx);

	/* anything the FS didn't handle goes through the EA-based fallback */
	if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
		error = vnode_setattr_fallback(vp, vap, ctx);

	/*
	 * If we have changed any of the things about the file that are likely
	 * to result in changes to authorisation results, blow the vnode auth
	 * cache
	 */
	if (VATTR_IS_SUPPORTED(vap, va_mode) ||
	    VATTR_IS_SUPPORTED(vap, va_uid) ||
	    VATTR_IS_SUPPORTED(vap, va_gid) ||
	    VATTR_IS_SUPPORTED(vap, va_flags) ||
	    VATTR_IS_SUPPORTED(vap, va_acl) ||
	    VATTR_IS_SUPPORTED(vap, va_uuuid) ||
	    VATTR_IS_SUPPORTED(vap, va_guuid))
		vnode_uncache_credentials(vp);
	// only send a stat_changed event if this is more than
	// just an access time update
	if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time))) {
		if (need_fsevent(FSE_STAT_CHANGED, vp) || (is_ownership_change && need_fsevent(FSE_CHOWN, vp))) {
			if (is_ownership_change == 0)
				add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
			else
				add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
		}
	}
	return(error);
}
1902
/*
 * Following an operation which sets attributes (setattr, create, etc.) we may
 * need to perform fallback operations to get attributes saved.
 *
 * Currently this handles only extended security (ACL / owner UUID /
 * group UUID) via the filesec extended attribute: a read-modify-write
 * of the existing filesec when only some fields are being set, or a
 * fresh local filesec when none exists.  If the resulting filesec is
 * entirely empty the EA is removed instead of written.
 *
 * Note that we do not free any caller-supplied filesec/ACL; the caller
 * is expected to do this.  Returns 0 or an errno.
 */
int
vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	struct kauth_filesec lfsec;
	int	error;

	error = 0;

	/*
	 * Extended security fallback via extended attributes.
	 *
	 * Note that we do not free the filesec; the caller is expected to do this.
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) ||
	    VATTR_NOT_RETURNED(vap, va_uuuid) ||
	    VATTR_NOT_RETURNED(vap, va_guuid)) {
		VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");

		/*
		 * Fail for file types that we don't permit extended security to be set on.
		 */
		if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
			VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
			error = EINVAL;
			goto out;
		}

		/*
		 * If we don't have all the extended security items, we need to fetch the existing
		 * data to perform a read-modify-write operation.
		 */
		fsec = NULL;
		if (!VATTR_IS_ACTIVE(vap, va_acl) ||
		    !VATTR_IS_ACTIVE(vap, va_uuuid) ||
		    !VATTR_IS_ACTIVE(vap, va_guuid)) {
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
				KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
				goto out;
			}
		}
		/* if we didn't get a filesec, use our local one */
		if (fsec == NULL) {
			KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
			fsec = &lfsec;
		} else {
			KAUTH_DEBUG("SETATTR - updating existing filesec");
		}
		/* find the ACL */
		facl = &fsec->fsec_acl;

		/* if we're using the local filesec, we need to initialise it */
		if (fsec == &lfsec) {
			fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
			fsec->fsec_owner = kauth_null_guid;
			fsec->fsec_group = kauth_null_guid;
			facl->acl_entrycount = KAUTH_FILESEC_NOACL;
			facl->acl_flags = 0;
		}

		/*
		 * Update with the supplied attributes.
		 */
		if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
			KAUTH_DEBUG("SETATTR - updating owner UUID");
			fsec->fsec_owner = vap->va_uuuid;
			VATTR_SET_SUPPORTED(vap, va_uuuid);
		}
		if (VATTR_IS_ACTIVE(vap, va_guuid)) {
			KAUTH_DEBUG("SETATTR - updating group UUID");
			fsec->fsec_group = vap->va_guuid;
			VATTR_SET_SUPPORTED(vap, va_guuid);
		}
		if (VATTR_IS_ACTIVE(vap, va_acl)) {
			if (vap->va_acl == NULL) {
				KAUTH_DEBUG("SETATTR - removing ACL");
				facl->acl_entrycount = KAUTH_FILESEC_NOACL;
			} else {
				KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
				/* point at the caller's ACL; vnode_set_filesec writes header and ACL separately */
				facl = vap->va_acl;
			}
			VATTR_SET_SUPPORTED(vap, va_acl);
		}

		/*
		 * If the filesec data is all invalid, we can just remove the EA completely.
		 */
		if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
		    kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
		    kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
			error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
			/* no attribute is ok, nothing to delete */
			if (error == ENOATTR)
				error = 0;
			VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
		} else {
			/* write the EA */
			error = vnode_set_filesec(vp, fsec, facl, ctx);
			VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
		}

		/* if we fetched a filesec, dispose of the buffer */
		if (fsec != &lfsec)
			kauth_filesec_free(fsec);
	}
out:

	return(error);
}
2017
2018 /*
2019 * Definition of vnode operations.
2020 */
2021
2022 #if 0
2023 /*
2024 *#
2025 *#% lookup dvp L ? ?
2026 *#% lookup vpp - L -
2027 */
2028 struct vnop_lookup_args {
2029 struct vnodeop_desc *a_desc;
2030 vnode_t a_dvp;
2031 vnode_t *a_vpp;
2032 struct componentname *a_cnp;
2033 vfs_context_t a_context;
2034 };
2035 #endif /* 0*/
2036
/*
 * Dispatch a lookup through the filesystem's vnop_lookup entry.
 *
 * For non-thread-safe filesystems the caller's path runs under the
 * funnel with the directory's fsnode lock held.  When the last
 * component is being looked up with LOCKPARENT, the fsnode lock on the
 * directory is deliberately LEFT HELD (flagged via FSNODELOCKHELD in
 * cn_flags) so the enclosing syscall can complete its operation and
 * drop it later; only the funnel state is restored here in that case.
 *
 * Returns the filesystem's error, with *vpp set on success.
 */
errno_t
VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t context)
{
	int _err;
	struct vnop_lookup_args a;
	vnode_t vp;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_lookup_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	vnode_cache_credentials(dvp, context);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);

	vp = *vpp;

	if (!thread_safe) {
		if ( (cnp->cn_flags & ISLASTCN) ) {
			if ( (cnp->cn_flags & LOCKPARENT) ) {
				if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
					/*
					 * leave the fsnode lock held on
					 * the directory, but restore the funnel...
					 * also indicate that we need to drop the
					 * fsnode_lock when we're done with the
					 * system call processing for this path
					 */
					cnp->cn_flags |= FSNODELOCKHELD;

					(void) thread_funnel_set(kernel_flock, funnel_state);
					return (_err);
				}
			}
		}
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2086
2087 #if 0
2088 /*
2089 *#
2090 *#% create dvp L L L
2091 *#% create vpp - L -
2092 *#
2093 */
2094
2095 struct vnop_create_args {
2096 struct vnodeop_desc *a_desc;
2097 vnode_t a_dvp;
2098 vnode_t *a_vpp;
2099 struct componentname *a_cnp;
2100 struct vnode_attr *a_vap;
2101 vfs_context_t a_context;
2102 };
2103 #endif /* 0*/
/*
 * Dispatch a file-create through the filesystem's vnop_create entry.
 *
 * Non-thread-safe filesystems run under the funnel with the directory's
 * fsnode lock held.  On success, if the filesystem does not support
 * native extended attributes, any stale AppleDouble ("._") shadow file
 * left over from a previous incarnation of this name is removed.
 */
errno_t
VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
{
	int _err;
	struct vnop_create_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_create_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
	}
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2137
2138 #if 0
2139 /*
2140 *#
2141 *#% whiteout dvp L L L
2142 *#% whiteout cnp - - -
2143 *#% whiteout flag - - -
2144 *#
2145 */
2146 struct vnop_whiteout_args {
2147 struct vnodeop_desc *a_desc;
2148 vnode_t a_dvp;
2149 struct componentname *a_cnp;
2150 int a_flags;
2151 vfs_context_t a_context;
2152 };
2153 #endif /* 0*/
/*
 * Dispatch a whiteout operation (create/delete/lookup of a whiteout
 * entry, per 'flags') through the filesystem's vnop_whiteout entry.
 * Non-thread-safe filesystems run under the funnel with the
 * directory's fsnode lock held.
 */
errno_t
VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t context)
{
	int _err;
	struct vnop_whiteout_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_whiteout_desc;
	a.a_dvp = dvp;
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2180
2181 #if 0
2182 /*
2183 *#
2184 *#% mknod dvp L U U
2185 *#% mknod vpp - X -
2186 *#
2187 */
2188 struct vnop_mknod_args {
2189 struct vnodeop_desc *a_desc;
2190 vnode_t a_dvp;
2191 vnode_t *a_vpp;
2192 struct componentname *a_cnp;
2193 struct vnode_attr *a_vap;
2194 vfs_context_t a_context;
2195 };
2196 #endif /* 0*/
/*
 * Dispatch a device/special-node create through the filesystem's
 * vnop_mknod entry.  Non-thread-safe filesystems run under the funnel
 * with the directory's fsnode lock held.
 */
errno_t
VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
{

	int _err;
	struct vnop_mknod_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_mknod_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2225
2226 #if 0
2227 /*
2228 *#
2229 *#% open vp L L L
2230 *#
2231 */
2232 struct vnop_open_args {
2233 struct vnodeop_desc *a_desc;
2234 vnode_t a_vp;
2235 int a_mode;
2236 vfs_context_t a_context;
2237 };
2238 #endif /* 0*/
/*
 * Dispatch an open through the filesystem's vnop_open entry.
 *
 * A NULL context is allowed: a temporary one is built from the current
 * proc and credential.  For non-thread-safe filesystems the funnel is
 * taken; the fsnode lock is additionally taken except for VCHR, VFIFO
 * and VSOCK vnodes (their open paths do their own serialization).
 */
errno_t
VNOP_OPEN(vnode_t vp, int mode, vfs_context_t context)
{
	int _err;
	struct vnop_open_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_open_desc;
	a.a_vp = vp;
	a.a_mode = mode;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2277
2278 #if 0
2279 /*
2280 *#
2281 *#% close vp U U U
2282 *#
2283 */
2284 struct vnop_close_args {
2285 struct vnodeop_desc *a_desc;
2286 vnode_t a_vp;
2287 int a_fflag;
2288 vfs_context_t a_context;
2289 };
2290 #endif /* 0*/
/*
 * Dispatch a close through the filesystem's vnop_close entry.
 *
 * Mirrors VNOP_OPEN: a NULL context gets a temporary one from the
 * current proc/credential, and for non-thread-safe filesystems the
 * funnel (and, except for VCHR/VFIFO/VSOCK, the fsnode lock) is held
 * around the call.
 */
errno_t
VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t context)
{
	int _err;
	struct vnop_close_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_close_desc;
	a.a_vp = vp;
	a.a_fflag = fflag;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2329
2330 #if 0
2331 /*
2332 *#
2333 *#% access vp L L L
2334 *#
2335 */
2336 struct vnop_access_args {
2337 struct vnodeop_desc *a_desc;
2338 vnode_t a_vp;
2339 int a_action;
2340 vfs_context_t a_context;
2341 };
2342 #endif /* 0*/
/*
 * Dispatch an access check (kauth action mask) through the
 * filesystem's vnop_access entry.  A NULL context gets a temporary one
 * from the current proc/credential; non-thread-safe filesystems run
 * with the fsnode lock (and funnel, via lock_fsnode) held.
 */
errno_t
VNOP_ACCESS(vnode_t vp, int action, vfs_context_t context)
{
	int _err;
	struct vnop_access_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_access_desc;
	a.a_vp = vp;
	a.a_action = action;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
2374
2375 #if 0
2376 /*
2377 *#
2378 *#% getattr vp = = =
2379 *#
2380 */
2381 struct vnop_getattr_args {
2382 struct vnodeop_desc *a_desc;
2383 vnode_t a_vp;
2384 struct vnode_attr *a_vap;
2385 vfs_context_t a_context;
2386 };
2387 #endif /* 0*/
/*
 * Dispatch a raw attribute read through the filesystem's vnop_getattr
 * entry (callers normally use vnode_getattr(), which post-processes
 * the result).  Non-thread-safe filesystems run with the fsnode lock
 * held.
 */
errno_t
VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
{
	int _err;
	struct vnop_getattr_args a;
	int thread_safe;
	int funnel_state;	/* set by lock_fsnode when !thread_safe */

	a.a_desc = &vnop_getattr_desc;
	a.a_vp = vp;
	a.a_vap = vap;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
2413
2414 #if 0
2415 /*
2416 *#
2417 *#% setattr vp L L L
2418 *#
2419 */
2420 struct vnop_setattr_args {
2421 struct vnodeop_desc *a_desc;
2422 vnode_t a_vp;
2423 struct vnode_attr *a_vap;
2424 vfs_context_t a_context;
2425 };
2426 #endif /* 0*/
2427 errno_t
2428 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
2429 {
2430 int _err;
2431 struct vnop_setattr_args a;
2432 int thread_safe;
2433 int funnel_state;
2434
2435 a.a_desc = &vnop_setattr_desc;
2436 a.a_vp = vp;
2437 a.a_vap = vap;
2438 a.a_context = context;
2439 thread_safe = THREAD_SAFE_FS(vp);
2440
2441 if (!thread_safe) {
2442 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2443 return (_err);
2444 }
2445 }
2446 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
2447
2448 /*
2449 * Shadow uid/gid/mod change to extended attibute file.
2450 */
2451 if (_err == 0 && !NATIVE_XATTR(vp)) {
2452 struct vnode_attr va;
2453 int change = 0;
2454
2455 VATTR_INIT(&va);
2456 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2457 VATTR_SET(&va, va_uid, vap->va_uid);
2458 change = 1;
2459 }
2460 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2461 VATTR_SET(&va, va_gid, vap->va_gid);
2462 change = 1;
2463 }
2464 if (VATTR_IS_ACTIVE(vap, va_mode)) {
2465 VATTR_SET(&va, va_mode, vap->va_mode);
2466 change = 1;
2467 }
2468 if (change) {
2469 vnode_t dvp;
2470 char *vname;
2471
2472 dvp = vnode_getparent(vp);
2473 vname = vnode_getname(vp);
2474
2475 xattrfile_setattr(dvp, vname, &va, context, thread_safe);
2476 if (dvp != NULLVP)
2477 vnode_put(dvp);
2478 if (vname != NULL)
2479 vnode_putname(vname);
2480 }
2481 }
2482 if (!thread_safe) {
2483 unlock_fsnode(vp, &funnel_state);
2484 }
2485 return (_err);
2486 }
2487
2488 #if 0
2489 /*
2490 *#
2491 *#% getattrlist vp = = =
2492 *#
2493 */
2494 struct vnop_getattrlist_args {
2495 struct vnodeop_desc *a_desc;
2496 vnode_t a_vp;
2497 struct attrlist *a_alist;
2498 struct uio *a_uio;
2499 int a_options;
2500 vfs_context_t a_context;
2501 };
2502 #endif /* 0*/
2503 errno_t
2504 VNOP_GETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
2505 {
2506 int _err;
2507 struct vnop_getattrlist_args a;
2508 int thread_safe;
2509 int funnel_state = 0;
2510
2511 a.a_desc = &vnop_getattrlist_desc;
2512 a.a_vp = vp;
2513 a.a_alist = alist;
2514 a.a_uio = uio;
2515 a.a_options = options;
2516 a.a_context = context;
2517 thread_safe = THREAD_SAFE_FS(vp);
2518
2519 if (!thread_safe) {
2520 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2521 return (_err);
2522 }
2523 }
2524 _err = (*vp->v_op[vnop_getattrlist_desc.vdesc_offset])(&a);
2525 if (!thread_safe) {
2526 unlock_fsnode(vp, &funnel_state);
2527 }
2528 return (_err);
2529 }
2530
2531 #if 0
2532 /*
2533 *#
2534 *#% setattrlist vp L L L
2535 *#
2536 */
2537 struct vnop_setattrlist_args {
2538 struct vnodeop_desc *a_desc;
2539 vnode_t a_vp;
2540 struct attrlist *a_alist;
2541 struct uio *a_uio;
2542 int a_options;
2543 vfs_context_t a_context;
2544 };
2545 #endif /* 0*/
/*
 * VNOP_SETATTRLIST - dispatch a setattrlist request to the filesystem's
 * vnop_setattrlist entry point.  After the call, the vnode's cached
 * access credentials are invalidated (presumably because setattrlist
 * may have changed ownership or mode — the cached rights could be stale).
 */
errno_t
VNOP_SETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
{
	int _err;
	struct vnop_setattrlist_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_setattrlist_desc;
	a.a_vp = vp;
	a.a_alist = alist;
	a.a_uio = uio;
	a.a_options = options;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_setattrlist_desc.vdesc_offset])(&a);

	/* Drop cached credentials unconditionally, even on error. */
	vnode_uncache_credentials(vp);

	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
2576
2577
2578 #if 0
2579 /*
2580 *#
2581 *#% read vp L L L
2582 *#
2583 */
2584 struct vnop_read_args {
2585 struct vnodeop_desc *a_desc;
2586 vnode_t a_vp;
2587 struct uio *a_uio;
2588 int a_ioflag;
2589 vfs_context_t a_context;
2590 };
2591 #endif /* 0*/
/*
 * VNOP_READ - dispatch a read request to the filesystem's vnop_read
 * entry point.  For non-threadsafe filesystems the funnel is taken;
 * the fsnode lock is additionally taken except for character devices,
 * FIFOs, and sockets (which have no fsnode to lock).
 */
errno_t
VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
{
	int _err;
	struct vnop_read_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	/* Synthesize a context from the current thread when none is given. */
	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}

	a.a_desc = &vnop_read_desc;
	a.a_vp = vp;
	a.a_uio = uio;
	a.a_ioflag = ioflag;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		/* Special vnode types take only the funnel, not the fsnode lock. */
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);

	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2633
2634
2635 #if 0
2636 /*
2637 *#
2638 *#% write vp L L L
2639 *#
2640 */
2641 struct vnop_write_args {
2642 struct vnodeop_desc *a_desc;
2643 vnode_t a_vp;
2644 struct uio *a_uio;
2645 int a_ioflag;
2646 vfs_context_t a_context;
2647 };
2648 #endif /* 0*/
/*
 * VNOP_WRITE - dispatch a write request to the filesystem's vnop_write
 * entry point.  Locking mirrors VNOP_READ: funnel for non-threadsafe
 * filesystems, plus the fsnode lock except for character devices,
 * FIFOs, and sockets.
 */
errno_t
VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
{
	struct vnop_write_args a;
	int _err;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	/* Synthesize a context from the current thread when none is given. */
	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}

	a.a_desc = &vnop_write_desc;
	a.a_vp = vp;
	a.a_uio = uio;
	a.a_ioflag = ioflag;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		/* Special vnode types take only the funnel, not the fsnode lock. */
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);

	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2690
2691
2692 #if 0
2693 /*
2694 *#
2695 *#% ioctl vp U U U
2696 *#
2697 */
2698 struct vnop_ioctl_args {
2699 struct vnodeop_desc *a_desc;
2700 vnode_t a_vp;
2701 u_long a_command;
2702 caddr_t a_data;
2703 int a_fflag;
2704 vfs_context_t a_context;
2705 };
2706 #endif /* 0*/
/*
 * VNOP_IOCTL - dispatch an ioctl request to the filesystem's vnop_ioctl
 * entry point.  64-bit callers are rejected with ENOTTY if the backing
 * filesystem has not declared itself 64-bit ready.  Locking mirrors
 * VNOP_READ/VNOP_WRITE for non-threadsafe filesystems.
 */
errno_t
VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t context)
{
	int _err;
	struct vnop_ioctl_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	/* Synthesize a context from the current thread when none is given. */
	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}

	/* Refuse 64-bit ioctls on filesystems that can't handle them. */
	if (vfs_context_is64bit(context)) {
		if (!vnode_vfs64bitready(vp)) {
			return(ENOTTY);
		}
	}

	a.a_desc = &vnop_ioctl_desc;
	a.a_vp = vp;
	a.a_command = command;
	a.a_data = data;
	a.a_fflag = fflag;
	a.a_context= context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		/* Special vnode types take only the funnel, not the fsnode lock. */
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2754
2755
2756 #if 0
2757 /*
2758 *#
2759 *#% select vp U U U
2760 *#
2761 */
2762 struct vnop_select_args {
2763 struct vnodeop_desc *a_desc;
2764 vnode_t a_vp;
2765 int a_which;
2766 int a_fflags;
2767 void *a_wql;
2768 vfs_context_t a_context;
2769 };
2770 #endif /* 0*/
/*
 * VNOP_SELECT - dispatch a select/poll request to the filesystem's
 * vnop_select entry point.  Locking mirrors VNOP_READ for
 * non-threadsafe filesystems.
 */
errno_t
VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t context)
{
	int _err;
	struct vnop_select_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	/* Synthesize a context from the current thread when none is given. */
	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_select_desc;
	a.a_vp = vp;
	a.a_which = which;
	a.a_fflags = fflags;
	a.a_context = context;
	a.a_wql = wql;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		/* Special vnode types take only the funnel, not the fsnode lock. */
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2811
2812
2813 #if 0
2814 /*
2815 *#
2816 *#% exchange fvp L L L
2817 *#% exchange tvp L L L
2818 *#
2819 */
2820 struct vnop_exchange_args {
2821 struct vnodeop_desc *a_desc;
2822 vnode_t a_fvp;
2823 vnode_t a_tvp;
2824 int a_options;
2825 vfs_context_t a_context;
2826 };
2827 #endif /* 0*/
/*
 * VNOP_EXCHANGE - dispatch an exchange-files request to the filesystem's
 * vnop_exchange entry point.  Both vnodes are locked in ascending address
 * order to avoid lock-order deadlocks with concurrent exchanges.
 *
 * NOTE(review): thread-safety is determined from fvp only; this assumes
 * fvp and tvp live on the same filesystem — confirm callers guarantee it.
 */
errno_t
VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t context)
{
	int _err;
	struct vnop_exchange_args a;
	int thread_safe;
	int funnel_state = 0;
	vnode_t lock_first = NULL, lock_second = NULL;

	a.a_desc = &vnop_exchange_desc;
	a.a_fvp = fvp;
	a.a_tvp = tvp;
	a.a_options = options;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(fvp);

	if (!thread_safe) {
		/*
		 * Lock in vnode address order to avoid deadlocks
		 */
		if (fvp < tvp) {
			lock_first = fvp;
			lock_second = tvp;
		} else {
			lock_first = tvp;
			lock_second = fvp;
		}
		/* Only the first lock manages the funnel state. */
		if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
			return (_err);
		}
		if ( (_err = lock_fsnode(lock_second, NULL)) ) {
			unlock_fsnode(lock_first, &funnel_state);
			return (_err);
		}
	}
	_err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		/* Unlock in reverse acquisition order. */
		unlock_fsnode(lock_second, NULL);
		unlock_fsnode(lock_first, &funnel_state);
	}
	return (_err);
}
2870
2871
2872 #if 0
2873 /*
2874 *#
2875 *#% revoke vp U U U
2876 *#
2877 */
2878 struct vnop_revoke_args {
2879 struct vnodeop_desc *a_desc;
2880 vnode_t a_vp;
2881 int a_flags;
2882 vfs_context_t a_context;
2883 };
2884 #endif /* 0*/
2885 errno_t
2886 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t context)
2887 {
2888 struct vnop_revoke_args a;
2889 int _err;
2890 int thread_safe;
2891 int funnel_state = 0;
2892
2893 a.a_desc = &vnop_revoke_desc;
2894 a.a_vp = vp;
2895 a.a_flags = flags;
2896 a.a_context = context;
2897 thread_safe = THREAD_SAFE_FS(vp);
2898
2899 if (!thread_safe) {
2900 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2901 }
2902 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
2903 if (!thread_safe) {
2904 (void) thread_funnel_set(kernel_flock, funnel_state);
2905 }
2906 return (_err);
2907 }
2908
2909
2910 #if 0
2911 /*
2912 *#
2913 *# mmap - vp U U U
2914 *#
2915 */
2916 struct vnop_mmap_args {
2917 struct vnodeop_desc *a_desc;
2918 vnode_t a_vp;
2919 int a_fflags;
2920 vfs_context_t a_context;
2921 };
2922 #endif /* 0*/
2923 errno_t
2924 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t context)
2925 {
2926 int _err;
2927 struct vnop_mmap_args a;
2928 int thread_safe;
2929 int funnel_state = 0;
2930
2931 a.a_desc = &vnop_mmap_desc;
2932 a.a_vp = vp;
2933 a.a_fflags = fflags;
2934 a.a_context = context;
2935 thread_safe = THREAD_SAFE_FS(vp);
2936
2937 if (!thread_safe) {
2938 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2939 return (_err);
2940 }
2941 }
2942 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
2943 if (!thread_safe) {
2944 unlock_fsnode(vp, &funnel_state);
2945 }
2946 return (_err);
2947 }
2948
2949
2950 #if 0
2951 /*
2952 *#
2953 *# mnomap - vp U U U
2954 *#
2955 */
2956 struct vnop_mnomap_args {
2957 struct vnodeop_desc *a_desc;
2958 vnode_t a_vp;
2959 vfs_context_t a_context;
2960 };
2961 #endif /* 0*/
2962 errno_t
2963 VNOP_MNOMAP(vnode_t vp, vfs_context_t context)
2964 {
2965 int _err;
2966 struct vnop_mnomap_args a;
2967 int thread_safe;
2968 int funnel_state = 0;
2969
2970 a.a_desc = &vnop_mnomap_desc;
2971 a.a_vp = vp;
2972 a.a_context = context;
2973 thread_safe = THREAD_SAFE_FS(vp);
2974
2975 if (!thread_safe) {
2976 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2977 return (_err);
2978 }
2979 }
2980 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
2981 if (!thread_safe) {
2982 unlock_fsnode(vp, &funnel_state);
2983 }
2984 return (_err);
2985 }
2986
2987
2988 #if 0
2989 /*
2990 *#
2991 *#% fsync vp L L L
2992 *#
2993 */
2994 struct vnop_fsync_args {
2995 struct vnodeop_desc *a_desc;
2996 vnode_t a_vp;
2997 int a_waitfor;
2998 vfs_context_t a_context;
2999 };
3000 #endif /* 0*/
3001 errno_t
3002 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t context)
3003 {
3004 struct vnop_fsync_args a;
3005 int _err;
3006 int thread_safe;
3007 int funnel_state = 0;
3008
3009 a.a_desc = &vnop_fsync_desc;
3010 a.a_vp = vp;
3011 a.a_waitfor = waitfor;
3012 a.a_context = context;
3013 thread_safe = THREAD_SAFE_FS(vp);
3014
3015 if (!thread_safe) {
3016 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3017 return (_err);
3018 }
3019 }
3020 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3021 if (!thread_safe) {
3022 unlock_fsnode(vp, &funnel_state);
3023 }
3024 return (_err);
3025 }
3026
3027
3028 #if 0
3029 /*
3030 *#
3031 *#% remove dvp L U U
3032 *#% remove vp L U U
3033 *#
3034 */
3035 struct vnop_remove_args {
3036 struct vnodeop_desc *a_desc;
3037 vnode_t a_dvp;
3038 vnode_t a_vp;
3039 struct componentname *a_cnp;
3040 int a_flags;
3041 vfs_context_t a_context;
3042 };
3043 #endif /* 0*/
/*
 * VNOP_REMOVE - dispatch a file-unlink request to the filesystem's
 * vnop_remove entry point.  On success the target vnode is flagged for
 * inactive processing, and for filesystems without native xattr support
 * the associated "._" AppleDouble file is removed as well.
 *
 * NOTE(review): thread-safety is evaluated on dvp but the fsnode lock is
 * taken on vp — confirm this asymmetry is intended (dvp and vp should be
 * on the same filesystem for a remove).
 */
errno_t
VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t context)
{
	int _err;
	struct vnop_remove_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_remove_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);

	if (_err == 0) {
		vnode_setneedinactive(vp);

		if ( !(NATIVE_XATTR(dvp)) ) {
			/*
			 * Remove any associated extended attibute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
		}
	}
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3082
3083
3084 #if 0
3085 /*
3086 *#
3087 *#% link vp U U U
3088 *#% link tdvp L U U
3089 *#
3090 */
3091 struct vnop_link_args {
3092 struct vnodeop_desc *a_desc;
3093 vnode_t a_vp;
3094 vnode_t a_tdvp;
3095 struct componentname *a_cnp;
3096 vfs_context_t a_context;
3097 };
3098 #endif /* 0*/
/*
 * VNOP_LINK - dispatch a hard-link request to the target directory's
 * filesystem via its vnop_link entry point.  Linking a regular file
 * whose name begins with "._" (plus at least one more character) is
 * refused with EPERM on filesystems without native xattr support, to
 * protect AppleDouble sidecar files.
 */
errno_t
VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t context)
{
	int _err;
	struct vnop_link_args a;
	int thread_safe;
	int funnel_state = 0;

	/*
	 * For file systems with non-native extended attributes,
	 * disallow linking to an existing "._" Apple Double file.
	 */
	if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
		char *vname;

		vname = vnode_getname(vp);
		if (vname != NULL) {
			_err = 0;
			/* "._" followed by at least one character. */
			if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
				_err = EPERM;
			}
			vnode_putname(vname);
			if (_err)
				return (_err);
		}
	}
	a.a_desc = &vnop_link_desc;
	a.a_vp = vp;
	a.a_tdvp = tdvp;
	a.a_cnp = cnp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	/* Dispatch through the target directory's op vector. */
	_err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3143
3144
3145 #if 0
3146 /*
3147 *#
3148 *#% rename fdvp U U U
3149 *#% rename fvp U U U
3150 *#% rename tdvp L U U
3151 *#% rename tvp X U U
3152 *#
3153 */
3154 struct vnop_rename_args {
3155 struct vnodeop_desc *a_desc;
3156 vnode_t a_fdvp;
3157 vnode_t a_fvp;
3158 struct componentname *a_fcnp;
3159 vnode_t a_tdvp;
3160 vnode_t a_tvp;
3161 struct componentname *a_tcnp;
3162 vfs_context_t a_context;
3163 };
3164 #endif /* 0*/
/*
 * VNOP_RENAME - dispatch a rename request to the source directory's
 * filesystem via its vnop_rename entry point.
 *
 * For non-threadsafe filesystems, parent directories and then child
 * vnodes are each locked in ascending address order to avoid deadlocks.
 * On filesystems without native xattr support, a successful rename is
 * followed by a best-effort rename (or removal) of the corresponding
 * "._" AppleDouble sidecar file; errors in that second phase are not
 * reported to the caller (the primary rename already succeeded).
 */
errno_t
VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
            struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
            vfs_context_t context)
{
	int _err;
	struct vnop_rename_args a;
	int funnel_state = 0;
	char smallname1[48];
	char smallname2[48];
	char *xfromname = NULL;
	char *xtoname = NULL;
	vnode_t lock_first = NULL, lock_second = NULL;
	vnode_t fdvp_unsafe = NULLVP;
	vnode_t tdvp_unsafe = NULLVP;

	a.a_desc = &vnop_rename_desc;
	a.a_fdvp = fdvp;
	a.a_fvp = fvp;
	a.a_fcnp = fcnp;
	a.a_tdvp = tdvp;
	a.a_tvp = tvp;
	a.a_tcnp = tcnp;
	a.a_context = context;

	if (!THREAD_SAFE_FS(fdvp))
		fdvp_unsafe = fdvp;
	if (!THREAD_SAFE_FS(tdvp))
		tdvp_unsafe = tdvp;

	if (fdvp_unsafe != NULLVP) {
		/*
		 * Lock parents in vnode address order to avoid deadlocks
		 * note that it's possible for the fdvp to be unsafe,
		 * but the tdvp to be safe because tvp could be a directory
		 * in the root of a filesystem... in that case, tdvp is the
		 * in the filesystem that this root is mounted on
		 */
		if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
			lock_first = fdvp_unsafe;
			lock_second = NULL;
		} else if (fdvp_unsafe < tdvp_unsafe) {
			lock_first = fdvp_unsafe;
			lock_second = tdvp_unsafe;
		} else {
			lock_first = tdvp_unsafe;
			lock_second = fdvp_unsafe;
		}
		/* Only the first parent lock manages the funnel state. */
		if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
			return (_err);

		if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
			unlock_fsnode(lock_first, &funnel_state);
			return (_err);
		}

		/*
		 * Lock both children in vnode address order to avoid deadlocks
		 */
		if (tvp == NULL || tvp == fvp) {
			lock_first = fvp;
			lock_second = NULL;
		} else if (fvp < tvp) {
			lock_first = fvp;
			lock_second = tvp;
		} else {
			lock_first = tvp;
			lock_second = fvp;
		}
		if ( (_err = lock_fsnode(lock_first, NULL)) )
			goto out1;

		if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
			unlock_fsnode(lock_first, NULL);
			goto out1;
		}
	}
	/*
	 * Save source and destination names (._ AppleDouble files).
	 * Skip if source already has a "._" prefix.
	 */
	if (!NATIVE_XATTR(fdvp) &&
	    !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
		size_t len;

		/* Get source attribute file name. */
		len = fcnp->cn_namelen + 3;
		if (len > sizeof(smallname1)) {
			MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
		} else {
			xfromname = &smallname1[0];
		}
		strcpy(xfromname, "._");
		strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
		xfromname[len-1] = '\0';

		/* Get destination attribute file name. */
		len = tcnp->cn_namelen + 3;
		if (len > sizeof(smallname2)) {
			MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
		} else {
			xtoname = &smallname2[0];
		}
		strcpy(xtoname, "._");
		strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
		xtoname[len-1] = '\0';
	}

	/* Primary rename; _err is what the caller ultimately sees. */
	_err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);

	if (fdvp_unsafe != NULLVP) {
		if (lock_second != NULL)
			unlock_fsnode(lock_second, NULL);
		unlock_fsnode(lock_first, NULL);
	}
	if (_err == 0) {
		/* A replaced destination should be reclaimed promptly. */
		if (tvp && tvp != fvp)
			vnode_setneedinactive(tvp);
	}

	/*
	 * Rename any associated extended attibute file (._ AppleDouble file).
	 */
	if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
		struct nameidata fromnd, tond;
		int killdest = 0;
		int error;

		/*
		 * Get source attribute file vnode.
		 * Note that fdvp already has an iocount reference and
		 * using DELETE will take an additional reference.
		 */
		NDINIT(&fromnd, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
		       CAST_USER_ADDR_T(xfromname), context);
		fromnd.ni_dvp = fdvp;
		error = namei(&fromnd);

		if (error) {
			/* When source doesn't exist there still may be a destination. */
			if (error == ENOENT) {
				killdest = 1;
			} else {
				goto out;
			}
		} else if (fromnd.ni_vp->v_type != VREG) {
			/* Not a real AppleDouble file; just clean up any stale destination. */
			vnode_put(fromnd.ni_vp);
			nameidone(&fromnd);
			killdest = 1;
		}
		if (killdest) {
			struct vnop_remove_args args;

			/*
			 * Get destination attribute file vnode.
			 * Note that tdvp already has an iocount reference.
			 */
			NDINIT(&tond, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
			       CAST_USER_ADDR_T(xtoname), context);
			tond.ni_dvp = tdvp;
			error = namei(&tond);
			if (error) {
				goto out;
			}
			if (tond.ni_vp->v_type != VREG) {
				vnode_put(tond.ni_vp);
				nameidone(&tond);
				goto out;
			}
			args.a_desc = &vnop_remove_desc;
			args.a_dvp = tdvp;
			args.a_vp = tond.ni_vp;
			args.a_cnp = &tond.ni_cnd;
			args.a_context = context;

			if (fdvp_unsafe != NULLVP)
				error = lock_fsnode(tond.ni_vp, NULL);
			if (error == 0) {
				error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);

				if (fdvp_unsafe != NULLVP)
					unlock_fsnode(tond.ni_vp, NULL);

				if (error == 0)
					vnode_setneedinactive(tond.ni_vp);
			}
			vnode_put(tond.ni_vp);
			nameidone(&tond);
			goto out;
		}

		/*
		 * Get destination attribute file vnode.
		 */
		NDINIT(&tond, RENAME,
		       NOCACHE | NOFOLLOW | USEDVP, UIO_SYSSPACE,
		       CAST_USER_ADDR_T(xtoname), context);
		tond.ni_dvp = tdvp;
		error = namei(&tond);

		if (error) {
			vnode_put(fromnd.ni_vp);
			nameidone(&fromnd);
			goto out;
		}
		/* Reuse the args block for the sidecar rename. */
		a.a_desc = &vnop_rename_desc;
		a.a_fdvp = fdvp;
		a.a_fvp = fromnd.ni_vp;
		a.a_fcnp = &fromnd.ni_cnd;
		a.a_tdvp = tdvp;
		a.a_tvp = tond.ni_vp;
		a.a_tcnp = &tond.ni_cnd;
		a.a_context = context;

		if (fdvp_unsafe != NULLVP) {
			/*
			 * Lock in vnode address order to avoid deadlocks
			 */
			if (tond.ni_vp == NULL || tond.ni_vp == fromnd.ni_vp) {
				lock_first = fromnd.ni_vp;
				lock_second = NULL;
			} else if (fromnd.ni_vp < tond.ni_vp) {
				lock_first = fromnd.ni_vp;
				lock_second = tond.ni_vp;
			} else {
				lock_first = tond.ni_vp;
				lock_second = fromnd.ni_vp;
			}
			if ( (error = lock_fsnode(lock_first, NULL)) == 0) {
				if (lock_second != NULL && (error = lock_fsnode(lock_second, NULL)) )
					unlock_fsnode(lock_first, NULL);
			}
		}
		if (error == 0) {
			error = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);

			if (fdvp_unsafe != NULLVP) {
				if (lock_second != NULL)
					unlock_fsnode(lock_second, NULL);
				unlock_fsnode(lock_first, NULL);
			}
			if (error == 0) {
				vnode_setneedinactive(fromnd.ni_vp);

				if (tond.ni_vp && tond.ni_vp != fromnd.ni_vp)
					vnode_setneedinactive(tond.ni_vp);
			}
		}
		vnode_put(fromnd.ni_vp);
		if (tond.ni_vp) {
			vnode_put(tond.ni_vp);
		}
		nameidone(&tond);
		nameidone(&fromnd);
	}
out:
	/* Free heap copies of the sidecar names (stack buffers need no free). */
	if (xfromname && xfromname != &smallname1[0]) {
		FREE(xfromname, M_TEMP);
	}
	if (xtoname && xtoname != &smallname2[0]) {
		FREE(xtoname, M_TEMP);
	}
out1:
	if (fdvp_unsafe != NULLVP) {
		if (tdvp_unsafe != NULLVP)
			unlock_fsnode(tdvp_unsafe, NULL);
		unlock_fsnode(fdvp_unsafe, &funnel_state);
	}
	return (_err);
}
3435
3436 #if 0
3437 /*
3438 *#
3439 *#% mkdir dvp L U U
3440 *#% mkdir vpp - L -
3441 *#
3442 */
3443 struct vnop_mkdir_args {
3444 struct vnodeop_desc *a_desc;
3445 vnode_t a_dvp;
3446 vnode_t *a_vpp;
3447 struct componentname *a_cnp;
3448 struct vnode_attr *a_vap;
3449 vfs_context_t a_context;
3450 };
3451 #endif /* 0*/
/*
 * VNOP_MKDIR - dispatch a directory-create request to the filesystem's
 * vnop_mkdir entry point.  On success, filesystems without native xattr
 * support get any stale "._" AppleDouble file for the new name removed
 * (non-forced: only if it appears stale).
 */
errno_t
VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
           struct vnode_attr *vap, vfs_context_t context)
{
	int _err;
	struct vnop_mkdir_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_mkdir_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
	}
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
3486
3487
3488 #if 0
3489 /*
3490 *#
3491 *#% rmdir dvp L U U
3492 *#% rmdir vp L U U
3493 *#
3494 */
3495 struct vnop_rmdir_args {
3496 struct vnodeop_desc *a_desc;
3497 vnode_t a_dvp;
3498 vnode_t a_vp;
3499 struct componentname *a_cnp;
3500 vfs_context_t a_context;
3501 };
3502
3503 #endif /* 0*/
/*
 * VNOP_RMDIR - dispatch a directory-remove request via the target
 * vnode's vnop_rmdir entry point.  On success the vnode is flagged for
 * inactive processing and, on filesystems without native xattr support,
 * any associated "._" AppleDouble file is removed.
 *
 * NOTE(review): thread-safety is evaluated on dvp while the fsnode lock
 * is taken on vp — both should be on the same filesystem for an rmdir.
 */
errno_t
VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t context)
{
	int _err;
	struct vnop_rmdir_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_rmdir_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);

	if (_err == 0) {
		vnode_setneedinactive(vp);

		if ( !(NATIVE_XATTR(dvp)) ) {
			/*
			 * Remove any associated extended attibute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
		}
	}
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3541
/*
 * Remove a ._ AppleDouble file
 *
 * Looks up "._<basename>" in dvp and removes it when it is a regular
 * file.  With force == 0 (used after creating a new object), the file
 * is only removed if it looks stale: non-empty and unmodified for more
 * than AD_STALE_SECS.  Errors are swallowed — this is best-effort
 * cleanup.  The caller is expected to drop dvp's iocount.
 */
#define AD_STALE_SECS (180)
static void
xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, int thread_safe, int force) {
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t len;

	/* Nothing to do for empty names or names already prefixed "._". */
	if ((basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	filename = &smallname[0];
	len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (len >= sizeof(smallname)) {
		len++;  /* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
		len = snprintf(filename, len, "._%s", basename);
	}
	NDINIT(&nd, DELETE, LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(filename), context);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0)
		goto out2;

	xvp = nd.ni_vp;
	nameidone(&nd);
	if (xvp->v_type != VREG)
		goto out1;

	/*
	 * When creating a new object and a "._" file already
	 * exists, check to see if its a stale "._" file.
	 *
	 */
	if (!force) {
		struct vnode_attr va;

		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_data_size);
		VATTR_WANTED(&va, va_modify_time);
		if (VNOP_GETATTR(xvp, &va, context) == 0 &&
		    VATTR_IS_SUPPORTED(&va, va_data_size) &&
		    VATTR_IS_SUPPORTED(&va, va_modify_time) &&
		    va.va_data_size != 0) {
			struct timeval tv;

			microtime(&tv);
			if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
			    (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
				force = 1;  /* must be stale */
			}
		}
	}
	if (force) {
		struct vnop_remove_args a;
		int error;

		a.a_desc = &vnop_remove_desc;
		a.a_dvp = nd.ni_dvp;
		a.a_vp = xvp;
		a.a_cnp = &nd.ni_cnd;
		a.a_context = context;

		if (!thread_safe) {
			if ( (lock_fsnode(xvp, NULL)) )
				goto out1;
		}
		error = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);

		if (!thread_safe)
			unlock_fsnode(xvp, NULL);

		if (error == 0)
			vnode_setneedinactive(xvp);
	}
out1:
	/* Note: nd.ni_dvp's iocount is dropped by caller of VNOP_XXXX */
	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		FREE(filename, M_TEMP);
	}
}
3630
/*
 * Shadow uid/gid/mod to a ._ AppleDouble file
 *
 * Looks up "._<basename>" in dvp and, if it is a regular file, applies
 * the given vnode_attr (ownership/mode) to it via the filesystem's
 * vnop_setattr.  Best-effort: lookup and setattr errors are ignored.
 */
static void
xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
                  vfs_context_t context, int thread_safe) {
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t len;

	/* Nothing to do without a parent, or for empty/"._"-prefixed names. */
	if ((dvp == NULLVP) ||
	    (basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	filename = &smallname[0];
	len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (len >= sizeof(smallname)) {
		len++;  /* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
		len = snprintf(filename, len, "._%s", basename);
	}
	NDINIT(&nd, LOOKUP, NOFOLLOW | USEDVP, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(filename), context);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0)
		goto out2;

	xvp = nd.ni_vp;
	nameidone(&nd);

	if (xvp->v_type == VREG) {
		struct vnop_setattr_args a;

		a.a_desc = &vnop_setattr_desc;
		a.a_vp = xvp;
		a.a_vap = vap;
		a.a_context = context;

		if (!thread_safe) {
			if ( (lock_fsnode(xvp, NULL)) )
				goto out1;
		}
		/* Result deliberately ignored — shadow update is best-effort. */
		(void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
		if (!thread_safe) {
			unlock_fsnode(xvp, NULL);
		}
	}
out1:
	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		FREE(filename, M_TEMP);
	}
}
3688
3689 #if 0
3690 /*
3691 *#
3692 *#% symlink dvp L U U
3693 *#% symlink vpp - U -
3694 *#
3695 */
3696 struct vnop_symlink_args {
3697 struct vnodeop_desc *a_desc;
3698 vnode_t a_dvp;
3699 vnode_t *a_vpp;
3700 struct componentname *a_cnp;
3701 struct vnode_attr *a_vap;
3702 char *a_target;
3703 vfs_context_t a_context;
3704 };
3705
3706 #endif /* 0*/
/*
 * VNOP_SYMLINK - dispatch a symlink-create request to the filesystem's
 * vnop_symlink entry point.  On success, filesystems without native
 * xattr support get any stale "._" AppleDouble file for the new name
 * removed (non-forced: only if it appears stale).
 */
errno_t
VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
             struct vnode_attr *vap, char *target, vfs_context_t context)
{
	int _err;
	struct vnop_symlink_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_symlink_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_target = target;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
	}
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
3742
3743 #if 0
3744 /*
3745 *#
3746 *#% readdir vp L L L
3747 *#
3748 */
3749 struct vnop_readdir_args {
3750 struct vnodeop_desc *a_desc;
3751 vnode_t a_vp;
3752 struct uio *a_uio;
3753 int a_flags;
3754 int *a_eofflag;
3755 int *a_numdirent;
3756 vfs_context_t a_context;
3757 };
3758
3759 #endif /* 0*/
3760 errno_t
3761 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
3762 int *numdirent, vfs_context_t context)
3763 {
3764 int _err;
3765 struct vnop_readdir_args a;
3766 int thread_safe;
3767 int funnel_state = 0;
3768
3769 a.a_desc = &vnop_readdir_desc;
3770 a.a_vp = vp;
3771 a.a_uio = uio;
3772 a.a_flags = flags;
3773 a.a_eofflag = eofflag;
3774 a.a_numdirent = numdirent;
3775 a.a_context = context;
3776 thread_safe = THREAD_SAFE_FS(vp);
3777
3778 if (!thread_safe) {
3779 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3780 return (_err);
3781 }
3782 }
3783 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
3784 if (!thread_safe) {
3785 unlock_fsnode(vp, &funnel_state);
3786 }
3787 return (_err);
3788 }
3789
3790 #if 0
3791 /*
3792 *#
3793 *#% readdirattr vp L L L
3794 *#
3795 */
3796 struct vnop_readdirattr_args {
3797 struct vnodeop_desc *a_desc;
3798 vnode_t a_vp;
3799 struct attrlist *a_alist;
3800 struct uio *a_uio;
3801 u_long a_maxcount;
3802 u_long a_options;
3803 u_long *a_newstate;
3804 int *a_eofflag;
3805 u_long *a_actualcount;
3806 vfs_context_t a_context;
3807 };
3808
3809 #endif /* 0*/
3810 errno_t
3811 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_long maxcount,
3812 u_long options, u_long *newstate, int *eofflag, u_long *actualcount, vfs_context_t context)
3813 {
3814 int _err;
3815 struct vnop_readdirattr_args a;
3816 int thread_safe;
3817 int funnel_state = 0;
3818
3819 a.a_desc = &vnop_readdirattr_desc;
3820 a.a_vp = vp;
3821 a.a_alist = alist;
3822 a.a_uio = uio;
3823 a.a_maxcount = maxcount;
3824 a.a_options = options;
3825 a.a_newstate = newstate;
3826 a.a_eofflag = eofflag;
3827 a.a_actualcount = actualcount;
3828 a.a_context = context;
3829 thread_safe = THREAD_SAFE_FS(vp);
3830
3831 if (!thread_safe) {
3832 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3833 return (_err);
3834 }
3835 }
3836 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
3837 if (!thread_safe) {
3838 unlock_fsnode(vp, &funnel_state);
3839 }
3840 return (_err);
3841 }
3842
3843 #if 0
3844 /*
3845 *#
3846 *#% readlink vp L L L
3847 *#
3848 */
3849 struct vnop_readlink_args {
3850 struct vnodeop_desc *a_desc;
3851 vnode_t a_vp;
3852 struct uio *a_uio;
3853 vfs_context_t a_context;
3854 };
3855 #endif /* 0 */
3856
3857 errno_t
3858 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t context)
3859 {
3860 int _err;
3861 struct vnop_readlink_args a;
3862 int thread_safe;
3863 int funnel_state = 0;
3864
3865 a.a_desc = &vnop_readlink_desc;
3866 a.a_vp = vp;
3867 a.a_uio = uio;
3868 a.a_context = context;
3869 thread_safe = THREAD_SAFE_FS(vp);
3870
3871 if (!thread_safe) {
3872 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3873 return (_err);
3874 }
3875 }
3876 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
3877 if (!thread_safe) {
3878 unlock_fsnode(vp, &funnel_state);
3879 }
3880 return (_err);
3881 }
3882
3883 #if 0
3884 /*
3885 *#
3886 *#% inactive vp L U U
3887 *#
3888 */
3889 struct vnop_inactive_args {
3890 struct vnodeop_desc *a_desc;
3891 vnode_t a_vp;
3892 vfs_context_t a_context;
3893 };
3894 #endif /* 0*/
3895 errno_t
3896 VNOP_INACTIVE(struct vnode *vp, vfs_context_t context)
3897 {
3898 int _err;
3899 struct vnop_inactive_args a;
3900 int thread_safe;
3901 int funnel_state = 0;
3902
3903 a.a_desc = &vnop_inactive_desc;
3904 a.a_vp = vp;
3905 a.a_context = context;
3906 thread_safe = THREAD_SAFE_FS(vp);
3907
3908 if (!thread_safe) {
3909 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3910 return (_err);
3911 }
3912 }
3913 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
3914 if (!thread_safe) {
3915 unlock_fsnode(vp, &funnel_state);
3916 }
3917 return (_err);
3918 }
3919
3920
3921 #if 0
3922 /*
3923 *#
3924 *#% reclaim vp U U U
3925 *#
3926 */
3927 struct vnop_reclaim_args {
3928 struct vnodeop_desc *a_desc;
3929 vnode_t a_vp;
3930 vfs_context_t a_context;
3931 };
3932 #endif /* 0*/
/*
 * Dispatch the filesystem's vnop_reclaim entry point so it can release
 * its per-vnode private data; 'vp' is being torn down for reuse.
 *
 * Per the locking table above ("reclaim vp U U U") the vnode is
 * unlocked here, so for non-thread-safe filesystems only the kernel
 * funnel is taken -- not the fsnode lock used by most other wrappers.
 */
errno_t
VNOP_RECLAIM(struct vnode *vp, vfs_context_t context)
{
	int _err;
	struct vnop_reclaim_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_reclaim_desc;
	a.a_vp = vp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		/* Funnel only; the vnode carries no usable fsnode lock here. */
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	_err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		/* Restore the caller's previous funnel state. */
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
3955
3956
3957 #if 0
3958 /*
3959 *#
3960 *#% pathconf vp L L L
3961 *#
3962 */
3963 struct vnop_pathconf_args {
3964 struct vnodeop_desc *a_desc;
3965 vnode_t a_vp;
3966 int a_name;
3967 register_t *a_retval;
3968 vfs_context_t a_context;
3969 };
3970 #endif /* 0*/
3971 errno_t
3972 VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t context)
3973 {
3974 int _err;
3975 struct vnop_pathconf_args a;
3976 int thread_safe;
3977 int funnel_state = 0;
3978
3979 a.a_desc = &vnop_pathconf_desc;
3980 a.a_vp = vp;
3981 a.a_name = name;
3982 a.a_retval = retval;
3983 a.a_context = context;
3984 thread_safe = THREAD_SAFE_FS(vp);
3985
3986 if (!thread_safe) {
3987 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3988 return (_err);
3989 }
3990 }
3991 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
3992 if (!thread_safe) {
3993 unlock_fsnode(vp, &funnel_state);
3994 }
3995 return (_err);
3996 }
3997
3998 #if 0
3999 /*
4000 *#
4001 *#% advlock vp U U U
4002 *#
4003 */
4004 struct vnop_advlock_args {
4005 struct vnodeop_desc *a_desc;
4006 vnode_t a_vp;
4007 caddr_t a_id;
4008 int a_op;
4009 struct flock *a_fl;
4010 int a_flags;
4011 vfs_context_t a_context;
4012 };
4013 #endif /* 0*/
4014 errno_t
4015 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t context)
4016 {
4017 int _err;
4018 struct vnop_advlock_args a;
4019 int thread_safe;
4020 int funnel_state = 0;
4021 struct uthread * uth;
4022
4023 a.a_desc = &vnop_advlock_desc;
4024 a.a_vp = vp;
4025 a.a_id = id;
4026 a.a_op = op;
4027 a.a_fl = fl;
4028 a.a_flags = flags;
4029 a.a_context = context;
4030 thread_safe = THREAD_SAFE_FS(vp);
4031
4032 uth = get_bsdthread_info(current_thread());
4033 if (!thread_safe) {
4034 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4035 }
4036 /* Disallow advisory locking on non-seekable vnodes */
4037 if (vnode_isfifo(vp)) {
4038 _err = err_advlock(&a);
4039 } else {
4040 if ((vp->v_flag & VLOCKLOCAL)) {
4041 /* Advisory locking done at this layer */
4042 _err = lf_advlock(&a);
4043 } else {
4044 /* Advisory locking done by underlying filesystem */
4045 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
4046 }
4047 }
4048 if (!thread_safe) {
4049 (void) thread_funnel_set(kernel_flock, funnel_state);
4050 }
4051 return (_err);
4052 }
4053
4054
4055
4056 #if 0
4057 /*
4058 *#
4059 *#% allocate vp L L L
4060 *#
4061 */
4062 struct vnop_allocate_args {
4063 struct vnodeop_desc *a_desc;
4064 vnode_t a_vp;
4065 off_t a_length;
4066 u_int32_t a_flags;
4067 off_t *a_bytesallocated;
4068 off_t a_offset;
4069 vfs_context_t a_context;
4070 };
4071
4072 #endif /* 0*/
4073 errno_t
4074 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t context)
4075 {
4076 int _err;
4077 struct vnop_allocate_args a;
4078 int thread_safe;
4079 int funnel_state = 0;
4080
4081 a.a_desc = &vnop_allocate_desc;
4082 a.a_vp = vp;
4083 a.a_length = length;
4084 a.a_flags = flags;
4085 a.a_bytesallocated = bytesallocated;
4086 a.a_offset = offset;
4087 a.a_context = context;
4088 thread_safe = THREAD_SAFE_FS(vp);
4089
4090 if (!thread_safe) {
4091 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4092 return (_err);
4093 }
4094 }
4095 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
4096 if (!thread_safe) {
4097 unlock_fsnode(vp, &funnel_state);
4098 }
4099 return (_err);
4100 }
4101
4102 #if 0
4103 /*
4104 *#
4105 *#% pagein vp = = =
4106 *#
4107 */
4108 struct vnop_pagein_args {
4109 struct vnodeop_desc *a_desc;
4110 vnode_t a_vp;
4111 upl_t a_pl;
4112 vm_offset_t a_pl_offset;
4113 off_t a_f_offset;
4114 size_t a_size;
4115 int a_flags;
4116 vfs_context_t a_context;
4117 };
4118 #endif /* 0*/
4119 errno_t
4120 VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4121 {
4122 int _err;
4123 struct vnop_pagein_args a;
4124 int thread_safe;
4125 int funnel_state = 0;
4126
4127 a.a_desc = &vnop_pagein_desc;
4128 a.a_vp = vp;
4129 a.a_pl = pl;
4130 a.a_pl_offset = pl_offset;
4131 a.a_f_offset = f_offset;
4132 a.a_size = size;
4133 a.a_flags = flags;
4134 a.a_context = context;
4135 thread_safe = THREAD_SAFE_FS(vp);
4136
4137 if (!thread_safe) {
4138 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4139 }
4140 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
4141 if (!thread_safe) {
4142 (void) thread_funnel_set(kernel_flock, funnel_state);
4143 }
4144 return (_err);
4145 }
4146
4147 #if 0
4148 /*
4149 *#
4150 *#% pageout vp = = =
4151 *#
4152 */
4153 struct vnop_pageout_args {
4154 struct vnodeop_desc *a_desc;
4155 vnode_t a_vp;
4156 upl_t a_pl;
4157 vm_offset_t a_pl_offset;
4158 off_t a_f_offset;
4159 size_t a_size;
4160 int a_flags;
4161 vfs_context_t a_context;
4162 };
4163
4164 #endif /* 0*/
4165 errno_t
4166 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4167 {
4168 int _err;
4169 struct vnop_pageout_args a;
4170 int thread_safe;
4171 int funnel_state = 0;
4172
4173 a.a_desc = &vnop_pageout_desc;
4174 a.a_vp = vp;
4175 a.a_pl = pl;
4176 a.a_pl_offset = pl_offset;
4177 a.a_f_offset = f_offset;
4178 a.a_size = size;
4179 a.a_flags = flags;
4180 a.a_context = context;
4181 thread_safe = THREAD_SAFE_FS(vp);
4182
4183 if (!thread_safe) {
4184 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4185 }
4186 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
4187 if (!thread_safe) {
4188 (void) thread_funnel_set(kernel_flock, funnel_state);
4189 }
4190 return (_err);
4191 }
4192
4193
4194 #if 0
4195 /*
4196 *#
4197 *#% searchfs vp L L L
4198 *#
4199 */
4200 struct vnop_searchfs_args {
4201 struct vnodeop_desc *a_desc;
4202 vnode_t a_vp;
4203 void *a_searchparams1;
4204 void *a_searchparams2;
4205 struct attrlist *a_searchattrs;
4206 u_long a_maxmatches;
4207 struct timeval *a_timelimit;
4208 struct attrlist *a_returnattrs;
4209 u_long *a_nummatches;
4210 u_long a_scriptcode;
4211 u_long a_options;
4212 struct uio *a_uio;
4213 struct searchstate *a_searchstate;
4214 vfs_context_t a_context;
4215 };
4216
4217 #endif /* 0*/
4218 errno_t
4219 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, u_long maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, u_long *nummatches, u_long scriptcode, u_long options, struct uio *uio, struct searchstate *searchstate, vfs_context_t context)
4220 {
4221 int _err;
4222 struct vnop_searchfs_args a;
4223 int thread_safe;
4224 int funnel_state = 0;
4225
4226 a.a_desc = &vnop_searchfs_desc;
4227 a.a_vp = vp;
4228 a.a_searchparams1 = searchparams1;
4229 a.a_searchparams2 = searchparams2;
4230 a.a_searchattrs = searchattrs;
4231 a.a_maxmatches = maxmatches;
4232 a.a_timelimit = timelimit;
4233 a.a_returnattrs = returnattrs;
4234 a.a_nummatches = nummatches;
4235 a.a_scriptcode = scriptcode;
4236 a.a_options = options;
4237 a.a_uio = uio;
4238 a.a_searchstate = searchstate;
4239 a.a_context = context;
4240 thread_safe = THREAD_SAFE_FS(vp);
4241
4242 if (!thread_safe) {
4243 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4244 return (_err);
4245 }
4246 }
4247 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
4248 if (!thread_safe) {
4249 unlock_fsnode(vp, &funnel_state);
4250 }
4251 return (_err);
4252 }
4253
4254 #if 0
4255 /*
4256 *#
4257 *#% copyfile fvp U U U
4258 *#% copyfile tdvp L U U
4259 *#% copyfile tvp X U U
4260 *#
4261 */
4262 struct vnop_copyfile_args {
4263 struct vnodeop_desc *a_desc;
4264 vnode_t a_fvp;
4265 vnode_t a_tdvp;
4266 vnode_t a_tvp;
4267 struct componentname *a_tcnp;
4268 int a_mode;
4269 int a_flags;
4270 vfs_context_t a_context;
4271 };
4272 #endif /* 0*/
4273 errno_t
4274 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4275 int mode, int flags, vfs_context_t context)
4276 {
4277 int _err;
4278 struct vnop_copyfile_args a;
4279 a.a_desc = &vnop_copyfile_desc;
4280 a.a_fvp = fvp;
4281 a.a_tdvp = tdvp;
4282 a.a_tvp = tvp;
4283 a.a_tcnp = tcnp;
4284 a.a_mode = mode;
4285 a.a_flags = flags;
4286 a.a_context = context;
4287 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
4288 return (_err);
4289 }
4290
4291
4292 errno_t
4293 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t context)
4294 {
4295 struct vnop_getxattr_args a;
4296 int error;
4297 int thread_safe;
4298 int funnel_state = 0;
4299
4300 a.a_desc = &vnop_getxattr_desc;
4301 a.a_vp = vp;
4302 a.a_name = name;
4303 a.a_uio = uio;
4304 a.a_size = size;
4305 a.a_options = options;
4306 a.a_context = context;
4307
4308 thread_safe = THREAD_SAFE_FS(vp);
4309 if (!thread_safe) {
4310 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4311 return (error);
4312 }
4313 }
4314 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
4315 if (!thread_safe) {
4316 unlock_fsnode(vp, &funnel_state);
4317 }
4318 return (error);
4319 }
4320
4321 errno_t
4322 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t context)
4323 {
4324 struct vnop_setxattr_args a;
4325 int error;
4326 int thread_safe;
4327 int funnel_state = 0;
4328
4329 a.a_desc = &vnop_setxattr_desc;
4330 a.a_vp = vp;
4331 a.a_name = name;
4332 a.a_uio = uio;
4333 a.a_options = options;
4334 a.a_context = context;
4335
4336 thread_safe = THREAD_SAFE_FS(vp);
4337 if (!thread_safe) {
4338 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4339 return (error);
4340 }
4341 }
4342 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
4343 if (!thread_safe) {
4344 unlock_fsnode(vp, &funnel_state);
4345 }
4346 return (error);
4347 }
4348
4349 errno_t
4350 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t context)
4351 {
4352 struct vnop_removexattr_args a;
4353 int error;
4354 int thread_safe;
4355 int funnel_state = 0;
4356
4357 a.a_desc = &vnop_removexattr_desc;
4358 a.a_vp = vp;
4359 a.a_name = name;
4360 a.a_options = options;
4361 a.a_context = context;
4362
4363 thread_safe = THREAD_SAFE_FS(vp);
4364 if (!thread_safe) {
4365 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4366 return (error);
4367 }
4368 }
4369 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
4370 if (!thread_safe) {
4371 unlock_fsnode(vp, &funnel_state);
4372 }
4373 return (error);
4374 }
4375
4376 errno_t
4377 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t context)
4378 {
4379 struct vnop_listxattr_args a;
4380 int error;
4381 int thread_safe;
4382 int funnel_state = 0;
4383
4384 a.a_desc = &vnop_listxattr_desc;
4385 a.a_vp = vp;
4386 a.a_uio = uio;
4387 a.a_size = size;
4388 a.a_options = options;
4389 a.a_context = context;
4390
4391 thread_safe = THREAD_SAFE_FS(vp);
4392 if (!thread_safe) {
4393 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4394 return (error);
4395 }
4396 }
4397 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
4398 if (!thread_safe) {
4399 unlock_fsnode(vp, &funnel_state);
4400 }
4401 return (error);
4402 }
4403
4404
4405 #if 0
4406 /*
4407 *#
4408 *#% blktooff vp = = =
4409 *#
4410 */
4411 struct vnop_blktooff_args {
4412 struct vnodeop_desc *a_desc;
4413 vnode_t a_vp;
4414 daddr64_t a_lblkno;
4415 off_t *a_offset;
4416 };
4417 #endif /* 0*/
4418 errno_t
4419 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
4420 {
4421 int _err;
4422 struct vnop_blktooff_args a;
4423 int thread_safe;
4424 int funnel_state = 0;
4425
4426 a.a_desc = &vnop_blktooff_desc;
4427 a.a_vp = vp;
4428 a.a_lblkno = lblkno;
4429 a.a_offset = offset;
4430 thread_safe = THREAD_SAFE_FS(vp);
4431
4432 if (!thread_safe) {
4433 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4434 }
4435 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
4436 if (!thread_safe) {
4437 (void) thread_funnel_set(kernel_flock, funnel_state);
4438 }
4439 return (_err);
4440 }
4441
4442 #if 0
4443 /*
4444 *#
4445 *#% offtoblk vp = = =
4446 *#
4447 */
4448 struct vnop_offtoblk_args {
4449 struct vnodeop_desc *a_desc;
4450 vnode_t a_vp;
4451 off_t a_offset;
4452 daddr64_t *a_lblkno;
4453 };
4454 #endif /* 0*/
4455 errno_t
4456 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
4457 {
4458 int _err;
4459 struct vnop_offtoblk_args a;
4460 int thread_safe;
4461 int funnel_state = 0;
4462
4463 a.a_desc = &vnop_offtoblk_desc;
4464 a.a_vp = vp;
4465 a.a_offset = offset;
4466 a.a_lblkno = lblkno;
4467 thread_safe = THREAD_SAFE_FS(vp);
4468
4469 if (!thread_safe) {
4470 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4471 }
4472 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
4473 if (!thread_safe) {
4474 (void) thread_funnel_set(kernel_flock, funnel_state);
4475 }
4476 return (_err);
4477 }
4478
4479 #if 0
4480 /*
4481 *#
4482 *#% blockmap vp L L L
4483 *#
4484 */
4485 struct vnop_blockmap_args {
4486 struct vnodeop_desc *a_desc;
4487 vnode_t a_vp;
4488 off_t a_foffset;
4489 size_t a_size;
4490 daddr64_t *a_bpn;
4491 size_t *a_run;
4492 void *a_poff;
4493 int a_flags;
4494 vfs_context_t a_context;
4495 };
4496 #endif /* 0*/
/*
 * Dispatch the filesystem's vnop_blockmap entry point to map the file
 * range ('foffset', 'size') of 'vp' to a physical block number stored
 * through 'bpn', with the contiguous run length in '*run' and an
 * additional offset result through 'poff'.
 *
 * If the caller passes a NULL context, a temporary one is fabricated on
 * the stack from the current process and credential.
 */
errno_t
VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t context)
{
	int _err;
	struct vnop_blockmap_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;	/* stack fallback when context == NULL */

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_blockmap_desc;
	a.a_vp = vp;
	a.a_foffset = foffset;
	a.a_size = size;
	a.a_bpn = bpn;
	a.a_run = run;
	a.a_poff = poff;
	a.a_flags = flags;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		/* NOTE(review): the locking table above marks blockmap as
		 * "L L L", yet this wrapper takes only the funnel and not the
		 * fsnode lock like other L-class ops -- presumably because it
		 * can be reached from paging paths; verify against callers. */
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	_err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
4531
4532 #if 0
4533 struct vnop_strategy_args {
4534 struct vnodeop_desc *a_desc;
4535 struct buf *a_bp;
4536 };
4537
4538 #endif /* 0*/
4539 errno_t
4540 VNOP_STRATEGY(struct buf *bp)
4541 {
4542 int _err;
4543 struct vnop_strategy_args a;
4544 a.a_desc = &vnop_strategy_desc;
4545 a.a_bp = bp;
4546 _err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
4547 return (_err);
4548 }
4549
4550 #if 0
4551 struct vnop_bwrite_args {
4552 struct vnodeop_desc *a_desc;
4553 buf_t a_bp;
4554 };
4555 #endif /* 0*/
4556 errno_t
4557 VNOP_BWRITE(struct buf *bp)
4558 {
4559 int _err;
4560 struct vnop_bwrite_args a;
4561 a.a_desc = &vnop_bwrite_desc;
4562 a.a_bp = bp;
4563 _err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
4564 return (_err);
4565 }
4566
4567 #if 0
4568 struct vnop_kqfilt_add_args {
4569 struct vnodeop_desc *a_desc;
4570 struct vnode *a_vp;
4571 struct knote *a_kn;
4572 vfs_context_t a_context;
4573 };
4574 #endif
4575 errno_t
4576 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t context)
4577 {
4578 int _err;
4579 struct vnop_kqfilt_add_args a;
4580 int thread_safe;
4581 int funnel_state = 0;
4582
4583 a.a_desc = VDESC(vnop_kqfilt_add);
4584 a.a_vp = vp;
4585 a.a_kn = kn;
4586 a.a_context = context;
4587 thread_safe = THREAD_SAFE_FS(vp);
4588
4589 if (!thread_safe) {
4590 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4591 return (_err);
4592 }
4593 }
4594 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
4595 if (!thread_safe) {
4596 unlock_fsnode(vp, &funnel_state);
4597 }
4598 return(_err);
4599 }
4600
4601 #if 0
4602 struct vnop_kqfilt_remove_args {
4603 struct vnodeop_desc *a_desc;
4604 struct vnode *a_vp;
4605 uintptr_t a_ident;
4606 vfs_context_t a_context;
4607 };
4608 #endif
4609 errno_t
4610 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t context)
4611 {
4612 int _err;
4613 struct vnop_kqfilt_remove_args a;
4614 int thread_safe;
4615 int funnel_state = 0;
4616
4617 a.a_desc = VDESC(vnop_kqfilt_remove);
4618 a.a_vp = vp;
4619 a.a_ident = ident;
4620 a.a_context = context;
4621 thread_safe = THREAD_SAFE_FS(vp);
4622
4623 if (!thread_safe) {
4624 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4625 return (_err);
4626 }
4627 }
4628 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
4629 if (!thread_safe) {
4630 unlock_fsnode(vp, &funnel_state);
4631 }
4632 return(_err);
4633 }
4634