/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kpi_vfs.c
 */

/*
 * External virtual filesystem routines
 */

#undef  DIAGNOSTIC
#define DIAGNOSTIC 1

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/mount_internal.h>
#include <sys/time.h>
#include <sys/vnode_internal.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/ubc.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/fsevents.h>
#include <sys/user.h>
#include <sys/lockf.h>
#include <sys/xattr.h>

#include <kern/assert.h>
#include <kern/kalloc.h>

#include <libkern/OSByteOrder.h>

#include <miscfs/specfs/specdev.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#define ESUCCESS 0
#undef mount_t
#undef vnode_t

#define COMPAT_ONLY


#define THREAD_SAFE_FS(VP)	\
	((VP)->v_unsafefs ? 0 : 1)

#define NATIVE_XATTR(VP)	\
	((VP)->v_mount ? (VP)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR : 0)

static void xattrfile_remove(vnode_t dvp, const char *basename, vfs_context_t context,
			int thread_safe, int force);
static void xattrfile_setattr(vnode_t dvp, const char *basename, struct vnode_attr *vap,
			vfs_context_t context, int thread_safe);


static void
vnode_setneedinactive(vnode_t vp)
{
	cache_purge(vp);

	vnode_lock(vp);
	vp->v_lflag |= VL_NEEDINACTIVE;
	vnode_unlock(vp);
}


int
lock_fsnode(vnode_t vp, int *funnel_state)
{
	if (funnel_state)
		*funnel_state = thread_funnel_set(kernel_flock, TRUE);

	if (vp->v_unsafefs) {
		if (vp->v_unsafefs->fsnodeowner == current_thread()) {
			vp->v_unsafefs->fsnode_count++;
		} else {
			lck_mtx_lock(&vp->v_unsafefs->fsnodelock);

			if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
				lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);

				if (funnel_state)
					(void) thread_funnel_set(kernel_flock, *funnel_state);
				return (ENOENT);
			}
			vp->v_unsafefs->fsnodeowner = current_thread();
			vp->v_unsafefs->fsnode_count = 1;
		}
	}
	return (0);
}


void
unlock_fsnode(vnode_t vp, int *funnel_state)
{
	if (vp->v_unsafefs) {
		if (--vp->v_unsafefs->fsnode_count == 0) {
			vp->v_unsafefs->fsnodeowner = NULL;
			lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
		}
	}
	if (funnel_state)
		(void) thread_funnel_set(kernel_flock, *funnel_state);
}
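
/*
 * Illustrative sketch (not part of this file): the vnode operation
 * dispatch wrappers bracket calls into non-thread-safe filesystems
 * with the pair above, taking the funnel and the per-FS node lock on
 * the way in and releasing both on the way out.  "vnop_offset" below
 * is a placeholder for the operation's offset into the dispatch
 * vector, not a real identifier:
 *
 *	int funnel_state;
 *
 *	if (!THREAD_SAFE_FS(vp)) {
 *		if ((error = lock_fsnode(vp, &funnel_state)) != 0)
 *			return (error);
 *	}
 *	error = (*vp->v_op[vnop_offset])(&args);
 *	if (!THREAD_SAFE_FS(vp))
 *		unlock_fsnode(vp, &funnel_state);
 */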


/* ====================================================================== */
/* ************  EXTERNAL KERNEL APIS  ********************************** */
/* ====================================================================== */

/*
 * prototypes for exported VFS operations
 */
int
VFS_MOUNT(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
		return(ENOTSUP);

	thread_safe = mp->mnt_vtable->vfc_threadsafe;


	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}

	if (vfs_context_is64bit(context)) {
		if (vfs_64bitready(mp)) {
			error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
		}
		else {
			error = ENOTSUP;
		}
	}
	else {
		error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
	}

	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (error);
}
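
/*
 * A note on the pattern shared by all of the VFS_* wrappers here
 * (sketch only, not additional code in this file): mounts whose
 * vfstable does not set vfc_threadsafe are serialized by taking the
 * kernel funnel around the filesystem call and then restoring the
 * previous funnel state, e.g.:
 *
 *	funnel_state = thread_funnel_set(kernel_flock, TRUE);
 *	error = (*mp->mnt_op->vfs_start)(mp, flags, context);
 *	(void) thread_funnel_set(kernel_flock, funnel_state);
 */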

int
VFS_START(struct mount *mp, int flags, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
		return(ENOTSUP);

	thread_safe = mp->mnt_vtable->vfc_threadsafe;

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	error = (*mp->mnt_op->vfs_start)(mp, flags, context);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (error);
}

int
VFS_UNMOUNT(struct mount *mp, int flags, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
		return(ENOTSUP);

	thread_safe = mp->mnt_vtable->vfc_threadsafe;

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	error = (*mp->mnt_op->vfs_unmount)(mp, flags, context);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (error);
}

int
VFS_ROOT(struct mount *mp, struct vnode **vpp, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
		return(ENOTSUP);

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	thread_safe = mp->mnt_vtable->vfc_threadsafe;

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	error = (*mp->mnt_op->vfs_root)(mp, vpp, context);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (error);
}

int
VFS_QUOTACTL(struct mount *mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
		return(ENOTSUP);

	thread_safe = mp->mnt_vtable->vfc_threadsafe;

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, context);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (error);
}

int
VFS_GETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
		return(ENOTSUP);

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	thread_safe = mp->mnt_vtable->vfc_threadsafe;

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	error = (*mp->mnt_op->vfs_getattr)(mp, vfa, context);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return(error);
}

int
VFS_SETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
		return(ENOTSUP);

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	thread_safe = mp->mnt_vtable->vfc_threadsafe;

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	error = (*mp->mnt_op->vfs_setattr)(mp, vfa, context);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return(error);
}

int
VFS_SYNC(struct mount *mp, int flags, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
		return(ENOTSUP);

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	thread_safe = mp->mnt_vtable->vfc_threadsafe;

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	error = (*mp->mnt_op->vfs_sync)(mp, flags, context);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return(error);
}

int
VFS_VGET(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
		return(ENOTSUP);

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	thread_safe = mp->mnt_vtable->vfc_threadsafe;

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, context);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return(error);
}

int
VFS_FHTOVP(struct mount *mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
		return(ENOTSUP);

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	thread_safe = mp->mnt_vtable->vfc_threadsafe;

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, context);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return(error);
}

int
VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t context)
{
	int error;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
		return(ENOTSUP);

	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, context);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return(error);
}


/* returns a copy of vfs type name for the mount_t */
void
vfs_name(mount_t mp, char *buffer)
{
	strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
}

/* returns vfs type number for the mount_t */
int
vfs_typenum(mount_t mp)
{
	return(mp->mnt_vtable->vfc_typenum);
}


/* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
uint64_t
vfs_flags(mount_t mp)
{
	return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
}

/* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
void
vfs_setflags(mount_t mp, uint64_t flags)
{
	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));

	mp->mnt_flag |= lflags;
}

/* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
void
vfs_clearflags(mount_t mp, uint64_t flags)
{
	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));

	mp->mnt_flag &= ~lflags;
}

/* Is the mount_t ronly and upgrade read/write requested? */
int
vfs_iswriteupgrade(mount_t mp)	/* ronly && MNTK_WANTRDWR */
{
	return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
}


/* Is the mount_t mounted ronly? */
int
vfs_isrdonly(mount_t mp)
{
	return (mp->mnt_flag & MNT_RDONLY);
}

/* Is the mount_t mounted for filesystem synchronous writes? */
int
vfs_issynchronous(mount_t mp)
{
	return (mp->mnt_flag & MNT_SYNCHRONOUS);
}

/* Is the mount_t mounted read/write? */
int
vfs_isrdwr(mount_t mp)
{
	return ((mp->mnt_flag & MNT_RDONLY) == 0);
}


/* Is mount_t marked for update (ie MNT_UPDATE) */
int
vfs_isupdate(mount_t mp)
{
	return (mp->mnt_flag & MNT_UPDATE);
}


/* Is mount_t marked for reload (ie MNT_RELOAD) */
int
vfs_isreload(mount_t mp)
{
	return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
}

/* Is mount_t marked for forced unmount (ie MNT_FORCE) */
int
vfs_isforce(mount_t mp)
{
	if ((mp->mnt_flag & MNT_FORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
		return(1);
	else
		return(0);
}

int
vfs_64bitready(mount_t mp)
{
	if ((mp->mnt_vtable->vfc_64bitready))
		return(1);
	else
		return(0);
}

int
vfs_authopaque(mount_t mp)
{
	if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
		return(1);
	else
		return(0);
}

int
vfs_authopaqueaccess(mount_t mp)
{
	if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
		return(1);
	else
		return(0);
}

void
vfs_setauthopaque(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
	mount_unlock(mp);
}

void
vfs_setauthopaqueaccess(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
	mount_unlock(mp);
}

void
vfs_clearauthopaque(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
	mount_unlock(mp);
}

void
vfs_clearauthopaqueaccess(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
	mount_unlock(mp);
}

void
vfs_setextendedsecurity(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
	mount_unlock(mp);
}

void
vfs_clearextendedsecurity(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
	mount_unlock(mp);
}

int
vfs_extendedsecurity(mount_t mp)
{
	return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
}

/* returns the max size of short symlink in this mount_t */
uint32_t
vfs_maxsymlen(mount_t mp)
{
	return(mp->mnt_maxsymlinklen);
}

/* set max size of short symlink on mount_t */
void
vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
{
	mp->mnt_maxsymlinklen = symlen;
}

/* return a pointer to the RO vfs_statfs associated with mount_t */
struct vfsstatfs *
vfs_statfs(mount_t mp)
{
	return(&mp->mnt_vfsstat);
}

int
vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
{
	int		error;
	char		*vname;

	if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
		return(error);

	/*
	 * If we have a filesystem create time, use it to default some others.
	 */
	if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
		if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
			VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
	}

	return(0);
}

int
vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
{
	int error;

	if (vfs_isrdonly(mp))
		return EROFS;

	error = VFS_SETATTR(mp, vfa, ctx);

	/*
	 * If we had alternate ways of setting vfs attributes, we'd
	 * fall back here.
	 */

	return error;
}

/* return the private data handle stored in mount_t */
void *
vfs_fsprivate(mount_t mp)
{
	return(mp->mnt_data);
}

/* set the private data handle in mount_t */
void
vfs_setfsprivate(mount_t mp, void *mntdata)
{
	mp->mnt_data = mntdata;
}


/*
 * return the block size of the underlying
 * device associated with mount_t
 */
int
vfs_devblocksize(mount_t mp) {

	return(mp->mnt_devblocksize);
}


/*
 * return the io attributes associated with mount_t
 */
void
vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
{
	if (mp == NULL) {
		ioattrp->io_maxreadcnt = MAXPHYS;
		ioattrp->io_maxwritecnt = MAXPHYS;
		ioattrp->io_segreadcnt = 32;
		ioattrp->io_segwritecnt = 32;
		ioattrp->io_maxsegreadsize = MAXPHYS;
		ioattrp->io_maxsegwritesize = MAXPHYS;
		ioattrp->io_devblocksize = DEV_BSIZE;
	} else {
		ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
		ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
		ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
		ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
		ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
		ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
		ioattrp->io_devblocksize = mp->mnt_devblocksize;
	}
	ioattrp->io_reserved[0] = 0;
	ioattrp->io_reserved[1] = 0;
	ioattrp->io_reserved[2] = 0;
}


/*
 * set the IO attributes associated with mount_t
 */
void
vfs_setioattr(mount_t mp, struct vfsioattr *ioattrp)
{
	if (mp == NULL)
		return;
	mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
	mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
	mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
	mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
	mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
	mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
	mp->mnt_devblocksize = ioattrp->io_devblocksize;
}
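
/*
 * Typical usage (a sketch, not code from this file): a filesystem
 * that wants to cap the maximum read size does a read-modify-write
 * of the whole vfsioattr structure rather than poking a single field:
 *
 *	struct vfsioattr ioattr;
 *
 *	vfs_ioattr(mp, &ioattr);
 *	if (ioattr.io_maxreadcnt > (512 * 1024))
 *		ioattr.io_maxreadcnt = 512 * 1024;
 *	vfs_setioattr(mp, &ioattr);
 */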

/*
 * Add a new filesystem into the kernel, as specified in the passed-in
 * vfs_fsentry structure.  It fills in the vnode
 * dispatch vector that is to be used when vnodes are created.
 * It returns a handle which is to be used when the FS is to be removed.
 */
typedef int (*PFI)(void *);
extern int vfs_opv_numops;
errno_t
vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
{
#pragma unused(data)
	struct vfstable	*newvfstbl = NULL;
	int	i, j;
	int	(***opv_desc_vector_p)(void *);
	int	(**opv_desc_vector)(void *);
	struct vnodeopv_entry_desc	*opve_descp;
	int desccount;
	int descsize;
	PFI *descptr;

	/*
	 * This routine is responsible for all the initialization that would
	 * ordinarily be done as part of the system startup;
	 */

	if (vfe == (struct vfs_fsentry *)0)
		return(EINVAL);

	desccount = vfe->vfe_vopcnt;
	if ((desccount <= 0) || (desccount > 5) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
	    || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
		return(EINVAL);


	MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
	       M_WAITOK);
	bzero(newvfstbl, sizeof(struct vfstable));
	newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
	strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
	if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
		newvfstbl->vfc_typenum = maxvfsconf++;
	else
		newvfstbl->vfc_typenum = vfe->vfe_fstypenum;

	newvfstbl->vfc_refcount = 0;
	newvfstbl->vfc_flags = 0;
	newvfstbl->vfc_mountroot = NULL;
	newvfstbl->vfc_next = NULL;
	newvfstbl->vfc_threadsafe = 0;
	newvfstbl->vfc_vfsflags = 0;
	if (vfe->vfe_flags & VFS_TBL64BITREADY)
		newvfstbl->vfc_64bitready = 1;
	if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
		newvfstbl->vfc_threadsafe = 1;
	if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
		newvfstbl->vfc_threadsafe = 1;
	if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
		newvfstbl->vfc_flags |= MNT_LOCAL;
	if (vfe->vfe_flags & VFS_TBLLOCALVOL)
		newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
	else
		newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;


	/*
	 * Allocate and init the vectors.
	 * Also handle backwards compatibility.
	 *
	 * We allocate one large block to hold all <desccount>
	 * vnode operation vectors stored contiguously.
	 */
	/* XXX - shouldn't be M_TEMP */

	descsize = desccount * vfs_opv_numops * sizeof(PFI);
	MALLOC(descptr, PFI *, descsize,
	       M_TEMP, M_WAITOK);
	bzero(descptr, descsize);

	newvfstbl->vfc_descptr = descptr;
	newvfstbl->vfc_descsize = descsize;


	for (i = 0; i < desccount; i++) {
		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
		/*
		 * Fill in the caller's pointer to the start of the i'th vector.
		 * They'll need to supply it when calling vnode_create.
		 */
		opv_desc_vector = descptr + i * vfs_opv_numops;
		*opv_desc_vector_p = opv_desc_vector;

		for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
			opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);

			/*
			 * Sanity check:  is this operation listed
			 * in the list of operations?  We check this
			 * by seeing if its offset is zero.  Since
			 * the default routine should always be listed
			 * first, it should be the only one with a zero
			 * offset.  Any other operation with a zero
			 * offset is probably not listed in
			 * vfs_op_descs, and so is probably an error.
			 *
			 * A panic here means the layer programmer
			 * has committed the all-too common bug
			 * of adding a new operation to the layer's
			 * list of vnode operations but
			 * not adding the operation to the system-wide
			 * list of supported operations.
			 */
			if (opve_descp->opve_op->vdesc_offset == 0 &&
			    opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
				printf("vfs_fsadd: operation %s not listed in %s.\n",
				       opve_descp->opve_op->vdesc_name,
				       "vfs_op_descs");
				panic("vfs_fsadd: bad operation");
			}
			/*
			 * Fill in this entry.
			 */
			opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
			    opve_descp->opve_impl;
		}


		/*
		 * Finally, go back and replace unfilled routines
		 * with their default.  (Sigh, an O(n^3) algorithm.  I
		 * could make it better, but that'd be work, and n is small.)
		 */
		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;

		/*
		 * Force every operations vector to have a default routine.
		 */
		opv_desc_vector = *opv_desc_vector_p;
		if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
			panic("vfs_fsadd: operation vector without default routine.");
		for (j = 0; j < vfs_opv_numops; j++)
			if (opv_desc_vector[j] == NULL)
				opv_desc_vector[j] =
				    opv_desc_vector[VOFFSET(vnop_default)];

	} /* end of each vnodeopv_desc parsing */



	*handle = vfstable_add(newvfstbl);

	if (newvfstbl->vfc_typenum <= maxvfsconf)
		maxvfsconf = newvfstbl->vfc_typenum + 1;
	numused_vfsslots++;

	if (newvfstbl->vfc_vfsops->vfs_init)
		(*newvfstbl->vfc_vfsops->vfs_init)((struct vfsconf *)handle);

	FREE(newvfstbl, M_TEMP);

	return(0);
}
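
/*
 * Registration sketch (illustrative only; "examplefs" and its ops
 * tables are hypothetical): a loadable filesystem hands vfs_fsadd()
 * its vfsops plus one or more vnodeopv_desc tables, and keeps the
 * returned handle for the matching vfs_fsremove() at unload time:
 *
 *	extern struct vfsops examplefs_vfsops;
 *	extern struct vnodeopv_desc examplefs_vnodeop_opv_desc;
 *	static vfstable_t examplefs_handle;
 *
 *	struct vnodeopv_desc *examplefs_opvdescs[] = {
 *		&examplefs_vnodeop_opv_desc
 *	};
 *	struct vfs_fsentry examplefs_fsentry = {
 *		&examplefs_vfsops,
 *		1,						// vfe_vopcnt
 *		examplefs_opvdescs,
 *		0,						// vfe_fstypenum
 *		"examplefs",
 *		VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM,
 *		{ NULL, NULL }					// vfe_reserv
 *	};
 *
 *	error = vfs_fsadd(&examplefs_fsentry, &examplefs_handle);
 *	...
 *	error = vfs_fsremove(examplefs_handle);
 */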

/*
 * Removes the filesystem from the kernel.
 * The argument passed in is the handle that was given when
 * the file system was added.
 */
errno_t
vfs_fsremove(vfstable_t handle)
{
	struct vfstable *vfstbl = (struct vfstable *)handle;
	void *old_desc = NULL;
	errno_t err;

	/* Preflight check for any mounts */
	mount_list_lock();
	if (vfstbl->vfc_refcount != 0) {
		mount_list_unlock();
		return EBUSY;
	}
	mount_list_unlock();

	/*
	 * save the old descriptor; the free cannot occur unconditionally,
	 * since vfstable_del() may fail.
	 */
	if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
		old_desc = vfstbl->vfc_descptr;
	}
	err = vfstable_del(vfstbl);

	/* free the descriptor if the delete was successful */
	if (err == 0 && old_desc) {
		FREE(old_desc, M_TEMP);
	}

	return(err);
}

/*
 * This returns a reference to mount_t
 * which should be dropped using vfs_mountrele().
 * Not doing so will leak a mountpoint
 * and associated data structures.
 */
errno_t
vfs_mountref(__unused mount_t mp)	/* gives a reference */
{
	return(0);
}

/* This drops the reference on mount_t that was acquired */
errno_t
vfs_mountrele(__unused mount_t mp)	/* drops reference */
{
	return(0);
}

int
vfs_context_pid(vfs_context_t context)
{
	return (context->vc_proc->p_pid);
}

int
vfs_context_suser(vfs_context_t context)
{
	return (suser(context->vc_ucred, 0));
}
int
vfs_context_issignal(vfs_context_t context, sigset_t mask)
{
	if (context->vc_proc)
		return(proc_pendingsignals(context->vc_proc, mask));
	return(0);
}

int
vfs_context_is64bit(vfs_context_t context)
{
	if (context->vc_proc)
		return(proc_is64bit(context->vc_proc));
	return(0);
}

proc_t
vfs_context_proc(vfs_context_t context)
{
	return (context->vc_proc);
}

vfs_context_t
vfs_context_create(vfs_context_t context)
{
	struct vfs_context *newcontext;

	newcontext = (struct vfs_context *)kalloc(sizeof(struct vfs_context));

	if (newcontext) {
		kauth_cred_t safecred;
		if (context) {
			newcontext->vc_proc = context->vc_proc;
			safecred = context->vc_ucred;
		} else {
			newcontext->vc_proc = proc_self();
			safecred = kauth_cred_get();
		}
		if (IS_VALID_CRED(safecred))
			kauth_cred_ref(safecred);
		newcontext->vc_ucred = safecred;
		return(newcontext);
	}
	return((vfs_context_t)0);
}

int
vfs_context_rele(vfs_context_t context)
{
	if (context) {
		if (IS_VALID_CRED(context->vc_ucred))
			kauth_cred_unref(&context->vc_ucred);
		kfree(context, sizeof(struct vfs_context));
	}
	return(0);
}
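
/*
 * Usage sketch (not code from this file): because
 * vfs_context_create() takes a reference on the credential, a
 * context created here must be released with vfs_context_rele()
 * when the consumer is done with it:
 *
 *	struct vnode_attr va;
 *	vfs_context_t ctx;
 *
 *	ctx = vfs_context_create(NULL);		// current proc + cred
 *	error = vnode_getattr(vp, &va, ctx);
 *	(void) vfs_context_rele(ctx);
 */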


ucred_t
vfs_context_ucred(vfs_context_t context)
{
	return (context->vc_ucred);
}

/*
 * Return true if the context is owned by the superuser.
 */
int
vfs_context_issuser(vfs_context_t context)
{
	return(context->vc_ucred->cr_uid == 0);
}


/* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */


/*
 * Convert between vnode types and inode formats (since POSIX.1
 * defines mode word of stat structure in terms of inode formats).
 */
enum vtype
vnode_iftovt(int mode)
{
	return(iftovt_tab[((mode) & S_IFMT) >> 12]);
}

int
vnode_vttoif(enum vtype indx)
{
	return(vttoif_tab[(int)(indx)]);
}

int
vnode_makeimode(int indx, int mode)
{
	return (int)(VTTOIF(indx) | (mode));
}
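
/*
 * Example (sketch): these helpers convert between vnode types and
 * the S_IFMT encoding used in the st_mode word, so for a regular
 * file with mode 0644:
 *
 *	vnode_makeimode(VREG, 0644) == (S_IFREG | 0644)
 *	vnode_iftovt(S_IFREG | 0644) == VREG
 *	vnode_vttoif(VREG) == S_IFREG
 */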


/*
 * vnode manipulation functions.
 */

/* returns system root vnode reference; it should be dropped using vnode_put() */
vnode_t
vfs_rootvnode(void)
{
	int error;

	error = vnode_get(rootvnode);
	if (error)
		return ((vnode_t)0);
	else
		return rootvnode;
}


uint32_t
vnode_vid(vnode_t vp)
{
	return ((uint32_t)(vp->v_id));
}

/* returns a mount reference; drop it with vfs_mountrele() */
mount_t
vnode_mount(vnode_t vp)
{
	return (vp->v_mount);
}

/* returns a mount reference iff vnode_t is a dir and is a mount point */
mount_t
vnode_mountedhere(vnode_t vp)
{
	mount_t mp;

	if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
	    (mp->mnt_vnodecovered == vp))
		return (mp);
	else
		return (mount_t)NULL;
}

/* returns vnode type of vnode_t */
enum vtype
vnode_vtype(vnode_t vp)
{
	return (vp->v_type);
}

/* returns FS specific node saved in vnode */
void *
vnode_fsnode(vnode_t vp)
{
	return (vp->v_data);
}

void
vnode_clearfsnode(vnode_t vp)
{
	vp->v_data = 0;
}

dev_t
vnode_specrdev(vnode_t vp)
{
	return(vp->v_rdev);
}

/* Accessor functions */
/* is vnode_t a root vnode */
int
vnode_isvroot(vnode_t vp)
{
	return ((vp->v_flag & VROOT)? 1 : 0);
}

/* is vnode_t a system vnode */
int
vnode_issystem(vnode_t vp)
{
	return ((vp->v_flag & VSYSTEM)? 1 : 0);
}

/* is a mount operation in progress on vnode_t */
int
vnode_ismount(vnode_t vp)
{
	return ((vp->v_flag & VMOUNT)? 1 : 0);
}

/* is this vnode under recycle now */
int
vnode_isrecycled(vnode_t vp)
{
	int ret;

	vnode_lock(vp);
	ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
	vnode_unlock(vp);
	return(ret);
}

/* is vnode_t marked to not keep data cached once it's been consumed */
int
vnode_isnocache(vnode_t vp)
{
	return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
}

/*
 * has sequential readahead been disabled on this vnode
 */
int
vnode_isnoreadahead(vnode_t vp)
{
	return ((vp->v_flag & VRAOFF)? 1 : 0);
}

/* is vnode_t a standard one? */
int
vnode_isstandard(vnode_t vp)
{
	return ((vp->v_flag & VSTANDARD)? 1 : 0);
}

/* don't vflush() if SKIPSYSTEM */
int
vnode_isnoflush(vnode_t vp)
{
	return ((vp->v_flag & VNOFLUSH)? 1 : 0);
}

/* is vnode_t a regular file */
int
vnode_isreg(vnode_t vp)
{
	return ((vp->v_type == VREG)? 1 : 0);
}

/* is vnode_t a directory? */
int
vnode_isdir(vnode_t vp)
{
	return ((vp->v_type == VDIR)? 1 : 0);
}

/* is vnode_t a symbolic link? */
int
vnode_islnk(vnode_t vp)
{
	return ((vp->v_type == VLNK)? 1 : 0);
}

/* is vnode_t a fifo? */
int
vnode_isfifo(vnode_t vp)
{
	return ((vp->v_type == VFIFO)? 1 : 0);
}

/* is vnode_t a block device? */
int
vnode_isblk(vnode_t vp)
{
	return ((vp->v_type == VBLK)? 1 : 0);
}

/* is vnode_t a char device? */
int
vnode_ischr(vnode_t vp)
{
	return ((vp->v_type == VCHR)? 1 : 0);
}

/* is vnode_t a socket? */
int
vnode_issock(vnode_t vp)
{
	return ((vp->v_type == VSOCK)? 1 : 0);
}

/* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
void
vnode_setnocache(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag |= VNOCACHE_DATA;
	vnode_unlock(vp);
}

void
vnode_clearnocache(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag &= ~VNOCACHE_DATA;
	vnode_unlock(vp);
}

void
vnode_setnoreadahead(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag |= VRAOFF;
	vnode_unlock(vp);
}

void
vnode_clearnoreadahead(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag &= ~VRAOFF;
	vnode_unlock(vp);
}


/* mark vnode_t to skip vflush() if SKIPSYSTEM */
void
vnode_setnoflush(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag |= VNOFLUSH;
	vnode_unlock(vp);
}

void
vnode_clearnoflush(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag &= ~VNOFLUSH;
	vnode_unlock(vp);
}


/* is vnode_t a blkdevice and has a FS mounted on it */
int
vnode_ismountedon(vnode_t vp)
{
	return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
}

void
vnode_setmountedon(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_specflags |= SI_MOUNTEDON;
	vnode_unlock(vp);
}

void
vnode_clearmountedon(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_specflags &= ~SI_MOUNTEDON;
	vnode_unlock(vp);
}


void
vnode_settag(vnode_t vp, int tag)
{
	vp->v_tag = tag;

}

int
vnode_tag(vnode_t vp)
{
	return(vp->v_tag);
}

vnode_t
vnode_parent(vnode_t vp)
{

	return(vp->v_parent);
}

void
vnode_setparent(vnode_t vp, vnode_t dvp)
{
	vp->v_parent = dvp;
}

char *
vnode_name(vnode_t vp)
{
	/* we try to keep v_name a reasonable name for the node */
	return(vp->v_name);
}

void
vnode_setname(vnode_t vp, char *name)
{
	vp->v_name = name;
}

/* return the registered FS name when adding the FS to kernel */
void
vnode_vfsname(vnode_t vp, char *buf)
{
	strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
}

/* return the FS type number */
int
vnode_vfstypenum(vnode_t vp)
{
	return(vp->v_mount->mnt_vtable->vfc_typenum);
}

int
vnode_vfs64bitready(vnode_t vp)
{

	if ((vp->v_mount->mnt_vtable->vfc_64bitready))
		return(1);
	else
		return(0);
}



/* return the visible flags on associated mount point of vnode_t */
uint32_t
vnode_vfsvisflags(vnode_t vp)
{
	return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
}

/* return the command modifier flags on associated mount point of vnode_t */
uint32_t
vnode_vfscmdflags(vnode_t vp)
{
	return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
}

/* return the max size of short symlinks on the mount point of vnode_t */
uint32_t
vnode_vfsmaxsymlen(vnode_t vp)
{
	return(vp->v_mount->mnt_maxsymlinklen);
}

/* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
struct vfsstatfs *
vnode_vfsstatfs(vnode_t vp)
{
	return(&vp->v_mount->mnt_vfsstat);
}

/* return a handle to the FS's specific private handle associated with vnode_t's mount point */
void *
vnode_vfsfsprivate(vnode_t vp)
{
	return(vp->v_mount->mnt_data);
}

/* is vnode_t in a rdonly mounted FS */
int
vnode_vfsisrdonly(vnode_t vp)
{
	return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
}


/* returns vnode ref to current working directory */
vnode_t
current_workingdir(void)
{
	struct proc *p = current_proc();
	struct vnode *vp;

	if ((vp = p->p_fd->fd_cdir)) {
		if ((vnode_getwithref(vp)))
			return (NULL);
	}
	return vp;
}

/* returns vnode ref to current root(chroot) directory */
vnode_t
current_rootdir(void)
{
	struct proc *p = current_proc();
	struct vnode *vp;

	if ((vp = p->p_fd->fd_rdir)) {
		if ((vnode_getwithref(vp)))
			return (NULL);
	}
	return vp;
}
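
/*
 * Caller sketch (illustrative only): both routines above return the
 * vnode with an iocount held via vnode_getwithref(), so the caller
 * must drop that reference with vnode_put() when done:
 *
 *	vnode_t cwd;
 *
 *	if ((cwd = current_workingdir()) != NULL) {
 *		... use cwd ...
 *		vnode_put(cwd);
 *	}
 */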

/*
 * Get a filesec and optional acl contents from an extended attribute.
 * Function will attempt to retrieve ACL, UUID, and GUID information using a
 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
 *
 * Parameters:	vp			The vnode on which to operate.
 *		fsecp			The filesec (and ACL, if any) being
 *					retrieved.
 *		ctx			The vnode context in which the
 *					operation is to be attempted.
 *
 * Returns:	0			Success
 *		!0			errno value
 *
 * Notes:	The kauth_filesec_t in '*fsecp', if retrieved, will be in
 *		host byte order, as will be the ACL contents, if any.
 *		Internally, we will canonicalize these values from network (PPC)
 *		byte order after we retrieve them so that the on-disk contents
 *		of the extended attribute are identical for both PPC and Intel
 *		(if we were not being required to provide this service via
 *		fallback, this would be the job of the filesystem
 *		'VNOP_GETATTR' call).
 *
 *		We use ntohl() because it has a transitive property on Intel
 *		machines and no effect on PPC machines.  This guarantees us
 *		that the conversion is correct in both directions.
 *
 * XXX:		Deleting rather than ignoring a corrupt security structure is
 *		probably the only way to reset it without assistance from a
 *		file system integrity checking tool.  Right now we ignore it.
 *
 * XXX:		We should enumerate the possible errno values here, and where
 *		in the code they originated.
 */
static int
vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	uio_t	fsec_uio;
	size_t	fsec_size;
	size_t	xsize, rsize;
	int	error;
	int	i;
	uint32_t	host_fsec_magic;
	uint32_t	host_acl_entrycount;

	fsec = NULL;
	fsec_uio = NULL;
	error = 0;

	/* find out how big the EA is */
	if ((error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx)) != 0) {
		/* no EA, no filesec */
		if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
			error = 0;
		/* either way, we are done */
		goto out;
	}

	/*
	 * To be valid, a kauth_filesec_t must be large enough to hold an ACL
	 * with zero ACE entries, and if it's larger than that, it must have the right
	 * number of bytes such that it contains an atomic number of ACEs,
	 * rather than partial entries.  Otherwise, we ignore it.
	 */
	if (!KAUTH_FILESEC_VALID(xsize)) {
		KAUTH_DEBUG(" ERROR - Bogus kauth_filesec_t: %ld bytes", xsize);
		error = 0;
		goto out;
	}

	/* how many entries would fit? */
	fsec_size = KAUTH_FILESEC_COUNT(xsize);

	/* get buffer and uio */
	if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
	    ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
	    uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
		KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
		error = ENOMEM;
		goto out;
	}

	/* read security attribute */
	rsize = xsize;
	if ((error = vn_getxattr(vp,
				 KAUTH_FILESEC_XATTR,
				 fsec_uio,
				 &rsize,
				 XATTR_NOSECURITY,
				 ctx)) != 0) {

		/* no attribute - no security data */
		if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
			error = 0;
		/* either way, we are done */
		goto out;
	}

	/*
	 * Validate security structure; the validation must take place in host
	 * byte order.  If it's corrupt, we will just ignore it.
	 */

	/* Validate the size before trying to convert it */
	if (rsize < KAUTH_FILESEC_SIZE(0)) {
		KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
		goto out;
	}

	/* Validate the magic number before trying to convert it */
	host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
	if (fsec->fsec_magic != host_fsec_magic) {
		KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
		goto out;
	}

	/* Validate the entry count before trying to convert it. */
	host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
	if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
		if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
			KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
			goto out;
		}
		if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
			KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
			goto out;
		}
	}

	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);

	*fsecp = fsec;
	fsec = NULL;
	error = 0;
out:
	if (fsec != NULL)
		kauth_filesec_free(fsec);
	if (fsec_uio != NULL)
		uio_free(fsec_uio);
	if (error)
		*fsecp = NULL;
	return(error);
}

/*
 * Set a filesec and optional acl contents into an extended attribute.
 * The function will attempt to store ACL, UUID, and GUID information using a
 * write to a named extended attribute (KAUTH_FILESEC_XATTR).  The 'acl'
 * may or may not point to the `fsec->fsec_acl`, depending on whether the
 * original caller supplied an acl.
 *
 * Parameters:	vp			The vnode on which to operate.
 *		fsec			The filesec being set.
 *		acl			The acl to be associated with 'fsec'.
 *		ctx			The vnode context in which the
 *					operation is to be attempted.
 *
 * Returns:	0			Success
 *		!0			errno value
 *
 * Notes:	Both the fsec and the acl are always valid.
 *
 *		The kauth_filesec_t in 'fsec', if any, is in host byte order,
 *		as are the acl contents, if they are used.  Internally, we will
 *		canonicalize these values into network (PPC) byte order before we
 *		attempt to write them so that the on-disk contents of the
 *		extended attribute are identical for both PPC and Intel (if we
 *		were not being required to provide this service via fallback,
 *		this would be the job of the filesystem 'VNOP_SETATTR' call).
 *		We reverse this process on the way out, so we leave with the
 *		same byte order we started with.
 *
 * XXX:		We should enumerate the possible errno values here, and where
 *		in the code they originated.
 */
static int
vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
{
	uio_t	fsec_uio;
	int	error;
	int	i;
	uint32_t	saved_acl_copysize;

	fsec_uio = NULL;

	if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
		KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
		error = ENOMEM;
		goto out;
	}
	/*
	 * Save the pre-converted ACL copysize, because it gets swapped too
	 * if we are running with the wrong endianness.
	 */
	saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);

	kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);

	uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), sizeof(struct kauth_filesec) - sizeof(struct kauth_acl));
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
	error = vn_setxattr(vp,
			    KAUTH_FILESEC_XATTR,
			    fsec_uio,
			    XATTR_NOSECURITY,		/* we have auth'ed already */
			    ctx);
	VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);

	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);

out:
	if (fsec_uio != NULL)
		uio_free(fsec_uio);
	return(error);
}


int
vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	int	error;
	uid_t	nuid;
	gid_t	ngid;

	/* don't ask for extended security data if the filesystem doesn't support it */
	if (!vfs_extendedsecurity(vnode_mount(vp))) {
		VATTR_CLEAR_ACTIVE(vap, va_acl);
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
		VATTR_CLEAR_ACTIVE(vap, va_guuid);
	}

	/*
	 * If the caller wants size values we might have to synthesise, give the
	 * filesystem the opportunity to supply better intermediate results.
	 */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		VATTR_SET_ACTIVE(vap, va_data_size);
		VATTR_SET_ACTIVE(vap, va_data_alloc);
		VATTR_SET_ACTIVE(vap, va_total_size);
		VATTR_SET_ACTIVE(vap, va_total_alloc);
	}

	error = VNOP_GETATTR(vp, vap, ctx);
	if (error) {
		KAUTH_DEBUG("ERROR - returning %d", error);
		goto out;
	}

	/*
	 * If extended security data was requested but not returned, try the fallback
	 * path.
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
		fsec = NULL;

		if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) {
			/* try to get the filesec */
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
				goto out;
		}
		/* if no filesec, no attributes */
		if (fsec == NULL) {
			VATTR_RETURN(vap, va_acl, NULL);
			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
		} else {

			/* looks good, try to return what we were asked for */
			VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
			VATTR_RETURN(vap, va_guuid, fsec->fsec_group);

			/* only return the ACL if we were actually asked for it */
			if (VATTR_IS_ACTIVE(vap, va_acl)) {
				if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
					VATTR_RETURN(vap, va_acl, NULL);
				} else {
					facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
					if (facl == NULL) {
						kauth_filesec_free(fsec);
						error = ENOMEM;
						goto out;
					}
					bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
					VATTR_RETURN(vap, va_acl, facl);
				}
			}
			kauth_filesec_free(fsec);
		}
	}
	/*
	 * If someone gave us an unsolicited filesec, toss it.  We promise that
	 * we're OK with a filesystem giving us anything back, but our callers
	 * only expect what they asked for.
	 */
	if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
		if (vap->va_acl != NULL)
			kauth_acl_free(vap->va_acl);
		VATTR_CLEAR_SUPPORTED(vap, va_acl);
	}

#if 0	/* enable when we have a filesystem only supporting UUIDs */
	/*
	 * Handle the case where we need a UID/GID, but only have extended
	 * security information.
	 */
	if (VATTR_NOT_RETURNED(vap, va_uid) &&
	    VATTR_IS_SUPPORTED(vap, va_uuuid) &&
	    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
			VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_NOT_RETURNED(vap, va_gid) &&
	    VATTR_IS_SUPPORTED(vap, va_guuid) &&
	    !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
			VATTR_RETURN(vap, va_gid, ngid);
	}
#endif

	/*
	 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
	 */
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			nuid = vp->v_mount->mnt_fsowner;
			if (nuid == KAUTH_UID_NONE)
				nuid = 99;
		} else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
			nuid = vap->va_uid;
		} else {
			/* this will always be something sensible */
			nuid = vp->v_mount->mnt_fsowner;
		}
		if ((nuid == 99) && !vfs_context_issuser(ctx))
			nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			ngid = vp->v_mount->mnt_fsgroup;
			if (ngid == KAUTH_GID_NONE)
				ngid = 99;
		} else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
			ngid = vap->va_gid;
		} else {
			/* this will always be something sensible */
			ngid = vp->v_mount->mnt_fsgroup;
		}
		if ((ngid == 99) && !vfs_context_issuser(ctx))
			ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_gid, ngid);
	}

	/*
	 * Synthesise some values that can be reasonably guessed.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_iosize))
		VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);

	if (!VATTR_IS_SUPPORTED(vap, va_flags))
		VATTR_RETURN(vap, va_flags, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_filerev))
		VATTR_RETURN(vap, va_filerev, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_gen))
		VATTR_RETURN(vap, va_gen, 0);

	/*
	 * Default sizes.  Ordering here is important, as later defaults build on earlier ones.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_data_size))
		VATTR_RETURN(vap, va_data_size, 0);

	/* do we want any of the possibly-computed values? */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		/* make sure f_bsize is valid */
		if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
			if ((error = vfs_update_vfsstat(vp->v_mount, ctx)) != 0)
				goto out;
		}

		/* default va_data_alloc from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
			VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));

		/* default va_total_size from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_total_size))
			VATTR_RETURN(vap, va_total_size, vap->va_data_size);

		/* default va_total_alloc from va_total_size which is guaranteed at this point */
		if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
			VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
	}

	/*
	 * If we don't have a change time, pull it from the modtime.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
		VATTR_RETURN(vap, va_change_time, vap->va_modify_time);

	/*
	 * This is really only supported for the creation VNOPs, but since the field is there
	 * we should populate it correctly.
	 */
	VATTR_RETURN(vap, va_type, vp->v_type);

	/*
	 * The fsid can be obtained from the mountpoint directly.
	 */
	VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

out:

	return(error);
}
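
/*
 * Caller sketch (illustrative, using the standard VATTR macros from
 * sys/vnode.h): attributes are requested with VATTR_WANTED() and must
 * be checked with VATTR_IS_SUPPORTED() before use, since a filesystem
 * may not be able to return everything that was asked for:
 *
 *	struct vnode_attr va;
 *	off_t size;
 *
 *	VATTR_INIT(&va);
 *	VATTR_WANTED(&va, va_data_size);
 *	VATTR_WANTED(&va, va_uid);
 *	if ((error = vnode_getattr(vp, &va, ctx)) == 0 &&
 *	    VATTR_IS_SUPPORTED(&va, va_data_size))
 *		size = va.va_data_size;
 */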

/*
 * Set the attributes on a vnode in a vnode context.
 *
 * Parameters:	vp			The vnode whose attributes to set.
 *		vap			A pointer to the attributes to set.
 *		ctx			The vnode context in which the
 *					operation is to be attempted.
 *
 * Returns:	0			Success
 *		!0			errno value
 *
 * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order.
 *
 *		The contents of the data area pointed to by 'vap' may be
 *		modified if the vnode is on a filesystem which has been
 *		mounted with ignore ownership flags, or by the underlying
 *		VFS itself, or by the fallback code, if the underlying VFS
 *		does not support ACL, UUID, or GUUID attributes directly.
 *
 * XXX:		We should enumerate the possible errno values here, and where
 *		in the code they originated.
 */
int
vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int error, is_ownership_change = 0;

	/*
	 * Make sure the filesystem is mounted R/W.
	 * If not, return an error.
	 */
	if (vfs_isrdonly(vp->v_mount)) {
		error = EROFS;
		goto out;
	}

	/*
	 * If ownership is being ignored on this volume, we silently discard
	 * ownership changes.
	 */
	if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}

	if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) {
		is_ownership_change = 1;
	}

	/*
	 * Make sure that extended security is enabled if we're going to try
	 * to set any.
	 */
	if (!vfs_extendedsecurity(vnode_mount(vp)) &&
	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
		KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
		error = ENOTSUP;
		goto out;
	}

	error = VNOP_SETATTR(vp, vap, ctx);

	if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
		error = vnode_setattr_fallback(vp, vap, ctx);

	/*
	 * If we have changed any of the things about the file that are likely
	 * to result in changes to authorisation results, blow the vnode auth
	 * cache
	 */
	if (VATTR_IS_SUPPORTED(vap, va_mode) ||
	    VATTR_IS_SUPPORTED(vap, va_uid) ||
	    VATTR_IS_SUPPORTED(vap, va_gid) ||
	    VATTR_IS_SUPPORTED(vap, va_flags) ||
	    VATTR_IS_SUPPORTED(vap, va_acl) ||
	    VATTR_IS_SUPPORTED(vap, va_uuuid) ||
	    VATTR_IS_SUPPORTED(vap, va_guuid))
		vnode_uncache_credentials(vp);
	// only send a stat_changed event if this is more than
	// just an access time update
	if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time))) {
		if (need_fsevent(FSE_STAT_CHANGED, vp) || (is_ownership_change && need_fsevent(FSE_CHOWN, vp))) {
			if (is_ownership_change == 0)
				add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
			else
				add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
		}
	}

out:
	return(error);
}
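
/*
 * Caller sketch (illustrative): values to be set are loaded with
 * VATTR_SET(), and on return VATTR_IS_SUPPORTED() reports which of
 * them the filesystem (or the fallback below) actually handled:
 *
 *	struct vnode_attr va;
 *
 *	VATTR_INIT(&va);
 *	VATTR_SET(&va, va_mode, 0600);
 *	error = vnode_setattr(vp, &va, ctx);
 */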
2037
2038 /*
2039 * Fallback for setting the attributes on a vnode in a vnode context. This
2040 * Function will attempt to store ACL, UUID, and GUID information utilizing
2041 * a read/modify/write operation against an EA used as a backing store for
2042 * the object.
2043 *
2044 * Parameters: vp The vnode whose attributes to set.
2045 * vap A pointer to the attributes to set.
2046 * ctx The vnode context in which the
2047 * operation is to be attempted.
2048 *
2049 * Returns: 0 Success
2050 * !0 errno value
2051 *
2052 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2053 * as are the fsec and lfsec, if they are used.
2054 *
2055 * The contents of the data area pointed to by 'vap' may be
2056 * modified to indicate which of the requested attributes
2057 * are supported.
2058 *
2059 * XXX: We should enumerate the possible errno values here, and where
2060 * in the code they originated.
2061 */
2062 int
2063 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2064 {
2065 kauth_filesec_t fsec;
2066 kauth_acl_t facl;
2067 struct kauth_filesec lfsec;
2068 int error;
2069
2070 error = 0;
2071
2072 /*
2073 * Extended security fallback via extended attributes.
2074 *
2075 * Note that we do not free the filesec; the caller is expected to
2076 * do this.
2077 */
2078 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2079 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2080 VATTR_NOT_RETURNED(vap, va_guuid)) {
2081 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2082
2083 /*
2084 * Fail for file types that we don't permit extended security
2085 * to be set on.
2086 */
2087 if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
2088 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2089 error = EINVAL;
2090 goto out;
2091 }
2092
2093 /*
2094 * If we don't have all the extended security items, we need
2095 * to fetch the existing data to perform a read-modify-write
2096 * operation.
2097 */
2098 fsec = NULL;
2099 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2100 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2101 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2102 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2103 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2104 goto out;
2105 }
2106 }
2107 /* if we didn't get a filesec, use our local one */
2108 if (fsec == NULL) {
2109 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2110 fsec = &lfsec;
2111 } else {
2112 KAUTH_DEBUG("SETATTR - updating existing filesec");
2113 }
2114 /* find the ACL */
2115 facl = &fsec->fsec_acl;
2116
2117 /* if we're using the local filesec, we need to initialise it */
2118 if (fsec == &lfsec) {
2119 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2120 fsec->fsec_owner = kauth_null_guid;
2121 fsec->fsec_group = kauth_null_guid;
2122 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2123 facl->acl_flags = 0;
2124 }
2125
2126 /*
2127 * Update with the supplied attributes.
2128 */
2129 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2130 KAUTH_DEBUG("SETATTR - updating owner UUID");
2131 fsec->fsec_owner = vap->va_uuuid;
2132 VATTR_SET_SUPPORTED(vap, va_uuuid);
2133 }
2134 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2135 KAUTH_DEBUG("SETATTR - updating group UUID");
2136 fsec->fsec_group = vap->va_guuid;
2137 VATTR_SET_SUPPORTED(vap, va_guuid);
2138 }
2139 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2140 if (vap->va_acl == NULL) {
2141 KAUTH_DEBUG("SETATTR - removing ACL");
2142 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2143 } else {
2144 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2145 facl = vap->va_acl;
2146 }
2147 VATTR_SET_SUPPORTED(vap, va_acl);
2148 }
2149
2150 /*
2151 * If the filesec data is all invalid, we can just remove
2152 * the EA completely.
2153 */
2154 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2155 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2156 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2157 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2158 /* no attribute is ok, nothing to delete */
2159 if (error == ENOATTR)
2160 error = 0;
2161 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2162 } else {
2163 /* write the EA */
2164 error = vnode_set_filesec(vp, fsec, facl, ctx);
2165 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2166 }
2167
2168 /* if we fetched a filesec, dispose of the buffer */
2169 if (fsec != &lfsec)
2170 kauth_filesec_free(fsec);
2171 }
2172 out:
2173
2174 return(error);
2175 }
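/*
 * The "all invalid" test above, restated as a hypothetical predicate
 * (a sketch; no such helper exists in this file): an empty filesec --
 * no ACL entries and null owner/group GUIDs -- means the backing EA
 * can simply be deleted rather than rewritten.
 */
#if 0 /* illustration only */
static int
filesec_is_empty(kauth_filesec_t fsec, kauth_acl_t facl)
{
	return ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
	    kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
	    kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid));
}
#endif /* 0 */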
2176
2177 /*
2178 * Definition of vnode operations.
2179 */
2180
2181 #if 0
2182 /*
2183 *#
2184 *#% lookup dvp L ? ?
2185 *#% lookup vpp - L -
2186 */
2187 struct vnop_lookup_args {
2188 struct vnodeop_desc *a_desc;
2189 vnode_t a_dvp;
2190 vnode_t *a_vpp;
2191 struct componentname *a_cnp;
2192 vfs_context_t a_context;
2193 };
2194 #endif /* 0*/
2195
2196 errno_t
2197 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t context)
2198 {
2199 int _err;
2200 struct vnop_lookup_args a;
2201 vnode_t vp;
2202 int thread_safe;
2203 int funnel_state = 0;
2204
2205 a.a_desc = &vnop_lookup_desc;
2206 a.a_dvp = dvp;
2207 a.a_vpp = vpp;
2208 a.a_cnp = cnp;
2209 a.a_context = context;
2210 thread_safe = THREAD_SAFE_FS(dvp);
2211
2212 vnode_cache_credentials(dvp, context);
2213
2214 if (!thread_safe) {
2215 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2216 return (_err);
2217 }
2218 }
2219 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
2220
2221 vp = *vpp;
2222
2223 if (!thread_safe) {
2224 if ( (cnp->cn_flags & ISLASTCN) ) {
2225 if ( (cnp->cn_flags & LOCKPARENT) ) {
2226 if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
2227 /*
2228 * leave the fsnode lock held on
2229 * the directory, but restore the funnel...
2230 * also indicate that we need to drop the
2231 * fsnode_lock when we're done with the
2232 * system call processing for this path
2233 */
2234 cnp->cn_flags |= FSNODELOCKHELD;
2235
2236 (void) thread_funnel_set(kernel_flock, funnel_state);
2237 return (_err);
2238 }
2239 }
2240 }
2241 unlock_fsnode(dvp, &funnel_state);
2242 }
2243 return (_err);
2244 }
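/*
 * Every VNOP_* wrapper in this file dispatches the same way: the
 * vnodeop descriptor's vdesc_offset indexes the per-vnode operations
 * vector v_op to reach the filesystem's handler.  A generic sketch of
 * that dispatch (hypothetical helper, illustration only):
 */
#if 0
static int
vnop_dispatch(vnode_t vp, struct vnodeop_desc *desc, void *argsp)
{
	/* v_op is an array of function pointers, one slot per vnop */
	return ((*vp->v_op[desc->vdesc_offset])(argsp));
}
#endif /* 0 */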
2245
2246 #if 0
2247 /*
2248 *#
2249 *#% create dvp L L L
2250 *#% create vpp - L -
2251 *#
2252 */
2253
2254 struct vnop_create_args {
2255 struct vnodeop_desc *a_desc;
2256 vnode_t a_dvp;
2257 vnode_t *a_vpp;
2258 struct componentname *a_cnp;
2259 struct vnode_attr *a_vap;
2260 vfs_context_t a_context;
2261 };
2262 #endif /* 0*/
2263 errno_t
2264 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
2265 {
2266 int _err;
2267 struct vnop_create_args a;
2268 int thread_safe;
2269 int funnel_state = 0;
2270
2271 a.a_desc = &vnop_create_desc;
2272 a.a_dvp = dvp;
2273 a.a_vpp = vpp;
2274 a.a_cnp = cnp;
2275 a.a_vap = vap;
2276 a.a_context = context;
2277 thread_safe = THREAD_SAFE_FS(dvp);
2278
2279 if (!thread_safe) {
2280 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2281 return (_err);
2282 }
2283 }
2284 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
2285 if (_err == 0 && !NATIVE_XATTR(dvp)) {
2286 /*
2287 * Remove stale Apple Double file (if any).
2288 */
2289 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
2290 }
2291 if (!thread_safe) {
2292 unlock_fsnode(dvp, &funnel_state);
2293 }
2294 return (_err);
2295 }
2296
2297 #if 0
2298 /*
2299 *#
2300 *#% whiteout dvp L L L
2301 *#% whiteout cnp - - -
2302 *#% whiteout flag - - -
2303 *#
2304 */
2305 struct vnop_whiteout_args {
2306 struct vnodeop_desc *a_desc;
2307 vnode_t a_dvp;
2308 struct componentname *a_cnp;
2309 int a_flags;
2310 vfs_context_t a_context;
2311 };
2312 #endif /* 0*/
2313 errno_t
2314 VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t context)
2315 {
2316 int _err;
2317 struct vnop_whiteout_args a;
2318 int thread_safe;
2319 int funnel_state = 0;
2320
2321 a.a_desc = &vnop_whiteout_desc;
2322 a.a_dvp = dvp;
2323 a.a_cnp = cnp;
2324 a.a_flags = flags;
2325 a.a_context = context;
2326 thread_safe = THREAD_SAFE_FS(dvp);
2327
2328 if (!thread_safe) {
2329 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2330 return (_err);
2331 }
2332 }
2333 _err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);
2334 if (!thread_safe) {
2335 unlock_fsnode(dvp, &funnel_state);
2336 }
2337 return (_err);
2338 }
2339
2340 #if 0
2341 /*
2342 *#
2343 *#% mknod dvp L U U
2344 *#% mknod vpp - X -
2345 *#
2346 */
2347 struct vnop_mknod_args {
2348 struct vnodeop_desc *a_desc;
2349 vnode_t a_dvp;
2350 vnode_t *a_vpp;
2351 struct componentname *a_cnp;
2352 struct vnode_attr *a_vap;
2353 vfs_context_t a_context;
2354 };
2355 #endif /* 0*/
2356 errno_t
2357 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
2358 {
2359
2360 int _err;
2361 struct vnop_mknod_args a;
2362 int thread_safe;
2363 int funnel_state = 0;
2364
2365 a.a_desc = &vnop_mknod_desc;
2366 a.a_dvp = dvp;
2367 a.a_vpp = vpp;
2368 a.a_cnp = cnp;
2369 a.a_vap = vap;
2370 a.a_context = context;
2371 thread_safe = THREAD_SAFE_FS(dvp);
2372
2373 if (!thread_safe) {
2374 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2375 return (_err);
2376 }
2377 }
2378 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
2379 if (!thread_safe) {
2380 unlock_fsnode(dvp, &funnel_state);
2381 }
2382 return (_err);
2383 }
2384
2385 #if 0
2386 /*
2387 *#
2388 *#% open vp L L L
2389 *#
2390 */
2391 struct vnop_open_args {
2392 struct vnodeop_desc *a_desc;
2393 vnode_t a_vp;
2394 int a_mode;
2395 vfs_context_t a_context;
2396 };
2397 #endif /* 0*/
2398 errno_t
2399 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t context)
2400 {
2401 int _err;
2402 struct vnop_open_args a;
2403 int thread_safe;
2404 int funnel_state = 0;
2405 struct vfs_context acontext;
2406
2407 if (context == NULL) {
2408 acontext.vc_proc = current_proc();
2409 acontext.vc_ucred = kauth_cred_get();
2410 context = &acontext;
2411 }
2412 a.a_desc = &vnop_open_desc;
2413 a.a_vp = vp;
2414 a.a_mode = mode;
2415 a.a_context = context;
2416 thread_safe = THREAD_SAFE_FS(vp);
2417
2418 if (!thread_safe) {
2419 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2420 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2421 if ( (_err = lock_fsnode(vp, NULL)) ) {
2422 (void) thread_funnel_set(kernel_flock, funnel_state);
2423 return (_err);
2424 }
2425 }
2426 }
2427 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
2428 if (!thread_safe) {
2429 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2430 unlock_fsnode(vp, NULL);
2431 }
2432 (void) thread_funnel_set(kernel_flock, funnel_state);
2433 }
2434 return (_err);
2435 }
2436
2437 #if 0
2438 /*
2439 *#
2440 *#% close vp U U U
2441 *#
2442 */
2443 struct vnop_close_args {
2444 struct vnodeop_desc *a_desc;
2445 vnode_t a_vp;
2446 int a_fflag;
2447 vfs_context_t a_context;
2448 };
2449 #endif /* 0*/
2450 errno_t
2451 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t context)
2452 {
2453 int _err;
2454 struct vnop_close_args a;
2455 int thread_safe;
2456 int funnel_state = 0;
2457 struct vfs_context acontext;
2458
2459 if (context == NULL) {
2460 acontext.vc_proc = current_proc();
2461 acontext.vc_ucred = kauth_cred_get();
2462 context = &acontext;
2463 }
2464 a.a_desc = &vnop_close_desc;
2465 a.a_vp = vp;
2466 a.a_fflag = fflag;
2467 a.a_context = context;
2468 thread_safe = THREAD_SAFE_FS(vp);
2469
2470 if (!thread_safe) {
2471 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2472 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2473 if ( (_err = lock_fsnode(vp, NULL)) ) {
2474 (void) thread_funnel_set(kernel_flock, funnel_state);
2475 return (_err);
2476 }
2477 }
2478 }
2479 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
2480 if (!thread_safe) {
2481 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2482 unlock_fsnode(vp, NULL);
2483 }
2484 (void) thread_funnel_set(kernel_flock, funnel_state);
2485 }
2486 return (_err);
2487 }
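/*
 * In-kernel callers normally reach VNOP_OPEN/VNOP_CLOSE through the
 * vnode_open()/vnode_close() KPI rather than invoking the VNOPs raw.
 * A rough sketch; the path, flags, and error handling here are
 * illustrative assumptions, not taken from this file.
 */
#if 0
	vnode_t vp;
	int error;

	error = vnode_open("/tmp/example", FREAD, 0, 0, &vp, ctx);
	if (error == 0) {
		/* ... VNOP_READ / VNOP_GETATTR / etc ... */
		(void) vnode_close(vp, FREAD, ctx);
	}
#endif /* 0 */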
2488
2489 #if 0
2490 /*
2491 *#
2492 *#% access vp L L L
2493 *#
2494 */
2495 struct vnop_access_args {
2496 struct vnodeop_desc *a_desc;
2497 vnode_t a_vp;
2498 int a_action;
2499 vfs_context_t a_context;
2500 };
2501 #endif /* 0*/
2502 errno_t
2503 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t context)
2504 {
2505 int _err;
2506 struct vnop_access_args a;
2507 int thread_safe;
2508 int funnel_state = 0;
2509 struct vfs_context acontext;
2510
2511 if (context == NULL) {
2512 acontext.vc_proc = current_proc();
2513 acontext.vc_ucred = kauth_cred_get();
2514 context = &acontext;
2515 }
2516 a.a_desc = &vnop_access_desc;
2517 a.a_vp = vp;
2518 a.a_action = action;
2519 a.a_context = context;
2520 thread_safe = THREAD_SAFE_FS(vp);
2521
2522 if (!thread_safe) {
2523 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2524 return (_err);
2525 }
2526 }
2527 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
2528 if (!thread_safe) {
2529 unlock_fsnode(vp, &funnel_state);
2530 }
2531 return (_err);
2532 }
2533
2534 #if 0
2535 /*
2536 *#
2537 *#% getattr vp = = =
2538 *#
2539 */
2540 struct vnop_getattr_args {
2541 struct vnodeop_desc *a_desc;
2542 vnode_t a_vp;
2543 struct vnode_attr *a_vap;
2544 vfs_context_t a_context;
2545 };
2546 #endif /* 0*/
2547 errno_t
2548 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
2549 {
2550 int _err;
2551 struct vnop_getattr_args a;
2552 int thread_safe;
2553 int funnel_state;
2554
2555 a.a_desc = &vnop_getattr_desc;
2556 a.a_vp = vp;
2557 a.a_vap = vap;
2558 a.a_context = context;
2559 thread_safe = THREAD_SAFE_FS(vp);
2560
2561 if (!thread_safe) {
2562 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2563 return (_err);
2564 }
2565 }
2566 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
2567 if (!thread_safe) {
2568 unlock_fsnode(vp, &funnel_state);
2569 }
2570 return (_err);
2571 }
2572
2573 #if 0
2574 /*
2575 *#
2576 *#% setattr vp L L L
2577 *#
2578 */
2579 struct vnop_setattr_args {
2580 struct vnodeop_desc *a_desc;
2581 vnode_t a_vp;
2582 struct vnode_attr *a_vap;
2583 vfs_context_t a_context;
2584 };
2585 #endif /* 0*/
2586 errno_t
2587 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
2588 {
2589 int _err;
2590 struct vnop_setattr_args a;
2591 int thread_safe;
2592 int funnel_state;
2593
2594 a.a_desc = &vnop_setattr_desc;
2595 a.a_vp = vp;
2596 a.a_vap = vap;
2597 a.a_context = context;
2598 thread_safe = THREAD_SAFE_FS(vp);
2599
2600 if (!thread_safe) {
2601 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2602 return (_err);
2603 }
2604 }
2605 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
2606
2607 /*
2608 * Shadow uid/gid/mode changes to the extended attribute file.
2609 */
2610 if (_err == 0 && !NATIVE_XATTR(vp)) {
2611 struct vnode_attr va;
2612 int change = 0;
2613
2614 VATTR_INIT(&va);
2615 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2616 VATTR_SET(&va, va_uid, vap->va_uid);
2617 change = 1;
2618 }
2619 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2620 VATTR_SET(&va, va_gid, vap->va_gid);
2621 change = 1;
2622 }
2623 if (VATTR_IS_ACTIVE(vap, va_mode)) {
2624 VATTR_SET(&va, va_mode, vap->va_mode);
2625 change = 1;
2626 }
2627 if (change) {
2628 vnode_t dvp;
2629 char *vname;
2630
2631 dvp = vnode_getparent(vp);
2632 vname = vnode_getname(vp);
2633
2634 xattrfile_setattr(dvp, vname, &va, context, thread_safe);
2635 if (dvp != NULLVP)
2636 vnode_put(dvp);
2637 if (vname != NULL)
2638 vnode_putname(vname);
2639 }
2640 }
2641 if (!thread_safe) {
2642 unlock_fsnode(vp, &funnel_state);
2643 }
2644 return (_err);
2645 }
2646
2647 #if 0
2648 /*
2649 *#
2650 *#% getattrlist vp = = =
2651 *#
2652 */
2653 struct vnop_getattrlist_args {
2654 struct vnodeop_desc *a_desc;
2655 vnode_t a_vp;
2656 struct attrlist *a_alist;
2657 struct uio *a_uio;
2658 int a_options;
2659 vfs_context_t a_context;
2660 };
2661 #endif /* 0*/
2662 errno_t
2663 VNOP_GETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
2664 {
2665 int _err;
2666 struct vnop_getattrlist_args a;
2667 int thread_safe;
2668 int funnel_state = 0;
2669
2670 a.a_desc = &vnop_getattrlist_desc;
2671 a.a_vp = vp;
2672 a.a_alist = alist;
2673 a.a_uio = uio;
2674 a.a_options = options;
2675 a.a_context = context;
2676 thread_safe = THREAD_SAFE_FS(vp);
2677
2678 if (!thread_safe) {
2679 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2680 return (_err);
2681 }
2682 }
2683 _err = (*vp->v_op[vnop_getattrlist_desc.vdesc_offset])(&a);
2684 if (!thread_safe) {
2685 unlock_fsnode(vp, &funnel_state);
2686 }
2687 return (_err);
2688 }
2689
2690 #if 0
2691 /*
2692 *#
2693 *#% setattrlist vp L L L
2694 *#
2695 */
2696 struct vnop_setattrlist_args {
2697 struct vnodeop_desc *a_desc;
2698 vnode_t a_vp;
2699 struct attrlist *a_alist;
2700 struct uio *a_uio;
2701 int a_options;
2702 vfs_context_t a_context;
2703 };
2704 #endif /* 0*/
2705 errno_t
2706 VNOP_SETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
2707 {
2708 int _err;
2709 struct vnop_setattrlist_args a;
2710 int thread_safe;
2711 int funnel_state = 0;
2712
2713 a.a_desc = &vnop_setattrlist_desc;
2714 a.a_vp = vp;
2715 a.a_alist = alist;
2716 a.a_uio = uio;
2717 a.a_options = options;
2718 a.a_context = context;
2719 thread_safe = THREAD_SAFE_FS(vp);
2720
2721 if (!thread_safe) {
2722 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2723 return (_err);
2724 }
2725 }
2726 _err = (*vp->v_op[vnop_setattrlist_desc.vdesc_offset])(&a);
2727
2728 vnode_uncache_credentials(vp);
2729
2730 if (!thread_safe) {
2731 unlock_fsnode(vp, &funnel_state);
2732 }
2733 return (_err);
2734 }
2735
2736
2737 #if 0
2738 /*
2739 *#
2740 *#% read vp L L L
2741 *#
2742 */
2743 struct vnop_read_args {
2744 struct vnodeop_desc *a_desc;
2745 vnode_t a_vp;
2746 struct uio *a_uio;
2747 int a_ioflag;
2748 vfs_context_t a_context;
2749 };
2750 #endif /* 0*/
2751 errno_t
2752 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
2753 {
2754 int _err;
2755 struct vnop_read_args a;
2756 int thread_safe;
2757 int funnel_state = 0;
2758 struct vfs_context acontext;
2759
2760 if (context == NULL) {
2761 acontext.vc_proc = current_proc();
2762 acontext.vc_ucred = kauth_cred_get();
2763 context = &acontext;
2764 }
2765
2766 a.a_desc = &vnop_read_desc;
2767 a.a_vp = vp;
2768 a.a_uio = uio;
2769 a.a_ioflag = ioflag;
2770 a.a_context = context;
2771 thread_safe = THREAD_SAFE_FS(vp);
2772
2773 if (!thread_safe) {
2774 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2775 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2776 if ( (_err = lock_fsnode(vp, NULL)) ) {
2777 (void) thread_funnel_set(kernel_flock, funnel_state);
2778 return (_err);
2779 }
2780 }
2781 }
2782 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
2783
2784 if (!thread_safe) {
2785 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2786 unlock_fsnode(vp, NULL);
2787 }
2788 (void) thread_funnel_set(kernel_flock, funnel_state);
2789 }
2790 return (_err);
2791 }
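/*
 * A sketch of feeding VNOP_READ from kernel space: wrap a kernel buffer
 * in a uio with uio_create()/uio_addiov() and hand it down.  The helper
 * name and the buffer handling are assumptions for illustration.
 */
#if 0
#include <sys/uio.h>

static int
read_at_offset(vnode_t vp, void *buf, size_t len, off_t off, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	auio = uio_create(1, off, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), len);
	error = VNOP_READ(vp, auio, 0, ctx);
	uio_free(auio);
	return (error);
}
#endif /* 0 */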
2792
2793
2794 #if 0
2795 /*
2796 *#
2797 *#% write vp L L L
2798 *#
2799 */
2800 struct vnop_write_args {
2801 struct vnodeop_desc *a_desc;
2802 vnode_t a_vp;
2803 struct uio *a_uio;
2804 int a_ioflag;
2805 vfs_context_t a_context;
2806 };
2807 #endif /* 0*/
2808 errno_t
2809 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
2810 {
2811 struct vnop_write_args a;
2812 int _err;
2813 int thread_safe;
2814 int funnel_state = 0;
2815 struct vfs_context acontext;
2816
2817 if (context == NULL) {
2818 acontext.vc_proc = current_proc();
2819 acontext.vc_ucred = kauth_cred_get();
2820 context = &acontext;
2821 }
2822
2823 a.a_desc = &vnop_write_desc;
2824 a.a_vp = vp;
2825 a.a_uio = uio;
2826 a.a_ioflag = ioflag;
2827 a.a_context = context;
2828 thread_safe = THREAD_SAFE_FS(vp);
2829
2830 if (!thread_safe) {
2831 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2832 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2833 if ( (_err = lock_fsnode(vp, NULL)) ) {
2834 (void) thread_funnel_set(kernel_flock, funnel_state);
2835 return (_err);
2836 }
2837 }
2838 }
2839 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
2840
2841 if (!thread_safe) {
2842 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2843 unlock_fsnode(vp, NULL);
2844 }
2845 (void) thread_funnel_set(kernel_flock, funnel_state);
2846 }
2847 return (_err);
2848 }
2849
2850
2851 #if 0
2852 /*
2853 *#
2854 *#% ioctl vp U U U
2855 *#
2856 */
2857 struct vnop_ioctl_args {
2858 struct vnodeop_desc *a_desc;
2859 vnode_t a_vp;
2860 u_long a_command;
2861 caddr_t a_data;
2862 int a_fflag;
2863 vfs_context_t a_context;
2864 };
2865 #endif /* 0*/
2866 errno_t
2867 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t context)
2868 {
2869 int _err;
2870 struct vnop_ioctl_args a;
2871 int thread_safe;
2872 int funnel_state = 0;
2873 struct vfs_context acontext;
2874
2875 if (context == NULL) {
2876 acontext.vc_proc = current_proc();
2877 acontext.vc_ucred = kauth_cred_get();
2878 context = &acontext;
2879 }
2880
2881 if (vfs_context_is64bit(context)) {
2882 if (!vnode_vfs64bitready(vp)) {
2883 return(ENOTTY);
2884 }
2885 }
2886
2887 a.a_desc = &vnop_ioctl_desc;
2888 a.a_vp = vp;
2889 a.a_command = command;
2890 a.a_data = data;
2891 a.a_fflag = fflag;
2892 a.a_context = context;
2893 thread_safe = THREAD_SAFE_FS(vp);
2894
2895 if (!thread_safe) {
2896 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2897 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2898 if ( (_err = lock_fsnode(vp, NULL)) ) {
2899 (void) thread_funnel_set(kernel_flock, funnel_state);
2900 return (_err);
2901 }
2902 }
2903 }
2904 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
2905 if (!thread_safe) {
2906 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2907 unlock_fsnode(vp, NULL);
2908 }
2909 (void) thread_funnel_set(kernel_flock, funnel_state);
2910 }
2911 return (_err);
2912 }
2913
2914
2915 #if 0
2916 /*
2917 *#
2918 *#% select vp U U U
2919 *#
2920 */
2921 struct vnop_select_args {
2922 struct vnodeop_desc *a_desc;
2923 vnode_t a_vp;
2924 int a_which;
2925 int a_fflags;
2926 void *a_wql;
2927 vfs_context_t a_context;
2928 };
2929 #endif /* 0*/
2930 errno_t
2931 VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t context)
2932 {
2933 int _err;
2934 struct vnop_select_args a;
2935 int thread_safe;
2936 int funnel_state = 0;
2937 struct vfs_context acontext;
2938
2939 if (context == NULL) {
2940 acontext.vc_proc = current_proc();
2941 acontext.vc_ucred = kauth_cred_get();
2942 context = &acontext;
2943 }
2944 a.a_desc = &vnop_select_desc;
2945 a.a_vp = vp;
2946 a.a_which = which;
2947 a.a_fflags = fflags;
2948 a.a_context = context;
2949 a.a_wql = wql;
2950 thread_safe = THREAD_SAFE_FS(vp);
2951
2952 if (!thread_safe) {
2953 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2954 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2955 if ( (_err = lock_fsnode(vp, NULL)) ) {
2956 (void) thread_funnel_set(kernel_flock, funnel_state);
2957 return (_err);
2958 }
2959 }
2960 }
2961 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
2962 if (!thread_safe) {
2963 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2964 unlock_fsnode(vp, NULL);
2965 }
2966 (void) thread_funnel_set(kernel_flock, funnel_state);
2967 }
2968 return (_err);
2969 }
2970
2971
2972 #if 0
2973 /*
2974 *#
2975 *#% exchange fvp L L L
2976 *#% exchange tvp L L L
2977 *#
2978 */
2979 struct vnop_exchange_args {
2980 struct vnodeop_desc *a_desc;
2981 vnode_t a_fvp;
2982 vnode_t a_tvp;
2983 int a_options;
2984 vfs_context_t a_context;
2985 };
2986 #endif /* 0*/
2987 errno_t
2988 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t context)
2989 {
2990 int _err;
2991 struct vnop_exchange_args a;
2992 int thread_safe;
2993 int funnel_state = 0;
2994 vnode_t lock_first = NULL, lock_second = NULL;
2995
2996 a.a_desc = &vnop_exchange_desc;
2997 a.a_fvp = fvp;
2998 a.a_tvp = tvp;
2999 a.a_options = options;
3000 a.a_context = context;
3001 thread_safe = THREAD_SAFE_FS(fvp);
3002
3003 if (!thread_safe) {
3004 /*
3005 * Lock in vnode address order to avoid deadlocks
3006 */
3007 if (fvp < tvp) {
3008 lock_first = fvp;
3009 lock_second = tvp;
3010 } else {
3011 lock_first = tvp;
3012 lock_second = fvp;
3013 }
3014 if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
3015 return (_err);
3016 }
3017 if ( (_err = lock_fsnode(lock_second, NULL)) ) {
3018 unlock_fsnode(lock_first, &funnel_state);
3019 return (_err);
3020 }
3021 }
3022 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3023 if (!thread_safe) {
3024 unlock_fsnode(lock_second, NULL);
3025 unlock_fsnode(lock_first, &funnel_state);
3026 }
3027 return (_err);
3028 }
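/*
 * The address-order rule used above (and again in VNOP_RENAME), in one
 * place: taking multi-vnode fsnode locks in a single global order --
 * here, ascending vnode address -- is what keeps two racing threads
 * from deadlocking against each other.  Hypothetical helper, sketch only:
 */
#if 0
static void
order_vnodes(vnode_t a, vnode_t b, vnode_t *firstp, vnode_t *secondp)
{
	if (a < b) {
		*firstp = a;
		*secondp = b;
	} else {
		*firstp = b;
		*secondp = a;
	}
}
#endif /* 0 */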
3029
3030
3031 #if 0
3032 /*
3033 *#
3034 *#% revoke vp U U U
3035 *#
3036 */
3037 struct vnop_revoke_args {
3038 struct vnodeop_desc *a_desc;
3039 vnode_t a_vp;
3040 int a_flags;
3041 vfs_context_t a_context;
3042 };
3043 #endif /* 0*/
3044 errno_t
3045 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t context)
3046 {
3047 struct vnop_revoke_args a;
3048 int _err;
3049 int thread_safe;
3050 int funnel_state = 0;
3051
3052 a.a_desc = &vnop_revoke_desc;
3053 a.a_vp = vp;
3054 a.a_flags = flags;
3055 a.a_context = context;
3056 thread_safe = THREAD_SAFE_FS(vp);
3057
3058 if (!thread_safe) {
3059 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3060 }
3061 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3062 if (!thread_safe) {
3063 (void) thread_funnel_set(kernel_flock, funnel_state);
3064 }
3065 return (_err);
3066 }
3067
3068
3069 #if 0
3070 /*
3071 *#
3072 *# mmap - vp U U U
3073 *#
3074 */
3075 struct vnop_mmap_args {
3076 struct vnodeop_desc *a_desc;
3077 vnode_t a_vp;
3078 int a_fflags;
3079 vfs_context_t a_context;
3080 };
3081 #endif /* 0*/
3082 errno_t
3083 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t context)
3084 {
3085 int _err;
3086 struct vnop_mmap_args a;
3087 int thread_safe;
3088 int funnel_state = 0;
3089
3090 a.a_desc = &vnop_mmap_desc;
3091 a.a_vp = vp;
3092 a.a_fflags = fflags;
3093 a.a_context = context;
3094 thread_safe = THREAD_SAFE_FS(vp);
3095
3096 if (!thread_safe) {
3097 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3098 return (_err);
3099 }
3100 }
3101 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3102 if (!thread_safe) {
3103 unlock_fsnode(vp, &funnel_state);
3104 }
3105 return (_err);
3106 }
3107
3108
3109 #if 0
3110 /*
3111 *#
3112 *# mnomap - vp U U U
3113 *#
3114 */
3115 struct vnop_mnomap_args {
3116 struct vnodeop_desc *a_desc;
3117 vnode_t a_vp;
3118 vfs_context_t a_context;
3119 };
3120 #endif /* 0*/
3121 errno_t
3122 VNOP_MNOMAP(vnode_t vp, vfs_context_t context)
3123 {
3124 int _err;
3125 struct vnop_mnomap_args a;
3126 int thread_safe;
3127 int funnel_state = 0;
3128
3129 a.a_desc = &vnop_mnomap_desc;
3130 a.a_vp = vp;
3131 a.a_context = context;
3132 thread_safe = THREAD_SAFE_FS(vp);
3133
3134 if (!thread_safe) {
3135 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3136 return (_err);
3137 }
3138 }
3139 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3140 if (!thread_safe) {
3141 unlock_fsnode(vp, &funnel_state);
3142 }
3143 return (_err);
3144 }
3145
3146
3147 #if 0
3148 /*
3149 *#
3150 *#% fsync vp L L L
3151 *#
3152 */
3153 struct vnop_fsync_args {
3154 struct vnodeop_desc *a_desc;
3155 vnode_t a_vp;
3156 int a_waitfor;
3157 vfs_context_t a_context;
3158 };
3159 #endif /* 0*/
3160 errno_t
3161 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t context)
3162 {
3163 struct vnop_fsync_args a;
3164 int _err;
3165 int thread_safe;
3166 int funnel_state = 0;
3167
3168 a.a_desc = &vnop_fsync_desc;
3169 a.a_vp = vp;
3170 a.a_waitfor = waitfor;
3171 a.a_context = context;
3172 thread_safe = THREAD_SAFE_FS(vp);
3173
3174 if (!thread_safe) {
3175 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3176 return (_err);
3177 }
3178 }
3179 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3180 if (!thread_safe) {
3181 unlock_fsnode(vp, &funnel_state);
3182 }
3183 return (_err);
3184 }
3185
3186
3187 #if 0
3188 /*
3189 *#
3190 *#% remove dvp L U U
3191 *#% remove vp L U U
3192 *#
3193 */
3194 struct vnop_remove_args {
3195 struct vnodeop_desc *a_desc;
3196 vnode_t a_dvp;
3197 vnode_t a_vp;
3198 struct componentname *a_cnp;
3199 int a_flags;
3200 vfs_context_t a_context;
3201 };
3202 #endif /* 0*/
3203 errno_t
3204 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t context)
3205 {
3206 int _err;
3207 struct vnop_remove_args a;
3208 int thread_safe;
3209 int funnel_state = 0;
3210
3211 a.a_desc = &vnop_remove_desc;
3212 a.a_dvp = dvp;
3213 a.a_vp = vp;
3214 a.a_cnp = cnp;
3215 a.a_flags = flags;
3216 a.a_context = context;
3217 thread_safe = THREAD_SAFE_FS(dvp);
3218
3219 if (!thread_safe) {
3220 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3221 return (_err);
3222 }
3223 }
3224 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3225
3226 if (_err == 0) {
3227 vnode_setneedinactive(vp);
3228
3229 if ( !(NATIVE_XATTR(dvp)) ) {
3230 /*
3231 * Remove any associated extended attribute file (._ AppleDouble file).
3232 */
3233 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
3234 }
3235 }
3236 if (!thread_safe) {
3237 unlock_fsnode(vp, &funnel_state);
3238 }
3239 return (_err);
3240 }
3241
3242
3243 #if 0
3244 /*
3245 *#
3246 *#% link vp U U U
3247 *#% link tdvp L U U
3248 *#
3249 */
3250 struct vnop_link_args {
3251 struct vnodeop_desc *a_desc;
3252 vnode_t a_vp;
3253 vnode_t a_tdvp;
3254 struct componentname *a_cnp;
3255 vfs_context_t a_context;
3256 };
3257 #endif /* 0*/
3258 errno_t
3259 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t context)
3260 {
3261 int _err;
3262 struct vnop_link_args a;
3263 int thread_safe;
3264 int funnel_state = 0;
3265
3266 /*
3267 * For file systems with non-native extended attributes,
3268 * disallow linking to an existing "._" Apple Double file.
3269 */
3270 if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
3271 char *vname;
3272
3273 vname = vnode_getname(vp);
3274 if (vname != NULL) {
3275 _err = 0;
3276 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
3277 _err = EPERM;
3278 }
3279 vnode_putname(vname);
3280 if (_err)
3281 return (_err);
3282 }
3283 }
3284 a.a_desc = &vnop_link_desc;
3285 a.a_vp = vp;
3286 a.a_tdvp = tdvp;
3287 a.a_cnp = cnp;
3288 a.a_context = context;
3289 thread_safe = THREAD_SAFE_FS(vp);
3290
3291 if (!thread_safe) {
3292 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3293 return (_err);
3294 }
3295 }
3296 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
3297 if (!thread_safe) {
3298 unlock_fsnode(vp, &funnel_state);
3299 }
3300 return (_err);
3301 }
3302
3303
3304 #if 0
3305 /*
3306 *#
3307 *#% rename fdvp U U U
3308 *#% rename fvp U U U
3309 *#% rename tdvp L U U
3310 *#% rename tvp X U U
3311 *#
3312 */
3313 struct vnop_rename_args {
3314 struct vnodeop_desc *a_desc;
3315 vnode_t a_fdvp;
3316 vnode_t a_fvp;
3317 struct componentname *a_fcnp;
3318 vnode_t a_tdvp;
3319 vnode_t a_tvp;
3320 struct componentname *a_tcnp;
3321 vfs_context_t a_context;
3322 };
3323 #endif /* 0*/
3324 errno_t
3325 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
3326 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
3327 vfs_context_t context)
3328 {
3329 int _err;
3330 struct vnop_rename_args a;
3331 int funnel_state = 0;
3332 char smallname1[48];
3333 char smallname2[48];
3334 char *xfromname = NULL;
3335 char *xtoname = NULL;
3336 vnode_t lock_first = NULL, lock_second = NULL;
3337 vnode_t fdvp_unsafe = NULLVP;
3338 vnode_t tdvp_unsafe = NULLVP;
3339
3340 a.a_desc = &vnop_rename_desc;
3341 a.a_fdvp = fdvp;
3342 a.a_fvp = fvp;
3343 a.a_fcnp = fcnp;
3344 a.a_tdvp = tdvp;
3345 a.a_tvp = tvp;
3346 a.a_tcnp = tcnp;
3347 a.a_context = context;
3348
3349 if (!THREAD_SAFE_FS(fdvp))
3350 fdvp_unsafe = fdvp;
3351 if (!THREAD_SAFE_FS(tdvp))
3352 tdvp_unsafe = tdvp;
3353
3354 if (fdvp_unsafe != NULLVP) {
3355 /*
3356 * Lock parents in vnode address order to avoid deadlocks
3357 * note that it's possible for the fdvp to be unsafe,
3358 * but the tdvp to be safe because tvp could be a directory
3359 * in the root of a filesystem... in that case, tdvp is
3360 * in the filesystem that this root is mounted on
3361 */
3362 if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
3363 lock_first = fdvp_unsafe;
3364 lock_second = NULL;
3365 } else if (fdvp_unsafe < tdvp_unsafe) {
3366 lock_first = fdvp_unsafe;
3367 lock_second = tdvp_unsafe;
3368 } else {
3369 lock_first = tdvp_unsafe;
3370 lock_second = fdvp_unsafe;
3371 }
3372 if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
3373 return (_err);
3374
3375 if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
3376 unlock_fsnode(lock_first, &funnel_state);
3377 return (_err);
3378 }
3379
3380 /*
3381 * Lock both children in vnode address order to avoid deadlocks
3382 */
3383 if (tvp == NULL || tvp == fvp) {
3384 lock_first = fvp;
3385 lock_second = NULL;
3386 } else if (fvp < tvp) {
3387 lock_first = fvp;
3388 lock_second = tvp;
3389 } else {
3390 lock_first = tvp;
3391 lock_second = fvp;
3392 }
3393 if ( (_err = lock_fsnode(lock_first, NULL)) )
3394 goto out1;
3395
3396 if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
3397 unlock_fsnode(lock_first, NULL);
3398 goto out1;
3399 }
3400 }
3401 /*
3402 * Save source and destination names (._ AppleDouble files).
3403 * Skip if source already has a "._" prefix.
3404 */
3405 if (!NATIVE_XATTR(fdvp) &&
3406 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
3407 size_t len;
3408
3409 /* Get source attribute file name. */
3410 len = fcnp->cn_namelen + 3;
3411 if (len > sizeof(smallname1)) {
3412 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
3413 } else {
3414 xfromname = &smallname1[0];
3415 }
3416 strcpy(xfromname, "._");
3417 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
3418 xfromname[len-1] = '\0';
3419
3420 /* Get destination attribute file name. */
3421 len = tcnp->cn_namelen + 3;
3422 if (len > sizeof(smallname2)) {
3423 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
3424 } else {
3425 xtoname = &smallname2[0];
3426 }
3427 strcpy(xtoname, "._");
3428 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
3429 xtoname[len-1] = '\0';
3430 }
3431
3432 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
3433
3434 if (fdvp_unsafe != NULLVP) {
3435 if (lock_second != NULL)
3436 unlock_fsnode(lock_second, NULL);
3437 unlock_fsnode(lock_first, NULL);
3438 }
3439 if (_err == 0) {
3440 if (tvp && tvp != fvp)
3441 vnode_setneedinactive(tvp);
3442 }
3443
3444 /*
3445 * Rename any associated extended attribute file (._ AppleDouble file).
3446 */
3447 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
3448 struct nameidata fromnd, tond;
3449 int killdest = 0;
3450 int error;
3451
3452 /*
3453 * Get source attribute file vnode.
3454 * Note that fdvp already has an iocount reference and
3455 * using DELETE will take an additional reference.
3456 */
3457 NDINIT(&fromnd, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
3458 CAST_USER_ADDR_T(xfromname), context);
3459 fromnd.ni_dvp = fdvp;
3460 error = namei(&fromnd);
3461
3462 if (error) {
3463 /* When source doesn't exist there still may be a destination. */
3464 if (error == ENOENT) {
3465 killdest = 1;
3466 } else {
3467 goto out;
3468 }
3469 } else if (fromnd.ni_vp->v_type != VREG) {
3470 vnode_put(fromnd.ni_vp);
3471 nameidone(&fromnd);
3472 killdest = 1;
3473 }
3474 if (killdest) {
3475 struct vnop_remove_args args;
3476
3477 /*
3478 * Get destination attribute file vnode.
3479 * Note that tdvp already has an iocount reference.
3480 */
3481 NDINIT(&tond, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
3482 CAST_USER_ADDR_T(xtoname), context);
3483 tond.ni_dvp = tdvp;
3484 error = namei(&tond);
3485 if (error) {
3486 goto out;
3487 }
3488 if (tond.ni_vp->v_type != VREG) {
3489 vnode_put(tond.ni_vp);
3490 nameidone(&tond);
3491 goto out;
3492 }
3493 args.a_desc = &vnop_remove_desc;
3494 args.a_dvp = tdvp;
3495 args.a_vp = tond.ni_vp;
3496 args.a_cnp = &tond.ni_cnd;
3497 args.a_context = context;
3498
3499 if (fdvp_unsafe != NULLVP)
3500 error = lock_fsnode(tond.ni_vp, NULL);
3501 if (error == 0) {
3502 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
3503
3504 if (fdvp_unsafe != NULLVP)
3505 unlock_fsnode(tond.ni_vp, NULL);
3506
3507 if (error == 0)
3508 vnode_setneedinactive(tond.ni_vp);
3509 }
3510 vnode_put(tond.ni_vp);
3511 nameidone(&tond);
3512 goto out;
3513 }
3514
3515 /*
3516 * Get destination attribute file vnode.
3517 */
3518 NDINIT(&tond, RENAME,
3519 NOCACHE | NOFOLLOW | USEDVP, UIO_SYSSPACE,
3520 CAST_USER_ADDR_T(xtoname), context);
3521 tond.ni_dvp = tdvp;
3522 error = namei(&tond);
3523
3524 if (error) {
3525 vnode_put(fromnd.ni_vp);
3526 nameidone(&fromnd);
3527 goto out;
3528 }
3529 a.a_desc = &vnop_rename_desc;
3530 a.a_fdvp = fdvp;
3531 a.a_fvp = fromnd.ni_vp;
3532 a.a_fcnp = &fromnd.ni_cnd;
3533 a.a_tdvp = tdvp;
3534 a.a_tvp = tond.ni_vp;
3535 a.a_tcnp = &tond.ni_cnd;
3536 a.a_context = context;
3537
3538 if (fdvp_unsafe != NULLVP) {
3539 /*
3540 * Lock in vnode address order to avoid deadlocks
3541 */
3542 if (tond.ni_vp == NULL || tond.ni_vp == fromnd.ni_vp) {
3543 lock_first = fromnd.ni_vp;
3544 lock_second = NULL;
3545 } else if (fromnd.ni_vp < tond.ni_vp) {
3546 lock_first = fromnd.ni_vp;
3547 lock_second = tond.ni_vp;
3548 } else {
3549 lock_first = tond.ni_vp;
3550 lock_second = fromnd.ni_vp;
3551 }
3552 if ( (error = lock_fsnode(lock_first, NULL)) == 0) {
3553 if (lock_second != NULL && (error = lock_fsnode(lock_second, NULL)) )
3554 unlock_fsnode(lock_first, NULL);
3555 }
3556 }
3557 if (error == 0) {
3558 error = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
3559
3560 if (fdvp_unsafe != NULLVP) {
3561 if (lock_second != NULL)
3562 unlock_fsnode(lock_second, NULL);
3563 unlock_fsnode(lock_first, NULL);
3564 }
3565 if (error == 0) {
3566 vnode_setneedinactive(fromnd.ni_vp);
3567
3568 if (tond.ni_vp && tond.ni_vp != fromnd.ni_vp)
3569 vnode_setneedinactive(tond.ni_vp);
3570 }
3571 }
3572 vnode_put(fromnd.ni_vp);
3573 if (tond.ni_vp) {
3574 vnode_put(tond.ni_vp);
3575 }
3576 nameidone(&tond);
3577 nameidone(&fromnd);
3578 }
3579 out:
3580 if (xfromname && xfromname != &smallname1[0]) {
3581 FREE(xfromname, M_TEMP);
3582 }
3583 if (xtoname && xtoname != &smallname2[0]) {
3584 FREE(xtoname, M_TEMP);
3585 }
3586 out1:
3587 if (fdvp_unsafe != NULLVP) {
3588 if (tdvp_unsafe != NULLVP)
3589 unlock_fsnode(tdvp_unsafe, NULL);
3590 unlock_fsnode(fdvp_unsafe, &funnel_state);
3591 }
3592 return (_err);
3593 }
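/*
 * The NDINIT()/namei() idiom used above for the AppleDouble siblings,
 * reduced to its skeleton.  USEDVP makes namei() resolve 'name' relative
 * to the supplied directory vnode instead of walking from the root.
 * Sketch only; 'name' and 'dvp' stand in for the real arguments.
 */
#if 0
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, NOFOLLOW | USEDVP, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(name), ctx);
	nd.ni_dvp = dvp;
	if (namei(&nd) == 0) {
		vnode_t xvp = nd.ni_vp;
		nameidone(&nd);
		/* ... use xvp, then drop the iocount namei took ... */
		vnode_put(xvp);
	}
#endif /* 0 */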
3594
3595 #if 0
3596 /*
3597 *#
3598 *#% mkdir dvp L U U
3599 *#% mkdir vpp - L -
3600 *#
3601 */
3602 struct vnop_mkdir_args {
3603 struct vnodeop_desc *a_desc;
3604 vnode_t a_dvp;
3605 vnode_t *a_vpp;
3606 struct componentname *a_cnp;
3607 struct vnode_attr *a_vap;
3608 vfs_context_t a_context;
3609 };
3610 #endif /* 0*/
3611 errno_t
3612 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
3613 struct vnode_attr *vap, vfs_context_t context)
3614 {
3615 int _err;
3616 struct vnop_mkdir_args a;
3617 int thread_safe;
3618 int funnel_state = 0;
3619
3620 a.a_desc = &vnop_mkdir_desc;
3621 a.a_dvp = dvp;
3622 a.a_vpp = vpp;
3623 a.a_cnp = cnp;
3624 a.a_vap = vap;
3625 a.a_context = context;
3626 thread_safe = THREAD_SAFE_FS(dvp);
3627
3628 if (!thread_safe) {
3629 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3630 return (_err);
3631 }
3632 }
3633 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
3634 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3635 /*
3636 * Remove stale Apple Double file (if any).
3637 */
3638 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
3639 }
3640 if (!thread_safe) {
3641 unlock_fsnode(dvp, &funnel_state);
3642 }
3643 return (_err);
3644 }
3645
3646
3647 #if 0
3648 /*
3649 *#
3650 *#% rmdir dvp L U U
3651 *#% rmdir vp L U U
3652 *#
3653 */
3654 struct vnop_rmdir_args {
3655 struct vnodeop_desc *a_desc;
3656 vnode_t a_dvp;
3657 vnode_t a_vp;
3658 struct componentname *a_cnp;
3659 vfs_context_t a_context;
3660 };
3661
3662 #endif /* 0*/
3663 errno_t
3664 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t context)
3665 {
3666 int _err;
3667 struct vnop_rmdir_args a;
3668 int thread_safe;
3669 int funnel_state = 0;
3670
3671 a.a_desc = &vnop_rmdir_desc;
3672 a.a_dvp = dvp;
3673 a.a_vp = vp;
3674 a.a_cnp = cnp;
3675 a.a_context = context;
3676 thread_safe = THREAD_SAFE_FS(dvp);
3677
3678 if (!thread_safe) {
3679 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3680 return (_err);
3681 }
3682 }
3683 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
3684
3685 if (_err == 0) {
3686 vnode_setneedinactive(vp);
3687
3688 if ( !(NATIVE_XATTR(dvp)) ) {
3689 /*
3690 * Remove any associated extended attribute file (._ AppleDouble file).
3691 */
3692 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
3693 }
3694 }
3695 if (!thread_safe) {
3696 unlock_fsnode(vp, &funnel_state);
3697 }
3698 return (_err);
3699 }
3700
3701 /*
3702 * Remove a ._ AppleDouble file
3703 */
3704 #define AD_STALE_SECS (180)
3705 static void
3706 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, int thread_safe, int force) {
3707 vnode_t xvp;
3708 struct nameidata nd;
3709 char smallname[64];
3710 char *filename = NULL;
3711 size_t len;
3712
3713 if ((basename == NULL) || (basename[0] == '\0') ||
3714 (basename[0] == '.' && basename[1] == '_')) {
3715 return;
3716 }
3717 filename = &smallname[0];
3718 len = snprintf(filename, sizeof(smallname), "._%s", basename);
3719 if (len >= sizeof(smallname)) {
3720 len++; /* snprintf result doesn't include '\0' */
3721 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
3722 len = snprintf(filename, len, "._%s", basename);
3723 }
3724 NDINIT(&nd, DELETE, LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
3725 CAST_USER_ADDR_T(filename), context);
3726 nd.ni_dvp = dvp;
3727 if (namei(&nd) != 0)
3728 goto out2;
3729
3730 xvp = nd.ni_vp;
3731 nameidone(&nd);
3732 if (xvp->v_type != VREG)
3733 goto out1;
3734
3735 /*
3736 * When creating a new object and a "._" file already
3737 * exists, check to see if it's a stale "._" file.
3738 *
3739 */
3740 if (!force) {
3741 struct vnode_attr va;
3742
3743 VATTR_INIT(&va);
3744 VATTR_WANTED(&va, va_data_size);
3745 VATTR_WANTED(&va, va_modify_time);
3746 if (VNOP_GETATTR(xvp, &va, context) == 0 &&
3747 VATTR_IS_SUPPORTED(&va, va_data_size) &&
3748 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
3749 va.va_data_size != 0) {
3750 struct timeval tv;
3751
3752 microtime(&tv);
3753 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
3754 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
3755 force = 1; /* must be stale */
3756 }
3757 }
3758 }
3759 if (force) {
3760 struct vnop_remove_args a;
3761 int error;
3762
3763 a.a_desc = &vnop_remove_desc;
3764 a.a_dvp = nd.ni_dvp;
3765 a.a_vp = xvp;
3766 a.a_cnp = &nd.ni_cnd;
3767 a.a_context = context;
3768
3769 if (!thread_safe) {
3770 if ( (lock_fsnode(xvp, NULL)) )
3771 goto out1;
3772 }
3773 error = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3774
3775 if (!thread_safe)
3776 unlock_fsnode(xvp, NULL);
3777
3778 if (error == 0)
3779 vnode_setneedinactive(xvp);
3780 }
3781 out1:
3782 /* Note: nd.ni_dvp's iocount is dropped by caller of VNOP_XXXX */
3783 vnode_put(xvp);
3784 out2:
3785 if (filename && filename != &smallname[0]) {
3786 FREE(filename, M_TEMP);
3787 }
3788 }
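/*
 * The "._<name>" construction shared by xattrfile_remove() and
 * xattrfile_setattr(), factored into a hypothetical helper (sketch
 * only).  Note that snprintf() returns the length that would have
 * been written, excluding the NUL, hence the len++ before the retry.
 */
#if 0
static char *
make_apple_double_name(const char *basename, char *smallbuf, size_t smallsize)
{
	char *filename = smallbuf;
	size_t len;

	len = snprintf(filename, smallsize, "._%s", basename);
	if (len >= smallsize) {
		len++;	/* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
		(void) snprintf(filename, len, "._%s", basename);
	}
	return (filename);	/* caller FREEs if != smallbuf */
}
#endif /* 0 */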
3789
3790 /*
3791 * Shadow uid/gid/mode to a ._ AppleDouble file
3792 */
3793 static void
3794 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
3795 vfs_context_t context, int thread_safe) {
3796 vnode_t xvp;
3797 struct nameidata nd;
3798 char smallname[64];
3799 char *filename = NULL;
3800 size_t len;
3801
3802 if ((dvp == NULLVP) ||
3803 (basename == NULL) || (basename[0] == '\0') ||
3804 (basename[0] == '.' && basename[1] == '_')) {
3805 return;
3806 }
3807 filename = &smallname[0];
3808 len = snprintf(filename, sizeof(smallname), "._%s", basename);
3809 if (len >= sizeof(smallname)) {
3810 len++; /* snprintf result doesn't include '\0' */
3811 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
3812 len = snprintf(filename, len, "._%s", basename);
3813 }
3814 NDINIT(&nd, LOOKUP, NOFOLLOW | USEDVP, UIO_SYSSPACE,
3815 CAST_USER_ADDR_T(filename), context);
3816 nd.ni_dvp = dvp;
3817 if (namei(&nd) != 0)
3818 goto out2;
3819
3820 xvp = nd.ni_vp;
3821 nameidone(&nd);
3822
3823 if (xvp->v_type == VREG) {
3824 struct vnop_setattr_args a;
3825
3826 a.a_desc = &vnop_setattr_desc;
3827 a.a_vp = xvp;
3828 a.a_vap = vap;
3829 a.a_context = context;
3830
3831 if (!thread_safe) {
3832 if ( (lock_fsnode(xvp, NULL)) )
3833 goto out1;
3834 }
3835 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3836 if (!thread_safe) {
3837 unlock_fsnode(xvp, NULL);
3838 }
3839 }
3840 out1:
3841 vnode_put(xvp);
3842 out2:
3843 if (filename && filename != &smallname[0]) {
3844 FREE(filename, M_TEMP);
3845 }
3846 }
3847
3848 #if 0
3849 /*
3850 *#
3851 *#% symlink dvp L U U
3852 *#% symlink vpp - U -
3853 *#
3854 */
3855 struct vnop_symlink_args {
3856 struct vnodeop_desc *a_desc;
3857 vnode_t a_dvp;
3858 vnode_t *a_vpp;
3859 struct componentname *a_cnp;
3860 struct vnode_attr *a_vap;
3861 char *a_target;
3862 vfs_context_t a_context;
3863 };
3864
3865 #endif /* 0*/
3866 errno_t
3867 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
3868 struct vnode_attr *vap, char *target, vfs_context_t context)
3869 {
3870 int _err;
3871 struct vnop_symlink_args a;
3872 int thread_safe;
3873 int funnel_state = 0;
3874
3875 a.a_desc = &vnop_symlink_desc;
3876 a.a_dvp = dvp;
3877 a.a_vpp = vpp;
3878 a.a_cnp = cnp;
3879 a.a_vap = vap;
3880 a.a_target = target;
3881 a.a_context = context;
3882 thread_safe = THREAD_SAFE_FS(dvp);
3883
3884 if (!thread_safe) {
3885 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3886 return (_err);
3887 }
3888 }
3889 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
3890 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3891 /*
3892 * Remove stale Apple Double file (if any).
3893 */
3894 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
3895 }
3896 if (!thread_safe) {
3897 unlock_fsnode(dvp, &funnel_state);
3898 }
3899 return (_err);
3900 }
3901
3902 #if 0
3903 /*
3904 *#
3905 *#% readdir vp L L L
3906 *#
3907 */
3908 struct vnop_readdir_args {
3909 struct vnodeop_desc *a_desc;
3910 vnode_t a_vp;
3911 struct uio *a_uio;
3912 int a_flags;
3913 int *a_eofflag;
3914 int *a_numdirent;
3915 vfs_context_t a_context;
3916 };
3917
3918 #endif /* 0*/
3919 errno_t
3920 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
3921 int *numdirent, vfs_context_t context)
3922 {
3923 int _err;
3924 struct vnop_readdir_args a;
3925 int thread_safe;
3926 int funnel_state = 0;
3927
3928 a.a_desc = &vnop_readdir_desc;
3929 a.a_vp = vp;
3930 a.a_uio = uio;
3931 a.a_flags = flags;
3932 a.a_eofflag = eofflag;
3933 a.a_numdirent = numdirent;
3934 a.a_context = context;
3935 thread_safe = THREAD_SAFE_FS(vp);
3936
3937 if (!thread_safe) {
3938 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3939 return (_err);
3940 }
3941 }
3942 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
3943 if (!thread_safe) {
3944 unlock_fsnode(vp, &funnel_state);
3945 }
3946 return (_err);
3947 }
3948
3949 #if 0
3950 /*
3951 *#
3952 *#% readdirattr vp L L L
3953 *#
3954 */
3955 struct vnop_readdirattr_args {
3956 struct vnodeop_desc *a_desc;
3957 vnode_t a_vp;
3958 struct attrlist *a_alist;
3959 struct uio *a_uio;
3960 u_long a_maxcount;
3961 u_long a_options;
3962 u_long *a_newstate;
3963 int *a_eofflag;
3964 u_long *a_actualcount;
3965 vfs_context_t a_context;
3966 };
3967
3968 #endif /* 0*/
3969 errno_t
3970 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_long maxcount,
3971 u_long options, u_long *newstate, int *eofflag, u_long *actualcount, vfs_context_t context)
3972 {
3973 int _err;
3974 struct vnop_readdirattr_args a;
3975 int thread_safe;
3976 int funnel_state = 0;
3977
3978 a.a_desc = &vnop_readdirattr_desc;
3979 a.a_vp = vp;
3980 a.a_alist = alist;
3981 a.a_uio = uio;
3982 a.a_maxcount = maxcount;
3983 a.a_options = options;
3984 a.a_newstate = newstate;
3985 a.a_eofflag = eofflag;
3986 a.a_actualcount = actualcount;
3987 a.a_context = context;
3988 thread_safe = THREAD_SAFE_FS(vp);
3989
3990 if (!thread_safe) {
3991 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3992 return (_err);
3993 }
3994 }
3995 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
3996 if (!thread_safe) {
3997 unlock_fsnode(vp, &funnel_state);
3998 }
3999 return (_err);
4000 }
4001
4002 #if 0
4003 /*
4004 *#
4005 *#% readlink vp L L L
4006 *#
4007 */
4008 struct vnop_readlink_args {
4009 struct vnodeop_desc *a_desc;
4010 vnode_t a_vp;
4011 struct uio *a_uio;
4012 vfs_context_t a_context;
4013 };
4014 #endif /* 0 */
4015
4016 errno_t
4017 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t context)
4018 {
4019 int _err;
4020 struct vnop_readlink_args a;
4021 int thread_safe;
4022 int funnel_state = 0;
4023
4024 a.a_desc = &vnop_readlink_desc;
4025 a.a_vp = vp;
4026 a.a_uio = uio;
4027 a.a_context = context;
4028 thread_safe = THREAD_SAFE_FS(vp);
4029
4030 if (!thread_safe) {
4031 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4032 return (_err);
4033 }
4034 }
4035 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
4036 if (!thread_safe) {
4037 unlock_fsnode(vp, &funnel_state);
4038 }
4039 return (_err);
4040 }
4041
4042 #if 0
4043 /*
4044 *#
4045 *#% inactive vp L U U
4046 *#
4047 */
4048 struct vnop_inactive_args {
4049 struct vnodeop_desc *a_desc;
4050 vnode_t a_vp;
4051 vfs_context_t a_context;
4052 };
4053 #endif /* 0*/
4054 errno_t
4055 VNOP_INACTIVE(struct vnode *vp, vfs_context_t context)
4056 {
4057 int _err;
4058 struct vnop_inactive_args a;
4059 int thread_safe;
4060 int funnel_state = 0;
4061
4062 a.a_desc = &vnop_inactive_desc;
4063 a.a_vp = vp;
4064 a.a_context = context;
4065 thread_safe = THREAD_SAFE_FS(vp);
4066
4067 if (!thread_safe) {
4068 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4069 return (_err);
4070 }
4071 }
4072 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
4073 if (!thread_safe) {
4074 unlock_fsnode(vp, &funnel_state);
4075 }
4076 return (_err);
4077 }
4078
4079
4080 #if 0
4081 /*
4082 *#
4083 *#% reclaim vp U U U
4084 *#
4085 */
4086 struct vnop_reclaim_args {
4087 struct vnodeop_desc *a_desc;
4088 vnode_t a_vp;
4089 vfs_context_t a_context;
4090 };
4091 #endif /* 0*/
4092 errno_t
4093 VNOP_RECLAIM(struct vnode *vp, vfs_context_t context)
4094 {
4095 int _err;
4096 struct vnop_reclaim_args a;
4097 int thread_safe;
4098 int funnel_state = 0;
4099
4100 a.a_desc = &vnop_reclaim_desc;
4101 a.a_vp = vp;
4102 a.a_context = context;
4103 thread_safe = THREAD_SAFE_FS(vp);
4104
4105 if (!thread_safe) {
4106 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4107 }
4108 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
4109 if (!thread_safe) {
4110 (void) thread_funnel_set(kernel_flock, funnel_state);
4111 }
4112 return (_err);
4113 }
4114
4115
4116 #if 0
4117 /*
4118 *#
4119 *#% pathconf vp L L L
4120 *#
4121 */
4122 struct vnop_pathconf_args {
4123 struct vnodeop_desc *a_desc;
4124 vnode_t a_vp;
4125 int a_name;
4126 register_t *a_retval;
4127 vfs_context_t a_context;
4128 };
4129 #endif /* 0*/
4130 errno_t
4131 VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t context)
4132 {
4133 int _err;
4134 struct vnop_pathconf_args a;
4135 int thread_safe;
4136 int funnel_state = 0;
4137
4138 a.a_desc = &vnop_pathconf_desc;
4139 a.a_vp = vp;
4140 a.a_name = name;
4141 a.a_retval = retval;
4142 a.a_context = context;
4143 thread_safe = THREAD_SAFE_FS(vp);
4144
4145 if (!thread_safe) {
4146 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4147 return (_err);
4148 }
4149 }
4150 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
4151 if (!thread_safe) {
4152 unlock_fsnode(vp, &funnel_state);
4153 }
4154 return (_err);
4155 }
4156
4157 #if 0
4158 /*
4159 *#
4160 *#% advlock vp U U U
4161 *#
4162 */
4163 struct vnop_advlock_args {
4164 struct vnodeop_desc *a_desc;
4165 vnode_t a_vp;
4166 caddr_t a_id;
4167 int a_op;
4168 struct flock *a_fl;
4169 int a_flags;
4170 vfs_context_t a_context;
4171 };
4172 #endif /* 0*/
4173 errno_t
4174 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t context)
4175 {
4176 int _err;
4177 struct vnop_advlock_args a;
4178 int thread_safe;
4179 int funnel_state = 0;
4180 struct uthread * uth;
4181
4182 a.a_desc = &vnop_advlock_desc;
4183 a.a_vp = vp;
4184 a.a_id = id;
4185 a.a_op = op;
4186 a.a_fl = fl;
4187 a.a_flags = flags;
4188 a.a_context = context;
4189 thread_safe = THREAD_SAFE_FS(vp);
4190
4191 uth = get_bsdthread_info(current_thread());
4192 if (!thread_safe) {
4193 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4194 }
4195 /* Disallow advisory locking on non-seekable vnodes */
4196 if (vnode_isfifo(vp)) {
4197 _err = err_advlock(&a);
4198 } else {
4199 if ((vp->v_flag & VLOCKLOCAL)) {
4200 /* Advisory locking done at this layer */
4201 _err = lf_advlock(&a);
4202 } else {
4203 /* Advisory locking done by underlying filesystem */
4204 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
4205 }
4206 }
4207 if (!thread_safe) {
4208 (void) thread_funnel_set(kernel_flock, funnel_state);
4209 }
4210 return (_err);
4211 }
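/*
 * A sketch of requesting a whole-file POSIX-style advisory write lock
 * through VNOP_ADVLOCK.  The helper name and the id/flags pairing are
 * illustrative assumptions (F_POSIX locks are keyed by process).
 */
#if 0
static int
lock_whole_file(vnode_t vp, struct proc *p, vfs_context_t ctx)
{
	struct flock fl;

	fl.l_whence = 0;	/* from start of file (SEEK_SET) */
	fl.l_start = 0;
	fl.l_len = 0;		/* zero length == lock to EOF */
	fl.l_type = F_WRLCK;
	fl.l_pid = proc_pid(p);

	return (VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, F_POSIX, ctx));
}
#endif /* 0 */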
4212
4213
4214
4215 #if 0
4216 /*
4217 *#
4218 *#% allocate vp L L L
4219 *#
4220 */
4221 struct vnop_allocate_args {
4222 struct vnodeop_desc *a_desc;
4223 vnode_t a_vp;
4224 off_t a_length;
4225 u_int32_t a_flags;
4226 off_t *a_bytesallocated;
4227 off_t a_offset;
4228 vfs_context_t a_context;
4229 };
4230
4231 #endif /* 0*/
4232 errno_t
4233 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t context)
4234 {
4235 int _err;
4236 struct vnop_allocate_args a;
4237 int thread_safe;
4238 int funnel_state = 0;
4239
4240 a.a_desc = &vnop_allocate_desc;
4241 a.a_vp = vp;
4242 a.a_length = length;
4243 a.a_flags = flags;
4244 a.a_bytesallocated = bytesallocated;
4245 a.a_offset = offset;
4246 a.a_context = context;
4247 thread_safe = THREAD_SAFE_FS(vp);
4248
4249 if (!thread_safe) {
4250 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4251 return (_err);
4252 }
4253 }
4254 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
4255 if (!thread_safe) {
4256 unlock_fsnode(vp, &funnel_state);
4257 }
4258 return (_err);
4259 }
4260
4261 #if 0
4262 /*
4263 *#
4264 *#% pagein vp = = =
4265 *#
4266 */
4267 struct vnop_pagein_args {
4268 struct vnodeop_desc *a_desc;
4269 vnode_t a_vp;
4270 upl_t a_pl;
4271 vm_offset_t a_pl_offset;
4272 off_t a_f_offset;
4273 size_t a_size;
4274 int a_flags;
4275 vfs_context_t a_context;
4276 };
4277 #endif /* 0*/
4278 errno_t
4279 VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4280 {
4281 int _err;
4282 struct vnop_pagein_args a;
4283 int thread_safe;
4284 int funnel_state = 0;
4285
4286 a.a_desc = &vnop_pagein_desc;
4287 a.a_vp = vp;
4288 a.a_pl = pl;
4289 a.a_pl_offset = pl_offset;
4290 a.a_f_offset = f_offset;
4291 a.a_size = size;
4292 a.a_flags = flags;
4293 a.a_context = context;
4294 thread_safe = THREAD_SAFE_FS(vp);
4295
4296 if (!thread_safe) {
4297 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4298 }
4299 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
4300 if (!thread_safe) {
4301 (void) thread_funnel_set(kernel_flock, funnel_state);
4302 }
4303 return (_err);
4304 }
4305
4306 #if 0
4307 /*
4308 *#
4309 *#% pageout vp = = =
4310 *#
4311 */
4312 struct vnop_pageout_args {
4313 struct vnodeop_desc *a_desc;
4314 vnode_t a_vp;
4315 upl_t a_pl;
4316 vm_offset_t a_pl_offset;
4317 off_t a_f_offset;
4318 size_t a_size;
4319 int a_flags;
4320 vfs_context_t a_context;
4321 };
4322
4323 #endif /* 0*/
4324 errno_t
4325 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4326 {
4327 int _err;
4328 struct vnop_pageout_args a;
4329 int thread_safe;
4330 int funnel_state = 0;
4331
4332 a.a_desc = &vnop_pageout_desc;
4333 a.a_vp = vp;
4334 a.a_pl = pl;
4335 a.a_pl_offset = pl_offset;
4336 a.a_f_offset = f_offset;
4337 a.a_size = size;
4338 a.a_flags = flags;
4339 a.a_context = context;
4340 thread_safe = THREAD_SAFE_FS(vp);
4341
4342 if (!thread_safe) {
4343 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4344 }
4345 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
4346 if (!thread_safe) {
4347 (void) thread_funnel_set(kernel_flock, funnel_state);
4348 }
4349 return (_err);
4350 }
4351
4352
4353 #if 0
4354 /*
4355 *#
4356 *#% searchfs vp L L L
4357 *#
4358 */
4359 struct vnop_searchfs_args {
4360 struct vnodeop_desc *a_desc;
4361 vnode_t a_vp;
4362 void *a_searchparams1;
4363 void *a_searchparams2;
4364 struct attrlist *a_searchattrs;
4365 u_long a_maxmatches;
4366 struct timeval *a_timelimit;
4367 struct attrlist *a_returnattrs;
4368 u_long *a_nummatches;
4369 u_long a_scriptcode;
4370 u_long a_options;
4371 struct uio *a_uio;
4372 struct searchstate *a_searchstate;
4373 vfs_context_t a_context;
4374 };
4375
4376 #endif /* 0*/
4377 errno_t
4378 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, u_long maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, u_long *nummatches, u_long scriptcode, u_long options, struct uio *uio, struct searchstate *searchstate, vfs_context_t context)
4379 {
4380 int _err;
4381 struct vnop_searchfs_args a;
4382 int thread_safe;
4383 int funnel_state = 0;
4384
4385 a.a_desc = &vnop_searchfs_desc;
4386 a.a_vp = vp;
4387 a.a_searchparams1 = searchparams1;
4388 a.a_searchparams2 = searchparams2;
4389 a.a_searchattrs = searchattrs;
4390 a.a_maxmatches = maxmatches;
4391 a.a_timelimit = timelimit;
4392 a.a_returnattrs = returnattrs;
4393 a.a_nummatches = nummatches;
4394 a.a_scriptcode = scriptcode;
4395 a.a_options = options;
4396 a.a_uio = uio;
4397 a.a_searchstate = searchstate;
4398 a.a_context = context;
4399 thread_safe = THREAD_SAFE_FS(vp);
4400
4401 if (!thread_safe) {
4402 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4403 return (_err);
4404 }
4405 }
4406 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
4407 if (!thread_safe) {
4408 unlock_fsnode(vp, &funnel_state);
4409 }
4410 return (_err);
4411 }
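/*
 * Hypothetical sketch of a VNOP_SEARCHFS call, shaped after the
 * searchfs(2) service path.  The packed search-parameter buffers
 * (params1/params2) are taken as opaque inputs here because their
 * layout is attribute-dependent; SRCHFS_START, SRCHFS_MATCHFILES and
 * ATTR_CMN_NAME are real flags from <sys/attr.h>.  "state" must be
 * preserved across continued (non-SRCHFS_START) calls.
 */
static int
searchfs_example(vnode_t vp, void *params1, void *params2,
    struct uio *resultuio, struct searchstate *state, vfs_context_t ctx)
{
	struct attrlist searchattrs, returnattrs;
	struct timeval timelimit = { 1, 0 };	/* bail out after ~1 second */
	u_long nummatches = 0;

	bzero(&searchattrs, sizeof(searchattrs));
	searchattrs.bitmapcount = ATTR_BIT_MAP_COUNT;
	searchattrs.commonattr = ATTR_CMN_NAME;	/* match on name only */
	returnattrs = searchattrs;		/* and return the name */

	return (VNOP_SEARCHFS(vp, params1, params2, &searchattrs,
	    100 /* maxmatches */, &timelimit, &returnattrs, &nummatches,
	    0 /* scriptcode */, SRCHFS_START | SRCHFS_MATCHFILES,
	    resultuio, state, ctx));
}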
4412
4413 #if 0
4414 /*
4415 *#
4416 *#% copyfile fvp U U U
4417 *#% copyfile tdvp L U U
4418 *#% copyfile tvp X U U
4419 *#
4420 */
4421 struct vnop_copyfile_args {
4422 struct vnodeop_desc *a_desc;
4423 vnode_t a_fvp;
4424 vnode_t a_tdvp;
4425 vnode_t a_tvp;
4426 struct componentname *a_tcnp;
4427 int a_mode;
4428 int a_flags;
4429 vfs_context_t a_context;
4430 };
4431 #endif /* 0*/
4432 errno_t
4433 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4434 int mode, int flags, vfs_context_t context)
4435 {
4436 int _err;
4437 struct vnop_copyfile_args a;
4438 a.a_desc = &vnop_copyfile_desc;
4439 a.a_fvp = fvp;
4440 a.a_tdvp = tdvp;
4441 a.a_tvp = tvp;
4442 a.a_tcnp = tcnp;
4443 a.a_mode = mode;
4444 a.a_flags = flags;
4445 a.a_context = context;
4446 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
4447 return (_err);
4448 }
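/*
 * Hypothetical sketch: VNOP_COPYFILE backs the (historically
 * HFS-centric) copyfile(2) call.  Note that, unlike its neighbors, the
 * wrapper above dispatches without any funnel or fsnode locking.
 * Assumes the caller holds iocounts on all vnodes and has built the
 * target componentname; mode/flags semantics are filesystem-defined.
 */
static int
copyfile_example(vnode_t fvp, vnode_t tdvp, vnode_t tvp,
    struct componentname *tcnp, vfs_context_t ctx)
{
	return (VNOP_COPYFILE(fvp, tdvp, tvp, tcnp, 0 /* mode */,
	    0 /* flags */, ctx));
}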
4449
4450
4451 errno_t
4452 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t context)
4453 {
4454 struct vnop_getxattr_args a;
4455 int error;
4456 int thread_safe;
4457 int funnel_state = 0;
4458
4459 a.a_desc = &vnop_getxattr_desc;
4460 a.a_vp = vp;
4461 a.a_name = name;
4462 a.a_uio = uio;
4463 a.a_size = size;
4464 a.a_options = options;
4465 a.a_context = context;
4466
4467 thread_safe = THREAD_SAFE_FS(vp);
4468 if (!thread_safe) {
4469 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4470 return (error);
4471 }
4472 }
4473 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
4474 if (!thread_safe) {
4475 unlock_fsnode(vp, &funnel_state);
4476 }
4477 return (error);
4478 }
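/*
 * Hypothetical usage sketch: reading an extended attribute into a
 * kernel buffer with the uio KPIs.  By convention a NULL uio asks the
 * filesystem to report only the attribute's size through *size, which
 * is how getxattr(2) implements its size probe.
 */
static int
getxattr_example(vnode_t vp, const char *name, void *buf, size_t buflen,
    size_t *attrsize, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	/* size probe: NULL uio, size returned in *attrsize */
	error = VNOP_GETXATTR(vp, name, NULL, attrsize, 0, ctx);
	if (error)
		return (error);
	if (*attrsize > buflen)
		return (ERANGE);

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);
	error = VNOP_GETXATTR(vp, name, auio, attrsize, 0, ctx);
	if (error == 0)
		*attrsize = buflen - uio_resid(auio);	/* bytes copied */
	uio_free(auio);
	return (error);
}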
4479
4480 errno_t
4481 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t context)
4482 {
4483 struct vnop_setxattr_args a;
4484 int error;
4485 int thread_safe;
4486 int funnel_state = 0;
4487
4488 a.a_desc = &vnop_setxattr_desc;
4489 a.a_vp = vp;
4490 a.a_name = name;
4491 a.a_uio = uio;
4492 a.a_options = options;
4493 a.a_context = context;
4494
4495 thread_safe = THREAD_SAFE_FS(vp);
4496 if (!thread_safe) {
4497 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4498 return (error);
4499 }
4500 }
4501 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
4502 if (!thread_safe) {
4503 unlock_fsnode(vp, &funnel_state);
4504 }
4505 return (error);
4506 }
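/*
 * Hypothetical companion sketch: writing an extended attribute from a
 * kernel buffer.  XATTR_CREATE and XATTR_REPLACE (<sys/xattr.h>) give
 * create-only or replace-only semantics; 0 means create-or-replace.
 */
static int
setxattr_example(vnode_t vp, const char *name, void *buf, size_t buflen,
    vfs_context_t ctx)
{
	uio_t auio;
	int error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_WRITE);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);
	/* XATTR_CREATE: fail with EEXIST if the attribute already exists */
	error = VNOP_SETXATTR(vp, name, auio, XATTR_CREATE, ctx);
	uio_free(auio);
	return (error);
}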
4507
4508 errno_t
4509 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t context)
4510 {
4511 struct vnop_removexattr_args a;
4512 int error;
4513 int thread_safe;
4514 int funnel_state = 0;
4515
4516 a.a_desc = &vnop_removexattr_desc;
4517 a.a_vp = vp;
4518 a.a_name = name;
4519 a.a_options = options;
4520 a.a_context = context;
4521
4522 thread_safe = THREAD_SAFE_FS(vp);
4523 if (!thread_safe) {
4524 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4525 return (error);
4526 }
4527 }
4528 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
4529 if (!thread_safe) {
4530 unlock_fsnode(vp, &funnel_state);
4531 }
4532 return (error);
4533 }
4534
4535 errno_t
4536 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t context)
4537 {
4538 struct vnop_listxattr_args a;
4539 int error;
4540 int thread_safe;
4541 int funnel_state = 0;
4542
4543 a.a_desc = &vnop_listxattr_desc;
4544 a.a_vp = vp;
4545 a.a_uio = uio;
4546 a.a_size = size;
4547 a.a_options = options;
4548 a.a_context = context;
4549
4550 thread_safe = THREAD_SAFE_FS(vp);
4551 if (!thread_safe) {
4552 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4553 return (error);
4554 }
4555 }
4556 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
4557 if (!thread_safe) {
4558 unlock_fsnode(vp, &funnel_state);
4559 }
4560 return (error);
4561 }
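/*
 * Hypothetical sketch covering the remaining two xattr operations:
 * VNOP_LISTXATTR fills the uio with a packed run of NUL-terminated
 * attribute names (or, given a NULL uio, just reports the space needed
 * in *size), and individual names can then be handed to
 * VNOP_REMOVEXATTR.  This example deletes the first listed attribute.
 */
static int
remove_first_xattr_example(vnode_t vp, char *namebuf, size_t buflen,
    vfs_context_t ctx)
{
	uio_t auio;
	size_t used = 0;
	int error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return (ENOMEM);
	uio_addiov(auio, CAST_USER_ADDR_T(namebuf), buflen);
	error = VNOP_LISTXATTR(vp, auio, &used, 0, ctx);
	if (error == 0)
		used = buflen - uio_resid(auio);
	uio_free(auio);
	if (error || used == 0)
		return (error);

	/* namebuf now starts with the first NUL-terminated name */
	return (VNOP_REMOVEXATTR(vp, namebuf, 0, ctx));
}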
4562
4563
4564 #if 0
4565 /*
4566 *#
4567 *#% blktooff vp = = =
4568 *#
4569 */
4570 struct vnop_blktooff_args {
4571 struct vnodeop_desc *a_desc;
4572 vnode_t a_vp;
4573 daddr64_t a_lblkno;
4574 off_t *a_offset;
4575 };
4576 #endif /* 0*/
4577 errno_t
4578 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
4579 {
4580 int _err;
4581 struct vnop_blktooff_args a;
4582 int thread_safe;
4583 int funnel_state = 0;
4584
4585 a.a_desc = &vnop_blktooff_desc;
4586 a.a_vp = vp;
4587 a.a_lblkno = lblkno;
4588 a.a_offset = offset;
4589 thread_safe = THREAD_SAFE_FS(vp);
4590
4591 if (!thread_safe) {
4592 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4593 }
4594 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
4595 if (!thread_safe) {
4596 (void) thread_funnel_set(kernel_flock, funnel_state);
4597 }
4598 return (_err);
4599 }
4600
4601 #if 0
4602 /*
4603 *#
4604 *#% offtoblk vp = = =
4605 *#
4606 */
4607 struct vnop_offtoblk_args {
4608 struct vnodeop_desc *a_desc;
4609 vnode_t a_vp;
4610 off_t a_offset;
4611 daddr64_t *a_lblkno;
4612 };
4613 #endif /* 0*/
4614 errno_t
4615 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
4616 {
4617 int _err;
4618 struct vnop_offtoblk_args a;
4619 int thread_safe;
4620 int funnel_state = 0;
4621
4622 a.a_desc = &vnop_offtoblk_desc;
4623 a.a_vp = vp;
4624 a.a_offset = offset;
4625 a.a_lblkno = lblkno;
4626 thread_safe = THREAD_SAFE_FS(vp);
4627
4628 if (!thread_safe) {
4629 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4630 }
4631 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
4632 if (!thread_safe) {
4633 (void) thread_funnel_set(kernel_flock, funnel_state);
4634 }
4635 return (_err);
4636 }
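/*
 * Hypothetical sketch: the two conversions above are inverses keyed to
 * the filesystem's logical block size, so chaining VNOP_OFFTOBLK and
 * VNOP_BLKTOOFF rounds an arbitrary file offset down to the start of
 * its logical block.
 */
static int
round_to_block_example(vnode_t vp, off_t offset, off_t *blockstart)
{
	daddr64_t lblkno;
	int error;

	if ((error = VNOP_OFFTOBLK(vp, offset, &lblkno)))
		return (error);
	return (VNOP_BLKTOOFF(vp, lblkno, blockstart));
}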
4637
4638 #if 0
4639 /*
4640 *#
4641 *#% blockmap vp L L L
4642 *#
4643 */
4644 struct vnop_blockmap_args {
4645 struct vnodeop_desc *a_desc;
4646 vnode_t a_vp;
4647 off_t a_foffset;
4648 size_t a_size;
4649 daddr64_t *a_bpn;
4650 size_t *a_run;
4651 void *a_poff;
4652 int a_flags;
4653 vfs_context_t a_context;
4654 };
4655 #endif /* 0*/
4656 errno_t
4657 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t context)
4658 {
4659 int _err;
4660 struct vnop_blockmap_args a;
4661 int thread_safe;
4662 int funnel_state = 0;
4663 struct vfs_context acontext;
4664
4665 if (context == NULL) {
4666 acontext.vc_proc = current_proc();
4667 acontext.vc_ucred = kauth_cred_get();
4668 context = &acontext;
4669 }
4670 a.a_desc = &vnop_blockmap_desc;
4671 a.a_vp = vp;
4672 a.a_foffset = foffset;
4673 a.a_size = size;
4674 a.a_bpn = bpn;
4675 a.a_run = run;
4676 a.a_poff = poff;
4677 a.a_flags = flags;
4678 a.a_context = context;
4679 thread_safe = THREAD_SAFE_FS(vp);
4680
4681 if (!thread_safe) {
4682 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4683 }
4684 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
4685 if (!thread_safe) {
4686 (void) thread_funnel_set(kernel_flock, funnel_state);
4687 }
4688 return (_err);
4689 }
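/*
 * Hypothetical sketch: translating a file offset to its on-disk block,
 * the way the cluster layer builds device I/O from this operation.
 * The wrapper above is unusual in synthesizing a vfs_context from
 * current_proc()/kauth_cred_get() when none is supplied, so passing a
 * NULL context is legal.  A returned block number of -1 conventionally
 * marks a hole in a sparse file; *contig reports how many contiguous
 * bytes follow.
 */
static int
blockmap_example(vnode_t vp, off_t foffset, size_t size, daddr64_t *dblkno,
    size_t *contig)
{
	/* poff is historically unused; flags 0 for an ordinary lookup */
	return (VNOP_BLOCKMAP(vp, foffset, size, dblkno, contig, NULL, 0, NULL));
}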
4690
4691 #if 0
4692 struct vnop_strategy_args {
4693 struct vnodeop_desc *a_desc;
4694 struct buf *a_bp;
4695 };
4696
4697 #endif /* 0*/
4698 errno_t
4699 VNOP_STRATEGY(struct buf *bp)
4700 {
4701 int _err;
4702 struct vnop_strategy_args a;
4703 a.a_desc = &vnop_strategy_desc;
4704 a.a_bp = bp;
4705 _err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
4706 return (_err);
4707 }
4708
4709 #if 0
4710 struct vnop_bwrite_args {
4711 struct vnodeop_desc *a_desc;
4712 buf_t a_bp;
4713 };
4714 #endif /* 0*/
4715 errno_t
4716 VNOP_BWRITE(struct buf *bp)
4717 {
4718 int _err;
4719 struct vnop_bwrite_args a;
4720 a.a_desc = &vnop_bwrite_desc;
4721 a.a_bp = bp;
4722 _err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
4723 return (_err);
4724 }
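/*
 * Hypothetical sketch for the two buffer operations above: a metadata
 * read issued through buf_meta_bread() (<sys/buf.h>) reaches the
 * filesystem via VNOP_STRATEGY, while pushing the modified buffer back
 * through VNOP_BWRITE lets filesystems with a bwrite hook (journaled
 * ones, for instance) intercept the write.
 */
static int
metadata_update_example(vnode_t vp, daddr64_t blkno, int blksize,
    vfs_context_t ctx)
{
	buf_t bp;
	int error;

	/* buf_meta_bread() ends up calling VNOP_STRATEGY(bp) to fill bp */
	error = buf_meta_bread(vp, blkno, blksize, vfs_context_ucred(ctx), &bp);
	if (error) {
		buf_brelse(bp);
		return (error);
	}
	/* ... modify the block contents via buf_dataptr(bp) ... */

	/* synchronous write-back; VNOP_BWRITE releases the buffer */
	return (VNOP_BWRITE(bp));
}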
4725
4726 #if 0
4727 struct vnop_kqfilt_add_args {
4728 struct vnodeop_desc *a_desc;
4729 struct vnode *a_vp;
4730 struct knote *a_kn;
4731 vfs_context_t a_context;
4732 };
4733 #endif
4734 errno_t
4735 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t context)
4736 {
4737 int _err;
4738 struct vnop_kqfilt_add_args a;
4739 int thread_safe;
4740 int funnel_state = 0;
4741
4742 a.a_desc = VDESC(vnop_kqfilt_add);
4743 a.a_vp = vp;
4744 a.a_kn = kn;
4745 a.a_context = context;
4746 thread_safe = THREAD_SAFE_FS(vp);
4747
4748 if (!thread_safe) {
4749 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4750 return (_err);
4751 }
4752 }
4753 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
4754 if (!thread_safe) {
4755 unlock_fsnode(vp, &funnel_state);
4756 }
4757 	return (_err);
4758 }
4759
4760 #if 0
4761 struct vnop_kqfilt_remove_args {
4762 struct vnodeop_desc *a_desc;
4763 struct vnode *a_vp;
4764 uintptr_t a_ident;
4765 vfs_context_t a_context;
4766 };
4767 #endif
4768 errno_t
4769 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t context)
4770 {
4771 int _err;
4772 struct vnop_kqfilt_remove_args a;
4773 int thread_safe;
4774 int funnel_state = 0;
4775
4776 a.a_desc = VDESC(vnop_kqfilt_remove);
4777 a.a_vp = vp;
4778 a.a_ident = ident;
4779 a.a_context = context;
4780 thread_safe = THREAD_SAFE_FS(vp);
4781
4782 if (!thread_safe) {
4783 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4784 return (_err);
4785 }
4786 }
4787 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
4788 if (!thread_safe) {
4789 unlock_fsnode(vp, &funnel_state);
4790 }
4791 	return (_err);
4792 }
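/*
 * Hypothetical sketch for the two kqueue-filter hooks above: the vnode
 * kevent path attaches a knote with VNOP_KQFILT_ADD and later detaches
 * it by ident with VNOP_KQFILT_REMOVE (kn_id is the knote's ident from
 * <sys/event.h>).  Filesystems without kqueue support typically return
 * ENOTSUP from both.
 */
static int
kqfilt_example(vnode_t vp, struct knote *kn, vfs_context_t ctx)
{
	int error;

	if ((error = VNOP_KQFILT_ADD(vp, kn, ctx)))
		return (error);
	/* ... events are delivered through the attached filter ... */
	return (VNOP_KQFILT_REMOVE(vp, (uintptr_t)kn->kn_id, ctx));
}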
4793