1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
31 /*
32 * Copyright (c) 1989, 1993
33 * The Regents of the University of California. All rights reserved.
34 * (c) UNIX System Laboratories, Inc.
35 * All or some portions of this file are derived from material licensed
36 * to the University of California by American Telephone and Telegraph
37 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
38 * the permission of UNIX System Laboratories, Inc.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
68 * @(#)kpi_vfs.c
69 */
70
71 /*
72 * External virtual filesystem routines
73 */
74
75 #undef DIAGNOSTIC
76 #define DIAGNOSTIC 1
77
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/proc_internal.h>
81 #include <sys/kauth.h>
82 #include <sys/mount.h>
83 #include <sys/mount_internal.h>
84 #include <sys/time.h>
85 #include <sys/vnode_internal.h>
86 #include <sys/stat.h>
87 #include <sys/namei.h>
88 #include <sys/ucred.h>
89 #include <sys/buf.h>
90 #include <sys/errno.h>
91 #include <sys/malloc.h>
92 #include <sys/domain.h>
93 #include <sys/mbuf.h>
94 #include <sys/syslog.h>
95 #include <sys/ubc.h>
96 #include <sys/vm.h>
97 #include <sys/sysctl.h>
98 #include <sys/filedesc.h>
99 #include <sys/fsevents.h>
100 #include <sys/user.h>
101 #include <sys/lockf.h>
102 #include <sys/xattr.h>
103
104 #include <kern/assert.h>
105 #include <kern/kalloc.h>
106
107 #include <libkern/OSByteOrder.h>
108
109 #include <miscfs/specfs/specdev.h>
110
111 #include <mach/mach_types.h>
112 #include <mach/memory_object_types.h>
113
114 #define ESUCCESS 0
115 #undef mount_t
116 #undef vnode_t
117
118 #define COMPAT_ONLY
119
120
121 #define THREAD_SAFE_FS(VP) \
122 ((VP)->v_unsafefs ? 0 : 1)
123
124 #define NATIVE_XATTR(VP) \
125 ((VP)->v_mount ? (VP)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR : 0)
126
127 static void xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context,
128 int thread_safe, int force);
129 static void xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
130 vfs_context_t context, int thread_safe);
131
132
133 static void
134 vnode_setneedinactive(vnode_t vp)
135 {
136 cache_purge(vp);
137
138 vnode_lock(vp);
139 vp->v_lflag |= VL_NEEDINACTIVE;
140 vnode_unlock(vp);
141 }
142
143
144 int
145 lock_fsnode(vnode_t vp, int *funnel_state)
146 {
147 if (funnel_state)
148 *funnel_state = thread_funnel_set(kernel_flock, TRUE);
149
150 if (vp->v_unsafefs) {
151 if (vp->v_unsafefs->fsnodeowner == current_thread()) {
152 vp->v_unsafefs->fsnode_count++;
153 } else {
154 lck_mtx_lock(&vp->v_unsafefs->fsnodelock);
155
156 if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
157 lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
158
159 if (funnel_state)
160 (void) thread_funnel_set(kernel_flock, *funnel_state);
161 return (ENOENT);
162 }
163 vp->v_unsafefs->fsnodeowner = current_thread();
164 vp->v_unsafefs->fsnode_count = 1;
165 }
166 }
167 return (0);
168 }
169
170
171 void
172 unlock_fsnode(vnode_t vp, int *funnel_state)
173 {
174 if (vp->v_unsafefs) {
175 if (--vp->v_unsafefs->fsnode_count == 0) {
176 vp->v_unsafefs->fsnodeowner = NULL;
177 lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
178 }
179 }
180 if (funnel_state)
181 (void) thread_funnel_set(kernel_flock, *funnel_state);
182 }
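
/*
 * Illustrative sketch (not part of the original file): how a wrapper in
 * this file typically brackets a call into a non-thread-safe filesystem
 * with lock_fsnode()/unlock_fsnode(), using the THREAD_SAFE_FS() macro
 * defined above.  The dispatched operation itself is schematic.
 */
#if 0
static int
example_unsafe_fs_call(vnode_t vp, vfs_context_t ctx)
{
	int error;
	int funnel_state = 0;
	int thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		/*
		 * Take the funnel and the per-FS node lock; this can fail
		 * with ENOENT if the vnode is being terminated.
		 */
		if ((error = lock_fsnode(vp, &funnel_state)) != 0)
			return (error);
	}
	/* ... call directly into the filesystem here ... */
	error = 0;
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (error);
}
#endif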
183
184
185
186 /* ====================================================================== */
187 /* ************ EXTERNAL KERNEL APIS ********************************** */
188 /* ====================================================================== */
189
190 /*
191 * prototypes for exported VFS operations
192 */
193 int
194 VFS_MOUNT(struct mount * mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
195 {
196 int error;
197 int thread_safe;
198 int funnel_state = 0;
199
200 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
201 return(ENOTSUP);
202
203 thread_safe = mp->mnt_vtable->vfc_threadsafe;
204
205
206 if (!thread_safe) {
207 funnel_state = thread_funnel_set(kernel_flock, TRUE);
208 }
209
210 if (vfs_context_is64bit(context)) {
211 if (vfs_64bitready(mp)) {
212 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
213 }
214 else {
215 error = ENOTSUP;
216 }
217 }
218 else {
219 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
220 }
221
222 if (!thread_safe) {
223 (void) thread_funnel_set(kernel_flock, funnel_state);
224 }
225 return (error);
226 }
227
228 int
229 VFS_START(struct mount * mp, int flags, vfs_context_t context)
230 {
231 int error;
232 int thread_safe;
233 int funnel_state = 0;
234
235 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
236 return(ENOTSUP);
237
238 thread_safe = mp->mnt_vtable->vfc_threadsafe;
239
240 if (!thread_safe) {
241 funnel_state = thread_funnel_set(kernel_flock, TRUE);
242 }
243 error = (*mp->mnt_op->vfs_start)(mp, flags, context);
244 if (!thread_safe) {
245 (void) thread_funnel_set(kernel_flock, funnel_state);
246 }
247 return (error);
248 }
249
250 int
251 VFS_UNMOUNT(struct mount *mp, int flags, vfs_context_t context)
252 {
253 int error;
254 int thread_safe;
255 int funnel_state = 0;
256
257 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
258 return(ENOTSUP);
259
260 thread_safe = mp->mnt_vtable->vfc_threadsafe;
261
262 if (!thread_safe) {
263 funnel_state = thread_funnel_set(kernel_flock, TRUE);
264 }
265 error = (*mp->mnt_op->vfs_unmount)(mp, flags, context);
266 if (!thread_safe) {
267 (void) thread_funnel_set(kernel_flock, funnel_state);
268 }
269 return (error);
270 }
271
272 int
273 VFS_ROOT(struct mount * mp, struct vnode ** vpp, vfs_context_t context)
274 {
275 int error;
276 int thread_safe;
277 int funnel_state = 0;
278 struct vfs_context acontext;
279
280 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
281 return(ENOTSUP);
282
283 if (context == NULL) {
284 acontext.vc_proc = current_proc();
285 acontext.vc_ucred = kauth_cred_get();
286 context = &acontext;
287 }
288 thread_safe = mp->mnt_vtable->vfc_threadsafe;
289
290 if (!thread_safe) {
291 funnel_state = thread_funnel_set(kernel_flock, TRUE);
292 }
293 error = (*mp->mnt_op->vfs_root)(mp, vpp, context);
294 if (!thread_safe) {
295 (void) thread_funnel_set(kernel_flock, funnel_state);
296 }
297 return (error);
298 }
299
300 int
301 VFS_QUOTACTL(struct mount *mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t context)
302 {
303 int error;
304 int thread_safe;
305 int funnel_state = 0;
306
307 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
308 return(ENOTSUP);
309
310 thread_safe = mp->mnt_vtable->vfc_threadsafe;
311
312 if (!thread_safe) {
313 funnel_state = thread_funnel_set(kernel_flock, TRUE);
314 }
315 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, context);
316 if (!thread_safe) {
317 (void) thread_funnel_set(kernel_flock, funnel_state);
318 }
319 return (error);
320 }
321
322 int
323 VFS_GETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
324 {
325 int error;
326 int thread_safe;
327 int funnel_state = 0;
328 struct vfs_context acontext;
329
330 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
331 return(ENOTSUP);
332
333 if (context == NULL) {
334 acontext.vc_proc = current_proc();
335 acontext.vc_ucred = kauth_cred_get();
336 context = &acontext;
337 }
338 thread_safe = mp->mnt_vtable->vfc_threadsafe;
339
340 if (!thread_safe) {
341 funnel_state = thread_funnel_set(kernel_flock, TRUE);
342 }
343 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, context);
344 if (!thread_safe) {
345 (void) thread_funnel_set(kernel_flock, funnel_state);
346 }
347 return(error);
348 }
349
350 int
351 VFS_SETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
352 {
353 int error;
354 int thread_safe;
355 int funnel_state = 0;
356 struct vfs_context acontext;
357
358 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
359 return(ENOTSUP);
360
361 if (context == NULL) {
362 acontext.vc_proc = current_proc();
363 acontext.vc_ucred = kauth_cred_get();
364 context = &acontext;
365 }
366 thread_safe = mp->mnt_vtable->vfc_threadsafe;
367
368 if (!thread_safe) {
369 funnel_state = thread_funnel_set(kernel_flock, TRUE);
370 }
371 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, context);
372 if (!thread_safe) {
373 (void) thread_funnel_set(kernel_flock, funnel_state);
374 }
375 return(error);
376 }
377
378 int
379 VFS_SYNC(struct mount *mp, int flags, vfs_context_t context)
380 {
381 int error;
382 int thread_safe;
383 int funnel_state = 0;
384 struct vfs_context acontext;
385
386 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
387 return(ENOTSUP);
388
389 if (context == NULL) {
390 acontext.vc_proc = current_proc();
391 acontext.vc_ucred = kauth_cred_get();
392 context = &acontext;
393 }
394 thread_safe = mp->mnt_vtable->vfc_threadsafe;
395
396 if (!thread_safe) {
397 funnel_state = thread_funnel_set(kernel_flock, TRUE);
398 }
399 error = (*mp->mnt_op->vfs_sync)(mp, flags, context);
400 if (!thread_safe) {
401 (void) thread_funnel_set(kernel_flock, funnel_state);
402 }
403 return(error);
404 }
405
406 int
407 VFS_VGET(struct mount * mp, ino64_t ino, struct vnode **vpp, vfs_context_t context)
408 {
409 int error;
410 int thread_safe;
411 int funnel_state = 0;
412 struct vfs_context acontext;
413
414 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
415 return(ENOTSUP);
416
417 if (context == NULL) {
418 acontext.vc_proc = current_proc();
419 acontext.vc_ucred = kauth_cred_get();
420 context = &acontext;
421 }
422 thread_safe = mp->mnt_vtable->vfc_threadsafe;
423
424 if (!thread_safe) {
425 funnel_state = thread_funnel_set(kernel_flock, TRUE);
426 }
427 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, context);
428 if (!thread_safe) {
429 (void) thread_funnel_set(kernel_flock, funnel_state);
430 }
431 return(error);
432 }
433
434 int
435 VFS_FHTOVP(struct mount * mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t context)
436 {
437 int error;
438 int thread_safe;
439 int funnel_state = 0;
440 struct vfs_context acontext;
441
442 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
443 return(ENOTSUP);
444
445 if (context == NULL) {
446 acontext.vc_proc = current_proc();
447 acontext.vc_ucred = kauth_cred_get();
448 context = &acontext;
449 }
450 thread_safe = mp->mnt_vtable->vfc_threadsafe;
451
452 if (!thread_safe) {
453 funnel_state = thread_funnel_set(kernel_flock, TRUE);
454 }
455 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, context);
456 if (!thread_safe) {
457 (void) thread_funnel_set(kernel_flock, funnel_state);
458 }
459 return(error);
460 }
461
462 int
463 VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t context)
464 {
465 int error;
466 int thread_safe;
467 int funnel_state = 0;
468 struct vfs_context acontext;
469
470 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
471 return(ENOTSUP);
472
473 if (context == NULL) {
474 acontext.vc_proc = current_proc();
475 acontext.vc_ucred = kauth_cred_get();
476 context = &acontext;
477 }
478 thread_safe = THREAD_SAFE_FS(vp);
479
480 if (!thread_safe) {
481 funnel_state = thread_funnel_set(kernel_flock, TRUE);
482 }
483 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, context);
484 if (!thread_safe) {
485 (void) thread_funnel_set(kernel_flock, funnel_state);
486 }
487 return(error);
488 }
489
490
491 /* returns a copy of vfs type name for the mount_t */
492 void
493 vfs_name(mount_t mp, char * buffer)
494 {
495 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
496 }
497
498 /* returns vfs type number for the mount_t */
499 int
500 vfs_typenum(mount_t mp)
501 {
502 return(mp->mnt_vtable->vfc_typenum);
503 }
504
505
506 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
507 uint64_t
508 vfs_flags(mount_t mp)
509 {
510 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
511 }
512
513 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
514 void
515 vfs_setflags(mount_t mp, uint64_t flags)
516 {
517 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
518
519 mp->mnt_flag |= lflags;
520 }
521
522 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
523 void
524 vfs_clearflags(mount_t mp , uint64_t flags)
525 {
526 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
527
528 mp->mnt_flag &= ~lflags;
529 }
530
531 /* Is the mount_t ronly and upgrade read/write requested? */
532 int
533 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
534 {
535 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
536 }
537
538
539 /* Is the mount_t mounted ronly */
540 int
541 vfs_isrdonly(mount_t mp)
542 {
543 return (mp->mnt_flag & MNT_RDONLY);
544 }
545
546 /* Is the mount_t mounted for filesystem synchronous writes? */
547 int
548 vfs_issynchronous(mount_t mp)
549 {
550 return (mp->mnt_flag & MNT_SYNCHRONOUS);
551 }
552
553 /* Is the mount_t mounted read/write? */
554 int
555 vfs_isrdwr(mount_t mp)
556 {
557 return ((mp->mnt_flag & MNT_RDONLY) == 0);
558 }
559
560
561 /* Is mount_t marked for update (ie MNT_UPDATE) */
562 int
563 vfs_isupdate(mount_t mp)
564 {
565 return (mp->mnt_flag & MNT_UPDATE);
566 }
567
568
569 /* Is mount_t marked for reload (ie MNT_RELOAD) */
570 int
571 vfs_isreload(mount_t mp)
572 {
573 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
574 }
575
576 /* Is mount_t marked for forced unmount (ie MNT_FORCE) */
577 int
578 vfs_isforce(mount_t mp)
579 {
580 if ((mp->mnt_flag & MNT_FORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
581 return(1);
582 else
583 return(0);
584 }
585
586 int
587 vfs_64bitready(mount_t mp)
588 {
589 if ((mp->mnt_vtable->vfc_64bitready))
590 return(1);
591 else
592 return(0);
593 }
594
595 int
596 vfs_authopaque(mount_t mp)
597 {
598 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
599 return(1);
600 else
601 return(0);
602 }
603
604 int
605 vfs_authopaqueaccess(mount_t mp)
606 {
607 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
608 return(1);
609 else
610 return(0);
611 }
612
613 void
614 vfs_setauthopaque(mount_t mp)
615 {
616 mount_lock(mp);
617 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
618 mount_unlock(mp);
619 }
620
621 void
622 vfs_setauthopaqueaccess(mount_t mp)
623 {
624 mount_lock(mp);
625 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
626 mount_unlock(mp);
627 }
628
629 void
630 vfs_clearauthopaque(mount_t mp)
631 {
632 mount_lock(mp);
633 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
634 mount_unlock(mp);
635 }
636
637 void
638 vfs_clearauthopaqueaccess(mount_t mp)
639 {
640 mount_lock(mp);
641 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
642 mount_unlock(mp);
643 }
644
645 void
646 vfs_setextendedsecurity(mount_t mp)
647 {
648 mount_lock(mp);
649 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
650 mount_unlock(mp);
651 }
652
653 void
654 vfs_clearextendedsecurity(mount_t mp)
655 {
656 mount_lock(mp);
657 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
658 mount_unlock(mp);
659 }
660
661 int
662 vfs_extendedsecurity(mount_t mp)
663 {
664 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
665 }
666
667 /* returns the max size of short symlink in this mount_t */
668 uint32_t
669 vfs_maxsymlen(mount_t mp)
670 {
671 return(mp->mnt_maxsymlinklen);
672 }
673
674 /* set max size of short symlink on mount_t */
675 void
676 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
677 {
678 mp->mnt_maxsymlinklen = symlen;
679 }
680
681 /* return a pointer to the RO vfs_statfs associated with mount_t */
682 struct vfsstatfs *
683 vfs_statfs(mount_t mp)
684 {
685 return(&mp->mnt_vfsstat);
686 }
687
688 int
689 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
690 {
691 int error;
692 char *vname;
693
694 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
695 return(error);
696
697 /*
698 * If we have a filesystem create time, use it to default some others.
699 */
700 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
701 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
702 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
703 }
704
705 return(0);
706 }
707
708 int
709 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
710 {
711 int error;
712
713 if (vfs_isrdonly(mp))
714 return EROFS;
715
716 error = VFS_SETATTR(mp, vfa, ctx);
717
718 /*
719 * If we had alternate ways of setting vfs attributes, we'd
720 * fall back here.
721 */
722
723 return error;
724 }
725
726 /* return the private data handle stored in mount_t */
727 void *
728 vfs_fsprivate(mount_t mp)
729 {
730 return(mp->mnt_data);
731 }
732
733 /* set the private data handle in mount_t */
734 void
735 vfs_setfsprivate(mount_t mp, void *mntdata)
736 {
737 mp->mnt_data = mntdata;
738 }
739
740
741 /*
742 * return the block size of the underlying
743 * device associated with mount_t
744 */
745 int
746 vfs_devblocksize(mount_t mp) {
747
748 return(mp->mnt_devblocksize);
749 }
750
751
752 /*
753 * return the io attributes associated with mount_t
754 */
755 void
756 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
757 {
758 if (mp == NULL) {
759 ioattrp->io_maxreadcnt = MAXPHYS;
760 ioattrp->io_maxwritecnt = MAXPHYS;
761 ioattrp->io_segreadcnt = 32;
762 ioattrp->io_segwritecnt = 32;
763 ioattrp->io_maxsegreadsize = MAXPHYS;
764 ioattrp->io_maxsegwritesize = MAXPHYS;
765 ioattrp->io_devblocksize = DEV_BSIZE;
766 } else {
767 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
768 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
769 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
770 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
771 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
772 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
773 ioattrp->io_devblocksize = mp->mnt_devblocksize;
774 }
775 ioattrp->io_reserved[0] = 0;
776 ioattrp->io_reserved[1] = 0;
777 ioattrp->io_reserved[2] = 0;
778 }
779
780
781 /*
782 * set the IO attributes associated with mount_t
783 */
784 void
785 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
786 {
787 if (mp == NULL)
788 return;
789 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
790 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
791 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
792 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
793 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
794 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
795 mp->mnt_devblocksize = ioattrp->io_devblocksize;
796 }
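
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * mount path might clamp the device I/O limits with the two routines
 * above; the 512KB cap is an arbitrary example value.
 */
#if 0
static void
example_clamp_ioattr(mount_t mp)
{
	struct vfsioattr ioattr;

	vfs_ioattr(mp, &ioattr);		/* read the current attributes */
	if (ioattr.io_maxreadcnt > 512 * 1024)
		ioattr.io_maxreadcnt = 512 * 1024;
	if (ioattr.io_maxwritecnt > 512 * 1024)
		ioattr.io_maxwritecnt = 512 * 1024;
	vfs_setioattr(mp, &ioattr);		/* write them back */
}
#endif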
797
798 /*
799 * Add a new filesystem into the kernel, as described by the passed-in
800 * vfs_fsentry structure. It fills in the vnode
801 * dispatch vector that is to be passed to vnode_create() when vnodes are created.
802 * It returns a handle which is to be used when the FS is to be removed.
803 */
804 typedef int (*PFI)(void *);
805 extern int vfs_opv_numops;
806 errno_t
807 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
808 {
809 #pragma unused(data)
810 struct vfstable *newvfstbl = NULL;
811 int i,j;
812 int (***opv_desc_vector_p)(void *);
813 int (**opv_desc_vector)(void *);
814 struct vnodeopv_entry_desc *opve_descp;
815 int desccount;
816 int descsize;
817 PFI *descptr;
818
819 /*
820 * This routine is responsible for all the initialization that would
821 * ordinarily be done as part of the system startup;
822 */
823
824 if (vfe == (struct vfs_fsentry *)0)
825 return(EINVAL);
826
827 desccount = vfe->vfe_vopcnt;
828 if ((desccount <=0) || ((desccount > 5)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
829 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
830 return(EINVAL);
831
832
833 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
834 M_WAITOK);
835 bzero(newvfstbl, sizeof(struct vfstable));
836 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
837 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
838 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
839 newvfstbl->vfc_typenum = maxvfsconf++;
840 else
841 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
842
843 newvfstbl->vfc_refcount = 0;
844 newvfstbl->vfc_flags = 0;
845 newvfstbl->vfc_mountroot = NULL;
846 newvfstbl->vfc_next = NULL;
847 newvfstbl->vfc_threadsafe = 0;
848 newvfstbl->vfc_vfsflags = 0;
849 if (vfe->vfe_flags & VFS_TBL64BITREADY)
850 newvfstbl->vfc_64bitready= 1;
851 if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
852 newvfstbl->vfc_threadsafe= 1;
853 if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
854 newvfstbl->vfc_threadsafe= 1;
855 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
856 newvfstbl->vfc_flags |= MNT_LOCAL;
857 if (vfe->vfe_flags & VFS_TBLLOCALVOL)
858 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
859 else
860 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
861
862
863 /*
864 * Allocate and init the vectors.
865 * Also handle backwards compatibility.
866 *
867 * We allocate one large block to hold all <desccount>
868 * vnode operation vectors stored contiguously.
869 */
870 /* XXX - shouldn't be M_TEMP */
871
872 descsize = desccount * vfs_opv_numops * sizeof(PFI);
873 MALLOC(descptr, PFI *, descsize,
874 M_TEMP, M_WAITOK);
875 bzero(descptr, descsize);
876
877 newvfstbl->vfc_descptr = descptr;
878 newvfstbl->vfc_descsize = descsize;
879
880
881 for (i= 0; i< desccount; i++ ) {
882 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
883 /*
884 * Fill in the caller's pointer to the start of the i'th vector.
885 * They'll need to supply it when calling vnode_create.
886 */
887 opv_desc_vector = descptr + i * vfs_opv_numops;
888 *opv_desc_vector_p = opv_desc_vector;
889
890 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
891 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
892
893 /*
894 * Sanity check: is this operation listed
895 * in the list of operations? We check this
896 * by seeing if its offset is zero. Since
897 * the default routine should always be listed
898 * first, it should be the only one with a zero
899 * offset. Any other operation with a zero
900 * offset is probably not listed in
901 * vfs_op_descs, and so is probably an error.
902 *
903 * A panic here means the layer programmer
904 * has committed the all-too-common bug
905 * of adding a new operation to the layer's
906 * list of vnode operations but
907 * not adding the operation to the system-wide
908 * list of supported operations.
909 */
910 if (opve_descp->opve_op->vdesc_offset == 0 &&
911 opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
912 printf("vfs_fsadd: operation %s not listed in %s.\n",
913 opve_descp->opve_op->vdesc_name,
914 "vfs_op_descs");
915 panic("vfs_fsadd: bad operation");
916 }
917 /*
918 * Fill in this entry.
919 */
920 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
921 opve_descp->opve_impl;
922 }
923
924
925 /*
926 * Finally, go back and replace unfilled routines
927 * with their default. (Sigh, an O(n^3) algorithm. I
928 * could make it better, but that'd be work, and n is small.)
929 */
930 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
931
932 /*
933 * Force every operations vector to have a default routine.
934 */
935 opv_desc_vector = *opv_desc_vector_p;
936 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
937 panic("vfs_fsadd: operation vector without default routine.");
938 for (j = 0; j < vfs_opv_numops; j++)
939 if (opv_desc_vector[j] == NULL)
940 opv_desc_vector[j] =
941 opv_desc_vector[VOFFSET(vnop_default)];
942
943 } /* end of each vnodeopv_desc parsing */
944
945
946
947 *handle = vfstable_add(newvfstbl);
948
949 if (newvfstbl->vfc_typenum <= maxvfsconf )
950 maxvfsconf = newvfstbl->vfc_typenum + 1;
951 numused_vfsslots++;
952
953 if (newvfstbl->vfc_vfsops->vfs_init)
954 (*newvfstbl->vfc_vfsops->vfs_init)((struct vfsconf *)handle);
955
956 FREE(newvfstbl, M_TEMP);
957
958 return(0);
959 }
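
/*
 * Illustrative sketch (not part of the original file): registering a
 * hypothetical filesystem "examplefs" with vfs_fsadd().  The vfsops and
 * vnodeopv_desc tables are assumed to be supplied by the filesystem.
 */
#if 0
extern struct vfsops examplefs_vfsops;				/* hypothetical */
extern struct vnodeopv_desc examplefs_vnodeop_opv_desc;		/* hypothetical */

static vfstable_t examplefs_handle;

static errno_t
examplefs_register(void)
{
	struct vnodeopv_desc *opvdescs[] = { &examplefs_vnodeop_opv_desc };
	struct vfs_fsentry vfe;

	bzero(&vfe, sizeof(vfe));
	vfe.vfe_vfsops = &examplefs_vfsops;
	vfe.vfe_vopcnt = 1;				/* one vnode operation vector */
	vfe.vfe_opvdescs = opvdescs;
	strncpy(vfe.vfe_fsname, "examplefs", MFSNAMELEN);
	vfe.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM;

	/* the returned handle is needed later for vfs_fsremove() */
	return (vfs_fsadd(&vfe, &examplefs_handle));
}
#endif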
960
961 /*
962 * Removes the filesystem from the kernel.
963 * The argument passed in is the handle that was given when
964 * the file system was added.
965 */
966 errno_t
967 vfs_fsremove(vfstable_t handle)
968 {
969 struct vfstable * vfstbl = (struct vfstable *)handle;
970 void *old_desc = NULL;
971 errno_t err;
972
973 /* Preflight check for any mounts */
974 mount_list_lock();
975 if ( vfstbl->vfc_refcount != 0 ) {
976 mount_list_unlock();
977 return EBUSY;
978 }
979 mount_list_unlock();
980
981 /*
982 * save the old descriptor; the free cannot occur unconditionally,
983 * since vfstable_del() may fail.
984 */
985 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
986 old_desc = vfstbl->vfc_descptr;
987 }
988 err = vfstable_del(vfstbl);
989
990 /* free the descriptor if the delete was successful */
991 if (err == 0 && old_desc) {
992 FREE(old_desc, M_TEMP);
993 }
994
995 return(err);
996 }
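
/*
 * Illustrative sketch (not part of the original file): unregistering the
 * hypothetical "examplefs" from the previous sketch.  EBUSY is returned
 * while any instance of the filesystem is still mounted.
 */
#if 0
static errno_t
examplefs_unregister(void)
{
	errno_t err;

	err = vfs_fsremove(examplefs_handle);
	if (err == EBUSY)
		printf("examplefs: still mounted, cannot unregister\n");
	return (err);
}
#endif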
997
998 /*
999 * This returns a reference to mount_t
1000 * which should be dropped using vfs_mountrele().
1001 * Not doing so will leak a mountpoint
1002 * and associated data structures.
1003 */
1004 errno_t
1005 vfs_mountref(__unused mount_t mp ) /* gives a reference */
1006 {
1007 return(0);
1008 }
1009
1010 /* This drops the reference on mount_t that was acquired */
1011 errno_t
1012 vfs_mountrele(__unused mount_t mp ) /* drops reference */
1013 {
1014 return(0);
1015 }
1016
1017 int
1018 vfs_context_pid(vfs_context_t context)
1019 {
1020 return (context->vc_proc->p_pid);
1021 }
1022
1023 int
1024 vfs_context_suser(vfs_context_t context)
1025 {
1026 return (suser(context->vc_ucred, 0));
1027 }
1028 int
1029 vfs_context_issignal(vfs_context_t context, sigset_t mask)
1030 {
1031 if (context->vc_proc)
1032 return(proc_pendingsignals(context->vc_proc, mask));
1033 return(0);
1034 }
1035
1036 int
1037 vfs_context_is64bit(vfs_context_t context)
1038 {
1039 if (context->vc_proc)
1040 return(proc_is64bit(context->vc_proc));
1041 return(0);
1042 }
1043
1044 proc_t
1045 vfs_context_proc(vfs_context_t context)
1046 {
1047 return (context->vc_proc);
1048 }
1049
1050 vfs_context_t
1051 vfs_context_create(vfs_context_t context)
1052 {
1053 struct vfs_context * newcontext;
1054
1055 newcontext = (struct vfs_context *)kalloc(sizeof(struct vfs_context));
1056
1057 if (newcontext) {
1058 if (context) {
1059 newcontext->vc_proc = context->vc_proc;
1060 newcontext->vc_ucred = context->vc_ucred;
1061 } else {
1062 newcontext->vc_proc = proc_self();
1063 newcontext->vc_ucred = kauth_cred_get();
1064 }
1065 return(newcontext);
1066 }
1067 return((vfs_context_t)0);
1068 }
1069
1070 int
1071 vfs_context_rele(vfs_context_t context)
1072 {
1073 if (context)
1074 kfree(context, sizeof(struct vfs_context));
1075 return(0);
1076 }
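
/*
 * Illustrative sketch (not part of the original file): allocating a
 * context, using it for a VFS call, and releasing it.  As the code above
 * shows, passing NULL to vfs_context_create() fills in the current
 * process and credential.
 */
#if 0
static int
example_sync_mount(mount_t mp)
{
	vfs_context_t ctx;
	int error;

	if ((ctx = vfs_context_create(NULL)) == NULL)
		return (ENOMEM);
	error = VFS_SYNC(mp, MNT_WAIT, ctx);
	vfs_context_rele(ctx);
	return (error);
}
#endif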
1077
1078
1079 ucred_t
1080 vfs_context_ucred(vfs_context_t context)
1081 {
1082 return (context->vc_ucred);
1083 }
1084
1085 /*
1086 * Return true if the context is owned by the superuser.
1087 */
1088 int
1089 vfs_context_issuser(vfs_context_t context)
1090 {
1091 return(context->vc_ucred->cr_uid == 0);
1092 }
1093
1094
1095 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1096
1097
1098 /*
1099 * Convert between vnode types and inode formats (since POSIX.1
1100 * defines mode word of stat structure in terms of inode formats).
1101 */
1102 enum vtype
1103 vnode_iftovt(int mode)
1104 {
1105 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1106 }
1107
1108 int
1109 vnode_vttoif(enum vtype indx)
1110 {
1111 return(vttoif_tab[(int)(indx)]);
1112 }
1113
1114 int
1115 vnode_makeimode(int indx, int mode)
1116 {
1117 return (int)(VTTOIF(indx) | (mode));
1118 }
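
/*
 * Illustrative sketch (not part of the original file): converting
 * between stat-style mode bits and vnode types with the helpers above.
 */
#if 0
static void
example_vtype_conversions(void)
{
	enum vtype vt = vnode_iftovt(S_IFDIR);		/* VDIR */
	int ifmt = vnode_vttoif(VREG);			/* S_IFREG */
	int imode = vnode_makeimode(VREG, 0644);	/* S_IFREG | 0644 */

	(void)vt; (void)ifmt; (void)imode;
}
#endif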
1119
1120
1121 /*
1122 * vnode manipulation functions.
1123 */
1124
1125 /* returns system root vnode reference; it should be dropped using vrele() */
1126 vnode_t
1127 vfs_rootvnode(void)
1128 {
1129 int error;
1130
1131 error = vnode_get(rootvnode);
1132 if (error)
1133 return ((vnode_t)0);
1134 else
1135 return rootvnode;
1136 }
1137
1138
1139 uint32_t
1140 vnode_vid(vnode_t vp)
1141 {
1142 return ((uint32_t)(vp->v_id));
1143 }
1144
1145 /* returns a mount reference; drop it with vfs_mountrele() */
1146 mount_t
1147 vnode_mount(vnode_t vp)
1148 {
1149 return (vp->v_mount);
1150 }
1151
1152 /* returns a mount reference iff vnode_t is a dir and is a mount point */
1153 mount_t
1154 vnode_mountedhere(vnode_t vp)
1155 {
1156 mount_t mp;
1157
1158 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1159 (mp->mnt_vnodecovered == vp))
1160 return (mp);
1161 else
1162 return (mount_t)NULL;
1163 }
1164
1165 /* returns vnode type of vnode_t */
1166 enum vtype
1167 vnode_vtype(vnode_t vp)
1168 {
1169 return (vp->v_type);
1170 }
1171
1172 /* returns FS specific node saved in vnode */
1173 void *
1174 vnode_fsnode(vnode_t vp)
1175 {
1176 return (vp->v_data);
1177 }
1178
1179 void
1180 vnode_clearfsnode(vnode_t vp)
1181 {
1182 vp->v_data = 0;
1183 }
1184
1185 dev_t
1186 vnode_specrdev(vnode_t vp)
1187 {
1188 return(vp->v_rdev);
1189 }
1190
1191
1192 /* Accessor functions */
1193 /* is vnode_t a root vnode */
1194 int
1195 vnode_isvroot(vnode_t vp)
1196 {
1197 return ((vp->v_flag & VROOT)? 1 : 0);
1198 }
1199
1200 /* is vnode_t a system vnode */
1201 int
1202 vnode_issystem(vnode_t vp)
1203 {
1204 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1205 }
1206
1207 /* is a mount operation in progress on vnode_t */
1208 int
1209 vnode_ismount(vnode_t vp)
1210 {
1211 return ((vp->v_flag & VMOUNT)? 1 : 0);
1212 }
1213
1214 /* is this vnode under recycle now */
1215 int
1216 vnode_isrecycled(vnode_t vp)
1217 {
1218 int ret;
1219
1220 vnode_lock(vp);
1221 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1222 vnode_unlock(vp);
1223 return(ret);
1224 }
1225
1226 /* is vnode_t marked to not keep data cached once it's been consumed */
1227 int
1228 vnode_isnocache(vnode_t vp)
1229 {
1230 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1231 }
1232
1233 /*
1234 * has sequential readahead been disabled on this vnode
1235 */
1236 int
1237 vnode_isnoreadahead(vnode_t vp)
1238 {
1239 return ((vp->v_flag & VRAOFF)? 1 : 0);
1240 }
1241
1242 /* is vnode_t a standard one? */
1243 int
1244 vnode_isstandard(vnode_t vp)
1245 {
1246 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1247 }
1248
1249 /* don't vflush() if SKIPSYSTEM */
1250 int
1251 vnode_isnoflush(vnode_t vp)
1252 {
1253 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1254 }
1255
1256 /* is vnode_t a regular file */
1257 int
1258 vnode_isreg(vnode_t vp)
1259 {
1260 return ((vp->v_type == VREG)? 1 : 0);
1261 }
1262
1263 /* is vnode_t a directory? */
1264 int
1265 vnode_isdir(vnode_t vp)
1266 {
1267 return ((vp->v_type == VDIR)? 1 : 0);
1268 }
1269
1270 /* is vnode_t a symbolic link ? */
1271 int
1272 vnode_islnk(vnode_t vp)
1273 {
1274 return ((vp->v_type == VLNK)? 1 : 0);
1275 }
1276
1277 /* is vnode_t a fifo ? */
1278 int
1279 vnode_isfifo(vnode_t vp)
1280 {
1281 return ((vp->v_type == VFIFO)? 1 : 0);
1282 }
1283
1284 /* is vnode_t a block device? */
1285 int
1286 vnode_isblk(vnode_t vp)
1287 {
1288 return ((vp->v_type == VBLK)? 1 : 0);
1289 }
1290
1291 /* is vnode_t a char device? */
1292 int
1293 vnode_ischr(vnode_t vp)
1294 {
1295 return ((vp->v_type == VCHR)? 1 : 0);
1296 }
1297
1298 /* is vnode_t a socket? */
1299 int
1300 vnode_issock(vnode_t vp)
1301 {
1302 return ((vp->v_type == VSOCK)? 1 : 0);
1303 }
1304
1305
1306 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1307 void
1308 vnode_setnocache(vnode_t vp)
1309 {
1310 vnode_lock(vp);
1311 vp->v_flag |= VNOCACHE_DATA;
1312 vnode_unlock(vp);
1313 }
1314
1315 void
1316 vnode_clearnocache(vnode_t vp)
1317 {
1318 vnode_lock(vp);
1319 vp->v_flag &= ~VNOCACHE_DATA;
1320 vnode_unlock(vp);
1321 }
1322
1323 void
1324 vnode_setnoreadahead(vnode_t vp)
1325 {
1326 vnode_lock(vp);
1327 vp->v_flag |= VRAOFF;
1328 vnode_unlock(vp);
1329 }
1330
1331 void
1332 vnode_clearnoreadahead(vnode_t vp)
1333 {
1334 vnode_lock(vp);
1335 vp->v_flag &= ~VRAOFF;
1336 vnode_unlock(vp);
1337 }
1338
1339
1340 /* mark vnode_t to skip vflush() if SKIPSYSTEM */
1341 void
1342 vnode_setnoflush(vnode_t vp)
1343 {
1344 vnode_lock(vp);
1345 vp->v_flag |= VNOFLUSH;
1346 vnode_unlock(vp);
1347 }
1348
1349 void
1350 vnode_clearnoflush(vnode_t vp)
1351 {
1352 vnode_lock(vp);
1353 vp->v_flag &= ~VNOFLUSH;
1354 vnode_unlock(vp);
1355 }
1356
1357
1358 /* is vnode_t a blkdevice and has a FS mounted on it */
1359 int
1360 vnode_ismountedon(vnode_t vp)
1361 {
1362 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
1363 }
1364
1365 void
1366 vnode_setmountedon(vnode_t vp)
1367 {
1368 vnode_lock(vp);
1369 vp->v_specflags |= SI_MOUNTEDON;
1370 vnode_unlock(vp);
1371 }
1372
1373 void
1374 vnode_clearmountedon(vnode_t vp)
1375 {
1376 vnode_lock(vp);
1377 vp->v_specflags &= ~SI_MOUNTEDON;
1378 vnode_unlock(vp);
1379 }
1380
1381
1382 void
1383 vnode_settag(vnode_t vp, int tag)
1384 {
1385 vp->v_tag = tag;
1386
1387 }
1388
1389 int
1390 vnode_tag(vnode_t vp)
1391 {
1392 return(vp->v_tag);
1393 }
1394
1395 vnode_t
1396 vnode_parent(vnode_t vp)
1397 {
1398
1399 return(vp->v_parent);
1400 }
1401
1402 void
1403 vnode_setparent(vnode_t vp, vnode_t dvp)
1404 {
1405 vp->v_parent = dvp;
1406 }
1407
1408 char *
1409 vnode_name(vnode_t vp)
1410 {
1411 /* we try to keep v_name a reasonable name for the node */
1412 return(vp->v_name);
1413 }
1414
1415 void
1416 vnode_setname(vnode_t vp, char * name)
1417 {
1418 vp->v_name = name;
1419 }
1420
1421 /* return the FS name registered when the FS was added to the kernel */
1422 void
1423 vnode_vfsname(vnode_t vp, char * buf)
1424 {
1425 strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
1426 }
1427
1428 /* return the FS type number */
1429 int
1430 vnode_vfstypenum(vnode_t vp)
1431 {
1432 return(vp->v_mount->mnt_vtable->vfc_typenum);
1433 }
1434
1435 int
1436 vnode_vfs64bitready(vnode_t vp)
1437 {
1438
1439 if ((vp->v_mount->mnt_vtable->vfc_64bitready))
1440 return(1);
1441 else
1442 return(0);
1443 }
1444
1445
1446
1447 /* return the visible flags on associated mount point of vnode_t */
1448 uint32_t
1449 vnode_vfsvisflags(vnode_t vp)
1450 {
1451 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
1452 }
1453
1454 /* return the command modifier flags on associated mount point of vnode_t */
1455 uint32_t
1456 vnode_vfscmdflags(vnode_t vp)
1457 {
1458 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
1459 }
1460
1461 /* return the max size of short symlink on vnode_t's mount */
1462 uint32_t
1463 vnode_vfsmaxsymlen(vnode_t vp)
1464 {
1465 return(vp->v_mount->mnt_maxsymlinklen);
1466 }
1467
1468 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1469 struct vfsstatfs *
1470 vnode_vfsstatfs(vnode_t vp)
1471 {
1472 return(&vp->v_mount->mnt_vfsstat);
1473 }
1474
1475 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1476 void *
1477 vnode_vfsfsprivate(vnode_t vp)
1478 {
1479 return(vp->v_mount->mnt_data);
1480 }
1481
1482 /* is vnode_t in a rdonly mounted FS */
1483 int
1484 vnode_vfsisrdonly(vnode_t vp)
1485 {
1486 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
1487 }
1488
1489
1490 /* returns vnode ref to current working directory */
1491 vnode_t
1492 current_workingdir(void)
1493 {
1494 struct proc *p = current_proc();
1495 struct vnode * vp ;
1496
1497 if ( (vp = p->p_fd->fd_cdir) ) {
1498 if ( (vnode_getwithref(vp)) )
1499 return (NULL);
1500 }
1501 return vp;
1502 }
1503
1504 /* returns vnode ref to current root(chroot) directory */
1505 vnode_t
1506 current_rootdir(void)
1507 {
1508 struct proc *p = current_proc();
1509 struct vnode * vp ;
1510
1511 if ( (vp = p->p_fd->fd_rdir) ) {
1512 if ( (vnode_getwithref(vp)) )
1513 return (NULL);
1514 }
1515 return vp;
1516 }
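
/*
 * Illustrative sketch (not part of the original file): the iocount
 * reference returned by current_workingdir() is dropped with
 * vnode_put() when the caller is done with the vnode.
 */
#if 0
static int
example_use_cwd(void)
{
	vnode_t cwd;

	if ((cwd = current_workingdir()) == NULL)
		return (ENOENT);
	/* ... operate on cwd ... */
	vnode_put(cwd);
	return (0);
}
#endif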
1517
1518 /*
1519 * Get a filesec and optional acl contents from an extended attribute.
1520 * The function will attempt to retrieve ACL, UUID, and GUID information using a
1521 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
1522 *
1523 * Parameters: vp The vnode on which to operate.
1524 * fsecp The filesec (and ACL, if any) being
1525 * retrieved.
1526 * ctx The vnode context in which the
1527 * operation is to be attempted.
1528 *
1529 * Returns: 0 Success
1530 * !0 errno value
1531 *
1532 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
1533 * host byte order, as will be the ACL contents, if any.
1534 * Internally, we will canonicalize these values from network (PPC)
1535 * byte order after we retrieve them so that the on-disk contents
1536 * of the extended attribute are identical for both PPC and Intel
1537 * (if we were not being required to provide this service via
1538 * fallback, this would be the job of the filesystem
1539 * 'VNOP_GETATTR' call).
1540 *
1541 * We use ntohl() because it has a transitive property on Intel
1542 * machines and no effect on PPC machines. This guarantees us
1543 * that the on-disk byte order of the EA contents is the same everywhere.
1544 * XXX: Deleting rather than ignoring a corrupt security structure is
1545 * probably the only way to reset it without assistance from a
1546 * file system integrity checking tool. Right now we ignore it.
1547 *
1548 * XXX: We should enumerate the possible errno values here, and where
1549 * in the code they originated.
1550 */
1551 static int
1552 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
1553 {
1554 kauth_filesec_t fsec;
1555 uio_t fsec_uio;
1556 size_t fsec_size;
1557 size_t xsize, rsize;
1558 int error;
1559 int i;
1560 uint32_t host_fsec_magic;
1561 uint32_t host_acl_entrycount;
1562
1563 fsec = NULL;
1564 fsec_uio = NULL;
1565 error = 0;
1566
1567 /* find out how big the EA is */
1568 if ((error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx)) != 0) {
1569 /* no EA, no filesec */
1570 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1571 error = 0;
1572 /* either way, we are done */
1573 goto out;
1574 }
1575
1576 /* how many entries would fit? */
1577 fsec_size = KAUTH_FILESEC_COUNT(xsize);
1578
1579 /* get buffer and uio */
1580 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
1581 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
1582 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
1583 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
1584 error = ENOMEM;
1585 goto out;
1586 }
1587
1588 /* read security attribute */
1589 rsize = xsize;
1590 if ((error = vn_getxattr(vp,
1591 KAUTH_FILESEC_XATTR,
1592 fsec_uio,
1593 &rsize,
1594 XATTR_NOSECURITY,
1595 ctx)) != 0) {
1596
1597 /* no attribute - no security data */
1598 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1599 error = 0;
1600 /* either way, we are done */
1601 goto out;
1602 }
1603
1604 /*
1605 * Validate security structure; the validation must take place in host
1606 * byte order. If it's corrupt, we will just ignore it.
1607 */
1608
1609 /* Validate the size before trying to convert it */
1610 if (rsize < KAUTH_FILESEC_SIZE(0)) {
1611 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
1612 goto out;
1613 }
1614
1615 /* Validate the magic number before trying to convert it */
1616 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
1617 if (fsec->fsec_magic != host_fsec_magic) {
1618 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
1619 goto out;
1620 }
1621
1622 /* Validate the entry count before trying to convert it. */
1623 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
1624 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
1625 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
1626 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
1627 goto out;
1628 }
1629 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
1630 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
1631 goto out;
1632 }
1633 }
1634
1635 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
1636
1637 *fsecp = fsec;
1638 fsec = NULL;
1639 error = 0;
1640 out:
1641 if (fsec != NULL)
1642 kauth_filesec_free(fsec);
1643 if (fsec_uio != NULL)
1644 uio_free(fsec_uio);
1645 if (error)
1646 *fsecp = NULL;
1647 return(error);
1648 }
1649
1650 /*
1651 * Set a filesec and optional acl contents into an extended attribute.
1652 * The function will attempt to store ACL, UUID, and GUID information using a
1653 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
1654 * may or may not point to the `fsec->fsec_acl`, depending on whether the
1655 * original caller supplied an acl.
1656 *
1657 * Parameters: vp The vnode on which to operate.
1658 * fsec The filesec being set.
1659 * acl The acl to be associated with 'fsec'.
1660 * ctx The vnode context in which the
1661 * operation is to be attempted.
1662 *
1663 * Returns: 0 Success
1664 * !0 errno value
1665 *
1666 * Notes: Both the fsec and the acl are always valid.
1667 *
1668 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
1669 * as are the acl contents, if they are used. Internally, we will
1670 * canonicalize these values into network (PPC) byte order before we
1671 * attempt to write them so that the on-disk contents of the
1672 * extended attribute are identical for both PPC and Intel (if we
1673 * were not being required to provide this service via fallback,
1674 * this would be the job of the filesystem 'VNOP_SETATTR' call).
1675 * We reverse this process on the way out, so we leave with the
1676 * same byte order we started with.
1677 *
1678 * XXX: We should enumerate the possible errno values here, and where
1679 * in the code they originated.
1680 */
1681 static int
1682 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
1683 {
1684 uio_t fsec_uio;
1685 int error;
1686 int i;
1687 uint32_t saved_acl_copysize;
1688
1689 fsec_uio = NULL;
1690
1691 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
1692 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
1693 error = ENOMEM;
1694 goto out;
1695 }
1696 /*
1697 * Save the pre-converted ACL copysize, because it gets swapped too
1698 * if we are running with the wrong endianness.
1699 */
1700 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
1701
1702 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
1703
1704 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), sizeof(struct kauth_filesec) - sizeof(struct kauth_acl));
1705 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
1706 error = vn_setxattr(vp,
1707 KAUTH_FILESEC_XATTR,
1708 fsec_uio,
1709 XATTR_NOSECURITY, /* we have auth'ed already */
1710 ctx);
1711 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
1712
1713 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
1714
1715 out:
1716 if (fsec_uio != NULL)
1717 uio_free(fsec_uio);
1718 return(error);
1719 }
1720
1721
1722 int
1723 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
1724 {
1725 kauth_filesec_t fsec;
1726 kauth_acl_t facl;
1727 int error;
1728 uid_t nuid;
1729 gid_t ngid;
1730
1731 /* don't ask for extended security data if the filesystem doesn't support it */
1732 if (!vfs_extendedsecurity(vnode_mount(vp))) {
1733 VATTR_CLEAR_ACTIVE(vap, va_acl);
1734 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
1735 VATTR_CLEAR_ACTIVE(vap, va_guuid);
1736 }
1737
1738 /*
1739 * If the caller wants size values we might have to synthesise, give the
1740 * filesystem the opportunity to supply better intermediate results.
1741 */
1742 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
1743 VATTR_IS_ACTIVE(vap, va_total_size) ||
1744 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
1745 VATTR_SET_ACTIVE(vap, va_data_size);
1746 VATTR_SET_ACTIVE(vap, va_data_alloc);
1747 VATTR_SET_ACTIVE(vap, va_total_size);
1748 VATTR_SET_ACTIVE(vap, va_total_alloc);
1749 }
1750
1751 error = VNOP_GETATTR(vp, vap, ctx);
1752 if (error) {
1753 KAUTH_DEBUG("ERROR - returning %d", error);
1754 goto out;
1755 }
1756
1757 /*
1758 * If extended security data was requested but not returned, try the fallback
1759 * path.
1760 */
1761 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
1762 fsec = NULL;
1763
1764 if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) {
1765 /* try to get the filesec */
1766 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
1767 goto out;
1768 }
1769 /* if no filesec, no attributes */
1770 if (fsec == NULL) {
1771 VATTR_RETURN(vap, va_acl, NULL);
1772 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
1773 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
1774 } else {
1775
1776 /* looks good, try to return what we were asked for */
1777 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
1778 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
1779
1780 /* only return the ACL if we were actually asked for it */
1781 if (VATTR_IS_ACTIVE(vap, va_acl)) {
1782 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
1783 VATTR_RETURN(vap, va_acl, NULL);
1784 } else {
1785 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
1786 if (facl == NULL) {
1787 kauth_filesec_free(fsec);
1788 error = ENOMEM;
1789 goto out;
1790 }
1791 bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
1792 VATTR_RETURN(vap, va_acl, facl);
1793 }
1794 }
1795 kauth_filesec_free(fsec);
1796 }
1797 }
1798 /*
1799 * If someone gave us an unsolicited filesec, toss it. We promise that
1800 * we're OK with a filesystem giving us anything back, but our callers
1801 * only expect what they asked for.
1802 */
1803 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
1804 if (vap->va_acl != NULL)
1805 kauth_acl_free(vap->va_acl);
1806 VATTR_CLEAR_SUPPORTED(vap, va_acl);
1807 }
1808
1809 #if 0 /* enable when we have a filesystem only supporting UUIDs */
1810 /*
1811 * Handle the case where we need a UID/GID, but only have extended
1812 * security information.
1813 */
1814 if (VATTR_NOT_RETURNED(vap, va_uid) &&
1815 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
1816 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
1817 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
1818 VATTR_RETURN(vap, va_uid, nuid);
1819 }
1820 if (VATTR_NOT_RETURNED(vap, va_gid) &&
1821 VATTR_IS_SUPPORTED(vap, va_guuid) &&
1822 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
1823 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
1824 VATTR_RETURN(vap, va_gid, ngid);
1825 }
1826 #endif
1827
1828 /*
1829 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
1830 */
1831 if (VATTR_IS_ACTIVE(vap, va_uid)) {
1832 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
1833 nuid = vp->v_mount->mnt_fsowner;
1834 if (nuid == KAUTH_UID_NONE)
1835 nuid = 99;
1836 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
1837 nuid = vap->va_uid;
1838 } else {
1839 /* this will always be something sensible */
1840 nuid = vp->v_mount->mnt_fsowner;
1841 }
1842 if ((nuid == 99) && !vfs_context_issuser(ctx))
1843 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
1844 VATTR_RETURN(vap, va_uid, nuid);
1845 }
1846 if (VATTR_IS_ACTIVE(vap, va_gid)) {
1847 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
1848 ngid = vp->v_mount->mnt_fsgroup;
1849 if (ngid == KAUTH_GID_NONE)
1850 ngid = 99;
1851 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
1852 ngid = vap->va_gid;
1853 } else {
1854 /* this will always be something sensible */
1855 ngid = vp->v_mount->mnt_fsgroup;
1856 }
1857 if ((ngid == 99) && !vfs_context_issuser(ctx))
1858 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
1859 VATTR_RETURN(vap, va_gid, ngid);
1860 }
1861
1862 /*
1863 * Synthesise some values that can be reasonably guessed.
1864 */
1865 if (!VATTR_IS_SUPPORTED(vap, va_iosize))
1866 VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
1867
1868 if (!VATTR_IS_SUPPORTED(vap, va_flags))
1869 VATTR_RETURN(vap, va_flags, 0);
1870
1871 if (!VATTR_IS_SUPPORTED(vap, va_filerev))
1872 VATTR_RETURN(vap, va_filerev, 0);
1873
1874 if (!VATTR_IS_SUPPORTED(vap, va_gen))
1875 VATTR_RETURN(vap, va_gen, 0);
1876
1877 /*
1878 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
1879 */
1880 if (!VATTR_IS_SUPPORTED(vap, va_data_size))
1881 VATTR_RETURN(vap, va_data_size, 0);
1882
1883 /* do we want any of the possibly-computed values? */
1884 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
1885 VATTR_IS_ACTIVE(vap, va_total_size) ||
1886 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
1887 /* make sure f_bsize is valid */
1888 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
1889 if ((error = vfs_update_vfsstat(vp->v_mount, ctx)) != 0)
1890 goto out;
1891 }
1892
1893 /* default va_data_alloc from va_data_size */
1894 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
1895 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
1896
1897 /* default va_total_size from va_data_size */
1898 if (!VATTR_IS_SUPPORTED(vap, va_total_size))
1899 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
1900
1901 /* default va_total_alloc from va_total_size which is guaranteed at this point */
1902 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
1903 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
1904 }
1905
1906 /*
1907 * If we don't have a change time, pull it from the modtime.
1908 */
1909 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
1910 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
1911
1912 /*
1913 * This is really only supported for the creation VNOPs, but since the field is there
1914 * we should populate it correctly.
1915 */
1916 VATTR_RETURN(vap, va_type, vp->v_type);
1917
1918 /*
1919 * The fsid can be obtained from the mountpoint directly.
1920 */
1921 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
1922
1923 out:
1924
1925 return(error);
1926 }
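
/*
 * Illustrative sketch (not part of the original file): the usual caller
 * pattern for vnode_getattr() -- initialize a vnode_attr, mark the
 * attributes wanted, then check what actually came back.
 */
#if 0
static int
example_get_size(vnode_t vp, vfs_context_t ctx, off_t *sizep)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);
	if ((error = vnode_getattr(vp, &va, ctx)) != 0)
		return (error);
	if (!VATTR_IS_SUPPORTED(&va, va_data_size))
		return (ENOTSUP);
	*sizep = va.va_data_size;
	return (0);
}
#endif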
1927
1928 /*
1929 * Set the attributes on a vnode in a vnode context.
1930 *
1931 * Parameters: vp The vnode whose attributes to set.
1932 * vap A pointer to the attributes to set.
1933 * ctx The vnode context in which the
1934 * operation is to be attempted.
1935 *
1936 * Returns: 0 Success
1937 * !0 errno value
1938 *
1939 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
1940 *
1941 * The contents of the data area pointed to by 'vap' may be
1942 * modified if the vnode is on a filesystem which has been
1943 * mounted with ignore ownership flags, or by the underlying
1944 * VFS itself, or by the fallback code, if the underlying VFS
1945 * does not support ACL, UUID, or GUUID attributes directly.
1946 *
1947 * XXX: We should enumerate the possible errno values here, and where
1948 * in the code they originated.
1949 */
1950 int
1951 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
1952 {
1953 int error, is_ownership_change=0;
1954
1955 /*
1956 * Make sure the filesystem is mounted R/W.
1957 * If not, return an error.
1958 */
1959 if (vfs_isrdonly(vp->v_mount)) {
1960 error = EROFS;
1961 goto out;
1962 }
1963
1964 /*
1965 * If ownership is being ignored on this volume, we silently discard
1966 * ownership changes.
1967 */
1968 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
1969 VATTR_CLEAR_ACTIVE(vap, va_uid);
1970 VATTR_CLEAR_ACTIVE(vap, va_gid);
1971 }
1972
1973 if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) {
1974 is_ownership_change = 1;
1975 }
1976
1977 /*
1978 * Make sure that extended security is enabled if we're going to try
1979 * to set any.
1980 */
1981 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
1982 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
1983 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
1984 error = ENOTSUP;
1985 goto out;
1986 }
1987
1988 error = VNOP_SETATTR(vp, vap, ctx);
1989
1990 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
1991 error = vnode_setattr_fallback(vp, vap, ctx);
1992
1993 /*
1994 * If we have changed any of the things about the file that are likely
1995 * to result in changes to authorisation results, blow the vnode auth
1996 * cache
1997 */
1998 if (VATTR_IS_SUPPORTED(vap, va_mode) ||
1999 VATTR_IS_SUPPORTED(vap, va_uid) ||
2000 VATTR_IS_SUPPORTED(vap, va_gid) ||
2001 VATTR_IS_SUPPORTED(vap, va_flags) ||
2002 VATTR_IS_SUPPORTED(vap, va_acl) ||
2003 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
2004 VATTR_IS_SUPPORTED(vap, va_guuid))
2005 vnode_uncache_credentials(vp);
2006 // only send a stat_changed event if this is more than
2007 // just an access time update
2008 if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time))) {
2009 if (need_fsevent(FSE_STAT_CHANGED, vp) || (is_ownership_change && need_fsevent(FSE_CHOWN, vp))) {
2010 if (is_ownership_change == 0)
2011 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2012 else
2013 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2014 }
2015 }
2016
2017 out:
2018 return(error);
2019 }
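
/*
 * Illustrative sketch (not part of the original file): setting the mode
 * on a vnode through vnode_setattr().  VATTR_SET() marks the attribute
 * active and stores the value in one step.
 */
#if 0
static int
example_chmod(vnode_t vp, mode_t mode, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, mode);
	return (vnode_setattr(vp, &va, ctx));
}
#endif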
2020
2021 /*
2022 * Fallback for setting the attributes on a vnode in a vnode context. This
2023 * function will attempt to store ACL, UUID, and GUID information utilizing
2024 * a read/modify/write operation against an EA used as a backing store for
2025 * the object.
2026 *
2027 * Parameters: vp The vnode whose attributes to set.
2028 * vap A pointer to the attributes to set.
2029 * ctx The vnode context in which the
2030 * operation is to be attempted.
2031 *
2032 * Returns: 0 Success
2033 * !0 errno value
2034 *
2035 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2036 * as are the fsec and lfsec, if they are used.
2037 *
2038 * The contents of the data area pointed to by 'vap' may be
2039 * modified to indicate that the attribute is supported for
2040 * any given requested attribute.
2041 *
2042 * XXX: We should enumerate the possible errno values here, and where
2043 * in the code they originated.
2044 */
2045 int
2046 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2047 {
2048 kauth_filesec_t fsec;
2049 kauth_acl_t facl;
2050 struct kauth_filesec lfsec;
2051 int error;
2052
2053 error = 0;
2054
2055 /*
2056 * Extended security fallback via extended attributes.
2057 *
2058 * Note that we do not free the filesec; the caller is expected to
2059 * do this.
2060 */
2061 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2062 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2063 VATTR_NOT_RETURNED(vap, va_guuid)) {
2064 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2065
2066 /*
2067 * Fail for file types that we don't permit extended security
2068 * to be set on.
2069 */
2070 if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
2071 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2072 error = EINVAL;
2073 goto out;
2074 }
2075
2076 /*
2077 * If we don't have all the extended security items, we need
2078 * to fetch the existing data to perform a read-modify-write
2079 * operation.
2080 */
2081 fsec = NULL;
2082 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2083 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2084 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2085 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2086 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2087 goto out;
2088 }
2089 }
2090 /* if we didn't get a filesec, use our local one */
2091 if (fsec == NULL) {
2092 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2093 fsec = &lfsec;
2094 } else {
2095 KAUTH_DEBUG("SETATTR - updating existing filesec");
2096 }
2097 /* find the ACL */
2098 facl = &fsec->fsec_acl;
2099
2100 /* if we're using the local filesec, we need to initialise it */
2101 if (fsec == &lfsec) {
2102 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2103 fsec->fsec_owner = kauth_null_guid;
2104 fsec->fsec_group = kauth_null_guid;
2105 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2106 facl->acl_flags = 0;
2107 }
2108
2109 /*
2110 * Update with the supplied attributes.
2111 */
2112 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2113 KAUTH_DEBUG("SETATTR - updating owner UUID");
2114 fsec->fsec_owner = vap->va_uuuid;
2115 VATTR_SET_SUPPORTED(vap, va_uuuid);
2116 }
2117 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2118 KAUTH_DEBUG("SETATTR - updating group UUID");
2119 fsec->fsec_group = vap->va_guuid;
2120 VATTR_SET_SUPPORTED(vap, va_guuid);
2121 }
2122 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2123 if (vap->va_acl == NULL) {
2124 KAUTH_DEBUG("SETATTR - removing ACL");
2125 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2126 } else {
2127 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2128 facl = vap->va_acl;
2129 }
2130 VATTR_SET_SUPPORTED(vap, va_acl);
2131 }
2132
2133 /*
2134 * If the filesec data is all invalid, we can just remove
2135 * the EA completely.
2136 */
2137 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2138 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2139 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2140 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2141 /* no attribute is ok, nothing to delete */
2142 if (error == ENOATTR)
2143 error = 0;
2144 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2145 } else {
2146 /* write the EA */
2147 error = vnode_set_filesec(vp, fsec, facl, ctx);
2148 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2149 }
2150
2151 /* if we fetched a filesec, dispose of the buffer */
2152 if (fsec != &lfsec)
2153 kauth_filesec_free(fsec);
2154 }
2155 out:
2156
2157 return(error);
2158 }
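
/*
 * Illustrative sketch (not compiled): how a filesystem without native
 * ACL storage cooperates with the fallback above.  Its setattr handler
 * simply never marks va_acl/va_uuuid/va_guuid as supported, so
 * vnode_setattr() sees VATTR_NOT_RETURNED() on them and routes the
 * request through vnode_setattr_fallback().  'examplefs_apply_mode'
 * is a hypothetical helper, not a real KPI.
 */
#if 0
static int
examplefs_vnop_setattr(struct vnop_setattr_args *ap)
{
	struct vnode_attr *vap = ap->a_vap;

	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		examplefs_apply_mode(ap->a_vp, vap->va_mode);
		VATTR_SET_SUPPORTED(vap, va_mode);
	}
	/* va_acl et al. deliberately left unmarked: fallback takes over */
	return (0);
}
#endif /* 0 */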
2159
2160 /*
2161 * Definition of vnode operations.
2162 */
2163
2164 #if 0
2165 /*
2166 *#
2167 *#% lookup dvp L ? ?
2168 *#% lookup vpp - L -
2169 */
2170 struct vnop_lookup_args {
2171 struct vnodeop_desc *a_desc;
2172 vnode_t a_dvp;
2173 vnode_t *a_vpp;
2174 struct componentname *a_cnp;
2175 vfs_context_t a_context;
2176 };
2177 #endif /* 0*/
2178
2179 errno_t
2180 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t context)
2181 {
2182 int _err;
2183 struct vnop_lookup_args a;
2184 vnode_t vp;
2185 int thread_safe;
2186 int funnel_state = 0;
2187
2188 a.a_desc = &vnop_lookup_desc;
2189 a.a_dvp = dvp;
2190 a.a_vpp = vpp;
2191 a.a_cnp = cnp;
2192 a.a_context = context;
2193 thread_safe = THREAD_SAFE_FS(dvp);
2194
2195 vnode_cache_credentials(dvp, context);
2196
2197 if (!thread_safe) {
2198 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2199 return (_err);
2200 }
2201 }
2202 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
2203
2204 vp = *vpp;
2205
2206 if (!thread_safe) {
2207 if ( (cnp->cn_flags & ISLASTCN) ) {
2208 if ( (cnp->cn_flags & LOCKPARENT) ) {
2209 if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
2210 /*
2211 * leave the fsnode lock held on
2212 * the directory, but restore the funnel...
2213 * also indicate that we need to drop the
2214 * fsnode_lock when we're done with the
2215 * system call processing for this path
2216 */
2217 cnp->cn_flags |= FSNODELOCKHELD;
2218
2219 (void) thread_funnel_set(kernel_flock, funnel_state);
2220 return (_err);
2221 }
2222 }
2223 }
2224 unlock_fsnode(dvp, &funnel_state);
2225 }
2226 return (_err);
2227 }
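
/*
 * Illustrative sketch (not compiled): how the v_op dispatch used by
 * these wrappers gets populated.  A filesystem supplies a table that
 * maps operation descriptors (vnop_lookup_desc, etc.) to its handlers;
 * vdesc_offset then indexes the compiled vector.  All 'examplefs_*'
 * names are hypothetical; vn_default_error and the descriptor names
 * are real.
 */
#if 0
extern int examplefs_vnop_lookup(struct vnop_lookup_args *ap);

int (**examplefs_vnodeop_p)(void *);

static struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vnop_default_desc, (int (*)(void *))vn_default_error },
	{ &vnop_lookup_desc,  (int (*)(void *))examplefs_vnop_lookup },
	{ NULL, (int (*)(void *))NULL }
};

struct vnodeopv_desc examplefs_vnodeop_opv_desc =
	{ &examplefs_vnodeop_p, examplefs_vnodeop_entries };
#endif /* 0 */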
2228
2229 #if 0
2230 /*
2231 *#
2232 *#% create dvp L L L
2233 *#% create vpp - L -
2234 *#
2235 */
2236
2237 struct vnop_create_args {
2238 struct vnodeop_desc *a_desc;
2239 vnode_t a_dvp;
2240 vnode_t *a_vpp;
2241 struct componentname *a_cnp;
2242 struct vnode_attr *a_vap;
2243 vfs_context_t a_context;
2244 };
2245 #endif /* 0*/
2246 errno_t
2247 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
2248 {
2249 int _err;
2250 struct vnop_create_args a;
2251 int thread_safe;
2252 int funnel_state = 0;
2253
2254 a.a_desc = &vnop_create_desc;
2255 a.a_dvp = dvp;
2256 a.a_vpp = vpp;
2257 a.a_cnp = cnp;
2258 a.a_vap = vap;
2259 a.a_context = context;
2260 thread_safe = THREAD_SAFE_FS(dvp);
2261
2262 if (!thread_safe) {
2263 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2264 return (_err);
2265 }
2266 }
2267 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
2268 if (_err == 0 && !NATIVE_XATTR(dvp)) {
2269 /*
2270 * Remove stale Apple Double file (if any).
2271 */
2272 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
2273 }
2274 if (!thread_safe) {
2275 unlock_fsnode(dvp, &funnel_state);
2276 }
2277 return (_err);
2278 }
2279
2280 #if 0
2281 /*
2282 *#
2283 *#% whiteout dvp L L L
2284 *#% whiteout cnp - - -
2285 *#% whiteout flag - - -
2286 *#
2287 */
2288 struct vnop_whiteout_args {
2289 struct vnodeop_desc *a_desc;
2290 vnode_t a_dvp;
2291 struct componentname *a_cnp;
2292 int a_flags;
2293 vfs_context_t a_context;
2294 };
2295 #endif /* 0*/
2296 errno_t
2297 VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t context)
2298 {
2299 int _err;
2300 struct vnop_whiteout_args a;
2301 int thread_safe;
2302 int funnel_state = 0;
2303
2304 a.a_desc = &vnop_whiteout_desc;
2305 a.a_dvp = dvp;
2306 a.a_cnp = cnp;
2307 a.a_flags = flags;
2308 a.a_context = context;
2309 thread_safe = THREAD_SAFE_FS(dvp);
2310
2311 if (!thread_safe) {
2312 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2313 return (_err);
2314 }
2315 }
2316 _err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);
2317 if (!thread_safe) {
2318 unlock_fsnode(dvp, &funnel_state);
2319 }
2320 return (_err);
2321 }
2322
2323 #if 0
2324 /*
2325 *#
2326 *#% mknod dvp L U U
2327 *#% mknod vpp - X -
2328 *#
2329 */
2330 struct vnop_mknod_args {
2331 struct vnodeop_desc *a_desc;
2332 vnode_t a_dvp;
2333 vnode_t *a_vpp;
2334 struct componentname *a_cnp;
2335 struct vnode_attr *a_vap;
2336 vfs_context_t a_context;
2337 };
2338 #endif /* 0*/
2339 errno_t
2340 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
2341 {
2342
2343 int _err;
2344 struct vnop_mknod_args a;
2345 int thread_safe;
2346 int funnel_state = 0;
2347
2348 a.a_desc = &vnop_mknod_desc;
2349 a.a_dvp = dvp;
2350 a.a_vpp = vpp;
2351 a.a_cnp = cnp;
2352 a.a_vap = vap;
2353 a.a_context = context;
2354 thread_safe = THREAD_SAFE_FS(dvp);
2355
2356 if (!thread_safe) {
2357 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2358 return (_err);
2359 }
2360 }
2361 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
2362 if (!thread_safe) {
2363 unlock_fsnode(dvp, &funnel_state);
2364 }
2365 return (_err);
2366 }
2367
2368 #if 0
2369 /*
2370 *#
2371 *#% open vp L L L
2372 *#
2373 */
2374 struct vnop_open_args {
2375 struct vnodeop_desc *a_desc;
2376 vnode_t a_vp;
2377 int a_mode;
2378 vfs_context_t a_context;
2379 };
2380 #endif /* 0*/
2381 errno_t
2382 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t context)
2383 {
2384 int _err;
2385 struct vnop_open_args a;
2386 int thread_safe;
2387 int funnel_state = 0;
2388 struct vfs_context acontext;
2389
2390 if (context == NULL) {
2391 acontext.vc_proc = current_proc();
2392 acontext.vc_ucred = kauth_cred_get();
2393 context = &acontext;
2394 }
2395 a.a_desc = &vnop_open_desc;
2396 a.a_vp = vp;
2397 a.a_mode = mode;
2398 a.a_context = context;
2399 thread_safe = THREAD_SAFE_FS(vp);
2400
2401 if (!thread_safe) {
2402 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2403 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2404 if ( (_err = lock_fsnode(vp, NULL)) ) {
2405 (void) thread_funnel_set(kernel_flock, funnel_state);
2406 return (_err);
2407 }
2408 }
2409 }
2410 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
2411 if (!thread_safe) {
2412 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2413 unlock_fsnode(vp, NULL);
2414 }
2415 (void) thread_funnel_set(kernel_flock, funnel_state);
2416 }
2417 return (_err);
2418 }
2419
2420 #if 0
2421 /*
2422 *#
2423 *#% close vp U U U
2424 *#
2425 */
2426 struct vnop_close_args {
2427 struct vnodeop_desc *a_desc;
2428 vnode_t a_vp;
2429 int a_fflag;
2430 vfs_context_t a_context;
2431 };
2432 #endif /* 0*/
2433 errno_t
2434 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t context)
2435 {
2436 int _err;
2437 struct vnop_close_args a;
2438 int thread_safe;
2439 int funnel_state = 0;
2440 struct vfs_context acontext;
2441
2442 if (context == NULL) {
2443 acontext.vc_proc = current_proc();
2444 acontext.vc_ucred = kauth_cred_get();
2445 context = &acontext;
2446 }
2447 a.a_desc = &vnop_close_desc;
2448 a.a_vp = vp;
2449 a.a_fflag = fflag;
2450 a.a_context = context;
2451 thread_safe = THREAD_SAFE_FS(vp);
2452
2453 if (!thread_safe) {
2454 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2455 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2456 if ( (_err = lock_fsnode(vp, NULL)) ) {
2457 (void) thread_funnel_set(kernel_flock, funnel_state);
2458 return (_err);
2459 }
2460 }
2461 }
2462 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
2463 if (!thread_safe) {
2464 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2465 unlock_fsnode(vp, NULL);
2466 }
2467 (void) thread_funnel_set(kernel_flock, funnel_state);
2468 }
2469 return (_err);
2470 }
2471
2472 #if 0
2473 /*
2474 *#
2475 *#% access vp L L L
2476 *#
2477 */
2478 struct vnop_access_args {
2479 struct vnodeop_desc *a_desc;
2480 vnode_t a_vp;
2481 int a_action;
2482 vfs_context_t a_context;
2483 };
2484 #endif /* 0*/
2485 errno_t
2486 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t context)
2487 {
2488 int _err;
2489 struct vnop_access_args a;
2490 int thread_safe;
2491 int funnel_state = 0;
2492 struct vfs_context acontext;
2493
2494 if (context == NULL) {
2495 acontext.vc_proc = current_proc();
2496 acontext.vc_ucred = kauth_cred_get();
2497 context = &acontext;
2498 }
2499 a.a_desc = &vnop_access_desc;
2500 a.a_vp = vp;
2501 a.a_action = action;
2502 a.a_context = context;
2503 thread_safe = THREAD_SAFE_FS(vp);
2504
2505 if (!thread_safe) {
2506 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2507 return (_err);
2508 }
2509 }
2510 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
2511 if (!thread_safe) {
2512 unlock_fsnode(vp, &funnel_state);
2513 }
2514 return (_err);
2515 }
2516
2517 #if 0
2518 /*
2519 *#
2520 *#% getattr vp = = =
2521 *#
2522 */
2523 struct vnop_getattr_args {
2524 struct vnodeop_desc *a_desc;
2525 vnode_t a_vp;
2526 struct vnode_attr *a_vap;
2527 vfs_context_t a_context;
2528 };
2529 #endif /* 0*/
2530 errno_t
2531 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
2532 {
2533 int _err;
2534 struct vnop_getattr_args a;
2535 int thread_safe;
2536 int funnel_state;
2537
2538 a.a_desc = &vnop_getattr_desc;
2539 a.a_vp = vp;
2540 a.a_vap = vap;
2541 a.a_context = context;
2542 thread_safe = THREAD_SAFE_FS(vp);
2543
2544 if (!thread_safe) {
2545 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2546 return (_err);
2547 }
2548 }
2549 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
2550 if (!thread_safe) {
2551 unlock_fsnode(vp, &funnel_state);
2552 }
2553 return (_err);
2554 }
2555
2556 #if 0
2557 /*
2558 *#
2559 *#% setattr vp L L L
2560 *#
2561 */
2562 struct vnop_setattr_args {
2563 struct vnodeop_desc *a_desc;
2564 vnode_t a_vp;
2565 struct vnode_attr *a_vap;
2566 vfs_context_t a_context;
2567 };
2568 #endif /* 0*/
2569 errno_t
2570 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
2571 {
2572 int _err;
2573 struct vnop_setattr_args a;
2574 int thread_safe;
2575 int funnel_state;
2576
2577 a.a_desc = &vnop_setattr_desc;
2578 a.a_vp = vp;
2579 a.a_vap = vap;
2580 a.a_context = context;
2581 thread_safe = THREAD_SAFE_FS(vp);
2582
2583 if (!thread_safe) {
2584 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2585 return (_err);
2586 }
2587 }
2588 _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
2589
2590 /*
2591 * Shadow uid/gid/mode changes to the extended attribute file.
2592 */
2593 if (_err == 0 && !NATIVE_XATTR(vp)) {
2594 struct vnode_attr va;
2595 int change = 0;
2596
2597 VATTR_INIT(&va);
2598 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2599 VATTR_SET(&va, va_uid, vap->va_uid);
2600 change = 1;
2601 }
2602 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2603 VATTR_SET(&va, va_gid, vap->va_gid);
2604 change = 1;
2605 }
2606 if (VATTR_IS_ACTIVE(vap, va_mode)) {
2607 VATTR_SET(&va, va_mode, vap->va_mode);
2608 change = 1;
2609 }
2610 if (change) {
2611 vnode_t dvp;
2612 char *vname;
2613
2614 dvp = vnode_getparent(vp);
2615 vname = vnode_getname(vp);
2616
2617 xattrfile_setattr(dvp, vname, &va, context, thread_safe);
2618 if (dvp != NULLVP)
2619 vnode_put(dvp);
2620 if (vname != NULL)
2621 vnode_putname(vname);
2622 }
2623 }
2624 if (!thread_safe) {
2625 unlock_fsnode(vp, &funnel_state);
2626 }
2627 return (_err);
2628 }
2629
2630 #if 0
2631 /*
2632 *#
2633 *#% getattrlist vp = = =
2634 *#
2635 */
2636 struct vnop_getattrlist_args {
2637 struct vnodeop_desc *a_desc;
2638 vnode_t a_vp;
2639 struct attrlist *a_alist;
2640 struct uio *a_uio;
2641 int a_options;
2642 vfs_context_t a_context;
2643 };
2644 #endif /* 0*/
2645 errno_t
2646 VNOP_GETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
2647 {
2648 int _err;
2649 struct vnop_getattrlist_args a;
2650 int thread_safe;
2651 int funnel_state = 0;
2652
2653 a.a_desc = &vnop_getattrlist_desc;
2654 a.a_vp = vp;
2655 a.a_alist = alist;
2656 a.a_uio = uio;
2657 a.a_options = options;
2658 a.a_context = context;
2659 thread_safe = THREAD_SAFE_FS(vp);
2660
2661 if (!thread_safe) {
2662 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2663 return (_err);
2664 }
2665 }
2666 _err = (*vp->v_op[vnop_getattrlist_desc.vdesc_offset])(&a);
2667 if (!thread_safe) {
2668 unlock_fsnode(vp, &funnel_state);
2669 }
2670 return (_err);
2671 }
2672
2673 #if 0
2674 /*
2675 *#
2676 *#% setattrlist vp L L L
2677 *#
2678 */
2679 struct vnop_setattrlist_args {
2680 struct vnodeop_desc *a_desc;
2681 vnode_t a_vp;
2682 struct attrlist *a_alist;
2683 struct uio *a_uio;
2684 int a_options;
2685 vfs_context_t a_context;
2686 };
2687 #endif /* 0*/
2688 errno_t
2689 VNOP_SETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
2690 {
2691 int _err;
2692 struct vnop_setattrlist_args a;
2693 int thread_safe;
2694 int funnel_state = 0;
2695
2696 a.a_desc = &vnop_setattrlist_desc;
2697 a.a_vp = vp;
2698 a.a_alist = alist;
2699 a.a_uio = uio;
2700 a.a_options = options;
2701 a.a_context = context;
2702 thread_safe = THREAD_SAFE_FS(vp);
2703
2704 if (!thread_safe) {
2705 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2706 return (_err);
2707 }
2708 }
2709 _err = (*vp->v_op[vnop_setattrlist_desc.vdesc_offset])(&a);
2710
2711 vnode_uncache_credentials(vp);
2712
2713 if (!thread_safe) {
2714 unlock_fsnode(vp, &funnel_state);
2715 }
2716 return (_err);
2717 }
2718
2719
2720 #if 0
2721 /*
2722 *#
2723 *#% read vp L L L
2724 *#
2725 */
2726 struct vnop_read_args {
2727 struct vnodeop_desc *a_desc;
2728 vnode_t a_vp;
2729 struct uio *a_uio;
2730 int a_ioflag;
2731 vfs_context_t a_context;
2732 };
2733 #endif /* 0*/
2734 errno_t
2735 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
2736 {
2737 int _err;
2738 struct vnop_read_args a;
2739 int thread_safe;
2740 int funnel_state = 0;
2741 struct vfs_context acontext;
2742
2743 if (context == NULL) {
2744 acontext.vc_proc = current_proc();
2745 acontext.vc_ucred = kauth_cred_get();
2746 context = &acontext;
2747 }
2748
2749 a.a_desc = &vnop_read_desc;
2750 a.a_vp = vp;
2751 a.a_uio = uio;
2752 a.a_ioflag = ioflag;
2753 a.a_context = context;
2754 thread_safe = THREAD_SAFE_FS(vp);
2755
2756 if (!thread_safe) {
2757 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2758 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2759 if ( (_err = lock_fsnode(vp, NULL)) ) {
2760 (void) thread_funnel_set(kernel_flock, funnel_state);
2761 return (_err);
2762 }
2763 }
2764 }
2765 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
2766
2767 if (!thread_safe) {
2768 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2769 unlock_fsnode(vp, NULL);
2770 }
2771 (void) thread_funnel_set(kernel_flock, funnel_state);
2772 }
2773 return (_err);
2774 }
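
/*
 * Illustrative sketch (not compiled): pairing VNOP_OPEN, VNOP_READ and
 * VNOP_CLOSE from kernel code.  These wrappers tolerate a NULL context
 * (they synthesize one from the current thread, as above), but an
 * explicit context is shown.  'example_read_head' is hypothetical; the
 * uio_create/uio_addiov/uio_free KPI calls are real.
 */
#if 0
static int
example_read_head(vnode_t vp, char *buf, int buflen, vfs_context_t ctx)
{
	uio_t uio;
	int error;

	if ((error = VNOP_OPEN(vp, FREAD, ctx)))
		return (error);

	uio = uio_create(1, 0 /* offset */, UIO_SYSSPACE, UIO_READ);
	uio_addiov(uio, CAST_USER_ADDR_T(buf), buflen);

	error = VNOP_READ(vp, uio, 0 /* ioflag */, ctx);

	uio_free(uio);
	(void) VNOP_CLOSE(vp, FREAD, ctx);
	return (error);
}
#endif /* 0 */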
2775
2776
2777 #if 0
2778 /*
2779 *#
2780 *#% write vp L L L
2781 *#
2782 */
2783 struct vnop_write_args {
2784 struct vnodeop_desc *a_desc;
2785 vnode_t a_vp;
2786 struct uio *a_uio;
2787 int a_ioflag;
2788 vfs_context_t a_context;
2789 };
2790 #endif /* 0*/
2791 errno_t
2792 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
2793 {
2794 struct vnop_write_args a;
2795 int _err;
2796 int thread_safe;
2797 int funnel_state = 0;
2798 struct vfs_context acontext;
2799
2800 if (context == NULL) {
2801 acontext.vc_proc = current_proc();
2802 acontext.vc_ucred = kauth_cred_get();
2803 context = &acontext;
2804 }
2805
2806 a.a_desc = &vnop_write_desc;
2807 a.a_vp = vp;
2808 a.a_uio = uio;
2809 a.a_ioflag = ioflag;
2810 a.a_context = context;
2811 thread_safe = THREAD_SAFE_FS(vp);
2812
2813 if (!thread_safe) {
2814 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2815 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2816 if ( (_err = lock_fsnode(vp, NULL)) ) {
2817 (void) thread_funnel_set(kernel_flock, funnel_state);
2818 return (_err);
2819 }
2820 }
2821 }
2822 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
2823
2824 if (!thread_safe) {
2825 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2826 unlock_fsnode(vp, NULL);
2827 }
2828 (void) thread_funnel_set(kernel_flock, funnel_state);
2829 }
2830 return (_err);
2831 }
2832
2833
2834 #if 0
2835 /*
2836 *#
2837 *#% ioctl vp U U U
2838 *#
2839 */
2840 struct vnop_ioctl_args {
2841 struct vnodeop_desc *a_desc;
2842 vnode_t a_vp;
2843 u_long a_command;
2844 caddr_t a_data;
2845 int a_fflag;
2846 vfs_context_t a_context;
2847 };
2848 #endif /* 0*/
2849 errno_t
2850 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t context)
2851 {
2852 int _err;
2853 struct vnop_ioctl_args a;
2854 int thread_safe;
2855 int funnel_state = 0;
2856 struct vfs_context acontext;
2857
2858 if (context == NULL) {
2859 acontext.vc_proc = current_proc();
2860 acontext.vc_ucred = kauth_cred_get();
2861 context = &acontext;
2862 }
2863
2864 if (vfs_context_is64bit(context)) {
2865 if (!vnode_vfs64bitready(vp)) {
2866 return(ENOTTY);
2867 }
2868 }
2869
2870 a.a_desc = &vnop_ioctl_desc;
2871 a.a_vp = vp;
2872 a.a_command = command;
2873 a.a_data = data;
2874 a.a_fflag = fflag;
2875 a.a_context = context;
2876 thread_safe = THREAD_SAFE_FS(vp);
2877
2878 if (!thread_safe) {
2879 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2880 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2881 if ( (_err = lock_fsnode(vp, NULL)) ) {
2882 (void) thread_funnel_set(kernel_flock, funnel_state);
2883 return (_err);
2884 }
2885 }
2886 }
2887 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
2888 if (!thread_safe) {
2889 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2890 unlock_fsnode(vp, NULL);
2891 }
2892 (void) thread_funnel_set(kernel_flock, funnel_state);
2893 }
2894 return (_err);
2895 }
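
/*
 * Illustrative sketch (not compiled): the ENOTTY guard above only
 * forwards ioctls from 64-bit processes to filesystems that declared
 * themselves 64-bit clean at registration, e.g. with VFS_TBL64BITREADY
 * in the vfs_fsentry flags passed to vfs_fsadd().  VFS_TBLTHREADSAFE
 * is likewise what makes THREAD_SAFE_FS() true for a filesystem's
 * vnodes.  All 'examplefs_*' names are hypothetical.
 */
#if 0
struct vfs_fsentry examplefs_fsentry = {
	&examplefs_vfsops,			/* filesystem operations */
	1,					/* # of vnodeopv_desc tables */
	examplefs_opv_descs,			/* the tables themselves */
	0,					/* historic fs type number */
	"examplefs",				/* fs type name */
	VFS_TBL64BITREADY | VFS_TBLTHREADSAFE,	/* capability flags */
	{ NULL, NULL }				/* reserved */
};
#endif /* 0 */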
2896
2897
2898 #if 0
2899 /*
2900 *#
2901 *#% select vp U U U
2902 *#
2903 */
2904 struct vnop_select_args {
2905 struct vnodeop_desc *a_desc;
2906 vnode_t a_vp;
2907 int a_which;
2908 int a_fflags;
2909 void *a_wql;
2910 vfs_context_t a_context;
2911 };
2912 #endif /* 0*/
2913 errno_t
2914 VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t context)
2915 {
2916 int _err;
2917 struct vnop_select_args a;
2918 int thread_safe;
2919 int funnel_state = 0;
2920 struct vfs_context acontext;
2921
2922 if (context == NULL) {
2923 acontext.vc_proc = current_proc();
2924 acontext.vc_ucred = kauth_cred_get();
2925 context = &acontext;
2926 }
2927 a.a_desc = &vnop_select_desc;
2928 a.a_vp = vp;
2929 a.a_which = which;
2930 a.a_fflags = fflags;
2931 a.a_context = context;
2932 a.a_wql = wql;
2933 thread_safe = THREAD_SAFE_FS(vp);
2934
2935 if (!thread_safe) {
2936 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2937 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2938 if ( (_err = lock_fsnode(vp, NULL)) ) {
2939 (void) thread_funnel_set(kernel_flock, funnel_state);
2940 return (_err);
2941 }
2942 }
2943 }
2944 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
2945 if (!thread_safe) {
2946 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2947 unlock_fsnode(vp, NULL);
2948 }
2949 (void) thread_funnel_set(kernel_flock, funnel_state);
2950 }
2951 return (_err);
2952 }
2953
2954
2955 #if 0
2956 /*
2957 *#
2958 *#% exchange fvp L L L
2959 *#% exchange tvp L L L
2960 *#
2961 */
2962 struct vnop_exchange_args {
2963 struct vnodeop_desc *a_desc;
2964 vnode_t a_fvp;
2965 vnode_t a_tvp;
2966 int a_options;
2967 vfs_context_t a_context;
2968 };
2969 #endif /* 0*/
2970 errno_t
2971 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t context)
2972 {
2973 int _err;
2974 struct vnop_exchange_args a;
2975 int thread_safe;
2976 int funnel_state = 0;
2977 vnode_t lock_first = NULL, lock_second = NULL;
2978
2979 a.a_desc = &vnop_exchange_desc;
2980 a.a_fvp = fvp;
2981 a.a_tvp = tvp;
2982 a.a_options = options;
2983 a.a_context = context;
2984 thread_safe = THREAD_SAFE_FS(fvp);
2985
2986 if (!thread_safe) {
2987 /*
2988 * Lock in vnode address order to avoid deadlocks
2989 */
2990 if (fvp < tvp) {
2991 lock_first = fvp;
2992 lock_second = tvp;
2993 } else {
2994 lock_first = tvp;
2995 lock_second = fvp;
2996 }
2997 if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
2998 return (_err);
2999 }
3000 if ( (_err = lock_fsnode(lock_second, NULL)) ) {
3001 unlock_fsnode(lock_first, &funnel_state);
3002 return (_err);
3003 }
3004 }
3005 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3006 if (!thread_safe) {
3007 unlock_fsnode(lock_second, NULL);
3008 unlock_fsnode(lock_first, &funnel_state);
3009 }
3010 return (_err);
3011 }
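
/*
 * Illustrative sketch (not compiled): the address-order locking idiom
 * used above and again in VNOP_RENAME.  Ordering by vnode address
 * gives every caller the same global lock order, which is what rules
 * out an A-then-B versus B-then-A deadlock between two concurrent
 * operations on the same pair of vnodes.
 */
#if 0
static void
example_order_pair(vnode_t a, vnode_t b, vnode_t *firstp, vnode_t *secondp)
{
	if (a < b) {
		*firstp = a;
		*secondp = b;
	} else {
		*firstp = b;
		*secondp = a;
	}
}
#endif /* 0 */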
3012
3013
3014 #if 0
3015 /*
3016 *#
3017 *#% revoke vp U U U
3018 *#
3019 */
3020 struct vnop_revoke_args {
3021 struct vnodeop_desc *a_desc;
3022 vnode_t a_vp;
3023 int a_flags;
3024 vfs_context_t a_context;
3025 };
3026 #endif /* 0*/
3027 errno_t
3028 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t context)
3029 {
3030 struct vnop_revoke_args a;
3031 int _err;
3032 int thread_safe;
3033 int funnel_state = 0;
3034
3035 a.a_desc = &vnop_revoke_desc;
3036 a.a_vp = vp;
3037 a.a_flags = flags;
3038 a.a_context = context;
3039 thread_safe = THREAD_SAFE_FS(vp);
3040
3041 if (!thread_safe) {
3042 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3043 }
3044 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3045 if (!thread_safe) {
3046 (void) thread_funnel_set(kernel_flock, funnel_state);
3047 }
3048 return (_err);
3049 }
3050
3051
3052 #if 0
3053 /*
3054 *#
3055 *# mmap - vp U U U
3056 *#
3057 */
3058 struct vnop_mmap_args {
3059 struct vnodeop_desc *a_desc;
3060 vnode_t a_vp;
3061 int a_fflags;
3062 vfs_context_t a_context;
3063 };
3064 #endif /* 0*/
3065 errno_t
3066 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t context)
3067 {
3068 int _err;
3069 struct vnop_mmap_args a;
3070 int thread_safe;
3071 int funnel_state = 0;
3072
3073 a.a_desc = &vnop_mmap_desc;
3074 a.a_vp = vp;
3075 a.a_fflags = fflags;
3076 a.a_context = context;
3077 thread_safe = THREAD_SAFE_FS(vp);
3078
3079 if (!thread_safe) {
3080 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3081 return (_err);
3082 }
3083 }
3084 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3085 if (!thread_safe) {
3086 unlock_fsnode(vp, &funnel_state);
3087 }
3088 return (_err);
3089 }
3090
3091
3092 #if 0
3093 /*
3094 *#
3095 *# mnomap - vp U U U
3096 *#
3097 */
3098 struct vnop_mnomap_args {
3099 struct vnodeop_desc *a_desc;
3100 vnode_t a_vp;
3101 vfs_context_t a_context;
3102 };
3103 #endif /* 0*/
3104 errno_t
3105 VNOP_MNOMAP(vnode_t vp, vfs_context_t context)
3106 {
3107 int _err;
3108 struct vnop_mnomap_args a;
3109 int thread_safe;
3110 int funnel_state = 0;
3111
3112 a.a_desc = &vnop_mnomap_desc;
3113 a.a_vp = vp;
3114 a.a_context = context;
3115 thread_safe = THREAD_SAFE_FS(vp);
3116
3117 if (!thread_safe) {
3118 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3119 return (_err);
3120 }
3121 }
3122 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3123 if (!thread_safe) {
3124 unlock_fsnode(vp, &funnel_state);
3125 }
3126 return (_err);
3127 }
3128
3129
3130 #if 0
3131 /*
3132 *#
3133 *#% fsync vp L L L
3134 *#
3135 */
3136 struct vnop_fsync_args {
3137 struct vnodeop_desc *a_desc;
3138 vnode_t a_vp;
3139 int a_waitfor;
3140 vfs_context_t a_context;
3141 };
3142 #endif /* 0*/
3143 errno_t
3144 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t context)
3145 {
3146 struct vnop_fsync_args a;
3147 int _err;
3148 int thread_safe;
3149 int funnel_state = 0;
3150
3151 a.a_desc = &vnop_fsync_desc;
3152 a.a_vp = vp;
3153 a.a_waitfor = waitfor;
3154 a.a_context = context;
3155 thread_safe = THREAD_SAFE_FS(vp);
3156
3157 if (!thread_safe) {
3158 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3159 return (_err);
3160 }
3161 }
3162 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3163 if (!thread_safe) {
3164 unlock_fsnode(vp, &funnel_state);
3165 }
3166 return (_err);
3167 }
3168
3169
3170 #if 0
3171 /*
3172 *#
3173 *#% remove dvp L U U
3174 *#% remove vp L U U
3175 *#
3176 */
3177 struct vnop_remove_args {
3178 struct vnodeop_desc *a_desc;
3179 vnode_t a_dvp;
3180 vnode_t a_vp;
3181 struct componentname *a_cnp;
3182 int a_flags;
3183 vfs_context_t a_context;
3184 };
3185 #endif /* 0*/
3186 errno_t
3187 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t context)
3188 {
3189 int _err;
3190 struct vnop_remove_args a;
3191 int thread_safe;
3192 int funnel_state = 0;
3193
3194 a.a_desc = &vnop_remove_desc;
3195 a.a_dvp = dvp;
3196 a.a_vp = vp;
3197 a.a_cnp = cnp;
3198 a.a_flags = flags;
3199 a.a_context = context;
3200 thread_safe = THREAD_SAFE_FS(dvp);
3201
3202 if (!thread_safe) {
3203 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3204 return (_err);
3205 }
3206 }
3207 _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3208
3209 if (_err == 0) {
3210 vnode_setneedinactive(vp);
3211
3212 if ( !(NATIVE_XATTR(dvp)) ) {
3213 /*
3214 * Remove any associated extended attribute file (._ AppleDouble file).
3215 */
3216 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
3217 }
3218 }
3219 if (!thread_safe) {
3220 unlock_fsnode(vp, &funnel_state);
3221 }
3222 return (_err);
3223 }
3224
3225
3226 #if 0
3227 /*
3228 *#
3229 *#% link vp U U U
3230 *#% link tdvp L U U
3231 *#
3232 */
3233 struct vnop_link_args {
3234 struct vnodeop_desc *a_desc;
3235 vnode_t a_vp;
3236 vnode_t a_tdvp;
3237 struct componentname *a_cnp;
3238 vfs_context_t a_context;
3239 };
3240 #endif /* 0*/
3241 errno_t
3242 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t context)
3243 {
3244 int _err;
3245 struct vnop_link_args a;
3246 int thread_safe;
3247 int funnel_state = 0;
3248
3249 /*
3250 * For file systems with non-native extended attributes,
3251 * disallow linking to an existing "._" Apple Double file.
3252 */
3253 if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
3254 char *vname;
3255
3256 vname = vnode_getname(vp);
3257 if (vname != NULL) {
3258 _err = 0;
3259 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
3260 _err = EPERM;
3261 }
3262 vnode_putname(vname);
3263 if (_err)
3264 return (_err);
3265 }
3266 }
3267 a.a_desc = &vnop_link_desc;
3268 a.a_vp = vp;
3269 a.a_tdvp = tdvp;
3270 a.a_cnp = cnp;
3271 a.a_context = context;
3272 thread_safe = THREAD_SAFE_FS(vp);
3273
3274 if (!thread_safe) {
3275 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3276 return (_err);
3277 }
3278 }
3279 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
3280 if (!thread_safe) {
3281 unlock_fsnode(vp, &funnel_state);
3282 }
3283 return (_err);
3284 }
3285
3286
3287 #if 0
3288 /*
3289 *#
3290 *#% rename fdvp U U U
3291 *#% rename fvp U U U
3292 *#% rename tdvp L U U
3293 *#% rename tvp X U U
3294 *#
3295 */
3296 struct vnop_rename_args {
3297 struct vnodeop_desc *a_desc;
3298 vnode_t a_fdvp;
3299 vnode_t a_fvp;
3300 struct componentname *a_fcnp;
3301 vnode_t a_tdvp;
3302 vnode_t a_tvp;
3303 struct componentname *a_tcnp;
3304 vfs_context_t a_context;
3305 };
3306 #endif /* 0*/
3307 errno_t
3308 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
3309 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
3310 vfs_context_t context)
3311 {
3312 int _err;
3313 struct vnop_rename_args a;
3314 int funnel_state = 0;
3315 char smallname1[48];
3316 char smallname2[48];
3317 char *xfromname = NULL;
3318 char *xtoname = NULL;
3319 vnode_t lock_first = NULL, lock_second = NULL;
3320 vnode_t fdvp_unsafe = NULLVP;
3321 vnode_t tdvp_unsafe = NULLVP;
3322
3323 a.a_desc = &vnop_rename_desc;
3324 a.a_fdvp = fdvp;
3325 a.a_fvp = fvp;
3326 a.a_fcnp = fcnp;
3327 a.a_tdvp = tdvp;
3328 a.a_tvp = tvp;
3329 a.a_tcnp = tcnp;
3330 a.a_context = context;
3331
3332 if (!THREAD_SAFE_FS(fdvp))
3333 fdvp_unsafe = fdvp;
3334 if (!THREAD_SAFE_FS(tdvp))
3335 tdvp_unsafe = tdvp;
3336
3337 if (fdvp_unsafe != NULLVP) {
3338 /*
3339 * Lock parents in vnode address order to avoid deadlocks
3340 * note that it's possible for the fdvp to be unsafe,
3341 * but the tdvp to be safe because tvp could be a directory
3342 * in the root of a filesystem... in that case, tdvp is in
3343 * the filesystem that this root is mounted on
3344 */
3345 if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
3346 lock_first = fdvp_unsafe;
3347 lock_second = NULL;
3348 } else if (fdvp_unsafe < tdvp_unsafe) {
3349 lock_first = fdvp_unsafe;
3350 lock_second = tdvp_unsafe;
3351 } else {
3352 lock_first = tdvp_unsafe;
3353 lock_second = fdvp_unsafe;
3354 }
3355 if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
3356 return (_err);
3357
3358 if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
3359 unlock_fsnode(lock_first, &funnel_state);
3360 return (_err);
3361 }
3362
3363 /*
3364 * Lock both children in vnode address order to avoid deadlocks
3365 */
3366 if (tvp == NULL || tvp == fvp) {
3367 lock_first = fvp;
3368 lock_second = NULL;
3369 } else if (fvp < tvp) {
3370 lock_first = fvp;
3371 lock_second = tvp;
3372 } else {
3373 lock_first = tvp;
3374 lock_second = fvp;
3375 }
3376 if ( (_err = lock_fsnode(lock_first, NULL)) )
3377 goto out1;
3378
3379 if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
3380 unlock_fsnode(lock_first, NULL);
3381 goto out1;
3382 }
3383 }
3384 /*
3385 * Save source and destination names (._ AppleDouble files).
3386 * Skip if source already has a "._" prefix.
3387 */
3388 if (!NATIVE_XATTR(fdvp) &&
3389 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
3390 size_t len;
3391
3392 /* Get source attribute file name. */
3393 len = fcnp->cn_namelen + 3;
3394 if (len > sizeof(smallname1)) {
3395 MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
3396 } else {
3397 xfromname = &smallname1[0];
3398 }
3399 strcpy(xfromname, "._");
3400 strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
3401 xfromname[len-1] = '\0';
3402
3403 /* Get destination attribute file name. */
3404 len = tcnp->cn_namelen + 3;
3405 if (len > sizeof(smallname2)) {
3406 MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
3407 } else {
3408 xtoname = &smallname2[0];
3409 }
3410 strcpy(xtoname, "._");
3411 strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
3412 xtoname[len-1] = '\0';
3413 }
3414
3415 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
3416
3417 if (fdvp_unsafe != NULLVP) {
3418 if (lock_second != NULL)
3419 unlock_fsnode(lock_second, NULL);
3420 unlock_fsnode(lock_first, NULL);
3421 }
3422 if (_err == 0) {
3423 if (tvp && tvp != fvp)
3424 vnode_setneedinactive(tvp);
3425 }
3426
3427 /*
3428 * Rename any associated extended attribute file (._ AppleDouble file).
3429 */
3430 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
3431 struct nameidata fromnd, tond;
3432 int killdest = 0;
3433 int error;
3434
3435 /*
3436 * Get source attribute file vnode.
3437 * Note that fdvp already has an iocount reference and
3438 * using DELETE will take an additional reference.
3439 */
3440 NDINIT(&fromnd, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
3441 CAST_USER_ADDR_T(xfromname), context);
3442 fromnd.ni_dvp = fdvp;
3443 error = namei(&fromnd);
3444
3445 if (error) {
3446 /* When the source doesn't exist there may still be a destination. */
3447 if (error == ENOENT) {
3448 killdest = 1;
3449 } else {
3450 goto out;
3451 }
3452 } else if (fromnd.ni_vp->v_type != VREG) {
3453 vnode_put(fromnd.ni_vp);
3454 nameidone(&fromnd);
3455 killdest = 1;
3456 }
3457 if (killdest) {
3458 struct vnop_remove_args args;
3459
3460 /*
3461 * Get destination attribute file vnode.
3462 * Note that tdvp already has an iocount reference.
3463 */
3464 NDINIT(&tond, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
3465 CAST_USER_ADDR_T(xtoname), context);
3466 tond.ni_dvp = tdvp;
3467 error = namei(&tond);
3468 if (error) {
3469 goto out;
3470 }
3471 if (tond.ni_vp->v_type != VREG) {
3472 vnode_put(tond.ni_vp);
3473 nameidone(&tond);
3474 goto out;
3475 }
3476 args.a_desc = &vnop_remove_desc;
3477 args.a_dvp = tdvp;
3478 args.a_vp = tond.ni_vp;
3479 args.a_cnp = &tond.ni_cnd;
3480 args.a_context = context;
3481
3482 if (fdvp_unsafe != NULLVP)
3483 error = lock_fsnode(tond.ni_vp, NULL);
3484 if (error == 0) {
3485 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
3486
3487 if (fdvp_unsafe != NULLVP)
3488 unlock_fsnode(tond.ni_vp, NULL);
3489
3490 if (error == 0)
3491 vnode_setneedinactive(tond.ni_vp);
3492 }
3493 vnode_put(tond.ni_vp);
3494 nameidone(&tond);
3495 goto out;
3496 }
3497
3498 /*
3499 * Get destination attribute file vnode.
3500 */
3501 NDINIT(&tond, RENAME,
3502 NOCACHE | NOFOLLOW | USEDVP, UIO_SYSSPACE,
3503 CAST_USER_ADDR_T(xtoname), context);
3504 tond.ni_dvp = tdvp;
3505 error = namei(&tond);
3506
3507 if (error) {
3508 vnode_put(fromnd.ni_vp);
3509 nameidone(&fromnd);
3510 goto out;
3511 }
3512 a.a_desc = &vnop_rename_desc;
3513 a.a_fdvp = fdvp;
3514 a.a_fvp = fromnd.ni_vp;
3515 a.a_fcnp = &fromnd.ni_cnd;
3516 a.a_tdvp = tdvp;
3517 a.a_tvp = tond.ni_vp;
3518 a.a_tcnp = &tond.ni_cnd;
3519 a.a_context = context;
3520
3521 if (fdvp_unsafe != NULLVP) {
3522 /*
3523 * Lock in vnode address order to avoid deadlocks
3524 */
3525 if (tond.ni_vp == NULL || tond.ni_vp == fromnd.ni_vp) {
3526 lock_first = fromnd.ni_vp;
3527 lock_second = NULL;
3528 } else if (fromnd.ni_vp < tond.ni_vp) {
3529 lock_first = fromnd.ni_vp;
3530 lock_second = tond.ni_vp;
3531 } else {
3532 lock_first = tond.ni_vp;
3533 lock_second = fromnd.ni_vp;
3534 }
3535 if ( (error = lock_fsnode(lock_first, NULL)) == 0) {
3536 if (lock_second != NULL && (error = lock_fsnode(lock_second, NULL)) )
3537 unlock_fsnode(lock_first, NULL);
3538 }
3539 }
3540 if (error == 0) {
3541 error = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
3542
3543 if (fdvp_unsafe != NULLVP) {
3544 if (lock_second != NULL)
3545 unlock_fsnode(lock_second, NULL);
3546 unlock_fsnode(lock_first, NULL);
3547 }
3548 if (error == 0) {
3549 vnode_setneedinactive(fromnd.ni_vp);
3550
3551 if (tond.ni_vp && tond.ni_vp != fromnd.ni_vp)
3552 vnode_setneedinactive(tond.ni_vp);
3553 }
3554 }
3555 vnode_put(fromnd.ni_vp);
3556 if (tond.ni_vp) {
3557 vnode_put(tond.ni_vp);
3558 }
3559 nameidone(&tond);
3560 nameidone(&fromnd);
3561 }
3562 out:
3563 if (xfromname && xfromname != &smallname1[0]) {
3564 FREE(xfromname, M_TEMP);
3565 }
3566 if (xtoname && xtoname != &smallname2[0]) {
3567 FREE(xtoname, M_TEMP);
3568 }
3569 out1:
3570 if (fdvp_unsafe != NULLVP) {
3571 if (tdvp_unsafe != NULLVP)
3572 unlock_fsnode(tdvp_unsafe, NULL);
3573 unlock_fsnode(fdvp_unsafe, &funnel_state);
3574 }
3575 return (_err);
3576 }
3577
3578 #if 0
3579 /*
3580 *#
3581 *#% mkdir dvp L U U
3582 *#% mkdir vpp - L -
3583 *#
3584 */
3585 struct vnop_mkdir_args {
3586 struct vnodeop_desc *a_desc;
3587 vnode_t a_dvp;
3588 vnode_t *a_vpp;
3589 struct componentname *a_cnp;
3590 struct vnode_attr *a_vap;
3591 vfs_context_t a_context;
3592 };
3593 #endif /* 0*/
3594 errno_t
3595 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
3596 struct vnode_attr *vap, vfs_context_t context)
3597 {
3598 int _err;
3599 struct vnop_mkdir_args a;
3600 int thread_safe;
3601 int funnel_state = 0;
3602
3603 a.a_desc = &vnop_mkdir_desc;
3604 a.a_dvp = dvp;
3605 a.a_vpp = vpp;
3606 a.a_cnp = cnp;
3607 a.a_vap = vap;
3608 a.a_context = context;
3609 thread_safe = THREAD_SAFE_FS(dvp);
3610
3611 if (!thread_safe) {
3612 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3613 return (_err);
3614 }
3615 }
3616 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
3617 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3618 /*
3619 * Remove stale Apple Double file (if any).
3620 */
3621 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
3622 }
3623 if (!thread_safe) {
3624 unlock_fsnode(dvp, &funnel_state);
3625 }
3626 return (_err);
3627 }
3628
3629
3630 #if 0
3631 /*
3632 *#
3633 *#% rmdir dvp L U U
3634 *#% rmdir vp L U U
3635 *#
3636 */
3637 struct vnop_rmdir_args {
3638 struct vnodeop_desc *a_desc;
3639 vnode_t a_dvp;
3640 vnode_t a_vp;
3641 struct componentname *a_cnp;
3642 vfs_context_t a_context;
3643 };
3644
3645 #endif /* 0*/
3646 errno_t
3647 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t context)
3648 {
3649 int _err;
3650 struct vnop_rmdir_args a;
3651 int thread_safe;
3652 int funnel_state = 0;
3653
3654 a.a_desc = &vnop_rmdir_desc;
3655 a.a_dvp = dvp;
3656 a.a_vp = vp;
3657 a.a_cnp = cnp;
3658 a.a_context = context;
3659 thread_safe = THREAD_SAFE_FS(dvp);
3660
3661 if (!thread_safe) {
3662 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3663 return (_err);
3664 }
3665 }
3666 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
3667
3668 if (_err == 0) {
3669 vnode_setneedinactive(vp);
3670
3671 if ( !(NATIVE_XATTR(dvp)) ) {
3672 /*
3673 * Remove any associated extended attribute file (._ AppleDouble file).
3674 */
3675 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
3676 }
3677 }
3678 if (!thread_safe) {
3679 unlock_fsnode(vp, &funnel_state);
3680 }
3681 return (_err);
3682 }
3683
3684 /*
3685 * Remove a ._ AppleDouble file
3686 */
3687 #define AD_STALE_SECS (180)
3688 static void
3689 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, int thread_safe, int force) {
3690 vnode_t xvp;
3691 struct nameidata nd;
3692 char smallname[64];
3693 char *filename = NULL;
3694 size_t len;
3695
3696 if ((basename == NULL) || (basename[0] == '\0') ||
3697 (basename[0] == '.' && basename[1] == '_')) {
3698 return;
3699 }
3700 filename = &smallname[0];
3701 len = snprintf(filename, sizeof(smallname), "._%s", basename);
3702 if (len >= sizeof(smallname)) {
3703 len++; /* snprintf result doesn't include '\0' */
3704 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
3705 len = snprintf(filename, len, "._%s", basename);
3706 }
3707 NDINIT(&nd, DELETE, LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
3708 CAST_USER_ADDR_T(filename), context);
3709 nd.ni_dvp = dvp;
3710 if (namei(&nd) != 0)
3711 goto out2;
3712
3713 xvp = nd.ni_vp;
3714 nameidone(&nd);
3715 if (xvp->v_type != VREG)
3716 goto out1;
3717
3718 /*
3719 * When creating a new object and a "._" file already
3720 * exists, check to see if it's a stale "._" file.
3721 *
3722 */
3723 if (!force) {
3724 struct vnode_attr va;
3725
3726 VATTR_INIT(&va);
3727 VATTR_WANTED(&va, va_data_size);
3728 VATTR_WANTED(&va, va_modify_time);
3729 if (VNOP_GETATTR(xvp, &va, context) == 0 &&
3730 VATTR_IS_SUPPORTED(&va, va_data_size) &&
3731 VATTR_IS_SUPPORTED(&va, va_modify_time) &&
3732 va.va_data_size != 0) {
3733 struct timeval tv;
3734
3735 microtime(&tv);
3736 if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
3737 (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
3738 force = 1; /* must be stale */
3739 }
3740 }
3741 }
3742 if (force) {
3743 struct vnop_remove_args a;
3744 int error;
3745
3746 a.a_desc = &vnop_remove_desc;
3747 a.a_dvp = nd.ni_dvp;
3748 a.a_vp = xvp;
3749 a.a_cnp = &nd.ni_cnd;
3750 a.a_context = context;
3751
3752 if (!thread_safe) {
3753 if ( (lock_fsnode(xvp, NULL)) )
3754 goto out1;
3755 }
3756 error = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
3757
3758 if (!thread_safe)
3759 unlock_fsnode(xvp, NULL);
3760
3761 if (error == 0)
3762 vnode_setneedinactive(xvp);
3763 }
3764 out1:
3765 /* Note: nd.ni_dvp's iocount is dropped by caller of VNOP_XXXX */
3766 vnode_put(xvp);
3767 out2:
3768 if (filename && filename != &smallname[0]) {
3769 FREE(filename, M_TEMP);
3770 }
3771 }
3772
3773 /*
3774 * Shadow uid/gid/mode to a ._ AppleDouble file
3775 */
3776 static void
3777 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
3778 vfs_context_t context, int thread_safe) {
3779 vnode_t xvp;
3780 struct nameidata nd;
3781 char smallname[64];
3782 char *filename = NULL;
3783 size_t len;
3784
3785 if ((dvp == NULLVP) ||
3786 (basename == NULL) || (basename[0] == '\0') ||
3787 (basename[0] == '.' && basename[1] == '_')) {
3788 return;
3789 }
3790 filename = &smallname[0];
3791 len = snprintf(filename, sizeof(smallname), "._%s", basename);
3792 if (len >= sizeof(smallname)) {
3793 len++; /* snprintf result doesn't include '\0' */
3794 MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
3795 len = snprintf(filename, len, "._%s", basename);
3796 }
3797 NDINIT(&nd, LOOKUP, NOFOLLOW | USEDVP, UIO_SYSSPACE,
3798 CAST_USER_ADDR_T(filename), context);
3799 nd.ni_dvp = dvp;
3800 if (namei(&nd) != 0)
3801 goto out2;
3802
3803 xvp = nd.ni_vp;
3804 nameidone(&nd);
3805
3806 if (xvp->v_type == VREG) {
3807 struct vnop_setattr_args a;
3808
3809 a.a_desc = &vnop_setattr_desc;
3810 a.a_vp = xvp;
3811 a.a_vap = vap;
3812 a.a_context = context;
3813
3814 if (!thread_safe) {
3815 if ( (lock_fsnode(xvp, NULL)) )
3816 goto out1;
3817 }
3818 (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3819 if (!thread_safe) {
3820 unlock_fsnode(xvp, NULL);
3821 }
3822 }
3823 out1:
3824 vnode_put(xvp);
3825 out2:
3826 if (filename && filename != &smallname[0]) {
3827 FREE(filename, M_TEMP);
3828 }
3829 }
3830
3831 #if 0
3832 /*
3833 *#
3834 *#% symlink dvp L U U
3835 *#% symlink vpp - U -
3836 *#
3837 */
3838 struct vnop_symlink_args {
3839 struct vnodeop_desc *a_desc;
3840 vnode_t a_dvp;
3841 vnode_t *a_vpp;
3842 struct componentname *a_cnp;
3843 struct vnode_attr *a_vap;
3844 char *a_target;
3845 vfs_context_t a_context;
3846 };
3847
3848 #endif /* 0*/
3849 errno_t
3850 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
3851 struct vnode_attr *vap, char *target, vfs_context_t context)
3852 {
3853 int _err;
3854 struct vnop_symlink_args a;
3855 int thread_safe;
3856 int funnel_state = 0;
3857
3858 a.a_desc = &vnop_symlink_desc;
3859 a.a_dvp = dvp;
3860 a.a_vpp = vpp;
3861 a.a_cnp = cnp;
3862 a.a_vap = vap;
3863 a.a_target = target;
3864 a.a_context = context;
3865 thread_safe = THREAD_SAFE_FS(dvp);
3866
3867 if (!thread_safe) {
3868 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3869 return (_err);
3870 }
3871 }
3872 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
3873 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3874 /*
3875 * Remove stale Apple Double file (if any).
3876 */
3877 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
3878 }
3879 if (!thread_safe) {
3880 unlock_fsnode(dvp, &funnel_state);
3881 }
3882 return (_err);
3883 }
3884
3885 #if 0
3886 /*
3887 *#
3888 *#% readdir vp L L L
3889 *#
3890 */
3891 struct vnop_readdir_args {
3892 struct vnodeop_desc *a_desc;
3893 vnode_t a_vp;
3894 struct uio *a_uio;
3895 int a_flags;
3896 int *a_eofflag;
3897 int *a_numdirent;
3898 vfs_context_t a_context;
3899 };
3900
3901 #endif /* 0*/
3902 errno_t
3903 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
3904 int *numdirent, vfs_context_t context)
3905 {
3906 int _err;
3907 struct vnop_readdir_args a;
3908 int thread_safe;
3909 int funnel_state = 0;
3910
3911 a.a_desc = &vnop_readdir_desc;
3912 a.a_vp = vp;
3913 a.a_uio = uio;
3914 a.a_flags = flags;
3915 a.a_eofflag = eofflag;
3916 a.a_numdirent = numdirent;
3917 a.a_context = context;
3918 thread_safe = THREAD_SAFE_FS(vp);
3919
3920 if (!thread_safe) {
3921 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3922 return (_err);
3923 }
3924 }
3925 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
3926 if (!thread_safe) {
3927 unlock_fsnode(vp, &funnel_state);
3928 }
3929 return (_err);
3930 }
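
/*
 * Illustrative sketch (not compiled): draining a directory through
 * VNOP_READDIR.  The wrapper above only handles the funnel/locking;
 * the caller owns the uio and the eofflag/numdirent bookkeeping.
 * 'example_read_dirents' is hypothetical.
 */
#if 0
static int
example_read_dirents(vnode_t dvp, char *buf, int buflen, int *eofp, vfs_context_t ctx)
{
	uio_t uio;
	int numdirent = 0, error;

	uio = uio_create(1, 0 /* offset */, UIO_SYSSPACE, UIO_READ);
	uio_addiov(uio, CAST_USER_ADDR_T(buf), buflen);

	error = VNOP_READDIR(dvp, uio, 0 /* flags */, eofp, &numdirent, ctx);

	uio_free(uio);
	return (error);
}
#endif /* 0 */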
3931
3932 #if 0
3933 /*
3934 *#
3935 *#% readdirattr vp L L L
3936 *#
3937 */
3938 struct vnop_readdirattr_args {
3939 struct vnodeop_desc *a_desc;
3940 vnode_t a_vp;
3941 struct attrlist *a_alist;
3942 struct uio *a_uio;
3943 u_long a_maxcount;
3944 u_long a_options;
3945 u_long *a_newstate;
3946 int *a_eofflag;
3947 u_long *a_actualcount;
3948 vfs_context_t a_context;
3949 };
3950
3951 #endif /* 0*/
3952 errno_t
3953 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_long maxcount,
3954 u_long options, u_long *newstate, int *eofflag, u_long *actualcount, vfs_context_t context)
3955 {
3956 int _err;
3957 struct vnop_readdirattr_args a;
3958 int thread_safe;
3959 int funnel_state = 0;
3960
3961 a.a_desc = &vnop_readdirattr_desc;
3962 a.a_vp = vp;
3963 a.a_alist = alist;
3964 a.a_uio = uio;
3965 a.a_maxcount = maxcount;
3966 a.a_options = options;
3967 a.a_newstate = newstate;
3968 a.a_eofflag = eofflag;
3969 a.a_actualcount = actualcount;
3970 a.a_context = context;
3971 thread_safe = THREAD_SAFE_FS(vp);
3972
3973 if (!thread_safe) {
3974 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3975 return (_err);
3976 }
3977 }
3978 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
3979 if (!thread_safe) {
3980 unlock_fsnode(vp, &funnel_state);
3981 }
3982 return (_err);
3983 }
3984
3985 #if 0
3986 /*
3987 *#
3988 *#% readlink vp L L L
3989 *#
3990 */
3991 struct vnop_readlink_args {
3992 struct vnodeop_desc *a_desc;
3993 vnode_t a_vp;
3994 struct uio *a_uio;
3995 vfs_context_t a_context;
3996 };
3997 #endif /* 0 */
3998
3999 errno_t
4000 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t context)
4001 {
4002 int _err;
4003 struct vnop_readlink_args a;
4004 int thread_safe;
4005 int funnel_state = 0;
4006
4007 a.a_desc = &vnop_readlink_desc;
4008 a.a_vp = vp;
4009 a.a_uio = uio;
4010 a.a_context = context;
4011 thread_safe = THREAD_SAFE_FS(vp);
4012
4013 if (!thread_safe) {
4014 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4015 return (_err);
4016 }
4017 }
4018 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
4019 if (!thread_safe) {
4020 unlock_fsnode(vp, &funnel_state);
4021 }
4022 return (_err);
4023 }
4024
4025 #if 0
4026 /*
4027 *#
4028 *#% inactive vp L U U
4029 *#
4030 */
4031 struct vnop_inactive_args {
4032 struct vnodeop_desc *a_desc;
4033 vnode_t a_vp;
4034 vfs_context_t a_context;
4035 };
4036 #endif /* 0*/
4037 errno_t
4038 VNOP_INACTIVE(struct vnode *vp, vfs_context_t context)
4039 {
4040 int _err;
4041 struct vnop_inactive_args a;
4042 int thread_safe;
4043 int funnel_state = 0;
4044
4045 a.a_desc = &vnop_inactive_desc;
4046 a.a_vp = vp;
4047 a.a_context = context;
4048 thread_safe = THREAD_SAFE_FS(vp);
4049
4050 if (!thread_safe) {
4051 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4052 return (_err);
4053 }
4054 }
4055 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
4056 if (!thread_safe) {
4057 unlock_fsnode(vp, &funnel_state);
4058 }
4059 return (_err);
4060 }
4061
4062
4063 #if 0
4064 /*
4065 *#
4066 *#% reclaim vp U U U
4067 *#
4068 */
4069 struct vnop_reclaim_args {
4070 struct vnodeop_desc *a_desc;
4071 vnode_t a_vp;
4072 vfs_context_t a_context;
4073 };
4074 #endif /* 0*/
4075 errno_t
4076 VNOP_RECLAIM(struct vnode *vp, vfs_context_t context)
4077 {
4078 int _err;
4079 struct vnop_reclaim_args a;
4080 int thread_safe;
4081 int funnel_state = 0;
4082
4083 a.a_desc = &vnop_reclaim_desc;
4084 a.a_vp = vp;
4085 a.a_context = context;
4086 thread_safe = THREAD_SAFE_FS(vp);
4087
4088 if (!thread_safe) {
4089 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4090 }
4091 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
4092 if (!thread_safe) {
4093 (void) thread_funnel_set(kernel_flock, funnel_state);
4094 }
4095 return (_err);
4096 }
4097
4098
4099 #if 0
4100 /*
4101 *#
4102 *#% pathconf vp L L L
4103 *#
4104 */
4105 struct vnop_pathconf_args {
4106 struct vnodeop_desc *a_desc;
4107 vnode_t a_vp;
4108 int a_name;
4109 register_t *a_retval;
4110 vfs_context_t a_context;
4111 };
4112 #endif /* 0*/
4113 errno_t
4114 VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t context)
4115 {
4116 int _err;
4117 struct vnop_pathconf_args a;
4118 int thread_safe;
4119 int funnel_state = 0;
4120
4121 a.a_desc = &vnop_pathconf_desc;
4122 a.a_vp = vp;
4123 a.a_name = name;
4124 a.a_retval = retval;
4125 a.a_context = context;
4126 thread_safe = THREAD_SAFE_FS(vp);
4127
4128 if (!thread_safe) {
4129 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4130 return (_err);
4131 }
4132 }
4133 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
4134 if (!thread_safe) {
4135 unlock_fsnode(vp, &funnel_state);
4136 }
4137 return (_err);
4138 }
4139
4140 #if 0
4141 /*
4142 *#
4143 *#% advlock vp U U U
4144 *#
4145 */
4146 struct vnop_advlock_args {
4147 struct vnodeop_desc *a_desc;
4148 vnode_t a_vp;
4149 caddr_t a_id;
4150 int a_op;
4151 struct flock *a_fl;
4152 int a_flags;
4153 vfs_context_t a_context;
4154 };
4155 #endif /* 0*/
4156 errno_t
4157 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t context)
4158 {
4159 int _err;
4160 struct vnop_advlock_args a;
4161 int thread_safe;
4162 int funnel_state = 0;
4163 struct uthread * uth;
4164
4165 a.a_desc = &vnop_advlock_desc;
4166 a.a_vp = vp;
4167 a.a_id = id;
4168 a.a_op = op;
4169 a.a_fl = fl;
4170 a.a_flags = flags;
4171 a.a_context = context;
4172 thread_safe = THREAD_SAFE_FS(vp);
4173
4174 uth = get_bsdthread_info(current_thread());
4175 if (!thread_safe) {
4176 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4177 }
4178 /* Disallow advisory locking on non-seekable vnodes */
4179 if (vnode_isfifo(vp)) {
4180 _err = err_advlock(&a);
4181 } else {
4182 if ((vp->v_flag & VLOCKLOCAL)) {
4183 /* Advisory locking done at this layer */
4184 _err = lf_advlock(&a);
4185 } else {
4186 /* Advisory locking done by underlying filesystem */
4187 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
4188 }
4189 }
4190 if (!thread_safe) {
4191 (void) thread_funnel_set(kernel_flock, funnel_state);
4192 }
4193 return (_err);
4194 }
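
/*
 * Illustrative sketch (not compiled): opting into the lf_advlock()
 * branch above.  A filesystem that wants the VFS to manage advisory
 * locks locally arranges for VLOCKLOCAL on its vnodes, typically by
 * calling vfs_setlocklocal() once at mount time; otherwise the request
 * falls through to its own vnop.  'examplefs_mount' is hypothetical;
 * vfs_setlocklocal() is assumed available (it is private KPI here).
 */
#if 0
static int
examplefs_mount(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
	/* ... normal mount setup ... */
	vfs_setlocklocal(mp);	/* vnodes of this mount get VLOCKLOCAL */
	return (0);
}
#endif /* 0 */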
4195
4196
4197
4198 #if 0
4199 /*
4200 *#
4201 *#% allocate vp L L L
4202 *#
4203 */
4204 struct vnop_allocate_args {
4205 struct vnodeop_desc *a_desc;
4206 vnode_t a_vp;
4207 off_t a_length;
4208 u_int32_t a_flags;
4209 off_t *a_bytesallocated;
4210 off_t a_offset;
4211 vfs_context_t a_context;
4212 };
4213
4214 #endif /* 0*/
4215 errno_t
4216 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t context)
4217 {
4218 int _err;
4219 struct vnop_allocate_args a;
4220 int thread_safe;
4221 int funnel_state = 0;
4222
4223 a.a_desc = &vnop_allocate_desc;
4224 a.a_vp = vp;
4225 a.a_length = length;
4226 a.a_flags = flags;
4227 a.a_bytesallocated = bytesallocated;
4228 a.a_offset = offset;
4229 a.a_context = context;
4230 thread_safe = THREAD_SAFE_FS(vp);
4231
4232 if (!thread_safe) {
4233 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4234 return (_err);
4235 }
4236 }
4237 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
4238 if (!thread_safe) {
4239 unlock_fsnode(vp, &funnel_state);
4240 }
4241 return (_err);
4242 }
4243
4244 #if 0
4245 /*
4246 *#
4247 *#% pagein vp = = =
4248 *#
4249 */
4250 struct vnop_pagein_args {
4251 struct vnodeop_desc *a_desc;
4252 vnode_t a_vp;
4253 upl_t a_pl;
4254 vm_offset_t a_pl_offset;
4255 off_t a_f_offset;
4256 size_t a_size;
4257 int a_flags;
4258 vfs_context_t a_context;
4259 };
4260 #endif /* 0*/
4261 errno_t
4262 VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4263 {
4264 int _err;
4265 struct vnop_pagein_args a;
4266 int thread_safe;
4267 int funnel_state = 0;
4268
4269 a.a_desc = &vnop_pagein_desc;
4270 a.a_vp = vp;
4271 a.a_pl = pl;
4272 a.a_pl_offset = pl_offset;
4273 a.a_f_offset = f_offset;
4274 a.a_size = size;
4275 a.a_flags = flags;
4276 a.a_context = context;
4277 thread_safe = THREAD_SAFE_FS(vp);
4278
4279 if (!thread_safe) {
4280 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4281 }
4282 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
4283 if (!thread_safe) {
4284 (void) thread_funnel_set(kernel_flock, funnel_state);
4285 }
4286 return (_err);
4287 }
4288
4289 #if 0
4290 /*
4291 *#
4292 *#% pageout vp = = =
4293 *#
4294 */
4295 struct vnop_pageout_args {
4296 struct vnodeop_desc *a_desc;
4297 vnode_t a_vp;
4298 upl_t a_pl;
4299 vm_offset_t a_pl_offset;
4300 off_t a_f_offset;
4301 size_t a_size;
4302 int a_flags;
4303 vfs_context_t a_context;
4304 };
4305
4306 #endif /* 0*/
4307 errno_t
4308 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4309 {
4310 int _err;
4311 struct vnop_pageout_args a;
4312 int thread_safe;
4313 int funnel_state = 0;
4314
4315 a.a_desc = &vnop_pageout_desc;
4316 a.a_vp = vp;
4317 a.a_pl = pl;
4318 a.a_pl_offset = pl_offset;
4319 a.a_f_offset = f_offset;
4320 a.a_size = size;
4321 a.a_flags = flags;
4322 a.a_context = context;
4323 thread_safe = THREAD_SAFE_FS(vp);
4324
4325 if (!thread_safe) {
4326 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4327 }
4328 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
4329 if (!thread_safe) {
4330 (void) thread_funnel_set(kernel_flock, funnel_state);
4331 }
4332 return (_err);
4333 }


#if 0
/*
 *#
 *#% searchfs	vp	L L L
 *#
 */
struct vnop_searchfs_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	void *a_searchparams1;
	void *a_searchparams2;
	struct attrlist *a_searchattrs;
	u_long a_maxmatches;
	struct timeval *a_timelimit;
	struct attrlist *a_returnattrs;
	u_long *a_nummatches;
	u_long a_scriptcode;
	u_long a_options;
	struct uio *a_uio;
	struct searchstate *a_searchstate;
	vfs_context_t a_context;
};

#endif /* 0 */
errno_t
VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, u_long maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, u_long *nummatches, u_long scriptcode, u_long options, struct uio *uio, struct searchstate *searchstate, vfs_context_t context)
{
	int _err;
	struct vnop_searchfs_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_searchfs_desc;
	a.a_vp = vp;
	a.a_searchparams1 = searchparams1;
	a.a_searchparams2 = searchparams2;
	a.a_searchattrs = searchattrs;
	a.a_maxmatches = maxmatches;
	a.a_timelimit = timelimit;
	a.a_returnattrs = returnattrs;
	a.a_nummatches = nummatches;
	a.a_scriptcode = scriptcode;
	a.a_options = options;
	a.a_uio = uio;
	a.a_searchstate = searchstate;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}

#if 0
/*
 *#
 *#% copyfile	fvp	U U U
 *#% copyfile	tdvp	L U U
 *#% copyfile	tvp	X U U
 *#
 */
struct vnop_copyfile_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_fvp;
	vnode_t a_tdvp;
	vnode_t a_tvp;
	struct componentname *a_tcnp;
	int a_mode;
	int a_flags;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
              int mode, int flags, vfs_context_t context)
{
	int _err;
	struct vnop_copyfile_args a;
	a.a_desc = &vnop_copyfile_desc;
	a.a_fvp = fvp;
	a.a_tdvp = tdvp;
	a.a_tvp = tvp;
	a.a_tcnp = tcnp;
	a.a_mode = mode;
	a.a_flags = flags;
	a.a_context = context;
	_err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
	return (_err);
}
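
/*
 * Note: VNOP_COPYFILE performs no funnel or fsnode-lock handling at all.
 * The lock annotations above show its vnodes arriving in mixed states
 * (fvp unlocked, tdvp locked, tvp apparently locked when present), so
 * serialization is presumably left to the filesystem's own copyfile
 * implementation rather than to this wrapper.
 */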


errno_t
VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t context)
{
	struct vnop_getxattr_args a;
	int error;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_getxattr_desc;
	a.a_vp = vp;
	a.a_name = name;
	a.a_uio = uio;
	a.a_size = size;
	a.a_options = options;
	a.a_context = context;

	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (error = lock_fsnode(vp, &funnel_state)) ) {
			return (error);
		}
	}
	error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (error);
}
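
#if 0
/*
 * Illustrative sketch, not part of the original interface: one way a
 * kernel client might read an extended attribute through VNOP_GETXATTR.
 * The attribute name, buffer size, and helper function are hypothetical;
 * it assumes "vp" carries an iocount and "ctx" is a valid context.  Per
 * the convention implemented by common filesystems (e.g. HFS+), a NULL
 * uio asks only for the attribute's size via a_size.
 */
static int
example_read_xattr(vnode_t vp, vfs_context_t ctx)
{
	char buf[128];		/* hypothetical value buffer */
	size_t attrsize = 0;
	uio_t auio;
	int error;

	/* probe for the attribute's size with a NULL uio */
	error = VNOP_GETXATTR(vp, "com.example.attr", NULL, &attrsize, 0, ctx);
	if (error || attrsize > sizeof(buf))
		return (error ? error : ERANGE);

	/* fetch the value into a kernel-space uio */
	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	uio_addiov(auio, CAST_USER_ADDR_T(buf), sizeof(buf));
	error = VNOP_GETXATTR(vp, "com.example.attr", auio, NULL, 0, ctx);
	uio_free(auio);
	return (error);
}
#endif /* 0 */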

errno_t
VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t context)
{
	struct vnop_setxattr_args a;
	int error;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_setxattr_desc;
	a.a_vp = vp;
	a.a_name = name;
	a.a_uio = uio;
	a.a_options = options;
	a.a_context = context;

	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (error = lock_fsnode(vp, &funnel_state)) ) {
			return (error);
		}
	}
	error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (error);
}

errno_t
VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t context)
{
	struct vnop_removexattr_args a;
	int error;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_removexattr_desc;
	a.a_vp = vp;
	a.a_name = name;
	a.a_options = options;
	a.a_context = context;

	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (error = lock_fsnode(vp, &funnel_state)) ) {
			return (error);
		}
	}
	error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (error);
}

errno_t
VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t context)
{
	struct vnop_listxattr_args a;
	int error;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_listxattr_desc;
	a.a_vp = vp;
	a.a_uio = uio;
	a.a_size = size;
	a.a_options = options;
	a.a_context = context;

	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (error = lock_fsnode(vp, &funnel_state)) ) {
			return (error);
		}
	}
	error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (error);
}


#if 0
/*
 *#
 *#% blktooff	vp	= = =
 *#
 */
struct vnop_blktooff_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	daddr64_t a_lblkno;
	off_t *a_offset;
};
#endif /* 0 */
errno_t
VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
{
	int _err;
	struct vnop_blktooff_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_blktooff_desc;
	a.a_vp = vp;
	a.a_lblkno = lblkno;
	a.a_offset = offset;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	_err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}

#if 0
/*
 *#
 *#% offtoblk	vp	= = =
 *#
 */
struct vnop_offtoblk_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	off_t a_offset;
	daddr64_t *a_lblkno;
};
#endif /* 0 */
errno_t
VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
{
	int _err;
	struct vnop_offtoblk_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_offtoblk_desc;
	a.a_vp = vp;
	a.a_offset = offset;
	a.a_lblkno = lblkno;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	_err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
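
#if 0
/*
 * Illustrative sketch, not part of the original source: blktooff and
 * offtoblk are inverse conversions, so a logical block number can be
 * round-tripped through a byte offset.  The helper name is hypothetical;
 * "vp" and "lblkno" are assumed valid, and error handling is elided.
 */
static void
example_blk_off_roundtrip(vnode_t vp, daddr64_t lblkno)
{
	off_t offset;
	daddr64_t blk;

	if (VNOP_BLKTOOFF(vp, lblkno, &offset) == 0 &&
	    VNOP_OFFTOBLK(vp, offset, &blk) == 0) {
		/* blk now equals lblkno for a well-behaved filesystem */
	}
}
#endif /* 0 */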

#if 0
/*
 *#
 *#% blockmap	vp	L L L
 *#
 */
struct vnop_blockmap_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	off_t a_foffset;
	size_t a_size;
	daddr64_t *a_bpn;
	size_t *a_run;
	void *a_poff;
	int a_flags;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t context)
{
	int _err;
	struct vnop_blockmap_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		/* callers may pass a NULL context; build one from the current thread */
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_blockmap_desc;
	a.a_vp = vp;
	a.a_foffset = foffset;
	a.a_size = size;
	a.a_bpn = bpn;
	a.a_run = run;
	a.a_poff = poff;
	a.a_flags = flags;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	_err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
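
#if 0
/*
 * Illustrative sketch, not part of the original source: mapping a file
 * offset onto an on-disk block before issuing I/O.  Because the wrapper
 * above synthesizes a context when given NULL, passing NULL here is
 * legal; the helper name and the interpretation of "run" as the
 * contiguous byte count at bpn follow common cluster-I/O usage.
 */
static int
example_map_extent(vnode_t vp, off_t foffset, size_t size)
{
	daddr64_t bpn;	/* physical (device) block number */
	size_t run;	/* contiguous run available at bpn */

	return (VNOP_BLOCKMAP(vp, foffset, size, &bpn, &run, NULL, 0, NULL));
}
#endif /* 0 */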

#if 0
struct vnop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct buf *a_bp;
};

#endif /* 0 */
errno_t
VNOP_STRATEGY(struct buf *bp)
{
	int _err;
	struct vnop_strategy_args a;
	a.a_desc = &vnop_strategy_desc;
	a.a_bp = bp;
	_err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
	return (_err);
}

#if 0
struct vnop_bwrite_args {
	struct vnodeop_desc *a_desc;
	buf_t a_bp;
};
#endif /* 0 */
errno_t
VNOP_BWRITE(struct buf *bp)
{
	int _err;
	struct vnop_bwrite_args a;
	a.a_desc = &vnop_bwrite_desc;
	a.a_bp = bp;
	_err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
	return (_err);
}
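
/*
 * Note: VNOP_STRATEGY and VNOP_BWRITE take only a buf_t and recover the
 * vnode for dispatch via buf_vnode(bp); neither wrapper performs the
 * funnel/fsnode serialization used by the vnode-based entry points
 * above, presumably because the buffer layer provides its own
 * synchronization.
 */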

#if 0
struct vnop_kqfilt_add_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct knote *a_kn;
	vfs_context_t a_context;
};
#endif
errno_t
VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t context)
{
	int _err;
	struct vnop_kqfilt_add_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = VDESC(vnop_kqfilt_add);
	a.a_vp = vp;
	a.a_kn = kn;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}

#if 0
struct vnop_kqfilt_remove_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	uintptr_t a_ident;
	vfs_context_t a_context;
};
#endif
errno_t
VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t context)
{
	int _err;
	struct vnop_kqfilt_remove_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = VDESC(vnop_kqfilt_remove);
	a.a_vp = vp;
	a.a_ident = ident;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}

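/*
 * Note: the two kqfilt wrappers above obtain their descriptors through
 * the VDESC() macro rather than naming &vnop_kqfilt_add_desc and
 * &vnop_kqfilt_remove_desc directly; VDESC(op) conventionally expands
 * to &op##_desc, so the two styles yield the same address.
 */
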