]> git.saurik.com Git - apple/xnu.git/blame - bsd/vfs/kpi_vfs.c
xnu-792.24.17.tar.gz
[apple/xnu.git] / bsd / vfs / kpi_vfs.c
CommitLineData
91447636 1/*
5d5c5d0d
A
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
6601e61a 4 * @APPLE_LICENSE_HEADER_START@
91447636 5 *
6601e61a
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
8f6c56a5 11 *
6601e61a
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
6601e61a
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
8f6c56a5 19 *
6601e61a 20 * @APPLE_LICENSE_HEADER_END@
91447636
A
21 */
22/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23/*
24 * Copyright (c) 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 * (c) UNIX System Laboratories, Inc.
27 * All or some portions of this file are derived from material licensed
28 * to the University of California by American Telephone and Telegraph
29 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
30 * the permission of UNIX System Laboratories, Inc.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)kpi_vfs.c
61 */
62
63/*
64 * External virtual filesystem routines
65 */
66
67#undef DIAGNOSTIC
68#define DIAGNOSTIC 1
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/proc_internal.h>
73#include <sys/kauth.h>
74#include <sys/mount.h>
75#include <sys/mount_internal.h>
76#include <sys/time.h>
77#include <sys/vnode_internal.h>
78#include <sys/stat.h>
79#include <sys/namei.h>
80#include <sys/ucred.h>
81#include <sys/buf.h>
82#include <sys/errno.h>
83#include <sys/malloc.h>
84#include <sys/domain.h>
85#include <sys/mbuf.h>
86#include <sys/syslog.h>
87#include <sys/ubc.h>
88#include <sys/vm.h>
89#include <sys/sysctl.h>
90#include <sys/filedesc.h>
91#include <sys/fsevents.h>
92#include <sys/user.h>
93#include <sys/lockf.h>
94#include <sys/xattr.h>
95
96#include <kern/assert.h>
97#include <kern/kalloc.h>
98
99#include <miscfs/specfs/specdev.h>
100
101#include <mach/mach_types.h>
102#include <mach/memory_object_types.h>
103
104#define ESUCCESS 0
105#undef mount_t
106#undef vnode_t
107
108#define COMPAT_ONLY
109
110
/*
 * THREAD_SAFE_FS(VP): non-zero when the filesystem backing VP is thread
 * safe (no v_unsafefs serialization structure hangs off the vnode).
 */
#define THREAD_SAFE_FS(VP) \
	((VP)->v_unsafefs ? 0 : 1)

/*
 * NATIVE_XATTR(VP): non-zero when VP's mounted filesystem stores extended
 * attributes natively (VFC_VFSNATIVEXATTR set); 0 when VP has no mount.
 */
#define NATIVE_XATTR(VP) \
	((VP)->v_mount ? (VP)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR : 0)

/* Forward declarations for the xattr shim helpers defined later in this file. */
static void xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context,
			int thread_safe, int force);
static void xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
			vfs_context_t context, int thread_safe);
121
122
/*
 * Purge the name cache entries for a vnode and mark it so VNOP_INACTIVE
 * will run when its last iocount is dropped.
 */
static void
vnode_setneedinactive(vnode_t vp)
{
	cache_purge(vp);

	/* v_lflag is protected by the vnode lock */
	vnode_lock(vp);
	vp->v_lflag |= VL_NEEDINACTIVE;
	vnode_unlock(vp);
}
132
133
/*
 * Acquire the per-filesystem "fsnode" serialization for a vnode whose
 * filesystem is not thread safe.  If funnel_state is non-NULL the kernel
 * funnel is entered first and the previous funnel state is stored through
 * the pointer (to be restored by unlock_fsnode()).
 *
 * The lock is recursive: if the calling thread already owns it, only the
 * hold count is bumped.  Returns ENOENT (with the funnel state restored)
 * if the vnode is terminating or already dead; 0 on success.
 */
int
lock_fsnode(vnode_t vp, int *funnel_state)
{
	if (funnel_state)
		*funnel_state = thread_funnel_set(kernel_flock, TRUE);

	if (vp->v_unsafefs) {
		if (vp->v_unsafefs->fsnodeowner == current_thread()) {
			/* recursive entry: just bump the hold count */
			vp->v_unsafefs->fsnode_count++;
		} else {
			lck_mtx_lock(&vp->v_unsafefs->fsnodelock);

			if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
				/* vnode is going away: back out completely */
				lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);

				if (funnel_state)
					(void) thread_funnel_set(kernel_flock, *funnel_state);
				return (ENOENT);
			}
			vp->v_unsafefs->fsnodeowner = current_thread();
			vp->v_unsafefs->fsnode_count = 1;
		}
	}
	return (0);
}
159
160
/*
 * Release the per-filesystem "fsnode" serialization taken by lock_fsnode().
 * The mutex is only dropped when the recursive hold count reaches zero.
 * If funnel_state is non-NULL the kernel funnel is restored to the saved
 * state regardless of the hold count.
 */
void
unlock_fsnode(vnode_t vp, int *funnel_state)
{
	if (vp->v_unsafefs) {
		if (--vp->v_unsafefs->fsnode_count == 0) {
			vp->v_unsafefs->fsnodeowner = NULL;
			lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
		}
	}
	if (funnel_state)
		(void) thread_funnel_set(kernel_flock, *funnel_state);
}
173
174
175
176/* ====================================================================== */
177/* ************ EXTERNAL KERNEL APIS ********************************** */
178/* ====================================================================== */
179
180/*
181 * prototypes for exported VFS operations
182 */
183int
184VFS_MOUNT(struct mount * mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
185{
186 int error;
187 int thread_safe;
188 int funnel_state = 0;
189
190 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
191 return(ENOTSUP);
192
193 thread_safe = mp->mnt_vtable->vfc_threadsafe;
194
195
196 if (!thread_safe) {
197 funnel_state = thread_funnel_set(kernel_flock, TRUE);
198 }
199
200 if (vfs_context_is64bit(context)) {
201 if (vfs_64bitready(mp)) {
202 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
203 }
204 else {
205 error = ENOTSUP;
206 }
207 }
208 else {
209 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
210 }
211
212 if (!thread_safe) {
213 (void) thread_funnel_set(kernel_flock, funnel_state);
214 }
215 return (error);
216}
217
218int
219VFS_START(struct mount * mp, int flags, vfs_context_t context)
220{
221 int error;
222 int thread_safe;
223 int funnel_state = 0;
224
225 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
226 return(ENOTSUP);
227
228 thread_safe = mp->mnt_vtable->vfc_threadsafe;
229
230 if (!thread_safe) {
231 funnel_state = thread_funnel_set(kernel_flock, TRUE);
232 }
233 error = (*mp->mnt_op->vfs_start)(mp, flags, context);
234 if (!thread_safe) {
235 (void) thread_funnel_set(kernel_flock, funnel_state);
236 }
237 return (error);
238}
239
240int
241VFS_UNMOUNT(struct mount *mp, int flags, vfs_context_t context)
242{
243 int error;
244 int thread_safe;
245 int funnel_state = 0;
246
247 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
248 return(ENOTSUP);
249
250 thread_safe = mp->mnt_vtable->vfc_threadsafe;
251
252 if (!thread_safe) {
253 funnel_state = thread_funnel_set(kernel_flock, TRUE);
254 }
255 error = (*mp->mnt_op->vfs_unmount)(mp, flags, context);
256 if (!thread_safe) {
257 (void) thread_funnel_set(kernel_flock, funnel_state);
258 }
259 return (error);
260}
261
262int
263VFS_ROOT(struct mount * mp, struct vnode ** vpp, vfs_context_t context)
264{
265 int error;
266 int thread_safe;
267 int funnel_state = 0;
268 struct vfs_context acontext;
269
270 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
271 return(ENOTSUP);
272
273 if (context == NULL) {
274 acontext.vc_proc = current_proc();
275 acontext.vc_ucred = kauth_cred_get();
276 context = &acontext;
277 }
278 thread_safe = mp->mnt_vtable->vfc_threadsafe;
279
280 if (!thread_safe) {
281 funnel_state = thread_funnel_set(kernel_flock, TRUE);
282 }
283 error = (*mp->mnt_op->vfs_root)(mp, vpp, context);
284 if (!thread_safe) {
285 (void) thread_funnel_set(kernel_flock, funnel_state);
286 }
287 return (error);
288}
289
290int
291VFS_QUOTACTL(struct mount *mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t context)
292{
293 int error;
294 int thread_safe;
295 int funnel_state = 0;
296
297 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
298 return(ENOTSUP);
299
300 thread_safe = mp->mnt_vtable->vfc_threadsafe;
301
302 if (!thread_safe) {
303 funnel_state = thread_funnel_set(kernel_flock, TRUE);
304 }
305 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, context);
306 if (!thread_safe) {
307 (void) thread_funnel_set(kernel_flock, funnel_state);
308 }
309 return (error);
310}
311
312int
313VFS_GETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
314{
315 int error;
316 int thread_safe;
317 int funnel_state = 0;
318 struct vfs_context acontext;
319
320 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
321 return(ENOTSUP);
322
323 if (context == NULL) {
324 acontext.vc_proc = current_proc();
325 acontext.vc_ucred = kauth_cred_get();
326 context = &acontext;
327 }
328 thread_safe = mp->mnt_vtable->vfc_threadsafe;
329
330 if (!thread_safe) {
331 funnel_state = thread_funnel_set(kernel_flock, TRUE);
332 }
333 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, context);
334 if (!thread_safe) {
335 (void) thread_funnel_set(kernel_flock, funnel_state);
336 }
337 return(error);
338}
339
340int
341VFS_SETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
342{
343 int error;
344 int thread_safe;
345 int funnel_state = 0;
346 struct vfs_context acontext;
347
348 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
349 return(ENOTSUP);
350
351 if (context == NULL) {
352 acontext.vc_proc = current_proc();
353 acontext.vc_ucred = kauth_cred_get();
354 context = &acontext;
355 }
356 thread_safe = mp->mnt_vtable->vfc_threadsafe;
357
358 if (!thread_safe) {
359 funnel_state = thread_funnel_set(kernel_flock, TRUE);
360 }
361 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, context);
362 if (!thread_safe) {
363 (void) thread_funnel_set(kernel_flock, funnel_state);
364 }
365 return(error);
366}
367
368int
369VFS_SYNC(struct mount *mp, int flags, vfs_context_t context)
370{
371 int error;
372 int thread_safe;
373 int funnel_state = 0;
374 struct vfs_context acontext;
375
376 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
377 return(ENOTSUP);
378
379 if (context == NULL) {
380 acontext.vc_proc = current_proc();
381 acontext.vc_ucred = kauth_cred_get();
382 context = &acontext;
383 }
384 thread_safe = mp->mnt_vtable->vfc_threadsafe;
385
386 if (!thread_safe) {
387 funnel_state = thread_funnel_set(kernel_flock, TRUE);
388 }
389 error = (*mp->mnt_op->vfs_sync)(mp, flags, context);
390 if (!thread_safe) {
391 (void) thread_funnel_set(kernel_flock, funnel_state);
392 }
393 return(error);
394}
395
396int
397VFS_VGET(struct mount * mp, ino64_t ino, struct vnode **vpp, vfs_context_t context)
398{
399 int error;
400 int thread_safe;
401 int funnel_state = 0;
402 struct vfs_context acontext;
403
404 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
405 return(ENOTSUP);
406
407 if (context == NULL) {
408 acontext.vc_proc = current_proc();
409 acontext.vc_ucred = kauth_cred_get();
410 context = &acontext;
411 }
412 thread_safe = mp->mnt_vtable->vfc_threadsafe;
413
414 if (!thread_safe) {
415 funnel_state = thread_funnel_set(kernel_flock, TRUE);
416 }
417 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, context);
418 if (!thread_safe) {
419 (void) thread_funnel_set(kernel_flock, funnel_state);
420 }
421 return(error);
422}
423
424int
425VFS_FHTOVP(struct mount * mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t context)
426{
427 int error;
428 int thread_safe;
429 int funnel_state = 0;
430 struct vfs_context acontext;
431
432 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
433 return(ENOTSUP);
434
435 if (context == NULL) {
436 acontext.vc_proc = current_proc();
437 acontext.vc_ucred = kauth_cred_get();
438 context = &acontext;
439 }
440 thread_safe = mp->mnt_vtable->vfc_threadsafe;
441
442 if (!thread_safe) {
443 funnel_state = thread_funnel_set(kernel_flock, TRUE);
444 }
445 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, context);
446 if (!thread_safe) {
447 (void) thread_funnel_set(kernel_flock, funnel_state);
448 }
449 return(error);
450}
451
452int
453VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t context)
454{
455 int error;
456 int thread_safe;
457 int funnel_state = 0;
458 struct vfs_context acontext;
459
460 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
461 return(ENOTSUP);
462
463 if (context == NULL) {
464 acontext.vc_proc = current_proc();
465 acontext.vc_ucred = kauth_cred_get();
466 context = &acontext;
467 }
468 thread_safe = THREAD_SAFE_FS(vp);
469
470 if (!thread_safe) {
471 funnel_state = thread_funnel_set(kernel_flock, TRUE);
472 }
473 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, context);
474 if (!thread_safe) {
475 (void) thread_funnel_set(kernel_flock, funnel_state);
476 }
477 return(error);
478}
479
480
/*
 * Copy the vfs type name of the mount_t into the caller's buffer, which
 * must hold at least MFSNAMELEN bytes.
 *
 * NOTE(review): strncpy() does not guarantee NUL termination if vfc_name
 * occupies all MFSNAMELEN bytes -- confirm vfc_name is always stored
 * NUL-terminated (see the strncpy in vfs_fsadd()).
 */
void
vfs_name(mount_t mp, char * buffer)
{
	strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
}
487
488/* returns vfs type number for the mount_t */
489int
490vfs_typenum(mount_t mp)
491{
492 return(mp->mnt_vtable->vfc_typenum);
493}
494
495
496/* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
497uint64_t
498vfs_flags(mount_t mp)
499{
500 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
501}
502
503/* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
504void
505vfs_setflags(mount_t mp, uint64_t flags)
506{
507 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
508
509 mp->mnt_flag |= lflags;
510}
511
512/* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
513void
514vfs_clearflags(mount_t mp , uint64_t flags)
515{
516 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
517
518 mp->mnt_flag &= ~lflags;
519}
520
521/* Is the mount_t ronly and upgrade read/write requested? */
522int
523vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
524{
525 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
526}
527
528
529/* Is the mount_t mounted ronly */
530int
531vfs_isrdonly(mount_t mp)
532{
533 return (mp->mnt_flag & MNT_RDONLY);
534}
535
536/* Is the mount_t mounted for filesystem synchronous writes? */
537int
538vfs_issynchronous(mount_t mp)
539{
540 return (mp->mnt_flag & MNT_SYNCHRONOUS);
541}
542
543/* Is the mount_t mounted read/write? */
544int
545vfs_isrdwr(mount_t mp)
546{
547 return ((mp->mnt_flag & MNT_RDONLY) == 0);
548}
549
550
551/* Is mount_t marked for update (ie MNT_UPDATE) */
552int
553vfs_isupdate(mount_t mp)
554{
555 return (mp->mnt_flag & MNT_UPDATE);
556}
557
558
559/* Is mount_t marked for reload (ie MNT_RELOAD) */
560int
561vfs_isreload(mount_t mp)
562{
563 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
564}
565
566/* Is mount_t marked for reload (ie MNT_FORCE) */
567int
568vfs_isforce(mount_t mp)
569{
570 if ((mp->mnt_flag & MNT_FORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
571 return(1);
572 else
573 return(0);
574}
575
576int
577vfs_64bitready(mount_t mp)
578{
579 if ((mp->mnt_vtable->vfc_64bitready))
580 return(1);
581 else
582 return(0);
583}
584
585int
586vfs_authopaque(mount_t mp)
587{
588 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
589 return(1);
590 else
591 return(0);
592}
593
594int
595vfs_authopaqueaccess(mount_t mp)
596{
597 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
598 return(1);
599 else
600 return(0);
601}
602
603void
604vfs_setauthopaque(mount_t mp)
605{
606 mount_lock(mp);
607 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
608 mount_unlock(mp);
609}
610
611void
612vfs_setauthopaqueaccess(mount_t mp)
613{
614 mount_lock(mp);
615 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
616 mount_unlock(mp);
617}
618
619void
620vfs_clearauthopaque(mount_t mp)
621{
622 mount_lock(mp);
623 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
624 mount_unlock(mp);
625}
626
627void
628vfs_clearauthopaqueaccess(mount_t mp)
629{
630 mount_lock(mp);
631 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
632 mount_unlock(mp);
633}
634
635void
636vfs_setextendedsecurity(mount_t mp)
637{
638 mount_lock(mp);
639 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
640 mount_unlock(mp);
641}
642
643void
644vfs_clearextendedsecurity(mount_t mp)
645{
646 mount_lock(mp);
647 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
648 mount_unlock(mp);
649}
650
651int
652vfs_extendedsecurity(mount_t mp)
653{
654 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
655}
656
657/* returns the max size of short symlink in this mount_t */
658uint32_t
659vfs_maxsymlen(mount_t mp)
660{
661 return(mp->mnt_maxsymlinklen);
662}
663
664/* set max size of short symlink on mount_t */
665void
666vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
667{
668 mp->mnt_maxsymlinklen = symlen;
669}
670
671/* return a pointer to the RO vfs_statfs associated with mount_t */
672struct vfsstatfs *
673vfs_statfs(mount_t mp)
674{
675 return(&mp->mnt_vfsstat);
676}
677
678int
679vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
680{
681 int error;
682 char *vname;
683
684 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
685 return(error);
686
687 /*
688 * If we have a filesystem create time, use it to default some others.
689 */
690 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
691 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
692 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
693 }
694
695 return(0);
696}
697
698int
699vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
700{
701 int error;
702
703 if (vfs_isrdonly(mp))
704 return EROFS;
705
706 error = VFS_SETATTR(mp, vfa, ctx);
707
708 /*
709 * If we had alternate ways of setting vfs attributes, we'd
710 * fall back here.
711 */
712
713 return error;
714}
715
716/* return the private data handle stored in mount_t */
717void *
718vfs_fsprivate(mount_t mp)
719{
720 return(mp->mnt_data);
721}
722
723/* set the private data handle in mount_t */
724void
725vfs_setfsprivate(mount_t mp, void *mntdata)
726{
727 mp->mnt_data = mntdata;
728}
729
730
731/*
732 * return the block size of the underlying
733 * device associated with mount_t
734 */
735int
736vfs_devblocksize(mount_t mp) {
737
738 return(mp->mnt_devblocksize);
739}
740
741
742/*
743 * return the io attributes associated with mount_t
744 */
745void
746vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
747{
748 if (mp == NULL) {
749 ioattrp->io_maxreadcnt = MAXPHYS;
750 ioattrp->io_maxwritecnt = MAXPHYS;
751 ioattrp->io_segreadcnt = 32;
752 ioattrp->io_segwritecnt = 32;
753 ioattrp->io_maxsegreadsize = MAXPHYS;
754 ioattrp->io_maxsegwritesize = MAXPHYS;
755 ioattrp->io_devblocksize = DEV_BSIZE;
756 } else {
757 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
758 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
759 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
760 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
761 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
762 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
763 ioattrp->io_devblocksize = mp->mnt_devblocksize;
764 }
765 ioattrp->io_reserved[0] = 0;
766 ioattrp->io_reserved[1] = 0;
767 ioattrp->io_reserved[2] = 0;
768}
769
770
771/*
772 * set the IO attributes associated with mount_t
773 */
774void
775vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
776{
777 if (mp == NULL)
778 return;
779 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
780 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
781 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
782 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
783 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
784 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
785 mp->mnt_devblocksize = ioattrp->io_devblocksize;
786}
787
788/*
789 * Add a new filesystem into the kernel specified in passed in
790 * vfstable structure. It fills in the vnode
791 * dispatch vector that is to be passed to when vnodes are created.
792 * It returns a handle which is to be used to when the FS is to be removed
793 */
794typedef int (*PFI)(void *);
795extern int vfs_opv_numops;
796errno_t
797vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
798{
799#pragma unused(data)
800 struct vfstable *newvfstbl = NULL;
801 int i,j;
802 int (***opv_desc_vector_p)(void *);
803 int (**opv_desc_vector)(void *);
804 struct vnodeopv_entry_desc *opve_descp;
805 int desccount;
806 int descsize;
807 PFI *descptr;
808
809 /*
810 * This routine is responsible for all the initialization that would
811 * ordinarily be done as part of the system startup;
812 */
813
814 if (vfe == (struct vfs_fsentry *)0)
815 return(EINVAL);
816
817 desccount = vfe->vfe_vopcnt;
818 if ((desccount <=0) || ((desccount > 5)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
819 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
820 return(EINVAL);
821
822
823 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
824 M_WAITOK);
825 bzero(newvfstbl, sizeof(struct vfstable));
826 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
827 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
828 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
829 newvfstbl->vfc_typenum = maxvfsconf++;
830 else
831 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
832
833 newvfstbl->vfc_refcount = 0;
834 newvfstbl->vfc_flags = 0;
835 newvfstbl->vfc_mountroot = NULL;
836 newvfstbl->vfc_next = NULL;
837 newvfstbl->vfc_threadsafe = 0;
838 newvfstbl->vfc_vfsflags = 0;
839 if (vfe->vfe_flags & VFS_TBL64BITREADY)
840 newvfstbl->vfc_64bitready= 1;
841 if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
842 newvfstbl->vfc_threadsafe= 1;
843 if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
844 newvfstbl->vfc_threadsafe= 1;
845 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
846 newvfstbl->vfc_flags |= MNT_LOCAL;
847 if (vfe->vfe_flags & VFS_TBLLOCALVOL)
848 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
849 else
850 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
851
852
853 /*
854 * Allocate and init the vectors.
855 * Also handle backwards compatibility.
856 *
857 * We allocate one large block to hold all <desccount>
858 * vnode operation vectors stored contiguously.
859 */
860 /* XXX - shouldn't be M_TEMP */
861
862 descsize = desccount * vfs_opv_numops * sizeof(PFI);
863 MALLOC(descptr, PFI *, descsize,
864 M_TEMP, M_WAITOK);
865 bzero(descptr, descsize);
866
867 newvfstbl->vfc_descptr = descptr;
868 newvfstbl->vfc_descsize = descsize;
869
870
871 for (i= 0; i< desccount; i++ ) {
872 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
873 /*
874 * Fill in the caller's pointer to the start of the i'th vector.
875 * They'll need to supply it when calling vnode_create.
876 */
877 opv_desc_vector = descptr + i * vfs_opv_numops;
878 *opv_desc_vector_p = opv_desc_vector;
879
880 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
881 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
882
883 /*
884 * Sanity check: is this operation listed
885 * in the list of operations? We check this
886 * by seeing if its offest is zero. Since
887 * the default routine should always be listed
888 * first, it should be the only one with a zero
889 * offset. Any other operation with a zero
890 * offset is probably not listed in
891 * vfs_op_descs, and so is probably an error.
892 *
893 * A panic here means the layer programmer
894 * has committed the all-too common bug
895 * of adding a new operation to the layer's
896 * list of vnode operations but
897 * not adding the operation to the system-wide
898 * list of supported operations.
899 */
900 if (opve_descp->opve_op->vdesc_offset == 0 &&
901 opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
902 printf("vfs_fsadd: operation %s not listed in %s.\n",
903 opve_descp->opve_op->vdesc_name,
904 "vfs_op_descs");
905 panic("vfs_fsadd: bad operation");
906 }
907 /*
908 * Fill in this entry.
909 */
910 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
911 opve_descp->opve_impl;
912 }
913
914
915 /*
916 * Finally, go back and replace unfilled routines
917 * with their default. (Sigh, an O(n^3) algorithm. I
918 * could make it better, but that'd be work, and n is small.)
919 */
920 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
921
922 /*
923 * Force every operations vector to have a default routine.
924 */
925 opv_desc_vector = *opv_desc_vector_p;
926 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
927 panic("vfs_fsadd: operation vector without default routine.");
928 for (j = 0; j < vfs_opv_numops; j++)
929 if (opv_desc_vector[j] == NULL)
930 opv_desc_vector[j] =
931 opv_desc_vector[VOFFSET(vnop_default)];
932
933 } /* end of each vnodeopv_desc parsing */
934
935
936
937 *handle = vfstable_add(newvfstbl);
938
939 if (newvfstbl->vfc_typenum <= maxvfsconf )
940 maxvfsconf = newvfstbl->vfc_typenum + 1;
941 numused_vfsslots++;
942
943 if (newvfstbl->vfc_vfsops->vfs_init)
944 (*newvfstbl->vfc_vfsops->vfs_init)((struct vfsconf *)handle);
945
946 FREE(newvfstbl, M_TEMP);
947
948 return(0);
949}
950
951/*
952 * Removes the filesystem from kernel.
953 * The argument passed in is the handle that was given when
954 * file system was added
955 */
/*
 * Removes the filesystem from kernel.
 * The argument passed in is the handle that was given when
 * file system was added.
 *
 * Returns EBUSY if any mounts of this filesystem are still active,
 * otherwise the result of vfstable_del().
 */
errno_t
vfs_fsremove(vfstable_t handle)
{
	struct vfstable * vfstbl = (struct vfstable *)handle;
	void *old_desc = NULL;
	errno_t err;

	/*
	 * Preflight check for any mounts.
	 * NOTE(review): the refcount is re-examined outside the lock by
	 * vfstable_del(); this check is only an early-out -- confirm
	 * vfstable_del() revalidates under its own locking.
	 */
	mount_list_lock();
	if ( vfstbl->vfc_refcount != 0 ) {
		mount_list_unlock();
		return EBUSY;
	}
	mount_list_unlock();

	/*
	 * save the old descriptor; the free cannot occur unconditionally,
	 * since vfstable_del() may fail.
	 */
	if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
		old_desc = vfstbl->vfc_descptr;
	}
	err = vfstable_del(vfstbl);

	/* free the descriptor if the delete was successful */
	if (err == 0 && old_desc) {
		FREE(old_desc, M_TEMP);
	}

	return(err);
}
987
/*
 * This returns a reference to mount_t which should be dropped using
 * vfs_mountrele().  Not doing so will leak a mountpoint and associated
 * data structures.
 *
 * NOTE(review): both routines are currently no-ops (the mount argument
 * is __unused); the comments describe the intended caller contract only.
 */
errno_t
vfs_mountref(__unused mount_t mp ) /* gives a reference */
{
	return(0);
}

/* This drops the reference on mount_t that was acquired; currently a no-op. */
errno_t
vfs_mountrele(__unused mount_t mp ) /* drops reference */
{
	return(0);
}
1006
1007int
1008vfs_context_pid(vfs_context_t context)
1009{
1010 return (context->vc_proc->p_pid);
1011}
1012
1013int
1014vfs_context_suser(vfs_context_t context)
1015{
1016 return (suser(context->vc_ucred, 0));
1017}
1018int
1019vfs_context_issignal(vfs_context_t context, sigset_t mask)
1020{
1021 if (context->vc_proc)
1022 return(proc_pendingsignals(context->vc_proc, mask));
1023 return(0);
1024}
1025
1026int
1027vfs_context_is64bit(vfs_context_t context)
1028{
1029 if (context->vc_proc)
1030 return(proc_is64bit(context->vc_proc));
1031 return(0);
1032}
1033
1034proc_t
1035vfs_context_proc(vfs_context_t context)
1036{
1037 return (context->vc_proc);
1038}
1039
1040vfs_context_t
1041vfs_context_create(vfs_context_t context)
1042{
1043 struct vfs_context * newcontext;
1044
1045 newcontext = (struct vfs_context *)kalloc(sizeof(struct vfs_context));
1046
1047 if (newcontext) {
1048 if (context) {
1049 newcontext->vc_proc = context->vc_proc;
6601e61a 1050 newcontext->vc_ucred = context->vc_ucred;
91447636
A
1051 } else {
1052 newcontext->vc_proc = proc_self();
6601e61a 1053 newcontext->vc_ucred = kauth_cred_get();
91447636 1054 }
6601e61a 1055 return(newcontext);
91447636
A
1056 }
1057 return((vfs_context_t)0);
1058}
1059
1060int
1061vfs_context_rele(vfs_context_t context)
1062{
6601e61a 1063 if (context)
91447636
A
1064 kfree(context, sizeof(struct vfs_context));
1065 return(0);
1066}
1067
1068
1069ucred_t
1070vfs_context_ucred(vfs_context_t context)
1071{
1072 return (context->vc_ucred);
1073}
1074
1075/*
1076 * Return true if the context is owned by the superuser.
1077 */
1078int
1079vfs_context_issuser(vfs_context_t context)
1080{
1081 return(context->vc_ucred->cr_uid == 0);
1082}
1083
1084
1085/* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1086
1087
1088/*
1089 * Convert between vnode types and inode formats (since POSIX.1
1090 * defines mode word of stat structure in terms of inode formats).
1091 */
1092enum vtype
1093vnode_iftovt(int mode)
1094{
1095 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1096}
1097
1098int
1099vnode_vttoif(enum vtype indx)
1100{
1101 return(vttoif_tab[(int)(indx)]);
1102}
1103
1104int
1105vnode_makeimode(int indx, int mode)
1106{
1107 return (int)(VTTOIF(indx) | (mode));
1108}
1109
1110
1111/*
1112 * vnode manipulation functions.
1113 */
1114
1115/* returns system root vnode reference; It should be dropped using vrele() */
1116vnode_t
1117vfs_rootvnode(void)
1118{
1119 int error;
1120
1121 error = vnode_get(rootvnode);
1122 if (error)
1123 return ((vnode_t)0);
1124 else
1125 return rootvnode;
1126}
1127
1128
1129uint32_t
1130vnode_vid(vnode_t vp)
1131{
1132 return ((uint32_t)(vp->v_id));
1133}
1134
1135/* returns a mount reference; drop it with vfs_mountrelease() */
1136mount_t
1137vnode_mount(vnode_t vp)
1138{
1139 return (vp->v_mount);
1140}
1141
1142/* returns a mount reference iff vnode_t is a dir and is a mount point */
1143mount_t
1144vnode_mountedhere(vnode_t vp)
1145{
1146 mount_t mp;
1147
1148 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1149 (mp->mnt_vnodecovered == vp))
1150 return (mp);
1151 else
1152 return (mount_t)NULL;
1153}
1154
1155/* returns vnode type of vnode_t */
1156enum vtype
1157vnode_vtype(vnode_t vp)
1158{
1159 return (vp->v_type);
1160}
1161
1162/* returns FS specific node saved in vnode */
1163void *
1164vnode_fsnode(vnode_t vp)
1165{
1166 return (vp->v_data);
1167}
1168
1169void
1170vnode_clearfsnode(vnode_t vp)
1171{
1172 vp->v_data = 0;
1173}
1174
1175dev_t
1176vnode_specrdev(vnode_t vp)
1177{
1178 return(vp->v_rdev);
1179}
1180
1181
1182/* Accessor functions */
1183/* is vnode_t a root vnode */
1184int
1185vnode_isvroot(vnode_t vp)
1186{
1187 return ((vp->v_flag & VROOT)? 1 : 0);
1188}
1189
1190/* is vnode_t a system vnode */
1191int
1192vnode_issystem(vnode_t vp)
1193{
1194 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1195}
1196
1197/* if vnode_t mount operation in progress */
1198int
1199vnode_ismount(vnode_t vp)
1200{
1201 return ((vp->v_flag & VMOUNT)? 1 : 0);
1202}
1203
1204/* is this vnode under recyle now */
1205int
1206vnode_isrecycled(vnode_t vp)
1207{
1208 int ret;
1209
1210 vnode_lock(vp);
1211 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1212 vnode_unlock(vp);
1213 return(ret);
1214}
1215
1216/* is vnode_t marked to not keep data cached once it's been consumed */
1217int
1218vnode_isnocache(vnode_t vp)
1219{
1220 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1221}
1222
1223/*
1224 * has sequential readahead been disabled on this vnode
1225 */
1226int
1227vnode_isnoreadahead(vnode_t vp)
1228{
1229 return ((vp->v_flag & VRAOFF)? 1 : 0);
1230}
1231
1232/* is vnode_t a standard one? */
1233int
1234vnode_isstandard(vnode_t vp)
1235{
1236 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1237}
1238
1239/* don't vflush() if SKIPSYSTEM */
1240int
1241vnode_isnoflush(vnode_t vp)
1242{
1243 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1244}
1245
1246/* is vnode_t a regular file */
1247int
1248vnode_isreg(vnode_t vp)
1249{
1250 return ((vp->v_type == VREG)? 1 : 0);
1251}
1252
1253/* is vnode_t a directory? */
1254int
1255vnode_isdir(vnode_t vp)
1256{
1257 return ((vp->v_type == VDIR)? 1 : 0);
1258}
1259
1260/* is vnode_t a symbolic link ? */
1261int
1262vnode_islnk(vnode_t vp)
1263{
1264 return ((vp->v_type == VLNK)? 1 : 0);
1265}
1266
1267/* is vnode_t a fifo ? */
1268int
1269vnode_isfifo(vnode_t vp)
1270{
1271 return ((vp->v_type == VFIFO)? 1 : 0);
1272}
1273
1274/* is vnode_t a block device? */
1275int
1276vnode_isblk(vnode_t vp)
1277{
1278 return ((vp->v_type == VBLK)? 1 : 0);
1279}
1280
1281/* is vnode_t a char device? */
1282int
1283vnode_ischr(vnode_t vp)
1284{
1285 return ((vp->v_type == VCHR)? 1 : 0);
1286}
1287
1288/* is vnode_t a socket? */
1289int
1290vnode_issock(vnode_t vp)
1291{
1292 return ((vp->v_type == VSOCK)? 1 : 0);
1293}
1294
1295
/* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
void
vnode_setnocache(vnode_t vp)
{
	/* v_flag updates must be made under the vnode lock */
	vnode_lock(vp);
	vp->v_flag |= VNOCACHE_DATA;
	vnode_unlock(vp);
}

/* clear the "don't cache consumed data" hint on the vnode */
void
vnode_clearnocache(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag &= ~VNOCACHE_DATA;
	vnode_unlock(vp);
}

/* disable sequential read-ahead on the vnode */
void
vnode_setnoreadahead(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag |= VRAOFF;
	vnode_unlock(vp);
}

/* re-enable sequential read-ahead on the vnode */
void
vnode_clearnoreadahead(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag &= ~VRAOFF;
	vnode_unlock(vp);
}


/* mark vnode_t to skip vflush() is SKIPSYSTEM */
void
vnode_setnoflush(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag |= VNOFLUSH;
	vnode_unlock(vp);
}

/* clear the no-flush marking so vflush() considers this vnode again */
void
vnode_clearnoflush(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_flag &= ~VNOFLUSH;
	vnode_unlock(vp);
}
1346
1347
/* is vnode_t a blkdevice and has a FS mounted on it */
int
vnode_ismountedon(vnode_t vp)
{
	/* NOTE(review): v_specflags is read without the vnode lock here,
	 * unlike the setters below -- presumably a benign racy read. */
	return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
}

/* mark a block-device vnode as having a filesystem mounted on it */
void
vnode_setmountedon(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_specflags |= SI_MOUNTEDON;
	vnode_unlock(vp);
}

/* clear the mounted-on marking on a block-device vnode */
void
vnode_clearmountedon(vnode_t vp)
{
	vnode_lock(vp);
	vp->v_specflags &= ~SI_MOUNTEDON;
	vnode_unlock(vp);
}
1370
1371
/* set the filesystem tag (VT_*) identifying which FS owns this vnode */
void
vnode_settag(vnode_t vp, int tag)
{
	vp->v_tag = tag;

}

/* return the filesystem tag (VT_*) of this vnode */
int
vnode_tag(vnode_t vp)
{
	return(vp->v_tag);
}

/* return the name-cache parent pointer; may be NULLVP */
vnode_t
vnode_parent(vnode_t vp)
{

	return(vp->v_parent);
}

/* set the name-cache parent pointer (no reference is taken here) */
void
vnode_setparent(vnode_t vp, vnode_t dvp)
{
	vp->v_parent = dvp;
}

/* return the cached name of the vnode; may be NULL */
char *
vnode_name(vnode_t vp)
{
	/* we try to keep v_name a reasonable name for the node */
	return(vp->v_name);
}

/* install a name pointer on the vnode.
 * NOTE(review): the pointer is stored directly -- the caller appears to
 * retain ownership of the string's lifetime; confirm against callers. */
void
vnode_setname(vnode_t vp, char * name)
{
	vp->v_name = name;
}
1410
/* return the registered FS name when adding the FS to kernel */
/* NOTE: buf must be at least MFSNAMELEN bytes; strncpy does not add a
 * terminator beyond what vfc_name itself contains. */
void
vnode_vfsname(vnode_t vp, char * buf)
{
	strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
}

/* return the FS type number */
int
vnode_vfstypenum(vnode_t vp)
{
	return(vp->v_mount->mnt_vtable->vfc_typenum);
}

/* 1 if the vnode's filesystem declared itself 64-bit ready, else 0 */
int
vnode_vfs64bitready(vnode_t vp)
{

	if ((vp->v_mount->mnt_vtable->vfc_64bitready))
		return(1);
	else
		return(0);
}
1434
1435
1436
1437/* return the visible flags on associated mount point of vnode_t */
1438uint32_t
1439vnode_vfsvisflags(vnode_t vp)
1440{
1441 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
1442}
1443
1444/* return the command modifier flags on associated mount point of vnode_t */
1445uint32_t
1446vnode_vfscmdflags(vnode_t vp)
1447{
1448 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
1449}
1450
1451/* return the max symlink of short links of vnode_t */
1452uint32_t
1453vnode_vfsmaxsymlen(vnode_t vp)
1454{
1455 return(vp->v_mount->mnt_maxsymlinklen);
1456}
1457
1458/* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1459struct vfsstatfs *
1460vnode_vfsstatfs(vnode_t vp)
1461{
1462 return(&vp->v_mount->mnt_vfsstat);
1463}
1464
1465/* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1466void *
1467vnode_vfsfsprivate(vnode_t vp)
1468{
1469 return(vp->v_mount->mnt_data);
1470}
1471
1472/* is vnode_t in a rdonly mounted FS */
1473int
1474vnode_vfsisrdonly(vnode_t vp)
1475{
1476 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
1477}
1478
1479
1480/* returns vnode ref to current working directory */
1481vnode_t
1482current_workingdir(void)
1483{
1484 struct proc *p = current_proc();
1485 struct vnode * vp ;
1486
1487 if ( (vp = p->p_fd->fd_cdir) ) {
1488 if ( (vnode_getwithref(vp)) )
1489 return (NULL);
1490 }
1491 return vp;
1492}
1493
1494/* returns vnode ref to current root(chroot) directory */
1495vnode_t
1496current_rootdir(void)
1497{
1498 struct proc *p = current_proc();
1499 struct vnode * vp ;
1500
1501 if ( (vp = p->p_fd->fd_rdir) ) {
1502 if ( (vnode_getwithref(vp)) )
1503 return (NULL);
1504 }
1505 return vp;
1506}
1507
1508static int
1509vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
1510{
1511 kauth_filesec_t fsec;
1512 uio_t fsec_uio;
1513 size_t fsec_size;
1514 size_t xsize, rsize;
1515 int error;
1516
1517 fsec = NULL;
1518 fsec_uio = NULL;
1519 error = 0;
1520
1521 /* find out how big the EA is */
1522 if (vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx) != 0) {
1523 /* no EA, no filesec */
1524 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1525 error = 0;
1526 /* either way, we are done */
1527 goto out;
1528 }
1529
1530 /* how many entries would fit? */
1531 fsec_size = KAUTH_FILESEC_COUNT(xsize);
1532
1533 /* get buffer and uio */
1534 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
1535 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
1536 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
1537 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
1538 error = ENOMEM;
1539 goto out;
1540 }
1541
1542 /* read security attribute */
1543 rsize = xsize;
1544 if ((error = vn_getxattr(vp,
1545 KAUTH_FILESEC_XATTR,
1546 fsec_uio,
1547 &rsize,
1548 XATTR_NOSECURITY,
1549 ctx)) != 0) {
1550
1551 /* no attribute - no security data */
1552 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1553 error = 0;
1554 /* either way, we are done */
1555 goto out;
1556 }
1557
1558 /*
6601e61a
A
1559 * Validate security structure. If it's corrupt, we will
1560 * just ignore it.
91447636
A
1561 */
1562 if (rsize < KAUTH_FILESEC_SIZE(0)) {
1563 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
1564 goto out;
1565 }
6601e61a
A
1566 if (fsec->fsec_magic != KAUTH_FILESEC_MAGIC) {
1567 KAUTH_DEBUG("ACL - BAD MAGIC %x", fsec->fsec_magic);
21362eb3
A
1568 goto out;
1569 }
6601e61a
A
1570 if ((fsec->fsec_acl.acl_entrycount != KAUTH_FILESEC_NOACL) &&
1571 (fsec->fsec_acl.acl_entrycount > KAUTH_ACL_MAX_ENTRIES)) {
1572 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", fsec->fsec_entrycount);
1573 goto out;
1574 }
1575 if ((fsec->fsec_acl.acl_entrycount != KAUTH_FILESEC_NOACL) &&
1576 (KAUTH_FILESEC_SIZE(fsec->fsec_acl.acl_entrycount) > rsize)) {
1577 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", fsec->fsec_acl.acl_entrycount, rsize);
1578 goto out;
91447636 1579 }
4452a7af 1580
91447636
A
1581 *fsecp = fsec;
1582 fsec = NULL;
1583 error = 0;
1584out:
1585 if (fsec != NULL)
1586 kauth_filesec_free(fsec);
1587 if (fsec_uio != NULL)
1588 uio_free(fsec_uio);
1589 if (error)
1590 *fsecp = NULL;
1591 return(error);
1592}
1593
1594static int
1595vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
1596{
6601e61a
A
1597 uio_t fsec_uio;
1598 int error;
91447636
A
1599
1600 fsec_uio = NULL;
1601
1602 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
1603 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
1604 error = ENOMEM;
1605 goto out;
1606 }
1607 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), sizeof(struct kauth_filesec) - sizeof(struct kauth_acl));
6601e61a 1608 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), KAUTH_ACL_COPYSIZE(acl));
91447636
A
1609 error = vn_setxattr(vp,
1610 KAUTH_FILESEC_XATTR,
1611 fsec_uio,
1612 XATTR_NOSECURITY, /* we have auth'ed already */
1613 ctx);
1614 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
1615
1616out:
1617 if (fsec_uio != NULL)
1618 uio_free(fsec_uio);
1619 return(error);
1620}
1621
1622
/*
 * Fetch vnode attributes, filling in whatever the filesystem's VNOP_GETATTR
 * does not supply: extended security data via the filesec EA fallback,
 * uid/gid substitution for "ignore ownership" volumes, and synthesised
 * defaults for sizes, times, iosize, flags, etc.
 * Returns 0 or an errno; on success every active attribute is marked
 * supported where a value could be produced.
 */
int
vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	int	error;
	uid_t	nuid;
	gid_t	ngid;

	/* don't ask for extended security data if the filesystem doesn't support it */
	if (!vfs_extendedsecurity(vnode_mount(vp))) {
		VATTR_CLEAR_ACTIVE(vap, va_acl);
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
		VATTR_CLEAR_ACTIVE(vap, va_guuid);
	}

	/*
	 * If the caller wants size values we might have to synthesise, give the
	 * filesystem the opportunity to supply better intermediate results.
	 */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		VATTR_SET_ACTIVE(vap, va_data_size);
		VATTR_SET_ACTIVE(vap, va_data_alloc);
		VATTR_SET_ACTIVE(vap, va_total_size);
		VATTR_SET_ACTIVE(vap, va_total_alloc);
	}

	error = VNOP_GETATTR(vp, vap, ctx);
	if (error) {
		KAUTH_DEBUG("ERROR - returning %d", error);
		goto out;
	}

	/*
	 * If extended security data was requested but not returned, try the fallback
	 * path.
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
		fsec = NULL;

		/* filesec EAs are only kept on dirs, symlinks and regular files */
		if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) {
			/* try to get the filesec */
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
				goto out;
		}
		/* if no filesec, no attributes */
		if (fsec == NULL) {
			VATTR_RETURN(vap, va_acl, NULL);
			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
		} else {

			/* looks good, try to return what we were asked for */
			VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
			VATTR_RETURN(vap, va_guuid, fsec->fsec_group);

			/* only return the ACL if we were actually asked for it */
			if (VATTR_IS_ACTIVE(vap, va_acl)) {
				if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
					VATTR_RETURN(vap, va_acl, NULL);
				} else {
					/* caller receives its own copy; must free with kauth_acl_free() */
					facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
					if (facl == NULL) {
						kauth_filesec_free(fsec);
						error = ENOMEM;
						goto out;
					}
					bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
					VATTR_RETURN(vap, va_acl, facl);
				}
			}
			kauth_filesec_free(fsec);
		}
	}
	/*
	 * If someone gave us an unsolicited filesec, toss it.  We promise that
	 * we're OK with a filesystem giving us anything back, but our callers
	 * only expect what they asked for.
	 */
	if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
		if (vap->va_acl != NULL)
			kauth_acl_free(vap->va_acl);
		VATTR_CLEAR_SUPPORTED(vap, va_acl);
	}

#if 0	/* enable when we have a filesystem only supporting UUIDs */
	/*
	 * Handle the case where we need a UID/GID, but only have extended
	 * security information.
	 */
	if (VATTR_NOT_RETURNED(vap, va_uid) &&
	    VATTR_IS_SUPPORTED(vap, va_uuuid) &&
	    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
			VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_NOT_RETURNED(vap, va_gid) &&
	    VATTR_IS_SUPPORTED(vap, va_guuid) &&
	    !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
			VATTR_RETURN(vap, va_gid, ngid);
	}
#endif

	/*
	 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
	 * 99 is the historical "unknown" owner id; non-root callers see
	 * such files as owned by themselves.
	 */
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			nuid = vp->v_mount->mnt_fsowner;
			if (nuid == KAUTH_UID_NONE)
				nuid = 99;
		} else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
			nuid = vap->va_uid;
		} else {
			/* this will always be something sensible */
			nuid = vp->v_mount->mnt_fsowner;
		}
		if ((nuid == 99) && !vfs_context_issuser(ctx))
			nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			ngid = vp->v_mount->mnt_fsgroup;
			if (ngid == KAUTH_GID_NONE)
				ngid = 99;
		} else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
			ngid = vap->va_gid;
		} else {
			/* this will always be something sensible */
			ngid = vp->v_mount->mnt_fsgroup;
		}
		if ((ngid == 99) && !vfs_context_issuser(ctx))
			ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_gid, ngid);
	}

	/*
	 * Synthesise some values that can be reasonably guessed.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_iosize))
		VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);

	if (!VATTR_IS_SUPPORTED(vap, va_flags))
		VATTR_RETURN(vap, va_flags, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_filerev))
		VATTR_RETURN(vap, va_filerev, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_gen))
		VATTR_RETURN(vap, va_gen, 0);

	/*
	 * Default sizes.  Ordering here is important, as later defaults build on earlier ones.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_data_size))
		VATTR_RETURN(vap, va_data_size, 0);

	/* do we want any of the possibly-computed values? */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		/* make sure f_bsize is valid */
		if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
			if ((error = vfs_update_vfsstat(vp->v_mount, ctx)) != 0)
				goto out;
		}

		/* default va_data_alloc from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
			VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));

		/* default va_total_size from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_total_size))
			VATTR_RETURN(vap, va_total_size, vap->va_data_size);

		/* default va_total_alloc from va_total_size which is guaranteed at this point */
		if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
			VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
	}

	/*
	 * If we don't have a change time, pull it from the modtime.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
		VATTR_RETURN(vap, va_change_time, vap->va_modify_time);

	/*
	 * This is really only supported for the creation VNOPs, but since the field is there
	 * we should populate it correctly.
	 */
	VATTR_RETURN(vap, va_type, vp->v_type);

	/*
	 * The fsid can be obtained from the mountpoint directly.
	 */
	VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

out:

	return(error);
}
1828
1829int
1830vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
1831{
1832 int error, is_ownership_change=0;
1833
1834 /*
1835 * Make sure the filesystem is mounted R/W.
1836 * If not, return an error.
1837 */
6601e61a
A
1838 if (vfs_isrdonly(vp->v_mount))
1839 return(EROFS);
91447636
A
1840
1841 /*
1842 * If ownership is being ignored on this volume, we silently discard
1843 * ownership changes.
1844 */
1845 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
1846 VATTR_CLEAR_ACTIVE(vap, va_uid);
1847 VATTR_CLEAR_ACTIVE(vap, va_gid);
1848 }
1849
1850 if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) {
1851 is_ownership_change = 1;
1852 }
1853
1854 /*
1855 * Make sure that extended security is enabled if we're going to try
1856 * to set any.
1857 */
1858 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
1859 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
1860 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
6601e61a 1861 return(ENOTSUP);
91447636
A
1862 }
1863
1864 error = VNOP_SETATTR(vp, vap, ctx);
1865
1866 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
1867 error = vnode_setattr_fallback(vp, vap, ctx);
1868
1869 /*
1870 * If we have changed any of the things about the file that are likely
1871 * to result in changes to authorisation results, blow the vnode auth
1872 * cache
1873 */
1874 if (VATTR_IS_SUPPORTED(vap, va_mode) ||
1875 VATTR_IS_SUPPORTED(vap, va_uid) ||
1876 VATTR_IS_SUPPORTED(vap, va_gid) ||
1877 VATTR_IS_SUPPORTED(vap, va_flags) ||
1878 VATTR_IS_SUPPORTED(vap, va_acl) ||
1879 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
1880 VATTR_IS_SUPPORTED(vap, va_guuid))
1881 vnode_uncache_credentials(vp);
1882 // only send a stat_changed event if this is more than
1883 // just an access time update
1884 if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time))) {
1885 if (need_fsevent(FSE_STAT_CHANGED, vp) || (is_ownership_change && need_fsevent(FSE_CHOWN, vp))) {
1886 if (is_ownership_change == 0)
1887 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
1888 else
1889 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
1890 }
1891 }
1892 return(error);
1893}
1894
1895/*
6601e61a
A
1896 * Following an operation which sets attributes (setattr, create, etc.) we may
1897 * need to perform fallback operations to get attributes saved.
1898 */
91447636
A
1899int
1900vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
1901{
1902 kauth_filesec_t fsec;
1903 kauth_acl_t facl;
1904 struct kauth_filesec lfsec;
1905 int error;
1906
1907 error = 0;
1908
1909 /*
1910 * Extended security fallback via extended attributes.
1911 *
6601e61a 1912 * Note that we do not free the filesec; the caller is expected to do this.
91447636
A
1913 */
1914 if (VATTR_NOT_RETURNED(vap, va_acl) ||
1915 VATTR_NOT_RETURNED(vap, va_uuuid) ||
1916 VATTR_NOT_RETURNED(vap, va_guuid)) {
1917 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
1918
1919 /*
6601e61a 1920 * Fail for file types that we don't permit extended security to be set on.
91447636
A
1921 */
1922 if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
1923 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
1924 error = EINVAL;
1925 goto out;
1926 }
1927
1928 /*
6601e61a
A
1929 * If we don't have all the extended security items, we need to fetch the existing
1930 * data to perform a read-modify-write operation.
91447636
A
1931 */
1932 fsec = NULL;
1933 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
1934 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
1935 !VATTR_IS_ACTIVE(vap, va_guuid)) {
1936 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
1937 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
1938 goto out;
1939 }
1940 }
1941 /* if we didn't get a filesec, use our local one */
1942 if (fsec == NULL) {
1943 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
1944 fsec = &lfsec;
1945 } else {
1946 KAUTH_DEBUG("SETATTR - updating existing filesec");
1947 }
1948 /* find the ACL */
1949 facl = &fsec->fsec_acl;
1950
1951 /* if we're using the local filesec, we need to initialise it */
1952 if (fsec == &lfsec) {
1953 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
1954 fsec->fsec_owner = kauth_null_guid;
1955 fsec->fsec_group = kauth_null_guid;
1956 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
1957 facl->acl_flags = 0;
1958 }
1959
1960 /*
1961 * Update with the supplied attributes.
1962 */
1963 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
1964 KAUTH_DEBUG("SETATTR - updating owner UUID");
1965 fsec->fsec_owner = vap->va_uuuid;
1966 VATTR_SET_SUPPORTED(vap, va_uuuid);
1967 }
1968 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
1969 KAUTH_DEBUG("SETATTR - updating group UUID");
1970 fsec->fsec_group = vap->va_guuid;
1971 VATTR_SET_SUPPORTED(vap, va_guuid);
1972 }
1973 if (VATTR_IS_ACTIVE(vap, va_acl)) {
1974 if (vap->va_acl == NULL) {
1975 KAUTH_DEBUG("SETATTR - removing ACL");
1976 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
1977 } else {
1978 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
1979 facl = vap->va_acl;
1980 }
1981 VATTR_SET_SUPPORTED(vap, va_acl);
1982 }
1983
1984 /*
6601e61a 1985 * If the filesec data is all invalid, we can just remove the EA completely.
91447636
A
1986 */
1987 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
1988 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
1989 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
1990 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
1991 /* no attribute is ok, nothing to delete */
1992 if (error == ENOATTR)
1993 error = 0;
1994 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
1995 } else {
1996 /* write the EA */
1997 error = vnode_set_filesec(vp, fsec, facl, ctx);
1998 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
1999 }
2000
2001 /* if we fetched a filesec, dispose of the buffer */
2002 if (fsec != &lfsec)
2003 kauth_filesec_free(fsec);
2004 }
2005out:
2006
2007 return(error);
2008}
2009
2010/*
2011 * Definition of vnode operations.
2012 */
2013
#if 0
/*
 *#
 *#% lookup dvp L ? ?
 *#% lookup vpp - L -
 */
struct vnop_lookup_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_dvp;
	vnode_t *a_vpp;
	struct componentname *a_cnp;
	vfs_context_t a_context;
};
#endif /* 0*/

/*
 * Dispatch a lookup of cnp in directory dvp to the filesystem's lookup
 * entry point.  For filesystems that are not thread-safe, takes the
 * funnel/fsnode lock around the call and implements the LOCKPARENT
 * hand-off protocol for the last path component.
 */
errno_t
VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t context)
{
	int _err;
	struct vnop_lookup_args a;
	vnode_t vp;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_lookup_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	vnode_cache_credentials(dvp, context);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	/* dispatch through the per-filesystem vnode operations table */
	_err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);

	/* NOTE(review): vp is captured here but not otherwise used below */
	vp = *vpp;

	if (!thread_safe) {
		if ( (cnp->cn_flags & ISLASTCN) ) {
			if ( (cnp->cn_flags & LOCKPARENT) ) {
				if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
					/*
					 * leave the fsnode lock held on
					 * the directory, but restore the funnel...
					 * also indicate that we need to drop the
					 * fsnode_lock when we're done with the
					 * system call processing for this path
					 */
					cnp->cn_flags |= FSNODELOCKHELD;

					(void) thread_funnel_set(kernel_flock, funnel_state);
					return (_err);
				}
			}
		}
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2078
#if 0
/*
 *#
 *#% create dvp L L L
 *#% create vpp - L -
 *#
 */

struct vnop_create_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_dvp;
	vnode_t *a_vpp;
	struct componentname *a_cnp;
	struct vnode_attr *a_vap;
	vfs_context_t a_context;
};
#endif /* 0*/
/*
 * Dispatch creation of a regular file named by cnp in directory dvp with
 * attributes vap.  For filesystems without native extended-attribute
 * support, also removes any stale AppleDouble sidecar file on success.
 */
errno_t
VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
{
	int _err;
	struct vnop_create_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_create_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
	}
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2129
2130#if 0
2131/*
2132 *#
2133 *#% whiteout dvp L L L
2134 *#% whiteout cnp - - -
2135 *#% whiteout flag - - -
2136 *#
2137 */
2138struct vnop_whiteout_args {
2139 struct vnodeop_desc *a_desc;
2140 vnode_t a_dvp;
2141 struct componentname *a_cnp;
2142 int a_flags;
2143 vfs_context_t a_context;
2144};
2145#endif /* 0*/
2146errno_t
2147VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t context)
2148{
2149 int _err;
2150 struct vnop_whiteout_args a;
2151 int thread_safe;
2152 int funnel_state = 0;
2153
2154 a.a_desc = &vnop_whiteout_desc;
2155 a.a_dvp = dvp;
2156 a.a_cnp = cnp;
2157 a.a_flags = flags;
2158 a.a_context = context;
2159 thread_safe = THREAD_SAFE_FS(dvp);
2160
2161 if (!thread_safe) {
2162 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2163 return (_err);
2164 }
2165 }
2166 _err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);
2167 if (!thread_safe) {
2168 unlock_fsnode(dvp, &funnel_state);
2169 }
2170 return (_err);
2171}
2172
2173 #if 0
2174/*
2175 *#
2176 *#% mknod dvp L U U
2177 *#% mknod vpp - X -
2178 *#
2179 */
2180struct vnop_mknod_args {
2181 struct vnodeop_desc *a_desc;
2182 vnode_t a_dvp;
2183 vnode_t *a_vpp;
2184 struct componentname *a_cnp;
2185 struct vnode_attr *a_vap;
2186 vfs_context_t a_context;
2187};
2188#endif /* 0*/
2189errno_t
2190VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
2191{
2192
2193 int _err;
2194 struct vnop_mknod_args a;
2195 int thread_safe;
2196 int funnel_state = 0;
2197
2198 a.a_desc = &vnop_mknod_desc;
2199 a.a_dvp = dvp;
2200 a.a_vpp = vpp;
2201 a.a_cnp = cnp;
2202 a.a_vap = vap;
2203 a.a_context = context;
2204 thread_safe = THREAD_SAFE_FS(dvp);
2205
2206 if (!thread_safe) {
2207 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2208 return (_err);
2209 }
2210 }
2211 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
2212 if (!thread_safe) {
2213 unlock_fsnode(dvp, &funnel_state);
2214 }
2215 return (_err);
2216}
2217
#if 0
/*
 *#
 *#% open vp L L L
 *#
 */
struct vnop_open_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	int a_mode;
	vfs_context_t a_context;
};
#endif /* 0*/
/*
 * Dispatch an open of vp with the given fmode.  A NULL context is
 * substituted with the current process/credential.  For non-thread-safe
 * filesystems the funnel is taken; the fsnode lock is skipped for
 * character, fifo and socket vnodes.
 */
errno_t
VNOP_OPEN(vnode_t vp, int mode, vfs_context_t context)
{
	int _err;
	struct vnop_open_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	/* build a context from the caller's identity when none was supplied */
	if (context == NULL) {
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_open_desc;
	a.a_vp = vp;
	a.a_mode = mode;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			if ( (_err = lock_fsnode(vp, NULL)) ) {
				(void) thread_funnel_set(kernel_flock, funnel_state);
				return (_err);
			}
		}
	}
	_err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
			unlock_fsnode(vp, NULL);
		}
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
2269
2270#if 0
2271/*
2272 *#
2273 *#% close vp U U U
2274 *#
2275 */
2276struct vnop_close_args {
2277 struct vnodeop_desc *a_desc;
2278 vnode_t a_vp;
2279 int a_fflag;
2280 vfs_context_t a_context;
2281};
2282#endif /* 0*/
2283errno_t
2284VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t context)
2285{
2286 int _err;
2287 struct vnop_close_args a;
2288 int thread_safe;
2289 int funnel_state = 0;
2290 struct vfs_context acontext;
2291
2292 if (context == NULL) {
2293 acontext.vc_proc = current_proc();
2294 acontext.vc_ucred = kauth_cred_get();
2295 context = &acontext;
2296 }
2297 a.a_desc = &vnop_close_desc;
2298 a.a_vp = vp;
2299 a.a_fflag = fflag;
2300 a.a_context = context;
2301 thread_safe = THREAD_SAFE_FS(vp);
2302
2303 if (!thread_safe) {
2304 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2305 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2306 if ( (_err = lock_fsnode(vp, NULL)) ) {
2307 (void) thread_funnel_set(kernel_flock, funnel_state);
2308 return (_err);
2309 }
2310 }
2311 }
2312 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
2313 if (!thread_safe) {
2314 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2315 unlock_fsnode(vp, NULL);
2316 }
2317 (void) thread_funnel_set(kernel_flock, funnel_state);
2318 }
2319 return (_err);
2320}
2321
2322#if 0
2323/*
2324 *#
2325 *#% access vp L L L
2326 *#
2327 */
2328struct vnop_access_args {
2329 struct vnodeop_desc *a_desc;
2330 vnode_t a_vp;
2331 int a_action;
2332 vfs_context_t a_context;
2333};
2334#endif /* 0*/
2335errno_t
2336VNOP_ACCESS(vnode_t vp, int action, vfs_context_t context)
2337{
2338 int _err;
2339 struct vnop_access_args a;
2340 int thread_safe;
2341 int funnel_state = 0;
2342 struct vfs_context acontext;
2343
2344 if (context == NULL) {
2345 acontext.vc_proc = current_proc();
2346 acontext.vc_ucred = kauth_cred_get();
2347 context = &acontext;
2348 }
2349 a.a_desc = &vnop_access_desc;
2350 a.a_vp = vp;
2351 a.a_action = action;
2352 a.a_context = context;
2353 thread_safe = THREAD_SAFE_FS(vp);
2354
2355 if (!thread_safe) {
2356 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2357 return (_err);
2358 }
2359 }
2360 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
2361 if (!thread_safe) {
2362 unlock_fsnode(vp, &funnel_state);
2363 }
2364 return (_err);
2365}
2366
2367#if 0
2368/*
2369 *#
2370 *#% getattr vp = = =
2371 *#
2372 */
2373struct vnop_getattr_args {
2374 struct vnodeop_desc *a_desc;
2375 vnode_t a_vp;
2376 struct vnode_attr *a_vap;
2377 vfs_context_t a_context;
2378};
2379#endif /* 0*/
2380errno_t
2381VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
2382{
2383 int _err;
2384 struct vnop_getattr_args a;
2385 int thread_safe;
2386 int funnel_state;
2387
2388 a.a_desc = &vnop_getattr_desc;
2389 a.a_vp = vp;
2390 a.a_vap = vap;
2391 a.a_context = context;
2392 thread_safe = THREAD_SAFE_FS(vp);
2393
2394 if (!thread_safe) {
2395 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2396 return (_err);
2397 }
2398 }
2399 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
2400 if (!thread_safe) {
2401 unlock_fsnode(vp, &funnel_state);
2402 }
2403 return (_err);
2404}
2405
#if 0
/*
 *#
 *#% setattr vp L L L
 *#
 */
struct vnop_setattr_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	struct vnode_attr *a_vap;
	vfs_context_t a_context;
};
#endif /* 0*/
/*
 * Dispatch an attribute update on vp from vap.  For filesystems without
 * native extended-attribute support, mirrors successful uid/gid/mode
 * changes onto the AppleDouble sidecar file so its metadata stays in sync.
 */
errno_t
VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
{
	int _err;
	struct vnop_setattr_args a;
	int thread_safe;
	int funnel_state;

	a.a_desc = &vnop_setattr_desc;
	a.a_vp = vp;
	a.a_vap = vap;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);

	/*
	 * Shadow uid/gid/mod change to extended attibute file.
	 */
	if (_err == 0 && !NATIVE_XATTR(vp)) {
		struct vnode_attr va;
		int change = 0;

		/* collect only the ownership/mode items that were requested */
		VATTR_INIT(&va);
		if (VATTR_IS_ACTIVE(vap, va_uid)) {
			VATTR_SET(&va, va_uid, vap->va_uid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_gid)) {
			VATTR_SET(&va, va_gid, vap->va_gid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_mode)) {
			VATTR_SET(&va, va_mode, vap->va_mode);
			change = 1;
		}
		if (change) {
			vnode_t dvp;
			char	*vname;

			/* both may legitimately be unavailable (NULL) */
			dvp = vnode_getparent(vp);
			vname = vnode_getname(vp);

			xattrfile_setattr(dvp, vname, &va, context, thread_safe);
			if (dvp != NULLVP)
				vnode_put(dvp);
			if (vname != NULL)
				vnode_putname(vname);
		}
	}
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
2479
2480#if 0
2481/*
2482 *#
2483 *#% getattrlist vp = = =
2484 *#
2485 */
2486struct vnop_getattrlist_args {
2487 struct vnodeop_desc *a_desc;
2488 vnode_t a_vp;
2489 struct attrlist *a_alist;
2490 struct uio *a_uio;
2491 int a_options;
2492 vfs_context_t a_context;
2493};
2494#endif /* 0*/
2495errno_t
2496VNOP_GETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
2497{
2498 int _err;
2499 struct vnop_getattrlist_args a;
2500 int thread_safe;
2501 int funnel_state = 0;
2502
2503 a.a_desc = &vnop_getattrlist_desc;
2504 a.a_vp = vp;
2505 a.a_alist = alist;
2506 a.a_uio = uio;
2507 a.a_options = options;
2508 a.a_context = context;
2509 thread_safe = THREAD_SAFE_FS(vp);
2510
2511 if (!thread_safe) {
2512 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2513 return (_err);
2514 }
2515 }
2516 _err = (*vp->v_op[vnop_getattrlist_desc.vdesc_offset])(&a);
2517 if (!thread_safe) {
2518 unlock_fsnode(vp, &funnel_state);
2519 }
2520 return (_err);
2521}
2522
2523#if 0
2524/*
2525 *#
2526 *#% setattrlist vp L L L
2527 *#
2528 */
2529struct vnop_setattrlist_args {
2530 struct vnodeop_desc *a_desc;
2531 vnode_t a_vp;
2532 struct attrlist *a_alist;
2533 struct uio *a_uio;
2534 int a_options;
2535 vfs_context_t a_context;
2536};
2537#endif /* 0*/
2538errno_t
2539VNOP_SETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
2540{
2541 int _err;
2542 struct vnop_setattrlist_args a;
2543 int thread_safe;
2544 int funnel_state = 0;
2545
2546 a.a_desc = &vnop_setattrlist_desc;
2547 a.a_vp = vp;
2548 a.a_alist = alist;
2549 a.a_uio = uio;
2550 a.a_options = options;
2551 a.a_context = context;
2552 thread_safe = THREAD_SAFE_FS(vp);
2553
2554 if (!thread_safe) {
2555 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2556 return (_err);
2557 }
2558 }
2559 _err = (*vp->v_op[vnop_setattrlist_desc.vdesc_offset])(&a);
2560
2561 vnode_uncache_credentials(vp);
2562
2563 if (!thread_safe) {
2564 unlock_fsnode(vp, &funnel_state);
2565 }
2566 return (_err);
2567}
2568
2569
2570#if 0
2571/*
2572 *#
2573 *#% read vp L L L
2574 *#
2575 */
2576struct vnop_read_args {
2577 struct vnodeop_desc *a_desc;
2578 vnode_t a_vp;
2579 struct uio *a_uio;
2580 int a_ioflag;
2581 vfs_context_t a_context;
2582};
2583#endif /* 0*/
2584errno_t
2585VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
2586{
2587 int _err;
2588 struct vnop_read_args a;
2589 int thread_safe;
2590 int funnel_state = 0;
2591 struct vfs_context acontext;
2592
2593 if (context == NULL) {
2594 acontext.vc_proc = current_proc();
2595 acontext.vc_ucred = kauth_cred_get();
2596 context = &acontext;
2597 }
2598
2599 a.a_desc = &vnop_read_desc;
2600 a.a_vp = vp;
2601 a.a_uio = uio;
2602 a.a_ioflag = ioflag;
2603 a.a_context = context;
2604 thread_safe = THREAD_SAFE_FS(vp);
2605
2606 if (!thread_safe) {
2607 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2608 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2609 if ( (_err = lock_fsnode(vp, NULL)) ) {
2610 (void) thread_funnel_set(kernel_flock, funnel_state);
2611 return (_err);
2612 }
2613 }
2614 }
2615 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
2616
2617 if (!thread_safe) {
2618 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2619 unlock_fsnode(vp, NULL);
2620 }
2621 (void) thread_funnel_set(kernel_flock, funnel_state);
2622 }
2623 return (_err);
2624}
2625
2626
2627#if 0
2628/*
2629 *#
2630 *#% write vp L L L
2631 *#
2632 */
2633struct vnop_write_args {
2634 struct vnodeop_desc *a_desc;
2635 vnode_t a_vp;
2636 struct uio *a_uio;
2637 int a_ioflag;
2638 vfs_context_t a_context;
2639};
2640#endif /* 0*/
2641errno_t
2642VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
2643{
2644 struct vnop_write_args a;
2645 int _err;
2646 int thread_safe;
2647 int funnel_state = 0;
2648 struct vfs_context acontext;
2649
2650 if (context == NULL) {
2651 acontext.vc_proc = current_proc();
2652 acontext.vc_ucred = kauth_cred_get();
2653 context = &acontext;
2654 }
2655
2656 a.a_desc = &vnop_write_desc;
2657 a.a_vp = vp;
2658 a.a_uio = uio;
2659 a.a_ioflag = ioflag;
2660 a.a_context = context;
2661 thread_safe = THREAD_SAFE_FS(vp);
2662
2663 if (!thread_safe) {
2664 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2665 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2666 if ( (_err = lock_fsnode(vp, NULL)) ) {
2667 (void) thread_funnel_set(kernel_flock, funnel_state);
2668 return (_err);
2669 }
2670 }
2671 }
2672 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
2673
2674 if (!thread_safe) {
2675 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2676 unlock_fsnode(vp, NULL);
2677 }
2678 (void) thread_funnel_set(kernel_flock, funnel_state);
2679 }
2680 return (_err);
2681}
2682
2683
2684#if 0
2685/*
2686 *#
2687 *#% ioctl vp U U U
2688 *#
2689 */
2690struct vnop_ioctl_args {
2691 struct vnodeop_desc *a_desc;
2692 vnode_t a_vp;
2693 u_long a_command;
2694 caddr_t a_data;
2695 int a_fflag;
2696 vfs_context_t a_context;
2697};
2698#endif /* 0*/
2699errno_t
2700VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t context)
2701{
2702 int _err;
2703 struct vnop_ioctl_args a;
2704 int thread_safe;
2705 int funnel_state = 0;
2706 struct vfs_context acontext;
2707
2708 if (context == NULL) {
2709 acontext.vc_proc = current_proc();
2710 acontext.vc_ucred = kauth_cred_get();
2711 context = &acontext;
2712 }
2713
2714 if (vfs_context_is64bit(context)) {
2715 if (!vnode_vfs64bitready(vp)) {
2716 return(ENOTTY);
2717 }
2718 }
2719
2720 a.a_desc = &vnop_ioctl_desc;
2721 a.a_vp = vp;
2722 a.a_command = command;
2723 a.a_data = data;
2724 a.a_fflag = fflag;
2725 a.a_context= context;
2726 thread_safe = THREAD_SAFE_FS(vp);
2727
2728 if (!thread_safe) {
2729 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2730 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2731 if ( (_err = lock_fsnode(vp, NULL)) ) {
2732 (void) thread_funnel_set(kernel_flock, funnel_state);
2733 return (_err);
2734 }
2735 }
2736 }
2737 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
2738 if (!thread_safe) {
2739 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2740 unlock_fsnode(vp, NULL);
2741 }
2742 (void) thread_funnel_set(kernel_flock, funnel_state);
2743 }
2744 return (_err);
2745}
2746
2747
2748#if 0
2749/*
2750 *#
2751 *#% select vp U U U
2752 *#
2753 */
2754struct vnop_select_args {
2755 struct vnodeop_desc *a_desc;
2756 vnode_t a_vp;
2757 int a_which;
2758 int a_fflags;
2759 void *a_wql;
2760 vfs_context_t a_context;
2761};
2762#endif /* 0*/
2763errno_t
2764VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t context)
2765{
2766 int _err;
2767 struct vnop_select_args a;
2768 int thread_safe;
2769 int funnel_state = 0;
2770 struct vfs_context acontext;
2771
2772 if (context == NULL) {
2773 acontext.vc_proc = current_proc();
2774 acontext.vc_ucred = kauth_cred_get();
2775 context = &acontext;
2776 }
2777 a.a_desc = &vnop_select_desc;
2778 a.a_vp = vp;
2779 a.a_which = which;
2780 a.a_fflags = fflags;
2781 a.a_context = context;
2782 a.a_wql = wql;
2783 thread_safe = THREAD_SAFE_FS(vp);
2784
2785 if (!thread_safe) {
2786 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2787 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2788 if ( (_err = lock_fsnode(vp, NULL)) ) {
2789 (void) thread_funnel_set(kernel_flock, funnel_state);
2790 return (_err);
2791 }
2792 }
2793 }
2794 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
2795 if (!thread_safe) {
2796 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2797 unlock_fsnode(vp, NULL);
2798 }
2799 (void) thread_funnel_set(kernel_flock, funnel_state);
2800 }
2801 return (_err);
2802}
2803
2804
2805#if 0
2806/*
2807 *#
2808 *#% exchange fvp L L L
2809 *#% exchange tvp L L L
2810 *#
2811 */
2812struct vnop_exchange_args {
2813 struct vnodeop_desc *a_desc;
2814 vnode_t a_fvp;
2815 vnode_t a_tvp;
2816 int a_options;
2817 vfs_context_t a_context;
2818};
2819#endif /* 0*/
/*
 * VNOP_EXCHANGE - dispatch the filesystem's exchange operation on the
 * pair of vnodes 'fvp'/'tvp'.
 *
 * For a non-thread-safe filesystem, both fsnodes are locked before the
 * call; locks are taken in ascending vnode-address order so that two
 * concurrent exchanges on the same pair cannot deadlock.
 *
 * NOTE(review): thread-safety is decided from fvp only — presumably both
 * vnodes are required to live on the same mount; confirm against callers.
 *
 * Returns 0 on success or an errno value.
 */
errno_t
VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t context)
{
	int _err;
	struct vnop_exchange_args a;
	int thread_safe;
	int funnel_state = 0;
	vnode_t lock_first = NULL, lock_second = NULL;

	a.a_desc = &vnop_exchange_desc;
	a.a_fvp = fvp;
	a.a_tvp = tvp;
	a.a_options = options;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(fvp);

	if (!thread_safe) {
		/*
		 * Lock in vnode address order to avoid deadlocks
		 */
		if (fvp < tvp) {
			lock_first = fvp;
			lock_second = tvp;
		} else {
			lock_first = tvp;
			lock_second = fvp;
		}
		/* First lock also acquires the funnel (funnel_state). */
		if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
			return (_err);
		}
		if ( (_err = lock_fsnode(lock_second, NULL)) ) {
			unlock_fsnode(lock_first, &funnel_state);
			return (_err);
		}
	}
	_err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		/* Release in reverse acquisition order; last unlock drops the funnel. */
		unlock_fsnode(lock_second, NULL);
		unlock_fsnode(lock_first, &funnel_state);
	}
	return (_err);
}
2862
2863
2864#if 0
2865/*
2866 *#
2867 *#% revoke vp U U U
2868 *#
2869 */
2870struct vnop_revoke_args {
2871 struct vnodeop_desc *a_desc;
2872 vnode_t a_vp;
2873 int a_flags;
2874 vfs_context_t a_context;
2875};
2876#endif /* 0*/
2877errno_t
2878VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t context)
2879{
2880 struct vnop_revoke_args a;
2881 int _err;
2882 int thread_safe;
2883 int funnel_state = 0;
2884
2885 a.a_desc = &vnop_revoke_desc;
2886 a.a_vp = vp;
2887 a.a_flags = flags;
2888 a.a_context = context;
2889 thread_safe = THREAD_SAFE_FS(vp);
2890
2891 if (!thread_safe) {
2892 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2893 }
2894 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
2895 if (!thread_safe) {
2896 (void) thread_funnel_set(kernel_flock, funnel_state);
2897 }
2898 return (_err);
2899}
2900
2901
2902#if 0
2903/*
2904 *#
2905 *# mmap - vp U U U
2906 *#
2907 */
2908struct vnop_mmap_args {
2909 struct vnodeop_desc *a_desc;
2910 vnode_t a_vp;
2911 int a_fflags;
2912 vfs_context_t a_context;
2913};
2914#endif /* 0*/
2915errno_t
2916VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t context)
2917{
2918 int _err;
2919 struct vnop_mmap_args a;
2920 int thread_safe;
2921 int funnel_state = 0;
2922
2923 a.a_desc = &vnop_mmap_desc;
2924 a.a_vp = vp;
2925 a.a_fflags = fflags;
2926 a.a_context = context;
2927 thread_safe = THREAD_SAFE_FS(vp);
2928
2929 if (!thread_safe) {
2930 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2931 return (_err);
2932 }
2933 }
2934 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
2935 if (!thread_safe) {
2936 unlock_fsnode(vp, &funnel_state);
2937 }
2938 return (_err);
2939}
2940
2941
2942#if 0
2943/*
2944 *#
2945 *# mnomap - vp U U U
2946 *#
2947 */
2948struct vnop_mnomap_args {
2949 struct vnodeop_desc *a_desc;
2950 vnode_t a_vp;
2951 vfs_context_t a_context;
2952};
2953#endif /* 0*/
2954errno_t
2955VNOP_MNOMAP(vnode_t vp, vfs_context_t context)
2956{
2957 int _err;
2958 struct vnop_mnomap_args a;
2959 int thread_safe;
2960 int funnel_state = 0;
2961
2962 a.a_desc = &vnop_mnomap_desc;
2963 a.a_vp = vp;
2964 a.a_context = context;
2965 thread_safe = THREAD_SAFE_FS(vp);
2966
2967 if (!thread_safe) {
2968 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2969 return (_err);
2970 }
2971 }
2972 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
2973 if (!thread_safe) {
2974 unlock_fsnode(vp, &funnel_state);
2975 }
2976 return (_err);
2977}
2978
2979
2980#if 0
2981/*
2982 *#
2983 *#% fsync vp L L L
2984 *#
2985 */
2986struct vnop_fsync_args {
2987 struct vnodeop_desc *a_desc;
2988 vnode_t a_vp;
2989 int a_waitfor;
2990 vfs_context_t a_context;
2991};
2992#endif /* 0*/
2993errno_t
2994VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t context)
2995{
2996 struct vnop_fsync_args a;
2997 int _err;
2998 int thread_safe;
2999 int funnel_state = 0;
3000
3001 a.a_desc = &vnop_fsync_desc;
3002 a.a_vp = vp;
3003 a.a_waitfor = waitfor;
3004 a.a_context = context;
3005 thread_safe = THREAD_SAFE_FS(vp);
3006
3007 if (!thread_safe) {
3008 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3009 return (_err);
3010 }
3011 }
3012 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3013 if (!thread_safe) {
3014 unlock_fsnode(vp, &funnel_state);
3015 }
3016 return (_err);
3017}
3018
3019
3020#if 0
3021/*
3022 *#
3023 *#% remove dvp L U U
3024 *#% remove vp L U U
3025 *#
3026 */
3027struct vnop_remove_args {
3028 struct vnodeop_desc *a_desc;
3029 vnode_t a_dvp;
3030 vnode_t a_vp;
3031 struct componentname *a_cnp;
3032 int a_flags;
3033 vfs_context_t a_context;
3034};
3035#endif /* 0*/
/*
 * VNOP_REMOVE - dispatch the filesystem's remove operation to unlink
 * 'vp' (named by 'cnp') from directory 'dvp'.
 *
 * On success the vnode is marked as needing VNOP_INACTIVE processing,
 * and for filesystems without native extended attributes any associated
 * "._" AppleDouble file is removed as well.
 *
 * NOTE(review): thread-safety is decided from dvp but the fsnode lock is
 * taken on vp — presumably both live on the same (funnel-based) mount;
 * confirm before changing either side.
 *
 * Returns 0 on success or an errno value.
 */
errno_t
VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t context)
{
	int _err;
	struct vnop_remove_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_remove_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);

	if (_err == 0) {
		vnode_setneedinactive(vp);

		if ( !(NATIVE_XATTR(dvp)) ) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 * force=1: remove unconditionally, not just if stale.
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
		}
	}
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3074
3075
3076#if 0
3077/*
3078 *#
3079 *#% link vp U U U
3080 *#% link tdvp L U U
3081 *#
3082 */
3083struct vnop_link_args {
3084 struct vnodeop_desc *a_desc;
3085 vnode_t a_vp;
3086 vnode_t a_tdvp;
3087 struct componentname *a_cnp;
3088 vfs_context_t a_context;
3089};
3090#endif /* 0*/
/*
 * VNOP_LINK - dispatch the filesystem's link operation to create a hard
 * link to 'vp' in directory 'tdvp', named by 'cnp'.
 *
 * For filesystems without native extended attributes, linking to a
 * "._"-prefixed regular file is refused with EPERM, since AppleDouble
 * sidecar files must stay paired one-to-one with their data files.
 *
 * NOTE(review): the fsnode lock is taken on vp (per THREAD_SAFE_FS(vp))
 * while the operation is dispatched through tdvp->v_op — hard links are
 * same-mount only, so the two should agree; confirm before changing.
 *
 * Returns 0 on success or an errno value.
 */
errno_t
VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t context)
{
	int _err;
	struct vnop_link_args a;
	int thread_safe;
	int funnel_state = 0;

	/*
	 * For file systems with non-native extended attributes,
	 * disallow linking to an existing "._" Apple Double file.
	 */
	if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
		char *vname;

		vname = vnode_getname(vp);
		if (vname != NULL) {
			_err = 0;
			/* "._" alone is not treated as an AppleDouble name. */
			if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
				_err = EPERM;
			}
			vnode_putname(vname);
			if (_err)
				return (_err);
		}
	}
	a.a_desc = &vnop_link_desc;
	a.a_vp = vp;
	a.a_tdvp = tdvp;
	a.a_cnp = cnp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3135
3136
3137#if 0
3138/*
3139 *#
3140 *#% rename fdvp U U U
3141 *#% rename fvp U U U
3142 *#% rename tdvp L U U
3143 *#% rename tvp X U U
3144 *#
3145 */
3146struct vnop_rename_args {
3147 struct vnodeop_desc *a_desc;
3148 vnode_t a_fdvp;
3149 vnode_t a_fvp;
3150 struct componentname *a_fcnp;
3151 vnode_t a_tdvp;
3152 vnode_t a_tvp;
3153 struct componentname *a_tcnp;
3154 vfs_context_t a_context;
3155};
3156#endif /* 0*/
/*
 * VNOP_RENAME - dispatch the filesystem's rename operation, moving 'fvp'
 * (named by 'fcnp' in directory 'fdvp') to the name 'tcnp' in directory
 * 'tdvp', replacing 'tvp' if it exists.
 *
 * For non-thread-safe filesystems, both parent directories and then both
 * child vnodes are locked, each pair in ascending vnode-address order, so
 * concurrent renames over the same vnodes cannot deadlock.
 *
 * For filesystems without native extended attributes, a successful rename
 * is followed by a second, internal rename of the source's "._"
 * AppleDouble file to the destination's "._" name (or removal of a stale
 * destination "._" file when the source has none).  Failures in this
 * shadow pass are deliberately not propagated: _err reflects only the
 * primary rename.
 *
 * Returns 0 on success or an errno value.
 */
errno_t
VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
            struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
            vfs_context_t context)
{
	int _err;
	struct vnop_rename_args a;
	int funnel_state = 0;
	char smallname1[48];	/* stack buffers avoid MALLOC for short "._" names */
	char smallname2[48];
	char *xfromname = NULL;
	char *xtoname = NULL;
	vnode_t lock_first = NULL, lock_second = NULL;
	vnode_t fdvp_unsafe = NULLVP;
	vnode_t tdvp_unsafe = NULLVP;

	a.a_desc = &vnop_rename_desc;
	a.a_fdvp = fdvp;
	a.a_fvp = fvp;
	a.a_fcnp = fcnp;
	a.a_tdvp = tdvp;
	a.a_tvp = tvp;
	a.a_tcnp = tcnp;
	a.a_context = context;

	if (!THREAD_SAFE_FS(fdvp))
		fdvp_unsafe = fdvp;
	if (!THREAD_SAFE_FS(tdvp))
		tdvp_unsafe = tdvp;

	if (fdvp_unsafe != NULLVP) {
		/*
		 * Lock parents in vnode address order to avoid deadlocks.
		 * Note that it's possible for the fdvp to be unsafe but the
		 * tdvp to be safe, because tvp could be a directory in the
		 * root of a filesystem... in that case, tdvp is in the
		 * filesystem that this root is mounted on.
		 */
		if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
			lock_first = fdvp_unsafe;
			lock_second = NULL;
		} else if (fdvp_unsafe < tdvp_unsafe) {
			lock_first = fdvp_unsafe;
			lock_second = tdvp_unsafe;
		} else {
			lock_first = tdvp_unsafe;
			lock_second = fdvp_unsafe;
		}
		/* First parent lock also acquires the funnel (funnel_state). */
		if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
			return (_err);

		if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
			unlock_fsnode(lock_first, &funnel_state);
			return (_err);
		}

		/*
		 * Lock both children in vnode address order to avoid deadlocks
		 */
		if (tvp == NULL || tvp == fvp) {
			lock_first = fvp;
			lock_second = NULL;
		} else if (fvp < tvp) {
			lock_first = fvp;
			lock_second = tvp;
		} else {
			lock_first = tvp;
			lock_second = fvp;
		}
		if ( (_err = lock_fsnode(lock_first, NULL)) )
			goto out1;

		if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
			unlock_fsnode(lock_first, NULL);
			goto out1;
		}
	}
	/*
	 * Save source and destination names (._ AppleDouble files).
	 * Skip if source already has a "._" prefix.
	 */
	if (!NATIVE_XATTR(fdvp) &&
	    !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
		size_t len;

		/* Get source attribute file name: "._" + namelen + NUL. */
		len = fcnp->cn_namelen + 3;
		if (len > sizeof(smallname1)) {
			MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
		} else {
			xfromname = &smallname1[0];
		}
		strcpy(xfromname, "._");
		strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
		xfromname[len-1] = '\0';

		/* Get destination attribute file name. */
		len = tcnp->cn_namelen + 3;
		if (len > sizeof(smallname2)) {
			MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
		} else {
			xtoname = &smallname2[0];
		}
		strcpy(xtoname, "._");
		strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
		xtoname[len-1] = '\0';
	}

	/* The primary rename; _err below is this call's result. */
	_err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);

	if (fdvp_unsafe != NULLVP) {
		/* lock_first/lock_second still refer to the child locks here. */
		if (lock_second != NULL)
			unlock_fsnode(lock_second, NULL);
		unlock_fsnode(lock_first, NULL);
	}
	if (_err == 0) {
		if (tvp && tvp != fvp)
			vnode_setneedinactive(tvp);
	}

	/*
	 * Rename any associated extended attribute file (._ AppleDouble file).
	 */
	if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
		struct nameidata fromnd, tond;
		int killdest = 0;
		int error;

		/*
		 * Get source attribute file vnode.
		 * Note that fdvp already has an iocount reference and
		 * using DELETE will take an additional reference.
		 */
		NDINIT(&fromnd, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
		       CAST_USER_ADDR_T(xfromname), context);
		fromnd.ni_dvp = fdvp;
		error = namei(&fromnd);

		if (error) {
			/* When source doesn't exist there still may be a destination. */
			if (error == ENOENT) {
				killdest = 1;
			} else {
				goto out;
			}
		} else if (fromnd.ni_vp->v_type != VREG) {
			/* Non-regular "._" entries are ignored; just clean up the dest. */
			vnode_put(fromnd.ni_vp);
			nameidone(&fromnd);
			killdest = 1;
		}
		if (killdest) {
			/* No usable source "._" file: remove any stale destination one. */
			struct vnop_remove_args args;

			/*
			 * Get destination attribute file vnode.
			 * Note that tdvp already has an iocount reference.
			 */
			NDINIT(&tond, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
			       CAST_USER_ADDR_T(xtoname), context);
			tond.ni_dvp = tdvp;
			error = namei(&tond);
			if (error) {
				goto out;
			}
			if (tond.ni_vp->v_type != VREG) {
				vnode_put(tond.ni_vp);
				nameidone(&tond);
				goto out;
			}
			args.a_desc = &vnop_remove_desc;
			args.a_dvp = tdvp;
			args.a_vp = tond.ni_vp;
			args.a_cnp = &tond.ni_cnd;
			args.a_context = context;

			if (fdvp_unsafe != NULLVP)
				error = lock_fsnode(tond.ni_vp, NULL);
			if (error == 0) {
				error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);

				if (fdvp_unsafe != NULLVP)
					unlock_fsnode(tond.ni_vp, NULL);

				if (error == 0)
					vnode_setneedinactive(tond.ni_vp);
			}
			vnode_put(tond.ni_vp);
			nameidone(&tond);
			goto out;
		}

		/*
		 * Get destination attribute file vnode.
		 */
		NDINIT(&tond, RENAME,
		       NOCACHE | NOFOLLOW | USEDVP, UIO_SYSSPACE,
		       CAST_USER_ADDR_T(xtoname), context);
		tond.ni_dvp = tdvp;
		error = namei(&tond);

		if (error) {
			vnode_put(fromnd.ni_vp);
			nameidone(&fromnd);
			goto out;
		}
		/* Reuse 'a' for the shadow "._" rename. */
		a.a_desc = &vnop_rename_desc;
		a.a_fdvp = fdvp;
		a.a_fvp = fromnd.ni_vp;
		a.a_fcnp = &fromnd.ni_cnd;
		a.a_tdvp = tdvp;
		a.a_tvp = tond.ni_vp;
		a.a_tcnp = &tond.ni_cnd;
		a.a_context = context;

		if (fdvp_unsafe != NULLVP) {
			/*
			 * Lock in vnode address order to avoid deadlocks
			 */
			if (tond.ni_vp == NULL || tond.ni_vp == fromnd.ni_vp) {
				lock_first = fromnd.ni_vp;
				lock_second = NULL;
			} else if (fromnd.ni_vp < tond.ni_vp) {
				lock_first = fromnd.ni_vp;
				lock_second = tond.ni_vp;
			} else {
				lock_first = tond.ni_vp;
				lock_second = fromnd.ni_vp;
			}
			if ( (error = lock_fsnode(lock_first, NULL)) == 0) {
				if (lock_second != NULL && (error = lock_fsnode(lock_second, NULL)) )
					unlock_fsnode(lock_first, NULL);
			}
		}
		if (error == 0) {
			error = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);

			if (fdvp_unsafe != NULLVP) {
				if (lock_second != NULL)
					unlock_fsnode(lock_second, NULL);
				unlock_fsnode(lock_first, NULL);
			}
			if (error == 0) {
				vnode_setneedinactive(fromnd.ni_vp);

				if (tond.ni_vp && tond.ni_vp != fromnd.ni_vp)
					vnode_setneedinactive(tond.ni_vp);
			}
		}
		vnode_put(fromnd.ni_vp);
		if (tond.ni_vp) {
			vnode_put(tond.ni_vp);
		}
		nameidone(&tond);
		nameidone(&fromnd);
	}
out:
	/* Free heap-allocated "._" name buffers (stack buffers are skipped). */
	if (xfromname && xfromname != &smallname1[0]) {
		FREE(xfromname, M_TEMP);
	}
	if (xtoname && xtoname != &smallname2[0]) {
		FREE(xtoname, M_TEMP);
	}
out1:
	/* Drop the parent locks; the last unlock also releases the funnel. */
	if (fdvp_unsafe != NULLVP) {
		if (tdvp_unsafe != NULLVP)
			unlock_fsnode(tdvp_unsafe, NULL);
		unlock_fsnode(fdvp_unsafe, &funnel_state);
	}
	return (_err);
}
3427
3428 #if 0
3429/*
3430 *#
3431 *#% mkdir dvp L U U
3432 *#% mkdir vpp - L -
3433 *#
3434 */
3435struct vnop_mkdir_args {
3436 struct vnodeop_desc *a_desc;
3437 vnode_t a_dvp;
3438 vnode_t *a_vpp;
3439 struct componentname *a_cnp;
3440 struct vnode_attr *a_vap;
3441 vfs_context_t a_context;
3442};
3443#endif /* 0*/
3444errno_t
3445VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
3446 struct vnode_attr *vap, vfs_context_t context)
3447{
3448 int _err;
3449 struct vnop_mkdir_args a;
3450 int thread_safe;
3451 int funnel_state = 0;
3452
3453 a.a_desc = &vnop_mkdir_desc;
3454 a.a_dvp = dvp;
3455 a.a_vpp = vpp;
3456 a.a_cnp = cnp;
3457 a.a_vap = vap;
3458 a.a_context = context;
3459 thread_safe = THREAD_SAFE_FS(dvp);
3460
3461 if (!thread_safe) {
3462 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3463 return (_err);
3464 }
3465 }
3466 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
3467 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3468 /*
3469 * Remove stale Apple Double file (if any).
3470 */
3471 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
3472 }
3473 if (!thread_safe) {
3474 unlock_fsnode(dvp, &funnel_state);
3475 }
3476 return (_err);
3477}
3478
3479
3480#if 0
3481/*
3482 *#
3483 *#% rmdir dvp L U U
3484 *#% rmdir vp L U U
3485 *#
3486 */
3487struct vnop_rmdir_args {
3488 struct vnodeop_desc *a_desc;
3489 vnode_t a_dvp;
3490 vnode_t a_vp;
3491 struct componentname *a_cnp;
3492 vfs_context_t a_context;
3493};
3494
3495#endif /* 0*/
3496errno_t
3497VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t context)
3498{
3499 int _err;
3500 struct vnop_rmdir_args a;
3501 int thread_safe;
3502 int funnel_state = 0;
3503
3504 a.a_desc = &vnop_rmdir_desc;
3505 a.a_dvp = dvp;
3506 a.a_vp = vp;
3507 a.a_cnp = cnp;
3508 a.a_context = context;
3509 thread_safe = THREAD_SAFE_FS(dvp);
3510
3511 if (!thread_safe) {
3512 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3513 return (_err);
3514 }
3515 }
3516 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
3517
3518 if (_err == 0) {
3519 vnode_setneedinactive(vp);
3520
3521 if ( !(NATIVE_XATTR(dvp)) ) {
3522 /*
3523 * Remove any associated extended attibute file (._ AppleDouble file).
3524 */
3525 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
3526 }
3527 }
3528 if (!thread_safe) {
3529 unlock_fsnode(vp, &funnel_state);
3530 }
3531 return (_err);
3532}
3533
3534/*
3535 * Remove a ._ AppleDouble file
3536 */
3537#define AD_STALE_SECS (180)
/*
 * xattrfile_remove - remove the "._" AppleDouble sidecar file for
 * 'basename' in directory 'dvp', if one exists.
 *
 * When 'force' is 0 (create paths: mkdir/symlink), the file is only
 * removed if it looks stale: non-empty and not modified for at least
 * AD_STALE_SECS.  When 'force' is 1 (remove/rmdir paths), it is removed
 * unconditionally.  'thread_safe' tells us whether the fsnode lock must
 * be taken around the remove.
 *
 * All failures are silently ignored — this is best-effort cleanup.
 */
static void
xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, int thread_safe, int force) {
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];	/* avoids MALLOC for typical name lengths */
	char *filename = NULL;
	size_t len;

	/* Nothing to do for empty names or names already "._"-prefixed. */
	if ((basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	filename = &smallname[0];
	len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (len >= sizeof(smallname)) {
		len++; /* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
		len = snprintf(filename, len, "._%s", basename);
	}
	NDINIT(&nd, DELETE, LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(filename), context);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0)
		goto out2;

	xvp = nd.ni_vp;
	nameidone(&nd);
	if (xvp->v_type != VREG)
		goto out1;

	/*
	 * When creating a new object and a "._" file already
	 * exists, check to see if it's a stale "._" file.
	 */
	if (!force) {
		struct vnode_attr va;

		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_data_size);
		VATTR_WANTED(&va, va_modify_time);
		if (VNOP_GETATTR(xvp, &va, context) == 0 &&
		    VATTR_IS_SUPPORTED(&va, va_data_size) &&
		    VATTR_IS_SUPPORTED(&va, va_modify_time) &&
		    va.va_data_size != 0) {
			struct timeval tv;

			microtime(&tv);
			if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
			    (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
				force = 1; /* must be stale */
			}
		}
	}
	if (force) {
		struct vnop_remove_args a;
		int error;

		a.a_desc = &vnop_remove_desc;
		a.a_dvp = nd.ni_dvp;
		a.a_vp = xvp;
		a.a_cnp = &nd.ni_cnd;
		a.a_context = context;

		if (!thread_safe) {
			/* Best-effort: if we can't lock it, leave the file alone. */
			if ( (lock_fsnode(xvp, NULL)) )
				goto out1;
		}
		error = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);

		if (!thread_safe)
			unlock_fsnode(xvp, NULL);

		if (error == 0)
			vnode_setneedinactive(xvp);
	}
out1:
	/* Note: nd.ni_dvp's iocount is dropped by caller of VNOP_XXXX */
	vnode_put(xvp);
out2:
	/* Free the name only if it was heap-allocated. */
	if (filename && filename != &smallname[0]) {
		FREE(filename, M_TEMP);
	}
}
3622
3623/*
3624 * Shadow uid/gid/mod to a ._ AppleDouble file
3625 */
/*
 * xattrfile_setattr - shadow a uid/gid/mode change onto the "._"
 * AppleDouble sidecar file for 'basename' in directory 'dvp', so the
 * sidecar's ownership/permissions track the data file's.
 *
 * 'vap' carries only the attributes to propagate (prepared by the
 * caller, VNOP_SETATTR).  'thread_safe' tells us whether the fsnode
 * lock must be taken around the setattr.
 *
 * All failures are silently ignored — this is best-effort shadowing.
 */
static void
xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
		  vfs_context_t context, int thread_safe) {
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];	/* avoids MALLOC for typical name lengths */
	char *filename = NULL;
	size_t len;

	/* Nothing to do without a parent, for empty names, or "._" names. */
	if ((dvp == NULLVP) ||
	    (basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	filename = &smallname[0];
	len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (len >= sizeof(smallname)) {
		len++; /* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
		len = snprintf(filename, len, "._%s", basename);
	}
	NDINIT(&nd, LOOKUP, NOFOLLOW | USEDVP, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(filename), context);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0)
		goto out2;

	xvp = nd.ni_vp;
	nameidone(&nd);

	/* Only shadow onto regular files; anything else is left untouched. */
	if (xvp->v_type == VREG) {
		struct vnop_setattr_args a;

		a.a_desc = &vnop_setattr_desc;
		a.a_vp = xvp;
		a.a_vap = vap;
		a.a_context = context;

		if (!thread_safe) {
			/* Best-effort: if we can't lock it, skip the shadow update. */
			if ( (lock_fsnode(xvp, NULL)) )
				goto out1;
		}
		/* Result intentionally discarded: shadowing is best-effort. */
		(void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
		if (!thread_safe) {
			unlock_fsnode(xvp, NULL);
		}
	}
out1:
	vnode_put(xvp);
out2:
	/* Free the name only if it was heap-allocated. */
	if (filename && filename != &smallname[0]) {
		FREE(filename, M_TEMP);
	}
}
3680
3681 #if 0
3682/*
3683 *#
3684 *#% symlink dvp L U U
3685 *#% symlink vpp - U -
3686 *#
3687 */
3688struct vnop_symlink_args {
3689 struct vnodeop_desc *a_desc;
3690 vnode_t a_dvp;
3691 vnode_t *a_vpp;
3692 struct componentname *a_cnp;
3693 struct vnode_attr *a_vap;
3694 char *a_target;
3695 vfs_context_t a_context;
3696};
3697
3698#endif /* 0*/
/*
 * Invoke the filesystem's symlink operation on a directory vnode.
 *
 * dvp     - directory in which to create the link (locked per vnop spec)
 * vpp     - receives the new symlink vnode on success
 * cnp     - component name of the new link
 * vap     - initial attributes for the link
 * target  - path string the symlink will point to
 * context - caller's VFS context
 *
 * Returns 0 or an errno from the filesystem.
 */
errno_t
VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
   struct vnode_attr *vap, char *target, vfs_context_t context)
{
	int _err;
	struct vnop_symlink_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_symlink_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_target = target;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		/* Non-threadsafe FS: take the funnel/fsnode lock first */
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
	}
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
3734
3735#if 0
3736/*
3737 *#
3738 *#% readdir vp L L L
3739 *#
3740 */
3741struct vnop_readdir_args {
3742 struct vnodeop_desc *a_desc;
3743 vnode_t a_vp;
3744 struct uio *a_uio;
3745 int a_flags;
3746 int *a_eofflag;
3747 int *a_numdirent;
3748 vfs_context_t a_context;
3749};
3750
3751#endif /* 0*/
3752errno_t
3753VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
3754 int *numdirent, vfs_context_t context)
3755{
3756 int _err;
3757 struct vnop_readdir_args a;
3758 int thread_safe;
3759 int funnel_state = 0;
3760
3761 a.a_desc = &vnop_readdir_desc;
3762 a.a_vp = vp;
3763 a.a_uio = uio;
3764 a.a_flags = flags;
3765 a.a_eofflag = eofflag;
3766 a.a_numdirent = numdirent;
3767 a.a_context = context;
3768 thread_safe = THREAD_SAFE_FS(vp);
3769
3770 if (!thread_safe) {
3771 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3772 return (_err);
3773 }
3774 }
3775 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
3776 if (!thread_safe) {
3777 unlock_fsnode(vp, &funnel_state);
3778 }
3779 return (_err);
3780}
3781
3782#if 0
3783/*
3784 *#
3785 *#% readdirattr vp L L L
3786 *#
3787 */
3788struct vnop_readdirattr_args {
3789 struct vnodeop_desc *a_desc;
3790 vnode_t a_vp;
3791 struct attrlist *a_alist;
3792 struct uio *a_uio;
3793 u_long a_maxcount;
3794 u_long a_options;
3795 u_long *a_newstate;
3796 int *a_eofflag;
3797 u_long *a_actualcount;
3798 vfs_context_t a_context;
3799};
3800
3801#endif /* 0*/
3802errno_t
3803VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_long maxcount,
3804 u_long options, u_long *newstate, int *eofflag, u_long *actualcount, vfs_context_t context)
3805{
3806 int _err;
3807 struct vnop_readdirattr_args a;
3808 int thread_safe;
3809 int funnel_state = 0;
3810
3811 a.a_desc = &vnop_readdirattr_desc;
3812 a.a_vp = vp;
3813 a.a_alist = alist;
3814 a.a_uio = uio;
3815 a.a_maxcount = maxcount;
3816 a.a_options = options;
3817 a.a_newstate = newstate;
3818 a.a_eofflag = eofflag;
3819 a.a_actualcount = actualcount;
3820 a.a_context = context;
3821 thread_safe = THREAD_SAFE_FS(vp);
3822
3823 if (!thread_safe) {
3824 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3825 return (_err);
3826 }
3827 }
3828 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
3829 if (!thread_safe) {
3830 unlock_fsnode(vp, &funnel_state);
3831 }
3832 return (_err);
3833}
3834
3835#if 0
3836/*
3837 *#
3838 *#% readlink vp L L L
3839 *#
3840 */
3841struct vnop_readlink_args {
3842 struct vnodeop_desc *a_desc;
3843 vnode_t a_vp;
3844 struct uio *a_uio;
3845 vfs_context_t a_context;
3846};
3847#endif /* 0 */
3848
3849errno_t
3850VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t context)
3851{
3852 int _err;
3853 struct vnop_readlink_args a;
3854 int thread_safe;
3855 int funnel_state = 0;
3856
3857 a.a_desc = &vnop_readlink_desc;
3858 a.a_vp = vp;
3859 a.a_uio = uio;
3860 a.a_context = context;
3861 thread_safe = THREAD_SAFE_FS(vp);
3862
3863 if (!thread_safe) {
3864 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3865 return (_err);
3866 }
3867 }
3868 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
3869 if (!thread_safe) {
3870 unlock_fsnode(vp, &funnel_state);
3871 }
3872 return (_err);
3873}
3874
3875#if 0
3876/*
3877 *#
3878 *#% inactive vp L U U
3879 *#
3880 */
3881struct vnop_inactive_args {
3882 struct vnodeop_desc *a_desc;
3883 vnode_t a_vp;
3884 vfs_context_t a_context;
3885};
3886#endif /* 0*/
3887errno_t
3888VNOP_INACTIVE(struct vnode *vp, vfs_context_t context)
3889{
3890 int _err;
3891 struct vnop_inactive_args a;
3892 int thread_safe;
3893 int funnel_state = 0;
3894
3895 a.a_desc = &vnop_inactive_desc;
3896 a.a_vp = vp;
3897 a.a_context = context;
3898 thread_safe = THREAD_SAFE_FS(vp);
3899
3900 if (!thread_safe) {
3901 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3902 return (_err);
3903 }
3904 }
3905 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
3906 if (!thread_safe) {
3907 unlock_fsnode(vp, &funnel_state);
3908 }
3909 return (_err);
3910}
3911
3912
3913#if 0
3914/*
3915 *#
3916 *#% reclaim vp U U U
3917 *#
3918 */
3919struct vnop_reclaim_args {
3920 struct vnodeop_desc *a_desc;
3921 vnode_t a_vp;
3922 vfs_context_t a_context;
3923};
3924#endif /* 0*/
/*
 * Invoke the filesystem's reclaim operation to tear down a vnode.
 *
 * Per the vnop spec above ("reclaim vp U U U") the vnode is unlocked
 * here, so unlike most VNOP wrappers this one does not call
 * lock_fsnode(); it only takes the kernel funnel for non-threadsafe
 * filesystems.
 */
errno_t
VNOP_RECLAIM(struct vnode *vp, vfs_context_t context)
{
	int _err;
	struct vnop_reclaim_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_reclaim_desc;
	a.a_vp = vp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		/* Funnel only -- no fsnode lock (vnode is unlocked here) */
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	_err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
3947
3948
3949#if 0
3950/*
3951 *#
3952 *#% pathconf vp L L L
3953 *#
3954 */
3955struct vnop_pathconf_args {
3956 struct vnodeop_desc *a_desc;
3957 vnode_t a_vp;
3958 int a_name;
3959 register_t *a_retval;
3960 vfs_context_t a_context;
3961};
3962#endif /* 0*/
3963errno_t
3964VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t context)
3965{
3966 int _err;
3967 struct vnop_pathconf_args a;
3968 int thread_safe;
3969 int funnel_state = 0;
3970
3971 a.a_desc = &vnop_pathconf_desc;
3972 a.a_vp = vp;
3973 a.a_name = name;
3974 a.a_retval = retval;
3975 a.a_context = context;
3976 thread_safe = THREAD_SAFE_FS(vp);
3977
3978 if (!thread_safe) {
3979 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3980 return (_err);
3981 }
3982 }
3983 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
3984 if (!thread_safe) {
3985 unlock_fsnode(vp, &funnel_state);
3986 }
3987 return (_err);
3988}
3989
3990#if 0
3991/*
3992 *#
3993 *#% advlock vp U U U
3994 *#
3995 */
3996struct vnop_advlock_args {
3997 struct vnodeop_desc *a_desc;
3998 vnode_t a_vp;
3999 caddr_t a_id;
4000 int a_op;
4001 struct flock *a_fl;
4002 int a_flags;
4003 vfs_context_t a_context;
4004};
4005#endif /* 0*/
4006errno_t
4007VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t context)
4008{
4009 int _err;
4010 struct vnop_advlock_args a;
4011 int thread_safe;
4012 int funnel_state = 0;
4013 struct uthread * uth;
4014
4015 a.a_desc = &vnop_advlock_desc;
4016 a.a_vp = vp;
4017 a.a_id = id;
4018 a.a_op = op;
4019 a.a_fl = fl;
4020 a.a_flags = flags;
4021 a.a_context = context;
4022 thread_safe = THREAD_SAFE_FS(vp);
4023
4024 uth = get_bsdthread_info(current_thread());
4025 if (!thread_safe) {
4026 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4027 }
4028 /* Disallow advisory locking on non-seekable vnodes */
4029 if (vnode_isfifo(vp)) {
4030 _err = err_advlock(&a);
4031 } else {
4032 if ((vp->v_flag & VLOCKLOCAL)) {
4033 /* Advisory locking done at this layer */
4034 _err = lf_advlock(&a);
4035 } else {
4036 /* Advisory locking done by underlying filesystem */
4037 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
4038 }
4039 }
4040 if (!thread_safe) {
4041 (void) thread_funnel_set(kernel_flock, funnel_state);
4042 }
4043 return (_err);
4044}
4045
4046
4047
4048#if 0
4049/*
4050 *#
4051 *#% allocate vp L L L
4052 *#
4053 */
4054struct vnop_allocate_args {
4055 struct vnodeop_desc *a_desc;
4056 vnode_t a_vp;
4057 off_t a_length;
4058 u_int32_t a_flags;
4059 off_t *a_bytesallocated;
4060 off_t a_offset;
4061 vfs_context_t a_context;
4062};
4063
4064#endif /* 0*/
4065errno_t
4066VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t context)
4067{
4068 int _err;
4069 struct vnop_allocate_args a;
4070 int thread_safe;
4071 int funnel_state = 0;
4072
4073 a.a_desc = &vnop_allocate_desc;
4074 a.a_vp = vp;
4075 a.a_length = length;
4076 a.a_flags = flags;
4077 a.a_bytesallocated = bytesallocated;
4078 a.a_offset = offset;
4079 a.a_context = context;
4080 thread_safe = THREAD_SAFE_FS(vp);
4081
4082 if (!thread_safe) {
4083 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4084 return (_err);
4085 }
4086 }
4087 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
4088 if (!thread_safe) {
4089 unlock_fsnode(vp, &funnel_state);
4090 }
4091 return (_err);
4092}
4093
4094#if 0
4095/*
4096 *#
4097 *#% pagein vp = = =
4098 *#
4099 */
4100struct vnop_pagein_args {
4101 struct vnodeop_desc *a_desc;
4102 vnode_t a_vp;
4103 upl_t a_pl;
4104 vm_offset_t a_pl_offset;
4105 off_t a_f_offset;
4106 size_t a_size;
4107 int a_flags;
4108 vfs_context_t a_context;
4109};
4110#endif /* 0*/
4111errno_t
4112VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4113{
4114 int _err;
4115 struct vnop_pagein_args a;
4116 int thread_safe;
4117 int funnel_state = 0;
4118
4119 a.a_desc = &vnop_pagein_desc;
4120 a.a_vp = vp;
4121 a.a_pl = pl;
4122 a.a_pl_offset = pl_offset;
4123 a.a_f_offset = f_offset;
4124 a.a_size = size;
4125 a.a_flags = flags;
4126 a.a_context = context;
4127 thread_safe = THREAD_SAFE_FS(vp);
4128
4129 if (!thread_safe) {
4130 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4131 }
4132 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
4133 if (!thread_safe) {
4134 (void) thread_funnel_set(kernel_flock, funnel_state);
4135 }
4136 return (_err);
4137}
4138
4139#if 0
4140/*
4141 *#
4142 *#% pageout vp = = =
4143 *#
4144 */
4145struct vnop_pageout_args {
4146 struct vnodeop_desc *a_desc;
4147 vnode_t a_vp;
4148 upl_t a_pl;
4149 vm_offset_t a_pl_offset;
4150 off_t a_f_offset;
4151 size_t a_size;
4152 int a_flags;
4153 vfs_context_t a_context;
4154};
4155
4156#endif /* 0*/
4157errno_t
4158VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4159{
4160 int _err;
4161 struct vnop_pageout_args a;
4162 int thread_safe;
4163 int funnel_state = 0;
4164
4165 a.a_desc = &vnop_pageout_desc;
4166 a.a_vp = vp;
4167 a.a_pl = pl;
4168 a.a_pl_offset = pl_offset;
4169 a.a_f_offset = f_offset;
4170 a.a_size = size;
4171 a.a_flags = flags;
4172 a.a_context = context;
4173 thread_safe = THREAD_SAFE_FS(vp);
4174
4175 if (!thread_safe) {
4176 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4177 }
4178 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
4179 if (!thread_safe) {
4180 (void) thread_funnel_set(kernel_flock, funnel_state);
4181 }
4182 return (_err);
4183}
4184
4185
4186#if 0
4187/*
4188 *#
4189 *#% searchfs vp L L L
4190 *#
4191 */
4192struct vnop_searchfs_args {
4193 struct vnodeop_desc *a_desc;
4194 vnode_t a_vp;
4195 void *a_searchparams1;
4196 void *a_searchparams2;
4197 struct attrlist *a_searchattrs;
4198 u_long a_maxmatches;
4199 struct timeval *a_timelimit;
4200 struct attrlist *a_returnattrs;
4201 u_long *a_nummatches;
4202 u_long a_scriptcode;
4203 u_long a_options;
4204 struct uio *a_uio;
4205 struct searchstate *a_searchstate;
4206 vfs_context_t a_context;
4207};
4208
4209#endif /* 0*/
4210errno_t
4211VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, u_long maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, u_long *nummatches, u_long scriptcode, u_long options, struct uio *uio, struct searchstate *searchstate, vfs_context_t context)
4212{
4213 int _err;
4214 struct vnop_searchfs_args a;
4215 int thread_safe;
4216 int funnel_state = 0;
4217
4218 a.a_desc = &vnop_searchfs_desc;
4219 a.a_vp = vp;
4220 a.a_searchparams1 = searchparams1;
4221 a.a_searchparams2 = searchparams2;
4222 a.a_searchattrs = searchattrs;
4223 a.a_maxmatches = maxmatches;
4224 a.a_timelimit = timelimit;
4225 a.a_returnattrs = returnattrs;
4226 a.a_nummatches = nummatches;
4227 a.a_scriptcode = scriptcode;
4228 a.a_options = options;
4229 a.a_uio = uio;
4230 a.a_searchstate = searchstate;
4231 a.a_context = context;
4232 thread_safe = THREAD_SAFE_FS(vp);
4233
4234 if (!thread_safe) {
4235 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4236 return (_err);
4237 }
4238 }
4239 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
4240 if (!thread_safe) {
4241 unlock_fsnode(vp, &funnel_state);
4242 }
4243 return (_err);
4244}
4245
4246#if 0
4247/*
4248 *#
4249 *#% copyfile fvp U U U
4250 *#% copyfile tdvp L U U
4251 *#% copyfile tvp X U U
4252 *#
4253 */
4254struct vnop_copyfile_args {
4255 struct vnodeop_desc *a_desc;
4256 vnode_t a_fvp;
4257 vnode_t a_tdvp;
4258 vnode_t a_tvp;
4259 struct componentname *a_tcnp;
4260 int a_mode;
4261 int a_flags;
4262 vfs_context_t a_context;
4263};
4264#endif /* 0*/
/*
 * Invoke the filesystem's copyfile operation (copy fvp into tdvp as
 * tcnp, possibly replacing tvp).
 *
 * NOTE(review): unlike the other VNOP wrappers this one performs no
 * THREAD_SAFE_FS()/funnel handling; the vnop spec above marks fvp as
 * unlocked (U U U) -- confirm the omission is intentional for
 * non-threadsafe filesystems.
 */
errno_t
VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
              int mode, int flags, vfs_context_t context)
{
	int _err;
	struct vnop_copyfile_args a;
	a.a_desc = &vnop_copyfile_desc;
	a.a_fvp = fvp;
	a.a_tdvp = tdvp;
	a.a_tvp = tvp;
	a.a_tcnp = tcnp;
	a.a_mode = mode;
	a.a_flags = flags;
	a.a_context = context;
	_err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
	return (_err);
}
4282
4283
4284errno_t
4285VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t context)
4286{
4287 struct vnop_getxattr_args a;
4288 int error;
4289 int thread_safe;
4290 int funnel_state = 0;
4291
4292 a.a_desc = &vnop_getxattr_desc;
4293 a.a_vp = vp;
4294 a.a_name = name;
4295 a.a_uio = uio;
4296 a.a_size = size;
4297 a.a_options = options;
4298 a.a_context = context;
4299
4300 thread_safe = THREAD_SAFE_FS(vp);
4301 if (!thread_safe) {
4302 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4303 return (error);
4304 }
4305 }
4306 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
4307 if (!thread_safe) {
4308 unlock_fsnode(vp, &funnel_state);
4309 }
4310 return (error);
4311}
4312
4313errno_t
4314VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t context)
4315{
4316 struct vnop_setxattr_args a;
4317 int error;
4318 int thread_safe;
4319 int funnel_state = 0;
4320
4321 a.a_desc = &vnop_setxattr_desc;
4322 a.a_vp = vp;
4323 a.a_name = name;
4324 a.a_uio = uio;
4325 a.a_options = options;
4326 a.a_context = context;
4327
4328 thread_safe = THREAD_SAFE_FS(vp);
4329 if (!thread_safe) {
4330 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4331 return (error);
4332 }
4333 }
4334 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
4335 if (!thread_safe) {
4336 unlock_fsnode(vp, &funnel_state);
4337 }
4338 return (error);
4339}
4340
4341errno_t
4342VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t context)
4343{
4344 struct vnop_removexattr_args a;
4345 int error;
4346 int thread_safe;
4347 int funnel_state = 0;
4348
4349 a.a_desc = &vnop_removexattr_desc;
4350 a.a_vp = vp;
4351 a.a_name = name;
4352 a.a_options = options;
4353 a.a_context = context;
4354
4355 thread_safe = THREAD_SAFE_FS(vp);
4356 if (!thread_safe) {
4357 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4358 return (error);
4359 }
4360 }
4361 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
4362 if (!thread_safe) {
4363 unlock_fsnode(vp, &funnel_state);
4364 }
4365 return (error);
4366}
4367
4368errno_t
4369VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t context)
4370{
4371 struct vnop_listxattr_args a;
4372 int error;
4373 int thread_safe;
4374 int funnel_state = 0;
4375
4376 a.a_desc = &vnop_listxattr_desc;
4377 a.a_vp = vp;
4378 a.a_uio = uio;
4379 a.a_size = size;
4380 a.a_options = options;
4381 a.a_context = context;
4382
4383 thread_safe = THREAD_SAFE_FS(vp);
4384 if (!thread_safe) {
4385 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4386 return (error);
4387 }
4388 }
4389 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
4390 if (!thread_safe) {
4391 unlock_fsnode(vp, &funnel_state);
4392 }
4393 return (error);
4394}
4395
4396
4397#if 0
4398/*
4399 *#
4400 *#% blktooff vp = = =
4401 *#
4402 */
4403struct vnop_blktooff_args {
4404 struct vnodeop_desc *a_desc;
4405 vnode_t a_vp;
4406 daddr64_t a_lblkno;
4407 off_t *a_offset;
4408};
4409#endif /* 0*/
4410errno_t
4411VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
4412{
4413 int _err;
4414 struct vnop_blktooff_args a;
4415 int thread_safe;
4416 int funnel_state = 0;
4417
4418 a.a_desc = &vnop_blktooff_desc;
4419 a.a_vp = vp;
4420 a.a_lblkno = lblkno;
4421 a.a_offset = offset;
4422 thread_safe = THREAD_SAFE_FS(vp);
4423
4424 if (!thread_safe) {
4425 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4426 }
4427 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
4428 if (!thread_safe) {
4429 (void) thread_funnel_set(kernel_flock, funnel_state);
4430 }
4431 return (_err);
4432}
4433
4434#if 0
4435/*
4436 *#
4437 *#% offtoblk vp = = =
4438 *#
4439 */
4440struct vnop_offtoblk_args {
4441 struct vnodeop_desc *a_desc;
4442 vnode_t a_vp;
4443 off_t a_offset;
4444 daddr64_t *a_lblkno;
4445};
4446#endif /* 0*/
4447errno_t
4448VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
4449{
4450 int _err;
4451 struct vnop_offtoblk_args a;
4452 int thread_safe;
4453 int funnel_state = 0;
4454
4455 a.a_desc = &vnop_offtoblk_desc;
4456 a.a_vp = vp;
4457 a.a_offset = offset;
4458 a.a_lblkno = lblkno;
4459 thread_safe = THREAD_SAFE_FS(vp);
4460
4461 if (!thread_safe) {
4462 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4463 }
4464 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
4465 if (!thread_safe) {
4466 (void) thread_funnel_set(kernel_flock, funnel_state);
4467 }
4468 return (_err);
4469}
4470
4471#if 0
4472/*
4473 *#
4474 *#% blockmap vp L L L
4475 *#
4476 */
4477struct vnop_blockmap_args {
4478 struct vnodeop_desc *a_desc;
4479 vnode_t a_vp;
4480 off_t a_foffset;
4481 size_t a_size;
4482 daddr64_t *a_bpn;
4483 size_t *a_run;
4484 void *a_poff;
4485 int a_flags;
4486 vfs_context_t a_context;
4487};
4488#endif /* 0*/
/*
 * Map a range of a file (foffset, size) to a physical block number
 * through the filesystem's blockmap op. 'run' and 'poff' optionally
 * receive the contiguous run length and physical offset.
 *
 * If the caller passed no context, a stack-local one is fabricated
 * from the current proc and credential so the filesystem always sees
 * a valid vfs_context; it is valid only for the duration of this call.
 */
errno_t
VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t context)
{
	int _err;
	struct vnop_blockmap_args a;
	int thread_safe;
	int funnel_state = 0;
	struct vfs_context acontext;

	if (context == NULL) {
		/* Fabricate a context from the calling thread */
		acontext.vc_proc = current_proc();
		acontext.vc_ucred = kauth_cred_get();
		context = &acontext;
	}
	a.a_desc = &vnop_blockmap_desc;
	a.a_vp = vp;
	a.a_foffset = foffset;
	a.a_size = size;
	a.a_bpn = bpn;
	a.a_run = run;
	a.a_poff = poff;
	a.a_flags = flags;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		/* Funnel only -- no fsnode lock transition for blockmap */
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
	_err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
	return (_err);
}
4523
4524#if 0
4525struct vnop_strategy_args {
4526 struct vnodeop_desc *a_desc;
4527 struct buf *a_bp;
4528};
4529
4530#endif /* 0*/
4531errno_t
4532VNOP_STRATEGY(struct buf *bp)
4533{
4534 int _err;
4535 struct vnop_strategy_args a;
4536 a.a_desc = &vnop_strategy_desc;
4537 a.a_bp = bp;
4538 _err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
4539 return (_err);
4540}
4541
4542#if 0
4543struct vnop_bwrite_args {
4544 struct vnodeop_desc *a_desc;
4545 buf_t a_bp;
4546};
4547#endif /* 0*/
4548errno_t
4549VNOP_BWRITE(struct buf *bp)
4550{
4551 int _err;
4552 struct vnop_bwrite_args a;
4553 a.a_desc = &vnop_bwrite_desc;
4554 a.a_bp = bp;
4555 _err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
4556 return (_err);
4557}
4558
4559#if 0
4560struct vnop_kqfilt_add_args {
4561 struct vnodeop_desc *a_desc;
4562 struct vnode *a_vp;
4563 struct knote *a_kn;
4564 vfs_context_t a_context;
4565};
4566#endif
4567errno_t
4568VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t context)
4569{
4570 int _err;
4571 struct vnop_kqfilt_add_args a;
4572 int thread_safe;
4573 int funnel_state = 0;
4574
4575 a.a_desc = VDESC(vnop_kqfilt_add);
4576 a.a_vp = vp;
4577 a.a_kn = kn;
4578 a.a_context = context;
4579 thread_safe = THREAD_SAFE_FS(vp);
4580
4581 if (!thread_safe) {
4582 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4583 return (_err);
4584 }
4585 }
4586 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
4587 if (!thread_safe) {
4588 unlock_fsnode(vp, &funnel_state);
4589 }
4590 return(_err);
4591}
4592
4593#if 0
4594struct vnop_kqfilt_remove_args {
4595 struct vnodeop_desc *a_desc;
4596 struct vnode *a_vp;
4597 uintptr_t a_ident;
4598 vfs_context_t a_context;
4599};
4600#endif
4601errno_t
4602VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t context)
4603{
4604 int _err;
4605 struct vnop_kqfilt_remove_args a;
4606 int thread_safe;
4607 int funnel_state = 0;
4608
4609 a.a_desc = VDESC(vnop_kqfilt_remove);
4610 a.a_vp = vp;
4611 a.a_ident = ident;
4612 a.a_context = context;
4613 thread_safe = THREAD_SAFE_FS(vp);
4614
4615 if (!thread_safe) {
4616 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4617 return (_err);
4618 }
4619 }
4620 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
4621 if (!thread_safe) {
4622 unlock_fsnode(vp, &funnel_state);
4623 }
4624 return(_err);
4625}
4626