]> git.saurik.com Git - apple/xnu.git/blame - bsd/vfs/kpi_vfs.c
xnu-792.10.96.tar.gz
[apple/xnu.git] / bsd / vfs / kpi_vfs.c
CommitLineData
91447636
A
1/*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
37839358
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
91447636 11 *
37839358
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
91447636
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
37839358
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
91447636
A
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23/*
24 * Copyright (c) 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 * (c) UNIX System Laboratories, Inc.
27 * All or some portions of this file are derived from material licensed
28 * to the University of California by American Telephone and Telegraph
29 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
30 * the permission of UNIX System Laboratories, Inc.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)kpi_vfs.c
61 */
62
63/*
64 * External virtual filesystem routines
65 */
66
67#undef DIAGNOSTIC
68#define DIAGNOSTIC 1
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/proc_internal.h>
73#include <sys/kauth.h>
74#include <sys/mount.h>
75#include <sys/mount_internal.h>
76#include <sys/time.h>
77#include <sys/vnode_internal.h>
78#include <sys/stat.h>
79#include <sys/namei.h>
80#include <sys/ucred.h>
81#include <sys/buf.h>
82#include <sys/errno.h>
83#include <sys/malloc.h>
84#include <sys/domain.h>
85#include <sys/mbuf.h>
86#include <sys/syslog.h>
87#include <sys/ubc.h>
88#include <sys/vm.h>
89#include <sys/sysctl.h>
90#include <sys/filedesc.h>
91#include <sys/fsevents.h>
92#include <sys/user.h>
93#include <sys/lockf.h>
94#include <sys/xattr.h>
95
96#include <kern/assert.h>
97#include <kern/kalloc.h>
98
c0fea474
A
99#include <libkern/OSByteOrder.h>
100
91447636
A
101#include <miscfs/specfs/specdev.h>
102
103#include <mach/mach_types.h>
104#include <mach/memory_object_types.h>
105
106#define ESUCCESS 0
107#undef mount_t
108#undef vnode_t
109
110#define COMPAT_ONLY
111
112
113#define THREAD_SAFE_FS(VP) \
114 ((VP)->v_unsafefs ? 0 : 1)
115
116#define NATIVE_XATTR(VP) \
117 ((VP)->v_mount ? (VP)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR : 0)
118
119static void xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context,
120 int thread_safe, int force);
121static void xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
122 vfs_context_t context, int thread_safe);
123
124
/*
 * Mark vp so VNOP_INACTIVE will run when its last use is dropped,
 * and purge any name-cache entries referring to it.
 */
static void
vnode_setneedinactive(vnode_t vp)
{
	cache_purge(vp);

	vnode_lock(vp);
	vp->v_lflag |= VL_NEEDINACTIVE;	/* v_lflag is protected by the vnode lock */
	vnode_unlock(vp);
}
134
135
/*
 * Take the locks needed to call into a non-threadsafe filesystem
 * through vnode vp.
 *
 * If funnel_state is non-NULL, the kernel funnel is acquired first and
 * the previous funnel state is returned through it; pass that value
 * back to unlock_fsnode() to restore it.
 *
 * When the vnode has an unsafe-fs record, the per-fs node lock is
 * recursive: a thread that already owns it just bumps the hold count.
 * Returns ENOENT (after restoring the funnel) if the vnode is being
 * terminated or is dead; returns 0 on success.
 */
int
lock_fsnode(vnode_t vp, int *funnel_state)
{
	if (funnel_state)
		*funnel_state = thread_funnel_set(kernel_flock, TRUE);

	if (vp->v_unsafefs) {
		if (vp->v_unsafefs->fsnodeowner == current_thread()) {
			/* recursive acquisition by the owning thread */
			vp->v_unsafefs->fsnode_count++;
		} else {
			lck_mtx_lock(&vp->v_unsafefs->fsnodelock);

			if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
				/* vnode is going away: undo everything and fail */
				lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);

				if (funnel_state)
					(void) thread_funnel_set(kernel_flock, *funnel_state);
				return (ENOENT);
			}
			vp->v_unsafefs->fsnodeowner = current_thread();
			vp->v_unsafefs->fsnode_count = 1;
		}
	}
	return (0);
}
161
162
/*
 * Release the locks taken by lock_fsnode().  Drops one hold on the
 * per-fs node lock (releasing the mutex when the count reaches zero),
 * then restores the funnel to *funnel_state if supplied.
 */
void
unlock_fsnode(vnode_t vp, int *funnel_state)
{
	if (vp->v_unsafefs) {
		if (--vp->v_unsafefs->fsnode_count == 0) {
			vp->v_unsafefs->fsnodeowner = NULL;
			lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
		}
	}
	if (funnel_state)
		(void) thread_funnel_set(kernel_flock, *funnel_state);
}
175
176
177
178/* ====================================================================== */
179/* ************ EXTERNAL KERNEL APIS ********************************** */
180/* ====================================================================== */
181
182/*
183 * prototypes for exported VFS operations
184 */
185int
186VFS_MOUNT(struct mount * mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
187{
188 int error;
189 int thread_safe;
190 int funnel_state = 0;
191
192 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
193 return(ENOTSUP);
194
195 thread_safe = mp->mnt_vtable->vfc_threadsafe;
196
197
198 if (!thread_safe) {
199 funnel_state = thread_funnel_set(kernel_flock, TRUE);
200 }
201
202 if (vfs_context_is64bit(context)) {
203 if (vfs_64bitready(mp)) {
204 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
205 }
206 else {
207 error = ENOTSUP;
208 }
209 }
210 else {
211 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context);
212 }
213
214 if (!thread_safe) {
215 (void) thread_funnel_set(kernel_flock, funnel_state);
216 }
217 return (error);
218}
219
220int
221VFS_START(struct mount * mp, int flags, vfs_context_t context)
222{
223 int error;
224 int thread_safe;
225 int funnel_state = 0;
226
227 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
228 return(ENOTSUP);
229
230 thread_safe = mp->mnt_vtable->vfc_threadsafe;
231
232 if (!thread_safe) {
233 funnel_state = thread_funnel_set(kernel_flock, TRUE);
234 }
235 error = (*mp->mnt_op->vfs_start)(mp, flags, context);
236 if (!thread_safe) {
237 (void) thread_funnel_set(kernel_flock, funnel_state);
238 }
239 return (error);
240}
241
242int
243VFS_UNMOUNT(struct mount *mp, int flags, vfs_context_t context)
244{
245 int error;
246 int thread_safe;
247 int funnel_state = 0;
248
249 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
250 return(ENOTSUP);
251
252 thread_safe = mp->mnt_vtable->vfc_threadsafe;
253
254 if (!thread_safe) {
255 funnel_state = thread_funnel_set(kernel_flock, TRUE);
256 }
257 error = (*mp->mnt_op->vfs_unmount)(mp, flags, context);
258 if (!thread_safe) {
259 (void) thread_funnel_set(kernel_flock, funnel_state);
260 }
261 return (error);
262}
263
264int
265VFS_ROOT(struct mount * mp, struct vnode ** vpp, vfs_context_t context)
266{
267 int error;
268 int thread_safe;
269 int funnel_state = 0;
270 struct vfs_context acontext;
271
272 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
273 return(ENOTSUP);
274
275 if (context == NULL) {
276 acontext.vc_proc = current_proc();
277 acontext.vc_ucred = kauth_cred_get();
278 context = &acontext;
279 }
280 thread_safe = mp->mnt_vtable->vfc_threadsafe;
281
282 if (!thread_safe) {
283 funnel_state = thread_funnel_set(kernel_flock, TRUE);
284 }
285 error = (*mp->mnt_op->vfs_root)(mp, vpp, context);
286 if (!thread_safe) {
287 (void) thread_funnel_set(kernel_flock, funnel_state);
288 }
289 return (error);
290}
291
292int
293VFS_QUOTACTL(struct mount *mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t context)
294{
295 int error;
296 int thread_safe;
297 int funnel_state = 0;
298
299 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
300 return(ENOTSUP);
301
302 thread_safe = mp->mnt_vtable->vfc_threadsafe;
303
304 if (!thread_safe) {
305 funnel_state = thread_funnel_set(kernel_flock, TRUE);
306 }
307 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, context);
308 if (!thread_safe) {
309 (void) thread_funnel_set(kernel_flock, funnel_state);
310 }
311 return (error);
312}
313
314int
315VFS_GETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
316{
317 int error;
318 int thread_safe;
319 int funnel_state = 0;
320 struct vfs_context acontext;
321
322 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
323 return(ENOTSUP);
324
325 if (context == NULL) {
326 acontext.vc_proc = current_proc();
327 acontext.vc_ucred = kauth_cred_get();
328 context = &acontext;
329 }
330 thread_safe = mp->mnt_vtable->vfc_threadsafe;
331
332 if (!thread_safe) {
333 funnel_state = thread_funnel_set(kernel_flock, TRUE);
334 }
335 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, context);
336 if (!thread_safe) {
337 (void) thread_funnel_set(kernel_flock, funnel_state);
338 }
339 return(error);
340}
341
342int
343VFS_SETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context)
344{
345 int error;
346 int thread_safe;
347 int funnel_state = 0;
348 struct vfs_context acontext;
349
350 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
351 return(ENOTSUP);
352
353 if (context == NULL) {
354 acontext.vc_proc = current_proc();
355 acontext.vc_ucred = kauth_cred_get();
356 context = &acontext;
357 }
358 thread_safe = mp->mnt_vtable->vfc_threadsafe;
359
360 if (!thread_safe) {
361 funnel_state = thread_funnel_set(kernel_flock, TRUE);
362 }
363 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, context);
364 if (!thread_safe) {
365 (void) thread_funnel_set(kernel_flock, funnel_state);
366 }
367 return(error);
368}
369
370int
371VFS_SYNC(struct mount *mp, int flags, vfs_context_t context)
372{
373 int error;
374 int thread_safe;
375 int funnel_state = 0;
376 struct vfs_context acontext;
377
378 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
379 return(ENOTSUP);
380
381 if (context == NULL) {
382 acontext.vc_proc = current_proc();
383 acontext.vc_ucred = kauth_cred_get();
384 context = &acontext;
385 }
386 thread_safe = mp->mnt_vtable->vfc_threadsafe;
387
388 if (!thread_safe) {
389 funnel_state = thread_funnel_set(kernel_flock, TRUE);
390 }
391 error = (*mp->mnt_op->vfs_sync)(mp, flags, context);
392 if (!thread_safe) {
393 (void) thread_funnel_set(kernel_flock, funnel_state);
394 }
395 return(error);
396}
397
398int
399VFS_VGET(struct mount * mp, ino64_t ino, struct vnode **vpp, vfs_context_t context)
400{
401 int error;
402 int thread_safe;
403 int funnel_state = 0;
404 struct vfs_context acontext;
405
406 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
407 return(ENOTSUP);
408
409 if (context == NULL) {
410 acontext.vc_proc = current_proc();
411 acontext.vc_ucred = kauth_cred_get();
412 context = &acontext;
413 }
414 thread_safe = mp->mnt_vtable->vfc_threadsafe;
415
416 if (!thread_safe) {
417 funnel_state = thread_funnel_set(kernel_flock, TRUE);
418 }
419 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, context);
420 if (!thread_safe) {
421 (void) thread_funnel_set(kernel_flock, funnel_state);
422 }
423 return(error);
424}
425
426int
427VFS_FHTOVP(struct mount * mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t context)
428{
429 int error;
430 int thread_safe;
431 int funnel_state = 0;
432 struct vfs_context acontext;
433
434 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
435 return(ENOTSUP);
436
437 if (context == NULL) {
438 acontext.vc_proc = current_proc();
439 acontext.vc_ucred = kauth_cred_get();
440 context = &acontext;
441 }
442 thread_safe = mp->mnt_vtable->vfc_threadsafe;
443
444 if (!thread_safe) {
445 funnel_state = thread_funnel_set(kernel_flock, TRUE);
446 }
447 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, context);
448 if (!thread_safe) {
449 (void) thread_funnel_set(kernel_flock, funnel_state);
450 }
451 return(error);
452}
453
454int
455VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t context)
456{
457 int error;
458 int thread_safe;
459 int funnel_state = 0;
460 struct vfs_context acontext;
461
462 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
463 return(ENOTSUP);
464
465 if (context == NULL) {
466 acontext.vc_proc = current_proc();
467 acontext.vc_ucred = kauth_cred_get();
468 context = &acontext;
469 }
470 thread_safe = THREAD_SAFE_FS(vp);
471
472 if (!thread_safe) {
473 funnel_state = thread_funnel_set(kernel_flock, TRUE);
474 }
475 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, context);
476 if (!thread_safe) {
477 (void) thread_funnel_set(kernel_flock, funnel_state);
478 }
479 return(error);
480}
481
482
483/* returns a copy of vfs type name for the mount_t */
484void
485vfs_name(mount_t mp, char * buffer)
486{
487 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
488}
489
490/* returns vfs type number for the mount_t */
491int
492vfs_typenum(mount_t mp)
493{
494 return(mp->mnt_vtable->vfc_typenum);
495}
496
497
498/* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
499uint64_t
500vfs_flags(mount_t mp)
501{
502 return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
503}
504
505/* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
506void
507vfs_setflags(mount_t mp, uint64_t flags)
508{
509 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
510
511 mp->mnt_flag |= lflags;
512}
513
514/* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
515void
516vfs_clearflags(mount_t mp , uint64_t flags)
517{
518 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
519
520 mp->mnt_flag &= ~lflags;
521}
522
523/* Is the mount_t ronly and upgrade read/write requested? */
524int
525vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
526{
527 return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
528}
529
530
531/* Is the mount_t mounted ronly */
532int
533vfs_isrdonly(mount_t mp)
534{
535 return (mp->mnt_flag & MNT_RDONLY);
536}
537
538/* Is the mount_t mounted for filesystem synchronous writes? */
539int
540vfs_issynchronous(mount_t mp)
541{
542 return (mp->mnt_flag & MNT_SYNCHRONOUS);
543}
544
545/* Is the mount_t mounted read/write? */
546int
547vfs_isrdwr(mount_t mp)
548{
549 return ((mp->mnt_flag & MNT_RDONLY) == 0);
550}
551
552
553/* Is mount_t marked for update (ie MNT_UPDATE) */
554int
555vfs_isupdate(mount_t mp)
556{
557 return (mp->mnt_flag & MNT_UPDATE);
558}
559
560
561/* Is mount_t marked for reload (ie MNT_RELOAD) */
562int
563vfs_isreload(mount_t mp)
564{
565 return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
566}
567
568/* Is mount_t marked for reload (ie MNT_FORCE) */
569int
570vfs_isforce(mount_t mp)
571{
572 if ((mp->mnt_flag & MNT_FORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
573 return(1);
574 else
575 return(0);
576}
577
578int
579vfs_64bitready(mount_t mp)
580{
581 if ((mp->mnt_vtable->vfc_64bitready))
582 return(1);
583 else
584 return(0);
585}
586
587int
588vfs_authopaque(mount_t mp)
589{
590 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
591 return(1);
592 else
593 return(0);
594}
595
596int
597vfs_authopaqueaccess(mount_t mp)
598{
599 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
600 return(1);
601 else
602 return(0);
603}
604
605void
606vfs_setauthopaque(mount_t mp)
607{
608 mount_lock(mp);
609 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
610 mount_unlock(mp);
611}
612
613void
614vfs_setauthopaqueaccess(mount_t mp)
615{
616 mount_lock(mp);
617 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
618 mount_unlock(mp);
619}
620
621void
622vfs_clearauthopaque(mount_t mp)
623{
624 mount_lock(mp);
625 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
626 mount_unlock(mp);
627}
628
629void
630vfs_clearauthopaqueaccess(mount_t mp)
631{
632 mount_lock(mp);
633 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
634 mount_unlock(mp);
635}
636
637void
638vfs_setextendedsecurity(mount_t mp)
639{
640 mount_lock(mp);
641 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
642 mount_unlock(mp);
643}
644
645void
646vfs_clearextendedsecurity(mount_t mp)
647{
648 mount_lock(mp);
649 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
650 mount_unlock(mp);
651}
652
653int
654vfs_extendedsecurity(mount_t mp)
655{
656 return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
657}
658
659/* returns the max size of short symlink in this mount_t */
660uint32_t
661vfs_maxsymlen(mount_t mp)
662{
663 return(mp->mnt_maxsymlinklen);
664}
665
666/* set max size of short symlink on mount_t */
667void
668vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
669{
670 mp->mnt_maxsymlinklen = symlen;
671}
672
673/* return a pointer to the RO vfs_statfs associated with mount_t */
674struct vfsstatfs *
675vfs_statfs(mount_t mp)
676{
677 return(&mp->mnt_vfsstat);
678}
679
680int
681vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
682{
683 int error;
684 char *vname;
685
686 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
687 return(error);
688
689 /*
690 * If we have a filesystem create time, use it to default some others.
691 */
692 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
693 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
694 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
695 }
696
697 return(0);
698}
699
700int
701vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
702{
703 int error;
704
705 if (vfs_isrdonly(mp))
706 return EROFS;
707
708 error = VFS_SETATTR(mp, vfa, ctx);
709
710 /*
711 * If we had alternate ways of setting vfs attributes, we'd
712 * fall back here.
713 */
714
715 return error;
716}
717
718/* return the private data handle stored in mount_t */
719void *
720vfs_fsprivate(mount_t mp)
721{
722 return(mp->mnt_data);
723}
724
725/* set the private data handle in mount_t */
726void
727vfs_setfsprivate(mount_t mp, void *mntdata)
728{
729 mp->mnt_data = mntdata;
730}
731
732
733/*
734 * return the block size of the underlying
735 * device associated with mount_t
736 */
737int
738vfs_devblocksize(mount_t mp) {
739
740 return(mp->mnt_devblocksize);
741}
742
743
744/*
745 * return the io attributes associated with mount_t
746 */
747void
748vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
749{
750 if (mp == NULL) {
751 ioattrp->io_maxreadcnt = MAXPHYS;
752 ioattrp->io_maxwritecnt = MAXPHYS;
753 ioattrp->io_segreadcnt = 32;
754 ioattrp->io_segwritecnt = 32;
755 ioattrp->io_maxsegreadsize = MAXPHYS;
756 ioattrp->io_maxsegwritesize = MAXPHYS;
757 ioattrp->io_devblocksize = DEV_BSIZE;
758 } else {
759 ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
760 ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
761 ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
762 ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
763 ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
764 ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
765 ioattrp->io_devblocksize = mp->mnt_devblocksize;
766 }
767 ioattrp->io_reserved[0] = 0;
768 ioattrp->io_reserved[1] = 0;
769 ioattrp->io_reserved[2] = 0;
770}
771
772
773/*
774 * set the IO attributes associated with mount_t
775 */
776void
777vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
778{
779 if (mp == NULL)
780 return;
781 mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
782 mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
783 mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
784 mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
785 mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
786 mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
787 mp->mnt_devblocksize = ioattrp->io_devblocksize;
788}
789
790/*
791 * Add a new filesystem into the kernel specified in passed in
792 * vfstable structure. It fills in the vnode
793 * dispatch vector that is to be passed to when vnodes are created.
794 * It returns a handle which is to be used to when the FS is to be removed
795 */
796typedef int (*PFI)(void *);
797extern int vfs_opv_numops;
798errno_t
799vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
800{
801#pragma unused(data)
802 struct vfstable *newvfstbl = NULL;
803 int i,j;
804 int (***opv_desc_vector_p)(void *);
805 int (**opv_desc_vector)(void *);
806 struct vnodeopv_entry_desc *opve_descp;
807 int desccount;
808 int descsize;
809 PFI *descptr;
810
811 /*
812 * This routine is responsible for all the initialization that would
813 * ordinarily be done as part of the system startup;
814 */
815
816 if (vfe == (struct vfs_fsentry *)0)
817 return(EINVAL);
818
819 desccount = vfe->vfe_vopcnt;
820 if ((desccount <=0) || ((desccount > 5)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
821 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
822 return(EINVAL);
823
824
825 MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
826 M_WAITOK);
827 bzero(newvfstbl, sizeof(struct vfstable));
828 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
829 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
830 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
831 newvfstbl->vfc_typenum = maxvfsconf++;
832 else
833 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
834
835 newvfstbl->vfc_refcount = 0;
836 newvfstbl->vfc_flags = 0;
837 newvfstbl->vfc_mountroot = NULL;
838 newvfstbl->vfc_next = NULL;
839 newvfstbl->vfc_threadsafe = 0;
840 newvfstbl->vfc_vfsflags = 0;
841 if (vfe->vfe_flags & VFS_TBL64BITREADY)
842 newvfstbl->vfc_64bitready= 1;
843 if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
844 newvfstbl->vfc_threadsafe= 1;
845 if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
846 newvfstbl->vfc_threadsafe= 1;
847 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
848 newvfstbl->vfc_flags |= MNT_LOCAL;
849 if (vfe->vfe_flags & VFS_TBLLOCALVOL)
850 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
851 else
852 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
853
854
855 /*
856 * Allocate and init the vectors.
857 * Also handle backwards compatibility.
858 *
859 * We allocate one large block to hold all <desccount>
860 * vnode operation vectors stored contiguously.
861 */
862 /* XXX - shouldn't be M_TEMP */
863
864 descsize = desccount * vfs_opv_numops * sizeof(PFI);
865 MALLOC(descptr, PFI *, descsize,
866 M_TEMP, M_WAITOK);
867 bzero(descptr, descsize);
868
869 newvfstbl->vfc_descptr = descptr;
870 newvfstbl->vfc_descsize = descsize;
871
872
873 for (i= 0; i< desccount; i++ ) {
874 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
875 /*
876 * Fill in the caller's pointer to the start of the i'th vector.
877 * They'll need to supply it when calling vnode_create.
878 */
879 opv_desc_vector = descptr + i * vfs_opv_numops;
880 *opv_desc_vector_p = opv_desc_vector;
881
882 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
883 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
884
885 /*
886 * Sanity check: is this operation listed
887 * in the list of operations? We check this
888 * by seeing if its offest is zero. Since
889 * the default routine should always be listed
890 * first, it should be the only one with a zero
891 * offset. Any other operation with a zero
892 * offset is probably not listed in
893 * vfs_op_descs, and so is probably an error.
894 *
895 * A panic here means the layer programmer
896 * has committed the all-too common bug
897 * of adding a new operation to the layer's
898 * list of vnode operations but
899 * not adding the operation to the system-wide
900 * list of supported operations.
901 */
902 if (opve_descp->opve_op->vdesc_offset == 0 &&
903 opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
904 printf("vfs_fsadd: operation %s not listed in %s.\n",
905 opve_descp->opve_op->vdesc_name,
906 "vfs_op_descs");
907 panic("vfs_fsadd: bad operation");
908 }
909 /*
910 * Fill in this entry.
911 */
912 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
913 opve_descp->opve_impl;
914 }
915
916
917 /*
918 * Finally, go back and replace unfilled routines
919 * with their default. (Sigh, an O(n^3) algorithm. I
920 * could make it better, but that'd be work, and n is small.)
921 */
922 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
923
924 /*
925 * Force every operations vector to have a default routine.
926 */
927 opv_desc_vector = *opv_desc_vector_p;
928 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
929 panic("vfs_fsadd: operation vector without default routine.");
930 for (j = 0; j < vfs_opv_numops; j++)
931 if (opv_desc_vector[j] == NULL)
932 opv_desc_vector[j] =
933 opv_desc_vector[VOFFSET(vnop_default)];
934
935 } /* end of each vnodeopv_desc parsing */
936
937
938
939 *handle = vfstable_add(newvfstbl);
940
941 if (newvfstbl->vfc_typenum <= maxvfsconf )
942 maxvfsconf = newvfstbl->vfc_typenum + 1;
943 numused_vfsslots++;
944
945 if (newvfstbl->vfc_vfsops->vfs_init)
946 (*newvfstbl->vfc_vfsops->vfs_init)((struct vfsconf *)handle);
947
948 FREE(newvfstbl, M_TEMP);
949
950 return(0);
951}
952
953/*
954 * Removes the filesystem from kernel.
955 * The argument passed in is the handle that was given when
956 * file system was added
957 */
958errno_t
959vfs_fsremove(vfstable_t handle)
960{
961 struct vfstable * vfstbl = (struct vfstable *)handle;
962 void *old_desc = NULL;
963 errno_t err;
964
965 /* Preflight check for any mounts */
966 mount_list_lock();
967 if ( vfstbl->vfc_refcount != 0 ) {
968 mount_list_unlock();
969 return EBUSY;
970 }
971 mount_list_unlock();
972
973 /*
974 * save the old descriptor; the free cannot occur unconditionally,
975 * since vfstable_del() may fail.
976 */
977 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
978 old_desc = vfstbl->vfc_descptr;
979 }
980 err = vfstable_del(vfstbl);
981
982 /* free the descriptor if the delete was successful */
983 if (err == 0 && old_desc) {
984 FREE(old_desc, M_TEMP);
985 }
986
987 return(err);
988}
989
990/*
991 * This returns a reference to mount_t
992 * which should be dropped using vfs_mountrele().
993 * Not doing so will leak a mountpoint
994 * and associated data structures.
995 */
996errno_t
997vfs_mountref(__unused mount_t mp ) /* gives a reference */
998{
999 return(0);
1000}
1001
1002/* This drops the reference on mount_t that was acquired */
1003errno_t
1004vfs_mountrele(__unused mount_t mp ) /* drops reference */
1005{
1006 return(0);
1007}
1008
1009int
1010vfs_context_pid(vfs_context_t context)
1011{
1012 return (context->vc_proc->p_pid);
1013}
1014
1015int
1016vfs_context_suser(vfs_context_t context)
1017{
1018 return (suser(context->vc_ucred, 0));
1019}
1020int
1021vfs_context_issignal(vfs_context_t context, sigset_t mask)
1022{
1023 if (context->vc_proc)
1024 return(proc_pendingsignals(context->vc_proc, mask));
1025 return(0);
1026}
1027
1028int
1029vfs_context_is64bit(vfs_context_t context)
1030{
1031 if (context->vc_proc)
1032 return(proc_is64bit(context->vc_proc));
1033 return(0);
1034}
1035
1036proc_t
1037vfs_context_proc(vfs_context_t context)
1038{
1039 return (context->vc_proc);
1040}
1041
1042vfs_context_t
1043vfs_context_create(vfs_context_t context)
1044{
1045 struct vfs_context * newcontext;
1046
1047 newcontext = (struct vfs_context *)kalloc(sizeof(struct vfs_context));
1048
1049 if (newcontext) {
1050 if (context) {
1051 newcontext->vc_proc = context->vc_proc;
1052 newcontext->vc_ucred = context->vc_ucred;
1053 } else {
1054 newcontext->vc_proc = proc_self();
1055 newcontext->vc_ucred = kauth_cred_get();
1056 }
1057 return(newcontext);
1058 }
1059 return((vfs_context_t)0);
1060}
1061
1062int
1063vfs_context_rele(vfs_context_t context)
1064{
1065 if (context)
1066 kfree(context, sizeof(struct vfs_context));
1067 return(0);
1068}
1069
1070
1071ucred_t
1072vfs_context_ucred(vfs_context_t context)
1073{
1074 return (context->vc_ucred);
1075}
1076
1077/*
1078 * Return true if the context is owned by the superuser.
1079 */
1080int
1081vfs_context_issuser(vfs_context_t context)
1082{
1083 return(context->vc_ucred->cr_uid == 0);
1084}
1085
1086
1087/* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1088
1089
1090/*
1091 * Convert between vnode types and inode formats (since POSIX.1
1092 * defines mode word of stat structure in terms of inode formats).
1093 */
1094enum vtype
1095vnode_iftovt(int mode)
1096{
1097 return(iftovt_tab[((mode) & S_IFMT) >> 12]);
1098}
1099
1100int
1101vnode_vttoif(enum vtype indx)
1102{
1103 return(vttoif_tab[(int)(indx)]);
1104}
1105
1106int
1107vnode_makeimode(int indx, int mode)
1108{
1109 return (int)(VTTOIF(indx) | (mode));
1110}
1111
1112
1113/*
1114 * vnode manipulation functions.
1115 */
1116
1117/* returns system root vnode reference; It should be dropped using vrele() */
1118vnode_t
1119vfs_rootvnode(void)
1120{
1121 int error;
1122
1123 error = vnode_get(rootvnode);
1124 if (error)
1125 return ((vnode_t)0);
1126 else
1127 return rootvnode;
1128}
1129
1130
1131uint32_t
1132vnode_vid(vnode_t vp)
1133{
1134 return ((uint32_t)(vp->v_id));
1135}
1136
1137/* returns a mount reference; drop it with vfs_mountrelease() */
1138mount_t
1139vnode_mount(vnode_t vp)
1140{
1141 return (vp->v_mount);
1142}
1143
1144/* returns a mount reference iff vnode_t is a dir and is a mount point */
1145mount_t
1146vnode_mountedhere(vnode_t vp)
1147{
1148 mount_t mp;
1149
1150 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1151 (mp->mnt_vnodecovered == vp))
1152 return (mp);
1153 else
1154 return (mount_t)NULL;
1155}
1156
1157/* returns vnode type of vnode_t */
1158enum vtype
1159vnode_vtype(vnode_t vp)
1160{
1161 return (vp->v_type);
1162}
1163
1164/* returns FS specific node saved in vnode */
1165void *
1166vnode_fsnode(vnode_t vp)
1167{
1168 return (vp->v_data);
1169}
1170
1171void
1172vnode_clearfsnode(vnode_t vp)
1173{
1174 vp->v_data = 0;
1175}
1176
1177dev_t
1178vnode_specrdev(vnode_t vp)
1179{
1180 return(vp->v_rdev);
1181}
1182
1183
1184/* Accessor functions */
1185/* is vnode_t a root vnode */
1186int
1187vnode_isvroot(vnode_t vp)
1188{
1189 return ((vp->v_flag & VROOT)? 1 : 0);
1190}
1191
1192/* is vnode_t a system vnode */
1193int
1194vnode_issystem(vnode_t vp)
1195{
1196 return ((vp->v_flag & VSYSTEM)? 1 : 0);
1197}
1198
1199/* if vnode_t mount operation in progress */
1200int
1201vnode_ismount(vnode_t vp)
1202{
1203 return ((vp->v_flag & VMOUNT)? 1 : 0);
1204}
1205
1206/* is this vnode under recyle now */
1207int
1208vnode_isrecycled(vnode_t vp)
1209{
1210 int ret;
1211
1212 vnode_lock(vp);
1213 ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
1214 vnode_unlock(vp);
1215 return(ret);
1216}
1217
1218/* is vnode_t marked to not keep data cached once it's been consumed */
1219int
1220vnode_isnocache(vnode_t vp)
1221{
1222 return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
1223}
1224
1225/*
1226 * has sequential readahead been disabled on this vnode
1227 */
1228int
1229vnode_isnoreadahead(vnode_t vp)
1230{
1231 return ((vp->v_flag & VRAOFF)? 1 : 0);
1232}
1233
1234/* is vnode_t a standard one? */
1235int
1236vnode_isstandard(vnode_t vp)
1237{
1238 return ((vp->v_flag & VSTANDARD)? 1 : 0);
1239}
1240
1241/* don't vflush() if SKIPSYSTEM */
1242int
1243vnode_isnoflush(vnode_t vp)
1244{
1245 return ((vp->v_flag & VNOFLUSH)? 1 : 0);
1246}
1247
1248/* is vnode_t a regular file */
1249int
1250vnode_isreg(vnode_t vp)
1251{
1252 return ((vp->v_type == VREG)? 1 : 0);
1253}
1254
1255/* is vnode_t a directory? */
1256int
1257vnode_isdir(vnode_t vp)
1258{
1259 return ((vp->v_type == VDIR)? 1 : 0);
1260}
1261
1262/* is vnode_t a symbolic link ? */
1263int
1264vnode_islnk(vnode_t vp)
1265{
1266 return ((vp->v_type == VLNK)? 1 : 0);
1267}
1268
1269/* is vnode_t a fifo ? */
1270int
1271vnode_isfifo(vnode_t vp)
1272{
1273 return ((vp->v_type == VFIFO)? 1 : 0);
1274}
1275
1276/* is vnode_t a block device? */
1277int
1278vnode_isblk(vnode_t vp)
1279{
1280 return ((vp->v_type == VBLK)? 1 : 0);
1281}
1282
1283/* is vnode_t a char device? */
1284int
1285vnode_ischr(vnode_t vp)
1286{
1287 return ((vp->v_type == VCHR)? 1 : 0);
1288}
1289
1290/* is vnode_t a socket? */
1291int
1292vnode_issock(vnode_t vp)
1293{
1294 return ((vp->v_type == VSOCK)? 1 : 0);
1295}
1296
1297
1298/* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1299void
1300vnode_setnocache(vnode_t vp)
1301{
1302 vnode_lock(vp);
1303 vp->v_flag |= VNOCACHE_DATA;
1304 vnode_unlock(vp);
1305}
1306
1307void
1308vnode_clearnocache(vnode_t vp)
1309{
1310 vnode_lock(vp);
1311 vp->v_flag &= ~VNOCACHE_DATA;
1312 vnode_unlock(vp);
1313}
1314
1315void
1316vnode_setnoreadahead(vnode_t vp)
1317{
1318 vnode_lock(vp);
1319 vp->v_flag |= VRAOFF;
1320 vnode_unlock(vp);
1321}
1322
1323void
1324vnode_clearnoreadahead(vnode_t vp)
1325{
1326 vnode_lock(vp);
1327 vp->v_flag &= ~VRAOFF;
1328 vnode_unlock(vp);
1329}
1330
1331
1332/* mark vnode_t to skip vflush() is SKIPSYSTEM */
1333void
1334vnode_setnoflush(vnode_t vp)
1335{
1336 vnode_lock(vp);
1337 vp->v_flag |= VNOFLUSH;
1338 vnode_unlock(vp);
1339}
1340
1341void
1342vnode_clearnoflush(vnode_t vp)
1343{
1344 vnode_lock(vp);
1345 vp->v_flag &= ~VNOFLUSH;
1346 vnode_unlock(vp);
1347}
1348
1349
1350/* is vnode_t a blkdevice and has a FS mounted on it */
1351int
1352vnode_ismountedon(vnode_t vp)
1353{
1354 return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
1355}
1356
1357void
1358vnode_setmountedon(vnode_t vp)
1359{
1360 vnode_lock(vp);
1361 vp->v_specflags |= SI_MOUNTEDON;
1362 vnode_unlock(vp);
1363}
1364
1365void
1366vnode_clearmountedon(vnode_t vp)
1367{
1368 vnode_lock(vp);
1369 vp->v_specflags &= ~SI_MOUNTEDON;
1370 vnode_unlock(vp);
1371}
1372
1373
1374void
1375vnode_settag(vnode_t vp, int tag)
1376{
1377 vp->v_tag = tag;
1378
1379}
1380
1381int
1382vnode_tag(vnode_t vp)
1383{
1384 return(vp->v_tag);
1385}
1386
1387vnode_t
1388vnode_parent(vnode_t vp)
1389{
1390
1391 return(vp->v_parent);
1392}
1393
1394void
1395vnode_setparent(vnode_t vp, vnode_t dvp)
1396{
1397 vp->v_parent = dvp;
1398}
1399
1400char *
1401vnode_name(vnode_t vp)
1402{
1403 /* we try to keep v_name a reasonable name for the node */
1404 return(vp->v_name);
1405}
1406
1407void
1408vnode_setname(vnode_t vp, char * name)
1409{
1410 vp->v_name = name;
1411}
1412
1413/* return the registered FS name when adding the FS to kernel */
1414void
1415vnode_vfsname(vnode_t vp, char * buf)
1416{
1417 strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
1418}
1419
1420/* return the FS type number */
1421int
1422vnode_vfstypenum(vnode_t vp)
1423{
1424 return(vp->v_mount->mnt_vtable->vfc_typenum);
1425}
1426
1427int
1428vnode_vfs64bitready(vnode_t vp)
1429{
1430
1431 if ((vp->v_mount->mnt_vtable->vfc_64bitready))
1432 return(1);
1433 else
1434 return(0);
1435}
1436
1437
1438
1439/* return the visible flags on associated mount point of vnode_t */
1440uint32_t
1441vnode_vfsvisflags(vnode_t vp)
1442{
1443 return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
1444}
1445
1446/* return the command modifier flags on associated mount point of vnode_t */
1447uint32_t
1448vnode_vfscmdflags(vnode_t vp)
1449{
1450 return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
1451}
1452
1453/* return the max symlink of short links of vnode_t */
1454uint32_t
1455vnode_vfsmaxsymlen(vnode_t vp)
1456{
1457 return(vp->v_mount->mnt_maxsymlinklen);
1458}
1459
1460/* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1461struct vfsstatfs *
1462vnode_vfsstatfs(vnode_t vp)
1463{
1464 return(&vp->v_mount->mnt_vfsstat);
1465}
1466
1467/* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1468void *
1469vnode_vfsfsprivate(vnode_t vp)
1470{
1471 return(vp->v_mount->mnt_data);
1472}
1473
1474/* is vnode_t in a rdonly mounted FS */
1475int
1476vnode_vfsisrdonly(vnode_t vp)
1477{
1478 return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
1479}
1480
1481
1482/* returns vnode ref to current working directory */
1483vnode_t
1484current_workingdir(void)
1485{
1486 struct proc *p = current_proc();
1487 struct vnode * vp ;
1488
1489 if ( (vp = p->p_fd->fd_cdir) ) {
1490 if ( (vnode_getwithref(vp)) )
1491 return (NULL);
1492 }
1493 return vp;
1494}
1495
1496/* returns vnode ref to current root(chroot) directory */
1497vnode_t
1498current_rootdir(void)
1499{
1500 struct proc *p = current_proc();
1501 struct vnode * vp ;
1502
1503 if ( (vp = p->p_fd->fd_rdir) ) {
1504 if ( (vnode_getwithref(vp)) )
1505 return (NULL);
1506 }
1507 return vp;
1508}
1509
c0fea474
A
1510/*
1511 * Get a filesec and optional acl contents from an extended attribute.
1512 * Function will attempt to retrive ACL, UUID, and GUID information using a
1513 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
1514 *
1515 * Parameters: vp The vnode on which to operate.
1516 * fsecp The filesec (and ACL, if any) being
1517 * retrieved.
1518 * ctx The vnode context in which the
1519 * operation is to be attempted.
1520 *
1521 * Returns: 0 Success
1522 * !0 errno value
1523 *
1524 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
1525 * host byte order, as will be the ACL contents, if any.
1526 * Internally, we will cannonize these values from network (PPC)
1527 * byte order after we retrieve them so that the on-disk contents
1528 * of the extended attribute are identical for both PPC and Intel
1529 * (if we were not being required to provide this service via
1530 * fallback, this would be the job of the filesystem
1531 * 'VNOP_GETATTR' call).
1532 *
1533 * We use ntohl() because it has a transitive property on Intel
1534 * machines and no effect on PPC mancines. This guarantees us
1535 *
1536 * XXX: Deleting rather than ignoreing a corrupt security structure is
1537 * probably the only way to reset it without assistance from an
1538 * file system integrity checking tool. Right now we ignore it.
1539 *
1540 * XXX: We should enummerate the possible errno values here, and where
1541 * in the code they originated.
1542 */
91447636
A
1543static int
1544vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
1545{
1546 kauth_filesec_t fsec;
1547 uio_t fsec_uio;
1548 size_t fsec_size;
1549 size_t xsize, rsize;
1550 int error;
c0fea474
A
1551 int i;
1552 uint32_t host_fsec_magic;
1553 uint32_t host_acl_entrycount;
91447636
A
1554
1555 fsec = NULL;
1556 fsec_uio = NULL;
1557 error = 0;
1558
1559 /* find out how big the EA is */
1560 if (vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx) != 0) {
1561 /* no EA, no filesec */
1562 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1563 error = 0;
1564 /* either way, we are done */
1565 goto out;
1566 }
1567
1568 /* how many entries would fit? */
1569 fsec_size = KAUTH_FILESEC_COUNT(xsize);
1570
1571 /* get buffer and uio */
1572 if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
1573 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
1574 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
1575 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
1576 error = ENOMEM;
1577 goto out;
1578 }
1579
1580 /* read security attribute */
1581 rsize = xsize;
1582 if ((error = vn_getxattr(vp,
1583 KAUTH_FILESEC_XATTR,
1584 fsec_uio,
1585 &rsize,
1586 XATTR_NOSECURITY,
1587 ctx)) != 0) {
1588
1589 /* no attribute - no security data */
1590 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
1591 error = 0;
1592 /* either way, we are done */
1593 goto out;
1594 }
1595
1596 /*
c0fea474
A
1597 * Validate security structure; the validation must take place in host
1598 * byte order. If it's corrupt, we will just ignore it.
91447636 1599 */
c0fea474
A
1600
1601 /* Validate the size before trying to convert it */
91447636
A
1602 if (rsize < KAUTH_FILESEC_SIZE(0)) {
1603 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
1604 goto out;
1605 }
c0fea474
A
1606
1607 /* Validate the magic number before trying to convert it */
1608 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
1609 if (fsec->fsec_magic != host_fsec_magic) {
1610 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
91447636
A
1611 goto out;
1612 }
c0fea474
A
1613
1614 /* Validate the entry count before trying to convert it. */
1615 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
1616 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
1617 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
1618 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
1619 goto out;
1620 }
1621 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
1622 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
1623 goto out;
1624 }
91447636
A
1625 }
1626
c0fea474
A
1627 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
1628
91447636
A
1629 *fsecp = fsec;
1630 fsec = NULL;
1631 error = 0;
1632out:
1633 if (fsec != NULL)
1634 kauth_filesec_free(fsec);
1635 if (fsec_uio != NULL)
1636 uio_free(fsec_uio);
1637 if (error)
1638 *fsecp = NULL;
1639 return(error);
1640}
1641
c0fea474
A
1642/*
1643 * Set a filesec and optional acl contents into an extended attribute.
1644 * function will attempt to store ACL, UUID, and GUID information using a
1645 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
1646 * may or may not point to the `fsec->fsec_acl`, depending on whether the
1647 * original caller supplied an acl.
1648 *
1649 * Parameters: vp The vnode on which to operate.
1650 * fsec The filesec being set.
1651 * acl The acl to be associated with 'fsec'.
1652 * ctx The vnode context in which the
1653 * operation is to be attempted.
1654 *
1655 * Returns: 0 Success
1656 * !0 errno value
1657 *
1658 * Notes: Both the fsec and the acl are always valid.
1659 *
1660 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
1661 * as are the acl contents, if they are used. Internally, we will
1662 * cannonize these values into network (PPC) byte order before we
1663 * attempt to write them so that the on-disk contents of the
1664 * extended attribute are identical for both PPC and Intel (if we
1665 * were not being required to provide this service via fallback,
1666 * this would be the job of the filesystem 'VNOP_SETATTR' call).
1667 * We reverse this process on the way out, so we leave with the
1668 * same byte order we started with.
1669 *
1670 * XXX: We should enummerate the possible errno values here, and where
1671 * in the code they originated.
1672 */
91447636
A
1673static int
1674vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
1675{
c0fea474
A
1676 uio_t fsec_uio;
1677 int error;
1678 int i;
1679 uint32_t saved_acl_copysize;
91447636
A
1680
1681 fsec_uio = NULL;
1682
1683 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
1684 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
1685 error = ENOMEM;
1686 goto out;
1687 }
c0fea474
A
1688 /*
1689 * Save the pre-converted ACL copysize, because it gets swapped too
1690 * if we are running with the wrong endianness.
1691 */
1692 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
1693
1694 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
1695
91447636 1696 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), sizeof(struct kauth_filesec) - sizeof(struct kauth_acl));
c0fea474 1697 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
91447636
A
1698 error = vn_setxattr(vp,
1699 KAUTH_FILESEC_XATTR,
1700 fsec_uio,
1701 XATTR_NOSECURITY, /* we have auth'ed already */
1702 ctx);
1703 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
1704
c0fea474
A
1705 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
1706
91447636
A
1707out:
1708 if (fsec_uio != NULL)
1709 uio_free(fsec_uio);
1710 return(error);
1711}
1712
1713
/*
 * Get attributes for a vnode, providing fallbacks and synthesised values
 * for anything the underlying filesystem does not supply directly.
 *
 * Parameters:	vp	The vnode whose attributes to fetch.
 *		vap	Attribute set; active bits request attributes,
 *			supported bits report what was filled in.
 *		ctx	The vnode context in which the operation is
 *			to be attempted.
 *
 * Returns:	0	Success
 *		!0	errno value
 */
int
vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	int	error;
	uid_t	nuid;
	gid_t	ngid;

	/* don't ask for extended security data if the filesystem doesn't support it */
	if (!vfs_extendedsecurity(vnode_mount(vp))) {
		VATTR_CLEAR_ACTIVE(vap, va_acl);
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
		VATTR_CLEAR_ACTIVE(vap, va_guuid);
	}

	/*
	 * If the caller wants size values we might have to synthesise, give the
	 * filesystem the opportunity to supply better intermediate results.
	 */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		VATTR_SET_ACTIVE(vap, va_data_size);
		VATTR_SET_ACTIVE(vap, va_data_alloc);
		VATTR_SET_ACTIVE(vap, va_total_size);
		VATTR_SET_ACTIVE(vap, va_total_alloc);
	}

	error = VNOP_GETATTR(vp, vap, ctx);
	if (error) {
		KAUTH_DEBUG("ERROR - returning %d", error);
		goto out;
	}

	/*
	 * If extended security data was requested but not returned, try the fallback
	 * path.
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
		fsec = NULL;

		/* the filesec EA is only maintained for these vnode types */
		if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) {
			/* try to get the filesec */
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
				goto out;
		}
		/* if no filesec, no attributes */
		if (fsec == NULL) {
			VATTR_RETURN(vap, va_acl, NULL);
			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
		} else {

			/* looks good, try to return what we were asked for */
			VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
			VATTR_RETURN(vap, va_guuid, fsec->fsec_group);

			/* only return the ACL if we were actually asked for it */
			if (VATTR_IS_ACTIVE(vap, va_acl)) {
				if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
					VATTR_RETURN(vap, va_acl, NULL);
				} else {
					/* caller owns the copy; freed via kauth_acl_free() */
					facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
					if (facl == NULL) {
						kauth_filesec_free(fsec);
						error = ENOMEM;
						goto out;
					}
					bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
					VATTR_RETURN(vap, va_acl, facl);
				}
			}
			kauth_filesec_free(fsec);
		}
	}
	/*
	 * If someone gave us an unsolicited filesec, toss it.  We promise that
	 * we're OK with a filesystem giving us anything back, but our callers
	 * only expect what they asked for.
	 */
	if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
		if (vap->va_acl != NULL)
			kauth_acl_free(vap->va_acl);
		VATTR_CLEAR_SUPPORTED(vap, va_acl);
	}

#if 0	/* enable when we have a filesystem only supporting UUIDs */
	/*
	 * Handle the case where we need a UID/GID, but only have extended
	 * security information.
	 */
	if (VATTR_NOT_RETURNED(vap, va_uid) &&
	    VATTR_IS_SUPPORTED(vap, va_uuuid) &&
	    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
			VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_NOT_RETURNED(vap, va_gid) &&
	    VATTR_IS_SUPPORTED(vap, va_guuid) &&
	    !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
			VATTR_RETURN(vap, va_gid, ngid);
	}
#endif

	/*
	 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
	 */
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			nuid = vp->v_mount->mnt_fsowner;
			if (nuid == KAUTH_UID_NONE)
				nuid = 99;	/* 99 == "unknown" owner */
		} else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
			nuid = vap->va_uid;
		} else {
			/* this will always be something sensible */
			nuid = vp->v_mount->mnt_fsowner;
		}
		if ((nuid == 99) && !vfs_context_issuser(ctx))
			nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			ngid = vp->v_mount->mnt_fsgroup;
			if (ngid == KAUTH_GID_NONE)
				ngid = 99;	/* 99 == "unknown" group */
		} else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
			ngid = vap->va_gid;
		} else {
			/* this will always be something sensible */
			ngid = vp->v_mount->mnt_fsgroup;
		}
		if ((ngid == 99) && !vfs_context_issuser(ctx))
			ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
		VATTR_RETURN(vap, va_gid, ngid);
	}

	/*
	 * Synthesise some values that can be reasonably guessed.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_iosize))
		VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);

	if (!VATTR_IS_SUPPORTED(vap, va_flags))
		VATTR_RETURN(vap, va_flags, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_filerev))
		VATTR_RETURN(vap, va_filerev, 0);

	if (!VATTR_IS_SUPPORTED(vap, va_gen))
		VATTR_RETURN(vap, va_gen, 0);

	/*
	 * Default sizes.  Ordering here is important, as later defaults build on earlier ones.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_data_size))
		VATTR_RETURN(vap, va_data_size, 0);

	/* do we want any of the possibly-computed values? */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		/* make sure f_bsize is valid */
		if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
			if ((error = vfs_update_vfsstat(vp->v_mount, ctx)) != 0)
				goto out;
		}

		/* default va_data_alloc from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
			VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));

		/* default va_total_size from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_total_size))
			VATTR_RETURN(vap, va_total_size, vap->va_data_size);

		/* default va_total_alloc from va_total_size which is guaranteed at this point */
		if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
			VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
	}

	/*
	 * If we don't have a change time, pull it from the modtime.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
		VATTR_RETURN(vap, va_change_time, vap->va_modify_time);

	/*
	 * This is really only supported for the creation VNOPs, but since the field is there
	 * we should populate it correctly.
	 */
	VATTR_RETURN(vap, va_type, vp->v_type);

	/*
	 * The fsid can be obtained from the mountpoint directly.
	 */
	VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

out:

	return(error);
}
1919
c0fea474
A
1920/*
1921 * Set the attributes on a vnode in a vnode context.
1922 *
1923 * Parameters: vp The vnode whose attributes to set.
1924 * vap A pointer to the attributes to set.
1925 * ctx The vnode context in which the
1926 * operation is to be attempted.
1927 *
1928 * Returns: 0 Success
1929 * !0 errno value
1930 *
1931 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
1932 *
1933 * The contents of the data area pointed to by 'vap' may be
1934 * modified if the vnode is on a filesystem which has been
1935 * mounted with ingore ownership flags, or by the underlyng
1936 * VFS itself, or by the fallback code, if the underlying VFS
1937 * does not support ACL, UUID, or GUUID attributes directly.
1938 *
1939 * XXX: We should enummerate the possible errno values here, and where
1940 * in the code they originated.
1941 */
91447636
A
1942int
1943vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
1944{
1945 int error, is_ownership_change=0;
1946
1947 /*
1948 * Make sure the filesystem is mounted R/W.
1949 * If not, return an error.
1950 */
c0fea474
A
1951 if (vfs_isrdonly(vp->v_mount)) {
1952 error = EROFS;
1953 goto out;
1954 }
91447636
A
1955
1956 /*
1957 * If ownership is being ignored on this volume, we silently discard
1958 * ownership changes.
1959 */
1960 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
1961 VATTR_CLEAR_ACTIVE(vap, va_uid);
1962 VATTR_CLEAR_ACTIVE(vap, va_gid);
1963 }
1964
1965 if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) {
1966 is_ownership_change = 1;
1967 }
1968
1969 /*
1970 * Make sure that extended security is enabled if we're going to try
1971 * to set any.
1972 */
1973 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
1974 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
1975 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
c0fea474
A
1976 error = ENOTSUP;
1977 goto out;
91447636
A
1978 }
1979
1980 error = VNOP_SETATTR(vp, vap, ctx);
1981
1982 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
1983 error = vnode_setattr_fallback(vp, vap, ctx);
1984
1985 /*
1986 * If we have changed any of the things about the file that are likely
1987 * to result in changes to authorisation results, blow the vnode auth
1988 * cache
1989 */
1990 if (VATTR_IS_SUPPORTED(vap, va_mode) ||
1991 VATTR_IS_SUPPORTED(vap, va_uid) ||
1992 VATTR_IS_SUPPORTED(vap, va_gid) ||
1993 VATTR_IS_SUPPORTED(vap, va_flags) ||
1994 VATTR_IS_SUPPORTED(vap, va_acl) ||
1995 VATTR_IS_SUPPORTED(vap, va_uuuid) ||
1996 VATTR_IS_SUPPORTED(vap, va_guuid))
1997 vnode_uncache_credentials(vp);
1998 // only send a stat_changed event if this is more than
1999 // just an access time update
2000 if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time))) {
2001 if (need_fsevent(FSE_STAT_CHANGED, vp) || (is_ownership_change && need_fsevent(FSE_CHOWN, vp))) {
2002 if (is_ownership_change == 0)
2003 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2004 else
2005 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
2006 }
2007 }
c0fea474
A
2008
2009out:
91447636
A
2010 return(error);
2011}
2012
2013/*
c0fea474
A
2014 * Fallback for setting the attributes on a vnode in a vnode context. This
2015 * Function will attempt to store ACL, UUID, and GUID information utilizing
2016 * a read/modify/write operation against an EA used as a backing store for
2017 * the object.
2018 *
2019 * Parameters: vp The vnode whose attributes to set.
2020 * vap A pointer to the attributes to set.
2021 * ctx The vnode context in which the
2022 * operation is to be attempted.
2023 *
2024 * Returns: 0 Success
2025 * !0 errno value
2026 *
2027 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2028 * as are the fsec and lfsec, if they are used.
2029 *
2030 * The contents of the data area pointed to by 'vap' may be
2031 * modified to indicate that the attribute is supported for
2032 * any given requested attribute.
2033 *
2034 * XXX: We should enummerate the possible errno values here, and where
2035 * in the code they originated.
2036 */
91447636
A
2037int
2038vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2039{
2040 kauth_filesec_t fsec;
2041 kauth_acl_t facl;
2042 struct kauth_filesec lfsec;
2043 int error;
2044
2045 error = 0;
2046
2047 /*
2048 * Extended security fallback via extended attributes.
2049 *
c0fea474
A
2050 * Note that we do not free the filesec; the caller is expected to
2051 * do this.
91447636
A
2052 */
2053 if (VATTR_NOT_RETURNED(vap, va_acl) ||
2054 VATTR_NOT_RETURNED(vap, va_uuuid) ||
2055 VATTR_NOT_RETURNED(vap, va_guuid)) {
2056 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
2057
2058 /*
c0fea474
A
2059 * Fail for file types that we don't permit extended security
2060 * to be set on.
91447636
A
2061 */
2062 if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
2063 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
2064 error = EINVAL;
2065 goto out;
2066 }
2067
2068 /*
c0fea474
A
2069 * If we don't have all the extended security items, we need
2070 * to fetch the existing data to perform a read-modify-write
2071 * operation.
91447636
A
2072 */
2073 fsec = NULL;
2074 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
2075 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
2076 !VATTR_IS_ACTIVE(vap, va_guuid)) {
2077 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2078 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
2079 goto out;
2080 }
2081 }
2082 /* if we didn't get a filesec, use our local one */
2083 if (fsec == NULL) {
2084 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2085 fsec = &lfsec;
2086 } else {
2087 KAUTH_DEBUG("SETATTR - updating existing filesec");
2088 }
2089 /* find the ACL */
2090 facl = &fsec->fsec_acl;
2091
2092 /* if we're using the local filesec, we need to initialise it */
2093 if (fsec == &lfsec) {
2094 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
2095 fsec->fsec_owner = kauth_null_guid;
2096 fsec->fsec_group = kauth_null_guid;
2097 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2098 facl->acl_flags = 0;
2099 }
2100
2101 /*
2102 * Update with the supplied attributes.
2103 */
2104 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
2105 KAUTH_DEBUG("SETATTR - updating owner UUID");
2106 fsec->fsec_owner = vap->va_uuuid;
2107 VATTR_SET_SUPPORTED(vap, va_uuuid);
2108 }
2109 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
2110 KAUTH_DEBUG("SETATTR - updating group UUID");
2111 fsec->fsec_group = vap->va_guuid;
2112 VATTR_SET_SUPPORTED(vap, va_guuid);
2113 }
2114 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2115 if (vap->va_acl == NULL) {
2116 KAUTH_DEBUG("SETATTR - removing ACL");
2117 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
2118 } else {
2119 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
2120 facl = vap->va_acl;
2121 }
2122 VATTR_SET_SUPPORTED(vap, va_acl);
2123 }
2124
2125 /*
c0fea474
A
2126 * If the filesec data is all invalid, we can just remove
2127 * the EA completely.
91447636
A
2128 */
2129 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
2130 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
2131 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
2132 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
2133 /* no attribute is ok, nothing to delete */
2134 if (error == ENOATTR)
2135 error = 0;
2136 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
2137 } else {
2138 /* write the EA */
2139 error = vnode_set_filesec(vp, fsec, facl, ctx);
2140 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
2141 }
2142
2143 /* if we fetched a filesec, dispose of the buffer */
2144 if (fsec != &lfsec)
2145 kauth_filesec_free(fsec);
2146 }
2147out:
2148
2149 return(error);
2150}
2151
2152/*
2153 * Definition of vnode operations.
2154 */
2155
2156#if 0
2157/*
2158 *#
2159 *#% lookup dvp L ? ?
2160 *#% lookup vpp - L -
2161 */
2162struct vnop_lookup_args {
2163 struct vnodeop_desc *a_desc;
2164 vnode_t a_dvp;
2165 vnode_t *a_vpp;
2166 struct componentname *a_cnp;
2167 vfs_context_t a_context;
2168};
2169#endif /* 0*/
2170
/*
 * Call the filesystem's lookup entry point to resolve 'cnp' in directory
 * 'dvp'; on success '*vpp' holds the found vnode.  For non-thread-safe
 * filesystems, takes the funnel/fsnode lock around the call, and may leave
 * the directory's fsnode lock held (flagged via FSNODELOCKHELD) when the
 * caller asked for a locked parent on the last component.
 */
errno_t
VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t context)
{
	int _err;
	struct vnop_lookup_args a;
	vnode_t vp;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_lookup_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	vnode_cache_credentials(dvp, context);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);

	vp = *vpp;

	if (!thread_safe) {
	        if ( (cnp->cn_flags & ISLASTCN) ) {
		        if ( (cnp->cn_flags & LOCKPARENT) ) {
			        if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
				        /*
					 * leave the fsnode lock held on
					 * the directory, but restore the funnel...
					 * also indicate that we need to drop the
					 * fsnode_lock when we're done with the
					 * system call processing for this path
					 */
				        cnp->cn_flags |= FSNODELOCKHELD;

					(void) thread_funnel_set(kernel_flock, funnel_state);
					return (_err);
				}
			}
		}
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2220
2221#if 0
2222/*
2223 *#
2224 *#% create dvp L L L
2225 *#% create vpp - L -
2226 *#
2227 */
2228
2229struct vnop_create_args {
2230 struct vnodeop_desc *a_desc;
2231 vnode_t a_dvp;
2232 vnode_t *a_vpp;
2233 struct componentname *a_cnp;
2234 struct vnode_attr *a_vap;
2235 vfs_context_t a_context;
2236};
2237#endif /* 0*/
/*
 * Call the filesystem's create entry point to make a new file named by
 * 'cnp' in directory 'dvp'; on success '*vpp' holds the new vnode.
 * For filesystems without native xattr support, also removes any stale
 * AppleDouble sidecar file left over from a previous file of this name.
 */
errno_t
VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
{
	int _err;
	struct vnop_create_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_create_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
	}
	if (!thread_safe) {
		unlock_fsnode(dvp, &funnel_state);
	}
	return (_err);
}
2271
2272#if 0
2273/*
2274 *#
2275 *#% whiteout dvp L L L
2276 *#% whiteout cnp - - -
2277 *#% whiteout flag - - -
2278 *#
2279 */
2280struct vnop_whiteout_args {
2281 struct vnodeop_desc *a_desc;
2282 vnode_t a_dvp;
2283 struct componentname *a_cnp;
2284 int a_flags;
2285 vfs_context_t a_context;
2286};
2287#endif /* 0*/
2288errno_t
2289VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t context)
2290{
2291 int _err;
2292 struct vnop_whiteout_args a;
2293 int thread_safe;
2294 int funnel_state = 0;
2295
2296 a.a_desc = &vnop_whiteout_desc;
2297 a.a_dvp = dvp;
2298 a.a_cnp = cnp;
2299 a.a_flags = flags;
2300 a.a_context = context;
2301 thread_safe = THREAD_SAFE_FS(dvp);
2302
2303 if (!thread_safe) {
2304 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2305 return (_err);
2306 }
2307 }
2308 _err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);
2309 if (!thread_safe) {
2310 unlock_fsnode(dvp, &funnel_state);
2311 }
2312 return (_err);
2313}
2314
2315 #if 0
2316/*
2317 *#
2318 *#% mknod dvp L U U
2319 *#% mknod vpp - X -
2320 *#
2321 */
2322struct vnop_mknod_args {
2323 struct vnodeop_desc *a_desc;
2324 vnode_t a_dvp;
2325 vnode_t *a_vpp;
2326 struct componentname *a_cnp;
2327 struct vnode_attr *a_vap;
2328 vfs_context_t a_context;
2329};
2330#endif /* 0*/
2331errno_t
2332VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context)
2333{
2334
2335 int _err;
2336 struct vnop_mknod_args a;
2337 int thread_safe;
2338 int funnel_state = 0;
2339
2340 a.a_desc = &vnop_mknod_desc;
2341 a.a_dvp = dvp;
2342 a.a_vpp = vpp;
2343 a.a_cnp = cnp;
2344 a.a_vap = vap;
2345 a.a_context = context;
2346 thread_safe = THREAD_SAFE_FS(dvp);
2347
2348 if (!thread_safe) {
2349 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
2350 return (_err);
2351 }
2352 }
2353 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
2354 if (!thread_safe) {
2355 unlock_fsnode(dvp, &funnel_state);
2356 }
2357 return (_err);
2358}
2359
2360#if 0
2361/*
2362 *#
2363 *#% open vp L L L
2364 *#
2365 */
2366struct vnop_open_args {
2367 struct vnodeop_desc *a_desc;
2368 vnode_t a_vp;
2369 int a_mode;
2370 vfs_context_t a_context;
2371};
2372#endif /* 0*/
2373errno_t
2374VNOP_OPEN(vnode_t vp, int mode, vfs_context_t context)
2375{
2376 int _err;
2377 struct vnop_open_args a;
2378 int thread_safe;
2379 int funnel_state = 0;
2380 struct vfs_context acontext;
2381
2382 if (context == NULL) {
2383 acontext.vc_proc = current_proc();
2384 acontext.vc_ucred = kauth_cred_get();
2385 context = &acontext;
2386 }
2387 a.a_desc = &vnop_open_desc;
2388 a.a_vp = vp;
2389 a.a_mode = mode;
2390 a.a_context = context;
2391 thread_safe = THREAD_SAFE_FS(vp);
2392
2393 if (!thread_safe) {
2394 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2395 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2396 if ( (_err = lock_fsnode(vp, NULL)) ) {
2397 (void) thread_funnel_set(kernel_flock, funnel_state);
2398 return (_err);
2399 }
2400 }
2401 }
2402 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
2403 if (!thread_safe) {
2404 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2405 unlock_fsnode(vp, NULL);
2406 }
2407 (void) thread_funnel_set(kernel_flock, funnel_state);
2408 }
2409 return (_err);
2410}
2411
2412#if 0
2413/*
2414 *#
2415 *#% close vp U U U
2416 *#
2417 */
2418struct vnop_close_args {
2419 struct vnodeop_desc *a_desc;
2420 vnode_t a_vp;
2421 int a_fflag;
2422 vfs_context_t a_context;
2423};
2424#endif /* 0*/
2425errno_t
2426VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t context)
2427{
2428 int _err;
2429 struct vnop_close_args a;
2430 int thread_safe;
2431 int funnel_state = 0;
2432 struct vfs_context acontext;
2433
2434 if (context == NULL) {
2435 acontext.vc_proc = current_proc();
2436 acontext.vc_ucred = kauth_cred_get();
2437 context = &acontext;
2438 }
2439 a.a_desc = &vnop_close_desc;
2440 a.a_vp = vp;
2441 a.a_fflag = fflag;
2442 a.a_context = context;
2443 thread_safe = THREAD_SAFE_FS(vp);
2444
2445 if (!thread_safe) {
2446 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2447 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2448 if ( (_err = lock_fsnode(vp, NULL)) ) {
2449 (void) thread_funnel_set(kernel_flock, funnel_state);
2450 return (_err);
2451 }
2452 }
2453 }
2454 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
2455 if (!thread_safe) {
2456 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2457 unlock_fsnode(vp, NULL);
2458 }
2459 (void) thread_funnel_set(kernel_flock, funnel_state);
2460 }
2461 return (_err);
2462}
2463
2464#if 0
2465/*
2466 *#
2467 *#% access vp L L L
2468 *#
2469 */
2470struct vnop_access_args {
2471 struct vnodeop_desc *a_desc;
2472 vnode_t a_vp;
2473 int a_action;
2474 vfs_context_t a_context;
2475};
2476#endif /* 0*/
2477errno_t
2478VNOP_ACCESS(vnode_t vp, int action, vfs_context_t context)
2479{
2480 int _err;
2481 struct vnop_access_args a;
2482 int thread_safe;
2483 int funnel_state = 0;
2484 struct vfs_context acontext;
2485
2486 if (context == NULL) {
2487 acontext.vc_proc = current_proc();
2488 acontext.vc_ucred = kauth_cred_get();
2489 context = &acontext;
2490 }
2491 a.a_desc = &vnop_access_desc;
2492 a.a_vp = vp;
2493 a.a_action = action;
2494 a.a_context = context;
2495 thread_safe = THREAD_SAFE_FS(vp);
2496
2497 if (!thread_safe) {
2498 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2499 return (_err);
2500 }
2501 }
2502 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
2503 if (!thread_safe) {
2504 unlock_fsnode(vp, &funnel_state);
2505 }
2506 return (_err);
2507}
2508
2509#if 0
2510/*
2511 *#
2512 *#% getattr vp = = =
2513 *#
2514 */
2515struct vnop_getattr_args {
2516 struct vnodeop_desc *a_desc;
2517 vnode_t a_vp;
2518 struct vnode_attr *a_vap;
2519 vfs_context_t a_context;
2520};
2521#endif /* 0*/
2522errno_t
2523VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
2524{
2525 int _err;
2526 struct vnop_getattr_args a;
2527 int thread_safe;
2528 int funnel_state;
2529
2530 a.a_desc = &vnop_getattr_desc;
2531 a.a_vp = vp;
2532 a.a_vap = vap;
2533 a.a_context = context;
2534 thread_safe = THREAD_SAFE_FS(vp);
2535
2536 if (!thread_safe) {
2537 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2538 return (_err);
2539 }
2540 }
2541 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
2542 if (!thread_safe) {
2543 unlock_fsnode(vp, &funnel_state);
2544 }
2545 return (_err);
2546}
2547
2548#if 0
2549/*
2550 *#
2551 *#% setattr vp L L L
2552 *#
2553 */
2554struct vnop_setattr_args {
2555 struct vnodeop_desc *a_desc;
2556 vnode_t a_vp;
2557 struct vnode_attr *a_vap;
2558 vfs_context_t a_context;
2559};
2560#endif /* 0*/
/*
 * Set attributes on vp via the filesystem's setattr entry point.
 * On success, for filesystems without native extended attribute
 * support, any uid/gid/mode change is mirrored onto the associated
 * "._" AppleDouble shadow file so its ownership/permissions stay
 * in sync with the data fork.
 */
errno_t
VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context)
{
	int _err;
	struct vnop_setattr_args a;
	int thread_safe;
	int funnel_state;	/* written by lock_fsnode() only on the !thread_safe path */

	a.a_desc = &vnop_setattr_desc;
	a.a_vp = vp;
	a.a_vap = vap;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);

	/*
	 * Shadow uid/gid/mode change to the extended attribute file.
	 */
	if (_err == 0 && !NATIVE_XATTR(vp)) {
		struct vnode_attr va;
		int change = 0;

		/* Re-request only the identity/permission bits the caller set. */
		VATTR_INIT(&va);
		if (VATTR_IS_ACTIVE(vap, va_uid)) {
			VATTR_SET(&va, va_uid, vap->va_uid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_gid)) {
			VATTR_SET(&va, va_gid, vap->va_gid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_mode)) {
			VATTR_SET(&va, va_mode, vap->va_mode);
			change = 1;
		}
		if (change) {
			vnode_t dvp;
			char *vname;

			/* Parent and name are needed to locate the "._" sibling;
			 * either may be unavailable (NULL), xattrfile_setattr
			 * is expected to cope. */
			dvp = vnode_getparent(vp);
			vname = vnode_getname(vp);

			xattrfile_setattr(dvp, vname, &va, context, thread_safe);
			if (dvp != NULLVP)
				vnode_put(dvp);
			if (vname != NULL)
				vnode_putname(vname);
		}
	}
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
2621
2622#if 0
2623/*
2624 *#
2625 *#% getattrlist vp = = =
2626 *#
2627 */
2628struct vnop_getattrlist_args {
2629 struct vnodeop_desc *a_desc;
2630 vnode_t a_vp;
2631 struct attrlist *a_alist;
2632 struct uio *a_uio;
2633 int a_options;
2634 vfs_context_t a_context;
2635};
2636#endif /* 0*/
2637errno_t
2638VNOP_GETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
2639{
2640 int _err;
2641 struct vnop_getattrlist_args a;
2642 int thread_safe;
2643 int funnel_state = 0;
2644
2645 a.a_desc = &vnop_getattrlist_desc;
2646 a.a_vp = vp;
2647 a.a_alist = alist;
2648 a.a_uio = uio;
2649 a.a_options = options;
2650 a.a_context = context;
2651 thread_safe = THREAD_SAFE_FS(vp);
2652
2653 if (!thread_safe) {
2654 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2655 return (_err);
2656 }
2657 }
2658 _err = (*vp->v_op[vnop_getattrlist_desc.vdesc_offset])(&a);
2659 if (!thread_safe) {
2660 unlock_fsnode(vp, &funnel_state);
2661 }
2662 return (_err);
2663}
2664
2665#if 0
2666/*
2667 *#
2668 *#% setattrlist vp L L L
2669 *#
2670 */
2671struct vnop_setattrlist_args {
2672 struct vnodeop_desc *a_desc;
2673 vnode_t a_vp;
2674 struct attrlist *a_alist;
2675 struct uio *a_uio;
2676 int a_options;
2677 vfs_context_t a_context;
2678};
2679#endif /* 0*/
2680errno_t
2681VNOP_SETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context)
2682{
2683 int _err;
2684 struct vnop_setattrlist_args a;
2685 int thread_safe;
2686 int funnel_state = 0;
2687
2688 a.a_desc = &vnop_setattrlist_desc;
2689 a.a_vp = vp;
2690 a.a_alist = alist;
2691 a.a_uio = uio;
2692 a.a_options = options;
2693 a.a_context = context;
2694 thread_safe = THREAD_SAFE_FS(vp);
2695
2696 if (!thread_safe) {
2697 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
2698 return (_err);
2699 }
2700 }
2701 _err = (*vp->v_op[vnop_setattrlist_desc.vdesc_offset])(&a);
2702
2703 vnode_uncache_credentials(vp);
2704
2705 if (!thread_safe) {
2706 unlock_fsnode(vp, &funnel_state);
2707 }
2708 return (_err);
2709}
2710
2711
2712#if 0
2713/*
2714 *#
2715 *#% read vp L L L
2716 *#
2717 */
2718struct vnop_read_args {
2719 struct vnodeop_desc *a_desc;
2720 vnode_t a_vp;
2721 struct uio *a_uio;
2722 int a_ioflag;
2723 vfs_context_t a_context;
2724};
2725#endif /* 0*/
2726errno_t
2727VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
2728{
2729 int _err;
2730 struct vnop_read_args a;
2731 int thread_safe;
2732 int funnel_state = 0;
2733 struct vfs_context acontext;
2734
2735 if (context == NULL) {
2736 acontext.vc_proc = current_proc();
2737 acontext.vc_ucred = kauth_cred_get();
2738 context = &acontext;
2739 }
2740
2741 a.a_desc = &vnop_read_desc;
2742 a.a_vp = vp;
2743 a.a_uio = uio;
2744 a.a_ioflag = ioflag;
2745 a.a_context = context;
2746 thread_safe = THREAD_SAFE_FS(vp);
2747
2748 if (!thread_safe) {
2749 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2750 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2751 if ( (_err = lock_fsnode(vp, NULL)) ) {
2752 (void) thread_funnel_set(kernel_flock, funnel_state);
2753 return (_err);
2754 }
2755 }
2756 }
2757 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
2758
2759 if (!thread_safe) {
2760 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2761 unlock_fsnode(vp, NULL);
2762 }
2763 (void) thread_funnel_set(kernel_flock, funnel_state);
2764 }
2765 return (_err);
2766}
2767
2768
2769#if 0
2770/*
2771 *#
2772 *#% write vp L L L
2773 *#
2774 */
2775struct vnop_write_args {
2776 struct vnodeop_desc *a_desc;
2777 vnode_t a_vp;
2778 struct uio *a_uio;
2779 int a_ioflag;
2780 vfs_context_t a_context;
2781};
2782#endif /* 0*/
2783errno_t
2784VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context)
2785{
2786 struct vnop_write_args a;
2787 int _err;
2788 int thread_safe;
2789 int funnel_state = 0;
2790 struct vfs_context acontext;
2791
2792 if (context == NULL) {
2793 acontext.vc_proc = current_proc();
2794 acontext.vc_ucred = kauth_cred_get();
2795 context = &acontext;
2796 }
2797
2798 a.a_desc = &vnop_write_desc;
2799 a.a_vp = vp;
2800 a.a_uio = uio;
2801 a.a_ioflag = ioflag;
2802 a.a_context = context;
2803 thread_safe = THREAD_SAFE_FS(vp);
2804
2805 if (!thread_safe) {
2806 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2807 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2808 if ( (_err = lock_fsnode(vp, NULL)) ) {
2809 (void) thread_funnel_set(kernel_flock, funnel_state);
2810 return (_err);
2811 }
2812 }
2813 }
2814 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
2815
2816 if (!thread_safe) {
2817 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2818 unlock_fsnode(vp, NULL);
2819 }
2820 (void) thread_funnel_set(kernel_flock, funnel_state);
2821 }
2822 return (_err);
2823}
2824
2825
2826#if 0
2827/*
2828 *#
2829 *#% ioctl vp U U U
2830 *#
2831 */
2832struct vnop_ioctl_args {
2833 struct vnodeop_desc *a_desc;
2834 vnode_t a_vp;
2835 u_long a_command;
2836 caddr_t a_data;
2837 int a_fflag;
2838 vfs_context_t a_context;
2839};
2840#endif /* 0*/
2841errno_t
2842VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t context)
2843{
2844 int _err;
2845 struct vnop_ioctl_args a;
2846 int thread_safe;
2847 int funnel_state = 0;
2848 struct vfs_context acontext;
2849
2850 if (context == NULL) {
2851 acontext.vc_proc = current_proc();
2852 acontext.vc_ucred = kauth_cred_get();
2853 context = &acontext;
2854 }
2855
2856 if (vfs_context_is64bit(context)) {
2857 if (!vnode_vfs64bitready(vp)) {
2858 return(ENOTTY);
2859 }
2860 }
2861
2862 a.a_desc = &vnop_ioctl_desc;
2863 a.a_vp = vp;
2864 a.a_command = command;
2865 a.a_data = data;
2866 a.a_fflag = fflag;
2867 a.a_context= context;
2868 thread_safe = THREAD_SAFE_FS(vp);
2869
2870 if (!thread_safe) {
2871 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2872 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2873 if ( (_err = lock_fsnode(vp, NULL)) ) {
2874 (void) thread_funnel_set(kernel_flock, funnel_state);
2875 return (_err);
2876 }
2877 }
2878 }
2879 _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
2880 if (!thread_safe) {
2881 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2882 unlock_fsnode(vp, NULL);
2883 }
2884 (void) thread_funnel_set(kernel_flock, funnel_state);
2885 }
2886 return (_err);
2887}
2888
2889
2890#if 0
2891/*
2892 *#
2893 *#% select vp U U U
2894 *#
2895 */
2896struct vnop_select_args {
2897 struct vnodeop_desc *a_desc;
2898 vnode_t a_vp;
2899 int a_which;
2900 int a_fflags;
2901 void *a_wql;
2902 vfs_context_t a_context;
2903};
2904#endif /* 0*/
2905errno_t
2906VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t context)
2907{
2908 int _err;
2909 struct vnop_select_args a;
2910 int thread_safe;
2911 int funnel_state = 0;
2912 struct vfs_context acontext;
2913
2914 if (context == NULL) {
2915 acontext.vc_proc = current_proc();
2916 acontext.vc_ucred = kauth_cred_get();
2917 context = &acontext;
2918 }
2919 a.a_desc = &vnop_select_desc;
2920 a.a_vp = vp;
2921 a.a_which = which;
2922 a.a_fflags = fflags;
2923 a.a_context = context;
2924 a.a_wql = wql;
2925 thread_safe = THREAD_SAFE_FS(vp);
2926
2927 if (!thread_safe) {
2928 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2929 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2930 if ( (_err = lock_fsnode(vp, NULL)) ) {
2931 (void) thread_funnel_set(kernel_flock, funnel_state);
2932 return (_err);
2933 }
2934 }
2935 }
2936 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
2937 if (!thread_safe) {
2938 if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
2939 unlock_fsnode(vp, NULL);
2940 }
2941 (void) thread_funnel_set(kernel_flock, funnel_state);
2942 }
2943 return (_err);
2944}
2945
2946
2947#if 0
2948/*
2949 *#
2950 *#% exchange fvp L L L
2951 *#% exchange tvp L L L
2952 *#
2953 */
2954struct vnop_exchange_args {
2955 struct vnodeop_desc *a_desc;
2956 vnode_t a_fvp;
2957 vnode_t a_tvp;
2958 int a_options;
2959 vfs_context_t a_context;
2960};
2961#endif /* 0*/
/*
 * Atomically exchange the contents of two files via the filesystem's
 * exchange entry point. For non-threadsafe filesystems both fsnodes
 * are locked, ordered by vnode address to avoid lock-order deadlocks.
 *
 * NOTE(review): thread-safety is judged from fvp only — presumably
 * fvp and tvp always reside on the same mount for an exchange;
 * confirm against callers.
 */
errno_t
VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t context)
{
	int _err;
	struct vnop_exchange_args a;
	int thread_safe;
	int funnel_state = 0;
	vnode_t	lock_first = NULL, lock_second = NULL;

	a.a_desc = &vnop_exchange_desc;
	a.a_fvp = fvp;
	a.a_tvp = tvp;
	a.a_options = options;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(fvp);

	if (!thread_safe) {
		/*
		 * Lock in vnode address order to avoid deadlocks
		 */
		if (fvp < tvp) {
			lock_first = fvp;
			lock_second = tvp;
		} else {
			lock_first = tvp;
			lock_second = fvp;
		}
		/* First lock also acquires the funnel (funnel_state). */
		if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
			return (_err);
		}
		if ( (_err = lock_fsnode(lock_second, NULL)) ) {
			unlock_fsnode(lock_first, &funnel_state);
			return (_err);
		}
	}
	_err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		/* Release in reverse order; last unlock drops the funnel. */
		unlock_fsnode(lock_second, NULL);
		unlock_fsnode(lock_first, &funnel_state);
	}
	return (_err);
}
3004
3005
3006#if 0
3007/*
3008 *#
3009 *#% revoke vp U U U
3010 *#
3011 */
3012struct vnop_revoke_args {
3013 struct vnodeop_desc *a_desc;
3014 vnode_t a_vp;
3015 int a_flags;
3016 vfs_context_t a_context;
3017};
3018#endif /* 0*/
3019errno_t
3020VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t context)
3021{
3022 struct vnop_revoke_args a;
3023 int _err;
3024 int thread_safe;
3025 int funnel_state = 0;
3026
3027 a.a_desc = &vnop_revoke_desc;
3028 a.a_vp = vp;
3029 a.a_flags = flags;
3030 a.a_context = context;
3031 thread_safe = THREAD_SAFE_FS(vp);
3032
3033 if (!thread_safe) {
3034 funnel_state = thread_funnel_set(kernel_flock, TRUE);
3035 }
3036 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3037 if (!thread_safe) {
3038 (void) thread_funnel_set(kernel_flock, funnel_state);
3039 }
3040 return (_err);
3041}
3042
3043
3044#if 0
3045/*
3046 *#
3047 *# mmap - vp U U U
3048 *#
3049 */
3050struct vnop_mmap_args {
3051 struct vnodeop_desc *a_desc;
3052 vnode_t a_vp;
3053 int a_fflags;
3054 vfs_context_t a_context;
3055};
3056#endif /* 0*/
3057errno_t
3058VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t context)
3059{
3060 int _err;
3061 struct vnop_mmap_args a;
3062 int thread_safe;
3063 int funnel_state = 0;
3064
3065 a.a_desc = &vnop_mmap_desc;
3066 a.a_vp = vp;
3067 a.a_fflags = fflags;
3068 a.a_context = context;
3069 thread_safe = THREAD_SAFE_FS(vp);
3070
3071 if (!thread_safe) {
3072 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3073 return (_err);
3074 }
3075 }
3076 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
3077 if (!thread_safe) {
3078 unlock_fsnode(vp, &funnel_state);
3079 }
3080 return (_err);
3081}
3082
3083
3084#if 0
3085/*
3086 *#
3087 *# mnomap - vp U U U
3088 *#
3089 */
3090struct vnop_mnomap_args {
3091 struct vnodeop_desc *a_desc;
3092 vnode_t a_vp;
3093 vfs_context_t a_context;
3094};
3095#endif /* 0*/
3096errno_t
3097VNOP_MNOMAP(vnode_t vp, vfs_context_t context)
3098{
3099 int _err;
3100 struct vnop_mnomap_args a;
3101 int thread_safe;
3102 int funnel_state = 0;
3103
3104 a.a_desc = &vnop_mnomap_desc;
3105 a.a_vp = vp;
3106 a.a_context = context;
3107 thread_safe = THREAD_SAFE_FS(vp);
3108
3109 if (!thread_safe) {
3110 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3111 return (_err);
3112 }
3113 }
3114 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
3115 if (!thread_safe) {
3116 unlock_fsnode(vp, &funnel_state);
3117 }
3118 return (_err);
3119}
3120
3121
3122#if 0
3123/*
3124 *#
3125 *#% fsync vp L L L
3126 *#
3127 */
3128struct vnop_fsync_args {
3129 struct vnodeop_desc *a_desc;
3130 vnode_t a_vp;
3131 int a_waitfor;
3132 vfs_context_t a_context;
3133};
3134#endif /* 0*/
3135errno_t
3136VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t context)
3137{
3138 struct vnop_fsync_args a;
3139 int _err;
3140 int thread_safe;
3141 int funnel_state = 0;
3142
3143 a.a_desc = &vnop_fsync_desc;
3144 a.a_vp = vp;
3145 a.a_waitfor = waitfor;
3146 a.a_context = context;
3147 thread_safe = THREAD_SAFE_FS(vp);
3148
3149 if (!thread_safe) {
3150 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3151 return (_err);
3152 }
3153 }
3154 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
3155 if (!thread_safe) {
3156 unlock_fsnode(vp, &funnel_state);
3157 }
3158 return (_err);
3159}
3160
3161
3162#if 0
3163/*
3164 *#
3165 *#% remove dvp L U U
3166 *#% remove vp L U U
3167 *#
3168 */
3169struct vnop_remove_args {
3170 struct vnodeop_desc *a_desc;
3171 vnode_t a_dvp;
3172 vnode_t a_vp;
3173 struct componentname *a_cnp;
3174 int a_flags;
3175 vfs_context_t a_context;
3176};
3177#endif /* 0*/
/*
 * Remove the file vp (named by cnp) from directory dvp via the
 * filesystem's remove entry point. On success the vnode is marked
 * for deferred inactive processing, and for filesystems without
 * native extended attribute support the associated "._" AppleDouble
 * file is removed as well.
 */
errno_t
VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t context)
{
	int _err;
	struct vnop_remove_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_remove_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		/* NOTE(review): thread-safety is judged from dvp but the fsnode
		 * lock is taken on vp — presumably both live on the same mount;
		 * confirm. */
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);

	if (_err == 0) {
		vnode_setneedinactive(vp);

		if ( !(NATIVE_XATTR(dvp)) ) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
		}
	}
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3216
3217
3218#if 0
3219/*
3220 *#
3221 *#% link vp U U U
3222 *#% link tdvp L U U
3223 *#
3224 */
3225struct vnop_link_args {
3226 struct vnodeop_desc *a_desc;
3227 vnode_t a_vp;
3228 vnode_t a_tdvp;
3229 struct componentname *a_cnp;
3230 vfs_context_t a_context;
3231};
3232#endif /* 0*/
/*
 * Create a hard link to vp named by cnp in directory tdvp, via the
 * filesystem's link entry point. Linking to "._" AppleDouble names
 * is refused with EPERM on filesystems without native extended
 * attribute support, since those names are reserved for shadow files.
 */
errno_t
VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t context)
{
	int _err;
	struct vnop_link_args a;
	int thread_safe;
	int funnel_state = 0;

	/*
	 * For file systems with non-native extended attributes,
	 * disallow linking to an existing "._" Apple Double file.
	 */
	if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
		char *vname;

		vname = vnode_getname(vp);
		if (vname != NULL) {
			_err = 0;
			/* "._x" (at least one char after the prefix) is reserved. */
			if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
				_err = EPERM;
			}
			vnode_putname(vname);
			if (_err)
				return (_err);
		}
	}
	a.a_desc = &vnop_link_desc;
	a.a_vp = vp;
	a.a_tdvp = tdvp;
	a.a_cnp = cnp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		/* NOTE(review): the fsnode lock is taken on vp while the
		 * operation dispatches through tdvp's op vector — hard links
		 * cannot cross mounts, so both presumably share one fs;
		 * confirm. */
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3277
3278
3279#if 0
3280/*
3281 *#
3282 *#% rename fdvp U U U
3283 *#% rename fvp U U U
3284 *#% rename tdvp L U U
3285 *#% rename tvp X U U
3286 *#
3287 */
3288struct vnop_rename_args {
3289 struct vnodeop_desc *a_desc;
3290 vnode_t a_fdvp;
3291 vnode_t a_fvp;
3292 struct componentname *a_fcnp;
3293 vnode_t a_tdvp;
3294 vnode_t a_tvp;
3295 struct componentname *a_tcnp;
3296 vfs_context_t a_context;
3297};
3298#endif /* 0*/
/*
 * Rename fvp (named fcnp, in fdvp) to tcnp in tdvp via the
 * filesystem's rename entry point, replacing tvp if present.
 *
 * For non-threadsafe filesystems the two parents and then the two
 * children are fsnode-locked, each pair ordered by vnode address to
 * avoid lock-order deadlocks.
 *
 * On filesystems without native extended attribute support, the
 * associated "._" AppleDouble shadow file is renamed alongside the
 * real file (or the destination's stale shadow removed) on a
 * best-effort basis — failures there do not affect the primary
 * rename's return value.
 */
errno_t
VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
            struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
            vfs_context_t context)
{
	int _err;
	struct vnop_rename_args a;
	int funnel_state = 0;
	char smallname1[48];
	char smallname2[48];
	char *xfromname = NULL;
	char *xtoname = NULL;
	vnode_t lock_first = NULL, lock_second = NULL;
	vnode_t fdvp_unsafe = NULLVP;
	vnode_t tdvp_unsafe = NULLVP;

	a.a_desc = &vnop_rename_desc;
	a.a_fdvp = fdvp;
	a.a_fvp = fvp;
	a.a_fcnp = fcnp;
	a.a_tdvp = tdvp;
	a.a_tvp = tvp;
	a.a_tcnp = tcnp;
	a.a_context = context;

	if (!THREAD_SAFE_FS(fdvp))
		fdvp_unsafe = fdvp;
	if (!THREAD_SAFE_FS(tdvp))
		tdvp_unsafe = tdvp;

	if (fdvp_unsafe != NULLVP) {
		/*
		 * Lock parents in vnode address order to avoid deadlocks.
		 * Note that it's possible for the fdvp to be unsafe
		 * but the tdvp to be safe, because tvp could be a directory
		 * in the root of a filesystem... in that case, tdvp lives
		 * in the filesystem that this root is mounted on.
		 */
		if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
			lock_first = fdvp_unsafe;
			lock_second = NULL;
		} else if (fdvp_unsafe < tdvp_unsafe) {
			lock_first = fdvp_unsafe;
			lock_second = tdvp_unsafe;
		} else {
			lock_first = tdvp_unsafe;
			lock_second = fdvp_unsafe;
		}
		/* First parent lock also acquires the funnel (funnel_state). */
		if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
			return (_err);

		if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
			unlock_fsnode(lock_first, &funnel_state);
			return (_err);
		}

		/*
		 * Lock both children in vnode address order to avoid deadlocks
		 */
		if (tvp == NULL || tvp == fvp) {
			lock_first = fvp;
			lock_second = NULL;
		} else if (fvp < tvp) {
			lock_first = fvp;
			lock_second = tvp;
		} else {
			lock_first = tvp;
			lock_second = fvp;
		}
		if ( (_err = lock_fsnode(lock_first, NULL)) )
			goto out1;

		if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
			unlock_fsnode(lock_first, NULL);
			goto out1;
		}
	}
	/*
	 * Save source and destination names (._ AppleDouble files).
	 * Skip if source already has a "._" prefix.
	 */
	if (!NATIVE_XATTR(fdvp) &&
	    !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
		size_t len;

		/* Get source attribute file name ("._" + name + NUL). */
		len = fcnp->cn_namelen + 3;
		if (len > sizeof(smallname1)) {
			MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
		} else {
			xfromname = &smallname1[0];
		}
		strcpy(xfromname, "._");
		strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
		xfromname[len-1] = '\0';

		/* Get destination attribute file name. */
		len = tcnp->cn_namelen + 3;
		if (len > sizeof(smallname2)) {
			MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
		} else {
			xtoname = &smallname2[0];
		}
		strcpy(xtoname, "._");
		strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
		xtoname[len-1] = '\0';
	}

	/* The primary rename itself. */
	_err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);

	/* Children can be unlocked now; parents stay held for the shadow work. */
	if (fdvp_unsafe != NULLVP) {
		if (lock_second != NULL)
			unlock_fsnode(lock_second, NULL);
		unlock_fsnode(lock_first, NULL);
	}
	if (_err == 0) {
		if (tvp && tvp != fvp)
		        vnode_setneedinactive(tvp);
	}

	/*
	 * Rename any associated extended attribute file (._ AppleDouble file).
	 * Best-effort: errors below go into 'error', never into _err.
	 */
	if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
		struct nameidata fromnd, tond;
		int killdest = 0;
		int error;

		/*
		 * Get source attribute file vnode.
		 * Note that fdvp already has an iocount reference and
		 * using DELETE will take an additional reference.
		 */
		NDINIT(&fromnd, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
		       CAST_USER_ADDR_T(xfromname), context);
		fromnd.ni_dvp = fdvp;
		error = namei(&fromnd);

		if (error) {
			/* When source doesn't exist there still may be a destination. */
			if (error == ENOENT) {
				killdest = 1;
			} else {
				goto out;
			}
		} else if (fromnd.ni_vp->v_type != VREG) {
			/* Only regular files can be AppleDouble shadows. */
			vnode_put(fromnd.ni_vp);
			nameidone(&fromnd);
			killdest = 1;
		}
		if (killdest) {
			/* No source shadow: just remove any stale destination shadow. */
			struct vnop_remove_args args;

			/*
			 * Get destination attribute file vnode.
			 * Note that tdvp already has an iocount reference.
			 */
			NDINIT(&tond, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE,
			       CAST_USER_ADDR_T(xtoname), context);
			tond.ni_dvp = tdvp;
			error = namei(&tond);
			if (error) {
				goto out;
			}
			if (tond.ni_vp->v_type != VREG) {
				vnode_put(tond.ni_vp);
				nameidone(&tond);
				goto out;
			}
			args.a_desc = &vnop_remove_desc;
			args.a_dvp = tdvp;
			args.a_vp = tond.ni_vp;
			args.a_cnp = &tond.ni_cnd;
			args.a_context = context;

			if (fdvp_unsafe != NULLVP)
				error = lock_fsnode(tond.ni_vp, NULL);
			if (error == 0) {
				error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);

				if (fdvp_unsafe != NULLVP)
					unlock_fsnode(tond.ni_vp, NULL);

				if (error == 0)
					vnode_setneedinactive(tond.ni_vp);
			}
			vnode_put(tond.ni_vp);
			nameidone(&tond);
			goto out;
		}

		/*
		 * Get destination attribute file vnode.
		 */
		NDINIT(&tond, RENAME,
		       NOCACHE | NOFOLLOW | USEDVP, UIO_SYSSPACE,
		       CAST_USER_ADDR_T(xtoname), context);
		tond.ni_dvp = tdvp;
		error = namei(&tond);

		if (error) {
			vnode_put(fromnd.ni_vp);
			nameidone(&fromnd);
			goto out;
		}
		/* Reuse 'a' for the shadow-file rename. */
		a.a_desc = &vnop_rename_desc;
		a.a_fdvp = fdvp;
		a.a_fvp = fromnd.ni_vp;
		a.a_fcnp = &fromnd.ni_cnd;
		a.a_tdvp = tdvp;
		a.a_tvp = tond.ni_vp;
		a.a_tcnp = &tond.ni_cnd;
		a.a_context = context;

		if (fdvp_unsafe != NULLVP) {
			/*
			 * Lock in vnode address order to avoid deadlocks
			 */
			if (tond.ni_vp == NULL || tond.ni_vp == fromnd.ni_vp) {
				lock_first = fromnd.ni_vp;
				lock_second = NULL;
			} else if (fromnd.ni_vp < tond.ni_vp) {
				lock_first = fromnd.ni_vp;
				lock_second = tond.ni_vp;
			} else {
				lock_first = tond.ni_vp;
				lock_second = fromnd.ni_vp;
			}
			if ( (error = lock_fsnode(lock_first, NULL)) == 0) {
				if (lock_second != NULL && (error = lock_fsnode(lock_second, NULL)) )
					unlock_fsnode(lock_first, NULL);
			}
		}
		if (error == 0) {
			error = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);

			if (fdvp_unsafe != NULLVP) {
				if (lock_second != NULL)
					unlock_fsnode(lock_second, NULL);
				unlock_fsnode(lock_first, NULL);
			}
			if (error == 0) {
				vnode_setneedinactive(fromnd.ni_vp);

				if (tond.ni_vp && tond.ni_vp != fromnd.ni_vp)
					vnode_setneedinactive(tond.ni_vp);
			}
		}
		vnode_put(fromnd.ni_vp);
		if (tond.ni_vp) {
			vnode_put(tond.ni_vp);
		}
		nameidone(&tond);
		nameidone(&fromnd);
	}
out:
	/* Free heap-allocated shadow names (stack buffers need no free). */
	if (xfromname && xfromname != &smallname1[0]) {
		FREE(xfromname, M_TEMP);
	}
	if (xtoname && xtoname != &smallname2[0]) {
		FREE(xtoname, M_TEMP);
	}
out1:
	/* Drop the parent locks; the final unlock also releases the funnel. */
	if (fdvp_unsafe != NULLVP) {
		if (tdvp_unsafe != NULLVP)
			unlock_fsnode(tdvp_unsafe, NULL);
		unlock_fsnode(fdvp_unsafe, &funnel_state);
	}
	return (_err);
}
3569
3570 #if 0
3571/*
3572 *#
3573 *#% mkdir dvp L U U
3574 *#% mkdir vpp - L -
3575 *#
3576 */
3577struct vnop_mkdir_args {
3578 struct vnodeop_desc *a_desc;
3579 vnode_t a_dvp;
3580 vnode_t *a_vpp;
3581 struct componentname *a_cnp;
3582 struct vnode_attr *a_vap;
3583 vfs_context_t a_context;
3584};
3585#endif /* 0*/
3586errno_t
3587VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
3588 struct vnode_attr *vap, vfs_context_t context)
3589{
3590 int _err;
3591 struct vnop_mkdir_args a;
3592 int thread_safe;
3593 int funnel_state = 0;
3594
3595 a.a_desc = &vnop_mkdir_desc;
3596 a.a_dvp = dvp;
3597 a.a_vpp = vpp;
3598 a.a_cnp = cnp;
3599 a.a_vap = vap;
3600 a.a_context = context;
3601 thread_safe = THREAD_SAFE_FS(dvp);
3602
3603 if (!thread_safe) {
3604 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3605 return (_err);
3606 }
3607 }
3608 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
3609 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3610 /*
3611 * Remove stale Apple Double file (if any).
3612 */
3613 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
3614 }
3615 if (!thread_safe) {
3616 unlock_fsnode(dvp, &funnel_state);
3617 }
3618 return (_err);
3619}
3620
3621
3622#if 0
3623/*
3624 *#
3625 *#% rmdir dvp L U U
3626 *#% rmdir vp L U U
3627 *#
3628 */
3629struct vnop_rmdir_args {
3630 struct vnodeop_desc *a_desc;
3631 vnode_t a_dvp;
3632 vnode_t a_vp;
3633 struct componentname *a_cnp;
3634 vfs_context_t a_context;
3635};
3636
3637#endif /* 0*/
/*
 * Remove the directory vp (named by cnp) from dvp via the
 * filesystem's rmdir entry point. On success the vnode is marked
 * for deferred inactive processing, and for filesystems without
 * native extended attribute support the associated "._" AppleDouble
 * file is removed as well.
 */
errno_t
VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t context)
{
	int _err;
	struct vnop_rmdir_args a;
	int thread_safe;
	int funnel_state = 0;

	a.a_desc = &vnop_rmdir_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_context = context;
	thread_safe = THREAD_SAFE_FS(dvp);

	if (!thread_safe) {
		/* NOTE(review): thread-safety is judged from dvp but the fsnode
		 * lock is taken on vp (and dispatch goes through vp's op
		 * vector) — presumably both live on the same mount; confirm. */
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);

	if (_err == 0) {
		vnode_setneedinactive(vp);

		if ( !(NATIVE_XATTR(dvp)) ) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1);
		}
	}
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}
3675
/*
 * Remove a "._" AppleDouble shadow file.
 *
 * dvp:         directory to search (its iocount is held by the caller
 *              of the VNOP that invoked us — see note at out1).
 * basename:    name of the primary object; the shadow file is "._<basename>".
 * thread_safe: non-zero if dvp's filesystem is thread safe, in which case
 *              no fsnode locking is needed around the remove entry point.
 * force:       non-zero to delete unconditionally; zero to delete only
 *              when the shadow file looks stale (see below).
 *
 * Best-effort cleanup: all failures are silently ignored.
 */
#define AD_STALE_SECS  (180)
static void
xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, int thread_safe, int force) {
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t len;

	/* Nothing to do for empty names or names already in "._" form. */
	if ((basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>", spilling to a heap buffer if it won't fit. */
	filename = &smallname[0];
	len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (len >= sizeof(smallname)) {
		len++;  /* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
		len = snprintf(filename, len, "._%s", basename);
	}
	/* Look the shadow file up relative to dvp (USEDVP). */
	NDINIT(&nd, DELETE, LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(filename), context);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0)
		goto out2;

	xvp = nd.ni_vp;
	nameidone(&nd);
	/* Only regular files can be AppleDouble shadows. */
	if (xvp->v_type != VREG)
		goto out1;

	/*
	 * When creating a new object and a "._" file already
	 * exists, check to see if its a stale "._" file.
	 *
	 * Stale == non-empty and unmodified for more than AD_STALE_SECS.
	 */
	if (!force) {
		struct vnode_attr va;

		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_data_size);
		VATTR_WANTED(&va, va_modify_time);
		if (VNOP_GETATTR(xvp, &va, context) == 0 &&
		    VATTR_IS_SUPPORTED(&va, va_data_size) &&
		    VATTR_IS_SUPPORTED(&va, va_modify_time) &&
		    va.va_data_size != 0) {
			struct timeval tv;

			microtime(&tv);
			if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
			    (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
				force = 1;  /* must be stale */
			}
		}
	}
	if (force) {
		struct vnop_remove_args a;
		int  error;

		a.a_desc = &vnop_remove_desc;
		a.a_dvp = nd.ni_dvp;
		a.a_vp = xvp;
		a.a_cnp = &nd.ni_cnd;
		a.a_context = context;

		if (!thread_safe) {
			if ( (lock_fsnode(xvp, NULL)) )
				goto out1;
		}
		error = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);

		if (!thread_safe)
			unlock_fsnode(xvp, NULL);

		if (error == 0)
			vnode_setneedinactive(xvp);
	}
out1:
	/* Note: nd.ni_dvp's iocount is dropped by caller of VNOP_XXXX */
	vnode_put(xvp);
out2:
	/* Release the heap copy of the name, if we made one. */
	if (filename && filename != &smallname[0]) {
		FREE(filename, M_TEMP);
	}
}
3764
/*
 * Shadow uid/gid/mode changes onto a "._" AppleDouble file, so that
 * the shadow file's ownership/permissions track the primary object's.
 *
 * dvp:         directory to search (iocount held by our caller's caller).
 * basename:    name of the primary object; shadow file is "._<basename>".
 * vap:         the attribute changes being applied to the primary object.
 * thread_safe: non-zero if dvp's filesystem is thread safe (no fsnode
 *              locking needed around the setattr entry point).
 *
 * Best-effort: all failures are silently ignored.
 */
static void
xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
                  vfs_context_t context, int thread_safe) {
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t len;

	/* Skip null directories, empty names, and names already in "._" form. */
	if ((dvp == NULLVP) ||
	    (basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>", spilling to a heap buffer if it won't fit. */
	filename = &smallname[0];
	len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (len >= sizeof(smallname)) {
		len++;  /* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
		len = snprintf(filename, len, "._%s", basename);
	}
	/* Look the shadow file up relative to dvp (USEDVP). */
	NDINIT(&nd, LOOKUP, NOFOLLOW | USEDVP, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(filename), context);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0)
		goto out2;

	xvp = nd.ni_vp;
	nameidone(&nd);

	/* Only apply the attributes if the shadow is a regular file. */
	if (xvp->v_type == VREG) {
		struct vnop_setattr_args a;

		a.a_desc = &vnop_setattr_desc;
		a.a_vp = xvp;
		a.a_vap = vap;
		a.a_context = context;

		if (!thread_safe) {
			if ( (lock_fsnode(xvp, NULL)) )
				goto out1;
		}
		/* Errors from the filesystem are deliberately ignored. */
		(void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
		if (!thread_safe) {
			unlock_fsnode(xvp, NULL);
		}
	}
out1:
	vnode_put(xvp);
out2:
	/* Release the heap copy of the name, if we made one. */
	if (filename && filename != &smallname[0]) {
		FREE(filename, M_TEMP);
	}
}
3822
#if 0
3824/*
3825 *#
3826 *#% symlink dvp L U U
3827 *#% symlink vpp - U -
3828 *#
3829 */
3830struct vnop_symlink_args {
3831 struct vnodeop_desc *a_desc;
3832 vnode_t a_dvp;
3833 vnode_t *a_vpp;
3834 struct componentname *a_cnp;
3835 struct vnode_attr *a_vap;
3836 char *a_target;
3837 vfs_context_t a_context;
3838};
3839
3840#endif /* 0*/
3841errno_t
3842VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
3843 struct vnode_attr *vap, char *target, vfs_context_t context)
3844{
3845 int _err;
3846 struct vnop_symlink_args a;
3847 int thread_safe;
3848 int funnel_state = 0;
3849
3850 a.a_desc = &vnop_symlink_desc;
3851 a.a_dvp = dvp;
3852 a.a_vpp = vpp;
3853 a.a_cnp = cnp;
3854 a.a_vap = vap;
3855 a.a_target = target;
3856 a.a_context = context;
3857 thread_safe = THREAD_SAFE_FS(dvp);
3858
3859 if (!thread_safe) {
3860 if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
3861 return (_err);
3862 }
3863 }
3864 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
3865 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3866 /*
3867 * Remove stale Apple Double file (if any).
3868 */
3869 xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0);
3870 }
3871 if (!thread_safe) {
3872 unlock_fsnode(dvp, &funnel_state);
3873 }
3874 return (_err);
3875}
3876
3877#if 0
3878/*
3879 *#
3880 *#% readdir vp L L L
3881 *#
3882 */
3883struct vnop_readdir_args {
3884 struct vnodeop_desc *a_desc;
3885 vnode_t a_vp;
3886 struct uio *a_uio;
3887 int a_flags;
3888 int *a_eofflag;
3889 int *a_numdirent;
3890 vfs_context_t a_context;
3891};
3892
3893#endif /* 0*/
3894errno_t
3895VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
3896 int *numdirent, vfs_context_t context)
3897{
3898 int _err;
3899 struct vnop_readdir_args a;
3900 int thread_safe;
3901 int funnel_state = 0;
3902
3903 a.a_desc = &vnop_readdir_desc;
3904 a.a_vp = vp;
3905 a.a_uio = uio;
3906 a.a_flags = flags;
3907 a.a_eofflag = eofflag;
3908 a.a_numdirent = numdirent;
3909 a.a_context = context;
3910 thread_safe = THREAD_SAFE_FS(vp);
3911
3912 if (!thread_safe) {
3913 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3914 return (_err);
3915 }
3916 }
3917 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
3918 if (!thread_safe) {
3919 unlock_fsnode(vp, &funnel_state);
3920 }
3921 return (_err);
3922}
3923
3924#if 0
3925/*
3926 *#
3927 *#% readdirattr vp L L L
3928 *#
3929 */
3930struct vnop_readdirattr_args {
3931 struct vnodeop_desc *a_desc;
3932 vnode_t a_vp;
3933 struct attrlist *a_alist;
3934 struct uio *a_uio;
3935 u_long a_maxcount;
3936 u_long a_options;
3937 u_long *a_newstate;
3938 int *a_eofflag;
3939 u_long *a_actualcount;
3940 vfs_context_t a_context;
3941};
3942
3943#endif /* 0*/
3944errno_t
3945VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_long maxcount,
3946 u_long options, u_long *newstate, int *eofflag, u_long *actualcount, vfs_context_t context)
3947{
3948 int _err;
3949 struct vnop_readdirattr_args a;
3950 int thread_safe;
3951 int funnel_state = 0;
3952
3953 a.a_desc = &vnop_readdirattr_desc;
3954 a.a_vp = vp;
3955 a.a_alist = alist;
3956 a.a_uio = uio;
3957 a.a_maxcount = maxcount;
3958 a.a_options = options;
3959 a.a_newstate = newstate;
3960 a.a_eofflag = eofflag;
3961 a.a_actualcount = actualcount;
3962 a.a_context = context;
3963 thread_safe = THREAD_SAFE_FS(vp);
3964
3965 if (!thread_safe) {
3966 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
3967 return (_err);
3968 }
3969 }
3970 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
3971 if (!thread_safe) {
3972 unlock_fsnode(vp, &funnel_state);
3973 }
3974 return (_err);
3975}
3976
3977#if 0
3978/*
3979 *#
3980 *#% readlink vp L L L
3981 *#
3982 */
3983struct vnop_readlink_args {
3984 struct vnodeop_desc *a_desc;
3985 vnode_t a_vp;
3986 struct uio *a_uio;
3987 vfs_context_t a_context;
3988};
3989#endif /* 0 */
3990
3991errno_t
3992VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t context)
3993{
3994 int _err;
3995 struct vnop_readlink_args a;
3996 int thread_safe;
3997 int funnel_state = 0;
3998
3999 a.a_desc = &vnop_readlink_desc;
4000 a.a_vp = vp;
4001 a.a_uio = uio;
4002 a.a_context = context;
4003 thread_safe = THREAD_SAFE_FS(vp);
4004
4005 if (!thread_safe) {
4006 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4007 return (_err);
4008 }
4009 }
4010 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
4011 if (!thread_safe) {
4012 unlock_fsnode(vp, &funnel_state);
4013 }
4014 return (_err);
4015}
4016
4017#if 0
4018/*
4019 *#
4020 *#% inactive vp L U U
4021 *#
4022 */
4023struct vnop_inactive_args {
4024 struct vnodeop_desc *a_desc;
4025 vnode_t a_vp;
4026 vfs_context_t a_context;
4027};
4028#endif /* 0*/
4029errno_t
4030VNOP_INACTIVE(struct vnode *vp, vfs_context_t context)
4031{
4032 int _err;
4033 struct vnop_inactive_args a;
4034 int thread_safe;
4035 int funnel_state = 0;
4036
4037 a.a_desc = &vnop_inactive_desc;
4038 a.a_vp = vp;
4039 a.a_context = context;
4040 thread_safe = THREAD_SAFE_FS(vp);
4041
4042 if (!thread_safe) {
4043 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4044 return (_err);
4045 }
4046 }
4047 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
4048 if (!thread_safe) {
4049 unlock_fsnode(vp, &funnel_state);
4050 }
4051 return (_err);
4052}
4053
4054
4055#if 0
4056/*
4057 *#
4058 *#% reclaim vp U U U
4059 *#
4060 */
4061struct vnop_reclaim_args {
4062 struct vnodeop_desc *a_desc;
4063 vnode_t a_vp;
4064 vfs_context_t a_context;
4065};
4066#endif /* 0*/
4067errno_t
4068VNOP_RECLAIM(struct vnode *vp, vfs_context_t context)
4069{
4070 int _err;
4071 struct vnop_reclaim_args a;
4072 int thread_safe;
4073 int funnel_state = 0;
4074
4075 a.a_desc = &vnop_reclaim_desc;
4076 a.a_vp = vp;
4077 a.a_context = context;
4078 thread_safe = THREAD_SAFE_FS(vp);
4079
4080 if (!thread_safe) {
4081 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4082 }
4083 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
4084 if (!thread_safe) {
4085 (void) thread_funnel_set(kernel_flock, funnel_state);
4086 }
4087 return (_err);
4088}
4089
4090
4091#if 0
4092/*
4093 *#
4094 *#% pathconf vp L L L
4095 *#
4096 */
4097struct vnop_pathconf_args {
4098 struct vnodeop_desc *a_desc;
4099 vnode_t a_vp;
4100 int a_name;
4101 register_t *a_retval;
4102 vfs_context_t a_context;
4103};
4104#endif /* 0*/
4105errno_t
4106VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t context)
4107{
4108 int _err;
4109 struct vnop_pathconf_args a;
4110 int thread_safe;
4111 int funnel_state = 0;
4112
4113 a.a_desc = &vnop_pathconf_desc;
4114 a.a_vp = vp;
4115 a.a_name = name;
4116 a.a_retval = retval;
4117 a.a_context = context;
4118 thread_safe = THREAD_SAFE_FS(vp);
4119
4120 if (!thread_safe) {
4121 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4122 return (_err);
4123 }
4124 }
4125 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
4126 if (!thread_safe) {
4127 unlock_fsnode(vp, &funnel_state);
4128 }
4129 return (_err);
4130}
4131
4132#if 0
4133/*
4134 *#
4135 *#% advlock vp U U U
4136 *#
4137 */
4138struct vnop_advlock_args {
4139 struct vnodeop_desc *a_desc;
4140 vnode_t a_vp;
4141 caddr_t a_id;
4142 int a_op;
4143 struct flock *a_fl;
4144 int a_flags;
4145 vfs_context_t a_context;
4146};
4147#endif /* 0*/
4148errno_t
4149VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t context)
4150{
4151 int _err;
4152 struct vnop_advlock_args a;
4153 int thread_safe;
4154 int funnel_state = 0;
4155 struct uthread * uth;
4156
4157 a.a_desc = &vnop_advlock_desc;
4158 a.a_vp = vp;
4159 a.a_id = id;
4160 a.a_op = op;
4161 a.a_fl = fl;
4162 a.a_flags = flags;
4163 a.a_context = context;
4164 thread_safe = THREAD_SAFE_FS(vp);
4165
4166 uth = get_bsdthread_info(current_thread());
4167 if (!thread_safe) {
4168 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4169 }
4170 /* Disallow advisory locking on non-seekable vnodes */
4171 if (vnode_isfifo(vp)) {
4172 _err = err_advlock(&a);
4173 } else {
4174 if ((vp->v_flag & VLOCKLOCAL)) {
4175 /* Advisory locking done at this layer */
4176 _err = lf_advlock(&a);
4177 } else {
4178 /* Advisory locking done by underlying filesystem */
4179 _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
4180 }
4181 }
4182 if (!thread_safe) {
4183 (void) thread_funnel_set(kernel_flock, funnel_state);
4184 }
4185 return (_err);
4186}
4187
4188
4189
4190#if 0
4191/*
4192 *#
4193 *#% allocate vp L L L
4194 *#
4195 */
4196struct vnop_allocate_args {
4197 struct vnodeop_desc *a_desc;
4198 vnode_t a_vp;
4199 off_t a_length;
4200 u_int32_t a_flags;
4201 off_t *a_bytesallocated;
4202 off_t a_offset;
4203 vfs_context_t a_context;
4204};
4205
4206#endif /* 0*/
4207errno_t
4208VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t context)
4209{
4210 int _err;
4211 struct vnop_allocate_args a;
4212 int thread_safe;
4213 int funnel_state = 0;
4214
4215 a.a_desc = &vnop_allocate_desc;
4216 a.a_vp = vp;
4217 a.a_length = length;
4218 a.a_flags = flags;
4219 a.a_bytesallocated = bytesallocated;
4220 a.a_offset = offset;
4221 a.a_context = context;
4222 thread_safe = THREAD_SAFE_FS(vp);
4223
4224 if (!thread_safe) {
4225 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4226 return (_err);
4227 }
4228 }
4229 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
4230 if (!thread_safe) {
4231 unlock_fsnode(vp, &funnel_state);
4232 }
4233 return (_err);
4234}
4235
4236#if 0
4237/*
4238 *#
4239 *#% pagein vp = = =
4240 *#
4241 */
4242struct vnop_pagein_args {
4243 struct vnodeop_desc *a_desc;
4244 vnode_t a_vp;
4245 upl_t a_pl;
4246 vm_offset_t a_pl_offset;
4247 off_t a_f_offset;
4248 size_t a_size;
4249 int a_flags;
4250 vfs_context_t a_context;
4251};
4252#endif /* 0*/
4253errno_t
4254VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4255{
4256 int _err;
4257 struct vnop_pagein_args a;
4258 int thread_safe;
4259 int funnel_state = 0;
4260
4261 a.a_desc = &vnop_pagein_desc;
4262 a.a_vp = vp;
4263 a.a_pl = pl;
4264 a.a_pl_offset = pl_offset;
4265 a.a_f_offset = f_offset;
4266 a.a_size = size;
4267 a.a_flags = flags;
4268 a.a_context = context;
4269 thread_safe = THREAD_SAFE_FS(vp);
4270
4271 if (!thread_safe) {
4272 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4273 }
4274 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
4275 if (!thread_safe) {
4276 (void) thread_funnel_set(kernel_flock, funnel_state);
4277 }
4278 return (_err);
4279}
4280
4281#if 0
4282/*
4283 *#
4284 *#% pageout vp = = =
4285 *#
4286 */
4287struct vnop_pageout_args {
4288 struct vnodeop_desc *a_desc;
4289 vnode_t a_vp;
4290 upl_t a_pl;
4291 vm_offset_t a_pl_offset;
4292 off_t a_f_offset;
4293 size_t a_size;
4294 int a_flags;
4295 vfs_context_t a_context;
4296};
4297
4298#endif /* 0*/
4299errno_t
4300VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context)
4301{
4302 int _err;
4303 struct vnop_pageout_args a;
4304 int thread_safe;
4305 int funnel_state = 0;
4306
4307 a.a_desc = &vnop_pageout_desc;
4308 a.a_vp = vp;
4309 a.a_pl = pl;
4310 a.a_pl_offset = pl_offset;
4311 a.a_f_offset = f_offset;
4312 a.a_size = size;
4313 a.a_flags = flags;
4314 a.a_context = context;
4315 thread_safe = THREAD_SAFE_FS(vp);
4316
4317 if (!thread_safe) {
4318 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4319 }
4320 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
4321 if (!thread_safe) {
4322 (void) thread_funnel_set(kernel_flock, funnel_state);
4323 }
4324 return (_err);
4325}
4326
4327
4328#if 0
4329/*
4330 *#
4331 *#% searchfs vp L L L
4332 *#
4333 */
4334struct vnop_searchfs_args {
4335 struct vnodeop_desc *a_desc;
4336 vnode_t a_vp;
4337 void *a_searchparams1;
4338 void *a_searchparams2;
4339 struct attrlist *a_searchattrs;
4340 u_long a_maxmatches;
4341 struct timeval *a_timelimit;
4342 struct attrlist *a_returnattrs;
4343 u_long *a_nummatches;
4344 u_long a_scriptcode;
4345 u_long a_options;
4346 struct uio *a_uio;
4347 struct searchstate *a_searchstate;
4348 vfs_context_t a_context;
4349};
4350
4351#endif /* 0*/
4352errno_t
4353VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, u_long maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, u_long *nummatches, u_long scriptcode, u_long options, struct uio *uio, struct searchstate *searchstate, vfs_context_t context)
4354{
4355 int _err;
4356 struct vnop_searchfs_args a;
4357 int thread_safe;
4358 int funnel_state = 0;
4359
4360 a.a_desc = &vnop_searchfs_desc;
4361 a.a_vp = vp;
4362 a.a_searchparams1 = searchparams1;
4363 a.a_searchparams2 = searchparams2;
4364 a.a_searchattrs = searchattrs;
4365 a.a_maxmatches = maxmatches;
4366 a.a_timelimit = timelimit;
4367 a.a_returnattrs = returnattrs;
4368 a.a_nummatches = nummatches;
4369 a.a_scriptcode = scriptcode;
4370 a.a_options = options;
4371 a.a_uio = uio;
4372 a.a_searchstate = searchstate;
4373 a.a_context = context;
4374 thread_safe = THREAD_SAFE_FS(vp);
4375
4376 if (!thread_safe) {
4377 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4378 return (_err);
4379 }
4380 }
4381 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
4382 if (!thread_safe) {
4383 unlock_fsnode(vp, &funnel_state);
4384 }
4385 return (_err);
4386}
4387
4388#if 0
4389/*
4390 *#
4391 *#% copyfile fvp U U U
4392 *#% copyfile tdvp L U U
4393 *#% copyfile tvp X U U
4394 *#
4395 */
4396struct vnop_copyfile_args {
4397 struct vnodeop_desc *a_desc;
4398 vnode_t a_fvp;
4399 vnode_t a_tdvp;
4400 vnode_t a_tvp;
4401 struct componentname *a_tcnp;
4402 int a_mode;
4403 int a_flags;
4404 vfs_context_t a_context;
4405};
4406#endif /* 0*/
4407errno_t
4408VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4409 int mode, int flags, vfs_context_t context)
4410{
4411 int _err;
4412 struct vnop_copyfile_args a;
4413 a.a_desc = &vnop_copyfile_desc;
4414 a.a_fvp = fvp;
4415 a.a_tdvp = tdvp;
4416 a.a_tvp = tvp;
4417 a.a_tcnp = tcnp;
4418 a.a_mode = mode;
4419 a.a_flags = flags;
4420 a.a_context = context;
4421 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
4422 return (_err);
4423}
4424
4425
4426errno_t
4427VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t context)
4428{
4429 struct vnop_getxattr_args a;
4430 int error;
4431 int thread_safe;
4432 int funnel_state = 0;
4433
4434 a.a_desc = &vnop_getxattr_desc;
4435 a.a_vp = vp;
4436 a.a_name = name;
4437 a.a_uio = uio;
4438 a.a_size = size;
4439 a.a_options = options;
4440 a.a_context = context;
4441
4442 thread_safe = THREAD_SAFE_FS(vp);
4443 if (!thread_safe) {
4444 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4445 return (error);
4446 }
4447 }
4448 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
4449 if (!thread_safe) {
4450 unlock_fsnode(vp, &funnel_state);
4451 }
4452 return (error);
4453}
4454
4455errno_t
4456VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t context)
4457{
4458 struct vnop_setxattr_args a;
4459 int error;
4460 int thread_safe;
4461 int funnel_state = 0;
4462
4463 a.a_desc = &vnop_setxattr_desc;
4464 a.a_vp = vp;
4465 a.a_name = name;
4466 a.a_uio = uio;
4467 a.a_options = options;
4468 a.a_context = context;
4469
4470 thread_safe = THREAD_SAFE_FS(vp);
4471 if (!thread_safe) {
4472 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4473 return (error);
4474 }
4475 }
4476 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
4477 if (!thread_safe) {
4478 unlock_fsnode(vp, &funnel_state);
4479 }
4480 return (error);
4481}
4482
4483errno_t
4484VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t context)
4485{
4486 struct vnop_removexattr_args a;
4487 int error;
4488 int thread_safe;
4489 int funnel_state = 0;
4490
4491 a.a_desc = &vnop_removexattr_desc;
4492 a.a_vp = vp;
4493 a.a_name = name;
4494 a.a_options = options;
4495 a.a_context = context;
4496
4497 thread_safe = THREAD_SAFE_FS(vp);
4498 if (!thread_safe) {
4499 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4500 return (error);
4501 }
4502 }
4503 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
4504 if (!thread_safe) {
4505 unlock_fsnode(vp, &funnel_state);
4506 }
4507 return (error);
4508}
4509
4510errno_t
4511VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t context)
4512{
4513 struct vnop_listxattr_args a;
4514 int error;
4515 int thread_safe;
4516 int funnel_state = 0;
4517
4518 a.a_desc = &vnop_listxattr_desc;
4519 a.a_vp = vp;
4520 a.a_uio = uio;
4521 a.a_size = size;
4522 a.a_options = options;
4523 a.a_context = context;
4524
4525 thread_safe = THREAD_SAFE_FS(vp);
4526 if (!thread_safe) {
4527 if ( (error = lock_fsnode(vp, &funnel_state)) ) {
4528 return (error);
4529 }
4530 }
4531 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
4532 if (!thread_safe) {
4533 unlock_fsnode(vp, &funnel_state);
4534 }
4535 return (error);
4536}
4537
4538
4539#if 0
4540/*
4541 *#
4542 *#% blktooff vp = = =
4543 *#
4544 */
4545struct vnop_blktooff_args {
4546 struct vnodeop_desc *a_desc;
4547 vnode_t a_vp;
4548 daddr64_t a_lblkno;
4549 off_t *a_offset;
4550};
4551#endif /* 0*/
4552errno_t
4553VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
4554{
4555 int _err;
4556 struct vnop_blktooff_args a;
4557 int thread_safe;
4558 int funnel_state = 0;
4559
4560 a.a_desc = &vnop_blktooff_desc;
4561 a.a_vp = vp;
4562 a.a_lblkno = lblkno;
4563 a.a_offset = offset;
4564 thread_safe = THREAD_SAFE_FS(vp);
4565
4566 if (!thread_safe) {
4567 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4568 }
4569 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
4570 if (!thread_safe) {
4571 (void) thread_funnel_set(kernel_flock, funnel_state);
4572 }
4573 return (_err);
4574}
4575
4576#if 0
4577/*
4578 *#
4579 *#% offtoblk vp = = =
4580 *#
4581 */
4582struct vnop_offtoblk_args {
4583 struct vnodeop_desc *a_desc;
4584 vnode_t a_vp;
4585 off_t a_offset;
4586 daddr64_t *a_lblkno;
4587};
4588#endif /* 0*/
4589errno_t
4590VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
4591{
4592 int _err;
4593 struct vnop_offtoblk_args a;
4594 int thread_safe;
4595 int funnel_state = 0;
4596
4597 a.a_desc = &vnop_offtoblk_desc;
4598 a.a_vp = vp;
4599 a.a_offset = offset;
4600 a.a_lblkno = lblkno;
4601 thread_safe = THREAD_SAFE_FS(vp);
4602
4603 if (!thread_safe) {
4604 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4605 }
4606 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
4607 if (!thread_safe) {
4608 (void) thread_funnel_set(kernel_flock, funnel_state);
4609 }
4610 return (_err);
4611}
4612
4613#if 0
4614/*
4615 *#
4616 *#% blockmap vp L L L
4617 *#
4618 */
4619struct vnop_blockmap_args {
4620 struct vnodeop_desc *a_desc;
4621 vnode_t a_vp;
4622 off_t a_foffset;
4623 size_t a_size;
4624 daddr64_t *a_bpn;
4625 size_t *a_run;
4626 void *a_poff;
4627 int a_flags;
4628 vfs_context_t a_context;
4629};
4630#endif /* 0*/
4631errno_t
4632VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t context)
4633{
4634 int _err;
4635 struct vnop_blockmap_args a;
4636 int thread_safe;
4637 int funnel_state = 0;
4638 struct vfs_context acontext;
4639
4640 if (context == NULL) {
4641 acontext.vc_proc = current_proc();
4642 acontext.vc_ucred = kauth_cred_get();
4643 context = &acontext;
4644 }
4645 a.a_desc = &vnop_blockmap_desc;
4646 a.a_vp = vp;
4647 a.a_foffset = foffset;
4648 a.a_size = size;
4649 a.a_bpn = bpn;
4650 a.a_run = run;
4651 a.a_poff = poff;
4652 a.a_flags = flags;
4653 a.a_context = context;
4654 thread_safe = THREAD_SAFE_FS(vp);
4655
4656 if (!thread_safe) {
4657 funnel_state = thread_funnel_set(kernel_flock, TRUE);
4658 }
4659 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
4660 if (!thread_safe) {
4661 (void) thread_funnel_set(kernel_flock, funnel_state);
4662 }
4663 return (_err);
4664}
4665
4666#if 0
4667struct vnop_strategy_args {
4668 struct vnodeop_desc *a_desc;
4669 struct buf *a_bp;
4670};
4671
4672#endif /* 0*/
4673errno_t
4674VNOP_STRATEGY(struct buf *bp)
4675{
4676 int _err;
4677 struct vnop_strategy_args a;
4678 a.a_desc = &vnop_strategy_desc;
4679 a.a_bp = bp;
4680 _err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
4681 return (_err);
4682}
4683
4684#if 0
4685struct vnop_bwrite_args {
4686 struct vnodeop_desc *a_desc;
4687 buf_t a_bp;
4688};
4689#endif /* 0*/
4690errno_t
4691VNOP_BWRITE(struct buf *bp)
4692{
4693 int _err;
4694 struct vnop_bwrite_args a;
4695 a.a_desc = &vnop_bwrite_desc;
4696 a.a_bp = bp;
4697 _err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
4698 return (_err);
4699}
4700
4701#if 0
4702struct vnop_kqfilt_add_args {
4703 struct vnodeop_desc *a_desc;
4704 struct vnode *a_vp;
4705 struct knote *a_kn;
4706 vfs_context_t a_context;
4707};
4708#endif
4709errno_t
4710VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t context)
4711{
4712 int _err;
4713 struct vnop_kqfilt_add_args a;
4714 int thread_safe;
4715 int funnel_state = 0;
4716
4717 a.a_desc = VDESC(vnop_kqfilt_add);
4718 a.a_vp = vp;
4719 a.a_kn = kn;
4720 a.a_context = context;
4721 thread_safe = THREAD_SAFE_FS(vp);
4722
4723 if (!thread_safe) {
4724 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4725 return (_err);
4726 }
4727 }
4728 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
4729 if (!thread_safe) {
4730 unlock_fsnode(vp, &funnel_state);
4731 }
4732 return(_err);
4733}
4734
4735#if 0
4736struct vnop_kqfilt_remove_args {
4737 struct vnodeop_desc *a_desc;
4738 struct vnode *a_vp;
4739 uintptr_t a_ident;
4740 vfs_context_t a_context;
4741};
4742#endif
4743errno_t
4744VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t context)
4745{
4746 int _err;
4747 struct vnop_kqfilt_remove_args a;
4748 int thread_safe;
4749 int funnel_state = 0;
4750
4751 a.a_desc = VDESC(vnop_kqfilt_remove);
4752 a.a_vp = vp;
4753 a.a_ident = ident;
4754 a.a_context = context;
4755 thread_safe = THREAD_SAFE_FS(vp);
4756
4757 if (!thread_safe) {
4758 if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
4759 return (_err);
4760 }
4761 }
4762 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
4763 if (!thread_safe) {
4764 unlock_fsnode(vp, &funnel_state);
4765 }
4766 return(_err);
4767}
4768